v4.19.13 snapshot.
diff --git a/drivers/net/wireless/intel/Kconfig b/drivers/net/wireless/intel/Kconfig
new file mode 100644
index 0000000..6fdc14b
--- /dev/null
+++ b/drivers/net/wireless/intel/Kconfig
@@ -0,0 +1,18 @@
+config WLAN_VENDOR_INTEL
+	bool "Intel devices"
+	default y
+	---help---
+	  If you have a wireless card belonging to this class, say Y.
+
+	  Note that the answer to this question doesn't directly affect the
+	  kernel: saying N will just cause the configurator to skip all the
+	  questions about these cards. If you say Y, you will be asked for
+	  your specific card in the following questions.
+
+if WLAN_VENDOR_INTEL
+
+source "drivers/net/wireless/intel/ipw2x00/Kconfig"
+source "drivers/net/wireless/intel/iwlegacy/Kconfig"
+source "drivers/net/wireless/intel/iwlwifi/Kconfig"
+
+endif # WLAN_VENDOR_INTEL
diff --git a/drivers/net/wireless/intel/Makefile b/drivers/net/wireless/intel/Makefile
new file mode 100644
index 0000000..c9cbcc8
--- /dev/null
+++ b/drivers/net/wireless/intel/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_IPW2100) += ipw2x00/
+obj-$(CONFIG_IPW2200) += ipw2x00/
+
+obj-$(CONFIG_IWLEGACY)	+= iwlegacy/
+
+obj-$(CONFIG_IWLWIFI)	+= iwlwifi/
diff --git a/drivers/net/wireless/intel/ipw2x00/Kconfig b/drivers/net/wireless/intel/ipw2x00/Kconfig
new file mode 100644
index 0000000..d6ec44d
--- /dev/null
+++ b/drivers/net/wireless/intel/ipw2x00/Kconfig
@@ -0,0 +1,198 @@
+#
+# Intel Centrino wireless drivers
+#
+
+config IPW2100
+	tristate "Intel PRO/Wireless 2100 Network Connection"
+	depends on PCI && CFG80211
+	select WIRELESS_EXT
+	select WEXT_SPY
+	select WEXT_PRIV
+	select FW_LOADER
+	select LIB80211
+	select LIBIPW
+	---help---
+	  A driver for the Intel PRO/Wireless 2100 Network
+	  Connection 802.11b wireless network adapter.
+
+	  See <file:Documentation/networking/README.ipw2100> for information on
+	  the capabilities currently enabled in this driver and for tips
+	  for debugging issues and problems.
+
+	  In order to use this driver, you will need a firmware image for it.
+	  You can obtain the firmware from
+	  <http://ipw2100.sf.net/>.  Once you have the firmware image, you
+	  will need to place it in /lib/firmware.
+
+	  You will also very likely need the Wireless Tools in order to
+	  configure your card:
+
+	  <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
+
+	  It is recommended that you compile this driver as a module (M)
+	  rather than built-in (Y). This driver requires firmware at device
+	  initialization time, and when built-in this typically happens
+	  before the filesystem is accessible (hence firmware will be
+	  unavailable and initialization will fail). If you do choose to build
+	  this driver into your kernel image, you can avoid this problem by
+	  including the firmware and a firmware loader in an initramfs.
+
+config IPW2100_MONITOR
+	bool "Enable promiscuous mode"
+	depends on IPW2100
+	---help---
+	  Enables promiscuous/monitor mode support for the ipw2100 driver.
+	  With this feature compiled into the driver, you can switch to
+	  promiscuous mode via the Wireless Tools' Monitor mode.  While in
+	  this mode, no packets can be sent.
+
+config IPW2100_DEBUG
+	bool "Enable full debugging output in IPW2100 module."
+	depends on IPW2100
+	---help---
+	  This option will enable debug tracing output for the IPW2100.
+
+	  This will result in the kernel module being ~60k larger.  You can
+	  control which debug output is sent to the kernel log by setting the
+	  value in
+
+	  /sys/bus/pci/drivers/ipw2100/debug_level
+
+	  This entry will only exist if this option is enabled.
+
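+	  For example, to enable all debug messages (the individual bit
+	  values are the IPW_DL_* masks defined in ipw2100.h):
+
+	  % echo 0xFFFFFFFF > /sys/bus/pci/drivers/ipw2100/debug_level
+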
+	  If you are not trying to debug or develop the IPW2100 driver, you
+	  most likely want to say N here.
+
+config IPW2200
+	tristate "Intel PRO/Wireless 2200BG and 2915ABG Network Connection"
+	depends on PCI && CFG80211
+	select CFG80211_WEXT_EXPORT
+	select WIRELESS_EXT
+	select WEXT_SPY
+	select WEXT_PRIV
+	select FW_LOADER
+	select LIB80211
+	select LIBIPW
+	---help---
+	  A driver for the Intel PRO/Wireless 2200BG and 2915ABG Network
+	  Connection adapters.
+
+	  See <file:Documentation/networking/README.ipw2200> for
+	  information on the capabilities currently enabled in this
+	  driver and for tips for debugging issues and problems.
+
+	  In order to use this driver, you will need a firmware image for it.
+	  You can obtain the firmware from
+	  <http://ipw2200.sf.net/>.  See the above referenced README.ipw2200
+	  for information on where to install the firmware images.
+
+	  You will also very likely need the Wireless Tools in order to
+	  configure your card:
+
+	  <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
+
+	  It is recommended that you compile this driver as a module (M)
+	  rather than built-in (Y). This driver requires firmware at device
+	  initialization time, and when built-in this typically happens
+	  before the filesystem is accessible (hence firmware will be
+	  unavailable and initialization will fail). If you do choose to build
+	  this driver into your kernel image, you can avoid this problem by
+	  including the firmware and a firmware loader in an initramfs.
+
+config IPW2200_MONITOR
+	bool "Enable promiscuous mode"
+	depends on IPW2200
+	---help---
+	  Enables promiscuous/monitor mode support for the ipw2200 driver.
+	  With this feature compiled into the driver, you can switch to
+	  promiscuous mode via the Wireless Tools' Monitor mode.  While in
+	  this mode, no packets can be sent.
+
+config IPW2200_RADIOTAP
+	bool "Enable radiotap format 802.11 raw packet support"
+	depends on IPW2200_MONITOR
+
+config IPW2200_PROMISCUOUS
+	bool "Enable creation of a RF radiotap promiscuous interface"
+	depends on IPW2200_MONITOR
+	select IPW2200_RADIOTAP
+	---help---
+	  Enables the creation of a second interface prefixed 'rtap'.
+	  This second interface will provide every received frame in
+	  radiotap format.
+
+	  This is useful for performing wireless network analysis while
+	  maintaining an active association.
+
+	  Example usage:
+
+	    % modprobe ipw2200 rtap_iface=1
+	    % ifconfig rtap0 up
+	    % tethereal -i rtap0
+
+	  If you do not specify 'rtap_iface=1' as a module parameter then
+	  the rtap interface will not be created and you will need to turn
+	  it on via sysfs:
+
+	    % echo 1 > /sys/bus/pci/drivers/ipw2200/*/rtap_iface
+
+config IPW2200_QOS
+	bool "Enable QoS support"
+	depends on IPW2200
+
+config IPW2200_DEBUG
+	bool "Enable full debugging output in IPW2200 module."
+	depends on IPW2200
+	---help---
+	  This option will enable low level debug tracing output for IPW2200.
+
+	  Note, normal debug code is already compiled in. This low level
+	  debug option enables debug on hot paths (e.g. Tx, Rx, ISR) and
+	  will result in the kernel module being ~70k larger.  Most users
+	  will typically not need this high verbosity debug information.
+
+	  If you are not sure, say N here.
+
+config LIBIPW
+	tristate
+	depends on PCI && CFG80211
+	select WIRELESS_EXT
+	select WEXT_SPY
+	select CRYPTO
+	select CRYPTO_ARC4
+	select CRYPTO_ECB
+	select CRYPTO_AES
+	select CRYPTO_MICHAEL_MIC
+	select CRC32
+	select LIB80211
+	select LIB80211_CRYPT_WEP
+	select LIB80211_CRYPT_TKIP
+	select LIB80211_CRYPT_CCMP
+	---help---
+	  This option enables the hardware independent IEEE 802.11
+	  networking stack.  This component is deprecated in favor of the
+	  mac80211 component.
+
+config LIBIPW_DEBUG
+	bool "Full debugging output for the LIBIPW component"
+	depends on LIBIPW
+	---help---
+	  This option will enable debug tracing output for the
+	  libipw component.
+
+	  This will result in the kernel module being ~70k larger.  You
+	  can control which debug output is sent to the kernel log by
+	  setting the value in
+
+	  /proc/net/ieee80211/debug_level
+
+	  For example:
+
+	  % echo 0x00000FF0 > /proc/net/ieee80211/debug_level
+
+	  For a list of values you can assign to debug_level, you
+	  can look at the bit mask values in libipw.h
+
+	  If you are not trying to debug or develop the libipw
+	  component, you most likely want to say N here.
diff --git a/drivers/net/wireless/intel/ipw2x00/Makefile b/drivers/net/wireless/intel/ipw2x00/Makefile
new file mode 100644
index 0000000..e1ec503
--- /dev/null
+++ b/drivers/net/wireless/intel/ipw2x00/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Intel Centrino wireless drivers
+#
+
+obj-$(CONFIG_IPW2100) += ipw2100.o
+obj-$(CONFIG_IPW2200) += ipw2200.o
+
+obj-$(CONFIG_LIBIPW) += libipw.o
+libipw-objs := \
+	libipw_module.o \
+	libipw_tx.o \
+	libipw_rx.o \
+	libipw_wx.o \
+	libipw_geo.o
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw.h b/drivers/net/wireless/intel/ipw2x00/ipw.h
new file mode 100644
index 0000000..4007bf5
--- /dev/null
+++ b/drivers/net/wireless/intel/ipw2x00/ipw.h
@@ -0,0 +1,23 @@
+/*
+ * Intel Pro/Wireless 2100, 2200BG, 2915ABG network connection driver
+ *
+ * Copyright 2012 Stanislav Yakovlev <stas.yakovlev@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __IPW_H__
+#define __IPW_H__
+
+#include <linux/ieee80211.h>
+
+static const u32 ipw_cipher_suites[] = {
+	WLAN_CIPHER_SUITE_WEP40,
+	WLAN_CIPHER_SUITE_WEP104,
+	WLAN_CIPHER_SUITE_TKIP,
+	WLAN_CIPHER_SUITE_CCMP,
+};
+
+#endif
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
new file mode 100644
index 0000000..910db46
--- /dev/null
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -0,0 +1,8638 @@
+/******************************************************************************
+
+  Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+
+  The full GNU General Public License is included in this distribution in the
+  file called LICENSE.
+
+  Contact Information:
+  Intel Linux Wireless <ilw@linux.intel.com>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+  Portions of this file are based on the sample_* files provided by Wireless
+  Extensions 0.26 package and copyright (c) 1997-2003 Jean Tourrilhes
+  <jt@hpl.hp.com>
+
+  Portions of this file are based on the Host AP project,
+  Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+    <j@w1.fi>
+  Copyright (c) 2002-2003, Jouni Malinen <j@w1.fi>
+
+  Portions of ipw2100_mod_firmware_load, ipw2100_do_mod_firmware_load, and
+  ipw2100_fw_load are loosely based on drivers/sound/sound_firmware.c
+  available in the 2.4.25 kernel sources, and are copyright (c) Alan Cox
+
+******************************************************************************/
+/*
+
+ Initial driver on which this is based was developed by Janusz Gorycki,
+ Maciej Urbaniak, and Maciej Sosnowski.
+
+ Promiscuous mode support added by Jacek Wysoczynski and Maciej Urbaniak.
+
+Theory of Operation
+
+Tx - Commands and Data
+
+Firmware and host share a circular queue of Transmit Buffer Descriptors (TBDs).
+Each TBD contains a pointer to the physical (dma_addr_t) address of data being
+sent to the firmware as well as the length of the data.
+
+The host writes to the TBD queue at the WRITE index.  The WRITE index points
+to the _next_ packet to be written and is advanced once the TBD has been
+filled.
+
+The firmware pulls from the TBD queue at the READ index.  The READ index
+points to the entry currently being read, and is advanced once the firmware
+is done with a packet.
+
+When data is sent to the firmware, the first TBD is used to indicate to the
+firmware whether a Command or Data is being sent.  If it is a Command, all of
+the command information is contained within the physical address referred to
+by the TBD.  If it is Data, the first TBD indicates the type of data packet,
+number of fragments, etc.  The next TBD then refers to the actual packet
+location.
+
+The Tx flow cycle is as follows:
+
+1) ipw2100_tx() is called by kernel with SKB to transmit
+2) Packet is moved from the tx_free_list and appended to the transmit pending
+   list (tx_pend_list)
+3) work is scheduled to move pending packets into the shared circular queue.
+4) when placing packet in the circular queue, the incoming SKB is DMA mapped
+   to a physical address.  That address is entered into a TBD.  Two TBDs are
+   filled out.  The first indicating a data packet, the second referring to the
+   actual payload data.
+5) the packet is removed from tx_pend_list and placed on the end of the
+   firmware pending list (fw_pend_list)
+6) firmware is notified that the WRITE index has been updated
+7) Once the firmware has processed the TBD, INTA is triggered.
+8) For each Tx interrupt received from the firmware, the READ index is checked
+   to see which TBDs are done being processed.
+9) For each TBD that has been processed, the ISR pulls the oldest packet
+   from the fw_pend_list.
+10) The packet structure contained in the fw_pend_list is then used
+    to unmap the DMA address and to free the SKB originally passed to the
+    driver from the kernel.
+11) The packet structure is placed onto the tx_free_list
+
+The above steps are the same for commands, only the msg_free_list/msg_pend_list
+are used instead of tx_free_list/tx_pend_list.
+
+...
+
+Critical Sections / Locking :
+
+There are two locks utilized.  The first is the low level lock (priv->low_lock)
+that protects the following:
+
+- Access to the Tx/Rx queue lists via priv->low_lock. The lists are as follows:
+
+  tx_free_list : Holds pre-allocated Tx buffers.
+    TAIL modified in __ipw2100_tx_process()
+    HEAD modified in ipw2100_tx()
+
+  tx_pend_list : Holds used Tx buffers waiting to go into the TBD ring
+    TAIL modified in ipw2100_tx()
+    HEAD modified by ipw2100_tx_send_data()
+
+  msg_free_list : Holds pre-allocated Msg (Command) buffers
+    TAIL modified in __ipw2100_tx_process()
+    HEAD modified in ipw2100_hw_send_command()
+
+  msg_pend_list : Holds used Msg buffers waiting to go into the TBD ring
+    TAIL modified in ipw2100_hw_send_command()
+    HEAD modified in ipw2100_tx_send_commands()
+
+  The flow of data on the TX side is as follows:
+
+  MSG_FREE_LIST + COMMAND => MSG_PEND_LIST => TBD => MSG_FREE_LIST
+  TX_FREE_LIST + DATA => TX_PEND_LIST => TBD => TX_FREE_LIST
+
+  The methods that work on the TBD ring are protected via priv->low_lock.
+
+- The internal data state of the device itself
+- Access to the firmware read/write indexes for the BD queues
+  and associated logic
+
+All external entry functions are locked with the priv->action_lock to ensure
+that only one external action is invoked at a time.
+
+
+*/
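+
+/*
+ * Illustrative sketch (not driver code): the TBD ring described above
+ * behaves as a single-producer/single-consumer circular buffer.  Assuming
+ * the bookkeeping names used by the BD queue structure in ipw2100.h
+ * (entries, next for WRITE, oldest for READ), the free-space check made
+ * before queueing reduces to the classic
+ *
+ *	avail = (oldest + entries - next - 1) % entries;
+ *
+ * where one slot is kept open to distinguish full from empty.  Only the
+ * host advances WRITE and only the firmware advances READ, so the indexes
+ * themselves need no lock; priv->low_lock protects the host-side lists
+ * (tx_free_list, tx_pend_list, fw_pend_list) that feed the ring.
+ */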
+
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/in6.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/kmod.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/proc_fs.h>
+#include <linux/skbuff.h>
+#include <linux/uaccess.h>
+#include <asm/io.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/unistd.h>
+#include <linux/stringify.h>
+#include <linux/tcp.h>
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/firmware.h>
+#include <linux/acpi.h>
+#include <linux/ctype.h>
+#include <linux/pm_qos.h>
+
+#include <net/lib80211.h>
+
+#include "ipw2100.h"
+#include "ipw.h"
+
+#define IPW2100_VERSION "git-1.2.2"
+
+#define DRV_NAME	"ipw2100"
+#define DRV_VERSION	IPW2100_VERSION
+#define DRV_DESCRIPTION	"Intel(R) PRO/Wireless 2100 Network Driver"
+#define DRV_COPYRIGHT	"Copyright(c) 2003-2006 Intel Corporation"
+
+static struct pm_qos_request ipw2100_pm_qos_req;
+
+/* Debugging stuff */
+#ifdef CONFIG_IPW2100_DEBUG
+#define IPW2100_RX_DEBUG	/* Reception debugging */
+#endif
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT);
+MODULE_LICENSE("GPL");
+
+static int debug = 0;
+static int network_mode = 0;
+static int channel = 0;
+static int associate = 0;
+static int disable = 0;
+#ifdef CONFIG_PM
+static struct ipw2100_fw ipw2100_firmware;
+#endif
+
+#include <linux/moduleparam.h>
+module_param(debug, int, 0444);
+module_param_named(mode, network_mode, int, 0444);
+module_param(channel, int, 0444);
+module_param(associate, int, 0444);
+module_param(disable, int, 0444);
+
+MODULE_PARM_DESC(debug, "debug level");
+MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
+MODULE_PARM_DESC(channel, "channel");
+MODULE_PARM_DESC(associate, "auto associate when scanning (default off)");
+MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
+
+static u32 ipw2100_debug_level = IPW_DL_NONE;
+
+#ifdef CONFIG_IPW2100_DEBUG
+#define IPW_DEBUG(level, message...) \
+do { \
+	if (ipw2100_debug_level & (level)) { \
+		printk(KERN_DEBUG "ipw2100: %c %s ", \
+                       in_interrupt() ? 'I' : 'U',  __func__); \
+		printk(message); \
+	} \
+} while (0)
+#else
+#define IPW_DEBUG(level, message...) do {} while (0)
+#endif				/* CONFIG_IPW2100_DEBUG */
+
+#ifdef CONFIG_IPW2100_DEBUG
+static const char *command_types[] = {
+	"undefined",
+	"unused",		/* HOST_ATTENTION */
+	"HOST_COMPLETE",
+	"unused",		/* SLEEP */
+	"unused",		/* HOST_POWER_DOWN */
+	"unused",
+	"SYSTEM_CONFIG",
+	"unused",		/* SET_IMR */
+	"SSID",
+	"MANDATORY_BSSID",
+	"AUTHENTICATION_TYPE",
+	"ADAPTER_ADDRESS",
+	"PORT_TYPE",
+	"INTERNATIONAL_MODE",
+	"CHANNEL",
+	"RTS_THRESHOLD",
+	"FRAG_THRESHOLD",
+	"POWER_MODE",
+	"TX_RATES",
+	"BASIC_TX_RATES",
+	"WEP_KEY_INFO",
+	"unused",
+	"unused",
+	"unused",
+	"unused",
+	"WEP_KEY_INDEX",
+	"WEP_FLAGS",
+	"ADD_MULTICAST",
+	"CLEAR_ALL_MULTICAST",
+	"BEACON_INTERVAL",
+	"ATIM_WINDOW",
+	"CLEAR_STATISTICS",
+	"undefined",
+	"undefined",
+	"undefined",
+	"undefined",
+	"TX_POWER_INDEX",
+	"undefined",
+	"undefined",
+	"undefined",
+	"undefined",
+	"undefined",
+	"undefined",
+	"BROADCAST_SCAN",
+	"CARD_DISABLE",
+	"PREFERRED_BSSID",
+	"SET_SCAN_OPTIONS",
+	"SCAN_DWELL_TIME",
+	"SWEEP_TABLE",
+	"AP_OR_STATION_TABLE",
+	"GROUP_ORDINALS",
+	"SHORT_RETRY_LIMIT",
+	"LONG_RETRY_LIMIT",
+	"unused",		/* SAVE_CALIBRATION */
+	"unused",		/* RESTORE_CALIBRATION */
+	"undefined",
+	"undefined",
+	"undefined",
+	"HOST_PRE_POWER_DOWN",
+	"unused",		/* HOST_INTERRUPT_COALESCING */
+	"undefined",
+	"CARD_DISABLE_PHY_OFF",
+	"MSDU_TX_RATES",
+	"undefined",
+	"SET_STATION_STAT_BITS",
+	"CLEAR_STATIONS_STAT_BITS",
+	"LEAP_ROGUE_MODE",
+	"SET_SECURITY_INFORMATION",
+	"DISASSOCIATION_BSSID",
+	"SET_WPA_ASS_IE"
+};
+#endif
+
+static const long ipw2100_frequencies[] = {
+	2412, 2417, 2422, 2427,
+	2432, 2437, 2442, 2447,
+	2452, 2457, 2462, 2467,
+	2472, 2484
+};
+
+#define FREQ_COUNT	ARRAY_SIZE(ipw2100_frequencies)
+
+static struct ieee80211_rate ipw2100_bg_rates[] = {
+	{ .bitrate = 10 },
+	{ .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+	{ .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+	{ .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+};
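+/* struct ieee80211_rate.bitrate is in units of 100 kb/s, so the table
+ * above advertises the 802.11b rates 1, 2, 5.5 and 11 Mb/s. */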
+
+#define RATE_COUNT ARRAY_SIZE(ipw2100_bg_rates)
+
+/* Pre-decl until we get the code solid and then we can clean it up */
+static void ipw2100_tx_send_commands(struct ipw2100_priv *priv);
+static void ipw2100_tx_send_data(struct ipw2100_priv *priv);
+static int ipw2100_adapter_setup(struct ipw2100_priv *priv);
+
+static void ipw2100_queues_initialize(struct ipw2100_priv *priv);
+static void ipw2100_queues_free(struct ipw2100_priv *priv);
+static int ipw2100_queues_allocate(struct ipw2100_priv *priv);
+
+static int ipw2100_fw_download(struct ipw2100_priv *priv,
+			       struct ipw2100_fw *fw);
+static int ipw2100_get_firmware(struct ipw2100_priv *priv,
+				struct ipw2100_fw *fw);
+static int ipw2100_get_fwversion(struct ipw2100_priv *priv, char *buf,
+				 size_t max);
+static int ipw2100_get_ucodeversion(struct ipw2100_priv *priv, char *buf,
+				    size_t max);
+static void ipw2100_release_firmware(struct ipw2100_priv *priv,
+				     struct ipw2100_fw *fw);
+static int ipw2100_ucode_download(struct ipw2100_priv *priv,
+				  struct ipw2100_fw *fw);
+static void ipw2100_wx_event_work(struct work_struct *work);
+static struct iw_statistics *ipw2100_wx_wireless_stats(struct net_device *dev);
+static const struct iw_handler_def ipw2100_wx_handler_def;
+
+static inline void read_register(struct net_device *dev, u32 reg, u32 * val)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+
+	*val = ioread32(priv->ioaddr + reg);
+	IPW_DEBUG_IO("r: 0x%08X => 0x%08X\n", reg, *val);
+}
+
+static inline void write_register(struct net_device *dev, u32 reg, u32 val)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+
+	iowrite32(val, priv->ioaddr + reg);
+	IPW_DEBUG_IO("w: 0x%08X <= 0x%08X\n", reg, val);
+}
+
+static inline void read_register_word(struct net_device *dev, u32 reg,
+				      u16 * val)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+
+	*val = ioread16(priv->ioaddr + reg);
+	IPW_DEBUG_IO("r: 0x%08X => %04X\n", reg, *val);
+}
+
+static inline void read_register_byte(struct net_device *dev, u32 reg, u8 * val)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+
+	*val = ioread8(priv->ioaddr + reg);
+	IPW_DEBUG_IO("r: 0x%08X => %02X\n", reg, *val);
+}
+
+static inline void write_register_word(struct net_device *dev, u32 reg, u16 val)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+
+	iowrite16(val, priv->ioaddr + reg);
+	IPW_DEBUG_IO("w: 0x%08X <= %04X\n", reg, val);
+}
+
+static inline void write_register_byte(struct net_device *dev, u32 reg, u8 val)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+
+	iowrite8(val, priv->ioaddr + reg);
+	IPW_DEBUG_IO("w: 0x%08X <= %02X\n", reg, val);
+}
+
+static inline void read_nic_dword(struct net_device *dev, u32 addr, u32 * val)
+{
+	write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
+		       addr & IPW_REG_INDIRECT_ADDR_MASK);
+	read_register(dev, IPW_REG_INDIRECT_ACCESS_DATA, val);
+}
+
+static inline void write_nic_dword(struct net_device *dev, u32 addr, u32 val)
+{
+	write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
+		       addr & IPW_REG_INDIRECT_ADDR_MASK);
+	write_register(dev, IPW_REG_INDIRECT_ACCESS_DATA, val);
+}
+
+static inline void read_nic_word(struct net_device *dev, u32 addr, u16 * val)
+{
+	write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
+		       addr & IPW_REG_INDIRECT_ADDR_MASK);
+	read_register_word(dev, IPW_REG_INDIRECT_ACCESS_DATA, val);
+}
+
+static inline void write_nic_word(struct net_device *dev, u32 addr, u16 val)
+{
+	write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
+		       addr & IPW_REG_INDIRECT_ADDR_MASK);
+	write_register_word(dev, IPW_REG_INDIRECT_ACCESS_DATA, val);
+}
+
+static inline void read_nic_byte(struct net_device *dev, u32 addr, u8 * val)
+{
+	write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
+		       addr & IPW_REG_INDIRECT_ADDR_MASK);
+	read_register_byte(dev, IPW_REG_INDIRECT_ACCESS_DATA, val);
+}
+
+static inline void write_nic_byte(struct net_device *dev, u32 addr, u8 val)
+{
+	write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
+		       addr & IPW_REG_INDIRECT_ADDR_MASK);
+	write_register_byte(dev, IPW_REG_INDIRECT_ACCESS_DATA, val);
+}
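+
+/*
+ * Usage sketch: each indirect pair above turns two MMIO accesses into a
+ * single access to the device's internal address space.  Halting the ARC
+ * processor during firmware download (see ipw2100_download_firmware()
+ * below) is, for example, just
+ *
+ *	write_nic_dword(dev, IPW_INTERNAL_REGISTER_HALT_AND_RESET,
+ *			0x80000000);
+ *
+ * which latches the target address into IPW_REG_INDIRECT_ACCESS_ADDRESS
+ * and moves the value through IPW_REG_INDIRECT_ACCESS_DATA.
+ */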
+
+static inline void write_nic_auto_inc_address(struct net_device *dev, u32 addr)
+{
+	write_register(dev, IPW_REG_AUTOINCREMENT_ADDRESS,
+		       addr & IPW_REG_INDIRECT_ADDR_MASK);
+}
+
+static inline void write_nic_dword_auto_inc(struct net_device *dev, u32 val)
+{
+	write_register(dev, IPW_REG_AUTOINCREMENT_DATA, val);
+}
+
+static void write_nic_memory(struct net_device *dev, u32 addr, u32 len,
+				    const u8 * buf)
+{
+	u32 aligned_addr;
+	u32 aligned_len;
+	u32 dif_len;
+	u32 i;
+
+	/* write the leading unaligned bytes one at a time */
+	aligned_addr = addr & (~0x3);
+	dif_len = addr - aligned_addr;
+	if (dif_len) {
+		/* Start writing at aligned_addr + dif_len */
+		write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
+			       aligned_addr);
+		for (i = dif_len; i < 4; i++, buf++)
+			write_register_byte(dev,
+					    IPW_REG_INDIRECT_ACCESS_DATA + i,
+					    *buf);
+
+		len -= dif_len;
+		aligned_addr += 4;
+	}
+
+	/* write DWs through the autoincrement registers */
+	write_register(dev, IPW_REG_AUTOINCREMENT_ADDRESS, aligned_addr);
+	aligned_len = len & (~0x3);
+	for (i = 0; i < aligned_len; i += 4, buf += 4, aligned_addr += 4)
+		write_register(dev, IPW_REG_AUTOINCREMENT_DATA, *(u32 *) buf);
+
+	/* write the trailing unaligned bytes */
+	dif_len = len - aligned_len;
+	write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS, aligned_addr);
+	for (i = 0; i < dif_len; i++, buf++)
+		write_register_byte(dev, IPW_REG_INDIRECT_ACCESS_DATA + i,
+				    *buf);
+}
+
+static void read_nic_memory(struct net_device *dev, u32 addr, u32 len,
+				   u8 * buf)
+{
+	u32 aligned_addr;
+	u32 aligned_len;
+	u32 dif_len;
+	u32 i;
+
+	/* read the leading unaligned bytes one at a time */
+	aligned_addr = addr & (~0x3);
+	dif_len = addr - aligned_addr;
+	if (dif_len) {
+		/* Start reading at aligned_addr + dif_len */
+		write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
+			       aligned_addr);
+		for (i = dif_len; i < 4; i++, buf++)
+			read_register_byte(dev,
+					   IPW_REG_INDIRECT_ACCESS_DATA + i,
+					   buf);
+
+		len -= dif_len;
+		aligned_addr += 4;
+	}
+
+	/* read DWs through autoincrement registers */
+	write_register(dev, IPW_REG_AUTOINCREMENT_ADDRESS, aligned_addr);
+	aligned_len = len & (~0x3);
+	for (i = 0; i < aligned_len; i += 4, buf += 4, aligned_addr += 4)
+		read_register(dev, IPW_REG_AUTOINCREMENT_DATA, (u32 *) buf);
+
+	/* read the trailing unaligned bytes */
+	dif_len = len - aligned_len;
+	write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS, aligned_addr);
+	for (i = 0; i < dif_len; i++, buf++)
+		read_register_byte(dev, IPW_REG_INDIRECT_ACCESS_DATA + i, buf);
+}
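+
+/*
+ * Worked example for the two helpers above: a 10-byte transfer at
+ * address 0x1002 splits into two leading bytes (0x1002-0x1003, moved
+ * through the indirect data port), two aligned dwords (0x1004-0x100B,
+ * streamed through the autoincrement port) and no trailing bytes.
+ */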
+
+static bool ipw2100_hw_is_adapter_in_system(struct net_device *dev)
+{
+	u32 dbg;
+
+	read_register(dev, IPW_REG_DOA_DEBUG_AREA_START, &dbg);
+
+	return dbg == IPW_DATA_DOA_DEBUG_VALUE;
+}
+
+static int ipw2100_get_ordinal(struct ipw2100_priv *priv, u32 ord,
+			       void *val, u32 * len)
+{
+	struct ipw2100_ordinals *ordinals = &priv->ordinals;
+	u32 addr;
+	u32 field_info;
+	u16 field_len;
+	u16 field_count;
+	u32 total_length;
+
+	if (ordinals->table1_addr == 0) {
+		printk(KERN_WARNING DRV_NAME ": attempt to use fw ordinals "
+		       "before they have been loaded.\n");
+		return -EINVAL;
+	}
+
+	if (IS_ORDINAL_TABLE_ONE(ordinals, ord)) {
+		if (*len < IPW_ORD_TAB_1_ENTRY_SIZE) {
+			*len = IPW_ORD_TAB_1_ENTRY_SIZE;
+
+			printk(KERN_WARNING DRV_NAME
+			       ": ordinal buffer length too small, need %zd\n",
+			       IPW_ORD_TAB_1_ENTRY_SIZE);
+
+			return -EINVAL;
+		}
+
+		read_nic_dword(priv->net_dev,
+			       ordinals->table1_addr + (ord << 2), &addr);
+		read_nic_dword(priv->net_dev, addr, val);
+
+		*len = IPW_ORD_TAB_1_ENTRY_SIZE;
+
+		return 0;
+	}
+
+	if (IS_ORDINAL_TABLE_TWO(ordinals, ord)) {
+
+		ord -= IPW_START_ORD_TAB_2;
+
+		/* get the address of statistic */
+		read_nic_dword(priv->net_dev,
+			       ordinals->table2_addr + (ord << 3), &addr);
+
+		/* get the second DW of statistics;
+		 * two 16-bit words - first is length, second is count */
+		read_nic_dword(priv->net_dev,
+			       ordinals->table2_addr + (ord << 3) + sizeof(u32),
+			       &field_info);
+
+		/* get each entry length */
+		field_len = *((u16 *) & field_info);
+
+		/* get number of entries */
+		field_count = *(((u16 *) & field_info) + 1);
+
+		/* abort if not enough memory */
+		total_length = field_len * field_count;
+		if (total_length > *len) {
+			*len = total_length;
+			return -EINVAL;
+		}
+
+		*len = total_length;
+		if (!total_length)
+			return 0;
+
+		/* read the ordinal data from the SRAM */
+		read_nic_memory(priv->net_dev, addr, total_length, val);
+
+		return 0;
+	}
+
+	printk(KERN_WARNING DRV_NAME ": ordinal %d neither in table 1 nor "
+	       "in table 2\n", ord);
+
+	return -EINVAL;
+}
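+
+/*
+ * Usage sketch (mirroring ipw2100_wait_for_card_state() below): table 1
+ * ordinals are fixed-size dwords, so a query looks like
+ *
+ *	u32 card_state;
+ *	u32 len = sizeof(card_state);
+ *	int err = ipw2100_get_ordinal(priv, IPW_ORD_CARD_DISABLED,
+ *				      &card_state, &len);
+ *
+ * On failure, *len is updated to the buffer size the caller should have
+ * supplied.
+ */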
+
+static int ipw2100_set_ordinal(struct ipw2100_priv *priv, u32 ord, u32 * val,
+			       u32 * len)
+{
+	struct ipw2100_ordinals *ordinals = &priv->ordinals;
+	u32 addr;
+
+	if (IS_ORDINAL_TABLE_ONE(ordinals, ord)) {
+		if (*len != IPW_ORD_TAB_1_ENTRY_SIZE) {
+			*len = IPW_ORD_TAB_1_ENTRY_SIZE;
+			IPW_DEBUG_INFO("wrong size\n");
+			return -EINVAL;
+		}
+
+		read_nic_dword(priv->net_dev,
+			       ordinals->table1_addr + (ord << 2), &addr);
+
+		write_nic_dword(priv->net_dev, addr, *val);
+
+		*len = IPW_ORD_TAB_1_ENTRY_SIZE;
+
+		return 0;
+	}
+
+	/* table 2 ordinals are read-only */
+	IPW_DEBUG_INFO("wrong table\n");
+	return -EINVAL;
+}
+
+static char *snprint_line(char *buf, size_t count,
+			  const u8 * data, u32 len, u32 ofs)
+{
+	int out, i, j, l;
+	char c;
+
+	out = snprintf(buf, count, "%08X", ofs);
+
+	for (l = 0, i = 0; i < 2; i++) {
+		out += snprintf(buf + out, count - out, " ");
+		for (j = 0; j < 8 && l < len; j++, l++)
+			out += snprintf(buf + out, count - out, "%02X ",
+					data[(i * 8 + j)]);
+		for (; j < 8; j++)
+			out += snprintf(buf + out, count - out, "   ");
+	}
+
+	out += snprintf(buf + out, count - out, " ");
+	for (l = 0, i = 0; i < 2; i++) {
+		out += snprintf(buf + out, count - out, " ");
+		for (j = 0; j < 8 && l < len; j++, l++) {
+			c = data[(i * 8 + j)];
+			if (!isascii(c) || !isprint(c))
+				c = '.';
+
+			out += snprintf(buf + out, count - out, "%c", c);
+		}
+
+		for (; j < 8; j++)
+			out += snprintf(buf + out, count - out, " ");
+	}
+
+	return buf;
+}
+
+static void printk_buf(int level, const u8 * data, u32 len)
+{
+	char line[81];
+	u32 ofs = 0;
+	if (!(ipw2100_debug_level & level))
+		return;
+
+	while (len) {
+		printk(KERN_DEBUG "%s\n",
+		       snprint_line(line, sizeof(line), &data[ofs],
+				    min(len, 16U), ofs));
+		ofs += 16;
+		len -= min(len, 16U);
+	}
+}
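+
+/*
+ * printk_buf() dumps 16 bytes per line: the offset, two groups of eight
+ * hex bytes, then the ASCII rendering with unprintable bytes shown as '.'.
+ */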
+
+#define MAX_RESET_BACKOFF 10
+
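+/*
+ * Resets are scheduled with a linear backoff: the first request runs
+ * immediately, and each further request arriving within the current
+ * backoff window adds one second of delay, capped at MAX_RESET_BACKOFF
+ * seconds.  A quiet period longer than the current backoff resets the
+ * delay to zero.
+ */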
+static void schedule_reset(struct ipw2100_priv *priv)
+{
+	time64_t now = ktime_get_boottime_seconds();
+
+	/* If we haven't received a reset request within the backoff period,
+	 * then we can reset the backoff interval so this reset occurs
+	 * immediately */
+	if (priv->reset_backoff &&
+	    (now - priv->last_reset > priv->reset_backoff))
+		priv->reset_backoff = 0;
+
+	priv->last_reset = now;
+
+	if (!(priv->status & STATUS_RESET_PENDING)) {
+		IPW_DEBUG_INFO("%s: Scheduling firmware restart (%llds).\n",
+			       priv->net_dev->name, priv->reset_backoff);
+		netif_carrier_off(priv->net_dev);
+		netif_stop_queue(priv->net_dev);
+		priv->status |= STATUS_RESET_PENDING;
+		if (priv->reset_backoff)
+			schedule_delayed_work(&priv->reset_work,
+					      priv->reset_backoff * HZ);
+		else
+			schedule_delayed_work(&priv->reset_work, 0);
+
+		if (priv->reset_backoff < MAX_RESET_BACKOFF)
+			priv->reset_backoff++;
+
+		wake_up_interruptible(&priv->wait_command_queue);
+	} else
+		IPW_DEBUG_INFO("%s: Firmware restart already in progress.\n",
+			       priv->net_dev->name);
+
+}
+
+#define HOST_COMPLETE_TIMEOUT (2 * HZ)
+static int ipw2100_hw_send_command(struct ipw2100_priv *priv,
+				   struct host_command *cmd)
+{
+	struct list_head *element;
+	struct ipw2100_tx_packet *packet;
+	unsigned long flags;
+	int err = 0;
+
+	IPW_DEBUG_HC("Sending %s command (#%d), %d bytes\n",
+		     command_types[cmd->host_command], cmd->host_command,
+		     cmd->host_command_length);
+	printk_buf(IPW_DL_HC, (u8 *) cmd->host_command_parameters,
+		   cmd->host_command_length);
+
+	spin_lock_irqsave(&priv->low_lock, flags);
+
+	if (priv->fatal_error) {
+		IPW_DEBUG_INFO
+		    ("Attempt to send command while hardware in fatal error condition.\n");
+		err = -EIO;
+		goto fail_unlock;
+	}
+
+	if (!(priv->status & STATUS_RUNNING)) {
+		IPW_DEBUG_INFO
+		    ("Attempt to send command while hardware is not running.\n");
+		err = -EIO;
+		goto fail_unlock;
+	}
+
+	if (priv->status & STATUS_CMD_ACTIVE) {
+		IPW_DEBUG_INFO
+		    ("Attempt to send command while another command is pending.\n");
+		err = -EBUSY;
+		goto fail_unlock;
+	}
+
+	if (list_empty(&priv->msg_free_list)) {
+		IPW_DEBUG_INFO("no available msg buffers\n");
+		err = -EAGAIN;	/* don't report success when nothing was sent */
+		goto fail_unlock;
+	}
+
+	priv->status |= STATUS_CMD_ACTIVE;
+	priv->messages_sent++;
+
+	element = priv->msg_free_list.next;
+
+	packet = list_entry(element, struct ipw2100_tx_packet, list);
+	packet->jiffy_start = jiffies;
+
+	/* initialize the firmware command packet */
+	packet->info.c_struct.cmd->host_command_reg = cmd->host_command;
+	packet->info.c_struct.cmd->host_command_reg1 = cmd->host_command1;
+	packet->info.c_struct.cmd->host_command_len_reg =
+	    cmd->host_command_length;
+	packet->info.c_struct.cmd->sequence = cmd->host_command_sequence;
+
+	memcpy(packet->info.c_struct.cmd->host_command_params_reg,
+	       cmd->host_command_parameters,
+	       sizeof(packet->info.c_struct.cmd->host_command_params_reg));
+
+	list_del(element);
+	DEC_STAT(&priv->msg_free_stat);
+
+	list_add_tail(element, &priv->msg_pend_list);
+	INC_STAT(&priv->msg_pend_stat);
+
+	ipw2100_tx_send_commands(priv);
+	ipw2100_tx_send_data(priv);
+
+	spin_unlock_irqrestore(&priv->low_lock, flags);
+
+	/*
+	 * We must wait for this command to complete before another
+	 * command can be sent...  but if we wait longer than
+	 * HOST_COMPLETE_TIMEOUT (2 seconds), then there is a problem.
+	 */
+
+	err =
+	    wait_event_interruptible_timeout(priv->wait_command_queue,
+					     !(priv->
+					       status & STATUS_CMD_ACTIVE),
+					     HOST_COMPLETE_TIMEOUT);
+
+	if (err == 0) {
+		IPW_DEBUG_INFO("Command completion timed out after %dms.\n",
+			       1000 * (HOST_COMPLETE_TIMEOUT / HZ));
+		priv->fatal_error = IPW2100_ERR_MSG_TIMEOUT;
+		priv->status &= ~STATUS_CMD_ACTIVE;
+		schedule_reset(priv);
+		return -EIO;
+	}
+
+	if (priv->fatal_error) {
+		printk(KERN_WARNING DRV_NAME ": %s: firmware fatal error\n",
+		       priv->net_dev->name);
+		return -EIO;
+	}
+
+	/* !!!!! HACK TEST !!!!!
+	 * When lots of debug trace statements are enabled, the driver
+	 * doesn't seem to have as many firmware restart cycles...
+	 *
+	 * As a test, we're sticking in a 1/100s delay here */
+	schedule_timeout_uninterruptible(msecs_to_jiffies(10));
+
+	return 0;
+
+      fail_unlock:
+	spin_unlock_irqrestore(&priv->low_lock, flags);
+
+	return err;
+}
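+
+/*
+ * Usage sketch: callers fill in a struct host_command and block until the
+ * firmware acknowledges it; see e.g. ipw2100_start_scan() below:
+ *
+ *	struct host_command cmd = {
+ *		.host_command = BROADCAST_SCAN,
+ *		.host_command_sequence = 0,
+ *		.host_command_length = 4
+ *	};
+ *
+ *	err = ipw2100_hw_send_command(priv, &cmd);
+ *
+ * Because the call sleeps on wait_command_queue, it must only be used
+ * from process context.
+ */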
+
+/*
+ * Verify the values and data access of the hardware
+ * No locks needed or used.  No functions called.
+ */
+static int ipw2100_verify(struct ipw2100_priv *priv)
+{
+	u32 data1, data2;
+	u32 address;
+
+	u32 val1 = 0x76543210;
+	u32 val2 = 0xFEDCBA98;
+
+	/* Domain 0 check - all values should be DOA_DEBUG */
+	for (address = IPW_REG_DOA_DEBUG_AREA_START;
+	     address < IPW_REG_DOA_DEBUG_AREA_END; address += sizeof(u32)) {
+		read_register(priv->net_dev, address, &data1);
+		if (data1 != IPW_DATA_DOA_DEBUG_VALUE)
+			return -EIO;
+	}
+
+	/* Domain 1 check - use arbitrary read/write compare  */
+	for (address = 0; address < 5; address++) {
+		/* The memory area is not used now */
+		write_register(priv->net_dev, IPW_REG_DOMAIN_1_OFFSET + 0x32,
+			       val1);
+		write_register(priv->net_dev, IPW_REG_DOMAIN_1_OFFSET + 0x36,
+			       val2);
+		read_register(priv->net_dev, IPW_REG_DOMAIN_1_OFFSET + 0x32,
+			      &data1);
+		read_register(priv->net_dev, IPW_REG_DOMAIN_1_OFFSET + 0x36,
+			      &data2);
+		if (val1 == data1 && val2 == data2)
+			return 0;
+	}
+
+	return -EIO;
+}
+
+/*
+ *
+ * Loop until the CARD_DISABLED bit is the same value as the
+ * supplied parameter
+ *
+ * TODO: See if it would be more efficient to do a wait/wake
+ *       cycle and have the completion event trigger the wakeup
+ *
+ */
+#define IPW_CARD_DISABLE_COMPLETE_WAIT	100	/* milliseconds */
+static int ipw2100_wait_for_card_state(struct ipw2100_priv *priv, int state)
+{
+	int i;
+	u32 card_state;
+	u32 len = sizeof(card_state);
+	int err;
+
+	for (i = 0; i <= IPW_CARD_DISABLE_COMPLETE_WAIT * 1000; i += 50) {
+		err = ipw2100_get_ordinal(priv, IPW_ORD_CARD_DISABLED,
+					  &card_state, &len);
+		if (err) {
+			IPW_DEBUG_INFO("Query of CARD_DISABLED ordinal "
+				       "failed.\n");
+			return 0;
+		}
+
+		/* We'll break out if either the HW state says it is
+		 * in the state we want, or if HOST_COMPLETE command
+		 * finishes */
+		if ((card_state == state) ||
+		    ((priv->status & STATUS_ENABLED) ?
+		     IPW_HW_STATE_ENABLED : IPW_HW_STATE_DISABLED) == state) {
+			if (state == IPW_HW_STATE_ENABLED)
+				priv->status |= STATUS_ENABLED;
+			else
+				priv->status &= ~STATUS_ENABLED;
+
+			return 0;
+		}
+
+		udelay(50);
+	}
+
+	IPW_DEBUG_INFO("ipw2100_wait_for_card_state to %s state timed out\n",
+		       state ? "DISABLED" : "ENABLED");
+	return -EIO;
+}
+
+/*********************************************************************
+    Procedure   :   sw_reset_and_clock
+    Purpose     :   Asserts s/w reset, asserts clock initialization
+                    and waits for clock stabilization
+ ********************************************************************/
+static int sw_reset_and_clock(struct ipw2100_priv *priv)
+{
+	int i;
+	u32 r;
+
+	/* assert s/w reset */
+	write_register(priv->net_dev, IPW_REG_RESET_REG,
+		       IPW_AUX_HOST_RESET_REG_SW_RESET);
+
+	/* wait for clock stabilization */
+	for (i = 0; i < 1000; i++) {
+		udelay(IPW_WAIT_RESET_ARC_COMPLETE_DELAY);
+
+		/* check clock ready bit */
+		read_register(priv->net_dev, IPW_REG_RESET_REG, &r);
+		if (r & IPW_AUX_HOST_RESET_REG_PRINCETON_RESET)
+			break;
+	}
+
+	if (i == 1000)
+		return -EIO;	/* TODO: better error value */
+
+	/* set "initialization complete" bit to move adapter to
+	 * D0 state */
+	write_register(priv->net_dev, IPW_REG_GP_CNTRL,
+		       IPW_AUX_HOST_GP_CNTRL_BIT_INIT_DONE);
+
+	/* wait for clock stabilization */
+	for (i = 0; i < 10000; i++) {
+		udelay(IPW_WAIT_CLOCK_STABILIZATION_DELAY * 4);
+
+		/* check clock ready bit */
+		read_register(priv->net_dev, IPW_REG_GP_CNTRL, &r);
+		if (r & IPW_AUX_HOST_GP_CNTRL_BIT_CLOCK_READY)
+			break;
+	}
+
+	if (i == 10000)
+		return -EIO;	/* TODO: better error value */
+
+	/* set D0 standby bit */
+	read_register(priv->net_dev, IPW_REG_GP_CNTRL, &r);
+	write_register(priv->net_dev, IPW_REG_GP_CNTRL,
+		       r | IPW_AUX_HOST_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
+
+	return 0;
+}
+
+/*********************************************************************
+    Procedure   :   ipw2100_download_firmware
+    Purpose     :   Initialize adapter after power on.
+                    The sequence is:
+                    1. assert s/w reset first!
+                    2. awake clocks & wait for clock stabilization
+                    3. hold ARC (don't ask me why...)
+                    4. load Dino ucode and reset/clock init again
+                    5. zero-out shared mem
+                    6. download f/w
+ *******************************************************************/
+static int ipw2100_download_firmware(struct ipw2100_priv *priv)
+{
+	u32 address;
+	int err;
+
+#ifndef CONFIG_PM
+	/* Fetch the firmware and microcode */
+	struct ipw2100_fw ipw2100_firmware;
+#endif
+
+	if (priv->fatal_error) {
+		IPW_DEBUG_ERROR("%s: ipw2100_download_firmware called after "
+				"fatal error %d.  Interface must be brought down.\n",
+				priv->net_dev->name, priv->fatal_error);
+		return -EINVAL;
+	}
+#ifdef CONFIG_PM
+	if (!ipw2100_firmware.version) {
+		err = ipw2100_get_firmware(priv, &ipw2100_firmware);
+		if (err) {
+			IPW_DEBUG_ERROR("%s: ipw2100_get_firmware failed: %d\n",
+					priv->net_dev->name, err);
+			priv->fatal_error = IPW2100_ERR_FW_LOAD;
+			goto fail;
+		}
+	}
+#else
+	err = ipw2100_get_firmware(priv, &ipw2100_firmware);
+	if (err) {
+		IPW_DEBUG_ERROR("%s: ipw2100_get_firmware failed: %d\n",
+				priv->net_dev->name, err);
+		priv->fatal_error = IPW2100_ERR_FW_LOAD;
+		goto fail;
+	}
+#endif
+	priv->firmware_version = ipw2100_firmware.version;
+
+	/* s/w reset and clock stabilization */
+	err = sw_reset_and_clock(priv);
+	if (err) {
+		IPW_DEBUG_ERROR("%s: sw_reset_and_clock failed: %d\n",
+				priv->net_dev->name, err);
+		goto fail;
+	}
+
+	err = ipw2100_verify(priv);
+	if (err) {
+		IPW_DEBUG_ERROR("%s: ipw2100_verify failed: %d\n",
+				priv->net_dev->name, err);
+		goto fail;
+	}
+
+	/* Hold ARC */
+	write_nic_dword(priv->net_dev,
+			IPW_INTERNAL_REGISTER_HALT_AND_RESET, 0x80000000);
+
+	/* allow ARC to run */
+	write_register(priv->net_dev, IPW_REG_RESET_REG, 0);
+
+	/* load microcode */
+	err = ipw2100_ucode_download(priv, &ipw2100_firmware);
+	if (err) {
+		printk(KERN_ERR DRV_NAME ": %s: Error loading microcode: %d\n",
+		       priv->net_dev->name, err);
+		goto fail;
+	}
+
+	/* release ARC */
+	write_nic_dword(priv->net_dev,
+			IPW_INTERNAL_REGISTER_HALT_AND_RESET, 0x00000000);
+
+	/* s/w reset and clock stabilization (again!!!) */
+	err = sw_reset_and_clock(priv);
+	if (err) {
+		printk(KERN_ERR DRV_NAME
+		       ": %s: sw_reset_and_clock failed: %d\n",
+		       priv->net_dev->name, err);
+		goto fail;
+	}
+
+	/* load f/w */
+	err = ipw2100_fw_download(priv, &ipw2100_firmware);
+	if (err) {
+		IPW_DEBUG_ERROR("%s: Error loading firmware: %d\n",
+				priv->net_dev->name, err);
+		goto fail;
+	}
+#ifndef CONFIG_PM
+	/*
+	 * When the .resume method of the driver is called, the other
+	 * part of the system, i.e. the ide driver could still stay in
+	 * the suspend stage. This prevents us from loading the firmware
+	 * from the disk.  --YZ
+	 */
+
+	/* free any storage allocated for firmware image */
+	ipw2100_release_firmware(priv, &ipw2100_firmware);
+#endif
+
+	/* zero out Domain 1 area indirectly (Si requirement) */
+	for (address = IPW_HOST_FW_SHARED_AREA0;
+	     address < IPW_HOST_FW_SHARED_AREA0_END; address += 4)
+		write_nic_dword(priv->net_dev, address, 0);
+	for (address = IPW_HOST_FW_SHARED_AREA1;
+	     address < IPW_HOST_FW_SHARED_AREA1_END; address += 4)
+		write_nic_dword(priv->net_dev, address, 0);
+	for (address = IPW_HOST_FW_SHARED_AREA2;
+	     address < IPW_HOST_FW_SHARED_AREA2_END; address += 4)
+		write_nic_dword(priv->net_dev, address, 0);
+	for (address = IPW_HOST_FW_SHARED_AREA3;
+	     address < IPW_HOST_FW_SHARED_AREA3_END; address += 4)
+		write_nic_dword(priv->net_dev, address, 0);
+	for (address = IPW_HOST_FW_INTERRUPT_AREA;
+	     address < IPW_HOST_FW_INTERRUPT_AREA_END; address += 4)
+		write_nic_dword(priv->net_dev, address, 0);
+
+	return 0;
+
+      fail:
+	ipw2100_release_firmware(priv, &ipw2100_firmware);
+	return err;
+}
+
+static inline void ipw2100_enable_interrupts(struct ipw2100_priv *priv)
+{
+	if (priv->status & STATUS_INT_ENABLED)
+		return;
+	priv->status |= STATUS_INT_ENABLED;
+	write_register(priv->net_dev, IPW_REG_INTA_MASK, IPW_INTERRUPT_MASK);
+}
+
+static inline void ipw2100_disable_interrupts(struct ipw2100_priv *priv)
+{
+	if (!(priv->status & STATUS_INT_ENABLED))
+		return;
+	priv->status &= ~STATUS_INT_ENABLED;
+	write_register(priv->net_dev, IPW_REG_INTA_MASK, 0x0);
+}
+
+static void ipw2100_initialize_ordinals(struct ipw2100_priv *priv)
+{
+	struct ipw2100_ordinals *ord = &priv->ordinals;
+
+	IPW_DEBUG_INFO("enter\n");
+
+	read_register(priv->net_dev, IPW_MEM_HOST_SHARED_ORDINALS_TABLE_1,
+		      &ord->table1_addr);
+
+	read_register(priv->net_dev, IPW_MEM_HOST_SHARED_ORDINALS_TABLE_2,
+		      &ord->table2_addr);
+
+	read_nic_dword(priv->net_dev, ord->table1_addr, &ord->table1_size);
+	read_nic_dword(priv->net_dev, ord->table2_addr, &ord->table2_size);
+
+	ord->table2_size &= 0x0000FFFF;
+
+	IPW_DEBUG_INFO("table 1 size: %d\n", ord->table1_size);
+	IPW_DEBUG_INFO("table 2 size: %d\n", ord->table2_size);
+	IPW_DEBUG_INFO("exit\n");
+}
+
+static inline void ipw2100_hw_set_gpio(struct ipw2100_priv *priv)
+{
+	u32 reg = 0;
+	/*
+	 * Set GPIO 3 writable by FW; GPIO 1 writable
+	 * by driver and enable clock
+	 */
+	reg = (IPW_BIT_GPIO_GPIO3_MASK | IPW_BIT_GPIO_GPIO1_ENABLE |
+	       IPW_BIT_GPIO_LED_OFF);
+	write_register(priv->net_dev, IPW_REG_GPIO, reg);
+}
+
+static int rf_kill_active(struct ipw2100_priv *priv)
+{
+#define MAX_RF_KILL_CHECKS 5
+#define RF_KILL_CHECK_DELAY 40
+
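+	/*
+	 * Debounce the switch: sample the RF kill GPIO MAX_RF_KILL_CHECKS
+	 * times, RF_KILL_CHECK_DELAY microseconds apart.  Only if every
+	 * sample reads "kill asserted" (so that value below stays 0) is the
+	 * radio reported as disabled by the hardware switch.
+	 */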
+	unsigned short value = 0;
+	u32 reg = 0;
+	int i;
+
+	if (!(priv->hw_features & HW_FEATURE_RFKILL)) {
+		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, false);
+		priv->status &= ~STATUS_RF_KILL_HW;
+		return 0;
+	}
+
+	for (i = 0; i < MAX_RF_KILL_CHECKS; i++) {
+		udelay(RF_KILL_CHECK_DELAY);
+		read_register(priv->net_dev, IPW_REG_GPIO, &reg);
+		value = (value << 1) | ((reg & IPW_BIT_GPIO_RF_KILL) ? 0 : 1);
+	}
+
+	if (value == 0) {
+		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
+		priv->status |= STATUS_RF_KILL_HW;
+	} else {
+		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, false);
+		priv->status &= ~STATUS_RF_KILL_HW;
+	}
+
+	return (value == 0);
+}
+
+static int ipw2100_get_hw_features(struct ipw2100_priv *priv)
+{
+	u32 addr, len;
+	u32 val;
+
+	/*
+	 * EEPROM_SRAM_DB_START_ADDRESS using ordinal in ordinal table 1
+	 */
+	len = sizeof(addr);
+	if (ipw2100_get_ordinal
+	    (priv, IPW_ORD_EEPROM_SRAM_DB_BLOCK_START_ADDRESS, &addr, &len)) {
+		IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
+			       __LINE__);
+		return -EIO;
+	}
+
+	IPW_DEBUG_INFO("EEPROM address: %08X\n", addr);
+
+	/*
+	 * EEPROM version is the byte at offset 0xfd in firmware
+	 * We read 4 bytes, then shift out the byte we actually want */
+	read_nic_dword(priv->net_dev, addr + 0xFC, &val);
+	priv->eeprom_version = (val >> 24) & 0xFF;
+	IPW_DEBUG_INFO("EEPROM version: %d\n", priv->eeprom_version);
+
+	/*
+	 *  HW RF Kill enable is bit 0 in byte at offset 0x21 in firmware
+	 *
+	 *  notice that the EEPROM bit is reverse polarity, i.e.
+	 *     bit = 0  signifies HW RF kill switch is supported
+	 *     bit = 1  signifies HW RF kill switch is NOT supported
+	 */
+	read_nic_dword(priv->net_dev, addr + 0x20, &val);
+	if (!((val >> 24) & 0x01))
+		priv->hw_features |= HW_FEATURE_RFKILL;
+
+	IPW_DEBUG_INFO("HW RF Kill: %ssupported.\n",
+		       (priv->hw_features & HW_FEATURE_RFKILL) ? "" : "not ");
+
+	return 0;
+}
+
+/*
+ * Start firmware execution after power on and initialization
+ * The sequence is:
+ *  1. Release ARC
+ *  2. Wait for f/w initialization to complete
+ */
+static int ipw2100_start_adapter(struct ipw2100_priv *priv)
+{
+	int i;
+	u32 inta, inta_mask, gpio;
+
+	IPW_DEBUG_INFO("enter\n");
+
+	if (priv->status & STATUS_RUNNING)
+		return 0;
+
+	/*
+	 * Initialize the hw - drive adapter to D0 state by setting
+	 * init_done bit. Wait for clk_ready bit and download
+	 * fw & dino ucode
+	 */
+	if (ipw2100_download_firmware(priv)) {
+		printk(KERN_ERR DRV_NAME
+		       ": %s: Failed to power on the adapter.\n",
+		       priv->net_dev->name);
+		return -EIO;
+	}
+
+	/* Clear the Tx, Rx and Msg queues and the r/w indexes
+	 * in the firmware RBD and TBD ring queue */
+	ipw2100_queues_initialize(priv);
+
+	ipw2100_hw_set_gpio(priv);
+
+	/* TODO -- Look at disabling interrupts here to make sure none
+	 * get fired during FW initialization */
+
+	/* Release ARC - clear reset bit */
+	write_register(priv->net_dev, IPW_REG_RESET_REG, 0);
+
+	/* wait for f/w initialization complete */
+	IPW_DEBUG_FW("Waiting for f/w initialization to complete...\n");
+	i = 5000;
+	do {
+		schedule_timeout_uninterruptible(msecs_to_jiffies(40));
+		/* Todo... wait for sync command ... */
+
+		read_register(priv->net_dev, IPW_REG_INTA, &inta);
+
+		/* check "init done" bit */
+		if (inta & IPW2100_INTA_FW_INIT_DONE) {
+			/* reset "init done" bit */
+			write_register(priv->net_dev, IPW_REG_INTA,
+				       IPW2100_INTA_FW_INIT_DONE);
+			break;
+		}
+
+		/* check error conditions : we check these after the firmware
+		 * check so that if there is an error, the interrupt handler
+		 * will see it and the adapter will be reset */
+		if (inta &
+		    (IPW2100_INTA_FATAL_ERROR | IPW2100_INTA_PARITY_ERROR)) {
+			/* clear error conditions */
+			write_register(priv->net_dev, IPW_REG_INTA,
+				       IPW2100_INTA_FATAL_ERROR |
+				       IPW2100_INTA_PARITY_ERROR);
+		}
+	} while (--i);
+
+	/* Clear out any pending INTAs since we aren't supposed to have
+	 * interrupts enabled at this point... */
+	read_register(priv->net_dev, IPW_REG_INTA, &inta);
+	read_register(priv->net_dev, IPW_REG_INTA_MASK, &inta_mask);
+	inta &= IPW_INTERRUPT_MASK;
+	/* Clear out any pending interrupts */
+	if (inta & inta_mask)
+		write_register(priv->net_dev, IPW_REG_INTA, inta);
+
+	IPW_DEBUG_FW("f/w initialization complete: %s\n",
+		     i ? "SUCCESS" : "FAILED");
+
+	if (!i) {
+		printk(KERN_WARNING DRV_NAME
+		       ": %s: Firmware did not initialize.\n",
+		       priv->net_dev->name);
+		return -EIO;
+	}
+
+	/* allow firmware to write to GPIO1 & GPIO3 */
+	read_register(priv->net_dev, IPW_REG_GPIO, &gpio);
+
+	gpio |= (IPW_BIT_GPIO_GPIO1_MASK | IPW_BIT_GPIO_GPIO3_MASK);
+
+	write_register(priv->net_dev, IPW_REG_GPIO, gpio);
+
+	/* Ready to receive commands */
+	priv->status |= STATUS_RUNNING;
+
+	/* The adapter has been reset; we are not associated */
+	priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
+
+	IPW_DEBUG_INFO("exit\n");
+
+	return 0;
+}
+
+static inline void ipw2100_reset_fatalerror(struct ipw2100_priv *priv)
+{
+	if (!priv->fatal_error)
+		return;
+
+	priv->fatal_errors[priv->fatal_index++] = priv->fatal_error;
+	priv->fatal_index %= IPW2100_ERROR_QUEUE;
+	priv->fatal_error = 0;
+}
+
+/* NOTE: Our interrupt is disabled when this method is called */
+static int ipw2100_power_cycle_adapter(struct ipw2100_priv *priv)
+{
+	u32 reg;
+	int i;
+
+	IPW_DEBUG_INFO("Power cycling the hardware.\n");
+
+	ipw2100_hw_set_gpio(priv);
+
+	/* Step 1. Stop Master Assert */
+	write_register(priv->net_dev, IPW_REG_RESET_REG,
+		       IPW_AUX_HOST_RESET_REG_STOP_MASTER);
+
+	/* Step 2. Wait for stop Master Assert
+	 *         (not more than 50us, otherwise return an error) */
+	i = 5;
+	do {
+		udelay(IPW_WAIT_RESET_MASTER_ASSERT_COMPLETE_DELAY);
+		read_register(priv->net_dev, IPW_REG_RESET_REG, &reg);
+
+		if (reg & IPW_AUX_HOST_RESET_REG_MASTER_DISABLED)
+			break;
+	} while (--i);
+
+	priv->status &= ~STATUS_RESET_PENDING;
+
+	if (!i) {
+		IPW_DEBUG_INFO
+		    ("exit - waited too long for master assert stop\n");
+		return -EIO;
+	}
+
+	write_register(priv->net_dev, IPW_REG_RESET_REG,
+		       IPW_AUX_HOST_RESET_REG_SW_RESET);
+
+	/* Reset any fatal_error conditions */
+	ipw2100_reset_fatalerror(priv);
+
+	/* At this point, the adapter is now stopped and disabled */
+	priv->status &= ~(STATUS_RUNNING | STATUS_ASSOCIATING |
+			  STATUS_ASSOCIATED | STATUS_ENABLED);
+
+	return 0;
+}
+
+/*
+ * Send the CARD_DISABLE_PHY_OFF command to the card to disable it
+ *
+ * After disabling, if the card was associated, a STATUS_ASSN_LOST will be sent.
+ *
+ * STATUS_CARD_DISABLE_NOTIFICATION will be sent regardless of
+ * whether STATUS_ASSN_LOST is sent.
+ */
+static int ipw2100_hw_phy_off(struct ipw2100_priv *priv)
+{
+
+#define HW_PHY_OFF_LOOP_DELAY (msecs_to_jiffies(50))
+
+	struct host_command cmd = {
+		.host_command = CARD_DISABLE_PHY_OFF,
+		.host_command_sequence = 0,
+		.host_command_length = 0,
+	};
+	int err, i;
+	u32 val1, val2;
+
+	IPW_DEBUG_HC("CARD_DISABLE_PHY_OFF\n");
+
+	/* Turn off the radio */
+	err = ipw2100_hw_send_command(priv, &cmd);
+	if (err)
+		return err;
+
+	for (i = 0; i < 2500; i++) {
+		read_nic_dword(priv->net_dev, IPW2100_CONTROL_REG, &val1);
+		read_nic_dword(priv->net_dev, IPW2100_COMMAND, &val2);
+
+		if ((val1 & IPW2100_CONTROL_PHY_OFF) &&
+		    (val2 & IPW2100_COMMAND_PHY_OFF))
+			return 0;
+
+		schedule_timeout_uninterruptible(HW_PHY_OFF_LOOP_DELAY);
+	}
+
+	return -EIO;
+}
+
+static int ipw2100_enable_adapter(struct ipw2100_priv *priv)
+{
+	struct host_command cmd = {
+		.host_command = HOST_COMPLETE,
+		.host_command_sequence = 0,
+		.host_command_length = 0
+	};
+	int err = 0;
+
+	IPW_DEBUG_HC("HOST_COMPLETE\n");
+
+	if (priv->status & STATUS_ENABLED)
+		return 0;
+
+	mutex_lock(&priv->adapter_mutex);
+
+	if (rf_kill_active(priv)) {
+		IPW_DEBUG_HC("Command aborted due to RF kill active.\n");
+		goto fail_up;
+	}
+
+	err = ipw2100_hw_send_command(priv, &cmd);
+	if (err) {
+		IPW_DEBUG_INFO("Failed to send HOST_COMPLETE command\n");
+		goto fail_up;
+	}
+
+	err = ipw2100_wait_for_card_state(priv, IPW_HW_STATE_ENABLED);
+	if (err) {
+		IPW_DEBUG_INFO("%s: card not responding to init command.\n",
+			       priv->net_dev->name);
+		goto fail_up;
+	}
+
+	if (priv->stop_hang_check) {
+		priv->stop_hang_check = 0;
+		schedule_delayed_work(&priv->hang_check, HZ / 2);
+	}
+
+      fail_up:
+	mutex_unlock(&priv->adapter_mutex);
+	return err;
+}
+
+static int ipw2100_hw_stop_adapter(struct ipw2100_priv *priv)
+{
+#define HW_POWER_DOWN_DELAY (msecs_to_jiffies(100))
+
+	struct host_command cmd = {
+		.host_command = HOST_PRE_POWER_DOWN,
+		.host_command_sequence = 0,
+		.host_command_length = 0,
+	};
+	int err, i;
+	u32 reg;
+
+	if (!(priv->status & STATUS_RUNNING))
+		return 0;
+
+	priv->status |= STATUS_STOPPING;
+
+	/* We can only shut down the card if the firmware is operational.  So,
+	 * if we haven't reset since a fatal_error, then we cannot send the
+	 * shutdown commands. */
+	if (!priv->fatal_error) {
+		/* First, make sure the adapter is enabled so that the PHY_OFF
+		 * command can shut it down */
+		ipw2100_enable_adapter(priv);
+
+		err = ipw2100_hw_phy_off(priv);
+		if (err)
+			printk(KERN_WARNING DRV_NAME
+			       ": Error disabling radio %d\n", err);
+
+		/*
+		 * If in D0-standby mode going directly to D3 may cause a
+		 * PCI bus violation.  Therefore we must change out of the D0
+		 * state.
+		 *
+		 * Sending the PREPARE_FOR_POWER_DOWN will restrict the
+		 * hardware from going into standby mode and will transition
+		 * out of D0-standby if it is already in that state.
+		 *
+		 * STATUS_PREPARE_POWER_DOWN_COMPLETE will be sent by the
+		 * driver upon completion.  Once received, the driver can
+		 * proceed to the D3 state.
+		 *
+		 * Prepare for power down command to fw.  This command would
+		 * take HW out of D0-standby and prepare it for D3 state.
+		 *
+		 * Currently FW does not support event notification for this
+		 * event. Therefore, skip waiting for it.  Just wait a fixed
+		 * 100ms
+		 */
+		IPW_DEBUG_HC("HOST_PRE_POWER_DOWN\n");
+
+		err = ipw2100_hw_send_command(priv, &cmd);
+		if (err)
+			printk(KERN_WARNING DRV_NAME ": "
+			       "%s: Power down command failed: Error %d\n",
+			       priv->net_dev->name, err);
+		else
+			schedule_timeout_uninterruptible(HW_POWER_DOWN_DELAY);
+	}
+
+	priv->status &= ~STATUS_ENABLED;
+
+	/*
+	 * Set GPIO 3 writable by FW; GPIO 1 writable
+	 * by driver and enable clock
+	 */
+	ipw2100_hw_set_gpio(priv);
+
+	/*
+	 * Power down adapter.  Sequence:
+	 * 1. Stop master assert (RESET_REG[9]=1)
+	 * 2. Wait for stop master (RESET_REG[8]==1)
+	 * 3. S/w reset assert (RESET_REG[7] = 1)
+	 */
+
+	/* Stop master assert */
+	write_register(priv->net_dev, IPW_REG_RESET_REG,
+		       IPW_AUX_HOST_RESET_REG_STOP_MASTER);
+
+	/* wait stop master not more than 50 usec.
+	 * Otherwise return error. */
+	for (i = 5; i > 0; i--) {
+		udelay(10);
+
+		/* Check master stop bit */
+		read_register(priv->net_dev, IPW_REG_RESET_REG, &reg);
+
+		if (reg & IPW_AUX_HOST_RESET_REG_MASTER_DISABLED)
+			break;
+	}
+
+	if (i == 0)
+		printk(KERN_WARNING DRV_NAME
+		       ": %s: Could not power down adapter.\n",
+		       priv->net_dev->name);
+
+	/* assert s/w reset */
+	write_register(priv->net_dev, IPW_REG_RESET_REG,
+		       IPW_AUX_HOST_RESET_REG_SW_RESET);
+
+	priv->status &= ~(STATUS_RUNNING | STATUS_STOPPING);
+
+	return 0;
+}
+
+static int ipw2100_disable_adapter(struct ipw2100_priv *priv)
+{
+	struct host_command cmd = {
+		.host_command = CARD_DISABLE,
+		.host_command_sequence = 0,
+		.host_command_length = 0
+	};
+	int err = 0;
+
+	IPW_DEBUG_HC("CARD_DISABLE\n");
+
+	if (!(priv->status & STATUS_ENABLED))
+		return 0;
+
+	/* Make sure we clear the associated state */
+	priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
+
+	if (!priv->stop_hang_check) {
+		priv->stop_hang_check = 1;
+		cancel_delayed_work(&priv->hang_check);
+	}
+
+	mutex_lock(&priv->adapter_mutex);
+
+	err = ipw2100_hw_send_command(priv, &cmd);
+	if (err) {
+		printk(KERN_WARNING DRV_NAME
+		       ": exit - failed to send CARD_DISABLE command\n");
+		goto fail_up;
+	}
+
+	err = ipw2100_wait_for_card_state(priv, IPW_HW_STATE_DISABLED);
+	if (err) {
+		printk(KERN_WARNING DRV_NAME
+		       ": exit - card failed to change to DISABLED\n");
+		goto fail_up;
+	}
+
+	IPW_DEBUG_INFO("TODO: implement scan state machine\n");
+
+      fail_up:
+	mutex_unlock(&priv->adapter_mutex);
+	return err;
+}
+
+static int ipw2100_set_scan_options(struct ipw2100_priv *priv)
+{
+	struct host_command cmd = {
+		.host_command = SET_SCAN_OPTIONS,
+		.host_command_sequence = 0,
+		.host_command_length = 8
+	};
+	int err;
+
+	IPW_DEBUG_INFO("enter\n");
+
+	IPW_DEBUG_SCAN("setting scan options\n");
+
+	cmd.host_command_parameters[0] = 0;
+
+	if (!(priv->config & CFG_ASSOCIATE))
+		cmd.host_command_parameters[0] |= IPW_SCAN_NOASSOCIATE;
+	if ((priv->ieee->sec.flags & SEC_ENABLED) && priv->ieee->sec.enabled)
+		cmd.host_command_parameters[0] |= IPW_SCAN_MIXED_CELL;
+	if (priv->config & CFG_PASSIVE_SCAN)
+		cmd.host_command_parameters[0] |= IPW_SCAN_PASSIVE;
+
+	cmd.host_command_parameters[1] = priv->channel_mask;
+
+	err = ipw2100_hw_send_command(priv, &cmd);
+
+	IPW_DEBUG_HC("SET_SCAN_OPTIONS 0x%04X\n",
+		     cmd.host_command_parameters[0]);
+
+	return err;
+}
+
+static int ipw2100_start_scan(struct ipw2100_priv *priv)
+{
+	struct host_command cmd = {
+		.host_command = BROADCAST_SCAN,
+		.host_command_sequence = 0,
+		.host_command_length = 4
+	};
+	int err;
+
+	IPW_DEBUG_HC("START_SCAN\n");
+
+	cmd.host_command_parameters[0] = 0;
+
+	/* No scanning if in monitor mode */
+	if (priv->ieee->iw_mode == IW_MODE_MONITOR)
+		return 1;
+
+	if (priv->status & STATUS_SCANNING) {
+		IPW_DEBUG_SCAN("Scan requested while already in scan...\n");
+		return 0;
+	}
+
+	IPW_DEBUG_INFO("enter\n");
+
+	/* Not clearing here; doing so makes iwlist always return nothing...
+	 *
+	 * We should modify the table logic to use aging tables vs. clearing
+	 * the table on each scan start.
+	 */
+	IPW_DEBUG_SCAN("starting scan\n");
+
+	priv->status |= STATUS_SCANNING;
+	err = ipw2100_hw_send_command(priv, &cmd);
+	if (err)
+		priv->status &= ~STATUS_SCANNING;
+
+	IPW_DEBUG_INFO("exit\n");
+
+	return err;
+}
+
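+/* The 2100 is a 2.4 GHz, 802.11b-only adapter, so a single table
+ * covering the 14 possible 2.4 GHz channels is all that is needed. */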
+static const struct libipw_geo ipw_geos[] = {
+	{			/* Restricted */
+	 "---",
+	 .bg_channels = 14,
+	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
+		{2427, 4}, {2432, 5}, {2437, 6},
+		{2442, 7}, {2447, 8}, {2452, 9},
+		{2457, 10}, {2462, 11}, {2467, 12},
+		{2472, 13}, {2484, 14}},
+	 },
+};
+
+static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
+{
+	unsigned long flags;
+	int err = 0;
+	u32 lock;
+	u32 ord_len = sizeof(lock);
+
+	/* Age scan list entries found before suspend */
+	if (priv->suspend_time) {
+		libipw_networks_age(priv->ieee, priv->suspend_time);
+		priv->suspend_time = 0;
+	}
+
+	/* Quiet if manually disabled. */
+	if (priv->status & STATUS_RF_KILL_SW) {
+		IPW_DEBUG_INFO("%s: Radio is disabled by Manual Disable "
+			       "switch\n", priv->net_dev->name);
+		return 0;
+	}
+
+	/* the ipw2100 hardware really doesn't want power management delays
+	 * longer than 175usec
+	 */
+	pm_qos_update_request(&ipw2100_pm_qos_req, 175);
+
+	/* If the interrupt is enabled, turn it off... */
+	spin_lock_irqsave(&priv->low_lock, flags);
+	ipw2100_disable_interrupts(priv);
+
+	/* Reset any fatal_error conditions */
+	ipw2100_reset_fatalerror(priv);
+	spin_unlock_irqrestore(&priv->low_lock, flags);
+
+	if (priv->status & STATUS_POWERED ||
+	    (priv->status & STATUS_RESET_PENDING)) {
+		/* Power cycle the card ... */
+		err = ipw2100_power_cycle_adapter(priv);
+		if (err) {
+			printk(KERN_WARNING DRV_NAME
+			       ": %s: Could not cycle adapter.\n",
+			       priv->net_dev->name);
+			goto exit;
+		}
+	} else
+		priv->status |= STATUS_POWERED;
+
+	/* Load the firmware, start the clocks, etc. */
+	err = ipw2100_start_adapter(priv);
+	if (err) {
+		printk(KERN_ERR DRV_NAME
+		       ": %s: Failed to start the firmware.\n",
+		       priv->net_dev->name);
+		goto exit;
+	}
+
+	ipw2100_initialize_ordinals(priv);
+
+	/* Determine capabilities of this particular HW configuration */
+	err = ipw2100_get_hw_features(priv);
+	if (err) {
+		printk(KERN_ERR DRV_NAME
+		       ": %s: Failed to determine HW features.\n",
+		       priv->net_dev->name);
+		goto exit;
+	}
+
+	/* Initialize the geo */
+	libipw_set_geo(priv->ieee, &ipw_geos[0]);
+	priv->ieee->freq_band = LIBIPW_24GHZ_BAND;
+
+	lock = LOCK_NONE;
+	err = ipw2100_set_ordinal(priv, IPW_ORD_PERS_DB_LOCK, &lock, &ord_len);
+	if (err) {
+		printk(KERN_ERR DRV_NAME
+		       ": %s: Failed to clear ordinal lock.\n",
+		       priv->net_dev->name);
+		goto exit;
+	}
+
+	priv->status &= ~STATUS_SCANNING;
+
+	if (rf_kill_active(priv)) {
+		printk(KERN_INFO "%s: Radio is disabled by RF switch.\n",
+		       priv->net_dev->name);
+
+		if (priv->stop_rf_kill) {
+			priv->stop_rf_kill = 0;
+			schedule_delayed_work(&priv->rf_kill,
+					      round_jiffies_relative(HZ));
+		}
+
+		deferred = 1;
+	}
+
+	/* Turn on the interrupt so that commands can be processed */
+	ipw2100_enable_interrupts(priv);
+
+	/* Send all of the commands that must be sent prior to
+	 * HOST_COMPLETE */
+	err = ipw2100_adapter_setup(priv);
+	if (err) {
+		printk(KERN_ERR DRV_NAME ": %s: Failed to start the card.\n",
+		       priv->net_dev->name);
+		goto exit;
+	}
+
+	if (!deferred) {
+		/* Enable the adapter - sends HOST_COMPLETE */
+		err = ipw2100_enable_adapter(priv);
+		if (err) {
+			printk(KERN_ERR DRV_NAME ": "
+			       "%s: failed in call to enable adapter.\n",
+			       priv->net_dev->name);
+			ipw2100_hw_stop_adapter(priv);
+			goto exit;
+		}
+
+		/* Start a scan . . . */
+		ipw2100_set_scan_options(priv);
+		ipw2100_start_scan(priv);
+	}
+
+      exit:
+	return err;
+}
+
+static void ipw2100_down(struct ipw2100_priv *priv)
+{
+	unsigned long flags;
+	union iwreq_data wrqu = {
+		.ap_addr = {
+			    .sa_family = ARPHRD_ETHER}
+	};
+	int associated = priv->status & STATUS_ASSOCIATED;
+
+	/* Kill the RF switch timer */
+	if (!priv->stop_rf_kill) {
+		priv->stop_rf_kill = 1;
+		cancel_delayed_work(&priv->rf_kill);
+	}
+
+	/* Kill the firmware hang check timer */
+	if (!priv->stop_hang_check) {
+		priv->stop_hang_check = 1;
+		cancel_delayed_work(&priv->hang_check);
+	}
+
+	/* Kill any pending resets */
+	if (priv->status & STATUS_RESET_PENDING)
+		cancel_delayed_work(&priv->reset_work);
+
+	/* Make sure the interrupt is on so that FW commands will be
+	 * processed correctly */
+	spin_lock_irqsave(&priv->low_lock, flags);
+	ipw2100_enable_interrupts(priv);
+	spin_unlock_irqrestore(&priv->low_lock, flags);
+
+	if (ipw2100_hw_stop_adapter(priv))
+		printk(KERN_ERR DRV_NAME ": %s: Error stopping adapter.\n",
+		       priv->net_dev->name);
+
+	/* Do not disable the interrupt until _after_ we disable
+	 * the adaptor.  Otherwise the CARD_DISABLE command will never
+	 * be ack'd by the firmware */
+	spin_lock_irqsave(&priv->low_lock, flags);
+	ipw2100_disable_interrupts(priv);
+	spin_unlock_irqrestore(&priv->low_lock, flags);
+
+	pm_qos_update_request(&ipw2100_pm_qos_req, PM_QOS_DEFAULT_VALUE);
+
+	/* We have to signal any supplicant if we are disassociating */
+	if (associated)
+		wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
+
+	priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
+	netif_carrier_off(priv->net_dev);
+	netif_stop_queue(priv->net_dev);
+}
+
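+/* Populate the cfg80211 wireless_dev embedded in the libipw layer:
+ * copy over the permanent MAC address, translate the libipw geo into
+ * an ieee80211_supported_band for the 2.4 GHz band, advertise the
+ * supported cipher suites and register the wiphy. */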
+static int ipw2100_wdev_init(struct net_device *dev)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
+	struct wireless_dev *wdev = &priv->ieee->wdev;
+	int i;
+
+	memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
+
+	/* fill-out priv->ieee->bg_band */
+	if (geo->bg_channels) {
+		struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
+
+		bg_band->band = NL80211_BAND_2GHZ;
+		bg_band->n_channels = geo->bg_channels;
+		bg_band->channels = kcalloc(geo->bg_channels,
+					    sizeof(struct ieee80211_channel),
+					    GFP_KERNEL);
+		if (!bg_band->channels) {
+			ipw2100_down(priv);
+			return -ENOMEM;
+		}
+		/* translate geo->bg to bg_band.channels */
+		for (i = 0; i < geo->bg_channels; i++) {
+			bg_band->channels[i].band = NL80211_BAND_2GHZ;
+			bg_band->channels[i].center_freq = geo->bg[i].freq;
+			bg_band->channels[i].hw_value = geo->bg[i].channel;
+			bg_band->channels[i].max_power = geo->bg[i].max_power;
+			if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
+				bg_band->channels[i].flags |=
+					IEEE80211_CHAN_NO_IR;
+			if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
+				bg_band->channels[i].flags |=
+					IEEE80211_CHAN_NO_IR;
+			if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
+				bg_band->channels[i].flags |=
+					IEEE80211_CHAN_RADAR;
+			/* No equivalent for LIBIPW_CH_80211H_RULES,
+			   LIBIPW_CH_UNIFORM_SPREADING, or
+			   LIBIPW_CH_B_ONLY... */
+		}
+		/* point at bitrate info */
+		bg_band->bitrates = ipw2100_bg_rates;
+		bg_band->n_bitrates = RATE_COUNT;
+
+		wdev->wiphy->bands[NL80211_BAND_2GHZ] = bg_band;
+	}
+
+	wdev->wiphy->cipher_suites = ipw_cipher_suites;
+	wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites);
+
+	set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
+	if (wiphy_register(wdev->wiphy))
+		return -EIO;
+	return 0;
+}
+
+static void ipw2100_reset_adapter(struct work_struct *work)
+{
+	struct ipw2100_priv *priv =
+		container_of(work, struct ipw2100_priv, reset_work.work);
+	unsigned long flags;
+	union iwreq_data wrqu = {
+		.ap_addr = {
+			    .sa_family = ARPHRD_ETHER}
+	};
+	int associated = priv->status & STATUS_ASSOCIATED;
+
+	spin_lock_irqsave(&priv->low_lock, flags);
+	IPW_DEBUG_INFO(": %s: Restarting adapter.\n", priv->net_dev->name);
+	priv->resets++;
+	priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
+	priv->status |= STATUS_SECURITY_UPDATED;
+
+	/* Force a power cycle even if interface hasn't been opened
+	 * yet */
+	cancel_delayed_work(&priv->reset_work);
+	priv->status |= STATUS_RESET_PENDING;
+	spin_unlock_irqrestore(&priv->low_lock, flags);
+
+	mutex_lock(&priv->action_mutex);
+	/* stop timed checks so that they don't interfere with reset */
+	priv->stop_hang_check = 1;
+	cancel_delayed_work(&priv->hang_check);
+
+	/* We have to signal any supplicant if we are disassociating */
+	if (associated)
+		wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
+
+	ipw2100_up(priv, 0);
+	mutex_unlock(&priv->action_mutex);
+}
+
+static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status)
+{
+#define MAC_ASSOCIATION_READ_DELAY (HZ)
+	int ret;
+	unsigned int len, essid_len;
+	char essid[IW_ESSID_MAX_SIZE];
+	u32 txrate;
+	u32 chan;
+	char *txratename;
+	u8 bssid[ETH_ALEN];
+
+	/*
+	 * TBD: BSSID is usually 00:00:00:00:00:00 here and not
+	 *      an actual MAC of the AP. Seems like FW sets this
+	 *      address too late. Read it later and expose through
+	 *      /proc or schedule a later task to query and update
+	 */
+
+	essid_len = IW_ESSID_MAX_SIZE;
+	ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_SSID,
+				  essid, &essid_len);
+	if (ret) {
+		IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
+			       __LINE__);
+		return;
+	}
+
+	len = sizeof(u32);
+	ret = ipw2100_get_ordinal(priv, IPW_ORD_CURRENT_TX_RATE, &txrate, &len);
+	if (ret) {
+		IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
+			       __LINE__);
+		return;
+	}
+
+	len = sizeof(u32);
+	ret = ipw2100_get_ordinal(priv, IPW_ORD_OUR_FREQ, &chan, &len);
+	if (ret) {
+		IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
+			       __LINE__);
+		return;
+	}
+	len = ETH_ALEN;
+	ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, bssid,
+				  &len);
+	if (ret) {
+		IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
+			       __LINE__);
+		return;
+	}
+	memcpy(priv->ieee->bssid, bssid, ETH_ALEN);
+
+	switch (txrate) {
+	case TX_RATE_1_MBIT:
+		txratename = "1Mbps";
+		break;
+	case TX_RATE_2_MBIT:
+		txratename = "2Mbsp";
+		break;
+	case TX_RATE_5_5_MBIT:
+		txratename = "5.5Mbps";
+		break;
+	case TX_RATE_11_MBIT:
+		txratename = "11Mbps";
+		break;
+	default:
+		IPW_DEBUG_INFO("Unknown rate: %d\n", txrate);
+		txratename = "unknown rate";
+		break;
+	}
+
+	IPW_DEBUG_INFO("%s: Associated with '%*pE' at %s, channel %d (BSSID=%pM)\n",
+		       priv->net_dev->name, essid_len, essid,
+		       txratename, chan, bssid);
+
+	/* now we copy read ssid into dev */
+	if (!(priv->config & CFG_STATIC_ESSID)) {
+		priv->essid_len = min((u8) essid_len, (u8) IW_ESSID_MAX_SIZE);
+		memcpy(priv->essid, essid, priv->essid_len);
+	}
+	priv->channel = chan;
+	memcpy(priv->bssid, bssid, ETH_ALEN);
+
+	priv->status |= STATUS_ASSOCIATING;
+	priv->connect_start = ktime_get_boottime_seconds();
+
+	schedule_delayed_work(&priv->wx_event_work, HZ / 10);
+}
+
+static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
+			     int length, int batch_mode)
+{
+	int ssid_len = min(length, IW_ESSID_MAX_SIZE);
+	struct host_command cmd = {
+		.host_command = SSID,
+		.host_command_sequence = 0,
+		.host_command_length = ssid_len
+	};
+	int err;
+
+	IPW_DEBUG_HC("SSID: '%*pE'\n", ssid_len, essid);
+
+	if (ssid_len)
+		memcpy(cmd.host_command_parameters, essid, ssid_len);
+
+	if (!batch_mode) {
+		err = ipw2100_disable_adapter(priv);
+		if (err)
+			return err;
+	}
+
+	/* Bug in FW currently doesn't honor bit 0 in SET_SCAN_OPTIONS to
+	 * disable auto association -- so we cheat by setting a bogus SSID */
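+	/* The pattern below fills the SSID with the bytes 0x18..0x37,
+	 * a string no real AP should be configured with. */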
+	if (!ssid_len && !(priv->config & CFG_ASSOCIATE)) {
+		int i;
+		u8 *bogus = (u8 *) cmd.host_command_parameters;
+		for (i = 0; i < IW_ESSID_MAX_SIZE; i++)
+			bogus[i] = 0x18 + i;
+		cmd.host_command_length = IW_ESSID_MAX_SIZE;
+	}
+
+	/* NOTE:  We always send the SSID command even if the provided ESSID is
+	 * the same as what we currently think is set. */
+
+	err = ipw2100_hw_send_command(priv, &cmd);
+	if (!err) {
+		memset(priv->essid + ssid_len, 0, IW_ESSID_MAX_SIZE - ssid_len);
+		memcpy(priv->essid, essid, ssid_len);
+		priv->essid_len = ssid_len;
+	}
+
+	if (!batch_mode) {
+		if (ipw2100_enable_adapter(priv))
+			err = -EIO;
+	}
+
+	return err;
+}
+
+static void isr_indicate_association_lost(struct ipw2100_priv *priv, u32 status)
+{
+	IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
+		  "disassociated: '%*pE' %pM\n", priv->essid_len, priv->essid,
+		  priv->bssid);
+
+	priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
+
+	if (priv->status & STATUS_STOPPING) {
+		IPW_DEBUG_INFO("Card is stopping itself, discard ASSN_LOST.\n");
+		return;
+	}
+
+	eth_zero_addr(priv->bssid);
+	eth_zero_addr(priv->ieee->bssid);
+
+	netif_carrier_off(priv->net_dev);
+	netif_stop_queue(priv->net_dev);
+
+	if (!(priv->status & STATUS_RUNNING))
+		return;
+
+	if (priv->status & STATUS_SECURITY_UPDATED)
+		schedule_delayed_work(&priv->security_work, 0);
+
+	schedule_delayed_work(&priv->wx_event_work, 0);
+}
+
+static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
+{
+	IPW_DEBUG_INFO("%s: RF Kill state changed to radio OFF.\n",
+		       priv->net_dev->name);
+
+	/* RF_KILL is now enabled (else we wouldn't be here) */
+	wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
+	priv->status |= STATUS_RF_KILL_HW;
+
+	/* Make sure the RF Kill check timer is running */
+	priv->stop_rf_kill = 0;
+	mod_delayed_work(system_wq, &priv->rf_kill, round_jiffies_relative(HZ));
+}
+
+static void ipw2100_scan_event(struct work_struct *work)
+{
+	struct ipw2100_priv *priv = container_of(work, struct ipw2100_priv,
+						 scan_event.work);
+	union iwreq_data wrqu;
+
+	wrqu.data.length = 0;
+	wrqu.data.flags = 0;
+	wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
+}
+
+static void isr_scan_complete(struct ipw2100_priv *priv, u32 status)
+{
+	IPW_DEBUG_SCAN("scan complete\n");
+	/* Age the scan results... */
+	priv->ieee->scans++;
+	priv->status &= ~STATUS_SCANNING;
+
+	/* Only userspace-requested scan completion events go out immediately */
+	if (!priv->user_requested_scan) {
+		schedule_delayed_work(&priv->scan_event,
+				      round_jiffies_relative(msecs_to_jiffies(4000)));
+	} else {
+		priv->user_requested_scan = 0;
+		mod_delayed_work(system_wq, &priv->scan_event, 0);
+	}
+}
+
+#ifdef CONFIG_IPW2100_DEBUG
+#define IPW2100_HANDLER(v, f) { v, f, # v }
+struct ipw2100_status_indicator {
+	int status;
+	void (*cb) (struct ipw2100_priv * priv, u32 status);
+	char *name;
+};
+#else
+#define IPW2100_HANDLER(v, f) { v, f }
+struct ipw2100_status_indicator {
+	int status;
+	void (*cb) (struct ipw2100_priv * priv, u32 status);
+};
+#endif				/* CONFIG_IPW2100_DEBUG */
+
+static void isr_indicate_scanning(struct ipw2100_priv *priv, u32 status)
+{
+	IPW_DEBUG_SCAN("Scanning...\n");
+	priv->status |= STATUS_SCANNING;
+}
+
+static const struct ipw2100_status_indicator status_handlers[] = {
+	IPW2100_HANDLER(IPW_STATE_INITIALIZED, NULL),
+	IPW2100_HANDLER(IPW_STATE_COUNTRY_FOUND, NULL),
+	IPW2100_HANDLER(IPW_STATE_ASSOCIATED, isr_indicate_associated),
+	IPW2100_HANDLER(IPW_STATE_ASSN_LOST, isr_indicate_association_lost),
+	IPW2100_HANDLER(IPW_STATE_ASSN_CHANGED, NULL),
+	IPW2100_HANDLER(IPW_STATE_SCAN_COMPLETE, isr_scan_complete),
+	IPW2100_HANDLER(IPW_STATE_ENTERED_PSP, NULL),
+	IPW2100_HANDLER(IPW_STATE_LEFT_PSP, NULL),
+	IPW2100_HANDLER(IPW_STATE_RF_KILL, isr_indicate_rf_kill),
+	IPW2100_HANDLER(IPW_STATE_DISABLED, NULL),
+	IPW2100_HANDLER(IPW_STATE_POWER_DOWN, NULL),
+	IPW2100_HANDLER(IPW_STATE_SCANNING, isr_indicate_scanning),
+	IPW2100_HANDLER(-1, NULL)
+};
+
+static void isr_status_change(struct ipw2100_priv *priv, int status)
+{
+	int i;
+
+	if (status == IPW_STATE_SCANNING &&
+	    priv->status & STATUS_ASSOCIATED &&
+	    !(priv->status & STATUS_SCANNING)) {
+		IPW_DEBUG_INFO("Scan detected while associated, with "
+			       "no scan request.  Restarting firmware.\n");
+
+		/* Wake up any sleeping jobs */
+		schedule_reset(priv);
+	}
+
+	for (i = 0; status_handlers[i].status != -1; i++) {
+		if (status == status_handlers[i].status) {
+			IPW_DEBUG_NOTIF("Status change: %s\n",
+					status_handlers[i].name);
+			if (status_handlers[i].cb)
+				status_handlers[i].cb(priv, status);
+			priv->wstats.status = status;
+			return;
+		}
+	}
+
+	IPW_DEBUG_NOTIF("unknown status received: %04x\n", status);
+}
+
+static void isr_rx_complete_command(struct ipw2100_priv *priv,
+				    struct ipw2100_cmd_header *cmd)
+{
+#ifdef CONFIG_IPW2100_DEBUG
+	if (cmd->host_command_reg < ARRAY_SIZE(command_types)) {
+		IPW_DEBUG_HC("Command completed '%s (%d)'\n",
+			     command_types[cmd->host_command_reg],
+			     cmd->host_command_reg);
+	}
+#endif
+	if (cmd->host_command_reg == HOST_COMPLETE)
+		priv->status |= STATUS_ENABLED;
+
+	if (cmd->host_command_reg == CARD_DISABLE)
+		priv->status &= ~STATUS_ENABLED;
+
+	priv->status &= ~STATUS_CMD_ACTIVE;
+
+	wake_up_interruptible(&priv->wait_command_queue);
+}
+
+#ifdef CONFIG_IPW2100_DEBUG
+static const char *frame_types[] = {
+	"COMMAND_STATUS_VAL",
+	"STATUS_CHANGE_VAL",
+	"P80211_DATA_VAL",
+	"P8023_DATA_VAL",
+	"HOST_NOTIFICATION_VAL"
+};
+#endif
+
+static int ipw2100_alloc_skb(struct ipw2100_priv *priv,
+				    struct ipw2100_rx_packet *packet)
+{
+	packet->skb = dev_alloc_skb(sizeof(struct ipw2100_rx));
+	if (!packet->skb)
+		return -ENOMEM;
+
+	packet->rxp = (struct ipw2100_rx *)packet->skb->data;
+	packet->dma_addr = pci_map_single(priv->pci_dev, packet->skb->data,
+					  sizeof(struct ipw2100_rx),
+					  PCI_DMA_FROMDEVICE);
+	if (pci_dma_mapping_error(priv->pci_dev, packet->dma_addr)) {
+		dev_kfree_skb(packet->skb);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+#define SEARCH_ERROR   0xffffffff
+#define SEARCH_FAIL    0xfffffffe
+#define SEARCH_SUCCESS 0xfffffff0
+#define SEARCH_DISCARD 0
+#define SEARCH_SNAPSHOT 1
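+/* Return values of ipw2100_match_buf() below: the search space is
+ * only 0x30000 bytes, so any value below SEARCH_SUCCESS is a genuine
+ * match offset rather than one of these sentinels. */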
+
+#define SNAPSHOT_ADDR(ofs) (priv->snapshot[((ofs) >> 12) & 0xff] + ((ofs) & 0xfff))
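+/* SNAPSHOT_ADDR splits a NIC offset into a 4 KiB page and an offset
+ * within it: e.g. ofs == 0x12345 selects byte 0x345 of
+ * snapshot[0x12].  The 0x30 pages together mirror the first 0x30000
+ * bytes of NIC memory. */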
+static void ipw2100_snapshot_free(struct ipw2100_priv *priv)
+{
+	int i;
+	if (!priv->snapshot[0])
+		return;
+	for (i = 0; i < 0x30; i++)
+		kfree(priv->snapshot[i]);
+	priv->snapshot[0] = NULL;
+}
+
+#ifdef IPW2100_DEBUG_C3
+static int ipw2100_snapshot_alloc(struct ipw2100_priv *priv)
+{
+	int i;
+	if (priv->snapshot[0])
+		return 1;
+	for (i = 0; i < 0x30; i++) {
+		priv->snapshot[i] = kmalloc(0x1000, GFP_ATOMIC);
+		if (!priv->snapshot[i]) {
+			IPW_DEBUG_INFO("%s: Error allocating snapshot "
+				       "buffer %d\n", priv->net_dev->name, i);
+			while (i > 0)
+				kfree(priv->snapshot[--i]);
+			priv->snapshot[0] = NULL;
+			return 0;
+		}
+	}
+
+	return 1;
+}
+
+static u32 ipw2100_match_buf(struct ipw2100_priv *priv, u8 * in_buf,
+				    size_t len, int mode)
+{
+	u32 i, j;
+	u32 tmp;
+	u8 *s, *d;
+	u32 ret;
+
+	s = in_buf;
+	if (mode == SEARCH_SNAPSHOT) {
+		if (!ipw2100_snapshot_alloc(priv))
+			mode = SEARCH_DISCARD;
+	}
+
+	for (ret = SEARCH_FAIL, i = 0; i < 0x30000; i += 4) {
+		read_nic_dword(priv->net_dev, i, &tmp);
+		if (mode == SEARCH_SNAPSHOT)
+			*(u32 *) SNAPSHOT_ADDR(i) = tmp;
+		if (ret == SEARCH_FAIL) {
+			d = (u8 *) & tmp;
+			for (j = 0; j < 4; j++) {
+				if (*s != *d) {
+					s = in_buf;
+					continue;
+				}
+
+				s++;
+				d++;
+
+				if ((s - in_buf) == len)
+					ret = (i + j) - len + 1;
+			}
+		} else if (mode == SEARCH_DISCARD)
+			return ret;
+	}
+
+	return ret;
+}
+#endif
+
+/*
+ *
+ * 0) Disconnect the SKB from the firmware (just unmap)
+ * 1) Pack the ETH header into the SKB
+ * 2) Pass the SKB to the network stack
+ *
+ * When a packet is provided by the firmware, it contains the following:
+ *
+ * .  libipw_hdr
+ * .  libipw_snap_hdr
+ *
+ * The size of the constructed ethernet
+ *
+ */
+#ifdef IPW2100_RX_DEBUG
+static u8 packet_data[IPW_RX_NIC_BUFFER_LENGTH];
+#endif
+
+static void ipw2100_corruption_detected(struct ipw2100_priv *priv, int i)
+{
+#ifdef IPW2100_DEBUG_C3
+	struct ipw2100_status *status = &priv->status_queue.drv[i];
+	u32 match, reg;
+	int j;
+#endif
+
+	IPW_DEBUG_INFO(": PCI latency error detected at 0x%04zX.\n",
+		       i * sizeof(struct ipw2100_status));
+
+#ifdef IPW2100_DEBUG_C3
+	/* Halt the firmware so we can get a good image */
+	write_register(priv->net_dev, IPW_REG_RESET_REG,
+		       IPW_AUX_HOST_RESET_REG_STOP_MASTER);
+	j = 5;
+	do {
+		udelay(IPW_WAIT_RESET_MASTER_ASSERT_COMPLETE_DELAY);
+		read_register(priv->net_dev, IPW_REG_RESET_REG, &reg);
+
+		if (reg & IPW_AUX_HOST_RESET_REG_MASTER_DISABLED)
+			break;
+	} while (j--);
+
+	match = ipw2100_match_buf(priv, (u8 *) status,
+				  sizeof(struct ipw2100_status),
+				  SEARCH_SNAPSHOT);
+	if (match < SEARCH_SUCCESS)
+		IPW_DEBUG_INFO("%s: DMA status match in Firmware at "
+			       "offset 0x%06X, length %d:\n",
+			       priv->net_dev->name, match,
+			       sizeof(struct ipw2100_status));
+	else
+		IPW_DEBUG_INFO("%s: No DMA status match in "
+			       "Firmware.\n", priv->net_dev->name);
+
+	printk_buf((u8 *) priv->status_queue.drv,
+		   sizeof(struct ipw2100_status) * RX_QUEUE_LENGTH);
+#endif
+
+	priv->fatal_error = IPW2100_ERR_C3_CORRUPTION;
+	priv->net_dev->stats.rx_errors++;
+	schedule_reset(priv);
+}
+
+static void isr_rx(struct ipw2100_priv *priv, int i,
+			  struct libipw_rx_stats *stats)
+{
+	struct net_device *dev = priv->net_dev;
+	struct ipw2100_status *status = &priv->status_queue.drv[i];
+	struct ipw2100_rx_packet *packet = &priv->rx_buffers[i];
+
+	IPW_DEBUG_RX("Handler...\n");
+
+	if (unlikely(status->frame_size > skb_tailroom(packet->skb))) {
+		IPW_DEBUG_INFO("%s: frame_size (%u) > skb_tailroom (%u)!"
+			       "  Dropping.\n",
+			       dev->name,
+			       status->frame_size, skb_tailroom(packet->skb));
+		dev->stats.rx_errors++;
+		return;
+	}
+
+	if (unlikely(!netif_running(dev))) {
+		dev->stats.rx_errors++;
+		priv->wstats.discard.misc++;
+		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
+		return;
+	}
+
+	if (unlikely(priv->ieee->iw_mode != IW_MODE_MONITOR &&
+		     !(priv->status & STATUS_ASSOCIATED))) {
+		IPW_DEBUG_DROP("Dropping packet while not associated.\n");
+		priv->wstats.discard.misc++;
+		return;
+	}
+
+	pci_unmap_single(priv->pci_dev,
+			 packet->dma_addr,
+			 sizeof(struct ipw2100_rx), PCI_DMA_FROMDEVICE);
+
+	skb_put(packet->skb, status->frame_size);
+
+#ifdef IPW2100_RX_DEBUG
+	/* Make a copy of the frame so we can dump it to the logs if
+	 * libipw_rx fails */
+	skb_copy_from_linear_data(packet->skb, packet_data,
+				  min_t(u32, status->frame_size,
+					     IPW_RX_NIC_BUFFER_LENGTH));
+#endif
+
+	if (!libipw_rx(priv->ieee, packet->skb, stats)) {
+#ifdef IPW2100_RX_DEBUG
+		IPW_DEBUG_DROP("%s: Non consumed packet:\n",
+			       dev->name);
+		printk_buf(IPW_DL_DROP, packet_data, status->frame_size);
+#endif
+		dev->stats.rx_errors++;
+
+		/* libipw_rx failed, so it didn't free the SKB */
+		dev_kfree_skb_any(packet->skb);
+		packet->skb = NULL;
+	}
+
+	/* We need to allocate a new SKB and attach it to the RBD. */
+	if (unlikely(ipw2100_alloc_skb(priv, packet))) {
+		printk(KERN_WARNING DRV_NAME ": "
+		       "%s: Unable to allocate SKB onto RBD ring - disabling "
+		       "adapter.\n", dev->name);
+		/* TODO: schedule adapter shutdown */
+		IPW_DEBUG_INFO("TODO: Shutdown adapter...\n");
+	}
+
+	/* Update the RBD entry */
+	priv->rx_queue.drv[i].host_addr = packet->dma_addr;
+}
+
+#ifdef CONFIG_IPW2100_MONITOR
+
+static void isr_rx_monitor(struct ipw2100_priv *priv, int i,
+		   struct libipw_rx_stats *stats)
+{
+	struct net_device *dev = priv->net_dev;
+	struct ipw2100_status *status = &priv->status_queue.drv[i];
+	struct ipw2100_rx_packet *packet = &priv->rx_buffers[i];
+
+	/* Magic struct that slots into the radiotap header -- no reason
+	 * to build this manually element by element, we can write it much
+	 * more efficiently than we can parse it. ORDER MATTERS HERE */
+	struct ipw_rt_hdr {
+		struct ieee80211_radiotap_header rt_hdr;
+		s8 rt_dbmsignal; /* signal in dBm, kludged to signed */
+	} *ipw_rt;
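+	/* Only one radiotap field (DBM_ANTSIGNAL, an s8) follows the
+	 * fixed header, so it_len below is simply the size of this
+	 * struct. */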
+
+	IPW_DEBUG_RX("Handler...\n");
+
+	if (unlikely(status->frame_size > skb_tailroom(packet->skb) -
+				sizeof(struct ipw_rt_hdr))) {
+		IPW_DEBUG_INFO("%s: frame_size (%u) > skb_tailroom (%u)!"
+			       "  Dropping.\n",
+			       dev->name,
+			       status->frame_size,
+			       skb_tailroom(packet->skb));
+		dev->stats.rx_errors++;
+		return;
+	}
+
+	if (unlikely(!netif_running(dev))) {
+		dev->stats.rx_errors++;
+		priv->wstats.discard.misc++;
+		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
+		return;
+	}
+
+	if (unlikely(priv->config & CFG_CRC_CHECK &&
+		     status->flags & IPW_STATUS_FLAG_CRC_ERROR)) {
+		IPW_DEBUG_RX("CRC error in packet.  Dropping.\n");
+		dev->stats.rx_errors++;
+		return;
+	}
+
+	pci_unmap_single(priv->pci_dev, packet->dma_addr,
+			 sizeof(struct ipw2100_rx), PCI_DMA_FROMDEVICE);
+	memmove(packet->skb->data + sizeof(struct ipw_rt_hdr),
+		packet->skb->data, status->frame_size);
+
+	ipw_rt = (struct ipw_rt_hdr *) packet->skb->data;
+
+	ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
+	ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
+	ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct ipw_rt_hdr)); /* total hdr+data */
+
+	ipw_rt->rt_hdr.it_present = cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
+
+	ipw_rt->rt_dbmsignal = status->rssi + IPW2100_RSSI_TO_DBM;
+
+	skb_put(packet->skb, status->frame_size + sizeof(struct ipw_rt_hdr));
+
+	if (!libipw_rx(priv->ieee, packet->skb, stats)) {
+		dev->stats.rx_errors++;
+
+		/* libipw_rx failed, so it didn't free the SKB */
+		dev_kfree_skb_any(packet->skb);
+		packet->skb = NULL;
+	}
+
+	/* We need to allocate a new SKB and attach it to the RBD. */
+	if (unlikely(ipw2100_alloc_skb(priv, packet))) {
+		IPW_DEBUG_WARNING(
+			"%s: Unable to allocate SKB onto RBD ring - disabling "
+			"adapter.\n", dev->name);
+		/* TODO: schedule adapter shutdown */
+		IPW_DEBUG_INFO("TODO: Shutdown adapter...\n");
+	}
+
+	/* Update the RBD entry */
+	priv->rx_queue.drv[i].host_addr = packet->dma_addr;
+}
+
+#endif
+
+static int ipw2100_corruption_check(struct ipw2100_priv *priv, int i)
+{
+	struct ipw2100_status *status = &priv->status_queue.drv[i];
+	struct ipw2100_rx *u = priv->rx_buffers[i].rxp;
+	u16 frame_type = status->status_fields & STATUS_TYPE_MASK;
+
+	switch (frame_type) {
+	case COMMAND_STATUS_VAL:
+		return (status->frame_size != sizeof(u->rx_data.command));
+	case STATUS_CHANGE_VAL:
+		return (status->frame_size != sizeof(u->rx_data.status));
+	case HOST_NOTIFICATION_VAL:
+		return (status->frame_size < sizeof(u->rx_data.notification));
+	case P80211_DATA_VAL:
+	case P8023_DATA_VAL:
+#ifdef CONFIG_IPW2100_MONITOR
+		return 0;
+#else
+		switch (WLAN_FC_GET_TYPE(le16_to_cpu(u->rx_data.header.frame_ctl))) {
+		case IEEE80211_FTYPE_MGMT:
+		case IEEE80211_FTYPE_CTL:
+			return 0;
+		case IEEE80211_FTYPE_DATA:
+			return (status->frame_size >
+				IPW_MAX_802_11_PAYLOAD_LENGTH);
+		}
+#endif
+	}
+
+	return 1;
+}
+
+/*
+ * ipw2100 interrupts are disabled at this point, and the ISR
+ * is the only code that calls this method.  So, we do not need
+ * to play with any locks.
+ *
+ * RX Queue works as follows:
+ *
+ * Read index - firmware places packet in entry identified by the
+ *              Read index and advances Read index.  In this manner,
+ *              Read index will always point to the next packet to
+ *              be filled--but not yet valid.
+ *
+ * Write index - driver fills this entry with an unused RBD entry.
+ *               This entry has not been filled by the firmware yet.
+ *
+ * In between the W and R indexes are the RBDs that have been received
+ * but not yet processed.
+ *
+ * The process of handling packets will start at WRITE + 1 and advance
+ * until it reaches the READ index.
+ *
+ * The WRITE index is cached in the variable 'priv->rx_queue.next'.
+ *
+ */
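+/*
+ * For example, with entries == 256, rx_queue.next == 10 and a read
+ * index of 14, the loop below processes entries 11, 12 and 13 and
+ * then writes 13 back as the new write index.
+ */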
+static void __ipw2100_rx_process(struct ipw2100_priv *priv)
+{
+	struct ipw2100_bd_queue *rxq = &priv->rx_queue;
+	struct ipw2100_status_queue *sq = &priv->status_queue;
+	struct ipw2100_rx_packet *packet;
+	u16 frame_type;
+	u32 r, w, i, s;
+	struct ipw2100_rx *u;
+	struct libipw_rx_stats stats = {
+		.mac_time = jiffies,
+	};
+
+	read_register(priv->net_dev, IPW_MEM_HOST_SHARED_RX_READ_INDEX, &r);
+	read_register(priv->net_dev, IPW_MEM_HOST_SHARED_RX_WRITE_INDEX, &w);
+
+	if (r >= rxq->entries) {
+		IPW_DEBUG_RX("exit - bad read index\n");
+		return;
+	}
+
+	i = (rxq->next + 1) % rxq->entries;
+	s = i;
+	while (i != r) {
+		/* IPW_DEBUG_RX("r = %d : w = %d : processing = %d\n",
+		   r, rxq->next, i); */
+
+		packet = &priv->rx_buffers[i];
+
+		/* Sync the DMA for the RX buffer so CPU is sure to get
+		 * the correct values */
+		pci_dma_sync_single_for_cpu(priv->pci_dev, packet->dma_addr,
+					    sizeof(struct ipw2100_rx),
+					    PCI_DMA_FROMDEVICE);
+
+		if (unlikely(ipw2100_corruption_check(priv, i))) {
+			ipw2100_corruption_detected(priv, i);
+			goto increment;
+		}
+
+		u = packet->rxp;
+		frame_type = sq->drv[i].status_fields & STATUS_TYPE_MASK;
+		stats.rssi = sq->drv[i].rssi + IPW2100_RSSI_TO_DBM;
+		stats.len = sq->drv[i].frame_size;
+
+		stats.mask = 0;
+		if (stats.rssi != 0)
+			stats.mask |= LIBIPW_STATMASK_RSSI;
+		stats.freq = LIBIPW_24GHZ_BAND;
+
+		IPW_DEBUG_RX("%s: '%s' frame type received (%d).\n",
+			     priv->net_dev->name, frame_types[frame_type],
+			     stats.len);
+
+		switch (frame_type) {
+		case COMMAND_STATUS_VAL:
+			/* Reset Rx watchdog */
+			isr_rx_complete_command(priv, &u->rx_data.command);
+			break;
+
+		case STATUS_CHANGE_VAL:
+			isr_status_change(priv, u->rx_data.status);
+			break;
+
+		case P80211_DATA_VAL:
+		case P8023_DATA_VAL:
+#ifdef CONFIG_IPW2100_MONITOR
+			if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
+				isr_rx_monitor(priv, i, &stats);
+				break;
+			}
+#endif
+			if (stats.len < sizeof(struct libipw_hdr_3addr))
+				break;
+			switch (WLAN_FC_GET_TYPE(le16_to_cpu(u->rx_data.header.frame_ctl))) {
+			case IEEE80211_FTYPE_MGMT:
+				libipw_rx_mgt(priv->ieee,
+						 &u->rx_data.header, &stats);
+				break;
+
+			case IEEE80211_FTYPE_CTL:
+				break;
+
+			case IEEE80211_FTYPE_DATA:
+				isr_rx(priv, i, &stats);
+				break;
+
+			}
+			break;
+		}
+
+	      increment:
+		/* clear status field associated with this RBD */
+		rxq->drv[i].status.info.field = 0;
+
+		i = (i + 1) % rxq->entries;
+	}
+
+	if (i != s) {
+		/* backtrack one entry, wrapping to end if at 0 */
+		rxq->next = (i ? i : rxq->entries) - 1;
+
+		write_register(priv->net_dev,
+			       IPW_MEM_HOST_SHARED_RX_WRITE_INDEX, rxq->next);
+	}
+}
+
+/*
+ * __ipw2100_tx_process
+ *
+ * This routine will determine whether the next packet on
+ * the fw_pend_list has been processed by the firmware yet.
+ *
+ * If not, then it does nothing and returns.
+ *
+ * If so, then it removes the item from the fw_pend_list, frees
+ * any associated storage, and places the item back on the
+ * free list of its source (either msg_free_list or tx_free_list)
+ *
+ * TX Queue works as follows:
+ *
+ * Read index - points to the next TBD that the firmware will
+ *              process.  The firmware will read the data, and once
+ *              done processing, it will advance the Read index.
+ *
+ * Write index - driver fills this entry with a constructed TBD
+ *               entry.  The Write index is not advanced until the
+ *               packet has been configured.
+ *
+ * In between the W and R indexes are the TBDs that have NOT been
+ * processed.  Lagging behind the R index are packets that have
+ * been processed but have not been freed by the driver.
+ *
+ * In order to free old storage, an internal index will be maintained
+ * that points to the next packet to be freed.  When all used
+ * packets have been freed, the oldest index will be the same as the
+ * firmware's read index.
+ *
+ * The OLDEST index is cached in the variable 'priv->tx_queue.oldest'
+ *
+ * Because the TBD structure can not contain arbitrary data, the
+ * driver must keep an internal queue of cached allocations such that
+ * it can put that data back into the tx_free_list and msg_free_list
+ * for use by future command and data packets.
+ *
+ */
+static int __ipw2100_tx_process(struct ipw2100_priv *priv)
+{
+	struct ipw2100_bd_queue *txq = &priv->tx_queue;
+	struct ipw2100_bd *tbd;
+	struct list_head *element;
+	struct ipw2100_tx_packet *packet;
+	int descriptors_used;
+	int e, i;
+	u32 r, w, frag_num = 0;
+
+	if (list_empty(&priv->fw_pend_list))
+		return 0;
+
+	element = priv->fw_pend_list.next;
+
+	packet = list_entry(element, struct ipw2100_tx_packet, list);
+	tbd = &txq->drv[packet->index];
+
+	/* Determine how many TBD entries must be finished... */
+	switch (packet->type) {
+	case COMMAND:
+		/* COMMAND uses only one slot; don't advance */
+		descriptors_used = 1;
+		e = txq->oldest;
+		break;
+
+	case DATA:
+		/* DATA uses one slot for the header plus one per
+		 * fragment; advance e to the last slot, with wrap. */
+		descriptors_used = tbd->num_fragments;
+		frag_num = tbd->num_fragments - 1;
+		e = txq->oldest + frag_num;
+		e %= txq->entries;
+		break;
+
+	default:
+		printk(KERN_WARNING DRV_NAME ": %s: Bad fw_pend_list entry!\n",
+		       priv->net_dev->name);
+		return 0;
+	}
+
+	/* If the last TBD is not done by the NIC yet, then the packet
+	 * is not ready to be released. */
+	read_register(priv->net_dev, IPW_MEM_HOST_SHARED_TX_QUEUE_READ_INDEX,
+		      &r);
+	read_register(priv->net_dev, IPW_MEM_HOST_SHARED_TX_QUEUE_WRITE_INDEX,
+		      &w);
+	if (w != txq->next)
+		printk(KERN_WARNING DRV_NAME ": %s: write index mismatch\n",
+		       priv->net_dev->name);
+
+	/*
+	 * txq->next is the index of the next TBD the driver will write,
+	 * txq->oldest is the index of the oldest TBD not yet freed, and
+	 * r is the index of the next packet to be read by the firmware.
+	 */
+
+	/*
+	 * Quick graphic to help you visualize the following
+	 * if / else statement
+	 *
+	 * ===>|                     s---->|===============
+	 *                               e>|
+	 * | a | b | c | d | e | f | g | h | i | j | k | l
+	 *       r---->|
+	 *               w
+	 *
+	 * w - updated by driver
+	 * r - updated by firmware
+	 * s - start of oldest BD entry (txq->oldest)
+	 * e - end of oldest BD entry
+	 *
+	 */
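+	/*
+	 * In other words, the oldest packet, ending at TBD e, may only
+	 * be released once e lies outside the not-yet-read window
+	 * [r, w).  E.g. with r == 5 and w == 9: e == 4 has already been
+	 * read by the firmware and may be freed, while e == 6 has not.
+	 */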
+	if (!((r <= w && (e < r || e >= w)) || (e < r && e >= w))) {
+		IPW_DEBUG_TX("exit - no processed packets ready to release.\n");
+		return 0;
+	}
+
+	list_del(element);
+	DEC_STAT(&priv->fw_pend_stat);
+
+#ifdef CONFIG_IPW2100_DEBUG
+	{
+		i = txq->oldest;
+		IPW_DEBUG_TX("TX%d V=%p P=%04X T=%04X L=%d\n", i,
+			     &txq->drv[i],
+			     (u32) (txq->nic + i * sizeof(struct ipw2100_bd)),
+			     txq->drv[i].host_addr, txq->drv[i].buf_length);
+
+		if (packet->type == DATA) {
+			i = (i + 1) % txq->entries;
+
+			IPW_DEBUG_TX("TX%d V=%p P=%04X T=%04X L=%d\n", i,
+				     &txq->drv[i],
+				     (u32) (txq->nic + i *
+					    sizeof(struct ipw2100_bd)),
+				     (u32) txq->drv[i].host_addr,
+				     txq->drv[i].buf_length);
+		}
+	}
+#endif
+
+	switch (packet->type) {
+	case DATA:
+		if (txq->drv[txq->oldest].status.info.fields.txType != 0)
+			printk(KERN_WARNING DRV_NAME ": %s: Queue mismatch.  "
+			       "Expecting DATA TBD but pulled "
+			       "something else: ids %d=%d.\n",
+			       priv->net_dev->name, txq->oldest, packet->index);
+
+		/* DATA packet; we have to unmap and free the SKB */
+		for (i = 0; i < frag_num; i++) {
+			tbd = &txq->drv[(packet->index + 1 + i) % txq->entries];
+
+			IPW_DEBUG_TX("TX%d P=%08x L=%d\n",
+				     (packet->index + 1 + i) % txq->entries,
+				     tbd->host_addr, tbd->buf_length);
+
+			pci_unmap_single(priv->pci_dev,
+					 tbd->host_addr,
+					 tbd->buf_length, PCI_DMA_TODEVICE);
+		}
+
+		libipw_txb_free(packet->info.d_struct.txb);
+		packet->info.d_struct.txb = NULL;
+
+		list_add_tail(element, &priv->tx_free_list);
+		INC_STAT(&priv->tx_free_stat);
+
+		/* We have a free slot in the Tx queue, so wake up the
+		 * transmit layer if it is stopped. */
+		if (priv->status & STATUS_ASSOCIATED)
+			netif_wake_queue(priv->net_dev);
+
+		/* A packet was processed by the hardware, so update the
+		 * watchdog */
+		netif_trans_update(priv->net_dev);
+
+		break;
+
+	case COMMAND:
+		if (txq->drv[txq->oldest].status.info.fields.txType != 1)
+			printk(KERN_WARNING DRV_NAME ": %s: Queue mismatch.  "
+			       "Expecting COMMAND TBD but pulled "
+			       "something else: ids %d=%d.\n",
+			       priv->net_dev->name, txq->oldest, packet->index);
+
+#ifdef CONFIG_IPW2100_DEBUG
+		if (packet->info.c_struct.cmd->host_command_reg <
+		    ARRAY_SIZE(command_types))
+			IPW_DEBUG_TX("Command '%s (%d)' processed: %d.\n",
+				     command_types[packet->info.c_struct.cmd->
+						   host_command_reg],
+				     packet->info.c_struct.cmd->
+				     host_command_reg,
+				     packet->info.c_struct.cmd->cmd_status_reg);
+#endif
+
+		list_add_tail(element, &priv->msg_free_list);
+		INC_STAT(&priv->msg_free_stat);
+		break;
+	}
+
+	/* advance oldest used TBD pointer to start of next entry */
+	txq->oldest = (e + 1) % txq->entries;
+	/* increase available TBDs number */
+	txq->available += descriptors_used;
+	SET_STAT(&priv->txq_stat, txq->available);
+
+	IPW_DEBUG_TX("packet latency (send to process)  %ld jiffies\n",
+		     jiffies - packet->jiffy_start);
+
+	return (!list_empty(&priv->fw_pend_list));
+}
+
+static inline void __ipw2100_tx_complete(struct ipw2100_priv *priv)
+{
+	int i = 0;
+
+	while (__ipw2100_tx_process(priv) && i < 200)
+		i++;
+
+	if (i == 200) {
+		printk(KERN_WARNING DRV_NAME ": "
+		       "%s: Driver is running slow (%d iters).\n",
+		       priv->net_dev->name, i);
+	}
+}
+
+static void ipw2100_tx_send_commands(struct ipw2100_priv *priv)
+{
+	struct list_head *element;
+	struct ipw2100_tx_packet *packet;
+	struct ipw2100_bd_queue *txq = &priv->tx_queue;
+	struct ipw2100_bd *tbd;
+	int next = txq->next;
+
+	while (!list_empty(&priv->msg_pend_list)) {
+		/* if there isn't enough space in TBD queue, then
+		 * don't stuff a new one in.
+		 * NOTE: 3 are needed as a command will take one,
+		 *       and there is a minimum of 2 that must be
+		 *       maintained between the r and w indexes
+		 */
+		if (txq->available <= 3) {
+			IPW_DEBUG_TX("no room in tx_queue\n");
+			break;
+		}
+
+		element = priv->msg_pend_list.next;
+		list_del(element);
+		DEC_STAT(&priv->msg_pend_stat);
+
+		packet = list_entry(element, struct ipw2100_tx_packet, list);
+
+		IPW_DEBUG_TX("using TBD at virt=%p, phys=%04X\n",
+			     &txq->drv[txq->next],
+			     (u32) (txq->nic + txq->next *
+				      sizeof(struct ipw2100_bd)));
+
+		packet->index = txq->next;
+
+		tbd = &txq->drv[txq->next];
+
+		/* initialize TBD */
+		tbd->host_addr = packet->info.c_struct.cmd_phys;
+		tbd->buf_length = sizeof(struct ipw2100_cmd_header);
+		/* not marking number of fragments causes problems
+		 * with f/w debug version */
+		tbd->num_fragments = 1;
+		tbd->status.info.field =
+		    IPW_BD_STATUS_TX_FRAME_COMMAND |
+		    IPW_BD_STATUS_TX_INTERRUPT_ENABLE;
+
+		/* update TBD queue counters */
+		txq->next++;
+		txq->next %= txq->entries;
+		txq->available--;
+		DEC_STAT(&priv->txq_stat);
+
+		list_add_tail(element, &priv->fw_pend_list);
+		INC_STAT(&priv->fw_pend_stat);
+	}
+
+	if (txq->next != next) {
+		/* kick off the DMA by notifying firmware the
+		 * write index has moved; make sure TBD stores are sync'd */
+		wmb();
+		write_register(priv->net_dev,
+			       IPW_MEM_HOST_SHARED_TX_QUEUE_WRITE_INDEX,
+			       txq->next);
+	}
+}
+
+/*
+ * ipw2100_tx_send_data
+ *
+ * Move packets from the tx_pend_list into the shared TBD ring: one
+ * TBD carries the ipw2100_data_header and one more is used per
+ * fragment, after which the write index is advanced to hand the
+ * packet to the firmware.
+ */
+static void ipw2100_tx_send_data(struct ipw2100_priv *priv)
+{
+	struct list_head *element;
+	struct ipw2100_tx_packet *packet;
+	struct ipw2100_bd_queue *txq = &priv->tx_queue;
+	struct ipw2100_bd *tbd;
+	int next = txq->next;
+	int i = 0;
+	struct ipw2100_data_header *ipw_hdr;
+	struct libipw_hdr_3addr *hdr;
+
+	while (!list_empty(&priv->tx_pend_list)) {
+		/* if there isn't enough space in TBD queue, then
+		 * don't stuff a new one in.
+		 * NOTE: 4 are needed as a data packet will take at least two,
+		 *       and there is a minimum of 2 that must be
+		 *       maintained between the r and w indexes
+		 */
+		element = priv->tx_pend_list.next;
+		packet = list_entry(element, struct ipw2100_tx_packet, list);
+
+		if (unlikely(1 + packet->info.d_struct.txb->nr_frags >
+			     IPW_MAX_BDS)) {
+			/* TODO: Support merging buffers if more than
+			 * IPW_MAX_BDS are used */
+			IPW_DEBUG_INFO("%s: Maximum BD threshold exceeded.  "
+				       "Increase fragmentation level.\n",
+				       priv->net_dev->name);
+		}
+
+		if (txq->available <= 3 + packet->info.d_struct.txb->nr_frags) {
+			IPW_DEBUG_TX("no room in tx_queue\n");
+			break;
+		}
+
+		list_del(element);
+		DEC_STAT(&priv->tx_pend_stat);
+
+		tbd = &txq->drv[txq->next];
+
+		packet->index = txq->next;
+
+		ipw_hdr = packet->info.d_struct.data;
+		hdr = (struct libipw_hdr_3addr *)packet->info.d_struct.txb->
+		    fragments[0]->data;
+
+		if (priv->ieee->iw_mode == IW_MODE_INFRA) {
+			/* To DS: Addr1 = BSSID, Addr2 = SA,
+			   Addr3 = DA */
+			memcpy(ipw_hdr->src_addr, hdr->addr2, ETH_ALEN);
+			memcpy(ipw_hdr->dst_addr, hdr->addr3, ETH_ALEN);
+		} else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
+			/* not From/To DS: Addr1 = DA, Addr2 = SA,
+			   Addr3 = BSSID */
+			memcpy(ipw_hdr->src_addr, hdr->addr2, ETH_ALEN);
+			memcpy(ipw_hdr->dst_addr, hdr->addr1, ETH_ALEN);
+		}
+
+		ipw_hdr->host_command_reg = SEND;
+		ipw_hdr->host_command_reg1 = 0;
+
+		/* For now we only support host based encryption */
+		ipw_hdr->needs_encryption = 0;
+		ipw_hdr->encrypted = packet->info.d_struct.txb->encrypted;
+		if (packet->info.d_struct.txb->nr_frags > 1)
+			ipw_hdr->fragment_size =
+			    packet->info.d_struct.txb->frag_size -
+			    LIBIPW_3ADDR_LEN;
+		else
+			ipw_hdr->fragment_size = 0;
+
+		tbd->host_addr = packet->info.d_struct.data_phys;
+		tbd->buf_length = sizeof(struct ipw2100_data_header);
+		tbd->num_fragments = 1 + packet->info.d_struct.txb->nr_frags;
+		tbd->status.info.field =
+		    IPW_BD_STATUS_TX_FRAME_802_3 |
+		    IPW_BD_STATUS_TX_FRAME_NOT_LAST_FRAGMENT;
+		txq->next++;
+		txq->next %= txq->entries;
+
+		IPW_DEBUG_TX("data header tbd TX%d P=%08x L=%d\n",
+			     packet->index, tbd->host_addr, tbd->buf_length);
+#ifdef CONFIG_IPW2100_DEBUG
+		if (packet->info.d_struct.txb->nr_frags > 1)
+			IPW_DEBUG_FRAG("fragment Tx: %d frames\n",
+				       packet->info.d_struct.txb->nr_frags);
+#endif
+
+		for (i = 0; i < packet->info.d_struct.txb->nr_frags; i++) {
+			tbd = &txq->drv[txq->next];
+			if (i == packet->info.d_struct.txb->nr_frags - 1)
+				tbd->status.info.field =
+				    IPW_BD_STATUS_TX_FRAME_802_3 |
+				    IPW_BD_STATUS_TX_INTERRUPT_ENABLE;
+			else
+				tbd->status.info.field =
+				    IPW_BD_STATUS_TX_FRAME_802_3 |
+				    IPW_BD_STATUS_TX_FRAME_NOT_LAST_FRAGMENT;
+
+			tbd->buf_length = packet->info.d_struct.txb->
+			    fragments[i]->len - LIBIPW_3ADDR_LEN;
+
+			tbd->host_addr = pci_map_single(priv->pci_dev,
+							packet->info.d_struct.
+							txb->fragments[i]->
+							data +
+							LIBIPW_3ADDR_LEN,
+							tbd->buf_length,
+							PCI_DMA_TODEVICE);
+			if (pci_dma_mapping_error(priv->pci_dev,
+						  tbd->host_addr)) {
+				IPW_DEBUG_TX("dma mapping error\n");
+				break;
+			}
+
+			IPW_DEBUG_TX("data frag tbd TX%d P=%08x L=%d\n",
+				     txq->next, tbd->host_addr,
+				     tbd->buf_length);
+
+			pci_dma_sync_single_for_device(priv->pci_dev,
+						       tbd->host_addr,
+						       tbd->buf_length,
+						       PCI_DMA_TODEVICE);
+
+			txq->next++;
+			txq->next %= txq->entries;
+		}
+
+		txq->available -= 1 + packet->info.d_struct.txb->nr_frags;
+		SET_STAT(&priv->txq_stat, txq->available);
+
+		list_add_tail(element, &priv->fw_pend_list);
+		INC_STAT(&priv->fw_pend_stat);
+	}
+
+	if (txq->next != next) {
+		/* kick off the DMA by notifying firmware the
+		 * write index has moved; make sure TBD stores are sync'd */
+		wmb();
+		write_register(priv->net_dev,
+			       IPW_MEM_HOST_SHARED_TX_QUEUE_WRITE_INDEX,
+			       txq->next);
+	}
+}
+
+static void ipw2100_irq_tasklet(struct ipw2100_priv *priv)
+{
+	struct net_device *dev = priv->net_dev;
+	unsigned long flags;
+	u32 inta, tmp;
+
+	spin_lock_irqsave(&priv->low_lock, flags);
+	ipw2100_disable_interrupts(priv);
+
+	read_register(dev, IPW_REG_INTA, &inta);
+
+	IPW_DEBUG_ISR("enter - INTA: 0x%08lX\n",
+		      (unsigned long)inta & IPW_INTERRUPT_MASK);
+
+	priv->in_isr++;
+	priv->interrupts++;
+
+	/* We do not loop and keep polling for more interrupts as this
+	 * is frowned upon and doesn't play nicely with other potentially
+	 * chained IRQs */
+	IPW_DEBUG_ISR("INTA: 0x%08lX\n",
+		      (unsigned long)inta & IPW_INTERRUPT_MASK);
+
+	if (inta & IPW2100_INTA_FATAL_ERROR) {
+		printk(KERN_WARNING DRV_NAME
+		       ": Fatal interrupt. Scheduling firmware restart.\n");
+		priv->inta_other++;
+		write_register(dev, IPW_REG_INTA, IPW2100_INTA_FATAL_ERROR);
+
+		read_nic_dword(dev, IPW_NIC_FATAL_ERROR, &priv->fatal_error);
+		IPW_DEBUG_INFO("%s: Fatal error value: 0x%08X\n",
+			       priv->net_dev->name, priv->fatal_error);
+
+		read_nic_dword(dev, IPW_ERROR_ADDR(priv->fatal_error), &tmp);
+		IPW_DEBUG_INFO("%s: Fatal error address value: 0x%08X\n",
+			       priv->net_dev->name, tmp);
+
+		/* Wake up any sleeping jobs */
+		schedule_reset(priv);
+	}
+
+	if (inta & IPW2100_INTA_PARITY_ERROR) {
+		printk(KERN_ERR DRV_NAME
+		       ": ***** PARITY ERROR INTERRUPT !!!!\n");
+		priv->inta_other++;
+		write_register(dev, IPW_REG_INTA, IPW2100_INTA_PARITY_ERROR);
+	}
+
+	if (inta & IPW2100_INTA_RX_TRANSFER) {
+		IPW_DEBUG_ISR("RX interrupt\n");
+
+		priv->rx_interrupts++;
+
+		write_register(dev, IPW_REG_INTA, IPW2100_INTA_RX_TRANSFER);
+
+		__ipw2100_rx_process(priv);
+		__ipw2100_tx_complete(priv);
+	}
+
+	if (inta & IPW2100_INTA_TX_TRANSFER) {
+		IPW_DEBUG_ISR("TX interrupt\n");
+
+		priv->tx_interrupts++;
+
+		write_register(dev, IPW_REG_INTA, IPW2100_INTA_TX_TRANSFER);
+
+		__ipw2100_tx_complete(priv);
+		ipw2100_tx_send_commands(priv);
+		ipw2100_tx_send_data(priv);
+	}
+
+	if (inta & IPW2100_INTA_TX_COMPLETE) {
+		IPW_DEBUG_ISR("TX complete\n");
+		priv->inta_other++;
+		write_register(dev, IPW_REG_INTA, IPW2100_INTA_TX_COMPLETE);
+
+		__ipw2100_tx_complete(priv);
+	}
+
+	if (inta & IPW2100_INTA_EVENT_INTERRUPT) {
+		/* ipw2100_handle_event(dev); */
+		priv->inta_other++;
+		write_register(dev, IPW_REG_INTA, IPW2100_INTA_EVENT_INTERRUPT);
+	}
+
+	if (inta & IPW2100_INTA_FW_INIT_DONE) {
+		IPW_DEBUG_ISR("FW init done interrupt\n");
+		priv->inta_other++;
+
+		read_register(dev, IPW_REG_INTA, &tmp);
+		if (tmp & (IPW2100_INTA_FATAL_ERROR |
+			   IPW2100_INTA_PARITY_ERROR)) {
+			write_register(dev, IPW_REG_INTA,
+				       IPW2100_INTA_FATAL_ERROR |
+				       IPW2100_INTA_PARITY_ERROR);
+		}
+
+		write_register(dev, IPW_REG_INTA, IPW2100_INTA_FW_INIT_DONE);
+	}
+
+	if (inta & IPW2100_INTA_STATUS_CHANGE) {
+		IPW_DEBUG_ISR("Status change interrupt\n");
+		priv->inta_other++;
+		write_register(dev, IPW_REG_INTA, IPW2100_INTA_STATUS_CHANGE);
+	}
+
+	if (inta & IPW2100_INTA_SLAVE_MODE_HOST_COMMAND_DONE) {
+		IPW_DEBUG_ISR("slave host mode interrupt\n");
+		priv->inta_other++;
+		write_register(dev, IPW_REG_INTA,
+			       IPW2100_INTA_SLAVE_MODE_HOST_COMMAND_DONE);
+	}
+
+	priv->in_isr--;
+	ipw2100_enable_interrupts(priv);
+
+	spin_unlock_irqrestore(&priv->low_lock, flags);
+
+	IPW_DEBUG_ISR("exit\n");
+}
+
+static irqreturn_t ipw2100_interrupt(int irq, void *data)
+{
+	struct ipw2100_priv *priv = data;
+	u32 inta, inta_mask;
+
+	if (!data)
+		return IRQ_NONE;
+
+	spin_lock(&priv->low_lock);
+
+	/* We check to see if we should be ignoring interrupts before
+	 * we touch the hardware.  During ucode load, if we try to handle
+	 * an interrupt we can cause keyboard problems as well as cause
+	 * the ucode to fail to initialize */
+	if (!(priv->status & STATUS_INT_ENABLED)) {
+		/* Shared IRQ */
+		goto none;
+	}
+
+	read_register(priv->net_dev, IPW_REG_INTA_MASK, &inta_mask);
+	read_register(priv->net_dev, IPW_REG_INTA, &inta);
+
+	if (inta == 0xFFFFFFFF) {
+		/* Hardware disappeared */
+		printk(KERN_WARNING DRV_NAME ": IRQ INTA == 0xFFFFFFFF\n");
+		goto none;
+	}
+
+	inta &= IPW_INTERRUPT_MASK;
+
+	if (!(inta & inta_mask)) {
+		/* Shared interrupt */
+		goto none;
+	}
+
+	/* We disable the hardware interrupt here just to prevent unneeded
+	 * calls to be made.  We disable this again within the actual
+	 * work tasklet, so if another part of the code re-enables the
+	 * interrupt, that is fine */
+	ipw2100_disable_interrupts(priv);
+
+	tasklet_schedule(&priv->irq_tasklet);
+	spin_unlock(&priv->low_lock);
+
+	return IRQ_HANDLED;
+      none:
+	spin_unlock(&priv->low_lock);
+	return IRQ_NONE;
+}
+
+static netdev_tx_t ipw2100_tx(struct libipw_txb *txb,
+			      struct net_device *dev, int pri)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	struct list_head *element;
+	struct ipw2100_tx_packet *packet;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->low_lock, flags);
+
+	if (!(priv->status & STATUS_ASSOCIATED)) {
+		IPW_DEBUG_INFO("Can not transmit when not connected.\n");
+		priv->net_dev->stats.tx_carrier_errors++;
+		netif_stop_queue(dev);
+		goto fail_unlock;
+	}
+
+	if (list_empty(&priv->tx_free_list))
+		goto fail_unlock;
+
+	element = priv->tx_free_list.next;
+	packet = list_entry(element, struct ipw2100_tx_packet, list);
+
+	packet->info.d_struct.txb = txb;
+
+	IPW_DEBUG_TX("Sending fragment (%d bytes):\n", txb->fragments[0]->len);
+	printk_buf(IPW_DL_TX, txb->fragments[0]->data, txb->fragments[0]->len);
+
+	packet->jiffy_start = jiffies;
+
+	list_del(element);
+	DEC_STAT(&priv->tx_free_stat);
+
+	list_add_tail(element, &priv->tx_pend_list);
+	INC_STAT(&priv->tx_pend_stat);
+
+	ipw2100_tx_send_data(priv);
+
+	spin_unlock_irqrestore(&priv->low_lock, flags);
+	return NETDEV_TX_OK;
+
+fail_unlock:
+	netif_stop_queue(dev);
+	spin_unlock_irqrestore(&priv->low_lock, flags);
+	return NETDEV_TX_BUSY;
+}
+
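+/* Allocate the pool of IPW_COMMAND_POOL_SIZE host command buffers,
+ * each backed by a coherent DMA allocation big enough for one
+ * ipw2100_cmd_header; on failure the partially built pool is torn
+ * down again before returning. */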
+static int ipw2100_msg_allocate(struct ipw2100_priv *priv)
+{
+	int i, j, err = -EINVAL;
+	void *v;
+	dma_addr_t p;
+
+	priv->msg_buffers =
+	    kmalloc_array(IPW_COMMAND_POOL_SIZE,
+			  sizeof(struct ipw2100_tx_packet),
+			  GFP_KERNEL);
+	if (!priv->msg_buffers)
+		return -ENOMEM;
+
+	for (i = 0; i < IPW_COMMAND_POOL_SIZE; i++) {
+		v = pci_zalloc_consistent(priv->pci_dev,
+					  sizeof(struct ipw2100_cmd_header),
+					  &p);
+		if (!v) {
+			printk(KERN_ERR DRV_NAME ": "
+			       "%s: PCI alloc failed for msg "
+			       "buffers.\n", priv->net_dev->name);
+			err = -ENOMEM;
+			break;
+		}
+
+		priv->msg_buffers[i].type = COMMAND;
+		priv->msg_buffers[i].info.c_struct.cmd =
+		    (struct ipw2100_cmd_header *)v;
+		priv->msg_buffers[i].info.c_struct.cmd_phys = p;
+	}
+
+	if (i == IPW_COMMAND_POOL_SIZE)
+		return 0;
+
+	for (j = 0; j < i; j++) {
+		pci_free_consistent(priv->pci_dev,
+				    sizeof(struct ipw2100_cmd_header),
+				    priv->msg_buffers[j].info.c_struct.cmd,
+				    priv->msg_buffers[j].info.c_struct.
+				    cmd_phys);
+	}
+
+	kfree(priv->msg_buffers);
+	priv->msg_buffers = NULL;
+
+	return err;
+}
+
+static int ipw2100_msg_initialize(struct ipw2100_priv *priv)
+{
+	int i;
+
+	INIT_LIST_HEAD(&priv->msg_free_list);
+	INIT_LIST_HEAD(&priv->msg_pend_list);
+
+	for (i = 0; i < IPW_COMMAND_POOL_SIZE; i++)
+		list_add_tail(&priv->msg_buffers[i].list, &priv->msg_free_list);
+	SET_STAT(&priv->msg_free_stat, i);
+
+	return 0;
+}
+
+static void ipw2100_msg_free(struct ipw2100_priv *priv)
+{
+	int i;
+
+	if (!priv->msg_buffers)
+		return;
+
+	for (i = 0; i < IPW_COMMAND_POOL_SIZE; i++) {
+		pci_free_consistent(priv->pci_dev,
+				    sizeof(struct ipw2100_cmd_header),
+				    priv->msg_buffers[i].info.c_struct.cmd,
+				    priv->msg_buffers[i].info.c_struct.
+				    cmd_phys);
+	}
+
+	kfree(priv->msg_buffers);
+	priv->msg_buffers = NULL;
+}
+
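+/* Dump the first 256 bytes of PCI configuration space as sixteen
+ * rows of four dwords each.  Exposed to userspace as the 'pci'
+ * device attribute, typically under the device's sysfs directory
+ * (e.g. /sys/bus/pci/devices/<bdf>/pci). */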
+static ssize_t show_pci(struct device *d, struct device_attribute *attr,
+			char *buf)
+{
+	struct pci_dev *pci_dev = to_pci_dev(d);
+	char *out = buf;
+	int i, j;
+	u32 val;
+
+	for (i = 0; i < 16; i++) {
+		out += sprintf(out, "[%08X] ", i * 16);
+		for (j = 0; j < 16; j += 4) {
+			pci_read_config_dword(pci_dev, i * 16 + j, &val);
+			out += sprintf(out, "%08X ", val);
+		}
+		out += sprintf(out, "\n");
+	}
+
+	return out - buf;
+}
+
+static DEVICE_ATTR(pci, 0444, show_pci, NULL);
+
+static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
+			char *buf)
+{
+	struct ipw2100_priv *p = dev_get_drvdata(d);
+	return sprintf(buf, "0x%08x\n", (int)p->config);
+}
+
+static DEVICE_ATTR(cfg, 0444, show_cfg, NULL);
+
+static ssize_t show_status(struct device *d, struct device_attribute *attr,
+			   char *buf)
+{
+	struct ipw2100_priv *p = dev_get_drvdata(d);
+	return sprintf(buf, "0x%08x\n", (int)p->status);
+}
+
+static DEVICE_ATTR(status, 0444, show_status, NULL);
+
+static ssize_t show_capability(struct device *d, struct device_attribute *attr,
+			       char *buf)
+{
+	struct ipw2100_priv *p = dev_get_drvdata(d);
+	return sprintf(buf, "0x%08x\n", (int)p->capability);
+}
+
+static DEVICE_ATTR(capability, 0444, show_capability, NULL);
+
+#define IPW2100_REG(x) { IPW_ ##x, #x }
+static const struct {
+	u32 addr;
+	const char *name;
+} hw_data[] = {
+	IPW2100_REG(REG_GP_CNTRL),
+	IPW2100_REG(REG_GPIO),
+	IPW2100_REG(REG_INTA),
+	IPW2100_REG(REG_INTA_MASK),
+	IPW2100_REG(REG_RESET_REG),
+};
+#define IPW2100_NIC(x, s) { x, #x, s }
+static const struct {
+	u32 addr;
+	const char *name;
+	size_t size;
+} nic_data[] = {
+	IPW2100_NIC(IPW2100_CONTROL_REG, 2),
+	IPW2100_NIC(0x210014, 1),
+	IPW2100_NIC(0x210000, 1),
+};
+#define IPW2100_ORD(x, d) { IPW_ORD_ ##x, #x, d }
+static const struct {
+	u8 index;
+	const char *name;
+	const char *desc;
+} ord_data[] = {
+	IPW2100_ORD(STAT_TX_HOST_REQUESTS, "requested Host Tx's (MSDU)"),
+	IPW2100_ORD(STAT_TX_HOST_COMPLETE, "successful Host Tx's (MSDU)"),
+	IPW2100_ORD(STAT_TX_DIR_DATA, "successful Directed Tx's (MSDU)"),
+	IPW2100_ORD(STAT_TX_DIR_DATA1,
+		    "successful Directed Tx's (MSDU) @ 1MB"),
+	IPW2100_ORD(STAT_TX_DIR_DATA2,
+		    "successful Directed Tx's (MSDU) @ 2MB"),
+	IPW2100_ORD(STAT_TX_DIR_DATA5_5,
+		    "successful Directed Tx's (MSDU) @ 5.5MB"),
+	IPW2100_ORD(STAT_TX_DIR_DATA11,
+		    "successful Directed Tx's (MSDU) @ 11MB"),
+	IPW2100_ORD(STAT_TX_NODIR_DATA1,
+		    "successful Non_Directed Tx's (MSDU) @ 1MB"),
+	IPW2100_ORD(STAT_TX_NODIR_DATA2,
+		    "successful Non_Directed Tx's (MSDU) @ 2MB"),
+	IPW2100_ORD(STAT_TX_NODIR_DATA5_5,
+		    "successful Non_Directed Tx's (MSDU) @ 5.5MB"),
+	IPW2100_ORD(STAT_TX_NODIR_DATA11,
+		    "successful Non_Directed Tx's (MSDU) @ 11MB"),
+	IPW2100_ORD(STAT_NULL_DATA, "successful NULL data Tx's"),
+	IPW2100_ORD(STAT_TX_RTS, "successful Tx RTS"),
+	IPW2100_ORD(STAT_TX_CTS, "successful Tx CTS"),
+	IPW2100_ORD(STAT_TX_ACK, "successful Tx ACK"),
+	IPW2100_ORD(STAT_TX_ASSN, "successful Association Tx's"),
+	IPW2100_ORD(STAT_TX_ASSN_RESP, "successful Association response Tx's"),
+	IPW2100_ORD(STAT_TX_REASSN, "successful Reassociation Tx's"),
+	IPW2100_ORD(STAT_TX_REASSN_RESP,
+		    "successful Reassociation response Tx's"),
+	IPW2100_ORD(STAT_TX_PROBE, "probes successfully transmitted"),
+	IPW2100_ORD(STAT_TX_PROBE_RESP,
+		    "probe responses successfully transmitted"),
+	IPW2100_ORD(STAT_TX_BEACON, "tx beacon"),
+	IPW2100_ORD(STAT_TX_ATIM, "Tx ATIM"),
+	IPW2100_ORD(STAT_TX_DISASSN, "successful Disassociation TX"),
+	IPW2100_ORD(STAT_TX_AUTH, "successful Authentication Tx"),
+	IPW2100_ORD(STAT_TX_DEAUTH, "successful Deauthentication TX"),
+	IPW2100_ORD(STAT_TX_TOTAL_BYTES, "Total successful Tx data bytes"),
+	IPW2100_ORD(STAT_TX_RETRIES, "Tx retries"),
+	IPW2100_ORD(STAT_TX_RETRY1, "Tx retries at 1MBPS"),
+	IPW2100_ORD(STAT_TX_RETRY2, "Tx retries at 2MBPS"),
+	IPW2100_ORD(STAT_TX_RETRY5_5, "Tx retries at 5.5MBPS"),
+	IPW2100_ORD(STAT_TX_RETRY11, "Tx retries at 11MBPS"),
+	IPW2100_ORD(STAT_TX_FAILURES, "Tx Failures"),
+	IPW2100_ORD(STAT_TX_MAX_TRIES_IN_HOP,
+		    "times max tries in a hop failed"),
+	IPW2100_ORD(STAT_TX_DISASSN_FAIL, "times disassociation failed"),
+	IPW2100_ORD(STAT_TX_ERR_CTS, "missed/bad CTS frames"),
+	IPW2100_ORD(STAT_TX_ERR_ACK, "tx err due to acks"),
+	IPW2100_ORD(STAT_RX_HOST, "packets passed to host"),
+	IPW2100_ORD(STAT_RX_DIR_DATA, "directed packets"),
+	IPW2100_ORD(STAT_RX_DIR_DATA1, "directed packets at 1MB"),
+	IPW2100_ORD(STAT_RX_DIR_DATA2, "directed packets at 2MB"),
+	IPW2100_ORD(STAT_RX_DIR_DATA5_5, "directed packets at 5.5MB"),
+	IPW2100_ORD(STAT_RX_DIR_DATA11, "directed packets at 11MB"),
+	IPW2100_ORD(STAT_RX_NODIR_DATA, "nondirected packets"),
+	IPW2100_ORD(STAT_RX_NODIR_DATA1, "nondirected packets at 1MB"),
+	IPW2100_ORD(STAT_RX_NODIR_DATA2, "nondirected packets at 2MB"),
+	IPW2100_ORD(STAT_RX_NODIR_DATA5_5, "nondirected packets at 5.5MB"),
+	IPW2100_ORD(STAT_RX_NODIR_DATA11, "nondirected packets at 11MB"),
+	IPW2100_ORD(STAT_RX_NULL_DATA, "null data rx's"),
+	IPW2100_ORD(STAT_RX_RTS, "Rx RTS"),
+	IPW2100_ORD(STAT_RX_CTS, "Rx CTS"),
+	IPW2100_ORD(STAT_RX_ACK, "Rx ACK"),
+	IPW2100_ORD(STAT_RX_CFEND, "Rx CF End"),
+	IPW2100_ORD(STAT_RX_CFEND_ACK, "Rx CF End + CF Ack"),
+	IPW2100_ORD(STAT_RX_ASSN, "Association Rx's"),
+	IPW2100_ORD(STAT_RX_ASSN_RESP, "Association response Rx's"),
+	IPW2100_ORD(STAT_RX_REASSN, "Reassociation Rx's"),
+	IPW2100_ORD(STAT_RX_REASSN_RESP, "Reassociation response Rx's"),
+	IPW2100_ORD(STAT_RX_PROBE, "probe Rx's"),
+	IPW2100_ORD(STAT_RX_PROBE_RESP, "probe response Rx's"),
+	IPW2100_ORD(STAT_RX_BEACON, "Rx beacon"),
+	IPW2100_ORD(STAT_RX_ATIM, "Rx ATIM"),
+	IPW2100_ORD(STAT_RX_DISASSN, "disassociation Rx"),
+	IPW2100_ORD(STAT_RX_AUTH, "authentication Rx"),
+	IPW2100_ORD(STAT_RX_DEAUTH, "deauthentication Rx"),
+	IPW2100_ORD(STAT_RX_TOTAL_BYTES, "Total rx data bytes received"),
+	IPW2100_ORD(STAT_RX_ERR_CRC, "packets with Rx CRC error"),
+	IPW2100_ORD(STAT_RX_ERR_CRC1, "Rx CRC errors at 1MB"),
+	IPW2100_ORD(STAT_RX_ERR_CRC2, "Rx CRC errors at 2MB"),
+	IPW2100_ORD(STAT_RX_ERR_CRC5_5, "Rx CRC errors at 5.5MB"),
+	IPW2100_ORD(STAT_RX_ERR_CRC11, "Rx CRC errors at 11MB"),
+	IPW2100_ORD(STAT_RX_DUPLICATE1, "duplicate rx packets at 1MB"),
+	IPW2100_ORD(STAT_RX_DUPLICATE2, "duplicate rx packets at 2MB"),
+	IPW2100_ORD(STAT_RX_DUPLICATE5_5, "duplicate rx packets at 5.5MB"),
+	IPW2100_ORD(STAT_RX_DUPLICATE11, "duplicate rx packets at 11MB"),
+	IPW2100_ORD(STAT_RX_DUPLICATE, "duplicate rx packets"),
+	IPW2100_ORD(PERS_DB_LOCK, "locking fw permanent db"),
+	IPW2100_ORD(PERS_DB_SIZE, "size of fw permanent db"),
+	IPW2100_ORD(PERS_DB_ADDR, "address of fw permanent db"),
+	IPW2100_ORD(STAT_RX_INVALID_PROTOCOL,
+		    "rx frames with invalid protocol"),
+	IPW2100_ORD(SYS_BOOT_TIME, "Boot time"),
+	IPW2100_ORD(STAT_RX_NO_BUFFER, "rx frames rejected due to no buffer"),
+	IPW2100_ORD(STAT_RX_MISSING_FRAG,
+		    "rx frames dropped due to missing fragment"),
+	IPW2100_ORD(STAT_RX_ORPHAN_FRAG,
+		    "rx frames dropped due to non-sequential fragment"),
+	IPW2100_ORD(STAT_RX_ORPHAN_FRAME,
+		    "rx frames dropped due to unmatched 1st frame"),
+	IPW2100_ORD(STAT_RX_FRAG_AGEOUT,
+		    "rx frames dropped due to uncompleted frame"),
+	IPW2100_ORD(STAT_RX_ICV_ERRORS, "ICV errors during decryption"),
+	IPW2100_ORD(STAT_PSP_SUSPENSION, "times adapter suspended"),
+	IPW2100_ORD(STAT_PSP_BCN_TIMEOUT, "beacon timeout"),
+	IPW2100_ORD(STAT_PSP_POLL_TIMEOUT, "poll response timeouts"),
+	IPW2100_ORD(STAT_PSP_NONDIR_TIMEOUT,
+		    "timeouts waiting for last {broad,multi}cast pkt"),
+	IPW2100_ORD(STAT_PSP_RX_DTIMS, "PSP DTIMs received"),
+	IPW2100_ORD(STAT_PSP_RX_TIMS, "PSP TIMs received"),
+	IPW2100_ORD(STAT_PSP_STATION_ID, "PSP Station ID"),
+	IPW2100_ORD(LAST_ASSN_TIME, "RTC time of last association"),
+	IPW2100_ORD(STAT_PERCENT_MISSED_BCNS,
+		    "current calculation of % missed beacons"),
+	IPW2100_ORD(STAT_PERCENT_RETRIES,
+		    "current calculation of % missed tx retries"),
+	IPW2100_ORD(ASSOCIATED_AP_PTR,
+		    "0 if not associated, else pointer to AP table entry"),
+	IPW2100_ORD(AVAILABLE_AP_CNT, "AP's described in the AP table"),
+	IPW2100_ORD(AP_LIST_PTR, "Ptr to list of available APs"),
+	IPW2100_ORD(STAT_AP_ASSNS, "associations"),
+	IPW2100_ORD(STAT_ASSN_FAIL, "association failures"),
+	IPW2100_ORD(STAT_ASSN_RESP_FAIL, "failures due to response fail"),
+	IPW2100_ORD(STAT_FULL_SCANS, "full scans"),
+	IPW2100_ORD(CARD_DISABLED, "Card Disabled"),
+	IPW2100_ORD(STAT_ROAM_INHIBIT,
+		    "times roaming was inhibited due to activity"),
+	IPW2100_ORD(RSSI_AT_ASSN,
+		    "RSSI of associated AP at time of association"),
+	IPW2100_ORD(STAT_ASSN_CAUSE1,
+		    "reassociation: no probe response or TX on hop"),
+	IPW2100_ORD(STAT_ASSN_CAUSE2, "reassociation: poor tx/rx quality"),
+	IPW2100_ORD(STAT_ASSN_CAUSE3,
+		    "reassociation: tx/rx quality (excessive AP load)"),
+	IPW2100_ORD(STAT_ASSN_CAUSE4, "reassociation: AP RSSI level"),
+	IPW2100_ORD(STAT_ASSN_CAUSE5, "reassociations due to load leveling"),
+	IPW2100_ORD(STAT_AUTH_FAIL, "times authentication failed"),
+	IPW2100_ORD(STAT_AUTH_RESP_FAIL,
+		    "times authentication response failed"),
+	IPW2100_ORD(STATION_TABLE_CNT, "entries in association table"),
+	IPW2100_ORD(RSSI_AVG_CURR, "Current avg RSSI"),
+	IPW2100_ORD(POWER_MGMT_MODE, "Power mode - 0=CAM, 1=PSP"),
+	IPW2100_ORD(COUNTRY_CODE, "IEEE country code as recv'd from beacon"),
+	IPW2100_ORD(COUNTRY_CHANNELS, "channels supported by country"),
+	IPW2100_ORD(RESET_CNT, "adapter resets (warm)"),
+	IPW2100_ORD(BEACON_INTERVAL, "Beacon interval"),
+	IPW2100_ORD(ANTENNA_DIVERSITY, "TRUE if antenna diversity is disabled"),
+	IPW2100_ORD(DTIM_PERIOD, "beacon intervals between DTIMs"),
+	IPW2100_ORD(OUR_FREQ, "current radio freq lower digits - channel ID"),
+	IPW2100_ORD(RTC_TIME, "current RTC time"),
+	IPW2100_ORD(PORT_TYPE, "operating mode"),
+	IPW2100_ORD(CURRENT_TX_RATE, "current tx rate"),
+	IPW2100_ORD(SUPPORTED_RATES, "supported tx rates"),
+	IPW2100_ORD(ATIM_WINDOW, "current ATIM Window"),
+	IPW2100_ORD(BASIC_RATES, "basic tx rates"),
+	IPW2100_ORD(NIC_HIGHEST_RATE, "NIC highest tx rate"),
+	IPW2100_ORD(AP_HIGHEST_RATE, "AP highest tx rate"),
+	IPW2100_ORD(CAPABILITIES, "Management frame capability field"),
+	IPW2100_ORD(AUTH_TYPE, "Type of authentication"),
+	IPW2100_ORD(RADIO_TYPE, "Adapter card platform type"),
+	IPW2100_ORD(RTS_THRESHOLD, "Min packet length for RTS handshaking"),
+	IPW2100_ORD(INT_MODE, "International mode"),
+	IPW2100_ORD(FRAGMENTATION_THRESHOLD, "protocol frag threshold"),
+	IPW2100_ORD(EEPROM_SRAM_DB_BLOCK_START_ADDRESS,
+		    "EEPROM offset in SRAM"),
+	IPW2100_ORD(EEPROM_SRAM_DB_BLOCK_SIZE, "EEPROM size in SRAM"),
+	IPW2100_ORD(EEPROM_SKU_CAPABILITY, "EEPROM SKU Capability"),
+	IPW2100_ORD(EEPROM_IBSS_11B_CHANNELS, "EEPROM IBSS 11b channel set"),
+	IPW2100_ORD(MAC_VERSION, "MAC Version"),
+	IPW2100_ORD(MAC_REVISION, "MAC Revision"),
+	IPW2100_ORD(RADIO_VERSION, "Radio Version"),
+	IPW2100_ORD(NIC_MANF_DATE_TIME, "MANF Date/Time STAMP"),
+	IPW2100_ORD(UCODE_VERSION, "Ucode Version"),
+};
+
+static ssize_t show_registers(struct device *d, struct device_attribute *attr,
+			      char *buf)
+{
+	int i;
+	struct ipw2100_priv *priv = dev_get_drvdata(d);
+	struct net_device *dev = priv->net_dev;
+	char *out = buf;
+	u32 val = 0;
+
+	out += sprintf(out, "%30s [Address ] : Hex\n", "Register");
+
+	for (i = 0; i < ARRAY_SIZE(hw_data); i++) {
+		read_register(dev, hw_data[i].addr, &val);
+		out += sprintf(out, "%30s [%08X] : %08X\n",
+			       hw_data[i].name, hw_data[i].addr, val);
+	}
+
+	return out - buf;
+}
+
+static DEVICE_ATTR(registers, 0444, show_registers, NULL);
+
+static ssize_t show_hardware(struct device *d, struct device_attribute *attr,
+			     char *buf)
+{
+	struct ipw2100_priv *priv = dev_get_drvdata(d);
+	struct net_device *dev = priv->net_dev;
+	char *out = buf;
+	int i;
+
+	out += sprintf(out, "%30s [Address ] : Hex\n", "NIC entry");
+
+	for (i = 0; i < ARRAY_SIZE(nic_data); i++) {
+		u8 tmp8;
+		u16 tmp16;
+		u32 tmp32;
+
+		switch (nic_data[i].size) {
+		case 1:
+			read_nic_byte(dev, nic_data[i].addr, &tmp8);
+			out += sprintf(out, "%30s [%08X] : %02X\n",
+				       nic_data[i].name, nic_data[i].addr,
+				       tmp8);
+			break;
+		case 2:
+			read_nic_word(dev, nic_data[i].addr, &tmp16);
+			out += sprintf(out, "%30s [%08X] : %04X\n",
+				       nic_data[i].name, nic_data[i].addr,
+				       tmp16);
+			break;
+		case 4:
+			read_nic_dword(dev, nic_data[i].addr, &tmp32);
+			out += sprintf(out, "%30s [%08X] : %08X\n",
+				       nic_data[i].name, nic_data[i].addr,
+				       tmp32);
+			break;
+		}
+	}
+	return out - buf;
+}
+
+static DEVICE_ATTR(hardware, 0444, show_hardware, NULL);
+
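+/* Walk through up to 0x30000 bytes of adapter memory, emitting at most a
+ * page of output per read.  The static cursor persists across reads so
+ * that successive reads of the sysfs file page through the whole range.
+ * Data comes from the firmware snapshot when one exists, otherwise
+ * straight from the NIC, and is printed raw or as hex lines depending on
+ * priv->dump_raw. */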
+static ssize_t show_memory(struct device *d, struct device_attribute *attr,
+			   char *buf)
+{
+	struct ipw2100_priv *priv = dev_get_drvdata(d);
+	struct net_device *dev = priv->net_dev;
+	static unsigned long loop = 0;
+	int len = 0;
+	u32 buffer[4];
+	int i;
+	char line[81];
+
+	if (loop >= 0x30000)
+		loop = 0;
+
+	/* sysfs provides us with a PAGE_SIZE buffer */
+	while (len < PAGE_SIZE - 128 && loop < 0x30000) {
+
+		if (priv->snapshot[0])
+			for (i = 0; i < 4; i++)
+				buffer[i] =
+				    *(u32 *) SNAPSHOT_ADDR(loop + i * 4);
+		else
+			for (i = 0; i < 4; i++)
+				read_nic_dword(dev, loop + i * 4, &buffer[i]);
+
+		if (priv->dump_raw)
+			len += sprintf(buf + len,
+				       "%c%c%c%c"
+				       "%c%c%c%c"
+				       "%c%c%c%c"
+				       "%c%c%c%c",
+				       ((u8 *) buffer)[0x0],
+				       ((u8 *) buffer)[0x1],
+				       ((u8 *) buffer)[0x2],
+				       ((u8 *) buffer)[0x3],
+				       ((u8 *) buffer)[0x4],
+				       ((u8 *) buffer)[0x5],
+				       ((u8 *) buffer)[0x6],
+				       ((u8 *) buffer)[0x7],
+				       ((u8 *) buffer)[0x8],
+				       ((u8 *) buffer)[0x9],
+				       ((u8 *) buffer)[0xa],
+				       ((u8 *) buffer)[0xb],
+				       ((u8 *) buffer)[0xc],
+				       ((u8 *) buffer)[0xd],
+				       ((u8 *) buffer)[0xe],
+				       ((u8 *) buffer)[0xf]);
+		else
+			len += sprintf(buf + len, "%s\n",
+				       snprint_line(line, sizeof(line),
+						    (u8 *) buffer, 16, loop));
+		loop += 16;
+	}
+
+	return len;
+}
+
+static ssize_t store_memory(struct device *d, struct device_attribute *attr,
+			    const char *buf, size_t count)
+{
+	struct ipw2100_priv *priv = dev_get_drvdata(d);
+	struct net_device *dev = priv->net_dev;
+	const char *p = buf;
+
+	(void)dev;		/* kill unused-var warning for debug-only code */
+
+	if (count < 1)
+		return count;
+
+	if (p[0] == '1' ||
+	    (count >= 2 && tolower(p[0]) == 'o' && tolower(p[1]) == 'n')) {
+		IPW_DEBUG_INFO("%s: Setting memory dump to RAW mode.\n",
+			       dev->name);
+		priv->dump_raw = 1;
+
+	} else if (p[0] == '0' || (count >= 2 && tolower(p[0]) == 'o' &&
+				   tolower(p[1]) == 'f')) {
+		IPW_DEBUG_INFO("%s: Setting memory dump to HEX mode.\n",
+			       dev->name);
+		priv->dump_raw = 0;
+
+	} else if (tolower(p[0]) == 'r') {
+		IPW_DEBUG_INFO("%s: Resetting firmware snapshot.\n", dev->name);
+		ipw2100_snapshot_free(priv);
+
+	} else
+		IPW_DEBUG_INFO("%s: Usage: 1|on = RAW, 0|off = HEX, "
+			       "reset = clear memory snapshot\n", dev->name);
+
+	return count;
+}
+
+static DEVICE_ATTR(memory, 0644, show_memory, store_memory);
+
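+/* Query and print each ordinal listed in ord_data[].  The static cursor
+ * lets the table span multiple reads when it does not fit within one
+ * PAGE_SIZE buffer. */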
+static ssize_t show_ordinals(struct device *d, struct device_attribute *attr,
+			     char *buf)
+{
+	struct ipw2100_priv *priv = dev_get_drvdata(d);
+	u32 val = 0;
+	int len = 0;
+	u32 val_len;
+	static int loop = 0;
+
+	if (priv->status & STATUS_RF_KILL_MASK)
+		return 0;
+
+	if (loop >= ARRAY_SIZE(ord_data))
+		loop = 0;
+
+	/* sysfs provides us with a PAGE_SIZE buffer */
+	while (len < PAGE_SIZE - 128 && loop < ARRAY_SIZE(ord_data)) {
+		val_len = sizeof(u32);
+
+		if (ipw2100_get_ordinal(priv, ord_data[loop].index, &val,
+					&val_len))
+			len += sprintf(buf + len, "[0x%02X] = ERROR    %s\n",
+				       ord_data[loop].index,
+				       ord_data[loop].desc);
+		else
+			len += sprintf(buf + len, "[0x%02X] = 0x%08X %s\n",
+				       ord_data[loop].index, val,
+				       ord_data[loop].desc);
+		loop++;
+	}
+
+	return len;
+}
+
+static DEVICE_ATTR(ordinals, 0444, show_ordinals, NULL);
+
+static ssize_t show_stats(struct device *d, struct device_attribute *attr,
+			  char *buf)
+{
+	struct ipw2100_priv *priv = dev_get_drvdata(d);
+	char *out = buf;
+
+	out += sprintf(out, "interrupts: %d {tx: %d, rx: %d, other: %d}\n",
+		       priv->interrupts, priv->tx_interrupts,
+		       priv->rx_interrupts, priv->inta_other);
+	out += sprintf(out, "firmware resets: %d\n", priv->resets);
+	out += sprintf(out, "firmware hangs: %d\n", priv->hangs);
+#ifdef CONFIG_IPW2100_DEBUG
+	out += sprintf(out, "packet mismatch image: %s\n",
+		       priv->snapshot[0] ? "YES" : "NO");
+#endif
+
+	return out - buf;
+}
+
+static DEVICE_ATTR(stats, 0444, show_stats, NULL);
+
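+/* Switch between BSS, IBSS and monitor operating modes.  The adapter is
+ * disabled first, and a reset is scheduled so the new mode takes effect. */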
+static int ipw2100_switch_mode(struct ipw2100_priv *priv, u32 mode)
+{
+	int err;
+
+	if (mode == priv->ieee->iw_mode)
+		return 0;
+
+	err = ipw2100_disable_adapter(priv);
+	if (err) {
+		printk(KERN_ERR DRV_NAME ": %s: Could not disable adapter %d\n",
+		       priv->net_dev->name, err);
+		return err;
+	}
+
+	switch (mode) {
+	case IW_MODE_INFRA:
+		priv->net_dev->type = ARPHRD_ETHER;
+		break;
+	case IW_MODE_ADHOC:
+		priv->net_dev->type = ARPHRD_ETHER;
+		break;
+#ifdef CONFIG_IPW2100_MONITOR
+	case IW_MODE_MONITOR:
+		priv->last_mode = priv->ieee->iw_mode;
+		priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
+		break;
+#endif				/* CONFIG_IPW2100_MONITOR */
+	}
+
+	priv->ieee->iw_mode = mode;
+
+#ifdef CONFIG_PM
+	/* Force ipw2100_download_firmware to load the firmware from
+	 * disk instead of from the cached copy in memory. */
+	ipw2100_firmware.version = 0;
+#endif
+
+	printk(KERN_INFO "%s: Resetting on mode change.\n", priv->net_dev->name);
+	priv->reset_backoff = 0;
+	schedule_reset(priv);
+
+	return 0;
+}
+
+static ssize_t show_internals(struct device *d, struct device_attribute *attr,
+			      char *buf)
+{
+	struct ipw2100_priv *priv = dev_get_drvdata(d);
+	int len = 0;
+
+#define DUMP_VAR(x,y) len += sprintf(buf + len, # x ": %" y "\n", priv-> x)
+
+	if (priv->status & STATUS_ASSOCIATED)
+		len += sprintf(buf + len, "connected: %llu\n",
+			       ktime_get_boottime_seconds() - priv->connect_start);
+	else
+		len += sprintf(buf + len, "not connected\n");
+
+	DUMP_VAR(ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx], "p");
+	DUMP_VAR(status, "08lx");
+	DUMP_VAR(config, "08lx");
+	DUMP_VAR(capability, "08lx");
+
+	len += sprintf(buf + len, "last_rtc: %lu\n",
+		       (unsigned long)priv->last_rtc);
+
+	DUMP_VAR(fatal_error, "d");
+	DUMP_VAR(stop_hang_check, "d");
+	DUMP_VAR(stop_rf_kill, "d");
+	DUMP_VAR(messages_sent, "d");
+
+	DUMP_VAR(tx_pend_stat.value, "d");
+	DUMP_VAR(tx_pend_stat.hi, "d");
+
+	DUMP_VAR(tx_free_stat.value, "d");
+	DUMP_VAR(tx_free_stat.lo, "d");
+
+	DUMP_VAR(msg_free_stat.value, "d");
+	DUMP_VAR(msg_free_stat.lo, "d");
+
+	DUMP_VAR(msg_pend_stat.value, "d");
+	DUMP_VAR(msg_pend_stat.hi, "d");
+
+	DUMP_VAR(fw_pend_stat.value, "d");
+	DUMP_VAR(fw_pend_stat.hi, "d");
+
+	DUMP_VAR(txq_stat.value, "d");
+	DUMP_VAR(txq_stat.lo, "d");
+
+	DUMP_VAR(ieee->scans, "d");
+	DUMP_VAR(reset_backoff, "lld");
+
+	return len;
+}
+
+static DEVICE_ATTR(internals, 0444, show_internals, NULL);
+
+static ssize_t show_bssinfo(struct device *d, struct device_attribute *attr,
+			    char *buf)
+{
+	struct ipw2100_priv *priv = dev_get_drvdata(d);
+	char essid[IW_ESSID_MAX_SIZE + 1];
+	u8 bssid[ETH_ALEN];
+	u32 chan = 0;
+	char *out = buf;
+	unsigned int length;
+	int ret;
+
+	if (priv->status & STATUS_RF_KILL_MASK)
+		return 0;
+
+	memset(essid, 0, sizeof(essid));
+	memset(bssid, 0, sizeof(bssid));
+
+	length = IW_ESSID_MAX_SIZE;
+	ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_SSID, essid, &length);
+	if (ret)
+		IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
+			       __LINE__);
+
+	length = sizeof(bssid);
+	ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID,
+				  bssid, &length);
+	if (ret)
+		IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
+			       __LINE__);
+
+	length = sizeof(u32);
+	ret = ipw2100_get_ordinal(priv, IPW_ORD_OUR_FREQ, &chan, &length);
+	if (ret)
+		IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
+			       __LINE__);
+
+	out += sprintf(out, "ESSID: %s\n", essid);
+	out += sprintf(out, "BSSID:   %pM\n", bssid);
+	out += sprintf(out, "Channel: %d\n", chan);
+
+	return out - buf;
+}
+
+static DEVICE_ATTR(bssinfo, 0444, show_bssinfo, NULL);
+
+#ifdef CONFIG_IPW2100_DEBUG
+static ssize_t debug_level_show(struct device_driver *d, char *buf)
+{
+	return sprintf(buf, "0x%08X\n", ipw2100_debug_level);
+}
+
+static ssize_t debug_level_store(struct device_driver *d,
+				 const char *buf, size_t count)
+{
+	u32 val;
+	int ret;
+
+	ret = kstrtou32(buf, 0, &val);
+	if (ret)
+		IPW_DEBUG_INFO(": %s is not in hex or decimal form.\n", buf);
+	else
+		ipw2100_debug_level = val;
+
+	return strnlen(buf, count);
+}
+static DRIVER_ATTR_RW(debug_level);
+#endif				/* CONFIG_IPW2100_DEBUG */
+
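+/* Print the current fatal error (0 if none) followed by the ring of
+ * previously recorded fatal errors, most recent first. */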
+static ssize_t show_fatal_error(struct device *d,
+				struct device_attribute *attr, char *buf)
+{
+	struct ipw2100_priv *priv = dev_get_drvdata(d);
+	char *out = buf;
+	int i;
+
+	if (priv->fatal_error)
+		out += sprintf(out, "0x%08X\n", priv->fatal_error);
+	else
+		out += sprintf(out, "0\n");
+
+	for (i = 1; i <= IPW2100_ERROR_QUEUE; i++) {
+		if (!priv->fatal_errors[(priv->fatal_index - i) %
+					IPW2100_ERROR_QUEUE])
+			continue;
+
+		out += sprintf(out, "%d. 0x%08X\n", i,
+			       priv->fatal_errors[(priv->fatal_index - i) %
+						  IPW2100_ERROR_QUEUE]);
+	}
+
+	return out - buf;
+}
+
+static ssize_t store_fatal_error(struct device *d,
+				 struct device_attribute *attr, const char *buf,
+				 size_t count)
+{
+	struct ipw2100_priv *priv = dev_get_drvdata(d);
+	schedule_reset(priv);
+	return count;
+}
+
+static DEVICE_ATTR(fatal_error, 0644, show_fatal_error, store_fatal_error);
+
+static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
+			     char *buf)
+{
+	struct ipw2100_priv *priv = dev_get_drvdata(d);
+	return sprintf(buf, "%d\n", priv->ieee->scan_age);
+}
+
+static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct ipw2100_priv *priv = dev_get_drvdata(d);
+	struct net_device *dev = priv->net_dev;
+	unsigned long val;
+	int ret;
+
+	(void)dev;		/* kill unused-var warning for debug-only code */
+
+	IPW_DEBUG_INFO("enter\n");
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret) {
+		IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
+	} else {
+		priv->ieee->scan_age = val;
+		IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
+	}
+
+	IPW_DEBUG_INFO("exit\n");
+	return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(scan_age, 0644, show_scan_age, store_scan_age);
+
+static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
+			    char *buf)
+{
+	/* 0 - RF kill not enabled
+	   1 - SW based RF kill active (sysfs)
+	   2 - HW based RF kill active
+	   3 - Both HW and SW based RF kill active */
+	struct ipw2100_priv *priv = dev_get_drvdata(d);
+	int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
+	    (rf_kill_active(priv) ? 0x2 : 0x0);
+	return sprintf(buf, "%i\n", val);
+}
+
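+/* Toggle the software RF kill switch.  Returns 0 if the requested state
+ * was already set, 1 if the state changed.  Turning the radio back on is
+ * deferred to the rf_kill poll while the hardware switch is engaged. */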
+static int ipw_radio_kill_sw(struct ipw2100_priv *priv, int disable_radio)
+{
+	if ((disable_radio ? 1 : 0) ==
+	    (priv->status & STATUS_RF_KILL_SW ? 1 : 0))
+		return 0;
+
+	IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
+			  disable_radio ? "OFF" : "ON");
+
+	mutex_lock(&priv->action_mutex);
+
+	if (disable_radio) {
+		priv->status |= STATUS_RF_KILL_SW;
+		ipw2100_down(priv);
+	} else {
+		priv->status &= ~STATUS_RF_KILL_SW;
+		if (rf_kill_active(priv)) {
+			IPW_DEBUG_RF_KILL("Can not turn radio back on - "
+					  "disabled by HW switch\n");
+			/* Make sure the RF_KILL check timer is running */
+			priv->stop_rf_kill = 0;
+			mod_delayed_work(system_wq, &priv->rf_kill,
+					 round_jiffies_relative(HZ));
+		} else
+			schedule_reset(priv);
+	}
+
+	mutex_unlock(&priv->action_mutex);
+	return 1;
+}
+
+static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
+			     const char *buf, size_t count)
+{
+	struct ipw2100_priv *priv = dev_get_drvdata(d);
+	ipw_radio_kill_sw(priv, buf[0] == '1');
+	return count;
+}
+
+static DEVICE_ATTR(rf_kill, 0644, show_rf_kill, store_rf_kill);
+
+static struct attribute *ipw2100_sysfs_entries[] = {
+	&dev_attr_hardware.attr,
+	&dev_attr_registers.attr,
+	&dev_attr_ordinals.attr,
+	&dev_attr_pci.attr,
+	&dev_attr_stats.attr,
+	&dev_attr_internals.attr,
+	&dev_attr_bssinfo.attr,
+	&dev_attr_memory.attr,
+	&dev_attr_scan_age.attr,
+	&dev_attr_fatal_error.attr,
+	&dev_attr_rf_kill.attr,
+	&dev_attr_cfg.attr,
+	&dev_attr_status.attr,
+	&dev_attr_capability.attr,
+	NULL,
+};
+
+static const struct attribute_group ipw2100_attribute_group = {
+	.attrs = ipw2100_sysfs_entries,
+};
+
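+/* Allocate the DMA-coherent status ring; one ipw2100_status entry
+ * parallels each RX buffer descriptor. */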
+static int status_queue_allocate(struct ipw2100_priv *priv, int entries)
+{
+	struct ipw2100_status_queue *q = &priv->status_queue;
+
+	IPW_DEBUG_INFO("enter\n");
+
+	q->size = entries * sizeof(struct ipw2100_status);
+	q->drv = pci_zalloc_consistent(priv->pci_dev, q->size, &q->nic);
+	if (!q->drv) {
+		IPW_DEBUG_WARNING("Can not allocate status queue.\n");
+		return -ENOMEM;
+	}
+
+	IPW_DEBUG_INFO("exit\n");
+
+	return 0;
+}
+
+static void status_queue_free(struct ipw2100_priv *priv)
+{
+	IPW_DEBUG_INFO("enter\n");
+
+	if (priv->status_queue.drv) {
+		pci_free_consistent(priv->pci_dev, priv->status_queue.size,
+				    priv->status_queue.drv,
+				    priv->status_queue.nic);
+		priv->status_queue.drv = NULL;
+	}
+
+	IPW_DEBUG_INFO("exit\n");
+}
+
+static int bd_queue_allocate(struct ipw2100_priv *priv,
+			     struct ipw2100_bd_queue *q, int entries)
+{
+	IPW_DEBUG_INFO("enter\n");
+
+	memset(q, 0, sizeof(struct ipw2100_bd_queue));
+
+	q->entries = entries;
+	q->size = entries * sizeof(struct ipw2100_bd);
+	q->drv = pci_zalloc_consistent(priv->pci_dev, q->size, &q->nic);
+	if (!q->drv) {
+		IPW_DEBUG_INFO
+		    ("can't allocate shared memory for buffer descriptors\n");
+		return -ENOMEM;
+	}
+
+	IPW_DEBUG_INFO("exit\n");
+
+	return 0;
+}
+
+static void bd_queue_free(struct ipw2100_priv *priv, struct ipw2100_bd_queue *q)
+{
+	IPW_DEBUG_INFO("enter\n");
+
+	if (!q)
+		return;
+
+	if (q->drv) {
+		pci_free_consistent(priv->pci_dev, q->size, q->drv, q->nic);
+		q->drv = NULL;
+	}
+
+	IPW_DEBUG_INFO("exit\n");
+}
+
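+/* Point the hardware at a buffer descriptor ring by writing its base
+ * address, entry count and initial read/write indices to the registers
+ * given by @base, @size, @r and @w. */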
+static void bd_queue_initialize(struct ipw2100_priv *priv,
+				struct ipw2100_bd_queue *q, u32 base, u32 size,
+				u32 r, u32 w)
+{
+	IPW_DEBUG_INFO("enter\n");
+
+	IPW_DEBUG_INFO("initializing bd queue at virt=%p, phys=%08x\n", q->drv,
+		       (u32) q->nic);
+
+	write_register(priv->net_dev, base, q->nic);
+	write_register(priv->net_dev, size, q->entries);
+	write_register(priv->net_dev, r, q->oldest);
+	write_register(priv->net_dev, w, q->next);
+
+	IPW_DEBUG_INFO("exit\n");
+}
+
+static void ipw2100_kill_works(struct ipw2100_priv *priv)
+{
+	priv->stop_rf_kill = 1;
+	priv->stop_hang_check = 1;
+	cancel_delayed_work_sync(&priv->reset_work);
+	cancel_delayed_work_sync(&priv->security_work);
+	cancel_delayed_work_sync(&priv->wx_event_work);
+	cancel_delayed_work_sync(&priv->hang_check);
+	cancel_delayed_work_sync(&priv->rf_kill);
+	cancel_delayed_work_sync(&priv->scan_event);
+}
+
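+/* Allocate the TX buffer descriptor ring plus a DMA-coherent data header
+ * for each pending-TX slot, unwinding everything on partial failure. */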
+static int ipw2100_tx_allocate(struct ipw2100_priv *priv)
+{
+	int i, j, err = -EINVAL;
+	void *v;
+	dma_addr_t p;
+
+	IPW_DEBUG_INFO("enter\n");
+
+	err = bd_queue_allocate(priv, &priv->tx_queue, TX_QUEUE_LENGTH);
+	if (err) {
+		IPW_DEBUG_ERROR("%s: failed bd_queue_allocate\n",
+				priv->net_dev->name);
+		return err;
+	}
+
+	priv->tx_buffers = kmalloc_array(TX_PENDED_QUEUE_LENGTH,
+					 sizeof(struct ipw2100_tx_packet),
+					 GFP_ATOMIC);
+	if (!priv->tx_buffers) {
+		bd_queue_free(priv, &priv->tx_queue);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < TX_PENDED_QUEUE_LENGTH; i++) {
+		v = pci_alloc_consistent(priv->pci_dev,
+					 sizeof(struct ipw2100_data_header),
+					 &p);
+		if (!v) {
+			printk(KERN_ERR DRV_NAME
+			       ": %s: PCI alloc failed for tx buffers.\n",
+			       priv->net_dev->name);
+			err = -ENOMEM;
+			break;
+		}
+
+		priv->tx_buffers[i].type = DATA;
+		priv->tx_buffers[i].info.d_struct.data =
+		    (struct ipw2100_data_header *)v;
+		priv->tx_buffers[i].info.d_struct.data_phys = p;
+		priv->tx_buffers[i].info.d_struct.txb = NULL;
+	}
+
+	if (i == TX_PENDED_QUEUE_LENGTH)
+		return 0;
+
+	for (j = 0; j < i; j++) {
+		pci_free_consistent(priv->pci_dev,
+				    sizeof(struct ipw2100_data_header),
+				    priv->tx_buffers[j].info.d_struct.data,
+				    priv->tx_buffers[j].info.d_struct.data_phys);
+	}
+
+	kfree(priv->tx_buffers);
+	priv->tx_buffers = NULL;
+
+	return err;
+}
+
+static void ipw2100_tx_initialize(struct ipw2100_priv *priv)
+{
+	int i;
+
+	IPW_DEBUG_INFO("enter\n");
+
+	/*
+	 * reinitialize packet info lists
+	 */
+	INIT_LIST_HEAD(&priv->fw_pend_list);
+	INIT_STAT(&priv->fw_pend_stat);
+
+	/*
+	 * reinitialize lists
+	 */
+	INIT_LIST_HEAD(&priv->tx_pend_list);
+	INIT_LIST_HEAD(&priv->tx_free_list);
+	INIT_STAT(&priv->tx_pend_stat);
+	INIT_STAT(&priv->tx_free_stat);
+
+	for (i = 0; i < TX_PENDED_QUEUE_LENGTH; i++) {
+		/* We simply drop any SKBs that have been queued for
+		 * transmit */
+		if (priv->tx_buffers[i].info.d_struct.txb) {
+			libipw_txb_free(priv->tx_buffers[i].info.d_struct.txb);
+			priv->tx_buffers[i].info.d_struct.txb = NULL;
+		}
+
+		list_add_tail(&priv->tx_buffers[i].list, &priv->tx_free_list);
+	}
+
+	SET_STAT(&priv->tx_free_stat, i);
+
+	priv->tx_queue.oldest = 0;
+	priv->tx_queue.available = priv->tx_queue.entries;
+	priv->tx_queue.next = 0;
+	INIT_STAT(&priv->txq_stat);
+	SET_STAT(&priv->txq_stat, priv->tx_queue.available);
+
+	bd_queue_initialize(priv, &priv->tx_queue,
+			    IPW_MEM_HOST_SHARED_TX_QUEUE_BD_BASE,
+			    IPW_MEM_HOST_SHARED_TX_QUEUE_BD_SIZE,
+			    IPW_MEM_HOST_SHARED_TX_QUEUE_READ_INDEX,
+			    IPW_MEM_HOST_SHARED_TX_QUEUE_WRITE_INDEX);
+
+	IPW_DEBUG_INFO("exit\n");
+
+}
+
+static void ipw2100_tx_free(struct ipw2100_priv *priv)
+{
+	int i;
+
+	IPW_DEBUG_INFO("enter\n");
+
+	bd_queue_free(priv, &priv->tx_queue);
+
+	if (!priv->tx_buffers)
+		return;
+
+	for (i = 0; i < TX_PENDED_QUEUE_LENGTH; i++) {
+		if (priv->tx_buffers[i].info.d_struct.txb) {
+			libipw_txb_free(priv->tx_buffers[i].info.d_struct.txb);
+			priv->tx_buffers[i].info.d_struct.txb = NULL;
+		}
+		if (priv->tx_buffers[i].info.d_struct.data)
+			pci_free_consistent(priv->pci_dev,
+					    sizeof(struct ipw2100_data_header),
+					    priv->tx_buffers[i].info.d_struct.data,
+					    priv->tx_buffers[i].info.d_struct.data_phys);
+	}
+
+	kfree(priv->tx_buffers);
+	priv->tx_buffers = NULL;
+
+	IPW_DEBUG_INFO("exit\n");
+}
+
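+/* Allocate the RX buffer descriptor ring, the matching status ring and
+ * an skb for every RX slot, unwinding everything on partial failure. */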
+static int ipw2100_rx_allocate(struct ipw2100_priv *priv)
+{
+	int i, j, err = -EINVAL;
+
+	IPW_DEBUG_INFO("enter\n");
+
+	err = bd_queue_allocate(priv, &priv->rx_queue, RX_QUEUE_LENGTH);
+	if (err) {
+		IPW_DEBUG_INFO("failed bd_queue_allocate\n");
+		return err;
+	}
+
+	err = status_queue_allocate(priv, RX_QUEUE_LENGTH);
+	if (err) {
+		IPW_DEBUG_INFO("failed status_queue_allocate\n");
+		bd_queue_free(priv, &priv->rx_queue);
+		return err;
+	}
+
+	/*
+	 * allocate packets
+	 */
+	priv->rx_buffers = kmalloc_array(RX_QUEUE_LENGTH,
+					 sizeof(struct ipw2100_rx_packet),
+					 GFP_KERNEL);
+	if (!priv->rx_buffers) {
+		IPW_DEBUG_INFO("can't allocate rx packet buffer table\n");
+
+		bd_queue_free(priv, &priv->rx_queue);
+
+		status_queue_free(priv);
+
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < RX_QUEUE_LENGTH; i++) {
+		struct ipw2100_rx_packet *packet = &priv->rx_buffers[i];
+
+		err = ipw2100_alloc_skb(priv, packet);
+		if (unlikely(err)) {
+			err = -ENOMEM;
+			break;
+		}
+
+		/* The BD holds the cache aligned address */
+		priv->rx_queue.drv[i].host_addr = packet->dma_addr;
+		priv->rx_queue.drv[i].buf_length = IPW_RX_NIC_BUFFER_LENGTH;
+		priv->status_queue.drv[i].status_fields = 0;
+	}
+
+	if (i == RX_QUEUE_LENGTH)
+		return 0;
+
+	for (j = 0; j < i; j++) {
+		pci_unmap_single(priv->pci_dev, priv->rx_buffers[j].dma_addr,
+				 sizeof(struct ipw2100_rx_packet),
+				 PCI_DMA_FROMDEVICE);
+		dev_kfree_skb(priv->rx_buffers[j].skb);
+	}
+
+	kfree(priv->rx_buffers);
+	priv->rx_buffers = NULL;
+
+	bd_queue_free(priv, &priv->rx_queue);
+
+	status_queue_free(priv);
+
+	return err;
+}
+
+static void ipw2100_rx_initialize(struct ipw2100_priv *priv)
+{
+	IPW_DEBUG_INFO("enter\n");
+
+	priv->rx_queue.oldest = 0;
+	priv->rx_queue.available = priv->rx_queue.entries - 1;
+	priv->rx_queue.next = priv->rx_queue.entries - 1;
+
+	INIT_STAT(&priv->rxq_stat);
+	SET_STAT(&priv->rxq_stat, priv->rx_queue.available);
+
+	bd_queue_initialize(priv, &priv->rx_queue,
+			    IPW_MEM_HOST_SHARED_RX_BD_BASE,
+			    IPW_MEM_HOST_SHARED_RX_BD_SIZE,
+			    IPW_MEM_HOST_SHARED_RX_READ_INDEX,
+			    IPW_MEM_HOST_SHARED_RX_WRITE_INDEX);
+
+	/* set up the status queue */
+	write_register(priv->net_dev, IPW_MEM_HOST_SHARED_RX_STATUS_BASE,
+		       priv->status_queue.nic);
+
+	IPW_DEBUG_INFO("exit\n");
+}
+
+static void ipw2100_rx_free(struct ipw2100_priv *priv)
+{
+	int i;
+
+	IPW_DEBUG_INFO("enter\n");
+
+	bd_queue_free(priv, &priv->rx_queue);
+	status_queue_free(priv);
+
+	if (!priv->rx_buffers)
+		return;
+
+	for (i = 0; i < RX_QUEUE_LENGTH; i++) {
+		if (priv->rx_buffers[i].rxp) {
+			pci_unmap_single(priv->pci_dev,
+					 priv->rx_buffers[i].dma_addr,
+					 sizeof(struct ipw2100_rx),
+					 PCI_DMA_FROMDEVICE);
+			dev_kfree_skb(priv->rx_buffers[i].skb);
+		}
+	}
+
+	kfree(priv->rx_buffers);
+	priv->rx_buffers = NULL;
+
+	IPW_DEBUG_INFO("exit\n");
+}
+
+static int ipw2100_read_mac_address(struct ipw2100_priv *priv)
+{
+	u32 length = ETH_ALEN;
+	u8 addr[ETH_ALEN];
+
+	int err;
+
+	err = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ADAPTER_MAC, addr, &length);
+	if (err) {
+		IPW_DEBUG_INFO("MAC address read failed\n");
+		return -EIO;
+	}
+
+	memcpy(priv->net_dev->dev_addr, addr, ETH_ALEN);
+	IPW_DEBUG_INFO("card MAC is %pM\n", priv->net_dev->dev_addr);
+
+	return 0;
+}
+
+/********************************************************************
+ *
+ * Firmware Commands
+ *
+ ********************************************************************/
+
+static int ipw2100_set_mac_address(struct ipw2100_priv *priv, int batch_mode)
+{
+	struct host_command cmd = {
+		.host_command = ADAPTER_ADDRESS,
+		.host_command_sequence = 0,
+		.host_command_length = ETH_ALEN
+	};
+	int err;
+
+	IPW_DEBUG_HC("SET_MAC_ADDRESS\n");
+
+	IPW_DEBUG_INFO("enter\n");
+
+	if (priv->config & CFG_CUSTOM_MAC) {
+		memcpy(cmd.host_command_parameters, priv->mac_addr, ETH_ALEN);
+		memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
+	} else
+		memcpy(cmd.host_command_parameters, priv->net_dev->dev_addr,
+		       ETH_ALEN);
+
+	err = ipw2100_hw_send_command(priv, &cmd);
+
+	IPW_DEBUG_INFO("exit\n");
+	return err;
+}
+
+static int ipw2100_set_port_type(struct ipw2100_priv *priv, u32 port_type,
+				 int batch_mode)
+{
+	struct host_command cmd = {
+		.host_command = PORT_TYPE,
+		.host_command_sequence = 0,
+		.host_command_length = sizeof(u32)
+	};
+	int err;
+
+	switch (port_type) {
+	case IW_MODE_INFRA:
+		cmd.host_command_parameters[0] = IPW_BSS;
+		break;
+	case IW_MODE_ADHOC:
+		cmd.host_command_parameters[0] = IPW_IBSS;
+		break;
+	}
+
+	IPW_DEBUG_HC("PORT_TYPE: %s\n",
+		     port_type == IPW_IBSS ? "Ad-Hoc" : "Managed");
+
+	if (!batch_mode) {
+		err = ipw2100_disable_adapter(priv);
+		if (err) {
+			printk(KERN_ERR DRV_NAME
+			       ": %s: Could not disable adapter %d\n",
+			       priv->net_dev->name, err);
+			return err;
+		}
+	}
+
+	/* send cmd to firmware */
+	err = ipw2100_hw_send_command(priv, &cmd);
+
+	if (!batch_mode)
+		ipw2100_enable_adapter(priv);
+
+	return err;
+}
+
+static int ipw2100_set_channel(struct ipw2100_priv *priv, u32 channel,
+			       int batch_mode)
+{
+	struct host_command cmd = {
+		.host_command = CHANNEL,
+		.host_command_sequence = 0,
+		.host_command_length = sizeof(u32)
+	};
+	int err;
+
+	cmd.host_command_parameters[0] = channel;
+
+	IPW_DEBUG_HC("CHANNEL: %d\n", channel);
+
+	/* If BSS then we don't support channel selection */
+	if (priv->ieee->iw_mode == IW_MODE_INFRA)
+		return 0;
+
+	if ((channel != 0) &&
+	    ((channel < REG_MIN_CHANNEL) || (channel > REG_MAX_CHANNEL)))
+		return -EINVAL;
+
+	if (!batch_mode) {
+		err = ipw2100_disable_adapter(priv);
+		if (err)
+			return err;
+	}
+
+	err = ipw2100_hw_send_command(priv, &cmd);
+	if (err) {
+		IPW_DEBUG_INFO("Failed to set channel to %d\n", channel);
+		return err;
+	}
+
+	if (channel)
+		priv->config |= CFG_STATIC_CHANNEL;
+	else
+		priv->config &= ~CFG_STATIC_CHANNEL;
+
+	priv->channel = channel;
+
+	if (!batch_mode) {
+		err = ipw2100_enable_adapter(priv);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
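+/* Send the SYSTEM_CONFIG parameters -- IBSS auto-start, BSS/IBSS masks,
+ * 802.1x, preamble mode and the 11b IBSS channel mask -- to the firmware. */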
+static int ipw2100_system_config(struct ipw2100_priv *priv, int batch_mode)
+{
+	struct host_command cmd = {
+		.host_command = SYSTEM_CONFIG,
+		.host_command_sequence = 0,
+		.host_command_length = 12,
+	};
+	u32 ibss_mask, len = sizeof(u32);
+	int err;
+
+	/* Set system configuration */
+
+	if (!batch_mode) {
+		err = ipw2100_disable_adapter(priv);
+		if (err)
+			return err;
+	}
+
+	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
+		cmd.host_command_parameters[0] |= IPW_CFG_IBSS_AUTO_START;
+
+	cmd.host_command_parameters[0] |= IPW_CFG_IBSS_MASK |
+	    IPW_CFG_BSS_MASK | IPW_CFG_802_1x_ENABLE;
+
+	if (!(priv->config & CFG_LONG_PREAMBLE))
+		cmd.host_command_parameters[0] |= IPW_CFG_PREAMBLE_AUTO;
+
+	err = ipw2100_get_ordinal(priv,
+				  IPW_ORD_EEPROM_IBSS_11B_CHANNELS,
+				  &ibss_mask, &len);
+	if (err)
+		ibss_mask = IPW_IBSS_11B_DEFAULT_MASK;
+
+	cmd.host_command_parameters[1] = REG_CHANNEL_MASK;
+	cmd.host_command_parameters[2] = REG_CHANNEL_MASK & ibss_mask;
+
+	/* 11b only */
+	/*cmd.host_command_parameters[0] |= DIVERSITY_ANTENNA_A; */
+
+	err = ipw2100_hw_send_command(priv, &cmd);
+	if (err)
+		return err;
+
+/* If IPv6 is configured in the kernel then we don't want to filter out all
+ * of the multicast packets as IPv6 needs some. */
+#if !defined(CONFIG_IPV6) && !defined(CONFIG_IPV6_MODULE)
+	cmd.host_command = ADD_MULTICAST;
+	cmd.host_command_sequence = 0;
+	cmd.host_command_length = 0;
+
+	ipw2100_hw_send_command(priv, &cmd);
+#endif
+	if (!batch_mode) {
+		err = ipw2100_enable_adapter(priv);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int ipw2100_set_tx_rates(struct ipw2100_priv *priv, u32 rate,
+				int batch_mode)
+{
+	struct host_command cmd = {
+		.host_command = BASIC_TX_RATES,
+		.host_command_sequence = 0,
+		.host_command_length = 4
+	};
+	int err;
+
+	cmd.host_command_parameters[0] = rate & TX_RATE_MASK;
+
+	if (!batch_mode) {
+		err = ipw2100_disable_adapter(priv);
+		if (err)
+			return err;
+	}
+
+	/* Set BASIC TX Rate first */
+	ipw2100_hw_send_command(priv, &cmd);
+
+	/* Set TX Rate */
+	cmd.host_command = TX_RATES;
+	ipw2100_hw_send_command(priv, &cmd);
+
+	/* Set MSDU TX Rate */
+	cmd.host_command = MSDU_TX_RATES;
+	ipw2100_hw_send_command(priv, &cmd);
+
+	if (!batch_mode) {
+		err = ipw2100_enable_adapter(priv);
+		if (err)
+			return err;
+	}
+
+	priv->tx_rates = rate;
+
+	return 0;
+}
+
+static int ipw2100_set_power_mode(struct ipw2100_priv *priv, int power_level)
+{
+	struct host_command cmd = {
+		.host_command = POWER_MODE,
+		.host_command_sequence = 0,
+		.host_command_length = 4
+	};
+	int err;
+
+	cmd.host_command_parameters[0] = power_level;
+
+	err = ipw2100_hw_send_command(priv, &cmd);
+	if (err)
+		return err;
+
+	if (power_level == IPW_POWER_MODE_CAM)
+		priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
+	else
+		priv->power_mode = IPW_POWER_ENABLED | power_level;
+
+#ifdef IPW2100_TX_POWER
+	if (priv->port_type == IBSS && priv->adhoc_power != DFTL_IBSS_TX_POWER) {
+		/* Set beacon interval */
+		cmd.host_command = TX_POWER_INDEX;
+		cmd.host_command_parameters[0] = (u32) priv->adhoc_power;
+
+		err = ipw2100_hw_send_command(priv, &cmd);
+		if (err)
+			return err;
+	}
+#endif
+
+	return 0;
+}
+
+static int ipw2100_set_rts_threshold(struct ipw2100_priv *priv, u32 threshold)
+{
+	struct host_command cmd = {
+		.host_command = RTS_THRESHOLD,
+		.host_command_sequence = 0,
+		.host_command_length = 4
+	};
+	int err;
+
+	if (threshold & RTS_DISABLED)
+		cmd.host_command_parameters[0] = MAX_RTS_THRESHOLD;
+	else
+		cmd.host_command_parameters[0] = threshold & ~RTS_DISABLED;
+
+	err = ipw2100_hw_send_command(priv, &cmd);
+	if (err)
+		return err;
+
+	priv->rts_threshold = threshold;
+
+	return 0;
+}
+
+#if 0
+int ipw2100_set_fragmentation_threshold(struct ipw2100_priv *priv,
+					u32 threshold, int batch_mode)
+{
+	struct host_command cmd = {
+		.host_command = FRAG_THRESHOLD,
+		.host_command_sequence = 0,
+		.host_command_length = 4,
+		.host_command_parameters[0] = 0,
+	};
+	int err;
+
+	if (!batch_mode) {
+		err = ipw2100_disable_adapter(priv);
+		if (err)
+			return err;
+	}
+
+	if (threshold == 0)
+		threshold = DEFAULT_FRAG_THRESHOLD;
+	else {
+		threshold = max(threshold, MIN_FRAG_THRESHOLD);
+		threshold = min(threshold, MAX_FRAG_THRESHOLD);
+	}
+
+	cmd.host_command_parameters[0] = threshold;
+
+	IPW_DEBUG_HC("FRAG_THRESHOLD: %u\n", threshold);
+
+	err = ipw2100_hw_send_command(priv, &cmd);
+
+	if (!batch_mode)
+		ipw2100_enable_adapter(priv);
+
+	if (!err)
+		priv->frag_threshold = threshold;
+
+	return err;
+}
+#endif
+
+static int ipw2100_set_short_retry(struct ipw2100_priv *priv, u32 retry)
+{
+	struct host_command cmd = {
+		.host_command = SHORT_RETRY_LIMIT,
+		.host_command_sequence = 0,
+		.host_command_length = 4
+	};
+	int err;
+
+	cmd.host_command_parameters[0] = retry;
+
+	err = ipw2100_hw_send_command(priv, &cmd);
+	if (err)
+		return err;
+
+	priv->short_retry_limit = retry;
+
+	return 0;
+}
+
+static int ipw2100_set_long_retry(struct ipw2100_priv *priv, u32 retry)
+{
+	struct host_command cmd = {
+		.host_command = LONG_RETRY_LIMIT,
+		.host_command_sequence = 0,
+		.host_command_length = 4
+	};
+	int err;
+
+	cmd.host_command_parameters[0] = retry;
+
+	err = ipw2100_hw_send_command(priv, &cmd);
+	if (err)
+		return err;
+
+	priv->long_retry_limit = retry;
+
+	return 0;
+}
+
+static int ipw2100_set_mandatory_bssid(struct ipw2100_priv *priv, u8 * bssid,
+				       int batch_mode)
+{
+	struct host_command cmd = {
+		.host_command = MANDATORY_BSSID,
+		.host_command_sequence = 0,
+		.host_command_length = (bssid == NULL) ? 0 : ETH_ALEN
+	};
+	int err;
+
+#ifdef CONFIG_IPW2100_DEBUG
+	if (bssid != NULL)
+		IPW_DEBUG_HC("MANDATORY_BSSID: %pM\n", bssid);
+	else
+		IPW_DEBUG_HC("MANDATORY_BSSID: <clear>\n");
+#endif
+	/* if BSSID is empty then we disable mandatory bssid mode */
+	if (bssid != NULL)
+		memcpy(cmd.host_command_parameters, bssid, ETH_ALEN);
+
+	if (!batch_mode) {
+		err = ipw2100_disable_adapter(priv);
+		if (err)
+			return err;
+	}
+
+	err = ipw2100_hw_send_command(priv, &cmd);
+
+	if (!batch_mode)
+		ipw2100_enable_adapter(priv);
+
+	return err;
+}
+
+static int ipw2100_disassociate_bssid(struct ipw2100_priv *priv)
+{
+	struct host_command cmd = {
+		.host_command = DISASSOCIATION_BSSID,
+		.host_command_sequence = 0,
+		.host_command_length = ETH_ALEN
+	};
+	int err;
+
+	IPW_DEBUG_HC("DISASSOCIATION_BSSID\n");
+
+	/* The firmware currently ignores the BSSID and just disassociates from
+	 * the currently associated AP -- but on the off chance that a future
+	 * firmware does use the BSSID provided here, we go ahead and try to
+	 * set it to the currently associated AP's BSSID */
+	memcpy(cmd.host_command_parameters, priv->bssid, ETH_ALEN);
+
+	err = ipw2100_hw_send_command(priv, &cmd);
+
+	return err;
+}
+
+static int ipw2100_set_wpa_ie(struct ipw2100_priv *,
+			      struct ipw2100_wpa_assoc_frame *, int)
+    __attribute__ ((unused));
+
+static int ipw2100_set_wpa_ie(struct ipw2100_priv *priv,
+			      struct ipw2100_wpa_assoc_frame *wpa_frame,
+			      int batch_mode)
+{
+	struct host_command cmd = {
+		.host_command = SET_WPA_IE,
+		.host_command_sequence = 0,
+		.host_command_length = sizeof(struct ipw2100_wpa_assoc_frame),
+	};
+	int err;
+
+	IPW_DEBUG_HC("SET_WPA_IE\n");
+
+	if (!batch_mode) {
+		err = ipw2100_disable_adapter(priv);
+		if (err)
+			return err;
+	}
+
+	memcpy(cmd.host_command_parameters, wpa_frame,
+	       sizeof(struct ipw2100_wpa_assoc_frame));
+
+	err = ipw2100_hw_send_command(priv, &cmd);
+
+	if (!batch_mode) {
+		if (ipw2100_enable_adapter(priv))
+			err = -EIO;
+	}
+
+	return err;
+}
+
+struct security_info_params {
+	u32 allowed_ciphers;
+	u16 version;
+	u8 auth_mode;
+	u8 replay_counters_number;
+	u8 unicast_using_group;
+} __packed;
+
+static int ipw2100_set_security_information(struct ipw2100_priv *priv,
+					    int auth_mode,
+					    int security_level,
+					    int unicast_using_group,
+					    int batch_mode)
+{
+	struct host_command cmd = {
+		.host_command = SET_SECURITY_INFORMATION,
+		.host_command_sequence = 0,
+		.host_command_length = sizeof(struct security_info_params)
+	};
+	struct security_info_params *security =
+	    (struct security_info_params *)&cmd.host_command_parameters;
+	int err;
+	memset(security, 0, sizeof(*security));
+
+	/* If shared key AP authentication is turned on, then we need to
+	 * configure the firmware to try and use it.
+	 *
+	 * Actual data encryption/decryption is handled by the host. */
+	security->auth_mode = auth_mode;
+	security->unicast_using_group = unicast_using_group;
+
+	switch (security_level) {
+	default:
+	case SEC_LEVEL_0:
+		security->allowed_ciphers = IPW_NONE_CIPHER;
+		break;
+	case SEC_LEVEL_1:
+		security->allowed_ciphers = IPW_WEP40_CIPHER |
+		    IPW_WEP104_CIPHER;
+		break;
+	case SEC_LEVEL_2:
+		security->allowed_ciphers = IPW_WEP40_CIPHER |
+		    IPW_WEP104_CIPHER | IPW_TKIP_CIPHER;
+		break;
+	case SEC_LEVEL_2_CKIP:
+		security->allowed_ciphers = IPW_WEP40_CIPHER |
+		    IPW_WEP104_CIPHER | IPW_CKIP_CIPHER;
+		break;
+	case SEC_LEVEL_3:
+		security->allowed_ciphers = IPW_WEP40_CIPHER |
+		    IPW_WEP104_CIPHER | IPW_TKIP_CIPHER | IPW_CCMP_CIPHER;
+		break;
+	}
+
+	IPW_DEBUG_HC("SET_SECURITY_INFORMATION: auth:%d cipher:0x%02X (level %d)\n",
+		     security->auth_mode, security->allowed_ciphers,
+		     security_level);
+
+	security->replay_counters_number = 0;
+
+	if (!batch_mode) {
+		err = ipw2100_disable_adapter(priv);
+		if (err)
+			return err;
+	}
+
+	err = ipw2100_hw_send_command(priv, &cmd);
+
+	if (!batch_mode)
+		ipw2100_enable_adapter(priv);
+
+	return err;
+}
+
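+/* Scale a dBm value into the firmware's 0-16 TX power index (the default
+ * sentinel is passed through unchanged); the command is only issued when
+ * operating in ad-hoc mode. */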
+static int ipw2100_set_tx_power(struct ipw2100_priv *priv, u32 tx_power)
+{
+	struct host_command cmd = {
+		.host_command = TX_POWER_INDEX,
+		.host_command_sequence = 0,
+		.host_command_length = 4
+	};
+	int err = 0;
+	u32 tmp = tx_power;
+
+	if (tx_power != IPW_TX_POWER_DEFAULT)
+		tmp = (tx_power - IPW_TX_POWER_MIN_DBM) * 16 /
+		      (IPW_TX_POWER_MAX_DBM - IPW_TX_POWER_MIN_DBM);
+
+	cmd.host_command_parameters[0] = tmp;
+
+	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
+		err = ipw2100_hw_send_command(priv, &cmd);
+	if (!err)
+		priv->tx_power = tx_power;
+
+	return 0;
+}
+
+static int ipw2100_set_ibss_beacon_interval(struct ipw2100_priv *priv,
+					    u32 interval, int batch_mode)
+{
+	struct host_command cmd = {
+		.host_command = BEACON_INTERVAL,
+		.host_command_sequence = 0,
+		.host_command_length = 4
+	};
+	int err;
+
+	cmd.host_command_parameters[0] = interval;
+
+	IPW_DEBUG_INFO("enter\n");
+
+	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
+		if (!batch_mode) {
+			err = ipw2100_disable_adapter(priv);
+			if (err)
+				return err;
+		}
+
+		ipw2100_hw_send_command(priv, &cmd);
+
+		if (!batch_mode) {
+			err = ipw2100_enable_adapter(priv);
+			if (err)
+				return err;
+		}
+	}
+
+	IPW_DEBUG_INFO("exit\n");
+
+	return 0;
+}
+
+static void ipw2100_queues_initialize(struct ipw2100_priv *priv)
+{
+	ipw2100_tx_initialize(priv);
+	ipw2100_rx_initialize(priv);
+	ipw2100_msg_initialize(priv);
+}
+
+static void ipw2100_queues_free(struct ipw2100_priv *priv)
+{
+	ipw2100_tx_free(priv);
+	ipw2100_rx_free(priv);
+	ipw2100_msg_free(priv);
+}
+
+static int ipw2100_queues_allocate(struct ipw2100_priv *priv)
+{
+	if (ipw2100_tx_allocate(priv) ||
+	    ipw2100_rx_allocate(priv) || ipw2100_msg_allocate(priv))
+		goto fail;
+
+	return 0;
+
+      fail:
+	ipw2100_tx_free(priv);
+	ipw2100_rx_free(priv);
+	ipw2100_msg_free(priv);
+	return -ENOMEM;
+}
+
+#define IPW_PRIVACY_CAPABLE 0x0008
+
+static int ipw2100_set_wep_flags(struct ipw2100_priv *priv, u32 flags,
+				 int batch_mode)
+{
+	struct host_command cmd = {
+		.host_command = WEP_FLAGS,
+		.host_command_sequence = 0,
+		.host_command_length = 4
+	};
+	int err;
+
+	cmd.host_command_parameters[0] = flags;
+
+	IPW_DEBUG_HC("WEP_FLAGS: flags = 0x%08X\n", flags);
+
+	if (!batch_mode) {
+		err = ipw2100_disable_adapter(priv);
+		if (err) {
+			printk(KERN_ERR DRV_NAME
+			       ": %s: Could not disable adapter %d\n",
+			       priv->net_dev->name, err);
+			return err;
+		}
+	}
+
+	/* send cmd to firmware */
+	err = ipw2100_hw_send_command(priv, &cmd);
+
+	if (!batch_mode)
+		ipw2100_enable_adapter(priv);
+
+	return err;
+}
+
+struct ipw2100_wep_key {
+	u8 idx;
+	u8 len;
+	u8 key[13];
+};
+
+/* Macros to ease up printing WEP keys */
+#define WEP_FMT_64  "%02X%02X%02X%02X-%02X"
+#define WEP_FMT_128 "%02X%02X%02X%02X-%02X%02X%02X%02X-%02X%02X%02X"
+#define WEP_STR_64(x) x[0],x[1],x[2],x[3],x[4]
+#define WEP_STR_128(x) x[0],x[1],x[2],x[3],x[4],x[5],x[6],x[7],x[8],x[9],x[10]
+
+/**
+ * Set the WEP key
+ *
+ * @priv: struct to work on
+ * @idx: index of the key we want to set
+ * @key: ptr to the key data to set
+ * @len: length of the buffer at @key
+ * @batch_mode: FIXME perform the operation in batch mode, not
+ *              disabling the device.
+ *
+ * @returns 0 if OK, < 0 errno code on error.
+ *
+ * Fill out a command structure with the new WEP key, its length and
+ * index, and send it down the wire.
+ */
+static int ipw2100_set_key(struct ipw2100_priv *priv,
+			   int idx, char *key, int len, int batch_mode)
+{
+	int keylen = len ? (len <= 5 ? 5 : 13) : 0;
+	struct host_command cmd = {
+		.host_command = WEP_KEY_INFO,
+		.host_command_sequence = 0,
+		.host_command_length = sizeof(struct ipw2100_wep_key),
+	};
+	struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
+	int err;
+
+	IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
+		     idx, keylen, len);
+
+	/* NOTE: We don't check cached values in case the firmware was reset
+	 * or some other problem is occurring.  If the user is setting the key,
+	 * then we push the change */
+
+	wep_key->idx = idx;
+	wep_key->len = keylen;
+
+	if (keylen) {
+		memcpy(wep_key->key, key, len);
+		memset(wep_key->key + len, 0, keylen - len);
+	}
+
+	/* Will be optimized out on debug not being configured in */
+	if (keylen == 0)
+		IPW_DEBUG_WEP("%s: Clearing key %d\n",
+			      priv->net_dev->name, wep_key->idx);
+	else if (keylen == 5)
+		IPW_DEBUG_WEP("%s: idx: %d, len: %d key: " WEP_FMT_64 "\n",
+			      priv->net_dev->name, wep_key->idx, wep_key->len,
+			      WEP_STR_64(wep_key->key));
+	else
+		IPW_DEBUG_WEP("%s: idx: %d, len: %d key: " WEP_FMT_128
+			      "\n",
+			      priv->net_dev->name, wep_key->idx, wep_key->len,
+			      WEP_STR_128(wep_key->key));
+
+	if (!batch_mode) {
+		err = ipw2100_disable_adapter(priv);
+		/* FIXME: IPG: shouldn't this printk be in _disable_adapter()? */
+		if (err) {
+			printk(KERN_ERR DRV_NAME
+			       ": %s: Could not disable adapter %d\n",
+			       priv->net_dev->name, err);
+			return err;
+		}
+	}
+
+	/* send cmd to firmware */
+	err = ipw2100_hw_send_command(priv, &cmd);
+
+	if (!batch_mode) {
+		int err2 = ipw2100_enable_adapter(priv);
+		if (err == 0)
+			err = err2;
+	}
+	return err;
+}
+
+static int ipw2100_set_key_index(struct ipw2100_priv *priv,
+				 int idx, int batch_mode)
+{
+	struct host_command cmd = {
+		.host_command = WEP_KEY_INDEX,
+		.host_command_sequence = 0,
+		.host_command_length = 4,
+		.host_command_parameters = {idx},
+	};
+	int err;
+
+	IPW_DEBUG_HC("WEP_KEY_INDEX: index = %d\n", idx);
+
+	if (idx < 0 || idx > 3)
+		return -EINVAL;
+
+	if (!batch_mode) {
+		err = ipw2100_disable_adapter(priv);
+		if (err) {
+			printk(KERN_ERR DRV_NAME
+			       ": %s: Could not disable adapter %d\n",
+			       priv->net_dev->name, err);
+			return err;
+		}
+	}
+
+	/* send cmd to firmware */
+	err = ipw2100_hw_send_command(priv, &cmd);
+
+	if (!batch_mode)
+		ipw2100_enable_adapter(priv);
+
+	return err;
+}
+
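+/* Push the cached libipw security state -- auth mode, security level,
+ * WEP keys, active key index and the privacy flag -- into the firmware.
+ * Does nothing unless the adapter is running. */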
+static int ipw2100_configure_security(struct ipw2100_priv *priv, int batch_mode)
+{
+	int i, err, auth_mode, sec_level, use_group;
+
+	if (!(priv->status & STATUS_RUNNING))
+		return 0;
+
+	if (!batch_mode) {
+		err = ipw2100_disable_adapter(priv);
+		if (err)
+			return err;
+	}
+
+	if (!priv->ieee->sec.enabled) {
+		err = ipw2100_set_security_information(priv, IPW_AUTH_OPEN,
+						       SEC_LEVEL_0, 0, 1);
+	} else {
+		auth_mode = IPW_AUTH_OPEN;
+		if (priv->ieee->sec.flags & SEC_AUTH_MODE) {
+			if (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)
+				auth_mode = IPW_AUTH_SHARED;
+			else if (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP)
+				auth_mode = IPW_AUTH_LEAP_CISCO_ID;
+		}
+
+		sec_level = SEC_LEVEL_0;
+		if (priv->ieee->sec.flags & SEC_LEVEL)
+			sec_level = priv->ieee->sec.level;
+
+		use_group = 0;
+		if (priv->ieee->sec.flags & SEC_UNICAST_GROUP)
+			use_group = priv->ieee->sec.unicast_uses_group;
+
+		err = ipw2100_set_security_information(priv, auth_mode,
+						       sec_level, use_group, 1);
+	}
+
+	if (err)
+		goto exit;
+
+	if (priv->ieee->sec.enabled) {
+		for (i = 0; i < 4; i++) {
+			if (!(priv->ieee->sec.flags & (1 << i))) {
+				memset(priv->ieee->sec.keys[i], 0, WEP_KEY_LEN);
+				priv->ieee->sec.key_sizes[i] = 0;
+			} else {
+				err = ipw2100_set_key(priv, i,
+						      priv->ieee->sec.keys[i],
+						      priv->ieee->sec.key_sizes[i],
+						      1);
+				if (err)
+					goto exit;
+			}
+		}
+
+		ipw2100_set_key_index(priv, priv->ieee->crypt_info.tx_keyidx, 1);
+	}
+
+	/* Always enable privacy so the Host can filter WEP packets if
+	 * encrypted data is sent up */
+	err = ipw2100_set_wep_flags(priv,
+				    priv->ieee->sec.enabled ?
+				    IPW_PRIVACY_CAPABLE : 0, 1);
+	if (err)
+		goto exit;
+
+	priv->status &= ~STATUS_SECURITY_UPDATED;
+
+      exit:
+	if (!batch_mode)
+		ipw2100_enable_adapter(priv);
+
+	return err;
+}
+
+static void ipw2100_security_work(struct work_struct *work)
+{
+	struct ipw2100_priv *priv =
+		container_of(work, struct ipw2100_priv, security_work.work);
+
+	/* If we happen to have reconnected before we get a chance to
+	 * process this, then update the security settings--which causes
+	 * a disassociation to occur */
+	if (!(priv->status & STATUS_ASSOCIATED) &&
+	    priv->status & STATUS_SECURITY_UPDATED)
+		ipw2100_configure_security(priv, 0);
+}
+
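+/* Entry point through which libipw hands the driver updated security
+ * settings; merge them into the cached sec state and, when we are neither
+ * associated nor associating, push the new configuration to the firmware. */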
+static void shim__set_security(struct net_device *dev,
+			       struct libipw_security *sec)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	int i, force_update = 0;
+
+	mutex_lock(&priv->action_mutex);
+	if (!(priv->status & STATUS_INITIALIZED))
+		goto done;
+
+	for (i = 0; i < 4; i++) {
+		if (sec->flags & (1 << i)) {
+			priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
+			if (sec->key_sizes[i] == 0)
+				priv->ieee->sec.flags &= ~(1 << i);
+			else
+				memcpy(priv->ieee->sec.keys[i], sec->keys[i],
+				       sec->key_sizes[i]);
+			if (sec->level == SEC_LEVEL_1) {
+				priv->ieee->sec.flags |= (1 << i);
+				priv->status |= STATUS_SECURITY_UPDATED;
+			} else
+				priv->ieee->sec.flags &= ~(1 << i);
+		}
+	}
+
+	if ((sec->flags & SEC_ACTIVE_KEY) &&
+	    priv->ieee->sec.active_key != sec->active_key) {
+		if (sec->active_key <= 3) {
+			priv->ieee->sec.active_key = sec->active_key;
+			priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
+		} else
+			priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
+
+		priv->status |= STATUS_SECURITY_UPDATED;
+	}
+
+	if ((sec->flags & SEC_AUTH_MODE) &&
+	    (priv->ieee->sec.auth_mode != sec->auth_mode)) {
+		priv->ieee->sec.auth_mode = sec->auth_mode;
+		priv->ieee->sec.flags |= SEC_AUTH_MODE;
+		priv->status |= STATUS_SECURITY_UPDATED;
+	}
+
+	if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
+		priv->ieee->sec.flags |= SEC_ENABLED;
+		priv->ieee->sec.enabled = sec->enabled;
+		priv->status |= STATUS_SECURITY_UPDATED;
+		force_update = 1;
+	}
+
+	if (sec->flags & SEC_ENCRYPT)
+		priv->ieee->sec.encrypt = sec->encrypt;
+
+	if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
+		priv->ieee->sec.level = sec->level;
+		priv->ieee->sec.flags |= SEC_LEVEL;
+		priv->status |= STATUS_SECURITY_UPDATED;
+	}
+
+	IPW_DEBUG_WEP("Security flags: %c %c%c%c%c %c%c%c%c\n",
+		      priv->ieee->sec.flags & (1 << 8) ? '1' : '0',
+		      priv->ieee->sec.flags & (1 << 7) ? '1' : '0',
+		      priv->ieee->sec.flags & (1 << 6) ? '1' : '0',
+		      priv->ieee->sec.flags & (1 << 5) ? '1' : '0',
+		      priv->ieee->sec.flags & (1 << 4) ? '1' : '0',
+		      priv->ieee->sec.flags & (1 << 3) ? '1' : '0',
+		      priv->ieee->sec.flags & (1 << 2) ? '1' : '0',
+		      priv->ieee->sec.flags & (1 << 1) ? '1' : '0',
+		      priv->ieee->sec.flags & (1 << 0) ? '1' : '0');
+
+/* As a temporary work around to enable WPA until we figure out why
+ * wpa_supplicant toggles the security capability of the driver, which
+ * forces a disassociation with force_update...
+ *
+ *	if (force_update || !(priv->status & STATUS_ASSOCIATED))*/
+	if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
+		ipw2100_configure_security(priv, 0);
+      done:
+	mutex_unlock(&priv->action_mutex);
+}
+
+static int ipw2100_adapter_setup(struct ipw2100_priv *priv)
+{
+	int err;
+	int batch_mode = 1;
+	u8 *bssid;
+
+	IPW_DEBUG_INFO("enter\n");
+
+	err = ipw2100_disable_adapter(priv);
+	if (err)
+		return err;
+#ifdef CONFIG_IPW2100_MONITOR
+	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
+		err = ipw2100_set_channel(priv, priv->channel, batch_mode);
+		if (err)
+			return err;
+
+		IPW_DEBUG_INFO("exit\n");
+
+		return 0;
+	}
+#endif				/* CONFIG_IPW2100_MONITOR */
+
+	err = ipw2100_read_mac_address(priv);
+	if (err)
+		return -EIO;
+
+	err = ipw2100_set_mac_address(priv, batch_mode);
+	if (err)
+		return err;
+
+	err = ipw2100_set_port_type(priv, priv->ieee->iw_mode, batch_mode);
+	if (err)
+		return err;
+
+	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
+		err = ipw2100_set_channel(priv, priv->channel, batch_mode);
+		if (err)
+			return err;
+	}
+
+	err = ipw2100_system_config(priv, batch_mode);
+	if (err)
+		return err;
+
+	err = ipw2100_set_tx_rates(priv, priv->tx_rates, batch_mode);
+	if (err)
+		return err;
+
+	/* Default to power mode OFF */
+	err = ipw2100_set_power_mode(priv, IPW_POWER_MODE_CAM);
+	if (err)
+		return err;
+
+	err = ipw2100_set_rts_threshold(priv, priv->rts_threshold);
+	if (err)
+		return err;
+
+	if (priv->config & CFG_STATIC_BSSID)
+		bssid = priv->bssid;
+	else
+		bssid = NULL;
+	err = ipw2100_set_mandatory_bssid(priv, bssid, batch_mode);
+	if (err)
+		return err;
+
+	if (priv->config & CFG_STATIC_ESSID)
+		err = ipw2100_set_essid(priv, priv->essid, priv->essid_len,
+					batch_mode);
+	else
+		err = ipw2100_set_essid(priv, NULL, 0, batch_mode);
+	if (err)
+		return err;
+
+	err = ipw2100_configure_security(priv, batch_mode);
+	if (err)
+		return err;
+
+	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
+		err =
+		    ipw2100_set_ibss_beacon_interval(priv,
+						     priv->beacon_interval,
+						     batch_mode);
+		if (err)
+			return err;
+
+		err = ipw2100_set_tx_power(priv, priv->tx_power);
+		if (err)
+			return err;
+	}
+
+	/*
+	   err = ipw2100_set_fragmentation_threshold(
+	   priv, priv->frag_threshold, batch_mode);
+	   if (err)
+	   return err;
+	 */
+
+	IPW_DEBUG_INFO("exit\n");
+
+	return 0;
+}
+
+/*************************************************************************
+ *
+ * EXTERNALLY CALLED METHODS
+ *
+ *************************************************************************/
+
+/* This method is called by the network layer -- not to be confused with
+ * ipw2100_set_mac_address() declared above, which this driver (and this
+ * method as well) calls to talk to the firmware */
+static int ipw2100_set_address(struct net_device *dev, void *p)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	struct sockaddr *addr = p;
+	int err = 0;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	mutex_lock(&priv->action_mutex);
+
+	priv->config |= CFG_CUSTOM_MAC;
+	memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
+
+	err = ipw2100_set_mac_address(priv, 0);
+	if (err)
+		goto done;
+
+	priv->reset_backoff = 0;
+	mutex_unlock(&priv->action_mutex);
+	ipw2100_reset_adapter(&priv->reset_work.work);
+	return 0;
+
+      done:
+	mutex_unlock(&priv->action_mutex);
+	return err;
+}
+
+static int ipw2100_open(struct net_device *dev)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	unsigned long flags;
+	IPW_DEBUG_INFO("dev->open\n");
+
+	spin_lock_irqsave(&priv->low_lock, flags);
+	if (priv->status & STATUS_ASSOCIATED) {
+		netif_carrier_on(dev);
+		netif_start_queue(dev);
+	}
+	spin_unlock_irqrestore(&priv->low_lock, flags);
+
+	return 0;
+}
+
+static int ipw2100_close(struct net_device *dev)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	unsigned long flags;
+	struct list_head *element;
+	struct ipw2100_tx_packet *packet;
+
+	IPW_DEBUG_INFO("enter\n");
+
+	spin_lock_irqsave(&priv->low_lock, flags);
+
+	if (priv->status & STATUS_ASSOCIATED)
+		netif_carrier_off(dev);
+	netif_stop_queue(dev);
+
+	/* Flush the TX queue ... */
+	while (!list_empty(&priv->tx_pend_list)) {
+		element = priv->tx_pend_list.next;
+		packet = list_entry(element, struct ipw2100_tx_packet, list);
+
+		list_del(element);
+		DEC_STAT(&priv->tx_pend_stat);
+
+		libipw_txb_free(packet->info.d_struct.txb);
+		packet->info.d_struct.txb = NULL;
+
+		list_add_tail(element, &priv->tx_free_list);
+		INC_STAT(&priv->tx_free_stat);
+	}
+	spin_unlock_irqrestore(&priv->low_lock, flags);
+
+	IPW_DEBUG_INFO("exit\n");
+
+	return 0;
+}
+
+/*
+ * TODO:  Fix this function... it's just wrong
+ */
+static void ipw2100_tx_timeout(struct net_device *dev)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+
+	dev->stats.tx_errors++;
+
+#ifdef CONFIG_IPW2100_MONITOR
+	if (priv->ieee->iw_mode == IW_MODE_MONITOR)
+		return;
+#endif
+
+	IPW_DEBUG_INFO("%s: TX timed out.  Scheduling firmware restart.\n",
+		       dev->name);
+	schedule_reset(priv);
+}
+
+static int ipw2100_wpa_enable(struct ipw2100_priv *priv, int value)
+{
+	/* This is called when wpa_supplicant loads and closes the driver
+	 * interface. */
+	priv->ieee->wpa_enabled = value;
+	return 0;
+}
+
+static int ipw2100_wpa_set_auth_algs(struct ipw2100_priv *priv, int value)
+{
+
+	struct libipw_device *ieee = priv->ieee;
+	struct libipw_security sec = {
+		.flags = SEC_AUTH_MODE,
+	};
+	int ret = 0;
+
+	if (value & IW_AUTH_ALG_SHARED_KEY) {
+		sec.auth_mode = WLAN_AUTH_SHARED_KEY;
+		ieee->open_wep = 0;
+	} else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
+		sec.auth_mode = WLAN_AUTH_OPEN;
+		ieee->open_wep = 1;
+	} else if (value & IW_AUTH_ALG_LEAP) {
+		sec.auth_mode = WLAN_AUTH_LEAP;
+		ieee->open_wep = 1;
+	} else
+		return -EINVAL;
+
+	if (ieee->set_security)
+		ieee->set_security(ieee->dev, &sec);
+	else
+		ret = -EOPNOTSUPP;
+
+	return ret;
+}
+
+static void ipw2100_wpa_assoc_frame(struct ipw2100_priv *priv,
+				    char *wpa_ie, int wpa_ie_len)
+{
+
+	struct ipw2100_wpa_assoc_frame frame;
+
+	frame.fixed_ie_mask = 0;
+
+	/* copy WPA IE */
+	memcpy(frame.var_ie, wpa_ie, wpa_ie_len);
+	frame.var_ie_len = wpa_ie_len;
+
+	/* make sure WPA is enabled */
+	ipw2100_wpa_enable(priv, 1);
+	ipw2100_set_wpa_ie(priv, &frame, 0);
+}
+
+static void ipw_ethtool_get_drvinfo(struct net_device *dev,
+				    struct ethtool_drvinfo *info)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	char fw_ver[64], ucode_ver[64];
+
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+
+	ipw2100_get_fwversion(priv, fw_ver, sizeof(fw_ver));
+	ipw2100_get_ucodeversion(priv, ucode_ver, sizeof(ucode_ver));
+
+	snprintf(info->fw_version, sizeof(info->fw_version), "%s:%d:%s",
+		 fw_ver, priv->eeprom_version, ucode_ver);
+
+	strlcpy(info->bus_info, pci_name(priv->pci_dev),
+		sizeof(info->bus_info));
+}
+
+static u32 ipw2100_ethtool_get_link(struct net_device *dev)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	return (priv->status & STATUS_ASSOCIATED) ? 1 : 0;
+}
+
+static const struct ethtool_ops ipw2100_ethtool_ops = {
+	.get_link = ipw2100_ethtool_get_link,
+	.get_drvinfo = ipw_ethtool_get_drvinfo,
+};
+
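+/* Watchdog run every HZ / 2: reads the firmware RTC ordinal and, if the
+ * value stopped advancing since the previous poll (or a fatal error has
+ * been latched), treats the firmware as hung and schedules a reset. */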
+static void ipw2100_hang_check(struct work_struct *work)
+{
+	struct ipw2100_priv *priv =
+		container_of(work, struct ipw2100_priv, hang_check.work);
+	unsigned long flags;
+	u32 rtc = 0xa5a5a5a5;
+	u32 len = sizeof(rtc);
+	int restart = 0;
+
+	spin_lock_irqsave(&priv->low_lock, flags);
+
+	if (priv->fatal_error != 0) {
+		/* If fatal_error is set then we need to restart */
+		IPW_DEBUG_INFO("%s: Hardware fatal error detected.\n",
+			       priv->net_dev->name);
+
+		restart = 1;
+	} else if (ipw2100_get_ordinal(priv, IPW_ORD_RTC_TIME, &rtc, &len) ||
+		   (rtc == priv->last_rtc)) {
+		/* Check if firmware is hung */
+		IPW_DEBUG_INFO("%s: Firmware RTC stalled.\n",
+			       priv->net_dev->name);
+
+		restart = 1;
+	}
+
+	if (restart) {
+		/* Kill timer */
+		priv->stop_hang_check = 1;
+		priv->hangs++;
+
+		/* Restart the NIC */
+		schedule_reset(priv);
+	}
+
+	priv->last_rtc = rtc;
+
+	if (!priv->stop_hang_check)
+		schedule_delayed_work(&priv->hang_check, HZ / 2);
+
+	spin_unlock_irqrestore(&priv->low_lock, flags);
+}
+
+static void ipw2100_rf_kill(struct work_struct *work)
+{
+	struct ipw2100_priv *priv =
+		container_of(work, struct ipw2100_priv, rf_kill.work);
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->low_lock, flags);
+
+	if (rf_kill_active(priv)) {
+		IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
+		if (!priv->stop_rf_kill)
+			schedule_delayed_work(&priv->rf_kill,
+					      round_jiffies_relative(HZ));
+		goto exit_unlock;
+	}
+
+	/* RF Kill is now disabled, so bring the device back up */
+
+	if (!(priv->status & STATUS_RF_KILL_MASK)) {
+		IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
+				  "device\n");
+		schedule_reset(priv);
+	} else
+		IPW_DEBUG_RF_KILL("HW RF Kill deactivated.  SW RF Kill still "
+				  "enabled\n");
+
+      exit_unlock:
+	spin_unlock_irqrestore(&priv->low_lock, flags);
+}
+
+static void ipw2100_irq_tasklet(struct ipw2100_priv *priv);
+
+static const struct net_device_ops ipw2100_netdev_ops = {
+	.ndo_open		= ipw2100_open,
+	.ndo_stop		= ipw2100_close,
+	.ndo_start_xmit		= libipw_xmit,
+	.ndo_tx_timeout		= ipw2100_tx_timeout,
+	.ndo_set_mac_address	= ipw2100_set_address,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
+/* Look into using the netdev destructor to shut down libipw? */
+
+static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
+					       void __iomem * ioaddr)
+{
+	struct ipw2100_priv *priv;
+	struct net_device *dev;
+
+	dev = alloc_libipw(sizeof(struct ipw2100_priv), 0);
+	if (!dev)
+		return NULL;
+	priv = libipw_priv(dev);
+	priv->ieee = netdev_priv(dev);
+	priv->pci_dev = pci_dev;
+	priv->net_dev = dev;
+	priv->ioaddr = ioaddr;
+
+	priv->ieee->hard_start_xmit = ipw2100_tx;
+	priv->ieee->set_security = shim__set_security;
+
+	priv->ieee->perfect_rssi = -20;
+	priv->ieee->worst_rssi = -85;
+
+	dev->netdev_ops = &ipw2100_netdev_ops;
+	dev->ethtool_ops = &ipw2100_ethtool_ops;
+	dev->wireless_handlers = &ipw2100_wx_handler_def;
+	priv->wireless_data.libipw = priv->ieee;
+	dev->wireless_data = &priv->wireless_data;
+	dev->watchdog_timeo = 3 * HZ;
+	dev->irq = 0;
+	dev->min_mtu = 68;
+	dev->max_mtu = LIBIPW_DATA_LEN;
+
+	/* NOTE: We don't use the wireless_handlers hook
+	 * in dev as the system will start throwing WX requests
+	 * to us before we're actually initialized and it just
+	 * ends up causing problems.  So, we just handle
+	 * the WX extensions through the ipw2100_ioctl interface */
+
+	/* memset() puts everything to 0, so we only have to explicitly set
+	 * those values that need to be something else */
+
+	/* If power management is turned on, default to AUTO mode */
+	priv->power_mode = IPW_POWER_AUTO;
+
+#ifdef CONFIG_IPW2100_MONITOR
+	priv->config |= CFG_CRC_CHECK;
+#endif
+	priv->ieee->wpa_enabled = 0;
+	priv->ieee->drop_unencrypted = 0;
+	priv->ieee->privacy_invoked = 0;
+	priv->ieee->ieee802_1x = 1;
+
+	/* Set module parameters */
+	switch (network_mode) {
+	case 1:
+		priv->ieee->iw_mode = IW_MODE_ADHOC;
+		break;
+#ifdef CONFIG_IPW2100_MONITOR
+	case 2:
+		priv->ieee->iw_mode = IW_MODE_MONITOR;
+		break;
+#endif
+	default:
+	case 0:
+		priv->ieee->iw_mode = IW_MODE_INFRA;
+		break;
+	}
+
+	if (disable == 1)
+		priv->status |= STATUS_RF_KILL_SW;
+
+	if (channel != 0 &&
+	    ((channel >= REG_MIN_CHANNEL) && (channel <= REG_MAX_CHANNEL))) {
+		priv->config |= CFG_STATIC_CHANNEL;
+		priv->channel = channel;
+	}
+
+	if (associate)
+		priv->config |= CFG_ASSOCIATE;
+
+	priv->beacon_interval = DEFAULT_BEACON_INTERVAL;
+	priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
+	priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
+	priv->rts_threshold = DEFAULT_RTS_THRESHOLD | RTS_DISABLED;
+	priv->frag_threshold = DEFAULT_FTS | FRAG_DISABLED;
+	priv->tx_power = IPW_TX_POWER_DEFAULT;
+	priv->tx_rates = DEFAULT_TX_RATES;
+
+	strcpy(priv->nick, "ipw2100");
+
+	spin_lock_init(&priv->low_lock);
+	mutex_init(&priv->action_mutex);
+	mutex_init(&priv->adapter_mutex);
+
+	init_waitqueue_head(&priv->wait_command_queue);
+
+	netif_carrier_off(dev);
+
+	INIT_LIST_HEAD(&priv->msg_free_list);
+	INIT_LIST_HEAD(&priv->msg_pend_list);
+	INIT_STAT(&priv->msg_free_stat);
+	INIT_STAT(&priv->msg_pend_stat);
+
+	INIT_LIST_HEAD(&priv->tx_free_list);
+	INIT_LIST_HEAD(&priv->tx_pend_list);
+	INIT_STAT(&priv->tx_free_stat);
+	INIT_STAT(&priv->tx_pend_stat);
+
+	INIT_LIST_HEAD(&priv->fw_pend_list);
+	INIT_STAT(&priv->fw_pend_stat);
+
+	INIT_DELAYED_WORK(&priv->reset_work, ipw2100_reset_adapter);
+	INIT_DELAYED_WORK(&priv->security_work, ipw2100_security_work);
+	INIT_DELAYED_WORK(&priv->wx_event_work, ipw2100_wx_event_work);
+	INIT_DELAYED_WORK(&priv->hang_check, ipw2100_hang_check);
+	INIT_DELAYED_WORK(&priv->rf_kill, ipw2100_rf_kill);
+	INIT_DELAYED_WORK(&priv->scan_event, ipw2100_scan_event);
+
+	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
+		     ipw2100_irq_tasklet, (unsigned long)priv);
+
+	/* NOTE:  We do not start the deferred work for status checks yet */
+	priv->stop_rf_kill = 1;
+	priv->stop_hang_check = 1;
+
+	return dev;
+}
+
+static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
+				const struct pci_device_id *ent)
+{
+	void __iomem *ioaddr;
+	struct net_device *dev = NULL;
+	struct ipw2100_priv *priv = NULL;
+	int err = 0;
+	int registered = 0;
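+	/* registered: 1 = wiphy registered (ipw2100_wdev_init), 2 = also
+	 * registered with the network layer (register_netdev); used by the
+	 * fail path below to unwind only what was actually set up */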
+	u32 val;
+
+	IPW_DEBUG_INFO("enter\n");
+
+	if (!(pci_resource_flags(pci_dev, 0) & IORESOURCE_MEM)) {
+		IPW_DEBUG_INFO("weird - resource type is not memory\n");
+		err = -ENODEV;
+		goto out;
+	}
+
+	ioaddr = pci_iomap(pci_dev, 0, 0);
+	if (!ioaddr) {
+		printk(KERN_WARNING DRV_NAME
+		       ": Error calling pci_iomap.\n");
+		err = -EIO;
+		goto fail;
+	}
+
+	/* allocate and initialize our net_device */
+	dev = ipw2100_alloc_device(pci_dev, ioaddr);
+	if (!dev) {
+		printk(KERN_WARNING DRV_NAME
+		       ": Error calling ipw2100_alloc_device.\n");
+		err = -ENOMEM;
+		goto fail;
+	}
+
+	/* set up PCI mappings for device */
+	err = pci_enable_device(pci_dev);
+	if (err) {
+		printk(KERN_WARNING DRV_NAME
+		       ": Error calling pci_enable_device.\n");
+		return err;
+	}
+
+	priv = libipw_priv(dev);
+
+	pci_set_master(pci_dev);
+	pci_set_drvdata(pci_dev, priv);
+
+	err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+	if (err) {
+		printk(KERN_WARNING DRV_NAME
+		       ": Error calling pci_set_dma_mask.\n");
+		pci_disable_device(pci_dev);
+		return err;
+	}
+
+	err = pci_request_regions(pci_dev, DRV_NAME);
+	if (err) {
+		printk(KERN_WARNING DRV_NAME
+		       ": Error calling pci_request_regions.\n");
+		pci_disable_device(pci_dev);
+		return err;
+	}
+
+	/* We disable the RETRY_TIMEOUT register (0x41) to keep
+	 * PCI Tx retries from interfering with C3 CPU state */
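+	/* 0x41 is byte 1 of the dword read at 0x40, hence the 0x0000ff00
+	 * mask and the read-modify-write below */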
+	pci_read_config_dword(pci_dev, 0x40, &val);
+	if ((val & 0x0000ff00) != 0)
+		pci_write_config_dword(pci_dev, 0x40, val & 0xffff00ff);
+
+	if (!ipw2100_hw_is_adapter_in_system(dev)) {
+		printk(KERN_WARNING DRV_NAME
+		       ": Device not found via register read.\n");
+		err = -ENODEV;
+		goto fail;
+	}
+
+	SET_NETDEV_DEV(dev, &pci_dev->dev);
+
+	/* Force interrupts to be shut off on the device */
+	priv->status |= STATUS_INT_ENABLED;
+	ipw2100_disable_interrupts(priv);
+
+	/* Allocate and initialize the Tx/Rx queues and lists */
+	if (ipw2100_queues_allocate(priv)) {
+		printk(KERN_WARNING DRV_NAME
+		       ": Error calling ipw2100_queues_allocate.\n");
+		err = -ENOMEM;
+		goto fail;
+	}
+	ipw2100_queues_initialize(priv);
+
+	err = request_irq(pci_dev->irq,
+			  ipw2100_interrupt, IRQF_SHARED, dev->name, priv);
+	if (err) {
+		printk(KERN_WARNING DRV_NAME
+		       ": Error calling request_irq: %d.\n", pci_dev->irq);
+		goto fail;
+	}
+	dev->irq = pci_dev->irq;
+
+	IPW_DEBUG_INFO("Attempting to register device...\n");
+
+	printk(KERN_INFO DRV_NAME
+	       ": Detected Intel PRO/Wireless 2100 Network Connection\n");
+
+	err = ipw2100_up(priv, 1);
+	if (err)
+		goto fail;
+
+	err = ipw2100_wdev_init(dev);
+	if (err)
+		goto fail;
+	registered = 1;
+
+	/* Bring up the interface.  Pre 0.46, after we registered the
+	 * network device we would call ipw2100_up.  This introduced a race
+	 * condition with newer hotplug configurations (network was coming
+	 * up and making calls before the device was initialized).
+	 */
+	err = register_netdev(dev);
+	if (err) {
+		printk(KERN_WARNING DRV_NAME
+		       ": Error calling register_netdev.\n");
+		goto fail;
+	}
+	registered = 2;
+
+	mutex_lock(&priv->action_mutex);
+
+	IPW_DEBUG_INFO("%s: Bound to %s\n", dev->name, pci_name(pci_dev));
+
+	/* perform this after register_netdev so that dev->name is set */
+	err = sysfs_create_group(&pci_dev->dev.kobj, &ipw2100_attribute_group);
+	if (err)
+		goto fail_unlock;
+
+	/* If the RF Kill switch is disabled, go ahead and complete the
+	 * startup sequence */
+	if (!(priv->status & STATUS_RF_KILL_MASK)) {
+		/* Enable the adapter - sends HOST_COMPLETE */
+		if (ipw2100_enable_adapter(priv)) {
+			printk(KERN_WARNING DRV_NAME
+			       ": %s: failed in call to enable adapter.\n",
+			       priv->net_dev->name);
+			ipw2100_hw_stop_adapter(priv);
+			err = -EIO;
+			goto fail_unlock;
+		}
+
+		/* Start a scan . . . */
+		ipw2100_set_scan_options(priv);
+		ipw2100_start_scan(priv);
+	}
+
+	IPW_DEBUG_INFO("exit\n");
+
+	priv->status |= STATUS_INITIALIZED;
+
+	mutex_unlock(&priv->action_mutex);
+out:
+	return err;
+
+      fail_unlock:
+	mutex_unlock(&priv->action_mutex);
+      fail:
+	if (dev) {
+		if (registered >= 2)
+			unregister_netdev(dev);
+
+		if (registered) {
+			wiphy_unregister(priv->ieee->wdev.wiphy);
+			kfree(priv->ieee->bg_band.channels);
+		}
+
+		ipw2100_hw_stop_adapter(priv);
+
+		ipw2100_disable_interrupts(priv);
+
+		if (dev->irq)
+			free_irq(dev->irq, priv);
+
+		ipw2100_kill_works(priv);
+
+		/* These are safe to call even if they weren't allocated */
+		ipw2100_queues_free(priv);
+		sysfs_remove_group(&pci_dev->dev.kobj,
+				   &ipw2100_attribute_group);
+
+		free_libipw(dev, 0);
+	}
+
+	pci_iounmap(pci_dev, ioaddr);
+
+	pci_release_regions(pci_dev);
+	pci_disable_device(pci_dev);
+	goto out;
+}
+
+static void ipw2100_pci_remove_one(struct pci_dev *pci_dev)
+{
+	struct ipw2100_priv *priv = pci_get_drvdata(pci_dev);
+	struct net_device *dev = priv->net_dev;
+
+	mutex_lock(&priv->action_mutex);
+
+	priv->status &= ~STATUS_INITIALIZED;
+
+	sysfs_remove_group(&pci_dev->dev.kobj, &ipw2100_attribute_group);
+
+#ifdef CONFIG_PM
+	if (ipw2100_firmware.version)
+		ipw2100_release_firmware(priv, &ipw2100_firmware);
+#endif
+	/* Take down the hardware */
+	ipw2100_down(priv);
+
+	/* Release the mutex so that the network subsystem can
+	 * complete any needed calls into the driver... */
+	mutex_unlock(&priv->action_mutex);
+
+	/* Unregister the device first - this results in close()
+	 * being called if the device is open.  If we free storage
+	 * first, then close() will crash.
+	 * FIXME: remove the comment above. */
+	unregister_netdev(dev);
+
+	ipw2100_kill_works(priv);
+
+	ipw2100_queues_free(priv);
+
+	/* Free potential debugging firmware snapshot */
+	ipw2100_snapshot_free(priv);
+
+	free_irq(dev->irq, priv);
+
+	pci_iounmap(pci_dev, priv->ioaddr);
+
+	/* wiphy_unregister needs to be here, before free_libipw */
+	wiphy_unregister(priv->ieee->wdev.wiphy);
+	kfree(priv->ieee->bg_band.channels);
+	free_libipw(dev, 0);
+
+	pci_release_regions(pci_dev);
+	pci_disable_device(pci_dev);
+
+	IPW_DEBUG_INFO("exit\n");
+}
+
+#ifdef CONFIG_PM
+static int ipw2100_suspend(struct pci_dev *pci_dev, pm_message_t state)
+{
+	struct ipw2100_priv *priv = pci_get_drvdata(pci_dev);
+	struct net_device *dev = priv->net_dev;
+
+	IPW_DEBUG_INFO("%s: Going into suspend...\n", dev->name);
+
+	mutex_lock(&priv->action_mutex);
+	if (priv->status & STATUS_INITIALIZED) {
+		/* Take down the device; powers it off, etc. */
+		ipw2100_down(priv);
+	}
+
+	/* Remove the PRESENT state of the device */
+	netif_device_detach(dev);
+
+	pci_save_state(pci_dev);
+	pci_disable_device(pci_dev);
+	pci_set_power_state(pci_dev, PCI_D3hot);
+
+	priv->suspend_at = ktime_get_boottime_seconds();
+
+	mutex_unlock(&priv->action_mutex);
+
+	return 0;
+}
+
+static int ipw2100_resume(struct pci_dev *pci_dev)
+{
+	struct ipw2100_priv *priv = pci_get_drvdata(pci_dev);
+	struct net_device *dev = priv->net_dev;
+	int err;
+	u32 val;
+
+	if (IPW2100_PM_DISABLED)
+		return 0;
+
+	mutex_lock(&priv->action_mutex);
+
+	IPW_DEBUG_INFO("%s: Coming out of suspend...\n", dev->name);
+
+	pci_set_power_state(pci_dev, PCI_D0);
+	err = pci_enable_device(pci_dev);
+	if (err) {
+		printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
+		       dev->name);
+		mutex_unlock(&priv->action_mutex);
+		return err;
+	}
+	pci_restore_state(pci_dev);
+
+	/*
+	 * Suspend/Resume resets the PCI configuration space, so we have to
+	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
+	 * from interfering with C3 CPU state. pci_restore_state won't help
+	 * here since it only restores the first 64 bytes pci config header.
+	 */
+	pci_read_config_dword(pci_dev, 0x40, &val);
+	if ((val & 0x0000ff00) != 0)
+		pci_write_config_dword(pci_dev, 0x40, val & 0xffff00ff);
+
+	/* Set the device back into the PRESENT state; this will also wake
+	 * the queue if needed */
+	netif_device_attach(dev);
+
+	priv->suspend_time = ktime_get_boottime_seconds() - priv->suspend_at;
+
+	/* Bring the device back up */
+	if (!(priv->status & STATUS_RF_KILL_SW))
+		ipw2100_up(priv, 0);
+
+	mutex_unlock(&priv->action_mutex);
+
+	return 0;
+}
+#endif
+
+static void ipw2100_shutdown(struct pci_dev *pci_dev)
+{
+	struct ipw2100_priv *priv = pci_get_drvdata(pci_dev);
+
+	/* Take down the device; powers it off, etc. */
+	ipw2100_down(priv);
+
+	pci_disable_device(pci_dev);
+}
+
+#define IPW2100_DEV_ID(x) { PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, x }
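+/* Fields map to struct pci_device_id: vendor, device, subvendor,
+ * subdevice.  Every entry is PCI device 0x1043 with Intel (0x8086) as
+ * subvendor; only the subsystem device ID x differs between boards. */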
+
+static const struct pci_device_id ipw2100_pci_id_table[] = {
+	IPW2100_DEV_ID(0x2520),	/* IN 2100A mPCI 3A */
+	IPW2100_DEV_ID(0x2521),	/* IN 2100A mPCI 3B */
+	IPW2100_DEV_ID(0x2524),	/* IN 2100A mPCI 3B */
+	IPW2100_DEV_ID(0x2525),	/* IN 2100A mPCI 3B */
+	IPW2100_DEV_ID(0x2526),	/* IN 2100A mPCI Gen A3 */
+	IPW2100_DEV_ID(0x2522),	/* IN 2100 mPCI 3B */
+	IPW2100_DEV_ID(0x2523),	/* IN 2100 mPCI 3A */
+	IPW2100_DEV_ID(0x2527),	/* IN 2100 mPCI 3B */
+	IPW2100_DEV_ID(0x2528),	/* IN 2100 mPCI 3B */
+	IPW2100_DEV_ID(0x2529),	/* IN 2100 mPCI 3B */
+	IPW2100_DEV_ID(0x252B),	/* IN 2100 mPCI 3A */
+	IPW2100_DEV_ID(0x252C),	/* IN 2100 mPCI 3A */
+	IPW2100_DEV_ID(0x252D),	/* IN 2100 mPCI 3A */
+
+	IPW2100_DEV_ID(0x2550),	/* IB 2100A mPCI 3B */
+	IPW2100_DEV_ID(0x2551),	/* IB 2100 mPCI 3B */
+	IPW2100_DEV_ID(0x2553),	/* IB 2100 mPCI 3B */
+	IPW2100_DEV_ID(0x2554),	/* IB 2100 mPCI 3B */
+	IPW2100_DEV_ID(0x2555),	/* IB 2100 mPCI 3B */
+
+	IPW2100_DEV_ID(0x2560),	/* DE 2100A mPCI 3A */
+	IPW2100_DEV_ID(0x2562),	/* DE 2100A mPCI 3A */
+	IPW2100_DEV_ID(0x2563),	/* DE 2100A mPCI 3A */
+	IPW2100_DEV_ID(0x2561),	/* DE 2100 mPCI 3A */
+	IPW2100_DEV_ID(0x2565),	/* DE 2100 mPCI 3A */
+	IPW2100_DEV_ID(0x2566),	/* DE 2100 mPCI 3A */
+	IPW2100_DEV_ID(0x2567),	/* DE 2100 mPCI 3A */
+
+	IPW2100_DEV_ID(0x2570),	/* GA 2100 mPCI 3B */
+
+	IPW2100_DEV_ID(0x2580),	/* TO 2100A mPCI 3B */
+	IPW2100_DEV_ID(0x2582),	/* TO 2100A mPCI 3B */
+	IPW2100_DEV_ID(0x2583),	/* TO 2100A mPCI 3B */
+	IPW2100_DEV_ID(0x2581),	/* TO 2100 mPCI 3B */
+	IPW2100_DEV_ID(0x2585),	/* TO 2100 mPCI 3B */
+	IPW2100_DEV_ID(0x2586),	/* TO 2100 mPCI 3B */
+	IPW2100_DEV_ID(0x2587),	/* TO 2100 mPCI 3B */
+
+	IPW2100_DEV_ID(0x2590),	/* SO 2100A mPCI 3B */
+	IPW2100_DEV_ID(0x2592),	/* SO 2100A mPCI 3B */
+	IPW2100_DEV_ID(0x2591),	/* SO 2100 mPCI 3B */
+	IPW2100_DEV_ID(0x2593),	/* SO 2100 mPCI 3B */
+	IPW2100_DEV_ID(0x2596),	/* SO 2100 mPCI 3B */
+	IPW2100_DEV_ID(0x2598),	/* SO 2100 mPCI 3B */
+
+	IPW2100_DEV_ID(0x25A0),	/* HP 2100 mPCI 3B */
+	{0,},
+};
+
+MODULE_DEVICE_TABLE(pci, ipw2100_pci_id_table);
+
+static struct pci_driver ipw2100_pci_driver = {
+	.name = DRV_NAME,
+	.id_table = ipw2100_pci_id_table,
+	.probe = ipw2100_pci_init_one,
+	.remove = ipw2100_pci_remove_one,
+#ifdef CONFIG_PM
+	.suspend = ipw2100_suspend,
+	.resume = ipw2100_resume,
+#endif
+	.shutdown = ipw2100_shutdown,
+};
+
+/**
+ * Initialize the ipw2100 driver/module
+ *
+ * @returns 0 if ok, < 0 errno code on error.
+ *
+ * Note: we cannot init the /proc stuff until the PCI driver is there,
+ * or we risk an unlikely race condition on someone accessing
+ * uninitialized data in the PCI dev struct through /proc.
+ */
+static int __init ipw2100_init(void)
+{
+	int ret;
+
+	printk(KERN_INFO DRV_NAME ": %s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
+	printk(KERN_INFO DRV_NAME ": %s\n", DRV_COPYRIGHT);
+
+	pm_qos_add_request(&ipw2100_pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
+			   PM_QOS_DEFAULT_VALUE);
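+	/* The 2100 is sensitive to long DMA latencies (see the
+	 * RETRY_TIMEOUT/C3 handling in the probe path), so a CPU DMA
+	 * latency request is held for the lifetime of the module. */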
+
+	ret = pci_register_driver(&ipw2100_pci_driver);
+	if (ret)
+		goto out;
+
+#ifdef CONFIG_IPW2100_DEBUG
+	ipw2100_debug_level = debug;
+	ret = driver_create_file(&ipw2100_pci_driver.driver,
+				 &driver_attr_debug_level);
+#endif
+
+out:
+	return ret;
+}
+
+/**
+ * Clean up ipw2100 driver registration
+ */
+static void __exit ipw2100_exit(void)
+{
+	/* FIXME: IPG: check that we have no instances of the devices open */
+#ifdef CONFIG_IPW2100_DEBUG
+	driver_remove_file(&ipw2100_pci_driver.driver,
+			   &driver_attr_debug_level);
+#endif
+	pci_unregister_driver(&ipw2100_pci_driver);
+	pm_qos_remove_request(&ipw2100_pm_qos_req);
+}
+
+module_init(ipw2100_init);
+module_exit(ipw2100_exit);
+
+static int ipw2100_wx_get_name(struct net_device *dev,
+			       struct iw_request_info *info,
+			       union iwreq_data *wrqu, char *extra)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	if (!(priv->status & STATUS_ASSOCIATED))
+		strcpy(wrqu->name, "unassociated");
+	else
+		snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11b");
+
+	IPW_DEBUG_WX("Name: %s\n", wrqu->name);
+	return 0;
+}
+
+static int ipw2100_wx_set_freq(struct net_device *dev,
+			       struct iw_request_info *info,
+			       union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	struct iw_freq *fwrq = &wrqu->freq;
+	int err = 0;
+
+	if (priv->ieee->iw_mode == IW_MODE_INFRA)
+		return -EOPNOTSUPP;
+
+	mutex_lock(&priv->action_mutex);
+	if (!(priv->status & STATUS_INITIALIZED)) {
+		err = -EIO;
+		goto done;
+	}
+
+	/* if setting by freq convert to channel */
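+	/* Example: a request for 2.437 GHz arrives as e = 1, m = 243700000;
+	 * m / 100000 = 2437 should match ipw2100_frequencies[5] (assuming
+	 * that table holds the channel centers in MHz), yielding channel 6. */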
+	if (fwrq->e == 1) {
+		if ((fwrq->m >= (int)2.412e8 && fwrq->m <= (int)2.487e8)) {
+			int f = fwrq->m / 100000;
+			int c = 0;
+
+			while ((c < REG_MAX_CHANNEL) &&
+			       (f != ipw2100_frequencies[c]))
+				c++;
+
+			/* hack to fall through */
+			fwrq->e = 0;
+			fwrq->m = c + 1;
+		}
+	}
+
+	if (fwrq->e > 0 || fwrq->m > 1000) {
+		err = -EOPNOTSUPP;
+		goto done;
+	} else {		/* Set the channel */
+		IPW_DEBUG_WX("SET Freq/Channel -> %d\n", fwrq->m);
+		err = ipw2100_set_channel(priv, fwrq->m, 0);
+	}
+
+      done:
+	mutex_unlock(&priv->action_mutex);
+	return err;
+}
+
+static int ipw2100_wx_get_freq(struct net_device *dev,
+			       struct iw_request_info *info,
+			       union iwreq_data *wrqu, char *extra)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = libipw_priv(dev);
+
+	wrqu->freq.e = 0;
+
+	/* If we are associated, trying to associate, or have a statically
+	 * configured CHANNEL then return that; otherwise return ANY */
+	if (priv->config & CFG_STATIC_CHANNEL ||
+	    priv->status & STATUS_ASSOCIATED)
+		wrqu->freq.m = priv->channel;
+	else
+		wrqu->freq.m = 0;
+
+	IPW_DEBUG_WX("GET Freq/Channel -> %d\n", priv->channel);
+	return 0;
+
+}
+
+static int ipw2100_wx_set_mode(struct net_device *dev,
+			       struct iw_request_info *info,
+			       union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	int err = 0;
+
+	IPW_DEBUG_WX("SET Mode -> %d\n", wrqu->mode);
+
+	if (wrqu->mode == priv->ieee->iw_mode)
+		return 0;
+
+	mutex_lock(&priv->action_mutex);
+	if (!(priv->status & STATUS_INITIALIZED)) {
+		err = -EIO;
+		goto done;
+	}
+
+	switch (wrqu->mode) {
+#ifdef CONFIG_IPW2100_MONITOR
+	case IW_MODE_MONITOR:
+		err = ipw2100_switch_mode(priv, IW_MODE_MONITOR);
+		break;
+#endif				/* CONFIG_IPW2100_MONITOR */
+	case IW_MODE_ADHOC:
+		err = ipw2100_switch_mode(priv, IW_MODE_ADHOC);
+		break;
+	case IW_MODE_INFRA:
+	case IW_MODE_AUTO:
+	default:
+		err = ipw2100_switch_mode(priv, IW_MODE_INFRA);
+		break;
+	}
+
+      done:
+	mutex_unlock(&priv->action_mutex);
+	return err;
+}
+
+static int ipw2100_wx_get_mode(struct net_device *dev,
+			       struct iw_request_info *info,
+			       union iwreq_data *wrqu, char *extra)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = libipw_priv(dev);
+
+	wrqu->mode = priv->ieee->iw_mode;
+	IPW_DEBUG_WX("GET Mode -> %d\n", wrqu->mode);
+
+	return 0;
+}
+
+#define POWER_MODES 5
+
+/* Values are in microseconds */
+static const s32 timeout_duration[POWER_MODES] = {
+	350000,
+	250000,
+	75000,
+	37000,
+	25000,
+};
+
+static const s32 period_duration[POWER_MODES] = {
+	400000,
+	700000,
+	1000000,
+	1000000,
+	1000000
+};
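+/* Both tables are indexed by IPW_POWER_LEVEL() - 1; e.g. power level 1
+ * means a 350000 us timeout with a 400000 us period (reported as
+ * 350 ms / 400 ms by ipw2100_wx_get_powermode() below). */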
+
+static int ipw2100_wx_get_range(struct net_device *dev,
+				struct iw_request_info *info,
+				union iwreq_data *wrqu, char *extra)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	struct iw_range *range = (struct iw_range *)extra;
+	u16 val;
+	int i, level;
+
+	wrqu->data.length = sizeof(*range);
+	memset(range, 0, sizeof(*range));
+
+	/* Let's try to keep this struct in the same order as in
+	 * linux/include/wireless.h
+	 */
+
+	/* TODO: See what values we can set, and remove the ones we can't
+	 * set, or fill them with some default data.
+	 */
+
+	/* ~5 Mb/s real (802.11b) */
+	range->throughput = 5 * 1000 * 1000;
+
+//      range->sensitivity;     /* signal level threshold range */
+
+	range->max_qual.qual = 100;
+	/* TODO: Find real max RSSI and stick here */
+	range->max_qual.level = 0;
+	range->max_qual.noise = 0;
+	range->max_qual.updated = 7;	/* Updated all three */
+
+	range->avg_qual.qual = 70;	/* > 8% missed beacons is 'bad' */
+	/* TODO: Find real 'good' to 'bad' threshold value for RSSI */
+	range->avg_qual.level = 20 + IPW2100_RSSI_TO_DBM;
+	range->avg_qual.noise = 0;
+	range->avg_qual.updated = 7;	/* Updated all three */
+
+	range->num_bitrates = RATE_COUNT;
+
+	for (i = 0; i < RATE_COUNT && i < IW_MAX_BITRATES; i++) {
+		range->bitrate[i] = ipw2100_bg_rates[i].bitrate * 100 * 1000;
+	}
+
+	range->min_rts = MIN_RTS_THRESHOLD;
+	range->max_rts = MAX_RTS_THRESHOLD;
+	range->min_frag = MIN_FRAG_THRESHOLD;
+	range->max_frag = MAX_FRAG_THRESHOLD;
+
+	range->min_pmp = period_duration[0];	/* Minimal PM period */
+	range->max_pmp = period_duration[POWER_MODES - 1];	/* Maximal PM period */
+	range->min_pmt = timeout_duration[POWER_MODES - 1];	/* Minimal PM timeout */
+	range->max_pmt = timeout_duration[0];	/* Maximal PM timeout */
+
+	/* How to decode max/min PM period */
+	range->pmp_flags = IW_POWER_PERIOD;
+	/* How to decode max/min PM timeout */
+	range->pmt_flags = IW_POWER_TIMEOUT;
+	/* What PM options are supported */
+	range->pm_capa = IW_POWER_TIMEOUT | IW_POWER_PERIOD;
+
+	range->encoding_size[0] = 5;
+	range->encoding_size[1] = 13;	/* Different token sizes */
+	range->num_encoding_sizes = 2;	/* Number of entries in the list */
+	range->max_encoding_tokens = WEP_KEYS;	/* Max number of tokens */
+//      range->encoding_login_index;            /* token index for login token */
+
+	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
+		range->txpower_capa = IW_TXPOW_DBM;
+		range->num_txpower = IW_MAX_TXPOWER;
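+		/* Build a table stepping linearly from IPW_TX_POWER_MAX_DBM
+		 * down to IPW_TX_POWER_MIN_DBM; the math is kept in 1/16 dBm
+		 * fixed point so the integer step size does not accumulate
+		 * rounding error across the IW_MAX_TXPOWER entries. */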
+		for (i = 0, level = (IPW_TX_POWER_MAX_DBM * 16);
+		     i < IW_MAX_TXPOWER;
+		     i++, level -=
+		     ((IPW_TX_POWER_MAX_DBM -
+		       IPW_TX_POWER_MIN_DBM) * 16) / (IW_MAX_TXPOWER - 1))
+			range->txpower[i] = level / 16;
+	} else {
+		range->txpower_capa = 0;
+		range->num_txpower = 0;
+	}
+
+	/* Set the Wireless Extension versions */
+	range->we_version_compiled = WIRELESS_EXT;
+	range->we_version_source = 18;
+
+//      range->retry_capa;      /* What retry options are supported */
+//      range->retry_flags;     /* How to decode max/min retry limit */
+//      range->r_time_flags;    /* How to decode max/min retry life */
+//      range->min_retry;       /* Minimal number of retries */
+//      range->max_retry;       /* Maximal number of retries */
+//      range->min_r_time;      /* Minimal retry lifetime */
+//      range->max_r_time;      /* Maximal retry lifetime */
+
+	range->num_channels = FREQ_COUNT;
+
+	val = 0;
+	for (i = 0; i < FREQ_COUNT; i++) {
+		// TODO: Include only legal frequencies for some countries
+//              if (local->channel_mask & (1 << i)) {
+		range->freq[val].i = i + 1;
+		range->freq[val].m = ipw2100_frequencies[i] * 100000;
+		range->freq[val].e = 1;
+		val++;
+//              }
+		if (val == IW_MAX_FREQUENCIES)
+			break;
+	}
+	range->num_frequency = val;
+
+	/* Event capability (kernel + driver) */
+	range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
+				IW_EVENT_CAPA_MASK(SIOCGIWAP));
+	range->event_capa[1] = IW_EVENT_CAPA_K_1;
+
+	range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
+		IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
+
+	IPW_DEBUG_WX("GET Range\n");
+
+	return 0;
+}
+
+static int ipw2100_wx_set_wap(struct net_device *dev,
+			      struct iw_request_info *info,
+			      union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	int err = 0;
+
+	// sanity checks
+	if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
+		return -EINVAL;
+
+	mutex_lock(&priv->action_mutex);
+	if (!(priv->status & STATUS_INITIALIZED)) {
+		err = -EIO;
+		goto done;
+	}
+
+	if (is_broadcast_ether_addr(wrqu->ap_addr.sa_data) ||
+	    is_zero_ether_addr(wrqu->ap_addr.sa_data)) {
+		/* we disable mandatory BSSID association */
+		IPW_DEBUG_WX("exit - disable mandatory BSSID\n");
+		priv->config &= ~CFG_STATIC_BSSID;
+		err = ipw2100_set_mandatory_bssid(priv, NULL, 0);
+		goto done;
+	}
+
+	priv->config |= CFG_STATIC_BSSID;
+	memcpy(priv->mandatory_bssid_mac, wrqu->ap_addr.sa_data, ETH_ALEN);
+
+	err = ipw2100_set_mandatory_bssid(priv, wrqu->ap_addr.sa_data, 0);
+
+	IPW_DEBUG_WX("SET BSSID -> %pM\n", wrqu->ap_addr.sa_data);
+
+      done:
+	mutex_unlock(&priv->action_mutex);
+	return err;
+}
+
+static int ipw2100_wx_get_wap(struct net_device *dev,
+			      struct iw_request_info *info,
+			      union iwreq_data *wrqu, char *extra)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = libipw_priv(dev);
+
+	/* If we are associated, trying to associate, or have a statically
+	 * configured BSSID then return that; otherwise return ANY */
+	if (priv->config & CFG_STATIC_BSSID || priv->status & STATUS_ASSOCIATED) {
+		wrqu->ap_addr.sa_family = ARPHRD_ETHER;
+		memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
+	} else
+		eth_zero_addr(wrqu->ap_addr.sa_data);
+
+	IPW_DEBUG_WX("Getting WAP BSSID: %pM\n", wrqu->ap_addr.sa_data);
+	return 0;
+}
+
+static int ipw2100_wx_set_essid(struct net_device *dev,
+				struct iw_request_info *info,
+				union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	char *essid = "";	/* ANY */
+	int length = 0;
+	int err = 0;
+
+	mutex_lock(&priv->action_mutex);
+	if (!(priv->status & STATUS_INITIALIZED)) {
+		err = -EIO;
+		goto done;
+	}
+
+	if (wrqu->essid.flags && wrqu->essid.length) {
+		length = wrqu->essid.length;
+		essid = extra;
+	}
+
+	if (length == 0) {
+		IPW_DEBUG_WX("Setting ESSID to ANY\n");
+		priv->config &= ~CFG_STATIC_ESSID;
+		err = ipw2100_set_essid(priv, NULL, 0, 0);
+		goto done;
+	}
+
+	length = min(length, IW_ESSID_MAX_SIZE);
+
+	priv->config |= CFG_STATIC_ESSID;
+
+	if (priv->essid_len == length && !memcmp(priv->essid, extra, length)) {
+		IPW_DEBUG_WX("ESSID set to current ESSID.\n");
+		err = 0;
+		goto done;
+	}
+
+	IPW_DEBUG_WX("Setting ESSID: '%*pE' (%d)\n", length, essid, length);
+
+	priv->essid_len = length;
+	memcpy(priv->essid, essid, priv->essid_len);
+
+	err = ipw2100_set_essid(priv, essid, length, 0);
+
+      done:
+	mutex_unlock(&priv->action_mutex);
+	return err;
+}
+
+static int ipw2100_wx_get_essid(struct net_device *dev,
+				struct iw_request_info *info,
+				union iwreq_data *wrqu, char *extra)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = libipw_priv(dev);
+
+	/* If we are associated, trying to associate, or have a statically
+	 * configured ESSID then return that; otherwise return ANY */
+	if (priv->config & CFG_STATIC_ESSID || priv->status & STATUS_ASSOCIATED) {
+		IPW_DEBUG_WX("Getting essid: '%*pE'\n",
+			     priv->essid_len, priv->essid);
+		memcpy(extra, priv->essid, priv->essid_len);
+		wrqu->essid.length = priv->essid_len;
+		wrqu->essid.flags = 1;	/* active */
+	} else {
+		IPW_DEBUG_WX("Getting essid: ANY\n");
+		wrqu->essid.length = 0;
+		wrqu->essid.flags = 0;	/* active */
+	}
+
+	return 0;
+}
+
+static int ipw2100_wx_set_nick(struct net_device *dev,
+			       struct iw_request_info *info,
+			       union iwreq_data *wrqu, char *extra)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = libipw_priv(dev);
+
+	if (wrqu->data.length > IW_ESSID_MAX_SIZE)
+		return -E2BIG;
+
+	wrqu->data.length = min_t(size_t, wrqu->data.length, sizeof(priv->nick));
+	memset(priv->nick, 0, sizeof(priv->nick));
+	memcpy(priv->nick, extra, wrqu->data.length);
+
+	IPW_DEBUG_WX("SET Nickname -> %s\n", priv->nick);
+
+	return 0;
+}
+
+static int ipw2100_wx_get_nick(struct net_device *dev,
+			       struct iw_request_info *info,
+			       union iwreq_data *wrqu, char *extra)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = libipw_priv(dev);
+
+	wrqu->data.length = strlen(priv->nick);
+	memcpy(extra, priv->nick, wrqu->data.length);
+	wrqu->data.flags = 1;	/* active */
+
+	IPW_DEBUG_WX("GET Nickname -> %s\n", extra);
+
+	return 0;
+}
+
+static int ipw2100_wx_set_rate(struct net_device *dev,
+			       struct iw_request_info *info,
+			       union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	u32 target_rate = wrqu->bitrate.value;
+	u32 rate;
+	int err = 0;
+
+	mutex_lock(&priv->action_mutex);
+	if (!(priv->status & STATUS_INITIALIZED)) {
+		err = -EIO;
+		goto done;
+	}
+
+	rate = 0;
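+	/* A fixed request enables exactly the matching rate; an auto request
+	 * (fixed == 0) enables every rate up to and including the target,
+	 * e.g. auto 11 Mb/s sets all four TX_RATE_* bits. */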
+
+	if (target_rate == 1000000 ||
+	    (!wrqu->bitrate.fixed && target_rate > 1000000))
+		rate |= TX_RATE_1_MBIT;
+	if (target_rate == 2000000 ||
+	    (!wrqu->bitrate.fixed && target_rate > 2000000))
+		rate |= TX_RATE_2_MBIT;
+	if (target_rate == 5500000 ||
+	    (!wrqu->bitrate.fixed && target_rate > 5500000))
+		rate |= TX_RATE_5_5_MBIT;
+	if (target_rate == 11000000 ||
+	    (!wrqu->bitrate.fixed && target_rate > 11000000))
+		rate |= TX_RATE_11_MBIT;
+	if (rate == 0)
+		rate = DEFAULT_TX_RATES;
+
+	err = ipw2100_set_tx_rates(priv, rate, 0);
+
+	IPW_DEBUG_WX("SET Rate -> %04X\n", rate);
+      done:
+	mutex_unlock(&priv->action_mutex);
+	return err;
+}
+
+static int ipw2100_wx_get_rate(struct net_device *dev,
+			       struct iw_request_info *info,
+			       union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	int val;
+	unsigned int len = sizeof(val);
+	int err = 0;
+
+	if (!(priv->status & STATUS_ENABLED) ||
+	    priv->status & STATUS_RF_KILL_MASK ||
+	    !(priv->status & STATUS_ASSOCIATED)) {
+		wrqu->bitrate.value = 0;
+		return 0;
+	}
+
+	mutex_lock(&priv->action_mutex);
+	if (!(priv->status & STATUS_INITIALIZED)) {
+		err = -EIO;
+		goto done;
+	}
+
+	err = ipw2100_get_ordinal(priv, IPW_ORD_CURRENT_TX_RATE, &val, &len);
+	if (err) {
+		IPW_DEBUG_WX("failed querying ordinals.\n");
+		goto done;
+	}
+
+	switch (val & TX_RATE_MASK) {
+	case TX_RATE_1_MBIT:
+		wrqu->bitrate.value = 1000000;
+		break;
+	case TX_RATE_2_MBIT:
+		wrqu->bitrate.value = 2000000;
+		break;
+	case TX_RATE_5_5_MBIT:
+		wrqu->bitrate.value = 5500000;
+		break;
+	case TX_RATE_11_MBIT:
+		wrqu->bitrate.value = 11000000;
+		break;
+	default:
+		wrqu->bitrate.value = 0;
+	}
+
+	IPW_DEBUG_WX("GET Rate -> %d\n", wrqu->bitrate.value);
+
+      done:
+	mutex_unlock(&priv->action_mutex);
+	return err;
+}
+
+static int ipw2100_wx_set_rts(struct net_device *dev,
+			      struct iw_request_info *info,
+			      union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	int value, err;
+
+	/* Auto RTS not yet supported */
+	if (wrqu->rts.fixed == 0)
+		return -EINVAL;
+
+	mutex_lock(&priv->action_mutex);
+	if (!(priv->status & STATUS_INITIALIZED)) {
+		err = -EIO;
+		goto done;
+	}
+
+	if (wrqu->rts.disabled)
+		value = priv->rts_threshold | RTS_DISABLED;
+	else {
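+		/* 2304 bytes is the maximum 802.11 MSDU size, so a larger
+		 * RTS threshold could never trigger */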
+		if (wrqu->rts.value < 1 || wrqu->rts.value > 2304) {
+			err = -EINVAL;
+			goto done;
+		}
+		value = wrqu->rts.value;
+	}
+
+	err = ipw2100_set_rts_threshold(priv, value);
+
+	IPW_DEBUG_WX("SET RTS Threshold -> 0x%08X\n", value);
+      done:
+	mutex_unlock(&priv->action_mutex);
+	return err;
+}
+
+static int ipw2100_wx_get_rts(struct net_device *dev,
+			      struct iw_request_info *info,
+			      union iwreq_data *wrqu, char *extra)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = libipw_priv(dev);
+
+	wrqu->rts.value = priv->rts_threshold & ~RTS_DISABLED;
+	wrqu->rts.fixed = 1;	/* no auto select */
+
+	/* RTS_DISABLED is or'ed into the stored threshold when RTS is off */
+	wrqu->rts.disabled = (priv->rts_threshold & RTS_DISABLED) ? 1 : 0;
+
+	IPW_DEBUG_WX("GET RTS Threshold -> 0x%08X\n", wrqu->rts.value);
+
+	return 0;
+}
+
+static int ipw2100_wx_set_txpow(struct net_device *dev,
+				struct iw_request_info *info,
+				union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	int err = 0, value;
+
+	if (ipw_radio_kill_sw(priv, wrqu->txpower.disabled))
+		return -EINPROGRESS;
+
+	if (priv->ieee->iw_mode != IW_MODE_ADHOC)
+		return 0;
+
+	if ((wrqu->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM)
+		return -EINVAL;
+
+	if (wrqu->txpower.fixed == 0)
+		value = IPW_TX_POWER_DEFAULT;
+	else {
+		if (wrqu->txpower.value < IPW_TX_POWER_MIN_DBM ||
+		    wrqu->txpower.value > IPW_TX_POWER_MAX_DBM)
+			return -EINVAL;
+
+		value = wrqu->txpower.value;
+	}
+
+	mutex_lock(&priv->action_mutex);
+	if (!(priv->status & STATUS_INITIALIZED)) {
+		err = -EIO;
+		goto done;
+	}
+
+	err = ipw2100_set_tx_power(priv, value);
+
+	IPW_DEBUG_WX("SET TX Power -> %d\n", value);
+
+      done:
+	mutex_unlock(&priv->action_mutex);
+	return err;
+}
+
+static int ipw2100_wx_get_txpow(struct net_device *dev,
+				struct iw_request_info *info,
+				union iwreq_data *wrqu, char *extra)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = libipw_priv(dev);
+
+	wrqu->txpower.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
+
+	if (priv->tx_power == IPW_TX_POWER_DEFAULT) {
+		wrqu->txpower.fixed = 0;
+		wrqu->txpower.value = IPW_TX_POWER_MAX_DBM;
+	} else {
+		wrqu->txpower.fixed = 1;
+		wrqu->txpower.value = priv->tx_power;
+	}
+
+	wrqu->txpower.flags = IW_TXPOW_DBM;
+
+	IPW_DEBUG_WX("GET TX Power -> %d\n", wrqu->txpower.value);
+
+	return 0;
+}
+
+static int ipw2100_wx_set_frag(struct net_device *dev,
+			       struct iw_request_info *info,
+			       union iwreq_data *wrqu, char *extra)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = libipw_priv(dev);
+
+	if (!wrqu->frag.fixed)
+		return -EINVAL;
+
+	if (wrqu->frag.disabled) {
+		priv->frag_threshold |= FRAG_DISABLED;
+		priv->ieee->fts = DEFAULT_FTS;
+	} else {
+		if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
+		    wrqu->frag.value > MAX_FRAG_THRESHOLD)
+			return -EINVAL;
+
+		priv->ieee->fts = wrqu->frag.value & ~0x1;
+		priv->frag_threshold = priv->ieee->fts;
+	}
+
+	IPW_DEBUG_WX("SET Frag Threshold -> %d\n", priv->ieee->fts);
+
+	return 0;
+}
+
+static int ipw2100_wx_get_frag(struct net_device *dev,
+			       struct iw_request_info *info,
+			       union iwreq_data *wrqu, char *extra)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	wrqu->frag.value = priv->frag_threshold & ~FRAG_DISABLED;
+	wrqu->frag.fixed = 0;	/* no auto select */
+	wrqu->frag.disabled = (priv->frag_threshold & FRAG_DISABLED) ? 1 : 0;
+
+	IPW_DEBUG_WX("GET Frag Threshold -> %d\n", wrqu->frag.value);
+
+	return 0;
+}
+
+static int ipw2100_wx_set_retry(struct net_device *dev,
+				struct iw_request_info *info,
+				union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	int err = 0;
+
+	if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
+		return -EINVAL;
+
+	if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
+		return 0;
+
+	mutex_lock(&priv->action_mutex);
+	if (!(priv->status & STATUS_INITIALIZED)) {
+		err = -EIO;
+		goto done;
+	}
+
+	if (wrqu->retry.flags & IW_RETRY_SHORT) {
+		err = ipw2100_set_short_retry(priv, wrqu->retry.value);
+		IPW_DEBUG_WX("SET Short Retry Limit -> %d\n",
+			     wrqu->retry.value);
+		goto done;
+	}
+
+	if (wrqu->retry.flags & IW_RETRY_LONG) {
+		err = ipw2100_set_long_retry(priv, wrqu->retry.value);
+		IPW_DEBUG_WX("SET Long Retry Limit -> %d\n",
+			     wrqu->retry.value);
+		goto done;
+	}
+
+	err = ipw2100_set_short_retry(priv, wrqu->retry.value);
+	if (!err)
+		err = ipw2100_set_long_retry(priv, wrqu->retry.value);
+
+	IPW_DEBUG_WX("SET Both Retry Limits -> %d\n", wrqu->retry.value);
+
+      done:
+	mutex_unlock(&priv->action_mutex);
+	return err;
+}
+
+static int ipw2100_wx_get_retry(struct net_device *dev,
+				struct iw_request_info *info,
+				union iwreq_data *wrqu, char *extra)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = libipw_priv(dev);
+
+	wrqu->retry.disabled = 0;	/* can't be disabled */
+
+	if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME)
+		return -EINVAL;
+
+	if (wrqu->retry.flags & IW_RETRY_LONG) {
+		wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
+		wrqu->retry.value = priv->long_retry_limit;
+	} else {
+		wrqu->retry.flags =
+		    (priv->short_retry_limit !=
+		     priv->long_retry_limit) ?
+		    IW_RETRY_LIMIT | IW_RETRY_SHORT : IW_RETRY_LIMIT;
+
+		wrqu->retry.value = priv->short_retry_limit;
+	}
+
+	IPW_DEBUG_WX("GET Retry -> %d\n", wrqu->retry.value);
+
+	return 0;
+}
+
+static int ipw2100_wx_set_scan(struct net_device *dev,
+			       struct iw_request_info *info,
+			       union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	int err = 0;
+
+	mutex_lock(&priv->action_mutex);
+	if (!(priv->status & STATUS_INITIALIZED)) {
+		err = -EIO;
+		goto done;
+	}
+
+	IPW_DEBUG_WX("Initiating scan...\n");
+
+	priv->user_requested_scan = 1;
+	if (ipw2100_set_scan_options(priv) || ipw2100_start_scan(priv)) {
+		IPW_DEBUG_WX("Start scan failed.\n");
+
+		/* TODO: Mark a scan as pending so when hardware initialized
+		 *       a scan starts */
+	}
+
+      done:
+	mutex_unlock(&priv->action_mutex);
+	return err;
+}
+
+static int ipw2100_wx_get_scan(struct net_device *dev,
+			       struct iw_request_info *info,
+			       union iwreq_data *wrqu, char *extra)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	return libipw_wx_get_scan(priv->ieee, info, wrqu, extra);
+}
+
+/*
+ * Implementation based on code in hostap-driver v0.1.3 hostap_ioctl.c
+ */
+static int ipw2100_wx_set_encode(struct net_device *dev,
+				 struct iw_request_info *info,
+				 union iwreq_data *wrqu, char *key)
+{
+	/*
+	 * No check of STATUS_INITIALIZED required
+	 */
+
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	return libipw_wx_set_encode(priv->ieee, info, wrqu, key);
+}
+
+static int ipw2100_wx_get_encode(struct net_device *dev,
+				 struct iw_request_info *info,
+				 union iwreq_data *wrqu, char *key)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	return libipw_wx_get_encode(priv->ieee, info, wrqu, key);
+}
+
+static int ipw2100_wx_set_power(struct net_device *dev,
+				struct iw_request_info *info,
+				union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	int err = 0;
+
+	mutex_lock(&priv->action_mutex);
+	if (!(priv->status & STATUS_INITIALIZED)) {
+		err = -EIO;
+		goto done;
+	}
+
+	if (wrqu->power.disabled) {
+		priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
+		err = ipw2100_set_power_mode(priv, IPW_POWER_MODE_CAM);
+		IPW_DEBUG_WX("SET Power Management Mode -> off\n");
+		goto done;
+	}
+
+	switch (wrqu->power.flags & IW_POWER_MODE) {
+	case IW_POWER_ON:	/* If not specified */
+	case IW_POWER_MODE:	/* If set all mask */
+	case IW_POWER_ALL_R:	/* If explicitly state all */
+		break;
+	default:		/* Otherwise we don't support it */
+		IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
+			     wrqu->power.flags);
+		err = -EOPNOTSUPP;
+		goto done;
+	}
+
+	/* Re-enable power management at whatever level the user last
+	 * selected (the level bits survive a disable) */
+	priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
+	err = ipw2100_set_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
+
+	IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
+
+      done:
+	mutex_unlock(&priv->action_mutex);
+	return err;
+
+}
+
+static int ipw2100_wx_get_power(struct net_device *dev,
+				struct iw_request_info *info,
+				union iwreq_data *wrqu, char *extra)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = libipw_priv(dev);
+
+	if (!(priv->power_mode & IPW_POWER_ENABLED))
+		wrqu->power.disabled = 1;
+	else {
+		wrqu->power.disabled = 0;
+		wrqu->power.flags = 0;
+	}
+
+	IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
+
+	return 0;
+}
+
+/*
+ * WE-18 WPA support
+ */
+
+/* SIOCSIWGENIE */
+static int ipw2100_wx_set_genie(struct net_device *dev,
+				struct iw_request_info *info,
+				union iwreq_data *wrqu, char *extra)
+{
+
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	struct libipw_device *ieee = priv->ieee;
+	u8 *buf;
+
+	if (!ieee->wpa_enabled)
+		return -EOPNOTSUPP;
+
+	if (wrqu->data.length > MAX_WPA_IE_LEN ||
+	    (wrqu->data.length && extra == NULL))
+		return -EINVAL;
+
+	if (wrqu->data.length) {
+		buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL);
+		if (buf == NULL)
+			return -ENOMEM;
+
+		kfree(ieee->wpa_ie);
+		ieee->wpa_ie = buf;
+		ieee->wpa_ie_len = wrqu->data.length;
+	} else {
+		kfree(ieee->wpa_ie);
+		ieee->wpa_ie = NULL;
+		ieee->wpa_ie_len = 0;
+	}
+
+	ipw2100_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
+
+	return 0;
+}
+
+/* SIOCGIWGENIE */
+static int ipw2100_wx_get_genie(struct net_device *dev,
+				struct iw_request_info *info,
+				union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	struct libipw_device *ieee = priv->ieee;
+
+	if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
+		wrqu->data.length = 0;
+		return 0;
+	}
+
+	if (wrqu->data.length < ieee->wpa_ie_len)
+		return -E2BIG;
+
+	wrqu->data.length = ieee->wpa_ie_len;
+	memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
+
+	return 0;
+}
+
+/* SIOCSIWAUTH */
+static int ipw2100_wx_set_auth(struct net_device *dev,
+			       struct iw_request_info *info,
+			       union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	struct libipw_device *ieee = priv->ieee;
+	struct iw_param *param = &wrqu->param;
+	struct lib80211_crypt_data *crypt;
+	unsigned long flags;
+	int ret = 0;
+
+	switch (param->flags & IW_AUTH_INDEX) {
+	case IW_AUTH_WPA_VERSION:
+	case IW_AUTH_CIPHER_PAIRWISE:
+	case IW_AUTH_CIPHER_GROUP:
+	case IW_AUTH_KEY_MGMT:
+		/*
+		 * ipw2100 does not use these parameters
+		 */
+		break;
+
+	case IW_AUTH_TKIP_COUNTERMEASURES:
+		crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
+		if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
+			break;
+
+		flags = crypt->ops->get_flags(crypt->priv);
+
+		if (param->value)
+			flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
+		else
+			flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
+
+		crypt->ops->set_flags(flags, crypt->priv);
+
+		break;
+
+	case IW_AUTH_DROP_UNENCRYPTED:{
+			/* HACK:
+			 *
+			 * wpa_supplicant calls set_wpa_enabled when the driver
+			 * is loaded and unloaded, regardless of if WPA is being
+			 * used.  No other calls are made which can be used to
+			 * determine if encryption will be used or not prior to
+			 * association being expected.  If encryption is not being
+			 * used, drop_unencrypted is set to false, else true -- we
+			 * can use this to determine if the CAP_PRIVACY_ON bit should
+			 * be set.
+			 */
+			struct libipw_security sec = {
+				.flags = SEC_ENABLED,
+				.enabled = param->value,
+			};
+			priv->ieee->drop_unencrypted = param->value;
+			/* We only change SEC_LEVEL for open mode. Others
+			 * are set by ipw_wpa_set_encryption.
+			 */
+			if (!param->value) {
+				sec.flags |= SEC_LEVEL;
+				sec.level = SEC_LEVEL_0;
+			} else {
+				sec.flags |= SEC_LEVEL;
+				sec.level = SEC_LEVEL_1;
+			}
+			if (priv->ieee->set_security)
+				priv->ieee->set_security(priv->ieee->dev, &sec);
+			break;
+		}
+
+	case IW_AUTH_80211_AUTH_ALG:
+		ret = ipw2100_wpa_set_auth_algs(priv, param->value);
+		break;
+
+	case IW_AUTH_WPA_ENABLED:
+		ret = ipw2100_wpa_enable(priv, param->value);
+		break;
+
+	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+		ieee->ieee802_1x = param->value;
+		break;
+
+		//case IW_AUTH_ROAMING_CONTROL:
+	case IW_AUTH_PRIVACY_INVOKED:
+		ieee->privacy_invoked = param->value;
+		break;
+
+	default:
+		return -EOPNOTSUPP;
+	}
+	return ret;
+}
+
+/* SIOCGIWAUTH */
+static int ipw2100_wx_get_auth(struct net_device *dev,
+			       struct iw_request_info *info,
+			       union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	struct libipw_device *ieee = priv->ieee;
+	struct lib80211_crypt_data *crypt;
+	struct iw_param *param = &wrqu->param;
+
+	switch (param->flags & IW_AUTH_INDEX) {
+	case IW_AUTH_WPA_VERSION:
+	case IW_AUTH_CIPHER_PAIRWISE:
+	case IW_AUTH_CIPHER_GROUP:
+	case IW_AUTH_KEY_MGMT:
+		/*
+		 * wpa_supplicant will control these internally
+		 */
+		break;
+
+	case IW_AUTH_TKIP_COUNTERMEASURES:
+		crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
+		if (!crypt || !crypt->ops->get_flags) {
+			IPW_DEBUG_WARNING("Can't get TKIP countermeasures: "
+					  "crypt not set!\n");
+			break;
+		}
+
+		param->value = (crypt->ops->get_flags(crypt->priv) &
+				IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
+
+		break;
+
+	case IW_AUTH_DROP_UNENCRYPTED:
+		param->value = ieee->drop_unencrypted;
+		break;
+
+	case IW_AUTH_80211_AUTH_ALG:
+		param->value = priv->ieee->sec.auth_mode;
+		break;
+
+	case IW_AUTH_WPA_ENABLED:
+		param->value = ieee->wpa_enabled;
+		break;
+
+	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+		param->value = ieee->ieee802_1x;
+		break;
+
+	case IW_AUTH_ROAMING_CONTROL:
+	case IW_AUTH_PRIVACY_INVOKED:
+		param->value = ieee->privacy_invoked;
+		break;
+
+	default:
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
+/* SIOCSIWENCODEEXT */
+static int ipw2100_wx_set_encodeext(struct net_device *dev,
+				    struct iw_request_info *info,
+				    union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	return libipw_wx_set_encodeext(priv->ieee, info, wrqu, extra);
+}
+
+/* SIOCGIWENCODEEXT */
+static int ipw2100_wx_get_encodeext(struct net_device *dev,
+				    struct iw_request_info *info,
+				    union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	return libipw_wx_get_encodeext(priv->ieee, info, wrqu, extra);
+}
+
+/* SIOCSIWMLME */
+static int ipw2100_wx_set_mlme(struct net_device *dev,
+			       struct iw_request_info *info,
+			       union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	struct iw_mlme *mlme = (struct iw_mlme *)extra;
+
+	switch (mlme->cmd) {
+	case IW_MLME_DEAUTH:
+		// silently ignore
+		break;
+
+	case IW_MLME_DISASSOC:
+		ipw2100_disassociate_bssid(priv);
+		break;
+
+	default:
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
+/*
+ *
+ * IWPRIV handlers
+ *
+ */
+#ifdef CONFIG_IPW2100_MONITOR
+static int ipw2100_wx_set_promisc(struct net_device *dev,
+				  struct iw_request_info *info,
+				  union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	int *parms = (int *)extra;
+	int enable = (parms[0] > 0);
+	int err = 0;
+
+	mutex_lock(&priv->action_mutex);
+	if (!(priv->status & STATUS_INITIALIZED)) {
+		err = -EIO;
+		goto done;
+	}
+
+	if (enable) {
+		if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
+			err = ipw2100_set_channel(priv, parms[1], 0);
+			goto done;
+		}
+		priv->channel = parms[1];
+		err = ipw2100_switch_mode(priv, IW_MODE_MONITOR);
+	} else {
+		if (priv->ieee->iw_mode == IW_MODE_MONITOR)
+			err = ipw2100_switch_mode(priv, priv->last_mode);
+	}
+      done:
+	mutex_unlock(&priv->action_mutex);
+	return err;
+}
+
+static int ipw2100_wx_reset(struct net_device *dev,
+			    struct iw_request_info *info,
+			    union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	if (priv->status & STATUS_INITIALIZED)
+		schedule_reset(priv);
+	return 0;
+}
+
+#endif
+
+static int ipw2100_wx_set_powermode(struct net_device *dev,
+				    struct iw_request_info *info,
+				    union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	int err = 0, mode = *(int *)extra;
+
+	mutex_lock(&priv->action_mutex);
+	if (!(priv->status & STATUS_INITIALIZED)) {
+		err = -EIO;
+		goto done;
+	}
+
+	if ((mode < 0) || (mode > POWER_MODES))
+		mode = IPW_POWER_AUTO;
+
+	if (IPW_POWER_LEVEL(priv->power_mode) != mode)
+		err = ipw2100_set_power_mode(priv, mode);
+      done:
+	mutex_unlock(&priv->action_mutex);
+	return err;
+}
+
+#define MAX_POWER_STRING 80
+static int ipw2100_wx_get_powermode(struct net_device *dev,
+				    struct iw_request_info *info,
+				    union iwreq_data *wrqu, char *extra)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	int level = IPW_POWER_LEVEL(priv->power_mode);
+	s32 timeout, period;
+
+	if (!(priv->power_mode & IPW_POWER_ENABLED)) {
+		snprintf(extra, MAX_POWER_STRING,
+			 "Power save level: %d (Off)", level);
+	} else {
+		switch (level) {
+		case IPW_POWER_MODE_CAM:
+			snprintf(extra, MAX_POWER_STRING,
+				 "Power save level: %d (None)", level);
+			break;
+		case IPW_POWER_AUTO:
+			snprintf(extra, MAX_POWER_STRING,
+				 "Power save level: %d (Auto)", level);
+			break;
+		default:
+			timeout = timeout_duration[level - 1] / 1000;
+			period = period_duration[level - 1] / 1000;
+			snprintf(extra, MAX_POWER_STRING,
+				 "Power save level: %d "
+				 "(Timeout %dms, Period %dms)",
+				 level, timeout, period);
+		}
+	}
+
+	wrqu->data.length = strlen(extra) + 1;
+
+	return 0;
+}
+
+static int ipw2100_wx_set_preamble(struct net_device *dev,
+				   struct iw_request_info *info,
+				   union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	int err, mode = *(int *)extra;
+
+	mutex_lock(&priv->action_mutex);
+	if (!(priv->status & STATUS_INITIALIZED)) {
+		err = -EIO;
+		goto done;
+	}
+
+	if (mode == 1)
+		priv->config |= CFG_LONG_PREAMBLE;
+	else if (mode == 0)
+		priv->config &= ~CFG_LONG_PREAMBLE;
+	else {
+		err = -EINVAL;
+		goto done;
+	}
+
+	err = ipw2100_system_config(priv, 0);
+
+      done:
+	mutex_unlock(&priv->action_mutex);
+	return err;
+}
+
+static int ipw2100_wx_get_preamble(struct net_device *dev,
+				   struct iw_request_info *info,
+				   union iwreq_data *wrqu, char *extra)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = libipw_priv(dev);
+
+	if (priv->config & CFG_LONG_PREAMBLE)
+		snprintf(wrqu->name, IFNAMSIZ, "long (1)");
+	else
+		snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
+
+	return 0;
+}
+
+#ifdef CONFIG_IPW2100_MONITOR
+static int ipw2100_wx_set_crc_check(struct net_device *dev,
+				    struct iw_request_info *info,
+				    union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	int err, mode = *(int *)extra;
+
+	mutex_lock(&priv->action_mutex);
+	if (!(priv->status & STATUS_INITIALIZED)) {
+		err = -EIO;
+		goto done;
+	}
+
+	if (mode == 1)
+		priv->config |= CFG_CRC_CHECK;
+	else if (mode == 0)
+		priv->config &= ~CFG_CRC_CHECK;
+	else {
+		err = -EINVAL;
+		goto done;
+	}
+	err = 0;
+
+      done:
+	mutex_unlock(&priv->action_mutex);
+	return err;
+}
+
+static int ipw2100_wx_get_crc_check(struct net_device *dev,
+				    struct iw_request_info *info,
+				    union iwreq_data *wrqu, char *extra)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = libipw_priv(dev);
+
+	if (priv->config & CFG_CRC_CHECK)
+		snprintf(wrqu->name, IFNAMSIZ, "CRC checked (1)");
+	else
+		snprintf(wrqu->name, IFNAMSIZ, "CRC ignored (0)");
+
+	return 0;
+}
+#endif				/* CONFIG_IPW2100_MONITOR */
+
+static iw_handler ipw2100_wx_handlers[] = {
+	IW_HANDLER(SIOCGIWNAME, ipw2100_wx_get_name),
+	IW_HANDLER(SIOCSIWFREQ, ipw2100_wx_set_freq),
+	IW_HANDLER(SIOCGIWFREQ, ipw2100_wx_get_freq),
+	IW_HANDLER(SIOCSIWMODE, ipw2100_wx_set_mode),
+	IW_HANDLER(SIOCGIWMODE, ipw2100_wx_get_mode),
+	IW_HANDLER(SIOCGIWRANGE, ipw2100_wx_get_range),
+	IW_HANDLER(SIOCSIWAP, ipw2100_wx_set_wap),
+	IW_HANDLER(SIOCGIWAP, ipw2100_wx_get_wap),
+	IW_HANDLER(SIOCSIWMLME, ipw2100_wx_set_mlme),
+	IW_HANDLER(SIOCSIWSCAN, ipw2100_wx_set_scan),
+	IW_HANDLER(SIOCGIWSCAN, ipw2100_wx_get_scan),
+	IW_HANDLER(SIOCSIWESSID, ipw2100_wx_set_essid),
+	IW_HANDLER(SIOCGIWESSID, ipw2100_wx_get_essid),
+	IW_HANDLER(SIOCSIWNICKN, ipw2100_wx_set_nick),
+	IW_HANDLER(SIOCGIWNICKN, ipw2100_wx_get_nick),
+	IW_HANDLER(SIOCSIWRATE, ipw2100_wx_set_rate),
+	IW_HANDLER(SIOCGIWRATE, ipw2100_wx_get_rate),
+	IW_HANDLER(SIOCSIWRTS, ipw2100_wx_set_rts),
+	IW_HANDLER(SIOCGIWRTS, ipw2100_wx_get_rts),
+	IW_HANDLER(SIOCSIWFRAG, ipw2100_wx_set_frag),
+	IW_HANDLER(SIOCGIWFRAG, ipw2100_wx_get_frag),
+	IW_HANDLER(SIOCSIWTXPOW, ipw2100_wx_set_txpow),
+	IW_HANDLER(SIOCGIWTXPOW, ipw2100_wx_get_txpow),
+	IW_HANDLER(SIOCSIWRETRY, ipw2100_wx_set_retry),
+	IW_HANDLER(SIOCGIWRETRY, ipw2100_wx_get_retry),
+	IW_HANDLER(SIOCSIWENCODE, ipw2100_wx_set_encode),
+	IW_HANDLER(SIOCGIWENCODE, ipw2100_wx_get_encode),
+	IW_HANDLER(SIOCSIWPOWER, ipw2100_wx_set_power),
+	IW_HANDLER(SIOCGIWPOWER, ipw2100_wx_get_power),
+	IW_HANDLER(SIOCSIWGENIE, ipw2100_wx_set_genie),
+	IW_HANDLER(SIOCGIWGENIE, ipw2100_wx_get_genie),
+	IW_HANDLER(SIOCSIWAUTH, ipw2100_wx_set_auth),
+	IW_HANDLER(SIOCGIWAUTH, ipw2100_wx_get_auth),
+	IW_HANDLER(SIOCSIWENCODEEXT, ipw2100_wx_set_encodeext),
+	IW_HANDLER(SIOCGIWENCODEEXT, ipw2100_wx_get_encodeext),
+};
+
+#define IPW2100_PRIV_SET_MONITOR	SIOCIWFIRSTPRIV
+#define IPW2100_PRIV_RESET		SIOCIWFIRSTPRIV+1
+#define IPW2100_PRIV_SET_POWER		SIOCIWFIRSTPRIV+2
+#define IPW2100_PRIV_GET_POWER		SIOCIWFIRSTPRIV+3
+#define IPW2100_PRIV_SET_LONGPREAMBLE	SIOCIWFIRSTPRIV+4
+#define IPW2100_PRIV_GET_LONGPREAMBLE	SIOCIWFIRSTPRIV+5
+#define IPW2100_PRIV_SET_CRC_CHECK	SIOCIWFIRSTPRIV+6
+#define IPW2100_PRIV_GET_CRC_CHECK	SIOCIWFIRSTPRIV+7
+
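+/* Each entry below encodes its argument format as
+ * type | IW_PRIV_SIZE_FIXED | count: e.g. "monitor" takes exactly two
+ * integers (enable flag and channel, per ipw2100_wx_set_promisc() above),
+ * while "get_power" returns a fixed MAX_POWER_STRING-byte string. */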
+static const struct iw_priv_args ipw2100_private_args[] = {
+
+#ifdef CONFIG_IPW2100_MONITOR
+	{
+	 IPW2100_PRIV_SET_MONITOR,
+	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
+	{
+	 IPW2100_PRIV_RESET,
+	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
+#endif				/* CONFIG_IPW2100_MONITOR */
+
+	{
+	 IPW2100_PRIV_SET_POWER,
+	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_power"},
+	{
+	 IPW2100_PRIV_GET_POWER,
+	 0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_POWER_STRING,
+	 "get_power"},
+	{
+	 IPW2100_PRIV_SET_LONGPREAMBLE,
+	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_preamble"},
+	{
+	 IPW2100_PRIV_GET_LONGPREAMBLE,
+	 0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ, "get_preamble"},
+#ifdef CONFIG_IPW2100_MONITOR
+	{
+	 IPW2100_PRIV_SET_CRC_CHECK,
+	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_crc_check"},
+	{
+	 IPW2100_PRIV_GET_CRC_CHECK,
+	 0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ, "get_crc_check"},
+#endif				/* CONFIG_IPW2100_MONITOR */
+};
+
+static iw_handler ipw2100_private_handler[] = {
+#ifdef CONFIG_IPW2100_MONITOR
+	ipw2100_wx_set_promisc,
+	ipw2100_wx_reset,
+#else				/* CONFIG_IPW2100_MONITOR */
+	NULL,
+	NULL,
+#endif				/* CONFIG_IPW2100_MONITOR */
+	ipw2100_wx_set_powermode,
+	ipw2100_wx_get_powermode,
+	ipw2100_wx_set_preamble,
+	ipw2100_wx_get_preamble,
+#ifdef CONFIG_IPW2100_MONITOR
+	ipw2100_wx_set_crc_check,
+	ipw2100_wx_get_crc_check,
+#else				/* CONFIG_IPW2100_MONITOR */
+	NULL,
+	NULL,
+#endif				/* CONFIG_IPW2100_MONITOR */
+};
+
+/*
+ * Get wireless statistics.
+ * Called by /proc/net/wireless
+ * Also called by SIOCGIWSTATS
+ */
+static struct iw_statistics *ipw2100_wx_wireless_stats(struct net_device *dev)
+{
+	enum {
+		POOR = 30,
+		FAIR = 60,
+		GOOD = 80,
+		VERY_GOOD = 90,
+		EXCELLENT = 95,
+		PERFECT = 100
+	};
+	int rssi_qual;
+	int tx_qual;
+	int beacon_qual;
+	int quality;
+
+	struct ipw2100_priv *priv = libipw_priv(dev);
+	struct iw_statistics *wstats;
+	u32 rssi, tx_retries, missed_beacons, tx_failures;
+	u32 ord_len = sizeof(u32);
+
+	if (!priv)
+		return (struct iw_statistics *)NULL;
+
+	wstats = &priv->wstats;
+
+	/* if hw is disabled, then ipw2100_get_ordinal() can't be called.
+	 * ipw2100_wx_wireless_stats seems to be called before fw is
+	 * initialized.  STATUS_ASSOCIATED will only be set if the hw is up
+	 * and associated; if not associated, the values are all meaningless
+	 * anyway, so zero them and mark the qualities INVALID */
+	if (!(priv->status & STATUS_ASSOCIATED)) {
+		wstats->miss.beacon = 0;
+		wstats->discard.retries = 0;
+		wstats->qual.qual = 0;
+		wstats->qual.level = 0;
+		wstats->qual.noise = 0;
+		wstats->qual.updated = 7;
+		wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
+		    IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
+		return wstats;
+	}
+
+	if (ipw2100_get_ordinal(priv, IPW_ORD_STAT_PERCENT_MISSED_BCNS,
+				&missed_beacons, &ord_len))
+		goto fail_get_ordinal;
+
+	/* If we don't have a connection, the quality and level are 0 */
+	if (!(priv->status & STATUS_ASSOCIATED)) {
+		wstats->qual.qual = 0;
+		wstats->qual.level = 0;
+	} else {
+		if (ipw2100_get_ordinal(priv, IPW_ORD_RSSI_AVG_CURR,
+					&rssi, &ord_len))
+			goto fail_get_ordinal;
+		wstats->qual.level = rssi + IPW2100_RSSI_TO_DBM;
+		if (rssi < 10)
+			rssi_qual = rssi * POOR / 10;
+		else if (rssi < 15)
+			rssi_qual = (rssi - 10) * (FAIR - POOR) / 5 + POOR;
+		else if (rssi < 20)
+			rssi_qual = (rssi - 15) * (GOOD - FAIR) / 5 + FAIR;
+		else if (rssi < 30)
+			rssi_qual = (rssi - 20) * (VERY_GOOD - GOOD) /
+			    10 + GOOD;
+		else
+			rssi_qual = (rssi - 30) * (PERFECT - VERY_GOOD) /
+			    10 + VERY_GOOD;
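+		/* Worked example of the mapping above: an RSSI of 17 falls in
+		 * the 15..19 bucket, so rssi_qual becomes
+		 * (17 - 15) * (GOOD - FAIR) / 5 + FAIR = 2 * 20 / 5 + 60 = 68 */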
+
+		if (ipw2100_get_ordinal(priv, IPW_ORD_STAT_PERCENT_RETRIES,
+					&tx_retries, &ord_len))
+			goto fail_get_ordinal;
+
+		if (tx_retries > 75)
+			tx_qual = (90 - tx_retries) * POOR / 15;
+		else if (tx_retries > 70)
+			tx_qual = (75 - tx_retries) * (FAIR - POOR) / 5 + POOR;
+		else if (tx_retries > 65)
+			tx_qual = (70 - tx_retries) * (GOOD - FAIR) / 5 + FAIR;
+		else if (tx_retries > 50)
+			tx_qual = (65 - tx_retries) * (VERY_GOOD - GOOD) /
+			    15 + GOOD;
+		else
+			tx_qual = (50 - tx_retries) *
+			    (PERFECT - VERY_GOOD) / 50 + VERY_GOOD;
+
+		if (missed_beacons > 50)
+			beacon_qual = (60 - missed_beacons) * POOR / 10;
+		else if (missed_beacons > 40)
+			beacon_qual = (50 - missed_beacons) * (FAIR - POOR) /
+			    10 + POOR;
+		else if (missed_beacons > 32)
+			beacon_qual = (40 - missed_beacons) * (GOOD - FAIR) /
+			    18 + FAIR;
+		else if (missed_beacons > 20)
+			beacon_qual = (32 - missed_beacons) *
+			    (VERY_GOOD - GOOD) / 20 + GOOD;
+		else
+			beacon_qual = (20 - missed_beacons) *
+			    (PERFECT - VERY_GOOD) / 20 + VERY_GOOD;
+
+		quality = min(tx_qual, rssi_qual);
+		quality = min(beacon_qual, quality);
+
+#ifdef CONFIG_IPW2100_DEBUG
+		if (beacon_qual == quality)
+			IPW_DEBUG_WX("Quality clamped by Missed Beacons\n");
+		else if (tx_qual == quality)
+			IPW_DEBUG_WX("Quality clamped by Tx Retries\n");
+		else if (quality != 100)
+			IPW_DEBUG_WX("Quality clamped by Signal Strength\n");
+		else
+			IPW_DEBUG_WX("Quality not clamped.\n");
+#endif
+
+		wstats->qual.qual = quality;
+		wstats->qual.level = rssi + IPW2100_RSSI_TO_DBM;
+	}
+
+	wstats->qual.noise = 0;
+	wstats->qual.updated = 7;
+	wstats->qual.updated |= IW_QUAL_NOISE_INVALID;
+
+	/* FIXME: this is percent and not a # */
+	wstats->miss.beacon = missed_beacons;
+
+	if (ipw2100_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURES,
+				&tx_failures, &ord_len))
+		goto fail_get_ordinal;
+	wstats->discard.retries = tx_failures;
+
+	return wstats;
+
+      fail_get_ordinal:
+	IPW_DEBUG_WX("failed querying ordinals.\n");
+
+	return (struct iw_statistics *)NULL;
+}
+
+static const struct iw_handler_def ipw2100_wx_handler_def = {
+	.standard = ipw2100_wx_handlers,
+	.num_standard = ARRAY_SIZE(ipw2100_wx_handlers),
+	.num_private = ARRAY_SIZE(ipw2100_private_handler),
+	.num_private_args = ARRAY_SIZE(ipw2100_private_args),
+	.private = (iw_handler *) ipw2100_private_handler,
+	.private_args = (struct iw_priv_args *)ipw2100_private_args,
+	.get_wireless_stats = ipw2100_wx_wireless_stats,
+};
+
+static void ipw2100_wx_event_work(struct work_struct *work)
+{
+	struct ipw2100_priv *priv =
+		container_of(work, struct ipw2100_priv, wx_event_work.work);
+	union iwreq_data wrqu;
+	unsigned int len = ETH_ALEN;
+
+	if (priv->status & STATUS_STOPPING)
+		return;
+
+	mutex_lock(&priv->action_mutex);
+
+	IPW_DEBUG_WX("enter\n");
+
+	mutex_unlock(&priv->action_mutex);
+
+	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+
+	/* Fetch BSSID from the hardware */
+	if (!(priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) ||
+	    priv->status & STATUS_RF_KILL_MASK ||
+	    ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID,
+				&priv->bssid, &len)) {
+		eth_zero_addr(wrqu.ap_addr.sa_data);
+	} else {
+		/* We now have the BSSID, so can finish setting to the full
+		 * associated state */
+		memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
+		memcpy(priv->ieee->bssid, priv->bssid, ETH_ALEN);
+		priv->status &= ~STATUS_ASSOCIATING;
+		priv->status |= STATUS_ASSOCIATED;
+		netif_carrier_on(priv->net_dev);
+		netif_wake_queue(priv->net_dev);
+	}
+
+	if (!(priv->status & STATUS_ASSOCIATED)) {
+		IPW_DEBUG_WX("Configuring ESSID\n");
+		mutex_lock(&priv->action_mutex);
+		/* This is a disassociation event, so kick the firmware to
+		 * look for another AP */
+		if (priv->config & CFG_STATIC_ESSID)
+			ipw2100_set_essid(priv, priv->essid, priv->essid_len,
+					  0);
+		else
+			ipw2100_set_essid(priv, NULL, 0, 0);
+		mutex_unlock(&priv->action_mutex);
+	}
+
+	wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
+}
+
+#define IPW2100_FW_MAJOR_VERSION 1
+#define IPW2100_FW_MINOR_VERSION 3
+
+#define IPW2100_FW_MINOR(x) (((x) >> 8) & 0xff)
+#define IPW2100_FW_MAJOR(x) ((x) & 0xff)
+
+#define IPW2100_FW_VERSION ((IPW2100_FW_MINOR_VERSION << 8) | \
+                             IPW2100_FW_MAJOR_VERSION)
+
+#define IPW2100_FW_PREFIX "ipw2100-" __stringify(IPW2100_FW_MAJOR_VERSION) \
+"." __stringify(IPW2100_FW_MINOR_VERSION)
+
+#define IPW2100_FW_NAME(x) IPW2100_FW_PREFIX "" x ".fw"
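+/* With the version constants above, IPW2100_FW_NAME("") expands to
+ * "ipw2100-1.3.fw" and IPW2100_FW_NAME("-i") to "ipw2100-1.3-i.fw" */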
+
+/*
+
+BINARY FIRMWARE HEADER FORMAT
+
+offset       length   desc
+0x0          2        version
+0x2          2        mode == 0:BSS, 1:IBSS, 2:MONITOR
+0x4          4        fw_len
+0x8          4        uc_len
+0xC          fw_len   firmware data
+0xC+fw_len   uc_len   microcode data
+
+*/
+
+struct ipw2100_fw_header {
+	short version;
+	short mode;
+	unsigned int fw_size;
+	unsigned int uc_size;
+} __packed;
+
+static int ipw2100_mod_firmware_load(struct ipw2100_fw *fw)
+{
+	struct ipw2100_fw_header *h =
+	    (struct ipw2100_fw_header *)fw->fw_entry->data;
+
+	if (IPW2100_FW_MAJOR(h->version) != IPW2100_FW_MAJOR_VERSION) {
+		printk(KERN_WARNING DRV_NAME ": Firmware image not compatible "
+		       "(detected version id of %u). "
+		       "See Documentation/networking/README.ipw2100\n",
+		       h->version);
+		return 1;
+	}
+
+	fw->version = h->version;
+	fw->fw.data = fw->fw_entry->data + sizeof(struct ipw2100_fw_header);
+	fw->fw.size = h->fw_size;
+	fw->uc.data = fw->fw.data + h->fw_size;
+	fw->uc.size = h->uc_size;
+
+	return 0;
+}
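+/* Note that ipw2100_mod_firmware_load() trusts the sizes in the header; a
+ * sketch of the kind of bounds check a caller could add against a truncated
+ * image (not something this driver currently does):
+ *
+ *	struct ipw2100_fw_header *h = (void *)fw->fw_entry->data;
+ *
+ *	if (fw->fw_entry->size < sizeof(*h) + h->fw_size + h->uc_size)
+ *		return -EINVAL;
+ */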
+
+static int ipw2100_get_firmware(struct ipw2100_priv *priv,
+				struct ipw2100_fw *fw)
+{
+	char *fw_name;
+	int rc;
+
+	IPW_DEBUG_INFO("%s: Using hotplug firmware load.\n",
+		       priv->net_dev->name);
+
+	switch (priv->ieee->iw_mode) {
+	case IW_MODE_ADHOC:
+		fw_name = IPW2100_FW_NAME("-i");
+		break;
+#ifdef CONFIG_IPW2100_MONITOR
+	case IW_MODE_MONITOR:
+		fw_name = IPW2100_FW_NAME("-p");
+		break;
+#endif
+	case IW_MODE_INFRA:
+	default:
+		fw_name = IPW2100_FW_NAME("");
+		break;
+	}
+
+	rc = request_firmware(&fw->fw_entry, fw_name, &priv->pci_dev->dev);
+
+	if (rc < 0) {
+		printk(KERN_ERR DRV_NAME ": "
+		       "%s: Firmware '%s' not available or load failed.\n",
+		       priv->net_dev->name, fw_name);
+		return rc;
+	}
+	IPW_DEBUG_INFO("firmware data %p size %zd\n", fw->fw_entry->data,
+		       fw->fw_entry->size);
+
+	ipw2100_mod_firmware_load(fw);
+
+	return 0;
+}
+
+MODULE_FIRMWARE(IPW2100_FW_NAME("-i"));
+#ifdef CONFIG_IPW2100_MONITOR
+MODULE_FIRMWARE(IPW2100_FW_NAME("-p"));
+#endif
+MODULE_FIRMWARE(IPW2100_FW_NAME(""));
+
+static void ipw2100_release_firmware(struct ipw2100_priv *priv,
+				     struct ipw2100_fw *fw)
+{
+	fw->version = 0;
+	release_firmware(fw->fw_entry);
+	fw->fw_entry = NULL;
+}
+
+static int ipw2100_get_fwversion(struct ipw2100_priv *priv, char *buf,
+				 size_t max)
+{
+	char ver[MAX_FW_VERSION_LEN];
+	u32 len = MAX_FW_VERSION_LEN;
+	u32 tmp;
+	int i;
+	/* firmware version is an ascii string (max len of 14) */
+	if (ipw2100_get_ordinal(priv, IPW_ORD_STAT_FW_VER_NUM, ver, &len))
+		return -EIO;
+	tmp = max;
+	if (len >= max)
+		len = max - 1;
+	for (i = 0; i < len; i++)
+		buf[i] = ver[i];
+	buf[i] = '\0';
+	return tmp;
+}
+
+static int ipw2100_get_ucodeversion(struct ipw2100_priv *priv, char *buf,
+				    size_t max)
+{
+	u32 ver;
+	u32 len = sizeof(ver);
+	/* microcode version is a 32 bit integer */
+	if (ipw2100_get_ordinal(priv, IPW_ORD_UCODE_VERSION, &ver, &len))
+		return -EIO;
+	return snprintf(buf, max, "%08X", ver);
+}
+
+/*
+ * On exit, the firmware will have been freed from the fw list
+ */
+static int ipw2100_fw_download(struct ipw2100_priv *priv, struct ipw2100_fw *fw)
+{
+	/* firmware is constructed of N contiguous entries, each entry is
+	 * structured as:
+	 *
+	 * offset    size        desc
+	 * 0         4           address to write to
+	 * 4         2           length of data run
+	 * 6         length      data
+	 */
+	unsigned int addr;
+	unsigned short len;
+
+	const unsigned char *firmware_data = fw->fw.data;
+	unsigned int firmware_data_left = fw->fw.size;
+
+	while (firmware_data_left > 0) {
+		addr = *(u32 *) (firmware_data);
+		firmware_data += 4;
+		firmware_data_left -= 4;
+
+		len = *(u16 *) (firmware_data);
+		firmware_data += 2;
+		firmware_data_left -= 2;
+
+		if (len > 32) {
+			printk(KERN_ERR DRV_NAME ": "
+			       "Invalid firmware run-length of %d bytes\n",
+			       len);
+			return -EINVAL;
+		}
+
+		write_nic_memory(priv->net_dev, addr, len, firmware_data);
+		firmware_data += len;
+		firmware_data_left -= len;
+	}
+
+	return 0;
+}
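+/* For illustration, a single entry in the firmware image might look like
+ * this on disk (little-endian; the values are made up):
+ *
+ *	02 03 30 00	write to address 0x00300302
+ *	04 00		4 bytes of data follow
+ *	de ad be ef	payload handed to write_nic_memory()
+ */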
+
+struct symbol_alive_response {
+	u8 cmd_id;
+	u8 seq_num;
+	u8 ucode_rev;
+	u8 eeprom_valid;
+	u16 valid_flags;
+	u8 IEEE_addr[6];
+	u16 flags;
+	u16 pcb_rev;
+	u16 clock_settle_time;	// 1us LSB
+	u16 powerup_settle_time;	// 1us LSB
+	u16 hop_settle_time;	// 1us LSB
+	u8 date[3];		// month, day, year
+	u8 time[2];		// hours, minutes
+	u8 ucode_valid;
+};
+
+static int ipw2100_ucode_download(struct ipw2100_priv *priv,
+				  struct ipw2100_fw *fw)
+{
+	struct net_device *dev = priv->net_dev;
+	const unsigned char *microcode_data = fw->uc.data;
+	unsigned int microcode_data_left = fw->uc.size;
+	void __iomem *reg = priv->ioaddr;
+
+	struct symbol_alive_response response;
+	int i, j;
+	u8 data;
+
+	/* Symbol control */
+	write_nic_word(dev, IPW2100_CONTROL_REG, 0x703);
+	readl(reg);
+	write_nic_word(dev, IPW2100_CONTROL_REG, 0x707);
+	readl(reg);
+
+	/* HW config */
+	write_nic_byte(dev, 0x210014, 0x72);	/* fifo width =16 */
+	readl(reg);
+	write_nic_byte(dev, 0x210014, 0x72);	/* fifo width =16 */
+	readl(reg);
+
+	/* EN_CS_ACCESS bit to reset control store pointer */
+	write_nic_byte(dev, 0x210000, 0x40);
+	readl(reg);
+	write_nic_byte(dev, 0x210000, 0x0);
+	readl(reg);
+	write_nic_byte(dev, 0x210000, 0x40);
+	readl(reg);
+
+	/* copy microcode from buffer into Symbol */
+
+	while (microcode_data_left > 0) {
+		write_nic_byte(dev, 0x210010, *microcode_data++);
+		write_nic_byte(dev, 0x210010, *microcode_data++);
+		microcode_data_left -= 2;
+	}
+
+	/* EN_CS_ACCESS bit to reset the control store pointer */
+	write_nic_byte(dev, 0x210000, 0x0);
+	readl(reg);
+
+	/* Enable System (Reg 0)
+	 * first enable causes garbage in RX FIFO */
+	write_nic_byte(dev, 0x210000, 0x0);
+	readl(reg);
+	write_nic_byte(dev, 0x210000, 0x80);
+	readl(reg);
+
+	/* Reset External Baseband Reg */
+	write_nic_word(dev, IPW2100_CONTROL_REG, 0x703);
+	readl(reg);
+	write_nic_word(dev, IPW2100_CONTROL_REG, 0x707);
+	readl(reg);
+
+	/* HW Config (Reg 5) */
+	write_nic_byte(dev, 0x210014, 0x72);	// fifo width =16
+	readl(reg);
+	write_nic_byte(dev, 0x210014, 0x72);	// fifo width =16
+	readl(reg);
+
+	/* Enable System (Reg 0)
+	 * second enable should be OK */
+	write_nic_byte(dev, 0x210000, 0x00);	// clear enable system
+	readl(reg);
+	write_nic_byte(dev, 0x210000, 0x80);	// set enable system
+
+	/* check Symbol is enabled - upped this from 5 as it wasn't always
+	 * catching the update */
+	for (i = 0; i < 10; i++) {
+		udelay(10);
+
+		/* check Dino is enabled bit */
+		read_nic_byte(dev, 0x210000, &data);
+		if (data & 0x1)
+			break;
+	}
+
+	if (i == 10) {
+		printk(KERN_ERR DRV_NAME ": %s: Error initializing Symbol\n",
+		       dev->name);
+		return -EIO;
+	}
+
+	/* Get Symbol alive response */
+	for (i = 0; i < 30; i++) {
+		/* Read alive response structure */
+		for (j = 0;
+		     j < (sizeof(struct symbol_alive_response) >> 1); j++)
+			read_nic_word(dev, 0x210004, ((u16 *) & response) + j);
+
+		if ((response.cmd_id == 1) && (response.ucode_valid == 0x1))
+			break;
+		udelay(10);
+	}
+
+	if (i == 30) {
+		printk(KERN_ERR DRV_NAME
+		       ": %s: No response from Symbol - hw not alive\n",
+		       dev->name);
+		printk_buf(IPW_DL_ERROR, (u8 *) & response, sizeof(response));
+		return -EIO;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.h b/drivers/net/wireless/intel/ipw2x00/ipw2100.h
new file mode 100644
index 0000000..8c11c7f
--- /dev/null
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.h
@@ -0,0 +1,1156 @@
+/******************************************************************************
+
+  Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+
+  The full GNU General Public License is included in this distribution in the
+  file called LICENSE.
+
+  Contact Information:
+  Intel Linux Wireless <ilw@linux.intel.com>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+******************************************************************************/
+#ifndef _IPW2100_H
+#define _IPW2100_H
+
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/skbuff.h>
+#include <asm/io.h>
+#include <linux/socket.h>
+#include <linux/if_arp.h>
+#include <linux/wireless.h>
+#include <net/iw_handler.h>	// new driver API
+
+#ifdef CONFIG_IPW2100_MONITOR
+#include <net/ieee80211_radiotap.h>
+#endif
+
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+
+#include "libipw.h"
+
+struct ipw2100_priv;
+struct ipw2100_tx_packet;
+struct ipw2100_rx_packet;
+
+#define IPW_DL_UNINIT    0x80000000
+#define IPW_DL_NONE      0x00000000
+#define IPW_DL_ALL       0x7FFFFFFF
+
+/*
+ * To use the debug system;
+ *
+ * If you are defining a new debug classification, simply add it to the #define
+ * list here in the form of:
+ *
+ * #define IPW_DL_xxxx VALUE
+ *
+ * shifting the value to the left one bit from the previous entry.  xxxx
+ * should be the name of the classification (for example, WEP)
+ *
+ * You then need to either add an IPW_DEBUG_xxxx() macro definition for your
+ * classification, or use IPW_DEBUG(IPW_DL_xxxx, ...) whenever you want
+ * to send output to that classification.
+ *
+ * To add your debug level to the list of levels seen when you perform
+ *
+ * % cat /proc/net/ipw2100/debug_level
+ *
+ * you simply need to add your entry to the ipw2100_debug_levels array.
+ *
+ * If you do not see debug_level in /proc/net/ipw2100, then you do not have
+ * CONFIG_IPW2100_DEBUG defined in your kernel configuration.
+ *
+ */
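+/* For example (illustrative only -- pick a bit that does not collide with
+ * the levels below):
+ *
+ *	#define IPW_DL_CRYPTO (1<<29)
+ *	#define IPW_DEBUG_CRYPTO(f...) IPW_DEBUG(IPW_DL_CRYPTO, ## f)
+ */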
+
+#define IPW_DL_ERROR         (1<<0)
+#define IPW_DL_WARNING       (1<<1)
+#define IPW_DL_INFO          (1<<2)
+#define IPW_DL_WX            (1<<3)
+#define IPW_DL_HC            (1<<5)
+#define IPW_DL_STATE         (1<<6)
+
+#define IPW_DL_NOTIF         (1<<10)
+#define IPW_DL_SCAN          (1<<11)
+#define IPW_DL_ASSOC         (1<<12)
+#define IPW_DL_DROP          (1<<13)
+
+#define IPW_DL_IOCTL         (1<<14)
+#define IPW_DL_RF_KILL       (1<<17)
+
+#define IPW_DL_MANAGE        (1<<15)
+#define IPW_DL_FW            (1<<16)
+
+#define IPW_DL_FRAG          (1<<21)
+#define IPW_DL_WEP           (1<<22)
+#define IPW_DL_TX            (1<<23)
+#define IPW_DL_RX            (1<<24)
+#define IPW_DL_ISR           (1<<25)
+#define IPW_DL_IO            (1<<26)
+#define IPW_DL_TRACE         (1<<28)
+
+#define IPW_DEBUG_ERROR(f, a...) printk(KERN_ERR DRV_NAME ": " f, ## a)
+#define IPW_DEBUG_WARNING(f, a...) printk(KERN_WARNING DRV_NAME ": " f, ## a)
+#define IPW_DEBUG_INFO(f...)    IPW_DEBUG(IPW_DL_INFO, ## f)
+#define IPW_DEBUG_WX(f...)     IPW_DEBUG(IPW_DL_WX, ## f)
+#define IPW_DEBUG_SCAN(f...)   IPW_DEBUG(IPW_DL_SCAN, ## f)
+#define IPW_DEBUG_NOTIF(f...) IPW_DEBUG(IPW_DL_NOTIF, ## f)
+#define IPW_DEBUG_TRACE(f...)  IPW_DEBUG(IPW_DL_TRACE, ## f)
+#define IPW_DEBUG_RX(f...)     IPW_DEBUG(IPW_DL_RX, ## f)
+#define IPW_DEBUG_TX(f...)     IPW_DEBUG(IPW_DL_TX, ## f)
+#define IPW_DEBUG_ISR(f...)    IPW_DEBUG(IPW_DL_ISR, ## f)
+#define IPW_DEBUG_MANAGEMENT(f...) IPW_DEBUG(IPW_DL_MANAGE, ## f)
+#define IPW_DEBUG_WEP(f...)    IPW_DEBUG(IPW_DL_WEP, ## f)
+#define IPW_DEBUG_HC(f...) IPW_DEBUG(IPW_DL_HC, ## f)
+#define IPW_DEBUG_FRAG(f...) IPW_DEBUG(IPW_DL_FRAG, ## f)
+#define IPW_DEBUG_FW(f...) IPW_DEBUG(IPW_DL_FW, ## f)
+#define IPW_DEBUG_RF_KILL(f...) IPW_DEBUG(IPW_DL_RF_KILL, ## f)
+#define IPW_DEBUG_DROP(f...) IPW_DEBUG(IPW_DL_DROP, ## f)
+#define IPW_DEBUG_IO(f...) IPW_DEBUG(IPW_DL_IO, ## f)
+#define IPW_DEBUG_IOCTL(f...) IPW_DEBUG(IPW_DL_IOCTL, ## f)
+#define IPW_DEBUG_STATE(f, a...) IPW_DEBUG(IPW_DL_STATE | IPW_DL_ASSOC | IPW_DL_INFO, f, ## a)
+#define IPW_DEBUG_ASSOC(f, a...) IPW_DEBUG(IPW_DL_ASSOC | IPW_DL_INFO, f, ## a)
+
+enum {
+	IPW_HW_STATE_DISABLED = 1,
+	IPW_HW_STATE_ENABLED = 0
+};
+
+extern const char *port_type_str[];
+extern const char *band_str[];
+
+#define NUMBER_OF_BD_PER_COMMAND_PACKET		1
+#define NUMBER_OF_BD_PER_DATA_PACKET		2
+
+#define IPW_MAX_BDS 6
+#define NUMBER_OF_OVERHEAD_BDS_PER_PACKETR	2
+#define NUMBER_OF_BDS_TO_LEAVE_FOR_COMMANDS	1
+
+#define REQUIRED_SPACE_IN_RING_FOR_COMMAND_PACKET \
+    (IPW_BD_QUEUE_W_R_MIN_SPARE + NUMBER_OF_BD_PER_COMMAND_PACKET)
+
+struct bd_status {
+	union {
+		struct {
+			u8 nlf:1, txType:2, intEnabled:1, reserved:4;
+		} fields;
+		u8 field;
+	} info;
+} __packed;
+
+struct ipw2100_bd {
+	u32 host_addr;
+	u32 buf_length;
+	struct bd_status status;
+	/* number of fragments for frame (should be set only for
+	 * 1st TBD) */
+	u8 num_fragments;
+	u8 reserved[6];
+} __packed;
+
+#define IPW_BD_QUEUE_LENGTH(n) (1<<n)
+#define IPW_BD_ALIGNMENT(L)    (L*sizeof(struct ipw2100_bd))
+
+#define IPW_BD_STATUS_TX_FRAME_802_3             0x00
+#define IPW_BD_STATUS_TX_FRAME_NOT_LAST_FRAGMENT 0x01
+#define IPW_BD_STATUS_TX_FRAME_COMMAND		 0x02
+#define IPW_BD_STATUS_TX_FRAME_802_11	         0x04
+#define IPW_BD_STATUS_TX_INTERRUPT_ENABLE	 0x08
+
+struct ipw2100_bd_queue {
+	/* driver (virtual) pointer to queue */
+	struct ipw2100_bd *drv;
+
+	/* firmware (physical) pointer to queue */
+	dma_addr_t nic;
+
+	/* Length of phy memory allocated for BDs */
+	u32 size;
+
+	/* Number of BDs in queue (and in array) */
+	u32 entries;
+
+	/* Number of available BDs (invalid for NIC BDs) */
+	u32 available;
+
+	/* Offset of oldest used BD in array (next one to
+	 * check for completion) */
+	u32 oldest;
+
+	/* Offset of next available (unused) BD */
+	u32 next;
+};
+
+#define RX_QUEUE_LENGTH 256
+#define TX_QUEUE_LENGTH 256
+#define HW_QUEUE_LENGTH 256
+
+#define TX_PENDED_QUEUE_LENGTH (TX_QUEUE_LENGTH / NUMBER_OF_BD_PER_DATA_PACKET)
+
+#define STATUS_TYPE_MASK	0x0000000f
+#define COMMAND_STATUS_VAL	0
+#define STATUS_CHANGE_VAL	1
+#define P80211_DATA_VAL 	2
+#define P8023_DATA_VAL		3
+#define HOST_NOTIFICATION_VAL	4
+
+#define IPW2100_RSSI_TO_DBM (-98)
+
+struct ipw2100_status {
+	u32 frame_size;
+	u16 status_fields;
+	u8 flags;
+#define IPW_STATUS_FLAG_DECRYPTED	(1<<0)
+#define IPW_STATUS_FLAG_WEP_ENCRYPTED	(1<<1)
+#define IPW_STATUS_FLAG_CRC_ERROR       (1<<2)
+	u8 rssi;
+} __packed;
+
+struct ipw2100_status_queue {
+	/* driver (virtual) pointer to queue */
+	struct ipw2100_status *drv;
+
+	/* firmware (physical) pointer to queue */
+	dma_addr_t nic;
+
+	/* Length of phy memory allocated for BDs */
+	u32 size;
+};
+
+#define HOST_COMMAND_PARAMS_REG_LEN	100
+#define CMD_STATUS_PARAMS_REG_LEN 	3
+
+#define IPW_WPA_CAPABILITIES   0x1
+#define IPW_WPA_LISTENINTERVAL 0x2
+#define IPW_WPA_AP_ADDRESS     0x4
+
+#define IPW_MAX_VAR_IE_LEN ((HOST_COMMAND_PARAMS_REG_LEN - 4) * sizeof(u32))
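+/* i.e. (100 - 4) * sizeof(u32) = 384 bytes of variable IE space */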
+
+struct ipw2100_wpa_assoc_frame {
+	u16 fixed_ie_mask;
+	struct {
+		u16 capab_info;
+		u16 listen_interval;
+		u8 current_ap[ETH_ALEN];
+	} fixed_ies;
+	u32 var_ie_len;
+	u8 var_ie[IPW_MAX_VAR_IE_LEN];
+};
+
+#define IPW_BSS     1
+#define IPW_MONITOR 2
+#define IPW_IBSS    3
+
+/*
+ * struct ipw2100_cmd_header - H/W command structure sent to the firmware
+ */
+struct ipw2100_cmd_header {
+	u32 host_command_reg;
+	u32 host_command_reg1;
+	u32 sequence;
+	u32 host_command_len_reg;
+	u32 host_command_params_reg[HOST_COMMAND_PARAMS_REG_LEN];
+	u32 cmd_status_reg;
+	u32 cmd_status_params_reg[CMD_STATUS_PARAMS_REG_LEN];
+	u32 rxq_base_ptr;
+	u32 rxq_next_ptr;
+	u32 rxq_host_ptr;
+	u32 txq_base_ptr;
+	u32 txq_next_ptr;
+	u32 txq_host_ptr;
+	u32 tx_status_reg;
+	u32 reserved;
+	u32 status_change_reg;
+	u32 reserved1[3];
+	u32 *ordinal1_ptr;
+	u32 *ordinal2_ptr;
+} __packed;
+
+struct ipw2100_data_header {
+	u32 host_command_reg;
+	u32 host_command_reg1;
+	u8 encrypted;		// BOOLEAN in win! TRUE if frame is encrypted by driver
+	u8 needs_encryption;	// BOOLEAN in win! TRUE if frame needs to be encrypted in NIC
+	u8 wep_index;		// 0 no key, 1-4 key index, 0xff immediate key
+	u8 key_size;		// 0 no imm key, 0x5 64bit encr, 0xd 128bit encr, 0x10 128bit encr and 128bit IV
+	u8 key[16];
+	u8 reserved[10];	// f/w reserved
+	u8 src_addr[ETH_ALEN];
+	u8 dst_addr[ETH_ALEN];
+	u16 fragment_size;
+} __packed;
+
+/* Host command data structure */
+struct host_command {
+	u32 host_command;	// COMMAND ID
+	u32 host_command1;	// COMMAND ID
+	u32 host_command_sequence;	// UNIQUE COMMAND NUMBER (ID)
+	u32 host_command_length;	// LENGTH
+	u32 host_command_parameters[HOST_COMMAND_PARAMS_REG_LEN];	// COMMAND PARAMETERS
+} __packed;
+
+typedef enum {
+	POWER_ON_RESET,
+	EXIT_POWER_DOWN_RESET,
+	SW_RESET,
+	EEPROM_RW,
+	SW_RE_INIT
+} ipw2100_reset_event;
+
+enum {
+	COMMAND = 0xCAFE,
+	DATA,
+	RX
+};
+
+struct ipw2100_tx_packet {
+	int type;
+	int index;
+	union {
+		struct {	/* COMMAND */
+			struct ipw2100_cmd_header *cmd;
+			dma_addr_t cmd_phys;
+		} c_struct;
+		struct {	/* DATA */
+			struct ipw2100_data_header *data;
+			dma_addr_t data_phys;
+			struct libipw_txb *txb;
+		} d_struct;
+	} info;
+	int jiffy_start;
+
+	struct list_head list;
+};
+
+struct ipw2100_rx_packet {
+	struct ipw2100_rx *rxp;
+	dma_addr_t dma_addr;
+	int jiffy_start;
+	struct sk_buff *skb;
+	struct list_head list;
+};
+
+#define FRAG_DISABLED             (1<<31)
+#define RTS_DISABLED              (1<<31)
+#define MAX_RTS_THRESHOLD         2304U
+#define MIN_RTS_THRESHOLD         1U
+#define DEFAULT_RTS_THRESHOLD     1000U
+
+#define DEFAULT_BEACON_INTERVAL   100U
+#define	DEFAULT_SHORT_RETRY_LIMIT 7U
+#define	DEFAULT_LONG_RETRY_LIMIT  4U
+
+struct ipw2100_ordinals {
+	u32 table1_addr;
+	u32 table2_addr;
+	u32 table1_size;
+	u32 table2_size;
+};
+
+/* Host Notification header */
+struct ipw2100_notification {
+	u32 hnhdr_subtype;	/* type of host notification */
+	u32 hnhdr_size;		/* size in bytes of data
+				   or number of entries, if table.
+				   Does NOT include header */
+} __packed;
+
+#define MAX_KEY_SIZE	16
+#define	MAX_KEYS	8
+
+#define IPW2100_WEP_ENABLE     (1<<1)
+#define IPW2100_WEP_DROP_CLEAR (1<<2)
+
+#define IPW_NONE_CIPHER   (1<<0)
+#define IPW_WEP40_CIPHER  (1<<1)
+#define IPW_TKIP_CIPHER   (1<<2)
+#define IPW_CCMP_CIPHER   (1<<4)
+#define IPW_WEP104_CIPHER (1<<5)
+#define IPW_CKIP_CIPHER   (1<<6)
+
+#define	IPW_AUTH_OPEN     	0
+#define	IPW_AUTH_SHARED   	1
+#define IPW_AUTH_LEAP	  	2
+#define IPW_AUTH_LEAP_CISCO_ID	0x80
+
+struct statistic {
+	int value;
+	int hi;
+	int lo;
+};
+
+#define INIT_STAT(x) do {  \
+  (x)->value = (x)->hi = 0; \
+  (x)->lo = 0x7fffffff; \
+} while (0)
+#define SET_STAT(x,y) do { \
+  (x)->value = y; \
+  if ((x)->value > (x)->hi) (x)->hi = (x)->value; \
+  if ((x)->value < (x)->lo) (x)->lo = (x)->value; \
+} while (0)
+#define INC_STAT(x) do { if (++(x)->value > (x)->hi) (x)->hi = (x)->value; } \
+while (0)
+#define DEC_STAT(x) do { if (--(x)->value < (x)->lo) (x)->lo = (x)->value; } \
+while (0)
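+/*
+ * Typical usage (illustrative; the statistics named here are declared in
+ * struct ipw2100_priv below):
+ *
+ *	INIT_STAT(&priv->txq_stat);
+ *	SET_STAT(&priv->txq_stat, priv->tx_queue.available);
+ *	INC_STAT(&priv->msg_free_stat);
+ *	DEC_STAT(&priv->msg_pend_stat);
+ */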
+
+#define IPW2100_ERROR_QUEUE 5
+
+/* Power management code: enable or disable? */
+enum {
+#ifdef CONFIG_PM
+	IPW2100_PM_DISABLED = 0,
+	PM_STATE_SIZE = 16,
+#else
+	IPW2100_PM_DISABLED = 1,
+	PM_STATE_SIZE = 0,
+#endif
+};
+
+#define STATUS_POWERED          (1<<0)
+#define STATUS_CMD_ACTIVE       (1<<1)	/**< host command in progress */
+#define STATUS_RUNNING          (1<<2)	/* Card initialized, but not enabled */
+#define STATUS_ENABLED          (1<<3)	/* Card enabled -- can scan,Tx,Rx */
+#define STATUS_STOPPING         (1<<4)	/* Card is in shutdown phase */
+#define STATUS_INITIALIZED      (1<<5)	/* Card is ready for external calls */
+#define STATUS_ASSOCIATING      (1<<9)	/* Associated, but no BSSID yet */
+#define STATUS_ASSOCIATED       (1<<10)	/* Associated and BSSID valid */
+#define STATUS_INT_ENABLED      (1<<11)
+#define STATUS_RF_KILL_HW       (1<<12)
+#define STATUS_RF_KILL_SW       (1<<13)
+#define STATUS_RF_KILL_MASK     (STATUS_RF_KILL_HW | STATUS_RF_KILL_SW)
+#define STATUS_EXIT_PENDING     (1<<14)
+
+#define STATUS_SCAN_PENDING     (1<<23)
+#define STATUS_SCANNING         (1<<24)
+#define STATUS_SCAN_ABORTING    (1<<25)
+#define STATUS_SCAN_COMPLETE    (1<<26)
+#define STATUS_WX_EVENT_PENDING (1<<27)
+#define STATUS_RESET_PENDING    (1<<29)
+#define STATUS_SECURITY_UPDATED (1<<30)	/* Security sync needed */
+
+/* Internal NIC states */
+#define IPW_STATE_INITIALIZED	(1<<0)
+#define IPW_STATE_COUNTRY_FOUND	(1<<1)
+#define IPW_STATE_ASSOCIATED    (1<<2)
+#define IPW_STATE_ASSN_LOST	(1<<3)
+#define IPW_STATE_ASSN_CHANGED 	(1<<4)
+#define IPW_STATE_SCAN_COMPLETE	(1<<5)
+#define IPW_STATE_ENTERED_PSP 	(1<<6)
+#define IPW_STATE_LEFT_PSP 	(1<<7)
+#define IPW_STATE_RF_KILL       (1<<8)
+#define IPW_STATE_DISABLED	(1<<9)
+#define IPW_STATE_POWER_DOWN	(1<<10)
+#define IPW_STATE_SCANNING      (1<<11)
+
+#define CFG_STATIC_CHANNEL      (1<<0)	/* Restrict assoc. to single channel */
+#define CFG_STATIC_ESSID        (1<<1)	/* Restrict assoc. to single SSID */
+#define CFG_STATIC_BSSID        (1<<2)	/* Restrict assoc. to single BSSID */
+#define CFG_CUSTOM_MAC          (1<<3)
+#define CFG_LONG_PREAMBLE       (1<<4)
+#define CFG_ASSOCIATE           (1<<6)
+#define CFG_FIXED_RATE          (1<<7)
+#define CFG_ADHOC_CREATE        (1<<8)
+#define CFG_PASSIVE_SCAN        (1<<10)
+#ifdef CONFIG_IPW2100_MONITOR
+#define CFG_CRC_CHECK           (1<<11)
+#endif
+
+#define CAP_SHARED_KEY          (1<<0)	/* Off = OPEN */
+#define CAP_PRIVACY_ON          (1<<1)	/* Off = No privacy */
+
+struct ipw2100_priv {
+	void __iomem *ioaddr;
+
+	int stop_hang_check;	/* Set 1 when shutting down to kill hang_check */
+	int stop_rf_kill;	/* Set 1 when shutting down to kill rf_kill */
+
+	struct libipw_device *ieee;
+	unsigned long status;
+	unsigned long config;
+	unsigned long capability;
+
+	/* Statistics */
+	int resets;
+	time64_t reset_backoff;
+
+	/* Context */
+	u8 essid[IW_ESSID_MAX_SIZE];
+	u8 essid_len;
+	u8 bssid[ETH_ALEN];
+	u8 channel;
+	int last_mode;
+
+	time64_t connect_start;
+	time64_t last_reset;
+
+	u32 channel_mask;
+	u32 fatal_error;
+	u32 fatal_errors[IPW2100_ERROR_QUEUE];
+	u32 fatal_index;
+	int eeprom_version;
+	int firmware_version;
+	unsigned long hw_features;
+	int hangs;
+	u32 last_rtc;
+	int dump_raw;		/* 1 to dump raw bytes in /sys/.../memory */
+	u8 *snapshot[0x30];
+
+	u8 mandatory_bssid_mac[ETH_ALEN];
+	u8 mac_addr[ETH_ALEN];
+
+	int power_mode;
+
+	int messages_sent;
+
+	int short_retry_limit;
+	int long_retry_limit;
+
+	u32 rts_threshold;
+	u32 frag_threshold;
+
+	int in_isr;
+
+	u32 tx_rates;
+	int tx_power;
+	u32 beacon_interval;
+
+	char nick[IW_ESSID_MAX_SIZE + 1];
+
+	struct ipw2100_status_queue status_queue;
+
+	struct statistic txq_stat;
+	struct statistic rxq_stat;
+	struct ipw2100_bd_queue rx_queue;
+	struct ipw2100_bd_queue tx_queue;
+	struct ipw2100_rx_packet *rx_buffers;
+
+	struct statistic fw_pend_stat;
+	struct list_head fw_pend_list;
+
+	struct statistic msg_free_stat;
+	struct statistic msg_pend_stat;
+	struct list_head msg_free_list;
+	struct list_head msg_pend_list;
+	struct ipw2100_tx_packet *msg_buffers;
+
+	struct statistic tx_free_stat;
+	struct statistic tx_pend_stat;
+	struct list_head tx_free_list;
+	struct list_head tx_pend_list;
+	struct ipw2100_tx_packet *tx_buffers;
+
+	struct ipw2100_ordinals ordinals;
+
+	struct pci_dev *pci_dev;
+
+	struct proc_dir_entry *dir_dev;
+
+	struct net_device *net_dev;
+	struct iw_statistics wstats;
+
+	struct iw_public_data wireless_data;
+
+	struct tasklet_struct irq_tasklet;
+
+	struct delayed_work reset_work;
+	struct delayed_work security_work;
+	struct delayed_work wx_event_work;
+	struct delayed_work hang_check;
+	struct delayed_work rf_kill;
+	struct delayed_work scan_event;
+
+	int user_requested_scan;
+
+	/* Track time in suspend, using CLOCK_BOOTTIME */
+	time64_t suspend_at;
+	time64_t suspend_time;
+
+	u32 interrupts;
+	int tx_interrupts;
+	int rx_interrupts;
+	int inta_other;
+
+	spinlock_t low_lock;
+	struct mutex action_mutex;
+	struct mutex adapter_mutex;
+
+	wait_queue_head_t wait_command_queue;
+};
+
+/*********************************************************
+ * Host Command -> From Driver to FW
+ *********************************************************/
+
+/**
+ * Host command identifiers
+ */
+#define HOST_COMPLETE           2
+#define SYSTEM_CONFIG           6
+#define SSID                    8
+#define MANDATORY_BSSID         9
+#define AUTHENTICATION_TYPE    10
+#define ADAPTER_ADDRESS        11
+#define PORT_TYPE              12
+#define INTERNATIONAL_MODE     13
+#define CHANNEL                14
+#define RTS_THRESHOLD          15
+#define FRAG_THRESHOLD         16
+#define POWER_MODE             17
+#define TX_RATES               18
+#define BASIC_TX_RATES         19
+#define WEP_KEY_INFO           20
+#define WEP_KEY_INDEX          25
+#define WEP_FLAGS              26
+#define ADD_MULTICAST          27
+#define CLEAR_ALL_MULTICAST    28
+#define BEACON_INTERVAL        29
+#define ATIM_WINDOW            30
+#define CLEAR_STATISTICS       31
+#define SEND		       33
+#define TX_POWER_INDEX         36
+#define BROADCAST_SCAN         43
+#define CARD_DISABLE           44
+#define PREFERRED_BSSID        45
+#define SET_SCAN_OPTIONS       46
+#define SCAN_DWELL_TIME        47
+#define SWEEP_TABLE            48
+#define AP_OR_STATION_TABLE    49
+#define GROUP_ORDINALS         50
+#define SHORT_RETRY_LIMIT      51
+#define LONG_RETRY_LIMIT       52
+
+#define HOST_PRE_POWER_DOWN    58
+#define CARD_DISABLE_PHY_OFF   61
+#define MSDU_TX_RATES          62
+
+/* Rogue AP Detection */
+#define SET_STATION_STAT_BITS      64
+#define CLEAR_STATIONS_STAT_BITS   65
+#define LEAP_ROGUE_MODE            66	//TODO tbw replaced by CFG_LEAP_ROGUE_AP
+#define SET_SECURITY_INFORMATION   67
+#define DISASSOCIATION_BSSID	   68
+#define SET_WPA_IE                 69
+
+/* system configuration bit mask: */
+#define IPW_CFG_MONITOR               0x00004
+#define IPW_CFG_PREAMBLE_AUTO        0x00010
+#define IPW_CFG_IBSS_AUTO_START     0x00020
+#define IPW_CFG_LOOPBACK            0x00100
+#define IPW_CFG_ANSWER_BCSSID_PROBE 0x00800
+#define IPW_CFG_BT_SIDEBAND_SIGNAL	0x02000
+#define IPW_CFG_802_1x_ENABLE       0x04000
+#define IPW_CFG_BSS_MASK		0x08000
+#define IPW_CFG_IBSS_MASK		0x10000
+
+#define IPW_SCAN_NOASSOCIATE (1<<0)
+#define IPW_SCAN_MIXED_CELL (1<<1)
+/* RESERVED (1<<2) */
+#define IPW_SCAN_PASSIVE (1<<3)
+
+#define IPW_NIC_FATAL_ERROR 0x2A7F0
+#define IPW_ERROR_ADDR(x) (x & 0x3FFFF)
+#define IPW_ERROR_CODE(x) ((x & 0xFF000000) >> 24)
+#define IPW2100_ERR_C3_CORRUPTION (0x10 << 24)
+#define IPW2100_ERR_MSG_TIMEOUT   (0x11 << 24)
+#define IPW2100_ERR_FW_LOAD       (0x12 << 24)
+
+#define IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND			0x200
+#define IPW_MEM_SRAM_HOST_INTERRUPT_AREA_LOWER_BOUND  	IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x0D80
+
+#define IPW_MEM_HOST_SHARED_RX_BD_BASE                  (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x40)
+#define IPW_MEM_HOST_SHARED_RX_STATUS_BASE              (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x44)
+#define IPW_MEM_HOST_SHARED_RX_BD_SIZE                  (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x48)
+#define IPW_MEM_HOST_SHARED_RX_READ_INDEX               (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0xa0)
+
+#define IPW_MEM_HOST_SHARED_TX_QUEUE_BD_BASE          (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x00)
+#define IPW_MEM_HOST_SHARED_TX_QUEUE_BD_SIZE          (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x04)
+#define IPW_MEM_HOST_SHARED_TX_QUEUE_READ_INDEX       (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x80)
+
+#define IPW_MEM_HOST_SHARED_RX_WRITE_INDEX \
+    (IPW_MEM_SRAM_HOST_INTERRUPT_AREA_LOWER_BOUND + 0x20)
+
+#define IPW_MEM_HOST_SHARED_TX_QUEUE_WRITE_INDEX \
+    (IPW_MEM_SRAM_HOST_INTERRUPT_AREA_LOWER_BOUND)
+
+#define IPW_MEM_HOST_SHARED_ORDINALS_TABLE_1   (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x180)
+#define IPW_MEM_HOST_SHARED_ORDINALS_TABLE_2   (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x184)
+
+#define IPW2100_INTA_TX_TRANSFER               (0x00000001)	// Bit 0 (LSB)
+#define IPW2100_INTA_RX_TRANSFER               (0x00000002)	// Bit 1
+#define IPW2100_INTA_TX_COMPLETE	       (0x00000004)	// Bit 2
+#define IPW2100_INTA_EVENT_INTERRUPT           (0x00000008)	// Bit 3
+#define IPW2100_INTA_STATUS_CHANGE             (0x00000010)	// Bit 4
+#define IPW2100_INTA_BEACON_PERIOD_EXPIRED     (0x00000020)	// Bit 5
+#define IPW2100_INTA_SLAVE_MODE_HOST_COMMAND_DONE  (0x00010000)	// Bit 16
+#define IPW2100_INTA_FW_INIT_DONE              (0x01000000)	// Bit 24
+#define IPW2100_INTA_FW_CALIBRATION_CALC       (0x02000000)	// Bit 25
+#define IPW2100_INTA_FATAL_ERROR               (0x40000000)	// Bit 30
+#define IPW2100_INTA_PARITY_ERROR              (0x80000000)	// Bit 31 (MSB)
+
+#define IPW_AUX_HOST_RESET_REG_PRINCETON_RESET              (0x00000001)
+#define IPW_AUX_HOST_RESET_REG_FORCE_NMI                    (0x00000002)
+#define IPW_AUX_HOST_RESET_REG_PCI_HOST_CLUSTER_FATAL_NMI   (0x00000004)
+#define IPW_AUX_HOST_RESET_REG_CORE_FATAL_NMI               (0x00000008)
+#define IPW_AUX_HOST_RESET_REG_SW_RESET                     (0x00000080)
+#define IPW_AUX_HOST_RESET_REG_MASTER_DISABLED              (0x00000100)
+#define IPW_AUX_HOST_RESET_REG_STOP_MASTER                  (0x00000200)
+
+#define IPW_AUX_HOST_GP_CNTRL_BIT_CLOCK_READY           (0x00000001)	// Bit 0 (LSB)
+#define IPW_AUX_HOST_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY   (0x00000002)	// Bit 1
+#define IPW_AUX_HOST_GP_CNTRL_BIT_INIT_DONE             (0x00000004)	// Bit 2
+#define IPW_AUX_HOST_GP_CNTRL_BITS_SYS_CONFIG           (0x000007c0)	// Bits 6-10
+#define IPW_AUX_HOST_GP_CNTRL_BIT_BUS_TYPE              (0x00000200)	// Bit 9
+#define IPW_AUX_HOST_GP_CNTRL_BIT_BAR0_BLOCK_SIZE       (0x00000400)	// Bit 10
+#define IPW_AUX_HOST_GP_CNTRL_BIT_USB_MODE              (0x20000000)	// Bit 29
+#define IPW_AUX_HOST_GP_CNTRL_BIT_HOST_FORCES_SYS_CLK   (0x40000000)	// Bit 30
+#define IPW_AUX_HOST_GP_CNTRL_BIT_FW_FORCES_SYS_CLK     (0x80000000)	// Bit 31 (MSB)
+
+#define IPW_BIT_GPIO_GPIO1_MASK         0x0000000C
+#define IPW_BIT_GPIO_GPIO3_MASK         0x000000C0
+#define IPW_BIT_GPIO_GPIO1_ENABLE       0x00000008
+#define IPW_BIT_GPIO_RF_KILL            0x00010000
+
+#define IPW_BIT_GPIO_LED_OFF            0x00002000	// Bit 13 = 1
+
+#define IPW_REG_DOMAIN_0_OFFSET 	0x0000
+#define IPW_REG_DOMAIN_1_OFFSET 	IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND
+
+#define IPW_REG_INTA			IPW_REG_DOMAIN_0_OFFSET + 0x0008
+#define IPW_REG_INTA_MASK		IPW_REG_DOMAIN_0_OFFSET + 0x000C
+#define IPW_REG_INDIRECT_ACCESS_ADDRESS	IPW_REG_DOMAIN_0_OFFSET + 0x0010
+#define IPW_REG_INDIRECT_ACCESS_DATA	IPW_REG_DOMAIN_0_OFFSET + 0x0014
+#define IPW_REG_AUTOINCREMENT_ADDRESS	IPW_REG_DOMAIN_0_OFFSET + 0x0018
+#define IPW_REG_AUTOINCREMENT_DATA	IPW_REG_DOMAIN_0_OFFSET + 0x001C
+#define IPW_REG_RESET_REG		IPW_REG_DOMAIN_0_OFFSET + 0x0020
+#define IPW_REG_GP_CNTRL		IPW_REG_DOMAIN_0_OFFSET + 0x0024
+#define IPW_REG_GPIO			IPW_REG_DOMAIN_0_OFFSET + 0x0030
+#define IPW_REG_FW_TYPE                 IPW_REG_DOMAIN_1_OFFSET + 0x0188
+#define IPW_REG_FW_VERSION 		IPW_REG_DOMAIN_1_OFFSET + 0x018C
+#define IPW_REG_FW_COMPATIBILITY_VERSION IPW_REG_DOMAIN_1_OFFSET + 0x0190
+
+#define IPW_REG_INDIRECT_ADDR_MASK	0x00FFFFFC
+
+#define IPW_INTERRUPT_MASK		0xC1010013
+
+#define IPW2100_CONTROL_REG             0x220000
+#define IPW2100_CONTROL_PHY_OFF         0x8
+
+#define IPW2100_COMMAND			0x00300004
+#define IPW2100_COMMAND_PHY_ON		0x0
+#define IPW2100_COMMAND_PHY_OFF		0x1
+
+/* in the DEBUG_AREA, memory values always read back 0xd55555d5 */
+#define IPW_REG_DOA_DEBUG_AREA_START    IPW_REG_DOMAIN_0_OFFSET + 0x0090
+#define IPW_REG_DOA_DEBUG_AREA_END      IPW_REG_DOMAIN_0_OFFSET + 0x00FF
+#define IPW_DATA_DOA_DEBUG_VALUE        0xd55555d5
+
+#define IPW_INTERNAL_REGISTER_HALT_AND_RESET	0x003000e0
+
+#define IPW_WAIT_CLOCK_STABILIZATION_DELAY	    50	// microseconds
+#define IPW_WAIT_RESET_ARC_COMPLETE_DELAY	    10	// microseconds
+#define IPW_WAIT_RESET_MASTER_ASSERT_COMPLETE_DELAY 10	// microseconds
+
+// BD ring queue read/write difference
+#define IPW_BD_QUEUE_W_R_MIN_SPARE 2
+
+#define IPW_CACHE_LINE_LENGTH_DEFAULT		    0x80
+
+#define IPW_CARD_DISABLE_PHY_OFF_COMPLETE_WAIT	    100	// 100 milli
+#define IPW_PREPARE_POWER_DOWN_COMPLETE_WAIT	    100	// 100 milli
+
+#define IPW_HEADER_802_11_SIZE		 sizeof(struct libipw_hdr_3addr)
+#define IPW_MAX_80211_PAYLOAD_SIZE              2304U
+#define IPW_MAX_802_11_PAYLOAD_LENGTH		2312
+#define IPW_MAX_ACCEPTABLE_TX_FRAME_LENGTH	1536
+#define IPW_MIN_ACCEPTABLE_RX_FRAME_LENGTH	60
+#define IPW_MAX_ACCEPTABLE_RX_FRAME_LENGTH \
+	(IPW_MAX_ACCEPTABLE_TX_FRAME_LENGTH + IPW_HEADER_802_11_SIZE - \
+        sizeof(struct ethhdr))
+
+#define IPW_802_11_FCS_LENGTH 4
+#define IPW_RX_NIC_BUFFER_LENGTH \
+        (IPW_MAX_802_11_PAYLOAD_LENGTH + IPW_HEADER_802_11_SIZE + \
+		IPW_802_11_FCS_LENGTH)
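+/* assuming the usual 24-byte 3-address header, this works out to
+ * 2312 + 24 + 4 = 2340 bytes per NIC rx buffer */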
+
+#define IPW_802_11_PAYLOAD_OFFSET \
+        (sizeof(struct libipw_hdr_3addr) + \
+         sizeof(struct libipw_snap_hdr))
+
+struct ipw2100_rx {
+	union {
+		unsigned char payload[IPW_RX_NIC_BUFFER_LENGTH];
+		struct libipw_hdr_4addr header;
+		u32 status;
+		struct ipw2100_notification notification;
+		struct ipw2100_cmd_header command;
+	} rx_data;
+} __packed;
+
+/* Bits 0-3 carry the 802.11b tx rates (see TX_RATE_MASK); bits 4-7 are reserved */
+#define TX_RATE_1_MBIT              0x0001
+#define TX_RATE_2_MBIT              0x0002
+#define TX_RATE_5_5_MBIT            0x0004
+#define TX_RATE_11_MBIT             0x0008
+#define TX_RATE_MASK                0x000F
+#define DEFAULT_TX_RATES            0x000F
+
+#define IPW_POWER_MODE_CAM           0x00	//(always on)
+#define IPW_POWER_INDEX_1            0x01
+#define IPW_POWER_INDEX_2            0x02
+#define IPW_POWER_INDEX_3            0x03
+#define IPW_POWER_INDEX_4            0x04
+#define IPW_POWER_INDEX_5            0x05
+#define IPW_POWER_AUTO               0x06
+#define IPW_POWER_MASK               0x0F
+#define IPW_POWER_ENABLED            0x10
+#define IPW_POWER_LEVEL(x)           ((x) & IPW_POWER_MASK)
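+/* The power mode packs an enable flag and a level into one value: e.g.
+ * (IPW_POWER_ENABLED | IPW_POWER_INDEX_3) means "power save on, level 3",
+ * and IPW_POWER_LEVEL() recovers the 0x03 */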
+
+#define IPW_TX_POWER_AUTO            0
+#define IPW_TX_POWER_ENHANCED        1
+
+#define IPW_TX_POWER_DEFAULT         32
+#define IPW_TX_POWER_MIN             0
+#define IPW_TX_POWER_MAX             16
+#define IPW_TX_POWER_MIN_DBM         (-12)
+#define IPW_TX_POWER_MAX_DBM         16
+
+#define FW_SCAN_DONOT_ASSOCIATE     0x0001	// Don't attempt to associate after scan
+#define FW_SCAN_PASSIVE             0x0008	// Force PASSIVE scan
+
+#define REG_MIN_CHANNEL             0
+#define REG_MAX_CHANNEL             14
+
+#define REG_CHANNEL_MASK            0x00003FFF
+#define IPW_IBSS_11B_DEFAULT_MASK   0x87ff
+
+#define DIVERSITY_EITHER            0	// Use both antennas
+#define DIVERSITY_ANTENNA_A         1	// Use antenna A
+#define DIVERSITY_ANTENNA_B         2	// Use antenna B
+
+#define HOST_COMMAND_WAIT 0
+#define HOST_COMMAND_NO_WAIT 1
+
+#define LOCK_NONE 0
+#define LOCK_DRIVER 1
+#define LOCK_FW 2
+
+#define TYPE_SWEEP_ORD                  0x000D
+#define TYPE_IBSS_STTN_ORD              0x000E
+#define TYPE_BSS_AP_ORD                 0x000F
+#define TYPE_RAW_BEACON_ENTRY           0x0010
+#define TYPE_CALIBRATION_DATA           0x0011
+#define TYPE_ROGUE_AP_DATA              0x0012
+#define TYPE_ASSOCIATION_REQUEST	0x0013
+#define TYPE_REASSOCIATION_REQUEST	0x0014
+
+#define HW_FEATURE_RFKILL 0x0001
+#define RF_KILLSWITCH_OFF 1
+#define RF_KILLSWITCH_ON  0
+
+#define IPW_COMMAND_POOL_SIZE        40
+
+#define IPW_START_ORD_TAB_1			1
+#define IPW_START_ORD_TAB_2			1000
+
+#define IPW_ORD_TAB_1_ENTRY_SIZE		sizeof(u32)
+
+#define IS_ORDINAL_TABLE_ONE(mgr,id) \
+    ((id >= IPW_START_ORD_TAB_1) && (id < mgr->table1_size))
+#define IS_ORDINAL_TABLE_TWO(mgr,id) \
+    ((id >= IPW_START_ORD_TAB_2) && (id < (mgr->table2_size + IPW_START_ORD_TAB_2)))
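+/* Reading the macros directly: with a table2_size of, say, 200 (value
+ * illustrative), ordinal ids 1000..1199 are served from table 2 */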
+
+#define BSS_ID_LENGTH               6
+
+// Fixed size data: Ordinal Table 1
+typedef enum _ORDINAL_TABLE_1 {	// NS - means Not Supported by FW
+// Transmit statistics
+	IPW_ORD_STAT_TX_HOST_REQUESTS = 1,	// # of requested Host Tx's (MSDU)
+	IPW_ORD_STAT_TX_HOST_COMPLETE,	// # of successful Host Tx's (MSDU)
+	IPW_ORD_STAT_TX_DIR_DATA,	// # of successful Directed Tx's (MSDU)
+
+	IPW_ORD_STAT_TX_DIR_DATA1 = 4,	// # of successful Directed Tx's (MSDU) @ 1MB
+	IPW_ORD_STAT_TX_DIR_DATA2,	// # of successful Directed Tx's (MSDU) @ 2MB
+	IPW_ORD_STAT_TX_DIR_DATA5_5,	// # of successful Directed Tx's (MSDU) @ 5_5MB
+	IPW_ORD_STAT_TX_DIR_DATA11,	// # of successful Directed Tx's (MSDU) @ 11MB
+	IPW_ORD_STAT_TX_DIR_DATA22,	// # of successful Directed Tx's (MSDU) @ 22MB
+
+	IPW_ORD_STAT_TX_NODIR_DATA1 = 13,	// # of successful Non_Directed Tx's (MSDU) @ 1MB
+	IPW_ORD_STAT_TX_NODIR_DATA2,	// # of successful Non_Directed Tx's (MSDU) @ 2MB
+	IPW_ORD_STAT_TX_NODIR_DATA5_5,	// # of successful Non_Directed Tx's (MSDU) @ 5.5MB
+	IPW_ORD_STAT_TX_NODIR_DATA11,	// # of successful Non_Directed Tx's (MSDU) @ 11MB
+
+	IPW_ORD_STAT_NULL_DATA = 21,	// # of successful NULL data Tx's
+	IPW_ORD_STAT_TX_RTS,	// # of successful Tx RTS
+	IPW_ORD_STAT_TX_CTS,	// # of successful Tx CTS
+	IPW_ORD_STAT_TX_ACK,	// # of successful Tx ACK
+	IPW_ORD_STAT_TX_ASSN,	// # of successful Association Tx's
+	IPW_ORD_STAT_TX_ASSN_RESP,	// # of successful Association response Tx's
+	IPW_ORD_STAT_TX_REASSN,	// # of successful Reassociation Tx's
+	IPW_ORD_STAT_TX_REASSN_RESP,	// # of successful Reassociation response Tx's
+	IPW_ORD_STAT_TX_PROBE,	// # of probes successfully transmitted
+	IPW_ORD_STAT_TX_PROBE_RESP,	// # of probe responses successfully transmitted
+	IPW_ORD_STAT_TX_BEACON,	// # of tx beacon
+	IPW_ORD_STAT_TX_ATIM,	// # of Tx ATIM
+	IPW_ORD_STAT_TX_DISASSN,	// # of successful Disassociation TX
+	IPW_ORD_STAT_TX_AUTH,	// # of successful Authentication Tx
+	IPW_ORD_STAT_TX_DEAUTH,	// # of successful Deauthentication TX
+
+	IPW_ORD_STAT_TX_TOTAL_BYTES = 41,	// Total successful Tx data bytes
+	IPW_ORD_STAT_TX_RETRIES,	// # of Tx retries
+	IPW_ORD_STAT_TX_RETRY1,	// # of Tx retries at 1MBPS
+	IPW_ORD_STAT_TX_RETRY2,	// # of Tx retries at 2MBPS
+	IPW_ORD_STAT_TX_RETRY5_5,	// # of Tx retries at 5.5MBPS
+	IPW_ORD_STAT_TX_RETRY11,	// # of Tx retries at 11MBPS
+
+	IPW_ORD_STAT_TX_FAILURES = 51,	// # of Tx Failures
+	IPW_ORD_STAT_TX_ABORT_AT_HOP,	//NS // # of Tx's aborted at hop time
+	IPW_ORD_STAT_TX_MAX_TRIES_IN_HOP,	// # of times max tries in a hop failed
+	IPW_ORD_STAT_TX_ABORT_LATE_DMA,	//NS // # of times tx aborted due to late dma setup
+	IPW_ORD_STAT_TX_ABORT_STX,	//NS // # of times backoff aborted
+	IPW_ORD_STAT_TX_DISASSN_FAIL,	// # of times disassociation failed
+	IPW_ORD_STAT_TX_ERR_CTS,	// # of missed/bad CTS frames
+	IPW_ORD_STAT_TX_BPDU,	//NS // # of spanning tree BPDUs sent
+	IPW_ORD_STAT_TX_ERR_ACK,	// # of tx err due to acks
+
+	// Receive statistics
+	IPW_ORD_STAT_RX_HOST = 61,	// # of packets passed to host
+	IPW_ORD_STAT_RX_DIR_DATA,	// # of directed packets
+	IPW_ORD_STAT_RX_DIR_DATA1,	// # of directed packets at 1MB
+	IPW_ORD_STAT_RX_DIR_DATA2,	// # of directed packets at 2MB
+	IPW_ORD_STAT_RX_DIR_DATA5_5,	// # of directed packets at 5.5MB
+	IPW_ORD_STAT_RX_DIR_DATA11,	// # of directed packets at 11MB
+	IPW_ORD_STAT_RX_DIR_DATA22,	// # of directed packets at 22MB
+
+	IPW_ORD_STAT_RX_NODIR_DATA = 71,	// # of nondirected packets
+	IPW_ORD_STAT_RX_NODIR_DATA1,	// # of nondirected packets at 1MB
+	IPW_ORD_STAT_RX_NODIR_DATA2,	// # of nondirected packets at 2MB
+	IPW_ORD_STAT_RX_NODIR_DATA5_5,	// # of nondirected packets at 5.5MB
+	IPW_ORD_STAT_RX_NODIR_DATA11,	// # of nondirected packets at 11MB
+
+	IPW_ORD_STAT_RX_NULL_DATA = 80,	// # of null data rx's
+	IPW_ORD_STAT_RX_POLL,	//NS // # of poll rx
+	IPW_ORD_STAT_RX_RTS,	// # of Rx RTS
+	IPW_ORD_STAT_RX_CTS,	// # of Rx CTS
+	IPW_ORD_STAT_RX_ACK,	// # of Rx ACK
+	IPW_ORD_STAT_RX_CFEND,	// # of Rx CF End
+	IPW_ORD_STAT_RX_CFEND_ACK,	// # of Rx CF End + CF Ack
+	IPW_ORD_STAT_RX_ASSN,	// # of Association Rx's
+	IPW_ORD_STAT_RX_ASSN_RESP,	// # of Association response Rx's
+	IPW_ORD_STAT_RX_REASSN,	// # of Reassociation Rx's
+	IPW_ORD_STAT_RX_REASSN_RESP,	// # of Reassociation response Rx's
+	IPW_ORD_STAT_RX_PROBE,	// # of probe Rx's
+	IPW_ORD_STAT_RX_PROBE_RESP,	// # of probe response Rx's
+	IPW_ORD_STAT_RX_BEACON,	// # of Rx beacon
+	IPW_ORD_STAT_RX_ATIM,	// # of Rx ATIM
+	IPW_ORD_STAT_RX_DISASSN,	// # of disassociation Rx
+	IPW_ORD_STAT_RX_AUTH,	// # of authentication Rx
+	IPW_ORD_STAT_RX_DEAUTH,	// # of deauthentication Rx
+
+	IPW_ORD_STAT_RX_TOTAL_BYTES = 101,	// Total rx data bytes received
+	IPW_ORD_STAT_RX_ERR_CRC,	// # of packets with Rx CRC error
+	IPW_ORD_STAT_RX_ERR_CRC1,	// # of Rx CRC errors at 1MB
+	IPW_ORD_STAT_RX_ERR_CRC2,	// # of Rx CRC errors at 2MB
+	IPW_ORD_STAT_RX_ERR_CRC5_5,	// # of Rx CRC errors at 5.5MB
+	IPW_ORD_STAT_RX_ERR_CRC11,	// # of Rx CRC errors at 11MB
+
+	IPW_ORD_STAT_RX_DUPLICATE1 = 112,	// # of duplicate rx packets at 1MB
+	IPW_ORD_STAT_RX_DUPLICATE2,	// # of duplicate rx packets at 2MB
+	IPW_ORD_STAT_RX_DUPLICATE5_5,	// # of duplicate rx packets at 5.5MB
+	IPW_ORD_STAT_RX_DUPLICATE11,	// # of duplicate rx packets at 11MB
+	IPW_ORD_STAT_RX_DUPLICATE = 119,	// # of duplicate rx packets
+
+	IPW_ORD_PERS_DB_LOCK = 120,	// # of locks on the fw permanent db
+	IPW_ORD_PERS_DB_SIZE,	// size of the fw permanent db
+	IPW_ORD_PERS_DB_ADDR,	// address of the fw permanent db
+	IPW_ORD_STAT_RX_INVALID_PROTOCOL,	// # of rx frames with invalid protocol
+	IPW_ORD_SYS_BOOT_TIME,	// # Boot time
+	IPW_ORD_STAT_RX_NO_BUFFER,	// # of rx frames rejected due to no buffer
+	IPW_ORD_STAT_RX_ABORT_LATE_DMA,	//NS // # of rx frames rejected due to dma setup too late
+	IPW_ORD_STAT_RX_ABORT_AT_HOP,	//NS // # of rx frames aborted due to hop
+	IPW_ORD_STAT_RX_MISSING_FRAG,	// # of rx frames dropped due to missing fragment
+	IPW_ORD_STAT_RX_ORPHAN_FRAG,	// # of rx frames dropped due to non-sequential fragment
+	IPW_ORD_STAT_RX_ORPHAN_FRAME,	// # of rx frames dropped due to unmatched 1st frame
+	IPW_ORD_STAT_RX_FRAG_AGEOUT,	// # of rx frames dropped due to uncompleted frame
+	IPW_ORD_STAT_RX_BAD_SSID,	//NS // Bad SSID (unused)
+	IPW_ORD_STAT_RX_ICV_ERRORS,	// # of ICV errors during decryption
+
+// PSP Statistics
+	IPW_ORD_STAT_PSP_SUSPENSION = 137,	// # of times adapter suspended
+	IPW_ORD_STAT_PSP_BCN_TIMEOUT,	// # of beacon timeout
+	IPW_ORD_STAT_PSP_POLL_TIMEOUT,	// # of poll response timeouts
+	IPW_ORD_STAT_PSP_NONDIR_TIMEOUT,	// # of timeouts waiting for last broadcast/multicast pkt
+	IPW_ORD_STAT_PSP_RX_DTIMS,	// # of PSP DTIMs received
+	IPW_ORD_STAT_PSP_RX_TIMS,	// # of PSP TIMs received
+	IPW_ORD_STAT_PSP_STATION_ID,	// PSP Station ID
+
+// Association and roaming
+	IPW_ORD_LAST_ASSN_TIME = 147,	// RTC time of last association
+	IPW_ORD_STAT_PERCENT_MISSED_BCNS,	// current calculation of % missed beacons
+	IPW_ORD_STAT_PERCENT_RETRIES,	// current calculation of % missed tx retries
+	IPW_ORD_ASSOCIATED_AP_PTR,	// If associated, this is a ptr to the associated
+	// AP table entry; set to 0 if not associated
+	IPW_ORD_AVAILABLE_AP_CNT,	// # of AP's described in the AP table
+	IPW_ORD_AP_LIST_PTR,	// Ptr to list of available APs
+	IPW_ORD_STAT_AP_ASSNS,	// # of associations
+	IPW_ORD_STAT_ASSN_FAIL,	// # of association failures
+	IPW_ORD_STAT_ASSN_RESP_FAIL,	// # of failures due to response fail
+	IPW_ORD_STAT_FULL_SCANS,	// # of full scans
+
+	IPW_ORD_CARD_DISABLED,	// # of times card disabled
+	IPW_ORD_STAT_ROAM_INHIBIT,	// # of times roaming was inhibited due to ongoing activity
+	IPW_FILLER_40,
+	IPW_ORD_RSSI_AT_ASSN = 160,	// RSSI of associated AP at time of association
+	IPW_ORD_STAT_ASSN_CAUSE1,	// # of reassociations due to no tx from AP in last N
+	// hops or no probe responses in last 3 minutes
+	IPW_ORD_STAT_ASSN_CAUSE2,	// # of reassociations due to poor tx/rx quality
+	IPW_ORD_STAT_ASSN_CAUSE3,	// # of reassociations due to tx/rx quality with excessive
+	// load at the AP
+	IPW_ORD_STAT_ASSN_CAUSE4,	// # of reassociations due to AP RSSI level fell below
+	// eligible group
+	IPW_ORD_STAT_ASSN_CAUSE5,	// # of reassociations due to load leveling
+	IPW_ORD_STAT_ASSN_CAUSE6,	//NS // # of reassociations due to being dropped by the AP
+	IPW_FILLER_41,
+	IPW_FILLER_42,
+	IPW_FILLER_43,
+	IPW_ORD_STAT_AUTH_FAIL,	// # of times authentication failed
+	IPW_ORD_STAT_AUTH_RESP_FAIL,	// # of times authentication response failed
+	IPW_ORD_STATION_TABLE_CNT,	// # of entries in association table
+
+// Other statistics
+	IPW_ORD_RSSI_AVG_CURR = 173,	// Current avg RSSI
+	IPW_ORD_STEST_RESULTS_CURR,	//NS // Current self test results word
+	IPW_ORD_STEST_RESULTS_CUM,	//NS // Cumulative self test results word
+	IPW_ORD_SELF_TEST_STATUS,	//NS //
+	IPW_ORD_POWER_MGMT_MODE,	// Power mode - 0=CAM, 1=PSP
+	IPW_ORD_POWER_MGMT_INDEX,	//NS //
+	IPW_ORD_COUNTRY_CODE,	// IEEE country code as recv'd from beacon
+	IPW_ORD_COUNTRY_CHANNELS,	// channels supported by country
+// IPW_ORD_COUNTRY_CHANNELS:
+// For 11b the lower 2 bytes are used for channels 1-14
+//   and the upper 2 bytes are not used.
+	IPW_ORD_RESET_CNT,	// # of adapter resets (warm)
+	IPW_ORD_BEACON_INTERVAL,	// Beacon interval
+
+	IPW_ORD_PRINCETON_VERSION = 184,	//NS // Princeton Version
+	IPW_ORD_ANTENNA_DIVERSITY,	// TRUE if antenna diversity is disabled
+	IPW_ORD_CCA_RSSI,	//NS // CCA RSSI value (factory programmed)
+	IPW_ORD_STAT_EEPROM_UPDATE,	//NS // # of times config EEPROM updated
+	IPW_ORD_DTIM_PERIOD,	// # of beacon intervals between DTIMs
+	IPW_ORD_OUR_FREQ,	// current radio freq lower digits - channel ID
+
+	IPW_ORD_RTC_TIME = 190,	// current RTC time
+	IPW_ORD_PORT_TYPE,	// operating mode
+	IPW_ORD_CURRENT_TX_RATE,	// current tx rate
+	IPW_ORD_SUPPORTED_RATES,	// Bitmap of supported tx rates
+	IPW_ORD_ATIM_WINDOW,	// current ATIM Window
+	IPW_ORD_BASIC_RATES,	// bitmap of basic tx rates
+	IPW_ORD_NIC_HIGHEST_RATE,	// highest tx rate supported by the NIC
+	IPW_ORD_AP_HIGHEST_RATE,	// highest tx rate supported by the AP
+	IPW_ORD_CAPABILITIES,	// Management frame capability field
+	IPW_ORD_AUTH_TYPE,	// Type of authentication
+	IPW_ORD_RADIO_TYPE,	// Adapter card platform type
+	IPW_ORD_RTS_THRESHOLD = 201,	// Min length of packet after which RTS handshaking is used
+	IPW_ORD_INT_MODE,	// International mode
+	IPW_ORD_FRAGMENTATION_THRESHOLD,	// protocol frag threshold
+	IPW_ORD_EEPROM_SRAM_DB_BLOCK_START_ADDRESS,	// EEPROM offset in SRAM
+	IPW_ORD_EEPROM_SRAM_DB_BLOCK_SIZE,	// EEPROM size in SRAM
+	IPW_ORD_EEPROM_SKU_CAPABILITY,	// EEPROM SKU Capability
+	IPW_ORD_EEPROM_IBSS_11B_CHANNELS,	// EEPROM IBSS 11b channel set
+
+	IPW_ORD_MAC_VERSION = 209,	// MAC Version
+	IPW_ORD_MAC_REVISION,	// MAC Revision
+	IPW_ORD_RADIO_VERSION,	// Radio Version
+	IPW_ORD_NIC_MANF_DATE_TIME,	// MANF Date/Time STAMP
+	IPW_ORD_UCODE_VERSION,	// Ucode Version
+	IPW_ORD_HW_RF_SWITCH_STATE = 214,	// HW RF Kill Switch State
+} ORDINALTABLE1;
+
+// ordinal table 2
+// Variable length data:
+#define IPW_FIRST_VARIABLE_LENGTH_ORDINAL   1001
+
+typedef enum _ORDINAL_TABLE_2 {	// NS - means Not Supported by FW
+	IPW_ORD_STAT_BASE = 1000,	// contains number of variable ORDs
+	IPW_ORD_STAT_ADAPTER_MAC = 1001,	// 6 bytes: our adapter MAC address
+	IPW_ORD_STAT_PREFERRED_BSSID = 1002,	// 6 bytes: BSSID of the preferred AP
+	IPW_ORD_STAT_MANDATORY_BSSID = 1003,	// 6 bytes: BSSID of the mandatory AP
+	IPW_FILL_1,		//NS //
+	IPW_ORD_STAT_COUNTRY_TEXT = 1005,	// 36 bytes: Country name text, First two bytes are Country code
+	IPW_ORD_STAT_ASSN_SSID = 1006,	// 32 bytes: ESSID String
+	IPW_ORD_STATION_TABLE = 1007,	// ? bytes: Station/AP table (via Direct SSID Scans)
+	IPW_ORD_STAT_SWEEP_TABLE = 1008,	// ? bytes: Sweep/Host table (via Broadcast Scans)
+	IPW_ORD_STAT_ROAM_LOG = 1009,	// ? bytes: Roaming log
+	IPW_ORD_STAT_RATE_LOG = 1010,	//NS // 0 bytes: Rate log
+	IPW_ORD_STAT_FIFO = 1011,	//NS // 0 bytes: Fifo buffer data structures
+	IPW_ORD_STAT_FW_VER_NUM = 1012,	// 14 bytes: fw version ID string as in (a.bb.ccc; "0.08.011")
+	IPW_ORD_STAT_FW_DATE = 1013,	// 14 bytes: fw date string (mmm dd yyyy; "Mar 13 2002")
+	IPW_ORD_STAT_ASSN_AP_BSSID = 1014,	// 6 bytes: MAC address of associated AP
+	IPW_ORD_STAT_DEBUG = 1015,	//NS // ? bytes:
+	IPW_ORD_STAT_NIC_BPA_NUM = 1016,	// 11 bytes: NIC BPA number in ASCII
+	IPW_ORD_STAT_UCODE_DATE = 1017,	// 5 bytes: uCode date
+	IPW_ORD_SECURITY_NGOTIATION_RESULT = 1018,
+} ORDINALTABLE2;		// NS - means Not Supported by FW
+
+#define IPW_LAST_VARIABLE_LENGTH_ORDINAL   1018
+
+#ifndef WIRELESS_SPY
+#define WIRELESS_SPY		// enable iwspy support
+#endif
+
+#define IPW_HOST_FW_SHARED_AREA0 	0x0002f200
+#define IPW_HOST_FW_SHARED_AREA0_END 	0x0002f510	// 0x310 bytes
+
+#define IPW_HOST_FW_SHARED_AREA1 	0x0002f610
+#define IPW_HOST_FW_SHARED_AREA1_END 	0x0002f630	// 0x20 bytes
+
+#define IPW_HOST_FW_SHARED_AREA2 	0x0002fa00
+#define IPW_HOST_FW_SHARED_AREA2_END 	0x0002fa20	// 0x20 bytes
+
+#define IPW_HOST_FW_SHARED_AREA3 	0x0002fc00
+#define IPW_HOST_FW_SHARED_AREA3_END 	0x0002fc10	// 0x10 bytes
+
+#define IPW_HOST_FW_INTERRUPT_AREA 	0x0002ff80
+#define IPW_HOST_FW_INTERRUPT_AREA_END 	0x00030000	// 0x80 bytes
+
+struct ipw2100_fw_chunk {
+	unsigned char *buf;
+	long len;
+	long pos;
+	struct list_head list;
+};
+
+struct ipw2100_fw_chunk_set {
+	const void *data;
+	unsigned long size;
+};
+
+struct ipw2100_fw {
+	int version;
+	struct ipw2100_fw_chunk_set fw;
+	struct ipw2100_fw_chunk_set uc;
+	const struct firmware *fw_entry;
+};
+
+#define MAX_FW_VERSION_LEN 14
+
+#endif				/* _IPW2100_H */
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
new file mode 100644
index 0000000..9644e7b
--- /dev/null
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -0,0 +1,12054 @@
+/******************************************************************************
+
+  Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
+
+  802.11 status code portion of this file from ethereal-0.10.6:
+    Copyright 2000, Axis Communications AB
+    Ethereal - Network traffic analyzer
+    By Gerald Combs <gerald@ethereal.com>
+    Copyright 1998 Gerald Combs
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+
+  The full GNU General Public License is included in this distribution in the
+  file called LICENSE.
+
+  Contact Information:
+  Intel Linux Wireless <ilw@linux.intel.com>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+******************************************************************************/
+
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <net/cfg80211-wext.h>
+#include "ipw2200.h"
+#include "ipw.h"
+
+
+#ifndef KBUILD_EXTMOD
+#define VK "k"
+#else
+#define VK
+#endif
+
+#ifdef CONFIG_IPW2200_DEBUG
+#define VD "d"
+#else
+#define VD
+#endif
+
+#ifdef CONFIG_IPW2200_MONITOR
+#define VM "m"
+#else
+#define VM
+#endif
+
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+#define VP "p"
+#else
+#define VP
+#endif
+
+#ifdef CONFIG_IPW2200_RADIOTAP
+#define VR "r"
+#else
+#define VR
+#endif
+
+#ifdef CONFIG_IPW2200_QOS
+#define VQ "q"
+#else
+#define VQ
+#endif
+
+#define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ
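+/* e.g. an in-tree build (KBUILD_EXTMOD unset) with debug, monitor,
+ * promiscuous, radiotap and QoS all enabled reports "1.2.2kdmprq",
+ * while an in-tree build with none of them reports "1.2.2k". */
+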
+#define DRV_DESCRIPTION	"Intel(R) PRO/Wireless 2200/2915 Network Driver"
+#define DRV_COPYRIGHT	"Copyright(c) 2003-2006 Intel Corporation"
+#define DRV_VERSION     IPW2200_VERSION
+
+#define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT);
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("ipw2200-ibss.fw");
+#ifdef CONFIG_IPW2200_MONITOR
+MODULE_FIRMWARE("ipw2200-sniffer.fw");
+#endif
+MODULE_FIRMWARE("ipw2200-bss.fw");
+
+static int cmdlog = 0;
+static int debug = 0;
+static int default_channel = 0;
+static int network_mode = 0;
+
+static u32 ipw_debug_level;
+static int associate;
+static int auto_create = 1;
+static int led_support = 1;
+static int disable = 0;
+static int bt_coexist = 0;
+static int hwcrypto = 0;
+static int roaming = 1;
+static const char ipw_modes[] = {
+	'a', 'b', 'g', '?'
+};
+static int antenna = CFG_SYS_ANTENNA_BOTH;
+
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+static int rtap_iface = 0;     /* def: 0 -- do not create rtap interface */
+#endif
+
+static struct ieee80211_rate ipw2200_rates[] = {
+	{ .bitrate = 10 },
+	{ .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+	{ .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+	{ .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
+	{ .bitrate = 60 },
+	{ .bitrate = 90 },
+	{ .bitrate = 120 },
+	{ .bitrate = 180 },
+	{ .bitrate = 240 },
+	{ .bitrate = 360 },
+	{ .bitrate = 480 },
+	{ .bitrate = 540 }
+};
+
+#define ipw2200_a_rates		(ipw2200_rates + 4)
+#define ipw2200_num_a_rates	8
+#define ipw2200_bg_rates	(ipw2200_rates + 0)
+#define ipw2200_num_bg_rates	12
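+/* struct ieee80211_rate carries bitrates in units of 100 kb/s, so
+ * .bitrate = 10 above is 1 Mb/s and .bitrate = 540 is 54 Mb/s.  The
+ * first four entries are the 802.11b (CCK/DSSS) rates and the remaining
+ * eight are the OFDM rates, which is what the slicing macros rely on. */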
+
+/* Ugly macro to convert literal channel numbers into their MHz equivalents.
+ * There are certainly some conditions that will break this (like feeding it
+ * '30'), but they shouldn't arise since nothing talks on channel 30. */
+#define ieee80211chan2mhz(x) \
+	(((x) <= 14) ? \
+	(((x) == 14) ? 2484 : ((x) * 5) + 2407) : \
+	((x) + 1000) * 5)
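+/* Worked examples of the conversion above:
+ *	ieee80211chan2mhz(1)   =  1 * 5 + 2407    = 2412 MHz
+ *	ieee80211chan2mhz(13)  = 13 * 5 + 2407    = 2472 MHz
+ *	ieee80211chan2mhz(14)  = special case     = 2484 MHz
+ *	ieee80211chan2mhz(36)  = (36 + 1000) * 5  = 5180 MHz
+ *	ieee80211chan2mhz(149) = (149 + 1000) * 5 = 5745 MHz
+ */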
+
+#ifdef CONFIG_IPW2200_QOS
+static int qos_enable = 0;
+static int qos_burst_enable = 0;
+static int qos_no_ack_mask = 0;
+static int burst_duration_CCK = 0;
+static int burst_duration_OFDM = 0;
+
+static struct libipw_qos_parameters def_qos_parameters_OFDM = {
+	{QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
+	 QOS_TX3_CW_MIN_OFDM},
+	{QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
+	 QOS_TX3_CW_MAX_OFDM},
+	{QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
+	{QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
+	{QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
+	 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
+};
+
+static struct libipw_qos_parameters def_qos_parameters_CCK = {
+	{QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
+	 QOS_TX3_CW_MIN_CCK},
+	{QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
+	 QOS_TX3_CW_MAX_CCK},
+	{QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
+	{QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
+	{QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
+	 QOS_TX3_TXOP_LIMIT_CCK}
+};
+
+static struct libipw_qos_parameters def_parameters_OFDM = {
+	{DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
+	 DEF_TX3_CW_MIN_OFDM},
+	{DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
+	 DEF_TX3_CW_MAX_OFDM},
+	{DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
+	{DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
+	{DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
+	 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
+};
+
+static struct libipw_qos_parameters def_parameters_CCK = {
+	{DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
+	 DEF_TX3_CW_MIN_CCK},
+	{DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
+	 DEF_TX3_CW_MAX_CCK},
+	{DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
+	{DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
+	{DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
+	 DEF_TX3_TXOP_LIMIT_CCK}
+};
+
+static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
+
+static int from_priority_to_tx_queue[] = {
+	IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
+	IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
+};
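+/* Indexed by 802.11e user priority; the grouping matches the usual
+ * UP -> access category mapping: UPs 0,3 (best effort) -> queue 1,
+ * UPs 1,2 (background) -> queue 2, UPs 4,5 (video) -> queue 3 and
+ * UPs 6,7 (voice) -> queue 4. */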
+
+static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
+
+static int ipw_send_qos_params_command(struct ipw_priv *priv, struct libipw_qos_parameters
+				       *qos_param);
+static int ipw_send_qos_info_command(struct ipw_priv *priv, struct libipw_qos_information_element
+				     *qos_param);
+#endif				/* CONFIG_IPW2200_QOS */
+
+static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
+static void ipw_remove_current_network(struct ipw_priv *priv);
+static void ipw_rx(struct ipw_priv *priv);
+static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
+				struct clx2_tx_queue *txq, int qindex);
+static int ipw_queue_reset(struct ipw_priv *priv);
+
+static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
+			     int len, int sync);
+
+static void ipw_tx_queue_free(struct ipw_priv *);
+
+static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
+static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
+static void ipw_rx_queue_replenish(void *);
+static int ipw_up(struct ipw_priv *);
+static void ipw_bg_up(struct work_struct *work);
+static void ipw_down(struct ipw_priv *);
+static void ipw_bg_down(struct work_struct *work);
+static int ipw_config(struct ipw_priv *);
+static int init_supported_rates(struct ipw_priv *priv,
+				struct ipw_supported_rates *prates);
+static void ipw_set_hwcrypto_keys(struct ipw_priv *);
+static void ipw_send_wep_keys(struct ipw_priv *, int);
+
+static int snprint_line(char *buf, size_t count,
+			const u8 * data, u32 len, u32 ofs)
+{
+	int out, i, j, l;
+	char c;
+
+	out = snprintf(buf, count, "%08X", ofs);
+
+	for (l = 0, i = 0; i < 2; i++) {
+		out += snprintf(buf + out, count - out, " ");
+		for (j = 0; j < 8 && l < len; j++, l++)
+			out += snprintf(buf + out, count - out, "%02X ",
+					data[(i * 8 + j)]);
+		for (; j < 8; j++)
+			out += snprintf(buf + out, count - out, "   ");
+	}
+
+	out += snprintf(buf + out, count - out, " ");
+	for (l = 0, i = 0; i < 2; i++) {
+		out += snprintf(buf + out, count - out, " ");
+		for (j = 0; j < 8 && l < len; j++, l++) {
+			c = data[(i * 8 + j)];
+			if (!isascii(c) || !isprint(c))
+				c = '.';
+
+			out += snprintf(buf + out, count - out, "%c", c);
+		}
+
+		for (; j < 8; j++)
+			out += snprintf(buf + out, count - out, " ");
+	}
+
+	return out;
+}
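+/* For the 16 byte buffer "ABCDEFGH12345678" at offset 0, snprint_line()
+ * emits a classic hexdump line of the form:
+ *
+ *	00000000 41 42 43 44 45 46 47 48  31 32 33 34 35 36 37 38   ABCDEFGH 12345678
+ */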
+
+static void printk_buf(int level, const u8 * data, u32 len)
+{
+	char line[81];
+	u32 ofs = 0;
+	if (!(ipw_debug_level & level))
+		return;
+
+	while (len) {
+		snprint_line(line, sizeof(line), &data[ofs],
+			     min(len, 16U), ofs);
+		printk(KERN_DEBUG "%s\n", line);
+		ofs += 16;
+		len -= min(len, 16U);
+	}
+}
+
+static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
+{
+	size_t out = size;
+	u32 ofs = 0;
+	int total = 0;
+
+	while (size && len) {
+		out = snprint_line(output, size, &data[ofs],
+				   min_t(size_t, len, 16U), ofs);
+
+		ofs += 16;
+		output += out;
+		size -= out;
+		len -= min_t(size_t, len, 16U);
+		total += out;
+	}
+	return total;
+}
+
+/* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
+static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
+#define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
+
+/* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
+static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
+#define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
+
+/* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
+static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
+static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
+{
+	IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
+		     __LINE__, (u32) (b), (u32) (c));
+	_ipw_write_reg8(a, b, c);
+}
+
+/* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
+static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
+static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
+{
+	IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
+		     __LINE__, (u32) (b), (u32) (c));
+	_ipw_write_reg16(a, b, c);
+}
+
+/* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
+static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
+static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
+{
+	IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
+		     __LINE__, (u32) (b), (u32) (c));
+	_ipw_write_reg32(a, b, c);
+}
+
+/* 8-bit direct write (low 4K) */
+static inline void _ipw_write8(struct ipw_priv *ipw, unsigned long ofs,
+		u8 val)
+{
+	writeb(val, ipw->hw_base + ofs);
+}
+
+/* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
+#define ipw_write8(ipw, ofs, val) do { \
+	IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, \
+			__LINE__, (u32)(ofs), (u32)(val)); \
+	_ipw_write8(ipw, ofs, val); \
+} while (0)
+
+/* 16-bit direct write (low 4K) */
+static inline void _ipw_write16(struct ipw_priv *ipw, unsigned long ofs,
+		u16 val)
+{
+	writew(val, ipw->hw_base + ofs);
+}
+
+/* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
+#define ipw_write16(ipw, ofs, val) do { \
+	IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, \
+			__LINE__, (u32)(ofs), (u32)(val)); \
+	_ipw_write16(ipw, ofs, val); \
+} while (0)
+
+/* 32-bit direct write (low 4K) */
+static inline void _ipw_write32(struct ipw_priv *ipw, unsigned long ofs,
+		u32 val)
+{
+	writel(val, ipw->hw_base + ofs);
+}
+
+/* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
+#define ipw_write32(ipw, ofs, val) do { \
+	IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, \
+			__LINE__, (u32)(ofs), (u32)(val)); \
+	_ipw_write32(ipw, ofs, val); \
+} while (0)
+
+/* 8-bit direct read (low 4K) */
+static inline u8 _ipw_read8(struct ipw_priv *ipw, unsigned long ofs)
+{
+	return readb(ipw->hw_base + ofs);
+}
+
+/* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
+#define ipw_read8(ipw, ofs) ({ \
+	IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", __FILE__, __LINE__, \
+			(u32)(ofs)); \
+	_ipw_read8(ipw, ofs); \
+})
+
+/* 16-bit direct read (low 4K) */
+static inline u16 _ipw_read16(struct ipw_priv *ipw, unsigned long ofs)
+{
+	return readw(ipw->hw_base + ofs);
+}
+
+/* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
+#define ipw_read16(ipw, ofs) ({ \
+	IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", __FILE__, __LINE__, \
+			(u32)(ofs)); \
+	_ipw_read16(ipw, ofs); \
+})
+
+/* 32-bit direct read (low 4K) */
+static inline u32 _ipw_read32(struct ipw_priv *ipw, unsigned long ofs)
+{
+	return readl(ipw->hw_base + ofs);
+}
+
+/* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
+#define ipw_read32(ipw, ofs) ({ \
+	IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", __FILE__, __LINE__, \
+			(u32)(ofs)); \
+	_ipw_read32(ipw, ofs); \
+})
+
+static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
+/* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
+#define ipw_read_indirect(a, b, c, d) ({ \
+	IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %u bytes\n", __FILE__, \
+			__LINE__, (u32)(b), (u32)(d)); \
+	_ipw_read_indirect(a, b, c, d); \
+})
+
+/* alias to multi-byte write (SRAM/regs above 4K), with debug wrapper */
+static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
+				int num);
+#define ipw_write_indirect(a, b, c, d) do { \
+	IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %u bytes\n", __FILE__, \
+			__LINE__, (u32)(b), (u32)(d)); \
+	_ipw_write_indirect(a, b, c, d); \
+} while (0)
+
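+/* All of the _ipw_{read,write}_reg* helpers below use the same indirect
+ * window: the dword-aligned target address is written to IPW_INDIRECT_ADDR
+ * and the data is then moved through IPW_INDIRECT_DATA, with sub-dword
+ * accesses offset onto the matching byte lanes of the data register. */
+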
+/* 32-bit indirect write (above 4K) */
+static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
+{
+	IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
+	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
+	_ipw_write32(priv, IPW_INDIRECT_DATA, value);
+}
+
+/* 8-bit indirect write (above 4K) */
+static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
+{
+	u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK;	/* dword align */
+	u32 dif_len = reg - aligned_addr;
+
+	IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
+	_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
+	_ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
+}
+
+/* 16-bit indirect write (above 4K) */
+static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
+{
+	u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK;	/* dword align */
+	u32 dif_len = (reg - aligned_addr) & (~0x1ul);
+
+	IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
+	_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
+	_ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
+}
+
+/* 8-bit indirect read (above 4K) */
+static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
+{
+	u32 word;
+	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
+	IPW_DEBUG_IO(" reg = 0x%8X :\n", reg);
+	word = _ipw_read32(priv, IPW_INDIRECT_DATA);
+	return (word >> ((reg & 0x3) * 8)) & 0xff;
+}
+
+/* 32-bit indirect read (above 4K) */
+static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
+{
+	u32 value;
+
+	IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
+
+	_ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
+	value = _ipw_read32(priv, IPW_INDIRECT_DATA);
+	IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x\n", reg, value);
+	return value;
+}
+
+/* General purpose, no alignment requirement, iterative (multi-byte) read, */
+/*    for area above 1st 4K of SRAM/reg space */
+static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
+			       int num)
+{
+	u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK;	/* dword align */
+	u32 dif_len = addr - aligned_addr;
+	u32 i;
+
+	IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
+
+	if (num <= 0) {
+		return;
+	}
+
+	/* Read the first dword (or portion) byte by byte */
+	if (unlikely(dif_len)) {
+		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
+		/* Start reading at aligned_addr + dif_len */
+		for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
+			*buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
+		aligned_addr += 4;
+	}
+
+	/* Read all of the middle dwords as dwords, with auto-increment */
+	_ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
+	for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
+		*(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
+
+	/* Read the last dword (or portion) byte by byte */
+	if (unlikely(num)) {
+		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
+		for (i = 0; num > 0; i++, num--)
+			*buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
+	}
+}
+
+/* General purpose, no alignment requirement, iterative (multi-byte) write, */
+/*    for area above 1st 4K of SRAM/reg space */
+static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
+				int num)
+{
+	u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK;	/* dword align */
+	u32 dif_len = addr - aligned_addr;
+	u32 i;
+
+	IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
+
+	if (num <= 0) {
+		return;
+	}
+
+	/* Write the first dword (or portion) byte by byte */
+	if (unlikely(dif_len)) {
+		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
+		/* Start writing at aligned_addr + dif_len */
+		for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
+			_ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
+		aligned_addr += 4;
+	}
+
+	/* Write all of the middle dwords as dwords, with auto-increment */
+	_ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
+	for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
+		_ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
+
+	/* Write the last dword (or portion) byte by byte */
+	if (unlikely(num)) {
+		_ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
+		for (i = 0; num > 0; i++, num--, buf++)
+			_ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
+	}
+}
+
+/* General purpose, no alignment requirement, iterative (multi-byte) write, */
+/*    for 1st 4K of SRAM/regs space */
+static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
+			     int num)
+{
+	memcpy_toio((priv->hw_base + addr), buf, num);
+}
+
+/* Set bit(s) in low 4K of SRAM/regs */
+static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
+{
+	ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
+}
+
+/* Clear bit(s) in low 4K of SRAM/regs */
+static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
+{
+	ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
+}
+
+static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
+{
+	if (priv->status & STATUS_INT_ENABLED)
+		return;
+	priv->status |= STATUS_INT_ENABLED;
+	ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
+}
+
+static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
+{
+	if (!(priv->status & STATUS_INT_ENABLED))
+		return;
+	priv->status &= ~STATUS_INT_ENABLED;
+	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
+}
+
+static inline void ipw_enable_interrupts(struct ipw_priv *priv)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->irq_lock, flags);
+	__ipw_enable_interrupts(priv);
+	spin_unlock_irqrestore(&priv->irq_lock, flags);
+}
+
+static inline void ipw_disable_interrupts(struct ipw_priv *priv)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->irq_lock, flags);
+	__ipw_disable_interrupts(priv);
+	spin_unlock_irqrestore(&priv->irq_lock, flags);
+}
+
+static char *ipw_error_desc(u32 val)
+{
+	switch (val) {
+	case IPW_FW_ERROR_OK:
+		return "ERROR_OK";
+	case IPW_FW_ERROR_FAIL:
+		return "ERROR_FAIL";
+	case IPW_FW_ERROR_MEMORY_UNDERFLOW:
+		return "MEMORY_UNDERFLOW";
+	case IPW_FW_ERROR_MEMORY_OVERFLOW:
+		return "MEMORY_OVERFLOW";
+	case IPW_FW_ERROR_BAD_PARAM:
+		return "BAD_PARAM";
+	case IPW_FW_ERROR_BAD_CHECKSUM:
+		return "BAD_CHECKSUM";
+	case IPW_FW_ERROR_NMI_INTERRUPT:
+		return "NMI_INTERRUPT";
+	case IPW_FW_ERROR_BAD_DATABASE:
+		return "BAD_DATABASE";
+	case IPW_FW_ERROR_ALLOC_FAIL:
+		return "ALLOC_FAIL";
+	case IPW_FW_ERROR_DMA_UNDERRUN:
+		return "DMA_UNDERRUN";
+	case IPW_FW_ERROR_DMA_STATUS:
+		return "DMA_STATUS";
+	case IPW_FW_ERROR_DINO_ERROR:
+		return "DINO_ERROR";
+	case IPW_FW_ERROR_EEPROM_ERROR:
+		return "EEPROM_ERROR";
+	case IPW_FW_ERROR_SYSASSERT:
+		return "SYSASSERT";
+	case IPW_FW_ERROR_FATAL_ERROR:
+		return "FATAL_ERROR";
+	default:
+		return "UNKNOWN_ERROR";
+	}
+}
+
+static void ipw_dump_error_log(struct ipw_priv *priv,
+			       struct ipw_fw_error *error)
+{
+	u32 i;
+
+	if (!error) {
+		IPW_ERROR("Error allocating and capturing error log.  "
+			  "Nothing to dump.\n");
+		return;
+	}
+
+	IPW_ERROR("Start IPW Error Log Dump:\n");
+	IPW_ERROR("Status: 0x%08X, Config: %08X\n",
+		  error->status, error->config);
+
+	for (i = 0; i < error->elem_len; i++)
+		IPW_ERROR("%s %i 0x%08x  0x%08x  0x%08x  0x%08x  0x%08x\n",
+			  ipw_error_desc(error->elem[i].desc),
+			  error->elem[i].time,
+			  error->elem[i].blink1,
+			  error->elem[i].blink2,
+			  error->elem[i].link1,
+			  error->elem[i].link2, error->elem[i].data);
+	for (i = 0; i < error->log_len; i++)
+		IPW_ERROR("%i\t0x%08x\t%i\n",
+			  error->log[i].time,
+			  error->log[i].data, error->log[i].event);
+}
+
+static inline int ipw_is_init(struct ipw_priv *priv)
+{
+	return (priv->status & STATUS_INIT) ? 1 : 0;
+}
+
+static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
+{
+	u32 addr, field_info, field_len, field_count, total_len;
+
+	IPW_DEBUG_ORD("ordinal = %i\n", ord);
+
+	if (!priv || !val || !len) {
+		IPW_DEBUG_ORD("Invalid argument\n");
+		return -EINVAL;
+	}
+
+	/* verify device ordinal tables have been initialized */
+	if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
+		IPW_DEBUG_ORD("Access ordinals before initialization\n");
+		return -EINVAL;
+	}
+
+	switch (IPW_ORD_TABLE_ID_MASK & ord) {
+	case IPW_ORD_TABLE_0_MASK:
+		/*
+		 * TABLE 0: Direct access to a table of 32 bit values
+		 *
+		 * This is a very simple table with the data directly
+		 * read from the table
+		 */
+
+		/* remove the table id from the ordinal */
+		ord &= IPW_ORD_TABLE_VALUE_MASK;
+
+		/* boundary check */
+		if (ord > priv->table0_len) {
+			IPW_DEBUG_ORD("ordinal value (%i) longer then "
+				      "max (%i)\n", ord, priv->table0_len);
+			return -EINVAL;
+		}
+
+		/* verify we have enough room to store the value */
+		if (*len < sizeof(u32)) {
+			IPW_DEBUG_ORD("ordinal buffer length too small, "
+				      "need %zd\n", sizeof(u32));
+			return -EINVAL;
+		}
+
+		IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
+			      ord, priv->table0_addr + (ord << 2));
+
+		*len = sizeof(u32);
+		ord <<= 2;
+		*((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
+		break;
+
+	case IPW_ORD_TABLE_1_MASK:
+		/*
+		 * TABLE 1: Indirect access to a table of 32 bit values
+		 *
+		 * This is a fairly large table of u32 values each
+		 * representing starting addr for the data (which is
+		 * also a u32)
+		 */
+
+		/* remove the table id from the ordinal */
+		ord &= IPW_ORD_TABLE_VALUE_MASK;
+
+		/* boundary check */
+		if (ord > priv->table1_len) {
+			IPW_DEBUG_ORD("ordinal value too long\n");
+			return -EINVAL;
+		}
+
+		/* verify we have enough room to store the value */
+		if (*len < sizeof(u32)) {
+			IPW_DEBUG_ORD("ordinal buffer length too small, "
+				      "need %zd\n", sizeof(u32));
+			return -EINVAL;
+		}
+
+		*((u32 *) val) =
+		    ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
+		*len = sizeof(u32);
+		break;
+
+	case IPW_ORD_TABLE_2_MASK:
+		/*
+		 * TABLE 2: Indirect access to a table of variable sized values
+		 *
+		 * This table consists of six values, each containing
+		 *     - a dword containing the starting offset of the data
+		 *     - a dword containing the length in the first 16 bits
+		 *       and the count in the second 16 bits
+		 */
+
+		/* remove the table id from the ordinal */
+		ord &= IPW_ORD_TABLE_VALUE_MASK;
+
+		/* boundary check */
+		if (ord > priv->table2_len) {
+			IPW_DEBUG_ORD("ordinal value too long\n");
+			return -EINVAL;
+		}
+
+		/* get the address of statistic */
+		addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
+
+		/* get the second DW of statistics;
+		 * two 16-bit words - first is length, second is count */
+		field_info =
+		    ipw_read_reg32(priv,
+				   priv->table2_addr + (ord << 3) +
+				   sizeof(u32));
+
+		/* get each entry length */
+		field_len = *((u16 *) & field_info);
+
+		/* get number of entries */
+		field_count = *(((u16 *) & field_info) + 1);
+
+		/* abort if not enough memory */
+		total_len = field_len * field_count;
+		if (total_len > *len) {
+			*len = total_len;
+			return -EINVAL;
+		}
+
+		*len = total_len;
+		if (!total_len)
+			return 0;
+
+		IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
+			      "field_info = 0x%08x\n",
+			      addr, total_len, field_info);
+		ipw_read_indirect(priv, addr, val, total_len);
+		break;
+
+	default:
+		IPW_DEBUG_ORD("Invalid ordinal!\n");
+		return -EINVAL;
+
+	}
+
+	return 0;
+}
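+/* Typical use, as in the sysfs handlers further down -- *len holds the
+ * buffer size on entry and is updated to the length actually required:
+ *
+ *	u32 rtc = 0, len = sizeof(rtc);
+ *	if (!ipw_get_ordinal(priv, IPW_ORD_STAT_RTC, &rtc, &len))
+ *		... rtc now holds the value read from the device ...
+ */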
+
+static void ipw_init_ordinals(struct ipw_priv *priv)
+{
+	priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
+	priv->table0_len = ipw_read32(priv, priv->table0_addr);
+
+	IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
+		      priv->table0_addr, priv->table0_len);
+
+	priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
+	priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
+
+	IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
+		      priv->table1_addr, priv->table1_len);
+
+	priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
+	priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
+	priv->table2_len &= 0x0000ffff;	/* use first two bytes */
+
+	IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
+		      priv->table2_addr, priv->table2_len);
+
+}
+
+static u32 ipw_register_toggle(u32 reg)
+{
+	reg &= ~IPW_START_STANDBY;
+	if (reg & IPW_GATE_ODMA)
+		reg &= ~IPW_GATE_ODMA;
+	if (reg & IPW_GATE_IDMA)
+		reg &= ~IPW_GATE_IDMA;
+	if (reg & IPW_GATE_ADMA)
+		reg &= ~IPW_GATE_ADMA;
+	return reg;
+}
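+
+/* ipw_register_toggle() masks the standby trigger and the three DMA gate
+ * bits out of a value read back from IPW_EVENT_REG, presumably so that
+ * writing the LED bits back does not also re-trigger standby or gate off
+ * any of the DMA engines. */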
+
+/*
+ * LED behavior:
+ * - On radio ON, turn on any LEDs that require to be on during start
+ * - On initialization, start unassociated blink
+ * - On association, disable unassociated blink
+ * - On disassociation, start unassociated blink
+ * - On radio OFF, turn off any LEDs started during radio on
+ *
+ */
+#define LD_TIME_LINK_ON msecs_to_jiffies(300)
+#define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
+#define LD_TIME_ACT_ON msecs_to_jiffies(250)
+
+static void ipw_led_link_on(struct ipw_priv *priv)
+{
+	unsigned long flags;
+	u32 led;
+
+	/* If configured to not use LEDs, or nic_type is 1,
+	 * then we don't toggle a LINK led */
+	if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
+		return;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	if (!(priv->status & STATUS_RF_KILL_MASK) &&
+	    !(priv->status & STATUS_LED_LINK_ON)) {
+		IPW_DEBUG_LED("Link LED On\n");
+		led = ipw_read_reg32(priv, IPW_EVENT_REG);
+		led |= priv->led_association_on;
+
+		led = ipw_register_toggle(led);
+
+		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
+		ipw_write_reg32(priv, IPW_EVENT_REG, led);
+
+		priv->status |= STATUS_LED_LINK_ON;
+
+		/* If we aren't associated, schedule turning the LED off */
+		if (!(priv->status & STATUS_ASSOCIATED))
+			schedule_delayed_work(&priv->led_link_off,
+					      LD_TIME_LINK_ON);
+	}
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void ipw_bg_led_link_on(struct work_struct *work)
+{
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, led_link_on.work);
+	mutex_lock(&priv->mutex);
+	ipw_led_link_on(priv);
+	mutex_unlock(&priv->mutex);
+}
+
+static void ipw_led_link_off(struct ipw_priv *priv)
+{
+	unsigned long flags;
+	u32 led;
+
+	/* If configured not to use LEDs, or nic type is 1,
+	 * then we don't toggle the LINK led. */
+	if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
+		return;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	if (priv->status & STATUS_LED_LINK_ON) {
+		led = ipw_read_reg32(priv, IPW_EVENT_REG);
+		led &= priv->led_association_off;
+		led = ipw_register_toggle(led);
+
+		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
+		ipw_write_reg32(priv, IPW_EVENT_REG, led);
+
+		IPW_DEBUG_LED("Link LED Off\n");
+
+		priv->status &= ~STATUS_LED_LINK_ON;
+
+		/* If we aren't associated and the radio is on, schedule
+		 * turning the LED on (blink while unassociated) */
+		if (!(priv->status & STATUS_RF_KILL_MASK) &&
+		    !(priv->status & STATUS_ASSOCIATED))
+			schedule_delayed_work(&priv->led_link_on,
+					      LD_TIME_LINK_OFF);
+
+	}
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void ipw_bg_led_link_off(struct work_struct *work)
+{
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, led_link_off.work);
+	mutex_lock(&priv->mutex);
+	ipw_led_link_off(priv);
+	mutex_unlock(&priv->mutex);
+}
+
+static void __ipw_led_activity_on(struct ipw_priv *priv)
+{
+	u32 led;
+
+	if (priv->config & CFG_NO_LED)
+		return;
+
+	if (priv->status & STATUS_RF_KILL_MASK)
+		return;
+
+	if (!(priv->status & STATUS_LED_ACT_ON)) {
+		led = ipw_read_reg32(priv, IPW_EVENT_REG);
+		led |= priv->led_activity_on;
+
+		led = ipw_register_toggle(led);
+
+		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
+		ipw_write_reg32(priv, IPW_EVENT_REG, led);
+
+		IPW_DEBUG_LED("Activity LED On\n");
+
+		priv->status |= STATUS_LED_ACT_ON;
+
+		cancel_delayed_work(&priv->led_act_off);
+		schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
+	} else {
+		/* Reschedule LED off for full time period */
+		cancel_delayed_work(&priv->led_act_off);
+		schedule_delayed_work(&priv->led_act_off, LD_TIME_ACT_ON);
+	}
+}
+
+#if 0
+void ipw_led_activity_on(struct ipw_priv *priv)
+{
+	unsigned long flags;
+	spin_lock_irqsave(&priv->lock, flags);
+	__ipw_led_activity_on(priv);
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+#endif  /*  0  */
+
+static void ipw_led_activity_off(struct ipw_priv *priv)
+{
+	unsigned long flags;
+	u32 led;
+
+	if (priv->config & CFG_NO_LED)
+		return;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	if (priv->status & STATUS_LED_ACT_ON) {
+		led = ipw_read_reg32(priv, IPW_EVENT_REG);
+		led &= priv->led_activity_off;
+
+		led = ipw_register_toggle(led);
+
+		IPW_DEBUG_LED("Reg: 0x%08X\n", led);
+		ipw_write_reg32(priv, IPW_EVENT_REG, led);
+
+		IPW_DEBUG_LED("Activity LED Off\n");
+
+		priv->status &= ~STATUS_LED_ACT_ON;
+	}
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void ipw_bg_led_activity_off(struct work_struct *work)
+{
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, led_act_off.work);
+	mutex_lock(&priv->mutex);
+	ipw_led_activity_off(priv);
+	mutex_unlock(&priv->mutex);
+}
+
+static void ipw_led_band_on(struct ipw_priv *priv)
+{
+	unsigned long flags;
+	u32 led;
+
+	/* Only nic type 1 supports mode LEDs */
+	if (priv->config & CFG_NO_LED ||
+	    priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
+		return;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	led = ipw_read_reg32(priv, IPW_EVENT_REG);
+	if (priv->assoc_network->mode == IEEE_A) {
+		led |= priv->led_ofdm_on;
+		led &= priv->led_association_off;
+		IPW_DEBUG_LED("Mode LED On: 802.11a\n");
+	} else if (priv->assoc_network->mode == IEEE_G) {
+		led |= priv->led_ofdm_on;
+		led |= priv->led_association_on;
+		IPW_DEBUG_LED("Mode LED On: 802.11g\n");
+	} else {
+		led &= priv->led_ofdm_off;
+		led |= priv->led_association_on;
+		IPW_DEBUG_LED("Mode LED On: 802.11b\n");
+	}
+
+	led = ipw_register_toggle(led);
+
+	IPW_DEBUG_LED("Reg: 0x%08X\n", led);
+	ipw_write_reg32(priv, IPW_EVENT_REG, led);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void ipw_led_band_off(struct ipw_priv *priv)
+{
+	unsigned long flags;
+	u32 led;
+
+	/* Only nic type 1 supports mode LEDs */
+	if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
+		return;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	led = ipw_read_reg32(priv, IPW_EVENT_REG);
+	led &= priv->led_ofdm_off;
+	led &= priv->led_association_off;
+
+	led = ipw_register_toggle(led);
+
+	IPW_DEBUG_LED("Reg: 0x%08X\n", led);
+	ipw_write_reg32(priv, IPW_EVENT_REG, led);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void ipw_led_radio_on(struct ipw_priv *priv)
+{
+	ipw_led_link_on(priv);
+}
+
+static void ipw_led_radio_off(struct ipw_priv *priv)
+{
+	ipw_led_activity_off(priv);
+	ipw_led_link_off(priv);
+}
+
+static void ipw_led_link_up(struct ipw_priv *priv)
+{
+	/* Set the Link Led on for all nic types */
+	ipw_led_link_on(priv);
+}
+
+static void ipw_led_link_down(struct ipw_priv *priv)
+{
+	ipw_led_activity_off(priv);
+	ipw_led_link_off(priv);
+
+	if (priv->status & STATUS_RF_KILL_MASK)
+		ipw_led_radio_off(priv);
+}
+
+static void ipw_led_init(struct ipw_priv *priv)
+{
+	priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
+
+	/* Set the default PINs for the link and activity leds */
+	priv->led_activity_on = IPW_ACTIVITY_LED;
+	priv->led_activity_off = ~(IPW_ACTIVITY_LED);
+
+	priv->led_association_on = IPW_ASSOCIATED_LED;
+	priv->led_association_off = ~(IPW_ASSOCIATED_LED);
+
+	/* Set the default PINs for the OFDM leds */
+	priv->led_ofdm_on = IPW_OFDM_LED;
+	priv->led_ofdm_off = ~(IPW_OFDM_LED);
+
+	switch (priv->nic_type) {
+	case EEPROM_NIC_TYPE_1:
+		/* In this NIC type, the LEDs are reversed.... */
+		priv->led_activity_on = IPW_ASSOCIATED_LED;
+		priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
+		priv->led_association_on = IPW_ACTIVITY_LED;
+		priv->led_association_off = ~(IPW_ACTIVITY_LED);
+
+		if (!(priv->config & CFG_NO_LED))
+			ipw_led_band_on(priv);
+
+		/* And we don't blink link LEDs for this nic, so
+		 * just return here */
+		return;
+
+	case EEPROM_NIC_TYPE_3:
+	case EEPROM_NIC_TYPE_2:
+	case EEPROM_NIC_TYPE_4:
+	case EEPROM_NIC_TYPE_0:
+		break;
+
+	default:
+		IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
+			       priv->nic_type);
+		priv->nic_type = EEPROM_NIC_TYPE_0;
+		break;
+	}
+
+	if (!(priv->config & CFG_NO_LED)) {
+		if (priv->status & STATUS_ASSOCIATED)
+			ipw_led_link_on(priv);
+		else
+			ipw_led_link_off(priv);
+	}
+}
+
+static void ipw_led_shutdown(struct ipw_priv *priv)
+{
+	ipw_led_activity_off(priv);
+	ipw_led_link_off(priv);
+	ipw_led_band_off(priv);
+	cancel_delayed_work(&priv->led_link_on);
+	cancel_delayed_work(&priv->led_link_off);
+	cancel_delayed_work(&priv->led_act_off);
+}
+
+/*
+ * The following adds a new attribute to the sysfs representation
+ * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
+ * used for controlling the debug level.
+ *
+ * See the level definitions in ipw for details.
+ */
+static ssize_t debug_level_show(struct device_driver *d, char *buf)
+{
+	return sprintf(buf, "0x%08X\n", ipw_debug_level);
+}
+
+static ssize_t debug_level_store(struct device_driver *d, const char *buf,
+				 size_t count)
+{
+	char *p = (char *)buf;
+	u32 val;
+
+	if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
+		p++;
+		if (p[0] == 'x' || p[0] == 'X')
+			p++;
+		val = simple_strtoul(p, &p, 16);
+	} else
+		val = simple_strtoul(p, &p, 10);
+	if (p == buf)
+		printk(KERN_INFO DRV_NAME
+		       ": %s is not in hex or decimal form.\n", buf);
+	else
+		ipw_debug_level = val;
+
+	return strnlen(buf, count);
+}
+static DRIVER_ATTR_RW(debug_level);
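+/* Example usage from userspace (the driver directory name is illustrative;
+ * both hex, optionally "0x"-prefixed, and decimal values are accepted):
+ *
+ *	# echo 0x43fff > /sys/bus/pci/drivers/ipw2200/debug_level
+ *	# cat /sys/bus/pci/drivers/ipw2200/debug_level
+ */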
+
+static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
+{
+	/* length = 1st dword in log */
+	return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
+}
+
+static void ipw_capture_event_log(struct ipw_priv *priv,
+				  u32 log_len, struct ipw_event *log)
+{
+	u32 base;
+
+	if (log_len) {
+		base = ipw_read32(priv, IPW_EVENT_LOG);
+		ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
+				  (u8 *) log, sizeof(*log) * log_len);
+	}
+}
+
+static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
+{
+	struct ipw_fw_error *error;
+	u32 log_len = ipw_get_event_log_len(priv);
+	u32 base = ipw_read32(priv, IPW_ERROR_LOG);
+	u32 elem_len = ipw_read_reg32(priv, base);
+
+	error = kmalloc(sizeof(*error) +
+			sizeof(*error->elem) * elem_len +
+			sizeof(*error->log) * log_len, GFP_ATOMIC);
+	if (!error) {
+		IPW_ERROR("Memory allocation for firmware error log "
+			  "failed.\n");
+		return NULL;
+	}
+	error->jiffies = jiffies;
+	error->status = priv->status;
+	error->config = priv->config;
+	error->elem_len = elem_len;
+	error->log_len = log_len;
+	error->elem = (struct ipw_error_elem *)error->payload;
+	error->log = (struct ipw_event *)(error->elem + elem_len);
+
+	ipw_capture_event_log(priv, log_len, error->log);
+
+	if (elem_len)
+		ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
+				  sizeof(*error->elem) * elem_len);
+
+	return error;
+}
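+/* The captured record is a single allocation laid out as
+ *
+ *	struct ipw_fw_error | elem[elem_len] | log[log_len]
+ *	                      ^ error->elem    ^ error->log
+ *
+ * so freeing priv->error releases the element and event logs with it. */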
+
+static ssize_t show_event_log(struct device *d,
+			      struct device_attribute *attr, char *buf)
+{
+	struct ipw_priv *priv = dev_get_drvdata(d);
+	u32 log_len = ipw_get_event_log_len(priv);
+	u32 log_size;
+	struct ipw_event *log;
+	u32 len = 0, i;
+
+	/* not using min() because of its strict type checking */
+	log_size = PAGE_SIZE / sizeof(*log) > log_len ?
+			sizeof(*log) * log_len : PAGE_SIZE;
+	log = kzalloc(log_size, GFP_KERNEL);
+	if (!log) {
+		IPW_ERROR("Unable to allocate memory for log\n");
+		return 0;
+	}
+	log_len = log_size / sizeof(*log);
+	ipw_capture_event_log(priv, log_len, log);
+
+	len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
+	for (i = 0; i < log_len; i++)
+		len += snprintf(buf + len, PAGE_SIZE - len,
+				"\n%08X%08X%08X",
+				log[i].time, log[i].event, log[i].data);
+	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+	kfree(log);
+	return len;
+}
+
+static DEVICE_ATTR(event_log, 0444, show_event_log, NULL);
+
+static ssize_t show_error(struct device *d,
+			  struct device_attribute *attr, char *buf)
+{
+	struct ipw_priv *priv = dev_get_drvdata(d);
+	u32 len = 0, i;
+	if (!priv->error)
+		return 0;
+	len += snprintf(buf + len, PAGE_SIZE - len,
+			"%08lX%08X%08X%08X",
+			priv->error->jiffies,
+			priv->error->status,
+			priv->error->config, priv->error->elem_len);
+	for (i = 0; i < priv->error->elem_len; i++)
+		len += snprintf(buf + len, PAGE_SIZE - len,
+				"\n%08X%08X%08X%08X%08X%08X%08X",
+				priv->error->elem[i].time,
+				priv->error->elem[i].desc,
+				priv->error->elem[i].blink1,
+				priv->error->elem[i].blink2,
+				priv->error->elem[i].link1,
+				priv->error->elem[i].link2,
+				priv->error->elem[i].data);
+
+	len += snprintf(buf + len, PAGE_SIZE - len,
+			"\n%08X", priv->error->log_len);
+	for (i = 0; i < priv->error->log_len; i++)
+		len += snprintf(buf + len, PAGE_SIZE - len,
+				"\n%08X%08X%08X",
+				priv->error->log[i].time,
+				priv->error->log[i].event,
+				priv->error->log[i].data);
+	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+	return len;
+}
+
+static ssize_t clear_error(struct device *d,
+			   struct device_attribute *attr,
+			   const char *buf, size_t count)
+{
+	struct ipw_priv *priv = dev_get_drvdata(d);
+
+	kfree(priv->error);
+	priv->error = NULL;
+	return count;
+}
+
+static DEVICE_ATTR(error, 0644, show_error, clear_error);
+
+static ssize_t show_cmd_log(struct device *d,
+			    struct device_attribute *attr, char *buf)
+{
+	struct ipw_priv *priv = dev_get_drvdata(d);
+	u32 len = 0, i;
+	if (!priv->cmdlog)
+		return 0;
+	for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
+	     (i != priv->cmdlog_pos) && (len < PAGE_SIZE);
+	     i = (i + 1) % priv->cmdlog_len) {
+		len +=
+		    snprintf(buf + len, PAGE_SIZE - len,
+			     "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
+			     priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
+			     priv->cmdlog[i].cmd.len);
+		len +=
+		    snprintk_buf(buf + len, PAGE_SIZE - len,
+				 (u8 *) priv->cmdlog[i].cmd.param,
+				 priv->cmdlog[i].cmd.len);
+		len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+	}
+	len += snprintf(buf + len, PAGE_SIZE - len, "\n");
+	return len;
+}
+
+static DEVICE_ATTR(cmd_log, 0444, show_cmd_log, NULL);
+
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+static void ipw_prom_free(struct ipw_priv *priv);
+static int ipw_prom_alloc(struct ipw_priv *priv);
+static ssize_t store_rtap_iface(struct device *d,
+			 struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct ipw_priv *priv = dev_get_drvdata(d);
+	int rc = 0;
+
+	if (count < 1)
+		return -EINVAL;
+
+	switch (buf[0]) {
+	case '0':
+		if (!rtap_iface)
+			return count;
+
+		if (netif_running(priv->prom_net_dev)) {
+			IPW_WARNING("Interface is up.  Cannot unregister.\n");
+			return count;
+		}
+
+		ipw_prom_free(priv);
+		rtap_iface = 0;
+		break;
+
+	case '1':
+		if (rtap_iface)
+			return count;
+
+		rc = ipw_prom_alloc(priv);
+		if (!rc)
+			rtap_iface = 1;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	if (rc) {
+		IPW_ERROR("Failed to register promiscuous network "
+			  "device (error %d).\n", rc);
+	}
+
+	return count;
+}
+
+static ssize_t show_rtap_iface(struct device *d,
+			struct device_attribute *attr,
+			char *buf)
+{
+	struct ipw_priv *priv = dev_get_drvdata(d);
+	if (rtap_iface)
+		return sprintf(buf, "%s", priv->prom_net_dev->name);
+	else {
+		buf[0] = '-';
+		buf[1] = '1';
+		buf[2] = '\0';
+		return 3;
+	}
+}
+
+static DEVICE_ATTR(rtap_iface, 0600, show_rtap_iface, store_rtap_iface);
+
+static ssize_t store_rtap_filter(struct device *d,
+			 struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct ipw_priv *priv = dev_get_drvdata(d);
+
+	if (!priv->prom_priv) {
+		IPW_ERROR("Attempting to set filter without "
+			  "rtap_iface enabled.\n");
+		return -EPERM;
+	}
+
+	priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
+
+	IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
+		       BIT_ARG16(priv->prom_priv->filter));
+
+	return count;
+}
+
+static ssize_t show_rtap_filter(struct device *d,
+			struct device_attribute *attr,
+			char *buf)
+{
+	struct ipw_priv *priv = dev_get_drvdata(d);
+	return sprintf(buf, "0x%04X",
+		       priv->prom_priv ? priv->prom_priv->filter : 0);
+}
+
+static DEVICE_ATTR(rtap_filter, 0600, show_rtap_filter, store_rtap_filter);
+#endif
+
+static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
+			     char *buf)
+{
+	struct ipw_priv *priv = dev_get_drvdata(d);
+	return sprintf(buf, "%d\n", priv->ieee->scan_age);
+}
+
+static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
+			      const char *buf, size_t count)
+{
+	struct ipw_priv *priv = dev_get_drvdata(d);
+	struct net_device *dev = priv->net_dev;
+	char buffer[] = "00000000";
+	unsigned long len =
+	    (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
+	unsigned long val;
+	char *p = buffer;
+
+	IPW_DEBUG_INFO("enter\n");
+
+	strncpy(buffer, buf, len);
+	buffer[len] = 0;
+
+	if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
+		p++;
+		if (p[0] == 'x' || p[0] == 'X')
+			p++;
+		val = simple_strtoul(p, &p, 16);
+	} else
+		val = simple_strtoul(p, &p, 10);
+	if (p == buffer) {
+		IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
+	} else {
+		priv->ieee->scan_age = val;
+		IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
+	}
+
+	IPW_DEBUG_INFO("exit\n");
+	return len;
+}
+
+static DEVICE_ATTR(scan_age, 0644, show_scan_age, store_scan_age);
+
+static ssize_t show_led(struct device *d, struct device_attribute *attr,
+			char *buf)
+{
+	struct ipw_priv *priv = dev_get_drvdata(d);
+	return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
+}
+
+static ssize_t store_led(struct device *d, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct ipw_priv *priv = dev_get_drvdata(d);
+
+	IPW_DEBUG_INFO("enter\n");
+
+	if (count == 0)
+		return 0;
+
+	if (*buf == 0) {
+		IPW_DEBUG_LED("Disabling LED control.\n");
+		priv->config |= CFG_NO_LED;
+		ipw_led_shutdown(priv);
+	} else {
+		IPW_DEBUG_LED("Enabling LED control.\n");
+		priv->config &= ~CFG_NO_LED;
+		ipw_led_init(priv);
+	}
+
+	IPW_DEBUG_INFO("exit\n");
+	return count;
+}
+
+static DEVICE_ATTR(led, 0644, show_led, store_led);
+
+static ssize_t show_status(struct device *d,
+			   struct device_attribute *attr, char *buf)
+{
+	struct ipw_priv *p = dev_get_drvdata(d);
+	return sprintf(buf, "0x%08x\n", (int)p->status);
+}
+
+static DEVICE_ATTR(status, 0444, show_status, NULL);
+
+static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
+			char *buf)
+{
+	struct ipw_priv *p = dev_get_drvdata(d);
+	return sprintf(buf, "0x%08x\n", (int)p->config);
+}
+
+static DEVICE_ATTR(cfg, 0444, show_cfg, NULL);
+
+static ssize_t show_nic_type(struct device *d,
+			     struct device_attribute *attr, char *buf)
+{
+	struct ipw_priv *priv = dev_get_drvdata(d);
+	return sprintf(buf, "TYPE: %d\n", priv->nic_type);
+}
+
+static DEVICE_ATTR(nic_type, 0444, show_nic_type, NULL);
+
+static ssize_t show_ucode_version(struct device *d,
+				  struct device_attribute *attr, char *buf)
+{
+	u32 len = sizeof(u32), tmp = 0;
+	struct ipw_priv *p = dev_get_drvdata(d);
+
+	if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
+		return 0;
+
+	return sprintf(buf, "0x%08x\n", tmp);
+}
+
+static DEVICE_ATTR(ucode_version, 0644, show_ucode_version, NULL);
+
+static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
+			char *buf)
+{
+	u32 len = sizeof(u32), tmp = 0;
+	struct ipw_priv *p = dev_get_drvdata(d);
+
+	if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
+		return 0;
+
+	return sprintf(buf, "0x%08x\n", tmp);
+}
+
+static DEVICE_ATTR(rtc, 0644, show_rtc, NULL);
+
+/*
+ * Add a device attribute to view/control the delay between eeprom
+ * operations.
+ */
+static ssize_t show_eeprom_delay(struct device *d,
+				 struct device_attribute *attr, char *buf)
+{
+	struct ipw_priv *p = dev_get_drvdata(d);
+	int n = p->eeprom_delay;
+	return sprintf(buf, "%i\n", n);
+}
+static ssize_t store_eeprom_delay(struct device *d,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct ipw_priv *p = dev_get_drvdata(d);
+	sscanf(buf, "%i", &p->eeprom_delay);
+	return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(eeprom_delay, 0644, show_eeprom_delay, store_eeprom_delay);
+
+static ssize_t show_command_event_reg(struct device *d,
+				      struct device_attribute *attr, char *buf)
+{
+	u32 reg = 0;
+	struct ipw_priv *p = dev_get_drvdata(d);
+
+	reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
+	return sprintf(buf, "0x%08x\n", reg);
+}
+static ssize_t store_command_event_reg(struct device *d,
+				       struct device_attribute *attr,
+				       const char *buf, size_t count)
+{
+	u32 reg;
+	struct ipw_priv *p = dev_get_drvdata(d);
+
+	sscanf(buf, "%x", &reg);
+	ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
+	return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(command_event_reg, 0644,
+		   show_command_event_reg, store_command_event_reg);
+
+static ssize_t show_mem_gpio_reg(struct device *d,
+				 struct device_attribute *attr, char *buf)
+{
+	u32 reg = 0;
+	struct ipw_priv *p = dev_get_drvdata(d);
+
+	reg = ipw_read_reg32(p, 0x301100);
+	return sprintf(buf, "0x%08x\n", reg);
+}
+static ssize_t store_mem_gpio_reg(struct device *d,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	u32 reg;
+	struct ipw_priv *p = dev_get_drvdata(d);
+
+	sscanf(buf, "%x", &reg);
+	ipw_write_reg32(p, 0x301100, reg);
+	return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(mem_gpio_reg, 0644, show_mem_gpio_reg, store_mem_gpio_reg);
+
+static ssize_t show_indirect_dword(struct device *d,
+				   struct device_attribute *attr, char *buf)
+{
+	u32 reg = 0;
+	struct ipw_priv *priv = dev_get_drvdata(d);
+
+	if (priv->status & STATUS_INDIRECT_DWORD)
+		reg = ipw_read_reg32(priv, priv->indirect_dword);
+	else
+		reg = 0;
+
+	return sprintf(buf, "0x%08x\n", reg);
+}
+static ssize_t store_indirect_dword(struct device *d,
+				    struct device_attribute *attr,
+				    const char *buf, size_t count)
+{
+	struct ipw_priv *priv = dev_get_drvdata(d);
+
+	sscanf(buf, "%x", &priv->indirect_dword);
+	priv->status |= STATUS_INDIRECT_DWORD;
+	return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(indirect_dword, 0644,
+		   show_indirect_dword, store_indirect_dword);
+
+static ssize_t show_indirect_byte(struct device *d,
+				  struct device_attribute *attr, char *buf)
+{
+	u8 reg = 0;
+	struct ipw_priv *priv = dev_get_drvdata(d);
+
+	if (priv->status & STATUS_INDIRECT_BYTE)
+		reg = ipw_read_reg8(priv, priv->indirect_byte);
+	else
+		reg = 0;
+
+	return sprintf(buf, "0x%02x\n", reg);
+}
+static ssize_t store_indirect_byte(struct device *d,
+				   struct device_attribute *attr,
+				   const char *buf, size_t count)
+{
+	struct ipw_priv *priv = dev_get_drvdata(d);
+
+	sscanf(buf, "%x", &priv->indirect_byte);
+	priv->status |= STATUS_INDIRECT_BYTE;
+	return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(indirect_byte, 0644,
+		   show_indirect_byte, store_indirect_byte);
+
+static ssize_t show_direct_dword(struct device *d,
+				 struct device_attribute *attr, char *buf)
+{
+	u32 reg = 0;
+	struct ipw_priv *priv = dev_get_drvdata(d);
+
+	if (priv->status & STATUS_DIRECT_DWORD)
+		reg = ipw_read32(priv, priv->direct_dword);
+	else
+		reg = 0;
+
+	return sprintf(buf, "0x%08x\n", reg);
+}
+static ssize_t store_direct_dword(struct device *d,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct ipw_priv *priv = dev_get_drvdata(d);
+
+	sscanf(buf, "%x", &priv->direct_dword);
+	priv->status |= STATUS_DIRECT_DWORD;
+	return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(direct_dword, 0644, show_direct_dword, store_direct_dword);
+
+static int rf_kill_active(struct ipw_priv *priv)
+{
+	if (0 == (ipw_read32(priv, 0x30) & 0x10000)) {
+		priv->status |= STATUS_RF_KILL_HW;
+		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
+	} else {
+		priv->status &= ~STATUS_RF_KILL_HW;
+		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, false);
+	}
+
+	return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
+}
+
+static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
+			    char *buf)
+{
+	/* 0 - RF kill not enabled
+	   1 - SW based RF kill active (sysfs)
+	   2 - HW based RF kill active
+	   3 - Both HW and SW based RF kill active */
+	struct ipw_priv *priv = dev_get_drvdata(d);
+	int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
+	    (rf_kill_active(priv) ? 0x2 : 0x0);
+	return sprintf(buf, "%i\n", val);
+}
+
+static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
+{
+	if ((disable_radio ? 1 : 0) ==
+	    ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
+		return 0;
+
+	IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO  %s\n",
+			  disable_radio ? "OFF" : "ON");
+
+	if (disable_radio) {
+		priv->status |= STATUS_RF_KILL_SW;
+
+		cancel_delayed_work(&priv->request_scan);
+		cancel_delayed_work(&priv->request_direct_scan);
+		cancel_delayed_work(&priv->request_passive_scan);
+		cancel_delayed_work(&priv->scan_event);
+		schedule_work(&priv->down);
+	} else {
+		priv->status &= ~STATUS_RF_KILL_SW;
+		if (rf_kill_active(priv)) {
+			IPW_DEBUG_RF_KILL("Can not turn radio back on - "
+					  "disabled by HW switch\n");
+			/* Make sure the RF_KILL check timer is running */
+			cancel_delayed_work(&priv->rf_kill);
+			schedule_delayed_work(&priv->rf_kill,
+					      round_jiffies_relative(2 * HZ));
+		} else
+			schedule_work(&priv->up);
+	}
+
+	return 1;
+}
+
+static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
+			     const char *buf, size_t count)
+{
+	struct ipw_priv *priv = dev_get_drvdata(d);
+
+	ipw_radio_kill_sw(priv, buf[0] == '1');
+
+	return count;
+}
+
+static DEVICE_ATTR(rf_kill, 0644, show_rf_kill, store_rf_kill);
+
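+/*
+ * Usage sketch for the attribute above (the PCI address is illustrative;
+ * device attributes appear under the adapter's sysfs device directory):
+ *
+ *	# echo 1 > /sys/bus/pci/devices/0000:02:02.0/rf_kill    SW kill on
+ *	# cat /sys/bus/pci/devices/0000:02:02.0/rf_kill         prints 0..3
+ */
+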
+static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
+			       char *buf)
+{
+	struct ipw_priv *priv = dev_get_drvdata(d);
+	int pos = 0, len = 0;
+	if (priv->config & CFG_SPEED_SCAN) {
+		while (priv->speed_scan[pos] != 0)
+			len += sprintf(&buf[len], "%d ",
+				       priv->speed_scan[pos++]);
+		return len + sprintf(&buf[len], "\n");
+	}
+
+	return sprintf(buf, "0\n");
+}
+
+static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct ipw_priv *priv = dev_get_drvdata(d);
+	int channel, pos = 0;
+	const char *p = buf;
+
+	/* list of space separated channels to scan, optionally ending with 0 */
+	while ((channel = simple_strtol(p, NULL, 0))) {
+		if (pos == MAX_SPEED_SCAN - 1) {
+			priv->speed_scan[pos] = 0;
+			break;
+		}
+
+		if (libipw_is_valid_channel(priv->ieee, channel))
+			priv->speed_scan[pos++] = channel;
+		else
+			IPW_WARNING("Skipping invalid channel request: %d\n",
+				    channel);
+		p = strchr(p, ' ');
+		if (!p)
+			break;
+		while (*p == ' ' || *p == '\t')
+			p++;
+	}
+
+	if (pos == 0)
+		priv->config &= ~CFG_SPEED_SCAN;
+	else {
+		priv->speed_scan_pos = 0;
+		priv->config |= CFG_SPEED_SCAN;
+	}
+
+	return count;
+}
+
+static DEVICE_ATTR(speed_scan, 0644, show_speed_scan, store_speed_scan);
+
+static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
+			      char *buf)
+{
+	struct ipw_priv *priv = dev_get_drvdata(d);
+	return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
+}
+
+static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
+			       const char *buf, size_t count)
+{
+	struct ipw_priv *priv = dev_get_drvdata(d);
+	if (buf[0] == '1')
+		priv->config |= CFG_NET_STATS;
+	else
+		priv->config &= ~CFG_NET_STATS;
+
+	return count;
+}
+
+static DEVICE_ATTR(net_stats, 0644, show_net_stats, store_net_stats);
+
+static ssize_t show_channels(struct device *d,
+			     struct device_attribute *attr,
+			     char *buf)
+{
+	struct ipw_priv *priv = dev_get_drvdata(d);
+	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
+	int len = 0, i;
+
+	len = sprintf(&buf[len],
+		      "Displaying %d channels in 2.4 GHz band "
+		      "(802.11bg):\n", geo->bg_channels);
+
+	for (i = 0; i < geo->bg_channels; i++) {
+		len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n",
+			       geo->bg[i].channel,
+			       geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT ?
+			       " (radar spectrum)" : "",
+			       ((geo->bg[i].flags & LIBIPW_CH_NO_IBSS) ||
+				(geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT))
+			       ? "" : ", IBSS",
+			       geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
+			       "passive only" : "active/passive",
+			       geo->bg[i].flags & LIBIPW_CH_B_ONLY ?
+			       "B" : "B/G");
+	}
+
+	len += sprintf(&buf[len],
+		       "Displaying %d channels in 5.2 GHz band "
+		       "(802.11a):\n", geo->a_channels);
+	for (i = 0; i < geo->a_channels; i++) {
+		len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n",
+			       geo->a[i].channel,
+			       geo->a[i].flags & LIBIPW_CH_RADAR_DETECT ?
+			       " (radar spectrum)" : "",
+			       ((geo->a[i].flags & LIBIPW_CH_NO_IBSS) ||
+				(geo->a[i].flags & LIBIPW_CH_RADAR_DETECT))
+			       ? "" : ", IBSS",
+			       geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY ?
+			       "passive only" : "active/passive");
+	}
+
+	return len;
+}
+
+static DEVICE_ATTR(channels, 0400, show_channels, NULL);
+
+static void notify_wx_assoc_event(struct ipw_priv *priv)
+{
+	union iwreq_data wrqu;
+	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+	if (priv->status & STATUS_ASSOCIATED)
+		memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
+	else
+		eth_zero_addr(wrqu.ap_addr.sa_data);
+	wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
+}
+
+static void ipw_irq_tasklet(struct ipw_priv *priv)
+{
+	u32 inta, inta_mask, handled = 0;
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&priv->irq_lock, flags);
+
+	inta = ipw_read32(priv, IPW_INTA_RW);
+	inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
+
+	if (inta == 0xFFFFFFFF) {
+		/* Hardware disappeared */
+		IPW_WARNING("TASKLET INTA == 0xFFFFFFFF\n");
+		/* Only handle the cached INTA values */
+		inta = 0;
+	}
+	inta &= (IPW_INTA_MASK_ALL & inta_mask);
+
+	/* Add any cached INTA values that need to be handled */
+	inta |= priv->isr_inta;
+
+	spin_unlock_irqrestore(&priv->irq_lock, flags);
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	/* handle all the justifications for the interrupt */
+	if (inta & IPW_INTA_BIT_RX_TRANSFER) {
+		ipw_rx(priv);
+		handled |= IPW_INTA_BIT_RX_TRANSFER;
+	}
+
+	if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
+		IPW_DEBUG_HC("Command completed.\n");
+		rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
+		priv->status &= ~STATUS_HCMD_ACTIVE;
+		wake_up_interruptible(&priv->wait_command_queue);
+		handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
+	}
+
+	if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
+		IPW_DEBUG_TX("TX_QUEUE_1\n");
+		rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
+		handled |= IPW_INTA_BIT_TX_QUEUE_1;
+	}
+
+	if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
+		IPW_DEBUG_TX("TX_QUEUE_2\n");
+		rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
+		handled |= IPW_INTA_BIT_TX_QUEUE_2;
+	}
+
+	if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
+		IPW_DEBUG_TX("TX_QUEUE_3\n");
+		rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
+		handled |= IPW_INTA_BIT_TX_QUEUE_3;
+	}
+
+	if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
+		IPW_DEBUG_TX("TX_QUEUE_4\n");
+		rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
+		handled |= IPW_INTA_BIT_TX_QUEUE_4;
+	}
+
+	if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
+		IPW_WARNING("STATUS_CHANGE\n");
+		handled |= IPW_INTA_BIT_STATUS_CHANGE;
+	}
+
+	if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
+		IPW_WARNING("TX_PERIOD_EXPIRED\n");
+		handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
+	}
+
+	if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
+		IPW_WARNING("HOST_CMD_DONE\n");
+		handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
+	}
+
+	if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
+		IPW_WARNING("FW_INITIALIZATION_DONE\n");
+		handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
+	}
+
+	if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
+		IPW_WARNING("PHY_OFF_DONE\n");
+		handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
+	}
+
+	if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
+		IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
+		priv->status |= STATUS_RF_KILL_HW;
+		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
+		wake_up_interruptible(&priv->wait_command_queue);
+		priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
+		cancel_delayed_work(&priv->request_scan);
+		cancel_delayed_work(&priv->request_direct_scan);
+		cancel_delayed_work(&priv->request_passive_scan);
+		cancel_delayed_work(&priv->scan_event);
+		schedule_work(&priv->link_down);
+		schedule_delayed_work(&priv->rf_kill, 2 * HZ);
+		handled |= IPW_INTA_BIT_RF_KILL_DONE;
+	}
+
+	if (inta & IPW_INTA_BIT_FATAL_ERROR) {
+		IPW_WARNING("Firmware error detected.  Restarting.\n");
+		if (priv->error) {
+			IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
+			if (ipw_debug_level & IPW_DL_FW_ERRORS) {
+				struct ipw_fw_error *error =
+				    ipw_alloc_error_log(priv);
+				ipw_dump_error_log(priv, error);
+				kfree(error);
+			}
+		} else {
+			priv->error = ipw_alloc_error_log(priv);
+			if (priv->error)
+				IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
+			else
+				IPW_DEBUG_FW("Error allocating sysfs 'error' "
+					     "log.\n");
+			if (ipw_debug_level & IPW_DL_FW_ERRORS)
+				ipw_dump_error_log(priv, priv->error);
+		}
+
+		/* XXX: If hardware encryption is for WPA/WPA2,
+		 * we have to notify the supplicant. */
+		if (priv->ieee->sec.encrypt) {
+			priv->status &= ~STATUS_ASSOCIATED;
+			notify_wx_assoc_event(priv);
+		}
+
+		/* Keep the restart process from trying to send host
+		 * commands by clearing the INIT status bit */
+		priv->status &= ~STATUS_INIT;
+
+		/* Cancel currently queued command. */
+		priv->status &= ~STATUS_HCMD_ACTIVE;
+		wake_up_interruptible(&priv->wait_command_queue);
+
+		schedule_work(&priv->adapter_restart);
+		handled |= IPW_INTA_BIT_FATAL_ERROR;
+	}
+
+	if (inta & IPW_INTA_BIT_PARITY_ERROR) {
+		IPW_ERROR("Parity error\n");
+		handled |= IPW_INTA_BIT_PARITY_ERROR;
+	}
+
+	if (handled != inta) {
+		IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
+	}
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	/* enable all interrupts */
+	ipw_enable_interrupts(priv);
+}
+
+#define IPW_CMD(x) case IPW_CMD_ ## x : return #x
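+/*
+ * A sketch of the expansion above: IPW_CMD(SSID) becomes
+ *
+ *	case IPW_CMD_SSID : return "SSID";
+ *
+ * so each entry in the switch below maps a command id to its name.
+ */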
+static char *get_cmd_string(u8 cmd)
+{
+	switch (cmd) {
+		IPW_CMD(HOST_COMPLETE);
+		IPW_CMD(POWER_DOWN);
+		IPW_CMD(SYSTEM_CONFIG);
+		IPW_CMD(MULTICAST_ADDRESS);
+		IPW_CMD(SSID);
+		IPW_CMD(ADAPTER_ADDRESS);
+		IPW_CMD(PORT_TYPE);
+		IPW_CMD(RTS_THRESHOLD);
+		IPW_CMD(FRAG_THRESHOLD);
+		IPW_CMD(POWER_MODE);
+		IPW_CMD(WEP_KEY);
+		IPW_CMD(TGI_TX_KEY);
+		IPW_CMD(SCAN_REQUEST);
+		IPW_CMD(SCAN_REQUEST_EXT);
+		IPW_CMD(ASSOCIATE);
+		IPW_CMD(SUPPORTED_RATES);
+		IPW_CMD(SCAN_ABORT);
+		IPW_CMD(TX_FLUSH);
+		IPW_CMD(QOS_PARAMETERS);
+		IPW_CMD(DINO_CONFIG);
+		IPW_CMD(RSN_CAPABILITIES);
+		IPW_CMD(RX_KEY);
+		IPW_CMD(CARD_DISABLE);
+		IPW_CMD(SEED_NUMBER);
+		IPW_CMD(TX_POWER);
+		IPW_CMD(COUNTRY_INFO);
+		IPW_CMD(AIRONET_INFO);
+		IPW_CMD(AP_TX_POWER);
+		IPW_CMD(CCKM_INFO);
+		IPW_CMD(CCX_VER_INFO);
+		IPW_CMD(SET_CALIBRATION);
+		IPW_CMD(SENSITIVITY_CALIB);
+		IPW_CMD(RETRY_LIMIT);
+		IPW_CMD(IPW_PRE_POWER_DOWN);
+		IPW_CMD(VAP_BEACON_TEMPLATE);
+		IPW_CMD(VAP_DTIM_PERIOD);
+		IPW_CMD(EXT_SUPPORTED_RATES);
+		IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
+		IPW_CMD(VAP_QUIET_INTERVALS);
+		IPW_CMD(VAP_CHANNEL_SWITCH);
+		IPW_CMD(VAP_MANDATORY_CHANNELS);
+		IPW_CMD(VAP_CELL_PWR_LIMIT);
+		IPW_CMD(VAP_CF_PARAM_SET);
+		IPW_CMD(VAP_SET_BEACONING_STATE);
+		IPW_CMD(MEASUREMENT);
+		IPW_CMD(POWER_CAPABILITY);
+		IPW_CMD(SUPPORTED_CHANNELS);
+		IPW_CMD(TPC_REPORT);
+		IPW_CMD(WME_INFO);
+		IPW_CMD(PRODUCTION_COMMAND);
+	default:
+		return "UNKNOWN";
+	}
+}
+
+#define HOST_COMPLETE_TIMEOUT HZ
+
+static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
+{
+	int rc = 0;
+	unsigned long flags;
+	unsigned long now, end;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	if (priv->status & STATUS_HCMD_ACTIVE) {
+		IPW_ERROR("Failed to send %s: Already sending a command.\n",
+			  get_cmd_string(cmd->cmd));
+		spin_unlock_irqrestore(&priv->lock, flags);
+		return -EAGAIN;
+	}
+
+	priv->status |= STATUS_HCMD_ACTIVE;
+
+	if (priv->cmdlog) {
+		priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
+		priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
+		priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
+		memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
+		       cmd->len);
+		priv->cmdlog[priv->cmdlog_pos].retcode = -1;
+	}
+
+	IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
+		     get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
+		     priv->status);
+
+#ifndef DEBUG_CMD_WEP_KEY
+	if (cmd->cmd == IPW_CMD_WEP_KEY)
+		IPW_DEBUG_HC("WEP_KEY command masked out for secure.\n");
+	else
+#endif
+		printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
+
+	rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
+	if (rc) {
+		priv->status &= ~STATUS_HCMD_ACTIVE;
+		IPW_ERROR("Failed to send %s: Reason %d\n",
+			  get_cmd_string(cmd->cmd), rc);
+		spin_unlock_irqrestore(&priv->lock, flags);
+		goto exit;
+	}
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	now = jiffies;
+	end = now + HOST_COMPLETE_TIMEOUT;
+again:
+	rc = wait_event_interruptible_timeout(priv->wait_command_queue,
+					      !(priv->
+						status & STATUS_HCMD_ACTIVE),
+					      end - now);
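+	/* wait_event_interruptible_timeout() returns the remaining jiffies
+	 * on success, 0 on timeout, or -ERESTARTSYS if a signal arrived;
+	 * in the signal case, re-arm the wait for the time that is left. */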
+	if (rc < 0) {
+		now = jiffies;
+		if (time_before(now, end))
+			goto again;
+		rc = 0;
+	}
+
+	if (rc == 0) {
+		spin_lock_irqsave(&priv->lock, flags);
+		if (priv->status & STATUS_HCMD_ACTIVE) {
+			IPW_ERROR("Failed to send %s: Command timed out.\n",
+				  get_cmd_string(cmd->cmd));
+			priv->status &= ~STATUS_HCMD_ACTIVE;
+			spin_unlock_irqrestore(&priv->lock, flags);
+			rc = -EIO;
+			goto exit;
+		}
+		spin_unlock_irqrestore(&priv->lock, flags);
+	} else
+		rc = 0;
+
+	if (priv->status & STATUS_RF_KILL_HW) {
+		IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
+			  get_cmd_string(cmd->cmd));
+		rc = -EIO;
+		goto exit;
+	}
+
+      exit:
+	if (priv->cmdlog) {
+		priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
+		priv->cmdlog_pos %= priv->cmdlog_len;
+	}
+	return rc;
+}
+
+static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
+{
+	struct host_cmd cmd = {
+		.cmd = command,
+	};
+
+	return __ipw_send_cmd(priv, &cmd);
+}
+
+static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
+			    void *data)
+{
+	struct host_cmd cmd = {
+		.cmd = command,
+		.len = len,
+		.param = data,
+	};
+
+	return __ipw_send_cmd(priv, &cmd);
+}
+
+static int ipw_send_host_complete(struct ipw_priv *priv)
+{
+	if (!priv) {
+		IPW_ERROR("Invalid args\n");
+		return -1;
+	}
+
+	return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
+}
+
+static int ipw_send_system_config(struct ipw_priv *priv)
+{
+	return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
+				sizeof(priv->sys_config),
+				&priv->sys_config);
+}
+
+static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
+{
+	if (!priv || !ssid) {
+		IPW_ERROR("Invalid args\n");
+		return -1;
+	}
+
+	return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
+				ssid);
+}
+
+static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
+{
+	if (!priv || !mac) {
+		IPW_ERROR("Invalid args\n");
+		return -1;
+	}
+
+	IPW_DEBUG_INFO("%s: Setting MAC to %pM\n",
+		       priv->net_dev->name, mac);
+
+	return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
+}
+
+static void ipw_adapter_restart(void *adapter)
+{
+	struct ipw_priv *priv = adapter;
+
+	if (priv->status & STATUS_RF_KILL_MASK)
+		return;
+
+	ipw_down(priv);
+
+	if (priv->assoc_network &&
+	    (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
+		ipw_remove_current_network(priv);
+
+	if (ipw_up(priv)) {
+		IPW_ERROR("Failed to up device\n");
+		return;
+	}
+}
+
+static void ipw_bg_adapter_restart(struct work_struct *work)
+{
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, adapter_restart);
+	mutex_lock(&priv->mutex);
+	ipw_adapter_restart(priv);
+	mutex_unlock(&priv->mutex);
+}
+
+static void ipw_abort_scan(struct ipw_priv *priv);
+
+#define IPW_SCAN_CHECK_WATCHDOG	(5 * HZ)
+
+static void ipw_scan_check(void *data)
+{
+	struct ipw_priv *priv = data;
+
+	if (priv->status & STATUS_SCAN_ABORTING) {
+		IPW_DEBUG_SCAN("Scan completion watchdog resetting "
+			       "adapter after (%dms).\n",
+			       jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
+		schedule_work(&priv->adapter_restart);
+	} else if (priv->status & STATUS_SCANNING) {
+		IPW_DEBUG_SCAN("Scan completion watchdog aborting scan "
+			       "after (%dms).\n",
+			       jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
+		ipw_abort_scan(priv);
+		schedule_delayed_work(&priv->scan_check, HZ);
+	}
+}
+
+static void ipw_bg_scan_check(struct work_struct *work)
+{
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, scan_check.work);
+	mutex_lock(&priv->mutex);
+	ipw_scan_check(priv);
+	mutex_unlock(&priv->mutex);
+}
+
+static int ipw_send_scan_request_ext(struct ipw_priv *priv,
+				     struct ipw_scan_request_ext *request)
+{
+	return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
+				sizeof(*request), request);
+}
+
+static int ipw_send_scan_abort(struct ipw_priv *priv)
+{
+	if (!priv) {
+		IPW_ERROR("Invalid args\n");
+		return -1;
+	}
+
+	return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
+}
+
+static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
+{
+	struct ipw_sensitivity_calib calib = {
+		.beacon_rssi_raw = cpu_to_le16(sens),
+	};
+
+	return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
+				&calib);
+}
+
+static int ipw_send_associate(struct ipw_priv *priv,
+			      struct ipw_associate *associate)
+{
+	if (!priv || !associate) {
+		IPW_ERROR("Invalid args\n");
+		return -1;
+	}
+
+	return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(*associate),
+				associate);
+}
+
+static int ipw_send_supported_rates(struct ipw_priv *priv,
+				    struct ipw_supported_rates *rates)
+{
+	if (!priv || !rates) {
+		IPW_ERROR("Invalid args\n");
+		return -1;
+	}
+
+	return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
+				rates);
+}
+
+static int ipw_set_random_seed(struct ipw_priv *priv)
+{
+	u32 val;
+
+	if (!priv) {
+		IPW_ERROR("Invalid args\n");
+		return -1;
+	}
+
+	get_random_bytes(&val, sizeof(val));
+
+	return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
+}
+
+static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
+{
+	__le32 v = cpu_to_le32(phy_off);
+	if (!priv) {
+		IPW_ERROR("Invalid args\n");
+		return -1;
+	}
+
+	return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(v), &v);
+}
+
+static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
+{
+	if (!priv || !power) {
+		IPW_ERROR("Invalid args\n");
+		return -1;
+	}
+
+	return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
+}
+
+static int ipw_set_tx_power(struct ipw_priv *priv)
+{
+	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
+	struct ipw_tx_power tx_power;
+	s8 max_power;
+	int i;
+
+	memset(&tx_power, 0, sizeof(tx_power));
+
+	/* configure device for 'G' band */
+	tx_power.ieee_mode = IPW_G_MODE;
+	tx_power.num_channels = geo->bg_channels;
+	for (i = 0; i < geo->bg_channels; i++) {
+		max_power = geo->bg[i].max_power;
+		tx_power.channels_tx_power[i].channel_number =
+		    geo->bg[i].channel;
+		tx_power.channels_tx_power[i].tx_power = max_power ?
+		    min(max_power, priv->tx_power) : priv->tx_power;
+	}
+	if (ipw_send_tx_power(priv, &tx_power))
+		return -EIO;
+
+	/* configure device to also handle 'B' band */
+	tx_power.ieee_mode = IPW_B_MODE;
+	if (ipw_send_tx_power(priv, &tx_power))
+		return -EIO;
+
+	/* configure device to also handle 'A' band */
+	if (priv->ieee->abg_true) {
+		tx_power.ieee_mode = IPW_A_MODE;
+		tx_power.num_channels = geo->a_channels;
+		for (i = 0; i < tx_power.num_channels; i++) {
+			max_power = geo->a[i].max_power;
+			tx_power.channels_tx_power[i].channel_number =
+			    geo->a[i].channel;
+			tx_power.channels_tx_power[i].tx_power = max_power ?
+			    min(max_power, priv->tx_power) : priv->tx_power;
+		}
+		if (ipw_send_tx_power(priv, &tx_power))
+			return -EIO;
+	}
+	return 0;
+}
+
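+/*
+ * A worked example of the clamping in ipw_set_tx_power() above (values
+ * illustrative, in the units used by the geo table): with
+ * priv->tx_power = 16 and a channel whose regulatory max_power is 14,
+ * the channel is programmed with min(14, 16) = 14; a channel with
+ * max_power = 0 (no limit recorded) gets the full 16.
+ */
+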
+static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
+{
+	struct ipw_rts_threshold rts_threshold = {
+		.rts_threshold = cpu_to_le16(rts),
+	};
+
+	if (!priv) {
+		IPW_ERROR("Invalid args\n");
+		return -1;
+	}
+
+	return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
+				sizeof(rts_threshold), &rts_threshold);
+}
+
+static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
+{
+	struct ipw_frag_threshold frag_threshold = {
+		.frag_threshold = cpu_to_le16(frag),
+	};
+
+	if (!priv) {
+		IPW_ERROR("Invalid args\n");
+		return -1;
+	}
+
+	return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
+				sizeof(frag_threshold), &frag_threshold);
+}
+
+static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
+{
+	__le32 param;
+
+	if (!priv) {
+		IPW_ERROR("Invalid args\n");
+		return -1;
+	}
+
+	/* On battery, use power index 3; on AC, use CAM (constantly awake
+	 * mode); otherwise pass the user-supplied level through. */
+	switch (mode) {
+	case IPW_POWER_BATTERY:
+		param = cpu_to_le32(IPW_POWER_INDEX_3);
+		break;
+	case IPW_POWER_AC:
+		param = cpu_to_le32(IPW_POWER_MODE_CAM);
+		break;
+	default:
+		param = cpu_to_le32(mode);
+		break;
+	}
+
+	return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
+				&param);
+}
+
+static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
+{
+	struct ipw_retry_limit retry_limit = {
+		.short_retry_limit = slimit,
+		.long_retry_limit = llimit
+	};
+
+	if (!priv) {
+		IPW_ERROR("Invalid args\n");
+		return -1;
+	}
+
+	return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
+				&retry_limit);
+}
+
+/*
+ * The IPW device contains a Microwire compatible EEPROM that stores
+ * various data like the MAC address.  Usually the firmware has exclusive
+ * access to the eeprom, but during device initialization (before the
+ * device driver has sent the HostComplete command to the firmware) the
+ * device driver has read access to the EEPROM by way of indirect addressing
+ * through a couple of memory mapped registers.
+ *
+ * The following is a simplified implementation for pulling data out of
+ * the eeprom, along with some helper functions to find information in
+ * the per device private data's copy of the eeprom.
+ *
+ * NOTE: To better understand how these functions work (i.e. what is a chip
+ *       select and why do we have to keep driving the eeprom clock?), read
+ *       just about any data sheet for a Microwire compatible EEPROM.
+ */
+
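+/*
+ * A minimal sketch of the READ transaction implemented by the helpers
+ * below (addr is a word index into the EEPROM):
+ *
+ *	eeprom_op(priv, EEPROM_CMD_READ, addr);    CS, start bit, 2-bit
+ *	                                           opcode, 8-bit address
+ *	...one dummy SK cycle, then 16 SK cycles, sampling EEPROM_BIT_DO
+ *	   after each one to assemble the word MSB first...
+ *	eeprom_disable_cs(priv);                   deselect the part
+ *
+ * eeprom_read_u16() wraps exactly this sequence.
+ */
+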
+/* write a 32 bit value into the indirect accessor register */
+static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
+{
+	ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
+
+	/* the eeprom requires some time to complete the operation */
+	udelay(p->eeprom_delay);
+}
+
+/* perform a chip select operation */
+static void eeprom_cs(struct ipw_priv *priv)
+{
+	eeprom_write_reg(priv, 0);
+	eeprom_write_reg(priv, EEPROM_BIT_CS);
+	eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
+	eeprom_write_reg(priv, EEPROM_BIT_CS);
+}
+
+/* deassert chip select */
+static void eeprom_disable_cs(struct ipw_priv *priv)
+{
+	eeprom_write_reg(priv, EEPROM_BIT_CS);
+	eeprom_write_reg(priv, 0);
+	eeprom_write_reg(priv, EEPROM_BIT_SK);
+}
+
+/* push a single bit down to the eeprom */
+static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
+{
+	int d = (bit ? EEPROM_BIT_DI : 0);
+	eeprom_write_reg(p, EEPROM_BIT_CS | d);
+	eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
+}
+
+/* push an opcode followed by an address down to the eeprom */
+static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
+{
+	int i;
+
+	eeprom_cs(priv);
+	eeprom_write_bit(priv, 1);
+	eeprom_write_bit(priv, op & 2);
+	eeprom_write_bit(priv, op & 1);
+	for (i = 7; i >= 0; i--) {
+		eeprom_write_bit(priv, addr & (1 << i));
+	}
+}
+
+/* pull 16 bits off the eeprom, one bit at a time */
+static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
+{
+	int i;
+	u16 r = 0;
+
+	/* Send READ Opcode */
+	eeprom_op(priv, EEPROM_CMD_READ, addr);
+
+	/* Send dummy bit */
+	eeprom_write_reg(priv, EEPROM_BIT_CS);
+
+	/* Read the byte off the eeprom one bit at a time */
+	for (i = 0; i < 16; i++) {
+		u32 data = 0;
+		eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
+		eeprom_write_reg(priv, EEPROM_BIT_CS);
+		data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
+		r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
+	}
+
+	/* Send another dummy bit */
+	eeprom_write_reg(priv, 0);
+	eeprom_disable_cs(priv);
+
+	return r;
+}
+
+/* helper function for pulling the mac address out of the private */
+/* data's copy of the eeprom data                                 */
+static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
+{
+	memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], ETH_ALEN);
+}
+
+static void ipw_read_eeprom(struct ipw_priv *priv)
+{
+	int i;
+	__le16 *eeprom = (__le16 *) priv->eeprom;
+
+	IPW_DEBUG_TRACE(">>\n");
+
+	/* read entire contents of eeprom into private buffer */
+	for (i = 0; i < 128; i++)
+		eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));
+
+	IPW_DEBUG_TRACE("<<\n");
+}
+
+/*
+ * Either the device driver (i.e. the host) or the firmware can
+ * load eeprom data into the designated region in SRAM.  If neither
+ * happens then the FW will shut down with a fatal error.
+ *
+ * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
+ * region of shared SRAM needs to be non-zero.
+ */
+static void ipw_eeprom_init_sram(struct ipw_priv *priv)
+{
+	int i;
+
+	IPW_DEBUG_TRACE(">>\n");
+
+	/*
+	   If the data looks correct, then copy it to our private
+	   copy.  Otherwise let the firmware know to perform the operation
+	   on its own.
+	 */
+	if (priv->eeprom[EEPROM_VERSION] != 0) {
+		IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
+
+		/* write the eeprom data to sram */
+		for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
+			ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
+
+		/* Do not load eeprom data on fatal error or suspend */
+		ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
+	} else {
+		IPW_DEBUG_INFO("Enabling FW initializationg of SRAM\n");
+
+		/* Load eeprom data on fatal error or suspend */
+		ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
+	}
+
+	IPW_DEBUG_TRACE("<<\n");
+}
+
+static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
+{
+	count >>= 2;
+	if (!count)
+		return;
+	_ipw_write32(priv, IPW_AUTOINC_ADDR, start);
+	while (count--)
+		_ipw_write32(priv, IPW_AUTOINC_DATA, 0);
+}
+
+static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
+{
+	ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
+			CB_NUMBER_OF_ELEMENTS_SMALL *
+			sizeof(struct command_block));
+}
+
+/* start the dma engine, but no transfers yet */
+static int ipw_fw_dma_enable(struct ipw_priv *priv)
+{
+
+	IPW_DEBUG_FW(">> :\n");
+
+	/* Start the dma */
+	ipw_fw_dma_reset_command_blocks(priv);
+
+	/* Write CB base address */
+	ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
+
+	IPW_DEBUG_FW("<< :\n");
+	return 0;
+}
+
+static void ipw_fw_dma_abort(struct ipw_priv *priv)
+{
+	u32 control = 0;
+
+	IPW_DEBUG_FW(">> :\n");
+
+	/* set the Stop and Abort bit */
+	control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
+	ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
+	priv->sram_desc.last_cb_index = 0;
+
+	IPW_DEBUG_FW("<<\n");
+}
+
+static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
+					  struct command_block *cb)
+{
+	u32 address =
+	    IPW_SHARED_SRAM_DMA_CONTROL +
+	    (sizeof(struct command_block) * index);
+	IPW_DEBUG_FW(">> :\n");
+
+	ipw_write_indirect(priv, address, (u8 *) cb,
+			   (int)sizeof(struct command_block));
+
+	IPW_DEBUG_FW("<< :\n");
+	return 0;
+}
+
+static int ipw_fw_dma_kick(struct ipw_priv *priv)
+{
+	u32 control = 0;
+	u32 index = 0;
+
+	IPW_DEBUG_FW(">> :\n");
+
+	for (index = 0; index < priv->sram_desc.last_cb_index; index++)
+		ipw_fw_dma_write_command_block(priv, index,
+					       &priv->sram_desc.cb_list[index]);
+
+	/* Enable the DMA in the CSR register */
+	ipw_clear_bit(priv, IPW_RESET_REG,
+		      IPW_RESET_REG_MASTER_DISABLED |
+		      IPW_RESET_REG_STOP_MASTER);
+
+	/* Set the Start bit. */
+	control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
+	ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
+
+	IPW_DEBUG_FW("<< :\n");
+	return 0;
+}
+
+static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
+{
+	u32 address;
+	u32 register_value = 0;
+	u32 cb_fields_address = 0;
+
+	IPW_DEBUG_FW(">> :\n");
+	address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
+	IPW_DEBUG_FW_INFO("Current CB is 0x%x\n", address);
+
+	/* Read the DMA Control register */
+	register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
+	IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x\n", register_value);
+
+	/* Print the CB values */
+	cb_fields_address = address;
+	register_value = ipw_read_reg32(priv, cb_fields_address);
+	IPW_DEBUG_FW_INFO("Current CB Control Field is 0x%x\n", register_value);
+
+	cb_fields_address += sizeof(u32);
+	register_value = ipw_read_reg32(priv, cb_fields_address);
+	IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x\n", register_value);
+
+	cb_fields_address += sizeof(u32);
+	register_value = ipw_read_reg32(priv, cb_fields_address);
+	IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x\n",
+			  register_value);
+
+	cb_fields_address += sizeof(u32);
+	register_value = ipw_read_reg32(priv, cb_fields_address);
+	IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x\n", register_value);
+
+	IPW_DEBUG_FW(">> :\n");
+}
+
+static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
+{
+	u32 current_cb_address = 0;
+	u32 current_cb_index = 0;
+
+	IPW_DEBUG_FW("<< :\n");
+	current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
+
+	current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
+	    sizeof(struct command_block);
+
+	IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X\n",
+			  current_cb_index, current_cb_address);
+
+	IPW_DEBUG_FW(">> :\n");
+	return current_cb_index;
+}
+
+static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
+					u32 src_address,
+					u32 dest_address,
+					u32 length,
+					int interrupt_enabled, int is_last)
+{
+
+	u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
+	    CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
+	    CB_DEST_SIZE_LONG;
+	struct command_block *cb;
+	u32 last_cb_element = 0;
+
+	IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
+			  src_address, dest_address, length);
+
+	if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
+		return -1;
+
+	last_cb_element = priv->sram_desc.last_cb_index;
+	cb = &priv->sram_desc.cb_list[last_cb_element];
+	priv->sram_desc.last_cb_index++;
+
+	/* Calculate the new CB control word */
+	if (interrupt_enabled)
+		control |= CB_INT_ENABLED;
+
+	if (is_last)
+		control |= CB_LAST_VALID;
+
+	control |= length;
+
+	/* Calculate the CB Element's checksum value */
+	cb->status = control ^ src_address ^ dest_address;
+
+	/* Copy the Source and Destination addresses */
+	cb->dest_addr = dest_address;
+	cb->source_addr = src_address;
+
+	/* Copy the Control Word last */
+	cb->control = control;
+
+	return 0;
+}
+
+static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
+				 int nr, u32 dest_address, u32 len)
+{
+	int ret, i;
+	u32 size;
+
+	IPW_DEBUG_FW(">>\n");
+	IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n",
+			  nr, dest_address, len);
+
+	for (i = 0; i < nr; i++) {
+		size = min_t(u32, len - i * CB_MAX_LENGTH, CB_MAX_LENGTH);
+		ret = ipw_fw_dma_add_command_block(priv, src_address[i],
+						   dest_address +
+						   i * CB_MAX_LENGTH, size,
+						   0, 0);
+		if (ret) {
+			IPW_DEBUG_FW_INFO(": Failed\n");
+			return -1;
+		} else
+			IPW_DEBUG_FW_INFO(": Added new cb\n");
+	}
+
+	IPW_DEBUG_FW("<<\n");
+	return 0;
+}
+
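+/*
+ * A worked example of the splitting above, assuming CB_MAX_LENGTH is 8K
+ * (matching the "64*8K" limit noted in ipw_load_firmware()): a 20K chunk
+ * arrives with nr = 3 and is queued as command blocks of 8K, 8K and 4K,
+ * the last size coming from the min_t() clamp (20K - 2*8K = 4K).
+ */
+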
+static int ipw_fw_dma_wait(struct ipw_priv *priv)
+{
+	u32 current_index = 0, previous_index;
+	u32 watchdog = 0;
+
+	IPW_DEBUG_FW(">> :\n");
+
+	current_index = ipw_fw_dma_command_block_index(priv);
+	IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
+			  (int)priv->sram_desc.last_cb_index);
+
+	while (current_index < priv->sram_desc.last_cb_index) {
+		udelay(50);
+		previous_index = current_index;
+		current_index = ipw_fw_dma_command_block_index(priv);
+
+		if (previous_index < current_index) {
+			watchdog = 0;
+			continue;
+		}
+		if (++watchdog > 400) {
+			IPW_DEBUG_FW_INFO("Timeout\n");
+			ipw_fw_dma_dump_command_block(priv);
+			ipw_fw_dma_abort(priv);
+			return -1;
+		}
+	}
+
+	ipw_fw_dma_abort(priv);
+
+	/* Disable the DMA in the CSR register */
+	ipw_set_bit(priv, IPW_RESET_REG,
+		    IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
+
+	IPW_DEBUG_FW("<< dmaWaitSync\n");
+	return 0;
+}
+
+static void ipw_remove_current_network(struct ipw_priv *priv)
+{
+	struct list_head *element, *safe;
+	struct libipw_network *network = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->ieee->lock, flags);
+	list_for_each_safe(element, safe, &priv->ieee->network_list) {
+		network = list_entry(element, struct libipw_network, list);
+		if (ether_addr_equal(network->bssid, priv->bssid)) {
+			list_del(element);
+			list_add_tail(&network->list,
+				      &priv->ieee->network_free_list);
+		}
+	}
+	spin_unlock_irqrestore(&priv->ieee->lock, flags);
+}
+
+/**
+ * Check that the card is still alive.
+ * Reads the debug register from domain0.
+ * If the card is present, a pre-defined value should
+ * be found there.
+ *
+ * @param priv
+ * @return 1 if card is present, 0 otherwise
+ */
+static inline int ipw_alive(struct ipw_priv *priv)
+{
+	return ipw_read32(priv, 0x90) == 0xd55555d5;
+}
+
+/* timeout in msec, attempted in 10-msec quanta */
+static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
+			       int timeout)
+{
+	int i = 0;
+
+	do {
+		if ((ipw_read32(priv, addr) & mask) == mask)
+			return i;
+		mdelay(10);
+		i += 10;
+	} while (i < timeout);
+
+	return -ETIME;
+}
+
+/* These functions load the firmware and micro code for the operation of
+ * the ipw hardware.  They assume the buffer has all the bits for the
+ * image and that the caller handles the memory allocation and clean up.
+ */
+
+static int ipw_stop_master(struct ipw_priv *priv)
+{
+	int rc;
+
+	IPW_DEBUG_TRACE(">>\n");
+	/* stop master. typical delay - 0 */
+	ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
+
+	/* timeout is in msec, polled in 10-msec quanta */
+	rc = ipw_poll_bit(priv, IPW_RESET_REG,
+			  IPW_RESET_REG_MASTER_DISABLED, 100);
+	if (rc < 0) {
+		IPW_ERROR("wait for stop master failed after 100ms\n");
+		return -1;
+	}
+
+	IPW_DEBUG_INFO("stop master %dms\n", rc);
+
+	return rc;
+}
+
+static void ipw_arc_release(struct ipw_priv *priv)
+{
+	IPW_DEBUG_TRACE(">>\n");
+	mdelay(5);
+
+	ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
+
+	/* no one knows timing, for safety add some delay */
+	mdelay(5);
+}
+
+struct fw_chunk {
+	__le32 address;
+	__le32 length;
+};
+
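+/*
+ * Each image handed to ipw_load_firmware() below is a stream of
+ * [struct fw_chunk][`length` bytes of payload] records, walked back to
+ * back until `offset` reaches the end of the image.
+ */
+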
+static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
+{
+	int rc = 0, i, addr;
+	u8 cr = 0;
+	__le16 *image;
+
+	image = (__le16 *) data;
+
+	IPW_DEBUG_TRACE(">>\n");
+
+	rc = ipw_stop_master(priv);
+
+	if (rc < 0)
+		return rc;
+
+	for (addr = IPW_SHARED_LOWER_BOUND;
+	     addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
+		ipw_write32(priv, addr, 0);
+	}
+
+	/* no ucode (yet) */
+	memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
+	/* destroy DMA queues */
+	/* reset sequence */
+
+	ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
+	ipw_arc_release(priv);
+	ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
+	mdelay(1);
+
+	/* reset PHY */
+	ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
+	mdelay(1);
+
+	ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
+	mdelay(1);
+
+	/* enable ucode store */
+	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
+	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
+	mdelay(1);
+
+	/* write ucode */
+	/**
+	 * @bug
+	 * Do NOT set the indirect address register once and then
+	 * store data to the indirect data register in the loop.
+	 * It seems very reasonable, but in this case DINO does not
+	 * accept the ucode. It is essential to set the address each time.
+	 */
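+	/*
+	 * A sketch of the broken pattern warned about above (illustration
+	 * only; IPW_INDIRECT_ADDR/IPW_INDIRECT_DATA are the registers the
+	 * ipw_write_reg*() helpers drive):
+	 *
+	 *	ipw_write32(priv, IPW_INDIRECT_ADDR, IPW_BASEBAND_CONTROL_STORE);
+	 *	for (i = 0; i < len / 2; i++)
+	 *		ipw_write32(priv, IPW_INDIRECT_DATA, image[i]);
+	 *
+	 * The loop below instead uses ipw_write_reg16(), which re-latches
+	 * the address before every data store.
+	 */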
+	/* load new ipw uCode */
+	for (i = 0; i < len / 2; i++)
+		ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
+				le16_to_cpu(image[i]));
+
+	/* enable DINO */
+	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
+	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
+
+	/* this is where the igx / win driver deviates from the VAP driver. */
+
+	/* wait for alive response */
+	for (i = 0; i < 100; i++) {
+		/* poll for incoming data */
+		cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
+		if (cr & DINO_RXFIFO_DATA)
+			break;
+		mdelay(1);
+	}
+
+	if (cr & DINO_RXFIFO_DATA) {
+		/* the alive command response size is NOT a multiple of 4 */
+		__le32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
+
+		for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
+			response_buffer[i] =
+			    cpu_to_le32(ipw_read_reg32(priv,
+						       IPW_BASEBAND_RX_FIFO_READ));
+		memcpy(&priv->dino_alive, response_buffer,
+		       sizeof(priv->dino_alive));
+		if (priv->dino_alive.alive_command == 1
+		    && priv->dino_alive.ucode_valid == 1) {
+			rc = 0;
+			IPW_DEBUG_INFO
+			    ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
+			     "of %02d/%02d/%02d %02d:%02d\n",
+			     priv->dino_alive.software_revision,
+			     priv->dino_alive.software_revision,
+			     priv->dino_alive.device_identifier,
+			     priv->dino_alive.device_identifier,
+			     priv->dino_alive.time_stamp[0],
+			     priv->dino_alive.time_stamp[1],
+			     priv->dino_alive.time_stamp[2],
+			     priv->dino_alive.time_stamp[3],
+			     priv->dino_alive.time_stamp[4]);
+		} else {
+			IPW_DEBUG_INFO("Microcode is not alive\n");
+			rc = -EINVAL;
+		}
+	} else {
+		IPW_DEBUG_INFO("No alive response from DINO\n");
+		rc = -ETIME;
+	}
+
+	/* disable DINO, otherwise for some reason
+	   the firmware has problems getting an alive response */
+	ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
+
+	return rc;
+}
+
+static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
+{
+	int ret = -1;
+	int offset = 0;
+	struct fw_chunk *chunk;
+	int total_nr = 0;
+	int i;
+	struct dma_pool *pool;
+	void **virts;
+	dma_addr_t *phys;
+
+	IPW_DEBUG_TRACE("<< :\n");
+
+	virts = kmalloc_array(CB_NUMBER_OF_ELEMENTS_SMALL, sizeof(void *),
+			      GFP_KERNEL);
+	if (!virts)
+		return -ENOMEM;
+
+	phys = kmalloc_array(CB_NUMBER_OF_ELEMENTS_SMALL, sizeof(dma_addr_t),
+			     GFP_KERNEL);
+	if (!phys) {
+		kfree(virts);
+		return -ENOMEM;
+	}
+	pool = dma_pool_create("ipw2200", &priv->pci_dev->dev, CB_MAX_LENGTH, 0,
+			       0);
+	if (!pool) {
+		IPW_ERROR("dma_pool_create failed\n");
+		kfree(phys);
+		kfree(virts);
+		return -ENOMEM;
+	}
+
+	/* Start the Dma */
+	ret = ipw_fw_dma_enable(priv);
+
+	/* if the DMA were already active, this would be a bug */
+	BUG_ON(priv->sram_desc.last_cb_index > 0);
+
+	do {
+		u32 chunk_len;
+		u8 *start;
+		int size;
+		int nr = 0;
+
+		chunk = (struct fw_chunk *)(data + offset);
+		offset += sizeof(struct fw_chunk);
+		chunk_len = le32_to_cpu(chunk->length);
+		start = data + offset;
+
+		nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH;
+		for (i = 0; i < nr; i++) {
+			virts[total_nr] = dma_pool_alloc(pool, GFP_KERNEL,
+							 &phys[total_nr]);
+			if (!virts[total_nr]) {
+				ret = -ENOMEM;
+				goto out;
+			}
+			size = min_t(u32, chunk_len - i * CB_MAX_LENGTH,
+				     CB_MAX_LENGTH);
+			memcpy(virts[total_nr], start, size);
+			start += size;
+			total_nr++;
+			/* We don't support fw chunks larger than 64*8K */
+			BUG_ON(total_nr > CB_NUMBER_OF_ELEMENTS_SMALL);
+		}
+
+		/* Build the DMA packets and queue them for sending: DMA
+		 * chunk->length bytes from data + offset to
+		 * chunk->address. */
+		ret = ipw_fw_dma_add_buffer(priv, &phys[total_nr - nr],
+					    nr, le32_to_cpu(chunk->address),
+					    chunk_len);
+		if (ret) {
+			IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
+			goto out;
+		}
+
+		offset += chunk_len;
+	} while (offset < len);
+
+	/* Run the DMA and wait for the answer */
+	ret = ipw_fw_dma_kick(priv);
+	if (ret) {
+		IPW_ERROR("dmaKick Failed\n");
+		goto out;
+	}
+
+	ret = ipw_fw_dma_wait(priv);
+	if (ret) {
+		IPW_ERROR("dmaWaitSync Failed\n");
+		goto out;
+	}
+ out:
+	for (i = 0; i < total_nr; i++)
+		dma_pool_free(pool, virts[i], phys[i]);
+
+	dma_pool_destroy(pool);
+	kfree(phys);
+	kfree(virts);
+
+	return ret;
+}
+
+/* stop nic */
+static int ipw_stop_nic(struct ipw_priv *priv)
+{
+	int rc = 0;
+
+	/* stop */
+	ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
+
+	rc = ipw_poll_bit(priv, IPW_RESET_REG,
+			  IPW_RESET_REG_MASTER_DISABLED, 500);
+	if (rc < 0) {
+		IPW_ERROR("wait for reg master disabled failed after 500ms\n");
+		return rc;
+	}
+
+	ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
+
+	return rc;
+}
+
+static void ipw_start_nic(struct ipw_priv *priv)
+{
+	IPW_DEBUG_TRACE(">>\n");
+
+	/* prvHwStartNic  release ARC */
+	ipw_clear_bit(priv, IPW_RESET_REG,
+		      IPW_RESET_REG_MASTER_DISABLED |
+		      IPW_RESET_REG_STOP_MASTER |
+		      CBD_RESET_REG_PRINCETON_RESET);
+
+	/* enable power management */
+	ipw_set_bit(priv, IPW_GP_CNTRL_RW,
+		    IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
+
+	IPW_DEBUG_TRACE("<<\n");
+}
+
+static int ipw_init_nic(struct ipw_priv *priv)
+{
+	int rc;
+
+	IPW_DEBUG_TRACE(">>\n");
+	/* reset */
+	/*prvHwInitNic */
+	/* set "initialization complete" bit to move adapter to D0 state */
+	ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
+
+	/* low-level PLL activation */
+	ipw_write32(priv, IPW_READ_INT_REGISTER,
+		    IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
+
+	/* wait for clock stabilization */
+	rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
+			  IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
+	if (rc < 0)
+		IPW_DEBUG_INFO("FAILED wait for clock stablization\n");
+
+	/* assert SW reset */
+	ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
+
+	udelay(10);
+
+	/* set "initialization complete" bit to move adapter to D0 state */
+	ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
+
+	IPW_DEBUG_TRACE(">>\n");
+	return 0;
+}
+
+/* Call this function from process context; it will sleep in
+ * request_firmware.  Probe is an ok place to call this from.
+ */
+static int ipw_reset_nic(struct ipw_priv *priv)
+{
+	int rc = 0;
+	unsigned long flags;
+
+	IPW_DEBUG_TRACE(">>\n");
+
+	rc = ipw_init_nic(priv);
+
+	spin_lock_irqsave(&priv->lock, flags);
+	/* Clear the 'host command active' bit... */
+	priv->status &= ~STATUS_HCMD_ACTIVE;
+	wake_up_interruptible(&priv->wait_command_queue);
+	priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
+	wake_up_interruptible(&priv->wait_state);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	IPW_DEBUG_TRACE("<<\n");
+	return rc;
+}
+
+
+struct ipw_fw {
+	__le32 ver;
+	__le32 boot_size;
+	__le32 ucode_size;
+	__le32 fw_size;
+	u8 data[0];
+};
+
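+/*
+ * Rough on-disk layout implied by the header above and the slicing in
+ * ipw_load() (all sizes little endian):
+ *
+ *	+--------+-----------+------------+----------+
+ *	| header | boot img  | ucode img  | fw img   |
+ *	+--------+-----------+------------+----------+
+ *	          boot_size    ucode_size   fw_size
+ */
+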
+static int ipw_get_fw(struct ipw_priv *priv,
+		      const struct firmware **raw, const char *name)
+{
+	struct ipw_fw *fw;
+	int rc;
+
+	/* ask firmware_class module to get the boot firmware off disk */
+	rc = request_firmware(raw, name, &priv->pci_dev->dev);
+	if (rc < 0) {
+		IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
+		return rc;
+	}
+
+	if ((*raw)->size < sizeof(*fw)) {
+		IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
+		return -EINVAL;
+	}
+
+	fw = (void *)(*raw)->data;
+
+	if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
+	    le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
+		IPW_ERROR("%s is too small or corrupt (%zd)\n",
+			  name, (*raw)->size);
+		return -EINVAL;
+	}
+
+	IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
+		       name,
+		       le32_to_cpu(fw->ver) >> 16,
+		       le32_to_cpu(fw->ver) & 0xff,
+		       (*raw)->size - sizeof(*fw));
+	return 0;
+}
+
+/* Driver allocates buffers of this size for Rx */
+#define IPW_RX_BUF_SIZE (3000)
+
+static void ipw_rx_queue_reset(struct ipw_priv *priv,
+				      struct ipw_rx_queue *rxq)
+{
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&rxq->lock, flags);
+
+	INIT_LIST_HEAD(&rxq->rx_free);
+	INIT_LIST_HEAD(&rxq->rx_used);
+
+	/* Fill the rx_used queue with _all_ of the Rx buffers */
+	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+		/* In the reset function, these buffers may have been allocated
+		 * to an SKB, so we need to unmap and free potential storage */
+		if (rxq->pool[i].skb != NULL) {
+			pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
+					 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+			dev_kfree_skb(rxq->pool[i].skb);
+			rxq->pool[i].skb = NULL;
+		}
+		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+	}
+
+	/* Set us so that we have processed and used all buffers, but have
+	 * not restocked the Rx queue with fresh buffers */
+	rxq->read = rxq->write = 0;
+	rxq->free_count = 0;
+	spin_unlock_irqrestore(&rxq->lock, flags);
+}
+
+#ifdef CONFIG_PM
+static int fw_loaded = 0;
+static const struct firmware *raw = NULL;
+
+static void free_firmware(void)
+{
+	if (fw_loaded) {
+		release_firmware(raw);
+		raw = NULL;
+		fw_loaded = 0;
+	}
+}
+#else
+#define free_firmware() do {} while (0)
+#endif
+
+static int ipw_load(struct ipw_priv *priv)
+{
+#ifndef CONFIG_PM
+	const struct firmware *raw = NULL;
+#endif
+	struct ipw_fw *fw;
+	u8 *boot_img, *ucode_img, *fw_img;
+	u8 *name = NULL;
+	int rc = 0, retries = 3;
+
+	switch (priv->ieee->iw_mode) {
+	case IW_MODE_ADHOC:
+		name = "ipw2200-ibss.fw";
+		break;
+#ifdef CONFIG_IPW2200_MONITOR
+	case IW_MODE_MONITOR:
+		name = "ipw2200-sniffer.fw";
+		break;
+#endif
+	case IW_MODE_INFRA:
+		name = "ipw2200-bss.fw";
+		break;
+	}
+
+	if (!name) {
+		rc = -EINVAL;
+		goto error;
+	}
+
+#ifdef CONFIG_PM
+	if (!fw_loaded) {
+#endif
+		rc = ipw_get_fw(priv, &raw, name);
+		if (rc < 0)
+			goto error;
+#ifdef CONFIG_PM
+	}
+#endif
+
+	fw = (void *)raw->data;
+	boot_img = &fw->data[0];
+	ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
+	fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
+			   le32_to_cpu(fw->ucode_size)];
+
+	if (!priv->rxq)
+		priv->rxq = ipw_rx_queue_alloc(priv);
+	else
+		ipw_rx_queue_reset(priv, priv->rxq);
+	if (!priv->rxq) {
+		IPW_ERROR("Unable to initialize Rx queue\n");
+		rc = -ENOMEM;
+		goto error;
+	}
+
+      retry:
+	/* Ensure interrupts are disabled */
+	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
+	priv->status &= ~STATUS_INT_ENABLED;
+
+	/* ack pending interrupts */
+	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
+
+	ipw_stop_nic(priv);
+
+	rc = ipw_reset_nic(priv);
+	if (rc < 0) {
+		IPW_ERROR("Unable to reset NIC\n");
+		goto error;
+	}
+
+	ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
+			IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
+
+	/* DMA the initial boot firmware into the device */
+	rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
+	if (rc < 0) {
+		IPW_ERROR("Unable to load boot firmware: %d\n", rc);
+		goto error;
+	}
+
+	/* kick start the device */
+	ipw_start_nic(priv);
+
+	/* wait for the device to finish its initial startup sequence */
+	rc = ipw_poll_bit(priv, IPW_INTA_RW,
+			  IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
+	if (rc < 0) {
+		IPW_ERROR("device failed to boot initial fw image\n");
+		goto error;
+	}
+	IPW_DEBUG_INFO("initial device response after %dms\n", rc);
+
+	/* ack fw init done interrupt */
+	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
+
+	/* DMA the ucode into the device */
+	rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
+	if (rc < 0) {
+		IPW_ERROR("Unable to load ucode: %d\n", rc);
+		goto error;
+	}
+
+	/* stop nic */
+	ipw_stop_nic(priv);
+
+	/* DMA bss firmware into the device */
+	rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
+	if (rc < 0) {
+		IPW_ERROR("Unable to load firmware: %d\n", rc);
+		goto error;
+	}
+#ifdef CONFIG_PM
+	fw_loaded = 1;
+#endif
+
+	ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
+
+	rc = ipw_queue_reset(priv);
+	if (rc < 0) {
+		IPW_ERROR("Unable to initialize queues\n");
+		goto error;
+	}
+
+	/* Ensure interrupts are disabled */
+	ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
+	/* ack pending interrupts */
+	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
+
+	/* kick start the device */
+	ipw_start_nic(priv);
+
+	if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
+		if (retries > 0) {
+			IPW_WARNING("Parity error.  Retrying init.\n");
+			retries--;
+			goto retry;
+		}
+
+		IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
+		rc = -EIO;
+		goto error;
+	}
+
+	/* wait for the device */
+	rc = ipw_poll_bit(priv, IPW_INTA_RW,
+			  IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
+	if (rc < 0) {
+		IPW_ERROR("device failed to start within 500ms\n");
+		goto error;
+	}
+	IPW_DEBUG_INFO("device response after %dms\n", rc);
+
+	/* ack fw init done interrupt */
+	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
+
+	/* read eeprom data */
+	priv->eeprom_delay = 1;
+	ipw_read_eeprom(priv);
+	/* initialize the eeprom region of sram */
+	ipw_eeprom_init_sram(priv);
+
+	/* enable interrupts */
+	ipw_enable_interrupts(priv);
+
+	/* Ensure our queue has valid packets */
+	ipw_rx_queue_replenish(priv);
+
+	ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
+
+	/* ack pending interrupts */
+	ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
+
+#ifndef CONFIG_PM
+	release_firmware(raw);
+#endif
+	return 0;
+
+      error:
+	if (priv->rxq) {
+		ipw_rx_queue_free(priv, priv->rxq);
+		priv->rxq = NULL;
+	}
+	ipw_tx_queue_free(priv);
+	release_firmware(raw);
+#ifdef CONFIG_PM
+	fw_loaded = 0;
+	raw = NULL;
+#endif
+
+	return rc;
+}
+
+/**
+ * DMA services
+ *
+ * Theory of operation
+ *
+ * A queue is a circular buffer with 'Read' and 'Write' pointers.
+ * Two empty entries are always kept in the buffer to protect it from
+ * overflow.
+ *
+ * For the Tx queue, there are low mark and high mark limits.  If, after
+ * queuing a packet for Tx, the free space becomes < low mark, the Tx queue
+ * is stopped.  When reclaiming packets (on the 'tx done' IRQ), if the free
+ * space becomes > high mark, the Tx queue is resumed.
+ *
+ * The IPW operates with six queues, one receive queue in the device's
+ * sram, one transmit queue for sending commands to the device firmware,
+ * and four transmit queues for data.
+ *
+ * The four transmit queues allow for performing quality of service (qos)
+ * transmissions as per the 802.11 protocol.  Currently Linux does not
+ * provide a mechanism to the user for utilizing prioritized queues, so
+ * we only utilize the first data transmit queue (queue1).
+ */
+
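+/*
+ * A worked example of the two-entry reserve described above, using the
+ * Tx variant below: with n_bd = 64, first_empty = 10 and last_used = 6,
+ * s = 6 - 10 = -4, then -4 + 64 = 60, minus the reserve of 2 gives 58
+ * slots reported free.
+ */
+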
+/**
+ * ipw_rx_queue_space - Return number of free slots available in queue.
+ */
+static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
+{
+	int s = q->read - q->write;
+	if (s <= 0)
+		s += RX_QUEUE_SIZE;
+	/* keep some buffer to not confuse full and empty queue */
+	s -= 2;
+	if (s < 0)
+		s = 0;
+	return s;
+}
+
+static inline int ipw_tx_queue_space(const struct clx2_queue *q)
+{
+	int s = q->last_used - q->first_empty;
+	if (s <= 0)
+		s += q->n_bd;
+	s -= 2;			/* keep some reserve to not confuse empty and full situations */
+	if (s < 0)
+		s = 0;
+	return s;
+}
+
+static inline int ipw_queue_inc_wrap(int index, int n_bd)
+{
+	return (++index == n_bd) ? 0 : index;
+}
+
+/**
+ * Initialize common DMA queue structure
+ *
+ * @param q        queue to init
+ * @param count    Number of BD's to allocate. Should be power of 2
+ * @param read     Address for 'read' register
+ *                 (not offset within BAR, full address)
+ * @param write    Address for 'write' register
+ *                 (not offset within BAR, full address)
+ * @param base     Address for 'base' register
+ *                 (not offset within BAR, full address)
+ * @param size     Address for 'size' register
+ *                 (not offset within BAR, full address)
+ */
+static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
+			   int count, u32 read, u32 write, u32 base, u32 size)
+{
+	q->n_bd = count;
+
+	q->low_mark = q->n_bd / 4;
+	if (q->low_mark < 4)
+		q->low_mark = 4;
+
+	q->high_mark = q->n_bd / 8;
+	if (q->high_mark < 2)
+		q->high_mark = 2;
+
+	q->first_empty = q->last_used = 0;
+	q->reg_r = read;
+	q->reg_w = write;
+
+	ipw_write32(priv, base, q->dma_addr);
+	ipw_write32(priv, size, count);
+	ipw_write32(priv, read, 0);
+	ipw_write32(priv, write, 0);
+
+	_ipw_read32(priv, 0x90);
+}
+
+static int ipw_queue_tx_init(struct ipw_priv *priv,
+			     struct clx2_tx_queue *q,
+			     int count, u32 read, u32 write, u32 base, u32 size)
+{
+	struct pci_dev *dev = priv->pci_dev;
+
+	q->txb = kmalloc_array(count, sizeof(q->txb[0]), GFP_KERNEL);
+	if (!q->txb) {
+		IPW_ERROR("vmalloc for auxiliary BD structures failed\n");
+		return -ENOMEM;
+	}
+
+	q->bd =
+	    pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
+	if (!q->bd) {
+		IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
+			  sizeof(q->bd[0]) * count);
+		kfree(q->txb);
+		q->txb = NULL;
+		return -ENOMEM;
+	}
+
+	ipw_queue_init(priv, &q->q, count, read, write, base, size);
+	return 0;
+}
+
+/**
+ * Free one TFD, the one at index [txq->q.last_used].
+ * Do NOT advance any indexes
+ *
+ * @param priv
+ * @param txq
+ */
+static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
+				  struct clx2_tx_queue *txq)
+{
+	struct tfd_frame *bd = &txq->bd[txq->q.last_used];
+	struct pci_dev *dev = priv->pci_dev;
+	int i;
+
+	/* classify bd */
+	if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
+		/* nothing to cleanup after for host commands */
+		return;
+
+	/* sanity check */
+	if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
+		IPW_ERROR("Too many chunks: %i\n",
+			  le32_to_cpu(bd->u.data.num_chunks));
+		/** @todo issue fatal error, it is quite serious situation */
+		return;
+	}
+
+	/* unmap chunks if any */
+	for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
+		pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
+				 le16_to_cpu(bd->u.data.chunk_len[i]),
+				 PCI_DMA_TODEVICE);
+		if (txq->txb[txq->q.last_used]) {
+			libipw_txb_free(txq->txb[txq->q.last_used]);
+			txq->txb[txq->q.last_used] = NULL;
+		}
+	}
+}
+
+/**
+ * Deallocate DMA queue.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ *
+ * @param priv
+ * @param txq
+ */
+static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
+{
+	struct clx2_queue *q = &txq->q;
+	struct pci_dev *dev = priv->pci_dev;
+
+	if (q->n_bd == 0)
+		return;
+
+	/* first, empty all BD's */
+	for (; q->first_empty != q->last_used;
+	     q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
+		ipw_queue_tx_free_tfd(priv, txq);
+	}
+
+	/* free buffers belonging to queue itself */
+	pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
+			    q->dma_addr);
+	kfree(txq->txb);
+
+	/* 0 fill whole structure */
+	memset(txq, 0, sizeof(*txq));
+}
+
+/**
+ * Destroy all DMA queues and structures
+ *
+ * @param priv
+ */
+static void ipw_tx_queue_free(struct ipw_priv *priv)
+{
+	/* Tx CMD queue */
+	ipw_queue_tx_free(priv, &priv->txq_cmd);
+
+	/* Tx queues */
+	ipw_queue_tx_free(priv, &priv->txq[0]);
+	ipw_queue_tx_free(priv, &priv->txq[1]);
+	ipw_queue_tx_free(priv, &priv->txq[2]);
+	ipw_queue_tx_free(priv, &priv->txq[3]);
+}
+
+static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
+{
+	/* First 3 bytes are manufacturer */
+	bssid[0] = priv->mac_addr[0];
+	bssid[1] = priv->mac_addr[1];
+	bssid[2] = priv->mac_addr[2];
+
+	/* Last bytes are random */
+	get_random_bytes(&bssid[3], ETH_ALEN - 3);
+
+	bssid[0] &= 0xfe;	/* clear multicast bit */
+	bssid[0] |= 0x02;	/* set local assignment bit (IEEE802) */
+}
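+
+/*
+ * Editor's note (illustration, not in the original source): with a
+ * device MAC of 00:0e:35:xx:xx:xx the generated ad-hoc BSSID keeps the
+ * manufacturer's OUI but becomes e.g. 02:0e:35:ab:cd:ef -- bit 0 of
+ * the first octet cleared (not multicast), bit 1 set (locally
+ * administered per IEEE 802).
+ */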
+
+static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
+{
+	struct ipw_station_entry entry;
+	int i;
+
+	for (i = 0; i < priv->num_stations; i++) {
+		if (ether_addr_equal(priv->stations[i], bssid)) {
+			/* Another node is active in network */
+			priv->missed_adhoc_beacons = 0;
+			if (!(priv->config & CFG_STATIC_CHANNEL))
+				/* when other nodes drop out, we drop out */
+				priv->config &= ~CFG_ADHOC_PERSIST;
+
+			return i;
+		}
+	}
+
+	if (i == MAX_STATIONS)
+		return IPW_INVALID_STATION;
+
+	IPW_DEBUG_SCAN("Adding AdHoc station: %pM\n", bssid);
+
+	entry.reserved = 0;
+	entry.support_mode = 0;
+	memcpy(entry.mac_addr, bssid, ETH_ALEN);
+	memcpy(priv->stations[i], bssid, ETH_ALEN);
+	ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
+			 &entry, sizeof(entry));
+	priv->num_stations++;
+
+	return i;
+}
+
+static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
+{
+	int i;
+
+	for (i = 0; i < priv->num_stations; i++)
+		if (ether_addr_equal(priv->stations[i], bssid))
+			return i;
+
+	return IPW_INVALID_STATION;
+}
+
+static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
+{
+	int err;
+
+	if (priv->status & STATUS_ASSOCIATING) {
+		IPW_DEBUG_ASSOC("Disassociating while associating.\n");
+		schedule_work(&priv->disassociate);
+		return;
+	}
+
+	if (!(priv->status & STATUS_ASSOCIATED)) {
+		IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
+		return;
+	}
+
+	IPW_DEBUG_ASSOC("Disassociation attempt from %pM "
+			"on channel %d.\n",
+			priv->assoc_request.bssid,
+			priv->assoc_request.channel);
+
+	priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
+	priv->status |= STATUS_DISASSOCIATING;
+
+	if (quiet)
+		priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
+	else
+		priv->assoc_request.assoc_type = HC_DISASSOCIATE;
+
+	err = ipw_send_associate(priv, &priv->assoc_request);
+	if (err)
+		IPW_DEBUG_HC("Attempt to send [dis]associate command "
+			     "failed.\n");
+}
+
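+/*
+ * Editor's note: the return value reports whether anything was done --
+ * 0 when the interface was already unassociated, 1 when a
+ * disassociation was actually initiated.
+ */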
+static int ipw_disassociate(void *data)
+{
+	struct ipw_priv *priv = data;
+	if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
+		return 0;
+	ipw_send_disassociate(data, 0);
+	netif_carrier_off(priv->net_dev);
+	return 1;
+}
+
+static void ipw_bg_disassociate(struct work_struct *work)
+{
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, disassociate);
+	mutex_lock(&priv->mutex);
+	ipw_disassociate(priv);
+	mutex_unlock(&priv->mutex);
+}
+
+static void ipw_system_config(struct work_struct *work)
+{
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, system_config);
+
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+	if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
+		priv->sys_config.accept_all_data_frames = 1;
+		priv->sys_config.accept_non_directed_frames = 1;
+		priv->sys_config.accept_all_mgmt_bcpr = 1;
+		priv->sys_config.accept_all_mgmt_frames = 1;
+	}
+#endif
+
+	ipw_send_system_config(priv);
+}
+
+struct ipw_status_code {
+	u16 status;
+	const char *reason;
+};
+
+static const struct ipw_status_code ipw_status_codes[] = {
+	{0x00, "Successful"},
+	{0x01, "Unspecified failure"},
+	{0x0A, "Cannot support all requested capabilities in the "
+	 "Capability information field"},
+	{0x0B, "Reassociation denied due to inability to confirm that "
+	 "association exists"},
+	{0x0C, "Association denied due to reason outside the scope of this "
+	 "standard"},
+	{0x0D,
+	 "Responding station does not support the specified authentication "
+	 "algorithm"},
+	{0x0E,
+	 "Received an Authentication frame with authentication sequence "
+	 "transaction sequence number out of expected sequence"},
+	{0x0F, "Authentication rejected because of challenge failure"},
+	{0x10, "Authentication rejected due to timeout waiting for next "
+	 "frame in sequence"},
+	{0x11, "Association denied because AP is unable to handle additional "
+	 "associated stations"},
+	{0x12,
+	 "Association denied due to requesting station not supporting all "
+	 "of the datarates in the BSSBasicServiceSet Parameter"},
+	{0x13,
+	 "Association denied due to requesting station not supporting "
+	 "short preamble operation"},
+	{0x14,
+	 "Association denied due to requesting station not supporting "
+	 "PBCC encoding"},
+	{0x15,
+	 "Association denied due to requesting station not supporting "
+	 "channel agility"},
+	{0x19,
+	 "Association denied due to requesting station not supporting "
+	 "short slot operation"},
+	{0x1A,
+	 "Association denied due to requesting station not supporting "
+	 "DSSS-OFDM operation"},
+	{0x28, "Invalid Information Element"},
+	{0x29, "Group Cipher is not valid"},
+	{0x2A, "Pairwise Cipher is not valid"},
+	{0x2B, "AKMP is not valid"},
+	{0x2C, "Unsupported RSN IE version"},
+	{0x2D, "Invalid RSN IE Capabilities"},
+	{0x2E, "Cipher suite is rejected per security policy"},
+};
+
+static const char *ipw_get_status_code(u16 status)
+{
+	int i;
+	for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
+		if (ipw_status_codes[i].status == (status & 0xff))
+			return ipw_status_codes[i].reason;
+	return "Unknown status value.";
+}
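+
+/*
+ * Editor's note: only the low byte of the status is compared above, so
+ * a value carrying extra flag bits in its high byte still resolves to
+ * the standard 802.11 status text.
+ */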
+
+static inline void average_init(struct average *avg)
+{
+	memset(avg, 0, sizeof(*avg));
+}
+
+#define DEPTH_RSSI 8
+#define DEPTH_NOISE 16
+static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
+{
+	return ((depth-1)*prev_avg +  val)/depth;
+}
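+
+/*
+ * Editor's note -- worked example (hypothetical values): the filter
+ * above is a simple exponential moving average.  With depth 8, a
+ * previous average of -60 and a new sample of -40:
+ * (7 * -60 + -40) / 8 = -57 (integer division), i.e. each new sample
+ * moves the average 1/8 of the way toward itself.
+ */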
+
+static void average_add(struct average *avg, s16 val)
+{
+	avg->sum -= avg->entries[avg->pos];
+	avg->sum += val;
+	avg->entries[avg->pos++] = val;
+	if (unlikely(avg->pos == AVG_ENTRIES)) {
+		avg->init = 1;
+		avg->pos = 0;
+	}
+}
+
+static s16 average_value(struct average *avg)
+{
+	if (unlikely(!avg->init)) {
+		if (avg->pos)
+			return avg->sum / avg->pos;
+		return 0;
+	}
+
+	return avg->sum / AVG_ENTRIES;
+}
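+
+/*
+ * Editor's note -- usage sketch (hypothetical values, not driver
+ * code): until AVG_ENTRIES samples have been added, only the partial
+ * window is averaged; afterwards the full ring is:
+ *
+ *	struct average avg;
+ *	average_init(&avg);
+ *	average_add(&avg, -60);
+ *	average_add(&avg, -70);
+ *	average_value(&avg);	// (-60 + -70) / 2 == -65
+ */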
+
+static void ipw_reset_stats(struct ipw_priv *priv)
+{
+	u32 len = sizeof(u32);
+
+	priv->quality = 0;
+
+	average_init(&priv->average_missed_beacons);
+	priv->exp_avg_rssi = -60;
+	priv->exp_avg_noise = -85 + 0x100;
+
+	priv->last_rate = 0;
+	priv->last_missed_beacons = 0;
+	priv->last_rx_packets = 0;
+	priv->last_tx_packets = 0;
+	priv->last_tx_failures = 0;
+
+	/* Firmware managed, reset only when NIC is restarted, so we have to
+	 * normalize on the current value */
+	ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
+			&priv->last_rx_err, &len);
+	ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
+			&priv->last_tx_failures, &len);
+
+	/* Driver managed, reset with each association */
+	priv->missed_adhoc_beacons = 0;
+	priv->missed_beacons = 0;
+	priv->tx_packets = 0;
+	priv->rx_packets = 0;
+}
+
+static u32 ipw_get_max_rate(struct ipw_priv *priv)
+{
+	u32 i = 0x80000000;
+	u32 mask = priv->rates_mask;
+	/* If currently associated in B mode, restrict the maximum
+	 * rate match to B rates */
+	if (priv->assoc_request.ieee_mode == IPW_B_MODE)
+		mask &= LIBIPW_CCK_RATES_MASK;
+
+	/* TODO: Verify that the rate is supported by the current rates
+	 * list. */
+
+	while (i && !(mask & i))
+		i >>= 1;
+	switch (i) {
+	case LIBIPW_CCK_RATE_1MB_MASK:
+		return 1000000;
+	case LIBIPW_CCK_RATE_2MB_MASK:
+		return 2000000;
+	case LIBIPW_CCK_RATE_5MB_MASK:
+		return 5500000;
+	case LIBIPW_OFDM_RATE_6MB_MASK:
+		return 6000000;
+	case LIBIPW_OFDM_RATE_9MB_MASK:
+		return 9000000;
+	case LIBIPW_CCK_RATE_11MB_MASK:
+		return 11000000;
+	case LIBIPW_OFDM_RATE_12MB_MASK:
+		return 12000000;
+	case LIBIPW_OFDM_RATE_18MB_MASK:
+		return 18000000;
+	case LIBIPW_OFDM_RATE_24MB_MASK:
+		return 24000000;
+	case LIBIPW_OFDM_RATE_36MB_MASK:
+		return 36000000;
+	case LIBIPW_OFDM_RATE_48MB_MASK:
+		return 48000000;
+	case LIBIPW_OFDM_RATE_54MB_MASK:
+		return 54000000;
+	}
+
+	if (priv->ieee->mode == IEEE_B)
+		return 11000000;
+	else
+		return 54000000;
+}
+
+static u32 ipw_get_current_rate(struct ipw_priv *priv)
+{
+	u32 rate, len = sizeof(rate);
+	int err;
+
+	if (!(priv->status & STATUS_ASSOCIATED))
+		return 0;
+
+	if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
+		err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
+				      &len);
+		if (err) {
+			IPW_DEBUG_INFO("failed querying ordinals.\n");
+			return 0;
+		}
+	} else
+		return ipw_get_max_rate(priv);
+
+	switch (rate) {
+	case IPW_TX_RATE_1MB:
+		return 1000000;
+	case IPW_TX_RATE_2MB:
+		return 2000000;
+	case IPW_TX_RATE_5MB:
+		return 5500000;
+	case IPW_TX_RATE_6MB:
+		return 6000000;
+	case IPW_TX_RATE_9MB:
+		return 9000000;
+	case IPW_TX_RATE_11MB:
+		return 11000000;
+	case IPW_TX_RATE_12MB:
+		return 12000000;
+	case IPW_TX_RATE_18MB:
+		return 18000000;
+	case IPW_TX_RATE_24MB:
+		return 24000000;
+	case IPW_TX_RATE_36MB:
+		return 36000000;
+	case IPW_TX_RATE_48MB:
+		return 48000000;
+	case IPW_TX_RATE_54MB:
+		return 54000000;
+	}
+
+	return 0;
+}
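+
+/*
+ * Editor's note: until enough frames have gone out
+ * (IPW_REAL_RATE_RX_PACKET_THRESHOLD -- note the RX in the macro name
+ * even though tx_packets is tested), the firmware's measured TX rate
+ * ordinal is ignored and the configured maximum is reported instead,
+ * presumably so a freshly associated link doesn't show a meaningless
+ * rate.
+ */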
+
+#define IPW_STATS_INTERVAL (2 * HZ)
+static void ipw_gather_stats(struct ipw_priv *priv)
+{
+	u32 rx_err, rx_err_delta, rx_packets_delta;
+	u32 tx_failures, tx_failures_delta, tx_packets_delta;
+	u32 missed_beacons_percent, missed_beacons_delta;
+	u32 quality = 0;
+	u32 len = sizeof(u32);
+	s16 rssi;
+	u32 beacon_quality, signal_quality, tx_quality, rx_quality,
+	    rate_quality;
+	u32 max_rate;
+
+	if (!(priv->status & STATUS_ASSOCIATED)) {
+		priv->quality = 0;
+		return;
+	}
+
+	/* Update the statistics */
+	ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
+			&priv->missed_beacons, &len);
+	missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
+	priv->last_missed_beacons = priv->missed_beacons;
+	if (priv->assoc_request.beacon_interval) {
+		missed_beacons_percent = missed_beacons_delta *
+		    (HZ * le16_to_cpu(priv->assoc_request.beacon_interval)) /
+		    (IPW_STATS_INTERVAL * 10);
+	} else {
+		missed_beacons_percent = 0;
+	}
+	average_add(&priv->average_missed_beacons, missed_beacons_percent);
+
+	ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
+	rx_err_delta = rx_err - priv->last_rx_err;
+	priv->last_rx_err = rx_err;
+
+	ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
+	tx_failures_delta = tx_failures - priv->last_tx_failures;
+	priv->last_tx_failures = tx_failures;
+
+	rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
+	priv->last_rx_packets = priv->rx_packets;
+
+	tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
+	priv->last_tx_packets = priv->tx_packets;
+
+	/* Calculate quality based on the following:
+	 *
+	 * Missed beacon: 100% = 0, 0% = 70% missed
+	 * Rate: 60% = 1Mbs, 100% = Max
+	 * Rx and Tx errors represent a straight % of total Rx/Tx
+	 * RSSI: 100% = > -50,  0% = < -80
+	 * Rx errors: 100% = 0, 0% = 50% missed
+	 *
+	 * The lowest computed quality is used.
+	 *
+	 */
+#define BEACON_THRESHOLD 5
+	beacon_quality = 100 - missed_beacons_percent;
+	if (beacon_quality < BEACON_THRESHOLD)
+		beacon_quality = 0;
+	else
+		beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
+		    (100 - BEACON_THRESHOLD);
+	IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
+			beacon_quality, missed_beacons_percent);
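+	/*
+	 * Editor's note -- worked example: with 10% missed beacons,
+	 * beacon_quality starts at 90 and the rescaling above maps
+	 * 5..100 onto 0..100: (90 - 5) * 100 / 95 = 89%.
+	 */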
+
+	priv->last_rate = ipw_get_current_rate(priv);
+	max_rate = ipw_get_max_rate(priv);
+	rate_quality = priv->last_rate * 40 / max_rate + 60;
+	IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
+			rate_quality, priv->last_rate / 1000000);
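+	/*
+	 * Editor's note: this maps the current rate onto a 60..100%
+	 * band, e.g. 11 Mb/s against a 54 Mb/s maximum gives
+	 * 11 * 40 / 54 + 60 = 68%.
+	 */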
+
+	if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
+		rx_quality = 100 - (rx_err_delta * 100) /
+		    (rx_packets_delta + rx_err_delta);
+	else
+		rx_quality = 100;
+	IPW_DEBUG_STATS("Rx quality   : %3d%% (%u errors, %u packets)\n",
+			rx_quality, rx_err_delta, rx_packets_delta);
+
+	if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
+		tx_quality = 100 - (tx_failures_delta * 100) /
+		    (tx_packets_delta + tx_failures_delta);
+	else
+		tx_quality = 100;
+	IPW_DEBUG_STATS("Tx quality   : %3d%% (%u errors, %u packets)\n",
+			tx_quality, tx_failures_delta, tx_packets_delta);
+
+	rssi = priv->exp_avg_rssi;
+	signal_quality =
+	    (100 *
+	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
+	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
+	     (priv->ieee->perfect_rssi - rssi) *
+	     (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
+	      62 * (priv->ieee->perfect_rssi - rssi))) /
+	    ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
+	     (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
+	if (signal_quality > 100)
+		signal_quality = 100;
+	else if (signal_quality < 1)
+		signal_quality = 0;
+
+	IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
+			signal_quality, rssi);
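+	/*
+	 * Editor's note: the expression above is the RSSI-to-percentage
+	 * mapping used elsewhere in this driver family -- a
+	 * downward-opening quadratic in (perfect_rssi - rssi) that
+	 * reaches 100% at perfect_rssi, falls to 0% near worst_rssi,
+	 * and is then clamped to [0, 100].
+	 */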
+
+	quality = min(rx_quality, signal_quality);
+	quality = min(tx_quality, quality);
+	quality = min(rate_quality, quality);
+	quality = min(beacon_quality, quality);
+	if (quality == beacon_quality)
+		IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
+				quality);
+	if (quality == rate_quality)
+		IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
+				quality);
+	if (quality == tx_quality)
+		IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
+				quality);
+	if (quality == rx_quality)
+		IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
+				quality);
+	if (quality == signal_quality)
+		IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
+				quality);
+
+	priv->quality = quality;
+
+	schedule_delayed_work(&priv->gather_stats, IPW_STATS_INTERVAL);
+}
+
+static void ipw_bg_gather_stats(struct work_struct *work)
+{
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, gather_stats.work);
+	mutex_lock(&priv->mutex);
+	ipw_gather_stats(priv);
+	mutex_unlock(&priv->mutex);
+}
+
+/* Missed beacon behavior:
+ * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
+ * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
+ * Above disassociate threshold, give up and stop scanning.
+ * Roaming is disabled if disassociate_threshold <= roaming_threshold  */
+static void ipw_handle_missed_beacon(struct ipw_priv *priv,
+					    int missed_count)
+{
+	priv->notif_missed_beacons = missed_count;
+
+	if (missed_count > priv->disassociate_threshold &&
+	    priv->status & STATUS_ASSOCIATED) {
+		/* If associated and we've hit the missed
+		 * beacon threshold, disassociate, turn
+		 * off roaming, and abort any active scans */
+		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
+			  IPW_DL_STATE | IPW_DL_ASSOC,
+			  "Missed beacon: %d - disassociate\n", missed_count);
+		priv->status &= ~STATUS_ROAMING;
+		if (priv->status & STATUS_SCANNING) {
+			IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
+				  IPW_DL_STATE,
+				  "Aborting scan with missed beacon.\n");
+			schedule_work(&priv->abort_scan);
+		}
+
+		schedule_work(&priv->disassociate);
+		return;
+	}
+
+	if (priv->status & STATUS_ROAMING) {
+		/* If we are currently roaming, then just
+		 * print a debug statement... */
+		IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
+			  "Missed beacon: %d - roam in progress\n",
+			  missed_count);
+		return;
+	}
+
+	if (roaming &&
+	    (missed_count > priv->roaming_threshold &&
+	     missed_count <= priv->disassociate_threshold)) {
+		/* If we are not already roaming, set the ROAM
+		 * bit in the status and kick off a scan.
+		 * This can happen several times before we reach
+		 * disassociate_threshold. */
+		IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
+			  "Missed beacon: %d - initiate "
+			  "roaming\n", missed_count);
+		if (!(priv->status & STATUS_ROAMING)) {
+			priv->status |= STATUS_ROAMING;
+			if (!(priv->status & STATUS_SCANNING))
+				schedule_delayed_work(&priv->request_scan, 0);
+		}
+		return;
+	}
+
+	if (priv->status & STATUS_SCANNING &&
+	    missed_count > IPW_MB_SCAN_CANCEL_THRESHOLD) {
+		/* Stop scan to keep fw from getting
+		 * stuck (only if we aren't roaming --
+		 * otherwise we'll never scan more than 2 or 3
+		 * channels..) */
+		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
+			  "Aborting scan with missed beacon.\n");
+		schedule_work(&priv->abort_scan);
+	}
+
+	IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
+}
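+
+/*
+ * Editor's note -- worked example (hypothetical thresholds, not the
+ * module defaults): with roaming_threshold = 8 and
+ * disassociate_threshold = 24, misses 1..8 are only logged, misses
+ * 9..24 set STATUS_ROAMING and trigger scans for a better signal, and
+ * a miss count above 24 while associated forces a disassociation.
+ */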
+
+static void ipw_scan_event(struct work_struct *work)
+{
+	union iwreq_data wrqu;
+
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, scan_event.work);
+
+	wrqu.data.length = 0;
+	wrqu.data.flags = 0;
+	wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
+}
+
+static void handle_scan_event(struct ipw_priv *priv)
+{
+	/* Only userspace-requested scan completion events go out immediately */
+	if (!priv->user_requested_scan) {
+		schedule_delayed_work(&priv->scan_event,
+				      round_jiffies_relative(msecs_to_jiffies(4000)));
+	} else {
+		priv->user_requested_scan = 0;
+		mod_delayed_work(system_wq, &priv->scan_event, 0);
+	}
+}
+
+/**
+ * Handle host notification packet.
+ * Called from interrupt routine
+ */
+static void ipw_rx_notification(struct ipw_priv *priv,
+				       struct ipw_rx_notification *notif)
+{
+	u16 size = le16_to_cpu(notif->size);
+
+	IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, size);
+
+	switch (notif->subtype) {
+	case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
+			struct notif_association *assoc = &notif->u.assoc;
+
+			switch (assoc->state) {
+			case CMAS_ASSOCIATED:{
+					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
+						  IPW_DL_ASSOC,
+						  "associated: '%*pE' %pM\n",
+						  priv->essid_len, priv->essid,
+						  priv->bssid);
+
+					switch (priv->ieee->iw_mode) {
+					case IW_MODE_INFRA:
+						memcpy(priv->ieee->bssid,
+						       priv->bssid, ETH_ALEN);
+						break;
+
+					case IW_MODE_ADHOC:
+						memcpy(priv->ieee->bssid,
+						       priv->bssid, ETH_ALEN);
+
+						/* clear out the station table */
+						priv->num_stations = 0;
+
+						IPW_DEBUG_ASSOC
+						    ("queueing adhoc check\n");
+						schedule_delayed_work(
+							&priv->adhoc_check,
+							le16_to_cpu(priv->assoc_request.
+								    beacon_interval));
+						break;
+					}
+
+					priv->status &= ~STATUS_ASSOCIATING;
+					priv->status |= STATUS_ASSOCIATED;
+					schedule_work(&priv->system_config);
+
+#ifdef CONFIG_IPW2200_QOS
+#define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
+			 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_control))
+					if ((priv->status & STATUS_AUTH) &&
+					    (IPW_GET_PACKET_STYPE(&notif->u.raw)
+					     == IEEE80211_STYPE_ASSOC_RESP)) {
+					if (sizeof(struct libipw_assoc_response) <= size &&
+					    size <= 2314) {
+						struct libipw_rx_stats stats = {
+							.len = size - 1,
+						};
+
+						IPW_DEBUG_QOS("QoS Associate size %d\n",
+							      size);
+						libipw_rx_mgt(priv->ieee,
+							      (struct libipw_hdr_4addr *)
+							      &notif->u.raw,
+							      &stats);
+					}
+					}
+#endif
+
+					schedule_work(&priv->link_up);
+
+					break;
+				}
+
+			case CMAS_AUTHENTICATED:{
+					if (priv->status &
+					    (STATUS_ASSOCIATED | STATUS_AUTH)) {
+						struct notif_authenticate *auth =
+						    &notif->u.auth;
+
+						IPW_DEBUG(IPW_DL_NOTIF |
+							  IPW_DL_STATE |
+							  IPW_DL_ASSOC,
+							  "deauthenticated: '%*pE' %pM: (0x%04X) - %s\n",
+							  priv->essid_len,
+							  priv->essid,
+							  priv->bssid,
+							  le16_to_cpu(auth->status),
+							  ipw_get_status_code(le16_to_cpu(auth->status)));
+
+						priv->status &=
+						    ~(STATUS_ASSOCIATING |
+						      STATUS_AUTH |
+						      STATUS_ASSOCIATED);
+
+						schedule_work(&priv->link_down);
+						break;
+					}
+
+					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
+						  IPW_DL_ASSOC,
+						  "authenticated: '%*pE' %pM\n",
+						  priv->essid_len, priv->essid,
+						  priv->bssid);
+					break;
+				}
+
+			case CMAS_INIT:{
+					if (priv->status & STATUS_AUTH) {
+						struct libipw_assoc_response *resp =
+						    (struct libipw_assoc_response *)
+						    &notif->u.raw;
+
+						IPW_DEBUG(IPW_DL_NOTIF |
+							  IPW_DL_STATE |
+							  IPW_DL_ASSOC,
+							  "association failed (0x%04X): %s\n",
+							  le16_to_cpu(resp->status),
+							  ipw_get_status_code(le16_to_cpu(resp->status)));
+					}
+
+					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
+						  IPW_DL_ASSOC,
+						  "disassociated: '%*pE' %pM\n",
+						  priv->essid_len, priv->essid,
+						  priv->bssid);
+
+					priv->status &=
+					    ~(STATUS_DISASSOCIATING |
+					      STATUS_ASSOCIATING |
+					      STATUS_ASSOCIATED | STATUS_AUTH);
+					if (priv->assoc_network &&
+					    (priv->assoc_network->capability &
+					     WLAN_CAPABILITY_IBSS))
+						ipw_remove_current_network(priv);
+
+					schedule_work(&priv->link_down);
+
+					break;
+				}
+
+			case CMAS_RX_ASSOC_RESP:
+				break;
+
+			default:
+				IPW_ERROR("assoc: unknown (%d)\n",
+					  assoc->state);
+				break;
+			}
+
+			break;
+		}
+
+	case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
+			struct notif_authenticate *auth = &notif->u.auth;
+			switch (auth->state) {
+			case CMAS_AUTHENTICATED:
+				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
+					  "authenticated: '%*pE' %pM\n",
+					  priv->essid_len, priv->essid,
+					  priv->bssid);
+				priv->status |= STATUS_AUTH;
+				break;
+
+			case CMAS_INIT:
+				if (priv->status & STATUS_AUTH) {
+					IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
+						  IPW_DL_ASSOC,
+						  "authentication failed (0x%04X): %s\n",
+						  le16_to_cpu(auth->status),
+						  ipw_get_status_code(le16_to_cpu(auth->status)));
+				}
+				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
+					  IPW_DL_ASSOC,
+					  "deauthenticated: '%*pE' %pM\n",
+					  priv->essid_len, priv->essid,
+					  priv->bssid);
+
+				priv->status &= ~(STATUS_ASSOCIATING |
+						  STATUS_AUTH |
+						  STATUS_ASSOCIATED);
+
+				schedule_work(&priv->link_down);
+				break;
+
+			case CMAS_TX_AUTH_SEQ_1:
+				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
+					  IPW_DL_ASSOC, "AUTH_SEQ_1\n");
+				break;
+			case CMAS_RX_AUTH_SEQ_2:
+				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
+					  IPW_DL_ASSOC, "AUTH_SEQ_2\n");
+				break;
+			case CMAS_AUTH_SEQ_1_PASS:
+				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
+					  IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
+				break;
+			case CMAS_AUTH_SEQ_1_FAIL:
+				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
+					  IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
+				break;
+			case CMAS_TX_AUTH_SEQ_3:
+				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
+					  IPW_DL_ASSOC, "AUTH_SEQ_3\n");
+				break;
+			case CMAS_RX_AUTH_SEQ_4:
+				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
+					  IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
+				break;
+			case CMAS_AUTH_SEQ_2_PASS:
+				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
+					  IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
+				break;
+			case CMAS_AUTH_SEQ_2_FAIL:
+				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
+					  IPW_DL_ASSOC, "AUTH_SEQ_2_FAIL\n");
+				break;
+			case CMAS_TX_ASSOC:
+				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
+					  IPW_DL_ASSOC, "TX_ASSOC\n");
+				break;
+			case CMAS_RX_ASSOC_RESP:
+				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
+					  IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
+
+				break;
+			case CMAS_ASSOCIATED:
+				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
+					  IPW_DL_ASSOC, "ASSOCIATED\n");
+				break;
+			default:
+				IPW_DEBUG_NOTIF("auth: failure - %d\n",
+						auth->state);
+				break;
+			}
+			break;
+		}
+
+	case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
+			struct notif_channel_result *x =
+			    &notif->u.channel_result;
+
+			if (size == sizeof(*x)) {
+				IPW_DEBUG_SCAN("Scan result for channel %d\n",
+					       x->channel_num);
+			} else {
+				IPW_DEBUG_SCAN("Scan result of wrong size %d "
+					       "(should be %zd)\n",
+					       size, sizeof(*x));
+			}
+			break;
+		}
+
+	case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
+			struct notif_scan_complete *x = &notif->u.scan_complete;
+			if (size == sizeof(*x)) {
+				IPW_DEBUG_SCAN
+				    ("Scan completed: type %d, %d channels, "
+				     "%d status\n", x->scan_type,
+				     x->num_channels, x->status);
+			} else {
+				IPW_ERROR("Scan completed of wrong size %d "
+					  "(should be %zd)\n",
+					  size, sizeof(*x));
+			}
+
+			priv->status &=
+			    ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
+
+			wake_up_interruptible(&priv->wait_state);
+			cancel_delayed_work(&priv->scan_check);
+
+			if (priv->status & STATUS_EXIT_PENDING)
+				break;
+
+			priv->ieee->scans++;
+
+#ifdef CONFIG_IPW2200_MONITOR
+			if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
+				priv->status |= STATUS_SCAN_FORCED;
+				schedule_delayed_work(&priv->request_scan, 0);
+				break;
+			}
+			priv->status &= ~STATUS_SCAN_FORCED;
+#endif				/* CONFIG_IPW2200_MONITOR */
+
+			/* Do queued direct scans first */
+			if (priv->status & STATUS_DIRECT_SCAN_PENDING)
+				schedule_delayed_work(&priv->request_direct_scan, 0);
+
+			if (!(priv->status & (STATUS_ASSOCIATED |
+					      STATUS_ASSOCIATING |
+					      STATUS_ROAMING |
+					      STATUS_DISASSOCIATING)))
+				schedule_work(&priv->associate);
+			else if (priv->status & STATUS_ROAMING) {
+				if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
+					/* If a scan completed and we are in roam mode, then
+					 * the scan that completed was the one requested as a
+					 * result of entering roam... so, schedule the
+					 * roam work */
+					schedule_work(&priv->roam);
+				else
+					/* Don't schedule if we aborted the scan */
+					priv->status &= ~STATUS_ROAMING;
+			} else if (priv->status & STATUS_SCAN_PENDING)
+				schedule_delayed_work(&priv->request_scan, 0);
+			else if (priv->config & CFG_BACKGROUND_SCAN
+				 && priv->status & STATUS_ASSOCIATED)
+				schedule_delayed_work(&priv->request_scan,
+						      round_jiffies_relative(HZ));
+
+			/* Send an empty event to user space.
+			 * We don't send the received data on the event because
+			 * it would require us to do complex transcoding, and
+			 * we want to minimise the work done in the irq handler.
+			 * Use a request to extract the data.
+			 * Also, we generate this event for any scan, regardless
+			 * of how the scan was initiated. User space can just
+			 * sync on periodic scan to get fresh data...
+			 * Jean II */
+			if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
+				handle_scan_event(priv);
+			break;
+		}
+
+	case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
+			struct notif_frag_length *x = &notif->u.frag_len;
+
+			if (size == sizeof(*x))
+				IPW_ERROR("Frag length: %d\n",
+					  le16_to_cpu(x->frag_length));
+			else
+				IPW_ERROR("Frag length of wrong size %d "
+					  "(should be %zd)\n",
+					  size, sizeof(*x));
+			break;
+		}
+
+	case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
+			struct notif_link_deterioration *x =
+			    &notif->u.link_deterioration;
+
+			if (size == sizeof(*x)) {
+				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
+					"link deterioration: type %d, cnt %d\n",
+					x->silence_notification_type,
+					x->silence_count);
+				memcpy(&priv->last_link_deterioration, x,
+				       sizeof(*x));
+			} else {
+				IPW_ERROR("Link Deterioration of wrong size %d "
+					  "(should be %zd)\n",
+					  size, sizeof(*x));
+			}
+			break;
+		}
+
+	case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
+			IPW_ERROR("Dino config\n");
+			if (priv->hcmd
+			    && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
+				IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
+
+			break;
+		}
+
+	case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
+			struct notif_beacon_state *x = &notif->u.beacon_state;
+			if (size != sizeof(*x)) {
+				IPW_ERROR
+				    ("Beacon state of wrong size %d (should "
+				     "be %zd)\n", size, sizeof(*x));
+				break;
+			}
+
+			if (le32_to_cpu(x->state) ==
+			    HOST_NOTIFICATION_STATUS_BEACON_MISSING)
+				ipw_handle_missed_beacon(priv,
+							 le32_to_cpu(x->number));
+
+			break;
+		}
+
+	case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
+			struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
+			if (size == sizeof(*x)) {
+				IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
+					  "0x%02x station %d\n",
+					  x->key_state, x->security_type,
+					  x->station_index);
+				break;
+			}
+
+			IPW_ERROR
+			    ("TGi Tx Key of wrong size %d (should be %zd)\n",
+			     size, sizeof(*x));
+			break;
+		}
+
+	case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
+			struct notif_calibration *x = &notif->u.calibration;
+
+			if (size == sizeof(*x)) {
+				memcpy(&priv->calib, x, sizeof(*x));
+				IPW_DEBUG_INFO("TODO: Calibration\n");
+				break;
+			}
+
+			IPW_ERROR
+			    ("Calibration of wrong size %d (should be %zd)\n",
+			     size, sizeof(*x));
+			break;
+		}
+
+	case HOST_NOTIFICATION_NOISE_STATS:{
+			if (size == sizeof(u32)) {
+				priv->exp_avg_noise =
+				    exponential_average(priv->exp_avg_noise,
+				    (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
+				    DEPTH_NOISE);
+				break;
+			}
+
+			IPW_ERROR
+			    ("Noise stat is wrong size %d (should be %zd)\n",
+			     size, sizeof(u32));
+			break;
+		}
+
+	default:
+		IPW_DEBUG_NOTIF("Unknown notification: "
+				"subtype=%d,flags=0x%2x,size=%d\n",
+				notif->subtype, notif->flags, size);
+	}
+}
+
+/**
+ * Destroy all DMA structures and initialize them again.
+ *
+ * @param priv
+ * @return error code
+ */
+static int ipw_queue_reset(struct ipw_priv *priv)
+{
+	int rc = 0;
+	/** @todo customize queue sizes */
+	int nTx = 64, nTxCmd = 8;
+	ipw_tx_queue_free(priv);
+	/* Tx CMD queue */
+	rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
+			       IPW_TX_CMD_QUEUE_READ_INDEX,
+			       IPW_TX_CMD_QUEUE_WRITE_INDEX,
+			       IPW_TX_CMD_QUEUE_BD_BASE,
+			       IPW_TX_CMD_QUEUE_BD_SIZE);
+	if (rc) {
+		IPW_ERROR("Tx Cmd queue init failed\n");
+		goto error;
+	}
+	/* Tx queue(s) */
+	rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
+			       IPW_TX_QUEUE_0_READ_INDEX,
+			       IPW_TX_QUEUE_0_WRITE_INDEX,
+			       IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
+	if (rc) {
+		IPW_ERROR("Tx 0 queue init failed\n");
+		goto error;
+	}
+	rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
+			       IPW_TX_QUEUE_1_READ_INDEX,
+			       IPW_TX_QUEUE_1_WRITE_INDEX,
+			       IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
+	if (rc) {
+		IPW_ERROR("Tx 1 queue init failed\n");
+		goto error;
+	}
+	rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
+			       IPW_TX_QUEUE_2_READ_INDEX,
+			       IPW_TX_QUEUE_2_WRITE_INDEX,
+			       IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
+	if (rc) {
+		IPW_ERROR("Tx 2 queue init failed\n");
+		goto error;
+	}
+	rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
+			       IPW_TX_QUEUE_3_READ_INDEX,
+			       IPW_TX_QUEUE_3_WRITE_INDEX,
+			       IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
+	if (rc) {
+		IPW_ERROR("Tx 3 queue init failed\n");
+		goto error;
+	}
+	/* statistics */
+	priv->rx_bufs_min = 0;
+	priv->rx_pend_max = 0;
+	return rc;
+
+error:
+	ipw_tx_queue_free(priv);
+	return rc;
+}
+
+/**
+ * Reclaim Tx queue entries that are no longer used by the NIC.
+ *
+ * When the FW advances the 'R' index, all entries between the old and
+ * new 'R' index need to be reclaimed. As a result, free space opens
+ * up. If there is enough free space (> low mark), wake the Tx queue.
+ *
+ * @note Need to protect against garbage in the 'R' index
+ * @param priv
+ * @param txq
+ * @param qindex
+ * @return Number of used entries remaining in the queue
+ */
+static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
+				struct clx2_tx_queue *txq, int qindex)
+{
+	u32 hw_tail;
+	int used;
+	struct clx2_queue *q = &txq->q;
+
+	hw_tail = ipw_read32(priv, q->reg_r);
+	if (hw_tail >= q->n_bd) {
+		IPW_ERROR
+		    ("Read index for DMA queue (%d) is out of range [0-%d)\n",
+		     hw_tail, q->n_bd);
+		goto done;
+	}
+	for (; q->last_used != hw_tail;
+	     q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
+		ipw_queue_tx_free_tfd(priv, txq);
+		priv->tx_packets++;
+	}
+done:
+	if ((ipw_tx_queue_space(q) > q->low_mark) &&
+	    (qindex >= 0))
+		netif_wake_queue(priv->net_dev);
+	used = q->first_empty - q->last_used;
+	if (used < 0)
+		used += q->n_bd;
+
+	return used;
+}
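+
+/*
+ * Editor's note -- worked example for the wrap correction above
+ * (hypothetical indexes): with n_bd = 64, first_empty = 3 and
+ * last_used = 60, the raw difference is -57, so adding n_bd yields 7
+ * entries still in flight (slots 60..63 and 0..2).
+ */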
+
+static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
+			     int len, int sync)
+{
+	struct clx2_tx_queue *txq = &priv->txq_cmd;
+	struct clx2_queue *q = &txq->q;
+	struct tfd_frame *tfd;
+
+	if (ipw_tx_queue_space(q) < (sync ? 1 : 2)) {
+		IPW_ERROR("No space for Tx\n");
+		return -EBUSY;
+	}
+
+	tfd = &txq->bd[q->first_empty];
+	txq->txb[q->first_empty] = NULL;
+
+	memset(tfd, 0, sizeof(*tfd));
+	tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
+	tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
+	priv->hcmd_seq++;
+	tfd->u.cmd.index = hcmd;
+	tfd->u.cmd.length = len;
+	memcpy(tfd->u.cmd.payload, buf, len);
+	q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
+	ipw_write32(priv, q->reg_w, q->first_empty);
+	_ipw_read32(priv, 0x90);
+
+	return 0;
+}
+
+/*
+ * Rx theory of operation
+ *
+ * The host allocates 32 DMA target addresses and passes the host address
+ * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
+ * 0 to 31
+ *
+ * Rx Queue Indexes
+ * The host/firmware share two index registers for managing the Rx buffers.
+ *
+ * The READ index maps to the first position that the firmware may be writing
+ * to -- the driver can read up to (but not including) this position and get
+ * good data.
+ * The READ index is managed by the firmware once the card is enabled.
+ *
+ * The WRITE index maps to the last position the driver has read from -- the
+ * position preceding WRITE is the last slot the firmware can place a packet.
+ *
+ * The queue is empty (no good data) if WRITE = READ - 1, and is full if
+ * WRITE = READ.
+ *
+ * During initialization the host sets up the READ queue position to the first
+ * INDEX position, and WRITE to the last (READ - 1 wrapped)
+ *
+ * When the firmware places a packet in a buffer it will advance the READ index
+ * and fire the RX interrupt.  The driver can then query the READ index and
+ * process as many packets as possible, moving the WRITE index forward as it
+ * resets the Rx queue buffers with new memory.
+ *
+ * The management in the driver is as follows:
+ * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free.  When
+ *   ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
+ *   to replenish the ipw->rxq->rx_free list.
+ * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
+ *   ipw->rxq is replenished and the READ INDEX is updated (updating the
+ *   'processed' and 'read' driver indexes as well)
+ * + A received packet is processed and handed to the kernel network stack,
+ *   detached from the ipw->rxq.  The driver 'processed' index is updated.
+ * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
+ *   list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
+ *   INDEX is not incremented and ipw->status(RX_STALLED) is set.  If there
+ *   were enough free buffers and RX_STALLED is set it is cleared.
+ *
+ *
+ * Driver sequence:
+ *
+ * ipw_rx_queue_alloc()       Allocates rx_free
+ * ipw_rx_queue_replenish()   Replenishes rx_free list from rx_used, and calls
+ *                            ipw_rx_queue_restock
+ * ipw_rx_queue_restock()     Moves available buffers from rx_free into Rx
+ *                            queue, updates firmware pointers, and updates
+ *                            the WRITE index.  If insufficient rx_free buffers
+ *                            are available, schedules ipw_rx_queue_replenish
+ *
+ * -- enable interrupts --
+ * ISR - ipw_rx()             Detach ipw_rx_mem_buffers from pool up to the
+ *                            READ INDEX, detaching the SKB from the pool.
+ *                            Moves the packet buffer from queue to rx_used.
+ *                            Calls ipw_rx_queue_restock to refill any empty
+ *                            slots.
+ * ...
+ *
+ */
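+
+/*
+ * Editor's note -- illustrative sketch only (example_rx_space() is a
+ * hypothetical name, not the driver's ipw_rx_queue_space() helper):
+ * the empty/full rule described above is the classic ring-buffer
+ * convention of sacrificing at least one slot so that "full" and
+ * "empty" remain distinguishable, roughly:
+ */
+static inline int example_rx_space(int read, int write, int queue_size)
+{
+	int space = read - write - 1;	/* one slot deliberately unused */
+
+	if (space < 0)
+		space += queue_size;
+	return space;
+}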
+
+/*
+ * If there are slots in the RX queue that need to be restocked,
+ * and we have free pre-allocated buffers, fill the ranks as much
+ * as we can, pulling from rx_free.
+ *
+ * This moves the 'write' index forward to catch up with 'processed', and
+ * also updates the memory address in the firmware to reference the new
+ * target buffer.
+ */
+static void ipw_rx_queue_restock(struct ipw_priv *priv)
+{
+	struct ipw_rx_queue *rxq = priv->rxq;
+	struct list_head *element;
+	struct ipw_rx_mem_buffer *rxb;
+	unsigned long flags;
+	int write;
+
+	spin_lock_irqsave(&rxq->lock, flags);
+	write = rxq->write;
+	while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
+		element = rxq->rx_free.next;
+		rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
+		list_del(element);
+
+		ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
+			    rxb->dma_addr);
+		rxq->queue[rxq->write] = rxb;
+		rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
+		rxq->free_count--;
+	}
+	spin_unlock_irqrestore(&rxq->lock, flags);
+
+	/* If the pre-allocated buffer pool is dropping low, schedule to
+	 * refill it */
+	if (rxq->free_count <= RX_LOW_WATERMARK)
+		schedule_work(&priv->rx_replenish);
+
+	/* If we've added more space for the firmware to place data, tell it */
+	if (write != rxq->write)
+		ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
+}
+
+/*
+ * Move all used packet from rx_used to rx_free, allocating a new SKB for each.
+ * Also restock the Rx queue via ipw_rx_queue_restock.
+ *
+ * This is called as a scheduled work item (except for during initialization)
+ */
+static void ipw_rx_queue_replenish(void *data)
+{
+	struct ipw_priv *priv = data;
+	struct ipw_rx_queue *rxq = priv->rxq;
+	struct list_head *element;
+	struct ipw_rx_mem_buffer *rxb;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rxq->lock, flags);
+	while (!list_empty(&rxq->rx_used)) {
+		element = rxq->rx_used.next;
+		rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
+		rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
+		if (!rxb->skb) {
+			printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
+			       priv->net_dev->name);
+			/* We don't reschedule replenish work here -- we will
+			 * call the restock method and if it still needs
+			 * more buffers it will schedule replenish */
+			break;
+		}
+		list_del(element);
+
+		rxb->dma_addr =
+		    pci_map_single(priv->pci_dev, rxb->skb->data,
+				   IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+
+		list_add_tail(&rxb->list, &rxq->rx_free);
+		rxq->free_count++;
+	}
+	spin_unlock_irqrestore(&rxq->lock, flags);
+
+	ipw_rx_queue_restock(priv);
+}
+
+static void ipw_bg_rx_queue_replenish(struct work_struct *work)
+{
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, rx_replenish);
+	mutex_lock(&priv->mutex);
+	ipw_rx_queue_replenish(priv);
+	mutex_unlock(&priv->mutex);
+}
+
+/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
+ * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
+ * This free routine walks the list of POOL entries and if the SKB is set to
+ * a non-NULL value it is unmapped and freed.
+ */
+static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
+{
+	int i;
+
+	if (!rxq)
+		return;
+
+	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
+		if (rxq->pool[i].skb != NULL) {
+			pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
+					 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+			dev_kfree_skb(rxq->pool[i].skb);
+		}
+	}
+
+	kfree(rxq);
+}
+
+static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
+{
+	struct ipw_rx_queue *rxq;
+	int i;
+
+	rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
+	if (unlikely(!rxq)) {
+		IPW_ERROR("memory allocation failed\n");
+		return NULL;
+	}
+	spin_lock_init(&rxq->lock);
+	INIT_LIST_HEAD(&rxq->rx_free);
+	INIT_LIST_HEAD(&rxq->rx_used);
+
+	/* Fill the rx_used queue with _all_ of the Rx buffers */
+	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
+		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+
+	/* Set us so that we have processed and used all buffers, but have
+	 * not restocked the Rx queue with fresh buffers */
+	rxq->read = rxq->write = 0;
+	rxq->free_count = 0;
+
+	return rxq;
+}
+
+static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
+{
+	rate &= ~LIBIPW_BASIC_RATE_MASK;
+	if (ieee_mode == IEEE_A) {
+		switch (rate) {
+		case LIBIPW_OFDM_RATE_6MB:
+			return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ?
+			    1 : 0;
+		case LIBIPW_OFDM_RATE_9MB:
+			return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ?
+			    1 : 0;
+		case LIBIPW_OFDM_RATE_12MB:
+			return priv->rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ?
+			    1 : 0;
+		case LIBIPW_OFDM_RATE_18MB:
+			return priv->rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ?
+			    1 : 0;
+		case LIBIPW_OFDM_RATE_24MB:
+			return priv->rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ?
+			    1 : 0;
+		case LIBIPW_OFDM_RATE_36MB:
+			return priv->rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ?
+			    1 : 0;
+		case LIBIPW_OFDM_RATE_48MB:
+			return priv->rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ?
+			    1 : 0;
+		case LIBIPW_OFDM_RATE_54MB:
+			return priv->rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ?
+			    1 : 0;
+		default:
+			return 0;
+		}
+	}
+
+	/* B and G mixed */
+	switch (rate) {
+	case LIBIPW_CCK_RATE_1MB:
+		return priv->rates_mask & LIBIPW_CCK_RATE_1MB_MASK ? 1 : 0;
+	case LIBIPW_CCK_RATE_2MB:
+		return priv->rates_mask & LIBIPW_CCK_RATE_2MB_MASK ? 1 : 0;
+	case LIBIPW_CCK_RATE_5MB:
+		return priv->rates_mask & LIBIPW_CCK_RATE_5MB_MASK ? 1 : 0;
+	case LIBIPW_CCK_RATE_11MB:
+		return priv->rates_mask & LIBIPW_CCK_RATE_11MB_MASK ? 1 : 0;
+	}
+
+	/* If we are limited to B modulations, bail at this point */
+	if (ieee_mode == IEEE_B)
+		return 0;
+
+	/* G */
+	switch (rate) {
+	case LIBIPW_OFDM_RATE_6MB:
+		return priv->rates_mask & LIBIPW_OFDM_RATE_6MB_MASK ? 1 : 0;
+	case LIBIPW_OFDM_RATE_9MB:
+		return priv->rates_mask & LIBIPW_OFDM_RATE_9MB_MASK ? 1 : 0;
+	case LIBIPW_OFDM_RATE_12MB:
+		return priv->rates_mask & LIBIPW_OFDM_RATE_12MB_MASK ? 1 : 0;
+	case LIBIPW_OFDM_RATE_18MB:
+		return priv->rates_mask & LIBIPW_OFDM_RATE_18MB_MASK ? 1 : 0;
+	case LIBIPW_OFDM_RATE_24MB:
+		return priv->rates_mask & LIBIPW_OFDM_RATE_24MB_MASK ? 1 : 0;
+	case LIBIPW_OFDM_RATE_36MB:
+		return priv->rates_mask & LIBIPW_OFDM_RATE_36MB_MASK ? 1 : 0;
+	case LIBIPW_OFDM_RATE_48MB:
+		return priv->rates_mask & LIBIPW_OFDM_RATE_48MB_MASK ? 1 : 0;
+	case LIBIPW_OFDM_RATE_54MB:
+		return priv->rates_mask & LIBIPW_OFDM_RATE_54MB_MASK ? 1 : 0;
+	}
+
+	return 0;
+}
+
+static int ipw_compatible_rates(struct ipw_priv *priv,
+				const struct libipw_network *network,
+				struct ipw_supported_rates *rates)
+{
+	int num_rates, i;
+
+	memset(rates, 0, sizeof(*rates));
+	num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
+	rates->num_rates = 0;
+	for (i = 0; i < num_rates; i++) {
+		if (!ipw_is_rate_in_mask(priv, network->mode,
+					 network->rates[i])) {
+
+			if (network->rates[i] & LIBIPW_BASIC_RATE_MASK) {
+				IPW_DEBUG_SCAN("Adding masked mandatory "
+					       "rate %02X\n",
+					       network->rates[i]);
+				rates->supported_rates[rates->num_rates++] =
+				    network->rates[i];
+				continue;
+			}
+
+			IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
+				       network->rates[i], priv->rates_mask);
+			continue;
+		}
+
+		rates->supported_rates[rates->num_rates++] = network->rates[i];
+	}
+
+	num_rates = min(network->rates_ex_len,
+			(u8) (IPW_MAX_RATES - num_rates));
+	for (i = 0; i < num_rates; i++) {
+		if (!ipw_is_rate_in_mask(priv, network->mode,
+					 network->rates_ex[i])) {
+			if (network->rates_ex[i] & LIBIPW_BASIC_RATE_MASK) {
+				IPW_DEBUG_SCAN("Adding masked mandatory "
+					       "rate %02X\n",
+					       network->rates_ex[i]);
+				rates->supported_rates[rates->num_rates++] =
+				    network->rates_ex[i];
+				continue;
+			}
+
+			IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
+				       network->rates_ex[i], priv->rates_mask);
+			continue;
+		}
+
+		rates->supported_rates[rates->num_rates++] =
+		    network->rates_ex[i];
+	}
+
+	return 1;
+}
+
+static void ipw_copy_rates(struct ipw_supported_rates *dest,
+				  const struct ipw_supported_rates *src)
+{
+	u8 i;
+	for (i = 0; i < src->num_rates; i++)
+		dest->supported_rates[i] = src->supported_rates[i];
+	dest->num_rates = src->num_rates;
+}
+
+/* TODO: Look at sniffed packets in the air to determine if the basic rate
+ * mask should ever be used -- right now all callers to add the scan rates are
+ * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
+static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
+				   u8 modulation, u32 rate_mask)
+{
+	u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
+	    LIBIPW_BASIC_RATE_MASK : 0;
+
+	if (rate_mask & LIBIPW_CCK_RATE_1MB_MASK)
+		rates->supported_rates[rates->num_rates++] =
+		    LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_1MB;
+
+	if (rate_mask & LIBIPW_CCK_RATE_2MB_MASK)
+		rates->supported_rates[rates->num_rates++] =
+		    LIBIPW_BASIC_RATE_MASK | LIBIPW_CCK_RATE_2MB;
+
+	if (rate_mask & LIBIPW_CCK_RATE_5MB_MASK)
+		rates->supported_rates[rates->num_rates++] = basic_mask |
+		    LIBIPW_CCK_RATE_5MB;
+
+	if (rate_mask & LIBIPW_CCK_RATE_11MB_MASK)
+		rates->supported_rates[rates->num_rates++] = basic_mask |
+		    LIBIPW_CCK_RATE_11MB;
+}
+
+static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
+				    u8 modulation, u32 rate_mask)
+{
+	u8 basic_mask = (LIBIPW_OFDM_MODULATION == modulation) ?
+	    LIBIPW_BASIC_RATE_MASK : 0;
+
+	if (rate_mask & LIBIPW_OFDM_RATE_6MB_MASK)
+		rates->supported_rates[rates->num_rates++] = basic_mask |
+		    LIBIPW_OFDM_RATE_6MB;
+
+	if (rate_mask & LIBIPW_OFDM_RATE_9MB_MASK)
+		rates->supported_rates[rates->num_rates++] =
+		    LIBIPW_OFDM_RATE_9MB;
+
+	if (rate_mask & LIBIPW_OFDM_RATE_12MB_MASK)
+		rates->supported_rates[rates->num_rates++] = basic_mask |
+		    LIBIPW_OFDM_RATE_12MB;
+
+	if (rate_mask & LIBIPW_OFDM_RATE_18MB_MASK)
+		rates->supported_rates[rates->num_rates++] =
+		    LIBIPW_OFDM_RATE_18MB;
+
+	if (rate_mask & LIBIPW_OFDM_RATE_24MB_MASK)
+		rates->supported_rates[rates->num_rates++] = basic_mask |
+		    LIBIPW_OFDM_RATE_24MB;
+
+	if (rate_mask & LIBIPW_OFDM_RATE_36MB_MASK)
+		rates->supported_rates[rates->num_rates++] =
+		    LIBIPW_OFDM_RATE_36MB;
+
+	if (rate_mask & LIBIPW_OFDM_RATE_48MB_MASK)
+		rates->supported_rates[rates->num_rates++] =
+		    LIBIPW_OFDM_RATE_48MB;
+
+	if (rate_mask & LIBIPW_OFDM_RATE_54MB_MASK)
+		rates->supported_rates[rates->num_rates++] =
+		    LIBIPW_OFDM_RATE_54MB;
+}
+
+struct ipw_network_match {
+	struct libipw_network *network;
+	struct ipw_supported_rates rates;
+};
+
+static int ipw_find_adhoc_network(struct ipw_priv *priv,
+				  struct ipw_network_match *match,
+				  struct libipw_network *network,
+				  int roaming)
+{
+	struct ipw_supported_rates rates;
+
+	/* Verify that this network's capability is compatible with the
+	 * current adhoc mode */
+	if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
+	    !(network->capability & WLAN_CAPABILITY_IBSS)) {
+		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded due to capability mismatch.\n",
+				network->ssid_len, network->ssid,
+				network->bssid);
+		return 0;
+	}
+
+	if (unlikely(roaming)) {
+		/* If we are roaming, then make sure this is a valid
+		 * network to try to roam to */
+		if ((network->ssid_len != match->network->ssid_len) ||
+		    memcmp(network->ssid, match->network->ssid,
+			   network->ssid_len)) {
+			IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of non-network ESSID.\n",
+					network->ssid_len, network->ssid,
+					network->bssid);
+			return 0;
+		}
+	} else {
+		/* If an ESSID has been configured then compare the broadcast
+		 * ESSID to ours */
+		if ((priv->config & CFG_STATIC_ESSID) &&
+		    ((network->ssid_len != priv->essid_len) ||
+		     memcmp(network->ssid, priv->essid,
+			    min(network->ssid_len, priv->essid_len)))) {
+			IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of ESSID mismatch: '%*pE'.\n",
+					network->ssid_len, network->ssid,
+					network->bssid, priv->essid_len,
+					priv->essid);
+			return 0;
+		}
+	}
+
+	/* Adhoc merging prefers the longer-lived IBSS: if this network's
+	 * TSF shows it is newer than the current match, don't bother
+	 * testing everything else. */
+	if (network->time_stamp[0] < match->network->time_stamp[0]) {
+		IPW_DEBUG_MERGE("Network '%*pE' excluded because newer than current network.\n",
+				match->network->ssid_len, match->network->ssid);
+		return 0;
+	} else if (network->time_stamp[1] < match->network->time_stamp[1]) {
+		IPW_DEBUG_MERGE("Network '%*pE' excluded because newer than current network.\n",
+				match->network->ssid_len, match->network->ssid);
+		return 0;
+	}
+
+	/* Now go through and see if the requested network is valid... */
+	if (priv->ieee->scan_age != 0 &&
+	    time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
+		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of age: %ums.\n",
+				network->ssid_len, network->ssid,
+				network->bssid,
+				jiffies_to_msecs(jiffies -
+						 network->last_scanned));
+		return 0;
+	}
+
+	if ((priv->config & CFG_STATIC_CHANNEL) &&
+	    (network->channel != priv->channel)) {
+		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of channel mismatch: %d != %d.\n",
+				network->ssid_len, network->ssid,
+				network->bssid,
+				network->channel, priv->channel);
+		return 0;
+	}
+
+	/* Verify privacy compatibility */
+	if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
+	    ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
+		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of privacy mismatch: %s != %s.\n",
+				network->ssid_len, network->ssid,
+				network->bssid,
+				priv->capability & CAP_PRIVACY_ON ?
+				"on" : "off",
+				network->capability & WLAN_CAPABILITY_PRIVACY ?
+				"on" : "off");
+		return 0;
+	}
+
+	if (ether_addr_equal(network->bssid, priv->bssid)) {
+		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of the same BSSID match: %pM.\n",
+				network->ssid_len, network->ssid,
+				network->bssid, priv->bssid);
+		return 0;
+	}
+
+	/* Filter out any incompatible freq / mode combinations */
+	if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
+		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of invalid frequency/mode combination.\n",
+				network->ssid_len, network->ssid,
+				network->bssid);
+		return 0;
+	}
+
+	/* Ensure that the rates supported by the driver are compatible with
+	 * this AP, including verification of basic rates (mandatory) */
+	if (!ipw_compatible_rates(priv, network, &rates)) {
+		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because configured rate mask excludes AP mandatory rate.\n",
+				network->ssid_len, network->ssid,
+				network->bssid);
+		return 0;
+	}
+
+	if (rates.num_rates == 0) {
+		IPW_DEBUG_MERGE("Network '%*pE (%pM)' excluded because of no compatible rates.\n",
+				network->ssid_len, network->ssid,
+				network->bssid);
+		return 0;
+	}
+
+	/* TODO: Perform any further minimal comparative tests.  We do not
+	 * want to put too much policy logic here; intelligent scan selection
+	 * should occur within a generic IEEE 802.11 user space tool.  */
+
+	/* Set up 'new' AP to this network */
+	ipw_copy_rates(&match->rates, &rates);
+	match->network = network;
+	IPW_DEBUG_MERGE("Network '%*pE (%pM)' is a viable match.\n",
+			network->ssid_len, network->ssid, network->bssid);
+
+	return 1;
+}
+
+static void ipw_merge_adhoc_network(struct work_struct *work)
+{
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, merge_networks);
+	struct libipw_network *network = NULL;
+	struct ipw_network_match match = {
+		.network = priv->assoc_network
+	};
+
+	if ((priv->status & STATUS_ASSOCIATED) &&
+	    (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
+		/* First pass through ROAM process -- look for a better
+		 * network */
+		unsigned long flags;
+
+		spin_lock_irqsave(&priv->ieee->lock, flags);
+		list_for_each_entry(network, &priv->ieee->network_list, list) {
+			if (network != priv->assoc_network)
+				ipw_find_adhoc_network(priv, &match, network,
+						       1);
+		}
+		spin_unlock_irqrestore(&priv->ieee->lock, flags);
+
+		if (match.network == priv->assoc_network) {
+			IPW_DEBUG_MERGE("No better ADHOC in this network to "
+					"merge to.\n");
+			return;
+		}
+
+		mutex_lock(&priv->mutex);
+		if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
+			IPW_DEBUG_MERGE("remove network %*pE\n",
+					priv->essid_len, priv->essid);
+			ipw_remove_current_network(priv);
+		}
+
+		ipw_disassociate(priv);
+		priv->assoc_network = match.network;
+		mutex_unlock(&priv->mutex);
+		return;
+	}
+}
+
+static int ipw_best_network(struct ipw_priv *priv,
+			    struct ipw_network_match *match,
+			    struct libipw_network *network, int roaming)
+{
+	struct ipw_supported_rates rates;
+
+	/* Verify that this network's capability is compatible with the
+	 * current mode (AdHoc or Infrastructure) */
+	if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
+	     !(network->capability & WLAN_CAPABILITY_ESS)) ||
+	    (priv->ieee->iw_mode == IW_MODE_ADHOC &&
+	     !(network->capability & WLAN_CAPABILITY_IBSS))) {
+		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded due to capability mismatch.\n",
+				network->ssid_len, network->ssid,
+				network->bssid);
+		return 0;
+	}
+
+	if (unlikely(roaming)) {
+		/* If we are roaming, then make sure this is a valid
+		 * network to try to roam to */
+		if ((network->ssid_len != match->network->ssid_len) ||
+		    memcmp(network->ssid, match->network->ssid,
+			   network->ssid_len)) {
+			IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of non-network ESSID.\n",
+					network->ssid_len, network->ssid,
+					network->bssid);
+			return 0;
+		}
+	} else {
+		/* If an ESSID has been configured then compare the broadcast
+		 * ESSID to ours */
+		if ((priv->config & CFG_STATIC_ESSID) &&
+		    ((network->ssid_len != priv->essid_len) ||
+		     memcmp(network->ssid, priv->essid,
+			    min(network->ssid_len, priv->essid_len)))) {
+			IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of ESSID mismatch: '%*pE'.\n",
+					network->ssid_len, network->ssid,
+					network->bssid, priv->essid_len,
+					priv->essid);
+			return 0;
+		}
+	}
+
+	/* If the old network rate is better than this one, don't bother
+	 * testing everything else. */
+	if (match->network && match->network->stats.rssi > network->stats.rssi) {
+		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because '%*pE (%pM)' has a stronger signal.\n",
+				network->ssid_len, network->ssid,
+				network->bssid, match->network->ssid_len,
+				match->network->ssid, match->network->bssid);
+		return 0;
+	}
+
+	/* If this network has already had an association attempt within the
+	 * last 3 seconds, do not try and associate again... */
+	if (network->last_associate &&
+	    time_after(network->last_associate + (HZ * 3UL), jiffies)) {
+		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of storming (%ums since last assoc attempt).\n",
+				network->ssid_len, network->ssid,
+				network->bssid,
+				jiffies_to_msecs(jiffies -
+						 network->last_associate));
+		return 0;
+	}
+
+	/* Now go through and see if the requested network is valid... */
+	if (priv->ieee->scan_age != 0 &&
+	    time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
+		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of age: %ums.\n",
+				network->ssid_len, network->ssid,
+				network->bssid,
+				jiffies_to_msecs(jiffies -
+						 network->last_scanned));
+		return 0;
+	}
+
+	if ((priv->config & CFG_STATIC_CHANNEL) &&
+	    (network->channel != priv->channel)) {
+		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of channel mismatch: %d != %d.\n",
+				network->ssid_len, network->ssid,
+				network->bssid,
+				network->channel, priv->channel);
+		return 0;
+	}
+
+	/* Verify privacy compatibility */
+	if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
+	    ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
+		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of privacy mismatch: %s != %s.\n",
+				network->ssid_len, network->ssid,
+				network->bssid,
+				priv->capability & CAP_PRIVACY_ON ? "on" :
+				"off",
+				network->capability &
+				WLAN_CAPABILITY_PRIVACY ? "on" : "off");
+		return 0;
+	}
+
+	if ((priv->config & CFG_STATIC_BSSID) &&
+	    !ether_addr_equal(network->bssid, priv->bssid)) {
+		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of BSSID mismatch: %pM.\n",
+				network->ssid_len, network->ssid,
+				network->bssid, priv->bssid);
+		return 0;
+	}
+
+	/* Filter out any incompatible freq / mode combinations */
+	if (!libipw_is_valid_mode(priv->ieee, network->mode)) {
+		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of invalid frequency/mode combination.\n",
+				network->ssid_len, network->ssid,
+				network->bssid);
+		return 0;
+	}
+
+	/* Filter out invalid channel in current GEO */
+	if (!libipw_is_valid_channel(priv->ieee, network->channel)) {
+		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of invalid channel in current GEO\n",
+				network->ssid_len, network->ssid,
+				network->bssid);
+		return 0;
+	}
+
+	/* Ensure that the rates supported by the driver are compatible with
+	 * this AP, including verification of basic rates (mandatory) */
+	if (!ipw_compatible_rates(priv, network, &rates)) {
+		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because configured rate mask excludes AP mandatory rate.\n",
+				network->ssid_len, network->ssid,
+				network->bssid);
+		return 0;
+	}
+
+	if (rates.num_rates == 0) {
+		IPW_DEBUG_ASSOC("Network '%*pE (%pM)' excluded because of no compatible rates.\n",
+				network->ssid_len, network->ssid,
+				network->bssid);
+		return 0;
+	}
+
+	/* TODO: Perform any further minimal comparative tests.  We do not
+	 * want to put too much policy logic here; intelligent scan selection
+	 * should occur within a generic IEEE 802.11 user space tool.  */
+
+	/* Set up 'new' AP to this network */
+	ipw_copy_rates(&match->rates, &rates);
+	match->network = network;
+
+	IPW_DEBUG_ASSOC("Network '%*pE (%pM)' is a viable match.\n",
+			network->ssid_len, network->ssid, network->bssid);
+
+	return 1;
+}
+
+static void ipw_adhoc_create(struct ipw_priv *priv,
+			     struct libipw_network *network)
+{
+	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
+	int i;
+
+	/*
+	 * For the purposes of scanning, we can set our wireless mode
+	 * to trigger scans across combinations of bands, but when it
+	 * comes to creating a new ad-hoc network, we have to tell the
+	 * firmware exactly which band to use.
+	 *
+	 * We also have the possibility of an invalid channel for the
+	 * chosen band.  Attempting to create a new ad-hoc network with
+	 * an invalid channel for the wireless mode will trigger a
+	 * firmware fatal error.
+	 */
+	switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
+	case LIBIPW_52GHZ_BAND:
+		network->mode = IEEE_A;
+		i = libipw_channel_to_index(priv->ieee, priv->channel);
+		BUG_ON(i == -1);
+		if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
+			IPW_WARNING("Overriding invalid channel\n");
+			priv->channel = geo->a[0].channel;
+		}
+		break;
+
+	case LIBIPW_24GHZ_BAND:
+		if (priv->ieee->mode & IEEE_G)
+			network->mode = IEEE_G;
+		else
+			network->mode = IEEE_B;
+		i = libipw_channel_to_index(priv->ieee, priv->channel);
+		BUG_ON(i == -1);
+		if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY) {
+			IPW_WARNING("Overriding invalid channel\n");
+			priv->channel = geo->bg[0].channel;
+		}
+		break;
+
+	default:
+		IPW_WARNING("Overriding invalid channel\n");
+		if (priv->ieee->mode & IEEE_A) {
+			network->mode = IEEE_A;
+			priv->channel = geo->a[0].channel;
+		} else if (priv->ieee->mode & IEEE_G) {
+			network->mode = IEEE_G;
+			priv->channel = geo->bg[0].channel;
+		} else {
+			network->mode = IEEE_B;
+			priv->channel = geo->bg[0].channel;
+		}
+		break;
+	}
+
+	network->channel = priv->channel;
+	priv->config |= CFG_ADHOC_PERSIST;
+	ipw_create_bssid(priv, network->bssid);
+	network->ssid_len = priv->essid_len;
+	memcpy(network->ssid, priv->essid, priv->essid_len);
+	memset(&network->stats, 0, sizeof(network->stats));
+	network->capability = WLAN_CAPABILITY_IBSS;
+	if (!(priv->config & CFG_PREAMBLE_LONG))
+		network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
+	if (priv->capability & CAP_PRIVACY_ON)
+		network->capability |= WLAN_CAPABILITY_PRIVACY;
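+	/* Mirror the 802.11 Supported Rates / Extended Supported Rates
+	 * split: the first MAX_RATES_LENGTH rates go in the basic list
+	 * and any overflow is carried in the extended list. */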
+	network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
+	memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
+	network->rates_ex_len = priv->rates.num_rates - network->rates_len;
+	memcpy(network->rates_ex,
+	       &priv->rates.supported_rates[network->rates_len],
+	       network->rates_ex_len);
+	network->last_scanned = 0;
+	network->flags = 0;
+	network->last_associate = 0;
+	network->time_stamp[0] = 0;
+	network->time_stamp[1] = 0;
+	network->beacon_interval = 100;	/* Default */
+	network->listen_interval = 10;	/* Default */
+	network->atim_window = 0;	/* Default */
+	network->wpa_ie_len = 0;
+	network->rsn_ie_len = 0;
+}
+
+static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
+{
+	struct ipw_tgi_tx_key key;
+
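+	/* sec.flags carries one valid bit per key slot; nothing to send
+	 * if this key index was never set. */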
+	if (!(priv->ieee->sec.flags & (1 << index)))
+		return;
+
+	key.key_id = index;
+	memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
+	key.security_type = type;
+	key.station_index = 0;	/* always 0 for BSS */
+	key.flags = 0;
+	/* 0 for new key; previous value of counter (after fatal error) */
+	key.tx_counter[0] = cpu_to_le32(0);
+	key.tx_counter[1] = cpu_to_le32(0);
+
+	ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
+}
+
+static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
+{
+	struct ipw_wep_key key;
+	int i;
+
+	key.cmd_id = DINO_CMD_WEP_KEY;
+	key.seq_num = 0;
+
+	/* Note: AES keys cannot be set multiple times.
+	 * Only set them the first time. */
+	for (i = 0; i < 4; i++) {
+		key.key_index = i | type;
+		if (!(priv->ieee->sec.flags & (1 << i))) {
+			key.key_size = 0;
+			continue;
+		}
+
+		key.key_size = priv->ieee->sec.key_sizes[i];
+		memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
+
+		ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
+	}
+}
+
+static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
+{
+	if (priv->ieee->host_encrypt)
+		return;
+
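+	/* WEP (SEC_LEVEL_1) and CCMP (SEC_LEVEL_3) can be decrypted in
+	 * hardware; TKIP (SEC_LEVEL_2) needs host decryption because the
+	 * hardware cannot verify the TKIP MIC. */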
+	switch (level) {
+	case SEC_LEVEL_3:
+		priv->sys_config.disable_unicast_decryption = 0;
+		priv->ieee->host_decrypt = 0;
+		break;
+	case SEC_LEVEL_2:
+		priv->sys_config.disable_unicast_decryption = 1;
+		priv->ieee->host_decrypt = 1;
+		break;
+	case SEC_LEVEL_1:
+		priv->sys_config.disable_unicast_decryption = 0;
+		priv->ieee->host_decrypt = 0;
+		break;
+	case SEC_LEVEL_0:
+		priv->sys_config.disable_unicast_decryption = 1;
+		break;
+	default:
+		break;
+	}
+}
+
+static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
+{
+	if (priv->ieee->host_encrypt)
+		return;
+
+	switch (level) {
+	case SEC_LEVEL_3:
+		priv->sys_config.disable_multicast_decryption = 0;
+		break;
+	case SEC_LEVEL_2:
+		priv->sys_config.disable_multicast_decryption = 1;
+		break;
+	case SEC_LEVEL_1:
+		priv->sys_config.disable_multicast_decryption = 0;
+		break;
+	case SEC_LEVEL_0:
+		priv->sys_config.disable_multicast_decryption = 1;
+		break;
+	default:
+		break;
+	}
+}
+
+static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
+{
+	switch (priv->ieee->sec.level) {
+	case SEC_LEVEL_3:
+		if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
+			ipw_send_tgi_tx_key(priv,
+					    DCT_FLAG_EXT_SECURITY_CCM,
+					    priv->ieee->sec.active_key);
+
+		if (!priv->ieee->host_mc_decrypt)
+			ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
+		break;
+	case SEC_LEVEL_2:
+		if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
+			ipw_send_tgi_tx_key(priv,
+					    DCT_FLAG_EXT_SECURITY_TKIP,
+					    priv->ieee->sec.active_key);
+		break;
+	case SEC_LEVEL_1:
+		ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
+		ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
+		ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
+		break;
+	case SEC_LEVEL_0:
+	default:
+		break;
+	}
+}
+
+static void ipw_adhoc_check(void *data)
+{
+	struct ipw_priv *priv = data;
+
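+	/* Count consecutively missed IBSS beacons; once the disassociate
+	 * threshold is crossed (and ad-hoc persistence is off), tear the
+	 * network down instead of rescheduling this check. */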
+	if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
+	    !(priv->config & CFG_ADHOC_PERSIST)) {
+		IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
+			  IPW_DL_STATE | IPW_DL_ASSOC,
+			  "Missed beacon: %d - disassociate\n",
+			  priv->missed_adhoc_beacons);
+		ipw_remove_current_network(priv);
+		ipw_disassociate(priv);
+		return;
+	}
+
+	schedule_delayed_work(&priv->adhoc_check,
+			      le16_to_cpu(priv->assoc_request.beacon_interval));
+}
+
+static void ipw_bg_adhoc_check(struct work_struct *work)
+{
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, adhoc_check.work);
+	mutex_lock(&priv->mutex);
+	ipw_adhoc_check(priv);
+	mutex_unlock(&priv->mutex);
+}
+
+static void ipw_debug_config(struct ipw_priv *priv)
+{
+	IPW_DEBUG_INFO("Scan completed, no valid APs matched "
+		       "[CFG 0x%08X]\n", priv->config);
+	if (priv->config & CFG_STATIC_CHANNEL)
+		IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
+	else
+		IPW_DEBUG_INFO("Channel unlocked.\n");
+	if (priv->config & CFG_STATIC_ESSID)
+		IPW_DEBUG_INFO("ESSID locked to '%*pE'\n",
+			       priv->essid_len, priv->essid);
+	else
+		IPW_DEBUG_INFO("ESSID unlocked.\n");
+	if (priv->config & CFG_STATIC_BSSID)
+		IPW_DEBUG_INFO("BSSID locked to %pM\n", priv->bssid);
+	else
+		IPW_DEBUG_INFO("BSSID unlocked.\n");
+	if (priv->capability & CAP_PRIVACY_ON)
+		IPW_DEBUG_INFO("PRIVACY on\n");
+	else
+		IPW_DEBUG_INFO("PRIVACY off\n");
+	IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
+}
+
+static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
+{
+	/* TODO: Verify that this works... */
+	struct ipw_fixed_rate fr = {};	/* zero 'reserved' before writing to HW */
+	u32 reg;
+	u16 mask = 0;
+	u16 new_tx_rates = priv->rates_mask;
+
+	/* Identify 'current FW band' and match it with the fixed
+	 * Tx rates */
+
+	switch (priv->ieee->freq_band) {
+	case LIBIPW_52GHZ_BAND:	/* A only */
+		/* IEEE_A */
+		if (priv->rates_mask & ~LIBIPW_OFDM_RATES_MASK) {
+			/* Invalid fixed rate mask */
+			IPW_DEBUG_WX
+			    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
+			new_tx_rates = 0;
+			break;
+		}
+
+		new_tx_rates >>= LIBIPW_OFDM_SHIFT_MASK_A;
+		break;
+
+	default:		/* 2.4Ghz or Mixed */
+		/* IEEE_B */
+		if (mode == IEEE_B) {
+			if (new_tx_rates & ~LIBIPW_CCK_RATES_MASK) {
+				/* Invalid fixed rate mask */
+				IPW_DEBUG_WX
+				    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
+				new_tx_rates = 0;
+			}
+			break;
+		}
+
+		/* IEEE_G */
+		if (new_tx_rates & ~(LIBIPW_CCK_RATES_MASK |
+				    LIBIPW_OFDM_RATES_MASK)) {
+			/* Invalid fixed rate mask */
+			IPW_DEBUG_WX
+			    ("invalid fixed rate mask in ipw_set_fixed_rate\n");
+			new_tx_rates = 0;
+			break;
+		}
+
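+		/* The 6, 9 and 12Mb OFDM bits are folded down one bit
+		 * position, presumably to match the firmware's packed
+		 * CCK+OFDM rate mask layout in G mode. */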
+		if (LIBIPW_OFDM_RATE_6MB_MASK & new_tx_rates) {
+			mask |= (LIBIPW_OFDM_RATE_6MB_MASK >> 1);
+			new_tx_rates &= ~LIBIPW_OFDM_RATE_6MB_MASK;
+		}
+
+		if (LIBIPW_OFDM_RATE_9MB_MASK & new_tx_rates) {
+			mask |= (LIBIPW_OFDM_RATE_9MB_MASK >> 1);
+			new_tx_rates &= ~LIBIPW_OFDM_RATE_9MB_MASK;
+		}
+
+		if (LIBIPW_OFDM_RATE_12MB_MASK & new_tx_rates) {
+			mask |= (LIBIPW_OFDM_RATE_12MB_MASK >> 1);
+			new_tx_rates &= ~LIBIPW_OFDM_RATE_12MB_MASK;
+		}
+
+		new_tx_rates |= mask;
+		break;
+	}
+
+	fr.tx_rates = cpu_to_le16(new_tx_rates);
+
+	reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
+	ipw_write_reg32(priv, reg, *(u32 *)&fr);
+}
+
+static void ipw_abort_scan(struct ipw_priv *priv)
+{
+	int err;
+
+	if (priv->status & STATUS_SCAN_ABORTING) {
+		IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
+		return;
+	}
+	priv->status |= STATUS_SCAN_ABORTING;
+
+	err = ipw_send_scan_abort(priv);
+	if (err)
+		IPW_DEBUG_HC("Request to abort scan failed.\n");
+}
+
+static void ipw_add_scan_channels(struct ipw_priv *priv,
+				  struct ipw_scan_request_ext *scan,
+				  int scan_type)
+{
+	int channel_index = 0;
+	const struct libipw_geo *geo;
+	int i;
+
+	geo = libipw_get_geo(priv->ieee);
+
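+	/* channels_list[] is built as one segment per band: a header byte
+	 * of (band mode << 6) | channel count, followed by that many
+	 * channel numbers.  'start' marks where each header byte is
+	 * written once the segment's count is known. */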
+	if (priv->ieee->freq_band & LIBIPW_52GHZ_BAND) {
+		int start = channel_index;
+		for (i = 0; i < geo->a_channels; i++) {
+			if ((priv->status & STATUS_ASSOCIATED) &&
+			    geo->a[i].channel == priv->channel)
+				continue;
+			channel_index++;
+			scan->channels_list[channel_index] = geo->a[i].channel;
+			ipw_set_scan_type(scan, channel_index,
+					  geo->a[i].
+					  flags & LIBIPW_CH_PASSIVE_ONLY ?
+					  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
+					  scan_type);
+		}
+
+		if (start != channel_index) {
+			scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
+			    (channel_index - start);
+			channel_index++;
+		}
+	}
+
+	if (priv->ieee->freq_band & LIBIPW_24GHZ_BAND) {
+		int start = channel_index;
+		if (priv->config & CFG_SPEED_SCAN) {
+			int index;
+			u8 channels[LIBIPW_24GHZ_CHANNELS] = {
+				/* zero the whole list */
+				[0] = 0
+			};
+
+			u8 channel;
+			while (channel_index < IPW_SCAN_CHANNELS - 1) {
+				channel =
+				    priv->speed_scan[priv->speed_scan_pos];
+				if (channel == 0) {
+					priv->speed_scan_pos = 0;
+					channel = priv->speed_scan[0];
+				}
+				if ((priv->status & STATUS_ASSOCIATED) &&
+				    channel == priv->channel) {
+					priv->speed_scan_pos++;
+					continue;
+				}
+
+				/* If this channel has already been
+				 * added to this scan, break out of
+				 * the loop; it will be the first
+				 * channel in the next scan.
+				 */
+				if (channels[channel - 1] != 0)
+					break;
+
+				channels[channel - 1] = 1;
+				priv->speed_scan_pos++;
+				channel_index++;
+				scan->channels_list[channel_index] = channel;
+				index =
+				    libipw_channel_to_index(priv->ieee, channel);
+				ipw_set_scan_type(scan, channel_index,
+						  geo->bg[index].
+						  flags &
+						  LIBIPW_CH_PASSIVE_ONLY ?
+						  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
+						  : scan_type);
+			}
+		} else {
+			for (i = 0; i < geo->bg_channels; i++) {
+				if ((priv->status & STATUS_ASSOCIATED) &&
+				    geo->bg[i].channel == priv->channel)
+					continue;
+				channel_index++;
+				scan->channels_list[channel_index] =
+				    geo->bg[i].channel;
+				ipw_set_scan_type(scan, channel_index,
+						  geo->bg[i].
+						  flags &
+						  LIBIPW_CH_PASSIVE_ONLY ?
+						  IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
+						  : scan_type);
+			}
+		}
+
+		if (start != channel_index) {
+			scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
+			    (channel_index - start);
+		}
+	}
+}
+
+static int ipw_passive_dwell_time(struct ipw_priv *priv)
+{
+	/* staying on passive channels longer than the DTIM interval during a
+	 * scan, while associated, causes the firmware to cancel the scan
+	 * without notification. Hence, don't stay on passive channels longer
+	 * than the beacon interval.
+	 */
+	if (priv->status & STATUS_ASSOCIATED
+	    && priv->assoc_network->beacon_interval > 10)
+		return priv->assoc_network->beacon_interval - 10;
+	else
+		return 120;
+}
+
+static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct)
+{
+	struct ipw_scan_request_ext scan;
+	int err = 0, scan_type;
+
+	if (!(priv->status & STATUS_INIT) ||
+	    (priv->status & STATUS_EXIT_PENDING))
+		return 0;
+
+	mutex_lock(&priv->mutex);
+
+	if (direct && (priv->direct_scan_ssid_len == 0)) {
+		IPW_DEBUG_HC("Direct scan requested but no SSID to scan for\n");
+		priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
+		goto done;
+	}
+
+	if (priv->status & STATUS_SCANNING) {
+		IPW_DEBUG_HC("Concurrent scan requested.  Queuing.\n");
+		priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
+					STATUS_SCAN_PENDING;
+		goto done;
+	}
+
+	if (!(priv->status & STATUS_SCAN_FORCED) &&
+	    priv->status & STATUS_SCAN_ABORTING) {
+		IPW_DEBUG_HC("Scan request while abort pending.  Queuing.\n");
+		priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
+					STATUS_SCAN_PENDING;
+		goto done;
+	}
+
+	if (priv->status & STATUS_RF_KILL_MASK) {
+		IPW_DEBUG_HC("Queuing scan due to RF Kill activation\n");
+		priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
+					STATUS_SCAN_PENDING;
+		goto done;
+	}
+
+	memset(&scan, 0, sizeof(scan));
+	scan.full_scan_index = cpu_to_le32(libipw_get_scans(priv->ieee));
+
+	if (type == IW_SCAN_TYPE_PASSIVE) {
+		IPW_DEBUG_WX("use passive scanning\n");
+		scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
+		scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
+			cpu_to_le16(ipw_passive_dwell_time(priv));
+		ipw_add_scan_channels(priv, &scan, scan_type);
+		goto send_request;
+	}
+
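+	/* dwell_time[] is indexed by scan type; each entry sets how long
+	 * the firmware dwells on each channel for scans of that type. */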
+	/* Use active scan by default. */
+	if (priv->config & CFG_SPEED_SCAN)
+		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
+			cpu_to_le16(30);
+	else
+		scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
+			cpu_to_le16(20);
+
+	scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
+		cpu_to_le16(20);
+
+	scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
+		cpu_to_le16(ipw_passive_dwell_time(priv));
+	scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
+
+#ifdef CONFIG_IPW2200_MONITOR
+	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
+		u8 channel;
+		u8 band = 0;
+
+		switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
+		case LIBIPW_52GHZ_BAND:
+			band = (u8) (IPW_A_MODE << 6) | 1;
+			channel = priv->channel;
+			break;
+
+		case LIBIPW_24GHZ_BAND:
+			band = (u8) (IPW_B_MODE << 6) | 1;
+			channel = priv->channel;
+			break;
+
+		default:
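+			/* The configured channel is not valid on any
+			 * band; fall back to a default 2.4GHz channel. */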
+			band = (u8) (IPW_B_MODE << 6) | 1;
+			channel = 9;
+			break;
+		}
+
+		scan.channels_list[0] = band;
+		scan.channels_list[1] = channel;
+		ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
+
+		/* NOTE:  The card will sit on this channel for this time
+		 * period.  Scan aborts are timing sensitive and frequently
+		 * result in firmware restarts.  As such, it is best to
+		 * set a small dwell_time here and just keep re-issuing
+		 * scans.  Otherwise fast channel hopping will not actually
+		 * hop channels.
+		 *
+		 * TODO: Move SPEED SCAN support to all modes and bands */
+		scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
+			cpu_to_le16(2000);
+	} else {
+#endif				/* CONFIG_IPW2200_MONITOR */
+		/* Honor direct scans first, otherwise if we are roaming make
+		 * this a direct scan for the current network.  Finally,
+		 * ensure that every other scan is a fast channel hop scan */
+		if (direct) {
+			err = ipw_send_ssid(priv, priv->direct_scan_ssid,
+			                    priv->direct_scan_ssid_len);
+			if (err) {
+				IPW_DEBUG_HC("Attempt to send SSID command "
+					     "failed.\n");
+				goto done;
+			}
+
+			scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
+		} else if ((priv->status & STATUS_ROAMING)
+			   || (!(priv->status & STATUS_ASSOCIATED)
+			       && (priv->config & CFG_STATIC_ESSID)
+			       && (le32_to_cpu(scan.full_scan_index) % 2))) {
+			err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
+			if (err) {
+				IPW_DEBUG_HC("Attempt to send SSID command "
+					     "failed.\n");
+				goto done;
+			}
+
+			scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
+		} else
+			scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
+
+		ipw_add_scan_channels(priv, &scan, scan_type);
+#ifdef CONFIG_IPW2200_MONITOR
+	}
+#endif
+
+send_request:
+	err = ipw_send_scan_request_ext(priv, &scan);
+	if (err) {
+		IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
+		goto done;
+	}
+
+	priv->status |= STATUS_SCANNING;
+	if (direct) {
+		priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
+		priv->direct_scan_ssid_len = 0;
+	} else
+		priv->status &= ~STATUS_SCAN_PENDING;
+
+	schedule_delayed_work(&priv->scan_check, IPW_SCAN_CHECK_WATCHDOG);
+done:
+	mutex_unlock(&priv->mutex);
+	return err;
+}
+
+static void ipw_request_passive_scan(struct work_struct *work)
+{
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, request_passive_scan.work);
+	ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE, 0);
+}
+
+static void ipw_request_scan(struct work_struct *work)
+{
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, request_scan.work);
+	ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 0);
+}
+
+static void ipw_request_direct_scan(struct work_struct *work)
+{
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, request_direct_scan.work);
+	ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 1);
+}
+
+static void ipw_bg_abort_scan(struct work_struct *work)
+{
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, abort_scan);
+	mutex_lock(&priv->mutex);
+	ipw_abort_scan(priv);
+	mutex_unlock(&priv->mutex);
+}
+
+static int ipw_wpa_enable(struct ipw_priv *priv, int value)
+{
+	/* This is called when wpa_supplicant loads and closes the driver
+	 * interface. */
+	priv->ieee->wpa_enabled = value;
+	return 0;
+}
+
+static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
+{
+	struct libipw_device *ieee = priv->ieee;
+	struct libipw_security sec = {
+		.flags = SEC_AUTH_MODE,
+	};
+	int ret = 0;
+
+	if (value & IW_AUTH_ALG_SHARED_KEY) {
+		sec.auth_mode = WLAN_AUTH_SHARED_KEY;
+		ieee->open_wep = 0;
+	} else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
+		sec.auth_mode = WLAN_AUTH_OPEN;
+		ieee->open_wep = 1;
+	} else if (value & IW_AUTH_ALG_LEAP) {
+		sec.auth_mode = WLAN_AUTH_LEAP;
+		ieee->open_wep = 1;
+	} else
+		return -EINVAL;
+
+	if (ieee->set_security)
+		ieee->set_security(ieee->dev, &sec);
+	else
+		ret = -EOPNOTSUPP;
+
+	return ret;
+}
+
+static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
+				int wpa_ie_len)
+{
+	/* make sure WPA is enabled */
+	ipw_wpa_enable(priv, 1);
+}
+
+static int ipw_set_rsn_capa(struct ipw_priv *priv,
+			    char *capabilities, int length)
+{
+	IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
+
+	return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
+				capabilities);
+}
+
+/*
+ * WE-18 support
+ */
+
+/* SIOCSIWGENIE */
+static int ipw_wx_set_genie(struct net_device *dev,
+			    struct iw_request_info *info,
+			    union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	struct libipw_device *ieee = priv->ieee;
+	u8 *buf;
+	int err = 0;
+
+	if (wrqu->data.length > MAX_WPA_IE_LEN ||
+	    (wrqu->data.length && extra == NULL))
+		return -EINVAL;
+
+	if (wrqu->data.length) {
+		buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL);
+		if (buf == NULL) {
+			err = -ENOMEM;
+			goto out;
+		}
+
+		kfree(ieee->wpa_ie);
+		ieee->wpa_ie = buf;
+		ieee->wpa_ie_len = wrqu->data.length;
+	} else {
+		kfree(ieee->wpa_ie);
+		ieee->wpa_ie = NULL;
+		ieee->wpa_ie_len = 0;
+	}
+
+	ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
+out:
+	return err;
+}
+
+/* SIOCGIWGENIE */
+static int ipw_wx_get_genie(struct net_device *dev,
+			    struct iw_request_info *info,
+			    union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	struct libipw_device *ieee = priv->ieee;
+	int err = 0;
+
+	if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
+		wrqu->data.length = 0;
+		goto out;
+	}
+
+	if (wrqu->data.length < ieee->wpa_ie_len) {
+		err = -E2BIG;
+		goto out;
+	}
+
+	wrqu->data.length = ieee->wpa_ie_len;
+	memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
+
+out:
+	return err;
+}
+
+static int wext_cipher2level(int cipher)
+{
+	switch (cipher) {
+	case IW_AUTH_CIPHER_NONE:
+		return SEC_LEVEL_0;
+	case IW_AUTH_CIPHER_WEP40:
+	case IW_AUTH_CIPHER_WEP104:
+		return SEC_LEVEL_1;
+	case IW_AUTH_CIPHER_TKIP:
+		return SEC_LEVEL_2;
+	case IW_AUTH_CIPHER_CCMP:
+		return SEC_LEVEL_3;
+	default:
+		return -1;
+	}
+}
+
+/* SIOCSIWAUTH */
+static int ipw_wx_set_auth(struct net_device *dev,
+			   struct iw_request_info *info,
+			   union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	struct libipw_device *ieee = priv->ieee;
+	struct iw_param *param = &wrqu->param;
+	struct lib80211_crypt_data *crypt;
+	unsigned long flags;
+	int ret = 0;
+
+	switch (param->flags & IW_AUTH_INDEX) {
+	case IW_AUTH_WPA_VERSION:
+		break;
+	case IW_AUTH_CIPHER_PAIRWISE:
+		ipw_set_hw_decrypt_unicast(priv,
+					   wext_cipher2level(param->value));
+		break;
+	case IW_AUTH_CIPHER_GROUP:
+		ipw_set_hw_decrypt_multicast(priv,
+					     wext_cipher2level(param->value));
+		break;
+	case IW_AUTH_KEY_MGMT:
+		/*
+		 * ipw2200 does not use these parameters
+		 */
+		break;
+
+	case IW_AUTH_TKIP_COUNTERMEASURES:
+		crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
+		if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
+			break;
+
+		flags = crypt->ops->get_flags(crypt->priv);
+
+		if (param->value)
+			flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
+		else
+			flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
+
+		crypt->ops->set_flags(flags, crypt->priv);
+
+		break;
+
+	case IW_AUTH_DROP_UNENCRYPTED:{
+			/* HACK:
+			 *
+			 * wpa_supplicant calls set_wpa_enabled when the driver
+			 * is loaded and unloaded, regardless of whether WPA is
+			 * being used.  No other calls are made before an
+			 * association is expected that could tell us whether
+			 * encryption will be used.  If encryption is not being
+			 * used, drop_unencrypted is set to false, else true -- we
+			 * can use this to determine whether the CAP_PRIVACY_ON
+			 * bit should be set.
+			 */
+			struct libipw_security sec = {
+				.flags = SEC_ENABLED,
+				.enabled = param->value,
+			};
+			priv->ieee->drop_unencrypted = param->value;
+			/* We only change SEC_LEVEL for open mode. Others
+			 * are set by ipw_wpa_set_encryption.
+			 */
+			if (!param->value) {
+				sec.flags |= SEC_LEVEL;
+				sec.level = SEC_LEVEL_0;
+			} else {
+				sec.flags |= SEC_LEVEL;
+				sec.level = SEC_LEVEL_1;
+			}
+			if (priv->ieee->set_security)
+				priv->ieee->set_security(priv->ieee->dev, &sec);
+			break;
+		}
+
+	case IW_AUTH_80211_AUTH_ALG:
+		ret = ipw_wpa_set_auth_algs(priv, param->value);
+		break;
+
+	case IW_AUTH_WPA_ENABLED:
+		ret = ipw_wpa_enable(priv, param->value);
+		ipw_disassociate(priv);
+		break;
+
+	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+		ieee->ieee802_1x = param->value;
+		break;
+
+	case IW_AUTH_PRIVACY_INVOKED:
+		ieee->privacy_invoked = param->value;
+		break;
+
+	default:
+		return -EOPNOTSUPP;
+	}
+	return ret;
+}
+
+/* SIOCGIWAUTH */
+static int ipw_wx_get_auth(struct net_device *dev,
+			   struct iw_request_info *info,
+			   union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	struct libipw_device *ieee = priv->ieee;
+	struct lib80211_crypt_data *crypt;
+	struct iw_param *param = &wrqu->param;
+
+	switch (param->flags & IW_AUTH_INDEX) {
+	case IW_AUTH_WPA_VERSION:
+	case IW_AUTH_CIPHER_PAIRWISE:
+	case IW_AUTH_CIPHER_GROUP:
+	case IW_AUTH_KEY_MGMT:
+		/*
+		 * wpa_supplicant will control these internally
+		 */
+		return -EOPNOTSUPP;
+
+	case IW_AUTH_TKIP_COUNTERMEASURES:
+		crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
+		if (!crypt || !crypt->ops->get_flags)
+			break;
+
+		param->value = (crypt->ops->get_flags(crypt->priv) &
+				IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
+
+		break;
+
+	case IW_AUTH_DROP_UNENCRYPTED:
+		param->value = ieee->drop_unencrypted;
+		break;
+
+	case IW_AUTH_80211_AUTH_ALG:
+		param->value = ieee->sec.auth_mode;
+		break;
+
+	case IW_AUTH_WPA_ENABLED:
+		param->value = ieee->wpa_enabled;
+		break;
+
+	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+		param->value = ieee->ieee802_1x;
+		break;
+
+	case IW_AUTH_ROAMING_CONTROL:
+	case IW_AUTH_PRIVACY_INVOKED:
+		param->value = ieee->privacy_invoked;
+		break;
+
+	default:
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
+/* SIOCSIWENCODEEXT */
+static int ipw_wx_set_encodeext(struct net_device *dev,
+				struct iw_request_info *info,
+				union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
+
+	if (hwcrypto) {
+		if (ext->alg == IW_ENCODE_ALG_TKIP) {
+			/* IPW HW can't build TKIP MIC,
+			   host decryption still needed */
+			if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
+				priv->ieee->host_mc_decrypt = 1;
+			else {
+				priv->ieee->host_encrypt = 0;
+				priv->ieee->host_encrypt_msdu = 1;
+				priv->ieee->host_decrypt = 1;
+			}
+		} else {
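+			/* Non-TKIP algorithms (WEP, CCMP) can be handled
+			 * entirely by the hardware; disable all host
+			 * crypto paths. */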
+			priv->ieee->host_encrypt = 0;
+			priv->ieee->host_encrypt_msdu = 0;
+			priv->ieee->host_decrypt = 0;
+			priv->ieee->host_mc_decrypt = 0;
+		}
+	}
+
+	return libipw_wx_set_encodeext(priv->ieee, info, wrqu, extra);
+}
+
+/* SIOCGIWENCODEEXT */
+static int ipw_wx_get_encodeext(struct net_device *dev,
+				struct iw_request_info *info,
+				union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	return libipw_wx_get_encodeext(priv->ieee, info, wrqu, extra);
+}
+
+/* SIOCSIWMLME */
+static int ipw_wx_set_mlme(struct net_device *dev,
+			   struct iw_request_info *info,
+			   union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	struct iw_mlme *mlme = (struct iw_mlme *)extra;
+
+	switch (mlme->cmd) {
+	case IW_MLME_DEAUTH:
+		/* silently ignore */
+		break;
+
+	case IW_MLME_DISASSOC:
+		ipw_disassociate(priv);
+		break;
+
+	default:
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
+#ifdef CONFIG_IPW2200_QOS
+
+/* QoS */
+/*
+ * Get the modulation type of the current network or
+ * the card's current mode.
+ */
+static u8 ipw_qos_current_mode(struct ipw_priv *priv)
+{
+	u8 mode = 0;
+
+	if (priv->status & STATUS_ASSOCIATED) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&priv->ieee->lock, flags);
+		mode = priv->assoc_network->mode;
+		spin_unlock_irqrestore(&priv->ieee->lock, flags);
+	} else {
+		mode = priv->ieee->mode;
+	}
+	IPW_DEBUG_QOS("QoS network/card mode %d\n", mode);
+	return mode;
+}
+
+/*
+ * Handle management frames: beacon and probe response.
+ */
+static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
+					 int active_network,
+					 struct libipw_network *network)
+{
+	u32 size = sizeof(struct libipw_qos_parameters);
+
+	if (network->capability & WLAN_CAPABILITY_IBSS)
+		network->qos_data.active = network->qos_data.supported;
+
+	if (network->flags & NETWORK_HAS_QOS_MASK) {
+		if (active_network &&
+		    (network->flags & NETWORK_HAS_QOS_PARAMETERS))
+			network->qos_data.active = network->qos_data.supported;
+
+		if ((network->qos_data.active == 1) && (active_network == 1) &&
+		    (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
+		    (network->qos_data.old_param_count !=
+		     network->qos_data.param_count)) {
+			network->qos_data.old_param_count =
+			    network->qos_data.param_count;
+			schedule_work(&priv->qos_activate);
+			IPW_DEBUG_QOS("QoS parameters change call "
+				      "qos_activate\n");
+		}
+	} else {
+		if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
+			memcpy(&network->qos_data.parameters,
+			       &def_parameters_CCK, size);
+		else
+			memcpy(&network->qos_data.parameters,
+			       &def_parameters_OFDM, size);
+
+		if ((network->qos_data.active == 1) && (active_network == 1)) {
+			IPW_DEBUG_QOS("QoS was disabled call qos_activate\n");
+			schedule_work(&priv->qos_activate);
+		}
+
+		network->qos_data.active = 0;
+		network->qos_data.supported = 0;
+	}
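+	/* In IBSS, a beacon from a different BSSID that carries our SSID
+	 * means the ad-hoc network has split; schedule a merge. */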
+	if ((priv->status & STATUS_ASSOCIATED) &&
+	    (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
+		if (!ether_addr_equal(network->bssid, priv->bssid))
+			if (network->capability & WLAN_CAPABILITY_IBSS)
+				if ((network->ssid_len ==
+				     priv->assoc_network->ssid_len) &&
+				    !memcmp(network->ssid,
+					    priv->assoc_network->ssid,
+					    network->ssid_len)) {
+					schedule_work(&priv->merge_networks);
+				}
+	}
+
+	return 0;
+}
+
+/*
+ * This function sets up the firmware to support QoS.  It sends
+ * IPW_CMD_QOS_PARAMETERS; IPW_CMD_WME_INFO is sent separately by
+ * ipw_qos_set_info_element().
+ */
+static int ipw_qos_activate(struct ipw_priv *priv,
+			    struct libipw_qos_data *qos_network_data)
+{
+	int err;
+	struct libipw_qos_parameters qos_parameters[QOS_QOS_SETS];
+	struct libipw_qos_parameters *active_one = NULL;
+	u32 size = sizeof(struct libipw_qos_parameters);
+	u32 burst_duration;
+	int i;
+	u8 type;
+
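+	/* qos_parameters[] holds the three parameter sets downloaded to
+	 * the firmware together: CCK defaults, OFDM defaults, and the
+	 * currently active set. */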
+	type = ipw_qos_current_mode(priv);
+
+	active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
+	memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
+	active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
+	memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
+
+	if (qos_network_data == NULL) {
+		if (type == IEEE_B) {
+			IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
+			active_one = &def_parameters_CCK;
+		} else
+			active_one = &def_parameters_OFDM;
+
+		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
+		burst_duration = ipw_qos_get_burst_duration(priv);
+		for (i = 0; i < QOS_QUEUE_NUM; i++)
+			qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
+			    cpu_to_le16(burst_duration);
+	} else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
+		if (type == IEEE_B) {
+			IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
+				      type);
+			if (priv->qos_data.qos_enable == 0)
+				active_one = &def_parameters_CCK;
+			else
+				active_one = priv->qos_data.def_qos_parm_CCK;
+		} else {
+			if (priv->qos_data.qos_enable == 0)
+				active_one = &def_parameters_OFDM;
+			else
+				active_one = priv->qos_data.def_qos_parm_OFDM;
+		}
+		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
+	} else {
+		unsigned long flags;
+		int active;
+
+		spin_lock_irqsave(&priv->ieee->lock, flags);
+		active_one = &(qos_network_data->parameters);
+		qos_network_data->old_param_count =
+		    qos_network_data->param_count;
+		memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
+		active = qos_network_data->supported;
+		spin_unlock_irqrestore(&priv->ieee->lock, flags);
+
+		if (active == 0) {
+			burst_duration = ipw_qos_get_burst_duration(priv);
+			for (i = 0; i < QOS_QUEUE_NUM; i++)
+				qos_parameters[QOS_PARAM_SET_ACTIVE].
+				    tx_op_limit[i] = cpu_to_le16(burst_duration);
+		}
+	}
+
+	IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
+	err = ipw_send_qos_params_command(priv, &qos_parameters[0]);
+	if (err)
+		IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
+
+	return err;
+}
+
+/*
+ * Send IPW_CMD_WME_INFO to the firmware.
+ */
+static int ipw_qos_set_info_element(struct ipw_priv *priv)
+{
+	int ret = 0;
+	struct libipw_qos_information_element qos_info;
+
+	if (priv == NULL)
+		return -1;
+
+	qos_info.elementID = QOS_ELEMENT_ID;
+	qos_info.length = sizeof(struct libipw_qos_information_element) - 2;
+
+	qos_info.version = QOS_VERSION_1;
+	qos_info.ac_info = 0;
+
+	memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
+	qos_info.qui_type = QOS_OUI_TYPE;
+	qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
+
+	ret = ipw_send_qos_info_command(priv, &qos_info);
+	if (ret != 0) {
+		IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
+	}
+	return ret;
+}
+
+/*
+ * Set up the QoS parameters for the association request.
+ */
+static int ipw_qos_association(struct ipw_priv *priv,
+			       struct libipw_network *network)
+{
+	int err = 0;
+	struct libipw_qos_data *qos_data = NULL;
+	struct libipw_qos_data ibss_data = {
+		.supported = 1,
+		.active = 1,
+	};
+
+	switch (priv->ieee->iw_mode) {
+	case IW_MODE_ADHOC:
+		BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
+
+		qos_data = &ibss_data;
+		break;
+
+	case IW_MODE_INFRA:
+		qos_data = &network->qos_data;
+		break;
+
+	default:
+		BUG();
+		break;
+	}
+
+	err = ipw_qos_activate(priv, qos_data);
+	if (err) {
+		priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
+		return err;
+	}
+
+	if (priv->qos_data.qos_enable && qos_data->supported) {
+		IPW_DEBUG_QOS("QoS will be enabled for this association\n");
+		priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
+		return ipw_qos_set_info_element(priv);
+	}
+
+	return 0;
+}
+
+/*
+ * Handle association responses: if the QoS settings advertised by the
+ * network differ from those recorded at association time, adjust our
+ * QoS settings.
+ */
+static int ipw_qos_association_resp(struct ipw_priv *priv,
+				    struct libipw_network *network)
+{
+	int ret = 0;
+	unsigned long flags;
+	u32 size = sizeof(struct libipw_qos_parameters);
+	int set_qos_param = 0;
+
+	if ((priv == NULL) || (network == NULL) ||
+	    (priv->assoc_network == NULL))
+		return ret;
+
+	if (!(priv->status & STATUS_ASSOCIATED))
+		return ret;
+
+	if (priv->ieee->iw_mode != IW_MODE_INFRA)
+		return ret;
+
+	spin_lock_irqsave(&priv->ieee->lock, flags);
+	if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
+		memcpy(&priv->assoc_network->qos_data, &network->qos_data,
+		       sizeof(struct libipw_qos_data));
+		priv->assoc_network->qos_data.active = 1;
+		if ((network->qos_data.old_param_count !=
+		     network->qos_data.param_count)) {
+			set_qos_param = 1;
+			network->qos_data.old_param_count =
+			    network->qos_data.param_count;
+		}
+
+	} else {
+		if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
+			memcpy(&priv->assoc_network->qos_data.parameters,
+			       &def_parameters_CCK, size);
+		else
+			memcpy(&priv->assoc_network->qos_data.parameters,
+			       &def_parameters_OFDM, size);
+		priv->assoc_network->qos_data.active = 0;
+		priv->assoc_network->qos_data.supported = 0;
+		set_qos_param = 1;
+	}
+
+	spin_unlock_irqrestore(&priv->ieee->lock, flags);
+
+	if (set_qos_param == 1)
+		schedule_work(&priv->qos_activate);
+
+	return ret;
+}
+
+static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
+{
+	u32 ret = 0;
+
+	if (!priv)
+		return 0;
+
+	if (!(priv->ieee->modulation & LIBIPW_OFDM_MODULATION))
+		ret = priv->qos_data.burst_duration_CCK;
+	else
+		ret = priv->qos_data.burst_duration_OFDM;
+
+	return ret;
+}
+
+/*
+ * Initialize the global QoS settings.
+ */
+static void ipw_qos_init(struct ipw_priv *priv, int enable,
+			 int burst_enable, u32 burst_duration_CCK,
+			 u32 burst_duration_OFDM)
+{
+	priv->qos_data.qos_enable = enable;
+
+	if (priv->qos_data.qos_enable) {
+		priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
+		priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
+		IPW_DEBUG_QOS("QoS is enabled\n");
+	} else {
+		priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
+		priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
+		IPW_DEBUG_QOS("QoS is not enabled\n");
+	}
+
+	priv->qos_data.burst_enable = burst_enable;
+
+	if (burst_enable) {
+		priv->qos_data.burst_duration_CCK = burst_duration_CCK;
+		priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
+	} else {
+		priv->qos_data.burst_duration_CCK = 0;
+		priv->qos_data.burst_duration_OFDM = 0;
+	}
+}
+
+/*
+ * Map the packet priority to the right TX queue.
+ */
+static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
+{
+	if (priority > 7 || !priv->qos_data.qos_enable)
+		priority = 0;
+
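+	/* from_priority_to_tx_queue[] appears to be 1-based, hence the -1
+	 * to get a zero-based hardware queue index. */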
+	return from_priority_to_tx_queue[priority] - 1;
+}
+
+static int ipw_is_qos_active(struct net_device *dev,
+			     struct sk_buff *skb)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	struct libipw_qos_data *qos_data = NULL;
+	int active, supported;
+	u8 *daddr = skb->data + ETH_ALEN;
+	int unicast = !is_multicast_ether_addr(daddr);
+
+	if (!(priv->status & STATUS_ASSOCIATED))
+		return 0;
+
+	qos_data = &priv->assoc_network->qos_data;
+
+	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
+		if (unicast == 0)
+			qos_data->active = 0;
+		else
+			qos_data->active = qos_data->supported;
+	}
+	active = qos_data->active;
+	supported = qos_data->supported;
+	IPW_DEBUG_QOS("QoS enabled %d, network QoS active %d, supported %d, "
+		      "unicast %d\n",
+		      priv->qos_data.qos_enable, active, supported, unicast);
+	if (active && priv->qos_data.qos_enable)
+		return 1;
+
+	return 0;
+}
+
+/*
+ * Add QoS parameters to the TX command.
+ */
+static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
+					u16 priority,
+					struct tfd_data *tfd)
+{
+	int tx_queue_id = 0;
+
+	tx_queue_id = from_priority_to_tx_queue[priority] - 1;
+	tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
+
+	if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
+		tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
+		tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
+	}
+	return 0;
+}
+
+/*
+ * Background work to run the QoS activate functionality.
+ */
+static void ipw_bg_qos_activate(struct work_struct *work)
+{
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, qos_activate);
+
+	mutex_lock(&priv->mutex);
+
+	if (priv->status & STATUS_ASSOCIATED)
+		ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
+
+	mutex_unlock(&priv->mutex);
+}
+
+static int ipw_handle_probe_response(struct net_device *dev,
+				     struct libipw_probe_response *resp,
+				     struct libipw_network *network)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	int active_network = ((priv->status & STATUS_ASSOCIATED) &&
+			      (network == priv->assoc_network));
+
+	ipw_qos_handle_probe_response(priv, active_network, network);
+
+	return 0;
+}
+
+static int ipw_handle_beacon(struct net_device *dev,
+			     struct libipw_beacon *resp,
+			     struct libipw_network *network)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	int active_network = ((priv->status & STATUS_ASSOCIATED) &&
+			      (network == priv->assoc_network));
+
+	ipw_qos_handle_probe_response(priv, active_network, network);
+
+	return 0;
+}
+
+static int ipw_handle_assoc_response(struct net_device *dev,
+				     struct libipw_assoc_response *resp,
+				     struct libipw_network *network)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	ipw_qos_association_resp(priv, network);
+	return 0;
+}
+
+static int ipw_send_qos_params_command(struct ipw_priv *priv,
+				       struct libipw_qos_parameters *qos_param)
+{
+	return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
+				sizeof(*qos_param) * 3, qos_param);
+}
+
+static int ipw_send_qos_info_command(struct ipw_priv *priv,
+				     struct libipw_qos_information_element
+				     *qos_param)
+{
+	return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
+				qos_param);
+}
+
+#endif				/* CONFIG_IPW2200_QOS */
+
+static int ipw_associate_network(struct ipw_priv *priv,
+				 struct libipw_network *network,
+				 struct ipw_supported_rates *rates, int roaming)
+{
+	int err;
+
+	if (priv->config & CFG_FIXED_RATE)
+		ipw_set_fixed_rate(priv, network->mode);
+
+	if (!(priv->config & CFG_STATIC_ESSID)) {
+		priv->essid_len = min(network->ssid_len,
+				      (u8) IW_ESSID_MAX_SIZE);
+		memcpy(priv->essid, network->ssid, priv->essid_len);
+	}
+
+	network->last_associate = jiffies;
+
+	memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
+	priv->assoc_request.channel = network->channel;
+	priv->assoc_request.auth_key = 0;
+
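+	/* Choose the 802.11 authentication algorithm: shared key and LEAP
+	 * require privacy to be enabled; otherwise fall back to open
+	 * system authentication. */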
+	if ((priv->capability & CAP_PRIVACY_ON) &&
+	    (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
+		priv->assoc_request.auth_type = AUTH_SHARED_KEY;
+		priv->assoc_request.auth_key = priv->ieee->sec.active_key;
+
+		if (priv->ieee->sec.level == SEC_LEVEL_1)
+			ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
+
+	} else if ((priv->capability & CAP_PRIVACY_ON) &&
+		   (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
+		priv->assoc_request.auth_type = AUTH_LEAP;
+	else
+		priv->assoc_request.auth_type = AUTH_OPEN;
+
+	if (priv->ieee->wpa_ie_len) {
+		priv->assoc_request.policy_support = cpu_to_le16(0x02);	/* RSN active */
+		ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
+				 priv->ieee->wpa_ie_len);
+	}
+
+	/*
+	 * It is valid for our ieee device to support multiple modes, but
+	 * when it comes to associating to a given network we have to choose
+	 * just one mode.
+	 */
+	if (network->mode & priv->ieee->mode & IEEE_A)
+		priv->assoc_request.ieee_mode = IPW_A_MODE;
+	else if (network->mode & priv->ieee->mode & IEEE_G)
+		priv->assoc_request.ieee_mode = IPW_G_MODE;
+	else if (network->mode & priv->ieee->mode & IEEE_B)
+		priv->assoc_request.ieee_mode = IPW_B_MODE;
+
+	priv->assoc_request.capability = cpu_to_le16(network->capability);
+	if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
+	    && !(priv->config & CFG_PREAMBLE_LONG)) {
+		priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
+	} else {
+		priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
+
+		/* Clear the short preamble if we won't be supporting it */
+		priv->assoc_request.capability &=
+		    ~cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
+	}
+
+	/* Clear capability bits that aren't used in Ad Hoc */
+	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
+		priv->assoc_request.capability &=
+		    ~cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
+
+	IPW_DEBUG_ASSOC("%ssociation attempt: '%*pE', channel %d, 802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
+			roaming ? "Rea" : "A",
+			priv->essid_len, priv->essid,
+			network->channel,
+			ipw_modes[priv->assoc_request.ieee_mode],
+			rates->num_rates,
+			(priv->assoc_request.preamble_length ==
+			 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
+			network->capability &
+			WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
+			priv->capability & CAP_PRIVACY_ON ? "on " : "off",
+			priv->capability & CAP_PRIVACY_ON ?
+			(priv->capability & CAP_SHARED_KEY ? "(shared)" :
+			 "(open)") : "",
+			priv->capability & CAP_PRIVACY_ON ? " key=" : "",
+			priv->capability & CAP_PRIVACY_ON ?
+			'1' + priv->ieee->sec.active_key : '.',
+			priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
+
+	priv->assoc_request.beacon_interval = cpu_to_le16(network->beacon_interval);
+	if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
+	    (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
+		priv->assoc_request.assoc_type = HC_IBSS_START;
+		priv->assoc_request.assoc_tsf_msw = 0;
+		priv->assoc_request.assoc_tsf_lsw = 0;
+	} else {
+		if (unlikely(roaming))
+			priv->assoc_request.assoc_type = HC_REASSOCIATE;
+		else
+			priv->assoc_request.assoc_type = HC_ASSOCIATE;
+		priv->assoc_request.assoc_tsf_msw = cpu_to_le32(network->time_stamp[1]);
+		priv->assoc_request.assoc_tsf_lsw = cpu_to_le32(network->time_stamp[0]);
+	}
+
+	memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
+
+	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
+		eth_broadcast_addr(priv->assoc_request.dest);
+		priv->assoc_request.atim_window = cpu_to_le16(network->atim_window);
+	} else {
+		memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
+		priv->assoc_request.atim_window = 0;
+	}
+
+	priv->assoc_request.listen_interval = cpu_to_le16(network->listen_interval);
+
+	err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
+	if (err) {
+		IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
+		return err;
+	}
+
+	rates->ieee_mode = priv->assoc_request.ieee_mode;
+	rates->purpose = IPW_RATE_CONNECT;
+	ipw_send_supported_rates(priv, rates);
+
+	if (priv->assoc_request.ieee_mode == IPW_G_MODE)
+		priv->sys_config.dot11g_auto_detection = 1;
+	else
+		priv->sys_config.dot11g_auto_detection = 0;
+
+	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
+		priv->sys_config.answer_broadcast_ssid_probe = 1;
+	else
+		priv->sys_config.answer_broadcast_ssid_probe = 0;
+
+	err = ipw_send_system_config(priv);
+	if (err) {
+		IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
+		return err;
+	}
+
+	IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
+	err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
+	if (err) {
+		IPW_DEBUG_HC("Attempt to send associate command failed.\n");
+		return err;
+	}
+
+	/*
+	 * If preemption is enabled, it is possible for the association
+	 * to complete before we return from ipw_send_associate.  Therefore
+	 * we have to be sure to update our private data first.
+	 */
+	priv->channel = network->channel;
+	memcpy(priv->bssid, network->bssid, ETH_ALEN);
+	priv->status |= STATUS_ASSOCIATING;
+	priv->status &= ~STATUS_SECURITY_UPDATED;
+
+	priv->assoc_network = network;
+
+#ifdef CONFIG_IPW2200_QOS
+	ipw_qos_association(priv, network);
+#endif
+
+	err = ipw_send_associate(priv, &priv->assoc_request);
+	if (err) {
+		IPW_DEBUG_HC("Attempt to send associate command failed.\n");
+		return err;
+	}
+
+	IPW_DEBUG(IPW_DL_STATE, "associating: '%*pE' %pM\n",
+		  priv->essid_len, priv->essid, priv->bssid);
+
+	return 0;
+}
+
+static void ipw_roam(void *data)
+{
+	struct ipw_priv *priv = data;
+	struct libipw_network *network = NULL;
+	struct ipw_network_match match = {
+		.network = priv->assoc_network
+	};
+
+	/* The roaming process is as follows:
+	 *
+	 * 1.  Missed beacon threshold triggers the roaming process by
+	 *     setting the status ROAM bit and requesting a scan.
+	 * 2.  When the scan completes, it schedules the ROAM work.
+	 * 3.  The ROAM work looks at all of the known networks for one that
+	 *     is a better network than the currently associated.  If none
+	 *     is found, the ROAM process is over (ROAM bit cleared).
+	 * 4.  If a better network is found, a disassociation request is
+	 *     sent.
+	 * 5.  When the disassociation completes, the roam work is again
+	 *     scheduled.  The second time through, the driver is no longer
+	 *     associated, and the newly selected network is sent an
+	 *     association request.
+	 * 6.  At this point, the roaming process is complete and the ROAM
+	 *     status bit is cleared.
+	 */
+
+	/* If we are no longer associated, and the roaming bit is no longer
+	 * set, then we are not actively roaming, so just return */
+	if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
+		return;
+
+	if (priv->status & STATUS_ASSOCIATED) {
+		/* First pass through ROAM process -- look for a better
+		 * network */
+		unsigned long flags;
+		u8 rssi = priv->assoc_network->stats.rssi;
+		priv->assoc_network->stats.rssi = -128;
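+		/* Temporarily floor the current network's RSSI so that
+		 * ipw_best_network() only matches networks with a strictly
+		 * stronger signal; the real value is restored below. */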
+		spin_lock_irqsave(&priv->ieee->lock, flags);
+		list_for_each_entry(network, &priv->ieee->network_list, list) {
+			if (network != priv->assoc_network)
+				ipw_best_network(priv, &match, network, 1);
+		}
+		spin_unlock_irqrestore(&priv->ieee->lock, flags);
+		priv->assoc_network->stats.rssi = rssi;
+
+		if (match.network == priv->assoc_network) {
+			IPW_DEBUG_ASSOC("No better APs in this network to "
+					"roam to.\n");
+			priv->status &= ~STATUS_ROAMING;
+			ipw_debug_config(priv);
+			return;
+		}
+
+		ipw_send_disassociate(priv, 1);
+		priv->assoc_network = match.network;
+
+		return;
+	}
+
+	/* Second pass through ROAM process -- request association */
+	ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
+	ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
+	priv->status &= ~STATUS_ROAMING;
+}
+
+static void ipw_bg_roam(struct work_struct *work)
+{
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, roam);
+	mutex_lock(&priv->mutex);
+	ipw_roam(priv);
+	mutex_unlock(&priv->mutex);
+}
+
+static int ipw_associate(void *data)
+{
+	struct ipw_priv *priv = data;
+
+	struct libipw_network *network = NULL;
+	struct ipw_network_match match = {
+		.network = NULL
+	};
+	struct ipw_supported_rates *rates;
+	struct list_head *element;
+	unsigned long flags;
+
+	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
+		IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
+		return 0;
+	}
+
+	if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
+		IPW_DEBUG_ASSOC("Not attempting association (already in "
+				"progress)\n");
+		return 0;
+	}
+
+	if (priv->status & STATUS_DISASSOCIATING) {
+		IPW_DEBUG_ASSOC("Not attempting association (in disassociating)\n");
+		schedule_work(&priv->associate);
+		return 0;
+	}
+
+	if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
+		IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
+				"initialized)\n");
+		return 0;
+	}
+
+	if (!(priv->config & CFG_ASSOCIATE) &&
+	    !(priv->config & (CFG_STATIC_ESSID | CFG_STATIC_BSSID))) {
+		IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
+		return 0;
+	}
+
+	/* Protect our use of the network_list */
+	spin_lock_irqsave(&priv->ieee->lock, flags);
+	list_for_each_entry(network, &priv->ieee->network_list, list)
+	    ipw_best_network(priv, &match, network, 0);
+
+	network = match.network;
+	rates = &match.rates;
+
+	if (network == NULL &&
+	    priv->ieee->iw_mode == IW_MODE_ADHOC &&
+	    priv->config & CFG_ADHOC_CREATE &&
+	    priv->config & CFG_STATIC_ESSID &&
+	    priv->config & CFG_STATIC_CHANNEL) {
+		/* Use oldest network if the free list is empty */
+		if (list_empty(&priv->ieee->network_free_list)) {
+			struct libipw_network *oldest = NULL;
+			struct libipw_network *target;
+
+			list_for_each_entry(target, &priv->ieee->network_list, list) {
+				if ((oldest == NULL) ||
+				    (target->last_scanned < oldest->last_scanned))
+					oldest = target;
+			}
+
+			/* If there are no more slots, expire the oldest */
+			list_del(&oldest->list);
+			target = oldest;
+			IPW_DEBUG_ASSOC("Expired '%*pE' (%pM) from network list.\n",
+					target->ssid_len, target->ssid,
+					target->bssid);
+			list_add_tail(&target->list,
+				      &priv->ieee->network_free_list);
+		}
+
+		element = priv->ieee->network_free_list.next;
+		network = list_entry(element, struct libipw_network, list);
+		ipw_adhoc_create(priv, network);
+		rates = &priv->rates;
+		list_del(element);
+		list_add_tail(&network->list, &priv->ieee->network_list);
+	}
+	spin_unlock_irqrestore(&priv->ieee->lock, flags);
+
+	/* If we reached the end of the list, then we don't have any valid
+	 * matching APs */
+	if (!network) {
+		ipw_debug_config(priv);
+
+		if (!(priv->status & STATUS_SCANNING)) {
+			if (!(priv->config & CFG_SPEED_SCAN))
+				schedule_delayed_work(&priv->request_scan,
+						      SCAN_INTERVAL);
+			else
+				schedule_delayed_work(&priv->request_scan, 0);
+		}
+
+		return 0;
+	}
+
+	ipw_associate_network(priv, network, rates, 0);
+
+	return 1;
+}
+
+static void ipw_bg_associate(struct work_struct *work)
+{
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, associate);
+	mutex_lock(&priv->mutex);
+	ipw_associate(priv);
+	mutex_unlock(&priv->mutex);
+}
+
+static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
+				      struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr;
+	u16 fc;
+
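+	/* The frame was decrypted in hardware, but the security header,
+	 * MIC/ICV, and the Protected bit are still in place; strip them
+	 * so the stack sees a plaintext frame. */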
+	hdr = (struct ieee80211_hdr *)skb->data;
+	fc = le16_to_cpu(hdr->frame_control);
+	if (!(fc & IEEE80211_FCTL_PROTECTED))
+		return;
+
+	fc &= ~IEEE80211_FCTL_PROTECTED;
+	hdr->frame_control = cpu_to_le16(fc);
+	switch (priv->ieee->sec.level) {
+	case SEC_LEVEL_3:
+		/* Remove CCMP HDR */
+		memmove(skb->data + LIBIPW_3ADDR_LEN,
+			skb->data + LIBIPW_3ADDR_LEN + 8,
+			skb->len - LIBIPW_3ADDR_LEN - 8);
+		skb_trim(skb, skb->len - 16);	/* CCMP_HDR_LEN + CCMP_MIC_LEN */
+		break;
+	case SEC_LEVEL_2:
+		break;
+	case SEC_LEVEL_1:
+		/* Remove IV */
+		memmove(skb->data + LIBIPW_3ADDR_LEN,
+			skb->data + LIBIPW_3ADDR_LEN + 4,
+			skb->len - LIBIPW_3ADDR_LEN - 4);
+		skb_trim(skb, skb->len - 8);	/* IV + ICV */
+		break;
+	case SEC_LEVEL_0:
+		break;
+	default:
+		printk(KERN_ERR "Unknown security level %d\n",
+		       priv->ieee->sec.level);
+		break;
+	}
+}
+
+static void ipw_handle_data_packet(struct ipw_priv *priv,
+				   struct ipw_rx_mem_buffer *rxb,
+				   struct libipw_rx_stats *stats)
+{
+	struct net_device *dev = priv->net_dev;
+	struct libipw_hdr_4addr *hdr;
+	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
+
+	/* We received data from the HW, so stop the watchdog */
+	netif_trans_update(dev);
+
+	/* We only process data packets if the
+	 * interface is open */
+	if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
+		     skb_tailroom(rxb->skb))) {
+		dev->stats.rx_errors++;
+		priv->wstats.discard.misc++;
+		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
+		return;
+	} else if (unlikely(!netif_running(priv->net_dev))) {
+		dev->stats.rx_dropped++;
+		priv->wstats.discard.misc++;
+		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
+		return;
+	}
+
+	/* Advance skb->data to the start of the actual payload */
+	skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
+
+	/* Set the size of the skb to the size of the frame */
+	skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
+
+	IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
+
+	/* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
+	hdr = (struct libipw_hdr_4addr *)rxb->skb->data;
+	if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
+	    (is_multicast_ether_addr(hdr->addr1) ?
+	     !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
+		ipw_rebuild_decrypted_skb(priv, rxb->skb);
+
+	if (!libipw_rx(priv->ieee, rxb->skb, stats))
+		dev->stats.rx_errors++;
+	else {			/* libipw_rx succeeded, so it now owns the SKB */
+		rxb->skb = NULL;
+		__ipw_led_activity_on(priv);
+	}
+}
+
+#ifdef CONFIG_IPW2200_RADIOTAP
+static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
+					   struct ipw_rx_mem_buffer *rxb,
+					   struct libipw_rx_stats *stats)
+{
+	struct net_device *dev = priv->net_dev;
+	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
+	struct ipw_rx_frame *frame = &pkt->u.frame;
+
+	/* initial pull of some data */
+	u16 received_channel = frame->received_channel;
+	u8 antennaAndPhy = frame->antennaAndPhy;
+	s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM;	/* call it signed anyhow */
+	u16 pktrate = frame->rate;
+
+	/* Magic struct that slots into the radiotap header -- no reason
+	 * to build this manually element by element, we can write it much
+	 * more efficiently than we can parse it. ORDER MATTERS HERE */
+	struct ipw_rt_hdr *ipw_rt;
+
+	unsigned short len = le16_to_cpu(pkt->u.frame.length);
+
+	/* We received data from the HW, so stop the watchdog */
+	netif_trans_update(dev);
+
+	/* Drop frames that don't fit in the skb, and only process
+	 * packets while the interface is up */
+	if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
+		     skb_tailroom(rxb->skb))) {
+		dev->stats.rx_errors++;
+		priv->wstats.discard.misc++;
+		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
+		return;
+	} else if (unlikely(!netif_running(priv->net_dev))) {
+		dev->stats.rx_dropped++;
+		priv->wstats.discard.misc++;
+		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
+		return;
+	}
+
+	/* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
+	 * that now */
+	if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
+		/* FIXME: Should alloc bigger skb instead */
+		dev->stats.rx_dropped++;
+		priv->wstats.discard.misc++;
+		IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
+		return;
+	}
+
+	/* Slide the payload down so it sits immediately after where the
+	 * radiotap header will be written; memmove handles the overlap */
+	memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
+		rxb->skb->data + IPW_RX_FRAME_SIZE, len);
+
+	ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
+
+	ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
+	ipw_rt->rt_hdr.it_pad = 0;	/* always good to zero */
+	ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct ipw_rt_hdr));	/* radiotap header length */
+
+	/* Big bitfield of all the fields we provide in radiotap */
+	ipw_rt->rt_hdr.it_present = cpu_to_le32(
+	     (1 << IEEE80211_RADIOTAP_TSFT) |
+	     (1 << IEEE80211_RADIOTAP_FLAGS) |
+	     (1 << IEEE80211_RADIOTAP_RATE) |
+	     (1 << IEEE80211_RADIOTAP_CHANNEL) |
+	     (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
+	     (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
+	     (1 << IEEE80211_RADIOTAP_ANTENNA));
+
+	/* Zero the flags, we'll add to them as we go */
+	ipw_rt->rt_flags = 0;
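+	/* Only the low 32 bits of the parent TSF are reported by the
+	 * HW; assemble its little-endian bytes into the 64-bit radiotap
+	 * TSFT field */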
+	ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
+			       frame->parent_tsf[2] << 16 |
+			       frame->parent_tsf[1] << 8  |
+			       frame->parent_tsf[0]);
+
+	/* Convert signal to DBM */
+	ipw_rt->rt_dbmsignal = antsignal;
+	ipw_rt->rt_dbmnoise = (s8) le16_to_cpu(frame->noise);
+
+	/* Convert the channel data and set the flags */
+	ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
+	if (received_channel > 14) {	/* 802.11a */
+		ipw_rt->rt_chbitmask =
+		    cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
+	} else if (antennaAndPhy & 32) {	/* 802.11b */
+		ipw_rt->rt_chbitmask =
+		    cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
+	} else {		/* 802.11g */
+		ipw_rt->rt_chbitmask =
+		    cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
+	}
+
+	/* set the rate in multiples of 500k/s */
+	switch (pktrate) {
+	case IPW_TX_RATE_1MB:
+		ipw_rt->rt_rate = 2;
+		break;
+	case IPW_TX_RATE_2MB:
+		ipw_rt->rt_rate = 4;
+		break;
+	case IPW_TX_RATE_5MB:
+		ipw_rt->rt_rate = 10;
+		break;
+	case IPW_TX_RATE_6MB:
+		ipw_rt->rt_rate = 12;
+		break;
+	case IPW_TX_RATE_9MB:
+		ipw_rt->rt_rate = 18;
+		break;
+	case IPW_TX_RATE_11MB:
+		ipw_rt->rt_rate = 22;
+		break;
+	case IPW_TX_RATE_12MB:
+		ipw_rt->rt_rate = 24;
+		break;
+	case IPW_TX_RATE_18MB:
+		ipw_rt->rt_rate = 36;
+		break;
+	case IPW_TX_RATE_24MB:
+		ipw_rt->rt_rate = 48;
+		break;
+	case IPW_TX_RATE_36MB:
+		ipw_rt->rt_rate = 72;
+		break;
+	case IPW_TX_RATE_48MB:
+		ipw_rt->rt_rate = 96;
+		break;
+	case IPW_TX_RATE_54MB:
+		ipw_rt->rt_rate = 108;
+		break;
+	default:
+		ipw_rt->rt_rate = 0;
+		break;
+	}
+
+	/* antenna number */
+	ipw_rt->rt_antenna = (antennaAndPhy & 3);	/* Is this right? */
+
+	/* set the preamble flag if we have it */
+	if ((antennaAndPhy & 64))
+		ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
+
+	/* Set the size of the skb to the size of the frame */
+	skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
+
+	IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
+
+	if (!libipw_rx(priv->ieee, rxb->skb, stats))
+		dev->stats.rx_errors++;
+	else {			/* libipw_rx succeeded, so it now owns the SKB */
+		rxb->skb = NULL;
+		/* no LED during capture */
+	}
+}
+#endif
+
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+#define libipw_is_probe_response(fc) \
+   ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
+    (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP)
+
+#define libipw_is_management(fc) \
+   ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
+
+#define libipw_is_control(fc) \
+   ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
+
+#define libipw_is_data(fc) \
+   ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
+
+#define libipw_is_assoc_request(fc) \
+   ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)
+
+#define libipw_is_reassoc_request(fc) \
+   ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
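+
+/* Note: the assoc/reassoc request macros above test only the subtype
+ * bits; callers are expected to have already checked for a management
+ * frame. */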
+
+static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
+				      struct ipw_rx_mem_buffer *rxb,
+				      struct libipw_rx_stats *stats)
+{
+	struct net_device *dev = priv->prom_net_dev;
+	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
+	struct ipw_rx_frame *frame = &pkt->u.frame;
+	struct ipw_rt_hdr *ipw_rt;
+
+	/* First cache any information we need before we overwrite
+	 * the information provided in the skb from the hardware */
+	struct ieee80211_hdr *hdr;
+	u16 channel = frame->received_channel;
+	u8 phy_flags = frame->antennaAndPhy;
+	s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
+	s8 noise = (s8) le16_to_cpu(frame->noise);
+	u8 rate = frame->rate;
+	unsigned short len = le16_to_cpu(pkt->u.frame.length);
+	struct sk_buff *skb;
+	int hdr_only = 0;
+	u16 filter = priv->prom_priv->filter;
+
+	/* If the filter is set to not include Rx frames then return */
+	if (filter & IPW_PROM_NO_RX)
+		return;
+
+	/* We received data from the HW, so stop the watchdog */
+	netif_trans_update(dev);
+
+	if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
+		dev->stats.rx_errors++;
+		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
+		return;
+	}
+
+	/* We only process data packets if the interface is open */
+	if (unlikely(!netif_running(dev))) {
+		dev->stats.rx_dropped++;
+		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
+		return;
+	}
+
+	/* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
+	 * that now */
+	if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
+		/* FIXME: Should alloc bigger skb instead */
+		dev->stats.rx_dropped++;
+		IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
+		return;
+	}
+
+	hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
+	if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
+		if (filter & IPW_PROM_NO_MGMT)
+			return;
+		if (filter & IPW_PROM_MGMT_HEADER_ONLY)
+			hdr_only = 1;
+	} else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
+		if (filter & IPW_PROM_NO_CTL)
+			return;
+		if (filter & IPW_PROM_CTL_HEADER_ONLY)
+			hdr_only = 1;
+	} else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
+		if (filter & IPW_PROM_NO_DATA)
+			return;
+		if (filter & IPW_PROM_DATA_HEADER_ONLY)
+			hdr_only = 1;
+	}
+
+	/* Copy the SKB since this is for the promiscuous side */
+	skb = skb_copy(rxb->skb, GFP_ATOMIC);
+	if (skb == NULL) {
+		IPW_ERROR("skb_clone failed for promiscuous copy.\n");
+		return;
+	}
+
+	/* the radiotap header goes at the front of the copy; the frame
+	 * data is written in just after it */
+	ipw_rt = (void *)skb->data;
+
+	if (hdr_only)
+		len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
+
+	memcpy(ipw_rt->payload, hdr, len);
+
+	ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
+	ipw_rt->rt_hdr.it_pad = 0;	/* always good to zero */
+	ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*ipw_rt));	/* radiotap header length */
+
+	/* Set the size of the skb to the size of the frame */
+	skb_put(skb, sizeof(*ipw_rt) + len);
+
+	/* Big bitfield of all the fields we provide in radiotap */
+	ipw_rt->rt_hdr.it_present = cpu_to_le32(
+	     (1 << IEEE80211_RADIOTAP_TSFT) |
+	     (1 << IEEE80211_RADIOTAP_FLAGS) |
+	     (1 << IEEE80211_RADIOTAP_RATE) |
+	     (1 << IEEE80211_RADIOTAP_CHANNEL) |
+	     (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
+	     (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
+	     (1 << IEEE80211_RADIOTAP_ANTENNA));
+
+	/* Zero the flags, we'll add to them as we go */
+	ipw_rt->rt_flags = 0;
+	ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
+			       frame->parent_tsf[2] << 16 |
+			       frame->parent_tsf[1] << 8  |
+			       frame->parent_tsf[0]);
+
+	/* Convert to DBM */
+	ipw_rt->rt_dbmsignal = signal;
+	ipw_rt->rt_dbmnoise = noise;
+
+	/* Convert the channel data and set the flags */
+	ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
+	if (channel > 14) {	/* 802.11a */
+		ipw_rt->rt_chbitmask =
+		    cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
+	} else if (phy_flags & (1 << 5)) {	/* 802.11b */
+		ipw_rt->rt_chbitmask =
+		    cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
+	} else {		/* 802.11g */
+		ipw_rt->rt_chbitmask =
+		    cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
+	}
+
+	/* set the rate in multiples of 500k/s */
+	switch (rate) {
+	case IPW_TX_RATE_1MB:
+		ipw_rt->rt_rate = 2;
+		break;
+	case IPW_TX_RATE_2MB:
+		ipw_rt->rt_rate = 4;
+		break;
+	case IPW_TX_RATE_5MB:
+		ipw_rt->rt_rate = 10;
+		break;
+	case IPW_TX_RATE_6MB:
+		ipw_rt->rt_rate = 12;
+		break;
+	case IPW_TX_RATE_9MB:
+		ipw_rt->rt_rate = 18;
+		break;
+	case IPW_TX_RATE_11MB:
+		ipw_rt->rt_rate = 22;
+		break;
+	case IPW_TX_RATE_12MB:
+		ipw_rt->rt_rate = 24;
+		break;
+	case IPW_TX_RATE_18MB:
+		ipw_rt->rt_rate = 36;
+		break;
+	case IPW_TX_RATE_24MB:
+		ipw_rt->rt_rate = 48;
+		break;
+	case IPW_TX_RATE_36MB:
+		ipw_rt->rt_rate = 72;
+		break;
+	case IPW_TX_RATE_48MB:
+		ipw_rt->rt_rate = 96;
+		break;
+	case IPW_TX_RATE_54MB:
+		ipw_rt->rt_rate = 108;
+		break;
+	default:
+		ipw_rt->rt_rate = 0;
+		break;
+	}
+
+	/* antenna number */
+	ipw_rt->rt_antenna = (phy_flags & 3);
+
+	/* set the preamble flag if we have it */
+	if (phy_flags & (1 << 6))
+		ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
+
+	IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
+
+	if (!libipw_rx(priv->prom_priv->ieee, skb, stats)) {
+		dev->stats.rx_errors++;
+		dev_kfree_skb_any(skb);
+	}
+}
+#endif
+
+static int is_network_packet(struct ipw_priv *priv,
+			     struct libipw_hdr_4addr *header)
+{
+	/* Filter incoming packets to determine if they are targeted toward
+	 * this network, discarding packets coming from ourselves */
+	switch (priv->ieee->iw_mode) {
+	case IW_MODE_ADHOC:	/* Header: Dest. | Source    | BSSID */
+		/* packets from our adapter are dropped (echo) */
+		if (ether_addr_equal(header->addr2, priv->net_dev->dev_addr))
+			return 0;
+
+		/* {broad,multi}cast packets to our BSSID go through */
+		if (is_multicast_ether_addr(header->addr1))
+			return ether_addr_equal(header->addr3, priv->bssid);
+
+		/* packets to our adapter go through */
+		return ether_addr_equal(header->addr1,
+					priv->net_dev->dev_addr);
+
+	case IW_MODE_INFRA:	/* Header: Dest. | BSSID | Source */
+		/* packets from our adapter are dropped (echo) */
+		if (ether_addr_equal(header->addr3, priv->net_dev->dev_addr))
+			return 0;
+
+		/* {broad,multi}cast packets to our BSS go through */
+		if (is_multicast_ether_addr(header->addr1))
+			return ether_addr_equal(header->addr2, priv->bssid);
+
+		/* packets to our adapter go through */
+		return ether_addr_equal(header->addr1,
+					priv->net_dev->dev_addr);
+	}
+
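+	/* Other modes (e.g. monitor) accept every frame */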
+	return 1;
+}
+
+#define IPW_PACKET_RETRY_TIME HZ
+
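+/* A frame counts as a duplicate when the same sequence number arrives
+ * again within IPW_PACKET_RETRY_TIME (one second) with the same or an
+ * out-of-order fragment number.  IBSS mode keeps per-peer state in a
+ * small hash; BSS mode needs only a single slot for the AP. */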
+static int is_duplicate_packet(struct ipw_priv *priv,
+			       struct libipw_hdr_4addr *header)
+{
+	u16 sc = le16_to_cpu(header->seq_ctl);
+	u16 seq = WLAN_GET_SEQ_SEQ(sc);
+	u16 frag = WLAN_GET_SEQ_FRAG(sc);
+	u16 *last_seq, *last_frag;
+	unsigned long *last_time;
+
+	switch (priv->ieee->iw_mode) {
+	case IW_MODE_ADHOC:
+		{
+			struct list_head *p;
+			struct ipw_ibss_seq *entry = NULL;
+			u8 *mac = header->addr2;
+			int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
+
+			list_for_each(p, &priv->ibss_mac_hash[index]) {
+				entry =
+				    list_entry(p, struct ipw_ibss_seq, list);
+				if (ether_addr_equal(entry->mac, mac))
+					break;
+			}
+			if (p == &priv->ibss_mac_hash[index]) {
+				entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+				if (!entry) {
+					IPW_ERROR
+					    ("Cannot malloc new mac entry\n");
+					return 0;
+				}
+				memcpy(entry->mac, mac, ETH_ALEN);
+				entry->seq_num = seq;
+				entry->frag_num = frag;
+				entry->packet_time = jiffies;
+				list_add(&entry->list,
+					 &priv->ibss_mac_hash[index]);
+				return 0;
+			}
+			last_seq = &entry->seq_num;
+			last_frag = &entry->frag_num;
+			last_time = &entry->packet_time;
+			break;
+		}
+	case IW_MODE_INFRA:
+		last_seq = &priv->last_seq_num;
+		last_frag = &priv->last_frag_num;
+		last_time = &priv->last_packet_time;
+		break;
+	default:
+		return 0;
+	}
+	if ((*last_seq == seq) &&
+	    time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
+		if (*last_frag == frag)
+			goto drop;
+		if (*last_frag + 1 != frag)
+			/* out-of-order fragment */
+			goto drop;
+	} else
+		*last_seq = seq;
+
+	*last_frag = frag;
+	*last_time = jiffies;
+	return 0;
+
+      drop:
+	/* The BUG_ON below is commented out because the card has been
+	 * observed to receive duplicate packets without the FCTL_RETRY
+	 * bit set in IBSS mode with fragmentation enabled.
+	 BUG_ON(!(le16_to_cpu(header->frame_control) & IEEE80211_FCTL_RETRY)); */
+	return 1;
+}
+
+static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
+				   struct ipw_rx_mem_buffer *rxb,
+				   struct libipw_rx_stats *stats)
+{
+	struct sk_buff *skb = rxb->skb;
+	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
+	struct libipw_hdr_4addr *header = (struct libipw_hdr_4addr *)
+	    (skb->data + IPW_RX_FRAME_SIZE);
+
+	libipw_rx_mgt(priv->ieee, header, stats);
+
+	if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
+	    ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
+	      IEEE80211_STYPE_PROBE_RESP) ||
+	     (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
+	      IEEE80211_STYPE_BEACON))) {
+		if (ether_addr_equal(header->addr3, priv->bssid))
+			ipw_add_station(priv, header->addr2);
+	}
+
+	if (priv->config & CFG_NET_STATS) {
+		IPW_DEBUG_HC("sending stat packet\n");
+
+		/* Set the size of the skb to the size of the full
+		 * ipw header and 802.11 frame */
+		skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
+			IPW_RX_FRAME_SIZE);
+
+		/* Advance past the ipw packet header to the 802.11 frame */
+		skb_pull(skb, IPW_RX_FRAME_SIZE);
+
+		/* Push the libipw_rx_stats before the 802.11 frame */
+		memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
+
+		skb->dev = priv->ieee->dev;
+
+		/* Point raw at the libipw_stats */
+		skb_reset_mac_header(skb);
+
+		skb->pkt_type = PACKET_OTHERHOST;
+		skb->protocol = cpu_to_be16(ETH_P_80211_STATS);
+		memset(skb->cb, 0, sizeof(rxb->skb->cb));
+		netif_rx(skb);
+		rxb->skb = NULL;
+	}
+}
+
+/*
+ * Main entry function for receiving a packet with 802.11 headers.  This
+ * should be called whenever the FW has notified us that there is a new
+ * skb in the receive queue.
+ */
+static void ipw_rx(struct ipw_priv *priv)
+{
+	struct ipw_rx_mem_buffer *rxb;
+	struct ipw_rx_packet *pkt;
+	struct libipw_hdr_4addr *header;
+	u32 r, w, i;
+	u8 network_packet;
+	u8 fill_rx = 0;
+
+	r = ipw_read32(priv, IPW_RX_READ_INDEX);
+	w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
+	i = priv->rxq->read;
+
+	if (ipw_rx_queue_space(priv->rxq) > (RX_QUEUE_SIZE / 2))
+		fill_rx = 1;
+
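+	/* Drain the ring from our read cursor 'i' up to the hardware
+	 * index 'r'; each slot in between holds a completed Rx buffer */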
+	while (i != r) {
+		rxb = priv->rxq->queue[i];
+		if (unlikely(rxb == NULL)) {
+			printk(KERN_CRIT "Queue not allocated!\n");
+			break;
+		}
+		priv->rxq->queue[i] = NULL;
+
+		pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
+					    IPW_RX_BUF_SIZE,
+					    PCI_DMA_FROMDEVICE);
+
+		pkt = (struct ipw_rx_packet *)rxb->skb->data;
+		IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
+			     pkt->header.message_type,
+			     pkt->header.rx_seq_num, pkt->header.control_bits);
+
+		switch (pkt->header.message_type) {
+		case RX_FRAME_TYPE:	/* 802.11 frame */  {
+				struct libipw_rx_stats stats = {
+					.rssi = pkt->u.frame.rssi_dbm -
+					    IPW_RSSI_TO_DBM,
+					.signal =
+					    pkt->u.frame.rssi_dbm -
+					    IPW_RSSI_TO_DBM + 0x100,
+					.noise =
+					    le16_to_cpu(pkt->u.frame.noise),
+					.rate = pkt->u.frame.rate,
+					.mac_time = jiffies,
+					.received_channel =
+					    pkt->u.frame.received_channel,
+					.freq =
+					    (pkt->u.frame.control &
+					     (1 << 0)) ?
+					    LIBIPW_24GHZ_BAND :
+					    LIBIPW_52GHZ_BAND,
+					.len = le16_to_cpu(pkt->u.frame.length),
+				};
+
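+				/* Only flag the stats fields that carry
+				 * real (non-zero) measurements */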
+				if (stats.rssi != 0)
+					stats.mask |= LIBIPW_STATMASK_RSSI;
+				if (stats.signal != 0)
+					stats.mask |= LIBIPW_STATMASK_SIGNAL;
+				if (stats.noise != 0)
+					stats.mask |= LIBIPW_STATMASK_NOISE;
+				if (stats.rate != 0)
+					stats.mask |= LIBIPW_STATMASK_RATE;
+
+				priv->rx_packets++;
+
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+				if (priv->prom_net_dev &&
+				    netif_running(priv->prom_net_dev))
+					ipw_handle_promiscuous_rx(priv, rxb,
+								  &stats);
+#endif
+
+#ifdef CONFIG_IPW2200_MONITOR
+				if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
+#ifdef CONFIG_IPW2200_RADIOTAP
+					ipw_handle_data_packet_monitor(priv,
+								       rxb,
+								       &stats);
+#else
+					ipw_handle_data_packet(priv, rxb,
+							       &stats);
+#endif
+					break;
+				}
+#endif
+
+				header = (struct libipw_hdr_4addr *)
+				    (rxb->skb->data + IPW_RX_FRAME_SIZE);
+				/* TODO: Check Ad-Hoc dest/source and make sure
+				 * that we are actually parsing these packets
+				 * correctly -- we should probably use the
+				 * frame control of the packet and disregard
+				 * the current iw_mode */
+
+				network_packet =
+				    is_network_packet(priv, header);
+				if (network_packet && priv->assoc_network) {
+					priv->assoc_network->stats.rssi =
+					    stats.rssi;
+					priv->exp_avg_rssi =
+					    exponential_average(priv->exp_avg_rssi,
+					    stats.rssi, DEPTH_RSSI);
+				}
+
+				IPW_DEBUG_RX("Frame: len=%u\n",
+					     le16_to_cpu(pkt->u.frame.length));
+
+				if (le16_to_cpu(pkt->u.frame.length) <
+				    libipw_get_hdrlen(le16_to_cpu(
+						    header->frame_ctl))) {
+					IPW_DEBUG_DROP
+					    ("Received packet is too small. "
+					     "Dropping.\n");
+					priv->net_dev->stats.rx_errors++;
+					priv->wstats.discard.misc++;
+					break;
+				}
+
+				switch (WLAN_FC_GET_TYPE
+					(le16_to_cpu(header->frame_ctl))) {
+
+				case IEEE80211_FTYPE_MGMT:
+					ipw_handle_mgmt_packet(priv, rxb,
+							       &stats);
+					break;
+
+				case IEEE80211_FTYPE_CTL:
+					break;
+
+				case IEEE80211_FTYPE_DATA:
+					if (unlikely(!network_packet ||
+						     is_duplicate_packet(priv,
+									 header))) {
+						IPW_DEBUG_DROP("Dropping: "
+							       "%pM, "
+							       "%pM, "
+							       "%pM\n",
+							       header->addr1,
+							       header->addr2,
+							       header->addr3);
+						break;
+					}
+
+					ipw_handle_data_packet(priv, rxb,
+							       &stats);
+
+					break;
+				}
+				break;
+			}
+
+		case RX_HOST_NOTIFICATION_TYPE:{
+				IPW_DEBUG_RX
+				    ("Notification: subtype=%02X flags=%02X size=%d\n",
+				     pkt->u.notification.subtype,
+				     pkt->u.notification.flags,
+				     le16_to_cpu(pkt->u.notification.size));
+				ipw_rx_notification(priv, &pkt->u.notification);
+				break;
+			}
+
+		default:
+			IPW_DEBUG_RX("Bad Rx packet of type %d\n",
+				     pkt->header.message_type);
+			break;
+		}
+
+		/* For now we just don't re-use anything.  We can tweak this
+		 * later to try and re-use notification packets and SKBs that
+		 * fail to Rx correctly */
+		if (rxb->skb != NULL) {
+			dev_kfree_skb_any(rxb->skb);
+			rxb->skb = NULL;
+		}
+
+		pci_unmap_single(priv->pci_dev, rxb->dma_addr,
+				 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+		list_add_tail(&rxb->list, &priv->rxq->rx_used);
+
+		i = (i + 1) % RX_QUEUE_SIZE;
+
+		/* If there are a lot of unused frames, restock the Rx queue
+		 * so the ucode won't assert */
+		if (fill_rx) {
+			priv->rxq->read = i;
+			ipw_rx_queue_replenish(priv);
+		}
+	}
+
+	/* Record where we stopped and restock the Rx queue */
+	priv->rxq->read = i;
+	ipw_rx_queue_restock(priv);
+}
+
+#define DEFAULT_RTS_THRESHOLD     2304U
+#define MIN_RTS_THRESHOLD         1U
+#define MAX_RTS_THRESHOLD         2304U
+#define DEFAULT_BEACON_INTERVAL   100U
+#define	DEFAULT_SHORT_RETRY_LIMIT 7U
+#define	DEFAULT_LONG_RETRY_LIMIT  4U
+
+/**
+ * ipw_sw_reset - reset driver state back to module-parameter defaults
+ * @priv: driver private data
+ * @option: controls the reset behaviour:
+ *	    0 = reset everything except the 'disable' module_param
+ *	    1 = reset everything and print out driver info (for probe only)
+ *	    2 = reset everything
+ */
+static int ipw_sw_reset(struct ipw_priv *priv, int option)
+{
+	int band, modulation;
+	int old_mode = priv->ieee->iw_mode;
+
+	/* Initialize module parameter values here */
+	priv->config = 0;
+
+	/* We default to disabling the LED code as right now it causes
+	 * too many systems to lock up... */
+	if (!led_support)
+		priv->config |= CFG_NO_LED;
+
+	if (associate)
+		priv->config |= CFG_ASSOCIATE;
+	else
+		IPW_DEBUG_INFO("Auto associate disabled.\n");
+
+	if (auto_create)
+		priv->config |= CFG_ADHOC_CREATE;
+	else
+		IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
+
+	priv->config &= ~CFG_STATIC_ESSID;
+	priv->essid_len = 0;
+	memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
+
+	if (disable && option) {
+		priv->status |= STATUS_RF_KILL_SW;
+		IPW_DEBUG_INFO("Radio disabled.\n");
+	}
+
+	if (default_channel != 0) {
+		priv->config |= CFG_STATIC_CHANNEL;
+		priv->channel = default_channel;
+		IPW_DEBUG_INFO("Bind to static channel %d\n", default_channel);
+		/* TODO: Validate that provided channel is in range */
+	}
+#ifdef CONFIG_IPW2200_QOS
+	ipw_qos_init(priv, qos_enable, qos_burst_enable,
+		     burst_duration_CCK, burst_duration_OFDM);
+#endif				/* CONFIG_IPW2200_QOS */
+
+	switch (network_mode) {
+	case 1:
+		priv->ieee->iw_mode = IW_MODE_ADHOC;
+		priv->net_dev->type = ARPHRD_ETHER;
+
+		break;
+#ifdef CONFIG_IPW2200_MONITOR
+	case 2:
+		priv->ieee->iw_mode = IW_MODE_MONITOR;
+#ifdef CONFIG_IPW2200_RADIOTAP
+		priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
+#else
+		priv->net_dev->type = ARPHRD_IEEE80211;
+#endif
+		break;
+#endif
+	default:
+	case 0:
+		priv->net_dev->type = ARPHRD_ETHER;
+		priv->ieee->iw_mode = IW_MODE_INFRA;
+		break;
+	}
+
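+	/* With hardware crypto enabled the firmware handles all cipher
+	 * work, so every host-side crypto path is switched off */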
+	if (hwcrypto) {
+		priv->ieee->host_encrypt = 0;
+		priv->ieee->host_encrypt_msdu = 0;
+		priv->ieee->host_decrypt = 0;
+		priv->ieee->host_mc_decrypt = 0;
+	}
+	IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
+
+	/* IPW2200/2915 is able to do hardware fragmentation. */
+	priv->ieee->host_open_frag = 0;
+
+	if ((priv->pci_dev->device == 0x4223) ||
+	    (priv->pci_dev->device == 0x4224)) {
+		if (option == 1)
+			printk(KERN_INFO DRV_NAME
+			       ": Detected Intel PRO/Wireless 2915ABG Network "
+			       "Connection\n");
+		priv->ieee->abg_true = 1;
+		band = LIBIPW_52GHZ_BAND | LIBIPW_24GHZ_BAND;
+		modulation = LIBIPW_OFDM_MODULATION |
+		    LIBIPW_CCK_MODULATION;
+		priv->adapter = IPW_2915ABG;
+		priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
+	} else {
+		if (option == 1)
+			printk(KERN_INFO DRV_NAME
+			       ": Detected Intel PRO/Wireless 2200BG Network "
+			       "Connection\n");
+
+		priv->ieee->abg_true = 0;
+		band = LIBIPW_24GHZ_BAND;
+		modulation = LIBIPW_OFDM_MODULATION |
+		    LIBIPW_CCK_MODULATION;
+		priv->adapter = IPW_2200BG;
+		priv->ieee->mode = IEEE_G | IEEE_B;
+	}
+
+	priv->ieee->freq_band = band;
+	priv->ieee->modulation = modulation;
+
+	priv->rates_mask = LIBIPW_DEFAULT_RATES_MASK;
+
+	priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
+	priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
+
+	priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
+	priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
+	priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
+
+	/* If power management is turned on, default to AC mode */
+	priv->power_mode = IPW_POWER_AC;
+	priv->tx_power = IPW_TX_POWER_DEFAULT;
+
+	return old_mode == priv->ieee->iw_mode;
+}
+
+/*
+ * This file defines the Wireless Extension handlers.  It does not
+ * define any methods of hardware manipulation and relies on the
+ * functions defined in ipw_main to provide the HW interaction.
+ *
+ * The exception to this is the use of the ipw_get_ordinal()
+ * function used to poll the hardware vs. making unnecessary calls.
+ *
+ */
+
+static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
+{
+	if (channel == 0) {
+		IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
+		priv->config &= ~CFG_STATIC_CHANNEL;
+		IPW_DEBUG_ASSOC("Attempting to associate with new "
+				"parameters.\n");
+		ipw_associate(priv);
+		return 0;
+	}
+
+	priv->config |= CFG_STATIC_CHANNEL;
+
+	if (priv->channel == channel) {
+		IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
+			       channel);
+		return 0;
+	}
+
+	IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
+	priv->channel = channel;
+
+#ifdef CONFIG_IPW2200_MONITOR
+	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
+		int i;
+		if (priv->status & STATUS_SCANNING) {
+			IPW_DEBUG_SCAN("Scan abort triggered due to "
+				       "channel change.\n");
+			ipw_abort_scan(priv);
+		}
+
+		for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
+			udelay(10);
+
+		if (priv->status & STATUS_SCANNING)
+			IPW_DEBUG_SCAN("Still scanning...\n");
+		else
+			IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
+				       1000 - i);
+
+		return 0;
+	}
+#endif				/* CONFIG_IPW2200_MONITOR */
+
+	/* Network configuration changed -- force [re]association */
+	IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
+	if (!ipw_disassociate(priv))
+		ipw_associate(priv);
+
+	return 0;
+}
+
+static int ipw_wx_set_freq(struct net_device *dev,
+			   struct iw_request_info *info,
+			   union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
+	struct iw_freq *fwrq = &wrqu->freq;
+	int ret = 0, i;
+	u8 channel, flags;
+	int band;
+
+	if (fwrq->m == 0) {
+		IPW_DEBUG_WX("SET Freq/Channel -> any\n");
+		mutex_lock(&priv->mutex);
+		ret = ipw_set_channel(priv, 0);
+		mutex_unlock(&priv->mutex);
+		return ret;
+	}
+	/* if setting by freq convert to channel */
+	if (fwrq->e == 1) {
+		channel = libipw_freq_to_channel(priv->ieee, fwrq->m);
+		if (channel == 0)
+			return -EINVAL;
+	} else
+		channel = fwrq->m;
+
+	if (!(band = libipw_is_valid_channel(priv->ieee, channel)))
+		return -EINVAL;
+
+	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
+		i = libipw_channel_to_index(priv->ieee, channel);
+		if (i == -1)
+			return -EINVAL;
+
+		flags = (band == LIBIPW_24GHZ_BAND) ?
+		    geo->bg[i].flags : geo->a[i].flags;
+		if (flags & LIBIPW_CH_PASSIVE_ONLY) {
+			IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n");
+			return -EINVAL;
+		}
+	}
+
+	IPW_DEBUG_WX("SET Freq/Channel -> %d\n", fwrq->m);
+	mutex_lock(&priv->mutex);
+	ret = ipw_set_channel(priv, channel);
+	mutex_unlock(&priv->mutex);
+	return ret;
+}
+
+static int ipw_wx_get_freq(struct net_device *dev,
+			   struct iw_request_info *info,
+			   union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+
+	wrqu->freq.e = 0;
+
+	/* If we are associated, trying to associate, or have a statically
+	 * configured CHANNEL then return that; otherwise return ANY */
+	mutex_lock(&priv->mutex);
+	if (priv->config & CFG_STATIC_CHANNEL ||
+	    priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
+		int i;
+
+		i = libipw_channel_to_index(priv->ieee, priv->channel);
+		BUG_ON(i == -1);
+		wrqu->freq.e = 1;
+
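+		/* geo freqs are in MHz; with e == 1 the reported WE value
+		 * is m * 10 Hz, hence the 100000 scaling below */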
+		switch (libipw_is_valid_channel(priv->ieee, priv->channel)) {
+		case LIBIPW_52GHZ_BAND:
+			wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
+			break;
+
+		case LIBIPW_24GHZ_BAND:
+			wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
+			break;
+
+		default:
+			BUG();
+		}
+	} else
+		wrqu->freq.m = 0;
+
+	mutex_unlock(&priv->mutex);
+	IPW_DEBUG_WX("GET Freq/Channel -> %d\n", priv->channel);
+	return 0;
+}
+
+static int ipw_wx_set_mode(struct net_device *dev,
+			   struct iw_request_info *info,
+			   union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	int err = 0;
+
+	IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
+
+	switch (wrqu->mode) {
+#ifdef CONFIG_IPW2200_MONITOR
+	case IW_MODE_MONITOR:
+#endif
+	case IW_MODE_ADHOC:
+	case IW_MODE_INFRA:
+		break;
+	case IW_MODE_AUTO:
+		wrqu->mode = IW_MODE_INFRA;
+		break;
+	default:
+		return -EINVAL;
+	}
+	if (wrqu->mode == priv->ieee->iw_mode)
+		return 0;
+
+	mutex_lock(&priv->mutex);
+
+	ipw_sw_reset(priv, 0);
+
+#ifdef CONFIG_IPW2200_MONITOR
+	if (priv->ieee->iw_mode == IW_MODE_MONITOR)
+		priv->net_dev->type = ARPHRD_ETHER;
+
+	if (wrqu->mode == IW_MODE_MONITOR)
+#ifdef CONFIG_IPW2200_RADIOTAP
+		priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
+#else
+		priv->net_dev->type = ARPHRD_IEEE80211;
+#endif
+#endif				/* CONFIG_IPW2200_MONITOR */
+
+	/* Free the existing firmware and reset the fw_loaded
+	 * flag so ipw_load() will bring in the new firmware */
+	free_firmware();
+
+	priv->ieee->iw_mode = wrqu->mode;
+
+	schedule_work(&priv->adapter_restart);
+	mutex_unlock(&priv->mutex);
+	return err;
+}
+
+static int ipw_wx_get_mode(struct net_device *dev,
+			   struct iw_request_info *info,
+			   union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	mutex_lock(&priv->mutex);
+	wrqu->mode = priv->ieee->iw_mode;
+	IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
+	mutex_unlock(&priv->mutex);
+	return 0;
+}
+
+/* Values are in microseconds */
+static const s32 timeout_duration[] = {
+	350000,
+	250000,
+	75000,
+	37000,
+	25000,
+};
+
+static const s32 period_duration[] = {
+	400000,
+	700000,
+	1000000,
+	1000000,
+	1000000
+};
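+/* Indexed by (power level - 1); these presumably mirror the firmware's
+ * five power-save levels between AC and maximum battery saving. */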
+
+static int ipw_wx_get_range(struct net_device *dev,
+			    struct iw_request_info *info,
+			    union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	struct iw_range *range = (struct iw_range *)extra;
+	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
+	int i = 0, j;
+
+	wrqu->data.length = sizeof(*range);
+	memset(range, 0, sizeof(*range));
+
+	/* 54 Mbps == ~27 Mb/s real throughput (802.11g) */
+	range->throughput = 27 * 1000 * 1000;
+
+	range->max_qual.qual = 100;
+	/* TODO: Find real max RSSI and stick here */
+	range->max_qual.level = 0;
+	range->max_qual.noise = 0;
+	range->max_qual.updated = 7;	/* Updated all three */
+
+	range->avg_qual.qual = 70;
+	/* TODO: Find real 'good' to 'bad' threshold value for RSSI */
+	range->avg_qual.level = 0;	/* FIXME to real average level */
+	range->avg_qual.noise = 0;
+	range->avg_qual.updated = 7;	/* Updated all three */
+	mutex_lock(&priv->mutex);
+	range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
+
+	for (i = 0; i < range->num_bitrates; i++)
+		range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
+		    500000;
+
+	range->max_rts = DEFAULT_RTS_THRESHOLD;
+	range->min_frag = MIN_FRAG_THRESHOLD;
+	range->max_frag = MAX_FRAG_THRESHOLD;
+
+	range->encoding_size[0] = 5;
+	range->encoding_size[1] = 13;
+	range->num_encoding_sizes = 2;
+	range->max_encoding_tokens = WEP_KEYS;
+
+	/* Set the Wireless Extension versions */
+	range->we_version_compiled = WIRELESS_EXT;
+	range->we_version_source = 18;
+
+	i = 0;
+	if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
+		for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
+			if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
+			    (geo->bg[j].flags & LIBIPW_CH_PASSIVE_ONLY))
+				continue;
+
+			range->freq[i].i = geo->bg[j].channel;
+			range->freq[i].m = geo->bg[j].freq * 100000;
+			range->freq[i].e = 1;
+			i++;
+		}
+	}
+
+	if (priv->ieee->mode & IEEE_A) {
+		for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
+			if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
+			    (geo->a[j].flags & LIBIPW_CH_PASSIVE_ONLY))
+				continue;
+
+			range->freq[i].i = geo->a[j].channel;
+			range->freq[i].m = geo->a[j].freq * 100000;
+			range->freq[i].e = 1;
+			i++;
+		}
+	}
+
+	range->num_channels = i;
+	range->num_frequency = i;
+
+	mutex_unlock(&priv->mutex);
+
+	/* Event capability (kernel + driver) */
+	range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
+				IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
+				IW_EVENT_CAPA_MASK(SIOCGIWAP) |
+				IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
+	range->event_capa[1] = IW_EVENT_CAPA_K_1;
+
+	range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
+		IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
+
+	range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE;
+
+	IPW_DEBUG_WX("GET Range\n");
+	return 0;
+}
+
+static int ipw_wx_set_wap(struct net_device *dev,
+			  struct iw_request_info *info,
+			  union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+
+	if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
+		return -EINVAL;
+	mutex_lock(&priv->mutex);
+	if (is_broadcast_ether_addr(wrqu->ap_addr.sa_data) ||
+	    is_zero_ether_addr(wrqu->ap_addr.sa_data)) {
+		/* we disable mandatory BSSID association */
+		IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
+		priv->config &= ~CFG_STATIC_BSSID;
+		IPW_DEBUG_ASSOC("Attempting to associate with new "
+				"parameters.\n");
+		ipw_associate(priv);
+		mutex_unlock(&priv->mutex);
+		return 0;
+	}
+
+	priv->config |= CFG_STATIC_BSSID;
+	if (ether_addr_equal(priv->bssid, wrqu->ap_addr.sa_data)) {
+		IPW_DEBUG_WX("BSSID set to current BSSID.\n");
+		mutex_unlock(&priv->mutex);
+		return 0;
+	}
+
+	IPW_DEBUG_WX("Setting mandatory BSSID to %pM\n",
+		     wrqu->ap_addr.sa_data);
+
+	memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
+
+	/* Network configuration changed -- force [re]association */
+	IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
+	if (!ipw_disassociate(priv))
+		ipw_associate(priv);
+
+	mutex_unlock(&priv->mutex);
+	return 0;
+}
+
+static int ipw_wx_get_wap(struct net_device *dev,
+			  struct iw_request_info *info,
+			  union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+
+	/* If we are associated, trying to associate, or have a statically
+	 * configured BSSID then return that; otherwise return ANY */
+	mutex_lock(&priv->mutex);
+	if (priv->config & CFG_STATIC_BSSID ||
+	    priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
+		wrqu->ap_addr.sa_family = ARPHRD_ETHER;
+		memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
+	} else
+		eth_zero_addr(wrqu->ap_addr.sa_data);
+
+	IPW_DEBUG_WX("Getting WAP BSSID: %pM\n",
+		     wrqu->ap_addr.sa_data);
+	mutex_unlock(&priv->mutex);
+	return 0;
+}
+
+static int ipw_wx_set_essid(struct net_device *dev,
+			    struct iw_request_info *info,
+			    union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	int length;
+
+	mutex_lock(&priv->mutex);
+
+	if (!wrqu->essid.flags) {
+		IPW_DEBUG_WX("Setting ESSID to ANY\n");
+		ipw_disassociate(priv);
+		priv->config &= ~CFG_STATIC_ESSID;
+		ipw_associate(priv);
+		mutex_unlock(&priv->mutex);
+		return 0;
+	}
+
+	length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);
+
+	priv->config |= CFG_STATIC_ESSID;
+
+	if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
+	    && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
+		IPW_DEBUG_WX("ESSID set to current ESSID.\n");
+		mutex_unlock(&priv->mutex);
+		return 0;
+	}
+
+	IPW_DEBUG_WX("Setting ESSID: '%*pE' (%d)\n", length, extra, length);
+
+	priv->essid_len = length;
+	memcpy(priv->essid, extra, priv->essid_len);
+
+	/* Network configuration changed -- force [re]association */
+	IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
+	if (!ipw_disassociate(priv))
+		ipw_associate(priv);
+
+	mutex_unlock(&priv->mutex);
+	return 0;
+}
+
+static int ipw_wx_get_essid(struct net_device *dev,
+			    struct iw_request_info *info,
+			    union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+
+	/* If we are associated, trying to associate, or have a statically
+	 * configured ESSID then return that; otherwise return ANY */
+	mutex_lock(&priv->mutex);
+	if (priv->config & CFG_STATIC_ESSID ||
+	    priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
+		IPW_DEBUG_WX("Getting essid: '%*pE'\n",
+			     priv->essid_len, priv->essid);
+		memcpy(extra, priv->essid, priv->essid_len);
+		wrqu->essid.length = priv->essid_len;
+		wrqu->essid.flags = 1;	/* active */
+	} else {
+		IPW_DEBUG_WX("Getting essid: ANY\n");
+		wrqu->essid.length = 0;
+		wrqu->essid.flags = 0;	/* active */
+	}
+	mutex_unlock(&priv->mutex);
+	return 0;
+}
+
+static int ipw_wx_set_nick(struct net_device *dev,
+			   struct iw_request_info *info,
+			   union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+
+	IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
+	if (wrqu->data.length > IW_ESSID_MAX_SIZE)
+		return -E2BIG;
+	mutex_lock(&priv->mutex);
+	wrqu->data.length = min_t(size_t, wrqu->data.length, sizeof(priv->nick));
+	memset(priv->nick, 0, sizeof(priv->nick));
+	memcpy(priv->nick, extra, wrqu->data.length);
+	IPW_DEBUG_TRACE("<<\n");
+	mutex_unlock(&priv->mutex);
+	return 0;
+}
+
+static int ipw_wx_get_nick(struct net_device *dev,
+			   struct iw_request_info *info,
+			   union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	IPW_DEBUG_WX("Getting nick\n");
+	mutex_lock(&priv->mutex);
+	wrqu->data.length = strlen(priv->nick);
+	memcpy(extra, priv->nick, wrqu->data.length);
+	wrqu->data.flags = 1;	/* active */
+	mutex_unlock(&priv->mutex);
+	return 0;
+}
+
+static int ipw_wx_set_sens(struct net_device *dev,
+			    struct iw_request_info *info,
+			    union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	int err = 0;
+
+	IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
+	IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
+	mutex_lock(&priv->mutex);
+
+	if (wrqu->sens.fixed == 0) {
+		priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
+		priv->disassociate_threshold =
+		    IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
+		goto out;
+	}
+	if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
+	    (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	priv->roaming_threshold = wrqu->sens.value;
+	priv->disassociate_threshold = 3 * wrqu->sens.value;
+      out:
+	mutex_unlock(&priv->mutex);
+	return err;
+}
+
+static int ipw_wx_get_sens(struct net_device *dev,
+			    struct iw_request_info *info,
+			    union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	mutex_lock(&priv->mutex);
+	wrqu->sens.fixed = 1;
+	wrqu->sens.value = priv->roaming_threshold;
+	mutex_unlock(&priv->mutex);
+
+	IPW_DEBUG_WX("GET roaming threshold -> %s %d\n",
+		     wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
+
+	return 0;
+}
+
+static int ipw_wx_set_rate(struct net_device *dev,
+			   struct iw_request_info *info,
+			   union iwreq_data *wrqu, char *extra)
+{
+	/* TODO: We should use semaphores or locks for access to priv */
+	struct ipw_priv *priv = libipw_priv(dev);
+	u32 target_rate = wrqu->bitrate.value;
+	u32 fixed, mask;
+
+	/* value = -1, fixed = 0: auto, use all rates offered by the AP
+	 * value =  X, fixed = 1: only rate X
+	 * value =  X, fixed = 0: all rates less than or equal to X */
+
+	if (target_rate == -1) {
+		fixed = 0;
+		mask = LIBIPW_DEFAULT_RATES_MASK;
+		/* Now we should reassociate */
+		goto apply;
+	}
+
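+	/* Build the rate mask: a fixed request sets only the matching
+	 * bit, while a non-fixed request accumulates every rate at or
+	 * below the target on the way down the ladder */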
+	mask = 0;
+	fixed = wrqu->bitrate.fixed;
+
+	if (target_rate == 1000000 || !fixed)
+		mask |= LIBIPW_CCK_RATE_1MB_MASK;
+	if (target_rate == 1000000)
+		goto apply;
+
+	if (target_rate == 2000000 || !fixed)
+		mask |= LIBIPW_CCK_RATE_2MB_MASK;
+	if (target_rate == 2000000)
+		goto apply;
+
+	if (target_rate == 5500000 || !fixed)
+		mask |= LIBIPW_CCK_RATE_5MB_MASK;
+	if (target_rate == 5500000)
+		goto apply;
+
+	if (target_rate == 6000000 || !fixed)
+		mask |= LIBIPW_OFDM_RATE_6MB_MASK;
+	if (target_rate == 6000000)
+		goto apply;
+
+	if (target_rate == 9000000 || !fixed)
+		mask |= LIBIPW_OFDM_RATE_9MB_MASK;
+	if (target_rate == 9000000)
+		goto apply;
+
+	if (target_rate == 11000000 || !fixed)
+		mask |= LIBIPW_CCK_RATE_11MB_MASK;
+	if (target_rate == 11000000)
+		goto apply;
+
+	if (target_rate == 12000000 || !fixed)
+		mask |= LIBIPW_OFDM_RATE_12MB_MASK;
+	if (target_rate == 12000000)
+		goto apply;
+
+	if (target_rate == 18000000 || !fixed)
+		mask |= LIBIPW_OFDM_RATE_18MB_MASK;
+	if (target_rate == 18000000)
+		goto apply;
+
+	if (target_rate == 24000000 || !fixed)
+		mask |= LIBIPW_OFDM_RATE_24MB_MASK;
+	if (target_rate == 24000000)
+		goto apply;
+
+	if (target_rate == 36000000 || !fixed)
+		mask |= LIBIPW_OFDM_RATE_36MB_MASK;
+	if (target_rate == 36000000)
+		goto apply;
+
+	if (target_rate == 48000000 || !fixed)
+		mask |= LIBIPW_OFDM_RATE_48MB_MASK;
+	if (target_rate == 48000000)
+		goto apply;
+
+	if (target_rate == 54000000 || !fixed)
+		mask |= LIBIPW_OFDM_RATE_54MB_MASK;
+	if (target_rate == 54000000)
+		goto apply;
+
+	IPW_DEBUG_WX("invalid rate specified, returning error\n");
+	return -EINVAL;
+
+      apply:
+	IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
+		     mask, fixed ? "fixed" : "sub-rates");
+	mutex_lock(&priv->mutex);
+	if (mask == LIBIPW_DEFAULT_RATES_MASK) {
+		priv->config &= ~CFG_FIXED_RATE;
+		ipw_set_fixed_rate(priv, priv->ieee->mode);
+	} else
+		priv->config |= CFG_FIXED_RATE;
+
+	if (priv->rates_mask == mask) {
+		IPW_DEBUG_WX("Mask set to current mask.\n");
+		mutex_unlock(&priv->mutex);
+		return 0;
+	}
+
+	priv->rates_mask = mask;
+
+	/* Network configuration changed -- force [re]association */
+	IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
+	if (!ipw_disassociate(priv))
+		ipw_associate(priv);
+
+	mutex_unlock(&priv->mutex);
+	return 0;
+}
+
+static int ipw_wx_get_rate(struct net_device *dev,
+			   struct iw_request_info *info,
+			   union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	mutex_lock(&priv->mutex);
+	wrqu->bitrate.value = priv->last_rate;
+	wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
+	mutex_unlock(&priv->mutex);
+	IPW_DEBUG_WX("GET Rate -> %d\n", wrqu->bitrate.value);
+	return 0;
+}
+
+static int ipw_wx_set_rts(struct net_device *dev,
+			  struct iw_request_info *info,
+			  union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	mutex_lock(&priv->mutex);
+	if (wrqu->rts.disabled || !wrqu->rts.fixed)
+		priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
+	else {
+		if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
+		    wrqu->rts.value > MAX_RTS_THRESHOLD) {
+			mutex_unlock(&priv->mutex);
+			return -EINVAL;
+		}
+		priv->rts_threshold = wrqu->rts.value;
+	}
+
+	ipw_send_rts_threshold(priv, priv->rts_threshold);
+	mutex_unlock(&priv->mutex);
+	IPW_DEBUG_WX("SET RTS Threshold -> %d\n", priv->rts_threshold);
+	return 0;
+}
+
+static int ipw_wx_get_rts(struct net_device *dev,
+			  struct iw_request_info *info,
+			  union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	mutex_lock(&priv->mutex);
+	wrqu->rts.value = priv->rts_threshold;
+	wrqu->rts.fixed = 0;	/* no auto select */
+	wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
+	mutex_unlock(&priv->mutex);
+	IPW_DEBUG_WX("GET RTS Threshold -> %d\n", wrqu->rts.value);
+	return 0;
+}
+
+static int ipw_wx_set_txpow(struct net_device *dev,
+			    struct iw_request_info *info,
+			    union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	int err = 0;
+
+	mutex_lock(&priv->mutex);
+	if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
+		err = -EINPROGRESS;
+		goto out;
+	}
+
+	if (!wrqu->power.fixed)
+		wrqu->power.value = IPW_TX_POWER_DEFAULT;
+
+	if (wrqu->power.flags != IW_TXPOW_DBM) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
+	    (wrqu->power.value < IPW_TX_POWER_MIN)) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	priv->tx_power = wrqu->power.value;
+	err = ipw_set_tx_power(priv);
+      out:
+	mutex_unlock(&priv->mutex);
+	return err;
+}
+
+static int ipw_wx_get_txpow(struct net_device *dev,
+			    struct iw_request_info *info,
+			    union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	mutex_lock(&priv->mutex);
+	wrqu->power.value = priv->tx_power;
+	wrqu->power.fixed = 1;
+	wrqu->power.flags = IW_TXPOW_DBM;
+	wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
+	mutex_unlock(&priv->mutex);
+
+	IPW_DEBUG_WX("GET TX Power -> %s %d\n",
+		     wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
+
+	return 0;
+}
+
+static int ipw_wx_set_frag(struct net_device *dev,
+			   struct iw_request_info *info,
+			   union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	mutex_lock(&priv->mutex);
+	if (wrqu->frag.disabled || !wrqu->frag.fixed)
+		priv->ieee->fts = DEFAULT_FTS;
+	else {
+		if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
+		    wrqu->frag.value > MAX_FRAG_THRESHOLD) {
+			mutex_unlock(&priv->mutex);
+			return -EINVAL;
+		}
+
+		priv->ieee->fts = wrqu->frag.value & ~0x1;
+	}
+
+	ipw_send_frag_threshold(priv, wrqu->frag.value);
+	mutex_unlock(&priv->mutex);
+	IPW_DEBUG_WX("SET Frag Threshold -> %d\n", wrqu->frag.value);
+	return 0;
+}
+
+static int ipw_wx_get_frag(struct net_device *dev,
+			   struct iw_request_info *info,
+			   union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	mutex_lock(&priv->mutex);
+	wrqu->frag.value = priv->ieee->fts;
+	wrqu->frag.fixed = 0;	/* no auto select */
+	wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
+	mutex_unlock(&priv->mutex);
+	IPW_DEBUG_WX("GET Frag Threshold -> %d\n", wrqu->frag.value);
+
+	return 0;
+}
+
+static int ipw_wx_set_retry(struct net_device *dev,
+			    struct iw_request_info *info,
+			    union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+
+	if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
+		return -EINVAL;
+
+	if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
+		return 0;
+
+	if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
+		return -EINVAL;
+
+	mutex_lock(&priv->mutex);
+	if (wrqu->retry.flags & IW_RETRY_SHORT)
+		priv->short_retry_limit = (u8) wrqu->retry.value;
+	else if (wrqu->retry.flags & IW_RETRY_LONG)
+		priv->long_retry_limit = (u8) wrqu->retry.value;
+	else {
+		priv->short_retry_limit = (u8) wrqu->retry.value;
+		priv->long_retry_limit = (u8) wrqu->retry.value;
+	}
+
+	ipw_send_retry_limit(priv, priv->short_retry_limit,
+			     priv->long_retry_limit);
+	mutex_unlock(&priv->mutex);
+	IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
+		     priv->short_retry_limit, priv->long_retry_limit);
+	return 0;
+}
+
+static int ipw_wx_get_retry(struct net_device *dev,
+			    struct iw_request_info *info,
+			    union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+
+	mutex_lock(&priv->mutex);
+	wrqu->retry.disabled = 0;
+
+	if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
+		mutex_unlock(&priv->mutex);
+		return -EINVAL;
+	}
+
+	if (wrqu->retry.flags & IW_RETRY_LONG) {
+		wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
+		wrqu->retry.value = priv->long_retry_limit;
+	} else if (wrqu->retry.flags & IW_RETRY_SHORT) {
+		wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
+		wrqu->retry.value = priv->short_retry_limit;
+	} else {
+		wrqu->retry.flags = IW_RETRY_LIMIT;
+		wrqu->retry.value = priv->short_retry_limit;
+	}
+	mutex_unlock(&priv->mutex);
+
+	IPW_DEBUG_WX("GET retry -> %d\n", wrqu->retry.value);
+
+	return 0;
+}
+
+static int ipw_wx_set_scan(struct net_device *dev,
+			   struct iw_request_info *info,
+			   union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	struct iw_scan_req *req = (struct iw_scan_req *)extra;
+	struct delayed_work *work = NULL;
+
+	mutex_lock(&priv->mutex);
+
+	priv->user_requested_scan = 1;
+
+	if (wrqu->data.length == sizeof(struct iw_scan_req)) {
+		if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
+			int len = min((int)req->essid_len,
+			              (int)sizeof(priv->direct_scan_ssid));
+			memcpy(priv->direct_scan_ssid, req->essid, len);
+			priv->direct_scan_ssid_len = len;
+			work = &priv->request_direct_scan;
+		} else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
+			work = &priv->request_passive_scan;
+		}
+	} else {
+		/* Normal active broadcast scan */
+		work = &priv->request_scan;
+	}
+
+	/* Fall back to a normal active scan if no specific scan type was
+	 * selected above, so we never schedule a NULL work item */
+	if (!work)
+		work = &priv->request_scan;
+
+	mutex_unlock(&priv->mutex);
+
+	IPW_DEBUG_WX("Start scan\n");
+
+	schedule_delayed_work(work, 0);
+
+	return 0;
+}
+
+static int ipw_wx_get_scan(struct net_device *dev,
+			   struct iw_request_info *info,
+			   union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	return libipw_wx_get_scan(priv->ieee, info, wrqu, extra);
+}
+
+static int ipw_wx_set_encode(struct net_device *dev,
+			     struct iw_request_info *info,
+			     union iwreq_data *wrqu, char *key)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	int ret;
+	u32 cap = priv->capability;
+
+	mutex_lock(&priv->mutex);
+	ret = libipw_wx_set_encode(priv->ieee, info, wrqu, key);
+
+	/* In IBSS mode, we need to notify the firmware to update
+	 * the beacon info after we changed the capability. */
+	if (cap != priv->capability &&
+	    priv->ieee->iw_mode == IW_MODE_ADHOC &&
+	    priv->status & STATUS_ASSOCIATED)
+		ipw_disassociate(priv);
+
+	mutex_unlock(&priv->mutex);
+	return ret;
+}
+
+static int ipw_wx_get_encode(struct net_device *dev,
+			     struct iw_request_info *info,
+			     union iwreq_data *wrqu, char *key)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	return libipw_wx_get_encode(priv->ieee, info, wrqu, key);
+}
+
+static int ipw_wx_set_power(struct net_device *dev,
+			    struct iw_request_info *info,
+			    union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	int err;
+	mutex_lock(&priv->mutex);
+	if (wrqu->power.disabled) {
+		priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
+		err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
+		if (err) {
+			IPW_DEBUG_WX("failed setting power mode.\n");
+			mutex_unlock(&priv->mutex);
+			return err;
+		}
+		IPW_DEBUG_WX("SET Power Management Mode -> off\n");
+		mutex_unlock(&priv->mutex);
+		return 0;
+	}
+
+	switch (wrqu->power.flags & IW_POWER_MODE) {
+	case IW_POWER_ON:	/* If not specified */
+	case IW_POWER_MODE:	/* If set all mask */
+	case IW_POWER_ALL_R:	/* If explicitly set to all */
+		break;
+	default:		/* Otherwise we don't support it */
+		IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
+			     wrqu->power.flags);
+		mutex_unlock(&priv->mutex);
+		return -EOPNOTSUPP;
+	}
+
+	/* If the user hasn't specified a power management mode yet, default
+	 * to BATTERY */
+	if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
+		priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
+	else
+		priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
+
+	err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
+	if (err) {
+		IPW_DEBUG_WX("failed setting power mode.\n");
+		mutex_unlock(&priv->mutex);
+		return err;
+	}
+
+	IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
+	mutex_unlock(&priv->mutex);
+	return 0;
+}
+
+static int ipw_wx_get_power(struct net_device *dev,
+			    struct iw_request_info *info,
+			    union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	mutex_lock(&priv->mutex);
+	if (!(priv->power_mode & IPW_POWER_ENABLED))
+		wrqu->power.disabled = 1;
+	else
+		wrqu->power.disabled = 0;
+
+	mutex_unlock(&priv->mutex);
+	IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
+
+	return 0;
+}
+
+static int ipw_wx_set_powermode(struct net_device *dev,
+				struct iw_request_info *info,
+				union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	int mode = *(int *)extra;
+	int err;
+
+	mutex_lock(&priv->mutex);
+	if ((mode < 1) || (mode > IPW_POWER_LIMIT))
+		mode = IPW_POWER_AC;
+
+	if (IPW_POWER_LEVEL(priv->power_mode) != mode) {
+		err = ipw_send_power_mode(priv, mode);
+		if (err) {
+			IPW_DEBUG_WX("failed setting power mode.\n");
+			mutex_unlock(&priv->mutex);
+			return err;
+		}
+		priv->power_mode = IPW_POWER_ENABLED | mode;
+	}
+	mutex_unlock(&priv->mutex);
+	return 0;
+}
+
+#define MAX_WX_STRING 80
+static int ipw_wx_get_powermode(struct net_device *dev,
+				struct iw_request_info *info,
+				union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	int level = IPW_POWER_LEVEL(priv->power_mode);
+	char *p = extra;
+
+	p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
+
+	switch (level) {
+	case IPW_POWER_AC:
+		p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
+		break;
+	case IPW_POWER_BATTERY:
+		p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
+		break;
+	default:
+		p += snprintf(p, MAX_WX_STRING - (p - extra),
+			      "(Timeout %dms, Period %dms)",
+			      timeout_duration[level - 1] / 1000,
+			      period_duration[level - 1] / 1000);
+	}
+
+	if (!(priv->power_mode & IPW_POWER_ENABLED))
+		p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
+
+	wrqu->data.length = p - extra + 1;
+
+	return 0;
+}
+
+static int ipw_wx_set_wireless_mode(struct net_device *dev,
+				    struct iw_request_info *info,
+				    union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	int mode = *(int *)extra;
+	u8 band = 0, modulation = 0;
+
+	if (mode == 0 || mode & ~IEEE_MODE_MASK) {
+		IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
+		return -EINVAL;
+	}
+	mutex_lock(&priv->mutex);
+	if (priv->adapter == IPW_2915ABG) {
+		priv->ieee->abg_true = 1;
+		if (mode & IEEE_A) {
+			band |= LIBIPW_52GHZ_BAND;
+			modulation |= LIBIPW_OFDM_MODULATION;
+		} else
+			priv->ieee->abg_true = 0;
+	} else {
+		if (mode & IEEE_A) {
+			IPW_WARNING("Attempt to set 2200BG into "
+				    "802.11a mode\n");
+			mutex_unlock(&priv->mutex);
+			return -EINVAL;
+		}
+
+		priv->ieee->abg_true = 0;
+	}
+
+	if (mode & IEEE_B) {
+		band |= LIBIPW_24GHZ_BAND;
+		modulation |= LIBIPW_CCK_MODULATION;
+	} else
+		priv->ieee->abg_true = 0;
+
+	if (mode & IEEE_G) {
+		band |= LIBIPW_24GHZ_BAND;
+		modulation |= LIBIPW_OFDM_MODULATION;
+	} else
+		priv->ieee->abg_true = 0;
+
+	priv->ieee->mode = mode;
+	priv->ieee->freq_band = band;
+	priv->ieee->modulation = modulation;
+	init_supported_rates(priv, &priv->rates);
+
+	/* Network configuration changed -- force [re]association */
+	IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
+	if (!ipw_disassociate(priv)) {
+		ipw_send_supported_rates(priv, &priv->rates);
+		ipw_associate(priv);
+	}
+
+	/* Update the band LEDs */
+	ipw_led_band_on(priv);
+
+	IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
+		     mode & IEEE_A ? 'a' : '.',
+		     mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
+	mutex_unlock(&priv->mutex);
+	return 0;
+}
+
+static int ipw_wx_get_wireless_mode(struct net_device *dev,
+				    struct iw_request_info *info,
+				    union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	mutex_lock(&priv->mutex);
+	switch (priv->ieee->mode) {
+	case IEEE_A:
+		strncpy(extra, "802.11a (1)", MAX_WX_STRING);
+		break;
+	case IEEE_B:
+		strncpy(extra, "802.11b (2)", MAX_WX_STRING);
+		break;
+	case IEEE_A | IEEE_B:
+		strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
+		break;
+	case IEEE_G:
+		strncpy(extra, "802.11g (4)", MAX_WX_STRING);
+		break;
+	case IEEE_A | IEEE_G:
+		strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
+		break;
+	case IEEE_B | IEEE_G:
+		strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
+		break;
+	case IEEE_A | IEEE_B | IEEE_G:
+		strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
+		break;
+	default:
+		strncpy(extra, "unknown", MAX_WX_STRING);
+		break;
+	}
+	extra[MAX_WX_STRING - 1] = '\0';
+
+	IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
+
+	wrqu->data.length = strlen(extra) + 1;
+	mutex_unlock(&priv->mutex);
+
+	return 0;
+}
+
+static int ipw_wx_set_preamble(struct net_device *dev,
+			       struct iw_request_info *info,
+			       union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	int mode = *(int *)extra;
+	mutex_lock(&priv->mutex);
+	/* Switching from SHORT -> LONG requires a disassociation */
+	if (mode == 1) {
+		if (!(priv->config & CFG_PREAMBLE_LONG)) {
+			priv->config |= CFG_PREAMBLE_LONG;
+
+			/* Network configuration changed -- force [re]association */
+			IPW_DEBUG_ASSOC
+			    ("[re]association triggered due to preamble change.\n");
+			if (!ipw_disassociate(priv))
+				ipw_associate(priv);
+		}
+		goto done;
+	}
+
+	if (mode == 0) {
+		priv->config &= ~CFG_PREAMBLE_LONG;
+		goto done;
+	}
+	mutex_unlock(&priv->mutex);
+	return -EINVAL;
+
+      done:
+	mutex_unlock(&priv->mutex);
+	return 0;
+}
+
+static int ipw_wx_get_preamble(struct net_device *dev,
+			       struct iw_request_info *info,
+			       union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	mutex_lock(&priv->mutex);
+	if (priv->config & CFG_PREAMBLE_LONG)
+		snprintf(wrqu->name, IFNAMSIZ, "long (1)");
+	else
+		snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
+	mutex_unlock(&priv->mutex);
+	return 0;
+}
+
+#ifdef CONFIG_IPW2200_MONITOR
+static int ipw_wx_set_monitor(struct net_device *dev,
+			      struct iw_request_info *info,
+			      union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	int *parms = (int *)extra;
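+	/* parms[0]: non-zero enables monitor mode; parms[1]: channel to
+	 * tune (the two ints declared for "monitor" in ipw_priv_args). */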
+	int enable = (parms[0] > 0);
+	mutex_lock(&priv->mutex);
+	IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
+	if (enable) {
+		if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
+#ifdef CONFIG_IPW2200_RADIOTAP
+			priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
+#else
+			priv->net_dev->type = ARPHRD_IEEE80211;
+#endif
+			schedule_work(&priv->adapter_restart);
+		}
+
+		ipw_set_channel(priv, parms[1]);
+	} else {
+		if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
+			mutex_unlock(&priv->mutex);
+			return 0;
+		}
+		priv->net_dev->type = ARPHRD_ETHER;
+		schedule_work(&priv->adapter_restart);
+	}
+	mutex_unlock(&priv->mutex);
+	return 0;
+}
+
+#endif				/* CONFIG_IPW2200_MONITOR */
+
+static int ipw_wx_reset(struct net_device *dev,
+			struct iw_request_info *info,
+			union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	IPW_DEBUG_WX("RESET\n");
+	schedule_work(&priv->adapter_restart);
+	return 0;
+}
+
+static int ipw_wx_sw_reset(struct net_device *dev,
+			   struct iw_request_info *info,
+			   union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	union iwreq_data wrqu_sec = {
+		.encoding = {
+			     .flags = IW_ENCODE_DISABLED,
+			     },
+	};
+	int ret;
+
+	IPW_DEBUG_WX("SW_RESET\n");
+
+	mutex_lock(&priv->mutex);
+
+	ret = ipw_sw_reset(priv, 2);
+	if (!ret) {
+		free_firmware();
+		ipw_adapter_restart(priv);
+	}
+
+	/* The SW reset bit might have been toggled on by the 'disable'
+	 * module parameter, so take appropriate action */
+	ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
+
+	mutex_unlock(&priv->mutex);
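+	/* priv->mutex is dropped across this call, presumably so the libipw
+	 * encode path can take its own locks without nesting under it. */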
+	libipw_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
+	mutex_lock(&priv->mutex);
+
+	if (!(priv->status & STATUS_RF_KILL_MASK)) {
+		/* Configuration likely changed -- force [re]association */
+		IPW_DEBUG_ASSOC("[re]association triggered due to sw "
+				"reset.\n");
+		if (!ipw_disassociate(priv))
+			ipw_associate(priv);
+	}
+
+	mutex_unlock(&priv->mutex);
+
+	return 0;
+}
+
+/* Rebase the WE IOCTLs to zero for the handler array */
+static iw_handler ipw_wx_handlers[] = {
+	IW_HANDLER(SIOCGIWNAME, (iw_handler)cfg80211_wext_giwname),
+	IW_HANDLER(SIOCSIWFREQ, ipw_wx_set_freq),
+	IW_HANDLER(SIOCGIWFREQ, ipw_wx_get_freq),
+	IW_HANDLER(SIOCSIWMODE, ipw_wx_set_mode),
+	IW_HANDLER(SIOCGIWMODE, ipw_wx_get_mode),
+	IW_HANDLER(SIOCSIWSENS, ipw_wx_set_sens),
+	IW_HANDLER(SIOCGIWSENS, ipw_wx_get_sens),
+	IW_HANDLER(SIOCGIWRANGE, ipw_wx_get_range),
+	IW_HANDLER(SIOCSIWAP, ipw_wx_set_wap),
+	IW_HANDLER(SIOCGIWAP, ipw_wx_get_wap),
+	IW_HANDLER(SIOCSIWSCAN, ipw_wx_set_scan),
+	IW_HANDLER(SIOCGIWSCAN, ipw_wx_get_scan),
+	IW_HANDLER(SIOCSIWESSID, ipw_wx_set_essid),
+	IW_HANDLER(SIOCGIWESSID, ipw_wx_get_essid),
+	IW_HANDLER(SIOCSIWNICKN, ipw_wx_set_nick),
+	IW_HANDLER(SIOCGIWNICKN, ipw_wx_get_nick),
+	IW_HANDLER(SIOCSIWRATE, ipw_wx_set_rate),
+	IW_HANDLER(SIOCGIWRATE, ipw_wx_get_rate),
+	IW_HANDLER(SIOCSIWRTS, ipw_wx_set_rts),
+	IW_HANDLER(SIOCGIWRTS, ipw_wx_get_rts),
+	IW_HANDLER(SIOCSIWFRAG, ipw_wx_set_frag),
+	IW_HANDLER(SIOCGIWFRAG, ipw_wx_get_frag),
+	IW_HANDLER(SIOCSIWTXPOW, ipw_wx_set_txpow),
+	IW_HANDLER(SIOCGIWTXPOW, ipw_wx_get_txpow),
+	IW_HANDLER(SIOCSIWRETRY, ipw_wx_set_retry),
+	IW_HANDLER(SIOCGIWRETRY, ipw_wx_get_retry),
+	IW_HANDLER(SIOCSIWENCODE, ipw_wx_set_encode),
+	IW_HANDLER(SIOCGIWENCODE, ipw_wx_get_encode),
+	IW_HANDLER(SIOCSIWPOWER, ipw_wx_set_power),
+	IW_HANDLER(SIOCGIWPOWER, ipw_wx_get_power),
+	IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
+	IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
+	IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
+	IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
+	IW_HANDLER(SIOCSIWGENIE, ipw_wx_set_genie),
+	IW_HANDLER(SIOCGIWGENIE, ipw_wx_get_genie),
+	IW_HANDLER(SIOCSIWMLME, ipw_wx_set_mlme),
+	IW_HANDLER(SIOCSIWAUTH, ipw_wx_set_auth),
+	IW_HANDLER(SIOCGIWAUTH, ipw_wx_get_auth),
+	IW_HANDLER(SIOCSIWENCODEEXT, ipw_wx_set_encodeext),
+	IW_HANDLER(SIOCGIWENCODEEXT, ipw_wx_get_encodeext),
+};
+
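+/* Private WE ioctl numbers.  Handlers are dispatched by index, so the
+ * order here must match ipw_priv_handler[] below. */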
+enum {
+	IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
+	IPW_PRIV_GET_POWER,
+	IPW_PRIV_SET_MODE,
+	IPW_PRIV_GET_MODE,
+	IPW_PRIV_SET_PREAMBLE,
+	IPW_PRIV_GET_PREAMBLE,
+	IPW_PRIV_RESET,
+	IPW_PRIV_SW_RESET,
+#ifdef CONFIG_IPW2200_MONITOR
+	IPW_PRIV_SET_MONITOR,
+#endif
+};
+
+static struct iw_priv_args ipw_priv_args[] = {
+	{
+	 .cmd = IPW_PRIV_SET_POWER,
+	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+	 .name = "set_power"},
+	{
+	 .cmd = IPW_PRIV_GET_POWER,
+	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
+	 .name = "get_power"},
+	{
+	 .cmd = IPW_PRIV_SET_MODE,
+	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+	 .name = "set_mode"},
+	{
+	 .cmd = IPW_PRIV_GET_MODE,
+	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
+	 .name = "get_mode"},
+	{
+	 .cmd = IPW_PRIV_SET_PREAMBLE,
+	 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+	 .name = "set_preamble"},
+	{
+	 .cmd = IPW_PRIV_GET_PREAMBLE,
+	 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
+	 .name = "get_preamble"},
+	{
+	 IPW_PRIV_RESET,
+	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
+	{
+	 IPW_PRIV_SW_RESET,
+	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
+#ifdef CONFIG_IPW2200_MONITOR
+	{
+	 IPW_PRIV_SET_MONITOR,
+	 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
+#endif				/* CONFIG_IPW2200_MONITOR */
+};
+
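+/* Indexed by (cmd - SIOCIWFIRSTPRIV); order must mirror the IPW_PRIV_*
+ * enum above. */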
+static iw_handler ipw_priv_handler[] = {
+	ipw_wx_set_powermode,
+	ipw_wx_get_powermode,
+	ipw_wx_set_wireless_mode,
+	ipw_wx_get_wireless_mode,
+	ipw_wx_set_preamble,
+	ipw_wx_get_preamble,
+	ipw_wx_reset,
+	ipw_wx_sw_reset,
+#ifdef CONFIG_IPW2200_MONITOR
+	ipw_wx_set_monitor,
+#endif
+};
+
+static const struct iw_handler_def ipw_wx_handler_def = {
+	.standard = ipw_wx_handlers,
+	.num_standard = ARRAY_SIZE(ipw_wx_handlers),
+	.num_private = ARRAY_SIZE(ipw_priv_handler),
+	.num_private_args = ARRAY_SIZE(ipw_priv_args),
+	.private = ipw_priv_handler,
+	.private_args = ipw_priv_args,
+	.get_wireless_stats = ipw_get_wireless_stats,
+};
+
+/*
+ * Get wireless statistics.
+ * Called by /proc/net/wireless
+ * Also called by SIOCGIWSTATS
+ */
+static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	struct iw_statistics *wstats;
+
+	wstats = &priv->wstats;
+
+	/* if hw is disabled, then ipw_get_ordinal() can't be called.
+	 * netdev->get_wireless_stats seems to be called before fw is
+	 * initialized.  STATUS_ASSOCIATED will only be set if the hw is up
+	 * and associated; if not associated, the values are all meaningless
+	 * anyway, so zero them all and mark them INVALID */
+	if (!(priv->status & STATUS_ASSOCIATED)) {
+		wstats->miss.beacon = 0;
+		wstats->discard.retries = 0;
+		wstats->qual.qual = 0;
+		wstats->qual.level = 0;
+		wstats->qual.noise = 0;
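+		/* 7 == IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
+		 * IW_QUAL_NOISE_UPDATED */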
+		wstats->qual.updated = 7;
+		wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
+		    IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
+		return wstats;
+	}
+
+	wstats->qual.qual = priv->quality;
+	wstats->qual.level = priv->exp_avg_rssi;
+	wstats->qual.noise = priv->exp_avg_noise;
+	wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
+	    IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
+
+	wstats->miss.beacon = average_value(&priv->average_missed_beacons);
+	wstats->discard.retries = priv->last_tx_failures;
+	wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
+
+/*	if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
+	goto fail_get_ordinal;
+	wstats->discard.retries += tx_retry; */
+
+	return wstats;
+}
+
+/* net device stuff */
+
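+/* Default firmware system-configuration block; ipw_config() adjusts
+ * individual fields (BT coexistence, promiscuous accept flags, etc.)
+ * before sending it to the device. */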
+static  void init_sys_config(struct ipw_sys_config *sys_config)
+{
+	memset(sys_config, 0, sizeof(struct ipw_sys_config));
+	sys_config->bt_coexistence = 0;
+	sys_config->answer_broadcast_ssid_probe = 0;
+	sys_config->accept_all_data_frames = 0;
+	sys_config->accept_non_directed_frames = 1;
+	sys_config->exclude_unicast_unencrypted = 0;
+	sys_config->disable_unicast_decryption = 1;
+	sys_config->exclude_multicast_unencrypted = 0;
+	sys_config->disable_multicast_decryption = 1;
+	if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
+		antenna = CFG_SYS_ANTENNA_BOTH;
+	sys_config->antenna_diversity = antenna;
+	sys_config->pass_crc_to_host = 0;	/* TODO: See if 1 gives us FCS */
+	sys_config->dot11g_auto_detection = 0;
+	sys_config->enable_cts_to_self = 0;
+	sys_config->bt_coexist_collision_thr = 0;
+	sys_config->pass_noise_stats_to_host = 1;	/* 1 -- fix for 256 */
+	sys_config->silence_threshold = 0x1e;
+}
+
+static int ipw_net_open(struct net_device *dev)
+{
+	IPW_DEBUG_INFO("dev->open\n");
+	netif_start_queue(dev);
+	return 0;
+}
+
+static int ipw_net_stop(struct net_device *dev)
+{
+	IPW_DEBUG_INFO("dev->close\n");
+	netif_stop_queue(dev);
+	return 0;
+}
+
+/*
+TODO:
+
+Modify to send one TFD per fragment instead of using chunking; otherwise
+we need to heavily modify libipw_skb_to_txb().
+*/
+
+static int ipw_tx_skb(struct ipw_priv *priv, struct libipw_txb *txb,
+			     int pri)
+{
+	struct libipw_hdr_3addrqos *hdr = (struct libipw_hdr_3addrqos *)
+	    txb->fragments[0]->data;
+	int i = 0;
+	struct tfd_frame *tfd;
+#ifdef CONFIG_IPW2200_QOS
+	int tx_id = ipw_get_tx_queue_number(priv, pri);
+	struct clx2_tx_queue *txq = &priv->txq[tx_id];
+#else
+	struct clx2_tx_queue *txq = &priv->txq[0];
+#endif
+	struct clx2_queue *q = &txq->q;
+	u8 id, hdr_len, unicast;
+	int fc;
+
+	if (!(priv->status & STATUS_ASSOCIATED))
+		goto drop;
+
+	hdr_len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
+	switch (priv->ieee->iw_mode) {
+	case IW_MODE_ADHOC:
+		unicast = !is_multicast_ether_addr(hdr->addr1);
+		id = ipw_find_station(priv, hdr->addr1);
+		if (id == IPW_INVALID_STATION) {
+			id = ipw_add_station(priv, hdr->addr1);
+			if (id == IPW_INVALID_STATION) {
+				IPW_WARNING("Attempt to send data to "
+					    "invalid cell: %pM\n",
+					    hdr->addr1);
+				goto drop;
+			}
+		}
+		break;
+
+	case IW_MODE_INFRA:
+	default:
+		unicast = !is_multicast_ether_addr(hdr->addr3);
+		id = 0;
+		break;
+	}
+
+	tfd = &txq->bd[q->first_empty];
+	txq->txb[q->first_empty] = txb;
+	memset(tfd, 0, sizeof(*tfd));
+	tfd->u.data.station_number = id;
+
+	tfd->control_flags.message_type = TX_FRAME_TYPE;
+	tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
+
+	tfd->u.data.cmd_id = DINO_CMD_TX;
+	tfd->u.data.len = cpu_to_le16(txb->payload_size);
+
+	if (priv->assoc_request.ieee_mode == IPW_B_MODE)
+		tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
+	else
+		tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
+
+	if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
+		tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
+
+	fc = le16_to_cpu(hdr->frame_ctl);
+	hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
+
+	memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
+
+	if (likely(unicast))
+		tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
+
+	if (txb->encrypted && !priv->ieee->host_encrypt) {
+		switch (priv->ieee->sec.level) {
+		case SEC_LEVEL_3:
+			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
+			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+			/* XXX: ACK flag must be set for CCMP even if it
+			 * is a multicast/broadcast packet, because CCMP
+			 * group communication encrypted by GTK is
+			 * actually done by the AP. */
+			if (!unicast)
+				tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
+
+			tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
+			tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
+			tfd->u.data.key_index = 0;
+			tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
+			break;
+		case SEC_LEVEL_2:
+			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
+			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+			tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
+			tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
+			tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
+			break;
+		case SEC_LEVEL_1:
+			tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
+			    cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+			tfd->u.data.key_index = priv->ieee->crypt_info.tx_keyidx;
+			if (priv->ieee->sec.key_sizes[priv->ieee->crypt_info.tx_keyidx] <=
+			    40)
+				tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
+			else
+				tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
+			break;
+		case SEC_LEVEL_0:
+			break;
+		default:
+			printk(KERN_ERR "Unknown security level %d\n",
+			       priv->ieee->sec.level);
+			break;
+		}
+	} else
+		/* No hardware encryption */
+		tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
+
+#ifdef CONFIG_IPW2200_QOS
+	if (fc & IEEE80211_STYPE_QOS_DATA)
+		ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
+#endif				/* CONFIG_IPW2200_QOS */
+
+	/* payload */
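+	/* Map up to NUM_TFD_CHUNKS - 2 fragments directly; any fragments
+	 * left over are coalesced into a single skb below. */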
+	tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
+						 txb->nr_frags));
+	IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
+		       txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
+	for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
+		IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
+			       i, le32_to_cpu(tfd->u.data.num_chunks),
+			       txb->fragments[i]->len - hdr_len);
+		IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
+			     i, le32_to_cpu(tfd->u.data.num_chunks),
+			     txb->fragments[i]->len - hdr_len);
+		printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
+			   txb->fragments[i]->len - hdr_len);
+
+		tfd->u.data.chunk_ptr[i] =
+		    cpu_to_le32(pci_map_single
+				(priv->pci_dev,
+				 txb->fragments[i]->data + hdr_len,
+				 txb->fragments[i]->len - hdr_len,
+				 PCI_DMA_TODEVICE));
+		tfd->u.data.chunk_len[i] =
+		    cpu_to_le16(txb->fragments[i]->len - hdr_len);
+	}
+
+	if (i != txb->nr_frags) {
+		struct sk_buff *skb;
+		u16 remaining_bytes = 0;
+		int j;
+
+		for (j = i; j < txb->nr_frags; j++)
+			remaining_bytes += txb->fragments[j]->len - hdr_len;
+
+		printk(KERN_INFO "Trying to reallocate for %d bytes\n",
+		       remaining_bytes);
+		skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
+		if (skb != NULL) {
+			tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
+			for (j = i; j < txb->nr_frags; j++) {
+				int size = txb->fragments[j]->len - hdr_len;
+
+				printk(KERN_INFO "Adding frag %d %d...\n",
+				       j, size);
+				skb_put_data(skb,
+					     txb->fragments[j]->data + hdr_len,
+					     size);
+			}
+			dev_kfree_skb_any(txb->fragments[i]);
+			txb->fragments[i] = skb;
+			tfd->u.data.chunk_ptr[i] =
+			    cpu_to_le32(pci_map_single
+					(priv->pci_dev, skb->data,
+					 remaining_bytes,
+					 PCI_DMA_TODEVICE));
+
+			le32_add_cpu(&tfd->u.data.num_chunks, 1);
+		}
+	}
+
+	/* kick DMA */
+	q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
+	ipw_write32(priv, q->reg_w, q->first_empty);
+
+	if (ipw_tx_queue_space(q) < q->high_mark)
+		netif_stop_queue(priv->net_dev);
+
+	return NETDEV_TX_OK;
+
+      drop:
+	IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
+	libipw_txb_free(txb);
+	return NETDEV_TX_OK;
+}
+
+static int ipw_net_is_queue_full(struct net_device *dev, int pri)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+#ifdef CONFIG_IPW2200_QOS
+	int tx_id = ipw_get_tx_queue_number(priv, pri);
+	struct clx2_tx_queue *txq = &priv->txq[tx_id];
+#else
+	struct clx2_tx_queue *txq = &priv->txq[0];
+#endif				/* CONFIG_IPW2200_QOS */
+
+	if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark)
+		return 1;
+
+	return 0;
+}
+
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
+				      struct libipw_txb *txb)
+{
+	struct libipw_rx_stats dummystats;
+	struct ieee80211_hdr *hdr;
+	u8 n;
+	u16 filter = priv->prom_priv->filter;
+	int hdr_only = 0;
+
+	if (filter & IPW_PROM_NO_TX)
+		return;
+
+	memset(&dummystats, 0, sizeof(dummystats));
+
+	/* Filtering of fragment chains is done against the first fragment */
+	hdr = (void *)txb->fragments[0]->data;
+	if (libipw_is_management(le16_to_cpu(hdr->frame_control))) {
+		if (filter & IPW_PROM_NO_MGMT)
+			return;
+		if (filter & IPW_PROM_MGMT_HEADER_ONLY)
+			hdr_only = 1;
+	} else if (libipw_is_control(le16_to_cpu(hdr->frame_control))) {
+		if (filter & IPW_PROM_NO_CTL)
+			return;
+		if (filter & IPW_PROM_CTL_HEADER_ONLY)
+			hdr_only = 1;
+	} else if (libipw_is_data(le16_to_cpu(hdr->frame_control))) {
+		if (filter & IPW_PROM_NO_DATA)
+			return;
+		if (filter & IPW_PROM_DATA_HEADER_ONLY)
+			hdr_only = 1;
+	}
+
+	for (n = 0; n < txb->nr_frags; ++n) {
+		struct sk_buff *src = txb->fragments[n];
+		struct sk_buff *dst;
+		struct ieee80211_radiotap_header *rt_hdr;
+		int len;
+
+		if (hdr_only) {
+			hdr = (void *)src->data;
+			len = libipw_get_hdrlen(le16_to_cpu(hdr->frame_control));
+		} else
+			len = src->len;
+
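+		/* Room for the frame plus a radiotap header and two 16-bit
+		 * channel fields (frequency and flags). */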
+		dst = alloc_skb(len + sizeof(*rt_hdr) + sizeof(u16)*2, GFP_ATOMIC);
+		if (!dst)
+			continue;
+
+		rt_hdr = skb_put(dst, sizeof(*rt_hdr));
+
+		rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
+		rt_hdr->it_pad = 0;
+		rt_hdr->it_present = 0; /* after all, it's just an idea */
+		rt_hdr->it_present |=  cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL);
+
+		*(__le16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
+			ieee80211chan2mhz(priv->channel));
+		if (priv->channel > 14) 	/* 802.11a */
+			*(__le16*)skb_put(dst, sizeof(u16)) =
+				cpu_to_le16(IEEE80211_CHAN_OFDM |
+					     IEEE80211_CHAN_5GHZ);
+		else if (priv->ieee->mode == IEEE_B) /* 802.11b */
+			*(__le16*)skb_put(dst, sizeof(u16)) =
+				cpu_to_le16(IEEE80211_CHAN_CCK |
+					     IEEE80211_CHAN_2GHZ);
+		else 		/* 802.11g */
+			*(__le16*)skb_put(dst, sizeof(u16)) =
+				cpu_to_le16(IEEE80211_CHAN_OFDM |
+				 IEEE80211_CHAN_2GHZ);
+
+		rt_hdr->it_len = cpu_to_le16(dst->len);
+
+		skb_copy_from_linear_data(src, skb_put(dst, len), len);
+
+		if (!libipw_rx(priv->prom_priv->ieee, dst, &dummystats))
+			dev_kfree_skb_any(dst);
+	}
+}
+#endif
+
+static netdev_tx_t ipw_net_hard_start_xmit(struct libipw_txb *txb,
+					   struct net_device *dev, int pri)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	unsigned long flags;
+	netdev_tx_t ret;
+
+	IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
+	spin_lock_irqsave(&priv->lock, flags);
+
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+	if (rtap_iface && netif_running(priv->prom_net_dev))
+		ipw_handle_promiscuous_tx(priv, txb);
+#endif
+
+	ret = ipw_tx_skb(priv, txb, pri);
+	if (ret == NETDEV_TX_OK)
+		__ipw_led_activity_on(priv);
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	return ret;
+}
+
+static void ipw_net_set_multicast_list(struct net_device *dev)
+{
+
+}
+
+static int ipw_net_set_mac_address(struct net_device *dev, void *p)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	struct sockaddr *addr = p;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+	mutex_lock(&priv->mutex);
+	priv->config |= CFG_CUSTOM_MAC;
+	memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
+	printk(KERN_INFO "%s: Setting MAC to %pM\n",
+	       priv->net_dev->name, priv->mac_addr);
+	schedule_work(&priv->adapter_restart);
+	mutex_unlock(&priv->mutex);
+	return 0;
+}
+
+static void ipw_ethtool_get_drvinfo(struct net_device *dev,
+				    struct ethtool_drvinfo *info)
+{
+	struct ipw_priv *p = libipw_priv(dev);
+	char vers[64];
+	char date[32];
+	u32 len;
+
+	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+
+	len = sizeof(vers);
+	ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
+	len = sizeof(date);
+	ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
+
+	snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
+		 vers, date);
+	strlcpy(info->bus_info, pci_name(p->pci_dev),
+		sizeof(info->bus_info));
+}
+
+static u32 ipw_ethtool_get_link(struct net_device *dev)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	return (priv->status & STATUS_ASSOCIATED) != 0;
+}
+
+static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
+{
+	return IPW_EEPROM_IMAGE_SIZE;
+}
+
+static int ipw_ethtool_get_eeprom(struct net_device *dev,
+				  struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+	struct ipw_priv *p = libipw_priv(dev);
+
+	if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
+		return -EINVAL;
+	mutex_lock(&p->mutex);
+	memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
+	mutex_unlock(&p->mutex);
+	return 0;
+}
+
+static int ipw_ethtool_set_eeprom(struct net_device *dev,
+				  struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+	struct ipw_priv *p = libipw_priv(dev);
+	int i;
+
+	if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
+		return -EINVAL;
+	mutex_lock(&p->mutex);
+	memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
+	for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
+		ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
+	mutex_unlock(&p->mutex);
+	return 0;
+}
+
+static const struct ethtool_ops ipw_ethtool_ops = {
+	.get_link = ipw_ethtool_get_link,
+	.get_drvinfo = ipw_ethtool_get_drvinfo,
+	.get_eeprom_len = ipw_ethtool_get_eeprom_len,
+	.get_eeprom = ipw_ethtool_get_eeprom,
+	.set_eeprom = ipw_ethtool_set_eeprom,
+};
+
+static irqreturn_t ipw_isr(int irq, void *data)
+{
+	struct ipw_priv *priv = data;
+	u32 inta, inta_mask;
+
+	if (!priv)
+		return IRQ_NONE;
+
+	spin_lock(&priv->irq_lock);
+
+	if (!(priv->status & STATUS_INT_ENABLED)) {
+		/* IRQ is disabled */
+		goto none;
+	}
+
+	inta = ipw_read32(priv, IPW_INTA_RW);
+	inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
+
+	if (inta == 0xFFFFFFFF) {
+		/* Hardware disappeared */
+		IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
+		goto none;
+	}
+
+	if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
+		/* Shared interrupt */
+		goto none;
+	}
+
+	/* tell the device to stop sending interrupts */
+	__ipw_disable_interrupts(priv);
+
+	/* ack current interrupts */
+	inta &= (IPW_INTA_MASK_ALL & inta_mask);
+	ipw_write32(priv, IPW_INTA_RW, inta);
+
+	/* Cache INTA value for our tasklet */
+	priv->isr_inta = inta;
+
+	tasklet_schedule(&priv->irq_tasklet);
+
+	spin_unlock(&priv->irq_lock);
+
+	return IRQ_HANDLED;
+      none:
+	spin_unlock(&priv->irq_lock);
+	return IRQ_NONE;
+}
+
+static void ipw_rf_kill(void *adapter)
+{
+	struct ipw_priv *priv = adapter;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	if (rf_kill_active(priv)) {
+		IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
+		schedule_delayed_work(&priv->rf_kill, 2 * HZ);
+		goto exit_unlock;
+	}
+
+	/* RF Kill is now disabled, so bring the device back up */
+
+	if (!(priv->status & STATUS_RF_KILL_MASK)) {
+		IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
+				  "device\n");
+
+		/* we cannot do an adapter restart while inside an irq lock */
+		schedule_work(&priv->adapter_restart);
+	} else
+		IPW_DEBUG_RF_KILL("HW RF Kill deactivated.  SW RF Kill still "
+				  "enabled\n");
+
+      exit_unlock:
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void ipw_bg_rf_kill(struct work_struct *work)
+{
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, rf_kill.work);
+	mutex_lock(&priv->mutex);
+	ipw_rf_kill(priv);
+	mutex_unlock(&priv->mutex);
+}
+
+static void ipw_link_up(struct ipw_priv *priv)
+{
+	priv->last_seq_num = -1;
+	priv->last_frag_num = -1;
+	priv->last_packet_time = 0;
+
+	netif_carrier_on(priv->net_dev);
+
+	cancel_delayed_work(&priv->request_scan);
+	cancel_delayed_work(&priv->request_direct_scan);
+	cancel_delayed_work(&priv->request_passive_scan);
+	cancel_delayed_work(&priv->scan_event);
+	ipw_reset_stats(priv);
+	/* Ensure the rate is updated immediately */
+	priv->last_rate = ipw_get_current_rate(priv);
+	ipw_gather_stats(priv);
+	ipw_led_link_up(priv);
+	notify_wx_assoc_event(priv);
+
+	if (priv->config & CFG_BACKGROUND_SCAN)
+		schedule_delayed_work(&priv->request_scan, HZ);
+}
+
+static void ipw_bg_link_up(struct work_struct *work)
+{
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, link_up);
+	mutex_lock(&priv->mutex);
+	ipw_link_up(priv);
+	mutex_unlock(&priv->mutex);
+}
+
+static void ipw_link_down(struct ipw_priv *priv)
+{
+	ipw_led_link_down(priv);
+	netif_carrier_off(priv->net_dev);
+	notify_wx_assoc_event(priv);
+
+	/* Cancel any queued work ... */
+	cancel_delayed_work(&priv->request_scan);
+	cancel_delayed_work(&priv->request_direct_scan);
+	cancel_delayed_work(&priv->request_passive_scan);
+	cancel_delayed_work(&priv->adhoc_check);
+	cancel_delayed_work(&priv->gather_stats);
+
+	ipw_reset_stats(priv);
+
+	if (!(priv->status & STATUS_EXIT_PENDING)) {
+		/* Queue up another scan... */
+		schedule_delayed_work(&priv->request_scan, 0);
+	} else
+		cancel_delayed_work(&priv->scan_event);
+}
+
+static void ipw_bg_link_down(struct work_struct *work)
+{
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, link_down);
+	mutex_lock(&priv->mutex);
+	ipw_link_down(priv);
+	mutex_unlock(&priv->mutex);
+}
+
+static int ipw_setup_deferred_work(struct ipw_priv *priv)
+{
+	int ret = 0;
+
+	init_waitqueue_head(&priv->wait_command_queue);
+	init_waitqueue_head(&priv->wait_state);
+
+	INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
+	INIT_WORK(&priv->associate, ipw_bg_associate);
+	INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
+	INIT_WORK(&priv->system_config, ipw_system_config);
+	INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
+	INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
+	INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
+	INIT_WORK(&priv->up, ipw_bg_up);
+	INIT_WORK(&priv->down, ipw_bg_down);
+	INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
+	INIT_DELAYED_WORK(&priv->request_direct_scan, ipw_request_direct_scan);
+	INIT_DELAYED_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
+	INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event);
+	INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
+	INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
+	INIT_WORK(&priv->roam, ipw_bg_roam);
+	INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
+	INIT_WORK(&priv->link_up, ipw_bg_link_up);
+	INIT_WORK(&priv->link_down, ipw_bg_link_down);
+	INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
+	INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
+	INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
+	INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
+
+#ifdef CONFIG_IPW2200_QOS
+	INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
+#endif				/* CONFIG_IPW2200_QOS */
+
+	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
+		     ipw_irq_tasklet, (unsigned long)priv);
+
+	return ret;
+}
+
+static void shim__set_security(struct net_device *dev,
+			       struct libipw_security *sec)
+{
+	struct ipw_priv *priv = libipw_priv(dev);
+	int i;
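+	/* Bits 0-3 of sec->flags mark which of the four WEP key slots carry
+	 * updated key material. */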
+	for (i = 0; i < 4; i++) {
+		if (sec->flags & (1 << i)) {
+			priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
+			priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
+			if (sec->key_sizes[i] == 0)
+				priv->ieee->sec.flags &= ~(1 << i);
+			else {
+				memcpy(priv->ieee->sec.keys[i], sec->keys[i],
+				       sec->key_sizes[i]);
+				priv->ieee->sec.flags |= (1 << i);
+			}
+			priv->status |= STATUS_SECURITY_UPDATED;
+		} else if (sec->level != SEC_LEVEL_1)
+			priv->ieee->sec.flags &= ~(1 << i);
+	}
+
+	if (sec->flags & SEC_ACTIVE_KEY) {
+		if (sec->active_key <= 3) {
+			priv->ieee->sec.active_key = sec->active_key;
+			priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
+		} else
+			priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
+		priv->status |= STATUS_SECURITY_UPDATED;
+	} else
+		priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
+
+	if ((sec->flags & SEC_AUTH_MODE) &&
+	    (priv->ieee->sec.auth_mode != sec->auth_mode)) {
+		priv->ieee->sec.auth_mode = sec->auth_mode;
+		priv->ieee->sec.flags |= SEC_AUTH_MODE;
+		if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
+			priv->capability |= CAP_SHARED_KEY;
+		else
+			priv->capability &= ~CAP_SHARED_KEY;
+		priv->status |= STATUS_SECURITY_UPDATED;
+	}
+
+	if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
+		priv->ieee->sec.flags |= SEC_ENABLED;
+		priv->ieee->sec.enabled = sec->enabled;
+		priv->status |= STATUS_SECURITY_UPDATED;
+		if (sec->enabled)
+			priv->capability |= CAP_PRIVACY_ON;
+		else
+			priv->capability &= ~CAP_PRIVACY_ON;
+	}
+
+	if (sec->flags & SEC_ENCRYPT)
+		priv->ieee->sec.encrypt = sec->encrypt;
+
+	if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
+		priv->ieee->sec.level = sec->level;
+		priv->ieee->sec.flags |= SEC_LEVEL;
+		priv->status |= STATUS_SECURITY_UPDATED;
+	}
+
+	if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
+		ipw_set_hwcrypto_keys(priv);
+
+	/* To match current functionality of ipw2100 (which works well w/
+	 * various supplicants), we don't force a disassociate if the
+	 * privacy capability changes ... */
+#if 0
+	if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
+	    (((priv->assoc_request.capability &
+	       cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && !sec->enabled) ||
+	     (!(priv->assoc_request.capability &
+		cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && sec->enabled))) {
+		IPW_DEBUG_ASSOC("Disassociating due to capability "
+				"change.\n");
+		ipw_disassociate(priv);
+	}
+#endif
+}
+
+static int init_supported_rates(struct ipw_priv *priv,
+				struct ipw_supported_rates *rates)
+{
+	/* TODO: Mask out rates based on priv->rates_mask */
+
+	memset(rates, 0, sizeof(*rates));
+	/* configure supported rates */
+	switch (priv->ieee->freq_band) {
+	case LIBIPW_52GHZ_BAND:
+		rates->ieee_mode = IPW_A_MODE;
+		rates->purpose = IPW_RATE_CAPABILITIES;
+		ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
+					LIBIPW_OFDM_DEFAULT_RATES_MASK);
+		break;
+
+	default:		/* Mixed or 2.4GHz */
+		rates->ieee_mode = IPW_G_MODE;
+		rates->purpose = IPW_RATE_CAPABILITIES;
+		ipw_add_cck_scan_rates(rates, LIBIPW_CCK_MODULATION,
+				       LIBIPW_CCK_DEFAULT_RATES_MASK);
+		if (priv->ieee->modulation & LIBIPW_OFDM_MODULATION) {
+			ipw_add_ofdm_scan_rates(rates, LIBIPW_CCK_MODULATION,
+						LIBIPW_OFDM_DEFAULT_RATES_MASK);
+		}
+		break;
+	}
+
+	return 0;
+}
+
+static int ipw_config(struct ipw_priv *priv)
+{
+	/* This is only called from ipw_up, which resets/reloads the firmware,
+	   so we don't need to first disable the card before we configure
+	   it */
+	if (ipw_set_tx_power(priv))
+		goto error;
+
+	/* initialize adapter address */
+	if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
+		goto error;
+
+	/* set basic system config settings */
+	init_sys_config(&priv->sys_config);
+
+	/* Support Bluetooth if we have BT h/w on board, and user wants to.
+	 * Does not support BT priority yet (don't abort or defer our Tx) */
+	if (bt_coexist) {
+		unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
+
+		if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
+			priv->sys_config.bt_coexistence
+			    |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
+		if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
+			priv->sys_config.bt_coexistence
+			    |= CFG_BT_COEXISTENCE_OOB;
+	}
+
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+	if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
+		priv->sys_config.accept_all_data_frames = 1;
+		priv->sys_config.accept_non_directed_frames = 1;
+		priv->sys_config.accept_all_mgmt_bcpr = 1;
+		priv->sys_config.accept_all_mgmt_frames = 1;
+	}
+#endif
+
+	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
+		priv->sys_config.answer_broadcast_ssid_probe = 1;
+	else
+		priv->sys_config.answer_broadcast_ssid_probe = 0;
+
+	if (ipw_send_system_config(priv))
+		goto error;
+
+	init_supported_rates(priv, &priv->rates);
+	if (ipw_send_supported_rates(priv, &priv->rates))
+		goto error;
+
+	/* Set request-to-send threshold */
+	if (priv->rts_threshold) {
+		if (ipw_send_rts_threshold(priv, priv->rts_threshold))
+			goto error;
+	}
+#ifdef CONFIG_IPW2200_QOS
+	IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
+	ipw_qos_activate(priv, NULL);
+#endif				/* CONFIG_IPW2200_QOS */
+
+	if (ipw_set_random_seed(priv))
+		goto error;
+
+	/* final state transition to the RUN state */
+	if (ipw_send_host_complete(priv))
+		goto error;
+
+	priv->status |= STATUS_INIT;
+
+	ipw_led_init(priv);
+	ipw_led_radio_on(priv);
+	priv->notif_missed_beacons = 0;
+
+	/* Set hardware WEP key if it is configured. */
+	if ((priv->capability & CAP_PRIVACY_ON) &&
+	    (priv->ieee->sec.level == SEC_LEVEL_1) &&
+	    !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
+		ipw_set_hwcrypto_keys(priv);
+
+	return 0;
+
+      error:
+	return -EIO;
+}
+
+/*
+ * NOTE:
+ *
+ * These tables have been tested in conjunction with the
+ * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
+ *
+ * Altering these values, using them on other hardware, or using them in
+ * geographies not intended for resale of the above-mentioned Intel
+ * adapters has not been tested.
+ *
+ * Remember to update the table in README.ipw2200 when changing this
+ * table.
+ *
+ */
+static const struct libipw_geo ipw_geos[] = {
+	{			/* Restricted */
+	 "---",
+	 .bg_channels = 11,
+	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
+		{2427, 4}, {2432, 5}, {2437, 6},
+		{2442, 7}, {2447, 8}, {2452, 9},
+		{2457, 10}, {2462, 11}},
+	 },
+
+	{			/* Custom US/Canada */
+	 "ZZF",
+	 .bg_channels = 11,
+	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
+		{2427, 4}, {2432, 5}, {2437, 6},
+		{2442, 7}, {2447, 8}, {2452, 9},
+		{2457, 10}, {2462, 11}},
+	 .a_channels = 8,
+	 .a = {{5180, 36},
+	       {5200, 40},
+	       {5220, 44},
+	       {5240, 48},
+	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
+	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
+	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
+	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY}},
+	 },
+
+	{			/* Rest of World */
+	 "ZZD",
+	 .bg_channels = 13,
+	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
+		{2427, 4}, {2432, 5}, {2437, 6},
+		{2442, 7}, {2447, 8}, {2452, 9},
+		{2457, 10}, {2462, 11}, {2467, 12},
+		{2472, 13}},
+	 },
+
+	{			/* Custom USA & Europe & High */
+	 "ZZA",
+	 .bg_channels = 11,
+	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
+		{2427, 4}, {2432, 5}, {2437, 6},
+		{2442, 7}, {2447, 8}, {2452, 9},
+		{2457, 10}, {2462, 11}},
+	 .a_channels = 13,
+	 .a = {{5180, 36},
+	       {5200, 40},
+	       {5220, 44},
+	       {5240, 48},
+	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
+	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
+	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
+	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
+	       {5745, 149},
+	       {5765, 153},
+	       {5785, 157},
+	       {5805, 161},
+	       {5825, 165}},
+	 },
+
+	{			/* Custom NA & Europe */
+	 "ZZB",
+	 .bg_channels = 11,
+	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
+		{2427, 4}, {2432, 5}, {2437, 6},
+		{2442, 7}, {2447, 8}, {2452, 9},
+		{2457, 10}, {2462, 11}},
+	 .a_channels = 13,
+	 .a = {{5180, 36},
+	       {5200, 40},
+	       {5220, 44},
+	       {5240, 48},
+	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
+	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
+	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
+	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
+	       {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
+	       {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
+	       {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
+	       {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
+	       {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
+	 },
+
+	{			/* Custom Japan */
+	 "ZZC",
+	 .bg_channels = 11,
+	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
+		{2427, 4}, {2432, 5}, {2437, 6},
+		{2442, 7}, {2447, 8}, {2452, 9},
+		{2457, 10}, {2462, 11}},
+	 .a_channels = 4,
+	 .a = {{5170, 34}, {5190, 38},
+	       {5210, 42}, {5230, 46}},
+	 },
+
+	{			/* Custom */
+	 "ZZM",
+	 .bg_channels = 11,
+	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
+		{2427, 4}, {2432, 5}, {2437, 6},
+		{2442, 7}, {2447, 8}, {2452, 9},
+		{2457, 10}, {2462, 11}},
+	 },
+
+	{			/* Europe */
+	 "ZZE",
+	 .bg_channels = 13,
+	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
+		{2427, 4}, {2432, 5}, {2437, 6},
+		{2442, 7}, {2447, 8}, {2452, 9},
+		{2457, 10}, {2462, 11}, {2467, 12},
+		{2472, 13}},
+	 .a_channels = 19,
+	 .a = {{5180, 36},
+	       {5200, 40},
+	       {5220, 44},
+	       {5240, 48},
+	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
+	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
+	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
+	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
+	       {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
+	       {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
+	       {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
+	       {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
+	       {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
+	       {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
+	       {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
+	       {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
+	       {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
+	       {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
+	       {5700, 140, LIBIPW_CH_PASSIVE_ONLY}},
+	 },
+
+	{			/* Custom Japan */
+	 "ZZJ",
+	 .bg_channels = 14,
+	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
+		{2427, 4}, {2432, 5}, {2437, 6},
+		{2442, 7}, {2447, 8}, {2452, 9},
+		{2457, 10}, {2462, 11}, {2467, 12},
+		{2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY}},
+	 .a_channels = 4,
+	 .a = {{5170, 34}, {5190, 38},
+	       {5210, 42}, {5230, 46}},
+	 },
+
+	{			/* Rest of World */
+	 "ZZR",
+	 .bg_channels = 14,
+	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
+		{2427, 4}, {2432, 5}, {2437, 6},
+		{2442, 7}, {2447, 8}, {2452, 9},
+		{2457, 10}, {2462, 11}, {2467, 12},
+		{2472, 13}, {2484, 14, LIBIPW_CH_B_ONLY |
+			     LIBIPW_CH_PASSIVE_ONLY}},
+	 },
+
+	{			/* High Band */
+	 "ZZH",
+	 .bg_channels = 13,
+	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
+		{2427, 4}, {2432, 5}, {2437, 6},
+		{2442, 7}, {2447, 8}, {2452, 9},
+		{2457, 10}, {2462, 11},
+		{2467, 12, LIBIPW_CH_PASSIVE_ONLY},
+		{2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
+	 .a_channels = 4,
+	 .a = {{5745, 149}, {5765, 153},
+	       {5785, 157}, {5805, 161}},
+	 },
+
+	{			/* Custom Europe */
+	 "ZZG",
+	 .bg_channels = 13,
+	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
+		{2427, 4}, {2432, 5}, {2437, 6},
+		{2442, 7}, {2447, 8}, {2452, 9},
+		{2457, 10}, {2462, 11},
+		{2467, 12}, {2472, 13}},
+	 .a_channels = 4,
+	 .a = {{5180, 36}, {5200, 40},
+	       {5220, 44}, {5240, 48}},
+	 },
+
+	{			/* Europe */
+	 "ZZK",
+	 .bg_channels = 13,
+	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
+		{2427, 4}, {2432, 5}, {2437, 6},
+		{2442, 7}, {2447, 8}, {2452, 9},
+		{2457, 10}, {2462, 11},
+		{2467, 12, LIBIPW_CH_PASSIVE_ONLY},
+		{2472, 13, LIBIPW_CH_PASSIVE_ONLY}},
+	 .a_channels = 24,
+	 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
+	       {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
+	       {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
+	       {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
+	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
+	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
+	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
+	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
+	       {5500, 100, LIBIPW_CH_PASSIVE_ONLY},
+	       {5520, 104, LIBIPW_CH_PASSIVE_ONLY},
+	       {5540, 108, LIBIPW_CH_PASSIVE_ONLY},
+	       {5560, 112, LIBIPW_CH_PASSIVE_ONLY},
+	       {5580, 116, LIBIPW_CH_PASSIVE_ONLY},
+	       {5600, 120, LIBIPW_CH_PASSIVE_ONLY},
+	       {5620, 124, LIBIPW_CH_PASSIVE_ONLY},
+	       {5640, 128, LIBIPW_CH_PASSIVE_ONLY},
+	       {5660, 132, LIBIPW_CH_PASSIVE_ONLY},
+	       {5680, 136, LIBIPW_CH_PASSIVE_ONLY},
+	       {5700, 140, LIBIPW_CH_PASSIVE_ONLY},
+	       {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
+	       {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
+	       {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
+	       {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
+	       {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
+	 },
+
+	{			/* Europe */
+	 "ZZL",
+	 .bg_channels = 11,
+	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
+		{2427, 4}, {2432, 5}, {2437, 6},
+		{2442, 7}, {2447, 8}, {2452, 9},
+		{2457, 10}, {2462, 11}},
+	 .a_channels = 13,
+	 .a = {{5180, 36, LIBIPW_CH_PASSIVE_ONLY},
+	       {5200, 40, LIBIPW_CH_PASSIVE_ONLY},
+	       {5220, 44, LIBIPW_CH_PASSIVE_ONLY},
+	       {5240, 48, LIBIPW_CH_PASSIVE_ONLY},
+	       {5260, 52, LIBIPW_CH_PASSIVE_ONLY},
+	       {5280, 56, LIBIPW_CH_PASSIVE_ONLY},
+	       {5300, 60, LIBIPW_CH_PASSIVE_ONLY},
+	       {5320, 64, LIBIPW_CH_PASSIVE_ONLY},
+	       {5745, 149, LIBIPW_CH_PASSIVE_ONLY},
+	       {5765, 153, LIBIPW_CH_PASSIVE_ONLY},
+	       {5785, 157, LIBIPW_CH_PASSIVE_ONLY},
+	       {5805, 161, LIBIPW_CH_PASSIVE_ONLY},
+	       {5825, 165, LIBIPW_CH_PASSIVE_ONLY}},
+	 }
+};
+
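+/* Match the 3-byte EEPROM country code against the table above, falling
+ * back to the restricted "---" entry when no SKU matches. */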
+static void ipw_set_geo(struct ipw_priv *priv)
+{
+	int j;
+
+	for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
+		if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
+			    ipw_geos[j].name, 3))
+			break;
+	}
+
+	if (j == ARRAY_SIZE(ipw_geos)) {
+		IPW_WARNING("SKU [%c%c%c] not recognized.\n",
+			    priv->eeprom[EEPROM_COUNTRY_CODE + 0],
+			    priv->eeprom[EEPROM_COUNTRY_CODE + 1],
+			    priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
+		j = 0;
+	}
+
+	libipw_set_geo(priv->ieee, &ipw_geos[j]);
+}
+
+#define MAX_HW_RESTARTS 5
+static int ipw_up(struct ipw_priv *priv)
+{
+	int rc, i;
+
+	/* Age scan list entries found before suspend */
+	if (priv->suspend_time) {
+		libipw_networks_age(priv->ieee, priv->suspend_time);
+		priv->suspend_time = 0;
+	}
+
+	if (priv->status & STATUS_EXIT_PENDING)
+		return -EIO;
+
+	if (cmdlog && !priv->cmdlog) {
+		priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog),
+				       GFP_KERNEL);
+		if (priv->cmdlog == NULL) {
+			IPW_ERROR("Error allocating %d command log entries.\n",
+				  cmdlog);
+			return -ENOMEM;
+		}
+		priv->cmdlog_len = cmdlog;
+	}
+
+	for (i = 0; i < MAX_HW_RESTARTS; i++) {
+		/* Load the microcode, firmware, and eeprom.
+		 * Also start the clocks. */
+		rc = ipw_load(priv);
+		if (rc) {
+			IPW_ERROR("Unable to load firmware: %d\n", rc);
+			return rc;
+		}
+
+		ipw_init_ordinals(priv);
+		if (!(priv->config & CFG_CUSTOM_MAC))
+			eeprom_parse_mac(priv, priv->mac_addr);
+		memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
+
+		ipw_set_geo(priv);
+
+		if (priv->status & STATUS_RF_KILL_SW) {
+			IPW_WARNING("Radio disabled by module parameter.\n");
+			return 0;
+		} else if (rf_kill_active(priv)) {
+			IPW_WARNING("Radio Frequency Kill Switch is On:\n"
+				    "Kill switch must be turned off for "
+				    "wireless networking to work.\n");
+			schedule_delayed_work(&priv->rf_kill, 2 * HZ);
+			return 0;
+		}
+
+		rc = ipw_config(priv);
+		if (!rc) {
+			IPW_DEBUG_INFO("Configured device on count %i\n", i);
+
+			/* If configured to try to auto-associate, kick
+			 * off a scan. */
+			schedule_delayed_work(&priv->request_scan, 0);
+
+			return 0;
+		}
+
+		IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
+		IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
+			       i, MAX_HW_RESTARTS);
+
+		/* We had an error bringing up the hardware, so take it
+		 * all the way back down so we can try again */
+		ipw_down(priv);
+	}
+
+	/* tried to restart and config the device for as long as our
+	 * patience could withstand */
+	IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
+
+	return -EIO;
+}
+
+static void ipw_bg_up(struct work_struct *work)
+{
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, up);
+	mutex_lock(&priv->mutex);
+	ipw_up(priv);
+	mutex_unlock(&priv->mutex);
+}
+
+static void ipw_deinit(struct ipw_priv *priv)
+{
+	int i;
+
+	if (priv->status & STATUS_SCANNING) {
+		IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
+		ipw_abort_scan(priv);
+	}
+
+	if (priv->status & STATUS_ASSOCIATED) {
+		IPW_DEBUG_INFO("Disassociating during shutdown.\n");
+		ipw_disassociate(priv);
+	}
+
+	ipw_led_shutdown(priv);
+
+	/* Wait up to 1s for status to change to not scanning and not
+	 * associated (disassociation can take a while for a full 802.11
+	 * exchange) */
+	for (i = 1000; i && (priv->status &
+			     (STATUS_DISASSOCIATING |
+			      STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
+		udelay(10);
+
+	if (priv->status & (STATUS_DISASSOCIATING |
+			    STATUS_ASSOCIATED | STATUS_SCANNING))
+		IPW_DEBUG_INFO("Still associated or scanning...\n");
+	else
+		IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);
+
+	/* Attempt to disable the card */
+	ipw_send_card_disable(priv, 0);
+
+	priv->status &= ~STATUS_INIT;
+}
+
+static void ipw_down(struct ipw_priv *priv)
+{
+	int exit_pending = priv->status & STATUS_EXIT_PENDING;
+
+	priv->status |= STATUS_EXIT_PENDING;
+
+	if (ipw_is_init(priv))
+		ipw_deinit(priv);
+
+	/* Wipe out the EXIT_PENDING status bit if we are not actually
+	 * exiting the module */
+	if (!exit_pending)
+		priv->status &= ~STATUS_EXIT_PENDING;
+
+	/* tell the device to stop sending interrupts */
+	ipw_disable_interrupts(priv);
+
+	/* Clear all bits but the RF Kill */
+	priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
+	netif_carrier_off(priv->net_dev);
+
+	ipw_stop_nic(priv);
+
+	ipw_led_radio_off(priv);
+}
+
+static void ipw_bg_down(struct work_struct *work)
+{
+	struct ipw_priv *priv =
+		container_of(work, struct ipw_priv, down);
+	mutex_lock(&priv->mutex);
+	ipw_down(priv);
+	mutex_unlock(&priv->mutex);
+}
+
+static int ipw_wdev_init(struct net_device *dev)
+{
+	int i, rc = 0;
+	struct ipw_priv *priv = libipw_priv(dev);
+	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
+	struct wireless_dev *wdev = &priv->ieee->wdev;
+
+	memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
+
+	/* fill-out priv->ieee->bg_band */
+	if (geo->bg_channels) {
+		struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
+
+		bg_band->band = NL80211_BAND_2GHZ;
+		bg_band->n_channels = geo->bg_channels;
+		bg_band->channels = kcalloc(geo->bg_channels,
+					    sizeof(struct ieee80211_channel),
+					    GFP_KERNEL);
+		if (!bg_band->channels) {
+			rc = -ENOMEM;
+			goto out;
+		}
+		/* translate geo->bg to bg_band.channels */
+		for (i = 0; i < geo->bg_channels; i++) {
+			bg_band->channels[i].band = NL80211_BAND_2GHZ;
+			bg_band->channels[i].center_freq = geo->bg[i].freq;
+			bg_band->channels[i].hw_value = geo->bg[i].channel;
+			bg_band->channels[i].max_power = geo->bg[i].max_power;
+			if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
+				bg_band->channels[i].flags |=
+					IEEE80211_CHAN_NO_IR;
+			if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
+				bg_band->channels[i].flags |=
+					IEEE80211_CHAN_NO_IR;
+			if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
+				bg_band->channels[i].flags |=
+					IEEE80211_CHAN_RADAR;
+			/* No equivalent for LIBIPW_CH_80211H_RULES,
+			   LIBIPW_CH_UNIFORM_SPREADING, or
+			   LIBIPW_CH_B_ONLY... */
+		}
+		/* point at bitrate info */
+		bg_band->bitrates = ipw2200_bg_rates;
+		bg_band->n_bitrates = ipw2200_num_bg_rates;
+
+		wdev->wiphy->bands[NL80211_BAND_2GHZ] = bg_band;
+	}
+
+	/* fill-out priv->ieee->a_band */
+	if (geo->a_channels) {
+		struct ieee80211_supported_band *a_band = &priv->ieee->a_band;
+
+		a_band->band = NL80211_BAND_5GHZ;
+		a_band->n_channels = geo->a_channels;
+		a_band->channels = kcalloc(geo->a_channels,
+					   sizeof(struct ieee80211_channel),
+					   GFP_KERNEL);
+		if (!a_band->channels) {
+			rc = -ENOMEM;
+			goto out;
+		}
+		/* translate geo->a to a_band.channels */
+		for (i = 0; i < geo->a_channels; i++) {
+			a_band->channels[i].band = NL80211_BAND_5GHZ;
+			a_band->channels[i].center_freq = geo->a[i].freq;
+			a_band->channels[i].hw_value = geo->a[i].channel;
+			a_band->channels[i].max_power = geo->a[i].max_power;
+			if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY)
+				a_band->channels[i].flags |=
+					IEEE80211_CHAN_NO_IR;
+			if (geo->a[i].flags & LIBIPW_CH_NO_IBSS)
+				a_band->channels[i].flags |=
+					IEEE80211_CHAN_NO_IR;
+			if (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT)
+				a_band->channels[i].flags |=
+					IEEE80211_CHAN_RADAR;
+			/* No equivalent for LIBIPW_CH_80211H_RULES,
+			   LIBIPW_CH_UNIFORM_SPREADING, or
+			   LIBIPW_CH_B_ONLY... */
+		}
+		/* point at bitrate info */
+		a_band->bitrates = ipw2200_a_rates;
+		a_band->n_bitrates = ipw2200_num_a_rates;
+
+		wdev->wiphy->bands[NL80211_BAND_5GHZ] = a_band;
+	}
+
+	wdev->wiphy->cipher_suites = ipw_cipher_suites;
+	wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites);
+
+	set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
+
+	/* With that information in place, we can now register the wiphy... */
+	if (wiphy_register(wdev->wiphy))
+		rc = -EIO;
+out:
+	return rc;
+}
+
+/* PCI driver stuff */
+static const struct pci_device_id card_ids[] = {
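+	/* Device 0x1043 parts are matched per subsystem ID; the PCI_VDEVICE()
+	 * entries match any subsystem. */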
+	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
+	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
+	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
+	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
+	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
+	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
+	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
+	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
+	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
+	{PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
+	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
+	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
+	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
+	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
+	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
+	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
+	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
+	{PCI_VDEVICE(INTEL, 0x104f), 0},
+	{PCI_VDEVICE(INTEL, 0x4220), 0},	/* BG */
+	{PCI_VDEVICE(INTEL, 0x4221), 0},	/* BG */
+	{PCI_VDEVICE(INTEL, 0x4223), 0},	/* ABG */
+	{PCI_VDEVICE(INTEL, 0x4224), 0},	/* ABG */
+
+	/* required last entry */
+	{0,}
+};
+
+MODULE_DEVICE_TABLE(pci, card_ids);
+
+static struct attribute *ipw_sysfs_entries[] = {
+	&dev_attr_rf_kill.attr,
+	&dev_attr_direct_dword.attr,
+	&dev_attr_indirect_byte.attr,
+	&dev_attr_indirect_dword.attr,
+	&dev_attr_mem_gpio_reg.attr,
+	&dev_attr_command_event_reg.attr,
+	&dev_attr_nic_type.attr,
+	&dev_attr_status.attr,
+	&dev_attr_cfg.attr,
+	&dev_attr_error.attr,
+	&dev_attr_event_log.attr,
+	&dev_attr_cmd_log.attr,
+	&dev_attr_eeprom_delay.attr,
+	&dev_attr_ucode_version.attr,
+	&dev_attr_rtc.attr,
+	&dev_attr_scan_age.attr,
+	&dev_attr_led.attr,
+	&dev_attr_speed_scan.attr,
+	&dev_attr_net_stats.attr,
+	&dev_attr_channels.attr,
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+	&dev_attr_rtap_iface.attr,
+	&dev_attr_rtap_filter.attr,
+#endif
+	NULL
+};
+
+static const struct attribute_group ipw_attribute_group = {
+	.name = NULL,		/* put in device directory */
+	.attrs = ipw_sysfs_entries,
+};
+
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+static int ipw_prom_open(struct net_device *dev)
+{
+	struct ipw_prom_priv *prom_priv = libipw_priv(dev);
+	struct ipw_priv *priv = prom_priv->priv;
+
+	IPW_DEBUG_INFO("prom dev->open\n");
+	netif_carrier_off(dev);
+
+	if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
+		priv->sys_config.accept_all_data_frames = 1;
+		priv->sys_config.accept_non_directed_frames = 1;
+		priv->sys_config.accept_all_mgmt_bcpr = 1;
+		priv->sys_config.accept_all_mgmt_frames = 1;
+
+		ipw_send_system_config(priv);
+	}
+
+	return 0;
+}
+
+static int ipw_prom_stop(struct net_device *dev)
+{
+	struct ipw_prom_priv *prom_priv = libipw_priv(dev);
+	struct ipw_priv *priv = prom_priv->priv;
+
+	IPW_DEBUG_INFO("prom dev->stop\n");
+
+	if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
+		priv->sys_config.accept_all_data_frames = 0;
+		priv->sys_config.accept_non_directed_frames = 0;
+		priv->sys_config.accept_all_mgmt_bcpr = 0;
+		priv->sys_config.accept_all_mgmt_frames = 0;
+
+		ipw_send_system_config(priv);
+	}
+
+	return 0;
+}
+
+static netdev_tx_t ipw_prom_hard_start_xmit(struct sk_buff *skb,
+					    struct net_device *dev)
+{
+	IPW_DEBUG_INFO("prom dev->xmit\n");
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops ipw_prom_netdev_ops = {
+	.ndo_open 		= ipw_prom_open,
+	.ndo_stop		= ipw_prom_stop,
+	.ndo_start_xmit		= ipw_prom_hard_start_xmit,
+	.ndo_set_mac_address 	= eth_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
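+/* Allocate and register the "rtap%d" companion monitor device; frames
+ * mirrored to it (see ipw_handle_promiscuous_tx() above) carry radiotap
+ * headers. */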
+static int ipw_prom_alloc(struct ipw_priv *priv)
+{
+	int rc = 0;
+
+	if (priv->prom_net_dev)
+		return -EPERM;
+
+	priv->prom_net_dev = alloc_libipw(sizeof(struct ipw_prom_priv), 1);
+	if (priv->prom_net_dev == NULL)
+		return -ENOMEM;
+
+	priv->prom_priv = libipw_priv(priv->prom_net_dev);
+	priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
+	priv->prom_priv->priv = priv;
+
+	strcpy(priv->prom_net_dev->name, "rtap%d");
+	memcpy(priv->prom_net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
+
+	priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
+	priv->prom_net_dev->netdev_ops = &ipw_prom_netdev_ops;
+
+	priv->prom_net_dev->min_mtu = 68;
+	priv->prom_net_dev->max_mtu = LIBIPW_DATA_LEN;
+
+	priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
+	SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev);
+
+	rc = register_netdev(priv->prom_net_dev);
+	if (rc) {
+		free_libipw(priv->prom_net_dev, 1);
+		priv->prom_net_dev = NULL;
+		return rc;
+	}
+
+	return 0;
+}
+
+static void ipw_prom_free(struct ipw_priv *priv)
+{
+	if (!priv->prom_net_dev)
+		return;
+
+	unregister_netdev(priv->prom_net_dev);
+	free_libipw(priv->prom_net_dev, 1);
+
+	priv->prom_net_dev = NULL;
+}
+
+#endif
+
+static const struct net_device_ops ipw_netdev_ops = {
+	.ndo_open		= ipw_net_open,
+	.ndo_stop		= ipw_net_stop,
+	.ndo_set_rx_mode	= ipw_net_set_multicast_list,
+	.ndo_set_mac_address	= ipw_net_set_mac_address,
+	.ndo_start_xmit		= libipw_xmit,
+	.ndo_validate_addr	= eth_validate_addr,
+};
+
+static int ipw_pci_probe(struct pci_dev *pdev,
+				   const struct pci_device_id *ent)
+{
+	int err = 0;
+	struct net_device *net_dev;
+	void __iomem *base;
+	u32 length, val;
+	struct ipw_priv *priv;
+	int i;
+
+	net_dev = alloc_libipw(sizeof(struct ipw_priv), 0);
+	if (net_dev == NULL) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	priv = libipw_priv(net_dev);
+	priv->ieee = netdev_priv(net_dev);
+
+	priv->net_dev = net_dev;
+	priv->pci_dev = pdev;
+	ipw_debug_level = debug;
+	spin_lock_init(&priv->irq_lock);
+	spin_lock_init(&priv->lock);
+	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
+		INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
+
+	mutex_init(&priv->mutex);
+	if (pci_enable_device(pdev)) {
+		err = -ENODEV;
+		goto out_free_libipw;
+	}
+
+	pci_set_master(pdev);
+
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (!err)
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (err) {
+		printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
+		goto out_pci_disable_device;
+	}
+
+	pci_set_drvdata(pdev, priv);
+
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err)
+		goto out_pci_disable_device;
+
+	/* We disable the RETRY_TIMEOUT register (0x41) to keep
+	 * PCI Tx retries from interfering with C3 CPU state */
+	pci_read_config_dword(pdev, 0x40, &val);
+	if ((val & 0x0000ff00) != 0)
+		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
+
+	length = pci_resource_len(pdev, 0);
+	priv->hw_len = length;
+
+	base = pci_ioremap_bar(pdev, 0);
+	if (!base) {
+		err = -ENODEV;
+		goto out_pci_release_regions;
+	}
+
+	priv->hw_base = base;
+	IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
+	IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
+
+	err = ipw_setup_deferred_work(priv);
+	if (err) {
+		IPW_ERROR("Unable to setup deferred work\n");
+		goto out_iounmap;
+	}
+
+	ipw_sw_reset(priv, 1);
+
+	err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
+	if (err) {
+		IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
+		goto out_iounmap;
+	}
+
+	SET_NETDEV_DEV(net_dev, &pdev->dev);
+
+	mutex_lock(&priv->mutex);
+
+	priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
+	priv->ieee->set_security = shim__set_security;
+	priv->ieee->is_queue_full = ipw_net_is_queue_full;
+
+#ifdef CONFIG_IPW2200_QOS
+	priv->ieee->is_qos_active = ipw_is_qos_active;
+	priv->ieee->handle_probe_response = ipw_handle_beacon;
+	priv->ieee->handle_beacon = ipw_handle_probe_response;
+	priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
+#endif				/* CONFIG_IPW2200_QOS */
+
+	priv->ieee->perfect_rssi = -20;
+	priv->ieee->worst_rssi = -85;
+
+	net_dev->netdev_ops = &ipw_netdev_ops;
+	priv->wireless_data.spy_data = &priv->ieee->spy_data;
+	net_dev->wireless_data = &priv->wireless_data;
+	net_dev->wireless_handlers = &ipw_wx_handler_def;
+	net_dev->ethtool_ops = &ipw_ethtool_ops;
+
+	net_dev->min_mtu = 68;
+	net_dev->max_mtu = LIBIPW_DATA_LEN;
+
+	err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
+	if (err) {
+		IPW_ERROR("failed to create sysfs device attributes\n");
+		mutex_unlock(&priv->mutex);
+		goto out_release_irq;
+	}
+
+	if (ipw_up(priv)) {
+		mutex_unlock(&priv->mutex);
+		err = -EIO;
+		goto out_remove_sysfs;
+	}
+
+	mutex_unlock(&priv->mutex);
+
+	err = ipw_wdev_init(net_dev);
+	if (err) {
+		IPW_ERROR("failed to register wireless device\n");
+		goto out_remove_sysfs;
+	}
+
+	err = register_netdev(net_dev);
+	if (err) {
+		IPW_ERROR("failed to register network device\n");
+		goto out_unregister_wiphy;
+	}
+
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+	if (rtap_iface) {
+		err = ipw_prom_alloc(priv);
+		if (err) {
+			IPW_ERROR("Failed to register promiscuous network "
+				  "device (error %d).\n", err);
+			unregister_netdev(priv->net_dev);
+			goto out_unregister_wiphy;
+		}
+	}
+#endif
+
+	printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
+	       "channels, %d 802.11a channels)\n",
+	       priv->ieee->geo.name, priv->ieee->geo.bg_channels,
+	       priv->ieee->geo.a_channels);
+
+	return 0;
+
+      out_unregister_wiphy:
+	wiphy_unregister(priv->ieee->wdev.wiphy);
+	kfree(priv->ieee->a_band.channels);
+	kfree(priv->ieee->bg_band.channels);
+      out_remove_sysfs:
+	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
+      out_release_irq:
+	free_irq(pdev->irq, priv);
+      out_iounmap:
+	iounmap(priv->hw_base);
+      out_pci_release_regions:
+	pci_release_regions(pdev);
+      out_pci_disable_device:
+	pci_disable_device(pdev);
+      out_free_libipw:
+	free_libipw(priv->net_dev, 0);
+      out:
+	return err;
+}
+
+static void ipw_pci_remove(struct pci_dev *pdev)
+{
+	struct ipw_priv *priv = pci_get_drvdata(pdev);
+	struct list_head *p, *q;
+	int i;
+
+	if (!priv)
+		return;
+
+	mutex_lock(&priv->mutex);
+
+	priv->status |= STATUS_EXIT_PENDING;
+	ipw_down(priv);
+	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
+
+	mutex_unlock(&priv->mutex);
+
+	unregister_netdev(priv->net_dev);
+
+	if (priv->rxq) {
+		ipw_rx_queue_free(priv, priv->rxq);
+		priv->rxq = NULL;
+	}
+	ipw_tx_queue_free(priv);
+
+	if (priv->cmdlog) {
+		kfree(priv->cmdlog);
+		priv->cmdlog = NULL;
+	}
+
+	/* make sure all works are inactive */
+	cancel_delayed_work_sync(&priv->adhoc_check);
+	cancel_work_sync(&priv->associate);
+	cancel_work_sync(&priv->disassociate);
+	cancel_work_sync(&priv->system_config);
+	cancel_work_sync(&priv->rx_replenish);
+	cancel_work_sync(&priv->adapter_restart);
+	cancel_delayed_work_sync(&priv->rf_kill);
+	cancel_work_sync(&priv->up);
+	cancel_work_sync(&priv->down);
+	cancel_delayed_work_sync(&priv->request_scan);
+	cancel_delayed_work_sync(&priv->request_direct_scan);
+	cancel_delayed_work_sync(&priv->request_passive_scan);
+	cancel_delayed_work_sync(&priv->scan_event);
+	cancel_delayed_work_sync(&priv->gather_stats);
+	cancel_work_sync(&priv->abort_scan);
+	cancel_work_sync(&priv->roam);
+	cancel_delayed_work_sync(&priv->scan_check);
+	cancel_work_sync(&priv->link_up);
+	cancel_work_sync(&priv->link_down);
+	cancel_delayed_work_sync(&priv->led_link_on);
+	cancel_delayed_work_sync(&priv->led_link_off);
+	cancel_delayed_work_sync(&priv->led_act_off);
+	cancel_work_sync(&priv->merge_networks);
+
+	/* Free MAC hash list for ADHOC */
+	for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
+		list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
+			list_del(p);
+			kfree(list_entry(p, struct ipw_ibss_seq, list));
+		}
+	}
+
+	kfree(priv->error);
+	priv->error = NULL;
+
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+	ipw_prom_free(priv);
+#endif
+
+	free_irq(pdev->irq, priv);
+	iounmap(priv->hw_base);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	/* wiphy_unregister needs to be here, before free_libipw */
+	wiphy_unregister(priv->ieee->wdev.wiphy);
+	kfree(priv->ieee->a_band.channels);
+	kfree(priv->ieee->bg_band.channels);
+	free_libipw(priv->net_dev, 0);
+	free_firmware();
+}
+
+#ifdef CONFIG_PM
+static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct ipw_priv *priv = pci_get_drvdata(pdev);
+	struct net_device *dev = priv->net_dev;
+
+	printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
+
+	/* Take down the device; powers it off, etc. */
+	ipw_down(priv);
+
+	/* Remove the PRESENT state of the device */
+	netif_device_detach(dev);
+
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+	priv->suspend_at = ktime_get_boottime_seconds();
+
+	return 0;
+}
+
+static int ipw_pci_resume(struct pci_dev *pdev)
+{
+	struct ipw_priv *priv = pci_get_drvdata(pdev);
+	struct net_device *dev = priv->net_dev;
+	int err;
+	u32 val;
+
+	printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
+
+	pci_set_power_state(pdev, PCI_D0);
+	err = pci_enable_device(pdev);
+	if (err) {
+		printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
+		       dev->name);
+		return err;
+	}
+	pci_restore_state(pdev);
+
+	/*
+	 * Suspend/Resume resets the PCI configuration space, so we have to
+	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
+	 * from interfering with C3 CPU state. pci_restore_state won't help
+	 * here since it only restores the first 64 bytes pci config header.
+	 */
+	pci_read_config_dword(pdev, 0x40, &val);
+	if ((val & 0x0000ff00) != 0)
+		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
+
+	/* Set the device back into the PRESENT state; this will also wake
+	 * the queue if needed */
+	netif_device_attach(dev);
+
+	priv->suspend_time = ktime_get_boottime_seconds() - priv->suspend_at;
+
+	/* Bring the device back up */
+	schedule_work(&priv->up);
+
+	return 0;
+}
+#endif
+
+static void ipw_pci_shutdown(struct pci_dev *pdev)
+{
+	struct ipw_priv *priv = pci_get_drvdata(pdev);
+
+	/* Take down the device; powers it off, etc. */
+	ipw_down(priv);
+
+	pci_disable_device(pdev);
+}
+
+/* driver initialization stuff */
+static struct pci_driver ipw_driver = {
+	.name = DRV_NAME,
+	.id_table = card_ids,
+	.probe = ipw_pci_probe,
+	.remove = ipw_pci_remove,
+#ifdef CONFIG_PM
+	.suspend = ipw_pci_suspend,
+	.resume = ipw_pci_resume,
+#endif
+	.shutdown = ipw_pci_shutdown,
+};
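+
+/* The suspend/resume hooks wired up above use the legacy PCI PM
+ * interface; it still works on this kernel, though dev_pm_ops is the
+ * preferred mechanism for new code. */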
+
+static int __init ipw_init(void)
+{
+	int ret;
+
+	printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
+	printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
+
+	ret = pci_register_driver(&ipw_driver);
+	if (ret) {
+		IPW_ERROR("Unable to initialize PCI module\n");
+		return ret;
+	}
+
+	ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
+	if (ret) {
+		IPW_ERROR("Unable to create driver sysfs file\n");
+		pci_unregister_driver(&ipw_driver);
+		return ret;
+	}
+
+	return ret;
+}
+
+static void __exit ipw_exit(void)
+{
+	driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
+	pci_unregister_driver(&ipw_driver);
+}
+
+module_param(disable, int, 0444);
+MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
+
+module_param(associate, int, 0444);
+MODULE_PARM_DESC(associate, "auto associate when scanning (default off)");
+
+module_param(auto_create, int, 0444);
+MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
+
+module_param_named(led, led_support, int, 0444);
+MODULE_PARM_DESC(led, "enable led control on some systems (default 1 on)");
+
+module_param(debug, int, 0444);
+MODULE_PARM_DESC(debug, "debug output mask");
+
+module_param_named(channel, default_channel, int, 0444);
+MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");
+
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+module_param(rtap_iface, int, 0444);
+MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
+#endif
+
+#ifdef CONFIG_IPW2200_QOS
+module_param(qos_enable, int, 0444);
+MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");
+
+module_param(qos_burst_enable, int, 0444);
+MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
+
+module_param(qos_no_ack_mask, int, 0444);
+MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");
+
+module_param(burst_duration_CCK, int, 0444);
+MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
+
+module_param(burst_duration_OFDM, int, 0444);
+MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
+#endif				/* CONFIG_IPW2200_QOS */
+
+#ifdef CONFIG_IPW2200_MONITOR
+module_param_named(mode, network_mode, int, 0444);
+MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
+#else
+module_param_named(mode, network_mode, int, 0444);
+MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
+#endif
+
+module_param(bt_coexist, int, 0444);
+MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");
+
+module_param(hwcrypto, int, 0444);
+MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");
+
+module_param(cmdlog, int, 0444);
+MODULE_PARM_DESC(cmdlog,
+		 "allocate a ring buffer for logging firmware commands");
+
+module_param(roaming, int, 0444);
+MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
+
+module_param(antenna, int, 0444);
+MODULE_PARM_DESC(antenna, "select antenna: 0=both (default), 1=Main, 2=slow_diversity (choose the one with lower background noise), 3=Aux");
+
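+/* Illustrative usage of the parameters above (mode=2 needs a kernel
+ * built with CONFIG_IPW2200_MONITOR; the debug mask value is arbitrary):
+ *
+ *   modprobe ipw2200 mode=2 channel=6 debug=0x43fff
+ */
+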
+module_exit(ipw_exit);
+module_init(ipw_init);
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.h b/drivers/net/wireless/intel/ipw2x00/ipw2200.h
new file mode 100644
index 0000000..f98ab1f
--- /dev/null
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.h
@@ -0,0 +1,2001 @@
+/******************************************************************************
+
+  Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+
+  The full GNU General Public License is included in this distribution in the
+  file called LICENSE.
+
+  Contact Information:
+  Intel Linux Wireless <ilw@linux.intel.com>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+******************************************************************************/
+
+#ifndef __ipw2200_h__
+#define __ipw2200_h__
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/random.h>
+#include <linux/dma-mapping.h>
+
+#include <linux/firmware.h>
+#include <linux/wireless.h>
+#include <linux/jiffies.h>
+#include <asm/io.h>
+
+#include <net/lib80211.h>
+#include <net/ieee80211_radiotap.h>
+
+#define DRV_NAME	"ipw2200"
+
+#include <linux/workqueue.h>
+
+#include "libipw.h"
+
+/* Authentication  and Association States */
+enum connection_manager_assoc_states {
+	CMAS_INIT = 0,
+	CMAS_TX_AUTH_SEQ_1,
+	CMAS_RX_AUTH_SEQ_2,
+	CMAS_AUTH_SEQ_1_PASS,
+	CMAS_AUTH_SEQ_1_FAIL,
+	CMAS_TX_AUTH_SEQ_3,
+	CMAS_RX_AUTH_SEQ_4,
+	CMAS_AUTH_SEQ_2_PASS,
+	CMAS_AUTH_SEQ_2_FAIL,
+	CMAS_AUTHENTICATED,
+	CMAS_TX_ASSOC,
+	CMAS_RX_ASSOC_RESP,
+	CMAS_ASSOCIATED,
+	CMAS_LAST
+};
+
+#define IPW_WAIT                     (1<<0)
+#define IPW_QUIET                    (1<<1)
+#define IPW_ROAMING                  (1<<2)
+
+#define IPW_POWER_MODE_CAM           0x00	//(always on)
+#define IPW_POWER_INDEX_1            0x01
+#define IPW_POWER_INDEX_2            0x02
+#define IPW_POWER_INDEX_3            0x03
+#define IPW_POWER_INDEX_4            0x04
+#define IPW_POWER_INDEX_5            0x05
+#define IPW_POWER_AC                 0x06
+#define IPW_POWER_BATTERY            0x07
+#define IPW_POWER_LIMIT              0x07
+#define IPW_POWER_MASK               0x0F
+#define IPW_POWER_ENABLED            0x10
+#define IPW_POWER_LEVEL(x)           ((x) & IPW_POWER_MASK)
+
+#define IPW_CMD_HOST_COMPLETE                 2
+#define IPW_CMD_POWER_DOWN                    4
+#define IPW_CMD_SYSTEM_CONFIG                 6
+#define IPW_CMD_MULTICAST_ADDRESS             7
+#define IPW_CMD_SSID                          8
+#define IPW_CMD_ADAPTER_ADDRESS              11
+#define IPW_CMD_PORT_TYPE                    12
+#define IPW_CMD_RTS_THRESHOLD                15
+#define IPW_CMD_FRAG_THRESHOLD               16
+#define IPW_CMD_POWER_MODE                   17
+#define IPW_CMD_WEP_KEY                      18
+#define IPW_CMD_TGI_TX_KEY                   19
+#define IPW_CMD_SCAN_REQUEST                 20
+#define IPW_CMD_ASSOCIATE                    21
+#define IPW_CMD_SUPPORTED_RATES              22
+#define IPW_CMD_SCAN_ABORT                   23
+#define IPW_CMD_TX_FLUSH                     24
+#define IPW_CMD_QOS_PARAMETERS               25
+#define IPW_CMD_SCAN_REQUEST_EXT             26
+#define IPW_CMD_DINO_CONFIG                  30
+#define IPW_CMD_RSN_CAPABILITIES             31
+#define IPW_CMD_RX_KEY                       32
+#define IPW_CMD_CARD_DISABLE                 33
+#define IPW_CMD_SEED_NUMBER                  34
+#define IPW_CMD_TX_POWER                     35
+#define IPW_CMD_COUNTRY_INFO                 36
+#define IPW_CMD_AIRONET_INFO                 37
+#define IPW_CMD_AP_TX_POWER                  38
+#define IPW_CMD_CCKM_INFO                    39
+#define IPW_CMD_CCX_VER_INFO                 40
+#define IPW_CMD_SET_CALIBRATION              41
+#define IPW_CMD_SENSITIVITY_CALIB            42
+#define IPW_CMD_RETRY_LIMIT                  51
+#define IPW_CMD_IPW_PRE_POWER_DOWN           58
+#define IPW_CMD_VAP_BEACON_TEMPLATE          60
+#define IPW_CMD_VAP_DTIM_PERIOD              61
+#define IPW_CMD_EXT_SUPPORTED_RATES          62
+#define IPW_CMD_VAP_LOCAL_TX_PWR_CONSTRAINT  63
+#define IPW_CMD_VAP_QUIET_INTERVALS          64
+#define IPW_CMD_VAP_CHANNEL_SWITCH           65
+#define IPW_CMD_VAP_MANDATORY_CHANNELS       66
+#define IPW_CMD_VAP_CELL_PWR_LIMIT           67
+#define IPW_CMD_VAP_CF_PARAM_SET             68
+#define IPW_CMD_VAP_SET_BEACONING_STATE      69
+#define IPW_CMD_MEASUREMENT                  80
+#define IPW_CMD_POWER_CAPABILITY             81
+#define IPW_CMD_SUPPORTED_CHANNELS           82
+#define IPW_CMD_TPC_REPORT                   83
+#define IPW_CMD_WME_INFO                     84
+#define IPW_CMD_PRODUCTION_COMMAND	     85
+#define IPW_CMD_LINKSYS_EOU_INFO             90
+
+#define RFD_SIZE                              4
+#define NUM_TFD_CHUNKS                        6
+
+#define TX_QUEUE_SIZE                        32
+#define RX_QUEUE_SIZE                        32
+
+#define DINO_CMD_WEP_KEY                   0x08
+#define DINO_CMD_TX                        0x0B
+#define DCT_ANTENNA_A                      0x01
+#define DCT_ANTENNA_B                      0x02
+
+#define IPW_A_MODE                         0
+#define IPW_B_MODE                         1
+#define IPW_G_MODE                         2
+
+/*
+ * TX Queue Flag Definitions
+ */
+
+/* tx wep key definition */
+#define DCT_WEP_KEY_NOT_IMMIDIATE	0x00
+#define DCT_WEP_KEY_64Bit		0x40
+#define DCT_WEP_KEY_128Bit		0x80
+#define DCT_WEP_KEY_128bitIV		0xC0
+#define DCT_WEP_KEY_SIZE_MASK		0xC0
+
+#define DCT_WEP_KEY_INDEX_MASK		0x0F
+#define DCT_WEP_INDEX_USE_IMMEDIATE	0x20
+
+/* abort attempt if mgmt frame is rx'd */
+#define DCT_FLAG_ABORT_MGMT                0x01
+
+/* require CTS */
+#define DCT_FLAG_CTS_REQUIRED              0x02
+
+/* preamble selection: flag set = short, clear = long */
+#define DCT_FLAG_LONG_PREAMBLE             0x00
+#define DCT_FLAG_SHORT_PREAMBLE            0x04
+
+/* RTS/CTS first */
+#define DCT_FLAG_RTS_REQD                  0x08
+
+/* don't calculate duration field */
+#define DCT_FLAG_DUR_SET                   0x10
+
+/* even if MAC WEP set (allows pre-encrypt) */
+#define DCT_FLAG_NO_WEP              0x20
+
+/* overwrite TSF field */
+#define DCT_FLAG_TSF_REQD                  0x40
+
+/* ACK rx is expected to follow */
+#define DCT_FLAG_ACK_REQD                  0x80
+
+/* TX flags extension */
+#define DCT_FLAG_EXT_MODE_CCK  0x01
+#define DCT_FLAG_EXT_MODE_OFDM 0x00
+
+#define DCT_FLAG_EXT_SECURITY_WEP     0x00
+#define DCT_FLAG_EXT_SECURITY_NO      DCT_FLAG_EXT_SECURITY_WEP
+#define DCT_FLAG_EXT_SECURITY_CKIP    0x04
+#define DCT_FLAG_EXT_SECURITY_CCM     0x08
+#define DCT_FLAG_EXT_SECURITY_TKIP    0x0C
+#define DCT_FLAG_EXT_SECURITY_MASK    0x0C
+
+#define DCT_FLAG_EXT_QOS_ENABLED      0x10
+
+#define DCT_FLAG_EXT_HC_NO_SIFS_PIFS  0x00
+#define DCT_FLAG_EXT_HC_SIFS          0x20
+#define DCT_FLAG_EXT_HC_PIFS          0x40
+
+#define TX_RX_TYPE_MASK                    0xFF
+#define TX_FRAME_TYPE                      0x00
+#define TX_HOST_COMMAND_TYPE               0x01
+#define RX_FRAME_TYPE                      0x09
+#define RX_HOST_NOTIFICATION_TYPE          0x03
+#define RX_HOST_CMD_RESPONSE_TYPE          0x04
+#define RX_TX_FRAME_RESPONSE_TYPE          0x05
+#define TFD_NEED_IRQ_MASK                  0x04
+
+#define HOST_CMD_DINO_CONFIG               30
+
+#define HOST_NOTIFICATION_STATUS_ASSOCIATED             10
+#define HOST_NOTIFICATION_STATUS_AUTHENTICATE           11
+#define HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT    12
+#define HOST_NOTIFICATION_STATUS_SCAN_COMPLETED         13
+#define HOST_NOTIFICATION_STATUS_FRAG_LENGTH            14
+#define HOST_NOTIFICATION_STATUS_LINK_DETERIORATION     15
+#define HOST_NOTIFICATION_DINO_CONFIG_RESPONSE          16
+#define HOST_NOTIFICATION_STATUS_BEACON_STATE           17
+#define HOST_NOTIFICATION_STATUS_TGI_TX_KEY             18
+#define HOST_NOTIFICATION_TX_STATUS                     19
+#define HOST_NOTIFICATION_CALIB_KEEP_RESULTS            20
+#define HOST_NOTIFICATION_MEASUREMENT_STARTED           21
+#define HOST_NOTIFICATION_MEASUREMENT_ENDED             22
+#define HOST_NOTIFICATION_CHANNEL_SWITCHED              23
+#define HOST_NOTIFICATION_RX_DURING_QUIET_PERIOD        24
+#define HOST_NOTIFICATION_NOISE_STATS			25
+#define HOST_NOTIFICATION_S36_MEASUREMENT_ACCEPTED      30
+#define HOST_NOTIFICATION_S36_MEASUREMENT_REFUSED       31
+
+#define HOST_NOTIFICATION_STATUS_BEACON_MISSING         1
+#define IPW_MB_SCAN_CANCEL_THRESHOLD                    3
+#define IPW_MB_ROAMING_THRESHOLD_MIN                    1
+#define IPW_MB_ROAMING_THRESHOLD_DEFAULT                8
+#define IPW_MB_ROAMING_THRESHOLD_MAX                    30
+#define IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT           (3 * IPW_MB_ROAMING_THRESHOLD_DEFAULT)
+#define IPW_REAL_RATE_RX_PACKET_THRESHOLD               300
+
+#define MACADRR_BYTE_LEN                     6
+
+#define DCR_TYPE_AP                       0x01
+#define DCR_TYPE_WLAP                     0x02
+#define DCR_TYPE_MU_ESS                   0x03
+#define DCR_TYPE_MU_IBSS                  0x04
+#define DCR_TYPE_MU_PIBSS                 0x05
+#define DCR_TYPE_SNIFFER                  0x06
+#define DCR_TYPE_MU_BSS        DCR_TYPE_MU_ESS
+
+/* QoS  definitions */
+
+#define CW_MIN_OFDM          15
+#define CW_MAX_OFDM          1023
+#define CW_MIN_CCK           31
+#define CW_MAX_CCK           1023
+
+#define QOS_TX0_CW_MIN_OFDM      cpu_to_le16(CW_MIN_OFDM)
+#define QOS_TX1_CW_MIN_OFDM      cpu_to_le16(CW_MIN_OFDM)
+#define QOS_TX2_CW_MIN_OFDM      cpu_to_le16((CW_MIN_OFDM + 1)/2 - 1)
+#define QOS_TX3_CW_MIN_OFDM      cpu_to_le16((CW_MIN_OFDM + 1)/4 - 1)
+
+#define QOS_TX0_CW_MIN_CCK       cpu_to_le16(CW_MIN_CCK)
+#define QOS_TX1_CW_MIN_CCK       cpu_to_le16(CW_MIN_CCK)
+#define QOS_TX2_CW_MIN_CCK       cpu_to_le16((CW_MIN_CCK + 1)/2 - 1)
+#define QOS_TX3_CW_MIN_CCK       cpu_to_le16((CW_MIN_CCK + 1)/4 - 1)
+
+#define QOS_TX0_CW_MAX_OFDM      cpu_to_le16(CW_MAX_OFDM)
+#define QOS_TX1_CW_MAX_OFDM      cpu_to_le16(CW_MAX_OFDM)
+#define QOS_TX2_CW_MAX_OFDM      cpu_to_le16(CW_MIN_OFDM)
+#define QOS_TX3_CW_MAX_OFDM      cpu_to_le16((CW_MIN_OFDM + 1)/2 - 1)
+
+#define QOS_TX0_CW_MAX_CCK       cpu_to_le16(CW_MAX_CCK)
+#define QOS_TX1_CW_MAX_CCK       cpu_to_le16(CW_MAX_CCK)
+#define QOS_TX2_CW_MAX_CCK       cpu_to_le16(CW_MIN_CCK)
+#define QOS_TX3_CW_MAX_CCK       cpu_to_le16((CW_MIN_CCK + 1)/2 - 1)
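+
+/*
+ * Worked values for the shifted windows above: (CW + 1)/2 - 1 and
+ * (CW + 1)/4 - 1 give 7 and 3 for CW_MIN_OFDM = 15, and 15 and 7 for
+ * CW_MIN_CCK = 31.
+ */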
+
+#define QOS_TX0_AIFS            (3 - QOS_AIFSN_MIN_VALUE)
+#define QOS_TX1_AIFS            (7 - QOS_AIFSN_MIN_VALUE)
+#define QOS_TX2_AIFS            (2 - QOS_AIFSN_MIN_VALUE)
+#define QOS_TX3_AIFS            (2 - QOS_AIFSN_MIN_VALUE)
+
+#define QOS_TX0_ACM             0
+#define QOS_TX1_ACM             0
+#define QOS_TX2_ACM             0
+#define QOS_TX3_ACM             0
+
+#define QOS_TX0_TXOP_LIMIT_CCK          0
+#define QOS_TX1_TXOP_LIMIT_CCK          0
+#define QOS_TX2_TXOP_LIMIT_CCK          cpu_to_le16(6016)
+#define QOS_TX3_TXOP_LIMIT_CCK          cpu_to_le16(3264)
+
+#define QOS_TX0_TXOP_LIMIT_OFDM      0
+#define QOS_TX1_TXOP_LIMIT_OFDM      0
+#define QOS_TX2_TXOP_LIMIT_OFDM      cpu_to_le16(3008)
+#define QOS_TX3_TXOP_LIMIT_OFDM      cpu_to_le16(1504)
+
+#define DEF_TX0_CW_MIN_OFDM      cpu_to_le16(CW_MIN_OFDM)
+#define DEF_TX1_CW_MIN_OFDM      cpu_to_le16(CW_MIN_OFDM)
+#define DEF_TX2_CW_MIN_OFDM      cpu_to_le16(CW_MIN_OFDM)
+#define DEF_TX3_CW_MIN_OFDM      cpu_to_le16(CW_MIN_OFDM)
+
+#define DEF_TX0_CW_MIN_CCK       cpu_to_le16(CW_MIN_CCK)
+#define DEF_TX1_CW_MIN_CCK       cpu_to_le16(CW_MIN_CCK)
+#define DEF_TX2_CW_MIN_CCK       cpu_to_le16(CW_MIN_CCK)
+#define DEF_TX3_CW_MIN_CCK       cpu_to_le16(CW_MIN_CCK)
+
+#define DEF_TX0_CW_MAX_OFDM      cpu_to_le16(CW_MAX_OFDM)
+#define DEF_TX1_CW_MAX_OFDM      cpu_to_le16(CW_MAX_OFDM)
+#define DEF_TX2_CW_MAX_OFDM      cpu_to_le16(CW_MAX_OFDM)
+#define DEF_TX3_CW_MAX_OFDM      cpu_to_le16(CW_MAX_OFDM)
+
+#define DEF_TX0_CW_MAX_CCK       cpu_to_le16(CW_MAX_CCK)
+#define DEF_TX1_CW_MAX_CCK       cpu_to_le16(CW_MAX_CCK)
+#define DEF_TX2_CW_MAX_CCK       cpu_to_le16(CW_MAX_CCK)
+#define DEF_TX3_CW_MAX_CCK       cpu_to_le16(CW_MAX_CCK)
+
+#define DEF_TX0_AIFS            0
+#define DEF_TX1_AIFS            0
+#define DEF_TX2_AIFS            0
+#define DEF_TX3_AIFS            0
+
+#define DEF_TX0_ACM             0
+#define DEF_TX1_ACM             0
+#define DEF_TX2_ACM             0
+#define DEF_TX3_ACM             0
+
+#define DEF_TX0_TXOP_LIMIT_CCK        0
+#define DEF_TX1_TXOP_LIMIT_CCK        0
+#define DEF_TX2_TXOP_LIMIT_CCK        0
+#define DEF_TX3_TXOP_LIMIT_CCK        0
+
+#define DEF_TX0_TXOP_LIMIT_OFDM       0
+#define DEF_TX1_TXOP_LIMIT_OFDM       0
+#define DEF_TX2_TXOP_LIMIT_OFDM       0
+#define DEF_TX3_TXOP_LIMIT_OFDM       0
+
+#define QOS_QOS_SETS                  3
+#define QOS_PARAM_SET_ACTIVE          0
+#define QOS_PARAM_SET_DEF_CCK         1
+#define QOS_PARAM_SET_DEF_OFDM        2
+
+#define CTRL_QOS_NO_ACK               (0x0020)
+
+#define IPW_TX_QUEUE_1        1
+#define IPW_TX_QUEUE_2        2
+#define IPW_TX_QUEUE_3        3
+#define IPW_TX_QUEUE_4        4
+
+/* QoS structures */
+struct ipw_qos_info {
+	int qos_enable;
+	struct libipw_qos_parameters *def_qos_parm_OFDM;
+	struct libipw_qos_parameters *def_qos_parm_CCK;
+	u32 burst_duration_CCK;
+	u32 burst_duration_OFDM;
+	u16 qos_no_ack_mask;
+	int burst_enable;
+};
+
+/**************************************************************/
+/**
+ * Generic queue structure
+ *
+ * Contains common data for Rx and Tx queues
+ */
+struct clx2_queue {
+	int n_bd;		       /**< number of BDs in this queue */
+	int first_empty;	       /**< first empty entry (index) */
+	int last_used;		       /**< last used entry (index) */
+	u32 reg_w;		     /**< 'write' reg (queue head), addr in domain 1 */
+	u32 reg_r;		     /**< 'read' reg (queue tail), addr in domain 1 */
+	dma_addr_t dma_addr;		/**< physical addr of the BDs */
+	int low_mark;		       /**< low watermark: resume queue if free space exceeds this */
+	int high_mark;		       /**< high watermark: stop queue if free space drops below this */
+} __packed; /* XXX */
+
+struct machdr32 {
+	__le16 frame_ctl;
+	__le16 duration;		// watch out for endians!
+	u8 addr1[MACADRR_BYTE_LEN];
+	u8 addr2[MACADRR_BYTE_LEN];
+	u8 addr3[MACADRR_BYTE_LEN];
+	__le16 seq_ctrl;		// more endians!
+	u8 addr4[MACADRR_BYTE_LEN];
+	__le16 qos_ctrl;
+} __packed;
+
+struct machdr30 {
+	__le16 frame_ctl;
+	__le16 duration;		// watch out for endians!
+	u8 addr1[MACADRR_BYTE_LEN];
+	u8 addr2[MACADRR_BYTE_LEN];
+	u8 addr3[MACADRR_BYTE_LEN];
+	__le16 seq_ctrl;		// more endians!
+	u8 addr4[MACADRR_BYTE_LEN];
+} __packed;
+
+struct machdr26 {
+	__le16 frame_ctl;
+	__le16 duration;		// watch out for endians!
+	u8 addr1[MACADRR_BYTE_LEN];
+	u8 addr2[MACADRR_BYTE_LEN];
+	u8 addr3[MACADRR_BYTE_LEN];
+	__le16 seq_ctrl;		// more endians!
+	__le16 qos_ctrl;
+} __packed;
+
+struct machdr24 {
+	__le16 frame_ctl;
+	__le16 duration;		// watch out for endians!
+	u8 addr1[MACADRR_BYTE_LEN];
+	u8 addr2[MACADRR_BYTE_LEN];
+	u8 addr3[MACADRR_BYTE_LEN];
+	__le16 seq_ctrl;		// more endians!
+} __packed;
+
+// TX TFD with 32 byte MAC Header
+struct tx_tfd_32 {
+	struct machdr32 mchdr;	// 32
+	__le32 uivplaceholder[2];	// 8
+} __packed;
+
+// TX TFD with 30 byte MAC Header
+struct tx_tfd_30 {
+	struct machdr30 mchdr;	// 30
+	u8 reserved[2];		// 2
+	__le32 uivplaceholder[2];	// 8
+} __packed;
+
+// tx tfd with 26 byte mac header
+struct tx_tfd_26 {
+	struct machdr26 mchdr;	// 26
+	u8 reserved1[2];	// 2
+	__le32 uivplaceholder[2];	// 8
+	u8 reserved2[4];	// 4
+} __packed;
+
+// tx tfd with 24 byte mac header
+struct tx_tfd_24 {
+	struct machdr24 mchdr;	// 24
+	__le32 uivplaceholder[2];	// 8
+	u8 reserved[8];		// 8
+} __packed;
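+
+/* All four TFD MAC-header layouts above pad to the same 40 bytes:
+ * 32 + 8, 30 + 2 + 8, 26 + 2 + 8 + 4, and 24 + 8 + 8. */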
+
+#define DCT_WEP_KEY_FIELD_LENGTH 16
+
+struct tfd_command {
+	u8 index;
+	u8 length;
+	__le16 reserved;
+	u8 payload[0];
+} __packed;
+
+struct tfd_data {
+	/* Header */
+	__le32 work_area_ptr;
+	u8 station_number;	/* 0 for BSS */
+	u8 reserved1;
+	__le16 reserved2;
+
+	/* Tx Parameters */
+	u8 cmd_id;
+	u8 seq_num;
+	__le16 len;
+	u8 priority;
+	u8 tx_flags;
+	u8 tx_flags_ext;
+	u8 key_index;
+	u8 wepkey[DCT_WEP_KEY_FIELD_LENGTH];
+	u8 rate;
+	u8 antenna;
+	__le16 next_packet_duration;
+	__le16 next_frag_len;
+	__le16 back_off_counter;	// txop
+	u8 retrylimit;
+	__le16 cwcurrent;
+	u8 reserved3;
+
+	/* 802.11 MAC Header */
+	union {
+		struct tx_tfd_24 tfd_24;
+		struct tx_tfd_26 tfd_26;
+		struct tx_tfd_30 tfd_30;
+		struct tx_tfd_32 tfd_32;
+	} tfd;
+
+	/* Payload DMA info */
+	__le32 num_chunks;
+	__le32 chunk_ptr[NUM_TFD_CHUNKS];
+	__le16 chunk_len[NUM_TFD_CHUNKS];
+} __packed;
+
+struct txrx_control_flags {
+	u8 message_type;
+	u8 rx_seq_num;
+	u8 control_bits;
+	u8 reserved;
+} __packed;
+
+#define  TFD_SIZE                           128
+#define  TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH   (TFD_SIZE - sizeof(struct txrx_control_flags))
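+
+/* With TFD_SIZE = 128 and the 4-byte txrx_control_flags header, the
+ * immediate command payload works out to 124 bytes. */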
+
+struct tfd_frame {
+	struct txrx_control_flags control_flags;
+	union {
+		struct tfd_data data;
+		struct tfd_command cmd;
+		u8 raw[TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH];
+	} u;
+} __packed;
+
+typedef void destructor_func(const void *);
+
+/**
+ * Tx Queue for DMA. Queue consists of circular buffer of
+ * BD's and required locking structures.
+ */
+struct clx2_tx_queue {
+	struct clx2_queue q;
+	struct tfd_frame *bd;
+	struct libipw_txb **txb;
+};
+
+/*
+ * RX related structures and functions
+ */
+#define RX_FREE_BUFFERS 32
+#define RX_LOW_WATERMARK 8
+
+#define SUP_RATE_11A_MAX_NUM_CHANNELS  8
+#define SUP_RATE_11B_MAX_NUM_CHANNELS  4
+#define SUP_RATE_11G_MAX_NUM_CHANNELS  12
+
+// Used to pass to the driver the number of successes and failures per rate
+struct rate_histogram {
+	union {
+		__le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
+		__le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
+		__le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
+	} success;
+	union {
+		__le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
+		__le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
+		__le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
+	} failed;
+} __packed;
+
+/* statistics command response */
+struct ipw_cmd_stats {
+	u8 cmd_id;
+	u8 seq_num;
+	__le16 good_sfd;
+	__le16 bad_plcp;
+	__le16 wrong_bssid;
+	__le16 valid_mpdu;
+	__le16 bad_mac_header;
+	__le16 reserved_frame_types;
+	__le16 rx_ina;
+	__le16 bad_crc32;
+	__le16 invalid_cts;
+	__le16 invalid_acks;
+	__le16 long_distance_ina_fina;
+	__le16 dsp_silence_unreachable;
+	__le16 accumulated_rssi;
+	__le16 rx_ovfl_frame_tossed;
+	__le16 rssi_silence_threshold;
+	__le16 rx_ovfl_frame_supplied;
+	__le16 last_rx_frame_signal;
+	__le16 last_rx_frame_noise;
+	__le16 rx_autodetec_no_ofdm;
+	__le16 rx_autodetec_no_barker;
+	__le16 reserved;
+} __packed;
+
+struct notif_channel_result {
+	u8 channel_num;
+	struct ipw_cmd_stats stats;
+	u8 uReserved;
+} __packed;
+
+#define SCAN_COMPLETED_STATUS_COMPLETE  1
+#define SCAN_COMPLETED_STATUS_ABORTED   2
+
+struct notif_scan_complete {
+	u8 scan_type;
+	u8 num_channels;
+	u8 status;
+	u8 reserved;
+} __packed;
+
+struct notif_frag_length {
+	__le16 frag_length;
+	__le16 reserved;
+} __packed;
+
+struct notif_beacon_state {
+	__le32 state;
+	__le32 number;
+} __packed;
+
+struct notif_tgi_tx_key {
+	u8 key_state;
+	u8 security_type;
+	u8 station_index;
+	u8 reserved;
+} __packed;
+
+#define SILENCE_OVER_THRESH (1)
+#define SILENCE_UNDER_THRESH (2)
+
+struct notif_link_deterioration {
+	struct ipw_cmd_stats stats;
+	u8 rate;
+	u8 modulation;
+	struct rate_histogram histogram;
+	u8 silence_notification_type;	/* SILENCE_OVER/UNDER_THRESH */
+	__le16 silence_count;
+} __packed;
+
+struct notif_association {
+	u8 state;
+} __packed;
+
+struct notif_authenticate {
+	u8 state;
+	struct machdr24 addr;
+	__le16 status;
+} __packed;
+
+struct notif_calibration {
+	u8 data[104];
+} __packed;
+
+struct notif_noise {
+	__le32 value;
+} __packed;
+
+struct ipw_rx_notification {
+	u8 reserved[8];
+	u8 subtype;
+	u8 flags;
+	__le16 size;
+	union {
+		struct notif_association assoc;
+		struct notif_authenticate auth;
+		struct notif_channel_result channel_result;
+		struct notif_scan_complete scan_complete;
+		struct notif_frag_length frag_len;
+		struct notif_beacon_state beacon_state;
+		struct notif_tgi_tx_key tgi_tx_key;
+		struct notif_link_deterioration link_deterioration;
+		struct notif_calibration calibration;
+		struct notif_noise noise;
+		u8 raw[0];
+	} u;
+} __packed;
+
+struct ipw_rx_frame {
+	__le32 reserved1;
+	u8 parent_tsf[4];	// fw_use[0] is boolean for OUR_TSF_IS_GREATER
+	u8 received_channel;	// The channel that this frame was received on.
+	// Note that for .11b this does not have to be
+	// the same as the channel on which it was sent.
+	// Filled in by the LMAC.
+	u8 frameStatus;
+	u8 rate;
+	u8 rssi;
+	u8 agc;
+	u8 rssi_dbm;
+	__le16 signal;
+	__le16 noise;
+	u8 antennaAndPhy;
+	u8 control;		// control bit should be on in bg
+	u8 rtscts_rate;		// rate of rts or cts (in rts cts sequence rate
+	// is identical)
+	u8 rtscts_seen;		// 0x1 RTS seen ; 0x2 CTS seen
+	__le16 length;
+	u8 data[0];
+} __packed;
+
+struct ipw_rx_header {
+	u8 message_type;
+	u8 rx_seq_num;
+	u8 control_bits;
+	u8 reserved;
+} __packed;
+
+struct ipw_rx_packet {
+	struct ipw_rx_header header;
+	union {
+		struct ipw_rx_frame frame;
+		struct ipw_rx_notification notification;
+	} u;
+} __packed;
+
+#define IPW_RX_NOTIFICATION_SIZE (sizeof(struct ipw_rx_header) + 12)
+#define IPW_RX_FRAME_SIZE        (unsigned int)(sizeof(struct ipw_rx_header) + \
+                                 sizeof(struct ipw_rx_frame))
+
+struct ipw_rx_mem_buffer {
+	dma_addr_t dma_addr;
+	struct sk_buff *skb;
+	struct list_head list;
+};				/* Not transferred over network, so not  __packed */
+
+struct ipw_rx_queue {
+	struct ipw_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
+	struct ipw_rx_mem_buffer *queue[RX_QUEUE_SIZE];
+	u32 processed;		/* Internal index to last handled Rx packet */
+	u32 read;		/* Shared index to newest available Rx buffer */
+	u32 write;		/* Shared index to oldest written Rx packet */
+	u32 free_count;		/* Number of pre-allocated buffers in rx_free */
+	/* Each of these lists is used as a FIFO for ipw_rx_mem_buffers */
+	struct list_head rx_free;	/* entries that own an skb */
+	struct list_head rx_used;	/* No SKB allocated */
+	spinlock_t lock;
+};				/* Not transferred over network, so not  __packed */
+
+struct alive_command_responce {
+	u8 alive_command;
+	u8 sequence_number;
+	__le16 software_revision;
+	u8 device_identifier;
+	u8 reserved1[5];
+	__le16 reserved2;
+	__le16 reserved3;
+	__le16 clock_settle_time;
+	__le16 powerup_settle_time;
+	__le16 reserved4;
+	u8 time_stamp[5];	/* month, day, year, hours, minutes */
+	u8 ucode_valid;
+} __packed;
+
+#define IPW_MAX_RATES 12
+
+struct ipw_rates {
+	u8 num_rates;
+	u8 rates[IPW_MAX_RATES];
+} __packed;
+
+struct command_block {
+	unsigned int control;
+	u32 source_addr;
+	u32 dest_addr;
+	unsigned int status;
+} __packed;
+
+#define CB_NUMBER_OF_ELEMENTS_SMALL 64
+struct fw_image_desc {
+	unsigned long last_cb_index;
+	unsigned long current_cb_index;
+	struct command_block cb_list[CB_NUMBER_OF_ELEMENTS_SMALL];
+	void *v_addr;
+	unsigned long p_addr;
+	unsigned long len;
+};
+
+struct ipw_sys_config {
+	u8 bt_coexistence;
+	u8 reserved1;
+	u8 answer_broadcast_ssid_probe;
+	u8 accept_all_data_frames;
+	u8 accept_non_directed_frames;
+	u8 exclude_unicast_unencrypted;
+	u8 disable_unicast_decryption;
+	u8 exclude_multicast_unencrypted;
+	u8 disable_multicast_decryption;
+	u8 antenna_diversity;
+	u8 pass_crc_to_host;
+	u8 dot11g_auto_detection;
+	u8 enable_cts_to_self;
+	u8 enable_multicast_filtering;
+	u8 bt_coexist_collision_thr;
+	u8 silence_threshold;
+	u8 accept_all_mgmt_bcpr;
+	u8 accept_all_mgmt_frames;
+	u8 pass_noise_stats_to_host;
+	u8 reserved3;
+} __packed;
+
+struct ipw_multicast_addr {
+	u8 num_of_multicast_addresses;
+	u8 reserved[3];
+	u8 mac1[6];
+	u8 mac2[6];
+	u8 mac3[6];
+	u8 mac4[6];
+} __packed;
+
+#define DCW_WEP_KEY_INDEX_MASK		0x03	/* bits [0:1] */
+#define DCW_WEP_KEY_SEC_TYPE_MASK	0x30	/* bits [4:5] */
+
+#define DCW_WEP_KEY_SEC_TYPE_WEP	0x00
+#define DCW_WEP_KEY_SEC_TYPE_CCM	0x20
+#define DCW_WEP_KEY_SEC_TYPE_TKIP	0x30
+
+#define DCW_WEP_KEY_INVALID_SIZE	0x00	/* 0 = Invalid key */
+#define DCW_WEP_KEY64Bit_SIZE		0x05	/* 64-bit encryption */
+#define DCW_WEP_KEY128Bit_SIZE		0x0D	/* 128-bit encryption */
+#define DCW_CCM_KEY128Bit_SIZE		0x10	/* 128-bit key */
+//#define DCW_WEP_KEY128BitIV_SIZE      0x10    /* 128-bit key and 128-bit IV */
+
+struct ipw_wep_key {
+	u8 cmd_id;
+	u8 seq_num;
+	u8 key_index;
+	u8 key_size;
+	u8 key[16];
+} __packed;
+
+struct ipw_tgi_tx_key {
+	u8 key_id;
+	u8 security_type;
+	u8 station_index;
+	u8 flags;
+	u8 key[16];
+	__le32 tx_counter[2];
+} __packed;
+
+#define IPW_SCAN_CHANNELS 54
+
+struct ipw_scan_request {
+	u8 scan_type;
+	__le16 dwell_time;
+	u8 channels_list[IPW_SCAN_CHANNELS];
+	u8 channels_reserved[3];
+} __packed;
+
+enum {
+	IPW_SCAN_PASSIVE_TILL_FIRST_BEACON_SCAN = 0,
+	IPW_SCAN_PASSIVE_FULL_DWELL_SCAN,
+	IPW_SCAN_ACTIVE_DIRECT_SCAN,
+	IPW_SCAN_ACTIVE_BROADCAST_SCAN,
+	IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN,
+	IPW_SCAN_TYPES
+};
+
+struct ipw_scan_request_ext {
+	__le32 full_scan_index;
+	u8 channels_list[IPW_SCAN_CHANNELS];
+	u8 scan_type[IPW_SCAN_CHANNELS / 2];
+	u8 reserved;
+	__le16 dwell_time[IPW_SCAN_TYPES];
+} __packed;
+
+static inline u8 ipw_get_scan_type(struct ipw_scan_request_ext *scan, u8 index)
+{
+	if (index % 2)
+		return scan->scan_type[index / 2] & 0x0F;
+	else
+		return (scan->scan_type[index / 2] & 0xF0) >> 4;
+}
+
+static inline void ipw_set_scan_type(struct ipw_scan_request_ext *scan,
+				     u8 index, u8 scan_type)
+{
+	if (index % 2)
+		scan->scan_type[index / 2] =
+		    (scan->scan_type[index / 2] & 0xF0) | (scan_type & 0x0F);
+	else
+		scan->scan_type[index / 2] =
+		    (scan->scan_type[index / 2] & 0x0F) |
+		    ((scan_type & 0x0F) << 4);
+}
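+
+/*
+ * Nibble-packing example for the helpers above: channels 2k and 2k+1
+ * share scan_type[k], with the even index in the high nibble, so
+ * ipw_set_scan_type(scan, 0, IPW_SCAN_ACTIVE_DIRECT_SCAN) followed by
+ * ipw_set_scan_type(scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN) leaves
+ * scan->scan_type[0] == 0x21.
+ */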
+
+struct ipw_associate {
+	u8 channel;
+#ifdef __LITTLE_ENDIAN_BITFIELD
+	u8 auth_type:4, auth_key:4;
+#else
+	u8 auth_key:4, auth_type:4;
+#endif
+	u8 assoc_type;
+	u8 reserved;
+	__le16 policy_support;
+	u8 preamble_length;
+	u8 ieee_mode;
+	u8 bssid[ETH_ALEN];
+	__le32 assoc_tsf_msw;
+	__le32 assoc_tsf_lsw;
+	__le16 capability;
+	__le16 listen_interval;
+	__le16 beacon_interval;
+	u8 dest[ETH_ALEN];
+	__le16 atim_window;
+	u8 smr;
+	u8 reserved1;
+	__le16 reserved2;
+} __packed;
+
+struct ipw_supported_rates {
+	u8 ieee_mode;
+	u8 num_rates;
+	u8 purpose;
+	u8 reserved;
+	u8 supported_rates[IPW_MAX_RATES];
+} __packed;
+
+struct ipw_rts_threshold {
+	__le16 rts_threshold;
+	__le16 reserved;
+} __packed;
+
+struct ipw_frag_threshold {
+	__le16 frag_threshold;
+	__le16 reserved;
+} __packed;
+
+struct ipw_retry_limit {
+	u8 short_retry_limit;
+	u8 long_retry_limit;
+	__le16 reserved;
+} __packed;
+
+struct ipw_dino_config {
+	__le32 dino_config_addr;
+	__le16 dino_config_size;
+	u8 dino_response;
+	u8 reserved;
+} __packed;
+
+struct ipw_aironet_info {
+	u8 id;
+	u8 length;
+	__le16 reserved;
+} __packed;
+
+struct ipw_rx_key {
+	u8 station_index;
+	u8 key_type;
+	u8 key_id;
+	u8 key_flag;
+	u8 key[16];
+	u8 station_address[6];
+	u8 key_index;
+	u8 reserved;
+} __packed;
+
+struct ipw_country_channel_info {
+	u8 first_channel;
+	u8 no_channels;
+	s8 max_tx_power;
+} __packed;
+
+struct ipw_country_info {
+	u8 id;
+	u8 length;
+	u8 country_str[IEEE80211_COUNTRY_STRING_LEN];
+	struct ipw_country_channel_info groups[7];
+} __packed;
+
+struct ipw_channel_tx_power {
+	u8 channel_number;
+	s8 tx_power;
+} __packed;
+
+#define SCAN_ASSOCIATED_INTERVAL (HZ)
+#define SCAN_INTERVAL (HZ / 10)
+#define MAX_A_CHANNELS  37
+#define MAX_B_CHANNELS  14
+
+struct ipw_tx_power {
+	u8 num_channels;
+	u8 ieee_mode;
+	struct ipw_channel_tx_power channels_tx_power[MAX_A_CHANNELS];
+} __packed;
+
+struct ipw_rsn_capabilities {
+	u8 id;
+	u8 length;
+	__le16 version;
+} __packed;
+
+struct ipw_sensitivity_calib {
+	__le16 beacon_rssi_raw;
+	__le16 reserved;
+} __packed;
+
+/**
+ * Host command structure.
+ *
+ * On input, the following fields should be filled:
+ * - cmd
+ * - len
+ * - status_len
+ * - param (if needed)
+ *
+ * On output,
+ * - \a status contains status;
+ * - \a param filled with status parameters.
+ */
+struct ipw_cmd {	 /* XXX */
+	u32 cmd;   /**< Host command */
+	u32 status;/**< Status */
+	u32 status_len;
+		   /**< How many 32 bit parameters in the status */
+	u32 len;   /**< incoming parameters length, bytes */
+  /**
+   * command parameters.
+   * There should be enough space for incoming and
+   * outgoing parameters.
+   * Incoming parameters are listed first, followed by outgoing params.
+   * nParams=(len+3)/4+status_len
+   */
+	u32 param[0];
+} __packed;
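+
+/* Worked example of the sizing rule above (illustrative values): a
+ * command with len = 6 bytes of input and status_len = 2 needs
+ * nParams = (6 + 3)/4 + 2 = 4 u32 slots in param[]. */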
+
+#define STATUS_HCMD_ACTIVE      (1<<0)	/**< host command in progress */
+
+#define STATUS_INT_ENABLED      (1<<1)
+#define STATUS_RF_KILL_HW       (1<<2)
+#define STATUS_RF_KILL_SW       (1<<3)
+#define STATUS_RF_KILL_MASK     (STATUS_RF_KILL_HW | STATUS_RF_KILL_SW)
+
+#define STATUS_INIT             (1<<5)
+#define STATUS_AUTH             (1<<6)
+#define STATUS_ASSOCIATED       (1<<7)
+#define STATUS_STATE_MASK       (STATUS_INIT | STATUS_AUTH | STATUS_ASSOCIATED)
+
+#define STATUS_ASSOCIATING      (1<<8)
+#define STATUS_DISASSOCIATING   (1<<9)
+#define STATUS_ROAMING          (1<<10)
+#define STATUS_EXIT_PENDING     (1<<11)
+#define STATUS_DISASSOC_PENDING (1<<12)
+#define STATUS_STATE_PENDING    (1<<13)
+
+#define STATUS_DIRECT_SCAN_PENDING (1<<19)
+#define STATUS_SCAN_PENDING     (1<<20)
+#define STATUS_SCANNING         (1<<21)
+#define STATUS_SCAN_ABORTING    (1<<22)
+#define STATUS_SCAN_FORCED      (1<<23)
+
+#define STATUS_LED_LINK_ON      (1<<24)
+#define STATUS_LED_ACT_ON       (1<<25)
+
+#define STATUS_INDIRECT_BYTE    (1<<28)	/* sysfs entry configured for access */
+#define STATUS_INDIRECT_DWORD   (1<<29)	/* sysfs entry configured for access */
+#define STATUS_DIRECT_DWORD     (1<<30)	/* sysfs entry configured for access */
+
+#define STATUS_SECURITY_UPDATED (1<<31)	/* Security sync needed */
+
+#define CFG_STATIC_CHANNEL      (1<<0)	/* Restrict assoc. to single channel */
+#define CFG_STATIC_ESSID        (1<<1)	/* Restrict assoc. to single SSID */
+#define CFG_STATIC_BSSID        (1<<2)	/* Restrict assoc. to single BSSID */
+#define CFG_CUSTOM_MAC          (1<<3)
+#define CFG_PREAMBLE_LONG       (1<<4)
+#define CFG_ADHOC_PERSIST       (1<<5)
+#define CFG_ASSOCIATE           (1<<6)
+#define CFG_FIXED_RATE          (1<<7)
+#define CFG_ADHOC_CREATE        (1<<8)
+#define CFG_NO_LED              (1<<9)
+#define CFG_BACKGROUND_SCAN     (1<<10)
+#define CFG_SPEED_SCAN          (1<<11)
+#define CFG_NET_STATS           (1<<12)
+
+#define CAP_SHARED_KEY          (1<<0)	/* Off = OPEN */
+#define CAP_PRIVACY_ON          (1<<1)	/* Off = No privacy */
+
+#define MAX_STATIONS            32
+#define IPW_INVALID_STATION     (0xff)
+
+struct ipw_station_entry {
+	u8 mac_addr[ETH_ALEN];
+	u8 reserved;
+	u8 support_mode;
+};
+
+#define AVG_ENTRIES 8
+struct average {
+	s16 entries[AVG_ENTRIES];
+	u8 pos;
+	u8 init;
+	s32 sum;
+};
+
+#define MAX_SPEED_SCAN 100
+#define IPW_IBSS_MAC_HASH_SIZE 31
+
+struct ipw_ibss_seq {
+	u8 mac[ETH_ALEN];
+	u16 seq_num;
+	u16 frag_num;
+	unsigned long packet_time;
+	struct list_head list;
+};
+
+struct ipw_error_elem {	 /* XXX */
+	u32 desc;
+	u32 time;
+	u32 blink1;
+	u32 blink2;
+	u32 link1;
+	u32 link2;
+	u32 data;
+};
+
+struct ipw_event {	 /* XXX */
+	u32 event;
+	u32 time;
+	u32 data;
+} __packed;
+
+struct ipw_fw_error {	 /* XXX */
+	unsigned long jiffies;
+	u32 status;
+	u32 config;
+	u32 elem_len;
+	u32 log_len;
+	struct ipw_error_elem *elem;
+	struct ipw_event *log;
+	u8 payload[0];
+} __packed;
+
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+
+enum ipw_prom_filter {
+	IPW_PROM_CTL_HEADER_ONLY = (1 << 0),
+	IPW_PROM_MGMT_HEADER_ONLY = (1 << 1),
+	IPW_PROM_DATA_HEADER_ONLY = (1 << 2),
+	IPW_PROM_ALL_HEADER_ONLY = 0xf, /* bits 0..3 */
+	IPW_PROM_NO_TX = (1 << 4),
+	IPW_PROM_NO_RX = (1 << 5),
+	IPW_PROM_NO_CTL = (1 << 6),
+	IPW_PROM_NO_MGMT = (1 << 7),
+	IPW_PROM_NO_DATA = (1 << 8),
+};
+
+struct ipw_priv;
+struct ipw_prom_priv {
+	struct ipw_priv *priv;
+	struct libipw_device *ieee;
+	enum ipw_prom_filter filter;
+	int tx_packets;
+	int rx_packets;
+};
+#endif
+
+#if defined(CONFIG_IPW2200_RADIOTAP) || defined(CONFIG_IPW2200_PROMISCUOUS)
+/* Magic struct that slots into the radiotap header -- no reason
+ * to build this manually element by element, we can write it much
+ * more efficiently than we can parse it. ORDER MATTERS HERE
+ *
+ * When sent to us via the simulated Rx interface in sysfs, the entire
+ * structure is provided regardless of any bits unset.
+ */
+struct ipw_rt_hdr {
+	struct ieee80211_radiotap_header rt_hdr;
+	u64 rt_tsf;      /* TSF */	/* XXX */
+	u8 rt_flags;	/* radiotap packet flags */
+	u8 rt_rate;	/* rate in 500kb/s */
+	__le16 rt_channel;	/* channel in MHz */
+	__le16 rt_chbitmask;	/* channel bitfield */
+	s8 rt_dbmsignal;	/* signal in dBm, kluged to signed */
+	s8 rt_dbmnoise;
+	u8 rt_antenna;	/* antenna number */
+	u8 payload[0];  /* payload... */
+} __packed;
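+
+/* The member order above mirrors the radiotap field order (TSFT, FLAGS,
+ * RATE, CHANNEL, DBM_ANTSIGNAL, DBM_ANTNOISE, ANTENNA), which is what
+ * allows the header to be written as one flat struct. */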
+#endif
+
+struct ipw_priv {
+	/* ieee device used by generic ieee processing code */
+	struct libipw_device *ieee;
+
+	spinlock_t lock;
+	spinlock_t irq_lock;
+	struct mutex mutex;
+
+	/* basic pci-network driver stuff */
+	struct pci_dev *pci_dev;
+	struct net_device *net_dev;
+
+#ifdef CONFIG_IPW2200_PROMISCUOUS
+	/* Promiscuous mode */
+	struct ipw_prom_priv *prom_priv;
+	struct net_device *prom_net_dev;
+#endif
+
+	/* pci hardware address support */
+	void __iomem *hw_base;
+	unsigned long hw_len;
+
+	struct fw_image_desc sram_desc;
+
+	/* result of ucode download */
+	struct alive_command_responce dino_alive;
+
+	wait_queue_head_t wait_command_queue;
+	wait_queue_head_t wait_state;
+
+	/* Rx and Tx DMA processing queues */
+	struct ipw_rx_queue *rxq;
+	struct clx2_tx_queue txq_cmd;
+	struct clx2_tx_queue txq[4];
+	u32 status;
+	u32 config;
+	u32 capability;
+
+	struct average average_missed_beacons;
+	s16 exp_avg_rssi;
+	s16 exp_avg_noise;
+	u32 port_type;
+	int rx_bufs_min;	  /**< minimum number of bufs in Rx queue */
+	int rx_pend_max;	  /**< maximum pending buffers for one IRQ */
+	u32 hcmd_seq;		  /**< sequence number for hcmd */
+	u32 disassociate_threshold;
+	u32 roaming_threshold;
+
+	struct ipw_associate assoc_request;
+	struct libipw_network *assoc_network;
+
+	unsigned long ts_scan_abort;
+	struct ipw_supported_rates rates;
+	struct ipw_rates phy[3];	   /**< PHY restrictions, per band */
+	struct ipw_rates supp;		   /**< software defined */
+	struct ipw_rates extended;	   /**< use for corresp. IE, AP only */
+
+	struct notif_link_deterioration last_link_deterioration; /**< for statistics */
+	struct ipw_cmd *hcmd; /**< host command currently executed */
+
+	wait_queue_head_t hcmd_wq;     /**< host command waits for execution */
+	u32 tsf_bcn[2];		     /**< TSF from latest beacon */
+
+	struct notif_calibration calib;	/**< last calibration */
+
+	/* ordinal interface with firmware */
+	u32 table0_addr;
+	u32 table0_len;
+	u32 table1_addr;
+	u32 table1_len;
+	u32 table2_addr;
+	u32 table2_len;
+
+	/* context information */
+	u8 essid[IW_ESSID_MAX_SIZE];
+	u8 essid_len;
+	u8 nick[IW_ESSID_MAX_SIZE];
+	u16 rates_mask;
+	u8 channel;
+	struct ipw_sys_config sys_config;
+	u32 power_mode;
+	u8 bssid[ETH_ALEN];
+	u16 rts_threshold;
+	u8 mac_addr[ETH_ALEN];
+	u8 num_stations;
+	u8 stations[MAX_STATIONS][ETH_ALEN];
+	u8 short_retry_limit;
+	u8 long_retry_limit;
+
+	u32 notif_missed_beacons;
+
+	/* Statistics and counters normalized with each association */
+	u32 last_missed_beacons;
+	u32 last_tx_packets;
+	u32 last_rx_packets;
+	u32 last_tx_failures;
+	u32 last_rx_err;
+	u32 last_rate;
+
+	u32 missed_adhoc_beacons;
+	u32 missed_beacons;
+	u32 rx_packets;
+	u32 tx_packets;
+	u32 quality;
+
+	u8 speed_scan[MAX_SPEED_SCAN];
+	u8 speed_scan_pos;
+
+	u16 last_seq_num;
+	u16 last_frag_num;
+	unsigned long last_packet_time;
+	struct list_head ibss_mac_hash[IPW_IBSS_MAC_HASH_SIZE];
+
+	/* eeprom */
+	u8 eeprom[0x100];	/* 256 bytes of eeprom */
+	u8 country[4];
+	int eeprom_delay;
+
+	struct iw_statistics wstats;
+
+	struct iw_public_data wireless_data;
+
+	int user_requested_scan;
+	u8 direct_scan_ssid[IW_ESSID_MAX_SIZE];
+	u8 direct_scan_ssid_len;
+
+	struct delayed_work adhoc_check;
+	struct work_struct associate;
+	struct work_struct disassociate;
+	struct work_struct system_config;
+	struct work_struct rx_replenish;
+	struct delayed_work request_scan;
+	struct delayed_work request_direct_scan;
+	struct delayed_work request_passive_scan;
+	struct delayed_work scan_event;
+	struct work_struct adapter_restart;
+	struct delayed_work rf_kill;
+	struct work_struct up;
+	struct work_struct down;
+	struct delayed_work gather_stats;
+	struct work_struct abort_scan;
+	struct work_struct roam;
+	struct delayed_work scan_check;
+	struct work_struct link_up;
+	struct work_struct link_down;
+
+	struct tasklet_struct irq_tasklet;
+
+	/* LED related variables and work_struct */
+	u8 nic_type;
+	u32 led_activity_on;
+	u32 led_activity_off;
+	u32 led_association_on;
+	u32 led_association_off;
+	u32 led_ofdm_on;
+	u32 led_ofdm_off;
+
+	struct delayed_work led_link_on;
+	struct delayed_work led_link_off;
+	struct delayed_work led_act_off;
+	struct work_struct merge_networks;
+
+	struct ipw_cmd_log *cmdlog;
+	int cmdlog_len;
+	int cmdlog_pos;
+
+#define IPW_2200BG  1
+#define IPW_2915ABG 2
+	u8 adapter;
+
+	s8 tx_power;
+
+	/* Track time in suspend using CLOCK_BOOTTIME */
+	time64_t suspend_at;
+	time64_t suspend_time;
+
+#ifdef CONFIG_PM
+	u32 pm_state[16];
+#endif
+
+	struct ipw_fw_error *error;
+
+	/* network state */
+
+	/* Used to pass the current INTA value from ISR to Tasklet */
+	u32 isr_inta;
+
+	/* QoS */
+	struct ipw_qos_info qos_data;
+	struct work_struct qos_activate;
+	/*********************************/
+
+	/* debugging info */
+	u32 indirect_dword;
+	u32 direct_dword;
+	u32 indirect_byte;
+};				/*ipw_priv */
+
+/* debug macros */
+
+/* Debug and printf string expansion helpers for printing bitfields */
+#define BIT_FMT8 "%c%c%c%c-%c%c%c%c"
+#define BIT_FMT16 BIT_FMT8 ":" BIT_FMT8
+#define BIT_FMT32 BIT_FMT16 " " BIT_FMT16
+
+#define BITC(x,y) (((x>>y)&1)?'1':'0')
+#define BIT_ARG8(x) \
+BITC(x,7),BITC(x,6),BITC(x,5),BITC(x,4),\
+BITC(x,3),BITC(x,2),BITC(x,1),BITC(x,0)
+
+#define BIT_ARG16(x) \
+BITC(x,15),BITC(x,14),BITC(x,13),BITC(x,12),\
+BITC(x,11),BITC(x,10),BITC(x,9),BITC(x,8),\
+BIT_ARG8(x)
+
+#define BIT_ARG32(x) \
+BITC(x,31),BITC(x,30),BITC(x,29),BITC(x,28),\
+BITC(x,27),BITC(x,26),BITC(x,25),BITC(x,24),\
+BITC(x,23),BITC(x,22),BITC(x,21),BITC(x,20),\
+BITC(x,19),BITC(x,18),BITC(x,17),BITC(x,16),\
+BIT_ARG16(x)
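+
+/*
+ * Typical use, e.g. for an 8-bit flags field:
+ *   IPW_DEBUG_INFO("flags: " BIT_FMT8 "\n", BIT_ARG8(flags));
+ * prints something like "0010-1100" for flags == 0x2c.
+ */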
+
+
+#define IPW_DEBUG(level, fmt, args...) \
+do { if (ipw_debug_level & (level)) \
+  printk(KERN_DEBUG DRV_NAME": %c %s " fmt, \
+         in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
+
+#ifdef CONFIG_IPW2200_DEBUG
+#define IPW_LL_DEBUG(level, fmt, args...) \
+do { if (ipw_debug_level & (level)) \
+  printk(KERN_DEBUG DRV_NAME": %c %s " fmt, \
+         in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
+#else
+#define IPW_LL_DEBUG(level, fmt, args...) do {} while (0)
+#endif				/* CONFIG_IPW2200_DEBUG */
+
+/*
+ * To use the debug system;
+ *
+ * If you are defining a new debug classification, simply add it to the #define
+ * list here in the form of:
+ *
+ * #define IPW_DL_xxxx VALUE
+ *
+ * shifting the value one bit left from the previous entry.  xxxx should be
+ * the name of the classification (for example, WEP).
+ *
+ * You then need to either add an IPW_xxxx_DEBUG() macro definition for your
+ * classification, or use IPW_DEBUG(IPW_DL_xxxx, ...) whenever you want
+ * to send output to that classification.
+ *
+ * To add your debug level to the list of levels seen when you perform
+ *
+ * % cat /proc/net/ipw/debug_level
+ *
+ * you simply need to add your entry to the ipw_debug_levels array.
+ *
+ * If you do not see debug_level in /proc/net/ipw then you do not have
+ * CONFIG_IPW2200_DEBUG defined in your kernel configuration.
+ *
+ */
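+
+/* For example, IPW_DEBUG(IPW_DL_SCAN, "start scan\n") is emitted only
+ * when bit 11 (IPW_DL_SCAN) is set in the debug_level mask. */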
+
+#define IPW_DL_ERROR         (1<<0)
+#define IPW_DL_WARNING       (1<<1)
+#define IPW_DL_INFO          (1<<2)
+#define IPW_DL_WX            (1<<3)
+#define IPW_DL_HOST_COMMAND  (1<<5)
+#define IPW_DL_STATE         (1<<6)
+
+#define IPW_DL_NOTIF         (1<<10)
+#define IPW_DL_SCAN          (1<<11)
+#define IPW_DL_ASSOC         (1<<12)
+#define IPW_DL_DROP          (1<<13)
+#define IPW_DL_IOCTL         (1<<14)
+
+#define IPW_DL_MANAGE        (1<<15)
+#define IPW_DL_FW            (1<<16)
+#define IPW_DL_RF_KILL       (1<<17)
+#define IPW_DL_FW_ERRORS     (1<<18)
+
+#define IPW_DL_LED           (1<<19)
+
+#define IPW_DL_ORD           (1<<20)
+
+#define IPW_DL_FRAG          (1<<21)
+#define IPW_DL_WEP           (1<<22)
+#define IPW_DL_TX            (1<<23)
+#define IPW_DL_RX            (1<<24)
+#define IPW_DL_ISR           (1<<25)
+#define IPW_DL_FW_INFO       (1<<26)
+#define IPW_DL_IO            (1<<27)
+#define IPW_DL_TRACE         (1<<28)
+
+#define IPW_DL_STATS         (1<<29)
+#define IPW_DL_MERGE         (1<<30)
+#define IPW_DL_QOS           (1<<31)
+
+#define IPW_ERROR(f, a...) printk(KERN_ERR DRV_NAME ": " f, ## a)
+#define IPW_WARNING(f, a...) printk(KERN_WARNING DRV_NAME ": " f, ## a)
+#define IPW_DEBUG_INFO(f, a...)    IPW_DEBUG(IPW_DL_INFO, f, ## a)
+
+#define IPW_DEBUG_WX(f, a...)     IPW_DEBUG(IPW_DL_WX, f, ## a)
+#define IPW_DEBUG_SCAN(f, a...)   IPW_DEBUG(IPW_DL_SCAN, f, ## a)
+#define IPW_DEBUG_TRACE(f, a...)  IPW_LL_DEBUG(IPW_DL_TRACE, f, ## a)
+#define IPW_DEBUG_RX(f, a...)     IPW_LL_DEBUG(IPW_DL_RX, f, ## a)
+#define IPW_DEBUG_TX(f, a...)     IPW_LL_DEBUG(IPW_DL_TX, f, ## a)
+#define IPW_DEBUG_ISR(f, a...)    IPW_LL_DEBUG(IPW_DL_ISR, f, ## a)
+#define IPW_DEBUG_MANAGEMENT(f, a...) IPW_DEBUG(IPW_DL_MANAGE, f, ## a)
+#define IPW_DEBUG_LED(f, a...) IPW_LL_DEBUG(IPW_DL_LED, f, ## a)
+#define IPW_DEBUG_WEP(f, a...)    IPW_LL_DEBUG(IPW_DL_WEP, f, ## a)
+#define IPW_DEBUG_HC(f, a...) IPW_LL_DEBUG(IPW_DL_HOST_COMMAND, f, ## a)
+#define IPW_DEBUG_FRAG(f, a...) IPW_LL_DEBUG(IPW_DL_FRAG, f, ## a)
+#define IPW_DEBUG_FW(f, a...) IPW_LL_DEBUG(IPW_DL_FW, f, ## a)
+#define IPW_DEBUG_RF_KILL(f, a...) IPW_DEBUG(IPW_DL_RF_KILL, f, ## a)
+#define IPW_DEBUG_DROP(f, a...) IPW_DEBUG(IPW_DL_DROP, f, ## a)
+#define IPW_DEBUG_IO(f, a...) IPW_LL_DEBUG(IPW_DL_IO, f, ## a)
+#define IPW_DEBUG_ORD(f, a...) IPW_LL_DEBUG(IPW_DL_ORD, f, ## a)
+#define IPW_DEBUG_FW_INFO(f, a...) IPW_LL_DEBUG(IPW_DL_FW_INFO, f, ## a)
+#define IPW_DEBUG_NOTIF(f, a...) IPW_DEBUG(IPW_DL_NOTIF, f, ## a)
+#define IPW_DEBUG_STATE(f, a...) IPW_DEBUG(IPW_DL_STATE | IPW_DL_ASSOC | IPW_DL_INFO, f, ## a)
+#define IPW_DEBUG_ASSOC(f, a...) IPW_DEBUG(IPW_DL_ASSOC | IPW_DL_INFO, f, ## a)
+#define IPW_DEBUG_STATS(f, a...) IPW_LL_DEBUG(IPW_DL_STATS, f, ## a)
+#define IPW_DEBUG_MERGE(f, a...) IPW_LL_DEBUG(IPW_DL_MERGE, f, ## a)
+#define IPW_DEBUG_QOS(f, a...)   IPW_LL_DEBUG(IPW_DL_QOS, f, ## a)
+
+#include <linux/ctype.h>
+
+/*
+* Register bit definitions
+*/
+
+#define IPW_INTA_RW       0x00000008
+#define IPW_INTA_MASK_R   0x0000000C
+#define IPW_INDIRECT_ADDR 0x00000010
+#define IPW_INDIRECT_DATA 0x00000014
+#define IPW_AUTOINC_ADDR  0x00000018
+#define IPW_AUTOINC_DATA  0x0000001C
+#define IPW_RESET_REG     0x00000020
+#define IPW_GP_CNTRL_RW   0x00000024
+
+#define IPW_READ_INT_REGISTER 0xFF4
+
+#define IPW_GP_CNTRL_BIT_INIT_DONE	0x00000004
+
+#define IPW_REGISTER_DOMAIN1_END        0x00001000
+#define IPW_SRAM_READ_INT_REGISTER 	0x00000ff4
+
+#define IPW_SHARED_LOWER_BOUND          0x00000200
+#define IPW_INTERRUPT_AREA_LOWER_BOUND  0x00000f80
+
+#define IPW_NIC_SRAM_LOWER_BOUND        0x00000000
+#define IPW_NIC_SRAM_UPPER_BOUND        0x00030000
+
+#define IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER (1 << 29)
+#define IPW_GP_CNTRL_BIT_CLOCK_READY    0x00000001
+#define IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY 0x00000002
+
+/*
+ * RESET Register Bit Indexes
+ */
+#define CBD_RESET_REG_PRINCETON_RESET (1<<0)
+#define IPW_START_STANDBY             (1<<2)
+#define IPW_ACTIVITY_LED              (1<<4)
+#define IPW_ASSOCIATED_LED            (1<<5)
+#define IPW_OFDM_LED                  (1<<6)
+#define IPW_RESET_REG_SW_RESET        (1<<7)
+#define IPW_RESET_REG_MASTER_DISABLED (1<<8)
+#define IPW_RESET_REG_STOP_MASTER     (1<<9)
+#define IPW_GATE_ODMA                 (1<<25)
+#define IPW_GATE_IDMA                 (1<<26)
+#define IPW_ARC_KESHET_CONFIG         (1<<27)
+#define IPW_GATE_ADMA                 (1<<29)
+
+#define IPW_CSR_CIS_UPPER_BOUND	0x00000200
+#define IPW_DOMAIN_0_END 0x1000
+#define CLX_MEM_BAR_SIZE 0x1000
+
+/* Dino/baseband control registers bits */
+
+#define DINO_ENABLE_SYSTEM 0x80	/* 1 = baseband processor on, 0 = reset */
+#define DINO_ENABLE_CS     0x40	/* 1 = enable ucode load */
+#define DINO_RXFIFO_DATA   0x01	/* 1 = data available */
+#define IPW_BASEBAND_CONTROL_STATUS	0X00200000
+#define IPW_BASEBAND_TX_FIFO_WRITE	0X00200004
+#define IPW_BASEBAND_RX_FIFO_READ	0X00200004
+#define IPW_BASEBAND_CONTROL_STORE	0X00200010
+
+#define IPW_INTERNAL_CMD_EVENT 	0X00300004
+#define IPW_BASEBAND_POWER_DOWN 0x00000001
+
+#define IPW_MEM_HALT_AND_RESET  0x003000e0
+
+/* MEM_HALT_AND_RESET register bits */
+#define IPW_BIT_HALT_RESET_ON	0x80000000
+#define IPW_BIT_HALT_RESET_OFF 	0x00000000
+
+#define CB_LAST_VALID     0x20000000
+#define CB_INT_ENABLED    0x40000000
+#define CB_VALID          0x80000000
+#define CB_SRC_LE         0x08000000
+#define CB_DEST_LE        0x04000000
+#define CB_SRC_AUTOINC    0x00800000
+#define CB_SRC_IO_GATED   0x00400000
+#define CB_DEST_AUTOINC   0x00080000
+#define CB_SRC_SIZE_LONG  0x00200000
+#define CB_DEST_SIZE_LONG 0x00020000
+
+/* DMA DEFINES */
+
+#define DMA_CONTROL_SMALL_CB_CONST_VALUE 0x00540000
+#define DMA_CB_STOP_AND_ABORT            0x00000C00
+#define DMA_CB_START                     0x00000100
+
+#define IPW_SHARED_SRAM_SIZE               0x00030000
+#define IPW_SHARED_SRAM_DMA_CONTROL        0x00027000
+#define CB_MAX_LENGTH                      0x1FFF
+
+#define IPW_HOST_EEPROM_DATA_SRAM_SIZE 0xA18
+#define IPW_EEPROM_IMAGE_SIZE          0x100
+
+/* DMA defs */
+#define IPW_DMA_I_CURRENT_CB  0x003000D0
+#define IPW_DMA_O_CURRENT_CB  0x003000D4
+#define IPW_DMA_I_DMA_CONTROL 0x003000A4
+#define IPW_DMA_I_CB_BASE     0x003000A0
+
+#define IPW_TX_CMD_QUEUE_BD_BASE        0x00000200
+#define IPW_TX_CMD_QUEUE_BD_SIZE        0x00000204
+#define IPW_TX_QUEUE_0_BD_BASE          0x00000208
+#define IPW_TX_QUEUE_0_BD_SIZE          0x0000020C
+#define IPW_TX_QUEUE_1_BD_BASE          0x00000210
+#define IPW_TX_QUEUE_1_BD_SIZE          0x00000214
+#define IPW_TX_QUEUE_2_BD_BASE          0x00000218
+#define IPW_TX_QUEUE_2_BD_SIZE          0x0000021C
+#define IPW_TX_QUEUE_3_BD_BASE          0x00000220
+#define IPW_TX_QUEUE_3_BD_SIZE          0x00000224
+#define IPW_RX_BD_BASE                  0x00000240
+#define IPW_RX_BD_SIZE                  0x00000244
+#define IPW_RFDS_TABLE_LOWER            0x00000500
+
+#define IPW_TX_CMD_QUEUE_READ_INDEX     0x00000280
+#define IPW_TX_QUEUE_0_READ_INDEX       0x00000284
+#define IPW_TX_QUEUE_1_READ_INDEX       0x00000288
+#define IPW_TX_QUEUE_2_READ_INDEX       0x0000028C
+#define IPW_TX_QUEUE_3_READ_INDEX       0x00000290
+#define IPW_RX_READ_INDEX               0x000002A0
+
+#define IPW_TX_CMD_QUEUE_WRITE_INDEX    0x00000F80
+#define IPW_TX_QUEUE_0_WRITE_INDEX      0x00000F84
+#define IPW_TX_QUEUE_1_WRITE_INDEX      0x00000F88
+#define IPW_TX_QUEUE_2_WRITE_INDEX      0x00000F8C
+#define IPW_TX_QUEUE_3_WRITE_INDEX      0x00000F90
+#define IPW_RX_WRITE_INDEX              0x00000FA0
+
+/*
+ * EEPROM Related Definitions
+ */
+
+#define IPW_EEPROM_DATA_SRAM_ADDRESS (IPW_SHARED_LOWER_BOUND + 0x814)
+#define IPW_EEPROM_DATA_SRAM_SIZE    (IPW_SHARED_LOWER_BOUND + 0x818)
+#define IPW_EEPROM_LOAD_DISABLE      (IPW_SHARED_LOWER_BOUND + 0x81C)
+#define IPW_EEPROM_DATA              (IPW_SHARED_LOWER_BOUND + 0x820)
+#define IPW_EEPROM_UPPER_ADDRESS     (IPW_SHARED_LOWER_BOUND + 0x9E0)
+
+#define IPW_STATION_TABLE_LOWER      (IPW_SHARED_LOWER_BOUND + 0xA0C)
+#define IPW_STATION_TABLE_UPPER      (IPW_SHARED_LOWER_BOUND + 0xB0C)
+#define IPW_REQUEST_ATIM             (IPW_SHARED_LOWER_BOUND + 0xB0C)
+#define IPW_ATIM_SENT                (IPW_SHARED_LOWER_BOUND + 0xB10)
+#define IPW_WHO_IS_AWAKE             (IPW_SHARED_LOWER_BOUND + 0xB14)
+#define IPW_DURING_ATIM_WINDOW       (IPW_SHARED_LOWER_BOUND + 0xB18)
+
+#define MSB                             1
+#define LSB                             0
+#define WORD_TO_BYTE(_word)             ((_word) * sizeof(u16))
+
+#define GET_EEPROM_ADDR(_wordoffset,_byteoffset) \
+    ( WORD_TO_BYTE(_wordoffset) + (_byteoffset) )
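+
+/* Worked example: EEPROM_MAC_ADDRESS below expands to
+ * GET_EEPROM_ADDR(0x21, LSB) = 0x21 * sizeof(u16) + 0 = 0x42, i.e. the
+ * byte offset of the first MAC address octet in the EEPROM image. */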
+
+/* EEPROM access by BYTE */
+#define EEPROM_PME_CAPABILITY   (GET_EEPROM_ADDR(0x09,MSB))	/* 1 byte   */
+#define EEPROM_MAC_ADDRESS      (GET_EEPROM_ADDR(0x21,LSB))	/* 6 byte   */
+#define EEPROM_VERSION          (GET_EEPROM_ADDR(0x24,MSB))	/* 1 byte   */
+#define EEPROM_NIC_TYPE         (GET_EEPROM_ADDR(0x25,LSB))	/* 1 byte   */
+#define EEPROM_SKU_CAPABILITY   (GET_EEPROM_ADDR(0x25,MSB))	/* 1 byte   */
+#define EEPROM_COUNTRY_CODE     (GET_EEPROM_ADDR(0x26,LSB))	/* 3 bytes  */
+#define EEPROM_IBSS_CHANNELS_BG (GET_EEPROM_ADDR(0x28,LSB))	/* 2 bytes  */
+#define EEPROM_IBSS_CHANNELS_A  (GET_EEPROM_ADDR(0x29,MSB))	/* 5 bytes  */
+#define EEPROM_BSS_CHANNELS_BG  (GET_EEPROM_ADDR(0x2c,LSB))	/* 2 bytes  */
+#define EEPROM_HW_VERSION       (GET_EEPROM_ADDR(0x72,LSB))	/* 2 bytes  */
+
+/* NIC type as found in the one byte EEPROM_NIC_TYPE offset */
+#define EEPROM_NIC_TYPE_0 0
+#define EEPROM_NIC_TYPE_1 1
+#define EEPROM_NIC_TYPE_2 2
+#define EEPROM_NIC_TYPE_3 3
+#define EEPROM_NIC_TYPE_4 4
+
+/* Bluetooth Coexistence capabilities as found in EEPROM_SKU_CAPABILITY */
+#define EEPROM_SKU_CAP_BT_CHANNEL_SIG  0x01	/* we can tell BT our channel # */
+#define EEPROM_SKU_CAP_BT_PRIORITY     0x02	/* BT can take priority over us */
+#define EEPROM_SKU_CAP_BT_OOB          0x04	/* we can signal BT out-of-band */
+
+#define FW_MEM_REG_LOWER_BOUND          0x00300000
+#define FW_MEM_REG_EEPROM_ACCESS        (FW_MEM_REG_LOWER_BOUND + 0x40)
+#define IPW_EVENT_REG                   (FW_MEM_REG_LOWER_BOUND + 0x04)
+#define EEPROM_BIT_SK                   (1<<0)
+#define EEPROM_BIT_CS                   (1<<1)
+#define EEPROM_BIT_DI                   (1<<2)
+#define EEPROM_BIT_DO                   (1<<4)
+
+#define EEPROM_CMD_READ                 0x2
+
+/* Interrupts masks */
+#define IPW_INTA_NONE   0x00000000
+
+#define IPW_INTA_BIT_RX_TRANSFER                   0x00000002
+#define IPW_INTA_BIT_STATUS_CHANGE                 0x00000010
+#define IPW_INTA_BIT_BEACON_PERIOD_EXPIRED         0x00000020
+
+/* INTA bits for CF */
+#define IPW_INTA_BIT_TX_CMD_QUEUE                  0x00000800
+#define IPW_INTA_BIT_TX_QUEUE_1                    0x00001000
+#define IPW_INTA_BIT_TX_QUEUE_2                    0x00002000
+#define IPW_INTA_BIT_TX_QUEUE_3                    0x00004000
+#define IPW_INTA_BIT_TX_QUEUE_4                    0x00008000
+
+#define IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE      0x00010000
+
+#define IPW_INTA_BIT_PREPARE_FOR_POWER_DOWN        0x00100000
+#define IPW_INTA_BIT_POWER_DOWN                    0x00200000
+
+#define IPW_INTA_BIT_FW_INITIALIZATION_DONE        0x01000000
+#define IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE  0x02000000
+#define IPW_INTA_BIT_RF_KILL_DONE                  0x04000000
+#define IPW_INTA_BIT_FATAL_ERROR             0x40000000
+#define IPW_INTA_BIT_PARITY_ERROR            0x80000000
+
+/* Interrupts enabled at init time. */
+#define IPW_INTA_MASK_ALL                        \
+        (IPW_INTA_BIT_TX_QUEUE_1               | \
+	 IPW_INTA_BIT_TX_QUEUE_2               | \
+	 IPW_INTA_BIT_TX_QUEUE_3               | \
+	 IPW_INTA_BIT_TX_QUEUE_4               | \
+	 IPW_INTA_BIT_TX_CMD_QUEUE             | \
+	 IPW_INTA_BIT_RX_TRANSFER              | \
+	 IPW_INTA_BIT_FATAL_ERROR              | \
+	 IPW_INTA_BIT_PARITY_ERROR             | \
+	 IPW_INTA_BIT_STATUS_CHANGE            | \
+	 IPW_INTA_BIT_FW_INITIALIZATION_DONE   | \
+	 IPW_INTA_BIT_BEACON_PERIOD_EXPIRED    | \
+	 IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE | \
+	 IPW_INTA_BIT_PREPARE_FOR_POWER_DOWN   | \
+	 IPW_INTA_BIT_POWER_DOWN               | \
+         IPW_INTA_BIT_RF_KILL_DONE )
+
+/* FW event log definitions */
+#define EVENT_ELEM_SIZE     (3 * sizeof(u32))
+#define EVENT_START_OFFSET  (1 * sizeof(u32) + 2 * sizeof(u16))
+
+/* FW error log definitions */
+#define ERROR_ELEM_SIZE     (7 * sizeof(u32))
+#define ERROR_START_OFFSET  (1 * sizeof(u32))
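+
+/* Concretely: each event record is three u32s (12 bytes) and the records
+ * start 8 bytes into the log; each error record is seven u32s (28 bytes)
+ * and the records start 4 bytes in. */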
+
+/* TX power level (dbm) */
+#define IPW_TX_POWER_MIN	-12
+#define IPW_TX_POWER_MAX	20
+#define IPW_TX_POWER_DEFAULT	IPW_TX_POWER_MAX
+
+enum {
+	IPW_FW_ERROR_OK = 0,
+	IPW_FW_ERROR_FAIL,
+	IPW_FW_ERROR_MEMORY_UNDERFLOW,
+	IPW_FW_ERROR_MEMORY_OVERFLOW,
+	IPW_FW_ERROR_BAD_PARAM,
+	IPW_FW_ERROR_BAD_CHECKSUM,
+	IPW_FW_ERROR_NMI_INTERRUPT,
+	IPW_FW_ERROR_BAD_DATABASE,
+	IPW_FW_ERROR_ALLOC_FAIL,
+	IPW_FW_ERROR_DMA_UNDERRUN,
+	IPW_FW_ERROR_DMA_STATUS,
+	IPW_FW_ERROR_DINO_ERROR,
+	IPW_FW_ERROR_EEPROM_ERROR,
+	IPW_FW_ERROR_SYSASSERT,
+	IPW_FW_ERROR_FATAL_ERROR
+};
+
+#define AUTH_OPEN	0
+#define AUTH_SHARED_KEY	1
+#define AUTH_LEAP	2
+#define AUTH_IGNORE	3
+
+#define HC_ASSOCIATE      0
+#define HC_REASSOCIATE    1
+#define HC_DISASSOCIATE   2
+#define HC_IBSS_START     3
+#define HC_IBSS_RECONF    4
+#define HC_DISASSOC_QUIET 5
+
+#define HC_QOS_SUPPORT_ASSOC  cpu_to_le16(0x01)
+
+#define IPW_RATE_CAPABILITIES 1
+#define IPW_RATE_CONNECT      0
+
+/*
+ * Rate values and masks
+ */
+#define IPW_TX_RATE_1MB  0x0A
+#define IPW_TX_RATE_2MB  0x14
+#define IPW_TX_RATE_5MB  0x37
+#define IPW_TX_RATE_6MB  0x0D
+#define IPW_TX_RATE_9MB  0x0F
+#define IPW_TX_RATE_11MB 0x6E
+#define IPW_TX_RATE_12MB 0x05
+#define IPW_TX_RATE_18MB 0x07
+#define IPW_TX_RATE_24MB 0x09
+#define IPW_TX_RATE_36MB 0x0B
+#define IPW_TX_RATE_48MB 0x01
+#define IPW_TX_RATE_54MB 0x03
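+
+/* The CCK codes above are the data rate in units of 100 kbps (0x0A = 10,
+ * i.e. 1 Mbps; 0x6E = 110, i.e. 11 Mbps). The OFDM codes do not follow
+ * that scheme and appear to be opaque firmware rate codes. */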
+
+#define IPW_ORD_TABLE_ID_MASK             0x0000FF00
+#define IPW_ORD_TABLE_VALUE_MASK          0x000000FF
+
+#define IPW_ORD_TABLE_0_MASK              0x0000F000
+#define IPW_ORD_TABLE_1_MASK              0x0000F100
+#define IPW_ORD_TABLE_2_MASK              0x0000F200
+#define IPW_ORD_TABLE_3_MASK              0x0000F300
+#define IPW_ORD_TABLE_4_MASK              0x0000F400
+#define IPW_ORD_TABLE_5_MASK              0x0000F500
+#define IPW_ORD_TABLE_6_MASK              0x0000F600
+#define IPW_ORD_TABLE_7_MASK              0x0000F700
+
+/*
+ * Table 0 Entries (all entries are 32 bits)
+ */
+enum {
+	IPW_ORD_STAT_TX_CURR_RATE = IPW_ORD_TABLE_0_MASK + 1,
+	IPW_ORD_STAT_FRAG_TRESHOLD,
+	IPW_ORD_STAT_RTS_THRESHOLD,
+	IPW_ORD_STAT_TX_HOST_REQUESTS,
+	IPW_ORD_STAT_TX_HOST_COMPLETE,
+	IPW_ORD_STAT_TX_DIR_DATA,
+	IPW_ORD_STAT_TX_DIR_DATA_B_1,
+	IPW_ORD_STAT_TX_DIR_DATA_B_2,
+	IPW_ORD_STAT_TX_DIR_DATA_B_5_5,
+	IPW_ORD_STAT_TX_DIR_DATA_B_11,
+	/* Hole */
+
+	IPW_ORD_STAT_TX_DIR_DATA_G_1 = IPW_ORD_TABLE_0_MASK + 19,
+	IPW_ORD_STAT_TX_DIR_DATA_G_2,
+	IPW_ORD_STAT_TX_DIR_DATA_G_5_5,
+	IPW_ORD_STAT_TX_DIR_DATA_G_6,
+	IPW_ORD_STAT_TX_DIR_DATA_G_9,
+	IPW_ORD_STAT_TX_DIR_DATA_G_11,
+	IPW_ORD_STAT_TX_DIR_DATA_G_12,
+	IPW_ORD_STAT_TX_DIR_DATA_G_18,
+	IPW_ORD_STAT_TX_DIR_DATA_G_24,
+	IPW_ORD_STAT_TX_DIR_DATA_G_36,
+	IPW_ORD_STAT_TX_DIR_DATA_G_48,
+	IPW_ORD_STAT_TX_DIR_DATA_G_54,
+	IPW_ORD_STAT_TX_NON_DIR_DATA,
+	IPW_ORD_STAT_TX_NON_DIR_DATA_B_1,
+	IPW_ORD_STAT_TX_NON_DIR_DATA_B_2,
+	IPW_ORD_STAT_TX_NON_DIR_DATA_B_5_5,
+	IPW_ORD_STAT_TX_NON_DIR_DATA_B_11,
+	/* Hole */
+
+	IPW_ORD_STAT_TX_NON_DIR_DATA_G_1 = IPW_ORD_TABLE_0_MASK + 44,
+	IPW_ORD_STAT_TX_NON_DIR_DATA_G_2,
+	IPW_ORD_STAT_TX_NON_DIR_DATA_G_5_5,
+	IPW_ORD_STAT_TX_NON_DIR_DATA_G_6,
+	IPW_ORD_STAT_TX_NON_DIR_DATA_G_9,
+	IPW_ORD_STAT_TX_NON_DIR_DATA_G_11,
+	IPW_ORD_STAT_TX_NON_DIR_DATA_G_12,
+	IPW_ORD_STAT_TX_NON_DIR_DATA_G_18,
+	IPW_ORD_STAT_TX_NON_DIR_DATA_G_24,
+	IPW_ORD_STAT_TX_NON_DIR_DATA_G_36,
+	IPW_ORD_STAT_TX_NON_DIR_DATA_G_48,
+	IPW_ORD_STAT_TX_NON_DIR_DATA_G_54,
+	IPW_ORD_STAT_TX_RETRY,
+	IPW_ORD_STAT_TX_FAILURE,
+	IPW_ORD_STAT_RX_ERR_CRC,
+	IPW_ORD_STAT_RX_ERR_ICV,
+	IPW_ORD_STAT_RX_NO_BUFFER,
+	IPW_ORD_STAT_FULL_SCANS,
+	IPW_ORD_STAT_PARTIAL_SCANS,
+	IPW_ORD_STAT_TGH_ABORTED_SCANS,
+	IPW_ORD_STAT_TX_TOTAL_BYTES,
+	IPW_ORD_STAT_CURR_RSSI_RAW,
+	IPW_ORD_STAT_RX_BEACON,
+	IPW_ORD_STAT_MISSED_BEACONS,
+	IPW_ORD_TABLE_0_LAST
+};
+
+#define IPW_RSSI_TO_DBM 112
+
+/* Table 1 Entries */
+enum {
+	IPW_ORD_TABLE_1_LAST = IPW_ORD_TABLE_1_MASK | 1,
+};
+
+/*
+ * Table 2 Entries
+ *
+ * FW_VERSION:    16 byte string
+ * FW_DATE:       16 byte string (only 14 bytes used)
+ * UCODE_VERSION: 4 byte version code
+ * UCODE_DATE:    5 byte date code
+ * ADAPTER_MAC:   6 byte MAC address
+ * RTC:           4 byte clock
+ */
+enum {
+	IPW_ORD_STAT_FW_VERSION = IPW_ORD_TABLE_2_MASK | 1,
+	IPW_ORD_STAT_FW_DATE,
+	IPW_ORD_STAT_UCODE_VERSION,
+	IPW_ORD_STAT_UCODE_DATE,
+	IPW_ORD_STAT_ADAPTER_MAC,
+	IPW_ORD_STAT_RTC,
+	IPW_ORD_TABLE_2_LAST
+};
+
+/* Table 3 */
+enum {
+	IPW_ORD_STAT_TX_PACKET = IPW_ORD_TABLE_3_MASK | 0,
+	IPW_ORD_STAT_TX_PACKET_FAILURE,
+	IPW_ORD_STAT_TX_PACKET_SUCCESS,
+	IPW_ORD_STAT_TX_PACKET_ABORTED,
+	IPW_ORD_TABLE_3_LAST
+};
+
+/* Table 4 */
+enum {
+	IPW_ORD_TABLE_4_LAST = IPW_ORD_TABLE_4_MASK
+};
+
+/* Table 5 */
+enum {
+	IPW_ORD_STAT_AVAILABLE_AP_COUNT = IPW_ORD_TABLE_5_MASK,
+	IPW_ORD_STAT_AP_ASSNS,
+	IPW_ORD_STAT_ROAM,
+	IPW_ORD_STAT_ROAM_CAUSE_MISSED_BEACONS,
+	IPW_ORD_STAT_ROAM_CAUSE_UNASSOC,
+	IPW_ORD_STAT_ROAM_CAUSE_RSSI,
+	IPW_ORD_STAT_ROAM_CAUSE_LINK_QUALITY,
+	IPW_ORD_STAT_ROAM_CAUSE_AP_LOAD_BALANCE,
+	IPW_ORD_STAT_ROAM_CAUSE_AP_NO_TX,
+	IPW_ORD_STAT_LINK_UP,
+	IPW_ORD_STAT_LINK_DOWN,
+	IPW_ORD_ANTENNA_DIVERSITY,
+	IPW_ORD_CURR_FREQ,
+	IPW_ORD_TABLE_5_LAST
+};
+
+/* Table 6 */
+enum {
+	IPW_ORD_COUNTRY_CODE = IPW_ORD_TABLE_6_MASK,
+	IPW_ORD_CURR_BSSID,
+	IPW_ORD_CURR_SSID,
+	IPW_ORD_TABLE_6_LAST
+};
+
+/* Table 7 */
+enum {
+	IPW_ORD_STAT_PERCENT_MISSED_BEACONS = IPW_ORD_TABLE_7_MASK,
+	IPW_ORD_STAT_PERCENT_TX_RETRIES,
+	IPW_ORD_STAT_PERCENT_LINK_QUALITY,
+	IPW_ORD_STAT_CURR_RSSI_DBM,
+	IPW_ORD_TABLE_7_LAST
+};
+
+#define IPW_ERROR_LOG     (IPW_SHARED_LOWER_BOUND + 0x410)
+#define IPW_EVENT_LOG     (IPW_SHARED_LOWER_BOUND + 0x414)
+#define IPW_ORDINALS_TABLE_LOWER        (IPW_SHARED_LOWER_BOUND + 0x500)
+#define IPW_ORDINALS_TABLE_0            (IPW_SHARED_LOWER_BOUND + 0x180)
+#define IPW_ORDINALS_TABLE_1            (IPW_SHARED_LOWER_BOUND + 0x184)
+#define IPW_ORDINALS_TABLE_2            (IPW_SHARED_LOWER_BOUND + 0x188)
+#define IPW_MEM_FIXED_OVERRIDE          (IPW_SHARED_LOWER_BOUND + 0x41C)
+
+struct ipw_fixed_rate {
+	__le16 tx_rates;
+	__le16 reserved;
+} __packed;
+
+#define IPW_INDIRECT_ADDR_MASK (~0x3ul)
+
+struct host_cmd {
+	u8 cmd;
+	u8 len;
+	u16 reserved;
+	u32 *param;
+} __packed;	/* XXX */
+
+struct cmdlog_host_cmd {
+	u8 cmd;
+	u8 len;
+	__le16 reserved;
+	char param[124];
+} __packed;
+
+struct ipw_cmd_log {
+	unsigned long jiffies;
+	int retcode;
+	struct cmdlog_host_cmd cmd;
+};
+
+/* SysConfig command parameters ... */
+/* bt_coexistence param */
+#define CFG_BT_COEXISTENCE_SIGNAL_CHNL  0x01	/* tell BT our chnl # */
+#define CFG_BT_COEXISTENCE_DEFER        0x02	/* defer our Tx if BT traffic */
+#define CFG_BT_COEXISTENCE_KILL         0x04	/* kill our Tx if BT traffic */
+#define CFG_BT_COEXISTENCE_WME_OVER_BT  0x08	/* multimedia extensions */
+#define CFG_BT_COEXISTENCE_OOB          0x10	/* signal BT via out-of-band */
+
+/* clear-to-send to self param */
+#define CFG_CTS_TO_ITSELF_ENABLED_MIN	0x00
+#define CFG_CTS_TO_ITSELF_ENABLED_MAX	0x01
+#define CFG_CTS_TO_ITSELF_ENABLED_DEF	CFG_CTS_TO_ITSELF_ENABLED_MIN
+
+/* Antenna diversity param (h/w can select best antenna, based on signal) */
+#define CFG_SYS_ANTENNA_BOTH            0x00	/* NIC selects best antenna */
+#define CFG_SYS_ANTENNA_A               0x01	/* force antenna A */
+#define CFG_SYS_ANTENNA_B               0x03	/* force antenna B */
+#define CFG_SYS_ANTENNA_SLOW_DIV        0x02	/* consider background noise */
+
+#define IPW_MAX_CONFIG_RETRIES 10
+
+#endif				/* __ipw2200_h__ */
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw.h b/drivers/net/wireless/intel/ipw2x00/libipw.h
new file mode 100644
index 0000000..b513551
--- /dev/null
+++ b/drivers/net/wireless/intel/ipw2x00/libipw.h
@@ -0,0 +1,1007 @@
+/*
+ * Merged with mainline ieee80211.h in Aug 2004.  Original ieee802_11
+ * remains copyright by the original authors
+ *
+ * Portions of the merged code are based on Host AP (software wireless
+ * LAN access point) driver for Intersil Prism2/2.5/3.
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <j@w1.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <j@w1.fi>
+ *
+ * Adaptation to a generic IEEE 802.11 stack by James Ketrenos
+ * <jketreno@linux.intel.com>
+ * Copyright (c) 2004-2005, Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation. See README and COPYING for
+ * more details.
+ *
+ * API Version History
+ * 1.0.x -- Initial version
+ * 1.1.x -- Added radiotap, QoS, TIM, libipw_geo APIs,
+ *          various structure changes, and crypto API init method
+ */
+#ifndef LIBIPW_H
+#define LIBIPW_H
+#include <linux/if_ether.h>	/* ETH_ALEN */
+#include <linux/kernel.h>	/* ARRAY_SIZE */
+#include <linux/wireless.h>
+#include <linux/ieee80211.h>
+
+#include <net/lib80211.h>
+#include <net/cfg80211.h>
+
+#define LIBIPW_VERSION "git-1.1.13"
+
+#define LIBIPW_DATA_LEN		2304
+/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section
+   6.2.1.1.2.
+
+   The figure in section 7.1.2 suggests a body size of up to 2312
+   bytes is allowed, which is a bit confusing; this most likely
+   represents the 2304 bytes of real data plus a possible 8 bytes of
+   WEP IV and ICV. (This interpretation was suggested by Ramiro Barreiro.) */
+
+#define LIBIPW_1ADDR_LEN 10
+#define LIBIPW_2ADDR_LEN 16
+#define LIBIPW_3ADDR_LEN 24
+#define LIBIPW_4ADDR_LEN 30
+#define LIBIPW_FCS_LEN    4
+#define LIBIPW_HLEN			(LIBIPW_4ADDR_LEN)
+#define LIBIPW_FRAME_LEN		(LIBIPW_DATA_LEN + LIBIPW_HLEN)
+
+#define MIN_FRAG_THRESHOLD     256U
+#define	MAX_FRAG_THRESHOLD     2346U
+
+/* QOS control */
+#define LIBIPW_QCTL_TID		0x000F
+
+/* debug macros */
+
+#ifdef CONFIG_LIBIPW_DEBUG
+extern u32 libipw_debug_level;
+#define LIBIPW_DEBUG(level, fmt, args...) \
+do { if (libipw_debug_level & (level)) \
+  printk(KERN_DEBUG "libipw: %c %s " fmt, \
+         in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
+#else
+#define LIBIPW_DEBUG(level, fmt, args...) do {} while (0)
+#endif				/* CONFIG_LIBIPW_DEBUG */
+
+/*
+ * To use the debug system:
+ *
+ * If you are defining a new debug classification, simply add it to the #define
+ * list here in the form of:
+ *
+ * #define LIBIPW_DL_xxxx VALUE
+ *
+ * shifting value to the left one bit from the previous entry.  xxxx should be
+ * the name of the classification (for example, WEP)
+ *
+ * You then need to either add a LIBIPW_xxxx_DEBUG() macro definition for your
+ * classification, or use LIBIPW_DEBUG(LIBIPW_DL_xxxx, ...) whenever you want
+ * to send output to that classification.
+ *
+ * To add your debug level to the list of levels seen when you perform
+ *
+ * % cat /proc/net/ieee80211/debug_level
+ *
+ * you simply need to include your new bit in the libipw_debug_level mask.
+ *
+ * If you do not see debug_level in /proc/net/ieee80211 then you do not have
+ * CONFIG_LIBIPW_DEBUG defined in your kernel configuration
+ *
+ */
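+
+/*
+ * For instance (illustrative only -- LIBIPW_DL_WEP is hypothetical, not a
+ * classification this header defines), a new "WEP" classification could
+ * take the unused bit between LIBIPW_DL_FRAG and LIBIPW_DL_DROP:
+ *
+ *	#define LIBIPW_DL_WEP (1<<6)
+ *	#define LIBIPW_DEBUG_WEP(f, a...) LIBIPW_DEBUG(LIBIPW_DL_WEP, f, ## a)
+ *
+ * and then be enabled at run time with:
+ *
+ * % echo 0x40 > /proc/net/ieee80211/debug_level
+ */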
+
+#define LIBIPW_DL_INFO          (1<<0)
+#define LIBIPW_DL_WX            (1<<1)
+#define LIBIPW_DL_SCAN          (1<<2)
+#define LIBIPW_DL_STATE         (1<<3)
+#define LIBIPW_DL_MGMT          (1<<4)
+#define LIBIPW_DL_FRAG          (1<<5)
+#define LIBIPW_DL_DROP          (1<<7)
+
+#define LIBIPW_DL_TX            (1<<8)
+#define LIBIPW_DL_RX            (1<<9)
+#define LIBIPW_DL_QOS           (1<<31)
+
+#define LIBIPW_ERROR(f, a...) printk(KERN_ERR "libipw: " f, ## a)
+#define LIBIPW_WARNING(f, a...) printk(KERN_WARNING "libipw: " f, ## a)
+#define LIBIPW_DEBUG_INFO(f, a...)   LIBIPW_DEBUG(LIBIPW_DL_INFO, f, ## a)
+
+#define LIBIPW_DEBUG_WX(f, a...)     LIBIPW_DEBUG(LIBIPW_DL_WX, f, ## a)
+#define LIBIPW_DEBUG_SCAN(f, a...)   LIBIPW_DEBUG(LIBIPW_DL_SCAN, f, ## a)
+#define LIBIPW_DEBUG_STATE(f, a...)  LIBIPW_DEBUG(LIBIPW_DL_STATE, f, ## a)
+#define LIBIPW_DEBUG_MGMT(f, a...)  LIBIPW_DEBUG(LIBIPW_DL_MGMT, f, ## a)
+#define LIBIPW_DEBUG_FRAG(f, a...)  LIBIPW_DEBUG(LIBIPW_DL_FRAG, f, ## a)
+#define LIBIPW_DEBUG_DROP(f, a...)  LIBIPW_DEBUG(LIBIPW_DL_DROP, f, ## a)
+#define LIBIPW_DEBUG_TX(f, a...)  LIBIPW_DEBUG(LIBIPW_DL_TX, f, ## a)
+#define LIBIPW_DEBUG_RX(f, a...)  LIBIPW_DEBUG(LIBIPW_DL_RX, f, ## a)
+#define LIBIPW_DEBUG_QOS(f, a...)  LIBIPW_DEBUG(LIBIPW_DL_QOS, f, ## a)
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>	/* ARPHRD_ETHER */
+
+#ifndef WIRELESS_SPY
+#define WIRELESS_SPY		/* enable iwspy support */
+#endif
+#include <net/iw_handler.h>	/* new driver API */
+
+#define ETH_P_PREAUTH 0x88C7	/* IEEE 802.11i pre-authentication */
+
+#ifndef ETH_P_80211_RAW
+#define ETH_P_80211_RAW (ETH_P_ECONET + 1)
+#endif
+
+/* IEEE 802.11 defines */
+
+#define P80211_OUI_LEN 3
+
+struct libipw_snap_hdr {
+
+	u8 dsap;		/* always 0xAA */
+	u8 ssap;		/* always 0xAA */
+	u8 ctrl;		/* always 0x03 */
+	u8 oui[P80211_OUI_LEN];	/* organizationally unique identifier */
+
+} __packed;
+
+#define SNAP_SIZE sizeof(struct libipw_snap_hdr)
+
+#define WLAN_FC_GET_VERS(fc) ((fc) & IEEE80211_FCTL_VERS)
+#define WLAN_FC_GET_TYPE(fc) ((fc) & IEEE80211_FCTL_FTYPE)
+#define WLAN_FC_GET_STYPE(fc) ((fc) & IEEE80211_FCTL_STYPE)
+
+#define WLAN_GET_SEQ_FRAG(seq) ((seq) & IEEE80211_SCTL_FRAG)
+#define WLAN_GET_SEQ_SEQ(seq)  (((seq) & IEEE80211_SCTL_SEQ) >> 4)
+
+#define LIBIPW_STATMASK_SIGNAL (1<<0)
+#define LIBIPW_STATMASK_RSSI (1<<1)
+#define LIBIPW_STATMASK_NOISE (1<<2)
+#define LIBIPW_STATMASK_RATE (1<<3)
+#define LIBIPW_STATMASK_WEMASK 0x7
+
+#define LIBIPW_CCK_MODULATION    (1<<0)
+#define LIBIPW_OFDM_MODULATION   (1<<1)
+
+#define LIBIPW_24GHZ_BAND     (1<<0)
+#define LIBIPW_52GHZ_BAND     (1<<1)
+
+#define LIBIPW_CCK_RATE_1MB		        0x02
+#define LIBIPW_CCK_RATE_2MB		        0x04
+#define LIBIPW_CCK_RATE_5MB		        0x0B
+#define LIBIPW_CCK_RATE_11MB		        0x16
+#define LIBIPW_OFDM_RATE_6MB		        0x0C
+#define LIBIPW_OFDM_RATE_9MB		        0x12
+#define LIBIPW_OFDM_RATE_12MB		0x18
+#define LIBIPW_OFDM_RATE_18MB		0x24
+#define LIBIPW_OFDM_RATE_24MB		0x30
+#define LIBIPW_OFDM_RATE_36MB		0x48
+#define LIBIPW_OFDM_RATE_48MB		0x60
+#define LIBIPW_OFDM_RATE_54MB		0x6C
+#define LIBIPW_BASIC_RATE_MASK		0x80
+
+#define LIBIPW_CCK_RATE_1MB_MASK		(1<<0)
+#define LIBIPW_CCK_RATE_2MB_MASK		(1<<1)
+#define LIBIPW_CCK_RATE_5MB_MASK		(1<<2)
+#define LIBIPW_CCK_RATE_11MB_MASK		(1<<3)
+#define LIBIPW_OFDM_RATE_6MB_MASK		(1<<4)
+#define LIBIPW_OFDM_RATE_9MB_MASK		(1<<5)
+#define LIBIPW_OFDM_RATE_12MB_MASK		(1<<6)
+#define LIBIPW_OFDM_RATE_18MB_MASK		(1<<7)
+#define LIBIPW_OFDM_RATE_24MB_MASK		(1<<8)
+#define LIBIPW_OFDM_RATE_36MB_MASK		(1<<9)
+#define LIBIPW_OFDM_RATE_48MB_MASK		(1<<10)
+#define LIBIPW_OFDM_RATE_54MB_MASK		(1<<11)
+
+#define LIBIPW_CCK_RATES_MASK	        0x0000000F
+#define LIBIPW_CCK_BASIC_RATES_MASK	(LIBIPW_CCK_RATE_1MB_MASK | \
+	LIBIPW_CCK_RATE_2MB_MASK)
+#define LIBIPW_CCK_DEFAULT_RATES_MASK	(LIBIPW_CCK_BASIC_RATES_MASK | \
+        LIBIPW_CCK_RATE_5MB_MASK | \
+        LIBIPW_CCK_RATE_11MB_MASK)
+
+#define LIBIPW_OFDM_RATES_MASK		0x00000FF0
+#define LIBIPW_OFDM_BASIC_RATES_MASK	(LIBIPW_OFDM_RATE_6MB_MASK | \
+	LIBIPW_OFDM_RATE_12MB_MASK | \
+	LIBIPW_OFDM_RATE_24MB_MASK)
+#define LIBIPW_OFDM_DEFAULT_RATES_MASK	(LIBIPW_OFDM_BASIC_RATES_MASK | \
+	LIBIPW_OFDM_RATE_9MB_MASK  | \
+	LIBIPW_OFDM_RATE_18MB_MASK | \
+	LIBIPW_OFDM_RATE_36MB_MASK | \
+	LIBIPW_OFDM_RATE_48MB_MASK | \
+	LIBIPW_OFDM_RATE_54MB_MASK)
+#define LIBIPW_DEFAULT_RATES_MASK (LIBIPW_OFDM_DEFAULT_RATES_MASK | \
+                                LIBIPW_CCK_DEFAULT_RATES_MASK)
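+
+/* With the definitions above, LIBIPW_CCK_DEFAULT_RATES_MASK works out to
+ * 0x0000000F, LIBIPW_OFDM_DEFAULT_RATES_MASK to 0x00000FF0, and
+ * LIBIPW_DEFAULT_RATES_MASK to 0x00000FFF -- every defined rate bit set. */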
+
+#define LIBIPW_NUM_OFDM_RATES		8
+#define LIBIPW_NUM_CCK_RATES		4
+#define LIBIPW_OFDM_SHIFT_MASK_A	4
+
+/* NOTE: This data is for statistical purposes; not all hardware provides this
+ *       information for frames received.
+ *       For libipw_rx_mgt, you need to set at least the 'len' parameter.
+ */
+struct libipw_rx_stats {
+	u32 mac_time;
+	s8 rssi;
+	u8 signal;
+	u8 noise;
+	u16 rate;		/* in 100 kbps */
+	u8 received_channel;
+	u8 control;
+	u8 mask;
+	u8 freq;
+	u16 len;
+	u64 tsf;
+	u32 beacon_time;
+};
+
+/* IEEE 802.11 requires that STA supports concurrent reception of at least
+ * three fragmented frames. This define can be increased to support more
+ * concurrent frames, but it should be noted that each entry can consume about
+ * 2 kB of RAM and increasing cache size will slow down frame reassembly. */
+#define LIBIPW_FRAG_CACHE_LEN 4
+
+struct libipw_frag_entry {
+	unsigned long first_frag_time;
+	unsigned int seq;
+	unsigned int last_frag;
+	struct sk_buff *skb;
+	u8 src_addr[ETH_ALEN];
+	u8 dst_addr[ETH_ALEN];
+};
+
+struct libipw_stats {
+	unsigned int tx_unicast_frames;
+	unsigned int tx_multicast_frames;
+	unsigned int tx_fragments;
+	unsigned int tx_unicast_octets;
+	unsigned int tx_multicast_octets;
+	unsigned int tx_deferred_transmissions;
+	unsigned int tx_single_retry_frames;
+	unsigned int tx_multiple_retry_frames;
+	unsigned int tx_retry_limit_exceeded;
+	unsigned int tx_discards;
+	unsigned int rx_unicast_frames;
+	unsigned int rx_multicast_frames;
+	unsigned int rx_fragments;
+	unsigned int rx_unicast_octets;
+	unsigned int rx_multicast_octets;
+	unsigned int rx_fcs_errors;
+	unsigned int rx_discards_no_buffer;
+	unsigned int tx_discards_wrong_sa;
+	unsigned int rx_discards_undecryptable;
+	unsigned int rx_message_in_msg_fragments;
+	unsigned int rx_message_in_bad_msg_fragments;
+};
+
+struct libipw_device;
+
+#define SEC_KEY_1		(1<<0)
+#define SEC_KEY_2		(1<<1)
+#define SEC_KEY_3		(1<<2)
+#define SEC_KEY_4		(1<<3)
+#define SEC_ACTIVE_KEY		(1<<4)
+#define SEC_AUTH_MODE		(1<<5)
+#define SEC_UNICAST_GROUP	(1<<6)
+#define SEC_LEVEL		(1<<7)
+#define SEC_ENABLED		(1<<8)
+#define SEC_ENCRYPT		(1<<9)
+
+#define SEC_LEVEL_0		0	/* None */
+#define SEC_LEVEL_1		1	/* WEP 40 and 104 bit */
+#define SEC_LEVEL_2		2	/* Level 1 + TKIP */
+#define SEC_LEVEL_2_CKIP	3	/* Level 1 + CKIP */
+#define SEC_LEVEL_3		4	/* Level 2 + CCMP */
+
+#define SEC_ALG_NONE		0
+#define SEC_ALG_WEP		1
+#define SEC_ALG_TKIP		2
+#define SEC_ALG_CCMP		3
+
+#define WEP_KEYS		4
+#define WEP_KEY_LEN		13
+#define SCM_KEY_LEN		32
+#define SCM_TEMPORAL_KEY_LENGTH	16
+
+struct libipw_security {
+	u16 active_key:2, enabled:1, unicast_uses_group:1, encrypt:1;
+	u8 auth_mode;
+	u8 encode_alg[WEP_KEYS];
+	u8 key_sizes[WEP_KEYS];
+	u8 keys[WEP_KEYS][SCM_KEY_LEN];
+	u8 level;
+	u16 flags;
+} __packed;
+
+/*
+
+ 802.11 data frame from AP
+
+      ,-------------------------------------------------------------------.
+Bytes |  2   |  2   |    6    |    6    |    6    |  2   | 0..2312 |   4  |
+      |------|------|---------|---------|---------|------|---------|------|
+Desc. | ctrl | dura |  DA/RA  |   TA    |    SA   | Sequ |  frame  |  fcs |
+      |      | tion | (BSSID) |         |         | ence |  data   |      |
+      `-------------------------------------------------------------------'
+
+Total: 28-2340 bytes
+
+*/
+
+#define BEACON_PROBE_SSID_ID_POSITION 12
+
+struct libipw_hdr_1addr {
+	__le16 frame_ctl;
+	__le16 duration_id;
+	u8 addr1[ETH_ALEN];
+	u8 payload[0];
+} __packed;
+
+struct libipw_hdr_2addr {
+	__le16 frame_ctl;
+	__le16 duration_id;
+	u8 addr1[ETH_ALEN];
+	u8 addr2[ETH_ALEN];
+	u8 payload[0];
+} __packed;
+
+struct libipw_hdr_3addr {
+	__le16 frame_ctl;
+	__le16 duration_id;
+	u8 addr1[ETH_ALEN];
+	u8 addr2[ETH_ALEN];
+	u8 addr3[ETH_ALEN];
+	__le16 seq_ctl;
+	u8 payload[0];
+} __packed;
+
+struct libipw_hdr_4addr {
+	__le16 frame_ctl;
+	__le16 duration_id;
+	u8 addr1[ETH_ALEN];
+	u8 addr2[ETH_ALEN];
+	u8 addr3[ETH_ALEN];
+	__le16 seq_ctl;
+	u8 addr4[ETH_ALEN];
+	u8 payload[0];
+} __packed;
+
+struct libipw_hdr_3addrqos {
+	__le16 frame_ctl;
+	__le16 duration_id;
+	u8 addr1[ETH_ALEN];
+	u8 addr2[ETH_ALEN];
+	u8 addr3[ETH_ALEN];
+	__le16 seq_ctl;
+	u8 payload[0];
+	__le16 qos_ctl;
+} __packed;
+
+struct libipw_info_element {
+	u8 id;
+	u8 len;
+	u8 data[0];
+} __packed;
+
+/*
+ * These are the data types that can make up management packets
+ *
+	u16 auth_algorithm;
+	u16 auth_sequence;
+	u16 beacon_interval;
+	u16 capability;
+	u8 current_ap[ETH_ALEN];
+	u16 listen_interval;
+	struct {
+		u16 association_id:14, reserved:2;
+	} __packed;
+	u32 time_stamp[2];
+	u16 reason;
+	u16 status;
+*/
+
+struct libipw_auth {
+	struct libipw_hdr_3addr header;
+	__le16 algorithm;
+	__le16 transaction;
+	__le16 status;
+	/* challenge */
+	struct libipw_info_element info_element[0];
+} __packed;
+
+struct libipw_channel_switch {
+	u8 id;
+	u8 len;
+	u8 mode;
+	u8 channel;
+	u8 count;
+} __packed;
+
+struct libipw_action {
+	struct libipw_hdr_3addr header;
+	u8 category;
+	u8 action;
+	union {
+		struct libipw_action_exchange {
+			u8 token;
+			struct libipw_info_element info_element[0];
+		} exchange;
+		struct libipw_channel_switch channel_switch;
+
+	} format;
+} __packed;
+
+struct libipw_disassoc {
+	struct libipw_hdr_3addr header;
+	__le16 reason;
+} __packed;
+
+/* Alias deauth for disassoc */
+#define libipw_deauth libipw_disassoc
+
+struct libipw_probe_request {
+	struct libipw_hdr_3addr header;
+	/* SSID, supported rates */
+	struct libipw_info_element info_element[0];
+} __packed;
+
+struct libipw_probe_response {
+	struct libipw_hdr_3addr header;
+	__le32 time_stamp[2];
+	__le16 beacon_interval;
+	__le16 capability;
+	/* SSID, supported rates, FH params, DS params,
+	 * CF params, IBSS params, TIM (if beacon), RSN */
+	struct libipw_info_element info_element[0];
+} __packed;
+
+/* Alias beacon for probe_response */
+#define libipw_beacon libipw_probe_response
+
+struct libipw_assoc_request {
+	struct libipw_hdr_3addr header;
+	__le16 capability;
+	__le16 listen_interval;
+	/* SSID, supported rates, RSN */
+	struct libipw_info_element info_element[0];
+} __packed;
+
+struct libipw_reassoc_request {
+	struct libipw_hdr_3addr header;
+	__le16 capability;
+	__le16 listen_interval;
+	u8 current_ap[ETH_ALEN];
+	struct libipw_info_element info_element[0];
+} __packed;
+
+struct libipw_assoc_response {
+	struct libipw_hdr_3addr header;
+	__le16 capability;
+	__le16 status;
+	__le16 aid;
+	/* supported rates */
+	struct libipw_info_element info_element[0];
+} __packed;
+
+struct libipw_txb {
+	u8 nr_frags;
+	u8 encrypted;
+	u8 rts_included;
+	u8 reserved;
+	u16 frag_size;
+	u16 payload_size;
+	struct sk_buff *fragments[0];
+};
+
+/* SWEEP TABLE ENTRIES NUMBER */
+#define MAX_SWEEP_TAB_ENTRIES		  42
+#define MAX_SWEEP_TAB_ENTRIES_PER_PACKET  7
+/* MAX_RATES_LENGTH needs to be 12.  The spec says 8, and many APs
+ * only use 8, and then use extended rates for the remaining supported
+ * rates.  Other APs, however, stick all of their supported rates on the
+ * main rates information element... */
+#define MAX_RATES_LENGTH                  ((u8)12)
+#define MAX_RATES_EX_LENGTH               ((u8)16)
+#define MAX_NETWORK_COUNT                  128
+
+#define CRC_LENGTH                 4U
+
+#define MAX_WPA_IE_LEN 64
+
+#define NETWORK_HAS_OFDM       (1<<1)
+#define NETWORK_HAS_CCK        (1<<2)
+
+/* QoS structure */
+#define NETWORK_HAS_QOS_PARAMETERS      (1<<3)
+#define NETWORK_HAS_QOS_INFORMATION     (1<<4)
+#define NETWORK_HAS_QOS_MASK            (NETWORK_HAS_QOS_PARAMETERS | \
+					 NETWORK_HAS_QOS_INFORMATION)
+
+/* 802.11h */
+#define NETWORK_HAS_POWER_CONSTRAINT    (1<<5)
+#define NETWORK_HAS_CSA                 (1<<6)
+#define NETWORK_HAS_QUIET               (1<<7)
+#define NETWORK_HAS_IBSS_DFS            (1<<8)
+#define NETWORK_HAS_TPC_REPORT          (1<<9)
+
+#define NETWORK_HAS_ERP_VALUE           (1<<10)
+
+#define QOS_QUEUE_NUM                   4
+#define QOS_OUI_LEN                     3
+#define QOS_OUI_TYPE                    2
+#define QOS_ELEMENT_ID                  221
+#define QOS_OUI_INFO_SUB_TYPE           0
+#define QOS_OUI_PARAM_SUB_TYPE          1
+#define QOS_VERSION_1                   1
+#define QOS_AIFSN_MIN_VALUE             2
+
+struct libipw_qos_information_element {
+	u8 elementID;
+	u8 length;
+	u8 qui[QOS_OUI_LEN];
+	u8 qui_type;
+	u8 qui_subtype;
+	u8 version;
+	u8 ac_info;
+} __packed;
+
+struct libipw_qos_ac_parameter {
+	u8 aci_aifsn;
+	u8 ecw_min_max;
+	__le16 tx_op_limit;
+} __packed;
+
+struct libipw_qos_parameter_info {
+	struct libipw_qos_information_element info_element;
+	u8 reserved;
+	struct libipw_qos_ac_parameter ac_params_record[QOS_QUEUE_NUM];
+} __packed;
+
+struct libipw_qos_parameters {
+	__le16 cw_min[QOS_QUEUE_NUM];
+	__le16 cw_max[QOS_QUEUE_NUM];
+	u8 aifs[QOS_QUEUE_NUM];
+	u8 flag[QOS_QUEUE_NUM];
+	__le16 tx_op_limit[QOS_QUEUE_NUM];
+} __packed;
+
+struct libipw_qos_data {
+	struct libipw_qos_parameters parameters;
+	int active;
+	int supported;
+	u8 param_count;
+	u8 old_param_count;
+};
+
+struct libipw_tim_parameters {
+	u8 tim_count;
+	u8 tim_period;
+} __packed;
+
+/*******************************************************/
+
+struct libipw_tpc_report {
+	u8 transmit_power;
+	u8 link_margin;
+} __packed;
+
+struct libipw_channel_map {
+	u8 channel;
+	u8 map;
+} __packed;
+
+struct libipw_ibss_dfs {
+	struct libipw_info_element ie;
+	u8 owner[ETH_ALEN];
+	u8 recovery_interval;
+	struct libipw_channel_map channel_map[0];
+};
+
+struct libipw_csa {
+	u8 mode;
+	u8 channel;
+	u8 count;
+} __packed;
+
+struct libipw_quiet {
+	u8 count;
+	u8 period;
+	u8 duration;
+	u8 offset;
+} __packed;
+
+struct libipw_network {
+	/* These entries are used to identify a unique network */
+	u8 bssid[ETH_ALEN];
+	u8 channel;
+	/* Ensure null-terminated for any debug msgs */
+	u8 ssid[IW_ESSID_MAX_SIZE + 1];
+	u8 ssid_len;
+
+	struct libipw_qos_data qos_data;
+
+	/* These are network statistics */
+	struct libipw_rx_stats stats;
+	u16 capability;
+	u8 rates[MAX_RATES_LENGTH];
+	u8 rates_len;
+	u8 rates_ex[MAX_RATES_EX_LENGTH];
+	u8 rates_ex_len;
+	unsigned long last_scanned;
+	u8 mode;
+	u32 flags;
+	u32 last_associate;
+	u32 time_stamp[2];
+	u16 beacon_interval;
+	u16 listen_interval;
+	u16 atim_window;
+	u8 erp_value;
+	u8 wpa_ie[MAX_WPA_IE_LEN];
+	size_t wpa_ie_len;
+	u8 rsn_ie[MAX_WPA_IE_LEN];
+	size_t rsn_ie_len;
+	struct libipw_tim_parameters tim;
+
+	/* 802.11h info */
+
+	/* Power Constraint - mandatory if spctrm mgmt required */
+	u8 power_constraint;
+
+	/* TPC Report - mandatory if spctrm mgmt required */
+	struct libipw_tpc_report tpc_report;
+
+	/* Channel Switch Announcement - optional if spctrm mgmt required */
+	struct libipw_csa csa;
+
+	/* Quiet - optional if spctrm mgmt required */
+	struct libipw_quiet quiet;
+
+	struct list_head list;
+};
+
+enum libipw_state {
+	LIBIPW_UNINITIALIZED = 0,
+	LIBIPW_INITIALIZED,
+	LIBIPW_ASSOCIATING,
+	LIBIPW_ASSOCIATED,
+	LIBIPW_AUTHENTICATING,
+	LIBIPW_AUTHENTICATED,
+	LIBIPW_SHUTDOWN
+};
+
+#define DEFAULT_MAX_SCAN_AGE (15 * HZ)
+#define DEFAULT_FTS 2346
+
+#define CFG_LIBIPW_RESERVE_FCS (1<<0)
+#define CFG_LIBIPW_COMPUTE_FCS (1<<1)
+#define CFG_LIBIPW_RTS (1<<2)
+
+#define LIBIPW_24GHZ_MIN_CHANNEL 1
+#define LIBIPW_24GHZ_MAX_CHANNEL 14
+#define LIBIPW_24GHZ_CHANNELS (LIBIPW_24GHZ_MAX_CHANNEL - \
+				  LIBIPW_24GHZ_MIN_CHANNEL + 1)
+
+#define LIBIPW_52GHZ_MIN_CHANNEL 34
+#define LIBIPW_52GHZ_MAX_CHANNEL 165
+#define LIBIPW_52GHZ_CHANNELS (LIBIPW_52GHZ_MAX_CHANNEL - \
+				  LIBIPW_52GHZ_MIN_CHANNEL + 1)
+
+enum {
+	LIBIPW_CH_PASSIVE_ONLY = (1 << 0),
+	LIBIPW_CH_80211H_RULES = (1 << 1),
+	LIBIPW_CH_B_ONLY = (1 << 2),
+	LIBIPW_CH_NO_IBSS = (1 << 3),
+	LIBIPW_CH_UNIFORM_SPREADING = (1 << 4),
+	LIBIPW_CH_RADAR_DETECT = (1 << 5),
+	LIBIPW_CH_INVALID = (1 << 6),
+};
+
+struct libipw_channel {
+	u32 freq;	/* in MHz */
+	u8 channel;
+	u8 flags;
+	u8 max_power;	/* in dBm */
+};
+
+struct libipw_geo {
+	u8 name[4];
+	u8 bg_channels;
+	u8 a_channels;
+	struct libipw_channel bg[LIBIPW_24GHZ_CHANNELS];
+	struct libipw_channel a[LIBIPW_52GHZ_CHANNELS];
+};
+
+struct libipw_device {
+	struct net_device *dev;
+	struct wireless_dev wdev;
+	struct libipw_security sec;
+
+	/* Bookkeeping structures */
+	struct libipw_stats ieee_stats;
+
+	struct libipw_geo geo;
+	struct ieee80211_supported_band bg_band;
+	struct ieee80211_supported_band a_band;
+
+	/* Probe / Beacon management */
+	struct list_head network_free_list;
+	struct list_head network_list;
+	struct libipw_network *networks[MAX_NETWORK_COUNT];
+	int scans;
+	int scan_age;
+
+	int iw_mode;		/* operating mode (IW_MODE_*) */
+	struct iw_spy_data spy_data;	/* iwspy support */
+
+	spinlock_t lock;
+
+	int tx_headroom;	/* Set to size of any additional room needed at front
+				 * of allocated Tx SKBs */
+	u32 config;
+
+	/* WEP and other encryption related settings at the device level */
+	int open_wep;		/* Set to 1 to allow unencrypted frames */
+
+	/* If the host performs {en,de}cryption, then set to 1 */
+	int host_encrypt;
+	int host_encrypt_msdu;
+	int host_decrypt;
+	/* host performs multicast decryption */
+	int host_mc_decrypt;
+
+	/* host should strip IV and ICV from protected frames */
+	/* meaningful only when hardware decryption is being used */
+	int host_strip_iv_icv;
+
+	int host_open_frag;
+	int ieee802_1x;		/* is IEEE 802.1X used */
+
+	/* WPA data */
+	int wpa_enabled;
+	int drop_unencrypted;
+	int privacy_invoked;
+	size_t wpa_ie_len;
+	u8 *wpa_ie;
+
+	struct lib80211_crypt_info crypt_info;
+
+	int bcrx_sta_key;	/* use individual keys to override default keys even
+				 * with RX of broad/multicast frames */
+
+	/* Fragmentation structures */
+	struct libipw_frag_entry frag_cache[LIBIPW_FRAG_CACHE_LEN];
+	unsigned int frag_next_idx;
+	u16 fts;		/* Fragmentation Threshold */
+	u16 rts;		/* RTS threshold */
+
+	/* Association info */
+	u8 bssid[ETH_ALEN];
+
+	enum libipw_state state;
+
+	int mode;		/* A, B, G */
+	int modulation;		/* CCK, OFDM */
+	int freq_band;		/* 2.4 GHz, 5.2 GHz, mixed */
+	int abg_true;		/* ABG flag              */
+
+	int perfect_rssi;
+	int worst_rssi;
+
+	u16 prev_seq_ctl;	/* used to drop duplicate frames */
+
+	/* Callback functions */
+	void (*set_security) (struct net_device * dev,
+			      struct libipw_security * sec);
+	netdev_tx_t (*hard_start_xmit) (struct libipw_txb * txb,
+					struct net_device * dev, int pri);
+	int (*is_queue_full) (struct net_device * dev, int pri);
+
+	int (*handle_management) (struct net_device * dev,
+				  struct libipw_network * network, u16 type);
+	int (*is_qos_active) (struct net_device *dev, struct sk_buff *skb);
+
+	/* Typical STA methods */
+	int (*handle_auth) (struct net_device * dev,
+			    struct libipw_auth * auth);
+	int (*handle_deauth) (struct net_device * dev,
+			      struct libipw_deauth * auth);
+	int (*handle_action) (struct net_device * dev,
+			      struct libipw_action * action,
+			      struct libipw_rx_stats * stats);
+	int (*handle_disassoc) (struct net_device * dev,
+				struct libipw_disassoc * assoc);
+	int (*handle_beacon) (struct net_device * dev,
+			      struct libipw_beacon * beacon,
+			      struct libipw_network * network);
+	int (*handle_probe_response) (struct net_device * dev,
+				      struct libipw_probe_response * resp,
+				      struct libipw_network * network);
+	int (*handle_probe_request) (struct net_device * dev,
+				     struct libipw_probe_request * req,
+				     struct libipw_rx_stats * stats);
+	int (*handle_assoc_response) (struct net_device * dev,
+				      struct libipw_assoc_response * resp,
+				      struct libipw_network * network);
+
+	/* Typical AP methods */
+	int (*handle_assoc_request) (struct net_device * dev);
+	int (*handle_reassoc_request) (struct net_device * dev,
+				       struct libipw_reassoc_request * req);
+
+	/* This must be the last item so that it points to the data
+	 * allocated beyond this structure by alloc_libipw */
+	u8 priv[0];
+};
+
+#define IEEE_A            (1<<0)
+#define IEEE_B            (1<<1)
+#define IEEE_G            (1<<2)
+#define IEEE_MODE_MASK    (IEEE_A|IEEE_B|IEEE_G)
+
+static inline void *libipw_priv(struct net_device *dev)
+{
+	return ((struct libipw_device *)netdev_priv(dev))->priv;
+}
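+
+/*
+ * Typical usage, as a sketch ("struct foo_priv" is a placeholder for
+ * whatever private structure a driver passes to alloc_libipw() as
+ * sizeof_priv, not a type defined anywhere):
+ *
+ *	dev = alloc_libipw(sizeof(struct foo_priv), 0);
+ *	...
+ *	struct foo_priv *priv = libipw_priv(dev);
+ *
+ * This works because alloc_libipw() reserves sizeof_priv bytes past
+ * struct libipw_device in the netdev private area; the priv[0] member
+ * above marks where that extra space begins.
+ */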
+
+static inline int libipw_is_valid_mode(struct libipw_device *ieee,
+					  int mode)
+{
+	/*
+	 * Both the access point and our device may support several
+	 * combinations of modes, so as long as at least one combination
+	 * is supported by both sides, return success.
+	 */
+	if ((mode & IEEE_A) &&
+	    (ieee->modulation & LIBIPW_OFDM_MODULATION) &&
+	    (ieee->freq_band & LIBIPW_52GHZ_BAND))
+		return 1;
+
+	if ((mode & IEEE_G) &&
+	    (ieee->modulation & LIBIPW_OFDM_MODULATION) &&
+	    (ieee->freq_band & LIBIPW_24GHZ_BAND))
+		return 1;
+
+	if ((mode & IEEE_B) &&
+	    (ieee->modulation & LIBIPW_CCK_MODULATION) &&
+	    (ieee->freq_band & LIBIPW_24GHZ_BAND))
+		return 1;
+
+	return 0;
+}
+
+static inline int libipw_get_hdrlen(u16 fc)
+{
+	int hdrlen = LIBIPW_3ADDR_LEN;
+	u16 stype = WLAN_FC_GET_STYPE(fc);
+
+	switch (WLAN_FC_GET_TYPE(fc)) {
+	case IEEE80211_FTYPE_DATA:
+		if ((fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS))
+			hdrlen = LIBIPW_4ADDR_LEN;
+		if (stype & IEEE80211_STYPE_QOS_DATA)
+			hdrlen += 2;
+		break;
+	case IEEE80211_FTYPE_CTL:
+		switch (WLAN_FC_GET_STYPE(fc)) {
+		case IEEE80211_STYPE_CTS:
+		case IEEE80211_STYPE_ACK:
+			hdrlen = LIBIPW_1ADDR_LEN;
+			break;
+		default:
+			hdrlen = LIBIPW_2ADDR_LEN;
+			break;
+		}
+		break;
+	}
+
+	return hdrlen;
+}
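+
+/* e.g. a plain 3-address data frame yields 24 bytes, a 3-address QoS data
+ * frame 24 + 2 = 26, a 4-address (ToDS and FromDS) QoS frame 30 + 2 = 32,
+ * and a CTS or ACK control frame 10. */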
+
+static inline u8 *libipw_get_payload(struct ieee80211_hdr *hdr)
+{
+	switch (libipw_get_hdrlen(le16_to_cpu(hdr->frame_control))) {
+	case LIBIPW_1ADDR_LEN:
+		return ((struct libipw_hdr_1addr *)hdr)->payload;
+	case LIBIPW_2ADDR_LEN:
+		return ((struct libipw_hdr_2addr *)hdr)->payload;
+	case LIBIPW_3ADDR_LEN:
+		return ((struct libipw_hdr_3addr *)hdr)->payload;
+	case LIBIPW_4ADDR_LEN:
+		return ((struct libipw_hdr_4addr *)hdr)->payload;
+	}
+	return NULL;
+}
+
+static inline int libipw_is_ofdm_rate(u8 rate)
+{
+	switch (rate & ~LIBIPW_BASIC_RATE_MASK) {
+	case LIBIPW_OFDM_RATE_6MB:
+	case LIBIPW_OFDM_RATE_9MB:
+	case LIBIPW_OFDM_RATE_12MB:
+	case LIBIPW_OFDM_RATE_18MB:
+	case LIBIPW_OFDM_RATE_24MB:
+	case LIBIPW_OFDM_RATE_36MB:
+	case LIBIPW_OFDM_RATE_48MB:
+	case LIBIPW_OFDM_RATE_54MB:
+		return 1;
+	}
+	return 0;
+}
+
+static inline int libipw_is_cck_rate(u8 rate)
+{
+	switch (rate & ~LIBIPW_BASIC_RATE_MASK) {
+	case LIBIPW_CCK_RATE_1MB:
+	case LIBIPW_CCK_RATE_2MB:
+	case LIBIPW_CCK_RATE_5MB:
+	case LIBIPW_CCK_RATE_11MB:
+		return 1;
+	}
+	return 0;
+}
+
+/* libipw.c */
+void free_libipw(struct net_device *dev, int monitor);
+struct net_device *alloc_libipw(int sizeof_priv, int monitor);
+
+void libipw_networks_age(struct libipw_device *ieee, unsigned long age_secs);
+
+int libipw_set_encryption(struct libipw_device *ieee);
+
+/* libipw_tx.c */
+netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev);
+void libipw_txb_free(struct libipw_txb *);
+
+/* libipw_rx.c */
+void libipw_rx_any(struct libipw_device *ieee, struct sk_buff *skb,
+		   struct libipw_rx_stats *stats);
+int libipw_rx(struct libipw_device *ieee, struct sk_buff *skb,
+	      struct libipw_rx_stats *rx_stats);
+/* make sure to set stats->len */
+void libipw_rx_mgt(struct libipw_device *ieee, struct libipw_hdr_4addr *header,
+		   struct libipw_rx_stats *stats);
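+
+/*
+ * Minimal caller sketch (illustrative, not a prescribed sequence): zero
+ * the stats and fill in at least ->len before handing over a management
+ * frame:
+ *
+ *	struct libipw_rx_stats stats = { .len = skb->len };
+ *
+ *	libipw_rx_mgt(ieee, (struct libipw_hdr_4addr *)skb->data, &stats);
+ */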
+
+/* libipw_geo.c */
+const struct libipw_geo *libipw_get_geo(struct libipw_device *ieee);
+void libipw_set_geo(struct libipw_device *ieee, const struct libipw_geo *geo);
+
+int libipw_is_valid_channel(struct libipw_device *ieee, u8 channel);
+int libipw_channel_to_index(struct libipw_device *ieee, u8 channel);
+u8 libipw_freq_to_channel(struct libipw_device *ieee, u32 freq);
+u8 libipw_get_channel_flags(struct libipw_device *ieee, u8 channel);
+const struct libipw_channel *libipw_get_channel(struct libipw_device *ieee,
+						u8 channel);
+u32 libipw_channel_to_freq(struct libipw_device *ieee, u8 channel);
+
+/* libipw_wx.c */
+int libipw_wx_get_scan(struct libipw_device *ieee, struct iw_request_info *info,
+		       union iwreq_data *wrqu, char *key);
+int libipw_wx_set_encode(struct libipw_device *ieee,
+			 struct iw_request_info *info, union iwreq_data *wrqu,
+			 char *key);
+int libipw_wx_get_encode(struct libipw_device *ieee,
+			 struct iw_request_info *info, union iwreq_data *wrqu,
+			 char *key);
+int libipw_wx_set_encodeext(struct libipw_device *ieee,
+			    struct iw_request_info *info,
+			    union iwreq_data *wrqu, char *extra);
+int libipw_wx_get_encodeext(struct libipw_device *ieee,
+			    struct iw_request_info *info,
+			    union iwreq_data *wrqu, char *extra);
+
+static inline void libipw_increment_scans(struct libipw_device *ieee)
+{
+	ieee->scans++;
+}
+
+static inline int libipw_get_scans(struct libipw_device *ieee)
+{
+	return ieee->scans;
+}
+
+#endif				/* LIBIPW_H */
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_geo.c b/drivers/net/wireless/intel/ipw2x00/libipw_geo.c
new file mode 100644
index 0000000..ce7eda2
--- /dev/null
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_geo.c
@@ -0,0 +1,193 @@
+/******************************************************************************
+
+  Copyright(c) 2005 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+
+  The full GNU General Public License is included in this distribution in the
+  file called LICENSE.
+
+  Contact Information:
+  Intel Linux Wireless <ilw@linux.intel.com>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+******************************************************************************/
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/in6.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/proc_fs.h>
+#include <linux/skbuff.h>
+#include <linux/tcp.h>
+#include <linux/types.h>
+#include <linux/wireless.h>
+#include <linux/etherdevice.h>
+#include <linux/uaccess.h>
+
+#include "libipw.h"
+
+int libipw_is_valid_channel(struct libipw_device *ieee, u8 channel)
+{
+	int i;
+
+	/* Driver needs to initialize the geography map before using
+	 * these helper functions */
+	if (ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0)
+		return 0;
+
+	if (ieee->freq_band & LIBIPW_24GHZ_BAND)
+		for (i = 0; i < ieee->geo.bg_channels; i++)
+			/* NOTE: If G mode is currently supported but
+			 * this is a B only channel, we don't see it
+			 * as valid. */
+			if ((ieee->geo.bg[i].channel == channel) &&
+			    !(ieee->geo.bg[i].flags & LIBIPW_CH_INVALID) &&
+			    (!(ieee->mode & IEEE_G) ||
+			     !(ieee->geo.bg[i].flags & LIBIPW_CH_B_ONLY)))
+				return LIBIPW_24GHZ_BAND;
+
+	if (ieee->freq_band & LIBIPW_52GHZ_BAND)
+		for (i = 0; i < ieee->geo.a_channels; i++)
+			if ((ieee->geo.a[i].channel == channel) &&
+			    !(ieee->geo.a[i].flags & LIBIPW_CH_INVALID))
+				return LIBIPW_52GHZ_BAND;
+
+	return 0;
+}
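+
+/* Sketch of the intended use (hypothetical caller): reject a requested
+ * channel that the current geography does not allow:
+ *
+ *	if (!libipw_is_valid_channel(ieee, channel))
+ *		return -EINVAL;
+ *
+ * A non-zero return also identifies the band (LIBIPW_24GHZ_BAND or
+ * LIBIPW_52GHZ_BAND) in which the channel was found. */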
+
+int libipw_channel_to_index(struct libipw_device *ieee, u8 channel)
+{
+	int i;
+
+	/* Driver needs to initialize the geography map before using
+	 * these helper functions */
+	if (ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0)
+		return -1;
+
+	if (ieee->freq_band & LIBIPW_24GHZ_BAND)
+		for (i = 0; i < ieee->geo.bg_channels; i++)
+			if (ieee->geo.bg[i].channel == channel)
+				return i;
+
+	if (ieee->freq_band & LIBIPW_52GHZ_BAND)
+		for (i = 0; i < ieee->geo.a_channels; i++)
+			if (ieee->geo.a[i].channel == channel)
+				return i;
+
+	return -1;
+}
+
+u32 libipw_channel_to_freq(struct libipw_device * ieee, u8 channel)
+{
+	const struct libipw_channel * ch;
+
+	/* Driver needs to initialize the geography map before using
+	 * these helper functions */
+	if (ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0)
+		return 0;
+
+	ch = libipw_get_channel(ieee, channel);
+	if (!ch->channel)
+		return 0;
+	return ch->freq;
+}
+
+u8 libipw_freq_to_channel(struct libipw_device * ieee, u32 freq)
+{
+	int i;
+
+	/* Driver needs to initialize the geography map before using
+	 * these helper functions */
+	if (ieee->geo.bg_channels == 0 && ieee->geo.a_channels == 0)
+		return 0;
+
+	freq /= 100000;
+
+	if (ieee->freq_band & LIBIPW_24GHZ_BAND)
+		for (i = 0; i < ieee->geo.bg_channels; i++)
+			if (ieee->geo.bg[i].freq == freq)
+				return ieee->geo.bg[i].channel;
+
+	if (ieee->freq_band & LIBIPW_52GHZ_BAND)
+		for (i = 0; i < ieee->geo.a_channels; i++)
+			if (ieee->geo.a[i].freq == freq)
+				return ieee->geo.a[i].channel;
+
+	return 0;
+}
+
+void libipw_set_geo(struct libipw_device *ieee,
+		      const struct libipw_geo *geo)
+{
+	memcpy(ieee->geo.name, geo->name, 3);
+	ieee->geo.name[3] = '\0';
+	ieee->geo.bg_channels = geo->bg_channels;
+	ieee->geo.a_channels = geo->a_channels;
+	memcpy(ieee->geo.bg, geo->bg, geo->bg_channels *
+	       sizeof(struct libipw_channel));
+	memcpy(ieee->geo.a, geo->a, ieee->geo.a_channels *
+	       sizeof(struct libipw_channel));
+}
+
+const struct libipw_geo *libipw_get_geo(struct libipw_device *ieee)
+{
+	return &ieee->geo;
+}
+
+u8 libipw_get_channel_flags(struct libipw_device * ieee, u8 channel)
+{
+	int index = libipw_channel_to_index(ieee, channel);
+
+	if (index == -1)
+		return LIBIPW_CH_INVALID;
+
+	if (channel <= LIBIPW_24GHZ_CHANNELS)
+		return ieee->geo.bg[index].flags;
+
+	return ieee->geo.a[index].flags;
+}
+
+static const struct libipw_channel bad_channel = {
+	.channel = 0,
+	.flags = LIBIPW_CH_INVALID,
+	.max_power = 0,
+};
+
+const struct libipw_channel *libipw_get_channel(struct libipw_device
+						      *ieee, u8 channel)
+{
+	int index = libipw_channel_to_index(ieee, channel);
+
+	if (index == -1)
+		return &bad_channel;
+
+	if (channel <= LIBIPW_24GHZ_CHANNELS)
+		return &ieee->geo.bg[index];
+
+	return &ieee->geo.a[index];
+}
+
+EXPORT_SYMBOL(libipw_get_channel);
+EXPORT_SYMBOL(libipw_get_channel_flags);
+EXPORT_SYMBOL(libipw_is_valid_channel);
+EXPORT_SYMBOL(libipw_freq_to_channel);
+EXPORT_SYMBOL(libipw_channel_to_freq);
+EXPORT_SYMBOL(libipw_channel_to_index);
+EXPORT_SYMBOL(libipw_set_geo);
+EXPORT_SYMBOL(libipw_get_geo);
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_module.c b/drivers/net/wireless/intel/ipw2x00/libipw_module.c
new file mode 100644
index 0000000..f00d45f
--- /dev/null
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_module.c
@@ -0,0 +1,312 @@
+/*******************************************************************************
+
+  Copyright(c) 2004-2005 Intel Corporation. All rights reserved.
+
+  Portions of this file are based on the WEP enablement code provided by the
+  Host AP project hostap-drivers v0.1.3
+  Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+  <j@w1.fi>
+  Copyright (c) 2002-2003, Jouni Malinen <j@w1.fi>
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+
+  The full GNU General Public License is included in this distribution in the
+  file called LICENSE.
+
+  Contact Information:
+  Intel Linux Wireless <ilw@linux.intel.com>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/in6.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/proc_fs.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/tcp.h>
+#include <linux/types.h>
+#include <linux/wireless.h>
+#include <linux/etherdevice.h>
+#include <linux/uaccess.h>
+#include <net/net_namespace.h>
+#include <net/arp.h>
+
+#include "libipw.h"
+
+#define DRV_DESCRIPTION "802.11 data/management/control stack"
+#define DRV_NAME        "libipw"
+#define DRV_PROCNAME	"ieee80211"
+#define DRV_VERSION	LIBIPW_VERSION
+#define DRV_COPYRIGHT   "Copyright (C) 2004-2005 Intel Corporation <jketreno@linux.intel.com>"
+
+MODULE_VERSION(DRV_VERSION);
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR(DRV_COPYRIGHT);
+MODULE_LICENSE("GPL");
+
+static struct cfg80211_ops libipw_config_ops = { };
+static void *libipw_wiphy_privid = &libipw_wiphy_privid;
+
+static int libipw_networks_allocate(struct libipw_device *ieee)
+{
+	int i, j;
+
+	for (i = 0; i < MAX_NETWORK_COUNT; i++) {
+		ieee->networks[i] = kzalloc(sizeof(struct libipw_network),
+					    GFP_KERNEL);
+		if (!ieee->networks[i]) {
+			LIBIPW_ERROR("Out of memory allocating beacons\n");
+			for (j = 0; j < i; j++)
+				kfree(ieee->networks[j]);
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
+static inline void libipw_networks_free(struct libipw_device *ieee)
+{
+	int i;
+
+	for (i = 0; i < MAX_NETWORK_COUNT; i++)
+		kfree(ieee->networks[i]);
+}
+
+void libipw_networks_age(struct libipw_device *ieee,
+                            unsigned long age_secs)
+{
+	struct libipw_network *network = NULL;
+	unsigned long flags;
+	unsigned long age_jiffies = msecs_to_jiffies(age_secs * MSEC_PER_SEC);
+
+	spin_lock_irqsave(&ieee->lock, flags);
+	list_for_each_entry(network, &ieee->network_list, list) {
+		network->last_scanned -= age_jiffies;
+	}
+	spin_unlock_irqrestore(&ieee->lock, flags);
+}
+EXPORT_SYMBOL(libipw_networks_age);
+
+static void libipw_networks_initialize(struct libipw_device *ieee)
+{
+	int i;
+
+	INIT_LIST_HEAD(&ieee->network_free_list);
+	INIT_LIST_HEAD(&ieee->network_list);
+	for (i = 0; i < MAX_NETWORK_COUNT; i++)
+		list_add_tail(&ieee->networks[i]->list,
+			      &ieee->network_free_list);
+}
+
+struct net_device *alloc_libipw(int sizeof_priv, int monitor)
+{
+	struct libipw_device *ieee;
+	struct net_device *dev;
+	int err;
+
+	LIBIPW_DEBUG_INFO("Initializing...\n");
+
+	dev = alloc_etherdev(sizeof(struct libipw_device) + sizeof_priv);
+	if (!dev)
+		goto failed;
+
+	ieee = netdev_priv(dev);
+
+	ieee->dev = dev;
+
+	if (!monitor) {
+		ieee->wdev.wiphy = wiphy_new(&libipw_config_ops, 0);
+		if (!ieee->wdev.wiphy) {
+			LIBIPW_ERROR("Unable to allocate wiphy.\n");
+			goto failed_free_netdev;
+		}
+
+		ieee->dev->ieee80211_ptr = &ieee->wdev;
+		ieee->wdev.iftype = NL80211_IFTYPE_STATION;
+
+		/* Fill-out wiphy structure bits we know...  Not enough info
+		   here to call set_wiphy_dev or set MAC address or channel info
+		   -- have to do that in ->ndo_init... */
+		ieee->wdev.wiphy->privid = libipw_wiphy_privid;
+
+		ieee->wdev.wiphy->max_scan_ssids = 1;
+		ieee->wdev.wiphy->max_scan_ie_len = 0;
+		ieee->wdev.wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION)
+						| BIT(NL80211_IFTYPE_ADHOC);
+	}
+
+	err = libipw_networks_allocate(ieee);
+	if (err) {
+		LIBIPW_ERROR("Unable to allocate beacon storage: %d\n", err);
+		goto failed_free_wiphy;
+	}
+	libipw_networks_initialize(ieee);
+
+	/* Default fragmentation threshold is maximum payload size */
+	ieee->fts = DEFAULT_FTS;
+	ieee->rts = DEFAULT_FTS;
+	ieee->scan_age = DEFAULT_MAX_SCAN_AGE;
+	ieee->open_wep = 1;
+
+	/* Default to enabling full open WEP with host based encrypt/decrypt */
+	ieee->host_encrypt = 1;
+	ieee->host_decrypt = 1;
+	ieee->host_mc_decrypt = 1;
+
+	/* Host fragmentation in Open mode. Default is enabled.
+	 * Note: host fragmentation is always enabled if host encryption
+	 * is enabled. Cards that can do hardware encryption must also do
+	 * hardware fragmentation, so no separate host_enc_frag variable
+	 * is needed. */
+	ieee->host_open_frag = 1;
+	ieee->ieee802_1x = 1;	/* Default to supporting 802.1x */
+
+	spin_lock_init(&ieee->lock);
+
+	lib80211_crypt_info_init(&ieee->crypt_info, dev->name, &ieee->lock);
+
+	ieee->wpa_enabled = 0;
+	ieee->drop_unencrypted = 0;
+	ieee->privacy_invoked = 0;
+
+	return dev;
+
+failed_free_wiphy:
+	if (!monitor)
+		wiphy_free(ieee->wdev.wiphy);
+failed_free_netdev:
+	free_netdev(dev);
+failed:
+	return NULL;
+}
+EXPORT_SYMBOL(alloc_libipw);
+
+void free_libipw(struct net_device *dev, int monitor)
+{
+	struct libipw_device *ieee = netdev_priv(dev);
+
+	lib80211_crypt_info_free(&ieee->crypt_info);
+
+	libipw_networks_free(ieee);
+
+	/* free cfg80211 resources */
+	if (!monitor)
+		wiphy_free(ieee->wdev.wiphy);
+
+	free_netdev(dev);
+}
+EXPORT_SYMBOL(free_libipw);
+
+#ifdef CONFIG_LIBIPW_DEBUG
+
+static int debug;
+u32 libipw_debug_level;
+EXPORT_SYMBOL_GPL(libipw_debug_level);
+static struct proc_dir_entry *libipw_proc;
+
+static int debug_level_proc_show(struct seq_file *m, void *v)
+{
+	seq_printf(m, "0x%08X\n", libipw_debug_level);
+	return 0;
+}
+
+static int debug_level_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, debug_level_proc_show, NULL);
+}
+
+static ssize_t debug_level_proc_write(struct file *file,
+		const char __user *buffer, size_t count, loff_t *pos)
+{
+	char buf[] = "0x00000000\n";
+	size_t len = min(sizeof(buf) - 1, count);
+	unsigned long val;
+
+	if (copy_from_user(buf, buffer, len))
+		return -EFAULT;
+	buf[len] = 0;
+	if (sscanf(buf, "%li", &val) != 1)
+		printk(KERN_INFO DRV_NAME
+		       ": %s is not in hex or decimal form.\n", buf);
+	else
+		libipw_debug_level = val;
+
+	return strnlen(buf, len);
+}
+
+static const struct file_operations debug_level_proc_fops = {
+	.owner		= THIS_MODULE,
+	.open		= debug_level_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+	.write		= debug_level_proc_write,
+};
+#endif				/* CONFIG_LIBIPW_DEBUG */
+
+static int __init libipw_init(void)
+{
+#ifdef CONFIG_LIBIPW_DEBUG
+	struct proc_dir_entry *e;
+
+	libipw_debug_level = debug;
+	libipw_proc = proc_mkdir(DRV_PROCNAME, init_net.proc_net);
+	if (libipw_proc == NULL) {
+		LIBIPW_ERROR("Unable to create " DRV_PROCNAME
+				" proc directory\n");
+		return -EIO;
+	}
+	e = proc_create("debug_level", 0644, libipw_proc,
+			&debug_level_proc_fops);
+	if (!e) {
+		remove_proc_entry(DRV_PROCNAME, init_net.proc_net);
+		libipw_proc = NULL;
+		return -EIO;
+	}
+#endif				/* CONFIG_LIBIPW_DEBUG */
+
+	printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
+	printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
+
+	return 0;
+}
+
+static void __exit libipw_exit(void)
+{
+#ifdef CONFIG_LIBIPW_DEBUG
+	if (libipw_proc) {
+		remove_proc_entry("debug_level", libipw_proc);
+		remove_proc_entry(DRV_PROCNAME, init_net.proc_net);
+		libipw_proc = NULL;
+	}
+#endif				/* CONFIG_LIBIPW_DEBUG */
+}
+
+#ifdef CONFIG_LIBIPW_DEBUG
+#include <linux/moduleparam.h>
+module_param(debug, int, 0444);
+MODULE_PARM_DESC(debug, "debug output mask");
+#endif				/* CONFIG_LIBIPW_DEBUG */
+
+module_exit(libipw_exit);
+module_init(libipw_init);
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_rx.c b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
new file mode 100644
index 0000000..6df19f0
--- /dev/null
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_rx.c
@@ -0,0 +1,1765 @@
+/*
+ * Original code based Host AP (software wireless LAN access point) driver
+ * for Intersil Prism2/2.5/3 - hostap.o module, common routines
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <j@w1.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <j@w1.fi>
+ * Copyright (c) 2004-2005, Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation. See README and COPYING for
+ * more details.
+ */
+
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/in6.h>
+#include <linux/gfp.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/proc_fs.h>
+#include <linux/skbuff.h>
+#include <linux/tcp.h>
+#include <linux/types.h>
+#include <linux/wireless.h>
+#include <linux/etherdevice.h>
+#include <linux/uaccess.h>
+#include <linux/ctype.h>
+
+#include <net/lib80211.h>
+
+#include "libipw.h"
+
+static void libipw_monitor_rx(struct libipw_device *ieee,
+					struct sk_buff *skb,
+					struct libipw_rx_stats *rx_stats)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	u16 fc = le16_to_cpu(hdr->frame_control);
+
+	skb->dev = ieee->dev;
+	skb_reset_mac_header(skb);
+	skb_pull(skb, libipw_get_hdrlen(fc));
+	skb->pkt_type = PACKET_OTHERHOST;
+	skb->protocol = htons(ETH_P_80211_RAW);
+	memset(skb->cb, 0, sizeof(skb->cb));
+	netif_rx(skb);
+}
+
+/* Called only as a tasklet (software IRQ) */
+static struct libipw_frag_entry *
+libipw_frag_cache_find(struct libipw_device *ieee, unsigned int seq,
+		       unsigned int frag, u8 *src, u8 *dst)
+{
+	struct libipw_frag_entry *entry;
+	int i;
+
+	for (i = 0; i < LIBIPW_FRAG_CACHE_LEN; i++) {
+		entry = &ieee->frag_cache[i];
+		if (entry->skb != NULL &&
+		    time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
+			LIBIPW_DEBUG_FRAG("expiring fragment cache entry "
+					     "seq=%u last_frag=%u\n",
+					     entry->seq, entry->last_frag);
+			dev_kfree_skb_any(entry->skb);
+			entry->skb = NULL;
+		}
+
+		if (entry->skb != NULL && entry->seq == seq &&
+		    (entry->last_frag + 1 == frag || frag == -1) &&
+		    ether_addr_equal(entry->src_addr, src) &&
+		    ether_addr_equal(entry->dst_addr, dst))
+			return entry;
+	}
+
+	return NULL;
+}
+
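+/* The fragment cache is a small, fixed-size (LIBIPW_FRAG_CACHE_LEN)
+ * array recycled round-robin via frag_next_idx; entries expire after
+ * two seconds (2 * HZ jiffies), as enforced in libipw_frag_cache_find()
+ * above. */
+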
+/* Called only as a tasklet (software IRQ) */
+static struct sk_buff *libipw_frag_cache_get(struct libipw_device *ieee,
+						struct libipw_hdr_4addr *hdr)
+{
+	struct sk_buff *skb = NULL;
+	u16 sc;
+	unsigned int frag, seq;
+	struct libipw_frag_entry *entry;
+
+	sc = le16_to_cpu(hdr->seq_ctl);
+	frag = WLAN_GET_SEQ_FRAG(sc);
+	seq = WLAN_GET_SEQ_SEQ(sc);
+
+	if (frag == 0) {
+		/* Reserve enough space to fit maximum frame length */
+		skb = dev_alloc_skb(ieee->dev->mtu +
+				    sizeof(struct libipw_hdr_4addr) +
+				    8 /* LLC */  +
+				    2 /* alignment */  +
+				    8 /* WEP */  + ETH_ALEN /* WDS */ );
+		if (skb == NULL)
+			return NULL;
+
+		entry = &ieee->frag_cache[ieee->frag_next_idx];
+		ieee->frag_next_idx++;
+		if (ieee->frag_next_idx >= LIBIPW_FRAG_CACHE_LEN)
+			ieee->frag_next_idx = 0;
+
+		if (entry->skb != NULL)
+			dev_kfree_skb_any(entry->skb);
+
+		entry->first_frag_time = jiffies;
+		entry->seq = seq;
+		entry->last_frag = frag;
+		entry->skb = skb;
+		memcpy(entry->src_addr, hdr->addr2, ETH_ALEN);
+		memcpy(entry->dst_addr, hdr->addr1, ETH_ALEN);
+	} else {
+		/* received a fragment of a frame for which the head fragment
+		 * should have already been received */
+		entry = libipw_frag_cache_find(ieee, seq, frag, hdr->addr2,
+						  hdr->addr1);
+		if (entry != NULL) {
+			entry->last_frag = frag;
+			skb = entry->skb;
+		}
+	}
+
+	return skb;
+}
+
+/* Called only as a tasklet (software IRQ) */
+static int libipw_frag_cache_invalidate(struct libipw_device *ieee,
+					   struct libipw_hdr_4addr *hdr)
+{
+	u16 sc;
+	unsigned int seq;
+	struct libipw_frag_entry *entry;
+
+	sc = le16_to_cpu(hdr->seq_ctl);
+	seq = WLAN_GET_SEQ_SEQ(sc);
+
+	entry = libipw_frag_cache_find(ieee, seq, -1, hdr->addr2,
+					  hdr->addr1);
+
+	if (entry == NULL) {
+		LIBIPW_DEBUG_FRAG("could not invalidate fragment cache "
+				     "entry (seq=%u)\n", seq);
+		return -1;
+	}
+
+	entry->skb = NULL;
+	return 0;
+}
+
+#ifdef NOT_YET
+/* libipw_rx_frame_mgmt
+ *
+ * Responsible for handling management control frames
+ *
+ * Called by libipw_rx */
+static int
+libipw_rx_frame_mgmt(struct libipw_device *ieee, struct sk_buff *skb,
+			struct libipw_rx_stats *rx_stats, u16 type,
+			u16 stype)
+{
+	if (ieee->iw_mode == IW_MODE_MASTER) {
+		printk(KERN_DEBUG "%s: Master mode not yet supported.\n",
+		       ieee->dev->name);
+		return 0;
+/*
+  hostap_update_sta_ps(ieee, (struct hostap_libipw_hdr_4addr *)
+  skb->data);*/
+	}
+
+	if (ieee->hostapd && type == WLAN_FC_TYPE_MGMT) {
+		if (stype == WLAN_FC_STYPE_BEACON &&
+		    ieee->iw_mode == IW_MODE_MASTER) {
+			struct sk_buff *skb2;
+			/* Process beacon frames also in kernel driver to
+			 * update STA(AP) table statistics */
+			skb2 = skb_clone(skb, GFP_ATOMIC);
+			if (skb2)
+				hostap_rx(skb2->dev, skb2, rx_stats);
+		}
+
+		/* send management frames to the user space daemon for
+		 * processing */
+		ieee->apdevstats.rx_packets++;
+		ieee->apdevstats.rx_bytes += skb->len;
+		prism2_rx_80211(ieee->apdev, skb, rx_stats, PRISM2_RX_MGMT);
+		return 0;
+	}
+
+	if (ieee->iw_mode == IW_MODE_MASTER) {
+		if (type != WLAN_FC_TYPE_MGMT && type != WLAN_FC_TYPE_CTRL) {
+			printk(KERN_DEBUG "%s: unknown management frame "
+			       "(type=0x%02x, stype=0x%02x) dropped\n",
+			       skb->dev->name, type, stype);
+			return -1;
+		}
+
+		hostap_rx(skb->dev, skb, rx_stats);
+		return 0;
+	}
+
+	printk(KERN_DEBUG "%s: hostap_rx_frame_mgmt: management frame "
+	       "received in non-Host AP mode\n", skb->dev->name);
+	return -1;
+}
+#endif
+
+/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
+/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
+static unsigned char libipw_rfc1042_header[] =
+    { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
+
+/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
+static unsigned char libipw_bridge_tunnel_header[] =
+    { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
+/* No encapsulation header if EtherType < 0x600 (=length) */
+
+/* Called by libipw_rx_frame_decrypt */
+static int libipw_is_eapol_frame(struct libipw_device *ieee,
+				    struct sk_buff *skb)
+{
+	struct net_device *dev = ieee->dev;
+	u16 fc, ethertype;
+	struct libipw_hdr_3addr *hdr;
+	u8 *pos;
+
+	if (skb->len < 24)
+		return 0;
+
+	hdr = (struct libipw_hdr_3addr *)skb->data;
+	fc = le16_to_cpu(hdr->frame_ctl);
+
+	/* check that the frame is a unicast frame addressed to us */
+	if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
+	    IEEE80211_FCTL_TODS &&
+	    ether_addr_equal(hdr->addr1, dev->dev_addr) &&
+	    ether_addr_equal(hdr->addr3, dev->dev_addr)) {
+		/* ToDS frame with own addr BSSID and DA */
+	} else if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
+		   IEEE80211_FCTL_FROMDS &&
+		   ether_addr_equal(hdr->addr1, dev->dev_addr)) {
+		/* FromDS frame with own addr as DA */
+	} else
+		return 0;
+
+	if (skb->len < 24 + 8)
+		return 0;
+
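+	/* The EtherType sits after the 24-byte 3-address header plus six
+	 * LLC/SNAP octets (AA AA 03 + OUI), i.e. at offset 24 + 6;
+	 * ETH_P_PAE (0x888E) marks an EAPOL frame. */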
+	/* check for port access entity Ethernet type */
+	pos = skb->data + 24;
+	ethertype = (pos[6] << 8) | pos[7];
+	if (ethertype == ETH_P_PAE)
+		return 1;
+
+	return 0;
+}
+
+/* Called only as a tasklet (software IRQ), by libipw_rx */
+static int
+libipw_rx_frame_decrypt(struct libipw_device *ieee, struct sk_buff *skb,
+			   struct lib80211_crypt_data *crypt)
+{
+	struct libipw_hdr_3addr *hdr;
+	int res, hdrlen;
+
+	if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL)
+		return 0;
+
+	hdr = (struct libipw_hdr_3addr *)skb->data;
+	hdrlen = libipw_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
+
+	atomic_inc(&crypt->refcnt);
+	res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv);
+	atomic_dec(&crypt->refcnt);
+	if (res < 0) {
+		LIBIPW_DEBUG_DROP("decryption failed (SA=%pM) res=%d\n",
+				     hdr->addr2, res);
+		if (res == -2)
+			LIBIPW_DEBUG_DROP("Decryption failed ICV "
+					     "mismatch (key %d)\n",
+					     skb->data[hdrlen + 3] >> 6);
+		ieee->ieee_stats.rx_discards_undecryptable++;
+		return -1;
+	}
+
+	return res;
+}
+
+/* Called only as a tasklet (software IRQ), by libipw_rx */
+static int
+libipw_rx_frame_decrypt_msdu(struct libipw_device *ieee,
+				struct sk_buff *skb, int keyidx,
+				struct lib80211_crypt_data *crypt)
+{
+	struct libipw_hdr_3addr *hdr;
+	int res, hdrlen;
+
+	if (crypt == NULL || crypt->ops->decrypt_msdu == NULL)
+		return 0;
+
+	hdr = (struct libipw_hdr_3addr *)skb->data;
+	hdrlen = libipw_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
+
+	atomic_inc(&crypt->refcnt);
+	res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv);
+	atomic_dec(&crypt->refcnt);
+	if (res < 0) {
+		printk(KERN_DEBUG "%s: MSDU decryption/MIC verification failed"
+		       " (SA=%pM keyidx=%d)\n", ieee->dev->name, hdr->addr2,
+		       keyidx);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* All received frames are sent to this function. @skb contains the frame in
+ * IEEE 802.11 format, i.e., in the format it was sent over the air.
+ * This function is called only as a tasklet (software IRQ). */
+int libipw_rx(struct libipw_device *ieee, struct sk_buff *skb,
+		 struct libipw_rx_stats *rx_stats)
+{
+	struct net_device *dev = ieee->dev;
+	struct libipw_hdr_4addr *hdr;
+	size_t hdrlen;
+	u16 fc, type, stype, sc;
+	unsigned int frag;
+	u8 *payload;
+	u16 ethertype;
+#ifdef NOT_YET
+	struct net_device *wds = NULL;
+	struct sk_buff *skb2 = NULL;
+	int frame_authorized = 0;
+	int from_assoc_ap = 0;
+	void *sta = NULL;
+#endif
+	u8 dst[ETH_ALEN];
+	u8 src[ETH_ALEN];
+	struct lib80211_crypt_data *crypt = NULL;
+	int keyidx = 0;
+	int can_be_decrypted = 0;
+
+	hdr = (struct libipw_hdr_4addr *)skb->data;
+	if (skb->len < 10) {
+		printk(KERN_INFO "%s: SKB length < 10\n", dev->name);
+		goto rx_dropped;
+	}
+
+	fc = le16_to_cpu(hdr->frame_ctl);
+	type = WLAN_FC_GET_TYPE(fc);
+	stype = WLAN_FC_GET_STYPE(fc);
+	sc = le16_to_cpu(hdr->seq_ctl);
+	frag = WLAN_GET_SEQ_FRAG(sc);
+	hdrlen = libipw_get_hdrlen(fc);
+
+	if (skb->len < hdrlen) {
+		printk(KERN_INFO "%s: invalid SKB length %d\n",
+			dev->name, skb->len);
+		goto rx_dropped;
+	}
+
+	/* Put this code here so that we avoid duplicating it in all
+	 * Rx paths. - Jean II */
+#ifdef CONFIG_WIRELESS_EXT
+#ifdef IW_WIRELESS_SPY		/* defined in iw_handler.h */
+	/* If spy monitoring on */
+	if (ieee->spy_data.spy_number > 0) {
+		struct iw_quality wstats;
+
+		wstats.updated = 0;
+		if (rx_stats->mask & LIBIPW_STATMASK_RSSI) {
+			wstats.level = rx_stats->signal;
+			wstats.updated |= IW_QUAL_LEVEL_UPDATED;
+		} else
+			wstats.updated |= IW_QUAL_LEVEL_INVALID;
+
+		if (rx_stats->mask & LIBIPW_STATMASK_NOISE) {
+			wstats.noise = rx_stats->noise;
+			wstats.updated |= IW_QUAL_NOISE_UPDATED;
+		} else
+			wstats.updated |= IW_QUAL_NOISE_INVALID;
+
+		if (rx_stats->mask & LIBIPW_STATMASK_SIGNAL) {
+			wstats.qual = rx_stats->signal;
+			wstats.updated |= IW_QUAL_QUAL_UPDATED;
+		} else
+			wstats.updated |= IW_QUAL_QUAL_INVALID;
+
+		/* Update spy records */
+		wireless_spy_update(ieee->dev, hdr->addr2, &wstats);
+	}
+#endif				/* IW_WIRELESS_SPY */
+#endif				/* CONFIG_WIRELESS_EXT */
+
+#ifdef NOT_YET
+	hostap_update_rx_stats(local->ap, hdr, rx_stats);
+#endif
+
+	if (ieee->iw_mode == IW_MODE_MONITOR) {
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes += skb->len;
+		libipw_monitor_rx(ieee, skb, rx_stats);
+		return 1;
+	}
+
+	can_be_decrypted = (is_multicast_ether_addr(hdr->addr1) ||
+			    is_broadcast_ether_addr(hdr->addr2)) ?
+	    ieee->host_mc_decrypt : ieee->host_decrypt;
+
+	if (can_be_decrypted) {
+		if (skb->len >= hdrlen + 3) {
+			/* Top two-bits of byte 3 are the key index */
+			keyidx = skb->data[hdrlen + 3] >> 6;
+		}
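+		/* In the IV that follows the 802.11 header, octets 0-2 carry
+		 * the IV/TSC and octet 3 is the key ID octet: bits 6-7 hold
+		 * the key index (bit 5 is the ExtIV flag used by TKIP/CCMP),
+		 * hence the ">> 6" above. */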
+
+		/* ieee->crypt[] is WEP_KEY (4) in length.  Given that keyidx
+		 * is only allowed 2-bits of storage, no value of keyidx can
+		 * be provided via above code that would result in keyidx
+		 * being out of range */
+		crypt = ieee->crypt_info.crypt[keyidx];
+
+#ifdef NOT_YET
+		sta = NULL;
+
+		/* Use station specific key to override default keys if the
+		 * receiver address is a unicast address ("individual RA"). If
+		 * bcrx_sta_key parameter is set, station specific key is used
+		 * even with broad/multicast targets (this is against IEEE
+		 * 802.11, but makes it easier to use different keys with
+		 * stations that do not support WEP key mapping). */
+
+		if (is_unicast_ether_addr(hdr->addr1) || local->bcrx_sta_key)
+			(void)hostap_handle_sta_crypto(local, hdr, &crypt,
+						       &sta);
+#endif
+
+		/* allow a NULL decrypt op to indicate a station-specific
+		 * override of the default encryption */
+		if (crypt && (crypt->ops == NULL ||
+			      crypt->ops->decrypt_mpdu == NULL))
+			crypt = NULL;
+
+		if (!crypt && (fc & IEEE80211_FCTL_PROTECTED)) {
+			/* This seems to be triggered by some (multicast?)
+			 * frames from other than current BSS, so just drop the
+			 * frames silently instead of filling system log with
+			 * these reports. */
+			LIBIPW_DEBUG_DROP("Decryption failed (not set)"
+					     " (SA=%pM)\n", hdr->addr2);
+			ieee->ieee_stats.rx_discards_undecryptable++;
+			goto rx_dropped;
+		}
+	}
+#ifdef NOT_YET
+	if (type != WLAN_FC_TYPE_DATA) {
+		if (type == WLAN_FC_TYPE_MGMT && stype == WLAN_FC_STYPE_AUTH &&
+		    fc & IEEE80211_FCTL_PROTECTED && ieee->host_decrypt &&
+		    (keyidx = hostap_rx_frame_decrypt(ieee, skb, crypt)) < 0) {
+			printk(KERN_DEBUG "%s: failed to decrypt mgmt::auth "
+			       "from %pM\n", dev->name, hdr->addr2);
+			/* TODO: could inform hostapd about this so that it
+			 * could send auth failure report */
+			goto rx_dropped;
+		}
+
+		if (libipw_rx_frame_mgmt(ieee, skb, rx_stats, type, stype))
+			goto rx_dropped;
+		else
+			goto rx_exit;
+	}
+#endif
+	/* drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
+	if (sc == ieee->prev_seq_ctl)
+		goto rx_dropped;
+	else
+		ieee->prev_seq_ctl = sc;
+
+	/* Data frame - extract src/dst addresses */
+	if (skb->len < LIBIPW_3ADDR_LEN)
+		goto rx_dropped;
+
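+	/* 802.11 address mapping by (FromDS, ToDS), as implemented in the
+	 * switch below:
+	 *	00 (IBSS/direct):  DA = addr1, SA = addr2
+	 *	01 (ToDS):         DA = addr3, SA = addr2
+	 *	10 (FromDS):       DA = addr1, SA = addr3
+	 *	11 (WDS 4-addr):   DA = addr3, SA = addr4
+	 */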
+	switch (fc & (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
+	case IEEE80211_FCTL_FROMDS:
+		memcpy(dst, hdr->addr1, ETH_ALEN);
+		memcpy(src, hdr->addr3, ETH_ALEN);
+		break;
+	case IEEE80211_FCTL_TODS:
+		memcpy(dst, hdr->addr3, ETH_ALEN);
+		memcpy(src, hdr->addr2, ETH_ALEN);
+		break;
+	case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS:
+		if (skb->len < LIBIPW_4ADDR_LEN)
+			goto rx_dropped;
+		memcpy(dst, hdr->addr3, ETH_ALEN);
+		memcpy(src, hdr->addr4, ETH_ALEN);
+		break;
+	default:
+		memcpy(dst, hdr->addr1, ETH_ALEN);
+		memcpy(src, hdr->addr2, ETH_ALEN);
+		break;
+	}
+
+#ifdef NOT_YET
+	if (hostap_rx_frame_wds(ieee, hdr, fc, &wds))
+		goto rx_dropped;
+	if (wds) {
+		skb->dev = dev = wds;
+		stats = hostap_get_stats(dev);
+	}
+
+	if (ieee->iw_mode == IW_MODE_MASTER && !wds &&
+	    (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
+	    IEEE80211_FCTL_FROMDS && ieee->stadev &&
+	    ether_addr_equal(hdr->addr2, ieee->assoc_ap_addr)) {
+		/* Frame from BSSID of the AP for which we are a client */
+		skb->dev = dev = ieee->stadev;
+		stats = hostap_get_stats(dev);
+		from_assoc_ap = 1;
+	}
+#endif
+
+#ifdef NOT_YET
+	if ((ieee->iw_mode == IW_MODE_MASTER ||
+	     ieee->iw_mode == IW_MODE_REPEAT) && !from_assoc_ap) {
+		switch (hostap_handle_sta_rx(ieee, dev, skb, rx_stats,
+					     wds != NULL)) {
+		case AP_RX_CONTINUE_NOT_AUTHORIZED:
+			frame_authorized = 0;
+			break;
+		case AP_RX_CONTINUE:
+			frame_authorized = 1;
+			break;
+		case AP_RX_DROP:
+			goto rx_dropped;
+		case AP_RX_EXIT:
+			goto rx_exit;
+		}
+	}
+#endif
+
+	/* Nullfunc frames may have PS-bit set, so they must be passed to
+	 * hostap_handle_sta_rx() before being dropped here. */
+
+	stype &= ~IEEE80211_STYPE_QOS_DATA;
+
+	if (stype != IEEE80211_STYPE_DATA &&
+	    stype != IEEE80211_STYPE_DATA_CFACK &&
+	    stype != IEEE80211_STYPE_DATA_CFPOLL &&
+	    stype != IEEE80211_STYPE_DATA_CFACKPOLL) {
+		if (stype != IEEE80211_STYPE_NULLFUNC)
+			LIBIPW_DEBUG_DROP("RX: dropped data frame "
+					     "with no data (type=0x%02x, "
+					     "subtype=0x%02x, len=%d)\n",
+					     type, stype, skb->len);
+		goto rx_dropped;
+	}
+
+	/* skb: hdr + (possibly fragmented, possibly encrypted) payload */
+
+	if ((fc & IEEE80211_FCTL_PROTECTED) && can_be_decrypted &&
+	    (keyidx = libipw_rx_frame_decrypt(ieee, skb, crypt)) < 0)
+		goto rx_dropped;
+
+	hdr = (struct libipw_hdr_4addr *)skb->data;
+
+	/* skb: hdr + (possibly fragmented) plaintext payload */
+	/* PR: FIXME: hostap has additional conditions in the "if" below:
+	 * ieee->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) && */
+	if ((frag != 0) || (fc & IEEE80211_FCTL_MOREFRAGS)) {
+		int flen;
+		struct sk_buff *frag_skb = libipw_frag_cache_get(ieee, hdr);
+		LIBIPW_DEBUG_FRAG("Rx Fragment received (%u)\n", frag);
+
+		if (!frag_skb) {
+			LIBIPW_DEBUG(LIBIPW_DL_RX | LIBIPW_DL_FRAG,
+					"Rx cannot get skb from fragment "
+					"cache (morefrag=%d seq=%u frag=%u)\n",
+					(fc & IEEE80211_FCTL_MOREFRAGS) != 0,
+					WLAN_GET_SEQ_SEQ(sc), frag);
+			goto rx_dropped;
+		}
+
+		flen = skb->len;
+		if (frag != 0)
+			flen -= hdrlen;
+
+		if (frag_skb->tail + flen > frag_skb->end) {
+			printk(KERN_WARNING "%s: host decrypted and "
+			       "reassembled frame did not fit skb\n",
+			       dev->name);
+			libipw_frag_cache_invalidate(ieee, hdr);
+			goto rx_dropped;
+		}
+
+		if (frag == 0) {
+			/* copy first fragment (including full headers) into
+			 * beginning of the fragment cache skb */
+			skb_copy_from_linear_data(skb, skb_put(frag_skb, flen), flen);
+		} else {
+			/* append frame payload to the end of the fragment
+			 * cache skb */
+			skb_copy_from_linear_data_offset(skb, hdrlen,
+				      skb_put(frag_skb, flen), flen);
+		}
+		dev_kfree_skb_any(skb);
+		skb = NULL;
+
+		if (fc & IEEE80211_FCTL_MOREFRAGS) {
+			/* more fragments expected - leave the skb in fragment
+			 * cache for now; it will be delivered to upper layers
+			 * after all fragments have been received */
+			goto rx_exit;
+		}
+
+		/* this was the last fragment and the frame will be
+		 * delivered, so remove skb from fragment cache */
+		skb = frag_skb;
+		hdr = (struct libipw_hdr_4addr *)skb->data;
+		libipw_frag_cache_invalidate(ieee, hdr);
+	}
+
+	/* skb: hdr + (possible reassembled) full MSDU payload; possibly still
+	 * encrypted/authenticated */
+	if ((fc & IEEE80211_FCTL_PROTECTED) && can_be_decrypted &&
+	    libipw_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt))
+		goto rx_dropped;
+
+	hdr = (struct libipw_hdr_4addr *)skb->data;
+	if (crypt && !(fc & IEEE80211_FCTL_PROTECTED) && !ieee->open_wep) {
+		if (/* ieee->ieee802_1x && */
+		    libipw_is_eapol_frame(ieee, skb)) {
+			/* pass unencrypted EAPOL frames even if encryption is
+			 * configured */
+		} else {
+			LIBIPW_DEBUG_DROP("encryption configured, but RX "
+					     "frame not encrypted (SA=%pM)\n",
+					     hdr->addr2);
+			goto rx_dropped;
+		}
+	}
+
+	if (crypt && !(fc & IEEE80211_FCTL_PROTECTED) && !ieee->open_wep &&
+	    !libipw_is_eapol_frame(ieee, skb)) {
+		LIBIPW_DEBUG_DROP("dropped unencrypted RX data "
+				     "frame from %pM (drop_unencrypted=1)\n",
+				     hdr->addr2);
+		goto rx_dropped;
+	}
+
+	/* If the frame was decrypted in hardware, we may need to strip off
+	 * any security data (IV, ICV, etc) that was left behind */
+	if (!can_be_decrypted && (fc & IEEE80211_FCTL_PROTECTED) &&
+	    ieee->host_strip_iv_icv) {
+		int trimlen = 0;
+
+		/* Top two-bits of byte 3 are the key index */
+		if (skb->len >= hdrlen + 3)
+			keyidx = skb->data[hdrlen + 3] >> 6;
+
+		/* To strip off any security data which appears before the
+		 * payload, we simply increase hdrlen (as the header gets
+		 * chopped off immediately below). For the security data which
+		 * appears after the payload, we use skb_trim. */
+
+		switch (ieee->sec.encode_alg[keyidx]) {
+		case SEC_ALG_WEP:
+			/* 4 byte IV */
+			hdrlen += 4;
+			/* 4 byte ICV */
+			trimlen = 4;
+			break;
+		case SEC_ALG_TKIP:
+			/* 4 byte IV, 4 byte ExtIV */
+			hdrlen += 8;
+			/* 8 byte MIC, 4 byte ICV */
+			trimlen = 12;
+			break;
+		case SEC_ALG_CCMP:
+			/* 8 byte CCMP header */
+			hdrlen += 8;
+			/* 8 byte MIC */
+			trimlen = 8;
+			break;
+		}
+
+		if (skb->len < trimlen)
+			goto rx_dropped;
+
+		__skb_trim(skb, skb->len - trimlen);
+
+		if (skb->len < hdrlen)
+			goto rx_dropped;
+	}
+
+	/* skb: hdr + (possible reassembled) full plaintext payload */
+
+	payload = skb->data + hdrlen;
+	ethertype = (payload[6] << 8) | payload[7];
+
+#ifdef NOT_YET
+	/* If IEEE 802.1X is used, check whether the port is authorized to send
+	 * the received frame. */
+	if (ieee->ieee802_1x && ieee->iw_mode == IW_MODE_MASTER) {
+		if (ethertype == ETH_P_PAE) {
+			printk(KERN_DEBUG "%s: RX: IEEE 802.1X frame\n",
+			       dev->name);
+			if (ieee->hostapd && ieee->apdev) {
+				/* Send IEEE 802.1X frames to the user
+				 * space daemon for processing */
+				prism2_rx_80211(ieee->apdev, skb, rx_stats,
+						PRISM2_RX_MGMT);
+				ieee->apdevstats.rx_packets++;
+				ieee->apdevstats.rx_bytes += skb->len;
+				goto rx_exit;
+			}
+		} else if (!frame_authorized) {
+			printk(KERN_DEBUG "%s: dropped frame from "
+			       "unauthorized port (IEEE 802.1X): "
+			       "ethertype=0x%04x\n", dev->name, ethertype);
+			goto rx_dropped;
+		}
+	}
+#endif
+
+	/* convert hdr + possible LLC headers into Ethernet header */
+	if (skb->len - hdrlen >= 8 &&
+	    ((memcmp(payload, libipw_rfc1042_header, SNAP_SIZE) == 0 &&
+	      ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
+	     memcmp(payload, libipw_bridge_tunnel_header, SNAP_SIZE) == 0)) {
+		/* remove RFC1042 or Bridge-Tunnel encapsulation and
+		 * replace EtherType */
+		skb_pull(skb, hdrlen + SNAP_SIZE);
+		memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
+		memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
+	} else {
+		__be16 len;
+		/* Leave Ethernet header part of hdr and full payload */
+		skb_pull(skb, hdrlen);
+		len = htons(skb->len);
+		memcpy(skb_push(skb, 2), &len, 2);
+		memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
+		memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
+	}
+
+#ifdef NOT_YET
+	if (wds && ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
+		    IEEE80211_FCTL_TODS) && skb->len >= ETH_HLEN + ETH_ALEN) {
+		/* Non-standard frame: get addr4 from its bogus location after
+		 * the payload */
+		skb_copy_to_linear_data_offset(skb, ETH_ALEN,
+					       skb->data + skb->len - ETH_ALEN,
+					       ETH_ALEN);
+		skb_trim(skb, skb->len - ETH_ALEN);
+	}
+#endif
+
+	dev->stats.rx_packets++;
+	dev->stats.rx_bytes += skb->len;
+
+#ifdef NOT_YET
+	if (ieee->iw_mode == IW_MODE_MASTER && !wds && ieee->ap->bridge_packets) {
+		if (is_multicast_ether_addr(dst)) {
+			/* copy multicast frame both to the higher layers and
+			 * to the wireless media */
+			ieee->ap->bridged_multicast++;
+			skb2 = skb_clone(skb, GFP_ATOMIC);
+			if (skb2 == NULL)
+				printk(KERN_DEBUG "%s: skb_clone failed for "
+				       "multicast frame\n", dev->name);
+		} else if (hostap_is_sta_assoc(ieee->ap, dst)) {
+			/* send frame directly to the associated STA using
+			 * wireless media and not passing to higher layers */
+			ieee->ap->bridged_unicast++;
+			skb2 = skb;
+			skb = NULL;
+		}
+	}
+
+	if (skb2 != NULL) {
+		/* send to wireless media */
+		skb2->dev = dev;
+		skb2->protocol = htons(ETH_P_802_3);
+		skb_reset_mac_header(skb2);
+		skb_reset_network_header(skb2);
+		/* skb2->network_header += ETH_HLEN; */
+		dev_queue_xmit(skb2);
+	}
+#endif
+
+	if (skb) {
+		skb->protocol = eth_type_trans(skb, dev);
+		memset(skb->cb, 0, sizeof(skb->cb));
+		skb->ip_summed = CHECKSUM_NONE;	/* 802.11 crc not sufficient */
+		if (netif_rx(skb) == NET_RX_DROP) {
+			/* netif_rx() never fails outright, but it may
+			 * drop the packet; when it does, record that
+			 * in our stats. */
+			LIBIPW_DEBUG_DROP
+			    ("RX: netif_rx dropped the packet\n");
+			dev->stats.rx_dropped++;
+		}
+	}
+
+rx_exit:
+#ifdef NOT_YET
+	if (sta)
+		hostap_handle_sta_release(sta);
+#endif
+	return 1;
+
+rx_dropped:
+	dev->stats.rx_dropped++;
+
+	/* Returning 0 indicates to caller that we have not handled the SKB--
+	 * so it is still allocated and can be used again by underlying
+	 * hardware as a DMA target */
+	return 0;
+}
+
+/* Filter out unrelated packets, then call libipw_rx[_mgt].
+ * This function takes ownership of the skb; the skb must not be used
+ * again after this call. */
+void libipw_rx_any(struct libipw_device *ieee,
+		     struct sk_buff *skb, struct libipw_rx_stats *stats)
+{
+	struct libipw_hdr_4addr *hdr;
+	int is_packet_for_us;
+	u16 fc;
+
+	if (ieee->iw_mode == IW_MODE_MONITOR) {
+		if (!libipw_rx(ieee, skb, stats))
+			dev_kfree_skb_irq(skb);
+		return;
+	}
+
+	if (skb->len < sizeof(struct ieee80211_hdr))
+		goto drop_free;
+
+	hdr = (struct libipw_hdr_4addr *)skb->data;
+	fc = le16_to_cpu(hdr->frame_ctl);
+
+	if ((fc & IEEE80211_FCTL_VERS) != 0)
+		goto drop_free;
+
+	switch (fc & IEEE80211_FCTL_FTYPE) {
+	case IEEE80211_FTYPE_MGMT:
+		if (skb->len < sizeof(struct libipw_hdr_3addr))
+			goto drop_free;
+		libipw_rx_mgt(ieee, hdr, stats);
+		dev_kfree_skb_irq(skb);
+		return;
+	case IEEE80211_FTYPE_DATA:
+		break;
+	case IEEE80211_FTYPE_CTL:
+	default:
+		/* we own the skb, so free it even when the frame is ignored */
+		dev_kfree_skb_irq(skb);
+		return;
+	}
+
+	is_packet_for_us = 0;
+	switch (ieee->iw_mode) {
+	case IW_MODE_ADHOC:
+		/* our BSS and not from/to DS */
+		if (ether_addr_equal(hdr->addr3, ieee->bssid) &&
+		    (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == 0) {
+			/* promisc: get all */
+			if (ieee->dev->flags & IFF_PROMISC)
+				is_packet_for_us = 1;
+			/* to us */
+			else if (ether_addr_equal(hdr->addr1, ieee->dev->dev_addr))
+				is_packet_for_us = 1;
+			/* mcast */
+			else if (is_multicast_ether_addr(hdr->addr1))
+				is_packet_for_us = 1;
+		}
+		break;
+	case IW_MODE_INFRA:
+		/* our BSS (== from our AP) and from DS */
+		if (ether_addr_equal(hdr->addr2, ieee->bssid) &&
+		    (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
+		    IEEE80211_FCTL_FROMDS) {
+			/* promisc: get all */
+			if (ieee->dev->flags & IFF_PROMISC)
+				is_packet_for_us = 1;
+			/* to us */
+			else if (ether_addr_equal(hdr->addr1, ieee->dev->dev_addr))
+				is_packet_for_us = 1;
+			/* mcast */
+			else if (is_multicast_ether_addr(hdr->addr1)) {
+				/* not our own packet broadcast back by the AP */
+				if (!ether_addr_equal(hdr->addr3, ieee->dev->dev_addr))
+					is_packet_for_us = 1;
+			}
+		}
+		break;
+	default:
+		/* ? */
+		break;
+	}
+
+	if (is_packet_for_us) {
+		if (!libipw_rx(ieee, skb, stats))
+			dev_kfree_skb_irq(skb);
+		return;
+	}
+	/* not for us: fall through, free the skb and count it as dropped */
+
+drop_free:
+	dev_kfree_skb_irq(skb);
+	ieee->dev->stats.rx_dropped++;
+}
+
+#define MGMT_FRAME_FIXED_PART_LENGTH		0x24
+
+static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
+
+/*
+ * Verify that the QoS information element read from the beacon packet
+ * has the expected OUI, type, subtype and version values
+ */
+static int libipw_verify_qos_info(struct libipw_qos_information_element
+				     *info_element, int sub_type)
+{
+
+	if (info_element->qui_subtype != sub_type)
+		return -1;
+	if (memcmp(info_element->qui, qos_oui, QOS_OUI_LEN))
+		return -1;
+	if (info_element->qui_type != QOS_OUI_TYPE)
+		return -1;
+	if (info_element->version != QOS_VERSION_1)
+		return -1;
+
+	return 0;
+}
+
+/*
+ * Parse a QoS parameter element
+ */
+static int
+libipw_read_qos_param_element(struct libipw_qos_parameter_info *element_param,
+			      struct libipw_info_element *info_element)
+{
+	int ret = 0;
+	u16 size = sizeof(struct libipw_qos_parameter_info) - 2;
+
+	if ((info_element == NULL) || (element_param == NULL))
+		return -1;
+
+	if (info_element->id == QOS_ELEMENT_ID && info_element->len == size) {
+		memcpy(element_param->info_element.qui, info_element->data,
+		       info_element->len);
+		element_param->info_element.elementID = info_element->id;
+		element_param->info_element.length = info_element->len;
+	} else
+		ret = -1;
+	if (ret == 0)
+		ret = libipw_verify_qos_info(&element_param->info_element,
+						QOS_OUI_PARAM_SUB_TYPE);
+	return ret;
+}
+
+/*
+ * Parse a QoS information element
+ */
+static int
+libipw_read_qos_info_element(struct libipw_qos_information_element *element_info,
+			     struct libipw_info_element *info_element)
+{
+	int ret = 0;
+	u16 size = sizeof(struct libipw_qos_information_element) - 2;
+
+	if (element_info == NULL)
+		return -1;
+	if (info_element == NULL)
+		return -1;
+
+	if ((info_element->id == QOS_ELEMENT_ID) && (info_element->len == size)) {
+		memcpy(element_info->qui, info_element->data,
+		       info_element->len);
+		element_info->elementID = info_element->id;
+		element_info->length = info_element->len;
+	} else
+		ret = -1;
+
+	if (ret == 0)
+		ret = libipw_verify_qos_info(element_info,
+						QOS_OUI_INFO_SUB_TYPE);
+	return ret;
+}
+
+/*
+ * Write QoS parameters from the ac parameters.
+ */
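+/*
+ * Worked example (values assumed, not from a capture): an AC record with
+ * aci_aifsn = 0x03 and ecw_min_max = 0xA4 yields AIFSN = 3, so
+ * aifs = 3 - 2 = 1; ECWmin = 4, so cw_min = 2^4 - 1 = 15; ECWmax = 0xA,
+ * so cw_max = 2^10 - 1 = 1023.  tx_op_limit is carried in units of
+ * 32 microseconds, hence the "* 32" below.
+ */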
+static int
+libipw_qos_convert_ac_to_parameters(struct libipw_qos_parameter_info *param_elm,
+				    struct libipw_qos_parameters *qos_param)
+{
+	int rc = 0;
+	int i;
+	struct libipw_qos_ac_parameter *ac_params;
+	u32 txop;
+	u8 cw_min;
+	u8 cw_max;
+
+	for (i = 0; i < QOS_QUEUE_NUM; i++) {
+		ac_params = &(param_elm->ac_params_record[i]);
+
+		qos_param->aifs[i] = (ac_params->aci_aifsn) & 0x0F;
+		qos_param->aifs[i] -= (qos_param->aifs[i] < 2) ? 0 : 2;
+
+		cw_min = ac_params->ecw_min_max & 0x0F;
+		qos_param->cw_min[i] = cpu_to_le16((1 << cw_min) - 1);
+
+		cw_max = (ac_params->ecw_min_max & 0xF0) >> 4;
+		qos_param->cw_max[i] = cpu_to_le16((1 << cw_max) - 1);
+
+		qos_param->flag[i] =
+		    (ac_params->aci_aifsn & 0x10) ? 0x01 : 0x00;
+
+		txop = le16_to_cpu(ac_params->tx_op_limit) * 32;
+		qos_param->tx_op_limit[i] = cpu_to_le16(txop);
+	}
+	return rc;
+}
+
+/*
+ * We have a generic data element which may contain a QoS information
+ * or a QoS parameters element.  Check the information element length
+ * to decide which type to read.
+ */
+static int libipw_parse_qos_info_param_IE(struct libipw_info_element
+					     *info_element,
+					     struct libipw_network *network)
+{
+	int rc = 0;
+	struct libipw_qos_parameters *qos_param = NULL;
+	struct libipw_qos_information_element qos_info_element;
+
+	rc = libipw_read_qos_info_element(&qos_info_element, info_element);
+
+	if (rc == 0) {
+		network->qos_data.param_count = qos_info_element.ac_info & 0x0F;
+		network->flags |= NETWORK_HAS_QOS_INFORMATION;
+	} else {
+		struct libipw_qos_parameter_info param_element;
+
+		rc = libipw_read_qos_param_element(&param_element,
+						      info_element);
+		if (rc == 0) {
+			qos_param = &(network->qos_data.parameters);
+			libipw_qos_convert_ac_to_parameters(&param_element,
+							       qos_param);
+			network->flags |= NETWORK_HAS_QOS_PARAMETERS;
+			network->qos_data.param_count =
+			    param_element.info_element.ac_info & 0x0F;
+		}
+	}
+
+	if (rc == 0) {
+		LIBIPW_DEBUG_QOS("QoS is supported\n");
+		network->qos_data.supported = 1;
+	}
+	return rc;
+}
+
+#ifdef CONFIG_LIBIPW_DEBUG
+#define MFIE_STRING(x) case WLAN_EID_ ##x: return #x
+
+static const char *get_info_element_string(u16 id)
+{
+	switch (id) {
+		MFIE_STRING(SSID);
+		MFIE_STRING(SUPP_RATES);
+		MFIE_STRING(FH_PARAMS);
+		MFIE_STRING(DS_PARAMS);
+		MFIE_STRING(CF_PARAMS);
+		MFIE_STRING(TIM);
+		MFIE_STRING(IBSS_PARAMS);
+		MFIE_STRING(COUNTRY);
+		MFIE_STRING(REQUEST);
+		MFIE_STRING(CHALLENGE);
+		MFIE_STRING(PWR_CONSTRAINT);
+		MFIE_STRING(PWR_CAPABILITY);
+		MFIE_STRING(TPC_REQUEST);
+		MFIE_STRING(TPC_REPORT);
+		MFIE_STRING(SUPPORTED_CHANNELS);
+		MFIE_STRING(CHANNEL_SWITCH);
+		MFIE_STRING(MEASURE_REQUEST);
+		MFIE_STRING(MEASURE_REPORT);
+		MFIE_STRING(QUIET);
+		MFIE_STRING(IBSS_DFS);
+		MFIE_STRING(ERP_INFO);
+		MFIE_STRING(RSN);
+		MFIE_STRING(EXT_SUPP_RATES);
+		MFIE_STRING(VENDOR_SPECIFIC);
+		MFIE_STRING(QOS_PARAMETER);
+	default:
+		return "UNKNOWN";
+	}
+}
+#endif
+
+static int libipw_parse_info_param(struct libipw_info_element
+				      *info_element, u16 length,
+				      struct libipw_network *network)
+{
+	u8 i;
+#ifdef CONFIG_LIBIPW_DEBUG
+	char rates_str[64];
+	char *p;
+#endif
+
+	while (length >= sizeof(*info_element)) {
+		if (sizeof(*info_element) + info_element->len > length) {
+			LIBIPW_DEBUG_MGMT("Info elem: parse failed: "
+					     "info_element->len + 2 > left : "
+					     "info_element->len+2=%zd left=%d, id=%d.\n",
+					     info_element->len +
+					     sizeof(*info_element),
+					     length, info_element->id);
+			/* We stop processing but don't return an error here
+			 * because some misbehaviour APs break this rule. ie.
+			 * Orinoco AP1000. */
+			break;
+		}
+
+		switch (info_element->id) {
+		case WLAN_EID_SSID:
+			network->ssid_len = min(info_element->len,
+						(u8) IW_ESSID_MAX_SIZE);
+			memcpy(network->ssid, info_element->data,
+			       network->ssid_len);
+			if (network->ssid_len < IW_ESSID_MAX_SIZE)
+				memset(network->ssid + network->ssid_len, 0,
+				       IW_ESSID_MAX_SIZE - network->ssid_len);
+
+			LIBIPW_DEBUG_MGMT("WLAN_EID_SSID: '%*pE' len=%d.\n",
+					  network->ssid_len, network->ssid,
+					  network->ssid_len);
+			break;
+
+		case WLAN_EID_SUPP_RATES:
+#ifdef CONFIG_LIBIPW_DEBUG
+			p = rates_str;
+#endif
+			network->rates_len = min(info_element->len,
+						 MAX_RATES_LENGTH);
+			for (i = 0; i < network->rates_len; i++) {
+				network->rates[i] = info_element->data[i];
+#ifdef CONFIG_LIBIPW_DEBUG
+				p += snprintf(p, sizeof(rates_str) -
+					      (p - rates_str), "%02X ",
+					      network->rates[i]);
+#endif
+				if (libipw_is_ofdm_rate
+				    (info_element->data[i])) {
+					network->flags |= NETWORK_HAS_OFDM;
+					if (info_element->data[i] &
+					    LIBIPW_BASIC_RATE_MASK)
+						network->flags &=
+						    ~NETWORK_HAS_CCK;
+				}
+			}
+
+			LIBIPW_DEBUG_MGMT("WLAN_EID_SUPP_RATES: '%s' (%d)\n",
+					     rates_str, network->rates_len);
+			break;
+
+		case WLAN_EID_EXT_SUPP_RATES:
+#ifdef CONFIG_LIBIPW_DEBUG
+			p = rates_str;
+#endif
+			network->rates_ex_len = min(info_element->len,
+						    MAX_RATES_EX_LENGTH);
+			for (i = 0; i < network->rates_ex_len; i++) {
+				network->rates_ex[i] = info_element->data[i];
+#ifdef CONFIG_LIBIPW_DEBUG
+				p += snprintf(p, sizeof(rates_str) -
+					      (p - rates_str), "%02X ",
+					      network->rates_ex[i]);
+#endif
+				if (libipw_is_ofdm_rate
+				    (info_element->data[i])) {
+					network->flags |= NETWORK_HAS_OFDM;
+					if (info_element->data[i] &
+					    LIBIPW_BASIC_RATE_MASK)
+						network->flags &=
+						    ~NETWORK_HAS_CCK;
+				}
+			}
+
+			LIBIPW_DEBUG_MGMT("WLAN_EID_EXT_SUPP_RATES: '%s' (%d)\n",
+					     rates_str, network->rates_ex_len);
+			break;
+
+		case WLAN_EID_DS_PARAMS:
+			LIBIPW_DEBUG_MGMT("WLAN_EID_DS_PARAMS: %d\n",
+					     info_element->data[0]);
+			network->channel = info_element->data[0];
+			break;
+
+		case WLAN_EID_FH_PARAMS:
+			LIBIPW_DEBUG_MGMT("WLAN_EID_FH_PARAMS: ignored\n");
+			break;
+
+		case WLAN_EID_CF_PARAMS:
+			LIBIPW_DEBUG_MGMT("WLAN_EID_CF_PARAMS: ignored\n");
+			break;
+
+		case WLAN_EID_TIM:
+			network->tim.tim_count = info_element->data[0];
+			network->tim.tim_period = info_element->data[1];
+			LIBIPW_DEBUG_MGMT("WLAN_EID_TIM: partially ignored\n");
+			break;
+
+		case WLAN_EID_ERP_INFO:
+			network->erp_value = info_element->data[0];
+			network->flags |= NETWORK_HAS_ERP_VALUE;
+			LIBIPW_DEBUG_MGMT("MFIE_TYPE_ERP_SET: %d\n",
+					     network->erp_value);
+			break;
+
+		case WLAN_EID_IBSS_PARAMS:
+			network->atim_window = info_element->data[0];
+			LIBIPW_DEBUG_MGMT("WLAN_EID_IBSS_PARAMS: %d\n",
+					     network->atim_window);
+			break;
+
+		case WLAN_EID_CHALLENGE:
+			LIBIPW_DEBUG_MGMT("WLAN_EID_CHALLENGE: ignored\n");
+			break;
+
+		case WLAN_EID_VENDOR_SPECIFIC:
+			LIBIPW_DEBUG_MGMT("WLAN_EID_VENDOR_SPECIFIC: %d bytes\n",
+					     info_element->len);
+			if (!libipw_parse_qos_info_param_IE(info_element,
+							       network))
+				break;
+
+			if (info_element->len >= 4 &&
+			    info_element->data[0] == 0x00 &&
+			    info_element->data[1] == 0x50 &&
+			    info_element->data[2] == 0xf2 &&
+			    info_element->data[3] == 0x01) {
+				network->wpa_ie_len = min(info_element->len + 2,
+							  MAX_WPA_IE_LEN);
+				memcpy(network->wpa_ie, info_element,
+				       network->wpa_ie_len);
+			}
+			break;
+
+		case WLAN_EID_RSN:
+			LIBIPW_DEBUG_MGMT("WLAN_EID_RSN: %d bytes\n",
+					     info_element->len);
+			network->rsn_ie_len = min(info_element->len + 2,
+						  MAX_WPA_IE_LEN);
+			memcpy(network->rsn_ie, info_element,
+			       network->rsn_ie_len);
+			break;
+
+		case WLAN_EID_QOS_PARAMETER:
+			printk(KERN_ERR
+			       "QoS error: need to parse QOS_PARAMETER IE\n");
+			break;
+			/* 802.11h */
+		case WLAN_EID_PWR_CONSTRAINT:
+			network->power_constraint = info_element->data[0];
+			network->flags |= NETWORK_HAS_POWER_CONSTRAINT;
+			break;
+
+		case WLAN_EID_CHANNEL_SWITCH:
+			network->power_constraint = info_element->data[0];
+			network->flags |= NETWORK_HAS_CSA;
+			break;
+
+		case WLAN_EID_QUIET:
+			network->quiet.count = info_element->data[0];
+			network->quiet.period = info_element->data[1];
+			network->quiet.duration = info_element->data[2];
+			network->quiet.offset = info_element->data[3];
+			network->flags |= NETWORK_HAS_QUIET;
+			break;
+
+		case WLAN_EID_IBSS_DFS:
+			network->flags |= NETWORK_HAS_IBSS_DFS;
+			break;
+
+		case WLAN_EID_TPC_REPORT:
+			network->tpc_report.transmit_power =
+			    info_element->data[0];
+			network->tpc_report.link_margin = info_element->data[1];
+			network->flags |= NETWORK_HAS_TPC_REPORT;
+			break;
+
+		default:
+			LIBIPW_DEBUG_MGMT
+			    ("Unsupported info element: %s (%d)\n",
+			     get_info_element_string(info_element->id),
+			     info_element->id);
+			break;
+		}
+
+		length -= sizeof(*info_element) + info_element->len;
+		info_element =
+		    (struct libipw_info_element *)&info_element->
+		    data[info_element->len];
+	}
+
+	return 0;
+}
+
+static int
+libipw_handle_assoc_resp(struct libipw_device *ieee,
+			 struct libipw_assoc_response *frame,
+			 struct libipw_rx_stats *stats)
+{
+	struct libipw_network network_resp = { };
+	struct libipw_network *network = &network_resp;
+	struct net_device *dev = ieee->dev;
+
+	network->flags = 0;
+	network->qos_data.active = 0;
+	network->qos_data.supported = 0;
+	network->qos_data.param_count = 0;
+	network->qos_data.old_param_count = 0;
+
+	/* network->atim_window = le16_to_cpu(frame->aid) & (0x3FFF); */
+	network->atim_window = le16_to_cpu(frame->aid);
+	network->listen_interval = le16_to_cpu(frame->status);
+	memcpy(network->bssid, frame->header.addr3, ETH_ALEN);
+	network->capability = le16_to_cpu(frame->capability);
+	network->last_scanned = jiffies;
+	network->rates_len = network->rates_ex_len = 0;
+	network->last_associate = 0;
+	network->ssid_len = 0;
+	network->erp_value =
+	    (network->capability & WLAN_CAPABILITY_IBSS) ? 0x3 : 0x0;
+
+	if (stats->freq == LIBIPW_52GHZ_BAND) {
+		/* for A band (No DS info) */
+		network->channel = stats->received_channel;
+	} else
+		network->flags |= NETWORK_HAS_CCK;
+
+	network->wpa_ie_len = 0;
+	network->rsn_ie_len = 0;
+
+	if (libipw_parse_info_param
+	    (frame->info_element, stats->len - sizeof(*frame), network))
+		return 1;
+
+	network->mode = 0;
+	if (stats->freq == LIBIPW_52GHZ_BAND)
+		network->mode = IEEE_A;
+	else {
+		if (network->flags & NETWORK_HAS_OFDM)
+			network->mode |= IEEE_G;
+		if (network->flags & NETWORK_HAS_CCK)
+			network->mode |= IEEE_B;
+	}
+
+	memcpy(&network->stats, stats, sizeof(network->stats));
+
+	if (ieee->handle_assoc_response != NULL)
+		ieee->handle_assoc_response(dev, frame, network);
+
+	return 0;
+}
+
+/***************************************************/
+
+static int
+libipw_network_init(struct libipw_device *ieee,
+		    struct libipw_probe_response *beacon,
+		    struct libipw_network *network,
+		    struct libipw_rx_stats *stats)
+{
+	network->qos_data.active = 0;
+	network->qos_data.supported = 0;
+	network->qos_data.param_count = 0;
+	network->qos_data.old_param_count = 0;
+
+	/* Pull out fixed field data */
+	memcpy(network->bssid, beacon->header.addr3, ETH_ALEN);
+	network->capability = le16_to_cpu(beacon->capability);
+	network->last_scanned = jiffies;
+	network->time_stamp[0] = le32_to_cpu(beacon->time_stamp[0]);
+	network->time_stamp[1] = le32_to_cpu(beacon->time_stamp[1]);
+	network->beacon_interval = le16_to_cpu(beacon->beacon_interval);
+	/* Where to pull this? beacon->listen_interval; */
+	network->listen_interval = 0x0A;
+	network->rates_len = network->rates_ex_len = 0;
+	network->last_associate = 0;
+	network->ssid_len = 0;
+	network->flags = 0;
+	network->atim_window = 0;
+	network->erp_value = (network->capability & WLAN_CAPABILITY_IBSS) ?
+	    0x3 : 0x0;
+
+	if (stats->freq == LIBIPW_52GHZ_BAND) {
+		/* for A band (No DS info) */
+		network->channel = stats->received_channel;
+	} else
+		network->flags |= NETWORK_HAS_CCK;
+
+	network->wpa_ie_len = 0;
+	network->rsn_ie_len = 0;
+
+	if (libipw_parse_info_param
+	    (beacon->info_element, stats->len - sizeof(*beacon), network))
+		return 1;
+
+	network->mode = 0;
+	if (stats->freq == LIBIPW_52GHZ_BAND)
+		network->mode = IEEE_A;
+	else {
+		if (network->flags & NETWORK_HAS_OFDM)
+			network->mode |= IEEE_G;
+		if (network->flags & NETWORK_HAS_CCK)
+			network->mode |= IEEE_B;
+	}
+
+	if (network->mode == 0) {
+		LIBIPW_DEBUG_SCAN("Filtered out '%*pE (%pM)' network.\n",
+				  network->ssid_len, network->ssid,
+				  network->bssid);
+		return 1;
+	}
+
+	memcpy(&network->stats, stats, sizeof(network->stats));
+
+	return 0;
+}
+
+static inline int is_same_network(struct libipw_network *src,
+				  struct libipw_network *dst)
+{
+	/* A network is only a duplicate if the channel, BSSID, and ESSID
+	 * all match.  We treat all <hidden> with the same BSSID and channel
+	 * as one network */
+	return ((src->ssid_len == dst->ssid_len) &&
+		(src->channel == dst->channel) &&
+		ether_addr_equal_64bits(src->bssid, dst->bssid) &&
+		!memcmp(src->ssid, dst->ssid, src->ssid_len));
+}
+
+static void update_network(struct libipw_network *dst,
+				  struct libipw_network *src)
+{
+	int qos_active;
+	u8 old_param;
+
+	/* We only update the statistics if they were created by receiving
+	 * the network information on the actual channel the network is on.
+	 *
+	 * This keeps beacons received on neighbor channels from bringing
+	 * down the signal level of an AP. */
+	if (dst->channel == src->stats.received_channel)
+		memcpy(&dst->stats, &src->stats,
+		       sizeof(struct libipw_rx_stats));
+	else
+		LIBIPW_DEBUG_SCAN("Network %pM info received "
+			"off channel (%d vs. %d)\n", src->bssid,
+			dst->channel, src->stats.received_channel);
+
+	dst->capability = src->capability;
+	memcpy(dst->rates, src->rates, src->rates_len);
+	dst->rates_len = src->rates_len;
+	memcpy(dst->rates_ex, src->rates_ex, src->rates_ex_len);
+	dst->rates_ex_len = src->rates_ex_len;
+
+	dst->mode = src->mode;
+	dst->flags = src->flags;
+	dst->time_stamp[0] = src->time_stamp[0];
+	dst->time_stamp[1] = src->time_stamp[1];
+
+	dst->beacon_interval = src->beacon_interval;
+	dst->listen_interval = src->listen_interval;
+	dst->atim_window = src->atim_window;
+	dst->erp_value = src->erp_value;
+	dst->tim = src->tim;
+
+	memcpy(dst->wpa_ie, src->wpa_ie, src->wpa_ie_len);
+	dst->wpa_ie_len = src->wpa_ie_len;
+	memcpy(dst->rsn_ie, src->rsn_ie, src->rsn_ie_len);
+	dst->rsn_ie_len = src->rsn_ie_len;
+
+	dst->last_scanned = jiffies;
+	qos_active = src->qos_data.active;
+	old_param = dst->qos_data.old_param_count;
+	if (dst->flags & NETWORK_HAS_QOS_MASK)
+		memcpy(&dst->qos_data, &src->qos_data,
+		       sizeof(struct libipw_qos_data));
+	else {
+		dst->qos_data.supported = src->qos_data.supported;
+		dst->qos_data.param_count = src->qos_data.param_count;
+	}
+
+	if (dst->qos_data.supported == 1) {
+		if (dst->ssid_len)
+			LIBIPW_DEBUG_QOS
+			    ("QoS: network %s supports QoS\n",
+			     dst->ssid);
+		else
+			LIBIPW_DEBUG_QOS
+			    ("QoS: network supports QoS\n");
+	}
+	dst->qos_data.active = qos_active;
+	dst->qos_data.old_param_count = old_param;
+
+	/* dst->last_associate is not overwritten */
+}
+
+static inline int is_beacon(__le16 fc)
+{
+	return (WLAN_FC_GET_STYPE(le16_to_cpu(fc)) == IEEE80211_STYPE_BEACON);
+}
+
+static void
+libipw_process_probe_response(struct libipw_device *ieee,
+			      struct libipw_probe_response *beacon,
+			      struct libipw_rx_stats *stats)
+{
+	struct net_device *dev = ieee->dev;
+	struct libipw_network network = { };
+	struct libipw_network *target;
+	struct libipw_network *oldest = NULL;
+#ifdef CONFIG_LIBIPW_DEBUG
+	struct libipw_info_element *info_element = beacon->info_element;
+#endif
+	unsigned long flags;
+
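+	/* The capability bits printed below run from bit 0xf down to bit 0;
+	 * per IEEE 802.11 the low-order bits are: 0 ESS, 1 IBSS,
+	 * 2 CF-Pollable, 3 CF-Poll Request, 4 Privacy, 5 Short Preamble. */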
+	LIBIPW_DEBUG_SCAN("'%*pE' (%pM): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
+		     info_element->len, info_element->data,
+		     beacon->header.addr3,
+		     (beacon->capability & cpu_to_le16(1 << 0xf)) ? '1' : '0',
+		     (beacon->capability & cpu_to_le16(1 << 0xe)) ? '1' : '0',
+		     (beacon->capability & cpu_to_le16(1 << 0xd)) ? '1' : '0',
+		     (beacon->capability & cpu_to_le16(1 << 0xc)) ? '1' : '0',
+		     (beacon->capability & cpu_to_le16(1 << 0xb)) ? '1' : '0',
+		     (beacon->capability & cpu_to_le16(1 << 0xa)) ? '1' : '0',
+		     (beacon->capability & cpu_to_le16(1 << 0x9)) ? '1' : '0',
+		     (beacon->capability & cpu_to_le16(1 << 0x8)) ? '1' : '0',
+		     (beacon->capability & cpu_to_le16(1 << 0x7)) ? '1' : '0',
+		     (beacon->capability & cpu_to_le16(1 << 0x6)) ? '1' : '0',
+		     (beacon->capability & cpu_to_le16(1 << 0x5)) ? '1' : '0',
+		     (beacon->capability & cpu_to_le16(1 << 0x4)) ? '1' : '0',
+		     (beacon->capability & cpu_to_le16(1 << 0x3)) ? '1' : '0',
+		     (beacon->capability & cpu_to_le16(1 << 0x2)) ? '1' : '0',
+		     (beacon->capability & cpu_to_le16(1 << 0x1)) ? '1' : '0',
+		     (beacon->capability & cpu_to_le16(1 << 0x0)) ? '1' : '0');
+
+	if (libipw_network_init(ieee, beacon, &network, stats)) {
+		LIBIPW_DEBUG_SCAN("Dropped '%*pE' (%pM) via %s.\n",
+				  info_element->len, info_element->data,
+				  beacon->header.addr3,
+				  is_beacon(beacon->header.frame_ctl) ?
+				  "BEACON" : "PROBE RESPONSE");
+		return;
+	}
+
+	/* The network parsed correctly -- so now we scan our known networks
+	 * to see if we can find it in our list.
+	 *
+	 * NOTE:  This search is definitely not optimized.  Once it's doing
+	 *        the "right thing" we'll optimize it for efficiency if
+	 *        necessary */
+
+	/* Search for this entry in the list and update it if it is
+	 * already there. */
+
+	spin_lock_irqsave(&ieee->lock, flags);
+
+	list_for_each_entry(target, &ieee->network_list, list) {
+		if (is_same_network(target, &network))
+			break;
+
+		if ((oldest == NULL) ||
+		    time_before(target->last_scanned, oldest->last_scanned))
+			oldest = target;
+	}
+
+	/* If we didn't find a match, then get a new network slot to initialize
+	 * with this beacon's information */
+	if (&target->list == &ieee->network_list) {
+		if (list_empty(&ieee->network_free_list)) {
+			/* If there are no more slots, expire the oldest */
+			list_del(&oldest->list);
+			target = oldest;
+			LIBIPW_DEBUG_SCAN("Expired '%*pE' (%pM) from network list.\n",
+					  target->ssid_len, target->ssid,
+					  target->bssid);
+		} else {
+			/* Otherwise just pull from the free list */
+			target = list_entry(ieee->network_free_list.next,
+					    struct libipw_network, list);
+			list_del(ieee->network_free_list.next);
+		}
+
+#ifdef CONFIG_LIBIPW_DEBUG
+		LIBIPW_DEBUG_SCAN("Adding '%*pE' (%pM) via %s.\n",
+				  network.ssid_len, network.ssid,
+				  network.bssid,
+				  is_beacon(beacon->header.frame_ctl) ?
+				  "BEACON" : "PROBE RESPONSE");
+#endif
+		memcpy(target, &network, sizeof(*target));
+		list_add_tail(&target->list, &ieee->network_list);
+	} else {
+		LIBIPW_DEBUG_SCAN("Updating '%*pE' (%pM) via %s.\n",
+				  target->ssid_len, target->ssid,
+				  target->bssid,
+				  is_beacon(beacon->header.frame_ctl) ?
+				  "BEACON" : "PROBE RESPONSE");
+		update_network(target, &network);
+	}
+
+	spin_unlock_irqrestore(&ieee->lock, flags);
+
+	if (is_beacon(beacon->header.frame_ctl)) {
+		if (ieee->handle_beacon != NULL)
+			ieee->handle_beacon(dev, beacon, target);
+	} else {
+		if (ieee->handle_probe_response != NULL)
+			ieee->handle_probe_response(dev, beacon, target);
+	}
+}
+
+void libipw_rx_mgt(struct libipw_device *ieee,
+		      struct libipw_hdr_4addr *header,
+		      struct libipw_rx_stats *stats)
+{
+	switch (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl))) {
+	case IEEE80211_STYPE_ASSOC_RESP:
+		LIBIPW_DEBUG_MGMT("received ASSOCIATION RESPONSE (%d)\n",
+				     WLAN_FC_GET_STYPE(le16_to_cpu
+						       (header->frame_ctl)));
+		libipw_handle_assoc_resp(ieee,
+					    (struct libipw_assoc_response *)
+					    header, stats);
+		break;
+
+	case IEEE80211_STYPE_REASSOC_RESP:
+		LIBIPW_DEBUG_MGMT("received REASSOCIATION RESPONSE (%d)\n",
+				     WLAN_FC_GET_STYPE(le16_to_cpu
+						       (header->frame_ctl)));
+		break;
+
+	case IEEE80211_STYPE_PROBE_REQ:
+		LIBIPW_DEBUG_MGMT("received auth (%d)\n",
+				     WLAN_FC_GET_STYPE(le16_to_cpu
+						       (header->frame_ctl)));
+
+		if (ieee->handle_probe_request != NULL)
+			ieee->handle_probe_request(ieee->dev,
+						   (struct
+						    libipw_probe_request *)
+						   header, stats);
+		break;
+
+	case IEEE80211_STYPE_PROBE_RESP:
+		LIBIPW_DEBUG_MGMT("received PROBE RESPONSE (%d)\n",
+				     WLAN_FC_GET_STYPE(le16_to_cpu
+						       (header->frame_ctl)));
+		LIBIPW_DEBUG_SCAN("Probe response\n");
+		libipw_process_probe_response(ieee,
+						 (struct
+						  libipw_probe_response *)
+						 header, stats);
+		break;
+
+	case IEEE80211_STYPE_BEACON:
+		LIBIPW_DEBUG_MGMT("received BEACON (%d)\n",
+				     WLAN_FC_GET_STYPE(le16_to_cpu
+						       (header->frame_ctl)));
+		LIBIPW_DEBUG_SCAN("Beacon\n");
+		libipw_process_probe_response(ieee,
+						 (struct
+						  libipw_probe_response *)
+						 header, stats);
+		break;
+	case IEEE80211_STYPE_AUTH:
+
+		LIBIPW_DEBUG_MGMT("received auth (%d)\n",
+				     WLAN_FC_GET_STYPE(le16_to_cpu
+						       (header->frame_ctl)));
+
+		if (ieee->handle_auth != NULL)
+			ieee->handle_auth(ieee->dev,
+					  (struct libipw_auth *)header);
+		break;
+
+	case IEEE80211_STYPE_DISASSOC:
+		if (ieee->handle_disassoc != NULL)
+			ieee->handle_disassoc(ieee->dev,
+					      (struct libipw_disassoc *)
+					      header);
+		break;
+
+	case IEEE80211_STYPE_ACTION:
+		LIBIPW_DEBUG_MGMT("ACTION\n");
+		if (ieee->handle_action)
+			ieee->handle_action(ieee->dev,
+					    (struct libipw_action *)
+					    header, stats);
+		break;
+
+	case IEEE80211_STYPE_REASSOC_REQ:
+		LIBIPW_DEBUG_MGMT("received reassoc (%d)\n",
+				     WLAN_FC_GET_STYPE(le16_to_cpu
+						       (header->frame_ctl)));
+
+		LIBIPW_DEBUG_MGMT("%s: LIBIPW_REASSOC_REQ received\n",
+				     ieee->dev->name);
+		if (ieee->handle_reassoc_request != NULL)
+			ieee->handle_reassoc_request(ieee->dev,
+						    (struct libipw_reassoc_request *)
+						     header);
+		break;
+
+	case IEEE80211_STYPE_ASSOC_REQ:
+		LIBIPW_DEBUG_MGMT("received assoc (%d)\n",
+				     WLAN_FC_GET_STYPE(le16_to_cpu
+						       (header->frame_ctl)));
+
+		LIBIPW_DEBUG_MGMT("%s: LIBIPW_ASSOC_REQ received\n",
+				     ieee->dev->name);
+		if (ieee->handle_assoc_request != NULL)
+			ieee->handle_assoc_request(ieee->dev);
+		break;
+
+	case IEEE80211_STYPE_DEAUTH:
+		LIBIPW_DEBUG_MGMT("DEAUTH\n");
+		if (ieee->handle_deauth != NULL)
+			ieee->handle_deauth(ieee->dev,
+					    (struct libipw_deauth *)
+					    header);
+		break;
+	default:
+		LIBIPW_DEBUG_MGMT("received UNKNOWN (%d)\n",
+				     WLAN_FC_GET_STYPE(le16_to_cpu
+						       (header->frame_ctl)));
+		LIBIPW_DEBUG_MGMT("%s: Unknown management packet: %d\n",
+				     ieee->dev->name,
+				     WLAN_FC_GET_STYPE(le16_to_cpu
+						       (header->frame_ctl)));
+		break;
+	}
+}
+
+EXPORT_SYMBOL_GPL(libipw_rx_any);
+EXPORT_SYMBOL(libipw_rx_mgt);
+EXPORT_SYMBOL(libipw_rx);
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_tx.c b/drivers/net/wireless/intel/ipw2x00/libipw_tx.c
new file mode 100644
index 0000000..84205aa
--- /dev/null
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_tx.c
@@ -0,0 +1,533 @@
+/******************************************************************************
+
+  Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+
+  The full GNU General Public License is included in this distribution in the
+  file called LICENSE.
+
+  Contact Information:
+  Intel Linux Wireless <ilw@linux.intel.com>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+******************************************************************************/
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/in6.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/proc_fs.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/tcp.h>
+#include <linux/types.h>
+#include <linux/wireless.h>
+#include <linux/etherdevice.h>
+#include <linux/uaccess.h>
+
+#include "libipw.h"
+
+/*
+
+802.11 Data Frame
+
+      ,-------------------------------------------------------------------.
+Bytes |  2   |  2   |    6    |    6    |    6    |  2   | 0..2312 |   4  |
+      |------|------|---------|---------|---------|------|---------|------|
+Desc. | ctrl | dura |  DA/RA  |   TA    |    SA   | Sequ |  Frame  |  fcs |
+      |      | tion | (BSSID) |         |         | ence |  data   |      |
+      `--------------------------------------------------|         |------'
+Total: 28 non-data bytes                                 `----.----'
+							      |
+       .- 'Frame data' expands, if WEP enabled, to <----------'
+       |
+       V
+      ,-----------------------.
+Bytes |  4  |   0-2296  |  4  |
+      |-----|-----------|-----|
+Desc. | IV  | Encrypted | ICV |
+      |     | Packet    |     |
+      `-----|           |-----'
+	    `-----.-----'
+		  |
+       .- 'Encrypted Packet' expands to
+       |
+       V
+      ,---------------------------------------------------.
+Bytes |  1   |  1   |    1    |    3     |  2   |  0-2304 |
+      |------|------|---------|----------|------|---------|
+Desc. | SNAP | SNAP | Control |Eth Tunnel| Type | IP      |
+      | DSAP | SSAP |         |          |      | Packet  |
+      | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8|      |         |
+      `---------------------------------------------------'
+Total: 8 non-data bytes
+
+802.3 Ethernet Data Frame
+
+      ,-----------------------------------------.
+Bytes |   6   |   6   |  2   |  Variable |   4  |
+      |-------|-------|------|-----------|------|
+Desc. | Dest. | Source| Type | IP Packet |  fcs |
+      |  MAC  |  MAC  |      |           |      |
+      `-----------------------------------------'
+Total: 18 non-data bytes
+
+In the event that fragmentation is required, the incoming payload is split into
+N parts of size ieee->fts.  The first fragment contains the SNAP header and the
+remaining packets are just data.
+
+If encryption is enabled, each fragment payload size is reduced by enough space
+to add the prefix and postfix (IV and ICV totalling 8 bytes in the case of WEP).
+So if you have 1500 bytes of payload with ieee->fts set to 500, without
+encryption it will take 3 frames.  With WEP it will take 4 frames, as the
+payload of each frame is reduced to 492 bytes.
+
+* SKB visualization
+*
+*  ,- skb->data
+* |
+* |    ETHERNET HEADER        ,-<-- PAYLOAD
+* |                           |     14 bytes from skb->data
+* |  2 bytes for Type --> ,T. |     (sizeof ethhdr)
+* |                       | | |
+* |,-Dest.--. ,--Src.---. | | |
+* |  6 bytes| | 6 bytes | | | |
+* v         | |         | | | |
+* 0         | v       1 | v | v           2
+* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+*     ^     | ^         | ^ |
+*     |     | |         | | |
+*     |     | |         | `T' <---- 2 bytes for Type
+*     |     | |         |
+*     |     | '---SNAP--' <-------- 6 bytes for SNAP
+*     |     |
+*     `-IV--' <-------------------- 4 bytes for IV (WEP)
+*
+*      SNAP HEADER
+*
+*/
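+
+/*
+ * A worked example of the fragmentation arithmetic above (illustrative
+ * only; 'payload' and 'fts' are hypothetical local values, not driver
+ * state):
+ *
+ *	int payload = 1500, fts = 500;
+ *	int per_frag = fts;                       // no encryption
+ *	int nr = DIV_ROUND_UP(payload, per_frag); // 3 frames
+ *
+ *	per_frag = fts - 8;                       // WEP: 4-byte IV + 4-byte ICV
+ *	nr = DIV_ROUND_UP(payload, per_frag);     // ceil(1500 / 492) == 4 frames
+ */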
+
+static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
+static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
+
+static int libipw_copy_snap(u8 * data, __be16 h_proto)
+{
+	struct libipw_snap_hdr *snap;
+	u8 *oui;
+
+	snap = (struct libipw_snap_hdr *)data;
+	snap->dsap = 0xaa;
+	snap->ssap = 0xaa;
+	snap->ctrl = 0x03;
+
+	if (h_proto == htons(ETH_P_AARP) || h_proto == htons(ETH_P_IPX))
+		oui = P802_1H_OUI;
+	else
+		oui = RFC1042_OUI;
+	snap->oui[0] = oui[0];
+	snap->oui[1] = oui[1];
+	snap->oui[2] = oui[2];
+
+	memcpy(data + SNAP_SIZE, &h_proto, sizeof(u16));
+
+	return SNAP_SIZE + sizeof(u16);
+}
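+
+/*
+ * For an IPv4 payload (h_proto == htons(ETH_P_IP)) the eight bytes
+ * written above are the standard RFC 1042 encapsulation:
+ * aa aa (DSAP/SSAP), 03 (UI control), 00 00 00 (OUI), 08 00 (EtherType).
+ */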
+
+static int libipw_encrypt_fragment(struct libipw_device *ieee,
+					     struct sk_buff *frag, int hdr_len)
+{
+	struct lib80211_crypt_data *crypt =
+		ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
+	int res;
+
+	if (crypt == NULL)
+		return -1;
+
+	/* To encrypt, frame format is:
+	 * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */
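+	/* Hold a reference so a concurrent lib80211 delayed deinit cannot
+	 * free the crypto state while this fragment is being encrypted. */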
+	atomic_inc(&crypt->refcnt);
+	res = 0;
+	if (crypt->ops && crypt->ops->encrypt_mpdu)
+		res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);
+
+	atomic_dec(&crypt->refcnt);
+	if (res < 0) {
+		printk(KERN_INFO "%s: Encryption failed: len=%d.\n",
+		       ieee->dev->name, frag->len);
+		ieee->ieee_stats.tx_discards++;
+		return -1;
+	}
+
+	return 0;
+}
+
+void libipw_txb_free(struct libipw_txb *txb)
+{
+	int i;
+	if (unlikely(!txb))
+		return;
+	for (i = 0; i < txb->nr_frags; i++)
+		if (txb->fragments[i])
+			dev_kfree_skb_any(txb->fragments[i]);
+	kfree(txb);
+}
+
+static struct libipw_txb *libipw_alloc_txb(int nr_frags, int txb_size,
+						 int headroom, gfp_t gfp_mask)
+{
+	struct libipw_txb *txb;
+	int i;
+	txb = kmalloc(sizeof(struct libipw_txb) + (sizeof(u8 *) * nr_frags),
+		      gfp_mask);
+	if (!txb)
+		return NULL;
+
+	memset(txb, 0, sizeof(struct libipw_txb));
+	txb->nr_frags = nr_frags;
+	txb->frag_size = txb_size;
+
+	for (i = 0; i < nr_frags; i++) {
+		txb->fragments[i] = __dev_alloc_skb(txb_size + headroom,
+						    gfp_mask);
+		if (unlikely(!txb->fragments[i])) {
+			i--;
+			break;
+		}
+		skb_reserve(txb->fragments[i], headroom);
+	}
+	if (unlikely(i != nr_frags)) {
+		while (i >= 0)
+			dev_kfree_skb_any(txb->fragments[i--]);
+		kfree(txb);
+		return NULL;
+	}
+	return txb;
+}
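+
+/*
+ * The allocation above sizes the flexible fragments[] array that follows
+ * struct libipw_txb.  Illustrative call (hypothetical values):
+ * libipw_alloc_txb(3, 500, 32, GFP_ATOMIC) returns a txb carrying three
+ * skbs, each allocated at 532 bytes with 32 bytes of headroom reserved.
+ */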
+
+static int libipw_classify(struct sk_buff *skb)
+{
+	struct ethhdr *eth;
+	struct iphdr *ip;
+
+	eth = (struct ethhdr *)skb->data;
+	if (eth->h_proto != htons(ETH_P_IP))
+		return 0;
+
+	ip = ip_hdr(skb);
+	switch (ip->tos & 0xfc) {
+	case 0x20:
+		return 2;
+	case 0x40:
+		return 1;
+	case 0x60:
+		return 3;
+	case 0x80:
+		return 4;
+	case 0xa0:
+		return 5;
+	case 0xc0:
+		return 6;
+	case 0xe0:
+		return 7;
+	default:
+		return 0;
+	}
+}
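+
+/*
+ * The switch above matches only TOS values whose bits below the IP
+ * precedence field are clear; e.g. tos 0xe0 maps to TID 7 and tos 0x20
+ * to TID 2, while tos 0xb8 hits no case and falls back to 0.
+ */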
+
+/* Incoming skb is converted to a txb which consists of
+ * a block of 802.11 fragment packets (stored as skbs) */
+netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct libipw_device *ieee = netdev_priv(dev);
+	struct libipw_txb *txb = NULL;
+	struct libipw_hdr_3addrqos *frag_hdr;
+	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size,
+	    rts_required;
+	unsigned long flags;
+	int encrypt, host_encrypt, host_encrypt_msdu;
+	__be16 ether_type;
+	int bytes, fc, hdr_len;
+	struct sk_buff *skb_frag;
+	struct libipw_hdr_3addrqos header = {/* Ensure zero initialized */
+		.duration_id = 0,
+		.seq_ctl = 0,
+		.qos_ctl = 0
+	};
+	u8 dest[ETH_ALEN], src[ETH_ALEN];
+	struct lib80211_crypt_data *crypt;
+	int priority = skb->priority;
+	int snapped = 0;
+
+	if (ieee->is_queue_full && (*ieee->is_queue_full) (dev, priority))
+		return NETDEV_TX_BUSY;
+
+	spin_lock_irqsave(&ieee->lock, flags);
+
+	/* If there is no driver handler to take the TXB, don't bother
+	 * creating it... */
+	if (!ieee->hard_start_xmit) {
+		printk(KERN_WARNING "%s: No xmit handler.\n", ieee->dev->name);
+		goto success;
+	}
+
+	if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
+		printk(KERN_WARNING "%s: skb too small (%d).\n",
+		       ieee->dev->name, skb->len);
+		goto success;
+	}
+
+	ether_type = ((struct ethhdr *)skb->data)->h_proto;
+
+	crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
+
+	encrypt = !(ether_type == htons(ETH_P_PAE) && ieee->ieee802_1x) &&
+	    ieee->sec.encrypt;
+
+	host_encrypt = ieee->host_encrypt && encrypt && crypt;
+	host_encrypt_msdu = ieee->host_encrypt_msdu && encrypt && crypt;
+
+	if (!encrypt && ieee->ieee802_1x &&
+	    ieee->drop_unencrypted && ether_type != htons(ETH_P_PAE)) {
+		dev->stats.tx_dropped++;
+		goto success;
+	}
+
+	/* Save source and destination addresses */
+	skb_copy_from_linear_data(skb, dest, ETH_ALEN);
+	skb_copy_from_linear_data_offset(skb, ETH_ALEN, src, ETH_ALEN);
+
+	if (host_encrypt)
+		fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
+		    IEEE80211_FCTL_PROTECTED;
+	else
+		fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA;
+
+	if (ieee->iw_mode == IW_MODE_INFRA) {
+		fc |= IEEE80211_FCTL_TODS;
+		/* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */
+		memcpy(header.addr1, ieee->bssid, ETH_ALEN);
+		memcpy(header.addr2, src, ETH_ALEN);
+		memcpy(header.addr3, dest, ETH_ALEN);
+	} else if (ieee->iw_mode == IW_MODE_ADHOC) {
+		/* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */
+		memcpy(header.addr1, dest, ETH_ALEN);
+		memcpy(header.addr2, src, ETH_ALEN);
+		memcpy(header.addr3, ieee->bssid, ETH_ALEN);
+	}
+	hdr_len = LIBIPW_3ADDR_LEN;
+
+	if (ieee->is_qos_active && ieee->is_qos_active(dev, skb)) {
+		fc |= IEEE80211_STYPE_QOS_DATA;
+		hdr_len += 2;
+
+		skb->priority = libipw_classify(skb);
+		header.qos_ctl |= cpu_to_le16(skb->priority & LIBIPW_QCTL_TID);
+	}
+	header.frame_ctl = cpu_to_le16(fc);
+
+	/* Advance the SKB to the start of the payload */
+	skb_pull(skb, sizeof(struct ethhdr));
+
+	/* Determine total amount of storage required for TXB packets */
+	bytes = skb->len + SNAP_SIZE + sizeof(u16);
+
+	/* Encrypt msdu first on the whole data packet. */
+	if ((host_encrypt || host_encrypt_msdu) &&
+	    crypt && crypt->ops && crypt->ops->encrypt_msdu) {
+		int res = 0;
+		int len = bytes + hdr_len + crypt->ops->extra_msdu_prefix_len +
+		    crypt->ops->extra_msdu_postfix_len;
+		struct sk_buff *skb_new = dev_alloc_skb(len);
+
+		if (unlikely(!skb_new))
+			goto failed;
+
+		skb_reserve(skb_new, crypt->ops->extra_msdu_prefix_len);
+		skb_put_data(skb_new, &header, hdr_len);
+		snapped = 1;
+		libipw_copy_snap(skb_put(skb_new, SNAP_SIZE + sizeof(u16)),
+				    ether_type);
+		skb_copy_from_linear_data(skb, skb_put(skb_new, skb->len), skb->len);
+		res = crypt->ops->encrypt_msdu(skb_new, hdr_len, crypt->priv);
+		if (res < 0) {
+			LIBIPW_ERROR("msdu encryption failed\n");
+			dev_kfree_skb_any(skb_new);
+			goto failed;
+		}
+		dev_kfree_skb_any(skb);
+		skb = skb_new;
+		bytes += crypt->ops->extra_msdu_prefix_len +
+		    crypt->ops->extra_msdu_postfix_len;
+		skb_pull(skb, hdr_len);
+	}
+
+	if (host_encrypt || ieee->host_open_frag) {
+		/* Determine fragmentation size based on destination (multicast
+		 * and broadcast are not fragmented) */
+		if (is_multicast_ether_addr(dest) ||
+		    is_broadcast_ether_addr(dest))
+			frag_size = MAX_FRAG_THRESHOLD;
+		else
+			frag_size = ieee->fts;
+
+		/* Determine amount of payload per fragment.  Regardless of
+		 * whether this stack is providing the full 802.11 header, one
+		 * will eventually be affixed to this fragment -- so we must
+		 * account for it when determining the amount of payload space. */
+		bytes_per_frag = frag_size - hdr_len;
+		if (ieee->config &
+		    (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS))
+			bytes_per_frag -= LIBIPW_FCS_LEN;
+
+		/* Each fragment may need to have room for encryption
+		 * pre/postfix */
+		if (host_encrypt)
+			bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
+			    crypt->ops->extra_mpdu_postfix_len;
+
+		/* Number of fragments is the total
+		 * bytes_per_frag / payload_per_fragment */
+		nr_frags = bytes / bytes_per_frag;
+		bytes_last_frag = bytes % bytes_per_frag;
+		if (bytes_last_frag)
+			nr_frags++;
+		else
+			bytes_last_frag = bytes_per_frag;
+	} else {
+		nr_frags = 1;
+		bytes_per_frag = bytes_last_frag = bytes;
+		frag_size = bytes + hdr_len;
+	}
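+
+	/*
+	 * Example (illustrative numbers): with ieee->fts == 500 and the
+	 * 24-byte 3-address header, bytes_per_frag starts at 476; host
+	 * WEP encryption (4-byte IV prefix plus 4-byte ICV postfix)
+	 * shrinks it further to 468.
+	 */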
+
+	rts_required = (frag_size > ieee->rts
+			&& ieee->config & CFG_LIBIPW_RTS);
+	if (rts_required)
+		nr_frags++;
+
+	/* When we allocate the TXB we allocate enough space for the reserve
+	 * and full fragment bytes (bytes_per_frag doesn't include prefix,
+	 * postfix, header, FCS, etc.) */
+	txb = libipw_alloc_txb(nr_frags, frag_size,
+				  ieee->tx_headroom, GFP_ATOMIC);
+	if (unlikely(!txb)) {
+		printk(KERN_WARNING "%s: Could not allocate TXB\n",
+		       ieee->dev->name);
+		goto failed;
+	}
+	txb->encrypted = encrypt;
+	if (host_encrypt)
+		txb->payload_size = frag_size * (nr_frags - 1) +
+		    bytes_last_frag;
+	else
+		txb->payload_size = bytes;
+
+	if (rts_required) {
+		skb_frag = txb->fragments[0];
+		frag_hdr = skb_put(skb_frag, hdr_len);
+
+		/*
+		 * Set header frame_ctl to the RTS.
+		 */
+		header.frame_ctl =
+		    cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS);
+		memcpy(frag_hdr, &header, hdr_len);
+
+		/*
+		 * Restore header frame_ctl to the original data setting.
+		 */
+		header.frame_ctl = cpu_to_le16(fc);
+
+		if (ieee->config &
+		    (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS))
+			skb_put(skb_frag, 4);
+
+		txb->rts_included = 1;
+		i = 1;
+	} else
+		i = 0;
+
+	for (; i < nr_frags; i++) {
+		skb_frag = txb->fragments[i];
+
+		if (host_encrypt)
+			skb_reserve(skb_frag,
+				    crypt->ops->extra_mpdu_prefix_len);
+
+		frag_hdr = skb_put_data(skb_frag, &header, hdr_len);
+
+		/* If this is not the last fragment, then add the MOREFRAGS
+		 * bit to the frame control */
+		if (i != nr_frags - 1) {
+			frag_hdr->frame_ctl =
+			    cpu_to_le16(fc | IEEE80211_FCTL_MOREFRAGS);
+			bytes = bytes_per_frag;
+		} else {
+			/* The last fragment takes the remaining length */
+			bytes = bytes_last_frag;
+		}
+
+		if (i == 0 && !snapped) {
+			libipw_copy_snap(skb_put
+					    (skb_frag, SNAP_SIZE + sizeof(u16)),
+					    ether_type);
+			bytes -= SNAP_SIZE + sizeof(u16);
+		}
+
+		skb_copy_from_linear_data(skb, skb_put(skb_frag, bytes), bytes);
+
+		/* Advance the SKB... */
+		skb_pull(skb, bytes);
+
+		/* Encryption routine will move the header forward in order
+		 * to insert the IV between the header and the payload */
+		if (host_encrypt)
+			libipw_encrypt_fragment(ieee, skb_frag, hdr_len);
+
+		if (ieee->config &
+		    (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS))
+			skb_put(skb_frag, 4);
+	}
+
+      success:
+	spin_unlock_irqrestore(&ieee->lock, flags);
+
+	dev_kfree_skb_any(skb);
+
+	if (txb) {
+		netdev_tx_t ret = (*ieee->hard_start_xmit)(txb, dev, priority);
+		if (ret == NETDEV_TX_OK) {
+			dev->stats.tx_packets++;
+			dev->stats.tx_bytes += txb->payload_size;
+			return NETDEV_TX_OK;
+		}
+
+		libipw_txb_free(txb);
+	}
+
+	return NETDEV_TX_OK;
+
+      failed:
+	spin_unlock_irqrestore(&ieee->lock, flags);
+	netif_stop_queue(dev);
+	dev->stats.tx_errors++;
+	return NETDEV_TX_BUSY;
+}
+EXPORT_SYMBOL(libipw_xmit);
+
+EXPORT_SYMBOL(libipw_txb_free);
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
new file mode 100644
index 0000000..d32d39f
--- /dev/null
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c
@@ -0,0 +1,738 @@
+/******************************************************************************
+
+  Copyright(c) 2004-2005 Intel Corporation. All rights reserved.
+
+  Portions of this file are based on the WEP enablement code provided by the
+  Host AP project hostap-drivers v0.1.3
+  Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+  <j@w1.fi>
+  Copyright (c) 2002-2003, Jouni Malinen <j@w1.fi>
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+
+  The full GNU General Public License is included in this distribution in the
+  file called LICENSE.
+
+  Contact Information:
+  Intel Linux Wireless <ilw@linux.intel.com>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+******************************************************************************/
+
+#include <linux/hardirq.h>
+#include <linux/kmod.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+
+#include <net/lib80211.h>
+#include <linux/wireless.h>
+
+#include "libipw.h"
+
+static const char *libipw_modes[] = {
+	"?", "a", "b", "ab", "g", "ag", "bg", "abg"
+};
+
+static inline unsigned int elapsed_jiffies_msecs(unsigned long start)
+{
+	unsigned long end = jiffies;
+
+	if (end >= start)
+		return jiffies_to_msecs(end - start);
+
+	return jiffies_to_msecs(end + (MAX_JIFFY_OFFSET - start) + 1);
+}
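+
+/*
+ * The else branch above handles jiffies wrap: e.g. (hypothetical values,
+ * HZ == 1000) with start == MAX_JIFFY_OFFSET - 5 and end == 4 the
+ * function returns jiffies_to_msecs(4 + 5 + 1) == 10.
+ */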
+
+#define MAX_CUSTOM_LEN 64
+static char *libipw_translate_scan(struct libipw_device *ieee,
+				      char *start, char *stop,
+				      struct libipw_network *network,
+				      struct iw_request_info *info)
+{
+	char custom[MAX_CUSTOM_LEN];
+	char *p;
+	struct iw_event iwe;
+	int i, j;
+	char *current_val;	/* For rates */
+	u8 rate;
+
+	/* First entry *MUST* be the AP MAC address */
+	iwe.cmd = SIOCGIWAP;
+	iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+	memcpy(iwe.u.ap_addr.sa_data, network->bssid, ETH_ALEN);
+	start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_ADDR_LEN);
+
+	/* Remaining entries will be displayed in the order we provide them */
+
+	/* Add the ESSID */
+	iwe.cmd = SIOCGIWESSID;
+	iwe.u.data.flags = 1;
+	iwe.u.data.length = min(network->ssid_len, (u8) 32);
+	start = iwe_stream_add_point(info, start, stop,
+				     &iwe, network->ssid);
+
+	/* Add the protocol name */
+	iwe.cmd = SIOCGIWNAME;
+	snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11%s",
+		 libipw_modes[network->mode]);
+	start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_CHAR_LEN);
+
+	/* Add mode */
+	iwe.cmd = SIOCGIWMODE;
+	if (network->capability & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)) {
+		if (network->capability & WLAN_CAPABILITY_ESS)
+			iwe.u.mode = IW_MODE_MASTER;
+		else
+			iwe.u.mode = IW_MODE_ADHOC;
+
+		start = iwe_stream_add_event(info, start, stop,
+					     &iwe, IW_EV_UINT_LEN);
+	}
+
+	/* Add channel and frequency */
+	/* Note : userspace automatically computes channel using iwrange */
+	iwe.cmd = SIOCGIWFREQ;
+	iwe.u.freq.m = libipw_channel_to_freq(ieee, network->channel);
+	iwe.u.freq.e = 6;
+	iwe.u.freq.i = 0;
+	start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_FREQ_LEN);
+
+	/* Add encryption capability */
+	iwe.cmd = SIOCGIWENCODE;
+	if (network->capability & WLAN_CAPABILITY_PRIVACY)
+		iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+	else
+		iwe.u.data.flags = IW_ENCODE_DISABLED;
+	iwe.u.data.length = 0;
+	start = iwe_stream_add_point(info, start, stop,
+				     &iwe, network->ssid);
+
+	/* Add basic and extended rates */
+	/* Rate : stuffing multiple values in a single event requires a bit
+	 * more magic - Jean II */
+	current_val = start + iwe_stream_lcp_len(info);
+	iwe.cmd = SIOCGIWRATE;
+	/* Those two flags are ignored... */
+	iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+
+	for (i = 0, j = 0; i < network->rates_len;) {
+		if (j < network->rates_ex_len &&
+		    ((network->rates_ex[j] & 0x7F) <
+		     (network->rates[i] & 0x7F)))
+			rate = network->rates_ex[j++] & 0x7F;
+		else
+			rate = network->rates[i++] & 0x7F;
+		/* Bit rate given in 500 kb/s units (+ 0x80) */
+		iwe.u.bitrate.value = ((rate & 0x7f) * 500000);
+		/* Add new value to event */
+		current_val = iwe_stream_add_value(info, start, current_val,
+						   stop, &iwe, IW_EV_PARAM_LEN);
+	}
+	for (; j < network->rates_ex_len; j++) {
+		rate = network->rates_ex[j] & 0x7F;
+		/* Bit rate given in 500 kb/s units (+ 0x80) */
+		iwe.u.bitrate.value = ((rate & 0x7f) * 500000);
+		/* Add new value to event */
+		current_val = iwe_stream_add_value(info, start, current_val,
+						   stop, &iwe, IW_EV_PARAM_LEN);
+	}
+	/* Check if we added any rate */
+	if ((current_val - start) > iwe_stream_lcp_len(info))
+		start = current_val;
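+
+	/*
+	 * Rate bytes encode the value in 500 kb/s units, with 0x80
+	 * flagging a basic rate; e.g. a raw rate byte of 0x96 gives
+	 * (0x96 & 0x7f) * 500000 == 11 Mb/s.
+	 */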
+
+	/* Add quality statistics */
+	iwe.cmd = IWEVQUAL;
+	iwe.u.qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
+	    IW_QUAL_NOISE_UPDATED;
+
+	if (!(network->stats.mask & LIBIPW_STATMASK_RSSI)) {
+		iwe.u.qual.updated |= IW_QUAL_QUAL_INVALID |
+		    IW_QUAL_LEVEL_INVALID;
+		iwe.u.qual.qual = 0;
+	} else {
+		if (ieee->perfect_rssi == ieee->worst_rssi)
+			iwe.u.qual.qual = 100;
+		else
+			iwe.u.qual.qual =
+			    (100 *
+			     (ieee->perfect_rssi - ieee->worst_rssi) *
+			     (ieee->perfect_rssi - ieee->worst_rssi) -
+			     (ieee->perfect_rssi - network->stats.rssi) *
+			     (15 * (ieee->perfect_rssi - ieee->worst_rssi) +
+			      62 * (ieee->perfect_rssi -
+				    network->stats.rssi))) /
+			    ((ieee->perfect_rssi -
+			      ieee->worst_rssi) * (ieee->perfect_rssi -
+						   ieee->worst_rssi));
+		if (iwe.u.qual.qual > 100)
+			iwe.u.qual.qual = 100;
+		else if (iwe.u.qual.qual < 1)
+			iwe.u.qual.qual = 0;
+	}
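+
+	/*
+	 * A worked example of the quality curve above (hypothetical
+	 * values): with perfect_rssi == -20, worst_rssi == -85 and a
+	 * measured rssi of -60, the numerator is 100 * 65 * 65 -
+	 * 40 * (15 * 65 + 62 * 40) == 284300, giving
+	 * 284300 / (65 * 65) == 67 out of 100.
+	 */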
+
+	if (!(network->stats.mask & LIBIPW_STATMASK_NOISE)) {
+		iwe.u.qual.updated |= IW_QUAL_NOISE_INVALID;
+		iwe.u.qual.noise = 0;
+	} else {
+		iwe.u.qual.noise = network->stats.noise;
+	}
+
+	if (!(network->stats.mask & LIBIPW_STATMASK_SIGNAL)) {
+		iwe.u.qual.updated |= IW_QUAL_LEVEL_INVALID;
+		iwe.u.qual.level = 0;
+	} else {
+		iwe.u.qual.level = network->stats.signal;
+	}
+
+	start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_QUAL_LEN);
+
+	iwe.cmd = IWEVCUSTOM;
+	p = custom;
+
+	iwe.u.data.length = p - custom;
+	if (iwe.u.data.length)
+		start = iwe_stream_add_point(info, start, stop, &iwe, custom);
+
+	memset(&iwe, 0, sizeof(iwe));
+	if (network->wpa_ie_len) {
+		char buf[MAX_WPA_IE_LEN];
+		memcpy(buf, network->wpa_ie, network->wpa_ie_len);
+		iwe.cmd = IWEVGENIE;
+		iwe.u.data.length = network->wpa_ie_len;
+		start = iwe_stream_add_point(info, start, stop, &iwe, buf);
+	}
+
+	memset(&iwe, 0, sizeof(iwe));
+	if (network->rsn_ie_len) {
+		char buf[MAX_WPA_IE_LEN];
+		memcpy(buf, network->rsn_ie, network->rsn_ie_len);
+		iwe.cmd = IWEVGENIE;
+		iwe.u.data.length = network->rsn_ie_len;
+		start = iwe_stream_add_point(info, start, stop, &iwe, buf);
+	}
+
+	/* Add EXTRA: Age to display seconds since last beacon/probe response
+	 * for the given network. */
+	iwe.cmd = IWEVCUSTOM;
+	p = custom;
+	p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
+		      " Last beacon: %ums ago",
+		      elapsed_jiffies_msecs(network->last_scanned));
+	iwe.u.data.length = p - custom;
+	if (iwe.u.data.length)
+		start = iwe_stream_add_point(info, start, stop, &iwe, custom);
+
+	/* Add spectrum management information */
+	iwe.cmd = -1;
+	p = custom;
+	p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Channel flags: ");
+
+	if (libipw_get_channel_flags(ieee, network->channel) &
+	    LIBIPW_CH_INVALID) {
+		iwe.cmd = IWEVCUSTOM;
+		p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), "INVALID ");
+	}
+
+	if (libipw_get_channel_flags(ieee, network->channel) &
+	    LIBIPW_CH_RADAR_DETECT) {
+		iwe.cmd = IWEVCUSTOM;
+		p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), "DFS ");
+	}
+
+	if (iwe.cmd == IWEVCUSTOM) {
+		iwe.u.data.length = p - custom;
+		start = iwe_stream_add_point(info, start, stop, &iwe, custom);
+	}
+
+	return start;
+}
+
+#define SCAN_ITEM_SIZE 128
+
+int libipw_wx_get_scan(struct libipw_device *ieee,
+			  struct iw_request_info *info,
+			  union iwreq_data *wrqu, char *extra)
+{
+	struct libipw_network *network;
+	unsigned long flags;
+	int err = 0;
+
+	char *ev = extra;
+	char *stop = ev + wrqu->data.length;
+	int i = 0;
+
+	LIBIPW_DEBUG_WX("Getting scan\n");
+
+	spin_lock_irqsave(&ieee->lock, flags);
+
+	list_for_each_entry(network, &ieee->network_list, list) {
+		i++;
+		if (stop - ev < SCAN_ITEM_SIZE) {
+			err = -E2BIG;
+			break;
+		}
+
+		if (ieee->scan_age == 0 ||
+		    time_after(network->last_scanned + ieee->scan_age, jiffies))
+			ev = libipw_translate_scan(ieee, ev, stop, network,
+						      info);
+		else {
+			LIBIPW_DEBUG_SCAN("Not showing network '%*pE (%pM)' due to age (%ums).\n",
+					  network->ssid_len, network->ssid,
+					  network->bssid,
+					  elapsed_jiffies_msecs(
+					               network->last_scanned));
+		}
+	}
+
+	spin_unlock_irqrestore(&ieee->lock, flags);
+
+	wrqu->data.length = ev - extra;
+	wrqu->data.flags = 0;
+
+	LIBIPW_DEBUG_WX("exit: %d networks returned.\n", i);
+
+	return err;
+}
+
+int libipw_wx_set_encode(struct libipw_device *ieee,
+			    struct iw_request_info *info,
+			    union iwreq_data *wrqu, char *keybuf)
+{
+	struct iw_point *erq = &(wrqu->encoding);
+	struct net_device *dev = ieee->dev;
+	struct libipw_security sec = {
+		.flags = 0
+	};
+	int i, key, key_provided, len;
+	struct lib80211_crypt_data **crypt;
+	int host_crypto = ieee->host_encrypt || ieee->host_decrypt;
+
+	LIBIPW_DEBUG_WX("SET_ENCODE\n");
+
+	key = erq->flags & IW_ENCODE_INDEX;
+	if (key) {
+		if (key > WEP_KEYS)
+			return -EINVAL;
+		key--;
+		key_provided = 1;
+	} else {
+		key_provided = 0;
+		key = ieee->crypt_info.tx_keyidx;
+	}
+
+	LIBIPW_DEBUG_WX("Key: %d [%s]\n", key, key_provided ?
+			   "provided" : "default");
+
+	crypt = &ieee->crypt_info.crypt[key];
+
+	if (erq->flags & IW_ENCODE_DISABLED) {
+		if (key_provided && *crypt) {
+			LIBIPW_DEBUG_WX("Disabling encryption on key %d.\n",
+					   key);
+			lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt);
+		} else
+			LIBIPW_DEBUG_WX("Disabling encryption.\n");
+
+		/* Check all the keys to see if any are still configured,
+		 * and if no key index was provided, de-init them all */
+		for (i = 0; i < WEP_KEYS; i++) {
+			if (ieee->crypt_info.crypt[i] != NULL) {
+				if (key_provided)
+					break;
+				lib80211_crypt_delayed_deinit(&ieee->crypt_info,
+							       &ieee->crypt_info.crypt[i]);
+			}
+		}
+
+		if (i == WEP_KEYS) {
+			sec.enabled = 0;
+			sec.encrypt = 0;
+			sec.level = SEC_LEVEL_0;
+			sec.flags |= SEC_ENABLED | SEC_LEVEL | SEC_ENCRYPT;
+		}
+
+		goto done;
+	}
+
+	sec.enabled = 1;
+	sec.encrypt = 1;
+	sec.flags |= SEC_ENABLED | SEC_ENCRYPT;
+
+	if (*crypt != NULL && (*crypt)->ops != NULL &&
+	    strcmp((*crypt)->ops->name, "WEP") != 0) {
+		/* changing to use WEP; deinit previously used algorithm
+		 * on this key */
+		lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt);
+	}
+
+	if (*crypt == NULL && host_crypto) {
+		struct lib80211_crypt_data *new_crypt;
+
+		/* take WEP into use */
+		new_crypt = kzalloc(sizeof(struct lib80211_crypt_data),
+				    GFP_KERNEL);
+		if (new_crypt == NULL)
+			return -ENOMEM;
+		new_crypt->ops = lib80211_get_crypto_ops("WEP");
+		if (!new_crypt->ops) {
+			request_module("lib80211_crypt_wep");
+			new_crypt->ops = lib80211_get_crypto_ops("WEP");
+		}
+
+		if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
+			new_crypt->priv = new_crypt->ops->init(key);
+
+		if (!new_crypt->ops || !new_crypt->priv) {
+			kfree(new_crypt);
+			new_crypt = NULL;
+
+			printk(KERN_WARNING "%s: could not initialize WEP: "
+			       "load module lib80211_crypt_wep\n", dev->name);
+			return -EOPNOTSUPP;
+		}
+		*crypt = new_crypt;
+	}
+
+	/* If a new key was provided, set it up */
+	if (erq->length > 0) {
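+		/* Keys of up to 5 bytes are zero-padded to the 5 bytes of
+		 * 40-bit ("64-bit") WEP; longer keys are zero-padded to the
+		 * 13 bytes of 104-bit ("128-bit") WEP. */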
+		len = erq->length <= 5 ? 5 : 13;
+		memcpy(sec.keys[key], keybuf, erq->length);
+		if (len > erq->length)
+			memset(sec.keys[key] + erq->length, 0,
+			       len - erq->length);
+		LIBIPW_DEBUG_WX("Setting key %d to '%*pE' (%d:%d bytes)\n",
+				   key, len, sec.keys[key],
+				   erq->length, len);
+		sec.key_sizes[key] = len;
+		if (*crypt)
+			(*crypt)->ops->set_key(sec.keys[key], len, NULL,
+					       (*crypt)->priv);
+		sec.flags |= (1 << key);
+		/* This ensures a key will be activated if no key is
+		 * explicitly set */
+		if (key == sec.active_key)
+			sec.flags |= SEC_ACTIVE_KEY;
+
+	} else {
+		if (host_crypto) {
+			len = (*crypt)->ops->get_key(sec.keys[key], WEP_KEY_LEN,
+						     NULL, (*crypt)->priv);
+			if (len == 0) {
+				/* Set a default key of all 0 */
+				LIBIPW_DEBUG_WX("Setting key %d to all "
+						   "zero.\n", key);
+				memset(sec.keys[key], 0, 13);
+				(*crypt)->ops->set_key(sec.keys[key], 13, NULL,
+						       (*crypt)->priv);
+				sec.key_sizes[key] = 13;
+				sec.flags |= (1 << key);
+			}
+		}
+		/* No key data - just set the default TX key index */
+		if (key_provided) {
+			LIBIPW_DEBUG_WX("Setting key %d to default Tx "
+					   "key.\n", key);
+			ieee->crypt_info.tx_keyidx = key;
+			sec.active_key = key;
+			sec.flags |= SEC_ACTIVE_KEY;
+		}
+	}
+	if (erq->flags & (IW_ENCODE_OPEN | IW_ENCODE_RESTRICTED)) {
+		ieee->open_wep = !(erq->flags & IW_ENCODE_RESTRICTED);
+		sec.auth_mode = ieee->open_wep ? WLAN_AUTH_OPEN :
+		    WLAN_AUTH_SHARED_KEY;
+		sec.flags |= SEC_AUTH_MODE;
+		LIBIPW_DEBUG_WX("Auth: %s\n",
+				   sec.auth_mode == WLAN_AUTH_OPEN ?
+				   "OPEN" : "SHARED KEY");
+	}
+
+	/* For now we just support WEP, so only set that security level...
+	 * TODO: When WPA is added this is one place that needs to change */
+	sec.flags |= SEC_LEVEL;
+	sec.level = SEC_LEVEL_1;	/* 40 and 104 bit WEP */
+	sec.encode_alg[key] = SEC_ALG_WEP;
+
+      done:
+	if (ieee->set_security)
+		ieee->set_security(dev, &sec);
+
+	return 0;
+}
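+
+/*
+ * For reference (illustrative, assuming the classic wireless-tools
+ * userspace): "iwconfig <if> key 0123456789" reaches this handler with
+ * erq->length == 5, while "iwconfig <if> key off" arrives with
+ * IW_ENCODE_DISABLED set in erq->flags.
+ */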
+
+int libipw_wx_get_encode(struct libipw_device *ieee,
+			    struct iw_request_info *info,
+			    union iwreq_data *wrqu, char *keybuf)
+{
+	struct iw_point *erq = &(wrqu->encoding);
+	int len, key;
+	struct libipw_security *sec = &ieee->sec;
+
+	LIBIPW_DEBUG_WX("GET_ENCODE\n");
+
+	key = erq->flags & IW_ENCODE_INDEX;
+	if (key) {
+		if (key > WEP_KEYS)
+			return -EINVAL;
+		key--;
+	} else
+		key = ieee->crypt_info.tx_keyidx;
+
+	erq->flags = key + 1;
+
+	if (!sec->enabled) {
+		erq->length = 0;
+		erq->flags |= IW_ENCODE_DISABLED;
+		return 0;
+	}
+
+	len = sec->key_sizes[key];
+	memcpy(keybuf, sec->keys[key], len);
+
+	erq->length = len;
+	erq->flags |= IW_ENCODE_ENABLED;
+
+	if (ieee->open_wep)
+		erq->flags |= IW_ENCODE_OPEN;
+	else
+		erq->flags |= IW_ENCODE_RESTRICTED;
+
+	return 0;
+}
+
+int libipw_wx_set_encodeext(struct libipw_device *ieee,
+			       struct iw_request_info *info,
+			       union iwreq_data *wrqu, char *extra)
+{
+	struct net_device *dev = ieee->dev;
+	struct iw_point *encoding = &wrqu->encoding;
+	struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
+	int i, idx, ret = 0;
+	int group_key = 0;
+	const char *alg, *module;
+	struct lib80211_crypto_ops *ops;
+	struct lib80211_crypt_data **crypt;
+
+	struct libipw_security sec = {
+		.flags = 0,
+	};
+
+	idx = encoding->flags & IW_ENCODE_INDEX;
+	if (idx) {
+		if (idx < 1 || idx > WEP_KEYS)
+			return -EINVAL;
+		idx--;
+	} else
+		idx = ieee->crypt_info.tx_keyidx;
+
+	if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
+		crypt = &ieee->crypt_info.crypt[idx];
+		group_key = 1;
+	} else {
+		/* some Cisco APs use idx>0 for unicast in dynamic WEP */
+		if (idx != 0 && ext->alg != IW_ENCODE_ALG_WEP)
+			return -EINVAL;
+		if (ieee->iw_mode == IW_MODE_INFRA)
+			crypt = &ieee->crypt_info.crypt[idx];
+		else
+			return -EINVAL;
+	}
+
+	sec.flags |= SEC_ENABLED | SEC_ENCRYPT;
+	if ((encoding->flags & IW_ENCODE_DISABLED) ||
+	    ext->alg == IW_ENCODE_ALG_NONE) {
+		if (*crypt)
+			lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt);
+
+		for (i = 0; i < WEP_KEYS; i++)
+			if (ieee->crypt_info.crypt[i] != NULL)
+				break;
+
+		if (i == WEP_KEYS) {
+			sec.enabled = 0;
+			sec.encrypt = 0;
+			sec.level = SEC_LEVEL_0;
+			sec.flags |= SEC_LEVEL;
+		}
+		goto done;
+	}
+
+	sec.enabled = 1;
+	sec.encrypt = 1;
+
+	if (group_key ? !ieee->host_mc_decrypt :
+	    !(ieee->host_encrypt || ieee->host_decrypt ||
+	      ieee->host_encrypt_msdu))
+		goto skip_host_crypt;
+
+	switch (ext->alg) {
+	case IW_ENCODE_ALG_WEP:
+		alg = "WEP";
+		module = "lib80211_crypt_wep";
+		break;
+	case IW_ENCODE_ALG_TKIP:
+		alg = "TKIP";
+		module = "lib80211_crypt_tkip";
+		break;
+	case IW_ENCODE_ALG_CCMP:
+		alg = "CCMP";
+		module = "lib80211_crypt_ccmp";
+		break;
+	default:
+		LIBIPW_DEBUG_WX("%s: unknown crypto alg %d\n",
+				   dev->name, ext->alg);
+		ret = -EINVAL;
+		goto done;
+	}
+
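+	/* Look the requested cipher up in lib80211's registry; if it is
+	 * not registered yet, try to load the corresponding crypto module
+	 * on demand and look again. */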
+	ops = lib80211_get_crypto_ops(alg);
+	if (ops == NULL) {
+		request_module(module);
+		ops = lib80211_get_crypto_ops(alg);
+	}
+	if (ops == NULL) {
+		LIBIPW_DEBUG_WX("%s: unknown crypto alg %d\n",
+				   dev->name, ext->alg);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (*crypt == NULL || (*crypt)->ops != ops) {
+		struct lib80211_crypt_data *new_crypt;
+
+		lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt);
+
+		new_crypt = kzalloc(sizeof(*new_crypt), GFP_KERNEL);
+		if (new_crypt == NULL) {
+			ret = -ENOMEM;
+			goto done;
+		}
+		new_crypt->ops = ops;
+		if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
+			new_crypt->priv = new_crypt->ops->init(idx);
+		if (new_crypt->priv == NULL) {
+			kfree(new_crypt);
+			ret = -EINVAL;
+			goto done;
+		}
+		*crypt = new_crypt;
+	}
+
+	if (ext->key_len > 0 && (*crypt)->ops->set_key &&
+	    (*crypt)->ops->set_key(ext->key, ext->key_len, ext->rx_seq,
+				   (*crypt)->priv) < 0) {
+		LIBIPW_DEBUG_WX("%s: key setting failed\n", dev->name);
+		ret = -EINVAL;
+		goto done;
+	}
+
+      skip_host_crypt:
+	if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+		ieee->crypt_info.tx_keyidx = idx;
+		sec.active_key = idx;
+		sec.flags |= SEC_ACTIVE_KEY;
+	}
+
+	if (ext->alg != IW_ENCODE_ALG_NONE) {
+		memcpy(sec.keys[idx], ext->key, ext->key_len);
+		sec.key_sizes[idx] = ext->key_len;
+		sec.flags |= (1 << idx);
+		if (ext->alg == IW_ENCODE_ALG_WEP) {
+			sec.encode_alg[idx] = SEC_ALG_WEP;
+			sec.flags |= SEC_LEVEL;
+			sec.level = SEC_LEVEL_1;
+		} else if (ext->alg == IW_ENCODE_ALG_TKIP) {
+			sec.encode_alg[idx] = SEC_ALG_TKIP;
+			sec.flags |= SEC_LEVEL;
+			sec.level = SEC_LEVEL_2;
+		} else if (ext->alg == IW_ENCODE_ALG_CCMP) {
+			sec.encode_alg[idx] = SEC_ALG_CCMP;
+			sec.flags |= SEC_LEVEL;
+			sec.level = SEC_LEVEL_3;
+		}
+		/* Don't set sec level for group keys. */
+		if (group_key)
+			sec.flags &= ~SEC_LEVEL;
+	}
+      done:
+	if (ieee->set_security)
+		ieee->set_security(dev, &sec);
+
+	return ret;
+}
+
+int libipw_wx_get_encodeext(struct libipw_device *ieee,
+			       struct iw_request_info *info,
+			       union iwreq_data *wrqu, char *extra)
+{
+	struct iw_point *encoding = &wrqu->encoding;
+	struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
+	struct libipw_security *sec = &ieee->sec;
+	int idx, max_key_len;
+
+	max_key_len = encoding->length - sizeof(*ext);
+	if (max_key_len < 0)
+		return -EINVAL;
+
+	idx = encoding->flags & IW_ENCODE_INDEX;
+	if (idx) {
+		if (idx < 1 || idx > WEP_KEYS)
+			return -EINVAL;
+		idx--;
+	} else
+		idx = ieee->crypt_info.tx_keyidx;
+
+	if (!(ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) &&
+	    ext->alg != IW_ENCODE_ALG_WEP)
+		if (idx != 0 || ieee->iw_mode != IW_MODE_INFRA)
+			return -EINVAL;
+
+	encoding->flags = idx + 1;
+	memset(ext, 0, sizeof(*ext));
+
+	if (!sec->enabled) {
+		ext->alg = IW_ENCODE_ALG_NONE;
+		ext->key_len = 0;
+		encoding->flags |= IW_ENCODE_DISABLED;
+	} else {
+		if (sec->encode_alg[idx] == SEC_ALG_WEP)
+			ext->alg = IW_ENCODE_ALG_WEP;
+		else if (sec->encode_alg[idx] == SEC_ALG_TKIP)
+			ext->alg = IW_ENCODE_ALG_TKIP;
+		else if (sec->encode_alg[idx] == SEC_ALG_CCMP)
+			ext->alg = IW_ENCODE_ALG_CCMP;
+		else
+			return -EINVAL;
+
+		ext->key_len = sec->key_sizes[idx];
+		memcpy(ext->key, sec->keys[idx], ext->key_len);
+		encoding->flags |= IW_ENCODE_ENABLED;
+		if (ext->key_len &&
+		    (ext->alg == IW_ENCODE_ALG_TKIP ||
+		     ext->alg == IW_ENCODE_ALG_CCMP))
+			ext->ext_flags |= IW_ENCODE_EXT_TX_SEQ_VALID;
+
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(libipw_wx_set_encodeext);
+EXPORT_SYMBOL(libipw_wx_get_encodeext);
+
+EXPORT_SYMBOL(libipw_wx_get_scan);
+EXPORT_SYMBOL(libipw_wx_set_encode);
+EXPORT_SYMBOL(libipw_wx_get_encode);
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-debug.c b/drivers/net/wireless/intel/iwlegacy/3945-debug.c
new file mode 100644
index 0000000..a296003
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/3945-debug.c
@@ -0,0 +1,511 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include "common.h"
+#include "3945.h"
+
+static int
+il3945_stats_flag(struct il_priv *il, char *buf, int bufsz)
+{
+	int p = 0;
+
+	p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
+		       le32_to_cpu(il->_3945.stats.flag));
+	if (le32_to_cpu(il->_3945.stats.flag) & UCODE_STATS_CLEAR_MSK)
+		p += scnprintf(buf + p, bufsz - p,
+			       "\tStatistics have been cleared\n");
+	p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
+		       (le32_to_cpu(il->_3945.stats.flag) &
+			UCODE_STATS_FREQUENCY_MSK) ? "2.4 GHz" : "5.2 GHz");
+	p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
+		       (le32_to_cpu(il->_3945.stats.flag) &
+			UCODE_STATS_NARROW_BAND_MSK) ? "enabled" : "disabled");
+	return p;
+}
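+
+/*
+ * With a reported flag word of 0, the helper above renders (sketch of
+ * the debugfs output only):
+ *
+ *	Statistics Flag(0x0):
+ *		Operational Frequency: 5.2 GHz
+ *		TGj Narrow Band: disabled
+ */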
+
+static ssize_t
+il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+			   size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	int pos = 0;
+	char *buf;
+	int bufsz =
+	    sizeof(struct iwl39_stats_rx_phy) * 40 +
+	    sizeof(struct iwl39_stats_rx_non_phy) * 40 + 400;
+	ssize_t ret;
+	struct iwl39_stats_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
+	struct iwl39_stats_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
+	struct iwl39_stats_rx_non_phy *general, *accum_general;
+	struct iwl39_stats_rx_non_phy *delta_general, *max_general;
+
+	if (!il_is_alive(il))
+		return -EAGAIN;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf) {
+		IL_ERR("Can not allocate Buffer\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * The statistics displayed here are based on the last stats
+	 * notification from the uCode and might not reflect the current
+	 * uCode activity.
+	 */
+	ofdm = &il->_3945.stats.rx.ofdm;
+	cck = &il->_3945.stats.rx.cck;
+	general = &il->_3945.stats.rx.general;
+	accum_ofdm = &il->_3945.accum_stats.rx.ofdm;
+	accum_cck = &il->_3945.accum_stats.rx.cck;
+	accum_general = &il->_3945.accum_stats.rx.general;
+	delta_ofdm = &il->_3945.delta_stats.rx.ofdm;
+	delta_cck = &il->_3945.delta_stats.rx.cck;
+	delta_general = &il->_3945.delta_stats.rx.general;
+	max_ofdm = &il->_3945.max_delta.rx.ofdm;
+	max_cck = &il->_3945.max_delta.rx.cck;
+	max_general = &il->_3945.max_delta.rx.general;
+
+	pos += il3945_stats_flag(il, buf, bufsz);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "%-32s     current"
+		      "accumulative      delta         max\n",
+		      "Statistics_Rx - OFDM:");
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "ina_cnt:",
+		      le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt,
+		      delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "fina_cnt:",
+		      le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
+		      delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "plcp_err:",
+		      le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
+		      delta_ofdm->plcp_err, max_ofdm->plcp_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "crc32_err:",
+		      le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
+		      delta_ofdm->crc32_err, max_ofdm->crc32_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "overrun_err:",
+		      le32_to_cpu(ofdm->overrun_err), accum_ofdm->overrun_err,
+		      delta_ofdm->overrun_err, max_ofdm->overrun_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "early_overrun_err:",
+		      le32_to_cpu(ofdm->early_overrun_err),
+		      accum_ofdm->early_overrun_err,
+		      delta_ofdm->early_overrun_err,
+		      max_ofdm->early_overrun_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "crc32_good:",
+		      le32_to_cpu(ofdm->crc32_good), accum_ofdm->crc32_good,
+		      delta_ofdm->crc32_good, max_ofdm->crc32_good);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "false_alarm_cnt:",
+		      le32_to_cpu(ofdm->false_alarm_cnt),
+		      accum_ofdm->false_alarm_cnt, delta_ofdm->false_alarm_cnt,
+		      max_ofdm->false_alarm_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "fina_sync_err_cnt:",
+		      le32_to_cpu(ofdm->fina_sync_err_cnt),
+		      accum_ofdm->fina_sync_err_cnt,
+		      delta_ofdm->fina_sync_err_cnt,
+		      max_ofdm->fina_sync_err_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "sfd_timeout:",
+		      le32_to_cpu(ofdm->sfd_timeout), accum_ofdm->sfd_timeout,
+		      delta_ofdm->sfd_timeout, max_ofdm->sfd_timeout);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "fina_timeout:",
+		      le32_to_cpu(ofdm->fina_timeout), accum_ofdm->fina_timeout,
+		      delta_ofdm->fina_timeout, max_ofdm->fina_timeout);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "unresponded_rts:",
+		      le32_to_cpu(ofdm->unresponded_rts),
+		      accum_ofdm->unresponded_rts, delta_ofdm->unresponded_rts,
+		      max_ofdm->unresponded_rts);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n",
+		      "rxe_frame_lmt_ovrun:",
+		      le32_to_cpu(ofdm->rxe_frame_limit_overrun),
+		      accum_ofdm->rxe_frame_limit_overrun,
+		      delta_ofdm->rxe_frame_limit_overrun,
+		      max_ofdm->rxe_frame_limit_overrun);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "sent_ack_cnt:",
+		      le32_to_cpu(ofdm->sent_ack_cnt), accum_ofdm->sent_ack_cnt,
+		      delta_ofdm->sent_ack_cnt, max_ofdm->sent_ack_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "sent_cts_cnt:",
+		      le32_to_cpu(ofdm->sent_cts_cnt), accum_ofdm->sent_cts_cnt,
+		      delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "%-32s     current"
+		      "acumulative       delta         max\n",
+		      "Statistics_Rx - CCK:");
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "ina_cnt:",
+		      le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
+		      delta_cck->ina_cnt, max_cck->ina_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "fina_cnt:",
+		      le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
+		      delta_cck->fina_cnt, max_cck->fina_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "plcp_err:",
+		      le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
+		      delta_cck->plcp_err, max_cck->plcp_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "crc32_err:",
+		      le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
+		      delta_cck->crc32_err, max_cck->crc32_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "overrun_err:",
+		      le32_to_cpu(cck->overrun_err), accum_cck->overrun_err,
+		      delta_cck->overrun_err, max_cck->overrun_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "early_overrun_err:",
+		      le32_to_cpu(cck->early_overrun_err),
+		      accum_cck->early_overrun_err,
+		      delta_cck->early_overrun_err, max_cck->early_overrun_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "crc32_good:",
+		      le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
+		      delta_cck->crc32_good, max_cck->crc32_good);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "false_alarm_cnt:",
+		      le32_to_cpu(cck->false_alarm_cnt),
+		      accum_cck->false_alarm_cnt, delta_cck->false_alarm_cnt,
+		      max_cck->false_alarm_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "fina_sync_err_cnt:",
+		      le32_to_cpu(cck->fina_sync_err_cnt),
+		      accum_cck->fina_sync_err_cnt,
+		      delta_cck->fina_sync_err_cnt, max_cck->fina_sync_err_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "sfd_timeout:",
+		      le32_to_cpu(cck->sfd_timeout), accum_cck->sfd_timeout,
+		      delta_cck->sfd_timeout, max_cck->sfd_timeout);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "fina_timeout:",
+		      le32_to_cpu(cck->fina_timeout), accum_cck->fina_timeout,
+		      delta_cck->fina_timeout, max_cck->fina_timeout);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "unresponded_rts:",
+		      le32_to_cpu(cck->unresponded_rts),
+		      accum_cck->unresponded_rts, delta_cck->unresponded_rts,
+		      max_cck->unresponded_rts);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n",
+		      "rxe_frame_lmt_ovrun:",
+		      le32_to_cpu(cck->rxe_frame_limit_overrun),
+		      accum_cck->rxe_frame_limit_overrun,
+		      delta_cck->rxe_frame_limit_overrun,
+		      max_cck->rxe_frame_limit_overrun);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "sent_ack_cnt:",
+		      le32_to_cpu(cck->sent_ack_cnt), accum_cck->sent_ack_cnt,
+		      delta_cck->sent_ack_cnt, max_cck->sent_ack_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "sent_cts_cnt:",
+		      le32_to_cpu(cck->sent_cts_cnt), accum_cck->sent_cts_cnt,
+		      delta_cck->sent_cts_cnt, max_cck->sent_cts_cnt);
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "%-32s     current"
+		      "acumulative       delta         max\n",
+		      "Statistics_Rx - GENERAL:");
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "bogus_cts:",
+		      le32_to_cpu(general->bogus_cts), accum_general->bogus_cts,
+		      delta_general->bogus_cts, max_general->bogus_cts);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "bogus_ack:",
+		      le32_to_cpu(general->bogus_ack), accum_general->bogus_ack,
+		      delta_general->bogus_ack, max_general->bogus_ack);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "non_bssid_frames:",
+		      le32_to_cpu(general->non_bssid_frames),
+		      accum_general->non_bssid_frames,
+		      delta_general->non_bssid_frames,
+		      max_general->non_bssid_frames);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "filtered_frames:",
+		      le32_to_cpu(general->filtered_frames),
+		      accum_general->filtered_frames,
+		      delta_general->filtered_frames,
+		      max_general->filtered_frames);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n",
+		      "non_channel_beacons:",
+		      le32_to_cpu(general->non_channel_beacons),
+		      accum_general->non_channel_beacons,
+		      delta_general->non_channel_beacons,
+		      max_general->non_channel_beacons);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t
+il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+			   size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	int pos = 0;
+	char *buf;
+	int bufsz = (sizeof(struct iwl39_stats_tx) * 48) + 250;
+	ssize_t ret;
+	struct iwl39_stats_tx *tx, *accum_tx, *delta_tx, *max_tx;
+
+	if (!il_is_alive(il))
+		return -EAGAIN;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf) {
+		IL_ERR("Can not allocate Buffer\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * The statistics displayed here are based on the last stats
+	 * notification from the uCode and might not reflect the current
+	 * uCode activity.
+	 */
+	tx = &il->_3945.stats.tx;
+	accum_tx = &il->_3945.accum_stats.tx;
+	delta_tx = &il->_3945.delta_stats.tx;
+	max_tx = &il->_3945.max_delta.tx;
+	pos += il3945_stats_flag(il, buf, bufsz);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "%-32s     current"
+		      "acumulative       delta         max\n",
+		      "Statistics_Tx:");
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "preamble:",
+		      le32_to_cpu(tx->preamble_cnt), accum_tx->preamble_cnt,
+		      delta_tx->preamble_cnt, max_tx->preamble_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "rx_detected_cnt:",
+		      le32_to_cpu(tx->rx_detected_cnt),
+		      accum_tx->rx_detected_cnt, delta_tx->rx_detected_cnt,
+		      max_tx->rx_detected_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "bt_prio_defer_cnt:",
+		      le32_to_cpu(tx->bt_prio_defer_cnt),
+		      accum_tx->bt_prio_defer_cnt, delta_tx->bt_prio_defer_cnt,
+		      max_tx->bt_prio_defer_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "bt_prio_kill_cnt:",
+		      le32_to_cpu(tx->bt_prio_kill_cnt),
+		      accum_tx->bt_prio_kill_cnt, delta_tx->bt_prio_kill_cnt,
+		      max_tx->bt_prio_kill_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "few_bytes_cnt:",
+		      le32_to_cpu(tx->few_bytes_cnt), accum_tx->few_bytes_cnt,
+		      delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "cts_timeout:",
+		      le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
+		      delta_tx->cts_timeout, max_tx->cts_timeout);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "ack_timeout:",
+		      le32_to_cpu(tx->ack_timeout), accum_tx->ack_timeout,
+		      delta_tx->ack_timeout, max_tx->ack_timeout);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "expected_ack_cnt:",
+		      le32_to_cpu(tx->expected_ack_cnt),
+		      accum_tx->expected_ack_cnt, delta_tx->expected_ack_cnt,
+		      max_tx->expected_ack_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "actual_ack_cnt:",
+		      le32_to_cpu(tx->actual_ack_cnt), accum_tx->actual_ack_cnt,
+		      delta_tx->actual_ack_cnt, max_tx->actual_ack_cnt);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t
+il3945_ucode_general_stats_read(struct file *file, char __user *user_buf,
+				size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	int pos = 0;
+	char *buf;
+	int bufsz = sizeof(struct iwl39_stats_general) * 10 + 300;
+	ssize_t ret;
+	struct iwl39_stats_general *general, *accum_general;
+	struct iwl39_stats_general *delta_general, *max_general;
+	struct stats_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
+	struct iwl39_stats_div *div, *accum_div, *delta_div, *max_div;
+
+	if (!il_is_alive(il))
+		return -EAGAIN;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf) {
+		IL_ERR("Can not allocate Buffer\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * The statistics displayed here are based on the last stats
+	 * notification from the uCode and might not reflect the current
+	 * uCode activity.
+	 */
+	general = &il->_3945.stats.general;
+	dbg = &il->_3945.stats.general.dbg;
+	div = &il->_3945.stats.general.div;
+	accum_general = &il->_3945.accum_stats.general;
+	delta_general = &il->_3945.delta_stats.general;
+	max_general = &il->_3945.max_delta.general;
+	accum_dbg = &il->_3945.accum_stats.general.dbg;
+	delta_dbg = &il->_3945.delta_stats.general.dbg;
+	max_dbg = &il->_3945.max_delta.general.dbg;
+	accum_div = &il->_3945.accum_stats.general.div;
+	delta_div = &il->_3945.delta_stats.general.div;
+	max_div = &il->_3945.max_delta.general.div;
+	pos += il3945_stats_flag(il, buf, bufsz);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "%-32s     current"
+		      "acumulative       delta         max\n",
+		      "Statistics_General:");
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "burst_check:",
+		      le32_to_cpu(dbg->burst_check), accum_dbg->burst_check,
+		      delta_dbg->burst_check, max_dbg->burst_check);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "burst_count:",
+		      le32_to_cpu(dbg->burst_count), accum_dbg->burst_count,
+		      delta_dbg->burst_count, max_dbg->burst_count);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "sleep_time:",
+		      le32_to_cpu(general->sleep_time),
+		      accum_general->sleep_time, delta_general->sleep_time,
+		      max_general->sleep_time);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "slots_out:",
+		      le32_to_cpu(general->slots_out), accum_general->slots_out,
+		      delta_general->slots_out, max_general->slots_out);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "slots_idle:",
+		      le32_to_cpu(general->slots_idle),
+		      accum_general->slots_idle, delta_general->slots_idle,
+		      max_general->slots_idle);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
+		      le32_to_cpu(general->ttl_timestamp));
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "tx_on_a:",
+		      le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
+		      delta_div->tx_on_a, max_div->tx_on_a);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "tx_on_b:",
+		      le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
+		      delta_div->tx_on_b, max_div->tx_on_b);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "exec_time:",
+		      le32_to_cpu(div->exec_time), accum_div->exec_time,
+		      delta_div->exec_time, max_div->exec_time);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "  %-30s %10u  %10u  %10u  %10u\n", "probe_time:",
+		      le32_to_cpu(div->probe_time), accum_div->probe_time,
+		      delta_div->probe_time, max_div->probe_time);
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+const struct il_debugfs_ops il3945_debugfs_ops = {
+	.rx_stats_read = il3945_ucode_rx_stats_read,
+	.tx_stats_read = il3945_ucode_tx_stats_read,
+	.general_stats_read = il3945_ucode_general_stats_read,
+};
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
new file mode 100644
index 0000000..57e3b6c
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -0,0 +1,3948 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+
+#include <net/ieee80211_radiotap.h>
+#include <net/mac80211.h>
+
+#include <asm/div64.h>
+
+#define DRV_NAME	"iwl3945"
+
+#include "commands.h"
+#include "common.h"
+#include "3945.h"
+#include "iwl-spectrum.h"
+
+/*
+ * module name, copyright, version, etc.
+ */
+
+#define DRV_DESCRIPTION	\
+"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+#define VD "d"
+#else
+#define VD
+#endif
+
+/*
+ * Add "s" to indicate that spectrum measurement is included.
+ * We add it here to be consistent with previous releases, in which
+ * this was configurable.
+ */
+#define DRV_VERSION  IWLWIFI_VERSION VD "s"
+#define DRV_COPYRIGHT	"Copyright(c) 2003-2011 Intel Corporation"
+#define DRV_AUTHOR     "<ilw@linux.intel.com>"
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+
+/* module parameters */
+struct il_mod_params il3945_mod_params = {
+	.sw_crypto = 1,
+	.restart_fw = 1,
+	.disable_hw_scan = 1,
+	/* the rest are 0 by default */
+};
+
+/**
+ * il3945_get_antenna_flags - Get antenna flags for RXON command
+ * @il: eeprom and antenna fields are used to determine antenna flags
+ *
+ * il->eeprom39  is used to determine if antenna AUX/MAIN are reversed
+ * il3945_mod_params.antenna specifies the antenna diversity mode:
+ *
+ * IL_ANTENNA_DIVERSITY - NIC selects best antenna by itself
+ * IL_ANTENNA_MAIN      - Force MAIN antenna
+ * IL_ANTENNA_AUX       - Force AUX antenna
+ */
+__le32
+il3945_get_antenna_flags(const struct il_priv *il)
+{
+	struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+
+	switch (il3945_mod_params.antenna) {
+	case IL_ANTENNA_DIVERSITY:
+		return 0;
+
+	case IL_ANTENNA_MAIN:
+		if (eeprom->antenna_switch_type)
+			return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
+		return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
+
+	case IL_ANTENNA_AUX:
+		if (eeprom->antenna_switch_type)
+			return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
+		return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
+	}
+
+	/* bad antenna selector value */
+	IL_ERR("Bad antenna selector value (0x%x)\n",
+	       il3945_mod_params.antenna);
+
+	return 0;		/* "diversity" is default if error */
+}
+
+static int
+il3945_set_ccmp_dynamic_key_info(struct il_priv *il,
+				 struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+	unsigned long flags;
+	__le16 key_flags = 0;
+	int ret;
+
+	key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
+	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+
+	if (sta_id == il->hw_params.bcast_id)
+		key_flags |= STA_KEY_MULTICAST_MSK;
+
+	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+	keyconf->hw_key_idx = keyconf->keyidx;
+	key_flags &= ~STA_KEY_FLG_INVALID;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+	il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
+	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);
+
+	memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen);
+
+	if ((il->stations[sta_id].sta.key.key_flags &
+	     STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
+		il->stations[sta_id].sta.key.key_offset =
+		    il_get_free_ucode_key_idx(il);
+	/* else, we are overriding an existing key => no need to allocate room
+	 * in uCode. */
+
+	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+	     "no space for a new key");
+
+	il->stations[sta_id].sta.key.key_flags = key_flags;
+	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+	D_INFO("hwcrypto: modify ucode station key info\n");
+
+	ret = il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
+
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	return ret;
+}
+
+static int
+il3945_set_tkip_dynamic_key_info(struct il_priv *il,
+				 struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+	return -EOPNOTSUPP;
+}
+
+static int
+il3945_set_wep_dynamic_key_info(struct il_priv *il,
+				struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+	return -EOPNOTSUPP;
+}
+
+static int
+il3945_clear_sta_key_info(struct il_priv *il, u8 sta_id)
+{
+	unsigned long flags;
+	struct il_addsta_cmd sta_cmd;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key));
+	memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
+	il->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
+	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+	memcpy(&sta_cmd, &il->stations[sta_id].sta,
+	       sizeof(struct il_addsta_cmd));
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	D_INFO("hwcrypto: clear ucode station key info\n");
+	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+static int
+il3945_set_dynamic_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
+		       u8 sta_id)
+{
+	int ret = 0;
+
+	keyconf->hw_key_idx = HW_KEY_DYNAMIC;
+
+	switch (keyconf->cipher) {
+	case WLAN_CIPHER_SUITE_CCMP:
+		ret = il3945_set_ccmp_dynamic_key_info(il, keyconf, sta_id);
+		break;
+	case WLAN_CIPHER_SUITE_TKIP:
+		ret = il3945_set_tkip_dynamic_key_info(il, keyconf, sta_id);
+		break;
+	case WLAN_CIPHER_SUITE_WEP40:
+	case WLAN_CIPHER_SUITE_WEP104:
+		ret = il3945_set_wep_dynamic_key_info(il, keyconf, sta_id);
+		break;
+	default:
+		IL_ERR("Unknown alg: %s alg=%x\n", __func__, keyconf->cipher);
+		ret = -EINVAL;
+	}
+
+	D_WEP("Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n",
+	      keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret);
+
+	return ret;
+}
+
+static int
+il3945_remove_static_key(struct il_priv *il)
+{
+	int ret = -EOPNOTSUPP;
+
+	return ret;
+}
+
+static int
+il3945_set_static_key(struct il_priv *il, struct ieee80211_key_conf *key)
+{
+	if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+	    key->cipher == WLAN_CIPHER_SUITE_WEP104)
+		return -EOPNOTSUPP;
+
+	IL_ERR("Static key invalid: cipher %x\n", key->cipher);
+	return -EINVAL;
+}
+
+static void
+il3945_clear_free_frames(struct il_priv *il)
+{
+	struct list_head *element;
+
+	D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count);
+
+	while (!list_empty(&il->free_frames)) {
+		element = il->free_frames.next;
+		list_del(element);
+		kfree(list_entry(element, struct il3945_frame, list));
+		il->frames_count--;
+	}
+
+	if (il->frames_count) {
+		IL_WARN("%d frames still in use.  Did we lose one?\n",
+			il->frames_count);
+		il->frames_count = 0;
+	}
+}
+
+static struct il3945_frame *
+il3945_get_free_frame(struct il_priv *il)
+{
+	struct il3945_frame *frame;
+	struct list_head *element;
+	if (list_empty(&il->free_frames)) {
+		frame = kzalloc(sizeof(*frame), GFP_KERNEL);
+		if (!frame) {
+			IL_ERR("Could not allocate frame!\n");
+			return NULL;
+		}
+
+		il->frames_count++;
+		return frame;
+	}
+
+	element = il->free_frames.next;
+	list_del(element);
+	return list_entry(element, struct il3945_frame, list);
+}
+
+static void
+il3945_free_frame(struct il_priv *il, struct il3945_frame *frame)
+{
+	memset(frame, 0, sizeof(*frame));
+	list_add(&frame->list, &il->free_frames);
+}
+
+unsigned int
+il3945_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr,
+			 int left)
+{
+
+	if (!il_is_associated(il) || !il->beacon_skb)
+		return 0;
+
+	if (il->beacon_skb->len > left)
+		return 0;
+
+	memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len);
+
+	return il->beacon_skb->len;
+}
+
+static int
+il3945_send_beacon_cmd(struct il_priv *il)
+{
+	struct il3945_frame *frame;
+	unsigned int frame_size;
+	int rc;
+	u8 rate;
+
+	frame = il3945_get_free_frame(il);
+
+	if (!frame) {
+		IL_ERR("Could not obtain free frame buffer for beacon "
+		       "command.\n");
+		return -ENOMEM;
+	}
+
+	rate = il_get_lowest_plcp(il);
+
+	frame_size = il3945_hw_get_beacon_cmd(il, frame, rate);
+
+	rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]);
+
+	il3945_free_frame(il, frame);
+
+	return rc;
+}
+
+static void
+il3945_unset_hw_params(struct il_priv *il)
+{
+	if (il->_3945.shared_virt)
+		dma_free_coherent(&il->pci_dev->dev,
+				  sizeof(struct il3945_shared),
+				  il->_3945.shared_virt, il->_3945.shared_phys);
+}
+
+static void
+il3945_build_tx_cmd_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
+			     struct il_device_cmd *cmd,
+			     struct sk_buff *skb_frag, int sta_id)
+{
+	struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;
+	struct il_hw_key *keyinfo = &il->stations[sta_id].keyinfo;
+
+	tx_cmd->sec_ctl = 0;
+
+	switch (keyinfo->cipher) {
+	case WLAN_CIPHER_SUITE_CCMP:
+		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+		memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
+		D_TX("tx_cmd with AES hwcrypto\n");
+		break;
+
+	case WLAN_CIPHER_SUITE_TKIP:
+		break;
+
+	case WLAN_CIPHER_SUITE_WEP104:
+		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
+		/* fall through */
+	case WLAN_CIPHER_SUITE_WEP40:
+		tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
+		    (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) <<
+		    TX_CMD_SEC_SHIFT;
+
+		memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);
+
+		D_TX("Configuring packet for WEP encryption with key %d\n",
+		     info->control.hw_key->hw_key_idx);
+		break;
+
+	default:
+		IL_ERR("Unknown encode cipher %x\n", keyinfo->cipher);
+		break;
+	}
+}
+
+/*
+ * handle build C_TX command notification.
+ */
+static void
+il3945_build_tx_cmd_basic(struct il_priv *il, struct il_device_cmd *cmd,
+			  struct ieee80211_tx_info *info,
+			  struct ieee80211_hdr *hdr, u8 std_id)
+{
+	struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;
+	__le32 tx_flags = tx_cmd->tx_flags;
+	__le16 fc = hdr->frame_control;
+
+	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+		tx_flags |= TX_CMD_FLG_ACK_MSK;
+		if (ieee80211_is_mgmt(fc))
+			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+		if (ieee80211_is_probe_resp(fc) &&
+		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
+			tx_flags |= TX_CMD_FLG_TSF_MSK;
+	} else {
+		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
+		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+	}
+
+	tx_cmd->sta_id = std_id;
+	if (ieee80211_has_morefrags(fc))
+		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
+
+	if (ieee80211_is_data_qos(fc)) {
+		u8 *qc = ieee80211_get_qos_ctl(hdr);
+		tx_cmd->tid_tspec = qc[0] & 0xf;
+		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
+	} else {
+		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+	}
+
+	il_tx_cmd_protection(il, info, fc, &tx_flags);
+
+	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
+	if (ieee80211_is_mgmt(fc)) {
+		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
+			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
+		else
+			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
+	} else {
+		tx_cmd->timeout.pm_frame_timeout = 0;
+	}
+
+	tx_cmd->driver_txop = 0;
+	tx_cmd->tx_flags = tx_flags;
+	tx_cmd->next_frame_len = 0;
+}
+
+/*
+ * start C_TX command process
+ */
+static int
+il3945_tx_skb(struct il_priv *il,
+	      struct ieee80211_sta *sta,
+	      struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct il3945_tx_cmd *tx_cmd;
+	struct il_tx_queue *txq = NULL;
+	struct il_queue *q = NULL;
+	struct il_device_cmd *out_cmd;
+	struct il_cmd_meta *out_meta;
+	dma_addr_t phys_addr;
+	dma_addr_t txcmd_phys;
+	int txq_id = skb_get_queue_mapping(skb);
+	u16 len, idx, hdr_len;
+	u16 firstlen, secondlen;
+	u8 sta_id;
+	u8 tid = 0;
+	__le16 fc;
+	u8 wait_write_ptr = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&il->lock, flags);
+	if (il_is_rfkill(il)) {
+		D_DROP("Dropping - RF KILL\n");
+		goto drop_unlock;
+	}
+
+	if ((ieee80211_get_tx_rate(il->hw, info)->hw_value & 0xFF) ==
+	    IL_INVALID_RATE) {
+		IL_ERR("ERROR: No TX rate available.\n");
+		goto drop_unlock;
+	}
+
+	fc = hdr->frame_control;
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+	if (ieee80211_is_auth(fc))
+		D_TX("Sending AUTH frame\n");
+	else if (ieee80211_is_assoc_req(fc))
+		D_TX("Sending ASSOC frame\n");
+	else if (ieee80211_is_reassoc_req(fc))
+		D_TX("Sending REASSOC frame\n");
+#endif
+
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	hdr_len = ieee80211_hdrlen(fc);
+
+	/* Find idx into station table for destination station */
+	sta_id = il_sta_id_or_broadcast(il, sta);
+	if (sta_id == IL_INVALID_STATION) {
+		D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
+		goto drop;
+	}
+
+	D_RATE("station Id %d\n", sta_id);
+
+	if (ieee80211_is_data_qos(fc)) {
+		u8 *qc = ieee80211_get_qos_ctl(hdr);
+		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+		if (unlikely(tid >= MAX_TID_COUNT))
+			goto drop;
+	}
+
+	/* Descriptor for chosen Tx queue */
+	txq = &il->txq[txq_id];
+	q = &txq->q;
+
+	if ((il_queue_space(q) < q->high_mark))
+		goto drop;
+
+	spin_lock_irqsave(&il->lock, flags);
+
+	idx = il_get_cmd_idx(q, q->write_ptr, 0);
+
+	txq->skbs[q->write_ptr] = skb;
+
+	/* Init first empty entry in queue's array of Tx/cmd buffers */
+	out_cmd = txq->cmd[idx];
+	out_meta = &txq->meta[idx];
+	tx_cmd = (struct il3945_tx_cmd *)out_cmd->cmd.payload;
+	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
+	memset(tx_cmd, 0, sizeof(*tx_cmd));
+
+	/*
+	 * Set up the Tx-command (not MAC!) header.
+	 * Store the chosen Tx queue and TFD idx within the sequence field;
+	 * after Tx, uCode's Tx response will return this value so driver can
+	 * locate the frame within the tx queue and do post-tx processing.
+	 */
+	out_cmd->hdr.cmd = C_TX;
+	out_cmd->hdr.sequence =
+	    cpu_to_le16((u16)
+			(QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));
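+	/*
+	 * Illustrative sketch (assuming iwlegacy's usual sequence encoding,
+	 * queue number in bits 8..12 and TFD idx in bits 0..7): txq_id 2
+	 * with write_ptr 5 would give sequence 0x0205, which the uCode Tx
+	 * response later echoes back so this frame can be located again.
+	 */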
+
+	/* Copy MAC header from skb into command buffer */
+	memcpy(tx_cmd->hdr, hdr, hdr_len);
+
+	if (info->control.hw_key)
+		il3945_build_tx_cmd_hwcrypto(il, info, out_cmd, skb, sta_id);
+
+	/* TODO need this for burst mode later on */
+	il3945_build_tx_cmd_basic(il, out_cmd, info, hdr, sta_id);
+
+	il3945_hw_build_tx_cmd_rate(il, out_cmd, info, hdr, sta_id);
+
+	/* Total # bytes to be transmitted */
+	tx_cmd->len = cpu_to_le16((u16) skb->len);
+
+	tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
+	tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
+
+	/*
+	 * Use the first empty entry in this queue's command buffer array
+	 * to contain the Tx command and MAC header concatenated together
+	 * (payload data will be in another buffer).
+	 * Size of this varies, due to varying MAC header length.
+	 * If end is not dword aligned, we'll have 2 extra bytes at the end
+	 * of the MAC header (device reads on dword boundaries).
+	 * We'll tell device about this padding later.
+	 */
+	len =
+	    sizeof(struct il3945_tx_cmd) + sizeof(struct il_cmd_header) +
+	    hdr_len;
+	firstlen = (len + 3) & ~3;
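+	/* e.g. len = 46 rounds up to firstlen = 48 -- the 0 or 2 byte pad
+	 * is the "2 extra bytes" case described below for dword-aligned
+	 * device reads. */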
+
+	/* Physical address of this Tx command's header (not MAC header!),
+	 * within command buffer array. */
+	txcmd_phys =
+	    pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen,
+			   PCI_DMA_TODEVICE);
+	if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys)))
+		goto drop_unlock;
+
+	/* Set up TFD's 2nd entry to point directly to remainder of skb,
+	 * if any (802.11 null frames have no payload). */
+	secondlen = skb->len - hdr_len;
+	if (secondlen > 0) {
+		phys_addr =
+		    pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen,
+				   PCI_DMA_TODEVICE);
+		if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr)))
+			goto drop_unlock;
+	}
+
+	/* Add buffer containing Tx command and MAC(!) header to TFD's
+	 * first entry */
+	il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0);
+	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+	dma_unmap_len_set(out_meta, len, firstlen);
+	if (secondlen > 0)
+		il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, secondlen, 0,
+					       U32_PAD(secondlen));
+
+	if (!ieee80211_has_morefrags(hdr->frame_control)) {
+		txq->need_update = 1;
+	} else {
+		wait_write_ptr = 1;
+		txq->need_update = 0;
+	}
+
+	il_update_stats(il, true, fc, skb->len);
+
+	D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
+	D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
+	il_print_hex_dump(il, IL_DL_TX, tx_cmd, sizeof(*tx_cmd));
+	il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr,
+			  ieee80211_hdrlen(fc));
+
+	/* Tell device the write idx *just past* this latest filled TFD */
+	q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
+	il_txq_update_write_ptr(il, txq);
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
+		if (wait_write_ptr) {
+			spin_lock_irqsave(&il->lock, flags);
+			txq->need_update = 1;
+			il_txq_update_write_ptr(il, txq);
+			spin_unlock_irqrestore(&il->lock, flags);
+		}
+
+		il_stop_queue(il, txq);
+	}
+
+	return 0;
+
+drop_unlock:
+	spin_unlock_irqrestore(&il->lock, flags);
+drop:
+	return -1;
+}
+
+static int
+il3945_get_measurement(struct il_priv *il,
+		       struct ieee80211_measurement_params *params, u8 type)
+{
+	struct il_spectrum_cmd spectrum;
+	struct il_rx_pkt *pkt;
+	struct il_host_cmd cmd = {
+		.id = C_SPECTRUM_MEASUREMENT,
+		.data = (void *)&spectrum,
+		.flags = CMD_WANT_SKB,
+	};
+	u32 add_time = le64_to_cpu(params->start_time);
+	int rc;
+	int spectrum_resp_status;
+	int duration = le16_to_cpu(params->duration);
+
+	if (il_is_associated(il))
+		add_time =
+		    il_usecs_to_beacons(il,
+					le64_to_cpu(params->start_time) -
+					il->_3945.last_tsf,
+					le16_to_cpu(il->timing.beacon_interval));
+
+	memset(&spectrum, 0, sizeof(spectrum));
+
+	spectrum.channel_count = cpu_to_le16(1);
+	spectrum.flags =
+	    RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
+	spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
+	cmd.len = sizeof(spectrum);
+	spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
+
+	if (il_is_associated(il))
+		spectrum.start_time =
+		    il_add_beacon_time(il, il->_3945.last_beacon_time, add_time,
+				       le16_to_cpu(il->timing.beacon_interval));
+	else
+		spectrum.start_time = 0;
+
+	spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
+	spectrum.channels[0].channel = params->channel;
+	spectrum.channels[0].type = type;
+	if (il->active.flags & RXON_FLG_BAND_24G_MSK)
+		spectrum.flags |=
+		    RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
+		    RXON_FLG_TGG_PROTECT_MSK;
+
+	rc = il_send_cmd_sync(il, &cmd);
+	if (rc)
+		return rc;
+
+	pkt = (struct il_rx_pkt *)cmd.reply_page;
+	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+		IL_ERR("Bad return from C_SPECTRUM_MEASUREMENT command\n");
+		rc = -EIO;
+	}
+
+	spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
+	switch (spectrum_resp_status) {
+	case 0:		/* Command will be handled */
+		if (pkt->u.spectrum.id != 0xff) {
+			D_INFO("Replaced existing measurement: %d\n",
+			       pkt->u.spectrum.id);
+			il->measurement_status &= ~MEASUREMENT_READY;
+		}
+		il->measurement_status |= MEASUREMENT_ACTIVE;
+		rc = 0;
+		break;
+
+	case 1:		/* Command will not be handled */
+		rc = -EAGAIN;
+		break;
+	}
+
+	il_free_pages(il, cmd.reply_page);
+
+	return rc;
+}
+
+static void
+il3945_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	struct il_alive_resp *palive;
+	struct delayed_work *pwork;
+
+	palive = &pkt->u.alive_frame;
+
+	D_INFO("Alive ucode status 0x%08X revision 0x%01X 0x%01X\n",
+	       palive->is_valid, palive->ver_type, palive->ver_subtype);
+
+	if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
+		D_INFO("Initialization Alive received.\n");
+		memcpy(&il->card_alive_init, &pkt->u.alive_frame,
+		       sizeof(struct il_alive_resp));
+		pwork = &il->init_alive_start;
+	} else {
+		D_INFO("Runtime Alive received.\n");
+		memcpy(&il->card_alive, &pkt->u.alive_frame,
+		       sizeof(struct il_alive_resp));
+		pwork = &il->alive_start;
+		il3945_disable_events(il);
+	}
+
+	/* We delay the ALIVE response by 5ms to
+	 * give the HW RF Kill time to activate... */
+	if (palive->is_valid == UCODE_VALID_OK)
+		queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5));
+	else
+		IL_WARN("uCode did not respond OK.\n");
+}
+
+static void
+il3945_hdl_add_sta(struct il_priv *il, struct il_rx_buf *rxb)
+{
+#ifdef CONFIG_IWLEGACY_DEBUG
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+#endif
+
+	D_RX("Received C_ADD_STA: 0x%02X\n", pkt->u.status);
+}
+
+static void
+il3945_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	struct il3945_beacon_notif *beacon = &(pkt->u.beacon_status);
+#ifdef CONFIG_IWLEGACY_DEBUG
+	u8 rate = beacon->beacon_notify_hdr.rate;
+
+	D_RX("beacon status %x retries %d iss %d tsf %d %d rate %d\n",
+	     le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
+	     beacon->beacon_notify_hdr.failure_frame,
+	     le32_to_cpu(beacon->ibss_mgr_status),
+	     le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate);
+#endif
+
+	il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
+
+}
+
+/* Handle notification from uCode that card's power state is changing
+ * due to software, hardware, or critical temperature RFKILL */
+static void
+il3945_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
+	unsigned long status = il->status;
+
+	IL_WARN("Card state received: HW:%s SW:%s\n",
+		(flags & HW_CARD_DISABLED) ? "Kill" : "On",
+		(flags & SW_CARD_DISABLED) ? "Kill" : "On");
+
+	_il_wr(il, CSR_UCODE_DRV_GP1_SET, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+	if (flags & HW_CARD_DISABLED)
+		set_bit(S_RFKILL, &il->status);
+	else
+		clear_bit(S_RFKILL, &il->status);
+
+	il_scan_cancel(il);
+
+	if ((test_bit(S_RFKILL, &status) !=
+	     test_bit(S_RFKILL, &il->status)))
+		wiphy_rfkill_set_hw_state(il->hw->wiphy,
+					  test_bit(S_RFKILL, &il->status));
+	else
+		wake_up(&il->wait_command_queue);
+}
+
+/**
+ * il3945_setup_handlers - Initialize Rx handler callbacks
+ *
+ * Set up the RX handlers for each of the reply types sent from the uCode
+ * to the host.
+ *
+ * This function chains into the hardware-specific files for them to set up
+ * any hardware-specific handlers as well.
+ */
+static void
+il3945_setup_handlers(struct il_priv *il)
+{
+	il->handlers[N_ALIVE] = il3945_hdl_alive;
+	il->handlers[C_ADD_STA] = il3945_hdl_add_sta;
+	il->handlers[N_ERROR] = il_hdl_error;
+	il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa;
+	il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement;
+	il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep;
+	il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats;
+	il->handlers[N_BEACON] = il3945_hdl_beacon;
+
+	/*
+	 * The same handler is used for both the REPLY to a discrete
+	 * stats request from the host as well as for the periodic
+	 * stats notifications (after received beacons) from the uCode.
+	 */
+	il->handlers[C_STATS] = il3945_hdl_c_stats;
+	il->handlers[N_STATS] = il3945_hdl_stats;
+
+	il_setup_rx_scan_handlers(il);
+	il->handlers[N_CARD_STATE] = il3945_hdl_card_state;
+
+	/* Set up hardware specific Rx handlers */
+	il3945_hw_handler_setup(il);
+}
+
+/************************** RX-FUNCTIONS ****************************/
+/*
+ * Rx theory of operation
+ *
+ * The host allocates 32 DMA target addresses and passes the host address
+ * to the firmware at register IL_RFDS_TBL_LOWER + N * RFD_SIZE where N is
+ * 0 to 31.
+ *
+ * Rx Queue Indexes
+ * The host/firmware share two idx registers for managing the Rx buffers.
+ *
+ * The READ idx maps to the first position that the firmware may be writing
+ * to -- the driver can read up to (but not including) this position and get
+ * good data.
+ * The READ idx is managed by the firmware once the card is enabled.
+ *
+ * The WRITE idx maps to the last position the driver has read from -- the
+ * position preceding WRITE is the last slot in which the firmware can
+ * place a packet.
+ *
+ * The queue is empty (no good data) if WRITE = READ - 1, and is full if
+ * WRITE = READ.
+ *
+ * During initialization, the host sets up the READ queue position to the first
+ * IDX position, and WRITE to the last (READ - 1 wrapped)
+ *
+ * When the firmware places a packet in a buffer, it will advance the READ idx
+ * and fire the RX interrupt.  The driver can then query the READ idx and
+ * process as many packets as possible, moving the WRITE idx forward as it
+ * resets the Rx queue buffers with new memory.
+ *
+ * The management in the driver is as follows:
+ * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free.  When
+ *   iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
+ *   to replenish the iwl->rxq->rx_free.
+ * + In il3945_rx_replenish (scheduled) if 'processed' != 'read' then the
+ *   iwl->rxq is replenished and the READ IDX is updated (updating the
+ *   'processed' and 'read' driver idxes as well)
+ * + A received packet is processed and handed to the kernel network stack,
+ *   detached from the iwl->rxq.  The driver 'processed' idx is updated.
+ * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
+ *   list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
+ *   IDX is not incremented and iwl->status(RX_STALLED) is set.  If there
+ *   were enough free buffers and RX_STALLED is set it is cleared.
+ *
+ *
+ * Driver sequence:
+ *
+ * il3945_rx_replenish()     Replenishes rx_free list from rx_used, and calls
+ *                            il3945_rx_queue_restock
+ * il3945_rx_queue_restock() Moves available buffers from rx_free into Rx
+ *                            queue, updates firmware pointers, and updates
+ *                            the WRITE idx.  If insufficient rx_free buffers
+ *                            are available, schedules il3945_rx_replenish
+ *
+ * -- enable interrupts --
+ * ISR - il3945_rx()         Detaches il_rx_bufs from the pool up to the
+ *                            READ IDX.
+ *                            Moves the packet buffer from queue to rx_used.
+ *                            Calls il3945_rx_queue_restock to refill any empty
+ *                            slots.
+ * ...
+ *
+ */
+
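+/*
+ * Minimal sketch of the full/empty rules above (illustrative only, not
+ * part of the driver), with idxes wrapped by RX_QUEUE_MASK:
+ *
+ *	empty:	((write + 1) & RX_QUEUE_MASK) == read	(WRITE = READ - 1)
+ *	full:	write == read				(WRITE = READ)
+ */
+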
+/**
+ * il3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
+ */
+static inline __le32
+il3945_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
+{
+	return cpu_to_le32((u32) dma_addr);
+}
+
+/**
+ * il3945_rx_queue_restock - refill RX queue from pre-allocated pool
+ *
+ * If there are slots in the RX queue that need to be restocked,
+ * and we have free pre-allocated buffers, fill the ranks as much
+ * as we can, pulling from rx_free.
+ *
+ * This moves the 'write' idx forward to catch up with 'processed', and
+ * also updates the memory address in the firmware to reference the new
+ * target buffer.
+ */
+static void
+il3945_rx_queue_restock(struct il_priv *il)
+{
+	struct il_rx_queue *rxq = &il->rxq;
+	struct list_head *element;
+	struct il_rx_buf *rxb;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rxq->lock, flags);
+	while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
+		/* Get next free Rx buffer, remove from free list */
+		element = rxq->rx_free.next;
+		rxb = list_entry(element, struct il_rx_buf, list);
+		list_del(element);
+
+		/* Point to Rx buffer via next RBD in circular buffer */
+		rxq->bd[rxq->write] =
+		    il3945_dma_addr2rbd_ptr(il, rxb->page_dma);
+		rxq->queue[rxq->write] = rxb;
+		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
+		rxq->free_count--;
+	}
+	spin_unlock_irqrestore(&rxq->lock, flags);
+	/* If the pre-allocated buffer pool is dropping low, schedule to
+	 * refill it */
+	if (rxq->free_count <= RX_LOW_WATERMARK)
+		queue_work(il->workqueue, &il->rx_replenish);
+
+	/* If we've added more space for the firmware to place data, tell it.
+	 * Increment device's write pointer in multiples of 8. */
+	if (rxq->write_actual != (rxq->write & ~0x7) ||
+	    abs(rxq->write - rxq->read) > 7) {
+		spin_lock_irqsave(&rxq->lock, flags);
+		rxq->need_update = 1;
+		spin_unlock_irqrestore(&rxq->lock, flags);
+		il_rx_queue_update_write_ptr(il, rxq);
+	}
+}
+
+/**
+ * il3945_rx_replenish - Move all used packets from rx_used to rx_free
+ *
+ * When moving to rx_free an SKB is allocated for the slot.
+ *
+ * Also restock the Rx queue via il3945_rx_queue_restock.
+ * This is called as a scheduled work item (except for during initialization)
+ */
+static void
+il3945_rx_allocate(struct il_priv *il, gfp_t priority)
+{
+	struct il_rx_queue *rxq = &il->rxq;
+	struct list_head *element;
+	struct il_rx_buf *rxb;
+	struct page *page;
+	dma_addr_t page_dma;
+	unsigned long flags;
+	gfp_t gfp_mask = priority;
+
+	while (1) {
+		spin_lock_irqsave(&rxq->lock, flags);
+		if (list_empty(&rxq->rx_used)) {
+			spin_unlock_irqrestore(&rxq->lock, flags);
+			return;
+		}
+		spin_unlock_irqrestore(&rxq->lock, flags);
+
+		if (rxq->free_count > RX_LOW_WATERMARK)
+			gfp_mask |= __GFP_NOWARN;
+
+		if (il->hw_params.rx_page_order > 0)
+			gfp_mask |= __GFP_COMP;
+
+		/* Alloc a new receive buffer */
+		page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
+		if (!page) {
+			if (net_ratelimit())
+				D_INFO("Failed to allocate SKB buffer.\n");
+			if (rxq->free_count <= RX_LOW_WATERMARK &&
+			    net_ratelimit())
+				IL_ERR("Failed to allocate SKB buffer with %0x. "
+				       "Only %u free buffers remaining.\n",
+				       priority, rxq->free_count);
+			/* We don't reschedule replenish work here -- we will
+			 * call the restock method and if it still needs
+			 * more buffers it will schedule replenish */
+			break;
+		}
+
+		/* Get physical address of RB/SKB */
+		page_dma =
+		    pci_map_page(il->pci_dev, page, 0,
+				 PAGE_SIZE << il->hw_params.rx_page_order,
+				 PCI_DMA_FROMDEVICE);
+
+		if (unlikely(pci_dma_mapping_error(il->pci_dev, page_dma))) {
+			__free_pages(page, il->hw_params.rx_page_order);
+			break;
+		}
+
+		spin_lock_irqsave(&rxq->lock, flags);
+
+		if (list_empty(&rxq->rx_used)) {
+			spin_unlock_irqrestore(&rxq->lock, flags);
+			pci_unmap_page(il->pci_dev, page_dma,
+				       PAGE_SIZE << il->hw_params.rx_page_order,
+				       PCI_DMA_FROMDEVICE);
+			__free_pages(page, il->hw_params.rx_page_order);
+			return;
+		}
+
+		element = rxq->rx_used.next;
+		rxb = list_entry(element, struct il_rx_buf, list);
+		list_del(element);
+
+		rxb->page = page;
+		rxb->page_dma = page_dma;
+		list_add_tail(&rxb->list, &rxq->rx_free);
+		rxq->free_count++;
+		il->alloc_rxb_page++;
+
+		spin_unlock_irqrestore(&rxq->lock, flags);
+	}
+}
+
+void
+il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
+{
+	unsigned long flags;
+	int i;
+	spin_lock_irqsave(&rxq->lock, flags);
+	INIT_LIST_HEAD(&rxq->rx_free);
+	INIT_LIST_HEAD(&rxq->rx_used);
+	/* Fill the rx_used queue with _all_ of the Rx buffers */
+	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+		/* In the reset function, these buffers may have been allocated
+		 * to an SKB, so we need to unmap and free potential storage */
+		if (rxq->pool[i].page != NULL) {
+			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
+				       PAGE_SIZE << il->hw_params.rx_page_order,
+				       PCI_DMA_FROMDEVICE);
+			__il_free_pages(il, rxq->pool[i].page);
+			rxq->pool[i].page = NULL;
+		}
+		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+	}
+
+	/* Set ourselves up so that we have processed and used all buffers,
+	 * but have not restocked the Rx queue with fresh buffers */
+	rxq->read = rxq->write = 0;
+	rxq->write_actual = 0;
+	rxq->free_count = 0;
+	spin_unlock_irqrestore(&rxq->lock, flags);
+}
+
+void
+il3945_rx_replenish(void *data)
+{
+	struct il_priv *il = data;
+	unsigned long flags;
+
+	il3945_rx_allocate(il, GFP_KERNEL);
+
+	spin_lock_irqsave(&il->lock, flags);
+	il3945_rx_queue_restock(il);
+	spin_unlock_irqrestore(&il->lock, flags);
+}
+
+static void
+il3945_rx_replenish_now(struct il_priv *il)
+{
+	il3945_rx_allocate(il, GFP_ATOMIC);
+
+	il3945_rx_queue_restock(il);
+}
+
+/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
+ * If an SKB has been detached, the POOL needs to have its SKB set to NULL.
+ * This free routine walks the list of POOL entries and, if the SKB is set
+ * to non-NULL, it is unmapped and freed.
+ */
+static void
+il3945_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
+{
+	int i;
+	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
+		if (rxq->pool[i].page != NULL) {
+			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
+				       PAGE_SIZE << il->hw_params.rx_page_order,
+				       PCI_DMA_FROMDEVICE);
+			__il_free_pages(il, rxq->pool[i].page);
+			rxq->pool[i].page = NULL;
+		}
+	}
+
+	dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+			  rxq->bd_dma);
+	dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status),
+			  rxq->rb_stts, rxq->rb_stts_dma);
+	rxq->bd = NULL;
+	rxq->rb_stts = NULL;
+}
+
+/* Convert linear signal-to-noise ratio into dB */
+static u8 ratio2dB[100] = {
+/*	 0   1   2   3   4   5   6   7   8   9 */
+	0, 0, 6, 10, 12, 14, 16, 17, 18, 19,	/* 00 - 09 */
+	20, 21, 22, 22, 23, 23, 24, 25, 26, 26,	/* 10 - 19 */
+	26, 26, 26, 27, 27, 28, 28, 28, 29, 29,	/* 20 - 29 */
+	29, 30, 30, 30, 31, 31, 31, 31, 32, 32,	/* 30 - 39 */
+	32, 32, 32, 33, 33, 33, 33, 33, 34, 34,	/* 40 - 49 */
+	34, 34, 34, 34, 35, 35, 35, 35, 35, 35,	/* 50 - 59 */
+	36, 36, 36, 36, 36, 36, 36, 37, 37, 37,	/* 60 - 69 */
+	37, 37, 37, 37, 37, 38, 38, 38, 38, 38,	/* 70 - 79 */
+	38, 38, 38, 38, 38, 39, 39, 39, 39, 39,	/* 80 - 89 */
+	39, 39, 39, 39, 39, 40, 40, 40, 40, 40	/* 90 - 99 */
+};
+
+/* Calculates a relative dB value from a ratio of linear
+ *   (i.e. not dB) signal levels.
+ * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
+int
+il3945_calc_db_from_ratio(int sig_ratio)
+{
+	/* 1000:1 or higher just report as 60 dB */
+	if (sig_ratio >= 1000)
+		return 60;
+
+	/* 100:1 or higher, divide by 10 and use table,
+	 *   add 20 dB to make up for divide by 10 */
+	if (sig_ratio >= 100)
+		return 20 + (int)ratio2dB[sig_ratio / 10];
+
+	/* We shouldn't see this */
+	if (sig_ratio < 1)
+		return 0;
+
+	/* Use table for ratios 1:1 - 99:1 */
+	return (int)ratio2dB[sig_ratio];
+}
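+
+/*
+ * Worked check of the table above (illustrative only): a linear ratio of
+ * 10 maps to ratio2dB[10] = 20, matching 20*log10(10) = 20 dB for voltage
+ * levels; a ratio of 300 takes the >= 100 branch and yields
+ * 20 + ratio2dB[30] = 20 + 29 = 49 dB (20*log10(300) is roughly 49.5).
+ */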
+
+/**
+ * il3945_rx_handle - Main entry function for receiving responses from uCode
+ *
+ * Uses the il->handlers callback function array to invoke
+ * the appropriate handlers, including command responses,
+ * frame-received notifications, and other notifications.
+ */
+static void
+il3945_rx_handle(struct il_priv *il)
+{
+	struct il_rx_buf *rxb;
+	struct il_rx_pkt *pkt;
+	struct il_rx_queue *rxq = &il->rxq;
+	u32 r, i;
+	int reclaim;
+	unsigned long flags;
+	u8 fill_rx = 0;
+	u32 count = 8;
+	int total_empty = 0;
+
+	/* uCode's read idx (stored in shared DRAM) indicates the last Rx
+	 * buffer that the driver may process (last buffer filled by ucode). */
+	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
+	i = rxq->read;
+
+	/* calculate the total number of frames to be restocked after
+	 * handling RX */
+	total_empty = r - rxq->write_actual;
+	if (total_empty < 0)
+		total_empty += RX_QUEUE_SIZE;
+
+	if (total_empty > (RX_QUEUE_SIZE / 2))
+		fill_rx = 1;
+	/* Rx interrupt, but nothing sent from uCode */
+	if (i == r)
+		D_RX("r = %d, i = %d\n", r, i);
+
+	while (i != r) {
+		int len;
+
+		rxb = rxq->queue[i];
+
+		/* If an RXB doesn't have a Rx queue slot associated with it,
+		 * then a bug has been introduced in the queue refilling
+		 * routines -- catch it here */
+		BUG_ON(rxb == NULL);
+
+		rxq->queue[i] = NULL;
+
+		pci_unmap_page(il->pci_dev, rxb->page_dma,
+			       PAGE_SIZE << il->hw_params.rx_page_order,
+			       PCI_DMA_FROMDEVICE);
+		pkt = rxb_addr(rxb);
+
+		len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
+		len += sizeof(u32);	/* account for status word */
+
+		reclaim = il_need_reclaim(il, pkt);
+
+		/* Based on type of command response or notification,
+		 *   handle those that need handling via function in
+		 *   handlers table.  See il3945_setup_handlers() */
+		if (il->handlers[pkt->hdr.cmd]) {
+			D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i,
+			     il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+			il->isr_stats.handlers[pkt->hdr.cmd]++;
+			il->handlers[pkt->hdr.cmd] (il, rxb);
+		} else {
+			/* No handling needed */
+			D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r,
+			     i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+		}
+
+		/*
+		 * XXX: After here, we should always check rxb->page
+		 * against NULL before touching it or its virtual
+		 * memory (pkt). Because some handler might have
+		 * already taken or freed the pages.
+		 */
+
+		if (reclaim) {
+			/* Invoke any callbacks, transfer the buffer to caller,
+			 * and fire off the (possibly) blocking il_send_cmd()
+			 * as we reclaim the driver command queue */
+			if (rxb->page)
+				il_tx_cmd_complete(il, rxb);
+			else
+				IL_WARN("Claim null rxb?\n");
+		}
+
+		/* Reuse the page if possible. For notification packets and
+		 * SKBs that fail to Rx correctly, add them back into the
+		 * rx_free list for reuse later. */
+		spin_lock_irqsave(&rxq->lock, flags);
+		if (rxb->page != NULL) {
+			rxb->page_dma =
+			    pci_map_page(il->pci_dev, rxb->page, 0,
+					 PAGE_SIZE << il->hw_params.rx_page_order,
+					 PCI_DMA_FROMDEVICE);
+			if (unlikely(pci_dma_mapping_error(il->pci_dev,
+							   rxb->page_dma))) {
+				__il_free_pages(il, rxb->page);
+				rxb->page = NULL;
+				list_add_tail(&rxb->list, &rxq->rx_used);
+			} else {
+				list_add_tail(&rxb->list, &rxq->rx_free);
+				rxq->free_count++;
+			}
+		} else
+			list_add_tail(&rxb->list, &rxq->rx_used);
+
+		spin_unlock_irqrestore(&rxq->lock, flags);
+
+		i = (i + 1) & RX_QUEUE_MASK;
+		/* If there are a lot of unused frames,
+		 * restock the Rx queue so ucode won't assert. */
+		if (fill_rx) {
+			count++;
+			if (count >= 8) {
+				rxq->read = i;
+				il3945_rx_replenish_now(il);
+				count = 0;
+			}
+		}
+	}
+
+	/* Backtrack one entry */
+	rxq->read = i;
+	if (fill_rx)
+		il3945_rx_replenish_now(il);
+	else
+		il3945_rx_queue_restock(il);
+}
+
+/* call this function to flush any scheduled tasklet */
+static inline void
+il3945_synchronize_irq(struct il_priv *il)
+{
+	/* wait to make sure we flush pending tasklet */
+	synchronize_irq(il->pci_dev->irq);
+	tasklet_kill(&il->irq_tasklet);
+}
+
+static const char *
+il3945_desc_lookup(int i)
+{
+	switch (i) {
+	case 1:
+		return "FAIL";
+	case 2:
+		return "BAD_PARAM";
+	case 3:
+		return "BAD_CHECKSUM";
+	case 4:
+		return "NMI_INTERRUPT";
+	case 5:
+		return "SYSASSERT";
+	case 6:
+		return "FATAL_ERROR";
+	}
+
+	return "UNKNOWN";
+}
+
+#define ERROR_START_OFFSET  (1 * sizeof(u32))
+#define ERROR_ELEM_SIZE     (7 * sizeof(u32))
+
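+/*
+ * Each error-log entry is ERROR_ELEM_SIZE = 7 u32s, read back by the loop
+ * below in the order desc, time, blink1, blink2, ilink1, ilink2, data1;
+ * the first u32 at 'base' holds the entry count.
+ */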
+void
+il3945_dump_nic_error_log(struct il_priv *il)
+{
+	u32 i;
+	u32 desc, time, count, base, data1;
+	u32 blink1, blink2, ilink1, ilink2;
+
+	base = le32_to_cpu(il->card_alive.error_event_table_ptr);
+
+	if (!il3945_hw_valid_rtc_data_addr(base)) {
+		IL_ERR("Not valid error log pointer 0x%08X\n", base);
+		return;
+	}
+
+	count = il_read_targ_mem(il, base);
+
+	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
+		IL_ERR("Start IWL Error Log Dump:\n");
+		IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count);
+	}
+
+	IL_ERR("Desc       Time       asrtPC  blink2 "
+	       "ilink1  nmiPC   Line\n");
+	for (i = ERROR_START_OFFSET;
+	     i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
+	     i += ERROR_ELEM_SIZE) {
+		desc = il_read_targ_mem(il, base + i);
+		time = il_read_targ_mem(il, base + i + 1 * sizeof(u32));
+		blink1 = il_read_targ_mem(il, base + i + 2 * sizeof(u32));
+		blink2 = il_read_targ_mem(il, base + i + 3 * sizeof(u32));
+		ilink1 = il_read_targ_mem(il, base + i + 4 * sizeof(u32));
+		ilink2 = il_read_targ_mem(il, base + i + 5 * sizeof(u32));
+		data1 = il_read_targ_mem(il, base + i + 6 * sizeof(u32));
+
+		IL_ERR("%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
+		       il3945_desc_lookup(desc), desc, time, blink1, blink2,
+		       ilink1, ilink2, data1);
+	}
+}
+
+static void
+il3945_irq_tasklet(struct il_priv *il)
+{
+	u32 inta, handled = 0;
+	u32 inta_fh;
+	unsigned long flags;
+#ifdef CONFIG_IWLEGACY_DEBUG
+	u32 inta_mask;
+#endif
+
+	spin_lock_irqsave(&il->lock, flags);
+
+	/* Ack/clear/reset pending uCode interrupts.
+	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
+	 *  and will clear only when CSR_FH_INT_STATUS gets cleared. */
+	inta = _il_rd(il, CSR_INT);
+	_il_wr(il, CSR_INT, inta);
+
+	/* Ack/clear/reset pending flow-handler (DMA) interrupts.
+	 * Any new interrupts that happen after this, either while we're
+	 * in this tasklet, or later, will show up in next ISR/tasklet. */
+	inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
+	_il_wr(il, CSR_FH_INT_STATUS, inta_fh);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+	if (il_get_debug_level(il) & IL_DL_ISR) {
+		/* just for debug */
+		inta_mask = _il_rd(il, CSR_INT_MASK);
+		D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta,
+		      inta_mask, inta_fh);
+	}
+#endif
+
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
+	 * atomic, make sure that inta covers all the interrupts that
+	 * we've discovered, even if FH interrupt came in just after
+	 * reading CSR_INT. */
+	if (inta_fh & CSR39_FH_INT_RX_MASK)
+		inta |= CSR_INT_BIT_FH_RX;
+	if (inta_fh & CSR39_FH_INT_TX_MASK)
+		inta |= CSR_INT_BIT_FH_TX;
+
+	/* Now service all interrupt bits discovered above. */
+	if (inta & CSR_INT_BIT_HW_ERR) {
+		IL_ERR("Hardware error detected.  Restarting.\n");
+
+		/* Tell the device to stop sending interrupts */
+		il_disable_interrupts(il);
+
+		il->isr_stats.hw++;
+		il_irq_handle_error(il);
+
+		handled |= CSR_INT_BIT_HW_ERR;
+
+		return;
+	}
+#ifdef CONFIG_IWLEGACY_DEBUG
+	if (il_get_debug_level(il) & (IL_DL_ISR)) {
+		/* NIC fires this, but we don't use it, redundant with WAKEUP */
+		if (inta & CSR_INT_BIT_SCD) {
+			D_ISR("Scheduler finished transmitting "
+			      "the frame(s).\n");
+			il->isr_stats.sch++;
+		}
+
+		/* Alive notification via Rx interrupt will do the real work */
+		if (inta & CSR_INT_BIT_ALIVE) {
+			D_ISR("Alive interrupt\n");
+			il->isr_stats.alive++;
+		}
+	}
+#endif
+	/* Safely ignore these bits for debug checks below */
+	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
+
+	/* Error detected by uCode */
+	if (inta & CSR_INT_BIT_SW_ERR) {
+		IL_ERR("Microcode SW error detected. Restarting 0x%X.\n",
+		       inta);
+		il->isr_stats.sw++;
+		il_irq_handle_error(il);
+		handled |= CSR_INT_BIT_SW_ERR;
+	}
+
+	/* uCode wakes up after power-down sleep */
+	if (inta & CSR_INT_BIT_WAKEUP) {
+		D_ISR("Wakeup interrupt\n");
+		il_rx_queue_update_write_ptr(il, &il->rxq);
+
+		spin_lock_irqsave(&il->lock, flags);
+		il_txq_update_write_ptr(il, &il->txq[0]);
+		il_txq_update_write_ptr(il, &il->txq[1]);
+		il_txq_update_write_ptr(il, &il->txq[2]);
+		il_txq_update_write_ptr(il, &il->txq[3]);
+		il_txq_update_write_ptr(il, &il->txq[4]);
+		spin_unlock_irqrestore(&il->lock, flags);
+
+		il->isr_stats.wakeup++;
+		handled |= CSR_INT_BIT_WAKEUP;
+	}
+
+	/* All uCode command responses, including Tx command responses,
+	 * Rx "responses" (frame-received notification), and other
+	 * notifications from uCode come through here*/
+	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
+		il3945_rx_handle(il);
+		il->isr_stats.rx++;
+		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
+	}
+
+	if (inta & CSR_INT_BIT_FH_TX) {
+		D_ISR("Tx interrupt\n");
+		il->isr_stats.tx++;
+
+		_il_wr(il, CSR_FH_INT_STATUS, (1 << 6));
+		il_wr(il, FH39_TCSR_CREDIT(FH39_SRVC_CHNL), 0x0);
+		handled |= CSR_INT_BIT_FH_TX;
+	}
+
+	if (inta & ~handled) {
+		IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
+		il->isr_stats.unhandled++;
+	}
+
+	if (inta & ~il->inta_mask) {
+		IL_WARN("Disabled INTA bits 0x%08x were pending\n",
+			inta & ~il->inta_mask);
+		IL_WARN("   with inta_fh = 0x%08x\n", inta_fh);
+	}
+
+	/* Re-enable all interrupts */
+	/* only Re-enable if disabled by irq */
+	if (test_bit(S_INT_ENABLED, &il->status))
+		il_enable_interrupts(il);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+	if (il_get_debug_level(il) & (IL_DL_ISR)) {
+		inta = _il_rd(il, CSR_INT);
+		inta_mask = _il_rd(il, CSR_INT_MASK);
+		inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
+		D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
+		      "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
+	}
+#endif
+}
+
+static int
+il3945_get_channels_for_scan(struct il_priv *il, enum nl80211_band band,
+			     u8 is_active, u8 n_probes,
+			     struct il3945_scan_channel *scan_ch,
+			     struct ieee80211_vif *vif)
+{
+	struct ieee80211_channel *chan;
+	const struct ieee80211_supported_band *sband;
+	const struct il_channel_info *ch_info;
+	u16 passive_dwell = 0;
+	u16 active_dwell = 0;
+	int added, i;
+
+	sband = il_get_hw_mode(il, band);
+	if (!sband)
+		return 0;
+
+	active_dwell = il_get_active_dwell_time(il, band, n_probes);
+	passive_dwell = il_get_passive_dwell_time(il, band, vif);
+
+	if (passive_dwell <= active_dwell)
+		passive_dwell = active_dwell + 1;
+
+	for (i = 0, added = 0; i < il->scan_request->n_channels; i++) {
+		chan = il->scan_request->channels[i];
+
+		if (chan->band != band)
+			continue;
+
+		scan_ch->channel = chan->hw_value;
+
+		ch_info = il_get_channel_info(il, band, scan_ch->channel);
+		if (!il_is_channel_valid(ch_info)) {
+			D_SCAN("Channel %d is INVALID for this band.\n",
+			       scan_ch->channel);
+			continue;
+		}
+
+		scan_ch->active_dwell = cpu_to_le16(active_dwell);
+		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+		/* If passive, set up for auto-switch and use the long
+		 * active_dwell time. */
+		if (!is_active || il_is_channel_passive(ch_info) ||
+		    (chan->flags & IEEE80211_CHAN_NO_IR)) {
+			scan_ch->type = 0;	/* passive */
+			if (IL_UCODE_API(il->ucode_ver) == 1)
+				scan_ch->active_dwell =
+				    cpu_to_le16(passive_dwell - 1);
+		} else {
+			scan_ch->type = 1;	/* active */
+		}
+
+		/* Set direct probe bits. These may be used both for active
+		 * scan channels (probes get sent right away) and for passive
+		 * channels (probes get sent only after hearing a clear Rx
+		 * packet). */
+		if (IL_UCODE_API(il->ucode_ver) >= 2) {
+			if (n_probes)
+				scan_ch->type |= IL39_SCAN_PROBE_MASK(n_probes);
+		} else {
+			/* uCode v1 does not allow setting direct probe bits
+			 * on a passive channel. */
+			if ((scan_ch->type & 1) && n_probes)
+				scan_ch->type |= IL39_SCAN_PROBE_MASK(n_probes);
+		}
+
+		/* Set txpower levels to defaults */
+		scan_ch->tpc.dsp_atten = 110;
+		/* scan_pwr_info->tpc.dsp_atten; */
+
+		/*scan_pwr_info->tpc.tx_gain; */
+		if (band == NL80211_BAND_5GHZ)
+			scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
+		else {
+			scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
+			/* NOTE: if we were doing 6Mb OFDM for scans we'd use
+			 * power level:
+			 * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
+			 */
+		}
+
+		D_SCAN("Scanning %d [%s %d]\n", scan_ch->channel,
+		       (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
+		       (scan_ch->type & 1) ? active_dwell : passive_dwell);
+
+		scan_ch++;
+		added++;
+	}
+
+	D_SCAN("total channels to scan %d\n", added);
+	return added;
+}
+
+static void
+il3945_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates)
+{
+	int i;
+
+	for (i = 0; i < RATE_COUNT_LEGACY; i++) {
+		rates[i].bitrate = il3945_rates[i].ieee * 5;
+		rates[i].hw_value = i;	/* Rate scaling will work on idxes */
+		rates[i].hw_value_short = i;
+		rates[i].flags = 0;
+		if (i > IL39_LAST_OFDM_RATE || i < IL_FIRST_OFDM_RATE) {
+			/*
+			 * If CCK != 1M then set short preamble rate flag.
+			 */
+			rates[i].flags |=
+			    (il3945_rates[i].plcp ==
+			     10) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE;
+		}
+	}
+}
+
+/******************************************************************************
+ *
+ * uCode download functions
+ *
+ ******************************************************************************/
+
+static void
+il3945_dealloc_ucode_pci(struct il_priv *il)
+{
+	il_free_fw_desc(il->pci_dev, &il->ucode_code);
+	il_free_fw_desc(il->pci_dev, &il->ucode_data);
+	il_free_fw_desc(il->pci_dev, &il->ucode_data_backup);
+	il_free_fw_desc(il->pci_dev, &il->ucode_init);
+	il_free_fw_desc(il->pci_dev, &il->ucode_init_data);
+	il_free_fw_desc(il->pci_dev, &il->ucode_boot);
+}
+
+/**
+ * il3945_verify_inst_full - verify runtime uCode image in card vs. host,
+ *     looking at all data.
+ */
+static int
+il3945_verify_inst_full(struct il_priv *il, __le32 * image, u32 len)
+{
+	u32 val;
+	u32 save_len = len;
+	int rc = 0;
+	u32 errcnt;
+
+	D_INFO("ucode inst image size is %u\n", len);
+
+	il_wr(il, HBUS_TARG_MEM_RADDR, IL39_RTC_INST_LOWER_BOUND);
+
+	errcnt = 0;
+	for (; len > 0; len -= sizeof(u32), image++) {
+		/* read data comes through single port, auto-incr addr */
+		/* NOTE: Use the debugless read so we don't flood kernel log
+		 * if IL_DL_IO is set */
+		val = _il_rd(il, HBUS_TARG_MEM_RDAT);
+		if (val != le32_to_cpu(*image)) {
+			IL_ERR("uCode INST section is invalid at "
+			       "offset 0x%x, is 0x%x, s/b 0x%x\n",
+			       save_len - len, val, le32_to_cpu(*image));
+			rc = -EIO;
+			errcnt++;
+			if (errcnt >= 20)
+				break;
+		}
+	}
+
+	if (!errcnt)
+		D_INFO("ucode image in INSTRUCTION memory is good\n");
+
+	return rc;
+}
+
+/**
+ * il3945_verify_inst_sparse - verify runtime uCode image in card vs. host,
+ *   using sample data 100 bytes apart.  If these sample points are good,
+ *   it's a pretty good bet that everything between them is good, too.
+ */
+static int
+il3945_verify_inst_sparse(struct il_priv *il, __le32 * image, u32 len)
+{
+	u32 val;
+	int rc = 0;
+	u32 errcnt = 0;
+	u32 i;
+
+	D_INFO("ucode inst image size is %u\n", len);
+
+	for (i = 0; i < len; i += 100, image += 100 / sizeof(u32)) {
+		/* read data comes through single port, auto-incr addr */
+		/* NOTE: Use the debugless read so we don't flood kernel log
+		 * if IL_DL_IO is set */
+		il_wr(il, HBUS_TARG_MEM_RADDR, i + IL39_RTC_INST_LOWER_BOUND);
+		val = _il_rd(il, HBUS_TARG_MEM_RDAT);
+		if (val != le32_to_cpu(*image)) {
+#if 0				/* Enable this if you want to see details */
+			IL_ERR("uCode INST section is invalid at "
+			       "offset 0x%x, is 0x%x, s/b 0x%x\n", i, val,
+			       *image);
+#endif
+			rc = -EIO;
+			errcnt++;
+			if (errcnt >= 3)
+				break;
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * il3945_verify_ucode - determine which instruction image is in SRAM,
+ *    and verify its contents
+ */
+static int
+il3945_verify_ucode(struct il_priv *il)
+{
+	__le32 *image;
+	u32 len;
+	int rc = 0;
+
+	/* Try bootstrap */
+	image = (__le32 *) il->ucode_boot.v_addr;
+	len = il->ucode_boot.len;
+	rc = il3945_verify_inst_sparse(il, image, len);
+	if (rc == 0) {
+		D_INFO("Bootstrap uCode is good in inst SRAM\n");
+		return 0;
+	}
+
+	/* Try initialize */
+	image = (__le32 *) il->ucode_init.v_addr;
+	len = il->ucode_init.len;
+	rc = il3945_verify_inst_sparse(il, image, len);
+	if (rc == 0) {
+		D_INFO("Initialize uCode is good in inst SRAM\n");
+		return 0;
+	}
+
+	/* Try runtime/protocol */
+	image = (__le32 *) il->ucode_code.v_addr;
+	len = il->ucode_code.len;
+	rc = il3945_verify_inst_sparse(il, image, len);
+	if (rc == 0) {
+		D_INFO("Runtime uCode is good in inst SRAM\n");
+		return 0;
+	}
+
+	IL_ERR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
+
+	/* Since nothing seems to match, show first several data entries in
+	 * instruction SRAM, so maybe visual inspection will give a clue.
+	 * Selection of bootstrap image (vs. other images) is arbitrary. */
+	image = (__le32 *) il->ucode_boot.v_addr;
+	len = il->ucode_boot.len;
+	rc = il3945_verify_inst_full(il, image, len);
+
+	return rc;
+}
+
+static void
+il3945_nic_start(struct il_priv *il)
+{
+	/* Remove all resets to allow NIC to operate */
+	_il_wr(il, CSR_RESET, 0);
+}
+
+#define IL3945_UCODE_GET(item)						\
+static u32 il3945_ucode_get_##item(const struct il_ucode_header *ucode)\
+{									\
+	return le32_to_cpu(ucode->v1.item);				\
+}
+
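+/*
+ * The v1 header is six __le32 words: ver plus the five size fields
+ * fetched via IL3945_UCODE_GET() below, i.e. 6 * 4 = 24 bytes.
+ */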
+static u32
+il3945_ucode_get_header_size(u32 api_ver)
+{
+	return 24;
+}
+
+static u8 *
+il3945_ucode_get_data(const struct il_ucode_header *ucode)
+{
+	return (u8 *) ucode->v1.data;
+}
+
+IL3945_UCODE_GET(inst_size);
+IL3945_UCODE_GET(data_size);
+IL3945_UCODE_GET(init_size);
+IL3945_UCODE_GET(init_data_size);
+IL3945_UCODE_GET(boot_size);
+
+/**
+ * il3945_read_ucode - Read uCode images from disk file.
+ *
+ * Copy into buffers for card to fetch via bus-mastering
+ */
+static int
+il3945_read_ucode(struct il_priv *il)
+{
+	const struct il_ucode_header *ucode;
+	int ret = -EINVAL, idx;
+	const struct firmware *ucode_raw;
+	/* firmware file name contains uCode/driver compatibility version */
+	const char *name_pre = il->cfg->fw_name_pre;
+	const unsigned int api_max = il->cfg->ucode_api_max;
+	const unsigned int api_min = il->cfg->ucode_api_min;
+	char buf[25];
+	u8 *src;
+	size_t len;
+	u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size;
+
+	/* Ask kernel firmware_class module to get the boot firmware off disk.
+	 * request_firmware() is synchronous, file is in memory on return. */
+	for (idx = api_max; idx >= api_min; idx--) {
+		sprintf(buf, "%s%u%s", name_pre, idx, ".ucode");
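+		/* e.g. a fw_name_pre of "iwlwifi-3945-" with api 2 yields
+		 * "iwlwifi-3945-2.ucode" (prefix assumed from the 3945
+		 * config; shown for illustration only) */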
+		ret = request_firmware(&ucode_raw, buf, &il->pci_dev->dev);
+		if (ret < 0) {
+			IL_ERR("%s firmware file req failed: %d\n", buf, ret);
+			if (ret == -ENOENT)
+				continue;
+			else
+				goto error;
+		} else {
+			if (idx < api_max)
+				IL_ERR("Loaded firmware %s, "
+				       "which is deprecated. "
+				       "Please use API v%u instead.\n", buf,
+				       api_max);
+			D_INFO("Got firmware '%s' file "
+			       "(%zd bytes) from disk\n", buf, ucode_raw->size);
+			break;
+		}
+	}
+
+	if (ret < 0)
+		goto error;
+
+	/* Make sure that we got at least our header! */
+	if (ucode_raw->size < il3945_ucode_get_header_size(1)) {
+		IL_ERR("File size way too small!\n");
+		ret = -EINVAL;
+		goto err_release;
+	}
+
+	/* Data from ucode file:  header followed by uCode images */
+	ucode = (struct il_ucode_header *)ucode_raw->data;
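+	/*
+	 * v1 file layout, as verified and copied out below:
+	 *
+	 *	header (24 bytes) | runtime inst | runtime data |
+	 *	init inst | init data | boot inst
+	 */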
+
+	il->ucode_ver = le32_to_cpu(ucode->ver);
+	api_ver = IL_UCODE_API(il->ucode_ver);
+	inst_size = il3945_ucode_get_inst_size(ucode);
+	data_size = il3945_ucode_get_data_size(ucode);
+	init_size = il3945_ucode_get_init_size(ucode);
+	init_data_size = il3945_ucode_get_init_data_size(ucode);
+	boot_size = il3945_ucode_get_boot_size(ucode);
+	src = il3945_ucode_get_data(ucode);
+
+	/* api_ver should match the api version forming part of the
+	 * firmware filename ... but we don't check for that; from here on
+	 * we rely only on the API version read from the firmware header */
+
+	if (api_ver < api_min || api_ver > api_max) {
+		IL_ERR("Driver unable to support your firmware API. "
+		       "Driver supports v%u, firmware is v%u.\n", api_max,
+		       api_ver);
+		il->ucode_ver = 0;
+		ret = -EINVAL;
+		goto err_release;
+	}
+	if (api_ver != api_max)
+		IL_ERR("Firmware has old API version. Expected %u, "
+		       "got %u. New firmware can be obtained "
+		       "from http://www.intellinuxwireless.org.\n", api_max,
+		       api_ver);
+
+	IL_INFO("loaded firmware version %u.%u.%u.%u\n",
+		IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver),
+		IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver));
+
+	snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version),
+		 "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver),
+		 IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver),
+		 IL_UCODE_SERIAL(il->ucode_ver));
+
+	D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver);
+	D_INFO("f/w package hdr runtime inst size = %u\n", inst_size);
+	D_INFO("f/w package hdr runtime data size = %u\n", data_size);
+	D_INFO("f/w package hdr init inst size = %u\n", init_size);
+	D_INFO("f/w package hdr init data size = %u\n", init_data_size);
+	D_INFO("f/w package hdr boot inst size = %u\n", boot_size);
+
+	/* Verify size of file vs. image size info in file's header */
+	if (ucode_raw->size !=
+	    il3945_ucode_get_header_size(api_ver) + inst_size + data_size +
+	    init_size + init_data_size + boot_size) {
+
+		D_INFO("uCode file size %zd does not match expected size\n",
+		       ucode_raw->size);
+		ret = -EINVAL;
+		goto err_release;
+	}
+
+	/* Verify that uCode images will fit in card's SRAM */
+	if (inst_size > IL39_MAX_INST_SIZE) {
+		D_INFO("uCode instr len %d too large to fit in SRAM\n",
+		       inst_size);
+		ret = -EINVAL;
+		goto err_release;
+	}
+
+	if (data_size > IL39_MAX_DATA_SIZE) {
+		D_INFO("uCode data len %d too large to fit in SRAM\n",
+		       data_size);
+		ret = -EINVAL;
+		goto err_release;
+	}
+	if (init_size > IL39_MAX_INST_SIZE) {
+		D_INFO("uCode init instr len %d too large to fit in SRAM\n",
+		       init_size);
+		ret = -EINVAL;
+		goto err_release;
+	}
+	if (init_data_size > IL39_MAX_DATA_SIZE) {
+		D_INFO("uCode init data len %d too large to fit in SRAM\n",
+		       init_data_size);
+		ret = -EINVAL;
+		goto err_release;
+	}
+	if (boot_size > IL39_MAX_BSM_SIZE) {
+		D_INFO("uCode boot instr len %d too large to fit in BSM\n",
+		       boot_size);
+		ret = -EINVAL;
+		goto err_release;
+	}
+
+	/* Allocate ucode buffers for card's bus-master loading ... */
+
+	/* Runtime instructions and 2 copies of data:
+	 * 1) unmodified from disk
+	 * 2) backup cache for save/restore during power-downs */
+	il->ucode_code.len = inst_size;
+	il_alloc_fw_desc(il->pci_dev, &il->ucode_code);
+
+	il->ucode_data.len = data_size;
+	il_alloc_fw_desc(il->pci_dev, &il->ucode_data);
+
+	il->ucode_data_backup.len = data_size;
+	il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup);
+
+	if (!il->ucode_code.v_addr || !il->ucode_data.v_addr ||
+	    !il->ucode_data_backup.v_addr)
+		goto err_pci_alloc;
+
+	/* Initialization instructions and data */
+	if (init_size && init_data_size) {
+		il->ucode_init.len = init_size;
+		il_alloc_fw_desc(il->pci_dev, &il->ucode_init);
+
+		il->ucode_init_data.len = init_data_size;
+		il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data);
+
+		if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr)
+			goto err_pci_alloc;
+	}
+
+	/* Bootstrap (instructions only, no data) */
+	if (boot_size) {
+		il->ucode_boot.len = boot_size;
+		il_alloc_fw_desc(il->pci_dev, &il->ucode_boot);
+
+		if (!il->ucode_boot.v_addr)
+			goto err_pci_alloc;
+	}
+
+	/* Copy images into buffers for card's bus-master reads ... */
+
+	/* Runtime instructions (first block of data in file) */
+	len = inst_size;
+	D_INFO("Copying (but not loading) uCode instr len %zd\n", len);
+	memcpy(il->ucode_code.v_addr, src, len);
+	src += len;
+
+	D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
+	       il->ucode_code.v_addr, (u32) il->ucode_code.p_addr);
+
+	/* Runtime data (2nd block)
+	 * NOTE:  Copy into backup buffer will be done in il3945_up()  */
+	len = data_size;
+	D_INFO("Copying (but not loading) uCode data len %zd\n", len);
+	memcpy(il->ucode_data.v_addr, src, len);
+	memcpy(il->ucode_data_backup.v_addr, src, len);
+	src += len;
+
+	/* Initialization instructions (3rd block) */
+	if (init_size) {
+		len = init_size;
+		D_INFO("Copying (but not loading) init instr len %zd\n", len);
+		memcpy(il->ucode_init.v_addr, src, len);
+		src += len;
+	}
+
+	/* Initialization data (4th block) */
+	if (init_data_size) {
+		len = init_data_size;
+		D_INFO("Copying (but not loading) init data len %zd\n", len);
+		memcpy(il->ucode_init_data.v_addr, src, len);
+		src += len;
+	}
+
+	/* Bootstrap instructions (5th block) */
+	len = boot_size;
+	D_INFO("Copying (but not loading) boot instr len %zd\n", len);
+	memcpy(il->ucode_boot.v_addr, src, len);
+
+	/* We have our copies now, allow OS to release its copies */
+	release_firmware(ucode_raw);
+	return 0;
+
+err_pci_alloc:
+	IL_ERR("failed to allocate pci memory\n");
+	ret = -ENOMEM;
+	il3945_dealloc_ucode_pci(il);
+
+err_release:
+	release_firmware(ucode_raw);
+
+error:
+	return ret;
+}
+
+/**
+ * il3945_set_ucode_ptrs - Set uCode address location
+ *
+ * Tell initialization uCode where to find runtime uCode.
+ *
+ * BSM registers initially contain pointers to initialization uCode.
+ * We need to replace them to load runtime uCode inst and data,
+ * and to save runtime data when powering down.
+ */
+static int
+il3945_set_ucode_ptrs(struct il_priv *il)
+{
+	dma_addr_t pinst;
+	dma_addr_t pdata;
+
+	/* bits 31:0 for 3945 */
+	pinst = il->ucode_code.p_addr;
+	pdata = il->ucode_data_backup.p_addr;
+
+	/* Tell bootstrap uCode where to find image to load */
+	il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
+	il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
+	il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, il->ucode_data.len);
+
+	/* Inst byte count must be set up last; bit 31 signals uCode
+	 *   that all new ptr/size info is in place */
+	il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG,
+		   il->ucode_code.len | BSM_DRAM_INST_LOAD);
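+	/*
+	 * Illustration of the word written above (length made up, not
+	 * from real hardware): with ucode_code.len = 40960 (0xA000) the
+	 * register receives 0x8000A000, i.e. the byte count in the low
+	 * bits with bit 31 (BSM_DRAM_INST_LOAD) set, telling the uCode
+	 * that the three pointer/size writes above are complete.
+	 */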
+
+	D_INFO("Runtime uCode pointers are set.\n");
+
+	return 0;
+}
+
+/**
+ * il3945_init_alive_start - Called after N_ALIVE notification received
+ *
+ * Called after N_ALIVE notification received from "initialize" uCode.
+ *
+ * Tell "initialize" uCode to go ahead and load the runtime uCode.
+ */
+static void
+il3945_init_alive_start(struct il_priv *il)
+{
+	/* Check alive response for "valid" sign from uCode */
+	if (il->card_alive_init.is_valid != UCODE_VALID_OK) {
+		/* We had an error bringing up the hardware, so take it
+		 * all the way back down so we can try again */
+		D_INFO("Initialize Alive failed.\n");
+		goto restart;
+	}
+
+	/* Bootstrap uCode has loaded initialize uCode ... verify inst image.
+	 * This is a paranoid check, because we would not have gotten the
+	 * "initialize" alive if code weren't properly loaded.  */
+	if (il3945_verify_ucode(il)) {
+		/* Runtime instruction load was bad;
+		 * take it all the way back down so we can try again */
+		D_INFO("Bad \"initialize\" uCode load.\n");
+		goto restart;
+	}
+
+	/* Send pointers to protocol/runtime uCode image ... init code will
+	 * load and launch runtime uCode, which will send us another "Alive"
+	 * notification. */
+	D_INFO("Initialization Alive received.\n");
+	if (il3945_set_ucode_ptrs(il)) {
+		/* Runtime instruction load won't happen;
+		 * take it all the way back down so we can try again */
+		D_INFO("Couldn't set up uCode pointers.\n");
+		goto restart;
+	}
+	return;
+
+restart:
+	queue_work(il->workqueue, &il->restart);
+}
+
+/**
+ * il3945_alive_start - called after N_ALIVE notification received
+ *                   from protocol/runtime uCode (initialization uCode's
+ *                   Alive gets handled by il3945_init_alive_start()).
+ */
+static void
+il3945_alive_start(struct il_priv *il)
+{
+	int thermal_spin = 0;
+	u32 rfkill;
+
+	D_INFO("Runtime Alive received.\n");
+
+	if (il->card_alive.is_valid != UCODE_VALID_OK) {
+		/* We had an error bringing up the hardware, so take it
+		 * all the way back down so we can try again */
+		D_INFO("Alive failed.\n");
+		goto restart;
+	}
+
+	/* Initialize uCode has loaded Runtime uCode ... verify inst image.
+	 * This is a paranoid check, because we would not have gotten the
+	 * "runtime" alive if code weren't properly loaded.  */
+	if (il3945_verify_ucode(il)) {
+		/* Runtime instruction load was bad;
+		 * take it all the way back down so we can try again */
+		D_INFO("Bad runtime uCode load.\n");
+		goto restart;
+	}
+
+	rfkill = il_rd_prph(il, APMG_RFKILL_REG);
+	D_INFO("RFKILL status: 0x%x\n", rfkill);
+
+	if (rfkill & 0x1) {
+		clear_bit(S_RFKILL, &il->status);
+		/* if RFKILL is not on, then wait for thermal
+		 * sensor in adapter to kick in */
+		while (il3945_hw_get_temperature(il) == 0) {
+			thermal_spin++;
+			udelay(10);
+		}
+
+		if (thermal_spin)
+			D_INFO("Thermal calibration took %dus\n",
+			       thermal_spin * 10);
+	} else
+		set_bit(S_RFKILL, &il->status);
+
+	/* After the ALIVE response, we can send commands to 3945 uCode */
+	set_bit(S_ALIVE, &il->status);
+
+	/* Enable watchdog to monitor the driver tx queues */
+	il_setup_watchdog(il);
+
+	if (il_is_rfkill(il))
+		return;
+
+	ieee80211_wake_queues(il->hw);
+
+	il->active_rate = RATES_MASK_3945;
+
+	il_power_update_mode(il, true);
+
+	if (il_is_associated(il)) {
+		struct il3945_rxon_cmd *active_rxon =
+		    (struct il3945_rxon_cmd *)(&il->active);
+
+		il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+	} else {
+		/* Initialize our rx_config data */
+		il_connection_init_rx_config(il);
+	}
+
+	/* Configure Bluetooth device coexistence support */
+	il_send_bt_config(il);
+
+	set_bit(S_READY, &il->status);
+
+	/* Configure the adapter for unassociated operation */
+	il3945_commit_rxon(il);
+
+	il3945_reg_txpower_periodic(il);
+
+	D_INFO("ALIVE processing complete.\n");
+	wake_up(&il->wait_command_queue);
+
+	return;
+
+restart:
+	queue_work(il->workqueue, &il->restart);
+}
+
+static void il3945_cancel_deferred_work(struct il_priv *il);
+
+static void
+__il3945_down(struct il_priv *il)
+{
+	unsigned long flags;
+	int exit_pending;
+
+	D_INFO(DRV_NAME " is going down\n");
+
+	il_scan_cancel_timeout(il, 200);
+
+	exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status);
+
+	/* Stop TX queues watchdog. The S_EXIT_PENDING bit must be set
+	 * to prevent the timer from being rearmed */
+	del_timer_sync(&il->watchdog);
+
+	/* Station information will now be cleared in device */
+	il_clear_ucode_stations(il);
+	il_dealloc_bcast_stations(il);
+	il_clear_driver_stations(il);
+
+	/* Unblock any waiting calls */
+	wake_up_all(&il->wait_command_queue);
+
+	/* Wipe out the EXIT_PENDING status bit if we are not actually
+	 * exiting the module */
+	if (!exit_pending)
+		clear_bit(S_EXIT_PENDING, &il->status);
+
+	/* stop and reset the on-board processor */
+	_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+	/* tell the device to stop sending interrupts */
+	spin_lock_irqsave(&il->lock, flags);
+	il_disable_interrupts(il);
+	spin_unlock_irqrestore(&il->lock, flags);
+	il3945_synchronize_irq(il);
+
+	if (il->mac80211_registered)
+		ieee80211_stop_queues(il->hw);
+
+	/* If we have not previously called il3945_init() then
+	 * clear all bits but the RF Kill bits and return */
+	if (!il_is_init(il)) {
+		il->status =
+		    test_bit(S_RFKILL, &il->status) << S_RFKILL |
+		    test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
+		    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
+		goto exit;
+	}
+
+	/* ...otherwise clear out all the status bits but the RF Kill
+	 * bit and continue taking the NIC down. */
+	il->status &=
+	    test_bit(S_RFKILL, &il->status) << S_RFKILL |
+	    test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
+	    test_bit(S_FW_ERROR, &il->status) << S_FW_ERROR |
+	    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
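+	/*
+	 * Concrete illustration: had S_ALIVE and S_READY also been set,
+	 * the mask built above contains at most the S_RFKILL,
+	 * S_GEO_CONFIGURED, S_FW_ERROR and S_EXIT_PENDING bits, so the
+	 * &= clears S_ALIVE and S_READY while letting those four bits
+	 * survive the shutdown.
+	 */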
+
+	/*
+	 * Interrupts are disabled and synchronized, and priv->mutex is
+	 * held, so this is the only thread that will program device
+	 * registers; lockdep assertions still apply, however, so take
+	 * reg_lock anyway.
+	 */
+	spin_lock_irq(&il->reg_lock);
+	/* FIXME: il_grab_nic_access if rfkill is off ? */
+
+	il3945_hw_txq_ctx_stop(il);
+	il3945_hw_rxq_stop(il);
+	/* Power-down device's busmaster DMA clocks */
+	_il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+	udelay(5);
+	/* Stop the device, and put it in low power state */
+	_il_apm_stop(il);
+
+	spin_unlock_irq(&il->reg_lock);
+
+	il3945_hw_txq_ctx_free(il);
+exit:
+	memset(&il->card_alive, 0, sizeof(struct il_alive_resp));
+
+	if (il->beacon_skb)
+		dev_kfree_skb(il->beacon_skb);
+	il->beacon_skb = NULL;
+
+	/* clear out any free frames */
+	il3945_clear_free_frames(il);
+}
+
+static void
+il3945_down(struct il_priv *il)
+{
+	mutex_lock(&il->mutex);
+	__il3945_down(il);
+	mutex_unlock(&il->mutex);
+
+	il3945_cancel_deferred_work(il);
+}
+
+#define MAX_HW_RESTARTS 5
+
+static int
+il3945_alloc_bcast_station(struct il_priv *il)
+{
+	unsigned long flags;
+	u8 sta_id;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	sta_id = il_prep_station(il, il_bcast_addr, false, NULL);
+	if (sta_id == IL_INVALID_STATION) {
+		IL_ERR("Unable to prepare broadcast station\n");
+		spin_unlock_irqrestore(&il->sta_lock, flags);
+
+		return -EINVAL;
+	}
+
+	il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE;
+	il->stations[sta_id].used |= IL_STA_BCAST;
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	return 0;
+}
+
+static int
+__il3945_up(struct il_priv *il)
+{
+	int rc, i;
+
+	rc = il3945_alloc_bcast_station(il);
+	if (rc)
+		return rc;
+
+	if (test_bit(S_EXIT_PENDING, &il->status)) {
+		IL_WARN("Exit pending; will not bring the NIC up\n");
+		return -EIO;
+	}
+
+	if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) {
+		IL_ERR("ucode not available for device bring up\n");
+		return -EIO;
+	}
+
+	/* If platform's RF_KILL switch is NOT set to KILL */
+	if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
+		clear_bit(S_RFKILL, &il->status);
+	else {
+		set_bit(S_RFKILL, &il->status);
+		return -ERFKILL;
+	}
+
+	_il_wr(il, CSR_INT, 0xFFFFFFFF);
+
+	rc = il3945_hw_nic_init(il);
+	if (rc) {
+		IL_ERR("Unable to int nic\n");
+		return rc;
+	}
+
+	/* make sure rfkill handshake bits are cleared */
+	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+	/* clear (again), then enable host interrupts */
+	_il_wr(il, CSR_INT, 0xFFFFFFFF);
+	il_enable_interrupts(il);
+
+	/* really make sure rfkill handshake bits are cleared */
+	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+
+	/* Copy original ucode data image from disk into backup cache.
+	 * This will be used to initialize the on-board processor's
+	 * data SRAM for a clean start when the runtime program first loads. */
+	memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr,
+	       il->ucode_data.len);
+
+	/* We return success when we resume from suspend and rf_kill is on. */
+	if (test_bit(S_RFKILL, &il->status))
+		return 0;
+
+	for (i = 0; i < MAX_HW_RESTARTS; i++) {
+
+		/* load bootstrap state machine,
+		 * load bootstrap program into processor's memory,
+		 * prepare to load the "initialize" uCode */
+		rc = il->ops->load_ucode(il);
+
+		if (rc) {
+			IL_ERR("Unable to set up bootstrap uCode: %d\n", rc);
+			continue;
+		}
+
+		/* start card; "initialize" will load runtime ucode */
+		il3945_nic_start(il);
+
+		D_INFO(DRV_NAME " is coming up\n");
+
+		return 0;
+	}
+
+	set_bit(S_EXIT_PENDING, &il->status);
+	__il3945_down(il);
+	clear_bit(S_EXIT_PENDING, &il->status);
+
+	/* We tried to restart and configure the device for as long as
+	 * our patience could withstand */
+	IL_ERR("Unable to initialize device after %d attempts.\n", i);
+	return -EIO;
+}
+
+/*****************************************************************************
+ *
+ * Workqueue callbacks
+ *
+ *****************************************************************************/
+
+static void
+il3945_bg_init_alive_start(struct work_struct *data)
+{
+	struct il_priv *il =
+	    container_of(data, struct il_priv, init_alive_start.work);
+
+	mutex_lock(&il->mutex);
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		goto out;
+
+	il3945_init_alive_start(il);
+out:
+	mutex_unlock(&il->mutex);
+}
+
+static void
+il3945_bg_alive_start(struct work_struct *data)
+{
+	struct il_priv *il =
+	    container_of(data, struct il_priv, alive_start.work);
+
+	mutex_lock(&il->mutex);
+	if (test_bit(S_EXIT_PENDING, &il->status) || il->txq == NULL)
+		goto out;
+
+	il3945_alive_start(il);
+out:
+	mutex_unlock(&il->mutex);
+}
+
+/*
+ * 3945 cannot interrupt the driver when the hardware rf kill switch
+ * toggles; the driver must poll the CSR_GP_CNTRL register for changes.
+ * This register *is* readable even when the device has been SW_RESET
+ * into low power mode (e.g. during RF KILL).
+ */
+ */
+static void
+il3945_rfkill_poll(struct work_struct *data)
+{
+	struct il_priv *il =
+	    container_of(data, struct il_priv, _3945.rfkill_poll.work);
+	bool old_rfkill = test_bit(S_RFKILL, &il->status);
+	bool new_rfkill =
+	    !(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
+
+	if (new_rfkill != old_rfkill) {
+		if (new_rfkill)
+			set_bit(S_RFKILL, &il->status);
+		else
+			clear_bit(S_RFKILL, &il->status);
+
+		wiphy_rfkill_set_hw_state(il->hw->wiphy, new_rfkill);
+
+		D_RF_KILL("RF_KILL bit toggled to %s.\n",
+			  new_rfkill ? "disable radio" : "enable radio");
+	}
+
+	/* Keep this running, even if the radio is now enabled.  This will
+	 * be cancelled in mac_start() when the system decides to start again */
+	queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll,
+			   round_jiffies_relative(2 * HZ));
+
+}
+
+int
+il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
+{
+	struct il_host_cmd cmd = {
+		.id = C_SCAN,
+		.len = sizeof(struct il3945_scan_cmd),
+		.flags = CMD_SIZE_HUGE,
+	};
+	struct il3945_scan_cmd *scan;
+	u8 n_probes = 0;
+	enum nl80211_band band;
+	bool is_active = false;
+	int ret;
+	u16 len;
+
+	lockdep_assert_held(&il->mutex);
+
+	if (!il->scan_cmd) {
+		il->scan_cmd =
+		    kmalloc(sizeof(struct il3945_scan_cmd) + IL_MAX_SCAN_SIZE,
+			    GFP_KERNEL);
+		if (!il->scan_cmd) {
+			D_SCAN("Fail to allocate scan memory\n");
+			return -ENOMEM;
+		}
+	}
+	scan = il->scan_cmd;
+	memset(scan, 0, sizeof(struct il3945_scan_cmd) + IL_MAX_SCAN_SIZE);
+
+	scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH;
+	scan->quiet_time = IL_ACTIVE_QUIET_TIME;
+
+	if (il_is_associated(il)) {
+		u16 interval;
+		u32 extra;
+		u32 suspend_time = 100;
+		u32 scan_suspend_time = 100;
+
+		D_INFO("Scanning while associated...\n");
+
+		interval = vif->bss_conf.beacon_int;
+
+		scan->suspend_time = 0;
+		scan->max_out_time = cpu_to_le32(200 * 1024);
+		if (!interval)
+			interval = suspend_time;
+		/*
+		 * suspend time format:
+		 *  0-19: beacon interval in usec (time before exec.)
+		 * 20-23: 0
+		 * 24-31: number of beacons (suspend between channels)
+		 */
+
+		extra = (suspend_time / interval) << 24;
+		scan_suspend_time =
+		    0xFF0FFFFF & (extra | ((suspend_time % interval) * 1024));
+
+		scan->suspend_time = cpu_to_le32(scan_suspend_time);
+		D_SCAN("suspend_time 0x%X beacon interval %d\n",
+		       scan_suspend_time, interval);
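+		/*
+		 * Worked example with the defaults above (suspend_time =
+		 * 100, beacon interval = 100): extra = (100 / 100) << 24 =
+		 * 0x01000000, i.e. suspend for one beacon between
+		 * channels, and the usec part (100 % 100) * 1024 is 0, so
+		 * scan_suspend_time ends up as 0x01000000 after the
+		 * 0xFF0FFFFF mask.
+		 */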
+	}
+
+	if (il->scan_request->n_ssids) {
+		int i, p = 0;
+		D_SCAN("Kicking off active scan\n");
+		for (i = 0; i < il->scan_request->n_ssids; i++) {
+			/* always does wildcard anyway */
+			if (!il->scan_request->ssids[i].ssid_len)
+				continue;
+			scan->direct_scan[p].id = WLAN_EID_SSID;
+			scan->direct_scan[p].len =
+			    il->scan_request->ssids[i].ssid_len;
+			memcpy(scan->direct_scan[p].ssid,
+			       il->scan_request->ssids[i].ssid,
+			       il->scan_request->ssids[i].ssid_len);
+			n_probes++;
+			p++;
+		}
+		is_active = true;
+	} else
+		D_SCAN("Kicking off passive scan.\n");
+
+	/* We don't build a direct scan probe request; the uCode will do
+	 * that based on the direct_mask added to each channel entry */
+	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
+	scan->tx_cmd.sta_id = il->hw_params.bcast_id;
+	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+
+	/* flags + rate selection */
+
+	switch (il->scan_band) {
+	case NL80211_BAND_2GHZ:
+		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
+		scan->tx_cmd.rate = RATE_1M_PLCP;
+		band = NL80211_BAND_2GHZ;
+		break;
+	case NL80211_BAND_5GHZ:
+		scan->tx_cmd.rate = RATE_6M_PLCP;
+		band = NL80211_BAND_5GHZ;
+		break;
+	default:
+		IL_WARN("Invalid scan band\n");
+		return -EIO;
+	}
+
+	/*
+	 * If active scanning is requested but a certain channel is marked
+	 * passive, we can do active scanning if we detect transmissions.
+	 * For passive-only scanning, disable switching to active on any
+	 * channel.
+	 */
+	scan->good_CRC_th =
+	    is_active ? IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_NEVER;
+
+	len =
+	    il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data,
+			      vif->addr, il->scan_request->ie,
+			      il->scan_request->ie_len,
+			      IL_MAX_SCAN_SIZE - sizeof(*scan));
+	scan->tx_cmd.len = cpu_to_le16(len);
+
+	/* select Rx antennas */
+	scan->flags |= il3945_get_antenna_flags(il);
+
+	scan->channel_count =
+	    il3945_get_channels_for_scan(il, band, is_active, n_probes,
+					 (void *)&scan->data[len], vif);
+	if (scan->channel_count == 0) {
+		D_SCAN("channel count %d\n", scan->channel_count);
+		return -EIO;
+	}
+
+	cmd.len +=
+	    le16_to_cpu(scan->tx_cmd.len) +
+	    scan->channel_count * sizeof(struct il3945_scan_channel);
+	cmd.data = scan;
+	scan->len = cpu_to_le16(cmd.len);
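+	/*
+	 * Size illustration (numbers made up): a 60-byte probe request
+	 * and 14 channels give a final command of
+	 * sizeof(struct il3945_scan_cmd) + 60 +
+	 * 14 * sizeof(struct il3945_scan_channel) bytes; the
+	 * CMD_SIZE_HUGE flag set above is what allows the command to
+	 * exceed the regular host-command buffer size.
+	 */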
+
+	set_bit(S_SCAN_HW, &il->status);
+	ret = il_send_cmd_sync(il, &cmd);
+	if (ret)
+		clear_bit(S_SCAN_HW, &il->status);
+	return ret;
+}
+
+void
+il3945_post_scan(struct il_priv *il)
+{
+	/*
+	 * Since setting the RXON may have been deferred while
+	 * performing the scan, fire one off if needed
+	 */
+	if (memcmp(&il->staging, &il->active, sizeof(il->staging)))
+		il3945_commit_rxon(il);
+}
+
+static void
+il3945_bg_restart(struct work_struct *data)
+{
+	struct il_priv *il = container_of(data, struct il_priv, restart);
+
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		return;
+
+	if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
+		mutex_lock(&il->mutex);
+		il->is_open = 0;
+		mutex_unlock(&il->mutex);
+		il3945_down(il);
+		ieee80211_restart_hw(il->hw);
+	} else {
+		il3945_down(il);
+
+		mutex_lock(&il->mutex);
+		if (test_bit(S_EXIT_PENDING, &il->status)) {
+			mutex_unlock(&il->mutex);
+			return;
+		}
+
+		__il3945_up(il);
+		mutex_unlock(&il->mutex);
+	}
+}
+
+static void
+il3945_bg_rx_replenish(struct work_struct *data)
+{
+	struct il_priv *il = container_of(data, struct il_priv, rx_replenish);
+
+	mutex_lock(&il->mutex);
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		goto out;
+
+	il3945_rx_replenish(il);
+out:
+	mutex_unlock(&il->mutex);
+}
+
+void
+il3945_post_associate(struct il_priv *il)
+{
+	int rc = 0;
+
+	if (!il->vif || !il->is_open)
+		return;
+
+	D_ASSOC("Associated as %d to: %pM\n", il->vif->bss_conf.aid,
+		il->active.bssid_addr);
+
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		return;
+
+	il_scan_cancel_timeout(il, 200);
+
+	il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+	il3945_commit_rxon(il);
+
+	rc = il_send_rxon_timing(il);
+	if (rc)
+		IL_WARN("C_RXON_TIMING failed - " "Attempting to continue.\n");
+
+	il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+
+	il->staging.assoc_id = cpu_to_le16(il->vif->bss_conf.aid);
+
+	D_ASSOC("assoc id %d beacon interval %d\n", il->vif->bss_conf.aid,
+		il->vif->bss_conf.beacon_int);
+
+	if (il->vif->bss_conf.use_short_preamble)
+		il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+	else
+		il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+	if (il->staging.flags & RXON_FLG_BAND_24G_MSK) {
+		if (il->vif->bss_conf.use_short_slot)
+			il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+		else
+			il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+	}
+
+	il3945_commit_rxon(il);
+
+	switch (il->vif->type) {
+	case NL80211_IFTYPE_STATION:
+		il3945_rate_scale_init(il->hw, IL_AP_ID);
+		break;
+	case NL80211_IFTYPE_ADHOC:
+		il3945_send_beacon_cmd(il);
+		break;
+	default:
+		IL_ERR("%s Should not be called in %d mode\n", __func__,
+		      il->vif->type);
+		break;
+	}
+}
+
+/*****************************************************************************
+ *
+ * mac80211 entry point functions
+ *
+ *****************************************************************************/
+
+#define UCODE_READY_TIMEOUT	(2 * HZ)
+
+static int
+il3945_mac_start(struct ieee80211_hw *hw)
+{
+	struct il_priv *il = hw->priv;
+	int ret;
+
+	/* we should be verifying the device is ready to be opened */
+	mutex_lock(&il->mutex);
+	D_MAC80211("enter\n");
+
+	/* fetch ucode file from disk, alloc and copy to bus-master buffers ...
+	 * ucode filename and max sizes are card-specific. */
+
+	if (!il->ucode_code.len) {
+		ret = il3945_read_ucode(il);
+		if (ret) {
+			IL_ERR("Could not read microcode: %d\n", ret);
+			mutex_unlock(&il->mutex);
+			goto out_release_irq;
+		}
+	}
+
+	ret = __il3945_up(il);
+
+	mutex_unlock(&il->mutex);
+
+	if (ret)
+		goto out_release_irq;
+
+	D_INFO("Start UP work.\n");
+
+	/* Wait for START_ALIVE from ucode. Otherwise callbacks from
+	 * mac80211 will not be run successfully. */
+	ret = wait_event_timeout(il->wait_command_queue,
+				 test_bit(S_READY, &il->status),
+				 UCODE_READY_TIMEOUT);
+	if (!ret) {
+		if (!test_bit(S_READY, &il->status)) {
+			IL_ERR("Wait for START_ALIVE timeout after %dms.\n",
+			       jiffies_to_msecs(UCODE_READY_TIMEOUT));
+			ret = -ETIMEDOUT;
+			goto out_release_irq;
+		}
+	}
+
+	/* ucode is running and will send rfkill notifications,
+	 * no need to poll the killswitch state anymore */
+	cancel_delayed_work(&il->_3945.rfkill_poll);
+
+	il->is_open = 1;
+	D_MAC80211("leave\n");
+	return 0;
+
+out_release_irq:
+	il->is_open = 0;
+	D_MAC80211("leave - failed\n");
+	return ret;
+}
+
+static void
+il3945_mac_stop(struct ieee80211_hw *hw)
+{
+	struct il_priv *il = hw->priv;
+
+	D_MAC80211("enter\n");
+
+	if (!il->is_open) {
+		D_MAC80211("leave - skip\n");
+		return;
+	}
+
+	il->is_open = 0;
+
+	il3945_down(il);
+
+	flush_workqueue(il->workqueue);
+
+	/* start polling the killswitch state again */
+	queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll,
+			   round_jiffies_relative(2 * HZ));
+
+	D_MAC80211("leave\n");
+}
+
+static void
+il3945_mac_tx(struct ieee80211_hw *hw,
+	       struct ieee80211_tx_control *control,
+	       struct sk_buff *skb)
+{
+	struct il_priv *il = hw->priv;
+
+	D_MAC80211("enter\n");
+
+	D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
+	     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
+
+	if (il3945_tx_skb(il, control->sta, skb))
+		dev_kfree_skb_any(skb);
+
+	D_MAC80211("leave\n");
+}
+
+void
+il3945_config_ap(struct il_priv *il)
+{
+	struct ieee80211_vif *vif = il->vif;
+	int rc = 0;
+
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		return;
+
+	/* The following should be done only at AP bring up */
+	if (!(il_is_associated(il))) {
+
+		/* RXON - unassoc (to set timing command) */
+		il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+		il3945_commit_rxon(il);
+
+		/* RXON Timing */
+		rc = il_send_rxon_timing(il);
+		if (rc)
+			IL_WARN("C_RXON_TIMING failed - "
+				"Attempting to continue.\n");
+
+		il->staging.assoc_id = 0;
+
+		if (vif->bss_conf.use_short_preamble)
+			il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+		else
+			il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+		if (il->staging.flags & RXON_FLG_BAND_24G_MSK) {
+			if (vif->bss_conf.use_short_slot)
+				il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+			else
+				il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+		}
+		/* restore RXON assoc */
+		il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+		il3945_commit_rxon(il);
+	}
+	il3945_send_beacon_cmd(il);
+}
+
+static int
+il3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+		   struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+		   struct ieee80211_key_conf *key)
+{
+	struct il_priv *il = hw->priv;
+	int ret = 0;
+	u8 sta_id = IL_INVALID_STATION;
+	u8 static_key;
+
+	D_MAC80211("enter\n");
+
+	if (il3945_mod_params.sw_crypto) {
+		D_MAC80211("leave - hwcrypto disabled\n");
+		return -EOPNOTSUPP;
+	}
+
+	/*
+	 * To support IBSS RSN, don't program group keys in IBSS, the
+	 * hardware will then not attempt to decrypt the frames.
+	 */
+	if (vif->type == NL80211_IFTYPE_ADHOC &&
+	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+		D_MAC80211("leave - IBSS RSN\n");
+		return -EOPNOTSUPP;
+	}
+
+	static_key = !il_is_associated(il);
+
+	if (!static_key) {
+		sta_id = il_sta_id_or_broadcast(il, sta);
+		if (sta_id == IL_INVALID_STATION) {
+			D_MAC80211("leave - station not found\n");
+			return -EINVAL;
+		}
+	}
+
+	mutex_lock(&il->mutex);
+	il_scan_cancel_timeout(il, 100);
+
+	switch (cmd) {
+	case SET_KEY:
+		if (static_key)
+			ret = il3945_set_static_key(il, key);
+		else
+			ret = il3945_set_dynamic_key(il, key, sta_id);
+		D_MAC80211("enable hwcrypto key\n");
+		break;
+	case DISABLE_KEY:
+		if (static_key)
+			ret = il3945_remove_static_key(il);
+		else
+			ret = il3945_clear_sta_key_info(il, sta_id);
+		D_MAC80211("disable hwcrypto key\n");
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	D_MAC80211("leave ret %d\n", ret);
+	mutex_unlock(&il->mutex);
+
+	return ret;
+}
+
+static int
+il3945_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		   struct ieee80211_sta *sta)
+{
+	struct il_priv *il = hw->priv;
+	struct il3945_sta_priv *sta_priv = (void *)sta->drv_priv;
+	int ret;
+	bool is_ap = vif->type == NL80211_IFTYPE_STATION;
+	u8 sta_id;
+
+	mutex_lock(&il->mutex);
+	D_INFO("station %pM\n", sta->addr);
+	sta_priv->common.sta_id = IL_INVALID_STATION;
+
+	ret = il_add_station_common(il, sta->addr, is_ap, sta, &sta_id);
+	if (ret) {
+		IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret);
+		/* Should we return success if the return code is EEXIST? */
+		mutex_unlock(&il->mutex);
+		return ret;
+	}
+
+	sta_priv->common.sta_id = sta_id;
+
+	/* Initialize rate scaling */
+	D_INFO("Initializing rate scaling for station %pM\n", sta->addr);
+	il3945_rs_rate_init(il, sta, sta_id);
+	mutex_unlock(&il->mutex);
+
+	return 0;
+}
+
+static void
+il3945_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+			unsigned int *total_flags, u64 multicast)
+{
+	struct il_priv *il = hw->priv;
+	__le32 filter_or = 0, filter_nand = 0;
+
+#define CHK(test, flag)	do { \
+	if (*total_flags & (test))		\
+		filter_or |= (flag);		\
+	else					\
+		filter_nand |= (flag);		\
+	} while (0)
+
+	D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
+		   *total_flags);
+
+	CHK(FIF_OTHER_BSS, RXON_FILTER_PROMISC_MSK);
+	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
+	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
+
+#undef CHK
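+	/*
+	 * For reference, the first CHK invocation above expands to
+	 * (modulo the do/while wrapper):
+	 *
+	 *	if (*total_flags & FIF_OTHER_BSS)
+	 *		filter_or |= RXON_FILTER_PROMISC_MSK;
+	 *	else
+	 *		filter_nand |= RXON_FILTER_PROMISC_MSK;
+	 */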
+
+	mutex_lock(&il->mutex);
+
+	il->staging.filter_flags &= ~filter_nand;
+	il->staging.filter_flags |= filter_or;
+
+	/*
+	 * Not committing directly, because the hardware may be performing
+	 * a scan; even when the hardware is ready, committing here breaks
+	 * for some reason.  The filter flags change is committed
+	 * eventually anyway.
+	 */
+
+	mutex_unlock(&il->mutex);
+
+	/*
+	 * Receiving all multicast frames is always enabled by the
+	 * default flags setup in il_connection_init_rx_config()
+	 * since we currently do not support programming multicast
+	 * filters into the device.
+	 */
+	*total_flags &=
+	    FIF_OTHER_BSS | FIF_ALLMULTI |
+	    FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
+}
+
+/*****************************************************************************
+ *
+ * sysfs attributes
+ *
+ *****************************************************************************/
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+
+/*
+ * The following adds a new attribute to the sysfs representation
+ * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
+ * used for controlling the debug level.
+ *
+ * See the level definitions in iwl for details.
+ *
+ * The debug_level being managed using sysfs below is a per device debug
+ * level that is used instead of the global debug level if it (the per
+ * device debug level) is set.
+ */
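+/*
+ * Typical usage from userspace, with sysfs assumed mounted at /sys and
+ * the PCI address illustrative (the attribute lives in the device's
+ * sysfs directory):
+ *
+ *	cat /sys/bus/pci/devices/0000:03:00.0/debug_level
+ *	echo 0x43fff > /sys/bus/pci/devices/0000:03:00.0/debug_level
+ *
+ * kstrtoul() in the store handler below accepts decimal, octal and
+ * 0x-prefixed hex values; the mask shown is made up.
+ */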
+static ssize_t
+il3945_show_debug_level(struct device *d, struct device_attribute *attr,
+			char *buf)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+	return sprintf(buf, "0x%08X\n", il_get_debug_level(il));
+}
+
+static ssize_t
+il3945_store_debug_level(struct device *d, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+	unsigned long val;
+	int ret;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret)
+		IL_INFO("%s is not in hex or decimal form.\n", buf);
+	else
+		il->debug_level = val;
+
+	return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(debug_level, 0644, il3945_show_debug_level,
+		   il3945_store_debug_level);
+
+#endif /* CONFIG_IWLEGACY_DEBUG */
+
+static ssize_t
+il3945_show_temperature(struct device *d, struct device_attribute *attr,
+			char *buf)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+
+	if (!il_is_alive(il))
+		return -EAGAIN;
+
+	return sprintf(buf, "%d\n", il3945_hw_get_temperature(il));
+}
+
+static DEVICE_ATTR(temperature, 0444, il3945_show_temperature, NULL);
+
+static ssize_t
+il3945_show_tx_power(struct device *d, struct device_attribute *attr, char *buf)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+	return sprintf(buf, "%d\n", il->tx_power_user_lmt);
+}
+
+static ssize_t
+il3945_store_tx_power(struct device *d, struct device_attribute *attr,
+		      const char *buf, size_t count)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+	char *p = (char *)buf;
+	u32 val;
+
+	val = simple_strtoul(p, &p, 10);
+	if (p == buf)
+		IL_INFO(": %s is not in decimal form.\n", buf);
+	else
+		il3945_hw_reg_set_txpower(il, val);
+
+	return count;
+}
+
+static DEVICE_ATTR(tx_power, 0644, il3945_show_tx_power, il3945_store_tx_power);
+
+static ssize_t
+il3945_show_flags(struct device *d, struct device_attribute *attr, char *buf)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+
+	return sprintf(buf, "0x%04X\n", il->active.flags);
+}
+
+static ssize_t
+il3945_store_flags(struct device *d, struct device_attribute *attr,
+		   const char *buf, size_t count)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+	u32 flags = simple_strtoul(buf, NULL, 0);
+
+	mutex_lock(&il->mutex);
+	if (le32_to_cpu(il->staging.flags) != flags) {
+		/* Cancel any currently running scans... */
+		if (il_scan_cancel_timeout(il, 100))
+			IL_WARN("Could not cancel scan.\n");
+		else {
+			D_INFO("Committing rxon.flags = 0x%04X\n", flags);
+			il->staging.flags = cpu_to_le32(flags);
+			il3945_commit_rxon(il);
+		}
+	}
+	mutex_unlock(&il->mutex);
+
+	return count;
+}
+
+static DEVICE_ATTR(flags, 0644, il3945_show_flags, il3945_store_flags);
+
+static ssize_t
+il3945_show_filter_flags(struct device *d, struct device_attribute *attr,
+			 char *buf)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+
+	return sprintf(buf, "0x%04X\n", le32_to_cpu(il->active.filter_flags));
+}
+
+static ssize_t
+il3945_store_filter_flags(struct device *d, struct device_attribute *attr,
+			  const char *buf, size_t count)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+	u32 filter_flags = simple_strtoul(buf, NULL, 0);
+
+	mutex_lock(&il->mutex);
+	if (le32_to_cpu(il->staging.filter_flags) != filter_flags) {
+		/* Cancel any currently running scans... */
+		if (il_scan_cancel_timeout(il, 100))
+			IL_WARN("Could not cancel scan.\n");
+		else {
+			D_INFO("Committing rxon.filter_flags = " "0x%04X\n",
+			       filter_flags);
+			il->staging.filter_flags = cpu_to_le32(filter_flags);
+			il3945_commit_rxon(il);
+		}
+	}
+	mutex_unlock(&il->mutex);
+
+	return count;
+}
+
+static DEVICE_ATTR(filter_flags, 0644, il3945_show_filter_flags,
+		   il3945_store_filter_flags);
+
+static ssize_t
+il3945_show_measurement(struct device *d, struct device_attribute *attr,
+			char *buf)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+	struct il_spectrum_notification measure_report;
+	u32 size = sizeof(measure_report), len = 0, ofs = 0;
+	u8 *data = (u8 *) &measure_report;
+	unsigned long flags;
+
+	spin_lock_irqsave(&il->lock, flags);
+	if (!(il->measurement_status & MEASUREMENT_READY)) {
+		spin_unlock_irqrestore(&il->lock, flags);
+		return 0;
+	}
+	memcpy(&measure_report, &il->measure_report, size);
+	il->measurement_status = 0;
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	while (size && PAGE_SIZE - len) {
+		hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
+				   PAGE_SIZE - len, true);
+		len = strlen(buf);
+		if (PAGE_SIZE - len)
+			buf[len++] = '\n';
+
+		ofs += 16;
+		size -= min(size, 16U);
+	}
+
+	return len;
+}
+
+static ssize_t
+il3945_store_measurement(struct device *d, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+	struct ieee80211_measurement_params params = {
+		.channel = le16_to_cpu(il->active.channel),
+		.start_time = cpu_to_le64(il->_3945.last_tsf),
+		.duration = cpu_to_le16(1),
+	};
+	u8 type = IL_MEASURE_BASIC;
+	u8 buffer[32];
+	u8 channel;
+
+	if (count) {
+		char *p = buffer;
+		strlcpy(buffer, buf, sizeof(buffer));
+		channel = simple_strtoul(p, NULL, 0);
+		if (channel)
+			params.channel = channel;
+
+		p = buffer;
+		while (*p && *p != ' ')
+			p++;
+		if (*p)
+			type = simple_strtoul(p + 1, NULL, 0);
+	}
+
+	D_INFO("Invoking measurement of type %d on " "channel %d (for '%s')\n",
+	       type, params.channel, buf);
+	il3945_get_measurement(il, &params, type);
+
+	return count;
+}
+
+static DEVICE_ATTR(measurement, 0600, il3945_show_measurement,
+		   il3945_store_measurement);
+
+static ssize_t
+il3945_store_retry_rate(struct device *d, struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+
+	il->retry_rate = simple_strtoul(buf, NULL, 0);
+	if (il->retry_rate <= 0)
+		il->retry_rate = 1;
+
+	return count;
+}
+
+static ssize_t
+il3945_show_retry_rate(struct device *d, struct device_attribute *attr,
+		       char *buf)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+	return sprintf(buf, "%d", il->retry_rate);
+}
+
+static DEVICE_ATTR(retry_rate, 0600, il3945_show_retry_rate,
+		   il3945_store_retry_rate);
+
+static ssize_t
+il3945_show_channels(struct device *d, struct device_attribute *attr, char *buf)
+{
+	/* none of this belongs in sysfs anyway */
+	return 0;
+}
+
+static DEVICE_ATTR(channels, 0400, il3945_show_channels, NULL);
+
+static ssize_t
+il3945_show_antenna(struct device *d, struct device_attribute *attr, char *buf)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+
+	if (!il_is_alive(il))
+		return -EAGAIN;
+
+	return sprintf(buf, "%d\n", il3945_mod_params.antenna);
+}
+
+static ssize_t
+il3945_store_antenna(struct device *d, struct device_attribute *attr,
+		     const char *buf, size_t count)
+{
+	struct il_priv *il __maybe_unused = dev_get_drvdata(d);
+	int ant;
+
+	if (count == 0)
+		return 0;
+
+	if (sscanf(buf, "%1i", &ant) != 1) {
+		D_INFO("not in hex or decimal form.\n");
+		return count;
+	}
+
+	if (ant >= 0 && ant <= 2) {
+		D_INFO("Setting antenna select to %d.\n", ant);
+		il3945_mod_params.antenna = (enum il3945_antenna)ant;
+	} else
+		D_INFO("Bad antenna select value %d.\n", ant);
+
+	return count;
+}
+
+static DEVICE_ATTR(antenna, 0644, il3945_show_antenna, il3945_store_antenna);
+
+static ssize_t
+il3945_show_status(struct device *d, struct device_attribute *attr, char *buf)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+	if (!il_is_alive(il))
+		return -EAGAIN;
+	return sprintf(buf, "0x%08x\n", (int)il->status);
+}
+
+static DEVICE_ATTR(status, 0444, il3945_show_status, NULL);
+
+static ssize_t
+il3945_dump_error_log(struct device *d, struct device_attribute *attr,
+		      const char *buf, size_t count)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+	char *p = (char *)buf;
+
+	if (p[0] == '1')
+		il3945_dump_nic_error_log(il);
+
+	return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(dump_errors, 0200, NULL, il3945_dump_error_log);
+
+/*****************************************************************************
+ *
+ * driver setup and tear down
+ *
+ *****************************************************************************/
+
+static void
+il3945_setup_deferred_work(struct il_priv *il)
+{
+	il->workqueue = create_singlethread_workqueue(DRV_NAME);
+
+	init_waitqueue_head(&il->wait_command_queue);
+
+	INIT_WORK(&il->restart, il3945_bg_restart);
+	INIT_WORK(&il->rx_replenish, il3945_bg_rx_replenish);
+	INIT_DELAYED_WORK(&il->init_alive_start, il3945_bg_init_alive_start);
+	INIT_DELAYED_WORK(&il->alive_start, il3945_bg_alive_start);
+	INIT_DELAYED_WORK(&il->_3945.rfkill_poll, il3945_rfkill_poll);
+
+	il_setup_scan_deferred_work(il);
+
+	il3945_hw_setup_deferred_work(il);
+
+	timer_setup(&il->watchdog, il_bg_watchdog, 0);
+
+	tasklet_init(&il->irq_tasklet,
+		     (void (*)(unsigned long))il3945_irq_tasklet,
+		     (unsigned long)il);
+}
+
+static void
+il3945_cancel_deferred_work(struct il_priv *il)
+{
+	il3945_hw_cancel_deferred_work(il);
+
+	cancel_delayed_work_sync(&il->init_alive_start);
+	cancel_delayed_work(&il->alive_start);
+
+	il_cancel_scan_deferred_work(il);
+}
+
+static struct attribute *il3945_sysfs_entries[] = {
+	&dev_attr_antenna.attr,
+	&dev_attr_channels.attr,
+	&dev_attr_dump_errors.attr,
+	&dev_attr_flags.attr,
+	&dev_attr_filter_flags.attr,
+	&dev_attr_measurement.attr,
+	&dev_attr_retry_rate.attr,
+	&dev_attr_status.attr,
+	&dev_attr_temperature.attr,
+	&dev_attr_tx_power.attr,
+#ifdef CONFIG_IWLEGACY_DEBUG
+	&dev_attr_debug_level.attr,
+#endif
+	NULL
+};
+
+static const struct attribute_group il3945_attribute_group = {
+	.name = NULL,		/* put in device directory */
+	.attrs = il3945_sysfs_entries,
+};
+
+static struct ieee80211_ops il3945_mac_ops __ro_after_init = {
+	.tx = il3945_mac_tx,
+	.start = il3945_mac_start,
+	.stop = il3945_mac_stop,
+	.add_interface = il_mac_add_interface,
+	.remove_interface = il_mac_remove_interface,
+	.change_interface = il_mac_change_interface,
+	.config = il_mac_config,
+	.configure_filter = il3945_configure_filter,
+	.set_key = il3945_mac_set_key,
+	.conf_tx = il_mac_conf_tx,
+	.reset_tsf = il_mac_reset_tsf,
+	.bss_info_changed = il_mac_bss_info_changed,
+	.hw_scan = il_mac_hw_scan,
+	.sta_add = il3945_mac_sta_add,
+	.sta_remove = il_mac_sta_remove,
+	.tx_last_beacon = il_mac_tx_last_beacon,
+	.flush = il_mac_flush,
+};
+
+static int
+il3945_init_drv(struct il_priv *il)
+{
+	int ret;
+	struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+
+	il->retry_rate = 1;
+	il->beacon_skb = NULL;
+
+	spin_lock_init(&il->sta_lock);
+	spin_lock_init(&il->hcmd_lock);
+
+	INIT_LIST_HEAD(&il->free_frames);
+
+	mutex_init(&il->mutex);
+
+	il->ieee_channels = NULL;
+	il->ieee_rates = NULL;
+	il->band = NL80211_BAND_2GHZ;
+
+	il->iw_mode = NL80211_IFTYPE_STATION;
+	il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
+
+	/* initialize force reset */
+	il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD;
+
+	if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
+		IL_WARN("Unsupported EEPROM version: 0x%04X\n",
+			eeprom->version);
+		ret = -EINVAL;
+		goto err;
+	}
+	ret = il_init_channel_map(il);
+	if (ret) {
+		IL_ERR("initializing regulatory failed: %d\n", ret);
+		goto err;
+	}
+
+	/* Set up txpower settings in driver for all channels */
+	if (il3945_txpower_set_from_eeprom(il)) {
+		ret = -EIO;
+		goto err_free_channel_map;
+	}
+
+	ret = il_init_geos(il);
+	if (ret) {
+		IL_ERR("initializing geos failed: %d\n", ret);
+		goto err_free_channel_map;
+	}
+	il3945_init_hw_rates(il, il->ieee_rates);
+
+	return 0;
+
+err_free_channel_map:
+	il_free_channel_map(il);
+err:
+	return ret;
+}
+
+#define IL3945_MAX_PROBE_REQUEST	200
+
+static int
+il3945_setup_mac(struct il_priv *il)
+{
+	int ret;
+	struct ieee80211_hw *hw = il->hw;
+
+	hw->rate_control_algorithm = "iwl-3945-rs";
+	hw->sta_data_size = sizeof(struct il3945_sta_priv);
+	hw->vif_data_size = sizeof(struct il_vif_priv);
+
+	/* Tell mac80211 our characteristics */
+	ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+	ieee80211_hw_set(hw, SUPPORTS_PS);
+	ieee80211_hw_set(hw, SIGNAL_DBM);
+	ieee80211_hw_set(hw, SPECTRUM_MGMT);
+
+	hw->wiphy->interface_modes =
+	    BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
+
+	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+	hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
+				       REGULATORY_DISABLE_BEACON_HINTS;
+
+	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
+	/* we create the 802.11 header and a zero-length SSID element */
+	hw->wiphy->max_scan_ie_len = IL3945_MAX_PROBE_REQUEST - 24 - 2;
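+	/* That is 200 - 24 (802.11 mgmt header) - 2 (SSID element ID +
+	 * length byte) = 174 bytes left for user-supplied IEs. */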
+
+	/* Default value; 4 EDCA QOS priorities */
+	hw->queues = 4;
+
+	if (il->bands[NL80211_BAND_2GHZ].n_channels)
+		il->hw->wiphy->bands[NL80211_BAND_2GHZ] =
+		    &il->bands[NL80211_BAND_2GHZ];
+
+	if (il->bands[NL80211_BAND_5GHZ].n_channels)
+		il->hw->wiphy->bands[NL80211_BAND_5GHZ] =
+		    &il->bands[NL80211_BAND_5GHZ];
+
+	il_leds_init(il);
+
+	wiphy_ext_feature_set(il->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
+	ret = ieee80211_register_hw(il->hw);
+	if (ret) {
+		IL_ERR("Failed to register hw (error %d)\n", ret);
+		return ret;
+	}
+	il->mac80211_registered = 1;
+
+	return 0;
+}
+
+static int
+il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	int err = 0;
+	struct il_priv *il;
+	struct ieee80211_hw *hw;
+	struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data);
+	struct il3945_eeprom *eeprom;
+	unsigned long flags;
+
+	/***********************
+	 * 1. Allocating HW data
+	 * ********************/
+
+	hw = ieee80211_alloc_hw(sizeof(struct il_priv), &il3945_mac_ops);
+	if (!hw) {
+		err = -ENOMEM;
+		goto out;
+	}
+	il = hw->priv;
+	il->hw = hw;
+	SET_IEEE80211_DEV(hw, &pdev->dev);
+
+	il->cmd_queue = IL39_CMD_QUEUE_NUM;
+
+	D_INFO("*** LOAD DRIVER ***\n");
+	il->cfg = cfg;
+	il->ops = &il3945_ops;
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+	il->debugfs_ops = &il3945_debugfs_ops;
+#endif
+	il->pci_dev = pdev;
+	il->inta_mask = CSR_INI_SET_MASK;
+
+	/***************************
+	 * 2. Initializing PCI bus
+	 * *************************/
+	pci_disable_link_state(pdev,
+			       PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+			       PCIE_LINK_STATE_CLKPM);
+
+	if (pci_enable_device(pdev)) {
+		err = -ENODEV;
+		goto out_ieee80211_free_hw;
+	}
+
+	pci_set_master(pdev);
+
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (!err)
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (err) {
+		IL_WARN("No suitable DMA available.\n");
+		goto out_pci_disable_device;
+	}
+
+	pci_set_drvdata(pdev, il);
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err)
+		goto out_pci_disable_device;
+
+	/***********************
+	 * 3. Read REV Register
+	 * ********************/
+	il->hw_base = pci_ioremap_bar(pdev, 0);
+	if (!il->hw_base) {
+		err = -ENODEV;
+		goto out_pci_release_regions;
+	}
+
+	D_INFO("pci_resource_len = 0x%08llx\n",
+	       (unsigned long long)pci_resource_len(pdev, 0));
+	D_INFO("pci_resource_base = %p\n", il->hw_base);
+
+	/* We disable the RETRY_TIMEOUT register (0x41) to keep
+	 * PCI Tx retries from interfering with C3 CPU state */
+	pci_write_config_byte(pdev, 0x41, 0x00);
+
+	/* these spin locks will be used in apm_init and EEPROM access
+	 * we should init now
+	 */
+	spin_lock_init(&il->reg_lock);
+	spin_lock_init(&il->lock);
+
+	/*
+	 * stop and reset the on-board processor just in case it is in a
+	 * strange state ... like being left stranded by a primary kernel
+	 * and this is now the kdump kernel trying to start up
+	 */
+	_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+	/***********************
+	 * 4. Read EEPROM
+	 * ********************/
+
+	/* Read the EEPROM */
+	err = il_eeprom_init(il);
+	if (err) {
+		IL_ERR("Unable to init EEPROM\n");
+		goto out_iounmap;
+	}
+	/* MAC Address location in EEPROM same for 3945/4965 */
+	eeprom = (struct il3945_eeprom *)il->eeprom;
+	D_INFO("MAC address: %pM\n", eeprom->mac_address);
+	SET_IEEE80211_PERM_ADDR(il->hw, eeprom->mac_address);
+
+	/***********************
+	 * 5. Setup HW Constants
+	 * ********************/
+	/* Device-specific setup */
+	err = il3945_hw_set_hw_params(il);
+	if (err) {
+		IL_ERR("failed to set hw settings\n");
+		goto out_eeprom_free;
+	}
+
+	/***********************
+	 * 6. Setup il
+	 * ********************/
+
+	err = il3945_init_drv(il);
+	if (err) {
+		IL_ERR("initializing driver failed\n");
+		goto out_unset_hw_params;
+	}
+
+	IL_INFO("Detected Intel Wireless WiFi Link %s\n", il->cfg->name);
+
+	/***********************
+	 * 7. Setup Services
+	 * ********************/
+
+	spin_lock_irqsave(&il->lock, flags);
+	il_disable_interrupts(il);
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	pci_enable_msi(il->pci_dev);
+
+	err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il);
+	if (err) {
+		IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq);
+		goto out_disable_msi;
+	}
+
+	err = sysfs_create_group(&pdev->dev.kobj, &il3945_attribute_group);
+	if (err) {
+		IL_ERR("failed to create sysfs device attributes\n");
+		goto out_release_irq;
+	}
+
+	il_set_rxon_channel(il, &il->bands[NL80211_BAND_2GHZ].channels[5]);
+	il3945_setup_deferred_work(il);
+	il3945_setup_handlers(il);
+	il_power_initialize(il);
+
+	/*********************************
+	 * 8. Setup and Register mac80211
+	 * *******************************/
+
+	il_enable_interrupts(il);
+
+	err = il3945_setup_mac(il);
+	if (err)
+		goto out_remove_sysfs;
+
+	err = il_dbgfs_register(il, DRV_NAME);
+	if (err)
+		IL_ERR("failed to create debugfs files. Ignoring error: %d\n",
+		       err);
+
+	/* Start monitoring the killswitch */
+	queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll, 2 * HZ);
+
+	return 0;
+
+out_remove_sysfs:
+	destroy_workqueue(il->workqueue);
+	il->workqueue = NULL;
+	sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group);
+out_release_irq:
+	free_irq(il->pci_dev->irq, il);
+out_disable_msi:
+	pci_disable_msi(il->pci_dev);
+	il_free_geos(il);
+	il_free_channel_map(il);
+out_unset_hw_params:
+	il3945_unset_hw_params(il);
+out_eeprom_free:
+	il_eeprom_free(il);
+out_iounmap:
+	iounmap(il->hw_base);
+out_pci_release_regions:
+	pci_release_regions(pdev);
+out_pci_disable_device:
+	pci_disable_device(pdev);
+out_ieee80211_free_hw:
+	ieee80211_free_hw(il->hw);
+out:
+	return err;
+}
+
+static void
+il3945_pci_remove(struct pci_dev *pdev)
+{
+	struct il_priv *il = pci_get_drvdata(pdev);
+	unsigned long flags;
+
+	if (!il)
+		return;
+
+	D_INFO("*** UNLOAD DRIVER ***\n");
+
+	il_dbgfs_unregister(il);
+
+	set_bit(S_EXIT_PENDING, &il->status);
+
+	il_leds_exit(il);
+
+	if (il->mac80211_registered) {
+		ieee80211_unregister_hw(il->hw);
+		il->mac80211_registered = 0;
+	} else {
+		il3945_down(il);
+	}
+
+	/*
+	 * Make sure device is reset to low power before unloading driver.
+	 * This may be redundant with il_down(), but there are paths to
+	 * run il_down() without calling apm_ops.stop(), and there are
+	 * paths to avoid running il_down() at all before leaving driver.
+	 * This (inexpensive) call *makes sure* device is reset.
+	 */
+	il_apm_stop(il);
+
+	/* make sure we flush any pending irq or
+	 * tasklet for the driver
+	 */
+	spin_lock_irqsave(&il->lock, flags);
+	il_disable_interrupts(il);
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	il3945_synchronize_irq(il);
+
+	sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group);
+
+	cancel_delayed_work_sync(&il->_3945.rfkill_poll);
+
+	il3945_dealloc_ucode_pci(il);
+
+	if (il->rxq.bd)
+		il3945_rx_queue_free(il, &il->rxq);
+	il3945_hw_txq_ctx_free(il);
+
+	il3945_unset_hw_params(il);
+
+	/*netif_stop_queue(dev); */
+	flush_workqueue(il->workqueue);
+
+	/* ieee80211_unregister_hw calls il3945_mac_stop, which flushes
+	 * il->workqueue... so we can't take down the workqueue
+	 * until now... */
+	destroy_workqueue(il->workqueue);
+	il->workqueue = NULL;
+
+	free_irq(pdev->irq, il);
+	pci_disable_msi(pdev);
+
+	iounmap(il->hw_base);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+
+	il_free_channel_map(il);
+	il_free_geos(il);
+	kfree(il->scan_cmd);
+	if (il->beacon_skb)
+		dev_kfree_skb(il->beacon_skb);
+
+	ieee80211_free_hw(il->hw);
+}
+
+/*****************************************************************************
+ *
+ * driver and module entry point
+ *
+ *****************************************************************************/
+
+static struct pci_driver il3945_driver = {
+	.name = DRV_NAME,
+	.id_table = il3945_hw_card_ids,
+	.probe = il3945_pci_probe,
+	.remove = il3945_pci_remove,
+	.driver.pm = IL_LEGACY_PM_OPS,
+};
+
+static int __init
+il3945_init(void)
+{
+	int ret;
+
+	pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
+	pr_info(DRV_COPYRIGHT "\n");
+
+	/*
+	 * Disabling hardware scan means that mac80211 will perform scans
+	 * "the hard way", rather than using device's scan.
+	 */
+	if (il3945_mod_params.disable_hw_scan) {
+		pr_info("hw_scan is disabled\n");
+		il3945_mac_ops.hw_scan = NULL;
+	}
+
+	ret = il3945_rate_control_register();
+	if (ret) {
+		pr_err("Unable to register rate control algorithm: %d\n", ret);
+		return ret;
+	}
+
+	ret = pci_register_driver(&il3945_driver);
+	if (ret) {
+		pr_err("Unable to initialize PCI module\n");
+		goto error_register;
+	}
+
+	return ret;
+
+error_register:
+	il3945_rate_control_unregister();
+	return ret;
+}
+
+static void __exit
+il3945_exit(void)
+{
+	pci_unregister_driver(&il3945_driver);
+	il3945_rate_control_unregister();
+}
+
+MODULE_FIRMWARE(IL3945_MODULE_FIRMWARE(IL3945_UCODE_API_MAX));
+
+module_param_named(antenna, il3945_mod_params.antenna, int, 0444);
+MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
+module_param_named(swcrypto, il3945_mod_params.sw_crypto, int, 0444);
+MODULE_PARM_DESC(swcrypto, "use software crypto (default 1 [software])");
+module_param_named(disable_hw_scan, il3945_mod_params.disable_hw_scan, int,
+		   0444);
+MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 1)");
+#ifdef CONFIG_IWLEGACY_DEBUG
+module_param_named(debug, il_debug_level, uint, 0644);
+MODULE_PARM_DESC(debug, "debug output mask");
+#endif
+module_param_named(fw_restart, il3945_mod_params.restart_fw, int, 0444);
+MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
+
+module_exit(il3945_exit);
+module_init(il3945_init);
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-rs.c b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
new file mode 100644
index 0000000..e8983c6
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
@@ -0,0 +1,975 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+
+#include <linux/workqueue.h>
+
+#include "commands.h"
+#include "3945.h"
+
+#define RS_NAME "iwl-3945-rs"
+
+static s32 il3945_expected_tpt_g[RATE_COUNT_3945] = {
+	7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202
+};
+
+static s32 il3945_expected_tpt_g_prot[RATE_COUNT_3945] = {
+	7, 13, 35, 58, 0, 0, 0, 80, 93, 113, 123, 125
+};
+
+static s32 il3945_expected_tpt_a[RATE_COUNT_3945] = {
+	0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186
+};
+
+static s32 il3945_expected_tpt_b[RATE_COUNT_3945] = {
+	7, 13, 35, 58, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+struct il3945_tpt_entry {
+	s8 min_rssi;
+	u8 idx;
+};
+
+static struct il3945_tpt_entry il3945_tpt_table_a[] = {
+	{-60, RATE_54M_IDX},
+	{-64, RATE_48M_IDX},
+	{-72, RATE_36M_IDX},
+	{-80, RATE_24M_IDX},
+	{-84, RATE_18M_IDX},
+	{-85, RATE_12M_IDX},
+	{-87, RATE_9M_IDX},
+	{-89, RATE_6M_IDX}
+};
+
+static struct il3945_tpt_entry il3945_tpt_table_g[] = {
+	{-60, RATE_54M_IDX},
+	{-64, RATE_48M_IDX},
+	{-68, RATE_36M_IDX},
+	{-80, RATE_24M_IDX},
+	{-84, RATE_18M_IDX},
+	{-85, RATE_12M_IDX},
+	{-86, RATE_11M_IDX},
+	{-88, RATE_5M_IDX},
+	{-90, RATE_2M_IDX},
+	{-92, RATE_1M_IDX}
+};
+
+#define RATE_MAX_WINDOW		62
+#define RATE_FLUSH		(3*HZ)
+#define RATE_WIN_FLUSH		(HZ/2)
+#define IL39_RATE_HIGH_TH	11520
+#define IL_SUCCESS_UP_TH	8960
+#define IL_SUCCESS_DOWN_TH	10880
+#define RATE_MIN_FAILURE_TH	6
+#define RATE_MIN_SUCCESS_TH	8
+#define RATE_DECREASE_TH	1920
+#define RATE_RETRY_TH		15
+
+static u8
+il3945_get_rate_idx_by_rssi(s32 rssi, enum nl80211_band band)
+{
+	u32 idx = 0;
+	u32 table_size = 0;
+	struct il3945_tpt_entry *tpt_table = NULL;
+
+	if (rssi < IL_MIN_RSSI_VAL || rssi > IL_MAX_RSSI_VAL)
+		rssi = IL_MIN_RSSI_VAL;
+
+	switch (band) {
+	case NL80211_BAND_2GHZ:
+		tpt_table = il3945_tpt_table_g;
+		table_size = ARRAY_SIZE(il3945_tpt_table_g);
+		break;
+	case NL80211_BAND_5GHZ:
+		tpt_table = il3945_tpt_table_a;
+		table_size = ARRAY_SIZE(il3945_tpt_table_a);
+		break;
+	default:
+		BUG();
+		break;
+	}
+
+	while (idx < table_size && rssi < tpt_table[idx].min_rssi)
+		idx++;
+
+	idx = min(idx, table_size - 1);
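+	/*
+	 * Example walk (table values above): rssi = -70 on 2.4 GHz skips
+	 * the -60/-64/-68 entries and stops at {-80, RATE_24M_IDX}, so
+	 * rate scaling starts at the 24M index; any rssi of -92 or below
+	 * ends at the last entry, RATE_1M_IDX.
+	 */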
+
+	return tpt_table[idx].idx;
+}
+
+static void
+il3945_clear_win(struct il3945_rate_scale_data *win)
+{
+	win->data = 0;
+	win->success_counter = 0;
+	win->success_ratio = -1;
+	win->counter = 0;
+	win->average_tpt = IL_INVALID_VALUE;
+	win->stamp = 0;
+}
+
+/**
+ * il3945_rate_scale_flush_wins - flush out the rate scale wins
+ *
+ * Returns the number of wins that have gathered data but were
+ * not flushed.  If there were any that were not flushed, then
+ * reschedule the rate flushing routine.
+ */
+static int
+il3945_rate_scale_flush_wins(struct il3945_rs_sta *rs_sta)
+{
+	int unflushed = 0;
+	int i;
+	unsigned long flags;
+	struct il_priv *il __maybe_unused = rs_sta->il;
+
+	/*
+	 * For each rate, if we have collected data on that rate
+	 * and more than RATE_WIN_FLUSH has elapsed since we last
+	 * flushed it, clear out the gathered stats.
+	 */
+	for (i = 0; i < RATE_COUNT_3945; i++) {
+		if (!rs_sta->win[i].counter)
+			continue;
+
+		spin_lock_irqsave(&rs_sta->lock, flags);
+		if (time_after(jiffies, rs_sta->win[i].stamp + RATE_WIN_FLUSH)) {
+			D_RATE("flushing %d samples of rate " "idx %d\n",
+			       rs_sta->win[i].counter, i);
+			il3945_clear_win(&rs_sta->win[i]);
+		} else
+			unflushed++;
+		spin_unlock_irqrestore(&rs_sta->lock, flags);
+	}
+
+	return unflushed;
+}
+
+#define RATE_FLUSH_MAX              5000	/* msec */
+#define RATE_FLUSH_MIN              50	/* msec */
+#define IL_AVERAGE_PACKETS             1500
+
+static void
+il3945_bg_rate_scale_flush(struct timer_list *t)
+{
+	struct il3945_rs_sta *rs_sta = from_timer(rs_sta, t, rate_scale_flush);
+	struct il_priv *il __maybe_unused = rs_sta->il;
+	int unflushed = 0;
+	unsigned long flags;
+	u32 packet_count, duration, pps;
+
+	D_RATE("enter\n");
+
+	unflushed = il3945_rate_scale_flush_wins(rs_sta);
+
+	spin_lock_irqsave(&rs_sta->lock, flags);
+
+	/* Number of packets Tx'd since the last time this timer ran */
+	packet_count = (rs_sta->tx_packets - rs_sta->last_tx_packets) + 1;
+
+	rs_sta->last_tx_packets = rs_sta->tx_packets + 1;
+
+	if (unflushed) {
+		duration =
+		    jiffies_to_msecs(jiffies - rs_sta->last_partial_flush);
+
+		D_RATE("Tx'd %d packets in %dms\n", packet_count, duration);
+
+		/* Determine packets per second */
+		if (duration)
+			pps = (packet_count * 1000) / duration;
+		else
+			pps = 0;
+
+		if (pps) {
+			duration = (IL_AVERAGE_PACKETS * 1000) / pps;
+			if (duration < RATE_FLUSH_MIN)
+				duration = RATE_FLUSH_MIN;
+			else if (duration > RATE_FLUSH_MAX)
+				duration = RATE_FLUSH_MAX;
+		} else
+			duration = RATE_FLUSH_MAX;
+
+		rs_sta->flush_time = msecs_to_jiffies(duration);
+
+		D_RATE("new flush period: %d msec ave %d\n", duration,
+		       packet_count);
+
+		mod_timer(&rs_sta->rate_scale_flush,
+			  jiffies + rs_sta->flush_time);
+
+		rs_sta->last_partial_flush = jiffies;
+	} else {
+		rs_sta->flush_time = RATE_FLUSH;
+		rs_sta->flush_pending = 0;
+	}
+	/* If there weren't any unflushed entries, we don't schedule the timer
+	 * to run again */
+
+	rs_sta->last_flush = jiffies;
+
+	spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+	D_RATE("leave\n");
+}
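+
+/*
+ * Worked example for the adaptive flush period above: if 300 packets were
+ * Tx'd in the 2000 ms since the last partial flush, pps = 300 * 1000 /
+ * 2000 = 150 and duration = IL_AVERAGE_PACKETS * 1000 / 150 = 10000 ms,
+ * which is then clamped to RATE_FLUSH_MAX (5000 ms).
+ */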
+
+/**
+ * il3945_collect_tx_data - Update the success/failure sliding win
+ *
+ * We keep a sliding win of the last 62 packets transmitted
+ * at this rate.  win->data contains the bitmask of successful
+ * packets.
+ */
+static void
+il3945_collect_tx_data(struct il3945_rs_sta *rs_sta,
+		       struct il3945_rate_scale_data *win, int success,
+		       int retries, int idx)
+{
+	unsigned long flags;
+	s32 fail_count;
+	struct il_priv *il __maybe_unused = rs_sta->il;
+
+	if (!retries) {
+		D_RATE("leave: retries == 0 -- should be at least 1\n");
+		return;
+	}
+
+	spin_lock_irqsave(&rs_sta->lock, flags);
+
+	/*
+	 * Keep track of only the latest 62 tx frame attempts in this rate's
+	 * history win; anything older isn't really relevant any more.
+	 * If we have filled up the sliding win, drop the oldest attempt;
+	 * if the oldest attempt (highest bit in bitmap) shows "success",
+	 * subtract "1" from the success counter (this is the main reason
+	 * we keep these bitmaps!).
+	 */
+	while (retries > 0) {
+		if (win->counter >= RATE_MAX_WINDOW) {
+
+			/* remove earliest */
+			win->counter = RATE_MAX_WINDOW - 1;
+
+			if (win->data & (1ULL << (RATE_MAX_WINDOW - 1))) {
+				win->data &= ~(1ULL << (RATE_MAX_WINDOW - 1));
+				win->success_counter--;
+			}
+		}
+
+		/* Increment frames-attempted counter */
+		win->counter++;
+
+		/* Shift bitmap by one frame (throw away oldest history),
+		 * OR in "1", and increment "success" if this
+		 * frame was successful. */
+		win->data <<= 1;
+		if (success > 0) {
+			win->success_counter++;
+			win->data |= 0x1;
+			success--;
+		}
+
+		retries--;
+	}
+
+	/* Calculate current success ratio, avoid divide-by-0! */
+	if (win->counter > 0)
+		win->success_ratio =
+		    128 * (100 * win->success_counter) / win->counter;
+	else
+		win->success_ratio = IL_INVALID_VALUE;
+
+	fail_count = win->counter - win->success_counter;
+
+	/* Calculate average throughput, if we have enough history. */
+	if (fail_count >= RATE_MIN_FAILURE_TH ||
+	    win->success_counter >= RATE_MIN_SUCCESS_TH)
+		win->average_tpt =
+		    ((win->success_ratio * rs_sta->expected_tpt[idx] +
+		      64) / 128);
+	else
+		win->average_tpt = IL_INVALID_VALUE;
+
+	/* Tag this win as having been updated */
+	win->stamp = jiffies;
+
+	spin_unlock_irqrestore(&rs_sta->lock, flags);
+}
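+
+/*
+ * Example: a call with success = 1 and retries = 3 (one ACKed frame after
+ * two failed attempts) shifts the bitmap three times and sets the success
+ * bit on the first iteration only, so the win gains one success and two
+ * failures, and counter grows by 3 (capped at RATE_MAX_WINDOW).
+ */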
+
+/*
+ * Called after adding a new station to initialize rate scaling
+ */
+void
+il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
+{
+	struct ieee80211_hw *hw = il->hw;
+	struct ieee80211_conf *conf = &il->hw->conf;
+	struct il3945_sta_priv *psta;
+	struct il3945_rs_sta *rs_sta;
+	struct ieee80211_supported_band *sband;
+	int i;
+
+	D_INFO("enter\n");
+	if (sta_id == il->hw_params.bcast_id)
+		goto out;
+
+	psta = (struct il3945_sta_priv *)sta->drv_priv;
+	rs_sta = &psta->rs_sta;
+	sband = hw->wiphy->bands[conf->chandef.chan->band];
+
+	rs_sta->il = il;
+
+	rs_sta->start_rate = RATE_INVALID;
+
+	/* default to just 802.11b */
+	rs_sta->expected_tpt = il3945_expected_tpt_b;
+
+	rs_sta->last_partial_flush = jiffies;
+	rs_sta->last_flush = jiffies;
+	rs_sta->flush_time = RATE_FLUSH;
+	rs_sta->last_tx_packets = 0;
+
+	for (i = 0; i < RATE_COUNT_3945; i++)
+		il3945_clear_win(&rs_sta->win[i]);
+
+	/* TODO: what is a good starting rate for STA? About middle? Maybe not
+	 * the lowest or the highest rate.. Could consider using RSSI from
+	 * previous packets? Need to have IEEE 802.1X auth succeed immediately
+	 * after assoc.. */
+
+	for (i = sband->n_bitrates - 1; i >= 0; i--) {
+		if (sta->supp_rates[sband->band] & (1 << i)) {
+			rs_sta->last_txrate_idx = i;
+			break;
+		}
+	}
+
+	il->_3945.sta_supp_rates = sta->supp_rates[sband->band];
+	/* For the 5 GHz band, rates start at IL_FIRST_OFDM_RATE */
+	if (sband->band == NL80211_BAND_5GHZ) {
+		rs_sta->last_txrate_idx += IL_FIRST_OFDM_RATE;
+		il->_3945.sta_supp_rates <<= IL_FIRST_OFDM_RATE;
+	}
+
+out:
+	il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
+
+	D_INFO("leave\n");
+}
+
+static void *
+il3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+{
+	return hw->priv;
+}
+
+/* rate scale requires free function to be implemented */
+static void
+il3945_rs_free(void *il)
+{
+}
+
+static void *
+il3945_rs_alloc_sta(void *il_priv, struct ieee80211_sta *sta, gfp_t gfp)
+{
+	struct il3945_rs_sta *rs_sta;
+	struct il3945_sta_priv *psta = (void *)sta->drv_priv;
+	struct il_priv *il __maybe_unused = il_priv;
+
+	D_RATE("enter\n");
+
+	rs_sta = &psta->rs_sta;
+
+	spin_lock_init(&rs_sta->lock);
+	timer_setup(&rs_sta->rate_scale_flush, il3945_bg_rate_scale_flush, 0);
+	D_RATE("leave\n");
+
+	return rs_sta;
+}
+
+static void
+il3945_rs_free_sta(void *il_priv, struct ieee80211_sta *sta, void *il_sta)
+{
+	struct il3945_rs_sta *rs_sta = il_sta;
+
+	/*
+	 * Be careful not to use any members of il3945_rs_sta (like trying
+	 * to use il_priv to print out debugging) since it may not be fully
+	 * initialized at this point.
+	 */
+	del_timer_sync(&rs_sta->rate_scale_flush);
+}
+
+/**
+ * il3945_rs_tx_status - Update rate control values based on Tx results
+ *
+ * NOTE: Uses il_priv->retry_rate for the # of retries attempted by
+ * the hardware for each rate.
+ */
+static void
+il3945_rs_tx_status(void *il_rate, struct ieee80211_supported_band *sband,
+		    struct ieee80211_sta *sta, void *il_sta,
+		    struct sk_buff *skb)
+{
+	s8 retries = 0, current_count;
+	int scale_rate_idx, first_idx, last_idx;
+	unsigned long flags;
+	struct il_priv *il = (struct il_priv *)il_rate;
+	struct il3945_rs_sta *rs_sta = il_sta;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+	D_RATE("enter\n");
+
+	retries = info->status.rates[0].count;
+	/* Sanity Check for retries */
+	if (retries > RATE_RETRY_TH)
+		retries = RATE_RETRY_TH;
+
+	first_idx = sband->bitrates[info->status.rates[0].idx].hw_value;
+	if (first_idx < 0 || first_idx >= RATE_COUNT_3945) {
+		D_RATE("leave: Rate out of bounds: %d\n", first_idx);
+		return;
+	}
+
+	if (!il_sta) {
+		D_RATE("leave: No STA il data to update!\n");
+		return;
+	}
+
+	/* Treat uninitialized rate scaling data same as non-existing. */
+	if (!rs_sta->il) {
+		D_RATE("leave: STA il data uninitialized!\n");
+		return;
+	}
+
+	rs_sta->tx_packets++;
+
+	scale_rate_idx = first_idx;
+	last_idx = first_idx;
+
+	/*
+	 * Update the win for each rate.  We determine which rates
+	 * were Tx'd based on the total number of retries vs. the number
+	 * of retries configured for each rate -- currently the global
+	 * il->retry_rate value rather than a rate-specific one.
+	 *
+	 * On exit from this while loop, last_idx indicates the rate
+	 * at which the frame was finally transmitted (or failed, if it
+	 * got no ACK).
+	 */
+	while (retries > 1) {
+		if ((retries - 1) < il->retry_rate) {
+			current_count = (retries - 1);
+			last_idx = scale_rate_idx;
+		} else {
+			current_count = il->retry_rate;
+			last_idx = il3945_rs_next_rate(il, scale_rate_idx);
+		}
+
+		/* Update this rate accounting for as many retries
+		 * as was used for it (per current_count) */
+		il3945_collect_tx_data(rs_sta, &rs_sta->win[scale_rate_idx], 0,
+				       current_count, scale_rate_idx);
+		D_RATE("Update rate %d for %d retries.\n", scale_rate_idx,
+		       current_count);
+
+		retries -= current_count;
+
+		scale_rate_idx = last_idx;
+	}
+
+	/* Update the last idx win with success/failure based on ACK */
+	D_RATE("Update rate %d with %s.\n", last_idx,
+	       (info->flags & IEEE80211_TX_STAT_ACK) ? "success" : "failure");
+	il3945_collect_tx_data(rs_sta, &rs_sta->win[last_idx],
+			       info->flags & IEEE80211_TX_STAT_ACK, 1,
+			       last_idx);
+
+	/* We updated the rate scale win -- if it's been more than
+	 * flush_time since the last run, schedule the flush
+	 * again. */
+	spin_lock_irqsave(&rs_sta->lock, flags);
+
+	if (!rs_sta->flush_pending &&
+	    time_after(jiffies, rs_sta->last_flush + rs_sta->flush_time)) {
+
+		rs_sta->last_partial_flush = jiffies;
+		rs_sta->flush_pending = 1;
+		mod_timer(&rs_sta->rate_scale_flush,
+			  jiffies + rs_sta->flush_time);
+	}
+
+	spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+	D_RATE("leave\n");
+}
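+
+/*
+ * Illustration of the retry distribution above, assuming il->retry_rate
+ * is 1 (the value is set elsewhere in the driver): a frame reported with
+ * info->status.rates[0].count == 4 charges one failed attempt each to the
+ * first rate and to the next two rates returned by il3945_rs_next_rate();
+ * the final attempt is then recorded against last_idx as success or
+ * failure depending on IEEE80211_TX_STAT_ACK.
+ */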
+
+static u16
+il3945_get_adjacent_rate(struct il3945_rs_sta *rs_sta, u8 idx, u16 rate_mask,
+			 enum nl80211_band band)
+{
+	u8 high = RATE_INVALID;
+	u8 low = RATE_INVALID;
+	struct il_priv *il __maybe_unused = rs_sta->il;
+
+	/* For 802.11a we walk to the literally adjacent rate in
+	 * the rate table */
+	if (unlikely(band == NL80211_BAND_5GHZ)) {
+		int i;
+		u32 mask;
+
+		/* Find the previous rate that is in the rate mask */
+		i = idx - 1;
+		for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
+			if (rate_mask & mask) {
+				low = i;
+				break;
+			}
+		}
+
+		/* Find the next rate that is in the rate mask */
+		i = idx + 1;
+		for (mask = (1 << i); i < RATE_COUNT_3945; i++, mask <<= 1) {
+			if (rate_mask & mask) {
+				high = i;
+				break;
+			}
+		}
+
+		return (high << 8) | low;
+	}
+
+	low = idx;
+	while (low != RATE_INVALID) {
+		if (rs_sta->tgg)
+			low = il3945_rates[low].prev_rs_tgg;
+		else
+			low = il3945_rates[low].prev_rs;
+		if (low == RATE_INVALID)
+			break;
+		if (rate_mask & (1 << low))
+			break;
+		D_RATE("Skipping masked lower rate: %d\n", low);
+	}
+
+	high = idx;
+	while (high != RATE_INVALID) {
+		if (rs_sta->tgg)
+			high = il3945_rates[high].next_rs_tgg;
+		else
+			high = il3945_rates[high].next_rs;
+		if (high == RATE_INVALID)
+			break;
+		if (rate_mask & (1 << high))
+			break;
+		D_RATE("Skipping masked higher rate: %d\n", high);
+	}
+
+	return (high << 8) | low;
+}
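+
+/*
+ * The return value packs both neighbours: low = ret & 0xff and
+ * high = (ret >> 8) & 0xff.  E.g. on 5 GHz with idx = RATE_24M_IDX and a
+ * rate_mask covering all OFDM rates, the bit scans find low = RATE_18M_IDX
+ * and high = RATE_36M_IDX, so the function returns
+ * (RATE_36M_IDX << 8) | RATE_18M_IDX.
+ */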
+
+/**
+ * il3945_rs_get_rate - find the rate for the requested packet
+ *
+ * Fills info->control.rates[0] with the rate selected by the algorithm.
+ *
+ * The rate control algorithm has no internal mapping between hw_mode's
+ * rate ordering and the rate ordering used by the rate control algorithm.
+ *
+ * The rate control algorithm uses a single table of rates that goes across
+ * the entire A/B/G spectrum vs. being limited to just one particular
+ * hw_mode.
+ *
+ * As such, we can't convert the idx obtained below into the hw_mode's
+ * rate table and must reference the driver allocated rate table
+ *
+ */
+static void
+il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
+		   struct ieee80211_tx_rate_control *txrc)
+{
+	struct ieee80211_supported_band *sband = txrc->sband;
+	struct sk_buff *skb = txrc->skb;
+	u8 low = RATE_INVALID;
+	u8 high = RATE_INVALID;
+	u16 high_low;
+	int idx;
+	struct il3945_rs_sta *rs_sta = il_sta;
+	struct il3945_rate_scale_data *win = NULL;
+	int current_tpt = IL_INVALID_VALUE;
+	int low_tpt = IL_INVALID_VALUE;
+	int high_tpt = IL_INVALID_VALUE;
+	u32 fail_count;
+	s8 scale_action = 0;
+	unsigned long flags;
+	u16 rate_mask;
+	s8 max_rate_idx = -1;
+	struct il_priv *il __maybe_unused = (struct il_priv *)il_r;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+	D_RATE("enter\n");
+
+	/* Treat uninitialized rate scaling data same as non-existing. */
+	if (rs_sta && !rs_sta->il) {
+		D_RATE("Rate scaling information not initialized yet.\n");
+		il_sta = NULL;
+	}
+
+	if (rate_control_send_low(sta, il_sta, txrc))
+		return;
+
+	rate_mask = sta->supp_rates[sband->band];
+
+	/* get user max rate if set */
+	max_rate_idx = fls(txrc->rate_idx_mask) - 1;
+	if (sband->band == NL80211_BAND_5GHZ && max_rate_idx != -1)
+		max_rate_idx += IL_FIRST_OFDM_RATE;
+	if (max_rate_idx < 0 || max_rate_idx >= RATE_COUNT)
+		max_rate_idx = -1;
+
+	idx = min(rs_sta->last_txrate_idx & 0xffff, RATE_COUNT_3945 - 1);
+
+	if (sband->band == NL80211_BAND_5GHZ)
+		rate_mask = rate_mask << IL_FIRST_OFDM_RATE;
+
+	spin_lock_irqsave(&rs_sta->lock, flags);
+
+	/* For a recent association, choose the best rate based on
+	 * the RSSI value.
+	 */
+	if (rs_sta->start_rate != RATE_INVALID) {
+		if (rs_sta->start_rate < idx &&
+		    (rate_mask & (1 << rs_sta->start_rate)))
+			idx = rs_sta->start_rate;
+		rs_sta->start_rate = RATE_INVALID;
+	}
+
+	/* force user max rate if set by user */
+	if (max_rate_idx != -1 && max_rate_idx < idx) {
+		if (rate_mask & (1 << max_rate_idx))
+			idx = max_rate_idx;
+	}
+
+	win = &(rs_sta->win[idx]);
+
+	fail_count = win->counter - win->success_counter;
+
+	if (fail_count < RATE_MIN_FAILURE_TH &&
+	    win->success_counter < RATE_MIN_SUCCESS_TH) {
+		spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+		D_RATE("Invalid average_tpt on rate %d: "
+		       "counter: %d, success_counter: %d, "
+		       "expected_tpt is %sNULL\n", idx, win->counter,
+		       win->success_counter,
+		       rs_sta->expected_tpt ? "not " : "");
+
+		/* Can't calculate this yet; not enough history */
+		win->average_tpt = IL_INVALID_VALUE;
+		goto out;
+
+	}
+
+	current_tpt = win->average_tpt;
+
+	high_low =
+	    il3945_get_adjacent_rate(rs_sta, idx, rate_mask, sband->band);
+	low = high_low & 0xff;
+	high = (high_low >> 8) & 0xff;
+
+	/* If the user set a max rate, don't allow rates above that constraint */
+	if (max_rate_idx != -1 && max_rate_idx < high)
+		high = RATE_INVALID;
+
+	/* Collect Measured throughputs of adjacent rates */
+	if (low != RATE_INVALID)
+		low_tpt = rs_sta->win[low].average_tpt;
+
+	if (high != RATE_INVALID)
+		high_tpt = rs_sta->win[high].average_tpt;
+
+	spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+	scale_action = 0;
+
+	/* Low success ratio: need to drop the rate */
+	if (win->success_ratio < RATE_DECREASE_TH || !current_tpt) {
+		D_RATE("decrease rate because of low success_ratio\n");
+		scale_action = -1;
+		/* No throughput measured yet for adjacent rates;
+		 * try to increase */
+	} else if (low_tpt == IL_INVALID_VALUE && high_tpt == IL_INVALID_VALUE) {
+
+		if (high != RATE_INVALID &&
+		    win->success_ratio >= RATE_INCREASE_TH)
+			scale_action = 1;
+		else if (low != RATE_INVALID)
+			scale_action = 0;
+
+		/* Both adjacent throughputs are measured, but neither one has
+		 * better throughput; we're using the best rate, don't change
+		 * it! */
+	} else if (low_tpt != IL_INVALID_VALUE && high_tpt != IL_INVALID_VALUE
+		   && low_tpt < current_tpt && high_tpt < current_tpt) {
+
+		D_RATE("No action -- low [%d] & high [%d] < "
+		       "current_tpt [%d]\n", low_tpt, high_tpt, current_tpt);
+		scale_action = 0;
+
+		/* At least one of the rates has better throughput */
+	} else {
+		if (high_tpt != IL_INVALID_VALUE) {
+
+			/* High rate has better throughput, Increase
+			 * rate */
+			if (high_tpt > current_tpt &&
+			    win->success_ratio >= RATE_INCREASE_TH)
+				scale_action = 1;
+			else {
+				D_RATE("decrease rate because of high tpt\n");
+				scale_action = 0;
+			}
+		} else if (low_tpt != IL_INVALID_VALUE) {
+			if (low_tpt > current_tpt) {
+				D_RATE("decrease rate because of low tpt\n");
+				scale_action = -1;
+			} else if (win->success_ratio >= RATE_INCREASE_TH) {
+				/* Lower rate has worse throughput and our
+				 * success ratio is good; increase the rate */
+				scale_action = 1;
+			}
+		}
+	}
+
+	/* Sanity check; asked for decrease, but success rate or throughput
+	 * has been good at old rate.  Don't change it. */
+	if (scale_action == -1 && low != RATE_INVALID &&
+	    (win->success_ratio > RATE_HIGH_TH ||
+	     current_tpt > 100 * rs_sta->expected_tpt[low]))
+		scale_action = 0;
+
+	switch (scale_action) {
+	case -1:
+		/* Decrease rate */
+		if (low != RATE_INVALID)
+			idx = low;
+		break;
+	case 1:
+		/* Increase rate */
+		if (high != RATE_INVALID)
+			idx = high;
+
+		break;
+	case 0:
+	default:
+		/* No change */
+		break;
+	}
+
+	D_RATE("Selected %d (action %d) - low %d high %d\n", idx, scale_action,
+	       low, high);
+
+out:
+
+	if (sband->band == NL80211_BAND_5GHZ) {
+		if (WARN_ON_ONCE(idx < IL_FIRST_OFDM_RATE))
+			idx = IL_FIRST_OFDM_RATE;
+		rs_sta->last_txrate_idx = idx;
+		info->control.rates[0].idx = idx - IL_FIRST_OFDM_RATE;
+	} else {
+		rs_sta->last_txrate_idx = idx;
+		info->control.rates[0].idx = rs_sta->last_txrate_idx;
+	}
+	info->control.rates[0].count = 1;
+
+	D_RATE("leave: %d\n", idx);
+}
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+
+static ssize_t
+il3945_sta_dbgfs_stats_table_read(struct file *file, char __user *user_buf,
+				  size_t count, loff_t *ppos)
+{
+	char *buff;
+	int desc = 0;
+	int j;
+	ssize_t ret;
+	struct il3945_rs_sta *lq_sta = file->private_data;
+
+	buff = kmalloc(1024, GFP_KERNEL);
+	if (!buff)
+		return -ENOMEM;
+
+	desc +=
+	    sprintf(buff + desc,
+		    "tx packets=%d last rate idx=%d\n"
+		    "rate=0x%X flush time %d\n", lq_sta->tx_packets,
+		    lq_sta->last_txrate_idx, lq_sta->start_rate,
+		    jiffies_to_msecs(lq_sta->flush_time));
+	for (j = 0; j < RATE_COUNT_3945; j++) {
+		desc +=
+		    sprintf(buff + desc, "counter=%d success=%d %%=%d\n",
+			    lq_sta->win[j].counter,
+			    lq_sta->win[j].success_counter,
+			    lq_sta->win[j].success_ratio);
+	}
+	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+	kfree(buff);
+	return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
+	.read = il3945_sta_dbgfs_stats_table_read,
+	.open = simple_open,
+	.llseek = default_llseek,
+};
+
+static void
+il3945_add_debugfs(void *il, void *il_sta, struct dentry *dir)
+{
+	struct il3945_rs_sta *lq_sta = il_sta;
+
+	lq_sta->rs_sta_dbgfs_stats_table_file =
+	    debugfs_create_file("rate_stats_table", 0600, dir, lq_sta,
+				&rs_sta_dbgfs_stats_table_ops);
+
+}
+
+static void
+il3945_remove_debugfs(void *il, void *il_sta)
+{
+	struct il3945_rs_sta *lq_sta = il_sta;
+	debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
+}
+#endif
+
+/*
+ * Initialization of rate scaling information is done by driver after
+ * the station is added. Since mac80211 calls this function before a
+ * station is added we ignore it.
+ */
+static void
+il3945_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
+			 struct cfg80211_chan_def *chandef,
+			 struct ieee80211_sta *sta, void *il_sta)
+{
+}
+
+static const struct rate_control_ops rs_ops = {
+	.name = RS_NAME,
+	.tx_status = il3945_rs_tx_status,
+	.get_rate = il3945_rs_get_rate,
+	.rate_init = il3945_rs_rate_init_stub,
+	.alloc = il3945_rs_alloc,
+	.free = il3945_rs_free,
+	.alloc_sta = il3945_rs_alloc_sta,
+	.free_sta = il3945_rs_free_sta,
+#ifdef CONFIG_MAC80211_DEBUGFS
+	.add_sta_debugfs = il3945_add_debugfs,
+	.remove_sta_debugfs = il3945_remove_debugfs,
+#endif
+
+};
+
+void
+il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
+{
+	struct il_priv *il = hw->priv;
+	s32 rssi = 0;
+	unsigned long flags;
+	struct il3945_rs_sta *rs_sta;
+	struct ieee80211_sta *sta;
+	struct il3945_sta_priv *psta;
+
+	D_RATE("enter\n");
+
+	rcu_read_lock();
+
+	sta = ieee80211_find_sta(il->vif, il->stations[sta_id].sta.sta.addr);
+	if (!sta) {
+		D_RATE("Unable to find station to initialize rate scaling.\n");
+		rcu_read_unlock();
+		return;
+	}
+
+	psta = (void *)sta->drv_priv;
+	rs_sta = &psta->rs_sta;
+
+	spin_lock_irqsave(&rs_sta->lock, flags);
+
+	rs_sta->tgg = 0;
+	switch (il->band) {
+	case NL80211_BAND_2GHZ:
+		/* TODO: this always does G, not a regression */
+		if (il->active.flags & RXON_FLG_TGG_PROTECT_MSK) {
+			rs_sta->tgg = 1;
+			rs_sta->expected_tpt = il3945_expected_tpt_g_prot;
+		} else
+			rs_sta->expected_tpt = il3945_expected_tpt_g;
+		break;
+	case NL80211_BAND_5GHZ:
+		rs_sta->expected_tpt = il3945_expected_tpt_a;
+		break;
+	default:
+		BUG();
+		break;
+	}
+
+	spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+	rssi = il->_3945.last_rx_rssi;
+	if (rssi == 0)
+		rssi = IL_MIN_RSSI_VAL;
+
+	D_RATE("Network RSSI: %d\n", rssi);
+
+	rs_sta->start_rate = il3945_get_rate_idx_by_rssi(rssi, il->band);
+
+	D_RATE("leave: rssi %d assign rate idx: " "%d (plcp 0x%x)\n", rssi,
+	       rs_sta->start_rate, il3945_rates[rs_sta->start_rate].plcp);
+	rcu_read_unlock();
+}
+
+int
+il3945_rate_control_register(void)
+{
+	return ieee80211_rate_control_register(&rs_ops);
+}
+
+void
+il3945_rate_control_unregister(void)
+{
+	ieee80211_rate_control_unregister(&rs_ops);
+}
diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c
new file mode 100644
index 0000000..3e568ce
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/3945.c
@@ -0,0 +1,2740 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+#include <net/mac80211.h>
+
+#include "common.h"
+#include "3945.h"
+
+/* Send led command */
+static int
+il3945_send_led_cmd(struct il_priv *il, struct il_led_cmd *led_cmd)
+{
+	struct il_host_cmd cmd = {
+		.id = C_LEDS,
+		.len = sizeof(struct il_led_cmd),
+		.data = led_cmd,
+		.flags = CMD_ASYNC,
+		.callback = NULL,
+	};
+
+	return il_send_cmd(il, &cmd);
+}
+
+#define IL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np)    \
+	[RATE_##r##M_IDX] = { RATE_##r##M_PLCP,   \
+				    RATE_##r##M_IEEE,   \
+				    RATE_##ip##M_IDX, \
+				    RATE_##in##M_IDX, \
+				    RATE_##rp##M_IDX, \
+				    RATE_##rn##M_IDX, \
+				    RATE_##pp##M_IDX, \
+				    RATE_##np##M_IDX, \
+				    RATE_##r##M_IDX_TBL, \
+				    RATE_##ip##M_IDX_TBL }
+
+/*
+ * Parameter order:
+ *   rate, prev ieee rate, next ieee rate, prev rs rate, next rs rate,
+ *   prev tgg rs rate, next tgg rs rate
+ *
+ * If there isn't a valid next or previous rate then INV is used which
+ * maps to RATE_INVALID
+ *
+ */
+const struct il3945_rate_info il3945_rates[RATE_COUNT_3945] = {
+	IL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2),	/*  1mbps */
+	IL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5),	/*  2mbps */
+	IL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11),	/*5.5mbps */
+	IL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18),	/* 11mbps */
+	IL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11),	/*  6mbps */
+	IL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11),	/*  9mbps */
+	IL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18),	/* 12mbps */
+	IL_DECLARE_RATE_INFO(18, 12, 24, 12, 24, 11, 24),	/* 18mbps */
+	IL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36),	/* 24mbps */
+	IL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48),	/* 36mbps */
+	IL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54),	/* 48mbps */
+	IL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV),	/* 54mbps */
+};
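+
+/*
+ * Example expansion: the 24 Mbps entry above,
+ * IL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36), fills in
+ * RATE_24M_PLCP and RATE_24M_IEEE and points the ieee, rs and tgg
+ * previous/next links at the 18 Mbps and 36 Mbps indexes respectively.
+ */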
+
+static inline u8
+il3945_get_prev_ieee_rate(u8 rate_idx)
+{
+	u8 rate = il3945_rates[rate_idx].prev_ieee;
+
+	if (rate == RATE_INVALID)
+		rate = rate_idx;
+	return rate;
+}
+
+/* 1 = enable the il3945_disable_events() function */
+#define IL_EVT_DISABLE (0)
+#define IL_EVT_DISABLE_SIZE (1532/32)
+
+/**
+ * il3945_disable_events - Disable selected events in uCode event log
+ *
+ * Disable an event by writing "1"s into "disable"
+ *   bitmap in SRAM.  Bit position corresponds to Event # (id/type).
+ *   Default values of 0 enable uCode events to be logged.
+ * Use only for special debugging.  This function is just a placeholder as-is;
+ *   you'll need to provide the special bits! ...
+ *   ... and set IL_EVT_DISABLE to 1. */
+void
+il3945_disable_events(struct il_priv *il)
+{
+	int i;
+	u32 base;		/* SRAM address of event log header */
+	u32 disable_ptr;	/* SRAM address of event-disable bitmap array */
+	u32 array_size;		/* # of u32 entries in array */
+	static const u32 evt_disable[IL_EVT_DISABLE_SIZE] = {
+		0x00000000,	/*   31 -    0  Event id numbers */
+		0x00000000,	/*   63 -   32 */
+		0x00000000,	/*   95 -   64 */
+		0x00000000,	/*  127 -   96 */
+		0x00000000,	/*  159 -  128 */
+		0x00000000,	/*  191 -  160 */
+		0x00000000,	/*  223 -  192 */
+		0x00000000,	/*  255 -  224 */
+		0x00000000,	/*  287 -  256 */
+		0x00000000,	/*  319 -  288 */
+		0x00000000,	/*  351 -  320 */
+		0x00000000,	/*  383 -  352 */
+		0x00000000,	/*  415 -  384 */
+		0x00000000,	/*  447 -  416 */
+		0x00000000,	/*  479 -  448 */
+		0x00000000,	/*  511 -  480 */
+		0x00000000,	/*  543 -  512 */
+		0x00000000,	/*  575 -  544 */
+		0x00000000,	/*  607 -  576 */
+		0x00000000,	/*  639 -  608 */
+		0x00000000,	/*  671 -  640 */
+		0x00000000,	/*  703 -  672 */
+		0x00000000,	/*  735 -  704 */
+		0x00000000,	/*  767 -  736 */
+		0x00000000,	/*  799 -  768 */
+		0x00000000,	/*  831 -  800 */
+		0x00000000,	/*  863 -  832 */
+		0x00000000,	/*  895 -  864 */
+		0x00000000,	/*  927 -  896 */
+		0x00000000,	/*  959 -  928 */
+		0x00000000,	/*  991 -  960 */
+		0x00000000,	/* 1023 -  992 */
+		0x00000000,	/* 1055 - 1024 */
+		0x00000000,	/* 1087 - 1056 */
+		0x00000000,	/* 1119 - 1088 */
+		0x00000000,	/* 1151 - 1120 */
+		0x00000000,	/* 1183 - 1152 */
+		0x00000000,	/* 1215 - 1184 */
+		0x00000000,	/* 1247 - 1216 */
+		0x00000000,	/* 1279 - 1248 */
+		0x00000000,	/* 1311 - 1280 */
+		0x00000000,	/* 1343 - 1312 */
+		0x00000000,	/* 1375 - 1344 */
+		0x00000000,	/* 1407 - 1376 */
+		0x00000000,	/* 1439 - 1408 */
+		0x00000000,	/* 1471 - 1440 */
+		0x00000000,	/* 1503 - 1472 */
+	};
+
+	base = le32_to_cpu(il->card_alive.log_event_table_ptr);
+	if (!il3945_hw_valid_rtc_data_addr(base)) {
+		IL_ERR("Invalid event log pointer 0x%08X\n", base);
+		return;
+	}
+
+	disable_ptr = il_read_targ_mem(il, base + (4 * sizeof(u32)));
+	array_size = il_read_targ_mem(il, base + (5 * sizeof(u32)));
+
+	if (IL_EVT_DISABLE && array_size == IL_EVT_DISABLE_SIZE) {
+		D_INFO("Disabling selected uCode log events at 0x%x\n",
+		       disable_ptr);
+		for (i = 0; i < IL_EVT_DISABLE_SIZE; i++)
+			il_write_targ_mem(il, disable_ptr + (i * sizeof(u32)),
+					  evt_disable[i]);
+
+	} else {
+		D_INFO("Selected uCode log events may be disabled\n");
+		D_INFO("  by writing \"1\"s into disable bitmap\n");
+		D_INFO("  in SRAM at 0x%x, size %d u32s\n", disable_ptr,
+		       array_size);
+	}
+
+}
+
+static int
+il3945_hwrate_to_plcp_idx(u8 plcp)
+{
+	int idx;
+
+	for (idx = 0; idx < RATE_COUNT_3945; idx++)
+		if (il3945_rates[idx].plcp == plcp)
+			return idx;
+	return -1;
+}
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x
+
+static const char *
+il3945_get_tx_fail_reason(u32 status)
+{
+	switch (status & TX_STATUS_MSK) {
+	case TX_3945_STATUS_SUCCESS:
+		return "SUCCESS";
+		TX_STATUS_ENTRY(SHORT_LIMIT);
+		TX_STATUS_ENTRY(LONG_LIMIT);
+		TX_STATUS_ENTRY(FIFO_UNDERRUN);
+		TX_STATUS_ENTRY(MGMNT_ABORT);
+		TX_STATUS_ENTRY(NEXT_FRAG);
+		TX_STATUS_ENTRY(LIFE_EXPIRE);
+		TX_STATUS_ENTRY(DEST_PS);
+		TX_STATUS_ENTRY(ABORTED);
+		TX_STATUS_ENTRY(BT_RETRY);
+		TX_STATUS_ENTRY(STA_INVALID);
+		TX_STATUS_ENTRY(FRAG_DROPPED);
+		TX_STATUS_ENTRY(TID_DISABLE);
+		TX_STATUS_ENTRY(FRAME_FLUSHED);
+		TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
+		TX_STATUS_ENTRY(TX_LOCKED);
+		TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
+	}
+
+	return "UNKNOWN";
+}
+#else
+static inline const char *
+il3945_get_tx_fail_reason(u32 status)
+{
+	return "";
+}
+#endif
+
+/*
+ * Get the previous ieee rate from the rate scale table.
+ * For A and B modes we need to override the prev
+ * value.
+ */
+int
+il3945_rs_next_rate(struct il_priv *il, int rate)
+{
+	int next_rate = il3945_get_prev_ieee_rate(rate);
+
+	switch (il->band) {
+	case NL80211_BAND_5GHZ:
+		if (rate == RATE_12M_IDX)
+			next_rate = RATE_9M_IDX;
+		else if (rate == RATE_6M_IDX)
+			next_rate = RATE_6M_IDX;
+		break;
+	case NL80211_BAND_2GHZ:
+		if (!(il->_3945.sta_supp_rates & IL_OFDM_RATES_MASK) &&
+		    il_is_associated(il)) {
+			if (rate == RATE_11M_IDX)
+				next_rate = RATE_5M_IDX;
+		}
+		break;
+
+	default:
+		break;
+	}
+
+	return next_rate;
+}
+
+/**
+ * il3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
+ *
+ * When FW advances 'R' idx, all entries between old and new 'R' idx
+ * need to be reclaimed. As a result, some free space opens up. If there is
+ * enough free space (> low mark), wake the stack that feeds us.
+ */
+static void
+il3945_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
+{
+	struct il_tx_queue *txq = &il->txq[txq_id];
+	struct il_queue *q = &txq->q;
+	struct sk_buff *skb;
+
+	BUG_ON(txq_id == IL39_CMD_QUEUE_NUM);
+
+	for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
+	     q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+		skb = txq->skbs[txq->q.read_ptr];
+		ieee80211_tx_status_irqsafe(il->hw, skb);
+		txq->skbs[txq->q.read_ptr] = NULL;
+		il->ops->txq_free_tfd(il, txq);
+	}
+
+	if (il_queue_space(q) > q->low_mark && txq_id >= 0 &&
+	    txq_id != IL39_CMD_QUEUE_NUM && il->mac80211_registered)
+		il_wake_queue(il, txq);
+}
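+
+/*
+ * Example: with q->n_bd == 256, read_ptr == 250 and a response idx of 3,
+ * the loop above frees entries 250 through 255 and then 0 through 3,
+ * wrapping via il_queue_inc_wrap(), and leaves read_ptr at 4, one past
+ * the reclaimed idx.
+ */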
+
+/**
+ * il3945_hdl_tx - Handle Tx response
+ */
+static void
+il3945_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+	int txq_id = SEQ_TO_QUEUE(sequence);
+	int idx = SEQ_TO_IDX(sequence);
+	struct il_tx_queue *txq = &il->txq[txq_id];
+	struct ieee80211_tx_info *info;
+	struct il3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
+	u32 status = le32_to_cpu(tx_resp->status);
+	int rate_idx;
+	int fail;
+
+	if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
+		IL_ERR("Read idx for DMA queue txq_id (%d) idx %d "
+		       "is out of range [0-%d] %d %d\n", txq_id, idx,
+		       txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
+		return;
+	}
+
+	/*
+	 * Firmware will not transmit a frame on a passive channel if it has
+	 * not yet received a valid frame on that channel. When this error
+	 * happens, we have to wait until the firmware unblocks itself, i.e.
+	 * until we receive a beacon or another frame. We unblock the queues
+	 * in il3945_pass_packet_to_mac80211 or in il_mac_bss_info_changed.
+	 */
+	if (unlikely((status & TX_STATUS_MSK) == TX_STATUS_FAIL_PASSIVE_NO_RX) &&
+	    il->iw_mode == NL80211_IFTYPE_STATION) {
+		il_stop_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
+		D_INFO("Stopped queues - RX waiting on passive channel\n");
+	}
+
+	txq->time_stamp = jiffies;
+	info = IEEE80211_SKB_CB(txq->skbs[txq->q.read_ptr]);
+	ieee80211_tx_info_clear_status(info);
+
+	/* Fill the MRR chain with some info about on-chip retransmissions */
+	rate_idx = il3945_hwrate_to_plcp_idx(tx_resp->rate);
+	if (info->band == NL80211_BAND_5GHZ)
+		rate_idx -= IL_FIRST_OFDM_RATE;
+
+	fail = tx_resp->failure_frame;
+
+	info->status.rates[0].idx = rate_idx;
+	info->status.rates[0].count = fail + 1;	/* add final attempt */
+
+	/* tx_status->rts_retry_count = tx_resp->failure_rts; */
+	info->flags |=
+	    ((status & TX_STATUS_MSK) ==
+	     TX_STATUS_SUCCESS) ? IEEE80211_TX_STAT_ACK : 0;
+
+	D_TX("Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n", txq_id,
+	     il3945_get_tx_fail_reason(status), status, tx_resp->rate,
+	     tx_resp->failure_frame);
+
+	D_TX_REPLY("Tx queue reclaim %d\n", idx);
+	il3945_tx_queue_reclaim(il, txq_id, idx);
+
+	if (status & TX_ABORT_REQUIRED_MSK)
+		IL_ERR("TODO:  Implement Tx ABORT REQUIRED!!!\n");
+}
+
+/*****************************************************************************
+ *
+ * Intel PRO/Wireless 3945ABG/BG Network Connection
+ *
+ *  RX handler implementations
+ *
+ *****************************************************************************/
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+static void
+il3945_accumulative_stats(struct il_priv *il, __le32 * stats)
+{
+	int i;
+	__le32 *prev_stats;
+	u32 *accum_stats;
+	u32 *delta, *max_delta;
+
+	prev_stats = (__le32 *) &il->_3945.stats;
+	accum_stats = (u32 *) &il->_3945.accum_stats;
+	delta = (u32 *) &il->_3945.delta_stats;
+	max_delta = (u32 *) &il->_3945.max_delta;
+
+	for (i = sizeof(__le32); i < sizeof(struct il3945_notif_stats);
+	     i += sizeof(__le32), stats++, prev_stats++, delta++,
+	     max_delta++, accum_stats++) {
+		if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
+			*delta =
+			    (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats));
+			*accum_stats += *delta;
+			if (*delta > *max_delta)
+				*max_delta = *delta;
+		}
+	}
+
+	/* reset accumulative stats for "no-counter" type stats */
+	il->_3945.accum_stats.general.temperature =
+	    il->_3945.stats.general.temperature;
+	il->_3945.accum_stats.general.ttl_timestamp =
+	    il->_3945.stats.general.ttl_timestamp;
+}
+#endif
+
+void
+il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+
+	D_RX("Statistics notification received (%d vs %d).\n",
+	     (int)sizeof(struct il3945_notif_stats),
+	     le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK);
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+	il3945_accumulative_stats(il, (__le32 *) &pkt->u.raw);
+#endif
+
+	memcpy(&il->_3945.stats, pkt->u.raw, sizeof(il->_3945.stats));
+}
+
+void
+il3945_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	__le32 *flag = (__le32 *) &pkt->u.raw;
+
+	if (le32_to_cpu(*flag) & UCODE_STATS_CLEAR_MSK) {
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+		memset(&il->_3945.accum_stats, 0,
+		       sizeof(struct il3945_notif_stats));
+		memset(&il->_3945.delta_stats, 0,
+		       sizeof(struct il3945_notif_stats));
+		memset(&il->_3945.max_delta, 0,
+		       sizeof(struct il3945_notif_stats));
+#endif
+		D_RX("Statistics have been cleared\n");
+	}
+	il3945_hdl_stats(il, rxb);
+}
+
+/******************************************************************************
+ *
+ * Misc. internal state and helper functions
+ *
+ ******************************************************************************/
+
+/* This is necessary only for a number of stats, see the caller. */
+static int
+il3945_is_network_packet(struct il_priv *il, struct ieee80211_hdr *header)
+{
+	/* Filter incoming packets to determine if they are targeted toward
+	 * this network, discarding packets coming from ourselves */
+	switch (il->iw_mode) {
+	case NL80211_IFTYPE_ADHOC:	/* Header: Dest. | Source    | BSSID */
+		/* packets to our IBSS update information */
+		return ether_addr_equal_64bits(header->addr3, il->bssid);
+	case NL80211_IFTYPE_STATION:	/* Header: Dest. | AP{BSSID} | Source */
+		/* packets from our AP update information */
+		return ether_addr_equal_64bits(header->addr2, il->bssid);
+	default:
+		return 1;
+	}
+}
+
+#define SMALL_PACKET_SIZE 256
+
+static void
+il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb,
+			       struct ieee80211_rx_status *stats)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IL_RX_DATA(pkt);
+	struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
+	struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt);
+	u32 len = le16_to_cpu(rx_hdr->len);
+	struct sk_buff *skb;
+	__le16 fc = hdr->frame_control;
+	u32 fraglen = PAGE_SIZE << il->hw_params.rx_page_order;
+
+	/* Reject oversized frames that would not fit in the rx buffer */
+	if (unlikely(len + IL39_RX_FRAME_SIZE > fraglen)) {
+		D_DROP("Corruption detected!\n");
+		return;
+	}
+
+	/* We only process data packets if the interface is open */
+	if (unlikely(!il->is_open)) {
+		D_DROP("Dropping packet while interface is not open.\n");
+		return;
+	}
+
+	if (unlikely(test_bit(IL_STOP_REASON_PASSIVE, &il->stop_reason))) {
+		il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
+		D_INFO("Woke queues - frame received on passive channel\n");
+	}
+
+	skb = dev_alloc_skb(SMALL_PACKET_SIZE);
+	if (!skb) {
+		IL_ERR("dev_alloc_skb failed\n");
+		return;
+	}
+
+	if (!il3945_mod_params.sw_crypto)
+		il_set_decrypted_flag(il, (struct ieee80211_hdr *)pkt,
+				      le32_to_cpu(rx_end->status), stats);
+
+	/* If frame is small enough to fit into skb->head, copy it
+	 * and do not consume a full page
+	 */
+	if (len <= SMALL_PACKET_SIZE) {
+		skb_put_data(skb, rx_hdr->payload, len);
+	} else {
+		skb_add_rx_frag(skb, 0, rxb->page,
+				(void *)rx_hdr->payload - (void *)pkt, len,
+				fraglen);
+		il->alloc_rxb_page--;
+		rxb->page = NULL;
+	}
+	il_update_stats(il, false, fc, len);
+	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
+
+	ieee80211_rx(il->hw, skb);
+}
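+
+/*
+ * E.g. a 96-byte management frame is copied into the skb head via
+ * skb_put_data() and the rx page stays with the driver for reuse, while a
+ * 1500-byte data frame is attached as a page fragment and ownership of
+ * rxb->page passes to the skb.
+ */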
+
+#define IL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
+
+static void
+il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct ieee80211_hdr *header;
+	struct ieee80211_rx_status rx_status = {};
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	struct il3945_rx_frame_stats *rx_stats = IL_RX_STATS(pkt);
+	struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
+	struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt);
+	u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg);
+	u16 rx_stats_noise_diff __maybe_unused =
+	    le16_to_cpu(rx_stats->noise_diff);
+	u8 network_packet;
+
+	rx_status.flag = 0;
+	rx_status.mactime = le64_to_cpu(rx_end->timestamp);
+	rx_status.band =
+	    (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
+	    NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+	rx_status.freq =
+	    ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel),
+					   rx_status.band);
+
+	rx_status.rate_idx = il3945_hwrate_to_plcp_idx(rx_hdr->rate);
+	if (rx_status.band == NL80211_BAND_5GHZ)
+		rx_status.rate_idx -= IL_FIRST_OFDM_RATE;
+
+	rx_status.antenna =
+	    (le16_to_cpu(rx_hdr->phy_flags) &
+	     RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4;
+
+	/* set the preamble flag if appropriate */
+	if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
+		rx_status.enc_flags |= RX_ENC_FLAG_SHORTPRE;
+
+	if (unlikely(rx_stats->phy_count > 20)) {
+		D_DROP("dsp size out of range [0,20]: %d\n",
+		       rx_stats->phy_count);
+		return;
+	}
+
+	if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR) ||
+	    !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
+		D_RX("Bad CRC or FIFO: 0x%08X.\n", rx_end->status);
+		return;
+	}
+
+	/* Convert 3945's rssi indicator to dBm */
+	rx_status.signal = rx_stats->rssi - IL39_RSSI_OFFSET;
+
+	D_STATS("Rssi %d sig_avg %d noise_diff %d\n", rx_status.signal,
+		rx_stats_sig_avg, rx_stats_noise_diff);
+
+	header = (struct ieee80211_hdr *)IL_RX_DATA(pkt);
+
+	network_packet = il3945_is_network_packet(il, header);
+
+	D_STATS("[%c] %d RSSI:%d Signal:%u, Rate:%u\n",
+		network_packet ? '*' : ' ', le16_to_cpu(rx_hdr->channel),
+		rx_status.signal, rx_status.signal, rx_status.rate_idx);
+
+	if (network_packet) {
+		il->_3945.last_beacon_time =
+		    le32_to_cpu(rx_end->beacon_timestamp);
+		il->_3945.last_tsf = le64_to_cpu(rx_end->timestamp);
+		il->_3945.last_rx_rssi = rx_status.signal;
+	}
+
+	il3945_pass_packet_to_mac80211(il, rxb, &rx_status);
+}
+
+int
+il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
+				dma_addr_t addr, u16 len, u8 reset, u8 pad)
+{
+	int count;
+	struct il_queue *q;
+	struct il3945_tfd *tfd, *tfd_tmp;
+
+	q = &txq->q;
+	tfd_tmp = (struct il3945_tfd *)txq->tfds;
+	tfd = &tfd_tmp[q->write_ptr];
+
+	if (reset)
+		memset(tfd, 0, sizeof(*tfd));
+
+	count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
+
+	if (count >= NUM_TFD_CHUNKS || count < 0) {
+		IL_ERR("Error can not send more than %d chunks\n",
+		       NUM_TFD_CHUNKS);
+		return -EINVAL;
+	}
+
+	tfd->tbs[count].addr = cpu_to_le32(addr);
+	tfd->tbs[count].len = cpu_to_le32(len);
+
+	count++;
+
+	tfd->control_flags =
+	    cpu_to_le32(TFD_CTL_COUNT_SET(count) | TFD_CTL_PAD_SET(pad));
+
+	return 0;
+}
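+
+/*
+ * Typical usage: the first call for a frame passes reset = 1 to zero the
+ * TFD and install chunk 0; a later call with reset = 0 appends the
+ * payload as chunk 1.  control_flags keeps the running chunk count and
+ * the pad value, bounded by NUM_TFD_CHUNKS.
+ */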
+
+/**
+ * il3945_hw_txq_free_tfd - Free one TFD, the one at idx [txq->q.read_ptr]
+ *
+ * Does NOT advance any indexes
+ */
+void
+il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
+{
+	struct il3945_tfd *tfd_tmp = (struct il3945_tfd *)txq->tfds;
+	int idx = txq->q.read_ptr;
+	struct il3945_tfd *tfd = &tfd_tmp[idx];
+	struct pci_dev *dev = il->pci_dev;
+	int i;
+	int counter;
+
+	/* sanity check */
+	counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
+	if (counter > NUM_TFD_CHUNKS) {
+		IL_ERR("Too many chunks: %i\n", counter);
+		/* @todo issue fatal error, it is quite a serious situation */
+		return;
+	}
+
+	/* Unmap tx_cmd */
+	if (counter)
+		pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping),
+				 dma_unmap_len(&txq->meta[idx], len),
+				 PCI_DMA_TODEVICE);
+
+	/* unmap chunks if any */
+
+	for (i = 1; i < counter; i++)
+		pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr),
+				 le32_to_cpu(tfd->tbs[i].len),
+				 PCI_DMA_TODEVICE);
+
+	/* free SKB */
+	if (txq->skbs) {
+		struct sk_buff *skb = txq->skbs[txq->q.read_ptr];
+
+		/* can be called from irqs-disabled context */
+		if (skb) {
+			dev_kfree_skb_any(skb);
+			txq->skbs[txq->q.read_ptr] = NULL;
+		}
+	}
+}
+
+/**
+ * il3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD
+ */
+void
+il3945_hw_build_tx_cmd_rate(struct il_priv *il, struct il_device_cmd *cmd,
+			    struct ieee80211_tx_info *info,
+			    struct ieee80211_hdr *hdr, int sta_id)
+{
+	u16 hw_value = ieee80211_get_tx_rate(il->hw, info)->hw_value;
+	u16 rate_idx = min(hw_value & 0xffff, RATE_COUNT_3945 - 1);
+	u16 rate_mask;
+	int rate;
+	const u8 rts_retry_limit = 7;
+	u8 data_retry_limit;
+	__le32 tx_flags;
+	__le16 fc = hdr->frame_control;
+	struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;
+
+	rate = il3945_rates[rate_idx].plcp;
+	tx_flags = tx_cmd->tx_flags;
+
+	/* We need to figure out how to get the sta->supp_rates while
+	 * in this running context */
+	rate_mask = RATES_MASK_3945;
+
+	/* Set retry limit on DATA packets and Probe Responses */
+	if (ieee80211_is_probe_resp(fc))
+		data_retry_limit = 3;
+	else
+		data_retry_limit = IL_DEFAULT_TX_RETRY;
+	tx_cmd->data_retry_limit = data_retry_limit;
+	/* Set retry limit on RTS packets */
+	tx_cmd->rts_retry_limit = min(data_retry_limit, rts_retry_limit);
+
+	tx_cmd->rate = rate;
+	tx_cmd->tx_flags = tx_flags;
+
+	/* OFDM */
+	tx_cmd->supp_rates[0] =
+	    ((rate_mask & IL_OFDM_RATES_MASK) >> IL_FIRST_OFDM_RATE) & 0xFF;
+
+	/* CCK */
+	tx_cmd->supp_rates[1] = (rate_mask & 0xF);
+
+	D_RATE("Tx sta id: %d, rate: %d (plcp), flags: 0x%4X "
+	       "cck/ofdm mask: 0x%x/0x%x\n", sta_id, tx_cmd->rate,
+	       le32_to_cpu(tx_cmd->tx_flags), tx_cmd->supp_rates[1],
+	       tx_cmd->supp_rates[0]);
+}
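+
+/*
+ * Worked example of the supp_rates split above, assuming RATES_MASK_3945
+ * covers all 12 rates (bits 0-11) and IL_FIRST_OFDM_RATE is 4 (the
+ * 6 Mbps slot): supp_rates[1] = 0xF selects the four CCK rates and
+ * supp_rates[0] = (0xff0 >> 4) & 0xFF = 0xFF selects the eight OFDM
+ * rates.
+ */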
+
+static u8
+il3945_sync_sta(struct il_priv *il, int sta_id, u16 tx_rate)
+{
+	unsigned long flags_spin;
+	struct il_station_entry *station;
+
+	if (sta_id == IL_INVALID_STATION)
+		return IL_INVALID_STATION;
+
+	spin_lock_irqsave(&il->sta_lock, flags_spin);
+	station = &il->stations[sta_id];
+
+	station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
+	station->sta.rate_n_flags = cpu_to_le16(tx_rate);
+	station->sta.mode = STA_CONTROL_MODIFY_MSK;
+	il_send_add_sta(il, &station->sta, CMD_ASYNC);
+	spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+
+	D_RATE("SCALE sync station %d to rate %d\n", sta_id, tx_rate);
+	return sta_id;
+}
+
+static void
+il3945_set_pwr_vmain(struct il_priv *il)
+{
+/*
+ * For documentation purposes: to set power to V_AUX, do
+
+		if (pci_pme_capable(il->pci_dev, PCI_D3cold)) {
+			il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
+					APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
+					~APMG_PS_CTRL_MSK_PWR_SRC);
+
+			_il_poll_bit(il, CSR_GPIO_IN,
+				     CSR_GPIO_IN_VAL_VAUX_PWR_SRC,
+				     CSR_GPIO_IN_BIT_AUX_POWER, 5000);
+		}
+ */
+
+	il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
+			      APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
+			      ~APMG_PS_CTRL_MSK_PWR_SRC);
+
+	_il_poll_bit(il, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC,
+		     CSR_GPIO_IN_BIT_AUX_POWER, 5000);
+}
+
+static int
+il3945_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
+{
+	il_wr(il, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
+	il_wr(il, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma);
+	il_wr(il, FH39_RCSR_WPTR(0), 0);
+	il_wr(il, FH39_RCSR_CONFIG(0),
+	      FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE |
+	      FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE |
+	      FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN |
+	      FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 |
+	      (RX_QUEUE_SIZE_LOG << FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE) |
+	      FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST |
+	      (1 << FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH) |
+	      FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH);
+
+	/* fake read to flush all prev I/O */
+	il_rd(il, FH39_RSSR_CTRL);
+
+	return 0;
+}
+
+static int
+il3945_tx_reset(struct il_priv *il)
+{
+	/* bypass mode */
+	il_wr_prph(il, ALM_SCD_MODE_REG, 0x2);
+
+	/* RA 0 is active */
+	il_wr_prph(il, ALM_SCD_ARASTAT_REG, 0x01);
+
+	/* all 6 FIFOs are active */
+	il_wr_prph(il, ALM_SCD_TXFACT_REG, 0x3f);
+
+	il_wr_prph(il, ALM_SCD_SBYP_MODE_1_REG, 0x010000);
+	il_wr_prph(il, ALM_SCD_SBYP_MODE_2_REG, 0x030002);
+	il_wr_prph(il, ALM_SCD_TXF4MF_REG, 0x000004);
+	il_wr_prph(il, ALM_SCD_TXF5MF_REG, 0x000005);
+
+	il_wr(il, FH39_TSSR_CBB_BASE, il->_3945.shared_phys);
+
+	il_wr(il, FH39_TSSR_MSG_CONFIG,
+	      FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
+	      FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON |
+	      FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B |
+	      FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON |
+	      FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON |
+	      FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH |
+	      FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH);
+
+	return 0;
+}
+
+/**
+ * il3945_txq_ctx_reset - Reset TX queue context
+ *
+ * Destroys all DMA structures and initializes them again
+ */
+static int
+il3945_txq_ctx_reset(struct il_priv *il)
+{
+	int rc, txq_id;
+
+	il3945_hw_txq_ctx_free(il);
+
+	/* allocate tx queue structure */
+	rc = il_alloc_txq_mem(il);
+	if (rc)
+		return rc;
+
+	/* Tx CMD queue */
+	rc = il3945_tx_reset(il);
+	if (rc)
+		goto error;
+
+	/* Tx queue(s) */
+	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
+		rc = il_tx_queue_init(il, txq_id);
+		if (rc) {
+			IL_ERR("Tx %d queue init failed\n", txq_id);
+			goto error;
+		}
+	}
+
+	return rc;
+
+error:
+	il3945_hw_txq_ctx_free(il);
+	return rc;
+}
+
+/*
+ * Start up 3945's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via il_apm_stop())
+ * NOTE:  This does not load uCode nor start the embedded processor
+ */
+static int
+il3945_apm_init(struct il_priv *il)
+{
+	int ret = il_apm_init(il);
+
+	/* Clear APMG (NIC's internal power management) interrupts */
+	il_wr_prph(il, APMG_RTC_INT_MSK_REG, 0x0);
+	il_wr_prph(il, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
+
+	/* Reset radio chip */
+	il_set_bits_prph(il, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
+	udelay(5);
+	il_clear_bits_prph(il, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
+
+	return ret;
+}
+
+static void
+il3945_nic_config(struct il_priv *il)
+{
+	struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+	unsigned long flags;
+	u8 rev_id = il->pci_dev->revision;
+
+	spin_lock_irqsave(&il->lock, flags);
+
+	/* Determine HW type */
+	D_INFO("HW Revision ID = 0x%X\n", rev_id);
+
+	if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
+		D_INFO("RTP type\n");
+	else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
+		D_INFO("3945 RADIO-MB type\n");
+		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+			   CSR39_HW_IF_CONFIG_REG_BIT_3945_MB);
+	} else {
+		D_INFO("3945 RADIO-MM type\n");
+		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+			   CSR39_HW_IF_CONFIG_REG_BIT_3945_MM);
+	}
+
+	if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) {
+		D_INFO("SKU OP mode is mrc\n");
+		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+			   CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC);
+	} else
+		D_INFO("SKU OP mode is basic\n");
+
+	if ((eeprom->board_revision & 0xF0) == 0xD0) {
+		D_INFO("3945ABG revision is 0x%X\n", eeprom->board_revision);
+		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+			   CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
+	} else {
+		D_INFO("3945ABG revision is 0x%X\n", eeprom->board_revision);
+		il_clear_bit(il, CSR_HW_IF_CONFIG_REG,
+			     CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
+	}
+
+	if (eeprom->almgor_m_version <= 1) {
+		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+			   CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A);
+		D_INFO("Card M type A version is 0x%X\n",
+		       eeprom->almgor_m_version);
+	} else {
+		D_INFO("Card M type B version is 0x%X\n",
+		       eeprom->almgor_m_version);
+		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+			   CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B);
+	}
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	if (eeprom->sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
+		D_RF_KILL("SW RF KILL supported in EEPROM.\n");
+
+	if (eeprom->sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
+		D_RF_KILL("HW RF KILL supported in EEPROM.\n");
+}
+
+int
+il3945_hw_nic_init(struct il_priv *il)
+{
+	int rc;
+	unsigned long flags;
+	struct il_rx_queue *rxq = &il->rxq;
+
+	spin_lock_irqsave(&il->lock, flags);
+	il3945_apm_init(il);
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	il3945_set_pwr_vmain(il);
+	il3945_nic_config(il);
+
+	/* Allocate the RX queue, or reset if it is already allocated */
+	if (!rxq->bd) {
+		rc = il_rx_queue_alloc(il);
+		if (rc) {
+			IL_ERR("Unable to initialize Rx queue\n");
+			return -ENOMEM;
+		}
+	} else
+		il3945_rx_queue_reset(il, rxq);
+
+	il3945_rx_replenish(il);
+
+	il3945_rx_init(il, rxq);
+
+	/* Look at using this instead:
+	   rxq->need_update = 1;
+	   il_rx_queue_update_write_ptr(il, rxq);
+	 */
+
+	il_wr(il, FH39_RCSR_WPTR(0), rxq->write & ~7);
+
+	rc = il3945_txq_ctx_reset(il);
+	if (rc)
+		return rc;
+
+	set_bit(S_INIT, &il->status);
+
+	return 0;
+}
+
+/**
+ * il3945_hw_txq_ctx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+void
+il3945_hw_txq_ctx_free(struct il_priv *il)
+{
+	int txq_id;
+
+	/* Tx queues */
+	if (il->txq) {
+		for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+			if (txq_id == IL39_CMD_QUEUE_NUM)
+				il_cmd_queue_free(il);
+			else
+				il_tx_queue_free(il, txq_id);
+	}
+
+	/* free tx queue structure */
+	il_free_txq_mem(il);
+}
+
+void
+il3945_hw_txq_ctx_stop(struct il_priv *il)
+{
+	int txq_id;
+
+	/* stop SCD */
+	_il_wr_prph(il, ALM_SCD_MODE_REG, 0);
+	_il_wr_prph(il, ALM_SCD_TXFACT_REG, 0);
+
+	/* reset TFD queues */
+	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
+		_il_wr(il, FH39_TCSR_CONFIG(txq_id), 0x0);
+		_il_poll_bit(il, FH39_TSSR_TX_STATUS,
+			     FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
+			     FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
+			     1000);
+	}
+}
+
+/**
+ * il3945_hw_reg_adjust_power_by_temp - return idx delta into power gain
+ * settings table
+ */
+static int
+il3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading)
+{
+	return (new_reading - old_reading) * (-11) / 100;
+}
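+
+/*
+ * E.g. a reading 100 units warmer than the old one gives
+ * 100 * (-11) / 100 = -11, i.e. an idx delta of eleven steps (5.5 dB at
+ * the gain table's 1/2 dB per step, see power_gain_table below).
+ */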
+
+/**
+ * il3945_hw_reg_temp_out_of_range - Keep temperature in sane range
+ */
+static inline int
+il3945_hw_reg_temp_out_of_range(int temperature)
+{
+	return (temperature < -260 || temperature > 25) ? 1 : 0;
+}
+
+int
+il3945_hw_get_temperature(struct il_priv *il)
+{
+	return _il_rd(il, CSR_UCODE_DRV_GP2);
+}
+
+/**
+ * il3945_hw_reg_txpower_get_temperature - get the current temperature by
+ * reading from the NIC
+ */
+static int
+il3945_hw_reg_txpower_get_temperature(struct il_priv *il)
+{
+	struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+	int temperature;
+
+	temperature = il3945_hw_get_temperature(il);
+
+	/* driver's okay range is -260 to +25.
+	 *   human readable okay range is 0 to +285 */
+	D_INFO("Temperature: %d\n", temperature + IL_TEMP_CONVERT);
+
+	/* handle insane temp reading */
+	if (il3945_hw_reg_temp_out_of_range(temperature)) {
+		IL_ERR("Error bad temperature value  %d\n", temperature);
+
+		/* if really really hot(?),
+		 *   substitute the 3rd band/group's temp measured at factory */
+		if (il->last_temperature > 100)
+			temperature = eeprom->groups[2].temperature;
+		else		/* else use most recent "sane" value from driver */
+			temperature = il->last_temperature;
+	}
+
+	return temperature;	/* raw, not "human readable" */
+}
+
+/* Adjust Txpower only if temperature variance is greater than threshold.
+ *
+ * Both are lower than older versions' 9 degrees */
+#define IL_TEMPERATURE_LIMIT_TIMER   6
+
+/**
+ * il3945_is_temp_calib_needed - determines if new calibration is needed
+ *
+ * Records the new temperature in il->temperature.
+ * Replaces il->last_temperature *only* if calibration is needed
+ *    (assumes the caller will actually do the calibration!). */
+static int
+il3945_is_temp_calib_needed(struct il_priv *il)
+{
+	int temp_diff;
+
+	il->temperature = il3945_hw_reg_txpower_get_temperature(il);
+	temp_diff = il->temperature - il->last_temperature;
+
+	/* get absolute value */
+	if (temp_diff < 0) {
+		D_POWER("Getting cooler, delta %d,\n", temp_diff);
+		temp_diff = -temp_diff;
+	} else if (temp_diff == 0)
+		D_POWER("Same temp,\n");
+	else
+		D_POWER("Getting warmer, delta %d,\n", temp_diff);
+
+	/* if we don't need calibration, *don't* update last_temperature */
+	if (temp_diff < IL_TEMPERATURE_LIMIT_TIMER) {
+		D_POWER("Timed thermal calib not needed\n");
+		return 0;
+	}
+
+	D_POWER("Timed thermal calib needed\n");
+
+	/* assume that caller will actually do calib ...
+	 *   update the "last temperature" value */
+	il->last_temperature = il->temperature;
+	return 1;
+}
+
+#define IL_MAX_GAIN_ENTRIES 78
+#define IL_CCK_FROM_OFDM_POWER_DIFF  -5
+#define IL_CCK_FROM_OFDM_IDX_DIFF (10)
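+
+/*
+ * The two CCK-from-OFDM constants above are consistent with the gain
+ * table granularity: a -5 dB power difference is 10 idx steps at 1/2 dB
+ * per step.
+ */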
+
+/* radio and DSP power table, each step is 1/2 dB.
+ * 1st number is for RF analog gain, 2nd number is for DSP pre-DAC gain. */
+static struct il3945_tx_power power_gain_table[2][IL_MAX_GAIN_ENTRIES] = {
+	{
+	 {251, 127},		/* 2.4 GHz, highest power */
+	 {251, 127},
+	 {251, 127},
+	 {251, 127},
+	 {251, 125},
+	 {251, 110},
+	 {251, 105},
+	 {251, 98},
+	 {187, 125},
+	 {187, 115},
+	 {187, 108},
+	 {187, 99},
+	 {243, 119},
+	 {243, 111},
+	 {243, 105},
+	 {243, 97},
+	 {243, 92},
+	 {211, 106},
+	 {211, 100},
+	 {179, 120},
+	 {179, 113},
+	 {179, 107},
+	 {147, 125},
+	 {147, 119},
+	 {147, 112},
+	 {147, 106},
+	 {147, 101},
+	 {147, 97},
+	 {147, 91},
+	 {115, 107},
+	 {235, 121},
+	 {235, 115},
+	 {235, 109},
+	 {203, 127},
+	 {203, 121},
+	 {203, 115},
+	 {203, 108},
+	 {203, 102},
+	 {203, 96},
+	 {203, 92},
+	 {171, 110},
+	 {171, 104},
+	 {171, 98},
+	 {139, 116},
+	 {227, 125},
+	 {227, 119},
+	 {227, 113},
+	 {227, 107},
+	 {227, 101},
+	 {227, 96},
+	 {195, 113},
+	 {195, 106},
+	 {195, 102},
+	 {195, 95},
+	 {163, 113},
+	 {163, 106},
+	 {163, 102},
+	 {163, 95},
+	 {131, 113},
+	 {131, 106},
+	 {131, 102},
+	 {131, 95},
+	 {99, 113},
+	 {99, 106},
+	 {99, 102},
+	 {99, 95},
+	 {67, 113},
+	 {67, 106},
+	 {67, 102},
+	 {67, 95},
+	 {35, 113},
+	 {35, 106},
+	 {35, 102},
+	 {35, 95},
+	 {3, 113},
+	 {3, 106},
+	 {3, 102},
+	 {3, 95}		/* 2.4 GHz, lowest power */
+	},
+	{
+	 {251, 127},		/* 5.x GHz, highest power */
+	 {251, 120},
+	 {251, 114},
+	 {219, 119},
+	 {219, 101},
+	 {187, 113},
+	 {187, 102},
+	 {155, 114},
+	 {155, 103},
+	 {123, 117},
+	 {123, 107},
+	 {123, 99},
+	 {123, 92},
+	 {91, 108},
+	 {59, 125},
+	 {59, 118},
+	 {59, 109},
+	 {59, 102},
+	 {59, 96},
+	 {59, 90},
+	 {27, 104},
+	 {27, 98},
+	 {27, 92},
+	 {115, 118},
+	 {115, 111},
+	 {115, 104},
+	 {83, 126},
+	 {83, 121},
+	 {83, 113},
+	 {83, 105},
+	 {83, 99},
+	 {51, 118},
+	 {51, 111},
+	 {51, 104},
+	 {51, 98},
+	 {19, 116},
+	 {19, 109},
+	 {19, 102},
+	 {19, 98},
+	 {19, 93},
+	 {171, 113},
+	 {171, 107},
+	 {171, 99},
+	 {139, 120},
+	 {139, 113},
+	 {139, 107},
+	 {139, 99},
+	 {107, 120},
+	 {107, 113},
+	 {107, 107},
+	 {107, 99},
+	 {75, 120},
+	 {75, 113},
+	 {75, 107},
+	 {75, 99},
+	 {43, 120},
+	 {43, 113},
+	 {43, 107},
+	 {43, 99},
+	 {11, 120},
+	 {11, 113},
+	 {11, 107},
+	 {11, 99},
+	 {131, 107},
+	 {131, 99},
+	 {99, 120},
+	 {99, 113},
+	 {99, 107},
+	 {99, 99},
+	 {67, 120},
+	 {67, 113},
+	 {67, 107},
+	 {67, 99},
+	 {35, 120},
+	 {35, 113},
+	 {35, 107},
+	 {35, 99},
+	 {3, 120}		/* 5.x GHz, lowest power */
+	}
+};
+
+static inline u8
+il3945_hw_reg_fix_power_idx(int idx)
+{
+	if (idx < 0)
+		return 0;
+	if (idx >= IL_MAX_GAIN_ENTRIES)
+		return IL_MAX_GAIN_ENTRIES - 1;
+	return (u8) idx;
+}
+
+/* Kick off thermal recalibration check every 60 seconds */
+#define REG_RECALIB_PERIOD (60)
+
+/**
+ * il3945_hw_reg_set_scan_power - Set Tx power for scan probe requests
+ *
+ * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK)
+ * or 6 Mbit (OFDM) rates.
+ */
+static void
+il3945_hw_reg_set_scan_power(struct il_priv *il, u32 scan_tbl_idx, s32 rate_idx,
+			     const s8 *clip_pwrs,
+			     struct il_channel_info *ch_info, int band_idx)
+{
+	struct il3945_scan_power_info *scan_power_info;
+	s8 power;
+	u8 power_idx;
+
+	scan_power_info = &ch_info->scan_pwr_info[scan_tbl_idx];
+
+	/* use this channel group's 6Mbit clipping/saturation pwr,
+	 *   but cap at regulatory scan power restriction (set during init
+	 *   based on eeprom channel data) for this channel.  */
+	power = min(ch_info->scan_power, clip_pwrs[RATE_6M_IDX_TBL]);
+
+	power = min(power, il->tx_power_user_lmt);
+	scan_power_info->requested_power = power;
+
+	/* find difference between new scan *power* and current "normal"
+	 *   Tx *power* for 6Mb.  Use this difference (x2) to adjust the
+	 *   current "normal" temperature-compensated Tx power *idx* for
+	 *   this rate (1Mb or 6Mb) to yield new temp-compensated scan power
+	 *   *idx*. */
+	power_idx = ch_info->power_info[rate_idx].power_table_idx -
+		    (power -
+		     ch_info->power_info[RATE_6M_IDX_TBL].requested_power) * 2;
+
+	/* store reference idx that we use when adjusting *all* scan
+	 *   powers.  So we can accommodate user (all channel) or spectrum
+	 *   management (single channel) power changes "between" temperature
+	 *   feedback compensation procedures.
+	 * don't force fit this reference idx into gain table; it may be a
+	 *   negative number.  This will help avoid errors when we're at
+	 *   the lower bounds (highest gains, for warmest temperatures)
+	 *   of the table. */
+
+	/* don't exceed table bounds for "real" setting */
+	power_idx = il3945_hw_reg_fix_power_idx(power_idx);
+
+	scan_power_info->power_table_idx = power_idx;
+	scan_power_info->tpc.tx_gain =
+	    power_gain_table[band_idx][power_idx].tx_gain;
+	scan_power_info->tpc.dsp_atten =
+	    power_gain_table[band_idx][power_idx].dsp_atten;
+}
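+
+/*
+ * Worked example (editor's illustration): gain table steps are 1/2 dB,
+ * hence the "* 2" above.  If the 6M rate's normal requested_power is
+ * 14 dBm with power_table_idx 20, and the scan power is capped at
+ * 12 dBm, then power_idx = 20 - (12 - 14) * 2 = 24, i.e. four half-dB
+ * steps of extra attenuation; il3945_hw_reg_fix_power_idx() then clamps
+ * the result to the table range [0, IL_MAX_GAIN_ENTRIES - 1].
+ */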
+
+/**
+ * il3945_send_tx_power - fill in Tx Power command with gain settings
+ *
+ * Configures power settings for all rates for the current channel,
+ * using values from the channel info struct, and sends them to the NIC.
+ */
+static int
+il3945_send_tx_power(struct il_priv *il)
+{
+	int rate_idx, i;
+	const struct il_channel_info *ch_info = NULL;
+	struct il3945_txpowertable_cmd txpower = {
+		.channel = il->active.channel,
+	};
+	u16 chan;
+
+	if (WARN_ONCE(test_bit(S_SCAN_HW, &il->status),
+		      "TX Power requested while scanning!\n"))
+		return -EAGAIN;
+
+	chan = le16_to_cpu(il->active.channel);
+
+	txpower.band = (il->band == NL80211_BAND_5GHZ) ? 0 : 1;
+	ch_info = il_get_channel_info(il, il->band, chan);
+	if (!ch_info) {
+		IL_ERR("Failed to get channel info for channel %d [%d]\n", chan,
+		       il->band);
+		return -EINVAL;
+	}
+
+	if (!il_is_channel_valid(ch_info)) {
+		D_POWER("Not calling TX_PWR_TBL_CMD on non-Tx channel.\n");
+		return 0;
+	}
+
+	/* fill cmd with power settings for all rates for current channel */
+	/* Fill OFDM rate */
+	for (rate_idx = IL_FIRST_OFDM_RATE, i = 0;
+	     rate_idx <= IL39_LAST_OFDM_RATE; rate_idx++, i++) {
+
+		txpower.power[i].tpc = ch_info->power_info[i].tpc;
+		txpower.power[i].rate = il3945_rates[rate_idx].plcp;
+
+		D_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
+			le16_to_cpu(txpower.channel), txpower.band,
+			txpower.power[i].tpc.tx_gain,
+			txpower.power[i].tpc.dsp_atten, txpower.power[i].rate);
+	}
+	/* Fill CCK rates */
+	for (rate_idx = IL_FIRST_CCK_RATE; rate_idx <= IL_LAST_CCK_RATE;
+	     rate_idx++, i++) {
+		txpower.power[i].tpc = ch_info->power_info[i].tpc;
+		txpower.power[i].rate = il3945_rates[rate_idx].plcp;
+
+		D_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
+			le16_to_cpu(txpower.channel), txpower.band,
+			txpower.power[i].tpc.tx_gain,
+			txpower.power[i].tpc.dsp_atten, txpower.power[i].rate);
+	}
+
+	return il_send_cmd_pdu(il, C_TX_PWR_TBL,
+			       sizeof(struct il3945_txpowertable_cmd),
+			       &txpower);
+}
+
+/**
+ * il3945_hw_reg_set_new_power - Configures power tables at new levels
+ * @ch_info: Channel to update.  Uses power_info.requested_power.
+ *
+ * Replace requested_power and base_power_idx ch_info fields for
+ * one channel.
+ *
+ * Called if user or spectrum management changes power preferences.
+ * Takes into account h/w and modulation limitations (clip power).
+ *
+ * This does *not* send anything to NIC, just sets up ch_info for one channel.
+ *
+ * NOTE: il3945_hw_reg_comp_txpower_temp() *must* be run after this to
+ *	 properly fill out the scan powers, and actual h/w gain settings,
+ *	 and send changes to NIC
+ */
+static int
+il3945_hw_reg_set_new_power(struct il_priv *il, struct il_channel_info *ch_info)
+{
+	struct il3945_channel_power_info *power_info;
+	int power_changed = 0;
+	int i;
+	const s8 *clip_pwrs;
+	int power;
+
+	/* Get this chnlgrp's rate-to-max/clip-powers table */
+	clip_pwrs = il->_3945.clip_groups[ch_info->group_idx].clip_powers;
+
+	/* Get this channel's rate-to-current-power settings table */
+	power_info = ch_info->power_info;
+
+	/* update OFDM Txpower settings */
+	for (i = RATE_6M_IDX_TBL; i <= RATE_54M_IDX_TBL; i++, ++power_info) {
+		int delta_idx;
+
+		/* limit new power to be no more than h/w capability */
+		power = min(ch_info->curr_txpow, clip_pwrs[i]);
+		if (power == power_info->requested_power)
+			continue;
+
+		/* find difference between old and new requested powers,
+		 *    update base (non-temp-compensated) power idx */
+		delta_idx = (power - power_info->requested_power) * 2;
+		power_info->base_power_idx -= delta_idx;
+
+		/* save new requested power value */
+		power_info->requested_power = power;
+
+		power_changed = 1;
+	}
+
+	/* update CCK Txpower settings, based on OFDM 12M setting ...
+	 *    ... all CCK power settings for a given channel are the *same*. */
+	if (power_changed) {
+		power = ch_info->power_info[RATE_12M_IDX_TBL].requested_power +
+			IL_CCK_FROM_OFDM_POWER_DIFF;
+
+		/* do all CCK rates' il3945_channel_power_info structures */
+		for (i = RATE_1M_IDX_TBL; i <= RATE_11M_IDX_TBL; i++) {
+			power_info->requested_power = power;
+			power_info->base_power_idx =
+			    ch_info->power_info[RATE_12M_IDX_TBL].base_power_idx +
+			    IL_CCK_FROM_OFDM_IDX_DIFF;
+			++power_info;
+		}
+	}
+	}
+
+	return 0;
+}
+
+/**
+ * il3945_hw_reg_get_ch_txpower_limit - returns new power limit for channel
+ *
+ * NOTE: Returned power limit may be less (but not more) than requested,
+ *	 based strictly on regulatory (eeprom and spectrum mgt) limitations
+ *	 (no consideration for h/w clipping limitations).
+ */
+static int
+il3945_hw_reg_get_ch_txpower_limit(struct il_channel_info *ch_info)
+{
+	s8 max_power;
+
+#if 0
+	/* if we're using TGd limits, use lower of TGd or EEPROM */
+	if (ch_info->tgd_data.max_power != 0)
+		max_power =
+		    min(ch_info->tgd_data.max_power,
+			ch_info->eeprom.max_power_avg);
+
+	/* else just use EEPROM limits */
+	else
+#endif
+		max_power = ch_info->eeprom.max_power_avg;
+
+	return min(max_power, ch_info->max_power_avg);
+}
+
+/**
+ * il3945_hw_reg_comp_txpower_temp - Compensate for temperature
+ *
+ * Compensate txpower settings of *all* channels for temperature.
+ * This only accounts for the difference between current temperature
+ *   and the factory calibration temperatures, and bases the new settings
+ *   on the channel's base_power_idx.
+ *
+ * If RxOn is "associated", this sends the new Txpower to NIC!
+ */
+static int
+il3945_hw_reg_comp_txpower_temp(struct il_priv *il)
+{
+	struct il_channel_info *ch_info = NULL;
+	struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+	int delta_idx;
+	const s8 *clip_pwrs;	/* array of h/w max power levels for each rate */
+	u8 a_band;
+	u8 rate_idx;
+	u8 scan_tbl_idx;
+	u8 i;
+	int ref_temp;
+	int temperature = il->temperature;
+
+	if (il->disable_tx_power_cal || test_bit(S_SCANNING, &il->status)) {
+		/* do not perform tx power calibration */
+		return 0;
+	}
+	/* set up new Tx power info for each and every channel, 2.4 and 5.x */
+	for (i = 0; i < il->channel_count; i++) {
+		ch_info = &il->channel_info[i];
+		a_band = il_is_channel_a_band(ch_info);
+
+		/* Get this chnlgrp's factory calibration temperature */
+		ref_temp = (s16) eeprom->groups[ch_info->group_idx].temperature;
+
+		/* get power idx adjustment based on current and factory
+		 * temps */
+		delta_idx =
+		    il3945_hw_reg_adjust_power_by_temp(temperature, ref_temp);
+
+		/* set tx power value for all rates, OFDM and CCK */
+		for (rate_idx = 0; rate_idx < RATE_COUNT_3945; rate_idx++) {
+			int power_idx =
+			    ch_info->power_info[rate_idx].base_power_idx;
+
+			/* temperature compensate */
+			power_idx += delta_idx;
+
+			/* stay within table range */
+			power_idx = il3945_hw_reg_fix_power_idx(power_idx);
+			ch_info->power_info[rate_idx].power_table_idx =
+			    (u8) power_idx;
+			ch_info->power_info[rate_idx].tpc =
+			    power_gain_table[a_band][power_idx];
+		}
+
+		/* Get this chnlgrp's rate-to-max/clip-powers table */
+		clip_pwrs =
+		    il->_3945.clip_groups[ch_info->group_idx].clip_powers;
+
+		/* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
+		for (scan_tbl_idx = 0; scan_tbl_idx < IL_NUM_SCAN_RATES;
+		     scan_tbl_idx++) {
+			s32 actual_idx = (scan_tbl_idx == 0) ?
+			    RATE_1M_IDX_TBL : RATE_6M_IDX_TBL;
+			il3945_hw_reg_set_scan_power(il, scan_tbl_idx,
+						     actual_idx, clip_pwrs,
+						     ch_info, a_band);
+		}
+	}
+
+	/* send Txpower command for current channel to ucode */
+	return il->ops->send_tx_power(il);
+}
+
+int
+il3945_hw_reg_set_txpower(struct il_priv *il, s8 power)
+{
+	struct il_channel_info *ch_info;
+	s8 max_power;
+	u8 i;
+
+	if (il->tx_power_user_lmt == power) {
+		D_POWER("Requested Tx power same as current limit: %ddBm.\n",
+			power);
+		return 0;
+	}
+
+	D_POWER("Setting upper limit clamp to %ddBm.\n", power);
+	il->tx_power_user_lmt = power;
+
+	/* set up new Tx powers for each and every channel, 2.4 and 5.x */
+
+	for (i = 0; i < il->channel_count; i++) {
+		ch_info = &il->channel_info[i];
+
+		/* find minimum power of all user and regulatory constraints
+		 *    (does not consider h/w clipping limitations) */
+		max_power = il3945_hw_reg_get_ch_txpower_limit(ch_info);
+		max_power = min(power, max_power);
+		if (max_power != ch_info->curr_txpow) {
+			ch_info->curr_txpow = max_power;
+
+			/* this considers the h/w clipping limitations */
+			il3945_hw_reg_set_new_power(il, ch_info);
+		}
+	}
+
+	/* update txpower settings for all channels,
+	 *   send to NIC if associated. */
+	il3945_is_temp_calib_needed(il);
+	il3945_hw_reg_comp_txpower_temp(il);
+
+	return 0;
+}
+
+static int
+il3945_send_rxon_assoc(struct il_priv *il)
+{
+	int rc = 0;
+	struct il_rx_pkt *pkt;
+	struct il3945_rxon_assoc_cmd rxon_assoc;
+	struct il_host_cmd cmd = {
+		.id = C_RXON_ASSOC,
+		.len = sizeof(rxon_assoc),
+		.flags = CMD_WANT_SKB,
+		.data = &rxon_assoc,
+	};
+	const struct il_rxon_cmd *rxon1 = &il->staging;
+	const struct il_rxon_cmd *rxon2 = &il->active;
+
+	if (rxon1->flags == rxon2->flags &&
+	    rxon1->filter_flags == rxon2->filter_flags &&
+	    rxon1->cck_basic_rates == rxon2->cck_basic_rates &&
+	    rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates) {
+		D_INFO("Using current RXON_ASSOC.  Not resending.\n");
+		return 0;
+	}
+
+	rxon_assoc.flags = il->staging.flags;
+	rxon_assoc.filter_flags = il->staging.filter_flags;
+	rxon_assoc.ofdm_basic_rates = il->staging.ofdm_basic_rates;
+	rxon_assoc.cck_basic_rates = il->staging.cck_basic_rates;
+	rxon_assoc.reserved = 0;
+
+	rc = il_send_cmd_sync(il, &cmd);
+	if (rc)
+		return rc;
+
+	pkt = (struct il_rx_pkt *)cmd.reply_page;
+	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+		IL_ERR("Bad return from C_RXON_ASSOC command\n");
+		rc = -EIO;
+	}
+
+	il_free_pages(il, cmd.reply_page);
+
+	return rc;
+}
+
+/**
+ * il3945_commit_rxon - commit staging_rxon to hardware
+ *
+ * The RXON command in staging_rxon is committed to the hardware and
+ * the active_rxon structure is updated with the new data.  This
+ * function correctly transitions out of the RXON_ASSOC_MSK state if
+ * a HW tune is required based on the RXON structure changes.
+ */
+int
+il3945_commit_rxon(struct il_priv *il)
+{
+	/* cast away the const for active_rxon in this function */
+	struct il3945_rxon_cmd *active_rxon = (void *)&il->active;
+	struct il3945_rxon_cmd *staging_rxon = (void *)&il->staging;
+	int rc = 0;
+	bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK);
+
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		return -EINVAL;
+
+	if (!il_is_alive(il))
+		return -1;
+
+	/* always get timestamp with Rx frame */
+	staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK;
+
+	/* select antenna */
+	staging_rxon->flags &= ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
+	staging_rxon->flags |= il3945_get_antenna_flags(il);
+
+	rc = il_check_rxon_cmd(il);
+	if (rc) {
+		IL_ERR("Invalid RXON configuration.  Not committing.\n");
+		return -EINVAL;
+	}
+
+	/* If we don't need to send a full RXON, we can use
+	 * il3945_rxon_assoc_cmd which is used to reconfigure filter
+	 * and other flags for the current radio configuration. */
+	if (!il_full_rxon_required(il)) {
+		rc = il_send_rxon_assoc(il);
+		if (rc) {
+			IL_ERR("Error setting RXON_ASSOC "
+			       "configuration (%d).\n", rc);
+			return rc;
+		}
+
+		memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
+		/*
+		 * We do not commit tx power settings while channel changing,
+		 * do it now if tx power changed.
+		 */
+		il_set_tx_power(il, il->tx_power_next, false);
+		return 0;
+	}
+
+	/* If we are currently associated and the new config requires
+	 * an RXON_ASSOC and the new config wants the associated mask enabled,
+	 * we must clear the ASSOC bit from the active configuration
+	 * before we apply the new config */
+	if (il_is_associated(il) && new_assoc) {
+		D_INFO("Toggling associated bit on current RXON\n");
+		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+
+		/*
+		 * reserved4 and 5 could have been filled by the iwlcore code.
+		 * Let's clear them before pushing to the 3945.
+		 */
+		active_rxon->reserved4 = 0;
+		active_rxon->reserved5 = 0;
+		rc = il_send_cmd_pdu(il, C_RXON, sizeof(struct il3945_rxon_cmd),
+				     &il->active);
+
+		/* If the mask clearing failed then we set
+		 * active_rxon back to what it was previously */
+		if (rc) {
+			active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
+			IL_ERR("Error clearing ASSOC_MSK on current "
+			       "configuration (%d).\n", rc);
+			return rc;
+		}
+		il_clear_ucode_stations(il);
+		il_restore_stations(il);
+	}
+
+	D_INFO("Sending RXON\n"
+	       "* with%s RXON_FILTER_ASSOC_MSK\n"
+	       "* channel = %d\n"
+	       "* bssid = %pM\n",
+	       (new_assoc ? "" : "out"), le16_to_cpu(staging_rxon->channel),
+	       staging_rxon->bssid_addr);
+
+	/*
+	 * reserved4 and 5 could have been filled by the iwlcore code.
+	 * Let's clear them before pushing to the 3945.
+	 */
+	staging_rxon->reserved4 = 0;
+	staging_rxon->reserved5 = 0;
+
+	il_set_rxon_hwcrypto(il, !il3945_mod_params.sw_crypto);
+
+	/* Apply the new configuration */
+	rc = il_send_cmd_pdu(il, C_RXON, sizeof(struct il3945_rxon_cmd),
+			     staging_rxon);
+	if (rc) {
+		IL_ERR("Error setting new configuration (%d).\n", rc);
+		return rc;
+	}
+
+	memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
+
+	if (!new_assoc) {
+		il_clear_ucode_stations(il);
+		il_restore_stations(il);
+	}
+
+	/* If we issue a new RXON command which required a tune, then we must
+	 * send a new TXPOWER command or we won't be able to Tx any frames */
+	rc = il_set_tx_power(il, il->tx_power_next, true);
+	if (rc) {
+		IL_ERR("Error setting Tx power (%d).\n", rc);
+		return rc;
+	}
+
+	/* Init the hardware's rate fallback order based on the band */
+	rc = il3945_init_hw_rate_table(il);
+	if (rc) {
+		IL_ERR("Error setting HW rate table: %02X\n", rc);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/**
+ * il3945_reg_txpower_periodic - called when it is time to check our temperature
+ *
+ * -- reset periodic timer
+ * -- see if temp has changed enough to warrant re-calibration ... if so:
+ *     -- correct coeffs for temp (can reset temp timer)
+ *     -- save this temp as "last",
+ *     -- send new set of gain settings to NIC
+ * NOTE:  This should continue working, even when we're not associated,
+ *   so we can keep our internal table of scan powers current. */
+void
+il3945_reg_txpower_periodic(struct il_priv *il)
+{
+	/* This will kick in the "brute force"
+	 * il3945_hw_reg_comp_txpower_temp() below */
+	if (!il3945_is_temp_calib_needed(il))
+		goto reschedule;
+
+	/* Set up a new set of temp-adjusted TxPowers, send to NIC.
+	 * This is based *only* on current temperature,
+	 * ignoring any previous power measurements */
+	il3945_hw_reg_comp_txpower_temp(il);
+
+reschedule:
+	queue_delayed_work(il->workqueue, &il->_3945.thermal_periodic,
+			   REG_RECALIB_PERIOD * HZ);
+}
+
+static void
+il3945_bg_reg_txpower_periodic(struct work_struct *work)
+{
+	struct il_priv *il = container_of(work, struct il_priv,
+					  _3945.thermal_periodic.work);
+
+	mutex_lock(&il->mutex);
+	if (test_bit(S_EXIT_PENDING, &il->status) || il->txq == NULL)
+		goto out;
+
+	il3945_reg_txpower_periodic(il);
+out:
+	mutex_unlock(&il->mutex);
+}
+
+/**
+ * il3945_hw_reg_get_ch_grp_idx - find the channel-group idx (0-4) for channel
+ *
+ * This function is used when initializing channel-info structs.
+ *
+ * NOTE: These channel groups do *NOT* match the bands above!
+ *	 These channel groups are based on factory-tested channels;
+ *	 on A-band, EEPROM's "group frequency" entries represent the top
+ *	 channel in each of groups 1-4.  All B/G channels are in group 0.
+ */
+static u16
+il3945_hw_reg_get_ch_grp_idx(struct il_priv *il,
+			     const struct il_channel_info *ch_info)
+{
+	struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+	struct il3945_eeprom_txpower_group *ch_grp = &eeprom->groups[0];
+	u8 group;
+	u16 group_idx = 0;	/* based on factory calib frequencies */
+	u8 grp_channel;
+
+	/* Find the group idx for the channel ... don't use idx 1(?) */
+	if (il_is_channel_a_band(ch_info)) {
+		for (group = 1; group < 5; group++) {
+			grp_channel = ch_grp[group].group_channel;
+			if (ch_info->channel <= grp_channel) {
+				group_idx = group;
+				break;
+			}
+		}
+		/* group 4 has a few channels *above* its factory cal freq */
+		if (group == 5)
+			group_idx = 4;
+	} else
+		group_idx = 0;	/* 2.4 GHz, group 0 */
+
+	D_POWER("Chnl %d mapped to grp %d\n", ch_info->channel, group_idx);
+	return group_idx;
+}
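+
+/*
+ * Example (hypothetical EEPROM values, for illustration only): if groups
+ * 1-4 carry group_channel entries 48, 64, 128 and 165, then A-band
+ * channel 44 maps to group 1 (44 <= 48), while channel 169 exceeds every
+ * entry, falls through the loop, and is forced into group 4.  Any
+ * 2.4 GHz channel maps to group 0.
+ */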
+
+/**
+ * il3945_hw_reg_get_matched_power_idx - Interpolate to get nominal idx
+ *
+ * Interpolate to get nominal (i.e. at factory calibration temperature) idx
+ *   into radio/DSP gain settings table for requested power.
+ */
+static int
+il3945_hw_reg_get_matched_power_idx(struct il_priv *il, s8 requested_power,
+				    s32 setting_idx, s32 *new_idx)
+{
+	const struct il3945_eeprom_txpower_group *chnl_grp = NULL;
+	struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+	s32 idx0, idx1;
+	s32 power = 2 * requested_power;
+	s32 i;
+	const struct il3945_eeprom_txpower_sample *samples;
+	s32 gains0, gains1;
+	s32 res;
+	s32 denominator;
+
+	chnl_grp = &eeprom->groups[setting_idx];
+	samples = chnl_grp->samples;
+	for (i = 0; i < 5; i++) {
+		if (power == samples[i].power) {
+			*new_idx = samples[i].gain_idx;
+			return 0;
+		}
+	}
+
+	if (power > samples[1].power) {
+		idx0 = 0;
+		idx1 = 1;
+	} else if (power > samples[2].power) {
+		idx0 = 1;
+		idx1 = 2;
+	} else if (power > samples[3].power) {
+		idx0 = 2;
+		idx1 = 3;
+	} else {
+		idx0 = 3;
+		idx1 = 4;
+	}
+
+	denominator = (s32) samples[idx1].power - (s32) samples[idx0].power;
+	if (denominator == 0)
+		return -EINVAL;
+	gains0 = (s32) samples[idx0].gain_idx * (1 << 19);
+	gains1 = (s32) samples[idx1].gain_idx * (1 << 19);
+	res = gains0 +
+	      (gains1 - gains0) * ((s32) power - (s32) samples[idx0].power) /
+	      denominator + (1 << 18);
+	return 0;
+}
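+
+/*
+ * Worked example (editor's illustration): the interpolation runs with
+ * gain values scaled by 2^19, and (1 << 18) is the half-step rounding
+ * term.  With bracketing samples (power 30, gain_idx 40) and (power 20,
+ * gain_idx 60) and a request of 2 * 12 dBm = 24:
+ *   res = 40<<19 + (60<<19 - 40<<19) * (24 - 30) / (20 - 30) + (1 << 18)
+ * so *new_idx = res >> 19 = 52, matching 40 + 0.6 * (60 - 40).
+ */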
+
+static void
+il3945_hw_reg_init_channel_groups(struct il_priv *il)
+{
+	u32 i;
+	s32 rate_idx;
+	struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+	const struct il3945_eeprom_txpower_group *group;
+
+	D_POWER("Initializing factory calib info from EEPROM\n");
+
+	for (i = 0; i < IL_NUM_TX_CALIB_GROUPS; i++) {
+		s8 *clip_pwrs;	/* table of power levels for each rate */
+		s8 satur_pwr;	/* saturation power for each chnl group */
+		group = &eeprom->groups[i];
+
+		/* sanity check on factory saturation power value */
+		if (group->saturation_power < 40) {
+			IL_WARN("Error: saturation power is %d, "
+				"less than minimum expected 40\n",
+				group->saturation_power);
+			return;
+		}
+
+		/*
+		 * Derive requested power levels for each rate, based on
+		 *   hardware capabilities (saturation power for band).
+		 * Basic value is 3dB down from saturation, with further
+		 *   power reductions for highest 3 data rates.  These
+		 *   backoffs provide headroom for high rate modulation
+		 *   power peaks, without too much distortion (clipping).
+		 */
+		/* we'll fill in this array with h/w max power levels */
+		clip_pwrs = (s8 *) il->_3945.clip_groups[i].clip_powers;
+
+		/* divide factory saturation power by 2 to find -3dB level */
+		satur_pwr = (s8) (group->saturation_power >> 1);
+
+		/* fill in channel group's nominal powers for each rate */
+		for (rate_idx = 0; rate_idx < RATE_COUNT_3945;
+		     rate_idx++, clip_pwrs++) {
+			switch (rate_idx) {
+			case RATE_36M_IDX_TBL:
+				if (i == 0)	/* B/G */
+					*clip_pwrs = satur_pwr;
+				else	/* A */
+					*clip_pwrs = satur_pwr - 5;
+				break;
+			case RATE_48M_IDX_TBL:
+				if (i == 0)
+					*clip_pwrs = satur_pwr - 7;
+				else
+					*clip_pwrs = satur_pwr - 10;
+				break;
+			case RATE_54M_IDX_TBL:
+				if (i == 0)
+					*clip_pwrs = satur_pwr - 9;
+				else
+					*clip_pwrs = satur_pwr - 12;
+				break;
+			default:
+				*clip_pwrs = satur_pwr;
+				break;
+			}
+		}
+	}
+}
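+
+/*
+ * Summary of the backoffs applied above, relative to satur_pwr and in
+ * the same units as clip_pwrs: B/G group 0: 36M -0, 48M -7, 54M -9;
+ * A-band groups 1-4: 36M -5, 48M -10, 54M -12.  All other rates use
+ * satur_pwr unchanged.
+ */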
+
+/**
+ * il3945_txpower_set_from_eeprom - Set channel power info based on EEPROM
+ *
+ * Second pass (during init) to set up il->channel_info
+ *
+ * Set up Tx-power settings in our channel info database for each VALID
+ * (for this geo/SKU) channel, at all Tx data rates, based on eeprom values
+ * and current temperature.
+ *
+ * Since this is based on current temperature (at init time), these values may
+ * not be valid for very long, but it gives us a starting/default point,
+ * and allows us to perform an active (i.e. Tx-based) scan.
+ *
+ * This does *not* write values to NIC, just sets up our internal table.
+ */
+int
+il3945_txpower_set_from_eeprom(struct il_priv *il)
+{
+	struct il_channel_info *ch_info = NULL;
+	struct il3945_channel_power_info *pwr_info;
+	struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+	int delta_idx;
+	u8 rate_idx;
+	u8 scan_tbl_idx;
+	const s8 *clip_pwrs;	/* array of power levels for each rate */
+	u8 gain, dsp_atten;
+	s8 power;
+	u8 pwr_idx, base_pwr_idx, a_band;
+	u8 i;
+	int temperature;
+
+	/* save temperature reference,
+	 *   so we can determine next time to calibrate */
+	temperature = il3945_hw_reg_txpower_get_temperature(il);
+	il->last_temperature = temperature;
+
+	il3945_hw_reg_init_channel_groups(il);
+
+	/* initialize Tx power info for each and every channel, 2.4 and 5.x */
+	for (i = 0, ch_info = il->channel_info; i < il->channel_count;
+	     i++, ch_info++) {
+		a_band = il_is_channel_a_band(ch_info);
+		if (!il_is_channel_valid(ch_info))
+			continue;
+
+		/* find this channel's channel group (*not* "band") idx */
+		ch_info->group_idx = il3945_hw_reg_get_ch_grp_idx(il, ch_info);
+
+		/* Get this chnlgrp's rate->max/clip-powers table */
+		clip_pwrs =
+		    il->_3945.clip_groups[ch_info->group_idx].clip_powers;
+
+		/* calculate power idx *adjustment* value according to
+		 *  diff between current temperature and factory temperature */
+		delta_idx = il3945_hw_reg_adjust_power_by_temp(temperature,
+				eeprom->groups[ch_info->group_idx].temperature);
+
+		D_POWER("Delta idx for channel %d: %d [%d]\n", ch_info->channel,
+			delta_idx, temperature + IL_TEMP_CONVERT);
+
+		/* set tx power value for all OFDM rates */
+		for (rate_idx = 0; rate_idx < IL_OFDM_RATES; rate_idx++) {
+			s32 uninitialized_var(power_idx);
+			int rc;
+
+			/* use channel group's clip-power table,
+			 *   but don't exceed channel's max power */
+			s8 pwr = min(ch_info->max_power_avg,
+				     clip_pwrs[rate_idx]);
+
+			pwr_info = &ch_info->power_info[rate_idx];
+
+			/* get base (i.e. at factory-measured temperature)
+			 *    power table idx for this rate's power */
+			rc = il3945_hw_reg_get_matched_power_idx(il, pwr,
+						ch_info->group_idx, &power_idx);
+			if (rc) {
+				IL_ERR("Invalid power idx\n");
+				return rc;
+			}
+			pwr_info->base_power_idx = (u8) power_idx;
+
+			/* temperature compensate */
+			power_idx += delta_idx;
+
+			/* stay within range of gain table */
+			power_idx = il3945_hw_reg_fix_power_idx(power_idx);
+
+			/* fill 1 OFDM rate's il3945_channel_power_info struct */
+			pwr_info->requested_power = pwr;
+			pwr_info->power_table_idx = (u8) power_idx;
+			pwr_info->tpc.tx_gain =
+			    power_gain_table[a_band][power_idx].tx_gain;
+			pwr_info->tpc.dsp_atten =
+			    power_gain_table[a_band][power_idx].dsp_atten;
+		}
+
+		/* set tx power for CCK rates, based on OFDM 12 Mbit settings */
+		pwr_info = &ch_info->power_info[RATE_12M_IDX_TBL];
+		power = pwr_info->requested_power + IL_CCK_FROM_OFDM_POWER_DIFF;
+		pwr_idx = pwr_info->power_table_idx + IL_CCK_FROM_OFDM_IDX_DIFF;
+		base_pwr_idx =
+		    pwr_info->base_power_idx + IL_CCK_FROM_OFDM_IDX_DIFF;
+
+		/* stay within table range */
+		pwr_idx = il3945_hw_reg_fix_power_idx(pwr_idx);
+		gain = power_gain_table[a_band][pwr_idx].tx_gain;
+		dsp_atten = power_gain_table[a_band][pwr_idx].dsp_atten;
+
+		/* fill each CCK rate's il3945_channel_power_info structure
+		 * NOTE:  All CCK-rate Txpwrs are the same for a given chnl!
+		 * NOTE:  CCK rates start at end of OFDM rates! */
+		for (rate_idx = 0; rate_idx < IL_CCK_RATES; rate_idx++) {
+			pwr_info =
+			    &ch_info->power_info[rate_idx + IL_OFDM_RATES];
+			pwr_info->requested_power = power;
+			pwr_info->power_table_idx = pwr_idx;
+			pwr_info->base_power_idx = base_pwr_idx;
+			pwr_info->tpc.tx_gain = gain;
+			pwr_info->tpc.dsp_atten = dsp_atten;
+		}
+
+		/* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
+		for (scan_tbl_idx = 0; scan_tbl_idx < IL_NUM_SCAN_RATES;
+		     scan_tbl_idx++) {
+			s32 actual_idx = (scan_tbl_idx == 0) ?
+			    RATE_1M_IDX_TBL : RATE_6M_IDX_TBL;
+			il3945_hw_reg_set_scan_power(il, scan_tbl_idx,
+						     actual_idx, clip_pwrs,
+						     ch_info, a_band);
+		}
+	}
+
+	return 0;
+}
+
+int
+il3945_hw_rxq_stop(struct il_priv *il)
+{
+	int ret;
+
+	_il_wr(il, FH39_RCSR_CONFIG(0), 0);
+	ret = _il_poll_bit(il, FH39_RSSR_STATUS,
+			   FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
+			   FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
+			   1000);
+	if (ret < 0)
+		IL_ERR("Can't stop Rx DMA.\n");
+
+	return 0;
+}
+
+int
+il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq)
+{
+	int txq_id = txq->q.id;
+
+	struct il3945_shared *shared_data = il->_3945.shared_virt;
+
+	shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32) txq->q.dma_addr);
+
+	il_wr(il, FH39_CBCC_CTRL(txq_id), 0);
+	il_wr(il, FH39_CBCC_BASE(txq_id), 0);
+
+	il_wr(il, FH39_TCSR_CONFIG(txq_id),
+	      FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT |
+	      FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF |
+	      FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
+	      FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL |
+	      FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE);
+
+	/* fake read to flush all prev. writes */
+	_il_rd(il, FH39_TSSR_CBB_BASE);
+
+	return 0;
+}
+
+/*
+ * HCMD utils
+ */
+static u16
+il3945_get_hcmd_size(u8 cmd_id, u16 len)
+{
+	switch (cmd_id) {
+	case C_RXON:
+		return sizeof(struct il3945_rxon_cmd);
+	case C_POWER_TBL:
+		return sizeof(struct il3945_powertable_cmd);
+	default:
+		return len;
+	}
+}
+
+static u16
+il3945_build_addsta_hcmd(const struct il_addsta_cmd *cmd, u8 *data)
+{
+	struct il3945_addsta_cmd *addsta = (struct il3945_addsta_cmd *)data;
+	addsta->mode = cmd->mode;
+	memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
+	memcpy(&addsta->key, &cmd->key, sizeof(struct il4965_keyinfo));
+	addsta->station_flags = cmd->station_flags;
+	addsta->station_flags_msk = cmd->station_flags_msk;
+	addsta->tid_disable_tx = cpu_to_le16(0);
+	addsta->rate_n_flags = cmd->rate_n_flags;
+	addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
+	addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
+	addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
+
+	return (u16) sizeof(struct il3945_addsta_cmd);
+}
+
+static int
+il3945_add_bssid_station(struct il_priv *il, const u8 *addr, u8 *sta_id_r)
+{
+	int ret;
+	u8 sta_id;
+	unsigned long flags;
+
+	if (sta_id_r)
+		*sta_id_r = IL_INVALID_STATION;
+
+	ret = il_add_station_common(il, addr, 0, NULL, &sta_id);
+	if (ret) {
+		IL_ERR("Unable to add station %pM\n", addr);
+		return ret;
+	}
+
+	if (sta_id_r)
+		*sta_id_r = sta_id;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	il->stations[sta_id].used |= IL_STA_LOCAL;
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	return 0;
+}
+
+static int
+il3945_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
+			   bool add)
+{
+	struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+	int ret;
+
+	if (add) {
+		ret = il3945_add_bssid_station(il, vif->bss_conf.bssid,
+					       &vif_priv->ibss_bssid_sta_id);
+		if (ret)
+			return ret;
+
+		il3945_sync_sta(il, vif_priv->ibss_bssid_sta_id,
+				(il->band == NL80211_BAND_5GHZ) ?
+				RATE_6M_PLCP : RATE_1M_PLCP);
+		il3945_rate_scale_init(il->hw, vif_priv->ibss_bssid_sta_id);
+
+		return 0;
+	}
+
+	return il_remove_station(il, vif_priv->ibss_bssid_sta_id,
+				 vif->bss_conf.bssid);
+}
+
+/**
+ * il3945_init_hw_rate_table - Initialize the hardware rate fallback table
+ */
+int
+il3945_init_hw_rate_table(struct il_priv *il)
+{
+	int rc, i, idx, prev_idx;
+	struct il3945_rate_scaling_cmd rate_cmd = {
+		.reserved = {0, 0, 0},
+	};
+	struct il3945_rate_scaling_info *table = rate_cmd.table;
+
+	for (i = 0; i < ARRAY_SIZE(il3945_rates); i++) {
+		idx = il3945_rates[i].table_rs_idx;
+
+		table[idx].rate_n_flags = cpu_to_le16(il3945_rates[i].plcp);
+		table[idx].try_cnt = il->retry_rate;
+		prev_idx = il3945_get_prev_ieee_rate(i);
+		table[idx].next_rate_idx = il3945_rates[prev_idx].table_rs_idx;
+	}
+
+	switch (il->band) {
+	case NL80211_BAND_5GHZ:
+		D_RATE("Select A mode rate scale\n");
+		/* If one of the following CCK rates is used,
+		 * have it fall back to the 6M OFDM rate */
+		for (i = RATE_1M_IDX_TBL; i <= RATE_11M_IDX_TBL; i++)
+			table[i].next_rate_idx =
+			    il3945_rates[IL_FIRST_OFDM_RATE].table_rs_idx;
+
+		/* Don't fall back to CCK rates */
+		table[RATE_12M_IDX_TBL].next_rate_idx = RATE_9M_IDX_TBL;
+
+		/* Don't drop out of OFDM rates */
+		table[RATE_6M_IDX_TBL].next_rate_idx =
+		    il3945_rates[IL_FIRST_OFDM_RATE].table_rs_idx;
+		break;
+
+	case NL80211_BAND_2GHZ:
+		D_RATE("Select B/G mode rate scale\n");
+		/* If an OFDM rate is used, have it fall back to the
+		 * 1M CCK rates */
+
+		if (!(il->_3945.sta_supp_rates & IL_OFDM_RATES_MASK) &&
+		    il_is_associated(il)) {
+
+			idx = IL_FIRST_CCK_RATE;
+			for (i = RATE_6M_IDX_TBL; i <= RATE_54M_IDX_TBL; i++)
+				table[i].next_rate_idx =
+				    il3945_rates[idx].table_rs_idx;
+
+			idx = RATE_11M_IDX_TBL;
+			/* CCK shouldn't fall back to OFDM... */
+			table[idx].next_rate_idx = RATE_5M_IDX_TBL;
+		}
+		break;
+
+	default:
+		WARN_ON(1);
+		break;
+	}
+
+	/* Update the rate scaling for control frame Tx */
+	rate_cmd.table_id = 0;
+	rc = il_send_cmd_pdu(il, C_RATE_SCALE, sizeof(rate_cmd), &rate_cmd);
+	if (rc)
+		return rc;
+
+	/* Update the rate scaling for data frame Tx */
+	rate_cmd.table_id = 1;
+	return il_send_cmd_pdu(il, C_RATE_SCALE, sizeof(rate_cmd), &rate_cmd);
+}
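+
+/*
+ * Resulting 5 GHz fallback chain (editor's note): 54M -> 48M -> ... ->
+ * 12M -> 9M -> 6M, with 12M skipping the 11M CCK rate and 6M pointing at
+ * itself so the chain terminates; any CCK entry jumps straight to 6M
+ * OFDM.
+ */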
+
+/* Called when initializing driver */
+int
+il3945_hw_set_hw_params(struct il_priv *il)
+{
+	memset(&il->hw_params, 0, sizeof(il->hw_params));
+
+	il->_3945.shared_virt =
+	    dma_alloc_coherent(&il->pci_dev->dev, sizeof(struct il3945_shared),
+			       &il->_3945.shared_phys, GFP_KERNEL);
+	if (!il->_3945.shared_virt)
+		return -ENOMEM;
+
+	il->hw_params.bcast_id = IL3945_BROADCAST_ID;
+
+	/* Assign number of Usable TX queues */
+	il->hw_params.max_txq_num = il->cfg->num_of_queues;
+
+	il->hw_params.tfd_size = sizeof(struct il3945_tfd);
+	il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_3K);
+	il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
+	il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
+	il->hw_params.max_stations = IL3945_STATION_COUNT;
+
+	il->sta_key_max_num = STA_KEY_MAX_NUM;
+
+	il->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR;
+	il->hw_params.max_beacon_itrvl = IL39_MAX_UCODE_BEACON_INTERVAL;
+	il->hw_params.beacon_time_tsf_bits = IL3945_EXT_BEACON_TIME_POS;
+
+	return 0;
+}
+
+unsigned int
+il3945_hw_get_beacon_cmd(struct il_priv *il, struct il3945_frame *frame,
+			 u8 rate)
+{
+	struct il3945_tx_beacon_cmd *tx_beacon_cmd;
+	unsigned int frame_size;
+
+	tx_beacon_cmd = (struct il3945_tx_beacon_cmd *)&frame->u;
+	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
+
+	tx_beacon_cmd->tx.sta_id = il->hw_params.bcast_id;
+	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+
+	frame_size =
+	    il3945_fill_beacon_frame(il, tx_beacon_cmd->frame,
+				     sizeof(frame->u) - sizeof(*tx_beacon_cmd));
+
+	BUG_ON(frame_size > MAX_MPDU_SIZE);
+	tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size);
+
+	tx_beacon_cmd->tx.rate = rate;
+	tx_beacon_cmd->tx.tx_flags =
+	    (TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK);
+
+	/* supp_rates[0] covers OFDM rates, starting at IL_FIRST_OFDM_RATE */
+	tx_beacon_cmd->tx.supp_rates[0] =
+	    (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
+
+	tx_beacon_cmd->tx.supp_rates[1] = (IL_CCK_BASIC_RATES_MASK & 0xF);
+
+	return sizeof(struct il3945_tx_beacon_cmd) + frame_size;
+}
+
+void
+il3945_hw_handler_setup(struct il_priv *il)
+{
+	il->handlers[C_TX] = il3945_hdl_tx;
+	il->handlers[N_3945_RX] = il3945_hdl_rx;
+}
+
+void
+il3945_hw_setup_deferred_work(struct il_priv *il)
+{
+	INIT_DELAYED_WORK(&il->_3945.thermal_periodic,
+			  il3945_bg_reg_txpower_periodic);
+}
+
+void
+il3945_hw_cancel_deferred_work(struct il_priv *il)
+{
+	cancel_delayed_work(&il->_3945.thermal_periodic);
+}
+
+/* check contents of special bootstrap uCode SRAM */
+static int
+il3945_verify_bsm(struct il_priv *il)
+{
+	__le32 *image = il->ucode_boot.v_addr;
+	u32 len = il->ucode_boot.len;
+	u32 reg;
+	u32 val;
+
+	D_INFO("Begin verify bsm\n");
+
+	/* verify BSM SRAM contents */
+	val = il_rd_prph(il, BSM_WR_DWCOUNT_REG);
+	for (reg = BSM_SRAM_LOWER_BOUND; reg < BSM_SRAM_LOWER_BOUND + len;
+	     reg += sizeof(u32), image++) {
+		val = il_rd_prph(il, reg);
+		if (val != le32_to_cpu(*image)) {
+			IL_ERR("BSM uCode verification failed at "
+			       "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
+			       BSM_SRAM_LOWER_BOUND, reg - BSM_SRAM_LOWER_BOUND,
+			       len, val, le32_to_cpu(*image));
+			return -EIO;
+		}
+	}
+
+	D_INFO("BSM bootstrap uCode image OK\n");
+
+	return 0;
+}
+
+/******************************************************************************
+ *
+ * EEPROM related functions
+ *
+ ******************************************************************************/
+
+/*
+ * Clear the OWNER_MSK, to establish driver (instead of uCode running on
+ * embedded controller) as EEPROM reader; each read is a series of pulses
+ * to/from the EEPROM chip, not a single event, so even reads could conflict
+ * if they weren't arbitrated by some ownership mechanism.  Here, the driver
+ * simply claims ownership, which should be safe when this function is called
+ * (i.e. before loading uCode!).
+ */
+static int
+il3945_eeprom_acquire_semaphore(struct il_priv *il)
+{
+	_il_clear_bit(il, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
+	return 0;
+}
+
+static void
+il3945_eeprom_release_semaphore(struct il_priv *il)
+{
+}
+
+/**
+ * il3945_load_bsm - Load bootstrap instructions
+ *
+ * BSM operation:
+ *
+ * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
+ * in special SRAM that does not power down during RFKILL.  When powering back
+ * up after power-saving sleeps (or during initial uCode load), the BSM loads
+ * the bootstrap program into the on-board processor, and starts it.
+ *
+ * The bootstrap program loads (via DMA) instructions and data for a new
+ * program from host DRAM locations indicated by the host driver in the
+ * BSM_DRAM_* registers.  Once the new program is loaded, it starts
+ * automatically.
+ *
+ * When initializing the NIC, the host driver points the BSM to the
+ * "initialize" uCode image.  This uCode sets up some internal data, then
+ * notifies host via "initialize alive" that it is complete.
+ *
+ * The host then replaces the BSM_DRAM_* pointer values to point to the
+ * normal runtime uCode instructions and a backup uCode data cache buffer
+ * (filled initially with starting data values for the on-board processor),
+ * then triggers the "initialize" uCode to load and launch the runtime uCode,
+ * which begins normal operation.
+ *
+ * When doing a power-save shutdown, runtime uCode saves data SRAM into
+ * the backup data cache in DRAM before SRAM is powered down.
+ *
+ * When powering back up, the BSM loads the bootstrap program.  This reloads
+ * the runtime uCode instructions and the backup data cache into SRAM,
+ * and re-launches the runtime uCode from where it left off.
+ */
+static int
+il3945_load_bsm(struct il_priv *il)
+{
+	__le32 *image = il->ucode_boot.v_addr;
+	u32 len = il->ucode_boot.len;
+	dma_addr_t pinst;
+	dma_addr_t pdata;
+	u32 inst_len;
+	u32 data_len;
+	int rc;
+	int i;
+	u32 done;
+	u32 reg_offset;
+
+	D_INFO("Begin load bsm\n");
+
+	/* make sure bootstrap program is no larger than BSM's SRAM size */
+	if (len > IL39_MAX_BSM_SIZE)
+		return -EINVAL;
+
+	/* Tell bootstrap uCode where to find the "Initialize" uCode
+	 *   in host DRAM ... host DRAM physical address bits 31:0 for 3945.
+	 * NOTE:  il3945_initialize_alive_start() will replace these values,
+	 *        after the "initialize" uCode has run, to point to
+	 *        runtime/protocol instructions and backup data cache. */
+	pinst = il->ucode_init.p_addr;
+	pdata = il->ucode_init_data.p_addr;
+	inst_len = il->ucode_init.len;
+	data_len = il->ucode_init_data.len;
+
+	il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
+	il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
+	il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
+	il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
+
+	/* Fill BSM memory with bootstrap instructions */
+	for (reg_offset = BSM_SRAM_LOWER_BOUND;
+	     reg_offset < BSM_SRAM_LOWER_BOUND + len;
+	     reg_offset += sizeof(u32), image++)
+		_il_wr_prph(il, reg_offset, le32_to_cpu(*image));
+
+	rc = il3945_verify_bsm(il);
+	if (rc)
+		return rc;
+
+	/* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
+	il_wr_prph(il, BSM_WR_MEM_SRC_REG, 0x0);
+	il_wr_prph(il, BSM_WR_MEM_DST_REG, IL39_RTC_INST_LOWER_BOUND);
+	il_wr_prph(il, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
+
+	/* Load bootstrap code into instruction SRAM now,
+	 *   to prepare to load "initialize" uCode */
+	il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
+
+	/* Wait for load of bootstrap uCode to finish */
+	for (i = 0; i < 100; i++) {
+		done = il_rd_prph(il, BSM_WR_CTRL_REG);
+		if (!(done & BSM_WR_CTRL_REG_BIT_START))
+			break;
+		udelay(10);
+	}
+	if (i < 100) {
+		D_INFO("BSM write complete, poll %d iterations\n", i);
+	} else {
+		IL_ERR("BSM write did not complete!\n");
+		return -EIO;
+	}
+
+	/* Enable future boot loads whenever power management unit triggers it
+	 *   (e.g. when powering back up after power-save shutdown) */
+	il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
+
+	return 0;
+}
+
+const struct il_ops il3945_ops = {
+	.txq_attach_buf_to_tfd = il3945_hw_txq_attach_buf_to_tfd,
+	.txq_free_tfd = il3945_hw_txq_free_tfd,
+	.txq_init = il3945_hw_tx_queue_init,
+	.load_ucode = il3945_load_bsm,
+	.dump_nic_error_log = il3945_dump_nic_error_log,
+	.apm_init = il3945_apm_init,
+	.send_tx_power = il3945_send_tx_power,
+	.is_valid_rtc_data_addr = il3945_hw_valid_rtc_data_addr,
+	.eeprom_acquire_semaphore = il3945_eeprom_acquire_semaphore,
+	.eeprom_release_semaphore = il3945_eeprom_release_semaphore,
+
+	.rxon_assoc = il3945_send_rxon_assoc,
+	.commit_rxon = il3945_commit_rxon,
+
+	.get_hcmd_size = il3945_get_hcmd_size,
+	.build_addsta_hcmd = il3945_build_addsta_hcmd,
+	.request_scan = il3945_request_scan,
+	.post_scan = il3945_post_scan,
+
+	.post_associate = il3945_post_associate,
+	.config_ap = il3945_config_ap,
+	.manage_ibss_station = il3945_manage_ibss_station,
+
+	.send_led_cmd = il3945_send_led_cmd,
+};
+
+static const struct il_cfg il3945_bg_cfg = {
+	.name = "3945BG",
+	.fw_name_pre = IL3945_FW_PRE,
+	.ucode_api_max = IL3945_UCODE_API_MAX,
+	.ucode_api_min = IL3945_UCODE_API_MIN,
+	.sku = IL_SKU_G,
+	.eeprom_ver = EEPROM_3945_EEPROM_VERSION,
+	.mod_params = &il3945_mod_params,
+	.led_mode = IL_LED_BLINK,
+
+	.eeprom_size = IL3945_EEPROM_IMG_SIZE,
+	.num_of_queues = IL39_NUM_QUEUES,
+	.pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
+	.set_l0s = false,
+	.use_bsm = true,
+	.led_compensation = 64,
+	.wd_timeout = IL_DEF_WD_TIMEOUT,
+
+	.regulatory_bands = {
+		EEPROM_REGULATORY_BAND_1_CHANNELS,
+		EEPROM_REGULATORY_BAND_2_CHANNELS,
+		EEPROM_REGULATORY_BAND_3_CHANNELS,
+		EEPROM_REGULATORY_BAND_4_CHANNELS,
+		EEPROM_REGULATORY_BAND_5_CHANNELS,
+		EEPROM_REGULATORY_BAND_NO_HT40,
+		EEPROM_REGULATORY_BAND_NO_HT40,
+	},
+};
+
+static const struct il_cfg il3945_abg_cfg = {
+	.name = "3945ABG",
+	.fw_name_pre = IL3945_FW_PRE,
+	.ucode_api_max = IL3945_UCODE_API_MAX,
+	.ucode_api_min = IL3945_UCODE_API_MIN,
+	.sku = IL_SKU_A | IL_SKU_G,
+	.eeprom_ver = EEPROM_3945_EEPROM_VERSION,
+	.mod_params = &il3945_mod_params,
+	.led_mode = IL_LED_BLINK,
+
+	.eeprom_size = IL3945_EEPROM_IMG_SIZE,
+	.num_of_queues = IL39_NUM_QUEUES,
+	.pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
+	.set_l0s = false,
+	.use_bsm = true,
+	.led_compensation = 64,
+	.wd_timeout = IL_DEF_WD_TIMEOUT,
+
+	.regulatory_bands = {
+		EEPROM_REGULATORY_BAND_1_CHANNELS,
+		EEPROM_REGULATORY_BAND_2_CHANNELS,
+		EEPROM_REGULATORY_BAND_3_CHANNELS,
+		EEPROM_REGULATORY_BAND_4_CHANNELS,
+		EEPROM_REGULATORY_BAND_5_CHANNELS,
+		EEPROM_REGULATORY_BAND_NO_HT40,
+		EEPROM_REGULATORY_BAND_NO_HT40,
+	},
+};
+
+const struct pci_device_id il3945_hw_card_ids[] = {
+	{IL_PCI_DEVICE(0x4222, 0x1005, il3945_bg_cfg)},
+	{IL_PCI_DEVICE(0x4222, 0x1034, il3945_bg_cfg)},
+	{IL_PCI_DEVICE(0x4222, 0x1044, il3945_bg_cfg)},
+	{IL_PCI_DEVICE(0x4227, 0x1014, il3945_bg_cfg)},
+	{IL_PCI_DEVICE(0x4222, PCI_ANY_ID, il3945_abg_cfg)},
+	{IL_PCI_DEVICE(0x4227, PCI_ANY_ID, il3945_abg_cfg)},
+	{0}
+};
+
+MODULE_DEVICE_TABLE(pci, il3945_hw_card_ids);
diff --git a/drivers/net/wireless/intel/iwlegacy/3945.h b/drivers/net/wireless/intel/iwlegacy/3945.h
new file mode 100644
index 0000000..00030d4
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/3945.h
@@ -0,0 +1,593 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __il_3945_h__
+#define __il_3945_h__
+
+#include <linux/pci.h>		/* for struct pci_device_id */
+#include <linux/kernel.h>
+#include <net/ieee80211_radiotap.h>
+
+/* Hardware specific file defines the PCI IDs table for that hardware module */
+extern const struct pci_device_id il3945_hw_card_ids[];
+
+#include "common.h"
+
+extern const struct il_ops il3945_ops;
+
+/* Highest firmware API version supported */
+#define IL3945_UCODE_API_MAX 2
+
+/* Lowest firmware API version supported */
+#define IL3945_UCODE_API_MIN 1
+
+#define IL3945_FW_PRE	"iwlwifi-3945-"
+#define _IL3945_MODULE_FIRMWARE(api) IL3945_FW_PRE #api ".ucode"
+#define IL3945_MODULE_FIRMWARE(api) _IL3945_MODULE_FIRMWARE(api)
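+/* e.g. IL3945_MODULE_FIRMWARE(2) expands to "iwlwifi-3945-2.ucode" */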
+
+/* Default noise level to report when noise measurement is not available.
+ *   This may be because we're:
+ *   1)  Not associated (4965, no beacon stats being sent to driver)
+ *   2)  Scanning (noise measurement does not apply to associated channel)
+ *   3)  Receiving CCK (3945 delivers noise info only for OFDM frames)
+ * Use default noise value of -127 ... this is below the range of measurable
+ *   Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
+ *   Also, -127 works better than 0 when averaging frames with/without
+ *   noise info (e.g. averaging might be done in app); measured dBm values are
+ *   always negative ... using a negative value as the default keeps all
+ *   averages within an s8's (used in some apps) range of negative values. */
+#define IL_NOISE_MEAS_NOT_AVAILABLE (-127)
+
+/* Module parameters accessible from iwl-*.c */
+extern struct il_mod_params il3945_mod_params;
+
+struct il3945_rate_scale_data {
+	u64 data;
+	s32 success_counter;
+	s32 success_ratio;
+	s32 counter;
+	s32 average_tpt;
+	unsigned long stamp;
+};
+
+struct il3945_rs_sta {
+	spinlock_t lock;
+	struct il_priv *il;
+	s32 *expected_tpt;
+	unsigned long last_partial_flush;
+	unsigned long last_flush;
+	u32 flush_time;
+	u32 last_tx_packets;
+	u32 tx_packets;
+	u8 tgg;
+	u8 flush_pending;
+	u8 start_rate;
+	struct timer_list rate_scale_flush;
+	struct il3945_rate_scale_data win[RATE_COUNT_3945];
+#ifdef CONFIG_MAC80211_DEBUGFS
+	struct dentry *rs_sta_dbgfs_stats_table_file;
+#endif
+
+	/* used to be in sta_info */
+	int last_txrate_idx;
+};
+
+/*
+ * The common struct MUST be first because it is shared between
+ * 3945 and 4965!
+ */
+struct il3945_sta_priv {
+	struct il_station_priv_common common;
+	struct il3945_rs_sta rs_sta;
+};
+
+enum il3945_antenna {
+	IL_ANTENNA_DIVERSITY,
+	IL_ANTENNA_MAIN,
+	IL_ANTENNA_AUX
+};
+
+/*
+ * RTS threshold here is total size [2347] minus 4 FCS bytes
+ * Per spec:
+ *   a value of 0 means RTS on all data/management packets
+ *   a value > max MSDU size means no RTS
+ * else RTS for data/management frames where MPDU is larger
+ *   than RTS value.
+ */
+#define DEFAULT_RTS_THRESHOLD     2347U
+#define MIN_RTS_THRESHOLD         0U
+#define MAX_RTS_THRESHOLD         2347U
+#define MAX_MSDU_SIZE		  2304U
+#define MAX_MPDU_SIZE		  2346U
+#define DEFAULT_BEACON_INTERVAL   100U
+#define	DEFAULT_SHORT_RETRY_LIMIT 7U
+#define	DEFAULT_LONG_RETRY_LIMIT  4U
+
+#define IL_TX_FIFO_AC0	0
+#define IL_TX_FIFO_AC1	1
+#define IL_TX_FIFO_AC2	2
+#define IL_TX_FIFO_AC3	3
+#define IL_TX_FIFO_HCCA_1	5
+#define IL_TX_FIFO_HCCA_2	6
+#define IL_TX_FIFO_NONE	7
+
+#define IEEE80211_DATA_LEN              2304
+#define IEEE80211_4ADDR_LEN             30
+#define IEEE80211_HLEN                  (IEEE80211_4ADDR_LEN)
+#define IEEE80211_FRAME_LEN             (IEEE80211_DATA_LEN + IEEE80211_HLEN)
+
+struct il3945_frame {
+	union {
+		struct ieee80211_hdr frame;
+		struct il3945_tx_beacon_cmd beacon;
+		u8 raw[IEEE80211_FRAME_LEN];
+		u8 cmd[360];
+	} u;
+	struct list_head list;
+};
+
+#define SUP_RATE_11A_MAX_NUM_CHANNELS  8
+#define SUP_RATE_11B_MAX_NUM_CHANNELS  4
+#define SUP_RATE_11G_MAX_NUM_CHANNELS  12
+
+#define IL_SUPPORTED_RATES_IE_LEN         8
+
+#define SCAN_INTERVAL 100
+
+#define MAX_TID_COUNT        9
+
+#define IL_INVALID_RATE     0xFF
+#define IL_INVALID_VALUE    -1
+
+#define STA_PS_STATUS_WAKE             0
+#define STA_PS_STATUS_SLEEP            1
+
+struct il3945_ibss_seq {
+	u8 mac[ETH_ALEN];
+	u16 seq_num;
+	u16 frag_num;
+	unsigned long packet_time;
+	struct list_head list;
+};
+
+#define IL_RX_HDR(x) ((struct il3945_rx_frame_hdr *)(\
+		       x->u.rx_frame.stats.payload + \
+		       x->u.rx_frame.stats.phy_count))
+#define IL_RX_END(x) ((struct il3945_rx_frame_end *)(\
+		       IL_RX_HDR(x)->payload + \
+		       le16_to_cpu(IL_RX_HDR(x)->len)))
+#define IL_RX_STATS(x) (&x->u.rx_frame.stats)
+#define IL_RX_DATA(x) (IL_RX_HDR(x)->payload)
+
+/******************************************************************************
+ *
+ * Functions implemented in iwl3945-base.c which are forward declared here
+ * for use by iwl-*.c
+ *
+ *****************************************************************************/
+int il3945_calc_db_from_ratio(int sig_ratio);
+void il3945_rx_replenish(void *data);
+void il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
+unsigned int il3945_fill_beacon_frame(struct il_priv *il,
+				      struct ieee80211_hdr *hdr, int left);
+int il3945_dump_nic_event_log(struct il_priv *il, bool full_log, char **buf,
+			      bool display);
+void il3945_dump_nic_error_log(struct il_priv *il);
+
+/******************************************************************************
+ *
+ * Functions implemented in iwl-[34]*.c which are forward declared here
+ * for use by iwl3945-base.c
+ *
+ * NOTE:  The implementation of these functions is hardware specific
+ * which is why they are in the hardware specific files (vs. iwl-base.c)
+ *
+ * Naming convention --
+ * il3945_         <-- It's part of iwlwifi (the common driver prefix)
+ * il3945_hw_      <-- Hardware specific (implemented in iwl-XXXX.c by all HW)
+ * iwlXXXX_        <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
+ * il3945_bg_      <-- Called from work queue context
+ * il3945_mac_     <-- mac80211 callback
+ *
+ ****************************************************************************/
+void il3945_hw_handler_setup(struct il_priv *il);
+void il3945_hw_setup_deferred_work(struct il_priv *il);
+void il3945_hw_cancel_deferred_work(struct il_priv *il);
+int il3945_hw_rxq_stop(struct il_priv *il);
+int il3945_hw_set_hw_params(struct il_priv *il);
+int il3945_hw_nic_init(struct il_priv *il);
+int il3945_hw_nic_stop_master(struct il_priv *il);
+void il3945_hw_txq_ctx_free(struct il_priv *il);
+void il3945_hw_txq_ctx_stop(struct il_priv *il);
+int il3945_hw_nic_reset(struct il_priv *il);
+int il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
+				    dma_addr_t addr, u16 len, u8 reset, u8 pad);
+void il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
+int il3945_hw_get_temperature(struct il_priv *il);
+int il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
+unsigned int il3945_hw_get_beacon_cmd(struct il_priv *il,
+				      struct il3945_frame *frame, u8 rate);
+void il3945_hw_build_tx_cmd_rate(struct il_priv *il, struct il_device_cmd *cmd,
+				 struct ieee80211_tx_info *info,
+				 struct ieee80211_hdr *hdr, int sta_id);
+int il3945_hw_reg_send_txpower(struct il_priv *il);
+int il3945_hw_reg_set_txpower(struct il_priv *il, s8 power);
+void il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
+void il3945_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb);
+void il3945_disable_events(struct il_priv *il);
+int il4965_get_temperature(const struct il_priv *il);
+void il3945_post_associate(struct il_priv *il);
+void il3945_config_ap(struct il_priv *il);
+
+int il3945_commit_rxon(struct il_priv *il);
+
+/**
+ * il3945_hw_find_station - Find station id for a given BSSID
+ * @bssid: MAC address of station ID to find
+ *
+ * NOTE:  This should not be hardware specific but the code has
+ * not yet been merged into a single common layer for managing the
+ * station tables.
+ */
+u8 il3945_hw_find_station(struct il_priv *il, const u8 *bssid);
+
+__le32 il3945_get_antenna_flags(const struct il_priv *il);
+int il3945_init_hw_rate_table(struct il_priv *il);
+void il3945_reg_txpower_periodic(struct il_priv *il);
+int il3945_txpower_set_from_eeprom(struct il_priv *il);
+
+int il3945_rs_next_rate(struct il_priv *il, int rate);
+
+/* scanning */
+int il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif);
+void il3945_post_scan(struct il_priv *il);
+
+/* rates */
+extern const struct il3945_rate_info il3945_rates[RATE_COUNT_3945];
+
+/* RSSI to dBm */
+#define IL39_RSSI_OFFSET	95
+
+/*
+ * EEPROM related constants, enums, and structures.
+ */
+#define EEPROM_SKU_CAP_OP_MODE_MRC                      (1 << 7)
+
+/*
+ * Mapping of a Tx power level, at factory calibration temperature,
+ *   to a radio/DSP gain table idx.
+ * One for each of 5 "sample" power levels in each band.
+ * v_det is measured at the factory, using the 3945's built-in power amplifier
+ *   (PA) output voltage detector.  This same detector is used during Tx of
+ *   long packets in normal operation to provide feedback as to proper output
+ *   level.
+ * Data copied from EEPROM.
+ * DO NOT ALTER THIS STRUCTURE!!!
+ */
+struct il3945_eeprom_txpower_sample {
+	u8 gain_idx;		/* idx into power (gain) setup table ... */
+	s8 power;		/* ... for this pwr level for this chnl group */
+	u16 v_det;		/* PA output voltage */
+} __packed;
+
+/*
+ * Mappings of Tx power levels -> nominal radio/DSP gain table idxes.
+ * One for each channel group (a.k.a. "band") (1 for BG, 4 for A).
+ * Tx power setup code interpolates between the 5 "sample" power levels
+ *    to determine the nominal setup for a requested power level.
+ * Data copied from EEPROM.
+ * DO NOT ALTER THIS STRUCTURE!!!
+ */
+struct il3945_eeprom_txpower_group {
+	struct il3945_eeprom_txpower_sample samples[5];	/* 5 power levels */
+	s32 a, b, c, d, e;	/* coefficients for voltage->power
+				 * formula (signed) */
+	s32 Fa, Fb, Fc, Fd, Fe;	/* these modify coeffs based on
+				 * frequency (signed) */
+	s8 saturation_power;	/* highest power possible by h/w in this
+				 * band */
+	u8 group_channel;	/* "representative" channel # in this band */
+	s16 temperature;	/* h/w temperature at factory calib this band
+				 * (signed) */
+} __packed;
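+/* Editor's sketch (an assumption, not the driver's actual code): with two
+ * samples s0/s1 bracketing a requested power (s0.power <= pwr <= s1.power),
+ * a plain linear interpolation for the nominal gain idx would be
+ *
+ *	idx = s0.gain_idx + (pwr - s0.power) *
+ *	      (s1.gain_idx - s0.gain_idx) / (s1.power - s0.power);
+ */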
+
+/*
+ * Temperature-based Tx-power compensation data, not band-specific.
+ * These coefficients are used to modify a/b/c/d/e coeffs based on
+ *   difference between current temperature and factory calib temperature.
+ * Data copied from EEPROM.
+ */
+struct il3945_eeprom_temperature_corr {
+	u32 Ta;
+	u32 Tb;
+	u32 Tc;
+	u32 Td;
+	u32 Te;
+} __packed;
+
+/*
+ * EEPROM map
+ */
+struct il3945_eeprom {
+	u8 reserved0[16];
+	u16 device_id;		/* abs.ofs: 16 */
+	u8 reserved1[2];
+	u16 pmc;		/* abs.ofs: 20 */
+	u8 reserved2[20];
+	u8 mac_address[6];	/* abs.ofs: 42 */
+	u8 reserved3[58];
+	u16 board_revision;	/* abs.ofs: 106 */
+	u8 reserved4[11];
+	u8 board_pba_number[9];	/* abs.ofs: 119 */
+	u8 reserved5[8];
+	u16 version;		/* abs.ofs: 136 */
+	u8 sku_cap;		/* abs.ofs: 138 */
+	u8 leds_mode;		/* abs.ofs: 139 */
+	u16 oem_mode;
+	u16 wowlan_mode;	/* abs.ofs: 142 */
+	u16 leds_time_interval;	/* abs.ofs: 144 */
+	u8 leds_off_time;	/* abs.ofs: 146 */
+	u8 leds_on_time;	/* abs.ofs: 147 */
+	u8 almgor_m_version;	/* abs.ofs: 148 */
+	u8 antenna_switch_type;	/* abs.ofs: 149 */
+	u8 reserved6[42];
+	u8 sku_id[4];		/* abs.ofs: 192 */
+
+/*
+ * Per-channel regulatory data.
+ *
+ * Each channel that *might* be supported by 3945 has a fixed location
+ * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
+ * txpower (MSB).
+ *
+ * Entries immediately below are for 20 MHz channel width.
+ *
+ * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+ */
+	u16 band_1_count;	/* abs.ofs: 196 */
+	struct il_eeprom_channel band_1_channels[14];	/* abs.ofs: 198 */
+
+/*
+ * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
+ * 5.0 GHz channels 7, 8, 11, 12, 16
+ * (4915-5080MHz) (none of these is ever supported)
+ */
+	u16 band_2_count;	/* abs.ofs: 226 */
+	struct il_eeprom_channel band_2_channels[13];	/* abs.ofs: 228 */
+
+/*
+ * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
+ * (5170-5320MHz)
+ */
+	u16 band_3_count;	/* abs.ofs: 254 */
+	struct il_eeprom_channel band_3_channels[12];	/* abs.ofs: 256 */
+
+/*
+ * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
+ * (5500-5700MHz)
+ */
+	u16 band_4_count;	/* abs.ofs: 280 */
+	struct il_eeprom_channel band_4_channels[11];	/* abs.ofs: 282 */
+
+/*
+ * 5.7 GHz channels 145, 149, 153, 157, 161, 165
+ * (5725-5825MHz)
+ */
+	u16 band_5_count;	/* abs.ofs: 304 */
+	struct il_eeprom_channel band_5_channels[6];	/* abs.ofs: 306 */
+
+	u8 reserved9[194];
+
+/*
+ * 3945 Txpower calibration data.
+ */
+#define IL_NUM_TX_CALIB_GROUPS 5
+	struct il3945_eeprom_txpower_group groups[IL_NUM_TX_CALIB_GROUPS];
+/* abs.ofs: 512 */
+	struct il3945_eeprom_temperature_corr corrections;	/* abs.ofs: 832 */
+	u8 reserved16[172];	/* fill out to full 1024 byte block */
+} __packed;
+
+#define IL3945_EEPROM_IMG_SIZE 1024
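+/* Editor's note: the reserved fields above pad the map to exactly one
+ * 1024-byte image; a hedged compile-time check for that invariant would be
+ *
+ *	BUILD_BUG_ON(sizeof(struct il3945_eeprom) != IL3945_EEPROM_IMG_SIZE);
+ */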
+
+/* End of EEPROM */
+
+#define PCI_CFG_REV_ID_BIT_BASIC_SKU                (0x40)	/* bit 6    */
+#define PCI_CFG_REV_ID_BIT_RTP                      (0x80)	/* bit 7    */
+
+/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */
+#define IL39_NUM_QUEUES        5
+#define IL39_CMD_QUEUE_NUM	4
+
+#define IL_DEFAULT_TX_RETRY  15
+
+/*********************************************/
+
+#define RFD_SIZE                              4
+#define NUM_TFD_CHUNKS                        4
+
+#define TFD_CTL_COUNT_SET(n)       (n << 24)
+#define TFD_CTL_COUNT_GET(ctl)     ((ctl >> 24) & 7)
+#define TFD_CTL_PAD_SET(n)         (n << 28)
+#define TFD_CTL_PAD_GET(ctl)       (ctl >> 28)
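+/* Editor's note, a worked example of the pack/unpack macros above:
+ * TFD_CTL_COUNT_SET(3) | TFD_CTL_PAD_SET(1) == 0x13000000, from which
+ * TFD_CTL_COUNT_GET() recovers 3 and TFD_CTL_PAD_GET() recovers 1. */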
+
+/* Sizes and addresses for instruction and data memory (SRAM) in
+ * 3945's embedded processor.  Driver access is via HBUS_TARG_MEM_* regs. */
+#define IL39_RTC_INST_LOWER_BOUND		(0x000000)
+#define IL39_RTC_INST_UPPER_BOUND		(0x014000)
+
+#define IL39_RTC_DATA_LOWER_BOUND		(0x800000)
+#define IL39_RTC_DATA_UPPER_BOUND		(0x808000)
+
+#define IL39_RTC_INST_SIZE (IL39_RTC_INST_UPPER_BOUND - \
+				IL39_RTC_INST_LOWER_BOUND)
+#define IL39_RTC_DATA_SIZE (IL39_RTC_DATA_UPPER_BOUND - \
+				IL39_RTC_DATA_LOWER_BOUND)
+
+#define IL39_MAX_INST_SIZE IL39_RTC_INST_SIZE
+#define IL39_MAX_DATA_SIZE IL39_RTC_DATA_SIZE
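+/* Editor's note: the bounds above work out to 0x14000 bytes (80 KiB) of
+ * instruction SRAM and 0x8000 bytes (32 KiB) of data SRAM. */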
+
+/* Size of uCode instruction memory in bootstrap state machine */
+#define IL39_MAX_BSM_SIZE IL39_RTC_INST_SIZE
+
+static inline int
+il3945_hw_valid_rtc_data_addr(u32 addr)
+{
+	return (addr >= IL39_RTC_DATA_LOWER_BOUND &&
+		addr < IL39_RTC_DATA_UPPER_BOUND);
+}
+
+/* Base physical address of il3945_shared is provided to FH39_TSSR_CBB_BASE
+ * and &il3945_shared.rx_read_ptr[0] is provided to FH39_RCSR_RPTR_ADDR(0) */
+struct il3945_shared {
+	__le32 tx_base_ptr[8];
+} __packed;
+
+/************************************/
+/* iwl3945 Flow Handler Definitions */
+/************************************/
+
+/**
+ * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
+ * Addresses are offsets from device's PCI hardware base address.
+ */
+#define FH39_MEM_LOWER_BOUND                   (0x0800)
+#define FH39_MEM_UPPER_BOUND                   (0x1000)
+
+#define FH39_CBCC_TBL		(FH39_MEM_LOWER_BOUND + 0x140)
+#define FH39_TFDB_TBL		(FH39_MEM_LOWER_BOUND + 0x180)
+#define FH39_RCSR_TBL		(FH39_MEM_LOWER_BOUND + 0x400)
+#define FH39_RSSR_TBL		(FH39_MEM_LOWER_BOUND + 0x4c0)
+#define FH39_TCSR_TBL		(FH39_MEM_LOWER_BOUND + 0x500)
+#define FH39_TSSR_TBL		(FH39_MEM_LOWER_BOUND + 0x680)
+
+/* TFDB (Transmit Frame Buffer Descriptor) */
+#define FH39_TFDB(_ch, buf)			(FH39_TFDB_TBL + \
+						 ((_ch) * 2 + (buf)) * 0x28)
+#define FH39_TFDB_CHNL_BUF_CTRL_REG(_ch)	(FH39_TFDB_TBL + 0x50 * (_ch))
+
+/* CBCC channel is [0,2] */
+#define FH39_CBCC(_ch)		(FH39_CBCC_TBL + (_ch) * 0x8)
+#define FH39_CBCC_CTRL(_ch)	(FH39_CBCC(_ch) + 0x00)
+#define FH39_CBCC_BASE(_ch)	(FH39_CBCC(_ch) + 0x04)
+
+/* RCSR channel is [0,2] */
+#define FH39_RCSR(_ch)			(FH39_RCSR_TBL + (_ch) * 0x40)
+#define FH39_RCSR_CONFIG(_ch)		(FH39_RCSR(_ch) + 0x00)
+#define FH39_RCSR_RBD_BASE(_ch)		(FH39_RCSR(_ch) + 0x04)
+#define FH39_RCSR_WPTR(_ch)		(FH39_RCSR(_ch) + 0x20)
+#define FH39_RCSR_RPTR_ADDR(_ch)	(FH39_RCSR(_ch) + 0x24)
+
+#define FH39_RSCSR_CHNL0_WPTR		(FH39_RCSR_WPTR(0))
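+/* Editor's note, a worked offset example: FH39_RSCSR_CHNL0_WPTR expands to
+ * 0x0800 + 0x400 + 0x20 = 0x0c20 from the device's PCI base address. */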
+
+/* RSSR */
+#define FH39_RSSR_CTRL			(FH39_RSSR_TBL + 0x000)
+#define FH39_RSSR_STATUS		(FH39_RSSR_TBL + 0x004)
+
+/* TCSR */
+#define FH39_TCSR(_ch)			(FH39_TCSR_TBL + (_ch) * 0x20)
+#define FH39_TCSR_CONFIG(_ch)		(FH39_TCSR(_ch) + 0x00)
+#define FH39_TCSR_CREDIT(_ch)		(FH39_TCSR(_ch) + 0x04)
+#define FH39_TCSR_BUFF_STTS(_ch)	(FH39_TCSR(_ch) + 0x08)
+
+/* TSSR */
+#define FH39_TSSR_CBB_BASE        (FH39_TSSR_TBL + 0x000)
+#define FH39_TSSR_MSG_CONFIG      (FH39_TSSR_TBL + 0x008)
+#define FH39_TSSR_TX_STATUS       (FH39_TSSR_TBL + 0x010)
+
+/* DBM */
+
+#define FH39_SRVC_CHNL                            (6)
+
+#define FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE     (20)
+#define FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH      (4)
+
+#define FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN    (0x08000000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE        (0x80000000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE           (0x20000000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128		(0x01000000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST		(0x00001000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH			(0x00000000)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF		(0x00000000)
+#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER		(0x00000001)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL	(0x00000000)
+#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL	(0x00000008)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD		(0x00200000)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT		(0x00000000)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE		(0x00000000)
+#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE		(0x80000000)
+
+#define FH39_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID		(0x00004000)
+
+#define FH39_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR		(0x00000001)
+
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON	(0xFF000000)
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON	(0x00FF0000)
+
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B	(0x00000400)
+
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON		(0x00000100)
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON		(0x00000080)
+
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH	(0x00000020)
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH		(0x00000005)
+
+#define FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch)	(BIT(_ch) << 24)
+#define FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch)	(BIT(_ch) << 16)
+
+#define FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_ch) \
+	(FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) | \
+	 FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch))
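+/* Editor's sketch (assuming il_poll_bit() from this driver's common code):
+ * draining a Tx channel might be awaited as
+ *
+ *	il_poll_bit(il, FH39_TSSR_TX_STATUS,
+ *		    FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
+ *		    FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
+ */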
+
+#define FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE			(0x01000000)
+
+struct il3945_tfd_tb {
+	__le32 addr;
+	__le32 len;
+} __packed;
+
+struct il3945_tfd {
+	__le32 control_flags;
+	struct il3945_tfd_tb tbs[4];
+	u8 __pad[28];
+} __packed;
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+extern const struct il_debugfs_ops il3945_debugfs_ops;
+#endif
+
+#endif
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-calib.c b/drivers/net/wireless/intel/iwlegacy/4965-calib.c
new file mode 100644
index 0000000..e78bdef
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/4965-calib.c
@@ -0,0 +1,934 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include "common.h"
+#include "4965.h"
+
+/*****************************************************************************
+ * INIT calibrations framework
+ *****************************************************************************/
+
+struct stats_general_data {
+	u32 beacon_silence_rssi_a;
+	u32 beacon_silence_rssi_b;
+	u32 beacon_silence_rssi_c;
+	u32 beacon_energy_a;
+	u32 beacon_energy_b;
+	u32 beacon_energy_c;
+};
+
+/*****************************************************************************
+ * RUNTIME calibrations framework
+ *****************************************************************************/
+
+/* "false alarms" are signals that our DSP tries to lock onto,
+ *   but then determines that they are either noise, or transmissions
+ *   from a distant wireless network (also "noise", really) that get
+ *   "stepped on" by stronger transmissions within our own network.
+ * This algorithm attempts to set a sensitivity level that is high
+ *   enough to receive all of our own network traffic, but not so
+ *   high that our DSP gets too busy trying to lock onto non-network
+ *   activity/noise. */
+static int
+il4965_sens_energy_cck(struct il_priv *il, u32 norm_fa, u32 rx_enable_time,
+		       struct stats_general_data *rx_info)
+{
+	u32 max_nrg_cck = 0;
+	int i = 0;
+	u8 max_silence_rssi = 0;
+	u32 silence_ref = 0;
+	u8 silence_rssi_a = 0;
+	u8 silence_rssi_b = 0;
+	u8 silence_rssi_c = 0;
+	u32 val;
+
+	/* "false_alarms" values below are cross-multiplications to assess the
+	 *   numbers of false alarms within the measured period of actual Rx
+	 *   (Rx is off when we're txing), vs the min/max expected false alarms
+	 *   (some should be expected if rx is sensitive enough) in a
+	 *   hypothetical listening period of 200 time units (TU), 204.8 msec:
+	 *
+	 * MIN_FA/fixed-time < false_alarms/actual-rx-time < MAX_FA/beacon-time
+	 *
+	 */
+	u32 false_alarms = norm_fa * 200 * 1024;
+	u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
+	u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
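+	/* Editor's note, a worked example of the normalization above
+	 * (numbers hypothetical): norm_fa = 10 scales to
+	 * false_alarms = 10 * 204800 = 2048000, which is compared against
+	 * MAX_FA_CCK * rx_enable_time and MIN_FA_CCK * rx_enable_time for,
+	 * e.g., rx_enable_time = 102400 usec of actual Rx. */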
+	struct il_sensitivity_data *data = NULL;
+	const struct il_sensitivity_ranges *ranges = il->hw_params.sens;
+
+	data = &(il->sensitivity_data);
+
+	data->nrg_auto_corr_silence_diff = 0;
+
+	/* Find max silence rssi among all 3 receivers.
+	 * This is background noise, which may include transmissions from other
+	 *    networks, measured during silence before our network's beacon */
+	silence_rssi_a =
+	    (u8) ((rx_info->beacon_silence_rssi_a & ALL_BAND_FILTER) >> 8);
+	silence_rssi_b =
+	    (u8) ((rx_info->beacon_silence_rssi_b & ALL_BAND_FILTER) >> 8);
+	silence_rssi_c =
+	    (u8) ((rx_info->beacon_silence_rssi_c & ALL_BAND_FILTER) >> 8);
+
+	val = max(silence_rssi_b, silence_rssi_c);
+	max_silence_rssi = max(silence_rssi_a, (u8) val);
+
+	/* Store silence rssi in 20-beacon history table */
+	data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi;
+	data->nrg_silence_idx++;
+	if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L)
+		data->nrg_silence_idx = 0;
+
+	/* Find max silence rssi across 20 beacon history */
+	for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) {
+		val = data->nrg_silence_rssi[i];
+		silence_ref = max(silence_ref, val);
+	}
+	D_CALIB("silence a %u, b %u, c %u, 20-bcn max %u\n", silence_rssi_a,
+		silence_rssi_b, silence_rssi_c, silence_ref);
+
+	/* Find max rx energy (min value!) among all 3 receivers,
+	 *   measured during beacon frame.
+	 * Save it in 10-beacon history table. */
+	i = data->nrg_energy_idx;
+	val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c);
+	data->nrg_value[i] = min(rx_info->beacon_energy_a, val);
+
+	data->nrg_energy_idx++;
+	if (data->nrg_energy_idx >= 10)
+		data->nrg_energy_idx = 0;
+
+	/* Find min rx energy (max value) across 10 beacon history.
+	 * This is the minimum signal level that we want to receive well.
+	 * Add backoff (margin so we don't miss slightly lower energy frames).
+	 * This establishes an upper bound (min value) for energy threshold. */
+	max_nrg_cck = data->nrg_value[0];
+	for (i = 1; i < 10; i++)
+		max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
+	max_nrg_cck += 6;
+
+	D_CALIB("rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
+		rx_info->beacon_energy_a, rx_info->beacon_energy_b,
+		rx_info->beacon_energy_c, max_nrg_cck - 6);
+
+	/* Count number of consecutive beacons with fewer-than-desired
+	 *   false alarms. */
+	if (false_alarms < min_false_alarms)
+		data->num_in_cck_no_fa++;
+	else
+		data->num_in_cck_no_fa = 0;
+	D_CALIB("consecutive bcns with few false alarms = %u\n",
+		data->num_in_cck_no_fa);
+
+	/* If we got too many false alarms this time, reduce sensitivity */
+	if (false_alarms > max_false_alarms &&
+	    data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK) {
+		D_CALIB("norm FA %u > max FA %u\n", false_alarms,
+			max_false_alarms);
+		D_CALIB("... reducing sensitivity\n");
+		data->nrg_curr_state = IL_FA_TOO_MANY;
+		/* Store for "fewer than desired" on later beacon */
+		data->nrg_silence_ref = silence_ref;
+
+		/* increase energy threshold (reduce nrg value)
+		 *   to decrease sensitivity */
+		data->nrg_th_cck = data->nrg_th_cck - NRG_STEP_CCK;
+		/* Else if we got fewer than desired, increase sensitivity */
+	} else if (false_alarms < min_false_alarms) {
+		data->nrg_curr_state = IL_FA_TOO_FEW;
+
+		/* Compare current silence level with the silence level from
+		 *   the most recent beacon that had a healthy number of, or
+		 *   too many, false alarms */
+		data->nrg_auto_corr_silence_diff =
+		    (s32) data->nrg_silence_ref - (s32) silence_ref;
+
+		D_CALIB("norm FA %u < min FA %u, silence diff %d\n",
+			false_alarms, min_false_alarms,
+			data->nrg_auto_corr_silence_diff);
+
+		/* Increase value to increase sensitivity, but only if:
+		 * 1a) previous beacon did *not* have *too many* false alarms
+		 * 1b) AND there's a significant difference in Rx levels
+		 *      from a previous beacon with too many, or healthy # FAs
+		 * OR 2) We've seen a lot of beacons (100) with too few
+		 *       false alarms */
+		if (data->nrg_prev_state != IL_FA_TOO_MANY &&
+		    (data->nrg_auto_corr_silence_diff > NRG_DIFF ||
+		     data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA)) {
+
+			D_CALIB("... increasing sensitivity\n");
+			/* Increase nrg value to increase sensitivity */
+			val = data->nrg_th_cck + NRG_STEP_CCK;
+			data->nrg_th_cck = min((u32) ranges->min_nrg_cck, val);
+		} else {
+			D_CALIB("... but not changing sensitivity\n");
+		}
+
+		/* Else we got a healthy number of false alarms, keep status quo */
+	} else {
+		D_CALIB(" FA in safe zone\n");
+		data->nrg_curr_state = IL_FA_GOOD_RANGE;
+
+		/* Store for use in "fewer than desired" with later beacon */
+		data->nrg_silence_ref = silence_ref;
+
+		/* If previous beacon had too many false alarms,
+		 *   give it some extra margin by reducing sensitivity again
+		 *   (but don't go below measured energy of desired Rx) */
+		if (IL_FA_TOO_MANY == data->nrg_prev_state) {
+			D_CALIB("... increasing margin\n");
+			if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN))
+				data->nrg_th_cck -= NRG_MARGIN;
+			else
+				data->nrg_th_cck = max_nrg_cck;
+		}
+	}
+
+	/* Make sure the energy threshold does not go above the measured
+	 * energy of the desired Rx signals (reduced by backoff margin),
+	 * or else we might start missing Rx frames.
+	 * Lower value is higher energy, so we use max()!
+	 */
+	data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
+	D_CALIB("new nrg_th_cck %u\n", data->nrg_th_cck);
+
+	data->nrg_prev_state = data->nrg_curr_state;
+
+	/* Auto-correlation CCK algorithm */
+	if (false_alarms > min_false_alarms) {
+
+		/* increase auto_corr values to decrease sensitivity
+		 * so the DSP won't be disturbed by the noise
+		 */
+		if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK)
+			data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1;
+		else {
+			val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
+			data->auto_corr_cck =
+			    min((u32) ranges->auto_corr_max_cck, val);
+		}
+		val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
+		data->auto_corr_cck_mrc =
+		    min((u32) ranges->auto_corr_max_cck_mrc, val);
+	} else if (false_alarms < min_false_alarms &&
+		   (data->nrg_auto_corr_silence_diff > NRG_DIFF ||
+		    data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA)) {
+
+		/* Decrease auto_corr values to increase sensitivity */
+		val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
+		data->auto_corr_cck = max((u32) ranges->auto_corr_min_cck, val);
+		val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
+		data->auto_corr_cck_mrc =
+		    max((u32) ranges->auto_corr_min_cck_mrc, val);
+	}
+
+	return 0;
+}
+
+static int
+il4965_sens_auto_corr_ofdm(struct il_priv *il, u32 norm_fa, u32 rx_enable_time)
+{
+	u32 val;
+	u32 false_alarms = norm_fa * 200 * 1024;
+	u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
+	u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
+	struct il_sensitivity_data *data = NULL;
+	const struct il_sensitivity_ranges *ranges = il->hw_params.sens;
+
+	data = &(il->sensitivity_data);
+
+	/* If we got too many false alarms this time, reduce sensitivity */
+	if (false_alarms > max_false_alarms) {
+
+		D_CALIB("norm FA %u > max FA %u)\n", false_alarms,
+			max_false_alarms);
+
+		val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
+		data->auto_corr_ofdm =
+		    min((u32) ranges->auto_corr_max_ofdm, val);
+
+		val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
+		data->auto_corr_ofdm_mrc =
+		    min((u32) ranges->auto_corr_max_ofdm_mrc, val);
+
+		val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
+		data->auto_corr_ofdm_x1 =
+		    min((u32) ranges->auto_corr_max_ofdm_x1, val);
+
+		val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
+		data->auto_corr_ofdm_mrc_x1 =
+		    min((u32) ranges->auto_corr_max_ofdm_mrc_x1, val);
+	}
+
+	/* Else if we got fewer than desired, increase sensitivity */
+	else if (false_alarms < min_false_alarms) {
+
+		D_CALIB("norm FA %u < min FA %u\n", false_alarms,
+			min_false_alarms);
+
+		val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
+		data->auto_corr_ofdm =
+		    max((u32) ranges->auto_corr_min_ofdm, val);
+
+		val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
+		data->auto_corr_ofdm_mrc =
+		    max((u32) ranges->auto_corr_min_ofdm_mrc, val);
+
+		val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
+		data->auto_corr_ofdm_x1 =
+		    max((u32) ranges->auto_corr_min_ofdm_x1, val);
+
+		val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
+		data->auto_corr_ofdm_mrc_x1 =
+		    max((u32) ranges->auto_corr_min_ofdm_mrc_x1, val);
+	} else {
+		D_CALIB("min FA %u < norm FA %u < max FA %u OK\n",
+			min_false_alarms, false_alarms, max_false_alarms);
+	}
+	return 0;
+}
+
+static void
+il4965_prepare_legacy_sensitivity_tbl(struct il_priv *il,
+				      struct il_sensitivity_data *data,
+				      __le16 *tbl)
+{
+	tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX] =
+	    cpu_to_le16((u16) data->auto_corr_ofdm);
+	tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] =
+	    cpu_to_le16((u16) data->auto_corr_ofdm_mrc);
+	tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX] =
+	    cpu_to_le16((u16) data->auto_corr_ofdm_x1);
+	tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] =
+	    cpu_to_le16((u16) data->auto_corr_ofdm_mrc_x1);
+
+	tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX] =
+	    cpu_to_le16((u16) data->auto_corr_cck);
+	tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
+	    cpu_to_le16((u16) data->auto_corr_cck_mrc);
+
+	tbl[HD_MIN_ENERGY_CCK_DET_IDX] = cpu_to_le16((u16) data->nrg_th_cck);
+	tbl[HD_MIN_ENERGY_OFDM_DET_IDX] = cpu_to_le16((u16) data->nrg_th_ofdm);
+
+	tbl[HD_BARKER_CORR_TH_ADD_MIN_IDX] =
+	    cpu_to_le16(data->barker_corr_th_min);
+	tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_IDX] =
+	    cpu_to_le16(data->barker_corr_th_min_mrc);
+	tbl[HD_OFDM_ENERGY_TH_IN_IDX] = cpu_to_le16(data->nrg_th_cca);
+
+	D_CALIB("ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
+		data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
+		data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
+		data->nrg_th_ofdm);
+
+	D_CALIB("cck: ac %u mrc %u thresh %u\n", data->auto_corr_cck,
+		data->auto_corr_cck_mrc, data->nrg_th_cck);
+}
+
+/* Prepare a C_SENSITIVITY, send to uCode if values have changed */
+static int
+il4965_sensitivity_write(struct il_priv *il)
+{
+	struct il_sensitivity_cmd cmd;
+	struct il_sensitivity_data *data = NULL;
+	struct il_host_cmd cmd_out = {
+		.id = C_SENSITIVITY,
+		.len = sizeof(struct il_sensitivity_cmd),
+		.flags = CMD_ASYNC,
+		.data = &cmd,
+	};
+
+	data = &(il->sensitivity_data);
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	il4965_prepare_legacy_sensitivity_tbl(il, data, &cmd.table[0]);
+
+	/* Update uCode's "work" table, and copy it to DSP */
+	cmd.control = C_SENSITIVITY_CONTROL_WORK_TBL;
+
+	/* Don't send command to uCode if nothing has changed */
+	if (!memcmp
+	    (&cmd.table[0], &(il->sensitivity_tbl[0]),
+	     sizeof(u16) * HD_TBL_SIZE)) {
+		D_CALIB("No change in C_SENSITIVITY\n");
+		return 0;
+	}
+
+	/* Copy table for comparison next time */
+	memcpy(&(il->sensitivity_tbl[0]), &(cmd.table[0]),
+	       sizeof(u16) * HD_TBL_SIZE);
+
+	return il_send_cmd(il, &cmd_out);
+}
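+/* Editor's note: the memcmp()/memcpy() pair above implements simple change
+ * detection -- the freshly built table is compared against the copy cached
+ * in il->sensitivity_tbl, and the (asynchronous) host command is issued
+ * only when at least one threshold actually moved. */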
+
+void
+il4965_init_sensitivity(struct il_priv *il)
+{
+	int ret = 0;
+	int i;
+	struct il_sensitivity_data *data = NULL;
+	const struct il_sensitivity_ranges *ranges = il->hw_params.sens;
+
+	if (il->disable_sens_cal)
+		return;
+
+	D_CALIB("Start il4965_init_sensitivity\n");
+
+	/* Clear driver's sensitivity algo data */
+	data = &(il->sensitivity_data);
+
+	if (ranges == NULL)
+		return;
+
+	memset(data, 0, sizeof(struct il_sensitivity_data));
+
+	data->num_in_cck_no_fa = 0;
+	data->nrg_curr_state = IL_FA_TOO_MANY;
+	data->nrg_prev_state = IL_FA_TOO_MANY;
+	data->nrg_silence_ref = 0;
+	data->nrg_silence_idx = 0;
+	data->nrg_energy_idx = 0;
+
+	for (i = 0; i < 10; i++)
+		data->nrg_value[i] = 0;
+
+	for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
+		data->nrg_silence_rssi[i] = 0;
+
+	data->auto_corr_ofdm = ranges->auto_corr_min_ofdm;
+	data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc;
+	data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1;
+	data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1;
+	data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
+	data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc;
+	data->nrg_th_cck = ranges->nrg_th_cck;
+	data->nrg_th_ofdm = ranges->nrg_th_ofdm;
+	data->barker_corr_th_min = ranges->barker_corr_th_min;
+	data->barker_corr_th_min_mrc = ranges->barker_corr_th_min_mrc;
+	data->nrg_th_cca = ranges->nrg_th_cca;
+
+	data->last_bad_plcp_cnt_ofdm = 0;
+	data->last_fa_cnt_ofdm = 0;
+	data->last_bad_plcp_cnt_cck = 0;
+	data->last_fa_cnt_cck = 0;
+
+	ret |= il4965_sensitivity_write(il);
+	D_CALIB("<<return 0x%X\n", ret);
+}
+
+void
+il4965_sensitivity_calibration(struct il_priv *il, void *resp)
+{
+	u32 rx_enable_time;
+	u32 fa_cck;
+	u32 fa_ofdm;
+	u32 bad_plcp_cck;
+	u32 bad_plcp_ofdm;
+	u32 norm_fa_ofdm;
+	u32 norm_fa_cck;
+	struct il_sensitivity_data *data = NULL;
+	struct stats_rx_non_phy *rx_info;
+	struct stats_rx_phy *ofdm, *cck;
+	unsigned long flags;
+	struct stats_general_data statis;
+
+	if (il->disable_sens_cal)
+		return;
+
+	data = &(il->sensitivity_data);
+
+	if (!il_is_any_associated(il)) {
+		D_CALIB("<< - not associated\n");
+		return;
+	}
+
+	spin_lock_irqsave(&il->lock, flags);
+
+	rx_info = &(((struct il_notif_stats *)resp)->rx.general);
+	ofdm = &(((struct il_notif_stats *)resp)->rx.ofdm);
+	cck = &(((struct il_notif_stats *)resp)->rx.cck);
+
+	if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
+		D_CALIB("<< invalid data.\n");
+		spin_unlock_irqrestore(&il->lock, flags);
+		return;
+	}
+
+	/* Extract Statistics: */
+	rx_enable_time = le32_to_cpu(rx_info->channel_load);
+	fa_cck = le32_to_cpu(cck->false_alarm_cnt);
+	fa_ofdm = le32_to_cpu(ofdm->false_alarm_cnt);
+	bad_plcp_cck = le32_to_cpu(cck->plcp_err);
+	bad_plcp_ofdm = le32_to_cpu(ofdm->plcp_err);
+
+	statis.beacon_silence_rssi_a =
+	    le32_to_cpu(rx_info->beacon_silence_rssi_a);
+	statis.beacon_silence_rssi_b =
+	    le32_to_cpu(rx_info->beacon_silence_rssi_b);
+	statis.beacon_silence_rssi_c =
+	    le32_to_cpu(rx_info->beacon_silence_rssi_c);
+	statis.beacon_energy_a = le32_to_cpu(rx_info->beacon_energy_a);
+	statis.beacon_energy_b = le32_to_cpu(rx_info->beacon_energy_b);
+	statis.beacon_energy_c = le32_to_cpu(rx_info->beacon_energy_c);
+
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	D_CALIB("rx_enable_time = %u usecs\n", rx_enable_time);
+
+	if (!rx_enable_time) {
+		D_CALIB("<< RX Enable Time == 0!\n");
+		return;
+	}
+
+	/* These stats increase monotonically, and do not reset
+	 *   at each beacon.  Calculate difference from last value, or just
+	 *   use the new stats value if it has reset or wrapped around. */
+	if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
+		data->last_bad_plcp_cnt_cck = bad_plcp_cck;
+	else {
+		bad_plcp_cck -= data->last_bad_plcp_cnt_cck;
+		data->last_bad_plcp_cnt_cck += bad_plcp_cck;
+	}
+
+	if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm)
+		data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm;
+	else {
+		bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm;
+		data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm;
+	}
+
+	if (data->last_fa_cnt_ofdm > fa_ofdm)
+		data->last_fa_cnt_ofdm = fa_ofdm;
+	else {
+		fa_ofdm -= data->last_fa_cnt_ofdm;
+		data->last_fa_cnt_ofdm += fa_ofdm;
+	}
+
+	if (data->last_fa_cnt_cck > fa_cck)
+		data->last_fa_cnt_cck = fa_cck;
+	else {
+		fa_cck -= data->last_fa_cnt_cck;
+		data->last_fa_cnt_cck += fa_cck;
+	}
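+	/* Editor's note, worked example of the delta logic above
+	 * (hypothetical numbers): if the stored count is 100 and the new
+	 * report says 130, the local variable becomes the delta, 30, and
+	 * the stored count advances to 130.  If the new report says 20
+	 * (uCode reset or wrap), the stored count is resynced to 20 and the
+	 * raw value is used as this round's delta. */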
+
+	/* Total aborted signal locks */
+	norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
+	norm_fa_cck = fa_cck + bad_plcp_cck;
+
+	D_CALIB("cck: fa %u badp %u  ofdm: fa %u badp %u\n", fa_cck,
+		bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
+
+	il4965_sens_auto_corr_ofdm(il, norm_fa_ofdm, rx_enable_time);
+	il4965_sens_energy_cck(il, norm_fa_cck, rx_enable_time, &statis);
+
+	il4965_sensitivity_write(il);
+}
+
+static inline u8
+il4965_find_first_chain(u8 mask)
+{
+	if (mask & ANT_A)
+		return CHAIN_A;
+	if (mask & ANT_B)
+		return CHAIN_B;
+	return CHAIN_C;
+}
+
+/**
+ * Run disconnected antenna algorithm to find out which antennas are
+ * disconnected.
+ */
+static void
+il4965_find_disconn_antenna(struct il_priv *il, u32 * average_sig,
+			    struct il_chain_noise_data *data)
+{
+	u32 active_chains = 0;
+	u32 max_average_sig;
+	u16 max_average_sig_antenna_i;
+	u8 num_tx_chains;
+	u8 first_chain;
+	u16 i = 0;
+
+	average_sig[0] =
+	    data->chain_signal_a /
+	    il->cfg->chain_noise_num_beacons;
+	average_sig[1] =
+	    data->chain_signal_b /
+	    il->cfg->chain_noise_num_beacons;
+	average_sig[2] =
+	    data->chain_signal_c /
+	    il->cfg->chain_noise_num_beacons;
+
+	if (average_sig[0] >= average_sig[1]) {
+		max_average_sig = average_sig[0];
+		max_average_sig_antenna_i = 0;
+		active_chains = (1 << max_average_sig_antenna_i);
+	} else {
+		max_average_sig = average_sig[1];
+		max_average_sig_antenna_i = 1;
+		active_chains = (1 << max_average_sig_antenna_i);
+	}
+
+	if (average_sig[2] >= max_average_sig) {
+		max_average_sig = average_sig[2];
+		max_average_sig_antenna_i = 2;
+		active_chains = (1 << max_average_sig_antenna_i);
+	}
+
+	D_CALIB("average_sig: a %d b %d c %d\n", average_sig[0], average_sig[1],
+		average_sig[2]);
+	D_CALIB("max_average_sig = %d, antenna %d\n", max_average_sig,
+		max_average_sig_antenna_i);
+
+	/* Compare signal strengths for all 3 receivers. */
+	for (i = 0; i < NUM_RX_CHAINS; i++) {
+		if (i != max_average_sig_antenna_i) {
+			s32 rssi_delta = (max_average_sig - average_sig[i]);
+
+			/* If this signal is very weak compared with the
+			 * strongest, mark the chain as disconnected. */
+			if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
+				data->disconn_array[i] = 1;
+			else
+				active_chains |= (1 << i);
+			D_CALIB("i = %d  rssiDelta = %d  "
+				"disconn_array[i] = %d\n", i, rssi_delta,
+				data->disconn_array[i]);
+		}
+	}
+
+	/*
+	 * The above algorithm sometimes fails when the ucode
+	 * reports 0 for all chains. It's not clear why that
+	 * happens to start with, but it is then causing trouble
+	 * because this can make us enable more chains than the
+	 * hardware really has.
+	 *
+	 * To be safe, simply mask out any chains that we know
+	 * are not on the device.
+	 */
+	active_chains &= il->hw_params.valid_rx_ant;
+
+	num_tx_chains = 0;
+	for (i = 0; i < NUM_RX_CHAINS; i++) {
+		/* loops on all the bits of
+		 * il->hw_params.valid_tx_ant */
+		u8 ant_msk = (1 << i);
+		if (!(il->hw_params.valid_tx_ant & ant_msk))
+			continue;
+
+		num_tx_chains++;
+		if (data->disconn_array[i] == 0)
+			/* there is a Tx antenna connected */
+			break;
+		if (num_tx_chains == il->hw_params.tx_chains_num &&
+		    data->disconn_array[i]) {
+			/*
+			 * If all chains are disconnected
+			 * connect the first valid tx chain
+			 */
+			first_chain =
+			    il4965_find_first_chain(il->cfg->valid_tx_ant);
+			data->disconn_array[first_chain] = 0;
+			active_chains |= BIT(first_chain);
+			D_CALIB("All Tx chains are disconnected"
+				"- declare %d as connected\n", first_chain);
+			break;
+		}
+	}
+
+	if (active_chains != il->hw_params.valid_rx_ant &&
+	    active_chains != il->chain_noise_data.active_chains)
+		D_CALIB("Detected that not all antennas are connected! "
+			"Connected: %#x, valid: %#x.\n", active_chains,
+			il->hw_params.valid_rx_ant);
+
+	/* Save for use within RXON, TX, SCAN commands, etc. */
+	data->active_chains = active_chains;
+	D_CALIB("active_chains (bitwise) = 0x%x\n", active_chains);
+}
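+/* Editor's note, worked example (hypothetical numbers): with per-chain
+ * averages {a: 50, b: 10, c: 48}, chain a is strongest; the deltas are
+ * b: 40 and c: 2, so with a pathloss limit of e.g. 15 only chain b is
+ * marked disconnected and active_chains ends up as a|c (0x5), before
+ * masking with valid_rx_ant. */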
+
+static void
+il4965_gain_computation(struct il_priv *il, u32 * average_noise,
+			u16 min_average_noise_antenna_i, u32 min_average_noise,
+			u8 default_chain)
+{
+	int i, ret;
+	struct il_chain_noise_data *data = &il->chain_noise_data;
+
+	data->delta_gain_code[min_average_noise_antenna_i] = 0;
+
+	for (i = default_chain; i < NUM_RX_CHAINS; i++) {
+		s32 delta_g = 0;
+
+		if (!data->disconn_array[i] &&
+		    data->delta_gain_code[i] ==
+		    CHAIN_NOISE_DELTA_GAIN_INIT_VAL) {
+			delta_g = average_noise[i] - min_average_noise;
+			data->delta_gain_code[i] = (u8) ((delta_g * 10) / 15);
+			data->delta_gain_code[i] =
+			    min(data->delta_gain_code[i],
+				(u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
+
+			data->delta_gain_code[i] =
+			    (data->delta_gain_code[i] | (1 << 2));
+		} else {
+			data->delta_gain_code[i] = 0;
+		}
+	}
+	D_CALIB("delta_gain_codes: a %d b %d c %d\n", data->delta_gain_code[0],
+		data->delta_gain_code[1], data->delta_gain_code[2]);
+
+	/* Differential gain gets sent to uCode only once */
+	if (!data->radio_write) {
+		struct il_calib_diff_gain_cmd cmd;
+		data->radio_write = 1;
+
+		memset(&cmd, 0, sizeof(cmd));
+		cmd.hdr.op_code = IL_PHY_CALIBRATE_DIFF_GAIN_CMD;
+		cmd.diff_gain_a = data->delta_gain_code[0];
+		cmd.diff_gain_b = data->delta_gain_code[1];
+		cmd.diff_gain_c = data->delta_gain_code[2];
+		ret = il_send_cmd_pdu(il, C_PHY_CALIBRATION, sizeof(cmd), &cmd);
+		if (ret)
+			D_CALIB("fail sending cmd " "C_PHY_CALIBRATION\n");
+
+		/* TODO we might want to recalculate
+		 * rx_chain in rxon cmd */
+
+		/* Mark so we run this algo only once! */
+		data->state = IL_CHAIN_NOISE_CALIBRATED;
+	}
+}
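+/* Editor's note, a worked pass through il4965_gain_computation()
+ * (hypothetical numbers, assuming no clamping): with average noise
+ * {a: 120, b: 105, c: 111}, chain b is quietest and keeps code 0.
+ * Chain a gets delta_g = 15 -> (15 * 10) / 15 = 10, then | (1 << 2) = 14;
+ * chain c gets delta_g = 6 -> 4, then | (1 << 2) = 4. */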
+
+/*
+ * Accumulate 16 beacons of signal and noise stats for each of
+ *   3 receivers/antennas/rx-chains, then figure out:
+ * 1)  Which antennas are connected.
+ * 2)  Differential rx gain settings to balance the 3 receivers.
+ */
+void
+il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp)
+{
+	struct il_chain_noise_data *data = NULL;
+
+	u32 chain_noise_a;
+	u32 chain_noise_b;
+	u32 chain_noise_c;
+	u32 chain_sig_a;
+	u32 chain_sig_b;
+	u32 chain_sig_c;
+	u32 average_sig[NUM_RX_CHAINS] = { INITIALIZATION_VALUE };
+	u32 average_noise[NUM_RX_CHAINS] = { INITIALIZATION_VALUE };
+	u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
+	u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
+	u16 i = 0;
+	u16 rxon_chnum = INITIALIZATION_VALUE;
+	u16 stat_chnum = INITIALIZATION_VALUE;
+	u8 rxon_band24;
+	u8 stat_band24;
+	unsigned long flags;
+	struct stats_rx_non_phy *rx_info;
+
+	if (il->disable_chain_noise_cal)
+		return;
+
+	data = &(il->chain_noise_data);
+
+	/*
+	 * Accumulate just the first "chain_noise_num_beacons" after
+	 * the first association, then we're done forever.
+	 */
+	if (data->state != IL_CHAIN_NOISE_ACCUMULATE) {
+		if (data->state == IL_CHAIN_NOISE_ALIVE)
+			D_CALIB("Wait for noise calib reset\n");
+		return;
+	}
+
+	spin_lock_irqsave(&il->lock, flags);
+
+	rx_info = &(((struct il_notif_stats *)stat_resp)->rx.general);
+
+	if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
+		D_CALIB(" << Interference data unavailable\n");
+		spin_unlock_irqrestore(&il->lock, flags);
+		return;
+	}
+
+	rxon_band24 = !!(il->staging.flags & RXON_FLG_BAND_24G_MSK);
+	rxon_chnum = le16_to_cpu(il->staging.channel);
+
+	stat_band24 =
+	    !!(((struct il_notif_stats *)stat_resp)->
+	       flag & STATS_REPLY_FLG_BAND_24G_MSK);
+	stat_chnum =
+	    le32_to_cpu(((struct il_notif_stats *)stat_resp)->flag) >> 16;
+
+	/* Make sure we accumulate data for just the associated channel
+	 *   (even if scanning). */
+	if (rxon_chnum != stat_chnum || rxon_band24 != stat_band24) {
+		D_CALIB("Stats not from chan=%d, band24=%d\n", rxon_chnum,
+			rxon_band24);
+		spin_unlock_irqrestore(&il->lock, flags);
+		return;
+	}
+
+	/*
+	 *  Accumulate beacon stats values across
+	 * "chain_noise_num_beacons"
+	 */
+	chain_noise_a =
+	    le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
+	chain_noise_b =
+	    le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
+	chain_noise_c =
+	    le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
+
+	chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
+	chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
+	chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
+
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	data->beacon_count++;
+
+	data->chain_noise_a = (chain_noise_a + data->chain_noise_a);
+	data->chain_noise_b = (chain_noise_b + data->chain_noise_b);
+	data->chain_noise_c = (chain_noise_c + data->chain_noise_c);
+
+	data->chain_signal_a = (chain_sig_a + data->chain_signal_a);
+	data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
+	data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
+
+	D_CALIB("chan=%d, band24=%d, beacon=%d\n", rxon_chnum, rxon_band24,
+		data->beacon_count);
+	D_CALIB("chain_sig: a %d b %d c %d\n", chain_sig_a, chain_sig_b,
+		chain_sig_c);
+	D_CALIB("chain_noise: a %d b %d c %d\n", chain_noise_a, chain_noise_b,
+		chain_noise_c);
+
+	/* If this is the "chain_noise_num_beacons", determine:
+	 * 1)  Disconnected antennas (using signal strengths)
+	 * 2)  Differential gain (using silence noise) to balance receivers */
+	if (data->beacon_count != il->cfg->chain_noise_num_beacons)
+		return;
+
+	/* Analyze signal for disconnected antenna */
+	il4965_find_disconn_antenna(il, average_sig, data);
+
+	/* Analyze noise for rx balance */
+	average_noise[0] =
+	    data->chain_noise_a / il->cfg->chain_noise_num_beacons;
+	average_noise[1] =
+	    data->chain_noise_b / il->cfg->chain_noise_num_beacons;
+	average_noise[2] =
+	    data->chain_noise_c / il->cfg->chain_noise_num_beacons;
+
+	for (i = 0; i < NUM_RX_CHAINS; i++) {
+		if (!data->disconn_array[i] &&
+		    average_noise[i] <= min_average_noise) {
+			/* This means that chain i is active and has
+			 * the lowest noise value so far: */
+			min_average_noise = average_noise[i];
+			min_average_noise_antenna_i = i;
+		}
+	}
+
+	D_CALIB("average_noise: a %d b %d c %d\n", average_noise[0],
+		average_noise[1], average_noise[2]);
+
+	D_CALIB("min_average_noise = %d, antenna %d\n", min_average_noise,
+		min_average_noise_antenna_i);
+
+	il4965_gain_computation(il, average_noise, min_average_noise_antenna_i,
+				min_average_noise,
+				il4965_find_first_chain(il->cfg->valid_rx_ant));
+
+	/* Some power changes may have been made during the calibration.
+	 * Update and commit the RXON
+	 */
+	if (il->ops->update_chain_flags)
+		il->ops->update_chain_flags(il);
+
+	data->state = IL_CHAIN_NOISE_DONE;
+	il_power_update_mode(il, false);
+}
+
+void
+il4965_reset_run_time_calib(struct il_priv *il)
+{
+	int i;
+	memset(&(il->sensitivity_data), 0, sizeof(struct il_sensitivity_data));
+	memset(&(il->chain_noise_data), 0, sizeof(struct il_chain_noise_data));
+	for (i = 0; i < NUM_RX_CHAINS; i++)
+		il->chain_noise_data.delta_gain_code[i] =
+		    CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
+
+	/* Ask for stats now, the uCode will send notification
+	 * periodically after association */
+	il_send_stats_request(il, CMD_ASYNC, true);
+}
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-debug.c b/drivers/net/wireless/intel/iwlegacy/4965-debug.c
new file mode 100644
index 0000000..e0597bf
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/4965-debug.c
@@ -0,0 +1,752 @@
+/******************************************************************************
+*
+* GPL LICENSE SUMMARY
+*
+* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+* USA
+*
+* The full GNU General Public License is included in this distribution
+* in the file called LICENSE.GPL.
+*
+* Contact Information:
+*  Intel Linux Wireless <ilw@linux.intel.com>
+* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+*****************************************************************************/
+#include "common.h"
+#include "4965.h"
+
+static const char *fmt_value = "  %-30s %10u\n";
+static const char *fmt_table = "  %-30s %10u  %10u  %10u  %10u\n";
+static const char *fmt_header =
+    "%-32s    current  cumulative       delta         max\n";
+
+static int
+il4965_stats_flag(struct il_priv *il, char *buf, int bufsz)
+{
+	int p = 0;
+	u32 flag;
+
+	flag = le32_to_cpu(il->_4965.stats.flag);
+
+	p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
+	if (flag & UCODE_STATS_CLEAR_MSK)
+		p += scnprintf(buf + p, bufsz - p,
+			       "\tStatistics have been cleared\n");
+	p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
+		       (flag & UCODE_STATS_FREQUENCY_MSK) ? "2.4 GHz" :
+		       "5.2 GHz");
+	p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
+		       (flag & UCODE_STATS_NARROW_BAND_MSK) ? "enabled" :
+		       "disabled");
+
+	return p;
+}
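+/* Editor's note: the read handlers below all follow the same pattern --
+ * scnprintf() returns the number of bytes actually written (never more
+ * than the size passed in), so accumulating into p/pos and passing
+ * bufsz - pos keeps every append safely bounded. */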
+
+static ssize_t
+il4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+			   size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	int pos = 0;
+	char *buf;
+	int bufsz =
+	    sizeof(struct stats_rx_phy) * 40 +
+	    sizeof(struct stats_rx_non_phy) * 40 +
+	    sizeof(struct stats_rx_ht_phy) * 40 + 400;
+	ssize_t ret;
+	struct stats_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
+	struct stats_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
+	struct stats_rx_non_phy *general, *accum_general;
+	struct stats_rx_non_phy *delta_general, *max_general;
+	struct stats_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
+
+	if (!il_is_alive(il))
+		return -EAGAIN;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf) {
+		IL_ERR("Can not allocate Buffer\n");
+		return -ENOMEM;
+	}
+
+	/*
+	 * The statistics displayed here are based on the last stats
+	 * notification from the uCode; they might not reflect the current
+	 * uCode activity.
+	 */
+	ofdm = &il->_4965.stats.rx.ofdm;
+	cck = &il->_4965.stats.rx.cck;
+	general = &il->_4965.stats.rx.general;
+	ht = &il->_4965.stats.rx.ofdm_ht;
+	accum_ofdm = &il->_4965.accum_stats.rx.ofdm;
+	accum_cck = &il->_4965.accum_stats.rx.cck;
+	accum_general = &il->_4965.accum_stats.rx.general;
+	accum_ht = &il->_4965.accum_stats.rx.ofdm_ht;
+	delta_ofdm = &il->_4965.delta_stats.rx.ofdm;
+	delta_cck = &il->_4965.delta_stats.rx.cck;
+	delta_general = &il->_4965.delta_stats.rx.general;
+	delta_ht = &il->_4965.delta_stats.rx.ofdm_ht;
+	max_ofdm = &il->_4965.max_delta.rx.ofdm;
+	max_cck = &il->_4965.max_delta.rx.cck;
+	max_general = &il->_4965.max_delta.rx.general;
+	max_ht = &il->_4965.max_delta.rx.ofdm_ht;
+
+	pos += il4965_stats_flag(il, buf, bufsz);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_header,
+		      "Statistics_Rx - OFDM:");
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "ina_cnt:",
+		      le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt,
+		      delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_cnt:",
+		      le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
+		      delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
+		      le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
+		      delta_ofdm->plcp_err, max_ofdm->plcp_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
+		      le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
+		      delta_ofdm->crc32_err, max_ofdm->crc32_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
+		      le32_to_cpu(ofdm->overrun_err), accum_ofdm->overrun_err,
+		      delta_ofdm->overrun_err, max_ofdm->overrun_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:",
+		      le32_to_cpu(ofdm->early_overrun_err),
+		      accum_ofdm->early_overrun_err,
+		      delta_ofdm->early_overrun_err,
+		      max_ofdm->early_overrun_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
+		      le32_to_cpu(ofdm->crc32_good), accum_ofdm->crc32_good,
+		      delta_ofdm->crc32_good, max_ofdm->crc32_good);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "false_alarm_cnt:",
+		      le32_to_cpu(ofdm->false_alarm_cnt),
+		      accum_ofdm->false_alarm_cnt, delta_ofdm->false_alarm_cnt,
+		      max_ofdm->false_alarm_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_sync_err_cnt:",
+		      le32_to_cpu(ofdm->fina_sync_err_cnt),
+		      accum_ofdm->fina_sync_err_cnt,
+		      delta_ofdm->fina_sync_err_cnt,
+		      max_ofdm->fina_sync_err_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "sfd_timeout:",
+		      le32_to_cpu(ofdm->sfd_timeout), accum_ofdm->sfd_timeout,
+		      delta_ofdm->sfd_timeout, max_ofdm->sfd_timeout);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_timeout:",
+		      le32_to_cpu(ofdm->fina_timeout), accum_ofdm->fina_timeout,
+		      delta_ofdm->fina_timeout, max_ofdm->fina_timeout);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "unresponded_rts:",
+		      le32_to_cpu(ofdm->unresponded_rts),
+		      accum_ofdm->unresponded_rts, delta_ofdm->unresponded_rts,
+		      max_ofdm->unresponded_rts);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "rxe_frame_lmt_ovrun:",
+		      le32_to_cpu(ofdm->rxe_frame_limit_overrun),
+		      accum_ofdm->rxe_frame_limit_overrun,
+		      delta_ofdm->rxe_frame_limit_overrun,
+		      max_ofdm->rxe_frame_limit_overrun);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ack_cnt:",
+		      le32_to_cpu(ofdm->sent_ack_cnt), accum_ofdm->sent_ack_cnt,
+		      delta_ofdm->sent_ack_cnt, max_ofdm->sent_ack_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_cts_cnt:",
+		      le32_to_cpu(ofdm->sent_cts_cnt), accum_ofdm->sent_cts_cnt,
+		      delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ba_rsp_cnt:",
+		      le32_to_cpu(ofdm->sent_ba_rsp_cnt),
+		      accum_ofdm->sent_ba_rsp_cnt, delta_ofdm->sent_ba_rsp_cnt,
+		      max_ofdm->sent_ba_rsp_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_self_kill:",
+		      le32_to_cpu(ofdm->dsp_self_kill),
+		      accum_ofdm->dsp_self_kill, delta_ofdm->dsp_self_kill,
+		      max_ofdm->dsp_self_kill);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
+		      le32_to_cpu(ofdm->mh_format_err),
+		      accum_ofdm->mh_format_err, delta_ofdm->mh_format_err,
+		      max_ofdm->mh_format_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table,
+		      "re_acq_main_rssi_sum:",
+		      le32_to_cpu(ofdm->re_acq_main_rssi_sum),
+		      accum_ofdm->re_acq_main_rssi_sum,
+		      delta_ofdm->re_acq_main_rssi_sum,
+		      max_ofdm->re_acq_main_rssi_sum);
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_header,
+		      "Statistics_Rx - CCK:");
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "ina_cnt:",
+		      le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
+		      delta_cck->ina_cnt, max_cck->ina_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_cnt:",
+		      le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
+		      delta_cck->fina_cnt, max_cck->fina_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
+		      le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
+		      delta_cck->plcp_err, max_cck->plcp_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
+		      le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
+		      delta_cck->crc32_err, max_cck->crc32_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
+		      le32_to_cpu(cck->overrun_err), accum_cck->overrun_err,
+		      delta_cck->overrun_err, max_cck->overrun_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:",
+		      le32_to_cpu(cck->early_overrun_err),
+		      accum_cck->early_overrun_err,
+		      delta_cck->early_overrun_err, max_cck->early_overrun_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
+		      le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
+		      delta_cck->crc32_good, max_cck->crc32_good);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "false_alarm_cnt:",
+		      le32_to_cpu(cck->false_alarm_cnt),
+		      accum_cck->false_alarm_cnt, delta_cck->false_alarm_cnt,
+		      max_cck->false_alarm_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_sync_err_cnt:",
+		      le32_to_cpu(cck->fina_sync_err_cnt),
+		      accum_cck->fina_sync_err_cnt,
+		      delta_cck->fina_sync_err_cnt, max_cck->fina_sync_err_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "sfd_timeout:",
+		      le32_to_cpu(cck->sfd_timeout), accum_cck->sfd_timeout,
+		      delta_cck->sfd_timeout, max_cck->sfd_timeout);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_timeout:",
+		      le32_to_cpu(cck->fina_timeout), accum_cck->fina_timeout,
+		      delta_cck->fina_timeout, max_cck->fina_timeout);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "unresponded_rts:",
+		      le32_to_cpu(cck->unresponded_rts),
+		      accum_cck->unresponded_rts, delta_cck->unresponded_rts,
+		      max_cck->unresponded_rts);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "rxe_frame_lmt_ovrun:",
+		      le32_to_cpu(cck->rxe_frame_limit_overrun),
+		      accum_cck->rxe_frame_limit_overrun,
+		      delta_cck->rxe_frame_limit_overrun,
+		      max_cck->rxe_frame_limit_overrun);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ack_cnt:",
+		      le32_to_cpu(cck->sent_ack_cnt), accum_cck->sent_ack_cnt,
+		      delta_cck->sent_ack_cnt, max_cck->sent_ack_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_cts_cnt:",
+		      le32_to_cpu(cck->sent_cts_cnt), accum_cck->sent_cts_cnt,
+		      delta_cck->sent_cts_cnt, max_cck->sent_cts_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ba_rsp_cnt:",
+		      le32_to_cpu(cck->sent_ba_rsp_cnt),
+		      accum_cck->sent_ba_rsp_cnt, delta_cck->sent_ba_rsp_cnt,
+		      max_cck->sent_ba_rsp_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_self_kill:",
+		      le32_to_cpu(cck->dsp_self_kill), accum_cck->dsp_self_kill,
+		      delta_cck->dsp_self_kill, max_cck->dsp_self_kill);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
+		      le32_to_cpu(cck->mh_format_err), accum_cck->mh_format_err,
+		      delta_cck->mh_format_err, max_cck->mh_format_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table,
+		      "re_acq_main_rssi_sum:",
+		      le32_to_cpu(cck->re_acq_main_rssi_sum),
+		      accum_cck->re_acq_main_rssi_sum,
+		      delta_cck->re_acq_main_rssi_sum,
+		      max_cck->re_acq_main_rssi_sum);
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_header,
+		      "Statistics_Rx - GENERAL:");
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "bogus_cts:",
+		      le32_to_cpu(general->bogus_cts), accum_general->bogus_cts,
+		      delta_general->bogus_cts, max_general->bogus_cts);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "bogus_ack:",
+		      le32_to_cpu(general->bogus_ack), accum_general->bogus_ack,
+		      delta_general->bogus_ack, max_general->bogus_ack);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "non_bssid_frames:",
+		      le32_to_cpu(general->non_bssid_frames),
+		      accum_general->non_bssid_frames,
+		      delta_general->non_bssid_frames,
+		      max_general->non_bssid_frames);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "filtered_frames:",
+		      le32_to_cpu(general->filtered_frames),
+		      accum_general->filtered_frames,
+		      delta_general->filtered_frames,
+		      max_general->filtered_frames);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "non_channel_beacons:",
+		      le32_to_cpu(general->non_channel_beacons),
+		      accum_general->non_channel_beacons,
+		      delta_general->non_channel_beacons,
+		      max_general->non_channel_beacons);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "channel_beacons:",
+		      le32_to_cpu(general->channel_beacons),
+		      accum_general->channel_beacons,
+		      delta_general->channel_beacons,
+		      max_general->channel_beacons);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "num_missed_bcon:",
+		      le32_to_cpu(general->num_missed_bcon),
+		      accum_general->num_missed_bcon,
+		      delta_general->num_missed_bcon,
+		      max_general->num_missed_bcon);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table,
+		      "adc_rx_saturation_time:",
+		      le32_to_cpu(general->adc_rx_saturation_time),
+		      accum_general->adc_rx_saturation_time,
+		      delta_general->adc_rx_saturation_time,
+		      max_general->adc_rx_saturation_time);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table,
+		      "ina_detect_search_tm:",
+		      le32_to_cpu(general->ina_detection_search_time),
+		      accum_general->ina_detection_search_time,
+		      delta_general->ina_detection_search_time,
+		      max_general->ina_detection_search_time);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table,
+		      "beacon_silence_rssi_a:",
+		      le32_to_cpu(general->beacon_silence_rssi_a),
+		      accum_general->beacon_silence_rssi_a,
+		      delta_general->beacon_silence_rssi_a,
+		      max_general->beacon_silence_rssi_a);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table,
+		      "beacon_silence_rssi_b:",
+		      le32_to_cpu(general->beacon_silence_rssi_b),
+		      accum_general->beacon_silence_rssi_b,
+		      delta_general->beacon_silence_rssi_b,
+		      max_general->beacon_silence_rssi_b);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table,
+		      "beacon_silence_rssi_c:",
+		      le32_to_cpu(general->beacon_silence_rssi_c),
+		      accum_general->beacon_silence_rssi_c,
+		      delta_general->beacon_silence_rssi_c,
+		      max_general->beacon_silence_rssi_c);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table,
+		      "interference_data_flag:",
+		      le32_to_cpu(general->interference_data_flag),
+		      accum_general->interference_data_flag,
+		      delta_general->interference_data_flag,
+		      max_general->interference_data_flag);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "channel_load:",
+		      le32_to_cpu(general->channel_load),
+		      accum_general->channel_load, delta_general->channel_load,
+		      max_general->channel_load);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_false_alarms:",
+		      le32_to_cpu(general->dsp_false_alarms),
+		      accum_general->dsp_false_alarms,
+		      delta_general->dsp_false_alarms,
+		      max_general->dsp_false_alarms);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_a:",
+		      le32_to_cpu(general->beacon_rssi_a),
+		      accum_general->beacon_rssi_a,
+		      delta_general->beacon_rssi_a, max_general->beacon_rssi_a);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_b:",
+		      le32_to_cpu(general->beacon_rssi_b),
+		      accum_general->beacon_rssi_b,
+		      delta_general->beacon_rssi_b, max_general->beacon_rssi_b);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_c:",
+		      le32_to_cpu(general->beacon_rssi_c),
+		      accum_general->beacon_rssi_c,
+		      delta_general->beacon_rssi_c, max_general->beacon_rssi_c);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_a:",
+		      le32_to_cpu(general->beacon_energy_a),
+		      accum_general->beacon_energy_a,
+		      delta_general->beacon_energy_a,
+		      max_general->beacon_energy_a);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_b:",
+		      le32_to_cpu(general->beacon_energy_b),
+		      accum_general->beacon_energy_b,
+		      delta_general->beacon_energy_b,
+		      max_general->beacon_energy_b);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_c:",
+		      le32_to_cpu(general->beacon_energy_c),
+		      accum_general->beacon_energy_c,
+		      delta_general->beacon_energy_c,
+		      max_general->beacon_energy_c);
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_header,
+		      "Statistics_Rx - OFDM_HT:");
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
+		      le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
+		      delta_ht->plcp_err, max_ht->plcp_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
+		      le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
+		      delta_ht->overrun_err, max_ht->overrun_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:",
+		      le32_to_cpu(ht->early_overrun_err),
+		      accum_ht->early_overrun_err, delta_ht->early_overrun_err,
+		      max_ht->early_overrun_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
+		      le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
+		      delta_ht->crc32_good, max_ht->crc32_good);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
+		      le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
+		      delta_ht->crc32_err, max_ht->crc32_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
+		      le32_to_cpu(ht->mh_format_err), accum_ht->mh_format_err,
+		      delta_ht->mh_format_err, max_ht->mh_format_err);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_crc32_good:",
+		      le32_to_cpu(ht->agg_crc32_good), accum_ht->agg_crc32_good,
+		      delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_mpdu_cnt:",
+		      le32_to_cpu(ht->agg_mpdu_cnt), accum_ht->agg_mpdu_cnt,
+		      delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_cnt:",
+		      le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
+		      delta_ht->agg_cnt, max_ht->agg_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "unsupport_mcs:",
+		      le32_to_cpu(ht->unsupport_mcs), accum_ht->unsupport_mcs,
+		      delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t
+il4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+			   size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	int pos = 0;
+	char *buf;
+	int bufsz = (sizeof(struct stats_tx) * 48) + 250;
+	ssize_t ret;
+	struct stats_tx *tx, *accum_tx, *delta_tx, *max_tx;
+
+	if (!il_is_alive(il))
+		return -EAGAIN;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf) {
+		IL_ERR("Can not allocate Buffer\n");
+		return -ENOMEM;
+	}
+
+	/* The statistics displayed here are based on the last stats
+	 * notification from the uCode and might not reflect its current
+	 * activity.
+	 */
+	tx = &il->_4965.stats.tx;
+	accum_tx = &il->_4965.accum_stats.tx;
+	delta_tx = &il->_4965.delta_stats.tx;
+	max_tx = &il->_4965.max_delta.tx;
+
+	pos += il4965_stats_flag(il, buf, bufsz);
+	pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Tx:");
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "preamble:",
+		      le32_to_cpu(tx->preamble_cnt), accum_tx->preamble_cnt,
+		      delta_tx->preamble_cnt, max_tx->preamble_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "rx_detected_cnt:",
+		      le32_to_cpu(tx->rx_detected_cnt),
+		      accum_tx->rx_detected_cnt, delta_tx->rx_detected_cnt,
+		      max_tx->rx_detected_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "bt_prio_defer_cnt:",
+		      le32_to_cpu(tx->bt_prio_defer_cnt),
+		      accum_tx->bt_prio_defer_cnt, delta_tx->bt_prio_defer_cnt,
+		      max_tx->bt_prio_defer_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "bt_prio_kill_cnt:",
+		      le32_to_cpu(tx->bt_prio_kill_cnt),
+		      accum_tx->bt_prio_kill_cnt, delta_tx->bt_prio_kill_cnt,
+		      max_tx->bt_prio_kill_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "few_bytes_cnt:",
+		      le32_to_cpu(tx->few_bytes_cnt), accum_tx->few_bytes_cnt,
+		      delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "cts_timeout:",
+		      le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
+		      delta_tx->cts_timeout, max_tx->cts_timeout);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "ack_timeout:",
+		      le32_to_cpu(tx->ack_timeout), accum_tx->ack_timeout,
+		      delta_tx->ack_timeout, max_tx->ack_timeout);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "expected_ack_cnt:",
+		      le32_to_cpu(tx->expected_ack_cnt),
+		      accum_tx->expected_ack_cnt, delta_tx->expected_ack_cnt,
+		      max_tx->expected_ack_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "actual_ack_cnt:",
+		      le32_to_cpu(tx->actual_ack_cnt), accum_tx->actual_ack_cnt,
+		      delta_tx->actual_ack_cnt, max_tx->actual_ack_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "dump_msdu_cnt:",
+		      le32_to_cpu(tx->dump_msdu_cnt), accum_tx->dump_msdu_cnt,
+		      delta_tx->dump_msdu_cnt, max_tx->dump_msdu_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table,
+		      "abort_nxt_frame_mismatch:",
+		      le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
+		      accum_tx->burst_abort_next_frame_mismatch_cnt,
+		      delta_tx->burst_abort_next_frame_mismatch_cnt,
+		      max_tx->burst_abort_next_frame_mismatch_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table,
+		      "abort_missing_nxt_frame:",
+		      le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
+		      accum_tx->burst_abort_missing_next_frame_cnt,
+		      delta_tx->burst_abort_missing_next_frame_cnt,
+		      max_tx->burst_abort_missing_next_frame_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table,
+		      "cts_timeout_collision:",
+		      le32_to_cpu(tx->cts_timeout_collision),
+		      accum_tx->cts_timeout_collision,
+		      delta_tx->cts_timeout_collision,
+		      max_tx->cts_timeout_collision);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table,
+		      "ack_ba_timeout_collision:",
+		      le32_to_cpu(tx->ack_or_ba_timeout_collision),
+		      accum_tx->ack_or_ba_timeout_collision,
+		      delta_tx->ack_or_ba_timeout_collision,
+		      max_tx->ack_or_ba_timeout_collision);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "agg ba_timeout:",
+		      le32_to_cpu(tx->agg.ba_timeout), accum_tx->agg.ba_timeout,
+		      delta_tx->agg.ba_timeout, max_tx->agg.ba_timeout);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table,
+		      "agg ba_resched_frames:",
+		      le32_to_cpu(tx->agg.ba_reschedule_frames),
+		      accum_tx->agg.ba_reschedule_frames,
+		      delta_tx->agg.ba_reschedule_frames,
+		      max_tx->agg.ba_reschedule_frames);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table,
+		      "agg scd_query_agg_frame:",
+		      le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
+		      accum_tx->agg.scd_query_agg_frame_cnt,
+		      delta_tx->agg.scd_query_agg_frame_cnt,
+		      max_tx->agg.scd_query_agg_frame_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table,
+		      "agg scd_query_no_agg:",
+		      le32_to_cpu(tx->agg.scd_query_no_agg),
+		      accum_tx->agg.scd_query_no_agg,
+		      delta_tx->agg.scd_query_no_agg,
+		      max_tx->agg.scd_query_no_agg);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "agg scd_query_agg:",
+		      le32_to_cpu(tx->agg.scd_query_agg),
+		      accum_tx->agg.scd_query_agg, delta_tx->agg.scd_query_agg,
+		      max_tx->agg.scd_query_agg);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table,
+		      "agg scd_query_mismatch:",
+		      le32_to_cpu(tx->agg.scd_query_mismatch),
+		      accum_tx->agg.scd_query_mismatch,
+		      delta_tx->agg.scd_query_mismatch,
+		      max_tx->agg.scd_query_mismatch);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "agg frame_not_ready:",
+		      le32_to_cpu(tx->agg.frame_not_ready),
+		      accum_tx->agg.frame_not_ready,
+		      delta_tx->agg.frame_not_ready,
+		      max_tx->agg.frame_not_ready);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "agg underrun:",
+		      le32_to_cpu(tx->agg.underrun), accum_tx->agg.underrun,
+		      delta_tx->agg.underrun, max_tx->agg.underrun);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "agg bt_prio_kill:",
+		      le32_to_cpu(tx->agg.bt_prio_kill),
+		      accum_tx->agg.bt_prio_kill, delta_tx->agg.bt_prio_kill,
+		      max_tx->agg.bt_prio_kill);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "agg rx_ba_rsp_cnt:",
+		      le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
+		      accum_tx->agg.rx_ba_rsp_cnt, delta_tx->agg.rx_ba_rsp_cnt,
+		      max_tx->agg.rx_ba_rsp_cnt);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
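+
+/*
+ * NOTE: illustrative sketch, not part of the driver.  All of the debugfs
+ * read handlers in this file follow the same pattern of allocating a
+ * buffer, accumulating formatted lines with scnprintf(), and handing the
+ * result to simple_read_from_buffer().  A minimal standalone version
+ * (hypothetical names) would look like:
+ *
+ *	static ssize_t demo_read(struct file *file, char __user *ubuf,
+ *				 size_t count, loff_t *ppos)
+ *	{
+ *		char *buf;
+ *		int pos = 0, bufsz = 256;
+ *		ssize_t ret;
+ *
+ *		buf = kzalloc(bufsz, GFP_KERNEL);
+ *		if (!buf)
+ *			return -ENOMEM;
+ *		pos += scnprintf(buf + pos, bufsz - pos, "value:\t%d\n", 42);
+ *		ret = simple_read_from_buffer(ubuf, count, ppos, buf, pos);
+ *		kfree(buf);
+ *		return ret;
+ *	}
+ *
+ * scnprintf() returns the number of bytes actually written, never more
+ * than the size argument, so pos can never run past bufsz even when the
+ * buffer fills up.
+ */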
+
+static ssize_t
+il4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
+				size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	int pos = 0;
+	char *buf;
+	int bufsz = sizeof(struct stats_general) * 10 + 300;
+	ssize_t ret;
+	struct stats_general_common *general, *accum_general;
+	struct stats_general_common *delta_general, *max_general;
+	struct stats_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
+	struct stats_div *div, *accum_div, *delta_div, *max_div;
+
+	if (!il_is_alive(il))
+		return -EAGAIN;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf) {
+		IL_ERR("Can not allocate Buffer\n");
+		return -ENOMEM;
+	}
+
+	/* The statistics displayed here are based on the last stats
+	 * notification from the uCode and might not reflect its current
+	 * activity.
+	 */
+	general = &il->_4965.stats.general.common;
+	dbg = &il->_4965.stats.general.common.dbg;
+	div = &il->_4965.stats.general.common.div;
+	accum_general = &il->_4965.accum_stats.general.common;
+	accum_dbg = &il->_4965.accum_stats.general.common.dbg;
+	accum_div = &il->_4965.accum_stats.general.common.div;
+	delta_general = &il->_4965.delta_stats.general.common;
+	max_general = &il->_4965.max_delta.general.common;
+	delta_dbg = &il->_4965.delta_stats.general.common.dbg;
+	max_dbg = &il->_4965.max_delta.general.common.dbg;
+	delta_div = &il->_4965.delta_stats.general.common.div;
+	max_div = &il->_4965.max_delta.general.common.div;
+
+	pos += il4965_stats_flag(il, buf, bufsz);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_header,
+		      "Statistics_General:");
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_value, "temperature:",
+		      le32_to_cpu(general->temperature));
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_value, "ttl_timestamp:",
+		      le32_to_cpu(general->ttl_timestamp));
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "burst_check:",
+		      le32_to_cpu(dbg->burst_check), accum_dbg->burst_check,
+		      delta_dbg->burst_check, max_dbg->burst_check);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "burst_count:",
+		      le32_to_cpu(dbg->burst_count), accum_dbg->burst_count,
+		      delta_dbg->burst_count, max_dbg->burst_count);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table,
+		      "wait_for_silence_timeout_count:",
+		      le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
+		      accum_dbg->wait_for_silence_timeout_cnt,
+		      delta_dbg->wait_for_silence_timeout_cnt,
+		      max_dbg->wait_for_silence_timeout_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "sleep_time:",
+		      le32_to_cpu(general->sleep_time),
+		      accum_general->sleep_time, delta_general->sleep_time,
+		      max_general->sleep_time);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "slots_out:",
+		      le32_to_cpu(general->slots_out), accum_general->slots_out,
+		      delta_general->slots_out, max_general->slots_out);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "slots_idle:",
+		      le32_to_cpu(general->slots_idle),
+		      accum_general->slots_idle, delta_general->slots_idle,
+		      max_general->slots_idle);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "tx_on_a:",
+		      le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
+		      delta_div->tx_on_a, max_div->tx_on_a);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "tx_on_b:",
+		      le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
+		      delta_div->tx_on_b, max_div->tx_on_b);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "exec_time:",
+		      le32_to_cpu(div->exec_time), accum_div->exec_time,
+		      delta_div->exec_time, max_div->exec_time);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "probe_time:",
+		      le32_to_cpu(div->probe_time), accum_div->probe_time,
+		      delta_div->probe_time, max_div->probe_time);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "rx_enable_counter:",
+		      le32_to_cpu(general->rx_enable_counter),
+		      accum_general->rx_enable_counter,
+		      delta_general->rx_enable_counter,
+		      max_general->rx_enable_counter);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, fmt_table, "num_of_sos_states:",
+		      le32_to_cpu(general->num_of_sos_states),
+		      accum_general->num_of_sos_states,
+		      delta_general->num_of_sos_states,
+		      max_general->num_of_sos_states);
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+const struct il_debugfs_ops il4965_debugfs_ops = {
+	.rx_stats_read = il4965_ucode_rx_stats_read,
+	.tx_stats_read = il4965_ucode_tx_stats_read,
+	.general_stats_read = il4965_ucode_general_stats_read,
+};
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
new file mode 100644
index 0000000..280cd8a
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -0,0 +1,6869 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+
+#include <net/mac80211.h>
+
+#include <asm/div64.h>
+
+#define DRV_NAME        "iwl4965"
+
+#include "common.h"
+#include "4965.h"
+
+/******************************************************************************
+ *
+ * module boilerplate
+ *
+ ******************************************************************************/
+
+/*
+ * module name, copyright, version, etc.
+ */
+#define DRV_DESCRIPTION	"Intel(R) Wireless WiFi 4965 driver for Linux"
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+#define VD "d"
+#else
+#define VD
+#endif
+
+#define DRV_VERSION     IWLWIFI_VERSION VD
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("iwl4965");
+
+void
+il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status)
+{
+	if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
+		IL_ERR("Tx flush command to flush out all frames\n");
+		if (!test_bit(S_EXIT_PENDING, &il->status))
+			queue_work(il->workqueue, &il->tx_flush);
+	}
+}
+
+/*
+ * module parameters
+ */
+struct il_mod_params il4965_mod_params = {
+	.restart_fw = 1,
+	/* the rest are 0 by default */
+};
+
+void
+il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
+{
+	unsigned long flags;
+	int i;
+	spin_lock_irqsave(&rxq->lock, flags);
+	INIT_LIST_HEAD(&rxq->rx_free);
+	INIT_LIST_HEAD(&rxq->rx_used);
+	/* Fill the rx_used queue with _all_ of the Rx buffers */
+	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+		/* In the reset function, these buffers may still have pages
+		 * attached, so we need to unmap and free that storage */
+		if (rxq->pool[i].page != NULL) {
+			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
+				       PAGE_SIZE << il->hw_params.rx_page_order,
+				       PCI_DMA_FROMDEVICE);
+			__il_free_pages(il, rxq->pool[i].page);
+			rxq->pool[i].page = NULL;
+		}
+		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+	}
+
+	for (i = 0; i < RX_QUEUE_SIZE; i++)
+		rxq->queue[i] = NULL;
+
+	/* Reset state so that we have processed and used all buffers, but
+	 * have not restocked the Rx queue with fresh buffers */
+	rxq->read = rxq->write = 0;
+	rxq->write_actual = 0;
+	rxq->free_count = 0;
+	spin_unlock_irqrestore(&rxq->lock, flags);
+}
+
+int
+il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
+{
+	u32 rb_size;
+	const u32 rfdnlog = RX_QUEUE_SIZE_LOG;	/* 256 RBDs */
+	u32 rb_timeout = 0;
+
+	if (il->cfg->mod_params->amsdu_size_8K)
+		rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
+	else
+		rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
+
+	/* Stop Rx DMA */
+	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+
+	/* Reset driver's Rx queue write idx */
+	il_wr(il, FH49_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+
+	/* Tell device where to find RBD circular buffer in DRAM */
+	il_wr(il, FH49_RSCSR_CHNL0_RBDCB_BASE_REG, (u32) (rxq->bd_dma >> 8));
+
+	/* Tell device where in DRAM to update its Rx status */
+	il_wr(il, FH49_RSCSR_CHNL0_STTS_WPTR_REG, rxq->rb_stts_dma >> 4);
+
+	/* Enable Rx DMA
+	 * Direct rx interrupts to hosts
+	 * Rx buffer size 4 or 8k
+	 * RB timeout 0x10
+	 * 256 RBDs
+	 */
+	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG,
+	      FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+	      FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+	      FH49_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
+	      rb_size |
+	      (rb_timeout << FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
+	      (rfdnlog << FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+
+	/* Set interrupt coalescing timer to default (2048 usecs) */
+	il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_TIMEOUT_DEF);
+
+	return 0;
+}
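+
+/*
+ * NOTE (assumption, for illustration): RX_QUEUE_SIZE_LOG is the base-2
+ * log of the RBD ring size, so writing rfdnlog (8) into the RBDCB size
+ * field of the config register tells the device that the circular
+ * buffer holds 2^8 = 256 receive buffer descriptors, matching the
+ * "256 RBDs" comment above.
+ */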
+
+static void
+il4965_set_pwr_vmain(struct il_priv *il)
+{
+/*
+ * (for documentation purposes)
+ * to set power to V_AUX, do:
+
+		if (pci_pme_capable(il->pci_dev, PCI_D3cold))
+			il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
+					       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
+					       ~APMG_PS_CTRL_MSK_PWR_SRC);
+ */
+
+	il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
+			      APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
+			      ~APMG_PS_CTRL_MSK_PWR_SRC);
+}
+
+int
+il4965_hw_nic_init(struct il_priv *il)
+{
+	unsigned long flags;
+	struct il_rx_queue *rxq = &il->rxq;
+	int ret;
+
+	spin_lock_irqsave(&il->lock, flags);
+	il_apm_init(il);
+	/* Set interrupt coalescing calibration timer to default (512 usecs) */
+	il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_CALIB_TIMEOUT_DEF);
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	il4965_set_pwr_vmain(il);
+	il4965_nic_config(il);
+
+	/* Allocate the RX queue, or reset if it is already allocated */
+	if (!rxq->bd) {
+		ret = il_rx_queue_alloc(il);
+		if (ret) {
+			IL_ERR("Unable to initialize Rx queue\n");
+			return -ENOMEM;
+		}
+	} else
+		il4965_rx_queue_reset(il, rxq);
+
+	il4965_rx_replenish(il);
+
+	il4965_rx_init(il, rxq);
+
+	spin_lock_irqsave(&il->lock, flags);
+
+	rxq->need_update = 1;
+	il_rx_queue_update_write_ptr(il, rxq);
+
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	/* Allocate or reset and init all Tx and Command queues */
+	if (!il->txq) {
+		ret = il4965_txq_ctx_alloc(il);
+		if (ret)
+			return ret;
+	} else
+		il4965_txq_ctx_reset(il);
+
+	set_bit(S_INIT, &il->status);
+
+	return 0;
+}
+
+/**
+ * il4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
+ */
+static inline __le32
+il4965_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
+{
+	return cpu_to_le32((u32) (dma_addr >> 8));
+}
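+
+/*
+ * NOTE: illustrative, values assumed.  The RBD holds the byte address
+ * shifted right by 8, i.e. in units of 256 bytes.  Dropping the low 8
+ * bits is lossless as long as receive pages are at least 256-byte
+ * aligned, and it lets a DMA address wider than 32 bits fit into the
+ * 32-bit RBD.  For example, for a page at DMA address 0x123456700:
+ *
+ *	0x123456700 >> 8 == 0x01234567	(fits in a u32)
+ */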
+
+/**
+ * il4965_rx_queue_restock - refill RX queue from pre-allocated pool
+ *
+ * If there are slots in the RX queue that need to be restocked,
+ * and we have free pre-allocated buffers, fill the ranks as much
+ * as we can, pulling from rx_free.
+ *
+ * This moves the 'write' idx forward to catch up with 'processed', and
+ * also updates the memory address in the firmware to reference the new
+ * target buffer.
+ */
+void
+il4965_rx_queue_restock(struct il_priv *il)
+{
+	struct il_rx_queue *rxq = &il->rxq;
+	struct list_head *element;
+	struct il_rx_buf *rxb;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rxq->lock, flags);
+	while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
+		/* The overwritten rxb must be a used one */
+		rxb = rxq->queue[rxq->write];
+		BUG_ON(rxb && rxb->page);
+
+		/* Get next free Rx buffer, remove from free list */
+		element = rxq->rx_free.next;
+		rxb = list_entry(element, struct il_rx_buf, list);
+		list_del(element);
+
+		/* Point to Rx buffer via next RBD in circular buffer */
+		rxq->bd[rxq->write] =
+		    il4965_dma_addr2rbd_ptr(il, rxb->page_dma);
+		rxq->queue[rxq->write] = rxb;
+		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
+		rxq->free_count--;
+	}
+	spin_unlock_irqrestore(&rxq->lock, flags);
+	/* If the pre-allocated buffer pool is dropping low, schedule to
+	 * refill it */
+	if (rxq->free_count <= RX_LOW_WATERMARK)
+		queue_work(il->workqueue, &il->rx_replenish);
+
+	/* If we've added more space for the firmware to place data, tell it.
+	 * Increment device's write pointer in multiples of 8. */
+	if (rxq->write_actual != (rxq->write & ~0x7)) {
+		spin_lock_irqsave(&rxq->lock, flags);
+		rxq->need_update = 1;
+		spin_unlock_irqrestore(&rxq->lock, flags);
+		il_rx_queue_update_write_ptr(il, rxq);
+	}
+}
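+
+/*
+ * NOTE: sketch under an assumption, not driver code.  Since the device
+ * write pointer is only advanced in multiples of 8, the value actually
+ * handed to the hardware by il_rx_queue_update_write_ptr() is presumably
+ * the write index rounded down:
+ *
+ *	rxq->write_actual = rxq->write & ~0x7;
+ *
+ * e.g. write == 13 gives write_actual == 8; the remaining five buffers
+ * are published once the index crosses the next multiple of 8.
+ */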
+
+/**
+ * il4965_rx_allocate - Move all used packets from rx_used to rx_free
+ *
+ * When moving a buffer to rx_free, a fresh receive page is allocated and
+ * mapped for the slot.
+ *
+ * The caller, il4965_rx_replenish(), then restocks the Rx queue via
+ * il4965_rx_queue_restock(); replenish runs as a scheduled work item
+ * (except during initialization).
+ */
+static void
+il4965_rx_allocate(struct il_priv *il, gfp_t priority)
+{
+	struct il_rx_queue *rxq = &il->rxq;
+	struct list_head *element;
+	struct il_rx_buf *rxb;
+	struct page *page;
+	dma_addr_t page_dma;
+	unsigned long flags;
+	gfp_t gfp_mask = priority;
+
+	while (1) {
+		spin_lock_irqsave(&rxq->lock, flags);
+		if (list_empty(&rxq->rx_used)) {
+			spin_unlock_irqrestore(&rxq->lock, flags);
+			return;
+		}
+		spin_unlock_irqrestore(&rxq->lock, flags);
+
+		if (rxq->free_count > RX_LOW_WATERMARK)
+			gfp_mask |= __GFP_NOWARN;
+
+		if (il->hw_params.rx_page_order > 0)
+			gfp_mask |= __GFP_COMP;
+
+		/* Alloc a new receive buffer */
+		page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
+		if (!page) {
+			if (net_ratelimit())
+				D_INFO("alloc_pages failed, " "order: %d\n",
+				       il->hw_params.rx_page_order);
+
+			if (rxq->free_count <= RX_LOW_WATERMARK &&
+			    net_ratelimit())
+				IL_ERR("Failed to alloc_pages with %s. "
+				       "Only %u free buffers remaining.\n",
+				       priority ==
+				       GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
+				       rxq->free_count);
+			/* We don't reschedule replenish work here -- we will
+			 * call the restock method and if it still needs
+			 * more buffers it will schedule replenish */
+			return;
+		}
+
+		/* Get physical address of the RB */
+		page_dma =
+		    pci_map_page(il->pci_dev, page, 0,
+				 PAGE_SIZE << il->hw_params.rx_page_order,
+				 PCI_DMA_FROMDEVICE);
+		if (unlikely(pci_dma_mapping_error(il->pci_dev, page_dma))) {
+			__free_pages(page, il->hw_params.rx_page_order);
+			break;
+		}
+
+		spin_lock_irqsave(&rxq->lock, flags);
+
+		if (list_empty(&rxq->rx_used)) {
+			spin_unlock_irqrestore(&rxq->lock, flags);
+			pci_unmap_page(il->pci_dev, page_dma,
+				       PAGE_SIZE << il->hw_params.rx_page_order,
+				       PCI_DMA_FROMDEVICE);
+			__free_pages(page, il->hw_params.rx_page_order);
+			return;
+		}
+
+		element = rxq->rx_used.next;
+		rxb = list_entry(element, struct il_rx_buf, list);
+		list_del(element);
+
+		BUG_ON(rxb->page);
+
+		rxb->page = page;
+		rxb->page_dma = page_dma;
+		list_add_tail(&rxb->list, &rxq->rx_free);
+		rxq->free_count++;
+		il->alloc_rxb_page++;
+
+		spin_unlock_irqrestore(&rxq->lock, flags);
+	}
+}
+
+void
+il4965_rx_replenish(struct il_priv *il)
+{
+	unsigned long flags;
+
+	il4965_rx_allocate(il, GFP_KERNEL);
+
+	spin_lock_irqsave(&il->lock, flags);
+	il4965_rx_queue_restock(il);
+	spin_unlock_irqrestore(&il->lock, flags);
+}
+
+void
+il4965_rx_replenish_now(struct il_priv *il)
+{
+	il4965_rx_allocate(il, GFP_ATOMIC);
+
+	il4965_rx_queue_restock(il);
+}
+
+/* Assumes that the page field of the buffers in 'pool' is kept accurate.
+ * If a page has been detached, the pool entry must have its page set to
+ * NULL.  This free routine walks the pool entries and, for each non-NULL
+ * page, unmaps and frees it.
+ */
+void
+il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
+{
+	int i;
+	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
+		if (rxq->pool[i].page != NULL) {
+			pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
+				       PAGE_SIZE << il->hw_params.rx_page_order,
+				       PCI_DMA_FROMDEVICE);
+			__il_free_pages(il, rxq->pool[i].page);
+			rxq->pool[i].page = NULL;
+		}
+	}
+
+	dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+			  rxq->bd_dma);
+	dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status),
+			  rxq->rb_stts, rxq->rb_stts_dma);
+	rxq->bd = NULL;
+	rxq->rb_stts = NULL;
+}
+
+int
+il4965_rxq_stop(struct il_priv *il)
+{
+	int ret;
+
+	_il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+	ret = _il_poll_bit(il, FH49_MEM_RSSR_RX_STATUS_REG,
+			   FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
+			   FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
+			   1000);
+	if (ret < 0)
+		IL_ERR("Can't stop Rx DMA.\n");
+
+	return 0;
+}
+
+int
+il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band)
+{
+	int idx = 0;
+	int band_offset = 0;
+
+	/* HT rate format: mac80211 wants an MCS number, which is just LSB */
+	if (rate_n_flags & RATE_MCS_HT_MSK) {
+		idx = (rate_n_flags & 0xff);
+		return idx;
+	} else {
+		/* Legacy rate format, search for match in table */
+		if (band == NL80211_BAND_5GHZ)
+			band_offset = IL_FIRST_OFDM_RATE;
+		for (idx = band_offset; idx < RATE_COUNT_LEGACY; idx++)
+			if (il_rates[idx].plcp == (rate_n_flags & 0xFF))
+				return idx - band_offset;
+	}
+
+	return -1;
+}
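+
+/*
+ * NOTE (illustration): for an HT rate the MCS number is simply the low
+ * byte of rate_n_flags, so rate_n_flags = RATE_MCS_HT_MSK | 0x07 yields
+ * idx 7.  For legacy rates the PLCP value in the low byte is looked up
+ * in il_rates[]; on 5 GHz the search starts at IL_FIRST_OFDM_RATE and
+ * the result is rebased so that the first OFDM rate maps to idx 0,
+ * matching mac80211's per-band rate tables.
+ */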
+
+static int
+il4965_calc_rssi(struct il_priv *il, struct il_rx_phy_res *rx_resp)
+{
+	/* data from PHY/DSP regarding signal strength, etc.,
+	 *   contents are always there, not configurable by host.  */
+	struct il4965_rx_non_cfg_phy *ncphy =
+	    (struct il4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
+	u32 agc =
+	    (le16_to_cpu(ncphy->agc_info) & IL49_AGC_DB_MASK) >>
+	    IL49_AGC_DB_POS;
+
+	u32 valid_antennae =
+	    (le16_to_cpu(rx_resp->phy_flags) & IL49_RX_PHY_FLAGS_ANTENNAE_MASK)
+	    >> IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
+	u8 max_rssi = 0;
+	u32 i;
+
+	/* Find max rssi among 3 possible receivers.
+	 * These values are measured by the digital signal processor (DSP).
+	 * They should stay fairly constant even as the signal strength varies,
+	 *   if the radio's automatic gain control (AGC) is working right.
+	 * AGC value (see below) will provide the "interesting" info. */
+	for (i = 0; i < 3; i++)
+		if (valid_antennae & (1 << i))
+			max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
+
+	D_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
+		ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
+		max_rssi, agc);
+
+	/* dBm = max_rssi dB - agc dB - constant.
+	 * Higher AGC (higher radio gain) means lower signal. */
+	return max_rssi - agc - IL4965_RSSI_OFFSET;
+}
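+
+/*
+ * NOTE: worked example with assumed values.  With a maximum DSP reading
+ * of 97 among the chains, an AGC value of 103, and the driver's constant
+ * IL4965_RSSI_OFFSET (assumed here to be 44), the reported signal would
+ * be 97 - 103 - 44 = -50 dBm.
+ */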
+
+static u32
+il4965_translate_rx_status(struct il_priv *il, u32 decrypt_in)
+{
+	u32 decrypt_out = 0;
+
+	if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
+	    RX_RES_STATUS_STATION_FOUND)
+		decrypt_out |=
+		    (RX_RES_STATUS_STATION_FOUND |
+		     RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
+
+	decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
+
+	/* packet was not encrypted */
+	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
+	    RX_RES_STATUS_SEC_TYPE_NONE)
+		return decrypt_out;
+
+	/* packet was encrypted with unknown alg */
+	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
+	    RX_RES_STATUS_SEC_TYPE_ERR)
+		return decrypt_out;
+
+	/* decryption was not done in HW */
+	if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
+	    RX_MPDU_RES_STATUS_DEC_DONE_MSK)
+		return decrypt_out;
+
+	switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
+
+	case RX_RES_STATUS_SEC_TYPE_CCMP:
+		/* alg is CCM: check MIC only */
+		if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
+			/* Bad MIC */
+			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
+		else
+			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
+
+		break;
+
+	case RX_RES_STATUS_SEC_TYPE_TKIP:
+		if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
+			/* Bad TTAK */
+			decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
+			break;
+		}
+		/* fall through if TTAK OK */
+	default:
+		if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
+			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
+		else
+			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
+		break;
+	}
+
+	D_RX("decrypt_in:0x%x  decrypt_out = 0x%x\n", decrypt_in, decrypt_out);
+
+	return decrypt_out;
+}
+
+#define SMALL_PACKET_SIZE 256
+
+static void
+il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
+			       u32 len, u32 ampdu_status, struct il_rx_buf *rxb,
+			       struct ieee80211_rx_status *stats)
+{
+	struct sk_buff *skb;
+	__le16 fc = hdr->frame_control;
+
+	/* We only process data packets if the interface is open */
+	if (unlikely(!il->is_open)) {
+		D_DROP("Dropping packet while interface is not open.\n");
+		return;
+	}
+
+	if (unlikely(test_bit(IL_STOP_REASON_PASSIVE, &il->stop_reason))) {
+		il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
+		D_INFO("Woke queues - frame received on passive channel\n");
+	}
+
+	/* In case of HW accelerated crypto and bad decryption, drop */
+	if (!il->cfg->mod_params->sw_crypto &&
+	    il_set_decrypted_flag(il, hdr, ampdu_status, stats))
+		return;
+
+	skb = dev_alloc_skb(SMALL_PACKET_SIZE);
+	if (!skb) {
+		IL_ERR("dev_alloc_skb failed\n");
+		return;
+	}
+
+	if (len <= SMALL_PACKET_SIZE) {
+		skb_put_data(skb, hdr, len);
+	} else {
+		skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb),
+				len, PAGE_SIZE << il->hw_params.rx_page_order);
+		il->alloc_rxb_page--;
+		rxb->page = NULL;
+	}
+
+	il_update_stats(il, false, fc, len);
+	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
+
+	ieee80211_rx(il->hw, skb);
+}
+
+/* Called for N_RX (legacy ABG frames), or
+ * N_RX_MPDU (HT, i.e. 802.11n, frames). */
+static void
+il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct ieee80211_hdr *header;
+	struct ieee80211_rx_status rx_status = {};
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	struct il_rx_phy_res *phy_res;
+	__le32 rx_pkt_status;
+	struct il_rx_mpdu_res_start *amsdu;
+	u32 len;
+	u32 ampdu_status;
+	u32 rate_n_flags;
+
+	/*
+	 * N_RX and N_RX_MPDU are handled differently.
+	 *	N_RX: physical layer info is in this buffer
+	 *	N_RX_MPDU: physical layer info was sent in separate
+	 *		command and cached in il->last_phy_res
+	 *
+	 * Here we set up local variables depending on which command is
+	 * received.
+	 */
+	if (pkt->hdr.cmd == N_RX) {
+		phy_res = (struct il_rx_phy_res *)pkt->u.raw;
+		header =
+		    (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) +
+					     phy_res->cfg_phy_cnt);
+
+		len = le16_to_cpu(phy_res->byte_count);
+		rx_pkt_status =
+		    *(__le32 *) (pkt->u.raw + sizeof(*phy_res) +
+				 phy_res->cfg_phy_cnt + len);
+		ampdu_status = le32_to_cpu(rx_pkt_status);
+	} else {
+		if (!il->_4965.last_phy_res_valid) {
+			IL_ERR("MPDU frame without cached PHY data\n");
+			return;
+		}
+		phy_res = &il->_4965.last_phy_res;
+		amsdu = (struct il_rx_mpdu_res_start *)pkt->u.raw;
+		header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
+		len = le16_to_cpu(amsdu->byte_count);
+		rx_pkt_status = *(__le32 *) (pkt->u.raw + sizeof(*amsdu) + len);
+		ampdu_status =
+		    il4965_translate_rx_status(il, le32_to_cpu(rx_pkt_status));
+	}
+
+	if (unlikely(phy_res->cfg_phy_cnt > 20)) {
+		D_DROP("dsp size out of range [0,20]: %d\n",
+		       phy_res->cfg_phy_cnt);
+		return;
+	}
+
+	if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
+	    !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
+		D_RX("Bad CRC or FIFO: 0x%08X.\n", le32_to_cpu(rx_pkt_status));
+		return;
+	}
+
+	/* This will be used in several places later */
+	rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
+
+	/* rx_status carries information about the packet to mac80211 */
+	rx_status.mactime = le64_to_cpu(phy_res->timestamp);
+	rx_status.band =
+	    (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
+	    NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+	rx_status.freq =
+	    ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
+					   rx_status.band);
+	rx_status.rate_idx =
+	    il4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
+	rx_status.flag = 0;
+
+	/* TSF isn't reliable. In order to allow a smooth user experience,
+	 * this workaround doesn't propagate it to mac80211 */
+	/*rx_status.flag |= RX_FLAG_MACTIME_START; */
+
+	il->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
+
+	/* Find max signal strength (dBm) among 3 antenna/receiver chains */
+	rx_status.signal = il4965_calc_rssi(il, phy_res);
+
+	D_STATS("Rssi %d, TSF %llu\n", rx_status.signal,
+		(unsigned long long)rx_status.mactime);
+
+	/*
+	 * "antenna number"
+	 *
+	 * It seems that the antenna field in the phy flags value
+	 * is actually a bit field. This is undefined by radiotap,
+	 * it wants an actual antenna number but I always get "7"
+	 * for most legacy frames I receive indicating that the
+	 * same frame was received on all three RX chains.
+	 *
+	 * I think this field should be removed in favor of a
+	 * new 802.11n radiotap field "RX chains" that is defined
+	 * as a bitmask.
+	 */
+	rx_status.antenna =
+	    (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >>
+	    RX_RES_PHY_FLAGS_ANTENNA_POS;
+
+	/* set the preamble flag if appropriate */
+	if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
+		rx_status.enc_flags |= RX_ENC_FLAG_SHORTPRE;
+
+	/* Set up the HT phy flags */
+	if (rate_n_flags & RATE_MCS_HT_MSK)
+		rx_status.encoding = RX_ENC_HT;
+	if (rate_n_flags & RATE_MCS_HT40_MSK)
+		rx_status.bw = RATE_INFO_BW_40;
+	else
+		rx_status.bw = RATE_INFO_BW_20;
+	if (rate_n_flags & RATE_MCS_SGI_MSK)
+		rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI;
+
+	if (phy_res->phy_flags & RX_RES_PHY_FLAGS_AGG_MSK) {
+		/* We know which subframes of an A-MPDU belong
+		 * together since we get a single PHY response
+		 * from the firmware for all of them.
+		 */
+
+		rx_status.flag |= RX_FLAG_AMPDU_DETAILS;
+		rx_status.ampdu_reference = il->_4965.ampdu_ref;
+	}
+
+	il4965_pass_packet_to_mac80211(il, header, len, ampdu_status, rxb,
+				       &rx_status);
+}
+
+/* Cache phy data (Rx signal strength, etc) for HT frame (N_RX_PHY).
+ * This will be used later in il4965_hdl_rx() for N_RX_MPDU. */
+static void
+il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	il->_4965.last_phy_res_valid = true;
+	il->_4965.ampdu_ref++;
+	memcpy(&il->_4965.last_phy_res, pkt->u.raw,
+	       sizeof(struct il_rx_phy_res));
+}
+
+static int
+il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif,
+			     enum nl80211_band band, u8 is_active,
+			     u8 n_probes, struct il_scan_channel *scan_ch)
+{
+	struct ieee80211_channel *chan;
+	const struct ieee80211_supported_band *sband;
+	const struct il_channel_info *ch_info;
+	u16 passive_dwell = 0;
+	u16 active_dwell = 0;
+	int added, i;
+	u16 channel;
+
+	sband = il_get_hw_mode(il, band);
+	if (!sband)
+		return 0;
+
+	active_dwell = il_get_active_dwell_time(il, band, n_probes);
+	passive_dwell = il_get_passive_dwell_time(il, band, vif);
+
+	if (passive_dwell <= active_dwell)
+		passive_dwell = active_dwell + 1;
+
+	for (i = 0, added = 0; i < il->scan_request->n_channels; i++) {
+		chan = il->scan_request->channels[i];
+
+		if (chan->band != band)
+			continue;
+
+		channel = chan->hw_value;
+		scan_ch->channel = cpu_to_le16(channel);
+
+		ch_info = il_get_channel_info(il, band, channel);
+		if (!il_is_channel_valid(ch_info)) {
+			D_SCAN("Channel %d is INVALID for this band.\n",
+			       channel);
+			continue;
+		}
+
+		if (!is_active || il_is_channel_passive(ch_info) ||
+		    (chan->flags & IEEE80211_CHAN_NO_IR))
+			scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
+		else
+			scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
+
+		if (n_probes)
+			scan_ch->type |= IL_SCAN_PROBE_MASK(n_probes);
+
+		scan_ch->active_dwell = cpu_to_le16(active_dwell);
+		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+
+		/* Set txpower levels to defaults */
+		scan_ch->dsp_atten = 110;
+
+		/* NOTE: if we were doing 6Mb OFDM for scans we'd use
+		 * power level:
+		 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
+		 */
+		if (band == NL80211_BAND_5GHZ)
+			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
+		else
+			scan_ch->tx_gain = ((1 << 5) | (5 << 3));
+
+		D_SCAN("Scanning ch=%d prob=0x%X [%s %d]\n", channel,
+		       le32_to_cpu(scan_ch->type),
+		       (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
+		       "ACTIVE" : "PASSIVE",
+		       (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
+		       active_dwell : passive_dwell);
+
+		scan_ch++;
+		added++;
+	}
+
+	D_SCAN("total channels to scan %d\n", added);
+	return added;
+}
+
+static void
+il4965_toggle_tx_ant(struct il_priv *il, u8 *ant, u8 valid)
+{
+	int i;
+	u8 ind = *ant;
+
+	for (i = 0; i < RATE_ANT_NUM - 1; i++) {
+		ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
+		if (valid & BIT(ind)) {
+			*ant = ind;
+			return;
+		}
+	}
+}
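+
+/*
+ * NOTE (illustration): the toggle above round-robins through the antenna
+ * indexes, skipping any not set in the valid mask.  With
+ * valid == BIT(0) | BIT(2) (antennas A and C) and *ant == 0, the loop
+ * tries index 1 (not valid, skipped) and then index 2, so *ant becomes
+ * 2; the next call wraps back around to 0.
+ */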
+
+int
+il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
+{
+	struct il_host_cmd cmd = {
+		.id = C_SCAN,
+		.len = sizeof(struct il_scan_cmd),
+		.flags = CMD_SIZE_HUGE,
+	};
+	struct il_scan_cmd *scan;
+	u32 rate_flags = 0;
+	u16 cmd_len;
+	u16 rx_chain = 0;
+	enum nl80211_band band;
+	u8 n_probes = 0;
+	u8 rx_ant = il->hw_params.valid_rx_ant;
+	u8 rate;
+	bool is_active = false;
+	int chan_mod;
+	u8 active_chains;
+	u8 scan_tx_antennas = il->hw_params.valid_tx_ant;
+	int ret;
+
+	lockdep_assert_held(&il->mutex);
+
+	if (!il->scan_cmd) {
+		il->scan_cmd =
+		    kmalloc(sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE,
+			    GFP_KERNEL);
+		if (!il->scan_cmd) {
+			D_SCAN("fail to allocate memory for scan\n");
+			return -ENOMEM;
+		}
+	}
+	scan = il->scan_cmd;
+	memset(scan, 0, sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE);
+
+	scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH;
+	scan->quiet_time = IL_ACTIVE_QUIET_TIME;
+
+	if (il_is_any_associated(il)) {
+		u16 interval;
+		u32 extra;
+		u32 suspend_time = 100;
+		u32 scan_suspend_time = 100;
+
+		D_INFO("Scanning while associated...\n");
+		interval = vif->bss_conf.beacon_int;
+
+		scan->suspend_time = 0;
+		scan->max_out_time = cpu_to_le32(200 * 1024);
+		if (!interval)
+			interval = suspend_time;
+
+		extra = (suspend_time / interval) << 22;
+		scan_suspend_time =
+		    (extra | ((suspend_time % interval) * 1024));
+		scan->suspend_time = cpu_to_le32(scan_suspend_time);
+		D_SCAN("suspend_time 0x%X beacon interval %d\n",
+		       scan_suspend_time, interval);
+	}
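+
+	/*
+	 * NOTE: worked example, not driver code.  The encoding above
+	 * appears to pack the number of whole beacon intervals into the
+	 * top bits and the leftover time, converted from TU to usec
+	 * (x 1024), into the low bits.  E.g. with suspend_time = 100 and
+	 * interval = 30:
+	 *
+	 *	extra             = (100 / 30) << 22 = 3 << 22
+	 *	remainder         = (100 % 30) * 1024 = 10240
+	 *	scan_suspend_time = (3 << 22) | 10240
+	 */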
+
+	if (il->scan_request->n_ssids) {
+		int i, p = 0;
+		D_SCAN("Kicking off active scan\n");
+		for (i = 0; i < il->scan_request->n_ssids; i++) {
+			/* always does wildcard anyway */
+			if (!il->scan_request->ssids[i].ssid_len)
+				continue;
+			scan->direct_scan[p].id = WLAN_EID_SSID;
+			scan->direct_scan[p].len =
+			    il->scan_request->ssids[i].ssid_len;
+			memcpy(scan->direct_scan[p].ssid,
+			       il->scan_request->ssids[i].ssid,
+			       il->scan_request->ssids[i].ssid_len);
+			n_probes++;
+			p++;
+		}
+		is_active = true;
+	} else
+		D_SCAN("Start passive scan.\n");
+
+	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
+	scan->tx_cmd.sta_id = il->hw_params.bcast_id;
+	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+
+	switch (il->scan_band) {
+	case NL80211_BAND_2GHZ:
+		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
+		chan_mod =
+		    le32_to_cpu(il->active.flags & RXON_FLG_CHANNEL_MODE_MSK) >>
+		    RXON_FLG_CHANNEL_MODE_POS;
+		if (chan_mod == CHANNEL_MODE_PURE_40) {
+			rate = RATE_6M_PLCP;
+		} else {
+			rate = RATE_1M_PLCP;
+			rate_flags = RATE_MCS_CCK_MSK;
+		}
+		break;
+	case NL80211_BAND_5GHZ:
+		rate = RATE_6M_PLCP;
+		break;
+	default:
+		IL_WARN("Invalid scan band\n");
+		return -EIO;
+	}
+
+	/*
+	 * If active scanning is requested but a certain channel is
+	 * marked passive, we can do active scanning if we detect
+	 * transmissions.
+	 *
+	 * There is an issue with some firmware versions that triggers
+	 * a sysassert on a "good CRC threshold" of zero (== disabled),
+	 * on a radar channel even though this means that we should NOT
+	 * send probes.
+	 *
+	 * The "good CRC threshold" is the number of frames that we
+	 * need to receive during our dwell time on a channel before
+	 * sending out probes -- setting this to a huge value will
+	 * mean we never reach it, but at the same time work around
+	 * the aforementioned issue. Thus use IL_GOOD_CRC_TH_NEVER
+	 * here instead of IL_GOOD_CRC_TH_DISABLED.
+	 */
+	scan->good_CRC_th =
+	    is_active ? IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_NEVER;
+
+	band = il->scan_band;
+
+	if (il->cfg->scan_rx_antennas[band])
+		rx_ant = il->cfg->scan_rx_antennas[band];
+
+	il4965_toggle_tx_ant(il, &il->scan_tx_ant[band], scan_tx_antennas);
+	rate_flags |= BIT(il->scan_tx_ant[band]) << RATE_MCS_ANT_POS;
+	scan->tx_cmd.rate_n_flags = cpu_to_le32(rate | rate_flags);
+
+	/* In power save mode use one chain, otherwise use all chains */
+	if (test_bit(S_POWER_PMI, &il->status)) {
+		/* rx_ant has been set to all valid chains previously */
+		active_chains =
+		    rx_ant & ((u8) (il->chain_noise_data.active_chains));
+		if (!active_chains)
+			active_chains = rx_ant;
+
+		D_SCAN("chain_noise_data.active_chains: %u\n",
+		       il->chain_noise_data.active_chains);
+
+		rx_ant = il4965_first_antenna(active_chains);
+	}
+
+	/* MIMO is not used here, but value is required */
+	rx_chain |= il->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
+	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
+	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
+	rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
+	scan->rx_chain = cpu_to_le16(rx_chain);
+
+	cmd_len =
+	    il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data,
+			      vif->addr, il->scan_request->ie,
+			      il->scan_request->ie_len,
+			      IL_MAX_SCAN_SIZE - sizeof(*scan));
+	scan->tx_cmd.len = cpu_to_le16(cmd_len);
+
+	scan->filter_flags |=
+	    (RXON_FILTER_ACCEPT_GRP_MSK | RXON_FILTER_BCON_AWARE_MSK);
+
+	scan->channel_count =
+	    il4965_get_channels_for_scan(il, vif, band, is_active, n_probes,
+					 (void *)&scan->data[cmd_len]);
+	if (scan->channel_count == 0) {
+		D_SCAN("channel count %d\n", scan->channel_count);
+		return -EIO;
+	}
+
+	cmd.len +=
+	    le16_to_cpu(scan->tx_cmd.len) +
+	    scan->channel_count * sizeof(struct il_scan_channel);
+	cmd.data = scan;
+	scan->len = cpu_to_le16(cmd.len);
+
+	set_bit(S_SCAN_HW, &il->status);
+
+	ret = il_send_cmd_sync(il, &cmd);
+	if (ret)
+		clear_bit(S_SCAN_HW, &il->status);
+
+	return ret;
+}
+
+int
+il4965_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
+			   bool add)
+{
+	struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+	if (add)
+		return il4965_add_bssid_station(il, vif->bss_conf.bssid,
+						&vif_priv->ibss_bssid_sta_id);
+	return il_remove_station(il, vif_priv->ibss_bssid_sta_id,
+				 vif->bss_conf.bssid);
+}
+
+void
+il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid, int freed)
+{
+	lockdep_assert_held(&il->sta_lock);
+
+	if (il->stations[sta_id].tid[tid].tfds_in_queue >= freed)
+		il->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+	else {
+		D_TX("free more than tfds_in_queue (%u:%d)\n",
+		     il->stations[sta_id].tid[tid].tfds_in_queue, freed);
+		il->stations[sta_id].tid[tid].tfds_in_queue = 0;
+	}
+}
+
+#define IL_TX_QUEUE_MSK	0xfffff
+
+static bool
+il4965_is_single_rx_stream(struct il_priv *il)
+{
+	return il->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
+	    il->current_ht_config.single_chain_sufficient;
+}
+
+#define IL_NUM_RX_CHAINS_MULTIPLE	3
+#define IL_NUM_RX_CHAINS_SINGLE	2
+#define IL_NUM_IDLE_CHAINS_DUAL	2
+#define IL_NUM_IDLE_CHAINS_SINGLE	1
+
+/*
+ * Determine how many receiver/antenna chains to use.
+ *
+ * More chains provide better reception via diversity.  Fewer chains
+ * save power at the expense of throughput, though the saving only
+ * matters when the device is not already in powersave.
+ *
+ * MIMO (dual stream) requires at least 2, but works better with 3.
+ * This does not determine *which* chains to use, just how many.
+ */
+static int
+il4965_get_active_rx_chain_count(struct il_priv *il)
+{
+	/* # of Rx chains to use when expecting MIMO. */
+	if (il4965_is_single_rx_stream(il))
+		return IL_NUM_RX_CHAINS_SINGLE;
+	else
+		return IL_NUM_RX_CHAINS_MULTIPLE;
+}
+
+/*
+ * When we are in power-saving mode, unless the device supports spatial
+ * multiplexing power save, use the active count as the rx chain count.
+ */
+static int
+il4965_get_idle_rx_chain_count(struct il_priv *il, int active_cnt)
+{
+	/* # Rx chains when idling, depending on SMPS mode */
+	switch (il->current_ht_config.smps) {
+	case IEEE80211_SMPS_STATIC:
+	case IEEE80211_SMPS_DYNAMIC:
+		return IL_NUM_IDLE_CHAINS_SINGLE;
+	case IEEE80211_SMPS_OFF:
+		return active_cnt;
+	default:
+		WARN(1, "invalid SMPS mode %d", il->current_ht_config.smps);
+		return active_cnt;
+	}
+}
+
+/* up to 4 chains */
+static u8
+il4965_count_chain_bitmap(u32 chain_bitmap)
+{
+	u8 res;
+	res = (chain_bitmap & BIT(0)) >> 0;
+	res += (chain_bitmap & BIT(1)) >> 1;
+	res += (chain_bitmap & BIT(2)) >> 2;
+	res += (chain_bitmap & BIT(3)) >> 3;
+	return res;
+}
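+
+/*
+ * NOTE (illustration): this is an open-coded population count of the low
+ * four bits; with <linux/bitops.h> in scope it could equivalently be
+ * written as:
+ *
+ *	return hweight8(chain_bitmap & 0xf);
+ */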
+
+/**
+ * il4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
+ *
+ * Selects how many and which Rx receivers/antennas/chains to use.
+ * This should not be used for the scan command; it would put the data
+ * in the wrong place.
+ */
+void
+il4965_set_rxon_chain(struct il_priv *il)
+{
+	bool is_single = il4965_is_single_rx_stream(il);
+	bool is_cam = !test_bit(S_POWER_PMI, &il->status);
+	u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
+	u32 active_chains;
+	u16 rx_chain;
+
+	/* Tell uCode which antennas are actually connected.
+	 * Before first association, we assume all antennas are connected.
+	 * Just after first association, il4965_chain_noise_calibration()
+	 *    checks which antennas actually *are* connected. */
+	if (il->chain_noise_data.active_chains)
+		active_chains = il->chain_noise_data.active_chains;
+	else
+		active_chains = il->hw_params.valid_rx_ant;
+
+	rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
+
+	/* How many receivers should we use? */
+	active_rx_cnt = il4965_get_active_rx_chain_count(il);
+	idle_rx_cnt = il4965_get_idle_rx_chain_count(il, active_rx_cnt);
+
+	/* correct the rx chain count according to hw settings
+	 * and chain noise calibration
+	 */
+	valid_rx_cnt = il4965_count_chain_bitmap(active_chains);
+	if (valid_rx_cnt < active_rx_cnt)
+		active_rx_cnt = valid_rx_cnt;
+
+	if (valid_rx_cnt < idle_rx_cnt)
+		idle_rx_cnt = valid_rx_cnt;
+
+	rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
+	rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
+
+	il->staging.rx_chain = cpu_to_le16(rx_chain);
+
+	if (!is_single && active_rx_cnt >= IL_NUM_RX_CHAINS_SINGLE && is_cam)
+		il->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
+	else
+		il->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
+
+	D_ASSOC("rx_chain=0x%X active=%d idle=%d\n", il->staging.rx_chain,
+		active_rx_cnt, idle_rx_cnt);
+
+	WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
+		active_rx_cnt < idle_rx_cnt);
+}
+
+static const char *
+il4965_get_fh_string(int cmd)
+{
+	switch (cmd) {
+		IL_CMD(FH49_RSCSR_CHNL0_STTS_WPTR_REG);
+		IL_CMD(FH49_RSCSR_CHNL0_RBDCB_BASE_REG);
+		IL_CMD(FH49_RSCSR_CHNL0_WPTR);
+		IL_CMD(FH49_MEM_RCSR_CHNL0_CONFIG_REG);
+		IL_CMD(FH49_MEM_RSSR_SHARED_CTRL_REG);
+		IL_CMD(FH49_MEM_RSSR_RX_STATUS_REG);
+		IL_CMD(FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
+		IL_CMD(FH49_TSSR_TX_STATUS_REG);
+		IL_CMD(FH49_TSSR_TX_ERROR_REG);
+	default:
+		return "UNKNOWN";
+	}
+}
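+
+/*
+ * NOTE (assumption): IL_CMD is presumably a stringifying case macro
+ * along the lines of
+ *
+ *	#define IL_CMD(x) case x: return #x
+ *
+ * so that each IL_CMD(reg) line above expands to a switch case returning
+ * the register's name as a string.
+ */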
+
+int
+il4965_dump_fh(struct il_priv *il, char **buf, bool display)
+{
+	int i;
+#ifdef CONFIG_IWLEGACY_DEBUG
+	int pos = 0;
+	size_t bufsz = 0;
+#endif
+	static const u32 fh_tbl[] = {
+		FH49_RSCSR_CHNL0_STTS_WPTR_REG,
+		FH49_RSCSR_CHNL0_RBDCB_BASE_REG,
+		FH49_RSCSR_CHNL0_WPTR,
+		FH49_MEM_RCSR_CHNL0_CONFIG_REG,
+		FH49_MEM_RSSR_SHARED_CTRL_REG,
+		FH49_MEM_RSSR_RX_STATUS_REG,
+		FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
+		FH49_TSSR_TX_STATUS_REG,
+		FH49_TSSR_TX_ERROR_REG
+	};
+#ifdef CONFIG_IWLEGACY_DEBUG
+	if (display) {
+		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+		*buf = kmalloc(bufsz, GFP_KERNEL);
+		if (!*buf)
+			return -ENOMEM;
+		pos +=
+		    scnprintf(*buf + pos, bufsz - pos, "FH register values:\n");
+		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+			pos +=
+			    scnprintf(*buf + pos, bufsz - pos,
+				      "  %34s: 0X%08x\n",
+				      il4965_get_fh_string(fh_tbl[i]),
+				      il_rd(il, fh_tbl[i]));
+		}
+		return pos;
+	}
+#endif
+	IL_ERR("FH register values:\n");
+	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+		IL_ERR("  %34s: 0X%08x\n", il4965_get_fh_string(fh_tbl[i]),
+		       il_rd(il, fh_tbl[i]));
+	}
+	return 0;
+}
+
+static void
+il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	struct il_missed_beacon_notif *missed_beacon;
+
+	missed_beacon = &pkt->u.missed_beacon;
+	if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
+	    il->missed_beacon_threshold) {
+		D_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
+			le32_to_cpu(missed_beacon->consecutive_missed_beacons),
+			le32_to_cpu(missed_beacon->total_missed_becons),
+			le32_to_cpu(missed_beacon->num_recvd_beacons),
+			le32_to_cpu(missed_beacon->num_expected_beacons));
+		if (!test_bit(S_SCANNING, &il->status))
+			il4965_init_sensitivity(il);
+	}
+}
+
+/* Calculate noise level, based on measurements during network silence just
+ *   before the beacon arrives.  This measurement can be done only if we
+ *   know exactly when to expect beacons, therefore only when associated. */
+static void
+il4965_rx_calc_noise(struct il_priv *il)
+{
+	struct stats_rx_non_phy *rx_info;
+	int num_active_rx = 0;
+	int total_silence = 0;
+	int bcn_silence_a, bcn_silence_b, bcn_silence_c;
+	int last_rx_noise;
+
+	rx_info = &(il->_4965.stats.rx.general);
+	bcn_silence_a =
+	    le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
+	bcn_silence_b =
+	    le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
+	bcn_silence_c =
+	    le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
+
+	if (bcn_silence_a) {
+		total_silence += bcn_silence_a;
+		num_active_rx++;
+	}
+	if (bcn_silence_b) {
+		total_silence += bcn_silence_b;
+		num_active_rx++;
+	}
+	if (bcn_silence_c) {
+		total_silence += bcn_silence_c;
+		num_active_rx++;
+	}
+
+	/* Average among active antennas */
+	if (num_active_rx)
+		last_rx_noise = (total_silence / num_active_rx) - 107;
+	else
+		last_rx_noise = IL_NOISE_MEAS_NOT_AVAILABLE;
+
+	D_CALIB("inband silence a %u, b %u, c %u, dBm %d\n", bcn_silence_a,
+		bcn_silence_b, bcn_silence_c, last_rx_noise);
+}
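+
+/*
+ * NOTE: worked example with assumed readings.  With in-band silence
+ * readings of 25, 30 and 35 on chains A, B and C, all three count as
+ * active, giving last_rx_noise = (25 + 30 + 35) / 3 - 107 = -77 dBm.
+ */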
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+/*
+ * Based on the assumption that all stats counters are DWORDs.
+ * FIXME: this function is for debugging only and does not handle
+ * counter roll-over.
+ */
+static void
+il4965_accumulative_stats(struct il_priv *il, __le32 *stats)
+{
+	int i, size;
+	__le32 *prev_stats;
+	u32 *accum_stats;
+	u32 *delta, *max_delta;
+	struct stats_general_common *general, *accum_general;
+
+	prev_stats = (__le32 *) &il->_4965.stats;
+	accum_stats = (u32 *) &il->_4965.accum_stats;
+	size = sizeof(struct il_notif_stats);
+	general = &il->_4965.stats.general.common;
+	accum_general = &il->_4965.accum_stats.general.common;
+	delta = (u32 *) &il->_4965.delta_stats;
+	max_delta = (u32 *) &il->_4965.max_delta;
+
+	for (i = sizeof(__le32); i < size;
+	     i +=
+	     sizeof(__le32), stats++, prev_stats++, delta++, max_delta++,
+	     accum_stats++) {
+		if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
+			*delta =
+			    (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats));
+			*accum_stats += *delta;
+			if (*delta > *max_delta)
+				*max_delta = *delta;
+		}
+	}
+
+	/* reset accumulative stats for "no-counter" type stats */
+	accum_general->temperature = general->temperature;
+	accum_general->ttl_timestamp = general->ttl_timestamp;
+}
+#endif
+
+static void
+il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	const int recalib_seconds = 60;
+	bool change;
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+
+	D_RX("Statistics notification received (%d vs %d).\n",
+	     (int)sizeof(struct il_notif_stats),
+	     le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK);
+
+	change =
+	    ((il->_4965.stats.general.common.temperature !=
+	      pkt->u.stats.general.common.temperature) ||
+	     ((il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK) !=
+	      (pkt->u.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK)));
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+	il4965_accumulative_stats(il, (__le32 *) &pkt->u.stats);
+#endif
+
+	/* TODO: reading some of stats is unneeded */
+	memcpy(&il->_4965.stats, &pkt->u.stats, sizeof(il->_4965.stats));
+
+	set_bit(S_STATS, &il->status);
+
+	/*
+	 * Reschedule the stats timer to occur in recalib_seconds to ensure
+	 * we get a thermal update even if the uCode doesn't give us one
+	 */
+	mod_timer(&il->stats_periodic,
+		  jiffies + msecs_to_jiffies(recalib_seconds * 1000));
+
+	if (unlikely(!test_bit(S_SCANNING, &il->status)) &&
+	    (pkt->hdr.cmd == N_STATS)) {
+		il4965_rx_calc_noise(il);
+		queue_work(il->workqueue, &il->run_time_calib_work);
+	}
+
+	if (change)
+		il4965_temperature_calib(il);
+}
+
+static void
+il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+
+	if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATS_CLEAR_MSK) {
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+		memset(&il->_4965.accum_stats, 0,
+		       sizeof(struct il_notif_stats));
+		memset(&il->_4965.delta_stats, 0,
+		       sizeof(struct il_notif_stats));
+		memset(&il->_4965.max_delta, 0, sizeof(struct il_notif_stats));
+#endif
+		D_RX("Statistics have been cleared\n");
+	}
+	il4965_hdl_stats(il, rxb);
+}
+
+
+/*
+ * mac80211 queues, ACs, hardware queues, FIFOs.
+ *
+ * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
+ *
+ * mac80211 uses the following numbers, which we get from it
+ * by way of skb_get_queue_mapping(skb):
+ *
+ *     VO      0
+ *     VI      1
+ *     BE      2
+ *     BK      3
+ *
+ *
+ * Regular (not A-MPDU) frames are put into hardware queues corresponding
+ * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
+ * own queue per aggregation session (RA/TID combination), such queues are
+ * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
+ * order to map frames to the right queue, we also need an AC->hw queue
+ * mapping. This is implemented here.
+ *
+ * Due to the way hw queues are set up (by the hw specific modules like
+ * 4965.c), the AC->hw queue mapping is the identity
+ * mapping.
+ */
+
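+/* Map 802.11 QoS TIDs 0-7 to mac80211 access categories */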
+static const u8 tid_to_ac[] = {
+	IEEE80211_AC_BE,
+	IEEE80211_AC_BK,
+	IEEE80211_AC_BK,
+	IEEE80211_AC_BE,
+	IEEE80211_AC_VI,
+	IEEE80211_AC_VI,
+	IEEE80211_AC_VO,
+	IEEE80211_AC_VO
+};
+
+static inline int
+il4965_get_ac_from_tid(u16 tid)
+{
+	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+		return tid_to_ac[tid];
+
+	/* no support for TIDs 8-15 yet */
+	return -EINVAL;
+}
+
+static inline int
+il4965_get_fifo_from_tid(u16 tid)
+{
+	static const u8 ac_to_fifo[] = {
+		IL_TX_FIFO_VO,
+		IL_TX_FIFO_VI,
+		IL_TX_FIFO_BE,
+		IL_TX_FIFO_BK,
+	};
+
+	if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+		return ac_to_fifo[tid_to_ac[tid]];
+
+	/* no support for TIDs 8-15 yet */
+	return -EINVAL;
+}
+
+/*
+ * Build the basic flags and fields of the C_TX command.
+ */
+static void
+il4965_tx_cmd_build_basic(struct il_priv *il, struct sk_buff *skb,
+			  struct il_tx_cmd *tx_cmd,
+			  struct ieee80211_tx_info *info,
+			  struct ieee80211_hdr *hdr, u8 std_id)
+{
+	__le16 fc = hdr->frame_control;
+	__le32 tx_flags = tx_cmd->tx_flags;
+
+	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+		tx_flags |= TX_CMD_FLG_ACK_MSK;
+		if (ieee80211_is_mgmt(fc))
+			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+		if (ieee80211_is_probe_resp(fc) &&
+		    !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
+			tx_flags |= TX_CMD_FLG_TSF_MSK;
+	} else {
+		tx_flags &= (~TX_CMD_FLG_ACK_MSK);
+		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+	}
+
+	if (ieee80211_is_back_req(fc))
+		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
+
+	tx_cmd->sta_id = std_id;
+	if (ieee80211_has_morefrags(fc))
+		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
+
+	if (ieee80211_is_data_qos(fc)) {
+		u8 *qc = ieee80211_get_qos_ctl(hdr);
+		tx_cmd->tid_tspec = qc[0] & 0xf;
+		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
+	} else {
+		tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+	}
+
+	il_tx_cmd_protection(il, info, fc, &tx_flags);
+
+	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
+	if (ieee80211_is_mgmt(fc)) {
+		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
+			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
+		else
+			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
+	} else {
+		tx_cmd->timeout.pm_frame_timeout = 0;
+	}
+
+	tx_cmd->driver_txop = 0;
+	tx_cmd->tx_flags = tx_flags;
+	tx_cmd->next_frame_len = 0;
+}
+
+static void
+il4965_tx_cmd_build_rate(struct il_priv *il,
+			 struct il_tx_cmd *tx_cmd,
+			 struct ieee80211_tx_info *info,
+			 struct ieee80211_sta *sta,
+			 __le16 fc)
+{
+	const u8 rts_retry_limit = 60;
+	u32 rate_flags;
+	int rate_idx;
+	u8 data_retry_limit;
+	u8 rate_plcp;
+
+	/* Set retry limit on DATA packets and Probe Responses */
+	if (ieee80211_is_probe_resp(fc))
+		data_retry_limit = 3;
+	else
+		data_retry_limit = IL4965_DEFAULT_TX_RETRY;
+	tx_cmd->data_retry_limit = data_retry_limit;
+	/* Set retry limit on RTS packets */
+	tx_cmd->rts_retry_limit = min(data_retry_limit, rts_retry_limit);
+
+	/* DATA packets will use the uCode station table for rate/antenna
+	 * selection */
+	if (ieee80211_is_data(fc)) {
+		tx_cmd->initial_rate_idx = 0;
+		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
+		return;
+	}
+
+	/*
+	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
+	 * not really a TX rate.  Thus, we use the lowest supported rate for
+	 * this band.  Also use the lowest supported rate if the stored rate
+	 * idx is invalid.
+	 */
+	rate_idx = info->control.rates[0].idx;
+	if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) ||
+	    rate_idx < 0 || rate_idx > RATE_COUNT_LEGACY)
+		rate_idx = rate_lowest_index(&il->bands[info->band], sta);
+	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
+	if (info->band == NL80211_BAND_5GHZ)
+		rate_idx += IL_FIRST_OFDM_RATE;
+	/* Get PLCP rate for tx_cmd->rate_n_flags */
+	rate_plcp = il_rates[rate_idx].plcp;
+	/* Zero out flags for this packet */
+	rate_flags = 0;
+
+	/* Set CCK flag as needed */
+	if (rate_idx >= IL_FIRST_CCK_RATE && rate_idx <= IL_LAST_CCK_RATE)
+		rate_flags |= RATE_MCS_CCK_MSK;
+
+	/* Set up antennas */
+	il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant);
+	rate_flags |= BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS;
+
+	/* Set the rate in the TX cmd */
+	tx_cmd->rate_n_flags = cpu_to_le32(rate_plcp | rate_flags);
+}
+
+static void
+il4965_tx_cmd_build_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
+			     struct il_tx_cmd *tx_cmd, struct sk_buff *skb_frag,
+			     int sta_id)
+{
+	struct ieee80211_key_conf *keyconf = info->control.hw_key;
+
+	switch (keyconf->cipher) {
+	case WLAN_CIPHER_SUITE_CCMP:
+		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
+		if (info->flags & IEEE80211_TX_CTL_AMPDU)
+			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
+		D_TX("tx_cmd with AES hwcrypto\n");
+		break;
+
+	case WLAN_CIPHER_SUITE_TKIP:
+		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
+		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
+		D_TX("tx_cmd with tkip hwcrypto\n");
+		break;
+
+	case WLAN_CIPHER_SUITE_WEP104:
+		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
+		/* fall through */
+	case WLAN_CIPHER_SUITE_WEP40:
+		tx_cmd->sec_ctl |=
+		    (TX_CMD_SEC_WEP | (keyconf->keyidx & TX_CMD_SEC_MSK) <<
+		     TX_CMD_SEC_SHIFT);
+
+		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
+
+		D_TX("Configuring packet for WEP encryption " "with key %d\n",
+		     keyconf->keyidx);
+		break;
+
+	default:
+		IL_ERR("Unknown encode cipher %x\n", keyconf->cipher);
+		break;
+	}
+}
+
+/*
+ * start C_TX command process
+ */
+int
+il4965_tx_skb(struct il_priv *il,
+	      struct ieee80211_sta *sta,
+	      struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct il_station_priv *sta_priv = NULL;
+	struct il_tx_queue *txq;
+	struct il_queue *q;
+	struct il_device_cmd *out_cmd;
+	struct il_cmd_meta *out_meta;
+	struct il_tx_cmd *tx_cmd;
+	int txq_id;
+	dma_addr_t phys_addr;
+	dma_addr_t txcmd_phys;
+	dma_addr_t scratch_phys;
+	u16 len, firstlen, secondlen;
+	u16 seq_number = 0;
+	__le16 fc;
+	u8 hdr_len;
+	u8 sta_id;
+	u8 wait_write_ptr = 0;
+	u8 tid = 0;
+	u8 *qc = NULL;
+	unsigned long flags;
+	bool is_agg = false;
+
+	spin_lock_irqsave(&il->lock, flags);
+	if (il_is_rfkill(il)) {
+		D_DROP("Dropping - RF KILL\n");
+		goto drop_unlock;
+	}
+
+	fc = hdr->frame_control;
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+	if (ieee80211_is_auth(fc))
+		D_TX("Sending AUTH frame\n");
+	else if (ieee80211_is_assoc_req(fc))
+		D_TX("Sending ASSOC frame\n");
+	else if (ieee80211_is_reassoc_req(fc))
+		D_TX("Sending REASSOC frame\n");
+#endif
+
+	hdr_len = ieee80211_hdrlen(fc);
+
+	/* For management frames use the broadcast id so as not to break aggregation */
+	if (!ieee80211_is_data(fc))
+		sta_id = il->hw_params.bcast_id;
+	else {
+		/* Find idx into station table for destination station */
+		sta_id = il_sta_id_or_broadcast(il, sta);
+
+		if (sta_id == IL_INVALID_STATION) {
+			D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
+			goto drop_unlock;
+		}
+	}
+
+	D_TX("station Id %d\n", sta_id);
+
+	if (sta)
+		sta_priv = (void *)sta->drv_priv;
+
+	if (sta_priv && sta_priv->asleep &&
+	    (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) {
+		/*
+		 * This sends an asynchronous command to the device,
+		 * but we can rely on it being processed before the
+		 * next frame is processed -- and the next frame to
+		 * this station is the one that will consume this
+		 * counter.
+		 * For now set the counter to just 1 since we do not
+		 * support uAPSD yet.
+		 */
+		il4965_sta_modify_sleep_tx_count(il, sta_id, 1);
+	}
+
+	/* FIXME: remove me ? */
+	WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
+
+	/* Access category (AC) is also the queue number */
+	txq_id = skb_get_queue_mapping(skb);
+
+	/* irqs already disabled/saved above when locking il->lock */
+	spin_lock(&il->sta_lock);
+
+	if (ieee80211_is_data_qos(fc)) {
+		qc = ieee80211_get_qos_ctl(hdr);
+		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+		if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
+			spin_unlock(&il->sta_lock);
+			goto drop_unlock;
+		}
+		seq_number = il->stations[sta_id].tid[tid].seq_number;
+		seq_number &= IEEE80211_SCTL_SEQ;
+		hdr->seq_ctrl =
+		    hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG);
+		hdr->seq_ctrl |= cpu_to_le16(seq_number);
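+		/* The SN lives in bits 4-15 of seq_ctrl, so the next SN is +0x10 */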
+		seq_number += 0x10;
+		/* aggregation is on for this <sta,tid> */
+		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+		    il->stations[sta_id].tid[tid].agg.state == IL_AGG_ON) {
+			txq_id = il->stations[sta_id].tid[tid].agg.txq_id;
+			is_agg = true;
+		}
+	}
+
+	txq = &il->txq[txq_id];
+	q = &txq->q;
+
+	if (unlikely(il_queue_space(q) < q->high_mark)) {
+		spin_unlock(&il->sta_lock);
+		goto drop_unlock;
+	}
+
+	if (ieee80211_is_data_qos(fc)) {
+		il->stations[sta_id].tid[tid].tfds_in_queue++;
+		if (!ieee80211_has_morefrags(fc))
+			il->stations[sta_id].tid[tid].seq_number = seq_number;
+	}
+
+	spin_unlock(&il->sta_lock);
+
+	txq->skbs[q->write_ptr] = skb;
+
+	/* Set up first empty entry in queue's array of Tx/cmd buffers */
+	out_cmd = txq->cmd[q->write_ptr];
+	out_meta = &txq->meta[q->write_ptr];
+	tx_cmd = &out_cmd->cmd.tx;
+	memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
+	memset(tx_cmd, 0, sizeof(struct il_tx_cmd));
+
+	/*
+	 * Set up the Tx-command (not MAC!) header.
+	 * Store the chosen Tx queue and TFD idx within the sequence field;
+	 * after Tx, uCode's Tx response will return this value so driver can
+	 * locate the frame within the tx queue and do post-tx processing.
+	 */
+	out_cmd->hdr.cmd = C_TX;
+	out_cmd->hdr.sequence =
+	    cpu_to_le16((u16)
+			(QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));
+
+	/* Copy MAC header from skb into command buffer */
+	memcpy(tx_cmd->hdr, hdr, hdr_len);
+
+	/* Total # bytes to be transmitted */
+	tx_cmd->len = cpu_to_le16((u16) skb->len);
+
+	if (info->control.hw_key)
+		il4965_tx_cmd_build_hwcrypto(il, info, tx_cmd, skb, sta_id);
+
+	/* TODO need this for burst mode later on */
+	il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id);
+
+	il4965_tx_cmd_build_rate(il, tx_cmd, info, sta, fc);
+
+	/*
+	 * Use the first empty entry in this queue's command buffer array
+	 * to contain the Tx command and MAC header concatenated together
+	 * (payload data will be in another buffer).
+	 * Size of this varies, due to varying MAC header length.
+	 * If end is not dword aligned, we'll have 2 extra bytes at the end
+	 * of the MAC header (device reads on dword boundaries).
+	 * We'll tell device about this padding later.
+	 */
+	len = sizeof(struct il_tx_cmd) + sizeof(struct il_cmd_header) + hdr_len;
+	firstlen = (len + 3) & ~3;
+
+	/* Tell NIC about any 2-byte padding after MAC header */
+	if (firstlen != len)
+		tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+
+	/* Physical address of this Tx command's header (not MAC header!),
+	 * within command buffer array. */
+	txcmd_phys =
+	    pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen,
+			   PCI_DMA_BIDIRECTIONAL);
+	if (unlikely(pci_dma_mapping_error(il->pci_dev, txcmd_phys)))
+		goto drop_unlock;
+
+	/* Set up TFD's 2nd entry to point directly to remainder of skb,
+	 * if any (802.11 null frames have no payload). */
+	secondlen = skb->len - hdr_len;
+	if (secondlen > 0) {
+		phys_addr =
+		    pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen,
+				   PCI_DMA_TODEVICE);
+		if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr)))
+			goto drop_unlock;
+	}
+
+	/* Add buffer containing Tx command and MAC(!) header to TFD's
+	 * first entry */
+	il->ops->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen, 1, 0);
+	dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+	dma_unmap_len_set(out_meta, len, firstlen);
+	if (secondlen)
+		il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, secondlen,
+					       0, 0);
+
+	if (!ieee80211_has_morefrags(hdr->frame_control)) {
+		txq->need_update = 1;
+	} else {
+		wait_write_ptr = 1;
+		txq->need_update = 0;
+	}
+
+	scratch_phys =
+	    txcmd_phys + sizeof(struct il_cmd_header) +
+	    offsetof(struct il_tx_cmd, scratch);
+
+	/* take back ownership of DMA buffer to enable update */
+	pci_dma_sync_single_for_cpu(il->pci_dev, txcmd_phys, firstlen,
+				    PCI_DMA_BIDIRECTIONAL);
+	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
+	tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys);
+
+	il_update_stats(il, true, fc, skb->len);
+
+	D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
+	D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
+	il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd, sizeof(*tx_cmd));
+	il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr, hdr_len);
+
+	/* Set up entry for this TFD in Tx byte-count array */
+	if (info->flags & IEEE80211_TX_CTL_AMPDU)
+		il->ops->txq_update_byte_cnt_tbl(il, txq, le16_to_cpu(tx_cmd->len));
+
+	pci_dma_sync_single_for_device(il->pci_dev, txcmd_phys, firstlen,
+				       PCI_DMA_BIDIRECTIONAL);
+
+	/* Tell device the write idx *just past* this latest filled TFD */
+	q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
+	il_txq_update_write_ptr(il, txq);
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	/*
+	 * At this point the frame is "transmitted" successfully
+	 * and we will get a TX status notification eventually.
+	 */
+
+	/*
+	 * Avoid atomic ops if it isn't an associated client.
+	 * Also, if this is a packet for aggregation, don't
+	 * increase the counter because the ucode will stop
+	 * aggregation queues when their respective station
+	 * goes to sleep.
+	 */
+	if (sta_priv && sta_priv->client && !is_agg)
+		atomic_inc(&sta_priv->pending_frames);
+
+	if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
+		if (wait_write_ptr) {
+			spin_lock_irqsave(&il->lock, flags);
+			txq->need_update = 1;
+			il_txq_update_write_ptr(il, txq);
+			spin_unlock_irqrestore(&il->lock, flags);
+		} else {
+			il_stop_queue(il, txq);
+		}
+	}
+
+	return 0;
+
+drop_unlock:
+	spin_unlock_irqrestore(&il->lock, flags);
+	return -1;
+}
+
+static inline int
+il4965_alloc_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr, size_t size)
+{
+	ptr->addr = dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma,
+				       GFP_KERNEL);
+	if (!ptr->addr)
+		return -ENOMEM;
+	ptr->size = size;
+	return 0;
+}
+
+static inline void
+il4965_free_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr)
+{
+	if (unlikely(!ptr->addr))
+		return;
+
+	dma_free_coherent(&il->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
+	memset(ptr, 0, sizeof(*ptr));
+}
+
+/**
+ * il4965_hw_txq_ctx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+void
+il4965_hw_txq_ctx_free(struct il_priv *il)
+{
+	int txq_id;
+
+	/* Tx queues */
+	if (il->txq) {
+		for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+			if (txq_id == il->cmd_queue)
+				il_cmd_queue_free(il);
+			else
+				il_tx_queue_free(il, txq_id);
+	}
+	il4965_free_dma_ptr(il, &il->kw);
+
+	il4965_free_dma_ptr(il, &il->scd_bc_tbls);
+
+	/* free tx queue structure */
+	il_free_txq_mem(il);
+}
+
+/**
+ * il4965_txq_ctx_alloc - allocate and initialize all Tx DMA structures
+ * @il: driver private data
+ *
+ * Return: 0 on success, error code otherwise
+ */
+int
+il4965_txq_ctx_alloc(struct il_priv *il)
+{
+	int ret, txq_id;
+	unsigned long flags;
+
+	/* Free all tx/cmd queues and keep-warm buffer */
+	il4965_hw_txq_ctx_free(il);
+
+	ret =
+	    il4965_alloc_dma_ptr(il, &il->scd_bc_tbls,
+				 il->hw_params.scd_bc_tbls_size);
+	if (ret) {
+		IL_ERR("Scheduler BC Table allocation failed\n");
+		goto error_bc_tbls;
+	}
+	/* Alloc keep-warm buffer */
+	ret = il4965_alloc_dma_ptr(il, &il->kw, IL_KW_SIZE);
+	if (ret) {
+		IL_ERR("Keep Warm allocation failed\n");
+		goto error_kw;
+	}
+
+	/* allocate tx queue structure */
+	ret = il_alloc_txq_mem(il);
+	if (ret)
+		goto error;
+
+	spin_lock_irqsave(&il->lock, flags);
+
+	/* Turn off all Tx DMA fifos */
+	il4965_txq_set_sched(il, 0);
+
+	/* Tell NIC where to find the "keep warm" buffer */
+	il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);
+
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	/* Alloc and init all Tx queues, including the command queue (#4) */
+	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
+		ret = il_tx_queue_init(il, txq_id);
+		if (ret) {
+			IL_ERR("Tx %d queue init failed\n", txq_id);
+			goto error;
+		}
+	}
+
+	return ret;
+
+error:
+	il4965_hw_txq_ctx_free(il);
+	il4965_free_dma_ptr(il, &il->kw);
+error_kw:
+	il4965_free_dma_ptr(il, &il->scd_bc_tbls);
+error_bc_tbls:
+	return ret;
+}
+
+void
+il4965_txq_ctx_reset(struct il_priv *il)
+{
+	int txq_id;
+	unsigned long flags;
+
+	spin_lock_irqsave(&il->lock, flags);
+
+	/* Turn off all Tx DMA fifos */
+	il4965_txq_set_sched(il, 0);
+	/* Tell NIC where to find the "keep warm" buffer */
+	il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);
+
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	/* Alloc and init all Tx queues, including the command queue (#4) */
+	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+		il_tx_queue_reset(il, txq_id);
+}
+
+static void
+il4965_txq_ctx_unmap(struct il_priv *il)
+{
+	int txq_id;
+
+	if (!il->txq)
+		return;
+
+	/* Unmap DMA from host system and free skb's */
+	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+		if (txq_id == il->cmd_queue)
+			il_cmd_queue_unmap(il);
+		else
+			il_tx_queue_unmap(il, txq_id);
+}
+
+/**
+ * il4965_txq_ctx_stop - Stop all Tx DMA channels
+ */
+void
+il4965_txq_ctx_stop(struct il_priv *il)
+{
+	int ch, ret;
+
+	_il_wr_prph(il, IL49_SCD_TXFACT, 0);
+
+	/* Stop each Tx DMA channel, and wait for it to be idle */
+	for (ch = 0; ch < il->hw_params.dma_chnl_num; ch++) {
+		_il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
+		ret =
+		    _il_poll_bit(il, FH49_TSSR_TX_STATUS_REG,
+				 FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
+				 FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
+				 1000);
+		if (ret < 0)
+			IL_ERR("Timeout stopping DMA channel %d [0x%08x]",
+			       ch, _il_rd(il, FH49_TSSR_TX_STATUS_REG));
+	}
+}
+
+/*
+ * Find first available (lowest unused) Tx Queue, mark it "active".
+ * Called only when finding queue for aggregation.
+ * Should never return anything < 7, because they should already
+ * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
+ */
+static int
+il4965_txq_ctx_activate_free(struct il_priv *il)
+{
+	int txq_id;
+
+	for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+		if (!test_and_set_bit(txq_id, &il->txq_ctx_active_msk))
+			return txq_id;
+	return -1;
+}
+
+/**
+ * il4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
+ */
+static void
+il4965_tx_queue_stop_scheduler(struct il_priv *il, u16 txq_id)
+{
+	/* Simply stop the queue, but don't change any configuration;
+	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
+	il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
+		   (0 << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+		   (1 << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
+}
+
+/**
+ * il4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
+ */
+static int
+il4965_tx_queue_set_q2ratid(struct il_priv *il, u16 ra_tid, u16 txq_id)
+{
+	u32 tbl_dw_addr;
+	u32 tbl_dw;
+	u16 scd_q2ratid;
+
+	scd_q2ratid = ra_tid & IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
+
+	tbl_dw_addr =
+	    il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
+
+	tbl_dw = il_read_targ_mem(il, tbl_dw_addr);
+
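+	/* Even-numbered queues use the low 16 bits of the dword, odd the high 16 */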
+	if (txq_id & 0x1)
+		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
+	else
+		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
+
+	il_write_targ_mem(il, tbl_dw_addr, tbl_dw);
+
+	return 0;
+}
+
+/**
+ * il4965_txq_agg_enable - Set up & enable aggregation for selected queue
+ *
+ * NOTE:  txq_id must be at least IL49_FIRST_AMPDU_QUEUE,
+ *        i.e. it must be one of the higher queues used for aggregation
+ */
+static int
+il4965_txq_agg_enable(struct il_priv *il, int txq_id, int tx_fifo, int sta_id,
+		      int tid, u16 ssn_idx)
+{
+	unsigned long flags;
+	u16 ra_tid;
+	int ret;
+
+	if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
+	    (IL49_FIRST_AMPDU_QUEUE +
+	     il->cfg->num_of_ampdu_queues <= txq_id)) {
+		IL_WARN("queue number out of range: %d, must be %d to %d\n",
+			txq_id, IL49_FIRST_AMPDU_QUEUE,
+			IL49_FIRST_AMPDU_QUEUE +
+			il->cfg->num_of_ampdu_queues - 1);
+		return -EINVAL;
+	}
+
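+	/* The receiver-address/TID pair uniquely identifies this aggregation session */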
+	ra_tid = BUILD_RAxTID(sta_id, tid);
+
+	/* Modify device's station table to Tx this TID */
+	ret = il4965_sta_tx_modify_enable_tid(il, sta_id, tid);
+	if (ret)
+		return ret;
+
+	spin_lock_irqsave(&il->lock, flags);
+
+	/* Stop this Tx queue before configuring it */
+	il4965_tx_queue_stop_scheduler(il, txq_id);
+
+	/* Map receiver-address / traffic-ID to this queue */
+	il4965_tx_queue_set_q2ratid(il, ra_tid, txq_id);
+
+	/* Set this queue as a chain-building queue */
+	il_set_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
+
+	/* Place first TFD at idx corresponding to start sequence number.
+	 * Assumes that ssn_idx is valid (!= 0xFFF) */
+	il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+	il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+	il4965_set_wr_ptrs(il, txq_id, ssn_idx);
+
+	/* Set up Tx win size and frame limit for this queue */
+	il_write_targ_mem(il,
+			  il->scd_base_addr +
+			  IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
+			  (SCD_WIN_SIZE << IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS)
+			  & IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
+
+	il_write_targ_mem(il,
+			  il->scd_base_addr +
+			  IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
+			  (SCD_FRAME_LIMIT <<
+			   IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+			  IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
+
+	il_set_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
+
+	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
+	il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 1);
+
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	return 0;
+}
+
+int
+il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
+		    struct ieee80211_sta *sta, u16 tid, u16 * ssn)
+{
+	int sta_id;
+	int tx_fifo;
+	int txq_id;
+	int ret;
+	unsigned long flags;
+	struct il_tid_data *tid_data;
+
+	/* FIXME: warning if tx fifo not found ? */
+	tx_fifo = il4965_get_fifo_from_tid(tid);
+	if (unlikely(tx_fifo < 0))
+		return tx_fifo;
+
+	D_HT("%s on ra = %pM tid = %d\n", __func__, sta->addr, tid);
+
+	sta_id = il_sta_id(sta);
+	if (sta_id == IL_INVALID_STATION) {
+		IL_ERR("Start AGG on invalid station\n");
+		return -ENXIO;
+	}
+	if (unlikely(tid >= MAX_TID_COUNT))
+		return -EINVAL;
+
+	if (il->stations[sta_id].tid[tid].agg.state != IL_AGG_OFF) {
+		IL_ERR("Start AGG when state is not IL_AGG_OFF !\n");
+		return -ENXIO;
+	}
+
+	txq_id = il4965_txq_ctx_activate_free(il);
+	if (txq_id == -1) {
+		IL_ERR("No free aggregation queue available\n");
+		return -ENXIO;
+	}
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	tid_data = &il->stations[sta_id].tid[tid];
+	*ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
+	tid_data->agg.txq_id = txq_id;
+	il_set_swq_id(&il->txq[txq_id], il4965_get_ac_from_tid(tid), txq_id);
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	ret = il4965_txq_agg_enable(il, txq_id, tx_fifo, sta_id, tid, *ssn);
+	if (ret)
+		return ret;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	tid_data = &il->stations[sta_id].tid[tid];
+	if (tid_data->tfds_in_queue == 0) {
+		D_HT("HW queue is empty\n");
+		tid_data->agg.state = IL_AGG_ON;
+		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+	} else {
+		D_HT("HW queue is NOT empty: %d packets in HW queue\n",
+		     tid_data->tfds_in_queue);
+		tid_data->agg.state = IL_EMPTYING_HW_QUEUE_ADDBA;
+	}
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+	return ret;
+}
+
+/*
+ * txq_id must be at least IL49_FIRST_AMPDU_QUEUE;
+ * il->lock must be held by the caller
+ */
+static int
+il4965_txq_agg_disable(struct il_priv *il, u16 txq_id, u16 ssn_idx, u8 tx_fifo)
+{
+	if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
+	    (IL49_FIRST_AMPDU_QUEUE +
+	     il->cfg->num_of_ampdu_queues <= txq_id)) {
+		IL_WARN("queue number out of range: %d, must be %d to %d\n",
+			txq_id, IL49_FIRST_AMPDU_QUEUE,
+			IL49_FIRST_AMPDU_QUEUE +
+			il->cfg->num_of_ampdu_queues - 1);
+		return -EINVAL;
+	}
+
+	il4965_tx_queue_stop_scheduler(il, txq_id);
+
+	il_clear_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
+
+	il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+	il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+	/* supposes that ssn_idx is valid (!= 0xFFF) */
+	il4965_set_wr_ptrs(il, txq_id, ssn_idx);
+
+	il_clear_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
+	il_txq_ctx_deactivate(il, txq_id);
+	il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 0);
+
+	return 0;
+}
+
+int
+il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
+		   struct ieee80211_sta *sta, u16 tid)
+{
+	int tx_fifo_id, txq_id, sta_id, ssn;
+	struct il_tid_data *tid_data;
+	int write_ptr, read_ptr;
+	unsigned long flags;
+
+	/* FIXME: warning if tx_fifo_id not found ? */
+	tx_fifo_id = il4965_get_fifo_from_tid(tid);
+	if (unlikely(tx_fifo_id < 0))
+		return tx_fifo_id;
+
+	sta_id = il_sta_id(sta);
+
+	if (sta_id == IL_INVALID_STATION) {
+		IL_ERR("Invalid station for AGG tid %d\n", tid);
+		return -ENXIO;
+	}
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+
+	tid_data = &il->stations[sta_id].tid[tid];
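+	/* Extract the 12-bit sequence number from the sequence-control field */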
+	ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
+	txq_id = tid_data->agg.txq_id;
+
+	switch (il->stations[sta_id].tid[tid].agg.state) {
+	case IL_EMPTYING_HW_QUEUE_ADDBA:
+		/*
+		 * This can happen if the peer stops aggregation
+		 * again before we've had a chance to drain the
+		 * queue we selected previously, i.e. before the
+		 * session was really started completely.
+		 */
+		D_HT("AGG stop before setup done\n");
+		goto turn_off;
+	case IL_AGG_ON:
+		break;
+	default:
+		IL_WARN("Stopping AGG while state not ON or starting\n");
+	}
+
+	write_ptr = il->txq[txq_id].q.write_ptr;
+	read_ptr = il->txq[txq_id].q.read_ptr;
+
+	/* The queue is not empty */
+	if (write_ptr != read_ptr) {
+		D_HT("Stopping a non empty AGG HW QUEUE\n");
+		il->stations[sta_id].tid[tid].agg.state =
+		    IL_EMPTYING_HW_QUEUE_DELBA;
+		spin_unlock_irqrestore(&il->sta_lock, flags);
+		return 0;
+	}
+
+	D_HT("HW queue is empty\n");
+turn_off:
+	il->stations[sta_id].tid[tid].agg.state = IL_AGG_OFF;
+
+	/* do not restore/save irqs */
+	spin_unlock(&il->sta_lock);
+	spin_lock(&il->lock);
+
+	/*
+	 * The only reason this call can fail is a queue number out of range,
+	 * which can happen if the uCode is reloaded and all the station
+	 * information is lost. If it is outside the range, there is no need
+	 * to deactivate the uCode queue; just return "success" to allow
+	 * mac80211 to clean up its own data.
+	 */
+	il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo_id);
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+
+	return 0;
+}
+
+int
+il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id)
+{
+	struct il_queue *q = &il->txq[txq_id].q;
+	u8 *addr = il->stations[sta_id].sta.sta.addr;
+	struct il_tid_data *tid_data = &il->stations[sta_id].tid[tid];
+
+	lockdep_assert_held(&il->sta_lock);
+
+	switch (il->stations[sta_id].tid[tid].agg.state) {
+	case IL_EMPTYING_HW_QUEUE_DELBA:
+		/* We are reclaiming the last packet of the */
+		/* aggregated HW queue */
+		if (txq_id == tid_data->agg.txq_id &&
+		    q->read_ptr == q->write_ptr) {
+			u16 ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
+			int tx_fifo = il4965_get_fifo_from_tid(tid);
+			D_HT("HW queue empty: continue DELBA flow\n");
+			il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo);
+			tid_data->agg.state = IL_AGG_OFF;
+			ieee80211_stop_tx_ba_cb_irqsafe(il->vif, addr, tid);
+		}
+		break;
+	case IL_EMPTYING_HW_QUEUE_ADDBA:
+		/* We are reclaiming the last packet of the queue */
+		if (tid_data->tfds_in_queue == 0) {
+			D_HT("HW queue empty: continue ADDBA flow\n");
+			tid_data->agg.state = IL_AGG_ON;
+			ieee80211_start_tx_ba_cb_irqsafe(il->vif, addr, tid);
+		}
+		break;
+	}
+
+	return 0;
+}
+
+static void
+il4965_non_agg_tx_status(struct il_priv *il, const u8 *addr1)
+{
+	struct ieee80211_sta *sta;
+	struct il_station_priv *sta_priv;
+
+	rcu_read_lock();
+	sta = ieee80211_find_sta(il->vif, addr1);
+	if (sta) {
+		sta_priv = (void *)sta->drv_priv;
+		/* avoid atomic ops if this isn't a client */
+		if (sta_priv->client &&
+		    atomic_dec_return(&sta_priv->pending_frames) == 0)
+			ieee80211_sta_block_awake(il->hw, sta, false);
+	}
+	rcu_read_unlock();
+}
+
+static void
+il4965_tx_status(struct il_priv *il, struct sk_buff *skb, bool is_agg)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+	if (!is_agg)
+		il4965_non_agg_tx_status(il, hdr->addr1);
+
+	ieee80211_tx_status_irqsafe(il->hw, skb);
+}
+
+int
+il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
+{
+	struct il_tx_queue *txq = &il->txq[txq_id];
+	struct il_queue *q = &txq->q;
+	int nfreed = 0;
+	struct ieee80211_hdr *hdr;
+	struct sk_buff *skb;
+
+	if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
+		IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
+		       "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
+		       q->write_ptr, q->read_ptr);
+		return 0;
+	}
+
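+	/* Advance read_ptr, freeing each TFD, until it catches up with idx */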
+	for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
+	     q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+		skb = txq->skbs[txq->q.read_ptr];
+
+		if (WARN_ON_ONCE(skb == NULL))
+			continue;
+
+		hdr = (struct ieee80211_hdr *) skb->data;
+		if (ieee80211_is_data_qos(hdr->frame_control))
+			nfreed++;
+
+		il4965_tx_status(il, skb, txq_id >= IL4965_FIRST_AMPDU_QUEUE);
+
+		txq->skbs[txq->q.read_ptr] = NULL;
+		il->ops->txq_free_tfd(il, txq);
+	}
+	return nfreed;
+}
+
+/**
+ * il4965_tx_status_reply_compressed_ba - Update tx status from block-ack
+ *
+ * Go through block-ack's bitmap of ACK'd frames, update driver's record of
+ * ACK vs. not.  This gets sent to mac80211, then to rate scaling algo.
+ */
+static int
+il4965_tx_status_reply_compressed_ba(struct il_priv *il, struct il_ht_agg *agg,
+				     struct il_compressed_ba_resp *ba_resp)
+{
+	int i, sh, ack;
+	u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
+	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
+	int successes = 0;
+	struct ieee80211_tx_info *info;
+	u64 bitmap, sent_bitmap;
+
+	if (unlikely(!agg->wait_for_ba)) {
+		if (unlikely(ba_resp->bitmap))
+			IL_ERR("Received BA when not expected\n");
+		return -EINVAL;
+	}
+
+	/* Mark that the expected block-ack response arrived */
+	agg->wait_for_ba = 0;
+	D_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
+
+	/* Calculate shift to align block-ack bits with our Tx win bits */
+	sh = agg->start_idx - SEQ_TO_IDX(seq_ctl >> 4);
+	if (sh < 0)		/* something is wrong with the indices */
+		sh += 0x100;
+
+	if (agg->frame_count > (64 - sh)) {
+		D_TX_REPLY("more frames than bitmap size");
+		return -1;
+	}
+
+	/* don't use 64-bit values for now */
+	bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
+
+	/* check for success or failure according to the
+	 * transmitted bitmap and block-ack bitmap */
+	sent_bitmap = bitmap & agg->bitmap;
+
+	/* For each frame attempted in aggregation,
+	 * update driver's record of tx frame's status. */
+	i = 0;
+	while (sent_bitmap) {
+		ack = sent_bitmap & 1ULL;
+		successes += ack;
+		D_TX_REPLY("%s ON i=%d idx=%d raw=%d\n", ack ? "ACK" : "NACK",
+			   i, (agg->start_idx + i) & 0xff, agg->start_idx + i);
+		sent_bitmap >>= 1;
+		++i;
+	}
+
+	D_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);
+
+	info = IEEE80211_SKB_CB(il->txq[scd_flow].skbs[agg->start_idx]);
+	memset(&info->status, 0, sizeof(info->status));
+	info->flags |= IEEE80211_TX_STAT_ACK;
+	info->flags |= IEEE80211_TX_STAT_AMPDU;
+	info->status.ampdu_ack_len = successes;
+	info->status.ampdu_len = agg->frame_count;
+	il4965_hwrate_to_tx_control(il, agg->rate_n_flags, info);
+
+	return 0;
+}
+
+static inline bool
+il4965_is_tx_success(u32 status)
+{
+	status &= TX_STATUS_MSK;
+	return (status == TX_STATUS_SUCCESS || status == TX_STATUS_DIRECT_DONE);
+}
+
+static u8
+il4965_find_station(struct il_priv *il, const u8 *addr)
+{
+	int i;
+	int start = 0;
+	int ret = IL_INVALID_STATION;
+	unsigned long flags;
+
+	if (il->iw_mode == NL80211_IFTYPE_ADHOC)
+		start = IL_STA_ID;
+
+	if (is_broadcast_ether_addr(addr))
+		return il->hw_params.bcast_id;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	for (i = start; i < il->hw_params.max_stations; i++)
+		if (il->stations[i].used &&
+		    ether_addr_equal(il->stations[i].sta.sta.addr, addr)) {
+			ret = i;
+			goto out;
+		}
+
+	D_ASSOC("can not find STA %pM total %d\n", addr, il->num_stations);
+
+out:
+	/*
+	 * It may be possible that more commands interacting with stations
+	 * arrive before we have completed processing the addition of the
+	 * station.
+	 */
+	if (ret != IL_INVALID_STATION &&
+	    (!(il->stations[ret].used & IL_STA_UCODE_ACTIVE) ||
+	     ((il->stations[ret].used & IL_STA_UCODE_ACTIVE) &&
+	      (il->stations[ret].used & IL_STA_UCODE_INPROGRESS)))) {
+		IL_ERR("Requested station info for sta %d before ready.\n",
+		       ret);
+		ret = IL_INVALID_STATION;
+	}
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+	return ret;
+}
+
+static int
+il4965_get_ra_sta_id(struct il_priv *il, struct ieee80211_hdr *hdr)
+{
+	if (il->iw_mode == NL80211_IFTYPE_STATION)
+		return IL_AP_ID;
+	else {
+		u8 *da = ieee80211_get_DA(hdr);
+
+		return il4965_find_station(il, da);
+	}
+}
+
+static inline u32
+il4965_get_scd_ssn(struct il4965_tx_resp *tx_resp)
+{
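+	/* The dword following the per-frame status entries holds the scheduler's SSN */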
+	return le32_to_cpup(&tx_resp->u.status +
+			    tx_resp->frame_count) & IEEE80211_MAX_SN;
+}
+
+static inline u32
+il4965_tx_status_to_mac80211(u32 status)
+{
+	status &= TX_STATUS_MSK;
+
+	switch (status) {
+	case TX_STATUS_SUCCESS:
+	case TX_STATUS_DIRECT_DONE:
+		return IEEE80211_TX_STAT_ACK;
+	case TX_STATUS_FAIL_DEST_PS:
+		return IEEE80211_TX_STAT_TX_FILTERED;
+	default:
+		return 0;
+	}
+}
+
+/**
+ * il4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
+ */
+static int
+il4965_tx_status_reply_tx(struct il_priv *il, struct il_ht_agg *agg,
+			  struct il4965_tx_resp *tx_resp, int txq_id,
+			  u16 start_idx)
+{
+	u16 status;
+	struct agg_tx_status *frame_status = tx_resp->u.agg_status;
+	struct ieee80211_tx_info *info = NULL;
+	struct ieee80211_hdr *hdr = NULL;
+	u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
+	int i, sh, idx;
+	u16 seq;
+
+	if (agg->wait_for_ba)
+		D_TX_REPLY("got tx response w/o block-ack\n");
+
+	agg->frame_count = tx_resp->frame_count;
+	agg->start_idx = start_idx;
+	agg->rate_n_flags = rate_n_flags;
+	agg->bitmap = 0;
+
+	/* num frames attempted by Tx command */
+	if (agg->frame_count == 1) {
+		/* Only one frame was attempted; no block-ack will arrive */
+		status = le16_to_cpu(frame_status[0].status);
+		idx = start_idx;
+
+		D_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
+			   agg->frame_count, agg->start_idx, idx);
+
+		info = IEEE80211_SKB_CB(il->txq[txq_id].skbs[idx]);
+		info->status.rates[0].count = tx_resp->failure_frame + 1;
+		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+		info->flags |= il4965_tx_status_to_mac80211(status);
+		il4965_hwrate_to_tx_control(il, rate_n_flags, info);
+
+		D_TX_REPLY("1 Frame 0x%x failure :%d\n", status & 0xff,
+			   tx_resp->failure_frame);
+		D_TX_REPLY("Rate Info rate_n_flags=%x\n", rate_n_flags);
+
+		agg->wait_for_ba = 0;
+	} else {
+		/* Two or more frames were attempted; expect block-ack */
+		u64 bitmap = 0;
+		int start = agg->start_idx;
+		struct sk_buff *skb;
+
+		/* Construct bit-map of pending frames within Tx win */
+		for (i = 0; i < agg->frame_count; i++) {
+			u16 sc;
+			status = le16_to_cpu(frame_status[i].status);
+			seq = le16_to_cpu(frame_status[i].sequence);
+			idx = SEQ_TO_IDX(seq);
+			txq_id = SEQ_TO_QUEUE(seq);
+
+			if (status &
+			    (AGG_TX_STATE_FEW_BYTES_MSK |
+			     AGG_TX_STATE_ABORT_MSK))
+				continue;
+
+			D_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
+				   agg->frame_count, txq_id, idx);
+
+			skb = il->txq[txq_id].skbs[idx];
+			if (WARN_ON_ONCE(skb == NULL))
+				return -1;
+			hdr = (struct ieee80211_hdr *) skb->data;
+
+			sc = le16_to_cpu(hdr->seq_ctrl);
+			if (idx != (IEEE80211_SEQ_TO_SN(sc) & 0xff)) {
+				IL_ERR("BUG_ON idx doesn't match seq control"
+				       " idx=%d, seq_idx=%d, seq=%d\n", idx,
+				       IEEE80211_SEQ_TO_SN(sc), hdr->seq_ctrl);
+				return -1;
+			}
+
+			D_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n", i, idx,
+				   IEEE80211_SEQ_TO_SN(sc));
+
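+			/* Build the bitmap, handling wrap-around of the 256-entry queue */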
+			sh = idx - start;
+			if (sh > 64) {
+				sh = (start - idx) + 0xff;
+				bitmap = bitmap << sh;
+				sh = 0;
+				start = idx;
+			} else if (sh < -64)
+				sh = 0xff - (start - idx);
+			else if (sh < 0) {
+				sh = start - idx;
+				start = idx;
+				bitmap = bitmap << sh;
+				sh = 0;
+			}
+			bitmap |= 1ULL << sh;
+			D_TX_REPLY("start=%d bitmap=0x%llx\n", start,
+				   (unsigned long long)bitmap);
+		}
+
+		agg->bitmap = bitmap;
+		agg->start_idx = start;
+		D_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
+			   agg->frame_count, agg->start_idx,
+			   (unsigned long long)agg->bitmap);
+
+		if (bitmap)
+			agg->wait_for_ba = 1;
+	}
+	return 0;
+}
+
+/**
+ * il4965_hdl_tx - Handle standard (non-aggregation) Tx response
+ */
+static void
+il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+	int txq_id = SEQ_TO_QUEUE(sequence);
+	int idx = SEQ_TO_IDX(sequence);
+	struct il_tx_queue *txq = &il->txq[txq_id];
+	struct sk_buff *skb;
+	struct ieee80211_hdr *hdr;
+	struct ieee80211_tx_info *info;
+	struct il4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
+	u32 status = le32_to_cpu(tx_resp->u.status);
+	int uninitialized_var(tid);
+	int sta_id;
+	int freed;
+	u8 *qc = NULL;
+	unsigned long flags;
+
+	if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
+		IL_ERR("Read idx for DMA queue txq_id (%d) idx %d "
+		       "is out of range [0-%d] %d %d\n", txq_id, idx,
+		       txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
+		return;
+	}
+
+	txq->time_stamp = jiffies;
+
+	skb = txq->skbs[txq->q.read_ptr];
+	info = IEEE80211_SKB_CB(skb);
+	memset(&info->status, 0, sizeof(info->status));
+
+	hdr = (struct ieee80211_hdr *) skb->data;
+	if (ieee80211_is_data_qos(hdr->frame_control)) {
+		qc = ieee80211_get_qos_ctl(hdr);
+		tid = qc[0] & 0xf;
+	}
+
+	sta_id = il4965_get_ra_sta_id(il, hdr);
+	if (txq->sched_retry && unlikely(sta_id == IL_INVALID_STATION)) {
+		IL_ERR("Station not known\n");
+		return;
+	}
+
+	/*
+	 * The firmware will not transmit a frame on a passive channel if it
+	 * has not yet received a valid frame on that channel. When this error
+	 * happens we have to wait until the firmware unblocks itself, i.e.
+	 * when we receive a beacon or some other frame. We unblock the queues
+	 * in il4965_pass_packet_to_mac80211 or in il_mac_bss_info_changed.
+	 */
+	if (unlikely((status & TX_STATUS_MSK) == TX_STATUS_FAIL_PASSIVE_NO_RX) &&
+	    il->iw_mode == NL80211_IFTYPE_STATION) {
+		il_stop_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
+		D_INFO("Stopped queues - RX waiting on passive channel\n");
+	}
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	if (txq->sched_retry) {
+		const u32 scd_ssn = il4965_get_scd_ssn(tx_resp);
+		struct il_ht_agg *agg = NULL;
+		WARN_ON(!qc);
+
+		agg = &il->stations[sta_id].tid[tid].agg;
+
+		il4965_tx_status_reply_tx(il, agg, tx_resp, txq_id, idx);
+
+		/* check if BAR is needed */
+		if (tx_resp->frame_count == 1 &&
+		    !il4965_is_tx_success(status))
+			info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
+
+		if (txq->q.read_ptr != (scd_ssn & 0xff)) {
+			idx = il_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
+			D_TX_REPLY("Retry scheduler reclaim scd_ssn "
+				   "%d idx %d\n", scd_ssn, idx);
+			freed = il4965_tx_queue_reclaim(il, txq_id, idx);
+			if (qc)
+				il4965_free_tfds_in_queue(il, sta_id, tid,
+							  freed);
+
+			if (il->mac80211_registered &&
+			    il_queue_space(&txq->q) > txq->q.low_mark &&
+			    agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
+				il_wake_queue(il, txq);
+		}
+	} else {
+		info->status.rates[0].count = tx_resp->failure_frame + 1;
+		info->flags |= il4965_tx_status_to_mac80211(status);
+		il4965_hwrate_to_tx_control(il,
+					    le32_to_cpu(tx_resp->rate_n_flags),
+					    info);
+
+		D_TX_REPLY("TXQ %d status %s (0x%08x) "
+			   "rate_n_flags 0x%x retries %d\n", txq_id,
+			   il4965_get_tx_fail_reason(status), status,
+			   le32_to_cpu(tx_resp->rate_n_flags),
+			   tx_resp->failure_frame);
+
+		freed = il4965_tx_queue_reclaim(il, txq_id, idx);
+		if (qc && likely(sta_id != IL_INVALID_STATION))
+			il4965_free_tfds_in_queue(il, sta_id, tid, freed);
+		else if (sta_id == IL_INVALID_STATION)
+			D_TX_REPLY("Station not known\n");
+
+		if (il->mac80211_registered &&
+		    il_queue_space(&txq->q) > txq->q.low_mark)
+			il_wake_queue(il, txq);
+	}
+	if (qc && likely(sta_id != IL_INVALID_STATION))
+		il4965_txq_check_empty(il, sta_id, tid, txq_id);
+
+	il4965_check_abort_status(il, tx_resp->frame_count, status);
+
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+}
+
+/**
+ * il4965_hwrate_to_tx_control - translate ucode response to mac80211 tx status
+ */
+void
+il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
+			    struct ieee80211_tx_info *info)
+{
+	struct ieee80211_tx_rate *r = &info->status.rates[0];
+
+	info->status.antenna =
+	    ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
+	if (rate_n_flags & RATE_MCS_HT_MSK)
+		r->flags |= IEEE80211_TX_RC_MCS;
+	if (rate_n_flags & RATE_MCS_GF_MSK)
+		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+	if (rate_n_flags & RATE_MCS_HT40_MSK)
+		r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+	if (rate_n_flags & RATE_MCS_DUP_MSK)
+		r->flags |= IEEE80211_TX_RC_DUP_DATA;
+	if (rate_n_flags & RATE_MCS_SGI_MSK)
+		r->flags |= IEEE80211_TX_RC_SHORT_GI;
+	r->idx = il4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
+}
+
+/**
+ * il4965_hdl_compressed_ba - Handler for N_COMPRESSED_BA
+ *
+ * Handles block-acknowledge notification from device, which reports success
+ * of frames sent via aggregation.
+ */
+static void
+il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	struct il_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
+	struct il_tx_queue *txq = NULL;
+	struct il_ht_agg *agg;
+	int idx;
+	int sta_id;
+	int tid;
+	unsigned long flags;
+
+	/* "flow" corresponds to Tx queue */
+	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
+
+	/* "ssn" is start of block-ack Tx win, corresponds to idx
+	 * (in Tx queue's circular buffer) of first TFD/frame in win */
+	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
+
+	if (scd_flow >= il->hw_params.max_txq_num) {
+		IL_ERR("BUG_ON scd_flow is bigger than number of queues\n");
+		return;
+	}
+
+	txq = &il->txq[scd_flow];
+	sta_id = ba_resp->sta_id;
+	tid = ba_resp->tid;
+	agg = &il->stations[sta_id].tid[tid].agg;
+	if (unlikely(agg->txq_id != scd_flow)) {
+		/*
+		 * FIXME: this is a uCode bug which needs to be addressed;
+		 * log the information and return for now. Since it can
+		 * happen very often, and in order not to fill the syslog,
+		 * the logging is not enabled by default.
+		 */
+		D_TX_REPLY("BA scd_flow %d does not match txq_id %d\n",
+			   scd_flow, agg->txq_id);
+		return;
+	}
+
+	/* Find idx just before block-ack win */
+	idx = il_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+
+	D_TX_REPLY("N_COMPRESSED_BA [%d] Received from %pM, " "sta_id = %d\n",
+		   agg->wait_for_ba, (u8 *) &ba_resp->sta_addr_lo32,
+		   ba_resp->sta_id);
+	D_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx," "scd_flow = "
+		   "%d, scd_ssn = %d\n", ba_resp->tid, ba_resp->seq_ctl,
+		   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
+		   ba_resp->scd_flow, ba_resp->scd_ssn);
+	D_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx\n", agg->start_idx,
+		   (unsigned long long)agg->bitmap);
+
+	/* Update driver's record of ACK vs. not for each frame in win */
+	il4965_tx_status_reply_compressed_ba(il, agg, ba_resp);
+
+	/* Release all TFDs before the SSN, i.e. all TFDs in front of
+	 * block-ack win (we assume that they've been successfully
+	 * transmitted ... if not, it's too late anyway). */
+	if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
+		/* calculate mac80211 ampdu sw queue to wake */
+		int freed = il4965_tx_queue_reclaim(il, scd_flow, idx);
+		il4965_free_tfds_in_queue(il, sta_id, tid, freed);
+
+		if (il_queue_space(&txq->q) > txq->q.low_mark &&
+		    il->mac80211_registered &&
+		    agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
+			il_wake_queue(il, txq);
+
+		il4965_txq_check_empty(il, sta_id, tid, scd_flow);
+	}
+
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+}
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+const char *
+il4965_get_tx_fail_reason(u32 status)
+{
+#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
+#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
+
+	switch (status & TX_STATUS_MSK) {
+	case TX_STATUS_SUCCESS:
+		return "SUCCESS";
+		TX_STATUS_POSTPONE(DELAY);
+		TX_STATUS_POSTPONE(FEW_BYTES);
+		TX_STATUS_POSTPONE(QUIET_PERIOD);
+		TX_STATUS_POSTPONE(CALC_TTAK);
+		TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
+		TX_STATUS_FAIL(SHORT_LIMIT);
+		TX_STATUS_FAIL(LONG_LIMIT);
+		TX_STATUS_FAIL(FIFO_UNDERRUN);
+		TX_STATUS_FAIL(DRAIN_FLOW);
+		TX_STATUS_FAIL(RFKILL_FLUSH);
+		TX_STATUS_FAIL(LIFE_EXPIRE);
+		TX_STATUS_FAIL(DEST_PS);
+		TX_STATUS_FAIL(HOST_ABORTED);
+		TX_STATUS_FAIL(BT_RETRY);
+		TX_STATUS_FAIL(STA_INVALID);
+		TX_STATUS_FAIL(FRAG_DROPPED);
+		TX_STATUS_FAIL(TID_DISABLE);
+		TX_STATUS_FAIL(FIFO_FLUSHED);
+		TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
+		TX_STATUS_FAIL(PASSIVE_NO_RX);
+		TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
+	}
+
+	return "UNKNOWN";
+
+#undef TX_STATUS_FAIL
+#undef TX_STATUS_POSTPONE
+}
+#endif /* CONFIG_IWLEGACY_DEBUG */
+
+static struct il_link_quality_cmd *
+il4965_sta_alloc_lq(struct il_priv *il, u8 sta_id)
+{
+	int i, r;
+	struct il_link_quality_cmd *link_cmd;
+	u32 rate_flags = 0;
+	__le32 rate_n_flags;
+
+	link_cmd = kzalloc(sizeof(struct il_link_quality_cmd), GFP_KERNEL);
+	if (!link_cmd) {
+		IL_ERR("Unable to allocate memory for LQ cmd.\n");
+		return NULL;
+	}
+	/* Set up the rate scaling to start at selected rate, fall back
+	 * all the way down to 1M in IEEE order, and then spin on 1M */
+	if (il->band == NL80211_BAND_5GHZ)
+		r = RATE_6M_IDX;
+	else
+		r = RATE_1M_IDX;
+
+	if (r >= IL_FIRST_CCK_RATE && r <= IL_LAST_CCK_RATE)
+		rate_flags |= RATE_MCS_CCK_MSK;
+
+	rate_flags |=
+	    il4965_first_antenna(il->hw_params.valid_tx_ant) << RATE_MCS_ANT_POS;
+	rate_n_flags = cpu_to_le32(il_rates[r].plcp | rate_flags);
+	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
+		link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
+
+	link_cmd->general_params.single_stream_ant_msk =
+	    il4965_first_antenna(il->hw_params.valid_tx_ant);
+
+	link_cmd->general_params.dual_stream_ant_msk =
+	    il->hw_params.valid_tx_ant &
+	    ~il4965_first_antenna(il->hw_params.valid_tx_ant);
+	if (!link_cmd->general_params.dual_stream_ant_msk) {
+		link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
+	} else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) {
+		link_cmd->general_params.dual_stream_ant_msk =
+		    il->hw_params.valid_tx_ant;
+	}
+
+	link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+	link_cmd->agg_params.agg_time_limit =
+	    cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+
+	link_cmd->sta_id = sta_id;
+
+	return link_cmd;
+}
+
+/*
+ * il4965_add_bssid_station - Add the special IBSS BSSID station
+ *
+ * Function sleeps.
+ */
+int
+il4965_add_bssid_station(struct il_priv *il, const u8 *addr, u8 *sta_id_r)
+{
+	int ret;
+	u8 sta_id;
+	struct il_link_quality_cmd *link_cmd;
+	unsigned long flags;
+
+	if (sta_id_r)
+		*sta_id_r = IL_INVALID_STATION;
+
+	ret = il_add_station_common(il, addr, 0, NULL, &sta_id);
+	if (ret) {
+		IL_ERR("Unable to add station %pM\n", addr);
+		return ret;
+	}
+
+	if (sta_id_r)
+		*sta_id_r = sta_id;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	il->stations[sta_id].used |= IL_STA_LOCAL;
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	/* Set up default rate scaling table in device's station table */
+	link_cmd = il4965_sta_alloc_lq(il, sta_id);
+	if (!link_cmd) {
+		IL_ERR("Unable to initialize rate scaling for station %pM.\n",
+		       addr);
+		return -ENOMEM;
+	}
+
+	ret = il_send_lq_cmd(il, link_cmd, CMD_SYNC, true);
+	if (ret)
+		IL_ERR("Link quality command failed (%d)\n", ret);
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	il->stations[sta_id].lq = link_cmd;
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	return 0;
+}
+
+static int
+il4965_static_wepkey_cmd(struct il_priv *il, bool send_if_empty)
+{
+	int i;
+	u8 buff[sizeof(struct il_wep_cmd) +
+		sizeof(struct il_wep_key) * WEP_KEYS_MAX];
+	struct il_wep_cmd *wep_cmd = (struct il_wep_cmd *)buff;
+	size_t cmd_size = sizeof(struct il_wep_cmd);
+	struct il_host_cmd cmd = {
+		.id = C_WEPKEY,
+		.data = wep_cmd,
+		.flags = CMD_SYNC,
+	};
+	bool not_empty = false;
+
+	might_sleep();
+
+	memset(wep_cmd, 0,
+	       cmd_size + (sizeof(struct il_wep_key) * WEP_KEYS_MAX));
+
+	for (i = 0; i < WEP_KEYS_MAX; i++) {
+		u8 key_size = il->_4965.wep_keys[i].key_size;
+
+		wep_cmd->key[i].key_idx = i;
+		if (key_size) {
+			wep_cmd->key[i].key_offset = i;
+			not_empty = true;
+		} else
+			wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
+
+		wep_cmd->key[i].key_size = key_size;
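+		/* The device expects WEP key material at offset 3 within the key array */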
+		memcpy(&wep_cmd->key[i].key[3], il->_4965.wep_keys[i].key, key_size);
+	}
+
+	wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
+	wep_cmd->num_keys = WEP_KEYS_MAX;
+
+	cmd_size += sizeof(struct il_wep_key) * WEP_KEYS_MAX;
+	cmd.len = cmd_size;
+
+	if (not_empty || send_if_empty)
+		return il_send_cmd(il, &cmd);
+	else
+		return 0;
+}
+
+int
+il4965_restore_default_wep_keys(struct il_priv *il)
+{
+	lockdep_assert_held(&il->mutex);
+
+	return il4965_static_wepkey_cmd(il, false);
+}
+
+int
+il4965_remove_default_wep_key(struct il_priv *il,
+			      struct ieee80211_key_conf *keyconf)
+{
+	int ret;
+	int idx = keyconf->keyidx;
+
+	lockdep_assert_held(&il->mutex);
+
+	D_WEP("Removing default WEP key: idx=%d\n", idx);
+
+	memset(&il->_4965.wep_keys[idx], 0, sizeof(struct il_wep_key));
+	if (il_is_rfkill(il)) {
+		D_WEP("Not sending C_WEPKEY command due to RFKILL.\n");
+		/* but keys in device are clear anyway so return success */
+		return 0;
+	}
+	ret = il4965_static_wepkey_cmd(il, true);
+	D_WEP("Remove default WEP key: idx=%d ret=%d\n", idx, ret);
+
+	return ret;
+}
+
+int
+il4965_set_default_wep_key(struct il_priv *il,
+			   struct ieee80211_key_conf *keyconf)
+{
+	int ret;
+	int len = keyconf->keylen;
+	int idx = keyconf->keyidx;
+
+	lockdep_assert_held(&il->mutex);
+
+	if (len != WEP_KEY_LEN_128 && len != WEP_KEY_LEN_64) {
+		D_WEP("Bad WEP key length %d\n", keyconf->keylen);
+		return -EINVAL;
+	}
+
+	keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
+	keyconf->hw_key_idx = HW_KEY_DEFAULT;
+	il->stations[IL_AP_ID].keyinfo.cipher = keyconf->cipher;
+
+	il->_4965.wep_keys[idx].key_size = len;
+	memcpy(&il->_4965.wep_keys[idx].key, &keyconf->key, len);
+
+	ret = il4965_static_wepkey_cmd(il, false);
+
+	D_WEP("Set default WEP key: len=%d idx=%d ret=%d\n", len, idx, ret);
+	return ret;
+}
+
+static int
+il4965_set_wep_dynamic_key_info(struct il_priv *il,
+				struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+	unsigned long flags;
+	__le16 key_flags = 0;
+	struct il_addsta_cmd sta_cmd;
+
+	lockdep_assert_held(&il->mutex);
+
+	keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
+
+	key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
+	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+	key_flags &= ~STA_KEY_FLG_INVALID;
+
+	if (keyconf->keylen == WEP_KEY_LEN_128)
+		key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
+
+	if (sta_id == il->hw_params.bcast_id)
+		key_flags |= STA_KEY_MULTICAST_MSK;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+
+	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+	il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
+	il->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;
+
+	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);
+
+	memcpy(&il->stations[sta_id].sta.key.key[3], keyconf->key,
+	       keyconf->keylen);
+
+	if ((il->stations[sta_id].sta.key.key_flags &
+	     STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
+		il->stations[sta_id].sta.key.key_offset =
+		    il_get_free_ucode_key_idx(il);
+	/* else, we are overriding an existing key => no need to allocate room
+	 * in uCode. */
+
+	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+	     "no space for a new key");
+
+	il->stations[sta_id].sta.key.key_flags = key_flags;
+	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+	memcpy(&sta_cmd, &il->stations[sta_id].sta,
+	       sizeof(struct il_addsta_cmd));
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+static int
+il4965_set_ccmp_dynamic_key_info(struct il_priv *il,
+				 struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+	unsigned long flags;
+	__le16 key_flags = 0;
+	struct il_addsta_cmd sta_cmd;
+
+	lockdep_assert_held(&il->mutex);
+
+	key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
+	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+	key_flags &= ~STA_KEY_FLG_INVALID;
+
+	if (sta_id == il->hw_params.bcast_id)
+		key_flags |= STA_KEY_MULTICAST_MSK;
+
+	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+	il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
+
+	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);
+
+	memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen);
+
+	if ((il->stations[sta_id].sta.key.key_flags &
+	     STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
+		il->stations[sta_id].sta.key.key_offset =
+		    il_get_free_ucode_key_idx(il);
+	/* else, we are overriding an existing key => no need to allocate
+	 * room in uCode. */
+
+	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+	     "no space for a new key");
+
+	il->stations[sta_id].sta.key.key_flags = key_flags;
+	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+	memcpy(&sta_cmd, &il->stations[sta_id].sta,
+	       sizeof(struct il_addsta_cmd));
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+static int
+il4965_set_tkip_dynamic_key_info(struct il_priv *il,
+				 struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+	unsigned long flags;
+	int ret = 0;
+	__le16 key_flags = 0;
+
+	key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
+	key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+	key_flags &= ~STA_KEY_FLG_INVALID;
+
+	if (sta_id == il->hw_params.bcast_id)
+		key_flags |= STA_KEY_MULTICAST_MSK;
+
+	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+	keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+
+	il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+	il->stations[sta_id].keyinfo.keylen = 16;
+
+	if ((il->stations[sta_id].sta.key.key_flags &
+	     STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
+		il->stations[sta_id].sta.key.key_offset =
+		    il_get_free_ucode_key_idx(il);
+	/* else, we are overriding an existing key => no need to allocate
+	 * room in uCode. */
+
+	WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+	     "no space for a new key");
+
+	il->stations[sta_id].sta.key.key_flags = key_flags;
+
+	/* This copy is actually not needed: we get the key with each TX */
+	memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, 16);
+
+	memcpy(il->stations[sta_id].sta.key.key, keyconf->key, 16);
+
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	return ret;
+}
+
+void
+il4965_update_tkip_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
+		       struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
+{
+	u8 sta_id;
+	unsigned long flags;
+	int i;
+
+	if (il_scan_cancel(il)) {
+		/* cancel scan failed, just live w/ bad key and rely
+		   briefly on SW decryption */
+		return;
+	}
+
+	sta_id = il_sta_id_or_broadcast(il, sta);
+	if (sta_id == IL_INVALID_STATION)
+		return;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+
+	il->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
+
+	for (i = 0; i < 5; i++)
+		il->stations[sta_id].sta.key.tkip_rx_ttak[i] =
+		    cpu_to_le16(phase1key[i]);
+
+	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+	il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
+
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+}
+
+int
+il4965_remove_dynamic_key(struct il_priv *il,
+			  struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+	unsigned long flags;
+	u16 key_flags;
+	u8 keyidx;
+	struct il_addsta_cmd sta_cmd;
+
+	lockdep_assert_held(&il->mutex);
+
+	il->_4965.key_mapping_keys--;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	key_flags = le16_to_cpu(il->stations[sta_id].sta.key.key_flags);
+	keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;
+
+	D_WEP("Remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id);
+
+	if (keyconf->keyidx != keyidx) {
+		/* We need to remove a key whose idx differs from the one
+		 * in the uCode. This means the key we need to remove has
+		 * already been replaced by another one with a different
+		 * idx. Don't do anything and return ok.
+		 */
+		spin_unlock_irqrestore(&il->sta_lock, flags);
+		return 0;
+	}
+
+	if (il->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_INVALID) {
+		IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx,
+			key_flags);
+		spin_unlock_irqrestore(&il->sta_lock, flags);
+		return 0;
+	}
+
+	if (!test_and_clear_bit(il->stations[sta_id].sta.key.key_offset,
+				&il->ucode_key_table))
+		IL_ERR("idx %d not used in uCode key table.\n",
+		       il->stations[sta_id].sta.key.key_offset);
+	memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key));
+	memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
+	il->stations[sta_id].sta.key.key_flags =
+	    STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
+	il->stations[sta_id].sta.key.key_offset = keyconf->hw_key_idx;
+	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+	if (il_is_rfkill(il)) {
+		D_WEP("Not sending C_ADD_STA command because RFKILL is enabled.\n");
+		spin_unlock_irqrestore(&il->sta_lock, flags);
+		return 0;
+	}
+	memcpy(&sta_cmd, &il->stations[sta_id].sta,
+	       sizeof(struct il_addsta_cmd));
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+int
+il4965_set_dynamic_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
+		       u8 sta_id)
+{
+	int ret;
+
+	lockdep_assert_held(&il->mutex);
+
+	il->_4965.key_mapping_keys++;
+	keyconf->hw_key_idx = HW_KEY_DYNAMIC;
+
+	switch (keyconf->cipher) {
+	case WLAN_CIPHER_SUITE_CCMP:
+		ret = il4965_set_ccmp_dynamic_key_info(il, keyconf, sta_id);
+		break;
+	case WLAN_CIPHER_SUITE_TKIP:
+		ret = il4965_set_tkip_dynamic_key_info(il, keyconf, sta_id);
+		break;
+	case WLAN_CIPHER_SUITE_WEP40:
+	case WLAN_CIPHER_SUITE_WEP104:
+		ret = il4965_set_wep_dynamic_key_info(il, keyconf, sta_id);
+		break;
+	default:
+		IL_ERR("Unknown alg: %s cipher = %x\n", __func__,
+		       keyconf->cipher);
+		ret = -EINVAL;
+	}
+
+	D_WEP("Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
+	      keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret);
+
+	return ret;
+}
+
+/**
+ * il4965_alloc_bcast_station - add broadcast station into driver's station table.
+ *
+ * This adds the broadcast station into the driver's station table
+ * and marks it driver active, so that it will be restored to the
+ * device at the next best time.
+ */
+int
+il4965_alloc_bcast_station(struct il_priv *il)
+{
+	struct il_link_quality_cmd *link_cmd;
+	unsigned long flags;
+	u8 sta_id;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	sta_id = il_prep_station(il, il_bcast_addr, false, NULL);
+	if (sta_id == IL_INVALID_STATION) {
+		IL_ERR("Unable to prepare broadcast station\n");
+		spin_unlock_irqrestore(&il->sta_lock, flags);
+
+		return -EINVAL;
+	}
+
+	il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE;
+	il->stations[sta_id].used |= IL_STA_BCAST;
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	link_cmd = il4965_sta_alloc_lq(il, sta_id);
+	if (!link_cmd) {
+		IL_ERR("Unable to initialize rate scaling for bcast station.\n");
+		return -ENOMEM;
+	}
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	il->stations[sta_id].lq = link_cmd;
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	return 0;
+}
+
+/**
+ * il4965_update_bcast_station - update broadcast station's LQ command
+ *
+ * Only used by iwl4965. Placed here to have all bcast station management
+ * code together.
+ */
+static int
+il4965_update_bcast_station(struct il_priv *il)
+{
+	unsigned long flags;
+	struct il_link_quality_cmd *link_cmd;
+	u8 sta_id = il->hw_params.bcast_id;
+
+	link_cmd = il4965_sta_alloc_lq(il, sta_id);
+	if (!link_cmd) {
+		IL_ERR("Unable to initialize rate scaling for bcast sta.\n");
+		return -ENOMEM;
+	}
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	if (il->stations[sta_id].lq)
+		kfree(il->stations[sta_id].lq);
+	else
+		D_INFO("Bcast sta rate scaling has not been initialized.\n");
+	il->stations[sta_id].lq = link_cmd;
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	return 0;
+}
+
+int
+il4965_update_bcast_stations(struct il_priv *il)
+{
+	return il4965_update_bcast_station(il);
+}
+
+/**
+ * il4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
+ */
+int
+il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid)
+{
+	unsigned long flags;
+	struct il_addsta_cmd sta_cmd;
+
+	lockdep_assert_held(&il->mutex);
+
+	/* Remove "disable" flag, to enable Tx for this TID */
+	spin_lock_irqsave(&il->sta_lock, flags);
+	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
+	il->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
+	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+	memcpy(&sta_cmd, &il->stations[sta_id].sta,
+	       sizeof(struct il_addsta_cmd));
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+int
+il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta, int tid,
+			u16 ssn)
+{
+	unsigned long flags;
+	int sta_id;
+	struct il_addsta_cmd sta_cmd;
+
+	lockdep_assert_held(&il->mutex);
+
+	sta_id = il_sta_id(sta);
+	if (sta_id == IL_INVALID_STATION)
+		return -ENXIO;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	il->stations[sta_id].sta.station_flags_msk = 0;
+	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
+	il->stations[sta_id].sta.add_immediate_ba_tid = (u8) tid;
+	il->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
+	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+	memcpy(&sta_cmd, &il->stations[sta_id].sta,
+	       sizeof(struct il_addsta_cmd));
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+int
+il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta, int tid)
+{
+	unsigned long flags;
+	int sta_id;
+	struct il_addsta_cmd sta_cmd;
+
+	lockdep_assert_held(&il->mutex);
+
+	sta_id = il_sta_id(sta);
+	if (sta_id == IL_INVALID_STATION) {
+		IL_ERR("Invalid station for AGG tid %d\n", tid);
+		return -ENXIO;
+	}
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	il->stations[sta_id].sta.station_flags_msk = 0;
+	il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
+	il->stations[sta_id].sta.remove_immediate_ba_tid = (u8) tid;
+	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+	memcpy(&sta_cmd, &il->stations[sta_id].sta,
+	       sizeof(struct il_addsta_cmd));
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+void
+il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	il->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
+	il->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
+	il->stations[sta_id].sta.sta.modify_mask =
+	    STA_MODIFY_SLEEP_TX_COUNT_MSK;
+	il->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
+	il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+	il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+}
+
+void
+il4965_update_chain_flags(struct il_priv *il)
+{
+	if (il->ops->set_rxon_chain) {
+		il->ops->set_rxon_chain(il);
+		if (il->active.rx_chain != il->staging.rx_chain)
+			il_commit_rxon(il);
+	}
+}
+
+static void
+il4965_clear_free_frames(struct il_priv *il)
+{
+	struct list_head *element;
+
+	D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count);
+
+	while (!list_empty(&il->free_frames)) {
+		element = il->free_frames.next;
+		list_del(element);
+		kfree(list_entry(element, struct il_frame, list));
+		il->frames_count--;
+	}
+
+	if (il->frames_count) {
+		IL_WARN("%d frames still in use.  Did we lose one?\n",
+			il->frames_count);
+		il->frames_count = 0;
+	}
+}
+
+static struct il_frame *
+il4965_get_free_frame(struct il_priv *il)
+{
+	struct il_frame *frame;
+	struct list_head *element;
+	if (list_empty(&il->free_frames)) {
+		frame = kzalloc(sizeof(*frame), GFP_KERNEL);
+		if (!frame) {
+			IL_ERR("Could not allocate frame!\n");
+			return NULL;
+		}
+
+		il->frames_count++;
+		return frame;
+	}
+
+	element = il->free_frames.next;
+	list_del(element);
+	return list_entry(element, struct il_frame, list);
+}
+
+static void
+il4965_free_frame(struct il_priv *il, struct il_frame *frame)
+{
+	memset(frame, 0, sizeof(*frame));
+	list_add(&frame->list, &il->free_frames);
+}
+
+static u32
+il4965_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr,
+			 int left)
+{
+	lockdep_assert_held(&il->mutex);
+
+	if (!il->beacon_skb)
+		return 0;
+
+	if (il->beacon_skb->len > left)
+		return 0;
+
+	memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len);
+
+	return il->beacon_skb->len;
+}
+
+/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
+static void
+il4965_set_beacon_tim(struct il_priv *il,
+		      struct il_tx_beacon_cmd *tx_beacon_cmd, u8 * beacon,
+		      u32 frame_size)
+{
+	u16 tim_idx;
+	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
+
+	/*
+	 * The idx is relative to frame start but we start looking at the
+	 * variable-length part of the beacon.
+	 */
+	tim_idx = mgmt->u.beacon.variable - beacon;
+
+	/* Parse variable-length elements of beacon to find WLAN_EID_TIM */
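+	/*
+	 * Each element is TLV-encoded: a one-byte element ID, a one-byte
+	 * length, then 'length' bytes of payload -- hence the stride of
+	 * beacon[tim_idx + 1] + 2 below.
+	 */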
+	while ((tim_idx < (frame_size - 2)) &&
+	       (beacon[tim_idx] != WLAN_EID_TIM))
+		tim_idx += beacon[tim_idx + 1] + 2;
+
+	/* If TIM field was found, set variables */
+	if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
+		tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
+		tx_beacon_cmd->tim_size = beacon[tim_idx + 1];
+	} else
+		IL_WARN("Unable to find TIM Element in beacon\n");
+}
+
+static unsigned int
+il4965_hw_get_beacon_cmd(struct il_priv *il, struct il_frame *frame)
+{
+	struct il_tx_beacon_cmd *tx_beacon_cmd;
+	u32 frame_size;
+	u32 rate_flags;
+	u32 rate;
+	/*
+	 * We have to set up the TX command, the TX Beacon command, and the
+	 * beacon contents.
+	 */
+
+	lockdep_assert_held(&il->mutex);
+
+	if (!il->beacon_enabled) {
+		IL_ERR("Trying to build beacon without beaconing enabled\n");
+		return 0;
+	}
+
+	/* Initialize memory */
+	tx_beacon_cmd = &frame->u.beacon;
+	memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
+
+	/* Set up TX beacon contents */
+	frame_size =
+	    il4965_fill_beacon_frame(il, tx_beacon_cmd->frame,
+				     sizeof(frame->u) - sizeof(*tx_beacon_cmd));
+	if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
+		return 0;
+	if (!frame_size)
+		return 0;
+
+	/* Set up TX command fields */
+	tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size);
+	tx_beacon_cmd->tx.sta_id = il->hw_params.bcast_id;
+	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+	tx_beacon_cmd->tx.tx_flags =
+	    TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK |
+	    TX_CMD_FLG_STA_RATE_MSK;
+
+	/* Set up TX beacon command fields */
+	il4965_set_beacon_tim(il, tx_beacon_cmd, (u8 *) tx_beacon_cmd->frame,
+			      frame_size);
+
+	/* Set up packet rate and flags */
+	rate = il_get_lowest_plcp(il);
+	il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant);
+	rate_flags = BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS;
+	if ((rate >= IL_FIRST_CCK_RATE) && (rate <= IL_LAST_CCK_RATE))
+		rate_flags |= RATE_MCS_CCK_MSK;
+	tx_beacon_cmd->tx.rate_n_flags = cpu_to_le32(rate | rate_flags);
+
+	return sizeof(*tx_beacon_cmd) + frame_size;
+}
+
+int
+il4965_send_beacon_cmd(struct il_priv *il)
+{
+	struct il_frame *frame;
+	unsigned int frame_size;
+	int rc;
+
+	frame = il4965_get_free_frame(il);
+	if (!frame) {
+		IL_ERR("Could not obtain free frame buffer for beacon command.\n");
+		return -ENOMEM;
+	}
+
+	frame_size = il4965_hw_get_beacon_cmd(il, frame);
+	if (!frame_size) {
+		IL_ERR("Error configuring the beacon command\n");
+		il4965_free_frame(il, frame);
+		return -EINVAL;
+	}
+
+	rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]);
+
+	il4965_free_frame(il, frame);
+
+	return rc;
+}
+
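+/*
+ * Each TFD transfer buffer (TB) entry packs a 36-bit DMA address and a
+ * 12-bit length into six bytes: address bits 0-31 in 'lo', address bits
+ * 32-35 in the low nibble of 'hi_n_len', and the length in the upper 12
+ * bits of 'hi_n_len'.  The helpers below pack and unpack this layout.
+ */
+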
+static inline dma_addr_t
+il4965_tfd_tb_get_addr(struct il_tfd *tfd, u8 idx)
+{
+	struct il_tfd_tb *tb = &tfd->tbs[idx];
+
+	dma_addr_t addr = get_unaligned_le32(&tb->lo);
+	if (sizeof(dma_addr_t) > sizeof(u32))
+		addr |=
+		    ((dma_addr_t) (le16_to_cpu(tb->hi_n_len) & 0xF) << 16) <<
+		    16;
+
+	return addr;
+}
+
+static inline u16
+il4965_tfd_tb_get_len(struct il_tfd *tfd, u8 idx)
+{
+	struct il_tfd_tb *tb = &tfd->tbs[idx];
+
+	return le16_to_cpu(tb->hi_n_len) >> 4;
+}
+
+static inline void
+il4965_tfd_set_tb(struct il_tfd *tfd, u8 idx, dma_addr_t addr, u16 len)
+{
+	struct il_tfd_tb *tb = &tfd->tbs[idx];
+	u16 hi_n_len = len << 4;
+
+	put_unaligned_le32(addr, &tb->lo);
+	if (sizeof(dma_addr_t) > sizeof(u32))
+		hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+
+	tb->hi_n_len = cpu_to_le16(hi_n_len);
+
+	tfd->num_tbs = idx + 1;
+}
+
+static inline u8
+il4965_tfd_get_num_tbs(struct il_tfd *tfd)
+{
+	return tfd->num_tbs & 0x1f;
+}
+
+/**
+ * il4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
+ * @il - driver private data
+ * @txq - tx queue
+ *
+ * Does NOT advance any TFD circular buffer read/write idxes
+ * Does NOT free the TFD itself (which is within circular buffer)
+ */
+void
+il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
+{
+	struct il_tfd *tfd_tmp = (struct il_tfd *)txq->tfds;
+	struct il_tfd *tfd;
+	struct pci_dev *dev = il->pci_dev;
+	int idx = txq->q.read_ptr;
+	int i;
+	int num_tbs;
+
+	tfd = &tfd_tmp[idx];
+
+	/* Sanity check on number of chunks */
+	num_tbs = il4965_tfd_get_num_tbs(tfd);
+
+	if (num_tbs >= IL_NUM_OF_TBS) {
+		IL_ERR("Too many chunks: %i\n", num_tbs);
+		/* @todo issue fatal error, it is quite a serious situation */
+		return;
+	}
+
+	/* Unmap tx_cmd */
+	if (num_tbs)
+		pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping),
+				 dma_unmap_len(&txq->meta[idx], len),
+				 PCI_DMA_BIDIRECTIONAL);
+
+	/* Unmap chunks, if any. */
+	for (i = 1; i < num_tbs; i++)
+		pci_unmap_single(dev, il4965_tfd_tb_get_addr(tfd, i),
+				 il4965_tfd_tb_get_len(tfd, i),
+				 PCI_DMA_TODEVICE);
+
+	/* free SKB */
+	if (txq->skbs) {
+		struct sk_buff *skb = txq->skbs[txq->q.read_ptr];
+
+		/* can be called from irqs-disabled context */
+		if (skb) {
+			dev_kfree_skb_any(skb);
+			txq->skbs[txq->q.read_ptr] = NULL;
+		}
+	}
+}
+
+int
+il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
+				dma_addr_t addr, u16 len, u8 reset, u8 pad)
+{
+	struct il_queue *q;
+	struct il_tfd *tfd, *tfd_tmp;
+	u32 num_tbs;
+
+	q = &txq->q;
+	tfd_tmp = (struct il_tfd *)txq->tfds;
+	tfd = &tfd_tmp[q->write_ptr];
+
+	if (reset)
+		memset(tfd, 0, sizeof(*tfd));
+
+	num_tbs = il4965_tfd_get_num_tbs(tfd);
+
+	/* Each TFD can point to a maximum of 20 Tx buffers */
+	if (num_tbs >= IL_NUM_OF_TBS) {
+		IL_ERR("Error can not send more than %d chunks\n",
+		       IL_NUM_OF_TBS);
+		return -EINVAL;
+	}
+
+	BUG_ON(addr & ~DMA_BIT_MASK(36));
+	if (unlikely(addr & ~IL_TX_DMA_MASK))
+		IL_ERR("Unaligned address = %llx\n", (unsigned long long)addr);
+
+	il4965_tfd_set_tb(tfd, num_tbs, addr, len);
+
+	return 0;
+}
+
+/*
+ * Tell nic where to find circular buffer of Tx Frame Descriptors for
+ * given Tx queue, and enable the DMA channel used for that queue.
+ *
+ * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
+ * channels supported in hardware.
+ */
+int
+il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq)
+{
+	int txq_id = txq->q.id;
+
+	/* Circular buffer (TFD queue in DRAM) physical base address */
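+	/* The register takes the base address shifted right by 8, i.e. in
+	 * 256-byte units. */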
+	il_wr(il, FH49_MEM_CBBC_QUEUE(txq_id), txq->q.dma_addr >> 8);
+
+	return 0;
+}
+
+/******************************************************************************
+ *
+ * Generic RX handler implementations
+ *
+ ******************************************************************************/
+static void
+il4965_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	struct il_alive_resp *palive;
+	struct delayed_work *pwork;
+
+	palive = &pkt->u.alive_frame;
+
+	D_INFO("Alive ucode status 0x%08X revision 0x%01X 0x%01X\n",
+	       palive->is_valid, palive->ver_type, palive->ver_subtype);
+
+	if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
+		D_INFO("Initialization Alive received.\n");
+		memcpy(&il->card_alive_init, &pkt->u.alive_frame,
+		       sizeof(struct il_init_alive_resp));
+		pwork = &il->init_alive_start;
+	} else {
+		D_INFO("Runtime Alive received.\n");
+		memcpy(&il->card_alive, &pkt->u.alive_frame,
+		       sizeof(struct il_alive_resp));
+		pwork = &il->alive_start;
+	}
+
+	/* We delay the ALIVE response by 5ms to
+	 * give the HW RF Kill time to activate... */
+	if (palive->is_valid == UCODE_VALID_OK)
+		queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5));
+	else
+		IL_WARN("uCode did not respond OK.\n");
+}
+
+/**
+ * il4965_bg_stats_periodic - Timer callback to queue stats
+ *
+ * This callback is provided in order to send a stats request.
+ *
+ * This timer function is continually reset to execute within
+ * 60 seconds since the last N_STATS was received.  We need to
+ * ensure we receive the stats in order to update the temperature
+ * used for calibrating the TXPOWER.
+ */
+static void
+il4965_bg_stats_periodic(struct timer_list *t)
+{
+	struct il_priv *il = from_timer(il, t, stats_periodic);
+
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		return;
+
+	/* don't send host command if rf-kill is on */
+	if (!il_is_ready_rf(il))
+		return;
+
+	il_send_stats_request(il, CMD_ASYNC, false);
+}
+
+static void
+il4965_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	struct il4965_beacon_notif *beacon =
+	    (struct il4965_beacon_notif *)pkt->u.raw;
+#ifdef CONFIG_IWLEGACY_DEBUG
+	u8 rate = il4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
+
+	D_RX("beacon status %x retries %d iss %d tsf:0x%.8x%.8x rate %d\n",
+	     le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
+	     beacon->beacon_notify_hdr.failure_frame,
+	     le32_to_cpu(beacon->ibss_mgr_status),
+	     le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate);
+#endif
+	il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
+}
+
+static void
+il4965_perform_ct_kill_task(struct il_priv *il)
+{
+	unsigned long flags;
+
+	D_POWER("Stop all queues\n");
+
+	if (il->mac80211_registered)
+		ieee80211_stop_queues(il->hw);
+
+	_il_wr(il, CSR_UCODE_DRV_GP1_SET,
+	       CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
+	_il_rd(il, CSR_UCODE_DRV_GP1);
+
+	spin_lock_irqsave(&il->reg_lock, flags);
+	if (likely(_il_grab_nic_access(il)))
+		_il_release_nic_access(il);
+	spin_unlock_irqrestore(&il->reg_lock, flags);
+}
+
+/* Handle notification from uCode that card's power state is changing
+ * due to software, hardware, or critical temperature RFKILL */
+static void
+il4965_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
+	unsigned long status = il->status;
+
+	D_RF_KILL("Card state received: HW:%s SW:%s CT:%s\n",
+		  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
+		  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
+		  (flags & CT_CARD_DISABLED) ? "Reached" : "Not reached");
+
+	if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | CT_CARD_DISABLED)) {
+
+		_il_wr(il, CSR_UCODE_DRV_GP1_SET,
+		       CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+		il_wr(il, HBUS_TARG_MBX_C, HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
+
+		if (!(flags & RXON_CARD_DISABLED)) {
+			_il_wr(il, CSR_UCODE_DRV_GP1_CLR,
+			       CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+			il_wr(il, HBUS_TARG_MBX_C,
+			      HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
+		}
+	}
+
+	if (flags & CT_CARD_DISABLED)
+		il4965_perform_ct_kill_task(il);
+
+	if (flags & HW_CARD_DISABLED)
+		set_bit(S_RFKILL, &il->status);
+	else
+		clear_bit(S_RFKILL, &il->status);
+
+	if (!(flags & RXON_CARD_DISABLED))
+		il_scan_cancel(il);
+
+	if (test_bit(S_RFKILL, &status) !=
+	    test_bit(S_RFKILL, &il->status))
+		wiphy_rfkill_set_hw_state(il->hw->wiphy,
+					  test_bit(S_RFKILL, &il->status));
+	else
+		wake_up(&il->wait_command_queue);
+}
+
+/**
+ * il4965_setup_handlers - Initialize Rx handler callbacks
+ *
+ * Setup the RX handlers for each of the reply types sent from the uCode
+ * to the host.
+ *
+ * This function chains into the hardware specific files for them to setup
+ * any hardware specific handlers as well.
+ */
+static void
+il4965_setup_handlers(struct il_priv *il)
+{
+	il->handlers[N_ALIVE] = il4965_hdl_alive;
+	il->handlers[N_ERROR] = il_hdl_error;
+	il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa;
+	il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement;
+	il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep;
+	il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats;
+	il->handlers[N_BEACON] = il4965_hdl_beacon;
+
+	/*
+	 * The same handler is used for both the REPLY to a discrete
+	 * stats request from the host as well as for the periodic
+	 * stats notifications (after received beacons) from the uCode.
+	 */
+	il->handlers[C_STATS] = il4965_hdl_c_stats;
+	il->handlers[N_STATS] = il4965_hdl_stats;
+
+	il_setup_rx_scan_handlers(il);
+
+	/* status change handler */
+	il->handlers[N_CARD_STATE] = il4965_hdl_card_state;
+
+	il->handlers[N_MISSED_BEACONS] = il4965_hdl_missed_beacon;
+	/* Rx handlers */
+	il->handlers[N_RX_PHY] = il4965_hdl_rx_phy;
+	il->handlers[N_RX_MPDU] = il4965_hdl_rx;
+	il->handlers[N_RX] = il4965_hdl_rx;
+	/* block ack */
+	il->handlers[N_COMPRESSED_BA] = il4965_hdl_compressed_ba;
+	/* Tx response */
+	il->handlers[C_TX] = il4965_hdl_tx;
+}
+
+/**
+ * il4965_rx_handle - Main entry function for receiving responses from uCode
+ *
+ * Uses the il->handlers callback function array to invoke
+ * the appropriate handlers, including command responses,
+ * frame-received notifications, and other notifications.
+ */
+void
+il4965_rx_handle(struct il_priv *il)
+{
+	struct il_rx_buf *rxb;
+	struct il_rx_pkt *pkt;
+	struct il_rx_queue *rxq = &il->rxq;
+	u32 r, i;
+	int reclaim;
+	unsigned long flags;
+	u8 fill_rx = 0;
+	u32 count = 8;
+	int total_empty;
+
+	/* uCode's read idx (stored in shared DRAM) indicates the last Rx
+	 * buffer that the driver may process (last buffer filled by ucode). */
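+	/* Only the low 12 bits of closed_rb_num are valid, hence the mask. */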
+	r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
+	i = rxq->read;
+
+	/* Rx interrupt, but nothing sent from uCode */
+	if (i == r)
+		D_RX("r = %d, i = %d\n", r, i);
+
+	/* Calculate how many frames need to be restocked after handling RX */
+	total_empty = r - rxq->write_actual;
+	if (total_empty < 0)
+		total_empty += RX_QUEUE_SIZE;
+
+	if (total_empty > (RX_QUEUE_SIZE / 2))
+		fill_rx = 1;
+
+	while (i != r) {
+		int len;
+
+		rxb = rxq->queue[i];
+
+		/* If an RXB doesn't have a Rx queue slot associated with it,
+		 * then a bug has been introduced in the queue refilling
+		 * routines -- catch it here */
+		BUG_ON(rxb == NULL);
+
+		rxq->queue[i] = NULL;
+
+		pci_unmap_page(il->pci_dev, rxb->page_dma,
+			       PAGE_SIZE << il->hw_params.rx_page_order,
+			       PCI_DMA_FROMDEVICE);
+		pkt = rxb_addr(rxb);
+
+		len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
+		len += sizeof(u32);	/* account for status word */
+
+		reclaim = il_need_reclaim(il, pkt);
+
+		/* Based on type of command response or notification,
+		 *   handle those that need handling via function in
+		 *   handlers table.  See il4965_setup_handlers() */
+		if (il->handlers[pkt->hdr.cmd]) {
+			D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i,
+			     il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+			il->isr_stats.handlers[pkt->hdr.cmd]++;
+			il->handlers[pkt->hdr.cmd] (il, rxb);
+		} else {
+			/* No handling needed */
+			D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r,
+			     i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+		}
+
+		/*
+		 * XXX: After here, we should always check rxb->page
+		 * against NULL before touching it or its virtual
+		 * memory (pkt). Because some handler might have
+		 * already taken or freed the pages.
+		 */
+
+		if (reclaim) {
+			/* Invoke any callbacks, transfer the buffer to caller,
+			 * and fire off the (possibly) blocking il_send_cmd()
+			 * as we reclaim the driver command queue */
+			if (rxb->page)
+				il_tx_cmd_complete(il, rxb);
+			else
+				IL_WARN("Claim null rxb?\n");
+		}
+
+		/* Reuse the page if possible. For notification packets and
+		 * SKBs that fail to Rx correctly, add them back into the
+		 * rx_free list for reuse later. */
+		spin_lock_irqsave(&rxq->lock, flags);
+		if (rxb->page != NULL) {
+			rxb->page_dma =
+			    pci_map_page(il->pci_dev, rxb->page, 0,
+					 PAGE_SIZE << il->hw_params.
+					 rx_page_order, PCI_DMA_FROMDEVICE);
+
+			if (unlikely(pci_dma_mapping_error(il->pci_dev,
+							   rxb->page_dma))) {
+				__il_free_pages(il, rxb->page);
+				rxb->page = NULL;
+				list_add_tail(&rxb->list, &rxq->rx_used);
+			} else {
+				list_add_tail(&rxb->list, &rxq->rx_free);
+				rxq->free_count++;
+			}
+		} else
+			list_add_tail(&rxb->list, &rxq->rx_used);
+
+		spin_unlock_irqrestore(&rxq->lock, flags);
+
+		i = (i + 1) & RX_QUEUE_MASK;
+		/* If there are a lot of unused frames,
+		 * restock the Rx queue so the uCode won't assert. */
+		if (fill_rx) {
+			count++;
+			if (count >= 8) {
+				rxq->read = i;
+				il4965_rx_replenish_now(il);
+				count = 0;
+			}
+		}
+	}
+
+	/* Backtrack one entry */
+	rxq->read = i;
+	if (fill_rx)
+		il4965_rx_replenish_now(il);
+	else
+		il4965_rx_queue_restock(il);
+}
+
+/* call this function to flush any scheduled tasklet */
+static inline void
+il4965_synchronize_irq(struct il_priv *il)
+{
+	/* wait to make sure we flush any pending tasklet */
+	synchronize_irq(il->pci_dev->irq);
+	tasklet_kill(&il->irq_tasklet);
+}
+
+static void
+il4965_irq_tasklet(struct il_priv *il)
+{
+	u32 inta, handled = 0;
+	u32 inta_fh;
+	unsigned long flags;
+	u32 i;
+#ifdef CONFIG_IWLEGACY_DEBUG
+	u32 inta_mask;
+#endif
+
+	spin_lock_irqsave(&il->lock, flags);
+
+	/* Ack/clear/reset pending uCode interrupts.
+	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
+	 *  and will clear only when CSR_FH_INT_STATUS gets cleared. */
+	inta = _il_rd(il, CSR_INT);
+	_il_wr(il, CSR_INT, inta);
+
+	/* Ack/clear/reset pending flow-handler (DMA) interrupts.
+	 * Any new interrupts that happen after this, either while we're
+	 * in this tasklet, or later, will show up in next ISR/tasklet. */
+	inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
+	_il_wr(il, CSR_FH_INT_STATUS, inta_fh);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+	if (il_get_debug_level(il) & IL_DL_ISR) {
+		/* just for debug */
+		inta_mask = _il_rd(il, CSR_INT_MASK);
+		D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta,
+		      inta_mask, inta_fh);
+	}
+#endif
+
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	/* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
+	 * atomic, make sure that inta covers all the interrupts that
+	 * we've discovered, even if FH interrupt came in just after
+	 * reading CSR_INT. */
+	if (inta_fh & CSR49_FH_INT_RX_MASK)
+		inta |= CSR_INT_BIT_FH_RX;
+	if (inta_fh & CSR49_FH_INT_TX_MASK)
+		inta |= CSR_INT_BIT_FH_TX;
+
+	/* Now service all interrupt bits discovered above. */
+	if (inta & CSR_INT_BIT_HW_ERR) {
+		IL_ERR("Hardware error detected.  Restarting.\n");
+
+		/* Tell the device to stop sending interrupts */
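+	/* The TKIP phase-1 key (TTAK) is five 16-bit words. */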
+		il_disable_interrupts(il);
+
+		il->isr_stats.hw++;
+		il_irq_handle_error(il);
+
+		handled |= CSR_INT_BIT_HW_ERR;
+
+		return;
+	}
+#ifdef CONFIG_IWLEGACY_DEBUG
+	if (il_get_debug_level(il) & (IL_DL_ISR)) {
+		/* NIC fires this, but we don't use it, redundant with WAKEUP */
+		if (inta & CSR_INT_BIT_SCD) {
+			D_ISR("Scheduler finished transmitting the frame(s).\n");
+			il->isr_stats.sch++;
+		}
+
+		/* Alive notification via Rx interrupt will do the real work */
+		if (inta & CSR_INT_BIT_ALIVE) {
+			D_ISR("Alive interrupt\n");
+			il->isr_stats.alive++;
+		}
+	}
+#endif
+	/* Safely ignore these bits for debug checks below */
+	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
+
+	/* HW RF KILL switch toggled */
+	if (inta & CSR_INT_BIT_RF_KILL) {
+		int hw_rf_kill = 0;
+
+		if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
+			hw_rf_kill = 1;
+
+		IL_WARN("RF_KILL bit toggled to %s.\n",
+			hw_rf_kill ? "disable radio" : "enable radio");
+
+		il->isr_stats.rfkill++;
+
+		/* The driver only loads the ucode when the interface is set
+		 * up, and it allows loading even if the radio is killed.
+		 * Hence update the killswitch state here; the rfkill handler
+		 * will take care of restarting if needed.
+		 */
+		if (hw_rf_kill) {
+			set_bit(S_RFKILL, &il->status);
+		} else {
+			clear_bit(S_RFKILL, &il->status);
+			il_force_reset(il, true);
+		}
+		wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
+
+		handled |= CSR_INT_BIT_RF_KILL;
+	}
+
+	/* Chip got too hot and stopped itself */
+	if (inta & CSR_INT_BIT_CT_KILL) {
+		IL_ERR("Microcode CT kill error detected.\n");
+		il->isr_stats.ctkill++;
+		handled |= CSR_INT_BIT_CT_KILL;
+	}
+
+	/* Error detected by uCode */
+	if (inta & CSR_INT_BIT_SW_ERR) {
+		IL_ERR("Microcode SW error detected. Restarting 0x%X.\n",
+		       inta);
+		il->isr_stats.sw++;
+		il_irq_handle_error(il);
+		handled |= CSR_INT_BIT_SW_ERR;
+	}
+
+	/*
+	 * uCode wakes up after power-down sleep.
+	 * Tell device about any new tx or host commands enqueued,
+	 * and about any Rx buffers made available while asleep.
+	 */
+	if (inta & CSR_INT_BIT_WAKEUP) {
+		D_ISR("Wakeup interrupt\n");
+		il_rx_queue_update_write_ptr(il, &il->rxq);
+		for (i = 0; i < il->hw_params.max_txq_num; i++)
+			il_txq_update_write_ptr(il, &il->txq[i]);
+		il->isr_stats.wakeup++;
+		handled |= CSR_INT_BIT_WAKEUP;
+	}
+
+	/* All uCode command responses, including Tx command responses,
+	 * Rx "responses" (frame-received notification), and other
+	 * notifications from uCode come through here. */
+	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
+		il4965_rx_handle(il);
+		il->isr_stats.rx++;
+		handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
+	}
+
+	/* This "Tx" DMA channel is used only for loading uCode */
+	if (inta & CSR_INT_BIT_FH_TX) {
+		D_ISR("uCode load interrupt\n");
+		il->isr_stats.tx++;
+		handled |= CSR_INT_BIT_FH_TX;
+		/* Wake up uCode load routine, now that load is complete */
+		il->ucode_write_complete = 1;
+		wake_up(&il->wait_command_queue);
+	}
+
+	if (inta & ~handled) {
+		IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
+		il->isr_stats.unhandled++;
+	}
+
+	if (inta & ~(il->inta_mask)) {
+		IL_WARN("Disabled INTA bits 0x%08x were pending\n",
+			inta & ~il->inta_mask);
+		IL_WARN("   with FH49_INT = 0x%08x\n", inta_fh);
+	}
+
+	/* Re-enable all interrupts */
+	/* only Re-enable if disabled by irq */
+	if (test_bit(S_INT_ENABLED, &il->status))
+		il_enable_interrupts(il);
+	/* Re-enable RF_KILL if it occurred */
+	else if (handled & CSR_INT_BIT_RF_KILL)
+		il_enable_rfkill_int(il);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+	if (il_get_debug_level(il) & (IL_DL_ISR)) {
+		inta = _il_rd(il, CSR_INT);
+		inta_mask = _il_rd(il, CSR_INT_MASK);
+		inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
+		D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
+		      "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
+	}
+#endif
+}
+
+/*****************************************************************************
+ *
+ * sysfs attributes
+ *
+ *****************************************************************************/
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+
+/*
+ * The following adds a new attribute to the sysfs representation
+ * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
+ * used for controlling the debug level.
+ *
+ * See the level definitions in iwl for details.
+ *
+ * The debug_level being managed using sysfs below is a per device debug
+ * level that is used instead of the global debug level if it (the per
+ * device debug level) is set.
+ */
+static ssize_t
+il4965_show_debug_level(struct device *d, struct device_attribute *attr,
+			char *buf)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+	return sprintf(buf, "0x%08X\n", il_get_debug_level(il));
+}
+
+static ssize_t
+il4965_store_debug_level(struct device *d, struct device_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+	unsigned long val;
+	int ret;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret)
+		IL_ERR("%s is not in hex or decimal form.\n", buf);
+	else
+		il->debug_level = val;
+
+	return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(debug_level, 0644, il4965_show_debug_level,
+		   il4965_store_debug_level);
+
+#endif /* CONFIG_IWLEGACY_DEBUG */
+
+static ssize_t
+il4965_show_temperature(struct device *d, struct device_attribute *attr,
+			char *buf)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+
+	if (!il_is_alive(il))
+		return -EAGAIN;
+
+	return sprintf(buf, "%d\n", il->temperature);
+}
+
+static DEVICE_ATTR(temperature, 0444, il4965_show_temperature, NULL);
+
+static ssize_t
+il4965_show_tx_power(struct device *d, struct device_attribute *attr, char *buf)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+
+	if (!il_is_ready_rf(il))
+		return sprintf(buf, "off\n");
+	else
+		return sprintf(buf, "%d\n", il->tx_power_user_lmt);
+}
+
+static ssize_t
+il4965_store_tx_power(struct device *d, struct device_attribute *attr,
+		      const char *buf, size_t count)
+{
+	struct il_priv *il = dev_get_drvdata(d);
+	unsigned long val;
+	int ret;
+
+	ret = kstrtoul(buf, 10, &val);
+	if (ret)
+		IL_INFO("%s is not in decimal form.\n", buf);
+	else {
+		ret = il_set_tx_power(il, val, false);
+		if (ret)
+			IL_ERR("failed setting tx power (0x%08x).\n", ret);
+		else
+			ret = count;
+	}
+	return ret;
+}
+
+static DEVICE_ATTR(tx_power, 0644, il4965_show_tx_power,
+		   il4965_store_tx_power);
+
+static struct attribute *il_sysfs_entries[] = {
+	&dev_attr_temperature.attr,
+	&dev_attr_tx_power.attr,
+#ifdef CONFIG_IWLEGACY_DEBUG
+	&dev_attr_debug_level.attr,
+#endif
+	NULL
+};
+
+static const struct attribute_group il_attribute_group = {
+	.name = NULL,		/* put in device directory */
+	.attrs = il_sysfs_entries,
+};
+
+/******************************************************************************
+ *
+ * uCode download functions
+ *
+ ******************************************************************************/
+
+static void
+il4965_dealloc_ucode_pci(struct il_priv *il)
+{
+	il_free_fw_desc(il->pci_dev, &il->ucode_code);
+	il_free_fw_desc(il->pci_dev, &il->ucode_data);
+	il_free_fw_desc(il->pci_dev, &il->ucode_data_backup);
+	il_free_fw_desc(il->pci_dev, &il->ucode_init);
+	il_free_fw_desc(il->pci_dev, &il->ucode_init_data);
+	il_free_fw_desc(il->pci_dev, &il->ucode_boot);
+}
+
+static void
+il4965_nic_start(struct il_priv *il)
+{
+	/* Remove all resets to allow NIC to operate */
+	_il_wr(il, CSR_RESET, 0);
+}
+
+static void il4965_ucode_callback(const struct firmware *ucode_raw,
+				  void *context);
+static int il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length);
+
+static int __must_check
+il4965_request_firmware(struct il_priv *il, bool first)
+{
+	const char *name_pre = il->cfg->fw_name_pre;
+	char tag[8];
+
+	if (first) {
+		il->fw_idx = il->cfg->ucode_api_max;
+		sprintf(tag, "%d", il->fw_idx);
+	} else {
+		il->fw_idx--;
+		sprintf(tag, "%d", il->fw_idx);
+	}
+
+	if (il->fw_idx < il->cfg->ucode_api_min) {
+		IL_ERR("no suitable firmware found!\n");
+		return -ENOENT;
+	}
+
+	sprintf(il->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
+
+	D_INFO("attempting to load firmware '%s'\n", il->firmware_name);
+
+	return request_firmware_nowait(THIS_MODULE, 1, il->firmware_name,
+				       &il->pci_dev->dev, GFP_KERNEL, il,
+				       il4965_ucode_callback);
+}
+
+struct il4965_firmware_pieces {
+	const void *inst, *data, *init, *init_data, *boot;
+	size_t inst_size, data_size, init_size, init_data_size, boot_size;
+};
+
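+/*
+ * A legacy uCode file (API v0/v1/v2, and unknown versions, which fall
+ * through to the same case) is a 24-byte header -- the version word plus
+ * five le32 image sizes -- followed by the images laid out back to back:
+ * runtime inst, runtime data, init inst, init data, boot.
+ * il4965_load_firmware() below validates the total size and slices the
+ * file into these pieces.
+ */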
+static int
+il4965_load_firmware(struct il_priv *il, const struct firmware *ucode_raw,
+		     struct il4965_firmware_pieces *pieces)
+{
+	struct il_ucode_header *ucode = (void *)ucode_raw->data;
+	u32 api_ver, hdr_size;
+	const u8 *src;
+
+	il->ucode_ver = le32_to_cpu(ucode->ver);
+	api_ver = IL_UCODE_API(il->ucode_ver);
+
+	switch (api_ver) {
+	default:
+	case 0:
+	case 1:
+	case 2:
+		hdr_size = 24;
+		if (ucode_raw->size < hdr_size) {
+			IL_ERR("File size too small!\n");
+			return -EINVAL;
+		}
+		pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
+		pieces->data_size = le32_to_cpu(ucode->v1.data_size);
+		pieces->init_size = le32_to_cpu(ucode->v1.init_size);
+		pieces->init_data_size = le32_to_cpu(ucode->v1.init_data_size);
+		pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
+		src = ucode->v1.data;
+		break;
+	}
+
+	/* Verify size of file vs. image size info in file's header */
+	if (ucode_raw->size !=
+	    hdr_size + pieces->inst_size + pieces->data_size +
+	    pieces->init_size + pieces->init_data_size + pieces->boot_size) {
+
+		IL_ERR("uCode file size %d does not match expected size\n",
+		       (int)ucode_raw->size);
+		return -EINVAL;
+	}
+
+	pieces->inst = src;
+	src += pieces->inst_size;
+	pieces->data = src;
+	src += pieces->data_size;
+	pieces->init = src;
+	src += pieces->init_size;
+	pieces->init_data = src;
+	src += pieces->init_data_size;
+	pieces->boot = src;
+	src += pieces->boot_size;
+
+	return 0;
+}
+
+/**
+ * il4965_ucode_callback - callback when firmware was loaded
+ *
+ * If loaded successfully, copies the firmware into buffers
+ * for the card to fetch (via DMA).
+ */
+static void
+il4965_ucode_callback(const struct firmware *ucode_raw, void *context)
+{
+	struct il_priv *il = context;
+	int err;
+	struct il4965_firmware_pieces pieces;
+	const unsigned int api_max = il->cfg->ucode_api_max;
+	const unsigned int api_min = il->cfg->ucode_api_min;
+	u32 api_ver;
+
+	u32 max_probe_length = 200;
+	u32 standard_phy_calibration_size =
+	    IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
+
+	memset(&pieces, 0, sizeof(pieces));
+
+	if (!ucode_raw) {
+		if (il->fw_idx <= il->cfg->ucode_api_max)
+			IL_ERR("request for firmware file '%s' failed.\n",
+			       il->firmware_name);
+		goto try_again;
+	}
+
+	D_INFO("Loaded firmware file '%s' (%zd bytes).\n", il->firmware_name,
+	       ucode_raw->size);
+
+	/* Make sure that we got at least the API version number */
+	if (ucode_raw->size < 4) {
+		IL_ERR("File size way too small!\n");
+		goto try_again;
+	}
+
+	/* Data from ucode file:  header followed by uCode images */
+	err = il4965_load_firmware(il, ucode_raw, &pieces);
+
+	if (err)
+		goto try_again;
+
+	api_ver = IL_UCODE_API(il->ucode_ver);
+
+	/*
+	 * api_ver should match the api version forming part of the
+	 * firmware filename ... but we don't check for that and only rely
+	 * on the API version read from firmware header from here on forward
+	 */
+	if (api_ver < api_min || api_ver > api_max) {
+		IL_ERR("Driver unable to support your firmware API. "
+		       "Driver supports v%u, firmware is v%u.\n", api_max,
+		       api_ver);
+		goto try_again;
+	}
+
+	if (api_ver != api_max)
+		IL_ERR("Firmware has old API version. Expected v%u, "
+		       "got v%u. New firmware can be obtained "
+		       "from http://www.intellinuxwireless.org.\n", api_max,
+		       api_ver);
+
+	IL_INFO("loaded firmware version %u.%u.%u.%u\n",
+		IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver),
+		IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver));
+
+	snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version),
+		 "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver),
+		 IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver),
+		 IL_UCODE_SERIAL(il->ucode_ver));
+
+	/*
+	 * For any of the failures below (before allocating pci memory)
+	 * we will try to load a version with a smaller API -- maybe the
+	 * user just got a corrupted version of the latest API.
+	 */
+
+	D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver);
+	D_INFO("f/w package hdr runtime inst size = %zd\n", pieces.inst_size);
+	D_INFO("f/w package hdr runtime data size = %zd\n", pieces.data_size);
+	D_INFO("f/w package hdr init inst size = %zd\n", pieces.init_size);
+	D_INFO("f/w package hdr init data size = %zd\n", pieces.init_data_size);
+	D_INFO("f/w package hdr boot inst size = %zd\n", pieces.boot_size);
+
+	/* Verify that uCode images will fit in card's SRAM */
+	if (pieces.inst_size > il->hw_params.max_inst_size) {
+		IL_ERR("uCode instr len %zd too large to fit in card's SRAM\n",
+		       pieces.inst_size);
+		goto try_again;
+	}
+
+	if (pieces.data_size > il->hw_params.max_data_size) {
+		IL_ERR("uCode data len %zd too large to fit in card's SRAM\n",
+		       pieces.data_size);
+		goto try_again;
+	}
+
+	if (pieces.init_size > il->hw_params.max_inst_size) {
+		IL_ERR("uCode init instr len %zd too large to fit in card's SRAM\n",
+		       pieces.init_size);
+		goto try_again;
+	}
+
+	if (pieces.init_data_size > il->hw_params.max_data_size) {
+		IL_ERR("uCode init data len %zd too large to fit in card's SRAM\n",
+		       pieces.init_data_size);
+		goto try_again;
+	}
+
+	if (pieces.boot_size > il->hw_params.max_bsm_size) {
+		IL_ERR("uCode boot instr len %zd too large to fit in card's BSM\n",
+		       pieces.boot_size);
+		goto try_again;
+	}
+
+	/* Allocate ucode buffers for card's bus-master loading ... */
+
+	/* Runtime instructions and 2 copies of data:
+	 * 1) unmodified from disk
+	 * 2) backup cache for save/restore during power-downs */
+	il->ucode_code.len = pieces.inst_size;
+	il_alloc_fw_desc(il->pci_dev, &il->ucode_code);
+
+	il->ucode_data.len = pieces.data_size;
+	il_alloc_fw_desc(il->pci_dev, &il->ucode_data);
+
+	il->ucode_data_backup.len = pieces.data_size;
+	il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup);
+
+	if (!il->ucode_code.v_addr || !il->ucode_data.v_addr ||
+	    !il->ucode_data_backup.v_addr)
+		goto err_pci_alloc;
+
+	/* Initialization instructions and data */
+	if (pieces.init_size && pieces.init_data_size) {
+		il->ucode_init.len = pieces.init_size;
+		il_alloc_fw_desc(il->pci_dev, &il->ucode_init);
+
+		il->ucode_init_data.len = pieces.init_data_size;
+		il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data);
+
+		if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr)
+			goto err_pci_alloc;
+	}
+
+	/* Bootstrap (instructions only, no data) */
+	if (pieces.boot_size) {
+		il->ucode_boot.len = pieces.boot_size;
+		il_alloc_fw_desc(il->pci_dev, &il->ucode_boot);
+
+		if (!il->ucode_boot.v_addr)
+			goto err_pci_alloc;
+	}
+
+	/* Now that we can no longer fail, copy information */
+
+	il->sta_key_max_num = STA_KEY_MAX_NUM;
+
+	/* Copy images into buffers for card's bus-master reads ... */
+
+	/* Runtime instructions (first block of data in file) */
+	D_INFO("Copying (but not loading) uCode instr len %zd\n",
+	       pieces.inst_size);
+	memcpy(il->ucode_code.v_addr, pieces.inst, pieces.inst_size);
+
+	D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
+	       il->ucode_code.v_addr, (u32) il->ucode_code.p_addr);
+
+	/*
+	 * Runtime data
+	 * NOTE:  Copy into backup buffer will be done in il_up()
+	 */
+	D_INFO("Copying (but not loading) uCode data len %zd\n",
+	       pieces.data_size);
+	memcpy(il->ucode_data.v_addr, pieces.data, pieces.data_size);
+	memcpy(il->ucode_data_backup.v_addr, pieces.data, pieces.data_size);
+
+	/* Initialization instructions */
+	if (pieces.init_size) {
+		D_INFO("Copying (but not loading) init instr len %zd\n",
+		       pieces.init_size);
+		memcpy(il->ucode_init.v_addr, pieces.init, pieces.init_size);
+	}
+
+	/* Initialization data */
+	if (pieces.init_data_size) {
+		D_INFO("Copying (but not loading) init data len %zd\n",
+		       pieces.init_data_size);
+		memcpy(il->ucode_init_data.v_addr, pieces.init_data,
+		       pieces.init_data_size);
+	}
+
+	/* Bootstrap instructions */
+	D_INFO("Copying (but not loading) boot instr len %zd\n",
+	       pieces.boot_size);
+	memcpy(il->ucode_boot.v_addr, pieces.boot, pieces.boot_size);
+
+	/*
+	 * Figure out the offsets of the chain noise reset and gain commands
+	 * based on the size of the standard PHY calibration commands table.
+	 */
+	il->_4965.phy_calib_chain_noise_reset_cmd =
+	    standard_phy_calibration_size;
+	il->_4965.phy_calib_chain_noise_gain_cmd =
+	    standard_phy_calibration_size + 1;
+
+	/**************************************************
+	 * This is still part of probe() in a sense...
+	 *
+	 * 9. Setup and register with mac80211 and debugfs
+	 **************************************************/
+	err = il4965_mac_setup_register(il, max_probe_length);
+	if (err)
+		goto out_unbind;
+
+	err = il_dbgfs_register(il, DRV_NAME);
+	if (err)
+		IL_ERR("failed to create debugfs files. Ignoring error: %d\n",
+		       err);
+
+	err = sysfs_create_group(&il->pci_dev->dev.kobj, &il_attribute_group);
+	if (err) {
+		IL_ERR("failed to create sysfs device attributes\n");
+		goto out_unbind;
+	}
+
+	/* We have our copies now, allow the OS to release its copy */
+	release_firmware(ucode_raw);
+	complete(&il->_4965.firmware_loading_complete);
+	return;
+
+try_again:
+	/* try next, if any */
+	if (il4965_request_firmware(il, false))
+		goto out_unbind;
+	release_firmware(ucode_raw);
+	return;
+
+err_pci_alloc:
+	IL_ERR("failed to allocate pci memory\n");
+	il4965_dealloc_ucode_pci(il);
+out_unbind:
+	complete(&il->_4965.firmware_loading_complete);
+	device_release_driver(&il->pci_dev->dev);
+	release_firmware(ucode_raw);
+}
+
+static const char *const desc_lookup_text[] = {
+	"OK",
+	"FAIL",
+	"BAD_PARAM",
+	"BAD_CHECKSUM",
+	"NMI_INTERRUPT_WDG",
+	"SYSASSERT",
+	"FATAL_ERROR",
+	"BAD_COMMAND",
+	"HW_ERROR_TUNE_LOCK",
+	"HW_ERROR_TEMPERATURE",
+	"ILLEGAL_CHAN_FREQ",
+	"VCC_NOT_STBL",
+	"FH49_ERROR",
+	"NMI_INTERRUPT_HOST",
+	"NMI_INTERRUPT_ACTION_PT",
+	"NMI_INTERRUPT_UNKNOWN",
+	"UCODE_VERSION_MISMATCH",
+	"HW_ERROR_ABS_LOCK",
+	"HW_ERROR_CAL_LOCK_FAIL",
+	"NMI_INTERRUPT_INST_ACTION_PT",
+	"NMI_INTERRUPT_DATA_ACTION_PT",
+	"NMI_TRM_HW_ER",
+	"NMI_INTERRUPT_TRM",
+	"NMI_INTERRUPT_BREAK_POINT",
+	"DEBUG_0",
+	"DEBUG_1",
+	"DEBUG_2",
+	"DEBUG_3",
+};
+
+static struct {
+	char *name;
+	u8 num;
+} advanced_lookup[] = {
+	{"NMI_INTERRUPT_WDG", 0x34},
+	{"SYSASSERT", 0x35},
+	{"UCODE_VERSION_MISMATCH", 0x37},
+	{"BAD_COMMAND", 0x38},
+	{"NMI_INTERRUPT_DATA_ACTION_PT", 0x3C},
+	{"FATAL_ERROR", 0x3D},
+	{"NMI_TRM_HW_ERR", 0x46},
+	{"NMI_INTERRUPT_TRM", 0x4C},
+	{"NMI_INTERRUPT_BREAK_POINT", 0x54},
+	{"NMI_INTERRUPT_WDG_RXF_FULL", 0x5C},
+	{"NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64},
+	{"NMI_INTERRUPT_HOST", 0x66},
+	{"NMI_INTERRUPT_ACTION_PT", 0x7C},
+	{"NMI_INTERRUPT_UNKNOWN", 0x84},
+	{"NMI_INTERRUPT_INST_ACTION_PT", 0x86},
+	{"ADVANCED_SYSASSERT", 0},
+};
+
+static const char *
+il4965_desc_lookup(u32 num)
+{
+	int i;
+	int max = ARRAY_SIZE(desc_lookup_text);
+
+	if (num < max)
+		return desc_lookup_text[num];
+
+	max = ARRAY_SIZE(advanced_lookup) - 1;
+	for (i = 0; i < max; i++) {
+		if (advanced_lookup[i].num == num)
+			break;
+	}
+	return advanced_lookup[i].name;
+}
+
+#define ERROR_START_OFFSET  (1 * sizeof(u32))
+#define ERROR_ELEM_SIZE     (7 * sizeof(u32))
+
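+/*
+ * Word offsets from 'base' of the fields read out of the error event
+ * table below: 0 count, 1 desc, 2 pc, 3 blink1, 4 blink2, 5 ilink1,
+ * 6 ilink2, 7 data1, 8 data2, 9 line, 11 time, 22 hcmd.
+ */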
+void
+il4965_dump_nic_error_log(struct il_priv *il)
+{
+	u32 data2, line;
+	u32 desc, time, count, base, data1;
+	u32 blink1, blink2, ilink1, ilink2;
+	u32 pc, hcmd;
+
+	if (il->ucode_type == UCODE_INIT)
+		base = le32_to_cpu(il->card_alive_init.error_event_table_ptr);
+	else
+		base = le32_to_cpu(il->card_alive.error_event_table_ptr);
+
+	if (!il->ops->is_valid_rtc_data_addr(base)) {
+		IL_ERR("Invalid error log pointer 0x%08X for %s uCode\n",
+		       base, (il->ucode_type == UCODE_INIT) ? "Init" : "RT");
+		return;
+	}
+
+	count = il_read_targ_mem(il, base);
+
+	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
+		IL_ERR("Start IWL Error Log Dump:\n");
+		IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count);
+	}
+
+	desc = il_read_targ_mem(il, base + 1 * sizeof(u32));
+	il->isr_stats.err_code = desc;
+	pc = il_read_targ_mem(il, base + 2 * sizeof(u32));
+	blink1 = il_read_targ_mem(il, base + 3 * sizeof(u32));
+	blink2 = il_read_targ_mem(il, base + 4 * sizeof(u32));
+	ilink1 = il_read_targ_mem(il, base + 5 * sizeof(u32));
+	ilink2 = il_read_targ_mem(il, base + 6 * sizeof(u32));
+	data1 = il_read_targ_mem(il, base + 7 * sizeof(u32));
+	data2 = il_read_targ_mem(il, base + 8 * sizeof(u32));
+	line = il_read_targ_mem(il, base + 9 * sizeof(u32));
+	time = il_read_targ_mem(il, base + 11 * sizeof(u32));
+	hcmd = il_read_targ_mem(il, base + 22 * sizeof(u32));
+
+	IL_ERR("Desc                                  Time       "
+	       "data1      data2      line\n");
+	IL_ERR("%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
+	       il4965_desc_lookup(desc), desc, time, data1, data2, line);
+	IL_ERR("pc      blink1  blink2  ilink1  ilink2  hcmd\n");
+	IL_ERR("0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n", pc, blink1,
+	       blink2, ilink1, ilink2, hcmd);
+}
+
+static void
+il4965_rf_kill_ct_config(struct il_priv *il)
+{
+	struct il_ct_kill_config cmd;
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&il->lock, flags);
+	_il_wr(il, CSR_UCODE_DRV_GP1_CLR,
+	       CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	cmd.critical_temperature_R =
+	    cpu_to_le32(il->hw_params.ct_kill_threshold);
+
+	ret = il_send_cmd_pdu(il, C_CT_KILL_CONFIG, sizeof(cmd), &cmd);
+	if (ret)
+		IL_ERR("C_CT_KILL_CONFIG failed\n");
+	else
+		D_INFO("C_CT_KILL_CONFIG succeeded, critical temperature is %d\n",
+		       il->hw_params.ct_kill_threshold);
+}
+
+static const s8 default_queue_to_tx_fifo[] = {
+	IL_TX_FIFO_VO,
+	IL_TX_FIFO_VI,
+	IL_TX_FIFO_BE,
+	IL_TX_FIFO_BK,
+	IL49_CMD_FIFO_NUM,
+	IL_TX_FIFO_UNUSED,
+	IL_TX_FIFO_UNUSED,
+};
+
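+/* Builds a contiguous bitmask covering bits lo..hi inclusive,
+ * e.g. IL_MASK(0, 6) == 0x7f -- used below to enable Tx FIFOs 0-6. */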
+#define IL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
+
+static int
+il4965_alive_notify(struct il_priv *il)
+{
+	u32 a;
+	unsigned long flags;
+	int i, chan;
+	u32 reg_val;
+
+	spin_lock_irqsave(&il->lock, flags);
+
+	/* Clear 4965's internal Tx scheduler database */
+	il->scd_base_addr = il_rd_prph(il, IL49_SCD_SRAM_BASE_ADDR);
+	a = il->scd_base_addr + IL49_SCD_CONTEXT_DATA_OFFSET;
+	for (; a < il->scd_base_addr + IL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
+		il_write_targ_mem(il, a, 0);
+	for (; a < il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
+		il_write_targ_mem(il, a, 0);
+	for (; a < il->scd_base_addr +
+	     IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(il->hw_params.max_txq_num);
+	     a += 4)
+		il_write_targ_mem(il, a, 0);
+
+	/* Tell 4965 where to find Tx byte count tables */
+	il_wr_prph(il, IL49_SCD_DRAM_BASE_ADDR, il->scd_bc_tbls.dma >> 10);
+
+	/* Enable DMA channel */
+	for (chan = 0; chan < FH49_TCSR_CHNL_NUM; chan++)
+		il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(chan),
+		      FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+		      FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
+
+	/* Update FH chicken bits */
+	reg_val = il_rd(il, FH49_TX_CHICKEN_BITS_REG);
+	il_wr(il, FH49_TX_CHICKEN_BITS_REG,
+	      reg_val | FH49_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
+
+	/* Disable chain mode for all queues */
+	il_wr_prph(il, IL49_SCD_QUEUECHAIN_SEL, 0);
+
+	/* Initialize each Tx queue (including the command queue) */
+	for (i = 0; i < il->hw_params.max_txq_num; i++) {
+
+		/* TFD circular buffer read/write idxes */
+		il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(i), 0);
+		il_wr(il, HBUS_TARG_WRPTR, 0 | (i << 8));
+
+		/* Max Tx Window size for Scheduler-ACK mode */
+		il_write_targ_mem(il,
+				  il->scd_base_addr +
+				  IL49_SCD_CONTEXT_QUEUE_OFFSET(i),
+				  (SCD_WIN_SIZE <<
+				   IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
+				  IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
+
+		/* Frame limit */
+		il_write_targ_mem(il,
+				  il->scd_base_addr +
+				  IL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
+				  sizeof(u32),
+				  (SCD_FRAME_LIMIT <<
+				   IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+				  IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
+
+	}
+	il_wr_prph(il, IL49_SCD_INTERRUPT_MASK,
+		   (1 << il->hw_params.max_txq_num) - 1);
+
+	/* Activate all Tx DMA/FIFO channels */
+	il4965_txq_set_sched(il, IL_MASK(0, 6));
+
+	il4965_set_wr_ptrs(il, IL_DEFAULT_CMD_QUEUE_NUM, 0);
+
+	/* make sure all queues are not stopped */
+	memset(&il->queue_stopped[0], 0, sizeof(il->queue_stopped));
+	for (i = 0; i < 4; i++)
+		atomic_set(&il->queue_stop_count[i], 0);
+
+	/* reset to 0 to enable all the queues first */
+	il->txq_ctx_active_msk = 0;
+	/* Map each Tx/cmd queue to its corresponding fifo */
+	BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
+
+	for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
+		int ac = default_queue_to_tx_fifo[i];
+
+		il_txq_ctx_activate(il, i);
+
+		if (ac == IL_TX_FIFO_UNUSED)
+			continue;
+
+		il4965_tx_queue_set_status(il, &il->txq[i], ac, 0);
+	}
+
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	return 0;
+}
+
+/**
+ * il4965_alive_start - called after N_ALIVE notification received
+ *                   from protocol/runtime uCode (initialization uCode's
+ *                   Alive gets handled by il_init_alive_start()).
+ */
+static void
+il4965_alive_start(struct il_priv *il)
+{
+	int ret = 0;
+
+	D_INFO("Runtime Alive received.\n");
+
+	if (il->card_alive.is_valid != UCODE_VALID_OK) {
+		/* We had an error bringing up the hardware, so take it
+		 * all the way back down so we can try again */
+		D_INFO("Alive failed.\n");
+		goto restart;
+	}
+
+	/* The "initialize" uCode has loaded the runtime uCode; verify the
+	 * inst image. This is a paranoid check, because we would not have
+	 * gotten the "runtime" alive if the code weren't properly loaded. */
+	if (il4965_verify_ucode(il)) {
+		/* Runtime instruction load was bad;
+		 * take it all the way back down so we can try again */
+		D_INFO("Bad runtime uCode load.\n");
+		goto restart;
+	}
+
+	ret = il4965_alive_notify(il);
+	if (ret) {
+		IL_WARN("Could not complete ALIVE transition [ntf]: %d\n", ret);
+		goto restart;
+	}
+
+	/* After the ALIVE response, we can send host commands to the uCode */
+	set_bit(S_ALIVE, &il->status);
+
+	/* Enable watchdog to monitor the driver tx queues */
+	il_setup_watchdog(il);
+
+	if (il_is_rfkill(il))
+		return;
+
+	ieee80211_wake_queues(il->hw);
+
+	il->active_rate = RATES_MASK;
+
+	il_power_update_mode(il, true);
+	D_INFO("Updated power mode\n");
+
+	if (il_is_associated(il)) {
+		struct il_rxon_cmd *active_rxon =
+		    (struct il_rxon_cmd *)&il->active;
+		/* apply any changes in staging */
+		il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+	} else {
+		/* Initialize our rx_config data */
+		il_connection_init_rx_config(il);
+
+		if (il->ops->set_rxon_chain)
+			il->ops->set_rxon_chain(il);
+	}
+
+	/* Configure bluetooth coexistence if enabled */
+	il_send_bt_config(il);
+
+	il4965_reset_run_time_calib(il);
+
+	set_bit(S_READY, &il->status);
+
+	/* Configure the adapter for unassociated operation */
+	il_commit_rxon(il);
+
+	/* At this point, the NIC is initialized and operational */
+	il4965_rf_kill_ct_config(il);
+
+	D_INFO("ALIVE processing complete.\n");
+	wake_up(&il->wait_command_queue);
+
+	return;
+
+restart:
+	queue_work(il->workqueue, &il->restart);
+}
+
+static void il4965_cancel_deferred_work(struct il_priv *il);
+
+static void
+__il4965_down(struct il_priv *il)
+{
+	unsigned long flags;
+	int exit_pending;
+
+	D_INFO(DRV_NAME " is going down\n");
+
+	il_scan_cancel_timeout(il, 200);
+
+	exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status);
+
+	/* Stop TX queues watchdog. We need to have the S_EXIT_PENDING bit
+	 * set to prevent the timer from being rearmed. */
+	del_timer_sync(&il->watchdog);
+
+	il_clear_ucode_stations(il);
+
+	/* FIXME: race conditions ? */
+	spin_lock_irq(&il->sta_lock);
+	/*
+	 * Remove all key information that is not stored as part
+	 * of station information since mac80211 may not have had
+	 * a chance to remove all the keys. When device is
+	 * reconfigured by mac80211 after an error all keys will
+	 * be reconfigured.
+	 */
+	memset(il->_4965.wep_keys, 0, sizeof(il->_4965.wep_keys));
+	il->_4965.key_mapping_keys = 0;
+	spin_unlock_irq(&il->sta_lock);
+
+	il_dealloc_bcast_stations(il);
+	il_clear_driver_stations(il);
+
+	/* Unblock any waiting calls */
+	wake_up_all(&il->wait_command_queue);
+
+	/* Wipe out the EXIT_PENDING status bit if we are not actually
+	 * exiting the module */
+	if (!exit_pending)
+		clear_bit(S_EXIT_PENDING, &il->status);
+
+	/* stop and reset the on-board processor */
+	_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+	/* tell the device to stop sending interrupts */
+	spin_lock_irqsave(&il->lock, flags);
+	il_disable_interrupts(il);
+	spin_unlock_irqrestore(&il->lock, flags);
+	il4965_synchronize_irq(il);
+
+	if (il->mac80211_registered)
+		ieee80211_stop_queues(il->hw);
+
+	/* If we have not previously called il_init() then
+	 * clear all bits but the RF Kill bit and return */
+	if (!il_is_init(il)) {
+		il->status =
+		    test_bit(S_RFKILL, &il->status) << S_RFKILL |
+		    test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
+		    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
+		goto exit;
+	}
+
+	/* ...otherwise clear out all the status bits but the RF Kill
+	 * bit and continue taking the NIC down. */
+	il->status &=
+	    test_bit(S_RFKILL, &il->status) << S_RFKILL |
+	    test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
+	    test_bit(S_FW_ERROR, &il->status) << S_FW_ERROR |
+	    test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
+
+	/*
+	 * Interrupts are disabled and synchronized, and il->mutex is held,
+	 * so this is the only thread that will program device registers.
+	 * Lockdep assertions still apply, though, so take reg_lock anyway.
+	 */
+	spin_lock_irq(&il->reg_lock);
+	/* FIXME: il_grab_nic_access if rfkill is off ? */
+
+	il4965_txq_ctx_stop(il);
+	il4965_rxq_stop(il);
+	/* Power-down device's busmaster DMA clocks */
+	_il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+	udelay(5);
+	/* Make sure (redundant) we've released our request to stay awake */
+	_il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+	/* Stop the device, and put it in low power state */
+	_il_apm_stop(il);
+
+	spin_unlock_irq(&il->reg_lock);
+
+	il4965_txq_ctx_unmap(il);
+exit:
+	memset(&il->card_alive, 0, sizeof(struct il_alive_resp));
+
+	dev_kfree_skb(il->beacon_skb);
+	il->beacon_skb = NULL;
+
+	/* clear out any free frames */
+	il4965_clear_free_frames(il);
+}
+
+static void
+il4965_down(struct il_priv *il)
+{
+	mutex_lock(&il->mutex);
+	__il4965_down(il);
+	mutex_unlock(&il->mutex);
+
+	il4965_cancel_deferred_work(il);
+}
+
+
+static void
+il4965_set_hw_ready(struct il_priv *il)
+{
+	int ret;
+
+	il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+		   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
+
+	/* See if we got it */
+	ret = _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
+			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+			   100);
+	if (ret >= 0)
+		il->hw_ready = true;
+
+	D_INFO("hardware %s ready\n", (il->hw_ready) ? "" : "not");
+}
+
+static void
+il4965_prepare_card_hw(struct il_priv *il)
+{
+	int ret;
+
+	il->hw_ready = false;
+
+	il4965_set_hw_ready(il);
+	if (il->hw_ready)
+		return;
+
+	/* If HW is not ready, prepare the conditions to check again */
+	il_set_bit(il, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE);
+
+	ret =
+	    _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
+			 ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
+			 CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
+
+	/* HW should be ready by now, check again. */
+	if (ret != -ETIMEDOUT)
+		il4965_set_hw_ready(il);
+}
+
+#define MAX_HW_RESTARTS 5
+
+static int
+__il4965_up(struct il_priv *il)
+{
+	int i;
+	int ret;
+
+	if (test_bit(S_EXIT_PENDING, &il->status)) {
+		IL_WARN("Exit pending; will not bring the NIC up\n");
+		return -EIO;
+	}
+
+	if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) {
+		IL_ERR("ucode not available for device bringup\n");
+		return -EIO;
+	}
+
+	ret = il4965_alloc_bcast_station(il);
+	if (ret) {
+		il_dealloc_bcast_stations(il);
+		return ret;
+	}
+
+	il4965_prepare_card_hw(il);
+	if (!il->hw_ready) {
+		il_dealloc_bcast_stations(il);
+		IL_ERR("HW not ready\n");
+		return -EIO;
+	}
+
+	/* If platform's RF_KILL switch is NOT set to KILL */
+	if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
+		clear_bit(S_RFKILL, &il->status);
+	else {
+		set_bit(S_RFKILL, &il->status);
+		wiphy_rfkill_set_hw_state(il->hw->wiphy, true);
+
+		il_dealloc_bcast_stations(il);
+		il_enable_rfkill_int(il);
+		IL_WARN("Radio disabled by HW RF Kill switch\n");
+		return 0;
+	}
+
+	_il_wr(il, CSR_INT, 0xFFFFFFFF);
+
+	/* must be initialised before il_hw_nic_init */
+	il->cmd_queue = IL_DEFAULT_CMD_QUEUE_NUM;
+
+	ret = il4965_hw_nic_init(il);
+	if (ret) {
+		IL_ERR("Unable to init nic\n");
+		il_dealloc_bcast_stations(il);
+		return ret;
+	}
+
+	/* make sure rfkill handshake bits are cleared */
+	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+	/* clear (again), then enable host interrupts */
+	_il_wr(il, CSR_INT, 0xFFFFFFFF);
+	il_enable_interrupts(il);
+
+	/* really make sure rfkill handshake bits are cleared */
+	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+	_il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+
+	/* Copy original ucode data image from disk into backup cache.
+	 * This will be used to initialize the on-board processor's
+	 * data SRAM for a clean start when the runtime program first loads. */
+	memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr,
+	       il->ucode_data.len);
+
+	for (i = 0; i < MAX_HW_RESTARTS; i++) {
+
+		/* load bootstrap state machine,
+		 * load bootstrap program into processor's memory,
+		 * prepare to load the "initialize" uCode */
+		ret = il->ops->load_ucode(il);
+
+		if (ret) {
+			IL_ERR("Unable to set up bootstrap uCode: %d\n", ret);
+			continue;
+		}
+
+		/* start card; "initialize" will load runtime ucode */
+		il4965_nic_start(il);
+
+		D_INFO(DRV_NAME " is coming up\n");
+
+		return 0;
+	}
+
+	set_bit(S_EXIT_PENDING, &il->status);
+	__il4965_down(il);
+	clear_bit(S_EXIT_PENDING, &il->status);
+
+	/* We tried to restart and configure the device for as long as our
+	 * patience could withstand. */
+	IL_ERR("Unable to initialize device after %d attempts.\n", i);
+	return -EIO;
+}
+
+/*****************************************************************************
+ *
+ * Workqueue callbacks
+ *
+ *****************************************************************************/
+
+static void
+il4965_bg_init_alive_start(struct work_struct *data)
+{
+	struct il_priv *il =
+	    container_of(data, struct il_priv, init_alive_start.work);
+
+	mutex_lock(&il->mutex);
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		goto out;
+
+	il->ops->init_alive_start(il);
+out:
+	mutex_unlock(&il->mutex);
+}
+
+static void
+il4965_bg_alive_start(struct work_struct *data)
+{
+	struct il_priv *il =
+	    container_of(data, struct il_priv, alive_start.work);
+
+	mutex_lock(&il->mutex);
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		goto out;
+
+	il4965_alive_start(il);
+out:
+	mutex_unlock(&il->mutex);
+}
+
+static void
+il4965_bg_run_time_calib_work(struct work_struct *work)
+{
+	struct il_priv *il = container_of(work, struct il_priv,
+					  run_time_calib_work);
+
+	mutex_lock(&il->mutex);
+
+	if (test_bit(S_EXIT_PENDING, &il->status) ||
+	    test_bit(S_SCANNING, &il->status)) {
+		mutex_unlock(&il->mutex);
+		return;
+	}
+
+	if (il->start_calib) {
+		il4965_chain_noise_calibration(il, (void *)&il->_4965.stats);
+		il4965_sensitivity_calibration(il, (void *)&il->_4965.stats);
+	}
+
+	mutex_unlock(&il->mutex);
+}
+
+static void
+il4965_bg_restart(struct work_struct *data)
+{
+	struct il_priv *il = container_of(data, struct il_priv, restart);
+
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		return;
+
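+	/*
+	 * Two recovery paths: on a firmware error we tear the driver down
+	 * here and let mac80211 restart the hw (which re-enters through our
+	 * start callback); otherwise we cycle the device down and up
+	 * ourselves.
+	 */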
+	if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
+		mutex_lock(&il->mutex);
+		il->is_open = 0;
+
+		__il4965_down(il);
+
+		mutex_unlock(&il->mutex);
+		il4965_cancel_deferred_work(il);
+		ieee80211_restart_hw(il->hw);
+	} else {
+		il4965_down(il);
+
+		mutex_lock(&il->mutex);
+		if (test_bit(S_EXIT_PENDING, &il->status)) {
+			mutex_unlock(&il->mutex);
+			return;
+		}
+
+		__il4965_up(il);
+		mutex_unlock(&il->mutex);
+	}
+}
+
+static void
+il4965_bg_rx_replenish(struct work_struct *data)
+{
+	struct il_priv *il = container_of(data, struct il_priv, rx_replenish);
+
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		return;
+
+	mutex_lock(&il->mutex);
+	il4965_rx_replenish(il);
+	mutex_unlock(&il->mutex);
+}
+
+/*****************************************************************************
+ *
+ * mac80211 entry point functions
+ *
+ *****************************************************************************/
+
+#define UCODE_READY_TIMEOUT	(4 * HZ)
+
+/*
+ * Not a mac80211 entry point function, but it fits in with all the
+ * other mac80211 functions grouped here.
+ */
+static int
+il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
+{
+	int ret;
+	struct ieee80211_hw *hw = il->hw;
+
+	hw->rate_control_algorithm = "iwl-4965-rs";
+
+	/* Tell mac80211 our characteristics */
+	ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+	ieee80211_hw_set(hw, SUPPORTS_PS);
+	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+	ieee80211_hw_set(hw, SPECTRUM_MGMT);
+	ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC);
+	ieee80211_hw_set(hw, SIGNAL_DBM);
+	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+	if (il->cfg->sku & IL_SKU_N)
+		hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS |
+				       NL80211_FEATURE_STATIC_SMPS;
+
+	hw->sta_data_size = sizeof(struct il_station_priv);
+	hw->vif_data_size = sizeof(struct il_vif_priv);
+
+	hw->wiphy->interface_modes =
+	    BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
+
+	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+	hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
+				       REGULATORY_DISABLE_BEACON_HINTS;
+
+	/*
+	 * For now, disable PS by default because it affects
+	 * RX performance significantly.
+	 */
+	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
+	/* account for the 24-byte 802.11 header and the 2-byte header of a
+	 * zero-length SSID element that we create */
+	hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;
+
+	/* Default value; 4 EDCA QOS priorities */
+	hw->queues = 4;
+
+	hw->max_listen_interval = IL_CONN_MAX_LISTEN_INTERVAL;
+
+	if (il->bands[NL80211_BAND_2GHZ].n_channels)
+		il->hw->wiphy->bands[NL80211_BAND_2GHZ] =
+		    &il->bands[NL80211_BAND_2GHZ];
+	if (il->bands[NL80211_BAND_5GHZ].n_channels)
+		il->hw->wiphy->bands[NL80211_BAND_5GHZ] =
+		    &il->bands[NL80211_BAND_5GHZ];
+
+	il_leds_init(il);
+
+	wiphy_ext_feature_set(il->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
+	ret = ieee80211_register_hw(il->hw);
+	if (ret) {
+		IL_ERR("Failed to register hw (error %d)\n", ret);
+		return ret;
+	}
+	il->mac80211_registered = 1;
+
+	return 0;
+}
+
+int
+il4965_mac_start(struct ieee80211_hw *hw)
+{
+	struct il_priv *il = hw->priv;
+	int ret;
+
+	D_MAC80211("enter\n");
+
+	/* we should be verifying the device is ready to be opened */
+	mutex_lock(&il->mutex);
+	ret = __il4965_up(il);
+	mutex_unlock(&il->mutex);
+
+	if (ret)
+		return ret;
+
+	if (il_is_rfkill(il))
+		goto out;
+
+	D_INFO("Start UP work done.\n");
+
+	/* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
+	 * mac80211 will not be run successfully. */
+	ret = wait_event_timeout(il->wait_command_queue,
+				 test_bit(S_READY, &il->status),
+				 UCODE_READY_TIMEOUT);
+	if (!ret) {
+		if (!test_bit(S_READY, &il->status)) {
+			IL_ERR("START_ALIVE timeout after %dms.\n",
+				jiffies_to_msecs(UCODE_READY_TIMEOUT));
+			return -ETIMEDOUT;
+		}
+	}
+
+	il4965_led_enable(il);
+
+out:
+	il->is_open = 1;
+	D_MAC80211("leave\n");
+	return 0;
+}
+
+void
+il4965_mac_stop(struct ieee80211_hw *hw)
+{
+	struct il_priv *il = hw->priv;
+
+	D_MAC80211("enter\n");
+
+	if (!il->is_open)
+		return;
+
+	il->is_open = 0;
+
+	il4965_down(il);
+
+	flush_workqueue(il->workqueue);
+
+	/* User space software may expect rfkill change notifications
+	 * even if the interface is down */
+	_il_wr(il, CSR_INT, 0xFFFFFFFF);
+	il_enable_rfkill_int(il);
+
+	D_MAC80211("leave\n");
+}
+
+void
+il4965_mac_tx(struct ieee80211_hw *hw,
+	      struct ieee80211_tx_control *control,
+	      struct sk_buff *skb)
+{
+	struct il_priv *il = hw->priv;
+
+	D_MACDUMP("enter\n");
+
+	D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
+	     ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
+
+	if (il4965_tx_skb(il, control->sta, skb))
+		dev_kfree_skb_any(skb);
+
+	D_MACDUMP("leave\n");
+}
+
+void
+il4965_mac_update_tkip_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			   struct ieee80211_key_conf *keyconf,
+			   struct ieee80211_sta *sta, u32 iv32, u16 * phase1key)
+{
+	struct il_priv *il = hw->priv;
+
+	D_MAC80211("enter\n");
+
+	il4965_update_tkip_key(il, keyconf, sta, iv32, phase1key);
+
+	D_MAC80211("leave\n");
+}
+
+int
+il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+		   struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+		   struct ieee80211_key_conf *key)
+{
+	struct il_priv *il = hw->priv;
+	int ret;
+	u8 sta_id;
+	bool is_default_wep_key = false;
+
+	D_MAC80211("enter\n");
+
+	if (il->cfg->mod_params->sw_crypto) {
+		D_MAC80211("leave - hwcrypto disabled\n");
+		return -EOPNOTSUPP;
+	}
+
+	/*
+	 * To support IBSS RSN, don't program group keys in IBSS, the
+	 * hardware will then not attempt to decrypt the frames.
+	 */
+	if (vif->type == NL80211_IFTYPE_ADHOC &&
+	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+		D_MAC80211("leave - ad-hoc group key\n");
+		return -EOPNOTSUPP;
+	}
+
+	sta_id = il_sta_id_or_broadcast(il, sta);
+	if (sta_id == IL_INVALID_STATION)
+		return -EINVAL;
+
+	mutex_lock(&il->mutex);
+	il_scan_cancel_timeout(il, 100);
+
+	/*
+	 * If we are setting a WEP group key and we haven't received any key
+	 * mapping so far, we are in legacy WEP mode (group key only);
+	 * otherwise we are in 802.1X mode.
+	 * In legacy WEP mode, we use a different host command to the uCode.
+	 */
+	if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+	     key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
+		if (cmd == SET_KEY)
+			is_default_wep_key = !il->_4965.key_mapping_keys;
+		else
+			is_default_wep_key =
+			    (key->hw_key_idx == HW_KEY_DEFAULT);
+	}
+
+	switch (cmd) {
+	case SET_KEY:
+		if (is_default_wep_key)
+			ret = il4965_set_default_wep_key(il, key);
+		else
+			ret = il4965_set_dynamic_key(il, key, sta_id);
+
+		D_MAC80211("enable hwcrypto key\n");
+		break;
+	case DISABLE_KEY:
+		if (is_default_wep_key)
+			ret = il4965_remove_default_wep_key(il, key);
+		else
+			ret = il4965_remove_dynamic_key(il, key, sta_id);
+
+		D_MAC80211("disable hwcrypto key\n");
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	mutex_unlock(&il->mutex);
+	D_MAC80211("leave\n");
+
+	return ret;
+}
+
+int
+il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			struct ieee80211_ampdu_params *params)
+{
+	struct il_priv *il = hw->priv;
+	int ret = -EINVAL;
+	struct ieee80211_sta *sta = params->sta;
+	enum ieee80211_ampdu_mlme_action action = params->action;
+	u16 tid = params->tid;
+	u16 *ssn = &params->ssn;
+
+	D_HT("A-MPDU action on addr %pM tid %d\n", sta->addr, tid);
+
+	if (!(il->cfg->sku & IL_SKU_N))
+		return -EACCES;
+
+	mutex_lock(&il->mutex);
+
+	switch (action) {
+	case IEEE80211_AMPDU_RX_START:
+		D_HT("start Rx\n");
+		ret = il4965_sta_rx_agg_start(il, sta, tid, *ssn);
+		break;
+	case IEEE80211_AMPDU_RX_STOP:
+		D_HT("stop Rx\n");
+		ret = il4965_sta_rx_agg_stop(il, sta, tid);
+		if (test_bit(S_EXIT_PENDING, &il->status))
+			ret = 0;
+		break;
+	case IEEE80211_AMPDU_TX_START:
+		D_HT("start Tx\n");
+		ret = il4965_tx_agg_start(il, vif, sta, tid, ssn);
+		break;
+	case IEEE80211_AMPDU_TX_STOP_CONT:
+	case IEEE80211_AMPDU_TX_STOP_FLUSH:
+	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+		D_HT("stop Tx\n");
+		ret = il4965_tx_agg_stop(il, vif, sta, tid);
+		if (test_bit(S_EXIT_PENDING, &il->status))
+			ret = 0;
+		break;
+	case IEEE80211_AMPDU_TX_OPERATIONAL:
+		ret = 0;
+		break;
+	}
+	mutex_unlock(&il->mutex);
+
+	return ret;
+}
+
+int
+il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		   struct ieee80211_sta *sta)
+{
+	struct il_priv *il = hw->priv;
+	struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+	bool is_ap = vif->type == NL80211_IFTYPE_STATION;
+	int ret;
+	u8 sta_id;
+
+	D_INFO("received request to add station %pM\n", sta->addr);
+	mutex_lock(&il->mutex);
+	D_INFO("proceeding to add station %pM\n", sta->addr);
+	sta_priv->common.sta_id = IL_INVALID_STATION;
+
+	atomic_set(&sta_priv->pending_frames, 0);
+
+	ret =
+	    il_add_station_common(il, sta->addr, is_ap, sta, &sta_id);
+	if (ret) {
+		IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret);
+		/* Should we return success if return code is EEXIST ? */
+		mutex_unlock(&il->mutex);
+		return ret;
+	}
+
+	sta_priv->common.sta_id = sta_id;
+
+	/* Initialize rate scaling */
+	D_INFO("Initializing rate scaling for station %pM\n", sta->addr);
+	il4965_rs_rate_init(il, sta, sta_id);
+	mutex_unlock(&il->mutex);
+
+	return 0;
+}
+
+void
+il4965_mac_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			  struct ieee80211_channel_switch *ch_switch)
+{
+	struct il_priv *il = hw->priv;
+	const struct il_channel_info *ch_info;
+	struct ieee80211_conf *conf = &hw->conf;
+	struct ieee80211_channel *channel = ch_switch->chandef.chan;
+	struct il_ht_config *ht_conf = &il->current_ht_config;
+	u16 ch;
+
+	D_MAC80211("enter\n");
+
+	mutex_lock(&il->mutex);
+
+	if (il_is_rfkill(il))
+		goto out;
+
+	if (test_bit(S_EXIT_PENDING, &il->status) ||
+	    test_bit(S_SCANNING, &il->status) ||
+	    test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
+		goto out;
+
+	if (!il_is_associated(il))
+		goto out;
+
+	if (!il->ops->set_channel_switch)
+		goto out;
+
+	ch = channel->hw_value;
+	if (le16_to_cpu(il->active.channel) == ch)
+		goto out;
+
+	ch_info = il_get_channel_info(il, channel->band, ch);
+	if (!il_is_channel_valid(ch_info)) {
+		D_MAC80211("invalid channel\n");
+		goto out;
+	}
+
+	spin_lock_irq(&il->lock);
+
+	il->current_ht_config.smps = conf->smps_mode;
+
+	/* Configure HT40 channels */
+	switch (cfg80211_get_chandef_type(&ch_switch->chandef)) {
+	case NL80211_CHAN_NO_HT:
+	case NL80211_CHAN_HT20:
+		il->ht.is_40mhz = false;
+		il->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
+		break;
+	case NL80211_CHAN_HT40MINUS:
+		il->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+		il->ht.is_40mhz = true;
+		break;
+	case NL80211_CHAN_HT40PLUS:
+		il->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+		il->ht.is_40mhz = true;
+		break;
+	}
+
+	if ((le16_to_cpu(il->staging.channel) != ch))
+		il->staging.flags = 0;
+
+	il_set_rxon_channel(il, channel);
+	il_set_rxon_ht(il, ht_conf);
+	il_set_flags_for_band(il, channel->band, il->vif);
+
+	spin_unlock_irq(&il->lock);
+
+	il_set_rate(il);
+	/*
+	 * at this point, staging_rxon has the
+	 * configuration for channel switch
+	 */
+	set_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
+	il->switch_channel = cpu_to_le16(ch);
+	if (il->ops->set_channel_switch(il, ch_switch)) {
+		clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
+		il->switch_channel = 0;
+		ieee80211_chswitch_done(il->vif, false);
+	}
+
+out:
+	mutex_unlock(&il->mutex);
+	D_MAC80211("leave\n");
+}
+
+void
+il4965_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+			unsigned int *total_flags, u64 multicast)
+{
+	struct il_priv *il = hw->priv;
+	__le32 filter_or = 0, filter_nand = 0;
+
+#define CHK(test, flag)	do { \
+	if (*total_flags & (test))		\
+		filter_or |= (flag);		\
+	else					\
+		filter_nand |= (flag);		\
+	} while (0)
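+/*
+ * CHK() accumulates the requested changes: RXON filter flags to set go
+ * into filter_or, flags to clear into filter_nand; both are applied to
+ * staging.filter_flags under the mutex below.
+ */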
+
+	D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
+		   *total_flags);
+
+	CHK(FIF_OTHER_BSS, RXON_FILTER_PROMISC_MSK);
+	/* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
+	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
+	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
+
+#undef CHK
+
+	mutex_lock(&il->mutex);
+
+	il->staging.filter_flags &= ~filter_nand;
+	il->staging.filter_flags |= filter_or;
+
+	/*
+	 * We don't commit directly here because the hardware may be in the
+	 * middle of a scan, but the filter flags change will be committed
+	 * eventually anyway.
+	 */
+
+	mutex_unlock(&il->mutex);
+
+	/*
+	 * Receiving all multicast frames is always enabled by the
+	 * default flags setup in il_connection_init_rx_config()
+	 * since we currently do not support programming multicast
+	 * filters into the device.
+	 */
+	*total_flags &=
+	    FIF_OTHER_BSS | FIF_ALLMULTI |
+	    FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
+}
+
+/*****************************************************************************
+ *
+ * driver setup and teardown
+ *
+ *****************************************************************************/
+
+static void
+il4965_bg_txpower_work(struct work_struct *work)
+{
+	struct il_priv *il = container_of(work, struct il_priv,
+					  txpower_work);
+
+	mutex_lock(&il->mutex);
+
+	/* If a scan happened to start before we got here
+	 * then just return; the stats notification will
+	 * kick off another scheduled work to compensate for
+	 * any temperature delta we missed here. */
+	if (test_bit(S_EXIT_PENDING, &il->status) ||
+	    test_bit(S_SCANNING, &il->status))
+		goto out;
+
+	/* Regardless of whether we are associated, we must reconfigure the
+	 * TX power since frames can be sent on non-radar channels while
+	 * not associated */
+	il->ops->send_tx_power(il);
+
+	/* Update last_temperature to keep is_calib_needed from running
+	 * when it isn't needed... */
+	il->last_temperature = il->temperature;
+out:
+	mutex_unlock(&il->mutex);
+}
+
+static void
+il4965_setup_deferred_work(struct il_priv *il)
+{
+	il->workqueue = create_singlethread_workqueue(DRV_NAME);
+
+	init_waitqueue_head(&il->wait_command_queue);
+
+	INIT_WORK(&il->restart, il4965_bg_restart);
+	INIT_WORK(&il->rx_replenish, il4965_bg_rx_replenish);
+	INIT_WORK(&il->run_time_calib_work, il4965_bg_run_time_calib_work);
+	INIT_DELAYED_WORK(&il->init_alive_start, il4965_bg_init_alive_start);
+	INIT_DELAYED_WORK(&il->alive_start, il4965_bg_alive_start);
+
+	il_setup_scan_deferred_work(il);
+
+	INIT_WORK(&il->txpower_work, il4965_bg_txpower_work);
+
+	timer_setup(&il->stats_periodic, il4965_bg_stats_periodic, 0);
+
+	timer_setup(&il->watchdog, il_bg_watchdog, 0);
+
+	tasklet_init(&il->irq_tasklet,
+		     (void (*)(unsigned long))il4965_irq_tasklet,
+		     (unsigned long)il);
+}
+
+static void
+il4965_cancel_deferred_work(struct il_priv *il)
+{
+	cancel_work_sync(&il->txpower_work);
+	cancel_delayed_work_sync(&il->init_alive_start);
+	cancel_delayed_work(&il->alive_start);
+	cancel_work_sync(&il->run_time_calib_work);
+
+	il_cancel_scan_deferred_work(il);
+
+	del_timer_sync(&il->stats_periodic);
+}
+
+static void
+il4965_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates)
+{
+	int i;
+
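+	/* il_rates[].ieee holds the rate in IEEE 500 kbps units, while
+	 * mac80211's bitrate field is in 100 kbps units, hence the
+	 * multiplication by 5. */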
+	for (i = 0; i < RATE_COUNT_LEGACY; i++) {
+		rates[i].bitrate = il_rates[i].ieee * 5;
+		rates[i].hw_value = i;	/* Rate scaling will work on idxes */
+		rates[i].hw_value_short = i;
+		rates[i].flags = 0;
+		if ((i >= IL_FIRST_CCK_RATE) && (i <= IL_LAST_CCK_RATE)) {
+			/*
+			 * If CCK != 1M then set short preamble rate flag.
+			 */
+			rates[i].flags |=
+			    (il_rates[i].plcp ==
+			     RATE_1M_PLCP) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE;
+		}
+	}
+}
+
+/*
+ * Acquire il->lock before calling this function!
+ */
+void
+il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx)
+{
+	il_wr(il, HBUS_TARG_WRPTR, (idx & 0xff) | (txq_id << 8));
+	il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(txq_id), idx);
+}
+
+void
+il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
+			   int tx_fifo_id, int scd_retry)
+{
+	int txq_id = txq->q.id;
+
+	/* Find out whether to activate Tx queue */
+	int active = test_bit(txq_id, &il->txq_ctx_active_msk) ? 1 : 0;
+
+	/* Set up and activate */
+	il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
+		   (active << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+		   (tx_fifo_id << IL49_SCD_QUEUE_STTS_REG_POS_TXF) |
+		   (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_WSL) |
+		   (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
+		   IL49_SCD_QUEUE_STTS_REG_MSK);
+
+	txq->sched_retry = scd_retry;
+
+	D_INFO("%s %s Queue %d on AC %d\n", active ? "Activate" : "Deactivate",
+	       scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
+}
+
+static const struct ieee80211_ops il4965_mac_ops = {
+	.tx = il4965_mac_tx,
+	.start = il4965_mac_start,
+	.stop = il4965_mac_stop,
+	.add_interface = il_mac_add_interface,
+	.remove_interface = il_mac_remove_interface,
+	.change_interface = il_mac_change_interface,
+	.config = il_mac_config,
+	.configure_filter = il4965_configure_filter,
+	.set_key = il4965_mac_set_key,
+	.update_tkip_key = il4965_mac_update_tkip_key,
+	.conf_tx = il_mac_conf_tx,
+	.reset_tsf = il_mac_reset_tsf,
+	.bss_info_changed = il_mac_bss_info_changed,
+	.ampdu_action = il4965_mac_ampdu_action,
+	.hw_scan = il_mac_hw_scan,
+	.sta_add = il4965_mac_sta_add,
+	.sta_remove = il_mac_sta_remove,
+	.channel_switch = il4965_mac_channel_switch,
+	.tx_last_beacon = il_mac_tx_last_beacon,
+	.flush = il_mac_flush,
+};
+
+static int
+il4965_init_drv(struct il_priv *il)
+{
+	int ret;
+
+	spin_lock_init(&il->sta_lock);
+	spin_lock_init(&il->hcmd_lock);
+
+	INIT_LIST_HEAD(&il->free_frames);
+
+	mutex_init(&il->mutex);
+
+	il->ieee_channels = NULL;
+	il->ieee_rates = NULL;
+	il->band = NL80211_BAND_2GHZ;
+
+	il->iw_mode = NL80211_IFTYPE_STATION;
+	il->current_ht_config.smps = IEEE80211_SMPS_STATIC;
+	il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
+
+	/* initialize force reset */
+	il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD;
+
+	/* Choose which receivers/antennas to use */
+	if (il->ops->set_rxon_chain)
+		il->ops->set_rxon_chain(il);
+
+	il_init_scan_params(il);
+
+	ret = il_init_channel_map(il);
+	if (ret) {
+		IL_ERR("initializing regulatory failed: %d\n", ret);
+		goto err;
+	}
+
+	ret = il_init_geos(il);
+	if (ret) {
+		IL_ERR("initializing geos failed: %d\n", ret);
+		goto err_free_channel_map;
+	}
+	il4965_init_hw_rates(il, il->ieee_rates);
+
+	return 0;
+
+err_free_channel_map:
+	il_free_channel_map(il);
+err:
+	return ret;
+}
+
+static void
+il4965_uninit_drv(struct il_priv *il)
+{
+	il_free_geos(il);
+	il_free_channel_map(il);
+	kfree(il->scan_cmd);
+}
+
+static void
+il4965_hw_detect(struct il_priv *il)
+{
+	il->hw_rev = _il_rd(il, CSR_HW_REV);
+	il->hw_wa_rev = _il_rd(il, CSR_HW_REV_WA_REG);
+	il->rev_id = il->pci_dev->revision;
+	D_INFO("HW Revision ID = 0x%X\n", il->rev_id);
+}
+
+static const struct il_sensitivity_ranges il4965_sensitivity = {
+	.min_nrg_cck = 97,
+	.max_nrg_cck = 0,	/* not used, set to 0 */
+
+	.auto_corr_min_ofdm = 85,
+	.auto_corr_min_ofdm_mrc = 170,
+	.auto_corr_min_ofdm_x1 = 105,
+	.auto_corr_min_ofdm_mrc_x1 = 220,
+
+	.auto_corr_max_ofdm = 120,
+	.auto_corr_max_ofdm_mrc = 210,
+	.auto_corr_max_ofdm_x1 = 140,
+	.auto_corr_max_ofdm_mrc_x1 = 270,
+
+	.auto_corr_min_cck = 125,
+	.auto_corr_max_cck = 200,
+	.auto_corr_min_cck_mrc = 200,
+	.auto_corr_max_cck_mrc = 400,
+
+	.nrg_th_cck = 100,
+	.nrg_th_ofdm = 100,
+
+	.barker_corr_th_min = 190,
+	.barker_corr_th_min_mrc = 390,
+	.nrg_th_cca = 62,
+};
+
+static void
+il4965_set_hw_params(struct il_priv *il)
+{
+	il->hw_params.bcast_id = IL4965_BROADCAST_ID;
+	il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
+	il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
+	if (il->cfg->mod_params->amsdu_size_8K)
+		il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_8K);
+	else
+		il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_4K);
+
+	il->hw_params.max_beacon_itrvl = IL_MAX_UCODE_BEACON_INTERVAL;
+
+	if (il->cfg->mod_params->disable_11n)
+		il->cfg->sku &= ~IL_SKU_N;
+
+	if (il->cfg->mod_params->num_of_queues >= IL_MIN_NUM_QUEUES &&
+	    il->cfg->mod_params->num_of_queues <= IL49_NUM_QUEUES)
+		il->cfg->num_of_queues =
+		    il->cfg->mod_params->num_of_queues;
+
+	il->hw_params.max_txq_num = il->cfg->num_of_queues;
+	il->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
+	il->hw_params.scd_bc_tbls_size =
+	    il->cfg->num_of_queues *
+	    sizeof(struct il4965_scd_bc_tbl);
+
+	il->hw_params.tfd_size = sizeof(struct il_tfd);
+	il->hw_params.max_stations = IL4965_STATION_COUNT;
+	il->hw_params.max_data_size = IL49_RTC_DATA_SIZE;
+	il->hw_params.max_inst_size = IL49_RTC_INST_SIZE;
+	il->hw_params.max_bsm_size = BSM_SRAM_SIZE;
+	il->hw_params.ht40_channel = BIT(NL80211_BAND_5GHZ);
+
+	il->hw_params.rx_wrt_ptr_reg = FH49_RSCSR_CHNL0_WPTR;
+
+	il->hw_params.tx_chains_num = il4965_num_of_ant(il->cfg->valid_tx_ant);
+	il->hw_params.rx_chains_num = il4965_num_of_ant(il->cfg->valid_rx_ant);
+	il->hw_params.valid_tx_ant = il->cfg->valid_tx_ant;
+	il->hw_params.valid_rx_ant = il->cfg->valid_rx_ant;
+
+	il->hw_params.ct_kill_threshold =
+	   CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);
+
+	il->hw_params.sens = &il4965_sensitivity;
+	il->hw_params.beacon_time_tsf_bits = IL4965_EXT_BEACON_TIME_POS;
+}
+
+static int
+il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	int err = 0;
+	struct il_priv *il;
+	struct ieee80211_hw *hw;
+	struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data);
+	unsigned long flags;
+	u16 pci_cmd;
+
+	/************************
+	 * 1. Allocating HW data
+	 ************************/
+
+	hw = ieee80211_alloc_hw(sizeof(struct il_priv), &il4965_mac_ops);
+	if (!hw) {
+		err = -ENOMEM;
+		goto out;
+	}
+	il = hw->priv;
+	il->hw = hw;
+	SET_IEEE80211_DEV(hw, &pdev->dev);
+
+	D_INFO("*** LOAD DRIVER ***\n");
+	il->cfg = cfg;
+	il->ops = &il4965_ops;
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+	il->debugfs_ops = &il4965_debugfs_ops;
+#endif
+	il->pci_dev = pdev;
+	il->inta_mask = CSR_INI_SET_MASK;
+
+	/**************************
+	 * 2. Initializing PCI bus
+	 **************************/
+	pci_disable_link_state(pdev,
+			       PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+			       PCIE_LINK_STATE_CLKPM);
+
+	if (pci_enable_device(pdev)) {
+		err = -ENODEV;
+		goto out_ieee80211_free_hw;
+	}
+
+	pci_set_master(pdev);
+
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
+	if (!err)
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
+	if (err) {
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (!err)
+			err =
+			    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		/* both attempts failed: */
+		if (err) {
+			IL_WARN("No suitable DMA available.\n");
+			goto out_pci_disable_device;
+		}
+	}
+
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err)
+		goto out_pci_disable_device;
+
+	pci_set_drvdata(pdev, il);
+
+	/***********************
+	 * 3. Read REV register
+	 ***********************/
+	il->hw_base = pci_ioremap_bar(pdev, 0);
+	if (!il->hw_base) {
+		err = -ENODEV;
+		goto out_pci_release_regions;
+	}
+
+	D_INFO("pci_resource_len = 0x%08llx\n",
+	       (unsigned long long)pci_resource_len(pdev, 0));
+	D_INFO("pci_resource_base = %p\n", il->hw_base);
+
+	/* these spin locks are used in apm_ops.init and EEPROM access,
+	 * so initialize them now
+	 */
+	spin_lock_init(&il->reg_lock);
+	spin_lock_init(&il->lock);
+
+	/*
+	 * stop and reset the on-board processor just in case it is in a
+	 * strange state ... like being left stranded by a primary kernel
+	 * and this is now the kdump kernel trying to start up
+	 */
+	_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+	il4965_hw_detect(il);
+	IL_INFO("Detected %s, REV=0x%X\n", il->cfg->name, il->hw_rev);
+
+	/* We disable the RETRY_TIMEOUT register (0x41) to keep
+	 * PCI Tx retries from interfering with C3 CPU state */
+	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
+	il4965_prepare_card_hw(il);
+	if (!il->hw_ready) {
+		IL_WARN("Failed, HW not ready\n");
+		err = -EIO;
+		goto out_iounmap;
+	}
+
+	/*****************
+	 * 4. Read EEPROM
+	 *****************/
+	/* Read the EEPROM */
+	err = il_eeprom_init(il);
+	if (err) {
+		IL_ERR("Unable to init EEPROM\n");
+		goto out_iounmap;
+	}
+	err = il4965_eeprom_check_version(il);
+	if (err)
+		goto out_free_eeprom;
+
+	/* extract MAC Address */
+	il4965_eeprom_get_mac(il, il->addresses[0].addr);
+	D_INFO("MAC address: %pM\n", il->addresses[0].addr);
+	il->hw->wiphy->addresses = il->addresses;
+	il->hw->wiphy->n_addresses = 1;
+
+	/************************
+	 * 5. Setup HW constants
+	 ************************/
+	il4965_set_hw_params(il);
+
+	/*******************
+	 * 6. Setup il
+	 *******************/
+
+	err = il4965_init_drv(il);
+	if (err)
+		goto out_free_eeprom;
+	/* At this point both hw and il are initialized. */
+
+	/********************
+	 * 7. Setup services
+	 ********************/
+	spin_lock_irqsave(&il->lock, flags);
+	il_disable_interrupts(il);
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	pci_enable_msi(il->pci_dev);
+
+	err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il);
+	if (err) {
+		IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq);
+		goto out_disable_msi;
+	}
+
+	il4965_setup_deferred_work(il);
+	il4965_setup_handlers(il);
+
+	/*********************************************
+	 * 8. Enable interrupts and read RFKILL state
+	 *********************************************/
+
+	/* enable rfkill interrupt: hw bug w/a */
+	pci_read_config_word(il->pci_dev, PCI_COMMAND, &pci_cmd);
+	if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
+		pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
+		pci_write_config_word(il->pci_dev, PCI_COMMAND, pci_cmd);
+	}
+
+	il_enable_rfkill_int(il);
+
+	/* If platform's RF_KILL switch is NOT set to KILL */
+	if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
+		clear_bit(S_RFKILL, &il->status);
+	else
+		set_bit(S_RFKILL, &il->status);
+
+	wiphy_rfkill_set_hw_state(il->hw->wiphy,
+				  test_bit(S_RFKILL, &il->status));
+
+	il_power_initialize(il);
+
+	init_completion(&il->_4965.firmware_loading_complete);
+
+	err = il4965_request_firmware(il, true);
+	if (err)
+		goto out_destroy_workqueue;
+
+	return 0;
+
+out_destroy_workqueue:
+	destroy_workqueue(il->workqueue);
+	il->workqueue = NULL;
+	free_irq(il->pci_dev->irq, il);
+out_disable_msi:
+	pci_disable_msi(il->pci_dev);
+	il4965_uninit_drv(il);
+out_free_eeprom:
+	il_eeprom_free(il);
+out_iounmap:
+	iounmap(il->hw_base);
+out_pci_release_regions:
+	pci_release_regions(pdev);
+out_pci_disable_device:
+	pci_disable_device(pdev);
+out_ieee80211_free_hw:
+	ieee80211_free_hw(il->hw);
+out:
+	return err;
+}
+
+static void
+il4965_pci_remove(struct pci_dev *pdev)
+{
+	struct il_priv *il = pci_get_drvdata(pdev);
+	unsigned long flags;
+
+	if (!il)
+		return;
+
+	wait_for_completion(&il->_4965.firmware_loading_complete);
+
+	D_INFO("*** UNLOAD DRIVER ***\n");
+
+	il_dbgfs_unregister(il);
+	sysfs_remove_group(&pdev->dev.kobj, &il_attribute_group);
+
+	/* The ieee80211_unregister_hw() call will cause il_mac_stop() and
+	 * hence il4965_down() to be called. Since we are removing the
+	 * device, we need to set the S_EXIT_PENDING bit.
+	 */
+	set_bit(S_EXIT_PENDING, &il->status);
+
+	il_leds_exit(il);
+
+	if (il->mac80211_registered) {
+		ieee80211_unregister_hw(il->hw);
+		il->mac80211_registered = 0;
+	} else {
+		il4965_down(il);
+	}
+
+	/*
+	 * Make sure device is reset to low power before unloading driver.
+	 * This may be redundant with il4965_down(), but there are paths to
+	 * run il4965_down() without calling apm_ops.stop(), and there are
+	 * paths to avoid running il4965_down() at all before leaving driver.
+	 * This (inexpensive) call *makes sure* device is reset.
+	 */
+	il_apm_stop(il);
+
+	/* make sure we flush any pending irq or
+	 * tasklet for the driver
+	 */
+	spin_lock_irqsave(&il->lock, flags);
+	il_disable_interrupts(il);
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	il4965_synchronize_irq(il);
+
+	il4965_dealloc_ucode_pci(il);
+
+	if (il->rxq.bd)
+		il4965_rx_queue_free(il, &il->rxq);
+	il4965_hw_txq_ctx_free(il);
+
+	il_eeprom_free(il);
+
+	/*netif_stop_queue(dev); */
+	flush_workqueue(il->workqueue);
+
+	/* ieee80211_unregister_hw calls il_mac_stop, which flushes
+	 * il->workqueue... so we can't take down the workqueue
+	 * until now... */
+	destroy_workqueue(il->workqueue);
+	il->workqueue = NULL;
+
+	free_irq(il->pci_dev->irq, il);
+	pci_disable_msi(il->pci_dev);
+	iounmap(il->hw_base);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+
+	il4965_uninit_drv(il);
+
+	dev_kfree_skb(il->beacon_skb);
+
+	ieee80211_free_hw(il->hw);
+}
+
+/*
+ * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFO mask;
+ * must be called under il->lock and with MAC access held
+ */
+void
+il4965_txq_set_sched(struct il_priv *il, u32 mask)
+{
+	il_wr_prph(il, IL49_SCD_TXFACT, mask);
+}
+
+/*****************************************************************************
+ *
+ * driver and module entry point
+ *
+ *****************************************************************************/
+
+/* Hardware specific file defines the PCI IDs table for that hardware module */
+static const struct pci_device_id il4965_hw_card_ids[] = {
+	{IL_PCI_DEVICE(0x4229, PCI_ANY_ID, il4965_cfg)},
+	{IL_PCI_DEVICE(0x4230, PCI_ANY_ID, il4965_cfg)},
+	{0}
+};
+MODULE_DEVICE_TABLE(pci, il4965_hw_card_ids);
+
+static struct pci_driver il4965_driver = {
+	.name = DRV_NAME,
+	.id_table = il4965_hw_card_ids,
+	.probe = il4965_pci_probe,
+	.remove = il4965_pci_remove,
+	.driver.pm = IL_LEGACY_PM_OPS,
+};
+
+static int __init
+il4965_init(void)
+{
+	int ret;
+
+	pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
+	pr_info(DRV_COPYRIGHT "\n");
+
+	ret = il4965_rate_control_register();
+	if (ret) {
+		pr_err("Unable to register rate control algorithm: %d\n", ret);
+		return ret;
+	}
+
+	ret = pci_register_driver(&il4965_driver);
+	if (ret) {
+		pr_err("Unable to initialize PCI module\n");
+		goto error_register;
+	}
+
+	return ret;
+
+error_register:
+	il4965_rate_control_unregister();
+	return ret;
+}
+
+static void __exit
+il4965_exit(void)
+{
+	pci_unregister_driver(&il4965_driver);
+	il4965_rate_control_unregister();
+}
+
+module_exit(il4965_exit);
+module_init(il4965_init);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+module_param_named(debug, il_debug_level, uint, 0644);
+MODULE_PARM_DESC(debug, "debug output mask");
+#endif
+
+module_param_named(swcrypto, il4965_mod_params.sw_crypto, int, 0444);
+MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
+module_param_named(queues_num, il4965_mod_params.num_of_queues, int, 0444);
+MODULE_PARM_DESC(queues_num, "number of hw queues.");
+module_param_named(11n_disable, il4965_mod_params.disable_11n, int, 0444);
+MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
+module_param_named(amsdu_size_8K, il4965_mod_params.amsdu_size_8K, int, 0444);
+MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0 [disabled])");
+module_param_named(fw_restart, il4965_mod_params.restart_fw, int, 0444);
+MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
new file mode 100644
index 0000000..54ff838
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
@@ -0,0 +1,2833 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+
+#include <linux/workqueue.h>
+
+#include "common.h"
+#include "4965.h"
+
+#define IL4965_RS_NAME "iwl-4965-rs"
+
+#define NUM_TRY_BEFORE_ANT_TOGGLE 1
+#define IL_NUMBER_TRY      1
+#define IL_HT_NUMBER_TRY   3
+
+#define RATE_MAX_WINDOW		62	/* # tx in history win */
+#define RATE_MIN_FAILURE_TH		6	/* min failures to calc tpt */
+#define RATE_MIN_SUCCESS_TH		8	/* min successes to calc tpt */
+
+/* max allowed rate miss before sync LQ cmd */
+#define IL_MISSED_RATE_MAX		15
+/* max time to accum history: 3 seconds */
+#define RATE_SCALE_FLUSH_INTVL   (3*HZ)
+
+static u8 rs_ht_to_legacy[] = {
+	RATE_6M_IDX, RATE_6M_IDX,
+	RATE_6M_IDX, RATE_6M_IDX,
+	RATE_6M_IDX,
+	RATE_6M_IDX, RATE_9M_IDX,
+	RATE_12M_IDX, RATE_18M_IDX,
+	RATE_24M_IDX, RATE_36M_IDX,
+	RATE_48M_IDX, RATE_54M_IDX
+};
+
+static const u8 ant_toggle_lookup[] = {
+	/*ANT_NONE -> */ ANT_NONE,
+	/*ANT_A    -> */ ANT_B,
+	/*ANT_B    -> */ ANT_C,
+	/*ANT_AB   -> */ ANT_BC,
+	/*ANT_C    -> */ ANT_A,
+	/*ANT_AC   -> */ ANT_AB,
+	/*ANT_BC   -> */ ANT_AC,
+	/*ANT_ABC  -> */ ANT_ABC,
+};
+
+#define IL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np)    \
+	[RATE_##r##M_IDX] = { RATE_##r##M_PLCP,      \
+				    RATE_SISO_##s##M_PLCP, \
+				    RATE_MIMO2_##s##M_PLCP,\
+				    RATE_##r##M_IEEE,      \
+				    RATE_##ip##M_IDX,    \
+				    RATE_##in##M_IDX,    \
+				    RATE_##rp##M_IDX,    \
+				    RATE_##rn##M_IDX,    \
+				    RATE_##pp##M_IDX,    \
+				    RATE_##np##M_IDX }
+
+/*
+ * Parameter order:
+ *   rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
+ *
+ * If there isn't a valid next or previous rate then INV is used which
+ * maps to RATE_INVALID
+ *
+ */
+const struct il_rate_info il_rates[RATE_COUNT] = {
+	IL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2),	/*  1mbps */
+	IL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5),		/*  2mbps */
+	IL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11),	/*5.5mbps */
+	IL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18),	/* 11mbps */
+	IL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11),		/*  6mbps */
+	IL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11),	/*  9mbps */
+	IL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18),	/* 12mbps */
+	IL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24),	/* 18mbps */
+	IL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36),	/* 24mbps */
+	IL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48),	/* 36mbps */
+	IL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54),	/* 48mbps */
+	IL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
+	IL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
+};
+
+static int
+il4965_hwrate_to_plcp_idx(u32 rate_n_flags)
+{
+	int idx = 0;
+
+	/* HT rate format */
+	if (rate_n_flags & RATE_MCS_HT_MSK) {
+		idx = (rate_n_flags & 0xff);
+
+		if (idx >= RATE_MIMO2_6M_PLCP)
+			idx = idx - RATE_MIMO2_6M_PLCP;
+
+		idx += IL_FIRST_OFDM_RATE;
+		/* skip 9M; it is not supported in HT */
+		if (idx >= RATE_9M_IDX)
+			idx += 1;
+		if (idx >= IL_FIRST_OFDM_RATE && idx <= IL_LAST_OFDM_RATE)
+			return idx;
+
+		/* legacy rate format, search for match in table */
+	} else {
+		for (idx = 0; idx < ARRAY_SIZE(il_rates); idx++)
+			if (il_rates[idx].plcp == (rate_n_flags & 0xFF))
+				return idx;
+	}
+
+	return -1;
+}
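+/*
+ * Note on the HT branch above: MIMO2 plcp values are first rebased by
+ * RATE_MIMO2_6M_PLCP onto the SISO range, then mapped onto the OFDM
+ * idx range starting at IL_FIRST_OFDM_RATE (6M); since HT has no
+ * 9 Mbps rate, idxes at or above RATE_9M_IDX are shifted up by one.
+ */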
+
+static void il4965_rs_rate_scale_perform(struct il_priv *il,
+					 struct sk_buff *skb,
+					 struct ieee80211_sta *sta,
+					 struct il_lq_sta *lq_sta);
+static void il4965_rs_fill_link_cmd(struct il_priv *il,
+				    struct il_lq_sta *lq_sta, u32 rate_n_flags);
+static void il4965_rs_stay_in_table(struct il_lq_sta *lq_sta,
+				    bool force_search);
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static void il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta,
+				    u32 *rate_n_flags, int idx);
+#else
+static void
+il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 * rate_n_flags, int idx)
+{
+}
+#endif
+
+/**
+ * The following tables contain the expected throughput metrics for all rates
+ *
+ *	1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
+ *
+ * where invalid entries are zeros.
+ *
+ * CCK rates are only valid in legacy table and will only be used in G
+ * (2.4 GHz) band.
+ */
+
+static s32 expected_tpt_legacy[RATE_COUNT] = {
+	7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
+};
+
+static s32 expected_tpt_siso20MHz[4][RATE_COUNT] = {
+	{0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202},	/* Norm */
+	{0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210},	/* SGI */
+	{0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381},	/* AGG */
+	{0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413},	/* AGG+SGI */
+};
+
+static s32 expected_tpt_siso40MHz[4][RATE_COUNT] = {
+	{0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257},	/* Norm */
+	{0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264},	/* SGI */
+	{0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640},	/* AGG */
+	{0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683},	/* AGG+SGI */
+};
+
+static s32 expected_tpt_mimo2_20MHz[4][RATE_COUNT] = {
+	{0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250},	/* Norm */
+	{0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256},	/* SGI */
+	{0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619},	/* AGG */
+	{0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660},	/* AGG+SGI */
+};
+
+static s32 expected_tpt_mimo2_40MHz[4][RATE_COUNT] = {
+	{0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289},	/* Norm */
+	{0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293},	/* SGI */
+	{0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922},	/* AGG */
+	{0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966},	/* AGG+SGI */
+};
+
+/* mbps, mcs */
+static const struct il_rate_mcs_info il_rate_mcs[RATE_COUNT] = {
+	{"1", "BPSK DSSS"},
+	{"2", "QPSK DSSS"},
+	{"5.5", "BPSK CCK"},
+	{"11", "QPSK CCK"},
+	{"6", "BPSK 1/2"},
+	{"9", "BPSK 1/2"},
+	{"12", "QPSK 1/2"},
+	{"18", "QPSK 3/4"},
+	{"24", "16QAM 1/2"},
+	{"36", "16QAM 3/4"},
+	{"48", "64QAM 2/3"},
+	{"54", "64QAM 3/4"},
+	{"60", "64QAM 5/6"},
+};
+
+#define MCS_IDX_PER_STREAM	(8)
+
+static inline u8
+il4965_rs_extract_rate(u32 rate_n_flags)
+{
+	return (u8) (rate_n_flags & 0xFF);
+}
+
+static void
+il4965_rs_rate_scale_clear_win(struct il_rate_scale_data *win)
+{
+	win->data = 0;
+	win->success_counter = 0;
+	win->success_ratio = IL_INVALID_VALUE;
+	win->counter = 0;
+	win->average_tpt = IL_INVALID_VALUE;
+	win->stamp = 0;
+}
+
+static inline u8
+il4965_rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
+{
+	return (ant_type & valid_antenna) == ant_type;
+}
+
+/*
+ *	Remove old data from the stats: all data older than
+ *	TID_MAX_TIME_DIFF is deleted.
+ */
+static void
+il4965_rs_tl_rm_old_stats(struct il_traffic_load *tl, u32 curr_time)
+{
+	/* The oldest age we want to keep */
+	u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
+
+	while (tl->queue_count && tl->time_stamp < oldest_time) {
+		tl->total -= tl->packet_count[tl->head];
+		tl->packet_count[tl->head] = 0;
+		tl->time_stamp += TID_QUEUE_CELL_SPACING;
+		tl->queue_count--;
+		tl->head++;
+		if (tl->head >= TID_QUEUE_MAX_SIZE)
+			tl->head = 0;
+	}
+}
+
+/*
+ *	Increment the traffic load value for this tid, and remove
+ *	any old values that have aged past the time window.
+ */
+static u8
+il4965_rs_tl_add_packet(struct il_lq_sta *lq_data, struct ieee80211_hdr *hdr)
+{
+	u32 curr_time = jiffies_to_msecs(jiffies);
+	u32 time_diff;
+	s32 idx;
+	struct il_traffic_load *tl = NULL;
+	u8 tid;
+
+	if (ieee80211_is_data_qos(hdr->frame_control)) {
+		u8 *qc = ieee80211_get_qos_ctl(hdr);
+		tid = qc[0] & 0xf;
+	} else
+		return MAX_TID_COUNT;
+
+	if (unlikely(tid >= TID_MAX_LOAD_COUNT))
+		return MAX_TID_COUNT;
+
+	tl = &lq_data->load[tid];
+
+	curr_time -= curr_time % TID_ROUND_VALUE;
+
+	/* Happens only for the first packet. Initialize the data */
+	if (!(tl->queue_count)) {
+		tl->total = 1;
+		tl->time_stamp = curr_time;
+		tl->queue_count = 1;
+		tl->head = 0;
+		tl->packet_count[0] = 1;
+		return MAX_TID_COUNT;
+	}
+
+	time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
+	idx = time_diff / TID_QUEUE_CELL_SPACING;
+
+	/* The history is too long: remove data older than TID_MAX_TIME_DIFF */
+	if (idx >= TID_QUEUE_MAX_SIZE)
+		il4965_rs_tl_rm_old_stats(tl, curr_time);
+
+	idx = (tl->head + idx) % TID_QUEUE_MAX_SIZE;
+	tl->packet_count[idx] = tl->packet_count[idx] + 1;
+	tl->total = tl->total + 1;
+
+	if ((idx + 1) > tl->queue_count)
+		tl->queue_count = idx + 1;
+
+	return tid;
+}
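+/*
+ * The load history is a circular buffer of TID_QUEUE_MAX_SIZE cells,
+ * each covering TID_QUEUE_CELL_SPACING ms of traffic: curr_time is
+ * rounded down to TID_ROUND_VALUE and the time elapsed since
+ * tl->time_stamp selects the cell to increment.
+ */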
+
+/*
+ * Get the traffic load value for this tid.
+ */
+static u32
+il4965_rs_tl_get_load(struct il_lq_sta *lq_data, u8 tid)
+{
+	u32 curr_time = jiffies_to_msecs(jiffies);
+	u32 time_diff;
+	s32 idx;
+	struct il_traffic_load *tl = NULL;
+
+	if (tid >= TID_MAX_LOAD_COUNT)
+		return 0;
+
+	tl = &(lq_data->load[tid]);
+
+	curr_time -= curr_time % TID_ROUND_VALUE;
+
+	if (!(tl->queue_count))
+		return 0;
+
+	time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
+	idx = time_diff / TID_QUEUE_CELL_SPACING;
+
+	/* The history is too long: remove data older than TID_MAX_TIME_DIFF */
+	if (idx >= TID_QUEUE_MAX_SIZE)
+		il4965_rs_tl_rm_old_stats(tl, curr_time);
+
+	return tl->total;
+}
+
+static int
+il4965_rs_tl_turn_on_agg_for_tid(struct il_priv *il, struct il_lq_sta *lq_data,
+				 u8 tid, struct ieee80211_sta *sta)
+{
+	int ret = -EAGAIN;
+	u32 load;
+
+	load = il4965_rs_tl_get_load(lq_data, tid);
+
+	if (load > IL_AGG_LOAD_THRESHOLD) {
+		D_HT("Starting Tx agg: STA: %pM tid: %d\n", sta->addr, tid);
+		ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
+		if (ret == -EAGAIN) {
+			/*
+			 * The driver and mac80211 are out of sync; this
+			 * might be caused by reloading firmware.
+			 * Stop the Tx BA session here.
+			 */
+			IL_ERR("Failed to start Tx agg on tid: %d\n", tid);
+			ieee80211_stop_tx_ba_session(sta, tid);
+		}
+	} else
+		D_HT("Aggregation not enabled for tid %d because load = %u\n",
+		     tid, load);
+
+	return ret;
+}
+
+static void
+il4965_rs_tl_turn_on_agg(struct il_priv *il, u8 tid, struct il_lq_sta *lq_data,
+			 struct ieee80211_sta *sta)
+{
+	if (tid < TID_MAX_LOAD_COUNT)
+		il4965_rs_tl_turn_on_agg_for_tid(il, lq_data, tid, sta);
+	else
+		IL_ERR("tid exceeds max load count: %d/%d\n", tid,
+		       TID_MAX_LOAD_COUNT);
+}
+
+static inline int
+il4965_get_il4965_num_of_ant_from_rate(u32 rate_n_flags)
+{
+	return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
+	    !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
+	    !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
+}
+
+/*
+ * Get the expected throughput from an il_scale_tbl_info, wrapping a
+ * NULL pointer check on the expected-throughput table.
+ */
+static s32
+il4965_get_expected_tpt(struct il_scale_tbl_info *tbl, int rs_idx)
+{
+	if (tbl->expected_tpt)
+		return tbl->expected_tpt[rs_idx];
+	return 0;
+}
+
+/**
+ * il4965_rs_collect_tx_data - Update the success/failure sliding win
+ *
+ * We keep a sliding win of the last 62 packets transmitted
+ * at this rate.  win->data contains the bitmask of successful
+ * packets.
+ */
+static int
+il4965_rs_collect_tx_data(struct il_scale_tbl_info *tbl, int scale_idx,
+			  int attempts, int successes)
+{
+	struct il_rate_scale_data *win = NULL;
+	static const u64 mask = (((u64) 1) << (RATE_MAX_WINDOW - 1));
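+	/* mask selects the bit for the oldest frame in the
+	 * RATE_MAX_WINDOW-deep history win, i.e. bit 61 for the 62-frame win */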
+	s32 fail_count, tpt;
+
+	if (scale_idx < 0 || scale_idx >= RATE_COUNT)
+		return -EINVAL;
+
+	/* Select win for current tx bit rate */
+	win = &(tbl->win[scale_idx]);
+
+	/* Get expected throughput */
+	tpt = il4965_get_expected_tpt(tbl, scale_idx);
+
+	/*
+	 * Keep track of only the latest 62 tx frame attempts in this rate's
+	 * history win; anything older isn't really relevant any more.
+	 * If we have filled up the sliding win, drop the oldest attempt;
+	 * if the oldest attempt (highest bit in bitmap) shows "success",
+	 * subtract "1" from the success counter (this is the main reason
+	 * we keep these bitmaps!).
+	 */
+	while (attempts > 0) {
+		if (win->counter >= RATE_MAX_WINDOW) {
+
+			/* remove earliest */
+			win->counter = RATE_MAX_WINDOW - 1;
+
+			if (win->data & mask) {
+				win->data &= ~mask;
+				win->success_counter--;
+			}
+		}
+
+		/* Increment frames-attempted counter */
+		win->counter++;
+
+		/* Shift bitmap by one frame to throw away oldest history */
+		win->data <<= 1;
+
+		/* Mark the most recent #successes attempts as successful */
+		if (successes > 0) {
+			win->success_counter++;
+			win->data |= 0x1;
+			successes--;
+		}
+
+		attempts--;
+	}
+
+	/* Calculate current success ratio, avoid divide-by-0! */
+	if (win->counter > 0)
+		win->success_ratio =
+		    128 * (100 * win->success_counter) / win->counter;
+	else
+		win->success_ratio = IL_INVALID_VALUE;
+
+	fail_count = win->counter - win->success_counter;
+
+	/* Calculate average throughput, if we have enough history. */
+	if (fail_count >= RATE_MIN_FAILURE_TH ||
+	    win->success_counter >= RATE_MIN_SUCCESS_TH)
+		win->average_tpt = (win->success_ratio * tpt + 64) / 128;
+	else
+		win->average_tpt = IL_INVALID_VALUE;
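+	/*
+	 * Note the fixed-point scaling above: success_ratio is a percentage
+	 * scaled by 128 (100% == 12800), e.g. 31 successes out of 62
+	 * attempts yields 128 * 50 == 6400; the "+ 64" rounds the divide
+	 * by 128 that converts back for average_tpt.
+	 */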
+
+	/* Tag this win as having been updated */
+	win->stamp = jiffies;
+
+	return 0;
+}
+
+/*
+ * Fill uCode API rate_n_flags field, based on "search" or "active" table.
+ */
+static u32
+il4965_rate_n_flags_from_tbl(struct il_priv *il, struct il_scale_tbl_info *tbl,
+			     int idx, u8 use_green)
+{
+	u32 rate_n_flags = 0;
+
+	if (is_legacy(tbl->lq_type)) {
+		rate_n_flags = il_rates[idx].plcp;
+		if (idx >= IL_FIRST_CCK_RATE && idx <= IL_LAST_CCK_RATE)
+			rate_n_flags |= RATE_MCS_CCK_MSK;
+
+	} else if (is_Ht(tbl->lq_type)) {
+		if (idx > IL_LAST_OFDM_RATE) {
+			IL_ERR("Invalid HT rate idx %d\n", idx);
+			idx = IL_LAST_OFDM_RATE;
+		}
+		rate_n_flags = RATE_MCS_HT_MSK;
+
+		if (is_siso(tbl->lq_type))
+			rate_n_flags |= il_rates[idx].plcp_siso;
+		else
+			rate_n_flags |= il_rates[idx].plcp_mimo2;
+	} else {
+		IL_ERR("Invalid tbl->lq_type %d\n", tbl->lq_type);
+	}
+
+	rate_n_flags |=
+	    ((tbl->ant_type << RATE_MCS_ANT_POS) & RATE_MCS_ANT_ABC_MSK);
+
+	if (is_Ht(tbl->lq_type)) {
+		if (tbl->is_ht40) {
+			if (tbl->is_dup)
+				rate_n_flags |= RATE_MCS_DUP_MSK;
+			else
+				rate_n_flags |= RATE_MCS_HT40_MSK;
+		}
+		if (tbl->is_SGI)
+			rate_n_flags |= RATE_MCS_SGI_MSK;
+
+		if (use_green) {
+			rate_n_flags |= RATE_MCS_GF_MSK;
+			if (is_siso(tbl->lq_type) && tbl->is_SGI) {
+				rate_n_flags &= ~RATE_MCS_SGI_MSK;
+				IL_ERR("GF was set with SGI:SISO\n");
+			}
+		}
+	}
+	return rate_n_flags;
+}
+
+/*
+ * Interpret uCode API's rate_n_flags format,
+ * fill "search" or "active" tx mode table.
+ */
+static int
+il4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
+				enum nl80211_band band,
+				struct il_scale_tbl_info *tbl, int *rate_idx)
+{
+	u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
+	u8 il4965_num_of_ant =
+	    il4965_get_il4965_num_of_ant_from_rate(rate_n_flags);
+	u8 mcs;
+
+	memset(tbl, 0, sizeof(struct il_scale_tbl_info));
+	*rate_idx = il4965_hwrate_to_plcp_idx(rate_n_flags);
+
+	if (*rate_idx == RATE_INVALID) {
+		*rate_idx = -1;
+		return -EINVAL;
+	}
+	tbl->is_SGI = 0;	/* default legacy setup */
+	tbl->is_ht40 = 0;
+	tbl->is_dup = 0;
+	tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
+	tbl->lq_type = LQ_NONE;
+	tbl->max_search = IL_MAX_SEARCH;
+
+	/* legacy rate format */
+	if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
+		if (il4965_num_of_ant == 1) {
+			if (band == NL80211_BAND_5GHZ)
+				tbl->lq_type = LQ_A;
+			else
+				tbl->lq_type = LQ_G;
+		}
+		/* HT rate format */
+	} else {
+		if (rate_n_flags & RATE_MCS_SGI_MSK)
+			tbl->is_SGI = 1;
+
+		if ((rate_n_flags & RATE_MCS_HT40_MSK) ||
+		    (rate_n_flags & RATE_MCS_DUP_MSK))
+			tbl->is_ht40 = 1;
+
+		if (rate_n_flags & RATE_MCS_DUP_MSK)
+			tbl->is_dup = 1;
+
+		mcs = il4965_rs_extract_rate(rate_n_flags);
+
+		/* SISO */
+		if (mcs <= RATE_SISO_60M_PLCP) {
+			if (il4965_num_of_ant == 1)
+				tbl->lq_type = LQ_SISO;	/*else NONE */
+			/* MIMO2 */
+		} else {
+			if (il4965_num_of_ant == 2)
+				tbl->lq_type = LQ_MIMO2;
+		}
+	}
+	return 0;
+}
+
+/* switch to another antenna/antennas and return 1 */
+/* if no other valid antenna found, return 0 */
+static int
+il4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
+			 struct il_scale_tbl_info *tbl)
+{
+	u8 new_ant_type;
+
+	if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
+		return 0;
+
+	if (!il4965_rs_is_valid_ant(valid_ant, tbl->ant_type))
+		return 0;
+
+	new_ant_type = ant_toggle_lookup[tbl->ant_type];
+
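+	/*
+	 * ant_toggle_lookup forms a cycle; walk it until an antenna type
+	 * present in valid_ant is found, or we wrap back to where we started.
+	 */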
+	while (new_ant_type != tbl->ant_type &&
+	       !il4965_rs_is_valid_ant(valid_ant, new_ant_type))
+		new_ant_type = ant_toggle_lookup[new_ant_type];
+
+	if (new_ant_type == tbl->ant_type)
+		return 0;
+
+	tbl->ant_type = new_ant_type;
+	*rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
+	*rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
+	return 1;
+}
+
+/**
+ * Green-field mode is valid if the station supports it and
+ * there are no non-GF stations present in the BSS.
+ */
+static bool
+il4965_rs_use_green(struct il_priv *il, struct ieee80211_sta *sta)
+{
+	return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
+	       !il->ht.non_gf_sta_present;
+}
+
+/**
+ * il4965_rs_get_supported_rates - get the available rates
+ *
+ * For management or broadcast frames, only the basic available
+ * rates are returned.
+ *
+ */
+static u16
+il4965_rs_get_supported_rates(struct il_lq_sta *lq_sta,
+			      struct ieee80211_hdr *hdr,
+			      enum il_table_type rate_type)
+{
+	if (is_legacy(rate_type)) {
+		return lq_sta->active_legacy_rate;
+	} else {
+		if (is_siso(rate_type))
+			return lq_sta->active_siso_rate;
+		else
+			return lq_sta->active_mimo2_rate;
+	}
+}
+
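+/*
+ * Find supported rates adjacent to idx.  The return value packs the
+ * next-higher rate idx into bits 8-15 and the next-lower idx into bits
+ * 0-7; either byte is RATE_INVALID if no such rate is in rate_mask.
+ */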
+static u16
+il4965_rs_get_adjacent_rate(struct il_priv *il, u8 idx, u16 rate_mask,
+			    int rate_type)
+{
+	u8 high = RATE_INVALID;
+	u8 low = RATE_INVALID;
+
+	/* 802.11a or HT rates walk to the literally adjacent rate in
+	 * the rate table */
+	if (is_a_band(rate_type) || !is_legacy(rate_type)) {
+		int i;
+		u32 mask;
+
+		/* Find the previous rate that is in the rate mask */
+		i = idx - 1;
+		for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
+			if (rate_mask & mask) {
+				low = i;
+				break;
+			}
+		}
+
+		/* Find the next rate that is in the rate mask */
+		i = idx + 1;
+		for (mask = (1 << i); i < RATE_COUNT; i++, mask <<= 1) {
+			if (rate_mask & mask) {
+				high = i;
+				break;
+			}
+		}
+
+		return (high << 8) | low;
+	}
+
+	low = idx;
+	while (low != RATE_INVALID) {
+		low = il_rates[low].prev_rs;
+		if (low == RATE_INVALID)
+			break;
+		if (rate_mask & (1 << low))
+			break;
+		D_RATE("Skipping masked lower rate: %d\n", low);
+	}
+
+	high = idx;
+	while (high != RATE_INVALID) {
+		high = il_rates[high].next_rs;
+		if (high == RATE_INVALID)
+			break;
+		if (rate_mask & (1 << high))
+			break;
+		D_RATE("Skipping masked higher rate: %d\n", high);
+	}
+
+	return (high << 8) | low;
+}
+
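+/*
+ * Pick the next lower supported rate (as rate_n_flags) for fallback
+ * entries of the LQ command, dropping from HT to legacy rates when HT
+ * is no longer possible or the lowest HT rate has been reached.
+ */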
+static u32
+il4965_rs_get_lower_rate(struct il_lq_sta *lq_sta,
+			 struct il_scale_tbl_info *tbl, u8 scale_idx,
+			 u8 ht_possible)
+{
+	s32 low;
+	u16 rate_mask;
+	u16 high_low;
+	u8 switch_to_legacy = 0;
+	u8 is_green = lq_sta->is_green;
+	struct il_priv *il = lq_sta->drv;
+
+	/* check if we need to switch from HT to legacy rates.
+	 * assumption is that mandatory rates (1Mbps or 6Mbps)
+	 * are always supported (spec demand) */
+	if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_idx)) {
+		switch_to_legacy = 1;
+		scale_idx = rs_ht_to_legacy[scale_idx];
+		if (lq_sta->band == NL80211_BAND_5GHZ)
+			tbl->lq_type = LQ_A;
+		else
+			tbl->lq_type = LQ_G;
+
+		if (il4965_num_of_ant(tbl->ant_type) > 1)
+			tbl->ant_type =
+			    il4965_first_antenna(il->hw_params.valid_tx_ant);
+
+		tbl->is_ht40 = 0;
+		tbl->is_SGI = 0;
+		tbl->max_search = IL_MAX_SEARCH;
+	}
+
+	rate_mask = il4965_rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
+
+	/* Mask with station rate restriction */
+	if (is_legacy(tbl->lq_type)) {
+		/* supp_rates has no CCK bits in A mode */
+		if (lq_sta->band == NL80211_BAND_5GHZ)
+			rate_mask =
+			    (u16) (rate_mask &
+				   (lq_sta->supp_rates << IL_FIRST_OFDM_RATE));
+		else
+			rate_mask = (u16) (rate_mask & lq_sta->supp_rates);
+	}
+
+	/* If we switched from HT to legacy, check current rate */
+	if (switch_to_legacy && (rate_mask & (1 << scale_idx))) {
+		low = scale_idx;
+		goto out;
+	}
+
+	high_low =
+	    il4965_rs_get_adjacent_rate(lq_sta->drv, scale_idx, rate_mask,
+					tbl->lq_type);
+	low = high_low & 0xff;
+
+	if (low == RATE_INVALID)
+		low = scale_idx;
+
+out:
+	return il4965_rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
+}
+
+/*
+ * Simple function to compare two rate scale table types
+ */
+static bool
+il4965_table_type_matches(struct il_scale_tbl_info *a,
+			  struct il_scale_tbl_info *b)
+{
+	return (a->lq_type == b->lq_type && a->ant_type == b->ant_type &&
+		a->is_SGI == b->is_SGI);
+}
+
+/*
+ * mac80211 sends us Tx status
+ */
+static void
+il4965_rs_tx_status(void *il_r, struct ieee80211_supported_band *sband,
+		    struct ieee80211_sta *sta, void *il_sta,
+		    struct sk_buff *skb)
+{
+	int legacy_success;
+	int retries;
+	int rs_idx, mac_idx, i;
+	struct il_lq_sta *lq_sta = il_sta;
+	struct il_link_quality_cmd *table;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct il_priv *il = (struct il_priv *)il_r;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	enum mac80211_rate_control_flags mac_flags;
+	u32 tx_rate;
+	struct il_scale_tbl_info tbl_type;
+	struct il_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
+
+	D_RATE("get frame ack response, update rate scale win\n");
+
+	/* Treat uninitialized rate scaling data the same as non-existent. */
+	if (!lq_sta) {
+		D_RATE("Station rate scaling not created yet.\n");
+		return;
+	} else if (!lq_sta->drv) {
+		D_RATE("Rate scaling not initialized yet.\n");
+		return;
+	}
+
+	if (!ieee80211_is_data(hdr->frame_control) ||
+	    (info->flags & IEEE80211_TX_CTL_NO_ACK))
+		return;
+
+	/* This packet was aggregated but doesn't carry status info */
+	if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
+	    !(info->flags & IEEE80211_TX_STAT_AMPDU))
+		return;
+
+	/*
+	 * Ignore this Tx frame response if its initial rate doesn't match
+	 * that of latest Link Quality command.  There may be stragglers
+	 * from a previous Link Quality command, but we're no longer interested
+	 * in those; they're either from the "active" mode while we're trying
+	 * to check "search" mode, or a prior "search" mode after we've moved
+	 * to a new "search" mode (which might become the new "active" mode).
+	 */
+	table = &lq_sta->lq;
+	tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
+	il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band, &tbl_type, &rs_idx);
+	if (il->band == NL80211_BAND_5GHZ)
+		rs_idx -= IL_FIRST_OFDM_RATE;
+	mac_flags = info->status.rates[0].flags;
+	mac_idx = info->status.rates[0].idx;
+	/* For HT packets, map MCS to PLCP */
+	if (mac_flags & IEEE80211_TX_RC_MCS) {
+		mac_idx &= RATE_MCS_CODE_MSK;	/* Remove # of streams */
+		if (mac_idx >= (RATE_9M_IDX - IL_FIRST_OFDM_RATE))
+			mac_idx++;
+		/*
+		 * mac80211's HT idx is always zero-based; we need to move
+		 * HT OFDM rates after the CCK rates in the 2.4 GHz band
+		 */
+		if (il->band == NL80211_BAND_2GHZ)
+			mac_idx += IL_FIRST_OFDM_RATE;
+	}
+	/* Here we actually compare this rate to the latest LQ command */
+	if (mac_idx < 0 ||
+	    tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI) ||
+	    tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ||
+	    tbl_type.is_dup != !!(mac_flags & IEEE80211_TX_RC_DUP_DATA) ||
+	    tbl_type.ant_type != info->status.antenna ||
+	    !!(tx_rate & RATE_MCS_HT_MSK) != !!(mac_flags & IEEE80211_TX_RC_MCS)
+	    || !!(tx_rate & RATE_MCS_GF_MSK) !=
+	    !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD) || rs_idx != mac_idx) {
+		D_RATE("initial rate %d does not match %d (0x%x)\n", mac_idx,
+		       rs_idx, tx_rate);
+		/*
+		 * Since rates mis-match, the last LQ command may have failed.
+		 * After IL_MISSED_RATE_MAX mis-matches, resync the uCode with
+		 * the driver.
+		 */
+		lq_sta->missed_rate_counter++;
+		if (lq_sta->missed_rate_counter > IL_MISSED_RATE_MAX) {
+			lq_sta->missed_rate_counter = 0;
+			il_send_lq_cmd(il, &lq_sta->lq, CMD_ASYNC, false);
+		}
+		/* Regardless, ignore this status info for outdated rate */
+		return;
+	} else
+		/* Rate did match, so reset the missed_rate_counter */
+		lq_sta->missed_rate_counter = 0;
+
+	/* Figure out if rate scale algorithm is in active or search table */
+	if (il4965_table_type_matches(&tbl_type,
+				      &lq_sta->lq_info[lq_sta->active_tbl])) {
+		curr_tbl = &lq_sta->lq_info[lq_sta->active_tbl];
+		other_tbl = &lq_sta->lq_info[1 - lq_sta->active_tbl];
+	} else if (il4965_table_type_matches(&tbl_type,
+					     &lq_sta->lq_info[1 - lq_sta->active_tbl])) {
+		curr_tbl = &lq_sta->lq_info[1 - lq_sta->active_tbl];
+		other_tbl = &lq_sta->lq_info[lq_sta->active_tbl];
+	} else {
+		D_RATE("Neither active nor search matches tx rate\n");
+		tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+		D_RATE("active- lq:%x, ant:%x, SGI:%d\n", tmp_tbl->lq_type,
+		       tmp_tbl->ant_type, tmp_tbl->is_SGI);
+		tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+		D_RATE("search- lq:%x, ant:%x, SGI:%d\n", tmp_tbl->lq_type,
+		       tmp_tbl->ant_type, tmp_tbl->is_SGI);
+		D_RATE("actual- lq:%x, ant:%x, SGI:%d\n", tbl_type.lq_type,
+		       tbl_type.ant_type, tbl_type.is_SGI);
+		/*
+		 * No matching table found; bypass the data collection and
+		 * continue with rate scaling to find a matching rate table.
+		 */
+		il4965_rs_stay_in_table(lq_sta, true);
+		goto done;
+	}
+
+	/*
+	 * Updating the frame history depends on whether packets were
+	 * aggregated.
+	 *
+	 * For aggregation, all packets were transmitted at the same rate, the
+	 * first idx into rate scale table.
+	 */
+	if (info->flags & IEEE80211_TX_STAT_AMPDU) {
+		tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
+		il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band, &tbl_type,
+						&rs_idx);
+		il4965_rs_collect_tx_data(curr_tbl, rs_idx,
+					  info->status.ampdu_len,
+					  info->status.ampdu_ack_len);
+
+		/* Update success/fail counts if not searching for new mode */
+		if (lq_sta->stay_in_tbl) {
+			lq_sta->total_success += info->status.ampdu_ack_len;
+			lq_sta->total_failed +=
+			    (info->status.ampdu_len -
+			     info->status.ampdu_ack_len);
+		}
+	} else {
+		/*
+		 * For legacy frames, update the frame history for each
+		 * Tx retry.
+		 */
+		retries = info->status.rates[0].count - 1;
+		/* HW doesn't send more than 15 retries */
+		retries = min(retries, 15);
+
+		/* The last transmission may have been successful */
+		legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
+		/* Collect data for each rate used during failed TX attempts */
+		for (i = 0; i <= retries; ++i) {
+			tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
+			il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band,
+							&tbl_type, &rs_idx);
+			/*
+			 * Only collect stats if retried rate is in the same RS
+			 * table as active/search.
+			 */
+			if (il4965_table_type_matches(&tbl_type, curr_tbl))
+				tmp_tbl = curr_tbl;
+			else if (il4965_table_type_matches(&tbl_type,
+							   other_tbl))
+				tmp_tbl = other_tbl;
+			else
+				continue;
+			il4965_rs_collect_tx_data(tmp_tbl, rs_idx, 1,
+						  i < retries ? 0 : legacy_success);
+		}
+
+		/* Update success/fail counts if not searching for new mode */
+		if (lq_sta->stay_in_tbl) {
+			lq_sta->total_success += legacy_success;
+			lq_sta->total_failed += retries + (1 - legacy_success);
+		}
+	}
+	/* The last TX rate is cached in lq_sta; it's set in if/else above */
+	lq_sta->last_rate_n_flags = tx_rate;
+done:
+	/* See if there's a better rate or modulation mode to try. */
+	if (sta->supp_rates[sband->band])
+		il4965_rs_rate_scale_perform(il, skb, sta, lq_sta);
+}
+
+/*
+ * Begin a period of staying with a selected modulation mode.
+ * Set "stay_in_tbl" flag to prevent any mode switches.
+ * Set frame tx success limits according to legacy vs. high-throughput,
+ * and reset overall (spanning all rates) tx success history stats.
+ * These control how long we stay using same modulation mode before
+ * searching for a new mode.
+ */
+static void
+il4965_rs_set_stay_in_table(struct il_priv *il, u8 is_legacy,
+			    struct il_lq_sta *lq_sta)
+{
+	D_RATE("we are staying in the same table\n");
+	lq_sta->stay_in_tbl = 1;	/* only place this gets set */
+	if (is_legacy) {
+		lq_sta->table_count_limit = IL_LEGACY_TBL_COUNT;
+		lq_sta->max_failure_limit = IL_LEGACY_FAILURE_LIMIT;
+		lq_sta->max_success_limit = IL_LEGACY_SUCCESS_LIMIT;
+	} else {
+		lq_sta->table_count_limit = IL_NONE_LEGACY_TBL_COUNT;
+		lq_sta->max_failure_limit = IL_NONE_LEGACY_FAILURE_LIMIT;
+		lq_sta->max_success_limit = IL_NONE_LEGACY_SUCCESS_LIMIT;
+	}
+	lq_sta->table_count = 0;
+	lq_sta->total_failed = 0;
+	lq_sta->total_success = 0;
+	lq_sta->flush_timer = jiffies;
+	lq_sta->action_counter = 0;
+}
+
+/*
+ * Find correct throughput table for given mode of modulation
+ */
+static void
+il4965_rs_set_expected_tpt_table(struct il_lq_sta *lq_sta,
+				 struct il_scale_tbl_info *tbl)
+{
+	/* Used to choose among HT tables */
+	s32(*ht_tbl_pointer)[RATE_COUNT];
+
+	/* Check for invalid LQ type */
+	if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
+		tbl->expected_tpt = expected_tpt_legacy;
+		return;
+	}
+
+	/* Legacy rates have only one table */
+	if (is_legacy(tbl->lq_type)) {
+		tbl->expected_tpt = expected_tpt_legacy;
+		return;
+	}
+
+	/* Choose among many HT tables depending on number of streams
+	 * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
+	 * status */
+	if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
+		ht_tbl_pointer = expected_tpt_siso20MHz;
+	else if (is_siso(tbl->lq_type))
+		ht_tbl_pointer = expected_tpt_siso40MHz;
+	else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
+		ht_tbl_pointer = expected_tpt_mimo2_20MHz;
+	else			/* if (is_mimo2(tbl->lq_type)) <-- must be true */
+		ht_tbl_pointer = expected_tpt_mimo2_40MHz;
+
+	if (!tbl->is_SGI && !lq_sta->is_agg)	/* Normal */
+		tbl->expected_tpt = ht_tbl_pointer[0];
+	else if (tbl->is_SGI && !lq_sta->is_agg)	/* SGI */
+		tbl->expected_tpt = ht_tbl_pointer[1];
+	else if (!tbl->is_SGI && lq_sta->is_agg)	/* AGG */
+		tbl->expected_tpt = ht_tbl_pointer[2];
+	else			/* AGG+SGI */
+		tbl->expected_tpt = ht_tbl_pointer[3];
+}
+
+/*
+ * Find starting rate for new "search" high-throughput mode of modulation.
+ * Goal is to find lowest expected rate (under perfect conditions) that is
+ * above the current measured throughput of "active" mode, to give new mode
+ * a fair chance to prove itself without too many challenges.
+ *
+ * This gets called when transitioning to more aggressive modulation
+ * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
+ * (i.e. MIMO to SISO).  When moving to MIMO, bit rate will typically need
+ * to decrease to match "active" throughput.  When moving from MIMO to SISO,
+ * bit rate will typically need to increase, but not if performance was bad.
+ */
+static s32
+il4965_rs_get_best_rate(struct il_priv *il, struct il_lq_sta *lq_sta,
+			struct il_scale_tbl_info *tbl,	/* "search" */
+			u16 rate_mask, s8 idx)
+{
+	/* "active" values */
+	struct il_scale_tbl_info *active_tbl =
+	    &(lq_sta->lq_info[lq_sta->active_tbl]);
+	s32 active_sr = active_tbl->win[idx].success_ratio;
+	s32 active_tpt = active_tbl->expected_tpt[idx];
+
+	/* expected "search" throughput */
+	s32 *tpt_tbl = tbl->expected_tpt;
+
+	s32 new_rate, high, low, start_hi;
+	u16 high_low;
+	s8 rate = idx;
+
+	new_rate = high = low = start_hi = RATE_INVALID;
+
+	for (;;) {
+		high_low =
+		    il4965_rs_get_adjacent_rate(il, rate, rate_mask,
+						tbl->lq_type);
+
+		low = high_low & 0xff;
+		high = (high_low >> 8) & 0xff;
+
+		/*
+		 * Lower the "search" bit rate, to give new "search" mode
+		 * approximately the same throughput as "active" if:
+		 *
+		 * 1) "Active" mode has been working modestly well (but not
+		 *    great), and expected "search" throughput (under perfect
+		 *    conditions) at candidate rate is above the actual
+		 *    measured "active" throughput (but less than expected
+		 *    "active" throughput under perfect conditions).
+		 * OR
+		 * 2) "Active" mode has been working perfectly or very well
+		 *    and expected "search" throughput (under perfect
+		 *    conditions) at candidate rate is above expected
+		 *    "active" throughput (under perfect conditions).
+		 */
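+		/*
+		 * Scale note: last_tpt is a measured average_tpt, i.e. an
+		 * expected tpt scaled by the success percentage (100 ==
+		 * perfect), so tpt_tbl[rate] is multiplied by 100 here to
+		 * compare on the same scale.
+		 */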
+		if ((100 * tpt_tbl[rate] > lq_sta->last_tpt &&
+		     (active_sr > RATE_DECREASE_TH && active_sr <= RATE_HIGH_TH
+		      && tpt_tbl[rate] <= active_tpt)) ||
+		    (active_sr >= RATE_SCALE_SWITCH &&
+		     tpt_tbl[rate] > active_tpt)) {
+
+			/* (2nd or later pass)
+			 * If we've already tried to raise the rate, and are
+			 * now trying to lower it, use the higher rate. */
+			if (start_hi != RATE_INVALID) {
+				new_rate = start_hi;
+				break;
+			}
+
+			new_rate = rate;
+
+			/* Loop again with lower rate */
+			if (low != RATE_INVALID)
+				rate = low;
+
+			/* Lower rate not available, use the original */
+			else
+				break;
+
+			/* Else try to raise the "search" rate to match "active" */
+		} else {
+			/* (2nd or later pass)
+			 * If we've already tried to lower the rate, and are
+			 * now trying to raise it, use the lower rate. */
+			if (new_rate != RATE_INVALID)
+				break;
+
+			/* Loop again with higher rate */
+			else if (high != RATE_INVALID) {
+				start_hi = high;
+				rate = high;
+
+				/* Higher rate not available, use the original */
+			} else {
+				new_rate = rate;
+				break;
+			}
+		}
+	}
+
+	return new_rate;
+}
+
+/*
+ * Set up search table for MIMO2
+ */
+static int
+il4965_rs_switch_to_mimo2(struct il_priv *il, struct il_lq_sta *lq_sta,
+			  struct ieee80211_conf *conf,
+			  struct ieee80211_sta *sta,
+			  struct il_scale_tbl_info *tbl, int idx)
+{
+	u16 rate_mask;
+	s32 rate;
+	s8 is_green = lq_sta->is_green;
+
+	if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
+		return -1;
+
+	if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+		return -1;
+
+	/* Need both Tx chains/antennas to support MIMO */
+	if (il->hw_params.tx_chains_num < 2)
+		return -1;
+
+	D_RATE("LQ: try to switch to MIMO2\n");
+
+	tbl->lq_type = LQ_MIMO2;
+	tbl->is_dup = lq_sta->is_dup;
+	tbl->action = 0;
+	tbl->max_search = IL_MAX_SEARCH;
+	rate_mask = lq_sta->active_mimo2_rate;
+
+	if (il_is_ht40_tx_allowed(il, &sta->ht_cap))
+		tbl->is_ht40 = 1;
+	else
+		tbl->is_ht40 = 0;
+
+	il4965_rs_set_expected_tpt_table(lq_sta, tbl);
+
+	rate = il4965_rs_get_best_rate(il, lq_sta, tbl, rate_mask, idx);
+
+	D_RATE("LQ: MIMO2 best rate %d mask %X\n", rate, rate_mask);
+	if (rate == RATE_INVALID || !((1 << rate) & rate_mask)) {
+		D_RATE("Can't switch with idx %d rate mask %x\n", rate,
+		       rate_mask);
+		return -1;
+	}
+	tbl->current_rate =
+	    il4965_rate_n_flags_from_tbl(il, tbl, rate, is_green);
+
+	D_RATE("LQ: Switch to new mcs %X idx is green %X\n", tbl->current_rate,
+	       is_green);
+	return 0;
+}
+
+/*
+ * Set up search table for SISO
+ */
+static int
+il4965_rs_switch_to_siso(struct il_priv *il, struct il_lq_sta *lq_sta,
+			 struct ieee80211_conf *conf, struct ieee80211_sta *sta,
+			 struct il_scale_tbl_info *tbl, int idx)
+{
+	u16 rate_mask;
+	u8 is_green = lq_sta->is_green;
+	s32 rate;
+
+	if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
+		return -1;
+
+	D_RATE("LQ: try to switch to SISO\n");
+
+	tbl->is_dup = lq_sta->is_dup;
+	tbl->lq_type = LQ_SISO;
+	tbl->action = 0;
+	tbl->max_search = IL_MAX_SEARCH;
+	rate_mask = lq_sta->active_siso_rate;
+
+	if (il_is_ht40_tx_allowed(il, &sta->ht_cap))
+		tbl->is_ht40 = 1;
+	else
+		tbl->is_ht40 = 0;
+
+	if (is_green)
+		tbl->is_SGI = 0;	/*11n spec: no SGI in SISO+Greenfield */
+
+	il4965_rs_set_expected_tpt_table(lq_sta, tbl);
+	rate = il4965_rs_get_best_rate(il, lq_sta, tbl, rate_mask, idx);
+
+	D_RATE("LQ: get best rate %d mask %X\n", rate, rate_mask);
+	if (rate == RATE_INVALID || !((1 << rate) & rate_mask)) {
+		D_RATE("Cannot switch with idx %d rate mask %x\n", rate,
+		       rate_mask);
+		return -1;
+	}
+	tbl->current_rate =
+	    il4965_rate_n_flags_from_tbl(il, tbl, rate, is_green);
+	D_RATE("LQ: Switch to new mcs %X idx is green %X\n", tbl->current_rate,
+	       is_green);
+	return 0;
+}
+
+/*
+ * Try to switch to new modulation mode from legacy
+ */
+static int
+il4965_rs_move_legacy_other(struct il_priv *il, struct il_lq_sta *lq_sta,
+			    struct ieee80211_conf *conf,
+			    struct ieee80211_sta *sta, int idx)
+{
+	struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+	struct il_scale_tbl_info *search_tbl =
+	    &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+	struct il_rate_scale_data *win = &(tbl->win[idx]);
+	u32 sz =
+	    (sizeof(struct il_scale_tbl_info) -
+	     (sizeof(struct il_rate_scale_data) * RATE_COUNT));
+	u8 start_action;
+	u8 valid_tx_ant = il->hw_params.valid_tx_ant;
+	u8 tx_chains_num = il->hw_params.tx_chains_num;
+	int ret = 0;
+	u8 update_search_tbl_counter = 0;
+
+	tbl->action = IL_LEGACY_SWITCH_SISO;
+
+	start_action = tbl->action;
+	for (;;) {
+		lq_sta->action_counter++;
+		switch (tbl->action) {
+		case IL_LEGACY_SWITCH_ANTENNA1:
+		case IL_LEGACY_SWITCH_ANTENNA2:
+			D_RATE("LQ: Legacy toggle Antenna\n");
+
+			if ((tbl->action == IL_LEGACY_SWITCH_ANTENNA1 &&
+			     tx_chains_num <= 1) ||
+			    (tbl->action == IL_LEGACY_SWITCH_ANTENNA2 &&
+			     tx_chains_num <= 2))
+				break;
+
+			/* Don't change antenna if success has been great */
+			if (win->success_ratio >= IL_RS_GOOD_RATIO)
+				break;
+
+			/* Set up search table to try other antenna */
+			memcpy(search_tbl, tbl, sz);
+
+			if (il4965_rs_toggle_antenna(valid_tx_ant,
+						     &search_tbl->current_rate,
+						     search_tbl)) {
+				update_search_tbl_counter = 1;
+				il4965_rs_set_expected_tpt_table(lq_sta,
+								 search_tbl);
+				goto out;
+			}
+			break;
+		case IL_LEGACY_SWITCH_SISO:
+			D_RATE("LQ: Legacy switch to SISO\n");
+
+			/* Set up search table to try SISO */
+			memcpy(search_tbl, tbl, sz);
+			search_tbl->is_SGI = 0;
+			ret = il4965_rs_switch_to_siso(il, lq_sta, conf, sta,
+						       search_tbl, idx);
+			if (!ret) {
+				lq_sta->action_counter = 0;
+				goto out;
+			}
+
+			break;
+		case IL_LEGACY_SWITCH_MIMO2_AB:
+		case IL_LEGACY_SWITCH_MIMO2_AC:
+		case IL_LEGACY_SWITCH_MIMO2_BC:
+			D_RATE("LQ: Legacy switch to MIMO2\n");
+
+			/* Set up search table to try MIMO */
+			memcpy(search_tbl, tbl, sz);
+			search_tbl->is_SGI = 0;
+
+			if (tbl->action == IL_LEGACY_SWITCH_MIMO2_AB)
+				search_tbl->ant_type = ANT_AB;
+			else if (tbl->action == IL_LEGACY_SWITCH_MIMO2_AC)
+				search_tbl->ant_type = ANT_AC;
+			else
+				search_tbl->ant_type = ANT_BC;
+
+			if (!il4965_rs_is_valid_ant(valid_tx_ant,
+						    search_tbl->ant_type))
+				break;
+
+			ret = il4965_rs_switch_to_mimo2(il, lq_sta, conf, sta,
+							search_tbl, idx);
+			if (!ret) {
+				lq_sta->action_counter = 0;
+				goto out;
+			}
+			break;
+		}
+		tbl->action++;
+		if (tbl->action > IL_LEGACY_SWITCH_MIMO2_BC)
+			tbl->action = IL_LEGACY_SWITCH_ANTENNA1;
+
+		if (tbl->action == start_action)
+			break;
+
+	}
+	search_tbl->lq_type = LQ_NONE;
+	return 0;
+
+out:
+	lq_sta->search_better_tbl = 1;
+	tbl->action++;
+	if (tbl->action > IL_LEGACY_SWITCH_MIMO2_BC)
+		tbl->action = IL_LEGACY_SWITCH_ANTENNA1;
+	if (update_search_tbl_counter)
+		search_tbl->action = tbl->action;
+	return 0;
+
+}
+
+/*
+ * Try to switch to new modulation mode from SISO
+ */
+static int
+il4965_rs_move_siso_to_other(struct il_priv *il, struct il_lq_sta *lq_sta,
+			     struct ieee80211_conf *conf,
+			     struct ieee80211_sta *sta, int idx)
+{
+	u8 is_green = lq_sta->is_green;
+	struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+	struct il_scale_tbl_info *search_tbl =
+	    &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+	struct il_rate_scale_data *win = &(tbl->win[idx]);
+	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+	u32 sz =
+	    (sizeof(struct il_scale_tbl_info) -
+	     (sizeof(struct il_rate_scale_data) * RATE_COUNT));
+	u8 start_action;
+	u8 valid_tx_ant = il->hw_params.valid_tx_ant;
+	u8 tx_chains_num = il->hw_params.tx_chains_num;
+	u8 update_search_tbl_counter = 0;
+	int ret;
+
+	start_action = tbl->action;
+
+	for (;;) {
+		lq_sta->action_counter++;
+		switch (tbl->action) {
+		case IL_SISO_SWITCH_ANTENNA1:
+		case IL_SISO_SWITCH_ANTENNA2:
+			D_RATE("LQ: SISO toggle Antenna\n");
+			if ((tbl->action == IL_SISO_SWITCH_ANTENNA1 &&
+			     tx_chains_num <= 1) ||
+			    (tbl->action == IL_SISO_SWITCH_ANTENNA2 &&
+			     tx_chains_num <= 2))
+				break;
+
+			if (win->success_ratio >= IL_RS_GOOD_RATIO)
+				break;
+
+			memcpy(search_tbl, tbl, sz);
+			if (il4965_rs_toggle_antenna(valid_tx_ant,
+						     &search_tbl->current_rate,
+						     search_tbl)) {
+				update_search_tbl_counter = 1;
+				goto out;
+			}
+			break;
+		case IL_SISO_SWITCH_MIMO2_AB:
+		case IL_SISO_SWITCH_MIMO2_AC:
+		case IL_SISO_SWITCH_MIMO2_BC:
+			D_RATE("LQ: SISO switch to MIMO2\n");
+			memcpy(search_tbl, tbl, sz);
+			search_tbl->is_SGI = 0;
+
+			if (tbl->action == IL_SISO_SWITCH_MIMO2_AB)
+				search_tbl->ant_type = ANT_AB;
+			else if (tbl->action == IL_SISO_SWITCH_MIMO2_AC)
+				search_tbl->ant_type = ANT_AC;
+			else
+				search_tbl->ant_type = ANT_BC;
+
+			if (!il4965_rs_is_valid_ant(valid_tx_ant,
+						    search_tbl->ant_type))
+				break;
+
+			ret = il4965_rs_switch_to_mimo2(il, lq_sta, conf, sta,
+							search_tbl, idx);
+			if (!ret)
+				goto out;
+			break;
+		case IL_SISO_SWITCH_GI:
+			if (!tbl->is_ht40 &&
+			    !(ht_cap->cap & IEEE80211_HT_CAP_SGI_20))
+				break;
+			if (tbl->is_ht40 &&
+			    !(ht_cap->cap & IEEE80211_HT_CAP_SGI_40))
+				break;
+
+			D_RATE("LQ: SISO toggle SGI/NGI\n");
+
+			memcpy(search_tbl, tbl, sz);
+			if (is_green) {
+				if (!tbl->is_SGI)
+					break;
+				else
+					IL_ERR("SGI was set in GF+SISO\n");
+			}
+			search_tbl->is_SGI = !tbl->is_SGI;
+			il4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
+			if (tbl->is_SGI) {
+				s32 tpt = lq_sta->last_tpt / 100;
+				if (tpt >= search_tbl->expected_tpt[idx])
+					break;
+			}
+			search_tbl->current_rate =
+			    il4965_rate_n_flags_from_tbl(il, search_tbl, idx,
+							 is_green);
+			update_search_tbl_counter = 1;
+			goto out;
+		}
+		tbl->action++;
+		if (tbl->action > IL_SISO_SWITCH_GI)
+			tbl->action = IL_SISO_SWITCH_ANTENNA1;
+
+		if (tbl->action == start_action)
+			break;
+	}
+	search_tbl->lq_type = LQ_NONE;
+	return 0;
+
+out:
+	lq_sta->search_better_tbl = 1;
+	tbl->action++;
+	if (tbl->action > IL_SISO_SWITCH_GI)
+		tbl->action = IL_SISO_SWITCH_ANTENNA1;
+	if (update_search_tbl_counter)
+		search_tbl->action = tbl->action;
+
+	return 0;
+}
+
+/*
+ * Try to switch to new modulation mode from MIMO2
+ */
+static int
+il4965_rs_move_mimo2_to_other(struct il_priv *il, struct il_lq_sta *lq_sta,
+			      struct ieee80211_conf *conf,
+			      struct ieee80211_sta *sta, int idx)
+{
+	s8 is_green = lq_sta->is_green;
+	struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+	struct il_scale_tbl_info *search_tbl =
+	    &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+	struct il_rate_scale_data *win = &(tbl->win[idx]);
+	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+	u32 sz =
+	    (sizeof(struct il_scale_tbl_info) -
+	     (sizeof(struct il_rate_scale_data) * RATE_COUNT));
+	u8 start_action;
+	u8 valid_tx_ant = il->hw_params.valid_tx_ant;
+	u8 tx_chains_num = il->hw_params.tx_chains_num;
+	u8 update_search_tbl_counter = 0;
+	int ret;
+
+	start_action = tbl->action;
+	for (;;) {
+		lq_sta->action_counter++;
+		switch (tbl->action) {
+		case IL_MIMO2_SWITCH_ANTENNA1:
+		case IL_MIMO2_SWITCH_ANTENNA2:
+			D_RATE("LQ: MIMO2 toggle Antennas\n");
+
+			if (tx_chains_num <= 2)
+				break;
+
+			if (win->success_ratio >= IL_RS_GOOD_RATIO)
+				break;
+
+			memcpy(search_tbl, tbl, sz);
+			if (il4965_rs_toggle_antenna(valid_tx_ant,
+						     &search_tbl->current_rate,
+						     search_tbl)) {
+				update_search_tbl_counter = 1;
+				goto out;
+			}
+			break;
+		case IL_MIMO2_SWITCH_SISO_A:
+		case IL_MIMO2_SWITCH_SISO_B:
+		case IL_MIMO2_SWITCH_SISO_C:
+			D_RATE("LQ: MIMO2 switch to SISO\n");
+
+			/* Set up new search table for SISO */
+			memcpy(search_tbl, tbl, sz);
+
+			if (tbl->action == IL_MIMO2_SWITCH_SISO_A)
+				search_tbl->ant_type = ANT_A;
+			else if (tbl->action == IL_MIMO2_SWITCH_SISO_B)
+				search_tbl->ant_type = ANT_B;
+			else
+				search_tbl->ant_type = ANT_C;
+
+			if (!il4965_rs_is_valid_ant(valid_tx_ant,
+						    search_tbl->ant_type))
+				break;
+
+			ret = il4965_rs_switch_to_siso(il, lq_sta, conf, sta,
+						       search_tbl, idx);
+			if (!ret)
+				goto out;
+
+			break;
+
+		case IL_MIMO2_SWITCH_GI:
+			if (!tbl->is_ht40 &&
+			    !(ht_cap->cap & IEEE80211_HT_CAP_SGI_20))
+				break;
+			if (tbl->is_ht40 &&
+			    !(ht_cap->cap & IEEE80211_HT_CAP_SGI_40))
+				break;
+
+			D_RATE("LQ: MIMO2 toggle SGI/NGI\n");
+
+			/* Set up new search table for MIMO2 */
+			memcpy(search_tbl, tbl, sz);
+			search_tbl->is_SGI = !tbl->is_SGI;
+			il4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
+			/*
+			 * If active table already uses the fastest possible
+			 * modulation (dual stream with short guard interval),
+			 * and it's working well, there's no need to look
+			 * for a better type of modulation!
+			 */
+			if (tbl->is_SGI) {
+				s32 tpt = lq_sta->last_tpt / 100;
+				if (tpt >= search_tbl->expected_tpt[idx])
+					break;
+			}
+			search_tbl->current_rate =
+			    il4965_rate_n_flags_from_tbl(il, search_tbl, idx,
+							 is_green);
+			update_search_tbl_counter = 1;
+			goto out;
+
+		}
+		tbl->action++;
+		if (tbl->action > IL_MIMO2_SWITCH_GI)
+			tbl->action = IL_MIMO2_SWITCH_ANTENNA1;
+
+		if (tbl->action == start_action)
+			break;
+	}
+	search_tbl->lq_type = LQ_NONE;
+	return 0;
+out:
+	lq_sta->search_better_tbl = 1;
+	tbl->action++;
+	if (tbl->action > IL_MIMO2_SWITCH_GI)
+		tbl->action = IL_MIMO2_SWITCH_ANTENNA1;
+	if (update_search_tbl_counter)
+		search_tbl->action = tbl->action;
+
+	return 0;
+
+}
+
+/*
+ * Check whether we should continue using same modulation mode, or
+ * begin search for a new mode, based on:
+ * 1) # tx successes or failures while using this mode
+ * 2) # times calling this function
+ * 3) elapsed time in this mode (not used, for now)
+ */
+static void
+il4965_rs_stay_in_table(struct il_lq_sta *lq_sta, bool force_search)
+{
+	struct il_scale_tbl_info *tbl;
+	int i;
+	int active_tbl;
+	int flush_interval_passed = 0;
+	struct il_priv *il;
+
+	il = lq_sta->drv;
+	active_tbl = lq_sta->active_tbl;
+
+	tbl = &(lq_sta->lq_info[active_tbl]);
+
+	/* If we've been disallowing search, see if we should now allow it */
+	if (lq_sta->stay_in_tbl) {
+
+		/* Elapsed time using current modulation mode */
+		if (lq_sta->flush_timer)
+			flush_interval_passed =
+			    time_after(jiffies,
+				       (unsigned long)(lq_sta->flush_timer +
+						       RATE_SCALE_FLUSH_INTVL));
+
+		/*
+		 * Check if we should allow search for new modulation mode.
+		 * If many frames have failed or succeeded, or we've used
+		 * this same modulation for a long time, allow search, and
+		 * reset history stats that keep track of whether we should
+		 * allow a new search.  Also (below) reset all bitmaps and
+		 * stats in active history.
+		 */
+		if (force_search ||
+		    lq_sta->total_failed > lq_sta->max_failure_limit ||
+		    lq_sta->total_success > lq_sta->max_success_limit ||
+		    (!lq_sta->search_better_tbl && lq_sta->flush_timer &&
+		     flush_interval_passed)) {
+			D_RATE("LQ: stay is expired %d %d %d\n",
+			       lq_sta->total_failed, lq_sta->total_success,
+			       flush_interval_passed);
+
+			/* Allow search for new mode */
+			lq_sta->stay_in_tbl = 0;	/* only place reset */
+			lq_sta->total_failed = 0;
+			lq_sta->total_success = 0;
+			lq_sta->flush_timer = 0;
+
+			/*
+			 * Else: if we've used this modulation mode for enough
+			 * repetitions (regardless of elapsed time or
+			 * success/failure), reset history bitmaps and
+			 * rate-specific stats for all rates in the active
+			 * table.
+			 */
+		} else {
+			lq_sta->table_count++;
+			if (lq_sta->table_count >= lq_sta->table_count_limit) {
+				lq_sta->table_count = 0;
+
+				D_RATE("LQ: stay in table clear win\n");
+				for (i = 0; i < RATE_COUNT; i++)
+					il4965_rs_rate_scale_clear_win(&tbl->win[i]);
+			}
+		}
+
+		/* If transitioning to allow "search", reset all history
+		 * bitmaps and stats in active table (this will become the new
+		 * "search" table). */
+		if (!lq_sta->stay_in_tbl) {
+			for (i = 0; i < RATE_COUNT; i++)
+				il4965_rs_rate_scale_clear_win(&(tbl->win[i]));
+		}
+	}
+}
+
+/*
+ * setup rate table in uCode
+ */
+static void
+il4965_rs_update_rate_tbl(struct il_priv *il, struct il_lq_sta *lq_sta,
+			  struct il_scale_tbl_info *tbl, int idx, u8 is_green)
+{
+	u32 rate;
+
+	/* Update uCode's rate table. */
+	rate = il4965_rate_n_flags_from_tbl(il, tbl, idx, is_green);
+	il4965_rs_fill_link_cmd(il, lq_sta, rate);
+	il_send_lq_cmd(il, &lq_sta->lq, CMD_ASYNC, false);
+}
+
+/*
+ * Do rate scaling and search for new modulation mode.
+ */
+static void
+il4965_rs_rate_scale_perform(struct il_priv *il, struct sk_buff *skb,
+			     struct ieee80211_sta *sta,
+			     struct il_lq_sta *lq_sta)
+{
+	struct ieee80211_hw *hw = il->hw;
+	struct ieee80211_conf *conf = &hw->conf;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	int low = RATE_INVALID;
+	int high = RATE_INVALID;
+	int idx;
+	int i;
+	struct il_rate_scale_data *win = NULL;
+	int current_tpt = IL_INVALID_VALUE;
+	int low_tpt = IL_INVALID_VALUE;
+	int high_tpt = IL_INVALID_VALUE;
+	u32 fail_count;
+	s8 scale_action = 0;
+	u16 rate_mask;
+	u8 update_lq = 0;
+	struct il_scale_tbl_info *tbl, *tbl1;
+	u16 rate_scale_idx_msk = 0;
+	u8 is_green = 0;
+	u8 active_tbl = 0;
+	u8 done_search = 0;
+	u16 high_low;
+	s32 sr;
+	u8 tid = MAX_TID_COUNT;
+	struct il_tid_data *tid_data;
+
+	D_RATE("rate scale calculate new rate for skb\n");
+
+	/* Send management frames and NO_ACK data using lowest rate. */
+	/* TODO: this could probably be improved. */
+	if (!ieee80211_is_data(hdr->frame_control) ||
+	    (info->flags & IEEE80211_TX_CTL_NO_ACK))
+		return;
+
+	lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
+
+	tid = il4965_rs_tl_add_packet(lq_sta, hdr);
+	if (tid != MAX_TID_COUNT && (lq_sta->tx_agg_tid_en & (1 << tid))) {
+		tid_data = &il->stations[lq_sta->lq.sta_id].tid[tid];
+		if (tid_data->agg.state == IL_AGG_OFF)
+			lq_sta->is_agg = 0;
+		else
+			lq_sta->is_agg = 1;
+	} else
+		lq_sta->is_agg = 0;
+
+	/*
+	 * Select rate-scale / modulation-mode table to work with in
+	 * the rest of this function:  "search" if searching for better
+	 * modulation mode, or "active" if doing rate scaling within a mode.
+	 */
+	if (!lq_sta->search_better_tbl)
+		active_tbl = lq_sta->active_tbl;
+	else
+		active_tbl = 1 - lq_sta->active_tbl;
+
+	tbl = &(lq_sta->lq_info[active_tbl]);
+	if (is_legacy(tbl->lq_type))
+		lq_sta->is_green = 0;
+	else
+		lq_sta->is_green = il4965_rs_use_green(il, sta);
+	is_green = lq_sta->is_green;
+
+	/* current tx rate */
+	idx = lq_sta->last_txrate_idx;
+
+	D_RATE("Rate scale idx %d for type %d\n", idx, tbl->lq_type);
+
+	/* rates available for this association, and for modulation mode */
+	rate_mask = il4965_rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
+
+	D_RATE("mask 0x%04X\n", rate_mask);
+
+	/* mask with station rate restriction */
+	if (is_legacy(tbl->lq_type)) {
+		if (lq_sta->band == NL80211_BAND_5GHZ)
+			/* supp_rates has no CCK bits in A mode */
+			rate_scale_idx_msk =
+			    (u16) (rate_mask &
+				   (lq_sta->supp_rates << IL_FIRST_OFDM_RATE));
+		else
+			rate_scale_idx_msk =
+			    (u16) (rate_mask & lq_sta->supp_rates);
+
+	} else
+		rate_scale_idx_msk = rate_mask;
+
+	if (!rate_scale_idx_msk)
+		rate_scale_idx_msk = rate_mask;
+
+	if (!((1 << idx) & rate_scale_idx_msk)) {
+		IL_ERR("Current Rate is not valid\n");
+		if (lq_sta->search_better_tbl) {
+			/* revert to active table if search table is not valid */
+			tbl->lq_type = LQ_NONE;
+			lq_sta->search_better_tbl = 0;
+			tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+			/* get "active" rate info */
+			idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
+			il4965_rs_update_rate_tbl(il, lq_sta, tbl, idx,
+						  is_green);
+		}
+		return;
+	}
+
+	/* Get expected throughput table and history win for current rate */
+	if (!tbl->expected_tpt) {
+		IL_ERR("tbl->expected_tpt is NULL\n");
+		return;
+	}
+
+	/* force user max rate if set by user */
+	if (lq_sta->max_rate_idx != -1 && lq_sta->max_rate_idx < idx) {
+		idx = lq_sta->max_rate_idx;
+		update_lq = 1;
+		win = &(tbl->win[idx]);
+		goto lq_update;
+	}
+
+	win = &(tbl->win[idx]);
+
+	/*
+	 * If there is not enough history to calculate actual average
+	 * throughput, keep analyzing results of more tx frames, without
+	 * changing rate or mode (bypass most of the rest of this function).
+	 * Set up new rate table in uCode only if old rate is not supported
+	 * in current association (use new rate found above).
+	 */
+	fail_count = win->counter - win->success_counter;
+	if (fail_count < RATE_MIN_FAILURE_TH &&
+	    win->success_counter < RATE_MIN_SUCCESS_TH) {
+		D_RATE("LQ: still below TH. succ=%d total=%d for idx %d\n",
+		       win->success_counter, win->counter, idx);
+
+		/* Can't calculate this yet; not enough history */
+		win->average_tpt = IL_INVALID_VALUE;
+
+		/* Should we stay with this modulation mode,
+		 * or search for a new one? */
+		il4965_rs_stay_in_table(lq_sta, false);
+
+		goto out;
+	}
+	/* Else we have enough samples; calculate estimate of
+	 * actual average throughput */
+	if (win->average_tpt !=
+	    ((win->success_ratio * tbl->expected_tpt[idx] + 64) / 128)) {
+		IL_ERR("expected_tpt should have been calculated by now\n");
+		win->average_tpt =
+		    ((win->success_ratio * tbl->expected_tpt[idx] + 64) / 128);
+	}
+
+	/* If we are searching for better modulation mode, check success. */
+	if (lq_sta->search_better_tbl) {
+		/* If good success, continue using the "search" mode;
+		 * no need to send new link quality command, since we're
+		 * continuing to use the setup that we've been trying. */
+		if (win->average_tpt > lq_sta->last_tpt) {
+
+			D_RATE("LQ: SWITCHING TO NEW TBL "
+			       "suc=%d cur-tpt=%d old-tpt=%d\n",
+			       win->success_ratio, win->average_tpt,
+			       lq_sta->last_tpt);
+
+			if (!is_legacy(tbl->lq_type))
+				lq_sta->enable_counter = 1;
+
+			/* Swap tables; "search" becomes "active" */
+			lq_sta->active_tbl = active_tbl;
+			current_tpt = win->average_tpt;
+
+			/* Else poor success; go back to mode in "active" table */
+		} else {
+
+			D_RATE("LQ: GOING BACK TO THE OLD TBL "
+			       "suc=%d cur-tpt=%d old-tpt=%d\n",
+			       win->success_ratio, win->average_tpt,
+			       lq_sta->last_tpt);
+
+			/* Nullify "search" table */
+			tbl->lq_type = LQ_NONE;
+
+			/* Revert to "active" table */
+			active_tbl = lq_sta->active_tbl;
+			tbl = &(lq_sta->lq_info[active_tbl]);
+
+			/* Revert to "active" rate and throughput info */
+			idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
+			current_tpt = lq_sta->last_tpt;
+
+			/* Need to set up a new rate table in uCode */
+			update_lq = 1;
+		}
+
+		/* Either way, we've made a decision; modulation mode
+		 * search is done, allow rate adjustment next time. */
+		lq_sta->search_better_tbl = 0;
+		done_search = 1;	/* Don't switch modes below! */
+		goto lq_update;
+	}
+
+	/* (Else) not in search of better modulation mode, try for better
+	 * starting rate, while staying in this mode. */
+	high_low =
+	    il4965_rs_get_adjacent_rate(il, idx, rate_scale_idx_msk,
+					tbl->lq_type);
+	low = high_low & 0xff;
+	high = (high_low >> 8) & 0xff;
+
+	/* If the user set a max rate, don't allow rates above that constraint */
+	if (lq_sta->max_rate_idx != -1 && lq_sta->max_rate_idx < high)
+		high = RATE_INVALID;
+
+	sr = win->success_ratio;
+
+	/* Collect measured throughputs for current and adjacent rates */
+	current_tpt = win->average_tpt;
+	if (low != RATE_INVALID)
+		low_tpt = tbl->win[low].average_tpt;
+	if (high != RATE_INVALID)
+		high_tpt = tbl->win[high].average_tpt;
+
+	scale_action = 0;
+
+	/* Too many failures, decrease rate */
+	if (sr <= RATE_DECREASE_TH || current_tpt == 0) {
+		D_RATE("decrease rate because of low success_ratio\n");
+		scale_action = -1;
+
+		/* No throughput measured yet for adjacent rates; try an increase. */
+	} else if (low_tpt == IL_INVALID_VALUE && high_tpt == IL_INVALID_VALUE) {
+
+		if (high != RATE_INVALID && sr >= RATE_INCREASE_TH)
+			scale_action = 1;
+		else if (low != RATE_INVALID)
+			scale_action = 0;
+	}
+
+	/* Both adjacent throughputs are measured, but neither one has better
+	 * throughput; we're using the best rate, don't change it! */
+	else if (low_tpt != IL_INVALID_VALUE && high_tpt != IL_INVALID_VALUE &&
+		 low_tpt < current_tpt && high_tpt < current_tpt)
+		scale_action = 0;
+
+	/* At least one adjacent rate's throughput is measured,
+	 * and may have better performance. */
+	else {
+		/* Higher adjacent rate's throughput is measured */
+		if (high_tpt != IL_INVALID_VALUE) {
+			/* Higher rate has better throughput */
+			if (high_tpt > current_tpt && sr >= RATE_INCREASE_TH)
+				scale_action = 1;
+			else
+				scale_action = 0;
+
+			/* Lower adjacent rate's throughput is measured */
+		} else if (low_tpt != IL_INVALID_VALUE) {
+			/* Lower rate has better throughput */
+			if (low_tpt > current_tpt) {
+				D_RATE("decrease rate because of low tpt\n");
+				scale_action = -1;
+			} else if (sr >= RATE_INCREASE_TH) {
+				scale_action = 1;
+			}
+		}
+	}
+
+	/* Sanity check; asked for decrease, but success rate or throughput
+	 * has been good at old rate.  Don't change it. */
+	if (scale_action == -1 && low != RATE_INVALID &&
+	    (sr > RATE_HIGH_TH || current_tpt > 100 * tbl->expected_tpt[low]))
+		scale_action = 0;
+
+	switch (scale_action) {
+	case -1:
+		/* Decrease starting rate, update uCode's rate table */
+		if (low != RATE_INVALID) {
+			update_lq = 1;
+			idx = low;
+		}
+
+		break;
+	case 1:
+		/* Increase starting rate, update uCode's rate table */
+		if (high != RATE_INVALID) {
+			update_lq = 1;
+			idx = high;
+		}
+
+		break;
+	case 0:
+		/* No change */
+	default:
+		break;
+	}
+
+	D_RATE("choose rate scale idx %d action %d low %d high %d type %d\n",
+	       idx, scale_action, low, high, tbl->lq_type);
+
+lq_update:
+	/* Replace uCode's rate table for the destination station. */
+	if (update_lq)
+		il4965_rs_update_rate_tbl(il, lq_sta, tbl, idx, is_green);
+
+	/* Should we stay with this modulation mode,
+	 * or search for a new one? */
+	il4965_rs_stay_in_table(lq_sta, false);
+
+	/*
+	 * Search for new modulation mode if we're:
+	 * 1)  Not changing rates right now
+	 * 2)  Not just finishing up a search
+	 * 3)  Allowing a new search
+	 */
+	if (!update_lq && !done_search && !lq_sta->stay_in_tbl && win->counter) {
+		/* Save current throughput to compare with "search" throughput */
+		lq_sta->last_tpt = current_tpt;
+
+		/* Select a new "search" modulation mode to try.
+		 * If one is found, set up the new "search" table. */
+		if (is_legacy(tbl->lq_type))
+			il4965_rs_move_legacy_other(il, lq_sta, conf, sta, idx);
+		else if (is_siso(tbl->lq_type))
+			il4965_rs_move_siso_to_other(il, lq_sta, conf, sta,
+						     idx);
+		else		/* (is_mimo2(tbl->lq_type)) */
+			il4965_rs_move_mimo2_to_other(il, lq_sta, conf, sta,
+						      idx);
+
+		/* If new "search" mode was selected, set up in uCode table */
+		if (lq_sta->search_better_tbl) {
+			/* Access the "search" table, clear its history. */
+			tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+			for (i = 0; i < RATE_COUNT; i++)
+				il4965_rs_rate_scale_clear_win(&(tbl->win[i]));
+
+			/* Use new "search" start rate */
+			idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
+
+			D_RATE("Switch current mcs: %X idx: %d\n",
+			       tbl->current_rate, idx);
+			il4965_rs_fill_link_cmd(il, lq_sta, tbl->current_rate);
+			il_send_lq_cmd(il, &lq_sta->lq, CMD_ASYNC, false);
+		} else
+			done_search = 1;
+	}
+
+	if (done_search && !lq_sta->stay_in_tbl) {
+		/* If the "active" (non-search) mode was legacy,
+		 * and we've tried switching antennas,
+		 * but we haven't been able to try HT modes (not available),
+		 * stay with best antenna legacy modulation for a while
+		 * before next round of mode comparisons. */
+		tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
+		if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) &&
+		    lq_sta->action_counter > tbl1->max_search) {
+			D_RATE("LQ: STAY in legacy table\n");
+			il4965_rs_set_stay_in_table(il, 1, lq_sta);
+		}
+
+		/* If we're in an HT mode, and all 3 mode switch actions
+		 * have been tried and compared, stay in this best modulation
+		 * mode for a while before next round of mode comparisons. */
+		if (lq_sta->enable_counter &&
+		    lq_sta->action_counter >= tbl1->max_search) {
+			if (lq_sta->last_tpt > IL_AGG_TPT_THREHOLD &&
+			    (lq_sta->tx_agg_tid_en & (1 << tid)) &&
+			    tid != MAX_TID_COUNT) {
+				tid_data =
+				    &il->stations[lq_sta->lq.sta_id].tid[tid];
+				if (tid_data->agg.state == IL_AGG_OFF) {
+					D_RATE("try to aggregate tid %d\n",
+					       tid);
+					il4965_rs_tl_turn_on_agg(il, tid,
+								 lq_sta, sta);
+				}
+			}
+			il4965_rs_set_stay_in_table(il, 0, lq_sta);
+		}
+	}
+
+out:
+	tbl->current_rate =
+	    il4965_rate_n_flags_from_tbl(il, tbl, idx, is_green);
+	i = idx;
+	lq_sta->last_txrate_idx = i;
+}
+
+/**
+ * il4965_rs_initialize_lq - Initialize a station's hardware rate table
+ *
+ * The uCode's station table contains a table of fallback rates
+ * for automatic fallback during transmission.
+ *
+ * NOTE: This sets up a default set of values.  These will be replaced later
+ *       if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
+ *       rc80211_simple.
+ *
+ * NOTE: Run C_ADD_STA command to set up station table entry, before
+ *       calling this function (which runs C_TX_LINK_QUALITY_CMD,
+ *       which requires station table entry to exist).
+ */
+static void
+il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf,
+			struct ieee80211_sta *sta, struct il_lq_sta *lq_sta)
+{
+	struct il_scale_tbl_info *tbl;
+	int rate_idx;
+	int i;
+	u32 rate;
+	u8 use_green;
+	u8 active_tbl = 0;
+	u8 valid_tx_ant;
+
+	if (!sta || !lq_sta)
+		return;
+
+	use_green = il4965_rs_use_green(il, sta);
+
+	i = lq_sta->last_txrate_idx;
+
+	valid_tx_ant = il->hw_params.valid_tx_ant;
+
+	if (!lq_sta->search_better_tbl)
+		active_tbl = lq_sta->active_tbl;
+	else
+		active_tbl = 1 - lq_sta->active_tbl;
+
+	tbl = &(lq_sta->lq_info[active_tbl]);
+
+	if (i < 0 || i >= RATE_COUNT)
+		i = 0;
+
+	rate = il_rates[i].plcp;
+	tbl->ant_type = il4965_first_antenna(valid_tx_ant);
+	rate |= tbl->ant_type << RATE_MCS_ANT_POS;
+
+	if (i >= IL_FIRST_CCK_RATE && i <= IL_LAST_CCK_RATE)
+		rate |= RATE_MCS_CCK_MSK;
+
+	il4965_rs_get_tbl_info_from_mcs(rate, il->band, tbl, &rate_idx);
+	if (!il4965_rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
+		il4965_rs_toggle_antenna(valid_tx_ant, &rate, tbl);
+
+	rate = il4965_rate_n_flags_from_tbl(il, tbl, rate_idx, use_green);
+	tbl->current_rate = rate;
+	il4965_rs_set_expected_tpt_table(lq_sta, tbl);
+	il4965_rs_fill_link_cmd(NULL, lq_sta, rate);
+	il->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
+	il_send_lq_cmd(il, &lq_sta->lq, CMD_SYNC, true);
+}
+
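+/*
+ * mac80211 get_rate() hook: report the rate chosen by the scaling
+ * algorithm, translating last_rate_n_flags into mac80211's rate idx
+ * and flags format.
+ */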
+static void
+il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
+		   struct ieee80211_tx_rate_control *txrc)
+{
+	struct sk_buff *skb = txrc->skb;
+	struct ieee80211_supported_band *sband = txrc->sband;
+	struct il_priv *il __maybe_unused = (struct il_priv *)il_r;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct il_lq_sta *lq_sta = il_sta;
+	int rate_idx;
+
+	D_RATE("rate scale calculate new rate for skb\n");
+
+	/* Get max rate if user set max rate */
+	if (lq_sta) {
+		lq_sta->max_rate_idx = fls(txrc->rate_idx_mask) - 1;
+		if (sband->band == NL80211_BAND_5GHZ &&
+		    lq_sta->max_rate_idx != -1)
+			lq_sta->max_rate_idx += IL_FIRST_OFDM_RATE;
+		if (lq_sta->max_rate_idx < 0 ||
+		    lq_sta->max_rate_idx >= RATE_COUNT)
+			lq_sta->max_rate_idx = -1;
+	}
+
+	/* Treat uninitialized rate scaling data the same as non-existent. */
+	if (lq_sta && !lq_sta->drv) {
+		D_RATE("Rate scaling not initialized yet.\n");
+		il_sta = NULL;
+	}
+
+	/* Send management frames and NO_ACK data using lowest rate. */
+	if (rate_control_send_low(sta, il_sta, txrc))
+		return;
+
+	if (!lq_sta)
+		return;
+
+	rate_idx = lq_sta->last_txrate_idx;
+
+	if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
+		rate_idx -= IL_FIRST_OFDM_RATE;
+		/* 6M and 9M share the same MCS idx */
+		rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
+		if (il4965_rs_extract_rate(lq_sta->last_rate_n_flags) >=
+		    RATE_MIMO2_6M_PLCP)
+			rate_idx = rate_idx + MCS_IDX_PER_STREAM;
+		info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
+		if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
+			info->control.rates[0].flags |=
+			    IEEE80211_TX_RC_SHORT_GI;
+		if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK)
+			info->control.rates[0].flags |=
+			    IEEE80211_TX_RC_DUP_DATA;
+		if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK)
+			info->control.rates[0].flags |=
+			    IEEE80211_TX_RC_40_MHZ_WIDTH;
+		if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK)
+			info->control.rates[0].flags |=
+			    IEEE80211_TX_RC_GREEN_FIELD;
+	} else {
+		/* Check for invalid rates */
+		if (rate_idx < 0 || rate_idx >= RATE_COUNT_LEGACY ||
+		    (sband->band == NL80211_BAND_5GHZ &&
+		     rate_idx < IL_FIRST_OFDM_RATE))
+			rate_idx = rate_lowest_index(sband, sta);
+		/* On valid 5 GHz rate, adjust idx */
+		else if (sband->band == NL80211_BAND_5GHZ)
+			rate_idx -= IL_FIRST_OFDM_RATE;
+		info->control.rates[0].flags = 0;
+	}
+	info->control.rates[0].idx = rate_idx;
+	info->control.rates[0].count = 1;
+}
+
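+/*
+ * mac80211 alloc_sta() hook.  No separate allocation is needed; the
+ * per-station rate scale state lives inside il_station_priv (drv_priv).
+ */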
+static void *
+il4965_rs_alloc_sta(void *il_rate, struct ieee80211_sta *sta, gfp_t gfp)
+{
+	struct il_station_priv *sta_priv =
+	    (struct il_station_priv *)sta->drv_priv;
+	struct il_priv *il;
+
+	il = (struct il_priv *)il_rate;
+	D_RATE("create station rate scale win\n");
+
+	return &sta_priv->lq_sta;
+}
+
+/*
+ * Called after adding a new station to initialize rate scaling
+ */
+void
+il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
+{
+	int i, j;
+	struct ieee80211_hw *hw = il->hw;
+	struct ieee80211_conf *conf = &il->hw->conf;
+	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+	struct il_station_priv *sta_priv;
+	struct il_lq_sta *lq_sta;
+	struct ieee80211_supported_band *sband;
+
+	sta_priv = (struct il_station_priv *)sta->drv_priv;
+	lq_sta = &sta_priv->lq_sta;
+	sband = hw->wiphy->bands[conf->chandef.chan->band];
+
+	lq_sta->lq.sta_id = sta_id;
+
+	for (j = 0; j < LQ_SIZE; j++)
+		for (i = 0; i < RATE_COUNT; i++)
+			il4965_rs_rate_scale_clear_win(&lq_sta->lq_info[j].win[i]);
+
+	lq_sta->flush_timer = 0;
+	lq_sta->supp_rates = sta->supp_rates[sband->band];
+
+	D_RATE("LQ:" "*** rate scale station global init for station %d ***\n",
+	       sta_id);
+	/* TODO: what is a good starting rate for STA? About the middle?
+	 * Maybe not the lowest or the highest rate. Could consider using RSSI
+	 * from previous packets? Need IEEE 802.1X auth to succeed immediately
+	 * after assoc. */
+
+	lq_sta->is_dup = 0;
+	lq_sta->max_rate_idx = -1;
+	lq_sta->missed_rate_counter = IL_MISSED_RATE_MAX;
+	lq_sta->is_green = il4965_rs_use_green(il, sta);
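+	/* mask out bit 12 (the 60M entry), which is not a legacy rate */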
+	lq_sta->active_legacy_rate = il->active_rate & ~(0x1000);
+	lq_sta->band = il->band;
+	/*
+	 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
+	 * supp_rates[] does not; shift to convert format, force 9 MBits off.
+	 */
+	lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
+	lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
+	lq_sta->active_siso_rate &= ~((u16) 0x2);
+	lq_sta->active_siso_rate <<= IL_FIRST_OFDM_RATE;
+
+	/* Same here */
+	lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
+	lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
+	lq_sta->active_mimo2_rate &= ~((u16) 0x2);
+	lq_sta->active_mimo2_rate <<= IL_FIRST_OFDM_RATE;
+
+	/* These values will be overridden later */
+	lq_sta->lq.general_params.single_stream_ant_msk =
+	    il4965_first_antenna(il->hw_params.valid_tx_ant);
+	lq_sta->lq.general_params.dual_stream_ant_msk =
+	    il->hw_params.valid_tx_ant & ~il4965_first_antenna(il->hw_params.
+							       valid_tx_ant);
+	if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
+		lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
+	} else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) {
+		lq_sta->lq.general_params.dual_stream_ant_msk =
+		    il->hw_params.valid_tx_ant;
+	}
+
+	/* as default allow aggregation for all tids */
+	lq_sta->tx_agg_tid_en = IL_AGG_ALL_TID;
+	lq_sta->drv = il;
+
+	/* Set last_txrate_idx to lowest rate */
+	lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
+	if (sband->band == NL80211_BAND_5GHZ)
+		lq_sta->last_txrate_idx += IL_FIRST_OFDM_RATE;
+	lq_sta->is_agg = 0;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+	lq_sta->dbg_fixed_rate = 0;
+#endif
+
+	il4965_rs_initialize_lq(il, conf, sta, lq_sta);
+}
+
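+/*
+ * Build the retry table of the uCode link quality command: entry 0 is
+ * new_rate, repeated IL_NUMBER_TRY (legacy) or IL_HT_NUMBER_TRY (HT)
+ * times; each following entry steps down via il4965_rs_get_lower_rate(),
+ * toggling antennas for legacy rates along the way.
+ */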
+static void
+il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
+			u32 new_rate)
+{
+	struct il_scale_tbl_info tbl_type;
+	int idx = 0;
+	int rate_idx;
+	int repeat_rate = 0;
+	u8 ant_toggle_cnt = 0;
+	u8 use_ht_possible = 1;
+	u8 valid_tx_ant = 0;
+	struct il_link_quality_cmd *lq_cmd = &lq_sta->lq;
+
+	/* Override starting rate (idx 0) if needed for debug purposes */
+	il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx);
+
+	/* Interpret new_rate (rate_n_flags) */
+	il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type,
+					&rate_idx);
+
+	/* How many times should we repeat the initial rate? */
+	if (is_legacy(tbl_type.lq_type)) {
+		ant_toggle_cnt = 1;
+		repeat_rate = IL_NUMBER_TRY;
+	} else {
+		repeat_rate = IL_HT_NUMBER_TRY;
+	}
+
+	lq_cmd->general_params.mimo_delimiter =
+	    is_mimo(tbl_type.lq_type) ? 1 : 0;
+
+	/* Fill 1st table entry (idx 0) */
+	lq_cmd->rs_table[idx].rate_n_flags = cpu_to_le32(new_rate);
+
+	if (il4965_num_of_ant(tbl_type.ant_type) == 1) {
+		lq_cmd->general_params.single_stream_ant_msk =
+		    tbl_type.ant_type;
+	} else if (il4965_num_of_ant(tbl_type.ant_type) == 2) {
+		lq_cmd->general_params.dual_stream_ant_msk = tbl_type.ant_type;
+	}
+	/* otherwise we don't modify the existing value */
+	idx++;
+	repeat_rate--;
+	if (il)
+		valid_tx_ant = il->hw_params.valid_tx_ant;
+
+	/* Fill rest of rate table */
+	while (idx < LINK_QUAL_MAX_RETRY_NUM) {
+		/* Repeat initial/next rate.
+		 * For legacy IL_NUMBER_TRY == 1, this loop will not execute.
+		 * For HT IL_HT_NUMBER_TRY == 3, this executes twice. */
+		while (repeat_rate > 0 && idx < LINK_QUAL_MAX_RETRY_NUM) {
+			if (is_legacy(tbl_type.lq_type)) {
+				if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
+					ant_toggle_cnt++;
+				else if (il &&
+					 il4965_rs_toggle_antenna(valid_tx_ant,
+								  &new_rate,
+								  &tbl_type))
+					ant_toggle_cnt = 1;
+			}
+
+			/* Override next rate if needed for debug purposes */
+			il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx);
+
+			/* Fill next table entry */
+			lq_cmd->rs_table[idx].rate_n_flags =
+			    cpu_to_le32(new_rate);
+			repeat_rate--;
+			idx++;
+		}
+
+		il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
+						&tbl_type, &rate_idx);
+
+		/* Indicate to uCode which entries might be MIMO.
+		 * If initial rate was MIMO, this will finally end up
+		 * as (IL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
+		if (is_mimo(tbl_type.lq_type))
+			lq_cmd->general_params.mimo_delimiter = idx;
+
+		/* Get next rate */
+		new_rate =
+		    il4965_rs_get_lower_rate(lq_sta, &tbl_type, rate_idx,
+					     use_ht_possible);
+
+		/* How many times should we repeat the next rate? */
+		if (is_legacy(tbl_type.lq_type)) {
+			if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
+				ant_toggle_cnt++;
+			else if (il &&
+				 il4965_rs_toggle_antenna(valid_tx_ant,
+							  &new_rate, &tbl_type))
+				ant_toggle_cnt = 1;
+
+			repeat_rate = IL_NUMBER_TRY;
+		} else {
+			repeat_rate = IL_HT_NUMBER_TRY;
+		}
+
+		/* Don't allow HT rates after next pass.
+		 * il4965_rs_get_lower_rate() will change type to LQ_A or LQ_G. */
+		use_ht_possible = 0;
+
+		/* Override next rate if needed for debug purposes */
+		il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx);
+
+		/* Fill next table entry */
+		lq_cmd->rs_table[idx].rate_n_flags = cpu_to_le32(new_rate);
+
+		idx++;
+		repeat_rate--;
+	}
+
+	lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+	lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+
+	lq_cmd->agg_params.agg_time_limit =
+	    cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+}
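+/*
+ * Editor's illustration (not part of the original source): for an HT MIMO
+ * start rate, the loop above typically yields a retry table shaped like
+ *
+ *	rs_table[0..2]	initial MIMO rate	(IL_HT_NUMBER_TRY == 3)
+ *	rs_table[3..5]	next lower rate
+ *	...		stepping down; once use_ht_possible is cleared,
+ *	rs_table[..15]	legacy rates only, toggling antennas after every
+ *			NUM_TRY_BEFORE_ANT_TOGGLE tries,
+ *
+ * with mimo_delimiter marking where the MIMO entries end.
+ */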
+
+static void *
+il4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+{
+	return hw->priv;
+}
+
+/* rate scale requires a free function to be implemented */
+static void
+il4965_rs_free(void *il_rate)
+{
+}
+
+static void
+il4965_rs_free_sta(void *il_r, struct ieee80211_sta *sta, void *il_sta)
+{
+	struct il_priv *il __maybe_unused = il_r;
+
+	D_RATE("enter\n");
+	D_RATE("leave\n");
+}
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+
+static void
+il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 *rate_n_flags, int idx)
+{
+	struct il_priv *il;
+	u8 valid_tx_ant;
+	u8 ant_sel_tx;
+
+	il = lq_sta->drv;
+	valid_tx_ant = il->hw_params.valid_tx_ant;
+	if (lq_sta->dbg_fixed_rate) {
+		ant_sel_tx =
+		    ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) >>
+		     RATE_MCS_ANT_POS);
+		if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
+			*rate_n_flags = lq_sta->dbg_fixed_rate;
+			D_RATE("Fixed rate ON\n");
+		} else {
+			lq_sta->dbg_fixed_rate = 0;
+			IL_ERR
+			    ("Invalid antenna selection 0x%X, Valid is 0x%X\n",
+			     ant_sel_tx, valid_tx_ant);
+			D_RATE("Fixed rate OFF\n");
+		}
+	} else {
+		D_RATE("Fixed rate OFF\n");
+	}
+}
+
+static ssize_t
+il4965_rs_sta_dbgfs_scale_table_write(struct file *file,
+				      const char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	struct il_lq_sta *lq_sta = file->private_data;
+	struct il_priv *il;
+	char buf[64];
+	size_t buf_size;
+	u32 parsed_rate;
+
+	il = lq_sta->drv;
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+
+	if (sscanf(buf, "%x", &parsed_rate) == 1)
+		lq_sta->dbg_fixed_rate = parsed_rate;
+	else
+		lq_sta->dbg_fixed_rate = 0;
+
+	lq_sta->active_legacy_rate = 0x0FFF;	/* 1 - 54 MBits, includes CCK */
+	lq_sta->active_siso_rate = 0x1FD0;	/* 6 - 60 MBits, no 9, no CCK */
+	lq_sta->active_mimo2_rate = 0x1FD0;	/* 6 - 60 MBits, no 9, no CCK */
+
+	D_RATE("sta_id %d rate 0x%X\n", lq_sta->lq.sta_id,
+	       lq_sta->dbg_fixed_rate);
+
+	if (lq_sta->dbg_fixed_rate) {
+		il4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
+		il_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC, false);
+	}
+
+	return count;
+}
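+/*
+ * Editor's note -- example usage (the path is illustrative; it depends on
+ * the debugfs mount point and mac80211's per-station directory layout):
+ *
+ *	echo 0x4103 > .../stations/<mac>/rate_scale_table  # fix rate_n_flags
+ *	echo off > .../stations/<mac>/rate_scale_table     # clear fixed rate
+ */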
+
+static ssize_t
+il4965_rs_sta_dbgfs_scale_table_read(struct file *file, char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	char *buff;
+	int desc = 0;
+	int i = 0;
+	int idx = 0;
+	ssize_t ret;
+
+	struct il_lq_sta *lq_sta = file->private_data;
+	struct il_priv *il;
+	struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+
+	il = lq_sta->drv;
+	buff = kmalloc(1024, GFP_KERNEL);
+	if (!buff)
+		return -ENOMEM;
+
+	desc += sprintf(buff + desc, "sta_id %d\n", lq_sta->lq.sta_id);
+	desc +=
+	    sprintf(buff + desc, "failed=%d success=%d rate=0%X\n",
+		    lq_sta->total_failed, lq_sta->total_success,
+		    lq_sta->active_legacy_rate);
+	desc +=
+	    sprintf(buff + desc, "fixed rate 0x%X\n", lq_sta->dbg_fixed_rate);
+	desc +=
+	    sprintf(buff + desc, "valid_tx_ant %s%s%s\n",
+		    (il->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
+		    (il->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
+		    (il->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
+	desc +=
+	    sprintf(buff + desc, "lq type %s\n",
+		    (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
+	if (is_Ht(tbl->lq_type)) {
+		desc +=
+		    sprintf(buff + desc, " %s",
+			    (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
+		desc +=
+		    sprintf(buff + desc, " %s",
+			    (tbl->is_ht40) ? "40MHz" : "20MHz");
+		desc +=
+		    sprintf(buff + desc, " %s %s %s\n",
+			    (tbl->is_SGI) ? "SGI" : "",
+			    (lq_sta->is_green) ? "GF enabled" : "",
+			    (lq_sta->is_agg) ? "AGG on" : "");
+	}
+	desc +=
+	    sprintf(buff + desc, "last tx rate=0x%X\n",
+		    lq_sta->last_rate_n_flags);
+	desc +=
+	    sprintf(buff + desc,
+		    "general:" "flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n",
+		    lq_sta->lq.general_params.flags,
+		    lq_sta->lq.general_params.mimo_delimiter,
+		    lq_sta->lq.general_params.single_stream_ant_msk,
+		    lq_sta->lq.general_params.dual_stream_ant_msk);
+
+	desc +=
+	    sprintf(buff + desc,
+		    "agg:"
+		    "time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
+		    le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit),
+		    lq_sta->lq.agg_params.agg_dis_start_th,
+		    lq_sta->lq.agg_params.agg_frame_cnt_limit);
+
+	desc +=
+	    sprintf(buff + desc,
+		    "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
+		    lq_sta->lq.general_params.start_rate_idx[0],
+		    lq_sta->lq.general_params.start_rate_idx[1],
+		    lq_sta->lq.general_params.start_rate_idx[2],
+		    lq_sta->lq.general_params.start_rate_idx[3]);
+
+	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+		u32 rate_n_flags =
+		    le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags);
+
+		idx = il4965_hwrate_to_plcp_idx(rate_n_flags);
+		if (is_legacy(tbl->lq_type)) {
+			desc +=
+			    sprintf(buff + desc, " rate[%d] 0x%X %smbps\n", i,
+				    rate_n_flags, il_rate_mcs[idx].mbps);
+		} else {
+			desc +=
+			    sprintf(buff + desc, " rate[%d] 0x%X %smbps (%s)\n",
+				    i, rate_n_flags, il_rate_mcs[idx].mbps,
+				    il_rate_mcs[idx].mcs);
+		}
+	}
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+	kfree(buff);
+	return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
+	.write = il4965_rs_sta_dbgfs_scale_table_write,
+	.read = il4965_rs_sta_dbgfs_scale_table_read,
+	.open = simple_open,
+	.llseek = default_llseek,
+};
+
+static ssize_t
+il4965_rs_sta_dbgfs_stats_table_read(struct file *file, char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	char *buff;
+	int desc = 0;
+	int i, j;
+	ssize_t ret;
+
+	struct il_lq_sta *lq_sta = file->private_data;
+
+	buff = kmalloc(1024, GFP_KERNEL);
+	if (!buff)
+		return -ENOMEM;
+
+	for (i = 0; i < LQ_SIZE; i++) {
+		desc +=
+		    sprintf(buff + desc,
+			    "%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n"
+			    "rate=0x%X\n", lq_sta->active_tbl == i ? "*" : "x",
+			    lq_sta->lq_info[i].lq_type,
+			    lq_sta->lq_info[i].is_SGI,
+			    lq_sta->lq_info[i].is_ht40,
+			    lq_sta->lq_info[i].is_dup, lq_sta->is_green,
+			    lq_sta->lq_info[i].current_rate);
+		for (j = 0; j < RATE_COUNT; j++) {
+			desc +=
+			    sprintf(buff + desc,
+				    "counter=%d success=%d %%=%d\n",
+				    lq_sta->lq_info[i].win[j].counter,
+				    lq_sta->lq_info[i].win[j].success_counter,
+				    lq_sta->lq_info[i].win[j].success_ratio);
+		}
+	}
+	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+	kfree(buff);
+	return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
+	.read = il4965_rs_sta_dbgfs_stats_table_read,
+	.open = simple_open,
+	.llseek = default_llseek,
+};
+
+static ssize_t
+il4965_rs_sta_dbgfs_rate_scale_data_read(struct file *file,
+					 char __user *user_buf, size_t count,
+					 loff_t *ppos)
+{
+	char buff[120];
+	int desc = 0;
+	struct il_lq_sta *lq_sta = file->private_data;
+	struct il_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
+
+	if (is_Ht(tbl->lq_type))
+		desc +=
+		    sprintf(buff + desc, "Bit Rate= %d Mb/s\n",
+			    tbl->expected_tpt[lq_sta->last_txrate_idx]);
+	else
+		desc +=
+		    sprintf(buff + desc, "Bit Rate= %d Mb/s\n",
+			    il_rates[lq_sta->last_txrate_idx].ieee >> 1);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+}
+
+static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
+	.read = il4965_rs_sta_dbgfs_rate_scale_data_read,
+	.open = simple_open,
+	.llseek = default_llseek,
+};
+
+static void
+il4965_rs_add_debugfs(void *il, void *il_sta, struct dentry *dir)
+{
+	struct il_lq_sta *lq_sta = il_sta;
+	lq_sta->rs_sta_dbgfs_scale_table_file =
+	    debugfs_create_file("rate_scale_table", 0600, dir,
+				lq_sta, &rs_sta_dbgfs_scale_table_ops);
+	lq_sta->rs_sta_dbgfs_stats_table_file =
+	    debugfs_create_file("rate_stats_table", 0400, dir, lq_sta,
+				&rs_sta_dbgfs_stats_table_ops);
+	lq_sta->rs_sta_dbgfs_rate_scale_data_file =
+	    debugfs_create_file("rate_scale_data", 0400, dir, lq_sta,
+				&rs_sta_dbgfs_rate_scale_data_ops);
+	lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
+	    debugfs_create_u8("tx_agg_tid_enable", 0600, dir,
+			      &lq_sta->tx_agg_tid_en);
+}
+
+static void
+il4965_rs_remove_debugfs(void *il, void *il_sta)
+{
+	struct il_lq_sta *lq_sta = il_sta;
+	debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
+	debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
+	debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
+	debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
+}
+#endif
+
+/*
+ * Initialization of rate scaling information is done by driver after
+ * the station is added. Since mac80211 calls this function before a
+ * station is added we ignore it.
+ */
+static void
+il4965_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
+			 struct cfg80211_chan_def *chandef,
+			 struct ieee80211_sta *sta, void *il_sta)
+{
+}
+
+static const struct rate_control_ops rs_4965_ops = {
+	.name = IL4965_RS_NAME,
+	.tx_status = il4965_rs_tx_status,
+	.get_rate = il4965_rs_get_rate,
+	.rate_init = il4965_rs_rate_init_stub,
+	.alloc = il4965_rs_alloc,
+	.free = il4965_rs_free,
+	.alloc_sta = il4965_rs_alloc_sta,
+	.free_sta = il4965_rs_free_sta,
+#ifdef CONFIG_MAC80211_DEBUGFS
+	.add_sta_debugfs = il4965_rs_add_debugfs,
+	.remove_sta_debugfs = il4965_rs_remove_debugfs,
+#endif
+};
+
+int
+il4965_rate_control_register(void)
+{
+	return ieee80211_rate_control_register(&rs_4965_ops);
+}
+
+void
+il4965_rate_control_unregister(void)
+{
+	ieee80211_rate_control_unregister(&rs_4965_ops);
+}
diff --git a/drivers/net/wireless/intel/iwlegacy/4965.c b/drivers/net/wireless/intel/iwlegacy/4965.c
new file mode 100644
index 0000000..c3c638e
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/4965.c
@@ -0,0 +1,1950 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+
+#include "common.h"
+#include "4965.h"
+
+/**
+ * il4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
+ *   using sample data 100 bytes apart.  If these sample points are good,
+ *   it's a pretty good bet that everything between them is good, too.
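+ *
+ *   E.g. with len == 1000, ten words at byte offsets 0, 100, ..., 900 are
+ *   compared; image advances 100 / sizeof(u32) == 25 entries per step.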
+ */
+static int
+il4965_verify_inst_sparse(struct il_priv *il, __le32 *image, u32 len)
+{
+	u32 val;
+	int ret = 0;
+	u32 errcnt = 0;
+	u32 i;
+
+	D_INFO("ucode inst image size is %u\n", len);
+
+	for (i = 0; i < len; i += 100, image += 100 / sizeof(u32)) {
+		/* read data comes through single port, auto-incr addr */
+		/* NOTE: Use the debugless read so we don't flood kernel log
+		 * if IL_DL_IO is set */
+		il_wr(il, HBUS_TARG_MEM_RADDR, i + IL4965_RTC_INST_LOWER_BOUND);
+		val = _il_rd(il, HBUS_TARG_MEM_RDAT);
+		if (val != le32_to_cpu(*image)) {
+			ret = -EIO;
+			errcnt++;
+			if (errcnt >= 3)
+				break;
+		}
+	}
+
+	return ret;
+}
+
+/**
+ * il4965_verify_inst_full - verify runtime uCode image in card vs. host,
+ *     looking at all data.
+ */
+static int
+il4965_verify_inst_full(struct il_priv *il, __le32 *image, u32 len)
+{
+	u32 val;
+	u32 save_len = len;
+	int ret = 0;
+	u32 errcnt;
+
+	D_INFO("ucode inst image size is %u\n", len);
+
+	il_wr(il, HBUS_TARG_MEM_RADDR, IL4965_RTC_INST_LOWER_BOUND);
+
+	errcnt = 0;
+	for (; len > 0; len -= sizeof(u32), image++) {
+		/* read data comes through single port, auto-incr addr */
+		/* NOTE: Use the debugless read so we don't flood kernel log
+		 * if IL_DL_IO is set */
+		val = _il_rd(il, HBUS_TARG_MEM_RDAT);
+		if (val != le32_to_cpu(*image)) {
+			IL_ERR("uCode INST section is invalid at "
+			       "offset 0x%x, is 0x%x, s/b 0x%x\n",
+			       save_len - len, val, le32_to_cpu(*image));
+			ret = -EIO;
+			errcnt++;
+			if (errcnt >= 20)
+				break;
+		}
+	}
+
+	if (!errcnt)
+		D_INFO("ucode image in INSTRUCTION memory is good\n");
+
+	return ret;
+}
+
+/**
+ * il4965_verify_ucode - determine which instruction image is in SRAM,
+ *    and verify its contents
+ */
+int
+il4965_verify_ucode(struct il_priv *il)
+{
+	__le32 *image;
+	u32 len;
+	int ret;
+
+	/* Try bootstrap */
+	image = (__le32 *) il->ucode_boot.v_addr;
+	len = il->ucode_boot.len;
+	ret = il4965_verify_inst_sparse(il, image, len);
+	if (!ret) {
+		D_INFO("Bootstrap uCode is good in inst SRAM\n");
+		return 0;
+	}
+
+	/* Try initialize */
+	image = (__le32 *) il->ucode_init.v_addr;
+	len = il->ucode_init.len;
+	ret = il4965_verify_inst_sparse(il, image, len);
+	if (!ret) {
+		D_INFO("Initialize uCode is good in inst SRAM\n");
+		return 0;
+	}
+
+	/* Try runtime/protocol */
+	image = (__le32 *) il->ucode_code.v_addr;
+	len = il->ucode_code.len;
+	ret = il4965_verify_inst_sparse(il, image, len);
+	if (!ret) {
+		D_INFO("Runtime uCode is good in inst SRAM\n");
+		return 0;
+	}
+
+	IL_ERR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
+
+	/* Since nothing seems to match, show first several data entries in
+	 * instruction SRAM, so maybe visual inspection will give a clue.
+	 * Selection of bootstrap image (vs. other images) is arbitrary. */
+	image = (__le32 *) il->ucode_boot.v_addr;
+	len = il->ucode_boot.len;
+	ret = il4965_verify_inst_full(il, image, len);
+
+	return ret;
+}
+
+/******************************************************************************
+ *
+ * EEPROM related functions
+ *
+ *****************************************************************************/
+
+/*
+ * The device's EEPROM semaphore prevents conflicts between driver and uCode
+ * when accessing the EEPROM; each access is a series of pulses to/from the
+ * EEPROM chip, not a single event, so even reads could conflict if they
+ * weren't arbitrated by the semaphore.
+ */
+int
+il4965_eeprom_acquire_semaphore(struct il_priv *il)
+{
+	u16 count;
+	int ret;
+
+	for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
+		/* Request semaphore */
+		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+			   CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+
+		/* See if we got it */
+		ret =
+		    _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
+				 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
+				 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
+				 EEPROM_SEM_TIMEOUT);
+		if (ret >= 0)
+			return ret;
+	}
+
+	return ret;
+}
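+/*
+ * Editor's sketch of the expected call bracket (illustrative only):
+ *
+ *	if (il4965_eeprom_acquire_semaphore(il) < 0)
+ *		return -EBUSY;
+ *	... EEPROM reads, e.g. il_eeprom_query16(il, offset) ...
+ *	il4965_eeprom_release_semaphore(il);
+ */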
+
+void
+il4965_eeprom_release_semaphore(struct il_priv *il)
+{
+	il_clear_bit(il, CSR_HW_IF_CONFIG_REG,
+		     CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+}
+
+int
+il4965_eeprom_check_version(struct il_priv *il)
+{
+	u16 eeprom_ver;
+	u16 calib_ver;
+
+	eeprom_ver = il_eeprom_query16(il, EEPROM_VERSION);
+	calib_ver = il_eeprom_query16(il, EEPROM_4965_CALIB_VERSION_OFFSET);
+
+	if (eeprom_ver < il->cfg->eeprom_ver ||
+	    calib_ver < il->cfg->eeprom_calib_ver)
+		goto err;
+
+	IL_INFO("device EEPROM VER=0x%x, CALIB=0x%x\n", eeprom_ver, calib_ver);
+
+	return 0;
+err:
+	IL_ERR("Unsupported (too old) EEPROM VER=0x%x < 0x%x "
+	       "CALIB=0x%x < 0x%x\n", eeprom_ver, il->cfg->eeprom_ver,
+	       calib_ver, il->cfg->eeprom_calib_ver);
+	return -EINVAL;
+}
+
+void
+il4965_eeprom_get_mac(const struct il_priv *il, u8 *mac)
+{
+	const u8 *addr = il_eeprom_query_addr(il,
+					      EEPROM_MAC_ADDRESS);
+	memcpy(mac, addr, ETH_ALEN);
+}
+
+/* Send led command */
+static int
+il4965_send_led_cmd(struct il_priv *il, struct il_led_cmd *led_cmd)
+{
+	struct il_host_cmd cmd = {
+		.id = C_LEDS,
+		.len = sizeof(struct il_led_cmd),
+		.data = led_cmd,
+		.flags = CMD_ASYNC,
+		.callback = NULL,
+	};
+	u32 reg;
+
+	reg = _il_rd(il, CSR_LED_REG);
+	if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
+		_il_wr(il, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
+
+	return il_send_cmd(il, &cmd);
+}
+
+/* Turn on the LED register */
+void
+il4965_led_enable(struct il_priv *il)
+{
+	_il_wr(il, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
+}
+
+static int il4965_send_tx_power(struct il_priv *il);
+static int il4965_hw_get_temperature(struct il_priv *il);
+
+/* Highest firmware API version supported */
+#define IL4965_UCODE_API_MAX 2
+
+/* Lowest firmware API version supported */
+#define IL4965_UCODE_API_MIN 2
+
+#define IL4965_FW_PRE "iwlwifi-4965-"
+#define _IL4965_MODULE_FIRMWARE(api) IL4965_FW_PRE #api ".ucode"
+#define IL4965_MODULE_FIRMWARE(api) _IL4965_MODULE_FIRMWARE(api)
+
+/* check contents of special bootstrap uCode SRAM */
+static int
+il4965_verify_bsm(struct il_priv *il)
+{
+	__le32 *image = il->ucode_boot.v_addr;
+	u32 len = il->ucode_boot.len;
+	u32 reg;
+	u32 val;
+
+	D_INFO("Begin verify bsm\n");
+
+	/* verify BSM SRAM contents */
+	for (reg = BSM_SRAM_LOWER_BOUND; reg < BSM_SRAM_LOWER_BOUND + len;
+	     reg += sizeof(u32), image++) {
+		val = il_rd_prph(il, reg);
+		if (val != le32_to_cpu(*image)) {
+			IL_ERR("BSM uCode verification failed at "
+			       "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
+			       BSM_SRAM_LOWER_BOUND, reg - BSM_SRAM_LOWER_BOUND,
+			       len, val, le32_to_cpu(*image));
+			return -EIO;
+		}
+	}
+
+	D_INFO("BSM bootstrap uCode image OK\n");
+
+	return 0;
+}
+
+/**
+ * il4965_load_bsm - Load bootstrap instructions
+ *
+ * BSM operation:
+ *
+ * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
+ * in special SRAM that does not power down during RFKILL.  When powering back
+ * up after power-saving sleeps (or during initial uCode load), the BSM loads
+ * the bootstrap program into the on-board processor, and starts it.
+ *
+ * The bootstrap program loads (via DMA) instructions and data for a new
+ * program from host DRAM locations indicated by the host driver in the
+ * BSM_DRAM_* registers.  Once the new program is loaded, it starts
+ * automatically.
+ *
+ * When initializing the NIC, the host driver points the BSM to the
+ * "initialize" uCode image.  This uCode sets up some internal data, then
+ * notifies host via "initialize alive" that it is complete.
+ *
+ * The host then replaces the BSM_DRAM_* pointer values to point to the
+ * normal runtime uCode instructions and a backup uCode data cache buffer
+ * (filled initially with starting data values for the on-board processor),
+ * then triggers the "initialize" uCode to load and launch the runtime uCode,
+ * which begins normal operation.
+ *
+ * When doing a power-save shutdown, runtime uCode saves data SRAM into
+ * the backup data cache in DRAM before SRAM is powered down.
+ *
+ * When powering back up, the BSM loads the bootstrap program.  This reloads
+ * the runtime uCode instructions and the backup data cache into SRAM,
+ * and re-launches the runtime uCode from where it left off.
+ */
+static int
+il4965_load_bsm(struct il_priv *il)
+{
+	__le32 *image = il->ucode_boot.v_addr;
+	u32 len = il->ucode_boot.len;
+	dma_addr_t pinst;
+	dma_addr_t pdata;
+	u32 inst_len;
+	u32 data_len;
+	int i;
+	u32 done;
+	u32 reg_offset;
+	int ret;
+
+	D_INFO("Begin load bsm\n");
+
+	il->ucode_type = UCODE_RT;
+
+	/* make sure bootstrap program is no larger than BSM's SRAM size */
+	if (len > IL49_MAX_BSM_SIZE)
+		return -EINVAL;
+
+	/* Tell bootstrap uCode where to find the "Initialize" uCode
+	 *   in host DRAM ... host DRAM physical address bits 35:4 for 4965.
+	 * NOTE:  il_init_alive_start() will replace these values,
+	 *        after the "initialize" uCode has run, to point to
+	 *        runtime/protocol instructions and backup data cache.
+	 */
+	pinst = il->ucode_init.p_addr >> 4;
+	pdata = il->ucode_init_data.p_addr >> 4;
+	inst_len = il->ucode_init.len;
+	data_len = il->ucode_init_data.len;
+
+	il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
+	il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
+	il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
+	il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
+
+	/* Fill BSM memory with bootstrap instructions */
+	for (reg_offset = BSM_SRAM_LOWER_BOUND;
+	     reg_offset < BSM_SRAM_LOWER_BOUND + len;
+	     reg_offset += sizeof(u32), image++)
+		_il_wr_prph(il, reg_offset, le32_to_cpu(*image));
+
+	ret = il4965_verify_bsm(il);
+	if (ret)
+		return ret;
+
+	/* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
+	il_wr_prph(il, BSM_WR_MEM_SRC_REG, 0x0);
+	il_wr_prph(il, BSM_WR_MEM_DST_REG, IL49_RTC_INST_LOWER_BOUND);
+	il_wr_prph(il, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
+
+	/* Load bootstrap code into instruction SRAM now,
+	 *   to prepare to load "initialize" uCode */
+	il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
+
+	/* Wait for load of bootstrap uCode to finish */
+	for (i = 0; i < 100; i++) {
+		done = il_rd_prph(il, BSM_WR_CTRL_REG);
+		if (!(done & BSM_WR_CTRL_REG_BIT_START))
+			break;
+		udelay(10);
+	}
+	if (i < 100) {
+		D_INFO("BSM write complete, poll %d iterations\n", i);
+	} else {
+		IL_ERR("BSM write did not complete!\n");
+		return -EIO;
+	}
+
+	/* Enable future boot loads whenever power management unit triggers it
+	 *   (e.g. when powering back up after power-save shutdown) */
+	il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
+
+	return 0;
+}
+
+/**
+ * il4965_set_ucode_ptrs - Set uCode address location
+ *
+ * Tell initialization uCode where to find runtime uCode.
+ *
+ * BSM registers initially contain pointers to initialization uCode.
+ * We need to replace them to load runtime uCode inst and data,
+ * and to save runtime data when powering down.
+ */
+static int
+il4965_set_ucode_ptrs(struct il_priv *il)
+{
+	dma_addr_t pinst;
+	dma_addr_t pdata;
+	int ret = 0;
+
+	/* bits 35:4 for 4965 */
+	pinst = il->ucode_code.p_addr >> 4;
+	pdata = il->ucode_data_backup.p_addr >> 4;
+
+	/* Tell bootstrap uCode where to find image to load */
+	il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
+	il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
+	il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, il->ucode_data.len);
+
+	/* Inst byte count must be last to set up, bit 31 signals uCode
+	 *   that all new ptr/size info is in place */
+	il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG,
+		   il->ucode_code.len | BSM_DRAM_INST_LOAD);
+	D_INFO("Runtime uCode pointers are set.\n");
+
+	return ret;
+}
+
+/**
+ * il4965_init_alive_start - Called after N_ALIVE notification received
+ *
+ * Called after the N_ALIVE notification is received from the "initialize"
+ * uCode.
+ *
+ * The 4965 "initialize" ALIVE reply contains calibration data for:
+ *   voltage, temperature, and MIMO tx gain correction, now stored in il
+ *   (3945 does not contain this data).
+ *
+ * Tell the "initialize" uCode to go ahead and load the runtime uCode.
+ */
+static void
+il4965_init_alive_start(struct il_priv *il)
+{
+	/* Bootstrap uCode has loaded initialize uCode ... verify inst image.
+	 * This is a paranoid check, because we would not have gotten the
+	 * "initialize" alive if code weren't properly loaded.  */
+	if (il4965_verify_ucode(il)) {
+		/* Runtime instruction load was bad;
+		 * take it all the way back down so we can try again */
+		D_INFO("Bad \"initialize\" uCode load.\n");
+		goto restart;
+	}
+
+	/* Calculate temperature */
+	il->temperature = il4965_hw_get_temperature(il);
+
+	/* Send pointers to protocol/runtime uCode image ... init code will
+	 * load and launch runtime uCode, which will send us another "Alive"
+	 * notification. */
+	D_INFO("Initialization Alive received.\n");
+	if (il4965_set_ucode_ptrs(il)) {
+		/* Runtime instruction load won't happen;
+		 * take it all the way back down so we can try again */
+		D_INFO("Couldn't set up uCode pointers.\n");
+		goto restart;
+	}
+	return;
+
+restart:
+	queue_work(il->workqueue, &il->restart);
+}
+
+static bool
+il4965_is_ht40_channel(__le32 rxon_flags)
+{
+	int chan_mod =
+	    le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK) >>
+	    RXON_FLG_CHANNEL_MODE_POS;
+	return (chan_mod == CHANNEL_MODE_PURE_40 ||
+		chan_mod == CHANNEL_MODE_MIXED);
+}
+
+void
+il4965_nic_config(struct il_priv *il)
+{
+	unsigned long flags;
+	u16 radio_cfg;
+
+	spin_lock_irqsave(&il->lock, flags);
+
+	radio_cfg = il_eeprom_query16(il, EEPROM_RADIO_CONFIG);
+
+	/* write radio config values to register */
+	if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
+		il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+			   EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
+			   EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
+			   EEPROM_RF_CFG_DASH_MSK(radio_cfg));
+
+	/* set CSR_HW_CONFIG_REG for uCode use */
+	il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+		   CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
+		   CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
+
+	il->calib_info =
+	    (struct il_eeprom_calib_info *)
+	    il_eeprom_query_addr(il, EEPROM_4965_CALIB_TXPOWER_OFFSET);
+
+	spin_unlock_irqrestore(&il->lock, flags);
+}
+
+/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
+ * Called after every association, but this runs only once!
+ *  ... once chain noise is calibrated the first time, it's good forever.  */
+static void
+il4965_chain_noise_reset(struct il_priv *il)
+{
+	struct il_chain_noise_data *data = &(il->chain_noise_data);
+
+	if (data->state == IL_CHAIN_NOISE_ALIVE && il_is_any_associated(il)) {
+		struct il_calib_diff_gain_cmd cmd;
+
+		/* clear data for chain noise calibration algorithm */
+		data->chain_noise_a = 0;
+		data->chain_noise_b = 0;
+		data->chain_noise_c = 0;
+		data->chain_signal_a = 0;
+		data->chain_signal_b = 0;
+		data->chain_signal_c = 0;
+		data->beacon_count = 0;
+
+		memset(&cmd, 0, sizeof(cmd));
+		cmd.hdr.op_code = IL_PHY_CALIBRATE_DIFF_GAIN_CMD;
+		cmd.diff_gain_a = 0;
+		cmd.diff_gain_b = 0;
+		cmd.diff_gain_c = 0;
+		if (il_send_cmd_pdu(il, C_PHY_CALIBRATION, sizeof(cmd), &cmd))
+			IL_ERR("Could not send C_PHY_CALIBRATION\n");
+		data->state = IL_CHAIN_NOISE_ACCUMULATE;
+		D_CALIB("Run chain_noise_calibrate\n");
+	}
+}
+
+static s32
+il4965_math_div_round(s32 num, s32 denom, s32 *res)
+{
+	s32 sign = 1;
+
+	if (num < 0) {
+		sign = -sign;
+		num = -num;
+	}
+	if (denom < 0) {
+		sign = -sign;
+		denom = -denom;
+	}
+	*res = ((num * 2 + denom) / (denom * 2)) * sign;
+
+	return 1;
+}
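+/*
+ * Editor's worked example: il4965_math_div_round(7, 2, &r) computes
+ * ((7 * 2 + 2) / (2 * 2)) == 16 / 4 == 4, i.e. 7/2 rounded to nearest;
+ * il4965_math_div_round(-7, 2, &r) rounds the magnitude the same way and
+ * restores the sign, giving -4.
+ */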
+
+/**
+ * il4965_get_voltage_compensation - Power supply voltage comp for txpower
+ *
+ * Determines power supply voltage compensation for txpower calculations.
+ * Returns number of 1/2-dB steps to subtract from gain table idx,
+ * to compensate for difference between power supply voltage during
+ * factory measurements, vs. current power supply voltage.
+ *
+ * Voltage indication is higher for lower voltage.
+ * Lower voltage requires more gain (lower gain table idx).
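+ *
+ * E.g. if (current - eeprom) equals minus two "codes per 0.3V" steps, the
+ * rounded division yields comp == -2; since callers subtract comp from the
+ * gain idx, a negative comp raises the idx (less gain). Results outside
+ * [-2, 2] are treated as implausible and forced to 0.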
+ */
+static s32
+il4965_get_voltage_compensation(s32 eeprom_voltage, s32 current_voltage)
+{
+	s32 comp = 0;
+
+	if (TX_POWER_IL_ILLEGAL_VOLTAGE == eeprom_voltage ||
+	    TX_POWER_IL_ILLEGAL_VOLTAGE == current_voltage)
+		return 0;
+
+	il4965_math_div_round(current_voltage - eeprom_voltage,
+			      TX_POWER_IL_VOLTAGE_CODES_PER_03V, &comp);
+
+	if (current_voltage > eeprom_voltage)
+		comp *= 2;
+	if ((comp < -2) || (comp > 2))
+		comp = 0;
+
+	return comp;
+}
+
+static s32
+il4965_get_tx_atten_grp(u16 channel)
+{
+	if (channel >= CALIB_IL_TX_ATTEN_GR5_FCH &&
+	    channel <= CALIB_IL_TX_ATTEN_GR5_LCH)
+		return CALIB_CH_GROUP_5;
+
+	if (channel >= CALIB_IL_TX_ATTEN_GR1_FCH &&
+	    channel <= CALIB_IL_TX_ATTEN_GR1_LCH)
+		return CALIB_CH_GROUP_1;
+
+	if (channel >= CALIB_IL_TX_ATTEN_GR2_FCH &&
+	    channel <= CALIB_IL_TX_ATTEN_GR2_LCH)
+		return CALIB_CH_GROUP_2;
+
+	if (channel >= CALIB_IL_TX_ATTEN_GR3_FCH &&
+	    channel <= CALIB_IL_TX_ATTEN_GR3_LCH)
+		return CALIB_CH_GROUP_3;
+
+	if (channel >= CALIB_IL_TX_ATTEN_GR4_FCH &&
+	    channel <= CALIB_IL_TX_ATTEN_GR4_LCH)
+		return CALIB_CH_GROUP_4;
+
+	return -EINVAL;
+}
+
+static u32
+il4965_get_sub_band(const struct il_priv *il, u32 channel)
+{
+	s32 b = -1;
+
+	for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
+		if (il->calib_info->band_info[b].ch_from == 0)
+			continue;
+
+		if (channel >= il->calib_info->band_info[b].ch_from &&
+		    channel <= il->calib_info->band_info[b].ch_to)
+			break;
+	}
+
+	return b;
+}
+
+static s32
+il4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
+{
+	s32 val;
+
+	if (x2 == x1)
+		return y1;
+	else {
+		il4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
+		return val + y2;
+	}
+}
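+/*
+ * Editor's worked example: il4965_interpolate_value(40, 36, 100, 48, 80)
+ * computes (48 - 40) * (100 - 80) == 160, divides by (48 - 36) == 12 with
+ * rounding (giving 13), and adds y2 == 80, returning 93 -- the straight-
+ * line value between the two factory sample points.
+ */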
+
+/**
+ * il4965_interpolate_chan - Interpolate factory measurements for one channel
+ *
+ * Interpolates factory measurements from the two sample channels within a
+ * sub-band, to apply to channel of interest.  Interpolation is proportional to
+ * differences in channel frequencies, which is proportional to differences
+ * in channel number.
+ */
+static int
+il4965_interpolate_chan(struct il_priv *il, u32 channel,
+			struct il_eeprom_calib_ch_info *chan_info)
+{
+	s32 s = -1;
+	u32 c;
+	u32 m;
+	const struct il_eeprom_calib_measure *m1;
+	const struct il_eeprom_calib_measure *m2;
+	struct il_eeprom_calib_measure *omeas;
+	u32 ch_i1;
+	u32 ch_i2;
+
+	s = il4965_get_sub_band(il, channel);
+	if (s >= EEPROM_TX_POWER_BANDS) {
+		IL_ERR("Tx Power can not find channel %d\n", channel);
+		return -1;
+	}
+
+	ch_i1 = il->calib_info->band_info[s].ch1.ch_num;
+	ch_i2 = il->calib_info->band_info[s].ch2.ch_num;
+	chan_info->ch_num = (u8) channel;
+
+	D_TXPOWER("channel %d subband %d factory cal ch %d & %d\n", channel, s,
+		  ch_i1, ch_i2);
+
+	for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
+		for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
+			m1 = &(il->calib_info->band_info[s].ch1.
+			       measurements[c][m]);
+			m2 = &(il->calib_info->band_info[s].ch2.
+			       measurements[c][m]);
+			omeas = &(chan_info->measurements[c][m]);
+
+			omeas->actual_pow =
+			    (u8) il4965_interpolate_value(channel, ch_i1,
+							  m1->actual_pow, ch_i2,
+							  m2->actual_pow);
+			omeas->gain_idx =
+			    (u8) il4965_interpolate_value(channel, ch_i1,
+							  m1->gain_idx, ch_i2,
+							  m2->gain_idx);
+			omeas->temperature =
+			    (u8) il4965_interpolate_value(channel, ch_i1,
+							  m1->temperature,
+							  ch_i2,
+							  m2->temperature);
+			omeas->pa_det =
+			    (s8) il4965_interpolate_value(channel, ch_i1,
+							  m1->pa_det, ch_i2,
+							  m2->pa_det);
+
+			D_TXPOWER("chain %d meas %d AP1=%d AP2=%d AP=%d\n", c,
+				  m, m1->actual_pow, m2->actual_pow,
+				  omeas->actual_pow);
+			D_TXPOWER("chain %d meas %d NI1=%d NI2=%d NI=%d\n", c,
+				  m, m1->gain_idx, m2->gain_idx,
+				  omeas->gain_idx);
+			D_TXPOWER("chain %d meas %d PA1=%d PA2=%d PA=%d\n", c,
+				  m, m1->pa_det, m2->pa_det, omeas->pa_det);
+			D_TXPOWER("chain %d meas %d  T1=%d  T2=%d  T=%d\n", c,
+				  m, m1->temperature, m2->temperature,
+				  omeas->temperature);
+		}
+	}
+
+	return 0;
+}
+
+/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
+ * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */
+static s32 back_off_table[] = {
+	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM SISO 20 MHz */
+	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM MIMO 20 MHz */
+	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM SISO 40 MHz */
+	10, 10, 10, 10, 10, 15, 17, 20,	/* OFDM MIMO 40 MHz */
+	10			/* CCK */
+};
+
+/* Thermal compensation values for txpower for various frequency ranges ...
+ *   ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */
+static struct il4965_txpower_comp_entry {
+	s32 degrees_per_05db_a;
+	s32 degrees_per_05db_a_denom;
+} tx_power_cmp_table[CALIB_CH_GROUP_MAX] = {
+	{9, 2},			/* group 0 5.2GHz, ch  34-43 */
+	{4, 1},			/* group 1 5.2GHz, ch  44-70 */
+	{4, 1},			/* group 2 5.2GHz, ch  71-124 */
+	{4, 1},			/* group 3 5.2GHz, ch 125-200 */
+	{3, 1}			/* group 4 2.4GHz, all channels */
+};
+
+static s32
+get_min_power_idx(s32 rate_power_idx, u32 band)
+{
+	if (!band) {
+		if ((rate_power_idx & 7) <= 4)
+			return MIN_TX_GAIN_IDX_52GHZ_EXT;
+	}
+	return MIN_TX_GAIN_IDX;
+}
+
+struct gain_entry {
+	u8 dsp;
+	u8 radio;
+};
+
+static const struct gain_entry gain_table[2][108] = {
+	/* 5.2GHz power gain idx table */
+	{
+	 {123, 0x3F},		/* highest txpower */
+	 {117, 0x3F},
+	 {110, 0x3F},
+	 {104, 0x3F},
+	 {98, 0x3F},
+	 {110, 0x3E},
+	 {104, 0x3E},
+	 {98, 0x3E},
+	 {110, 0x3D},
+	 {104, 0x3D},
+	 {98, 0x3D},
+	 {110, 0x3C},
+	 {104, 0x3C},
+	 {98, 0x3C},
+	 {110, 0x3B},
+	 {104, 0x3B},
+	 {98, 0x3B},
+	 {110, 0x3A},
+	 {104, 0x3A},
+	 {98, 0x3A},
+	 {110, 0x39},
+	 {104, 0x39},
+	 {98, 0x39},
+	 {110, 0x38},
+	 {104, 0x38},
+	 {98, 0x38},
+	 {110, 0x37},
+	 {104, 0x37},
+	 {98, 0x37},
+	 {110, 0x36},
+	 {104, 0x36},
+	 {98, 0x36},
+	 {110, 0x35},
+	 {104, 0x35},
+	 {98, 0x35},
+	 {110, 0x34},
+	 {104, 0x34},
+	 {98, 0x34},
+	 {110, 0x33},
+	 {104, 0x33},
+	 {98, 0x33},
+	 {110, 0x32},
+	 {104, 0x32},
+	 {98, 0x32},
+	 {110, 0x31},
+	 {104, 0x31},
+	 {98, 0x31},
+	 {110, 0x30},
+	 {104, 0x30},
+	 {98, 0x30},
+	 {110, 0x25},
+	 {104, 0x25},
+	 {98, 0x25},
+	 {110, 0x24},
+	 {104, 0x24},
+	 {98, 0x24},
+	 {110, 0x23},
+	 {104, 0x23},
+	 {98, 0x23},
+	 {110, 0x22},
+	 {104, 0x18},
+	 {98, 0x18},
+	 {110, 0x17},
+	 {104, 0x17},
+	 {98, 0x17},
+	 {110, 0x16},
+	 {104, 0x16},
+	 {98, 0x16},
+	 {110, 0x15},
+	 {104, 0x15},
+	 {98, 0x15},
+	 {110, 0x14},
+	 {104, 0x14},
+	 {98, 0x14},
+	 {110, 0x13},
+	 {104, 0x13},
+	 {98, 0x13},
+	 {110, 0x12},
+	 {104, 0x08},
+	 {98, 0x08},
+	 {110, 0x07},
+	 {104, 0x07},
+	 {98, 0x07},
+	 {110, 0x06},
+	 {104, 0x06},
+	 {98, 0x06},
+	 {110, 0x05},
+	 {104, 0x05},
+	 {98, 0x05},
+	 {110, 0x04},
+	 {104, 0x04},
+	 {98, 0x04},
+	 {110, 0x03},
+	 {104, 0x03},
+	 {98, 0x03},
+	 {110, 0x02},
+	 {104, 0x02},
+	 {98, 0x02},
+	 {110, 0x01},
+	 {104, 0x01},
+	 {98, 0x01},
+	 {110, 0x00},
+	 {104, 0x00},
+	 {98, 0x00},
+	 {93, 0x00},
+	 {88, 0x00},
+	 {83, 0x00},
+	 {78, 0x00},
+	 },
+	/* 2.4GHz power gain idx table */
+	{
+	 {110, 0x3f},		/* highest txpower */
+	 {104, 0x3f},
+	 {98, 0x3f},
+	 {110, 0x3e},
+	 {104, 0x3e},
+	 {98, 0x3e},
+	 {110, 0x3d},
+	 {104, 0x3d},
+	 {98, 0x3d},
+	 {110, 0x3c},
+	 {104, 0x3c},
+	 {98, 0x3c},
+	 {110, 0x3b},
+	 {104, 0x3b},
+	 {98, 0x3b},
+	 {110, 0x3a},
+	 {104, 0x3a},
+	 {98, 0x3a},
+	 {110, 0x39},
+	 {104, 0x39},
+	 {98, 0x39},
+	 {110, 0x38},
+	 {104, 0x38},
+	 {98, 0x38},
+	 {110, 0x37},
+	 {104, 0x37},
+	 {98, 0x37},
+	 {110, 0x36},
+	 {104, 0x36},
+	 {98, 0x36},
+	 {110, 0x35},
+	 {104, 0x35},
+	 {98, 0x35},
+	 {110, 0x34},
+	 {104, 0x34},
+	 {98, 0x34},
+	 {110, 0x33},
+	 {104, 0x33},
+	 {98, 0x33},
+	 {110, 0x32},
+	 {104, 0x32},
+	 {98, 0x32},
+	 {110, 0x31},
+	 {104, 0x31},
+	 {98, 0x31},
+	 {110, 0x30},
+	 {104, 0x30},
+	 {98, 0x30},
+	 {110, 0x6},
+	 {104, 0x6},
+	 {98, 0x6},
+	 {110, 0x5},
+	 {104, 0x5},
+	 {98, 0x5},
+	 {110, 0x4},
+	 {104, 0x4},
+	 {98, 0x4},
+	 {110, 0x3},
+	 {104, 0x3},
+	 {98, 0x3},
+	 {110, 0x2},
+	 {104, 0x2},
+	 {98, 0x2},
+	 {110, 0x1},
+	 {104, 0x1},
+	 {98, 0x1},
+	 {110, 0x0},
+	 {104, 0x0},
+	 {98, 0x0},
+	 {97, 0},
+	 {96, 0},
+	 {95, 0},
+	 {94, 0},
+	 {93, 0},
+	 {92, 0},
+	 {91, 0},
+	 {90, 0},
+	 {89, 0},
+	 {88, 0},
+	 {87, 0},
+	 {86, 0},
+	 {85, 0},
+	 {84, 0},
+	 {83, 0},
+	 {82, 0},
+	 {81, 0},
+	 {80, 0},
+	 {79, 0},
+	 {78, 0},
+	 {77, 0},
+	 {76, 0},
+	 {75, 0},
+	 {74, 0},
+	 {73, 0},
+	 {72, 0},
+	 {71, 0},
+	 {70, 0},
+	 {69, 0},
+	 {68, 0},
+	 {67, 0},
+	 {66, 0},
+	 {65, 0},
+	 {64, 0},
+	 {63, 0},
+	 {62, 0},
+	 {61, 0},
+	 {60, 0},
+	 {59, 0},
+	 }
+};
+
+static int
+il4965_fill_txpower_tbl(struct il_priv *il, u8 band, u16 channel, u8 is_ht40,
+			u8 ctrl_chan_high,
+			struct il4965_tx_power_db *tx_power_tbl)
+{
+	u8 saturation_power;
+	s32 target_power;
+	s32 user_target_power;
+	s32 power_limit;
+	s32 current_temp;
+	s32 reg_limit;
+	s32 current_regulatory;
+	s32 txatten_grp = CALIB_CH_GROUP_MAX;
+	int i;
+	int c;
+	const struct il_channel_info *ch_info = NULL;
+	struct il_eeprom_calib_ch_info ch_eeprom_info;
+	const struct il_eeprom_calib_measure *measurement;
+	s16 voltage;
+	s32 init_voltage;
+	s32 voltage_compensation;
+	s32 degrees_per_05db_num;
+	s32 degrees_per_05db_denom;
+	s32 factory_temp;
+	s32 temperature_comp[2];
+	s32 factory_gain_idx[2];
+	s32 factory_actual_pwr[2];
+	s32 power_idx;
+
+	/* tx_power_user_lmt is in dBm, convert to half-dBm (half-dB units
+	 *   are used for indexing into the txpower table) */
+	user_target_power = 2 * il->tx_power_user_lmt;
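+	/* e.g. a 15 dBm user limit becomes 30 half-dBm units */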
+
+	/* Get current (RXON) channel, band, width */
+	D_TXPOWER("chan %d band %d is_ht40 %d\n", channel, band, is_ht40);
+
+	ch_info = il_get_channel_info(il, il->band, channel);
+
+	if (!il_is_channel_valid(ch_info))
+		return -EINVAL;
+
+	/* get txatten group, used to select 1) thermal txpower adjustment
+	 *   and 2) mimo txpower balance between Tx chains. */
+	txatten_grp = il4965_get_tx_atten_grp(channel);
+	if (txatten_grp < 0) {
+		IL_ERR("Can't find txatten group for channel %d.\n", channel);
+		return txatten_grp;
+	}
+
+	D_TXPOWER("channel %d belongs to txatten group %d\n", channel,
+		  txatten_grp);
+
+	if (is_ht40) {
+		if (ctrl_chan_high)
+			channel -= 2;
+		else
+			channel += 2;
+	}
+
+	/* hardware txpower limits ...
+	 * saturation (clipping distortion) txpowers are in half-dBm */
+	if (band)
+		saturation_power = il->calib_info->saturation_power24;
+	else
+		saturation_power = il->calib_info->saturation_power52;
+
+	if (saturation_power < IL_TX_POWER_SATURATION_MIN ||
+	    saturation_power > IL_TX_POWER_SATURATION_MAX) {
+		if (band)
+			saturation_power = IL_TX_POWER_DEFAULT_SATURATION_24;
+		else
+			saturation_power = IL_TX_POWER_DEFAULT_SATURATION_52;
+	}
+
+	/* regulatory txpower limits ... reg_limit values are in half-dBm,
+	 *   max_power_avg values are in dBm, convert * 2 */
+	if (is_ht40)
+		reg_limit = ch_info->ht40_max_power_avg * 2;
+	else
+		reg_limit = ch_info->max_power_avg * 2;
+
+	if ((reg_limit < IL_TX_POWER_REGULATORY_MIN) ||
+	    (reg_limit > IL_TX_POWER_REGULATORY_MAX)) {
+		if (band)
+			reg_limit = IL_TX_POWER_DEFAULT_REGULATORY_24;
+		else
+			reg_limit = IL_TX_POWER_DEFAULT_REGULATORY_52;
+	}
+
+	/* Interpolate txpower calibration values for this channel,
+	 *   based on factory calibration tests on spaced channels. */
+	il4965_interpolate_chan(il, channel, &ch_eeprom_info);
+
+	/* calculate tx gain adjustment based on power supply voltage */
+	voltage = le16_to_cpu(il->calib_info->voltage);
+	init_voltage = (s32) le32_to_cpu(il->card_alive_init.voltage);
+	voltage_compensation =
+	    il4965_get_voltage_compensation(voltage, init_voltage);
+
+	D_TXPOWER("curr volt %d eeprom volt %d volt comp %d\n", init_voltage,
+		  voltage, voltage_compensation);
+
+	/* get current temperature (Celsius) */
+	current_temp = max(il->temperature, IL_TX_POWER_TEMPERATURE_MIN);
+	current_temp = min(current_temp, IL_TX_POWER_TEMPERATURE_MAX);
+	current_temp = KELVIN_TO_CELSIUS(current_temp);
+
+	/* select thermal txpower adjustment params, based on channel group
+	 *   (same frequency group used for mimo txatten adjustment) */
+	degrees_per_05db_num =
+	    tx_power_cmp_table[txatten_grp].degrees_per_05db_a;
+	degrees_per_05db_denom =
+	    tx_power_cmp_table[txatten_grp].degrees_per_05db_a_denom;
+
+	/* get per-chain txpower values from factory measurements */
+	for (c = 0; c < 2; c++) {
+		measurement = &ch_eeprom_info.measurements[c][1];
+
+		/* txgain adjustment (in half-dB steps) based on difference
+		 *   between factory and current temperature */
+		factory_temp = measurement->temperature;
+		il4965_math_div_round((current_temp -
+				       factory_temp) * degrees_per_05db_denom,
+				      degrees_per_05db_num,
+				      &temperature_comp[c]);
+
+		factory_gain_idx[c] = measurement->gain_idx;
+		factory_actual_pwr[c] = measurement->actual_pow;
+
+		D_TXPOWER("chain = %d\n", c);
+		D_TXPOWER("fctry tmp %d, " "curr tmp %d, comp %d steps\n",
+			  factory_temp, current_temp, temperature_comp[c]);
+
+		D_TXPOWER("fctry idx %d, fctry pwr %d\n", factory_gain_idx[c],
+			  factory_actual_pwr[c]);
+	}
+
+	/* for each of 33 bit-rates (including 1 for CCK) */
+	for (i = 0; i < POWER_TBL_NUM_ENTRIES; i++) {
+		u8 is_mimo_rate;
+		union il4965_tx_power_dual_stream tx_power;
+
+		/* for mimo, reduce each chain's txpower by half
+		 * (3dB, 6 steps), so total output power is regulatory
+		 * compliant. */
+		if (i & 0x8) {
+			current_regulatory =
+			    reg_limit -
+			    IL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
+			is_mimo_rate = 1;
+		} else {
+			current_regulatory = reg_limit;
+			is_mimo_rate = 0;
+		}
+
+		/* find txpower limit, either hardware or regulatory */
+		power_limit = saturation_power - back_off_table[i];
+		if (power_limit > current_regulatory)
+			power_limit = current_regulatory;
+
+		/* reduce user's txpower request if necessary
+		 * for this rate on this channel */
+		target_power = user_target_power;
+		if (target_power > power_limit)
+			target_power = power_limit;
+
+		D_TXPOWER("rate %d sat %d reg %d usr %d tgt %d\n", i,
+			  saturation_power - back_off_table[i],
+			  current_regulatory, user_target_power, target_power);
+
+		/* for each of 2 Tx chains (radio transmitters) */
+		for (c = 0; c < 2; c++) {
+			s32 atten_value;
+
+			if (is_mimo_rate)
+				atten_value =
+				    (s32) le32_to_cpu(il->card_alive_init.
+						      tx_atten[txatten_grp][c]);
+			else
+				atten_value = 0;
+
+			/* calculate idx; higher idx means lower txpower */
+			power_idx =
+			    (u8) (factory_gain_idx[c] -
+				  (target_power - factory_actual_pwr[c]) -
+				  temperature_comp[c] - voltage_compensation +
+				  atten_value);
+
+/*			D_TXPOWER("calculated txpower idx %d\n",
+						power_idx); */
+
+			if (power_idx < get_min_power_idx(i, band))
+				power_idx = get_min_power_idx(i, band);
+
+			/* adjust 5 GHz idx to support negative idxes */
+			if (!band)
+				power_idx += 9;
+
+			/* CCK, rate 32, reduce txpower for CCK */
+			if (i == POWER_TBL_CCK_ENTRY)
+				power_idx +=
+				    IL_TX_POWER_CCK_COMPENSATION_C_STEP;
+
+			/* stay within the table! */
+			if (power_idx > 107) {
+				IL_WARN("txpower idx %d > 107\n", power_idx);
+				power_idx = 107;
+			}
+			if (power_idx < 0) {
+				IL_WARN("txpower idx %d < 0\n", power_idx);
+				power_idx = 0;
+			}
+
+			/* fill txpower command for this rate/chain */
+			tx_power.s.radio_tx_gain[c] =
+			    gain_table[band][power_idx].radio;
+			tx_power.s.dsp_predis_atten[c] =
+			    gain_table[band][power_idx].dsp;
+
+			D_TXPOWER("chain %d mimo %d idx %d "
+				  "gain 0x%02x dsp %d\n", c, atten_value,
+				  power_idx, tx_power.s.radio_tx_gain[c],
+				  tx_power.s.dsp_predis_atten[c]);
+		}		/* for each chain */
+
+		tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
+
+	}			/* for each rate */
+
+	return 0;
+}
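+/*
+ * Editor's worked example for the power_idx computation above (values
+ * invented for illustration): factory_gain_idx == 50, factory_actual_pwr
+ * == 30 half-dBm, target_power == 24 half-dBm, temperature_comp == 2,
+ * voltage_compensation == 1, atten_value == 0 give
+ *
+ *	power_idx = 50 - (24 - 30) - 2 - 1 + 0 = 53,
+ *
+ * i.e. asking for 3 dB below factory power moves 6 half-dB steps toward
+ * lower power in gain_table, and thermal/voltage corrections pull the
+ * idx back by 3 steps.
+ */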
+
+/**
+ * il4965_send_tx_power - Configure the TXPOWER level user limit
+ *
+ * Uses the active RXON for channel, band, and characteristics (ht40, high)
+ * The power limit is taken from il->tx_power_user_lmt.
+ */
+static int
+il4965_send_tx_power(struct il_priv *il)
+{
+	struct il4965_txpowertable_cmd cmd = { 0 };
+	int ret;
+	u8 band = 0;
+	bool is_ht40 = false;
+	u8 ctrl_chan_high = 0;
+
+	if (WARN_ONCE
+	    (test_bit(S_SCAN_HW, &il->status),
+	     "TX Power requested while scanning!\n"))
+		return -EAGAIN;
+
+	band = (il->band == NL80211_BAND_2GHZ);
+
+	is_ht40 = il4965_is_ht40_channel(il->active.flags);
+
+	if (is_ht40 && (il->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
+		ctrl_chan_high = 1;
+
+	cmd.band = band;
+	cmd.channel = il->active.channel;
+
+	ret =
+	    il4965_fill_txpower_tbl(il, band, le16_to_cpu(il->active.channel),
+				    is_ht40, ctrl_chan_high, &cmd.tx_power);
+	if (ret)
+		goto out;
+
+	ret = il_send_cmd_pdu(il, C_TX_PWR_TBL, sizeof(cmd), &cmd);
+
+out:
+	return ret;
+}
+
+static int
+il4965_send_rxon_assoc(struct il_priv *il)
+{
+	int ret = 0;
+	struct il4965_rxon_assoc_cmd rxon_assoc;
+	const struct il_rxon_cmd *rxon1 = &il->staging;
+	const struct il_rxon_cmd *rxon2 = &il->active;
+
+	if (rxon1->flags == rxon2->flags &&
+	    rxon1->filter_flags == rxon2->filter_flags &&
+	    rxon1->cck_basic_rates == rxon2->cck_basic_rates &&
+	    rxon1->ofdm_ht_single_stream_basic_rates ==
+	    rxon2->ofdm_ht_single_stream_basic_rates &&
+	    rxon1->ofdm_ht_dual_stream_basic_rates ==
+	    rxon2->ofdm_ht_dual_stream_basic_rates &&
+	    rxon1->rx_chain == rxon2->rx_chain &&
+	    rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates) {
+		D_INFO("Using current RXON_ASSOC.  Not resending.\n");
+		return 0;
+	}
+
+	rxon_assoc.flags = il->staging.flags;
+	rxon_assoc.filter_flags = il->staging.filter_flags;
+	rxon_assoc.ofdm_basic_rates = il->staging.ofdm_basic_rates;
+	rxon_assoc.cck_basic_rates = il->staging.cck_basic_rates;
+	rxon_assoc.reserved = 0;
+	rxon_assoc.ofdm_ht_single_stream_basic_rates =
+	    il->staging.ofdm_ht_single_stream_basic_rates;
+	rxon_assoc.ofdm_ht_dual_stream_basic_rates =
+	    il->staging.ofdm_ht_dual_stream_basic_rates;
+	rxon_assoc.rx_chain_select_flags = il->staging.rx_chain;
+
+	ret =
+	    il_send_cmd_pdu_async(il, C_RXON_ASSOC, sizeof(rxon_assoc),
+				  &rxon_assoc, NULL);
+
+	return ret;
+}
+
+static int
+il4965_commit_rxon(struct il_priv *il)
+{
+	/* cast away the const for active_rxon in this function */
+	struct il_rxon_cmd *active_rxon = (void *)&il->active;
+	int ret;
+	bool new_assoc = !!(il->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
+
+	if (!il_is_alive(il))
+		return -EBUSY;
+
+	/* always get timestamp with Rx frame */
+	il->staging.flags |= RXON_FLG_TSF2HOST_MSK;
+
+	ret = il_check_rxon_cmd(il);
+	if (ret) {
+		IL_ERR("Invalid RXON configuration.  Not committing.\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * We have received a commit_rxon request; abort any previous
+	 * channel switch that is still in progress.
+	 */
+	if (test_bit(S_CHANNEL_SWITCH_PENDING, &il->status) &&
+	    il->switch_channel != il->staging.channel) {
+		D_11H("abort channel switch on %d\n",
+		      le16_to_cpu(il->switch_channel));
+		il_chswitch_done(il, false);
+	}
+
+	/* If we don't need to send a full RXON, we can use
+	 * il_rxon_assoc_cmd which is used to reconfigure filter
+	 * and other flags for the current radio configuration. */
+	if (!il_full_rxon_required(il)) {
+		ret = il_send_rxon_assoc(il);
+		if (ret) {
+			IL_ERR("Error setting RXON_ASSOC (%d)\n", ret);
+			return ret;
+		}
+
+		memcpy(active_rxon, &il->staging, sizeof(*active_rxon));
+		il_print_rx_config_cmd(il);
+		/*
+		 * We do not commit tx power settings while channel changing,
+		 * do it now if tx power changed.
+		 */
+		il_set_tx_power(il, il->tx_power_next, false);
+		return 0;
+	}
+
+	/* If we are currently associated and the new config also wants the
+	 * associated mask enabled, we must clear the association from the
+	 * active configuration before we apply the new config */
+	if (il_is_associated(il) && new_assoc) {
+		D_INFO("Toggling associated bit on current RXON\n");
+		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+
+		ret =
+		    il_send_cmd_pdu(il, C_RXON,
+				    sizeof(struct il_rxon_cmd), active_rxon);
+
+		/* If the mask clearing failed then we set
+		 * active_rxon back to what it was previously */
+		if (ret) {
+			active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
+			IL_ERR("Error clearing ASSOC_MSK (%d)\n", ret);
+			return ret;
+		}
+		il_clear_ucode_stations(il);
+		il_restore_stations(il);
+		ret = il4965_restore_default_wep_keys(il);
+		if (ret) {
+			IL_ERR("Failed to restore WEP keys (%d)\n", ret);
+			return ret;
+		}
+	}
+
+	D_INFO("Sending RXON\n" "* with%s RXON_FILTER_ASSOC_MSK\n"
+	       "* channel = %d\n" "* bssid = %pM\n", (new_assoc ? "" : "out"),
+	       le16_to_cpu(il->staging.channel), il->staging.bssid_addr);
+
+	il_set_rxon_hwcrypto(il, !il->cfg->mod_params->sw_crypto);
+
+	/* Apply the new configuration
+	 * RXON unassoc clears the station table in uCode so restoration of
+	 * stations is needed after it (the RXON command) completes
+	 */
+	if (!new_assoc) {
+		ret =
+		    il_send_cmd_pdu(il, C_RXON,
+				    sizeof(struct il_rxon_cmd), &il->staging);
+		if (ret) {
+			IL_ERR("Error setting new RXON (%d)\n", ret);
+			return ret;
+		}
+		D_INFO("Return from !new_assoc RXON.\n");
+		memcpy(active_rxon, &il->staging, sizeof(*active_rxon));
+		il_clear_ucode_stations(il);
+		il_restore_stations(il);
+		ret = il4965_restore_default_wep_keys(il);
+		if (ret) {
+			IL_ERR("Failed to restore WEP keys (%d)\n", ret);
+			return ret;
+		}
+	}
+	if (new_assoc) {
+		il->start_calib = 0;
+		/* Apply the new configuration
+		 * RXON assoc doesn't clear the station table in uCode,
+		 */
+		ret =
+		    il_send_cmd_pdu(il, C_RXON,
+				    sizeof(struct il_rxon_cmd), &il->staging);
+		if (ret) {
+			IL_ERR("Error setting new RXON (%d)\n", ret);
+			return ret;
+		}
+		memcpy(active_rxon, &il->staging, sizeof(*active_rxon));
+	}
+	il_print_rx_config_cmd(il);
+
+	il4965_init_sensitivity(il);
+
+	/* If we issue a new RXON command that requires a tune, we must
+	 * send a new TXPOWER command or we won't be able to Tx any frames */
+	ret = il_set_tx_power(il, il->tx_power_next, true);
+	if (ret) {
+		IL_ERR("Error sending TX power (%d)\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+il4965_hw_channel_switch(struct il_priv *il,
+			 struct ieee80211_channel_switch *ch_switch)
+{
+	int rc;
+	u8 band = 0;
+	bool is_ht40 = false;
+	u8 ctrl_chan_high = 0;
+	struct il4965_channel_switch_cmd cmd;
+	const struct il_channel_info *ch_info;
+	u32 switch_time_in_usec, ucode_switch_time;
+	u16 ch;
+	u32 tsf_low;
+	u8 switch_count;
+	u16 beacon_interval = le16_to_cpu(il->timing.beacon_interval);
+	struct ieee80211_vif *vif = il->vif;
+	band = (il->band == NL80211_BAND_2GHZ);
+
+	if (WARN_ON_ONCE(vif == NULL))
+		return -EIO;
+
+	is_ht40 = il4965_is_ht40_channel(il->staging.flags);
+
+	if (is_ht40 && (il->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
+		ctrl_chan_high = 1;
+
+	cmd.band = band;
+	cmd.expect_beacon = 0;
+	ch = ch_switch->chandef.chan->hw_value;
+	cmd.channel = cpu_to_le16(ch);
+	cmd.rxon_flags = il->staging.flags;
+	cmd.rxon_filter_flags = il->staging.filter_flags;
+	switch_count = ch_switch->count;
+	tsf_low = ch_switch->timestamp & 0x0ffffffff;
+	/*
+	 * calculate the ucode channel switch time
+	 * adding TSF as one of the factor for when to switch
+	 */
+	if (il->ucode_beacon_time > tsf_low && beacon_interval) {
+		if (switch_count >
+		    ((il->ucode_beacon_time - tsf_low) / beacon_interval)) {
+			switch_count -=
+			    (il->ucode_beacon_time - tsf_low) / beacon_interval;
+		} else
+			switch_count = 0;
+	}
+	if (switch_count <= 1)
+		cmd.switch_time = cpu_to_le32(il->ucode_beacon_time);
+	else {
+		switch_time_in_usec =
+		    vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
+		ucode_switch_time =
+		    il_usecs_to_beacons(il, switch_time_in_usec,
+					beacon_interval);
+		cmd.switch_time =
+		    il_add_beacon_time(il, il->ucode_beacon_time,
+				       ucode_switch_time, beacon_interval);
+	}
+	D_11H("uCode time for the switch is 0x%x\n", cmd.switch_time);
+	ch_info = il_get_channel_info(il, il->band, ch);
+	if (ch_info)
+		cmd.expect_beacon = il_is_channel_radar(ch_info);
+	else {
+		IL_ERR("invalid channel switch from %u to %u\n",
+		       il->active.channel, ch);
+		return -EFAULT;
+	}
+
+	rc = il4965_fill_txpower_tbl(il, band, ch, is_ht40, ctrl_chan_high,
+				     &cmd.tx_power);
+	if (rc) {
+		D_11H("error:%d  fill txpower_tbl\n", rc);
+		return rc;
+	}
+
+	return il_send_cmd_pdu(il, C_CHANNEL_SWITCH, sizeof(cmd), &cmd);
+}
+
+/**
+ * il4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
+ */
+static void
+il4965_txq_update_byte_cnt_tbl(struct il_priv *il, struct il_tx_queue *txq,
+			       u16 byte_cnt)
+{
+	struct il4965_scd_bc_tbl *scd_bc_tbl = il->scd_bc_tbls.addr;
+	int txq_id = txq->q.id;
+	int write_ptr = txq->q.write_ptr;
+	int len = byte_cnt + IL_TX_CRC_SIZE + IL_TX_DELIMITER_SIZE;
+	__le16 bc_ent;
+
+	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
+
+	bc_ent = cpu_to_le16(len & 0xFFF);
+	/* Set up byte count within first 256 entries */
+	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
+
+	/* If within first 64 entries, duplicate at end */
+	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
+		scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
+		    bc_ent;
+}
+
+/**
+ * il4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
+ * @il: device context; the temperature reading comes from the uCode stats
+ *
+ * A return of <0 indicates bogus data in the stats
+ */
+static int
+il4965_hw_get_temperature(struct il_priv *il)
+{
+	s32 temperature;
+	s32 vt;
+	s32 R1, R2, R3;
+	u32 R4;
+
+	if (test_bit(S_TEMPERATURE, &il->status) &&
+	    (il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK)) {
+		D_TEMP("Running HT40 temperature calibration\n");
+		R1 = (s32) le32_to_cpu(il->card_alive_init.therm_r1[1]);
+		R2 = (s32) le32_to_cpu(il->card_alive_init.therm_r2[1]);
+		R3 = (s32) le32_to_cpu(il->card_alive_init.therm_r3[1]);
+		R4 = le32_to_cpu(il->card_alive_init.therm_r4[1]);
+	} else {
+		D_TEMP("Running temperature calibration\n");
+		R1 = (s32) le32_to_cpu(il->card_alive_init.therm_r1[0]);
+		R2 = (s32) le32_to_cpu(il->card_alive_init.therm_r2[0]);
+		R3 = (s32) le32_to_cpu(il->card_alive_init.therm_r3[0]);
+		R4 = le32_to_cpu(il->card_alive_init.therm_r4[0]);
+	}
+
+	/*
+	 * Temperature is only 23 bits, so sign extend out to 32.
+	 *
+	 * NOTE If we haven't received a stats notification yet
+	 * with an updated temperature, use R4 provided to us in the
+	 * "initialize" ALIVE response.
+	 */
+	if (!test_bit(S_TEMPERATURE, &il->status))
+		vt = sign_extend32(R4, 23);
+	else
+		vt = sign_extend32(le32_to_cpu
+				   (il->_4965.stats.general.common.temperature),
+				   23);
+
+	D_TEMP("Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
+
+	if (R3 == R1) {
+		IL_ERR("Calibration conflict R1 == R3\n");
+		return -1;
+	}
+
+	/* Calculate temperature in degrees Kelvin, adjust by 97%.
+	 * Add offset to center the adjustment around 0 degrees Centigrade. */
+	temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
+	temperature /= (R3 - R1);
+	temperature =
+	    (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
+
+	D_TEMP("Calibrated temperature: %dK, %dC\n", temperature,
+	       KELVIN_TO_CELSIUS(temperature));
+
+	return temperature;
+}
+
+/* Adjust Txpower only if temperature variance is greater than threshold. */
+#define IL_TEMPERATURE_THRESHOLD   3
+
+/**
+ * il4965_is_temp_calib_needed - determines if new calibration is needed
+ *
+ * If the temperature has changed sufficiently, then a recalibration
+ * is needed.
+ *
+ * Assumes the caller will update il->last_temperature once the
+ * calibration has executed.
+ */
+static int
+il4965_is_temp_calib_needed(struct il_priv *il)
+{
+	int temp_diff;
+
+	if (!test_bit(S_STATS, &il->status)) {
+		D_TEMP("Temperature not updated -- no stats.\n");
+		return 0;
+	}
+
+	temp_diff = il->temperature - il->last_temperature;
+
+	/* get absolute value */
+	if (temp_diff < 0) {
+		D_POWER("Getting cooler, delta %d\n", temp_diff);
+		temp_diff = -temp_diff;
+	} else if (temp_diff == 0)
+		D_POWER("Temperature unchanged\n");
+	else
+		D_POWER("Getting warmer, delta %d\n", temp_diff);
+
+	if (temp_diff < IL_TEMPERATURE_THRESHOLD) {
+		D_POWER(" => thermal txpower calib not needed\n");
+		return 0;
+	}
+
+	D_POWER(" => thermal txpower calib needed\n");
+
+	return 1;
+}
+
+void
+il4965_temperature_calib(struct il_priv *il)
+{
+	s32 temp;
+
+	temp = il4965_hw_get_temperature(il);
+	if (IL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp))
+		return;
+
+	if (il->temperature != temp) {
+		if (il->temperature)
+			D_TEMP("Temperature changed " "from %dC to %dC\n",
+			       KELVIN_TO_CELSIUS(il->temperature),
+			       KELVIN_TO_CELSIUS(temp));
+		else
+			D_TEMP("Temperature " "initialized to %dC\n",
+			       KELVIN_TO_CELSIUS(temp));
+	}
+
+	il->temperature = temp;
+	set_bit(S_TEMPERATURE, &il->status);
+
+	if (!il->disable_tx_power_cal &&
+	    unlikely(!test_bit(S_SCANNING, &il->status)) &&
+	    il4965_is_temp_calib_needed(il))
+		queue_work(il->workqueue, &il->txpower_work);
+}
+
+static u16
+il4965_get_hcmd_size(u8 cmd_id, u16 len)
+{
+	switch (cmd_id) {
+	case C_RXON:
+		return (u16) sizeof(struct il4965_rxon_cmd);
+	default:
+		return len;
+	}
+}
+
+static u16
+il4965_build_addsta_hcmd(const struct il_addsta_cmd *cmd, u8 *data)
+{
+	struct il4965_addsta_cmd *addsta = (struct il4965_addsta_cmd *)data;
+	addsta->mode = cmd->mode;
+	memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
+	memcpy(&addsta->key, &cmd->key, sizeof(struct il4965_keyinfo));
+	addsta->station_flags = cmd->station_flags;
+	addsta->station_flags_msk = cmd->station_flags_msk;
+	addsta->tid_disable_tx = cmd->tid_disable_tx;
+	addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
+	addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
+	addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
+	addsta->sleep_tx_count = cmd->sleep_tx_count;
+	addsta->reserved1 = cpu_to_le16(0);
+	addsta->reserved2 = cpu_to_le16(0);
+
+	return (u16) sizeof(struct il4965_addsta_cmd);
+}
+
+static void
+il4965_post_scan(struct il_priv *il)
+{
+	/*
+	 * Since setting the RXON may have been deferred while
+	 * performing the scan, fire one off if needed
+	 */
+	if (memcmp(&il->staging, &il->active, sizeof(il->staging)))
+		il_commit_rxon(il);
+}
+
+static void
+il4965_post_associate(struct il_priv *il)
+{
+	struct ieee80211_vif *vif = il->vif;
+	int ret = 0;
+
+	if (!vif || !il->is_open)
+		return;
+
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		return;
+
+	il_scan_cancel_timeout(il, 200);
+
+	il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+	il_commit_rxon(il);
+
+	ret = il_send_rxon_timing(il);
+	if (ret)
+		IL_WARN("RXON timing - " "Attempting to continue.\n");
+
+	il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+
+	il_set_rxon_ht(il, &il->current_ht_config);
+
+	if (il->ops->set_rxon_chain)
+		il->ops->set_rxon_chain(il);
+
+	il->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
+
+	D_ASSOC("assoc id %d beacon interval %d\n", vif->bss_conf.aid,
+		vif->bss_conf.beacon_int);
+
+	if (vif->bss_conf.use_short_preamble)
+		il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+	else
+		il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+	if (il->staging.flags & RXON_FLG_BAND_24G_MSK) {
+		if (vif->bss_conf.use_short_slot)
+			il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+		else
+			il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+	}
+
+	il_commit_rxon(il);
+
+	D_ASSOC("Associated as %d to: %pM\n", vif->bss_conf.aid,
+		il->active.bssid_addr);
+
+	switch (vif->type) {
+	case NL80211_IFTYPE_STATION:
+		break;
+	case NL80211_IFTYPE_ADHOC:
+		il4965_send_beacon_cmd(il);
+		break;
+	default:
+		IL_ERR("%s Should not be called in %d mode\n", __func__,
+		       vif->type);
+		break;
+	}
+
+	/* The chain noise calibration will enable PM upon completion.
+	 * If chain noise calibration has already been run, then we need
+	 * to enable power management here. */
+	if (il->chain_noise_data.state == IL_CHAIN_NOISE_DONE)
+		il_power_update_mode(il, false);
+
+	/* Enable Rx differential gain and sensitivity calibrations */
+	il4965_chain_noise_reset(il);
+	il->start_calib = 1;
+}
+
+static void
+il4965_config_ap(struct il_priv *il)
+{
+	struct ieee80211_vif *vif = il->vif;
+	int ret = 0;
+
+	lockdep_assert_held(&il->mutex);
+
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		return;
+
+	/* The following should be done only at AP bring up */
+	if (!il_is_associated(il)) {
+
+		/* RXON - unassoc (to set timing command) */
+		il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+		il_commit_rxon(il);
+
+		/* RXON Timing */
+		ret = il_send_rxon_timing(il);
+		if (ret)
+			IL_WARN("RXON timing failed - "
+				"Attempting to continue.\n");
+
+		/* AP has all antennas */
+		il->chain_noise_data.active_chains = il->hw_params.valid_rx_ant;
+		il_set_rxon_ht(il, &il->current_ht_config);
+		if (il->ops->set_rxon_chain)
+			il->ops->set_rxon_chain(il);
+
+		il->staging.assoc_id = 0;
+
+		if (vif->bss_conf.use_short_preamble)
+			il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+		else
+			il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+		if (il->staging.flags & RXON_FLG_BAND_24G_MSK) {
+			if (vif->bss_conf.use_short_slot)
+				il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+			else
+				il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+		}
+		/* need to send beacon cmd before committing assoc RXON! */
+		il4965_send_beacon_cmd(il);
+		/* restore RXON assoc */
+		il->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+		il_commit_rxon(il);
+	}
+	il4965_send_beacon_cmd(il);
+}
+
+const struct il_ops il4965_ops = {
+	.txq_update_byte_cnt_tbl = il4965_txq_update_byte_cnt_tbl,
+	.txq_attach_buf_to_tfd = il4965_hw_txq_attach_buf_to_tfd,
+	.txq_free_tfd = il4965_hw_txq_free_tfd,
+	.txq_init = il4965_hw_tx_queue_init,
+	.is_valid_rtc_data_addr = il4965_hw_valid_rtc_data_addr,
+	.init_alive_start = il4965_init_alive_start,
+	.load_ucode = il4965_load_bsm,
+	.dump_nic_error_log = il4965_dump_nic_error_log,
+	.dump_fh = il4965_dump_fh,
+	.set_channel_switch = il4965_hw_channel_switch,
+	.apm_init = il_apm_init,
+	.send_tx_power = il4965_send_tx_power,
+	.update_chain_flags = il4965_update_chain_flags,
+	.eeprom_acquire_semaphore = il4965_eeprom_acquire_semaphore,
+	.eeprom_release_semaphore = il4965_eeprom_release_semaphore,
+
+	.rxon_assoc = il4965_send_rxon_assoc,
+	.commit_rxon = il4965_commit_rxon,
+	.set_rxon_chain = il4965_set_rxon_chain,
+
+	.get_hcmd_size = il4965_get_hcmd_size,
+	.build_addsta_hcmd = il4965_build_addsta_hcmd,
+	.request_scan = il4965_request_scan,
+	.post_scan = il4965_post_scan,
+
+	.post_associate = il4965_post_associate,
+	.config_ap = il4965_config_ap,
+	.manage_ibss_station = il4965_manage_ibss_station,
+	.update_bcast_stations = il4965_update_bcast_stations,
+
+	.send_led_cmd = il4965_send_led_cmd,
+};
+
+struct il_cfg il4965_cfg = {
+	.name = "Intel(R) Wireless WiFi Link 4965AGN",
+	.fw_name_pre = IL4965_FW_PRE,
+	.ucode_api_max = IL4965_UCODE_API_MAX,
+	.ucode_api_min = IL4965_UCODE_API_MIN,
+	.sku = IL_SKU_A | IL_SKU_G | IL_SKU_N,
+	.valid_tx_ant = ANT_AB,
+	.valid_rx_ant = ANT_ABC,
+	.eeprom_ver = EEPROM_4965_EEPROM_VERSION,
+	.eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
+	.mod_params = &il4965_mod_params,
+	.led_mode = IL_LED_BLINK,
+	/*
+	 * Force use of chains B and C for scan RX on 5 GHz band
+	 * because the device has off-channel reception on chain A.
+	 */
+	.scan_rx_antennas[NL80211_BAND_5GHZ] = ANT_BC,
+
+	.eeprom_size = IL4965_EEPROM_IMG_SIZE,
+	.num_of_queues = IL49_NUM_QUEUES,
+	.num_of_ampdu_queues = IL49_NUM_AMPDU_QUEUES,
+	.pll_cfg_val = 0,
+	.set_l0s = true,
+	.use_bsm = true,
+	.led_compensation = 61,
+	.chain_noise_num_beacons = IL4965_CAL_NUM_BEACONS,
+	.wd_timeout = IL_DEF_WD_TIMEOUT,
+	.temperature_kelvin = true,
+	.ucode_tracing = true,
+	.sensitivity_calib_by_driver = true,
+	.chain_noise_calib_by_driver = true,
+
+	.regulatory_bands = {
+		EEPROM_REGULATORY_BAND_1_CHANNELS,
+		EEPROM_REGULATORY_BAND_2_CHANNELS,
+		EEPROM_REGULATORY_BAND_3_CHANNELS,
+		EEPROM_REGULATORY_BAND_4_CHANNELS,
+		EEPROM_REGULATORY_BAND_5_CHANNELS,
+		EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
+		EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS
+	},
+
+};
+
+/* Module firmware */
+MODULE_FIRMWARE(IL4965_MODULE_FIRMWARE(IL4965_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlegacy/4965.h b/drivers/net/wireless/intel/iwlegacy/4965.h
new file mode 100644
index 0000000..527e8b5
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/4965.h
@@ -0,0 +1,1283 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __il_4965_h__
+#define __il_4965_h__
+
+struct il_rx_queue;
+struct il_rx_buf;
+struct il_rx_pkt;
+struct il_tx_queue;
+struct il_rxon_context;
+
+/* configuration for the _4965 devices */
+extern struct il_cfg il4965_cfg;
+extern const struct il_ops il4965_ops;
+
+extern struct il_mod_params il4965_mod_params;
+
+/* tx queue */
+void il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid,
+			       int freed);
+
+/* RXON */
+void il4965_set_rxon_chain(struct il_priv *il);
+
+/* uCode */
+int il4965_verify_ucode(struct il_priv *il);
+
+/* lib */
+void il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status);
+
+void il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
+int il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq);
+int il4965_hw_nic_init(struct il_priv *il);
+int il4965_dump_fh(struct il_priv *il, char **buf, bool display);
+
+void il4965_nic_config(struct il_priv *il);
+
+/* rx */
+void il4965_rx_queue_restock(struct il_priv *il);
+void il4965_rx_replenish(struct il_priv *il);
+void il4965_rx_replenish_now(struct il_priv *il);
+void il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq);
+int il4965_rxq_stop(struct il_priv *il);
+int il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band);
+void il4965_rx_handle(struct il_priv *il);
+
+/* tx */
+void il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
+int il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
+				    dma_addr_t addr, u16 len, u8 reset, u8 pad);
+int il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
+void il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
+				 struct ieee80211_tx_info *info);
+int il4965_tx_skb(struct il_priv *il,
+		  struct ieee80211_sta *sta,
+		  struct sk_buff *skb);
+int il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
+			struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+int il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
+		       struct ieee80211_sta *sta, u16 tid);
+int il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id);
+int il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx);
+void il4965_hw_txq_ctx_free(struct il_priv *il);
+int il4965_txq_ctx_alloc(struct il_priv *il);
+void il4965_txq_ctx_reset(struct il_priv *il);
+void il4965_txq_ctx_stop(struct il_priv *il);
+void il4965_txq_set_sched(struct il_priv *il, u32 mask);
+
+/*
+ * Acquire il->lock before calling this function !
+ */
+void il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx);
+/**
+ * il4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
+ * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
+ * @scd_retry: (1) Indicates queue will be used in aggregation mode
+ *
+ * NOTE:  Acquire il->lock before calling this function !
+ */
+void il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
+				int tx_fifo_id, int scd_retry);
+
+/* scan */
+int il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif);
+
+/* station mgmt */
+int il4965_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
+			       bool add);
+
+/* hcmd */
+int il4965_send_beacon_cmd(struct il_priv *il);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+const char *il4965_get_tx_fail_reason(u32 status);
+#else
+static inline const char *
+il4965_get_tx_fail_reason(u32 status)
+{
+	return "";
+}
+#endif
+
+/* station management */
+int il4965_alloc_bcast_station(struct il_priv *il);
+int il4965_add_bssid_station(struct il_priv *il, const u8 *addr, u8 *sta_id_r);
+int il4965_remove_default_wep_key(struct il_priv *il,
+				  struct ieee80211_key_conf *key);
+int il4965_set_default_wep_key(struct il_priv *il,
+			       struct ieee80211_key_conf *key);
+int il4965_restore_default_wep_keys(struct il_priv *il);
+int il4965_set_dynamic_key(struct il_priv *il,
+			   struct ieee80211_key_conf *key, u8 sta_id);
+int il4965_remove_dynamic_key(struct il_priv *il,
+			      struct ieee80211_key_conf *key, u8 sta_id);
+void il4965_update_tkip_key(struct il_priv *il,
+			    struct ieee80211_key_conf *keyconf,
+			    struct ieee80211_sta *sta, u32 iv32,
+			    u16 *phase1key);
+int il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid);
+int il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta,
+			    int tid, u16 ssn);
+int il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta,
+			   int tid);
+void il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt);
+int il4965_update_bcast_stations(struct il_priv *il);
+
+/* rate */
+static inline u8
+il4965_hw_get_rate(__le32 rate_n_flags)
+{
+	return le32_to_cpu(rate_n_flags) & 0xFF;
+}
+
+/* eeprom */
+void il4965_eeprom_get_mac(const struct il_priv *il, u8 *mac);
+int il4965_eeprom_acquire_semaphore(struct il_priv *il);
+void il4965_eeprom_release_semaphore(struct il_priv *il);
+int il4965_eeprom_check_version(struct il_priv *il);
+
+/* mac80211 handlers (for 4965) */
+void il4965_mac_tx(struct ieee80211_hw *hw,
+		   struct ieee80211_tx_control *control,
+		   struct sk_buff *skb);
+int il4965_mac_start(struct ieee80211_hw *hw);
+void il4965_mac_stop(struct ieee80211_hw *hw);
+void il4965_configure_filter(struct ieee80211_hw *hw,
+			     unsigned int changed_flags,
+			     unsigned int *total_flags, u64 multicast);
+int il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+		       struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+		       struct ieee80211_key_conf *key);
+void il4965_mac_update_tkip_key(struct ieee80211_hw *hw,
+				struct ieee80211_vif *vif,
+				struct ieee80211_key_conf *keyconf,
+				struct ieee80211_sta *sta, u32 iv32,
+				u16 *phase1key);
+int il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			    struct ieee80211_ampdu_params *params);
+int il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		       struct ieee80211_sta *sta);
+void
+il4965_mac_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			  struct ieee80211_channel_switch *ch_switch);
+
+void il4965_led_enable(struct il_priv *il);
+
+/* EEPROM */
+#define IL4965_EEPROM_IMG_SIZE			1024
+
+/*
+ * uCode queue management definitions ...
+ * The first queue used for block-ack aggregation is #7 (4965 only).
+ * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7.
+ */
+#define IL49_FIRST_AMPDU_QUEUE	7
+
+/* Sizes and addresses for instruction and data memory (SRAM) in
+ * 4965's embedded processor.  Driver access is via HBUS_TARG_MEM_* regs. */
+#define IL49_RTC_INST_LOWER_BOUND		(0x000000)
+#define IL49_RTC_INST_UPPER_BOUND		(0x018000)
+
+#define IL49_RTC_DATA_LOWER_BOUND		(0x800000)
+#define IL49_RTC_DATA_UPPER_BOUND		(0x80A000)
+
+#define IL49_RTC_INST_SIZE  (IL49_RTC_INST_UPPER_BOUND - \
+				IL49_RTC_INST_LOWER_BOUND)
+#define IL49_RTC_DATA_SIZE  (IL49_RTC_DATA_UPPER_BOUND - \
+				IL49_RTC_DATA_LOWER_BOUND)
+
+#define IL49_MAX_INST_SIZE IL49_RTC_INST_SIZE
+#define IL49_MAX_DATA_SIZE IL49_RTC_DATA_SIZE
+
+/* Size of uCode instruction memory in bootstrap state machine */
+#define IL49_MAX_BSM_SIZE BSM_SRAM_SIZE
+
+static inline int
+il4965_hw_valid_rtc_data_addr(u32 addr)
+{
+	return (addr >= IL49_RTC_DATA_LOWER_BOUND &&
+		addr < IL49_RTC_DATA_UPPER_BOUND);
+}
+
+/********************* START TEMPERATURE *************************************/
+
+/**
+ * 4965 temperature calculation.
+ *
+ * The driver must calculate the device temperature before calculating
+ * a txpower setting (amplifier gain is temperature dependent).  The
+ * calculation uses 4 measurements, 3 of which (R1, R2, R3) are calibration
+ * values used for the life of the driver, and one of which (R4) is the
+ * real-time temperature indicator.
+ *
+ * uCode provides all 4 values to the driver via the "initialize alive"
+ * notification (see struct il4965_init_alive_resp).  After the runtime uCode
+ * image loads, uCode updates the R4 value via stats notifications
+ * (see N_STATS), which occur after each received beacon
+ * when associated, or can be requested via C_STATS.
+ *
+ * NOTE:  uCode provides the R4 value as a 23-bit signed value.  Driver
+ *        must sign-extend to 32 bits before applying formula below.
+ *
+ * Formula:
+ *
+ * degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8
+ *
+ * NOTE:  The basic formula is 259 * (R4-R2) / (R3-R1).  The 97/100 is
+ * an additional correction, which should be centered around 0 degrees
+ * Celsius (273 degrees Kelvin).  The 8 (3 percent of 273) compensates for
+ * the 97/100 scaling of that 273-degree offset, re-centering the
+ * correction around 0 degrees Celsius.
+ *
+ * Subtract 273 from the Kelvin value to find degrees Celsius, for
+ * comparing current temperature with factory-measured temperatures when
+ * calculating txpower settings.
+ */
+#define TEMPERATURE_CALIB_KELVIN_OFFSET 8
+#define TEMPERATURE_CALIB_A_VAL 259
+
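+/*
+ * Illustrative worked example of the formula above.  The calibration
+ * values here are made up for the example, not taken from any real
+ * EEPROM or "initialize alive" data:
+ *
+ *   R1 = -20, R2 = -10, R3 = 100, sign-extended R4 (vt) = 125
+ *
+ *   259 * (125 - (-10)) / (100 - (-20))  =  34965 / 120  =  291  (integer)
+ *   (291 * 97) / 100 + 8                 =  282 + 8      =  290 Kelvin
+ *
+ * 290 K - 273 = 17 degrees Celsius, which falls inside the sanity range
+ * defined below, so this reading would be accepted.
+ */
+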
+/* Limit range of calculated temperature to be between these Kelvin values */
+#define IL_TX_POWER_TEMPERATURE_MIN  (263)
+#define IL_TX_POWER_TEMPERATURE_MAX  (410)
+
+#define IL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(t) \
+	((t) < IL_TX_POWER_TEMPERATURE_MIN || \
+	 (t) > IL_TX_POWER_TEMPERATURE_MAX)
+
+void il4965_temperature_calib(struct il_priv *il);
+/********************* END TEMPERATURE ***************************************/
+
+/********************* START TXPOWER *****************************************/
+
+/**
+ * 4965 txpower calculations rely on information from three sources:
+ *
+ *     1) EEPROM
+ *     2) "initialize" alive notification
+ *     3) stats notifications
+ *
+ * EEPROM data consists of:
+ *
+ * 1)  Regulatory information (max txpower and channel usage flags) is provided
+ *     separately for each channel that can possibly be supported by 4965.
+ *     40 MHz wide (.11n HT40) channels are listed separately from 20 MHz
+ *     (legacy) channels.
+ *
+ *     See struct il4965_eeprom_channel for format, and struct il4965_eeprom
+ *     for locations in EEPROM.
+ *
+ * 2)  Factory txpower calibration information is provided separately for
+ *     sub-bands of contiguous channels.  2.4GHz has just one sub-band,
+ *     but 5 GHz has several sub-bands.
+ *
+ *     In addition, per-band (2.4 and 5 GHz) saturation txpowers are provided.
+ *
+ *     See struct il4965_eeprom_calib_info (and the tree of structures
+ *     contained within it) for format, and struct il4965_eeprom for
+ *     locations in EEPROM.
+ *
+ * "Initialization alive" notification (see struct il4965_init_alive_resp)
+ * consists of:
+ *
+ * 1)  Temperature calculation parameters.
+ *
+ * 2)  Power supply voltage measurement.
+ *
+ * 3)  Tx gain compensation to balance 2 transmitters for MIMO use.
+ *
+ * Statistics notifications deliver:
+ *
+ * 1)  Current values for temperature param R4.
+ */
+
+/**
+ * To calculate a txpower setting for a given desired target txpower, channel,
+ * modulation bit rate, and transmitter chain (4965 has 2 transmitters to
+ * support MIMO and transmit diversity), driver must do the following:
+ *
+ * 1)  Compare desired txpower vs. (EEPROM) regulatory limit for this channel.
+ *     Do not exceed regulatory limit; reduce target txpower if necessary.
+ *
+ *     If setting up txpowers for MIMO rates (rate idxes 8-15, 24-31),
+ *     2 transmitters will be used simultaneously; driver must reduce the
+ *     regulatory limit by 3 dB (half-power) for each transmitter, so the
+ *     combined total output of the 2 transmitters is within regulatory limits.
+ *
+ *
+ * 2)  Compare target txpower vs. (EEPROM) saturation txpower *reduced by
+ *     backoff for this bit rate*.  Do not exceed (saturation - backoff[rate]);
+ *     reduce target txpower if necessary.
+ *
+ *     Backoff values below are in 1/2 dB units (equivalent to steps in
+ *     txpower gain tables):
+ *
+ *     OFDM 6 - 36 MBit:  10 steps (5 dB)
+ *     OFDM 48 MBit:      15 steps (7.5 dB)
+ *     OFDM 54 MBit:      17 steps (8.5 dB)
+ *     OFDM 60 MBit:      20 steps (10 dB)
+ *     CCK all rates:     10 steps (5 dB)
+ *
+ *     Backoff values apply to saturation txpower on a per-transmitter basis;
+ *     when using MIMO (2 transmitters), each transmitter uses the same
+ *     saturation level provided in EEPROM, and the same backoff values;
+ *     no reduction (such as with regulatory txpower limits) is required.
+ *
+ *     Saturation and Backoff values apply equally to 20 MHz (legacy) channel
+ *     widths and 40 MHz (.11n HT40) channel widths; there is no separate
+ *     factory measurement for HT40 channels.
+ *
+ *     The result of this step is the final target txpower.  The rest of
+ *     the steps figure out the proper settings for the device to achieve
+ *     that target txpower.
+ *
+ *
+ * 3)  Determine (EEPROM) calibration sub band for the target channel, by
+ *     comparing against first and last channels in each sub band
+ *     (see struct il4965_eeprom_calib_subband_info).
+ *
+ *
+ * 4)  Linearly interpolate (EEPROM) factory calibration measurement sets,
+ *     referencing the 2 factory-measured (sample) channels within the sub band.
+ *
+ *     Interpolation is based on difference between target channel's frequency
+ *     and the sample channels' frequencies.  Since channel numbers are based
+ *     on frequency (5 MHz between each channel number), this is equivalent
+ *     to interpolating based on channel number differences.
+ *
+ *     Note that the sample channels may or may not be the channels at the
+ *     edges of the sub band.  The target channel may be "outside" of the
+ *     span of the sampled channels.
+ *
+ *     Driver may choose the pair (for 2 Tx chains) of measurements (see
+ *     struct il4965_eeprom_calib_ch_info) for which the actual measured
+ *     txpower comes closest to the desired txpower.  Usually, though,
+ *     the middle set of measurements is closest to the regulatory limits,
+ *     and is therefore a good choice for all txpower calculations (this
+ *     assumes that high accuracy is needed for maximizing legal txpower,
+ *     while lower txpower configurations do not need as much accuracy).
+ *
+ *     Driver should interpolate both members of the chosen measurement pair,
+ *     i.e. for both Tx chains (radio transmitters), unless the driver knows
+ *     that only one of the chains will be used (e.g. only one tx antenna
+ *     connected, but this should be unusual).  The rate scaling algorithm
+ *     switches antennas to find best performance, so both Tx chains will
+ *     be used (although only one at a time) even for non-MIMO transmissions.
+ *
+ *     Driver should interpolate factory values for temperature, gain table
+ *     idx, and actual power.  The power amplifier detector values are
+ *     not used by the driver.
+ *
+ *     Sanity check:  If the target channel happens to be one of the sample
+ *     channels, the results should agree with the sample channel's
+ *     measurements!
+ *
+ *
+ * 5)  Find difference between desired txpower and (interpolated)
+ *     factory-measured txpower.  Using (interpolated) factory gain table idx
+ *     (shown elsewhere) as a starting point, adjust this idx lower to
+ *     increase txpower, or higher to decrease txpower, until the target
+ *     txpower is reached.  Each step in the gain table is 1/2 dB.
+ *
+ *     For example, if factory measured txpower is 16 dBm, and target txpower
+ *     is 13 dBm, add 6 steps to the factory gain idx to reduce txpower
+ *     by 3 dB.
+ *
+ *
+ * 6)  Find difference between current device temperature and (interpolated)
+ *     factory-measured temperature for sub-band.  Factory values are in
+ *     degrees Celsius.  To calculate current temperature, see comments for
+ *     "4965 temperature calculation".
+ *
+ *     If current temperature is higher than factory temperature, driver must
+ *     increase gain (lower gain table idx), and vice versa.
+ *
+ *     Temperature affects gain differently for different channels:
+ *
+ *     2.4 GHz all channels:  3.5 degrees per half-dB step
+ *     5 GHz channels 34-43:  4.5 degrees per half-dB step
+ *     5 GHz channels >= 44:  4.0 degrees per half-dB step
+ *
+ *     NOTE:  Temperature can increase rapidly when transmitting, especially
+ *            with heavy traffic at high txpowers.  Driver should update
+ *            temperature calculations often under these conditions to
+ *            maintain strong txpower in the face of rising temperature.
+ *
+ *
+ * 7)  Find difference between current power supply voltage indicator
+ *     (from "initialize alive") and factory-measured power supply voltage
+ *     indicator (EEPROM).
+ *
+ *     If the current voltage is higher (indicator is lower) than factory
+ *     voltage, gain should be reduced (gain table idx increased) by:
+ *
+ *     (eeprom - current) / 7
+ *
+ *     If the current voltage is lower (indicator is higher) than factory
+ *     voltage, gain should be increased (gain table idx decreased) by:
+ *
+ *     2 * (current - eeprom) / 7
+ *
+ *     If number of idx steps in either direction turns out to be > 2,
+ *     something is wrong ... just use 0.
+ *
+ *     NOTE:  Voltage compensation is independent of band/channel.
+ *
+ *     NOTE:  "Initialize" uCode measures current voltage, which is assumed
+ *            to be constant after this initial measurement.  Voltage
+ *            compensation for txpower (number of steps in gain table)
+ *            may be calculated once and used until the next uCode bootload.
+ *
+ *
+ * 8)  If setting up txpowers for MIMO rates (rate idxes 8-15, 24-31),
+ *     adjust txpower for each transmitter chain, so txpower is balanced
+ *     between the two chains.  There are 5 pairs of tx_atten[group][chain]
+ *     values in "initialize alive", one pair for each of 5 channel ranges:
+ *
+ *     Group 0:  5 GHz channel 34-43
+ *     Group 1:  5 GHz channel 44-70
+ *     Group 2:  5 GHz channel 71-124
+ *     Group 3:  5 GHz channel 125-200
+ *     Group 4:  2.4 GHz all channels
+ *
+ *     Add the tx_atten[group][chain] value to the idx for the target chain.
+ *     The values are signed, but are in pairs of 0 and a non-negative number,
+ *     so as to reduce gain (if necessary) of the "hotter" channel.  This
+ *     avoids any need to double-check for regulatory compliance after
+ *     this step.
+ *
+ *
+ * 9)  If setting up for a CCK rate, lower the gain by adding a CCK compensation
+ *     value to the idx:
+ *
+ *     Hardware rev B:  9 steps (4.5 dB)
+ *     Hardware rev C:  5 steps (2.5 dB)
+ *
+ *     Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
+ *     bits [3:2], 1 = B, 2 = C.
+ *
+ *     NOTE:  This compensation is in addition to any saturation backoff that
+ *            might have been applied in an earlier step.
+ *
+ *
+ * 10) Select the gain table, based on band (2.4 vs 5 GHz).
+ *
+ *     Limit the adjusted idx to stay within the table!
+ *
+ *
+ * 11) Read gain table entries for DSP and radio gain, place into appropriate
+ *     location(s) in command (struct il4965_txpowertable_cmd).
+ */
+
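+/*
+ * A minimal C sketch of steps 1, 2 and 5 above, for orientation only.
+ * Every name in it (target_pwr, regulatory, saturation, backoff,
+ * factory_pwr, factory_gain_idx) is a hypothetical local, not a driver
+ * symbol; the real implementation is il4965_fill_txpower_tbl() in 4965.c.
+ * All power values are in half-dB units, matching the gain table steps.
+ */
+#if 0	/* illustrative fragment, never compiled */
+	/* Step 1: never exceed the regulatory limit (for MIMO rates the
+	 * per-chain limit is first reduced by 3 dB, i.e. by
+	 * IL_TX_POWER_MIMO_REGULATORY_COMPENSATION half-dB steps, below). */
+	if (target_pwr > regulatory)
+		target_pwr = regulatory;
+
+	/* Step 2: never exceed saturation minus the per-rate backoff. */
+	if (target_pwr > saturation - backoff)
+		target_pwr = saturation - backoff;
+
+	/* Step 5: one gain-table step per half-dB; a lower idx means more
+	 * gain, so subtract the half-dB shortfall from the factory idx. */
+	gain_idx = factory_gain_idx - (target_pwr - factory_pwr);
+#endif
+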
+/**
+ * When MIMO is used (2 transmitters operating simultaneously), driver should
+ * limit each transmitter to deliver a max of 3 dB below the regulatory limit
+ * for the device.  That is, use half power for each transmitter, so total
+ * txpower is within regulatory limits.
+ *
+ * The value "6" represents number of steps in gain table to reduce power 3 dB.
+ * Each step is 1/2 dB.
+ */
+#define IL_TX_POWER_MIMO_REGULATORY_COMPENSATION (6)
+
+/**
+ * CCK gain compensation.
+ *
+ * When calculating txpowers for CCK, after making sure that the target power
+ * is within regulatory and saturation limits, driver must additionally
+ * back off gain by adding these values to the gain table idx.
+ *
+ * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
+ * bits [3:2], 1 = B, 2 = C.
+ */
+#define IL_TX_POWER_CCK_COMPENSATION_B_STEP (9)
+#define IL_TX_POWER_CCK_COMPENSATION_C_STEP (5)
+
+/*
+ * 4965 power supply voltage compensation for txpower
+ */
+#define TX_POWER_IL_VOLTAGE_CODES_PER_03V   (7)
+
+/**
+ * Gain tables.
+ *
+ * The following tables contain pair of values for setting txpower, i.e.
+ * gain settings for the output of the device's digital signal processor (DSP),
+ * and for the analog gain structure of the transmitter.
+ *
+ * Each entry in the gain tables represents a step of 1/2 dB.  Note that these
+ * are *relative* steps, not indications of absolute output power.  Output
+ * power varies with temperature, voltage, and channel frequency, and also
+ * requires consideration of average power (to satisfy regulatory constraints),
+ * and peak power (to avoid distortion of the output signal).
+ *
+ * Each entry contains two values:
+ * 1)  DSP gain (or sometimes called DSP attenuation).  This is a fine-grained
+ *     linear value that multiplies the output of the digital signal processor,
+ *     before being sent to the analog radio.
+ * 2)  Radio gain.  This sets the analog gain of the radio Tx path.
+ *     It is a coarser setting, and behaves in a logarithmic (dB) fashion.
+ *
+ * EEPROM contains factory calibration data for txpower.  This maps actual
+ * measured txpower levels to gain settings in the "well known" tables
+ * below ("well-known" means here that both factory calibration *and* the
+ * driver work with the same table).
+ *
+ * There are separate tables for 2.4 GHz and 5 GHz bands.  The 5 GHz table
+ * has an extension (into negative idxes), in case the driver needs to
+ * boost power setting for high device temperatures (higher than would be
+ * present during factory calibration).  A 5 GHz EEPROM idx of "40"
+ * corresponds to the 49th entry in the table used by the driver.
+ */
+#define MIN_TX_GAIN_IDX		(0)	/* highest gain, lowest idx, 2.4 */
+#define MIN_TX_GAIN_IDX_52GHZ_EXT	(-9)	/* highest gain, lowest idx, 5 */
+
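+/*
+ * Example of the 5 GHz idx extension described above: the driver's table
+ * position is (eeprom_idx - MIN_TX_GAIN_IDX_52GHZ_EXT), so an EEPROM idx
+ * of 40 lands at position 40 - (-9) = 49, the "49th entry" noted above.
+ */
+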
+/**
+ * 2.4 GHz gain table
+ *
+ * Index    Dsp gain   Radio gain
+ *   0        110         0x3f      (highest gain)
+ *   1        104         0x3f
+ *   2         98         0x3f
+ *   3        110         0x3e
+ *   4        104         0x3e
+ *   5         98         0x3e
+ *   6        110         0x3d
+ *   7        104         0x3d
+ *   8         98         0x3d
+ *   9        110         0x3c
+ *  10        104         0x3c
+ *  11         98         0x3c
+ *  12        110         0x3b
+ *  13        104         0x3b
+ *  14         98         0x3b
+ *  15        110         0x3a
+ *  16        104         0x3a
+ *  17         98         0x3a
+ *  18        110         0x39
+ *  19        104         0x39
+ *  20         98         0x39
+ *  21        110         0x38
+ *  22        104         0x38
+ *  23         98         0x38
+ *  24        110         0x37
+ *  25        104         0x37
+ *  26         98         0x37
+ *  27        110         0x36
+ *  28        104         0x36
+ *  29         98         0x36
+ *  30        110         0x35
+ *  31        104         0x35
+ *  32         98         0x35
+ *  33        110         0x34
+ *  34        104         0x34
+ *  35         98         0x34
+ *  36        110         0x33
+ *  37        104         0x33
+ *  38         98         0x33
+ *  39        110         0x32
+ *  40        104         0x32
+ *  41         98         0x32
+ *  42        110         0x31
+ *  43        104         0x31
+ *  44         98         0x31
+ *  45        110         0x30
+ *  46        104         0x30
+ *  47         98         0x30
+ *  48        110          0x6
+ *  49        104          0x6
+ *  50         98          0x6
+ *  51        110          0x5
+ *  52        104          0x5
+ *  53         98          0x5
+ *  54        110          0x4
+ *  55        104          0x4
+ *  56         98          0x4
+ *  57        110          0x3
+ *  58        104          0x3
+ *  59         98          0x3
+ *  60        110          0x2
+ *  61        104          0x2
+ *  62         98          0x2
+ *  63        110          0x1
+ *  64        104          0x1
+ *  65         98          0x1
+ *  66        110          0x0
+ *  67        104          0x0
+ *  68         98          0x0
+ *  69         97            0
+ *  70         96            0
+ *  71         95            0
+ *  72         94            0
+ *  73         93            0
+ *  74         92            0
+ *  75         91            0
+ *  76         90            0
+ *  77         89            0
+ *  78         88            0
+ *  79         87            0
+ *  80         86            0
+ *  81         85            0
+ *  82         84            0
+ *  83         83            0
+ *  84         82            0
+ *  85         81            0
+ *  86         80            0
+ *  87         79            0
+ *  88         78            0
+ *  89         77            0
+ *  90         76            0
+ *  91         75            0
+ *  92         74            0
+ *  93         73            0
+ *  94         72            0
+ *  95         71            0
+ *  96         70            0
+ *  97         69            0
+ *  98         68            0
+ */
+
+/**
+ * 5 GHz gain table
+ *
+ * Index    Dsp gain   Radio gain
+ *  -9        123         0x3F      (highest gain)
+ *  -8        117         0x3F
+ *  -7        110         0x3F
+ *  -6        104         0x3F
+ *  -5         98         0x3F
+ *  -4        110         0x3E
+ *  -3        104         0x3E
+ *  -2         98         0x3E
+ *  -1        110         0x3D
+ *   0        104         0x3D
+ *   1         98         0x3D
+ *   2        110         0x3C
+ *   3        104         0x3C
+ *   4         98         0x3C
+ *   5        110         0x3B
+ *   6        104         0x3B
+ *   7         98         0x3B
+ *   8        110         0x3A
+ *   9        104         0x3A
+ *  10         98         0x3A
+ *  11        110         0x39
+ *  12        104         0x39
+ *  13         98         0x39
+ *  14        110         0x38
+ *  15        104         0x38
+ *  16         98         0x38
+ *  17        110         0x37
+ *  18        104         0x37
+ *  19         98         0x37
+ *  20        110         0x36
+ *  21        104         0x36
+ *  22         98         0x36
+ *  23        110         0x35
+ *  24        104         0x35
+ *  25         98         0x35
+ *  26        110         0x34
+ *  27        104         0x34
+ *  28         98         0x34
+ *  29        110         0x33
+ *  30        104         0x33
+ *  31         98         0x33
+ *  32        110         0x32
+ *  33        104         0x32
+ *  34         98         0x32
+ *  35        110         0x31
+ *  36        104         0x31
+ *  37         98         0x31
+ *  38        110         0x30
+ *  39        104         0x30
+ *  40         98         0x30
+ *  41        110         0x25
+ *  42        104         0x25
+ *  43         98         0x25
+ *  44        110         0x24
+ *  45        104         0x24
+ *  46         98         0x24
+ *  47        110         0x23
+ *  48        104         0x23
+ *  49         98         0x23
+ *  50        110         0x22
+ *  51        104         0x18
+ *  52         98         0x18
+ *  53        110         0x17
+ *  54        104         0x17
+ *  55         98         0x17
+ *  56        110         0x16
+ *  57        104         0x16
+ *  58         98         0x16
+ *  59        110         0x15
+ *  60        104         0x15
+ *  61         98         0x15
+ *  62        110         0x14
+ *  63        104         0x14
+ *  64         98         0x14
+ *  65        110         0x13
+ *  66        104         0x13
+ *  67         98         0x13
+ *  68        110         0x12
+ *  69        104         0x08
+ *  70         98         0x08
+ *  71        110         0x07
+ *  72        104         0x07
+ *  73         98         0x07
+ *  74        110         0x06
+ *  75        104         0x06
+ *  76         98         0x06
+ *  77        110         0x05
+ *  78        104         0x05
+ *  79         98         0x05
+ *  80        110         0x04
+ *  81        104         0x04
+ *  82         98         0x04
+ *  83        110         0x03
+ *  84        104         0x03
+ *  85         98         0x03
+ *  86        110         0x02
+ *  87        104         0x02
+ *  88         98         0x02
+ *  89        110         0x01
+ *  90        104         0x01
+ *  91         98         0x01
+ *  92        110         0x00
+ *  93        104         0x00
+ *  94         98         0x00
+ *  95         93         0x00
+ *  96         88         0x00
+ *  97         83         0x00
+ *  98         78         0x00
+ */
+
+/**
+ * Sanity checks and default values for EEPROM regulatory levels.
+ * If EEPROM values fall outside MIN/MAX range, use default values.
+ *
+ * Regulatory limits refer to the maximum average txpower allowed by
+ * regulatory agencies in the geographies in which the device is meant
+ * to be operated.  These limits are SKU-specific (i.e. geography-specific),
+ * and channel-specific; each channel has an individual regulatory limit
+ * listed in the EEPROM.
+ *
+ * Units are in half-dBm (i.e. "34" means 17 dBm).
+ */
+#define IL_TX_POWER_DEFAULT_REGULATORY_24   (34)
+#define IL_TX_POWER_DEFAULT_REGULATORY_52   (34)
+#define IL_TX_POWER_REGULATORY_MIN          (0)
+#define IL_TX_POWER_REGULATORY_MAX          (34)
+
+/**
+ * Sanity checks and default values for EEPROM saturation levels.
+ * If EEPROM values fall outside MIN/MAX range, use default values.
+ *
+ * Saturation is the highest level that the output power amplifier can produce
+ * without significant clipping distortion.  This is a "peak" power level.
+ * Different types of modulation (i.e. various "rates", and OFDM vs. CCK)
+ * require differing amounts of backoff, relative to their average power output,
+ * in order to avoid clipping distortion.
+ *
+ * Driver must make sure that it violates neither the saturation limit
+ * nor the regulatory limit when calculating Tx power settings for various
+ * rates.
+ *
+ * Units are in half-dBm (i.e. "38" means 19 dBm).
+ */
+#define IL_TX_POWER_DEFAULT_SATURATION_24   (38)
+#define IL_TX_POWER_DEFAULT_SATURATION_52   (38)
+#define IL_TX_POWER_SATURATION_MIN          (20)
+#define IL_TX_POWER_SATURATION_MAX          (50)
+
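+/*
+ * Sketch of the sanity clamps described in this block and the regulatory
+ * block above (regulatory and saturation are assumed locals, not driver
+ * symbols; units are half-dBm, so 34 means 17 dBm):
+ */
+#if 0	/* illustrative fragment, never compiled */
+	if (regulatory < IL_TX_POWER_REGULATORY_MIN ||
+	    regulatory > IL_TX_POWER_REGULATORY_MAX)
+		regulatory = IL_TX_POWER_DEFAULT_REGULATORY_24;	/* 2.4 GHz */
+
+	if (saturation < IL_TX_POWER_SATURATION_MIN ||
+	    saturation > IL_TX_POWER_SATURATION_MAX)
+		saturation = IL_TX_POWER_DEFAULT_SATURATION_24;
+#endif
+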
+/**
+ * Channel groups used for Tx Attenuation calibration (MIMO tx channel balance)
+ * and thermal Txpower calibration.
+ *
+ * When calculating txpower, driver must compensate for current device
+ * temperature; higher temperature requires higher gain.  Driver must calculate
+ * current temperature (see "4965 temperature calculation"), then compare vs.
+ * factory calibration temperature in EEPROM; if current temperature is higher
+ * than factory temperature, driver must *increase* gain by proportions shown
+ * in table below.  If current temperature is lower than factory, driver must
+ * *decrease* gain.
+ *
+ * Different frequency ranges require different compensation, as shown below.
+ */
+/* Group 0, 5.2 GHz ch 34-43:  4.5 degrees per 1/2 dB. */
+#define CALIB_IL_TX_ATTEN_GR1_FCH 34
+#define CALIB_IL_TX_ATTEN_GR1_LCH 43
+
+/* Group 1, 5.3 GHz ch 44-70:  4.0 degrees per 1/2 dB. */
+#define CALIB_IL_TX_ATTEN_GR2_FCH 44
+#define CALIB_IL_TX_ATTEN_GR2_LCH 70
+
+/* Group 2, 5.5 GHz ch 71-124:  4.0 degrees per 1/2 dB. */
+#define CALIB_IL_TX_ATTEN_GR3_FCH 71
+#define CALIB_IL_TX_ATTEN_GR3_LCH 124
+
+/* Group 3, 5.7 GHz ch 125-200:  4.0 degrees per 1/2 dB. */
+#define CALIB_IL_TX_ATTEN_GR4_FCH 125
+#define CALIB_IL_TX_ATTEN_GR4_LCH 200
+
+/* Group 4, 2.4 GHz all channels:  3.5 degrees per 1/2 dB. */
+#define CALIB_IL_TX_ATTEN_GR5_FCH 1
+#define CALIB_IL_TX_ATTEN_GR5_LCH 20
+
+enum {
+	CALIB_CH_GROUP_1 = 0,
+	CALIB_CH_GROUP_2 = 1,
+	CALIB_CH_GROUP_3 = 2,
+	CALIB_CH_GROUP_4 = 3,
+	CALIB_CH_GROUP_5 = 4,
+	CALIB_CH_GROUP_MAX
+};
+
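+/*
+ * A minimal channel-to-group lookup following the FCH/LCH bounds above;
+ * the driver's txpower code in 4965.c performs the equivalent lookup.
+ * This sketch is illustrative only and is not compiled.
+ */
+#if 0
+static s32 example_tx_atten_grp(u16 channel)
+{
+	if (channel >= CALIB_IL_TX_ATTEN_GR5_FCH &&
+	    channel <= CALIB_IL_TX_ATTEN_GR5_LCH)
+		return CALIB_CH_GROUP_5;	/* 2.4 GHz */
+	if (channel >= CALIB_IL_TX_ATTEN_GR1_FCH &&
+	    channel <= CALIB_IL_TX_ATTEN_GR1_LCH)
+		return CALIB_CH_GROUP_1;	/* 5.2 GHz ch 34-43 */
+	if (channel >= CALIB_IL_TX_ATTEN_GR2_FCH &&
+	    channel <= CALIB_IL_TX_ATTEN_GR2_LCH)
+		return CALIB_CH_GROUP_2;	/* 5.3 GHz ch 44-70 */
+	if (channel >= CALIB_IL_TX_ATTEN_GR3_FCH &&
+	    channel <= CALIB_IL_TX_ATTEN_GR3_LCH)
+		return CALIB_CH_GROUP_3;	/* 5.5 GHz ch 71-124 */
+	if (channel >= CALIB_IL_TX_ATTEN_GR4_FCH &&
+	    channel <= CALIB_IL_TX_ATTEN_GR4_LCH)
+		return CALIB_CH_GROUP_4;	/* 5.7 GHz ch 125-200 */
+	return -1;	/* channel not covered by any calibration group */
+}
+#endif
+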
+/********************* END TXPOWER *****************************************/
+
+/**
+ * Tx/Rx Queues
+ *
+ * Most communication between driver and 4965 is via queues of data buffers.
+ * For example, all commands that the driver issues to device's embedded
+ * controller (uCode) are via the command queue (one of the Tx queues).  All
+ * uCode command responses/replies/notifications, including Rx frames, are
+ * conveyed from uCode to driver via the Rx queue.
+ *
+ * Most support for these queues, including handshake support, resides in
+ * structures in host DRAM, shared between the driver and the device.  When
+ * allocating this memory, the driver must make sure that data written by
+ * the host CPU updates DRAM immediately (and does not get "stuck" in CPU's
+ * cache memory), so DRAM and cache are consistent, and the device can
+ * immediately see changes made by the driver.
+ *
+ * 4965 supports up to 16 DRAM-based Tx queues, and services these queues via
+ * up to 7 DMA channels (FIFOs).  Each Tx queue is supported by a circular array
+ * in DRAM containing 256 Transmit Frame Descriptors (TFDs).
+ */
+#define IL49_NUM_FIFOS	7
+#define IL49_CMD_FIFO_NUM	4
+#define IL49_NUM_QUEUES	16
+#define IL49_NUM_AMPDU_QUEUES	8
+
+/**
+ * struct il4965_scd_bc_tbl
+ *
+ * Byte Count table
+ *
+ * Each Tx queue uses a byte-count table containing 320 entries:
+ * one 16-bit entry for each of 256 TFDs, plus an additional 64 entries that
+ * duplicate the first 64 entries (to avoid wrap-around within a Tx win;
+ * max Tx win is 64 TFDs).
+ *
+ * When driver sets up a new TFD, it must also enter the total byte count
+ * of the frame to be transmitted into the corresponding entry in the byte
+ * count table for the chosen Tx queue.  If the TFD idx is 0-63, the driver
+ * must duplicate the byte count entry in corresponding idx 256-319.
+ *
+ * Padding puts each byte count table on a 1024-byte boundary;
+ * 4965 assumes tables are separated by 1024 bytes.
+ */
+struct il4965_scd_bc_tbl {
+	__le16 tfd_offset[TFD_QUEUE_BC_SIZE];
+	u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];
+} __packed;
+
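+/*
+ * Example of the duplication rule above: a frame queued at TFD idx 5
+ * (< 64) has its length written to tfd_offset[5] *and* tfd_offset[256 + 5];
+ * a frame at idx 100 only updates tfd_offset[100].  This is exactly what
+ * il4965_txq_update_byte_cnt_tbl() in 4965.c does.
+ */
+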
+#define IL4965_RTC_INST_LOWER_BOUND		(0x000000)
+
+/* RSSI to dBm */
+#define IL4965_RSSI_OFFSET	44
+
+/* PCI registers */
+#define PCI_CFG_RETRY_TIMEOUT	0x041
+
+#define IL4965_DEFAULT_TX_RETRY  15
+
+/* AMPDU queues */
+#define IL4965_FIRST_AMPDU_QUEUE	10
+
+/* Calibration */
+void il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp);
+void il4965_sensitivity_calibration(struct il_priv *il, void *resp);
+void il4965_init_sensitivity(struct il_priv *il);
+void il4965_reset_run_time_calib(struct il_priv *il);
+
+/* Debug */
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+extern const struct il_debugfs_ops il4965_debugfs_ops;
+#endif
+
+/****************************/
+/* Flow Handler Definitions */
+/****************************/
+
+/**
+ * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
+ * Addresses are offsets from device's PCI hardware base address.
+ */
+#define FH49_MEM_LOWER_BOUND                   (0x1000)
+#define FH49_MEM_UPPER_BOUND                   (0x2000)
+
+/**
+ * Keep-Warm (KW) buffer base address.
+ *
+ * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
+ * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
+ * DRAM access when 4965 is Txing or Rxing.  The dummy accesses prevent host
+ * from going into a power-savings mode that would cause higher DRAM latency,
+ * and possible data over/under-runs, before all Tx/Rx is complete.
+ *
+ * Driver loads FH49_KW_MEM_ADDR_REG with the physical address (bits 35:4)
+ * of the buffer, which must be 4K aligned.  Once this is set up, the 4965
+ * automatically invokes keep-warm accesses when normal accesses might not
+ * be sufficient to maintain fast DRAM response.
+ *
+ * Bit fields:
+ *  31-0:  Keep-warm buffer physical base address [35:4], must be 4K aligned
+ */
+#define FH49_KW_MEM_ADDR_REG		     (FH49_MEM_LOWER_BOUND + 0x97C)
+
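+/*
+ * Hypothetical setup for the keep-warm buffer above, assuming kw_dma is
+ * the 4K-aligned physical address of the buffer and using the iwlegacy
+ * register-write helper; >> 4 keeps address bits 35:4 as the register
+ * expects:
+ *
+ *	il_wr(il, FH49_KW_MEM_ADDR_REG, kw_dma >> 4);
+ */
+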
+/**
+ * TFD Circular Buffers Base (CBBC) addresses
+ *
+ * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
+ * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
+ * (see struct il_tfd_frame).  These 16 pointer registers are offset by 0x04
+ * bytes from one another.  Each TFD circular buffer in DRAM must be 256-byte
+ * aligned (address bits 0-7 must be 0).
+ *
+ * Bit fields in each pointer register:
+ *  27-0: TFD CB physical base address [35:8], must be 256-byte aligned
+ */
+#define FH49_MEM_CBBC_LOWER_BOUND          (FH49_MEM_LOWER_BOUND + 0x9D0)
+#define FH49_MEM_CBBC_UPPER_BOUND          (FH49_MEM_LOWER_BOUND + 0xA10)
+
+/* Find TFD CB base pointer for given queue (range 0-15). */
+#define FH49_MEM_CBBC_QUEUE(x)  (FH49_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
+
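+/*
+ * Hypothetical use of the macro above when setting up Tx queue txq_id,
+ * assuming txq_dma holds the 256-byte-aligned physical base of its TFD
+ * circular buffer; >> 8 supplies address bits [35:8]:
+ *
+ *	il_wr(il, FH49_MEM_CBBC_QUEUE(txq_id), txq_dma >> 8);
+ */
+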
+/**
+ * Rx SRAM Control and Status Registers (RSCSR)
+ *
+ * These registers provide handshake between driver and 4965 for the Rx queue
+ * (this queue handles *all* command responses, notifications, Rx data, etc.
+ * sent from 4965 uCode to host driver).  Unlike Tx, there is only one Rx
+ * queue, and only one Rx DMA/FIFO channel.  Also unlike Tx, which can
+ * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
+ * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
+ * mapping between RBDs and RBs.
+ *
+ * Driver must allocate host DRAM memory for the following, and set the
+ * physical address of each into 4965 registers:
+ *
+ * 1)  Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
+ *     entries (although any power of 2, up to 4096, is selectable by driver).
+ *     Each entry (1 dword) points to a receive buffer (RB) of consistent size
+ *     (typically 4K, although 8K or 16K are also selectable by driver).
+ *     Driver sets up RB size and number of RBDs in the CB via Rx config
+ *     register FH49_MEM_RCSR_CHNL0_CONFIG_REG.
+ *
+ *     Bit fields within one RBD:
+ *     27-0:  Receive Buffer physical address bits [35:8], 256-byte aligned
+ *
+ *     Driver sets physical address [35:8] of base of RBD circular buffer
+ *     into FH49_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
+ *
+ * 2)  Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
+ *     (RBs) have been filled, via a "write pointer", actually the idx of
+ *     the RB's corresponding RBD within the circular buffer.  Driver sets
+ *     physical address [35:4] into FH49_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
+ *
+ *     Bit fields in lower dword of Rx status buffer (upper dword not used
+ *     by driver; see struct il4965_shared, val0):
+ *     31-12:  Not used by driver
+ *     11- 0:  Index of last filled Rx buffer descriptor
+ *             (4965 writes, driver reads this value)
+ *
+ * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
+ * enter pointers to these RBs into contiguous RBD circular buffer entries,
+ * and update the 4965's "write" idx register,
+ * FH49_RSCSR_CHNL0_RBDCB_WPTR_REG.
+ *
+ * This "write" idx corresponds to the *next* RBD that the driver will make
+ * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
+ * the circular buffer.  This value should initially be 0 (before preparing any
+ * RBs), should be 8 after preparing the first 8 RBs (for example), and must
+ * wrap back to 0 at the end of the circular buffer (but don't wrap before
+ * "read" idx has advanced past 1!  See below).
+ * NOTE:  4965 EXPECTS THE WRITE IDX TO BE INCREMENTED IN MULTIPLES OF 8.
+ *
+ * As the 4965 fills RBs (referenced from contiguous RBDs within the circular
+ * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
+ * to tell the driver the idx of the latest filled RBD.  The driver must
+ * read this "read" idx from DRAM after receiving an Rx interrupt from 4965.
+ *
+ * The driver must also internally keep track of a third idx, which is the
+ * next RBD to process.  When receiving an Rx interrupt, driver should process
+ * all filled but unprocessed RBs up to, but not including, the RB
+ * corresponding to the "read" idx.  For example, if "read" idx becomes "1",
+ * driver may process the RB pointed to by RBD 0.  Depending on volume of
+ * traffic, there may be many RBs to process.
+ *
+ * If read idx == write idx, 4965 thinks there is no room to put new data.
+ * Due to this, the maximum number of filled RBs is 255, instead of 256.  To
+ * be safe, make sure that there is a gap of at least 2 RBDs between "write"
+ * and "read" idxes; that is, make sure that there are no more than 254
+ * buffers waiting to be filled.
+ */
+#define FH49_MEM_RSCSR_LOWER_BOUND	(FH49_MEM_LOWER_BOUND + 0xBC0)
+#define FH49_MEM_RSCSR_UPPER_BOUND	(FH49_MEM_LOWER_BOUND + 0xC00)
+#define FH49_MEM_RSCSR_CHNL0		(FH49_MEM_RSCSR_LOWER_BOUND)
+
+/**
+ * Physical base address of 8-byte Rx Status buffer.
+ * Bit fields:
+ *  31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned.
+ */
+#define FH49_RSCSR_CHNL0_STTS_WPTR_REG	(FH49_MEM_RSCSR_CHNL0)
+
+/**
+ * Physical base address of Rx Buffer Descriptor Circular Buffer.
+ * Bit fields:
+ *  27-0:  RBD CB physical base address [35:8], must be 256-byte aligned.
+ */
+#define FH49_RSCSR_CHNL0_RBDCB_BASE_REG	(FH49_MEM_RSCSR_CHNL0 + 0x004)
+
+/**
+ * Rx write pointer (idx, really!).
+ * Bit fields:
+ *  11-0:  Index of driver's most recent prepared-to-be-filled RBD, + 1.
+ *         NOTE:  For 256-entry circular buffer, use only bits [7:0].
+ */
+#define FH49_RSCSR_CHNL0_RBDCB_WPTR_REG	(FH49_MEM_RSCSR_CHNL0 + 0x008)
+#define FH49_RSCSR_CHNL0_WPTR        (FH49_RSCSR_CHNL0_RBDCB_WPTR_REG)
+
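+/*
+ * Sketch of the Rx setup writes implied above.  rbd_dma and stts_dma are
+ * assumed locals holding the 256-byte-aligned RBD CB base and the
+ * 16-byte-aligned status buffer base; they are not driver symbols:
+ *
+ *	il_wr(il, FH49_RSCSR_CHNL0_RBDCB_BASE_REG, rbd_dma >> 8);
+ *	il_wr(il, FH49_RSCSR_CHNL0_STTS_WPTR_REG, stts_dma >> 4);
+ *	il_wr(il, FH49_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);	(no RBs ready yet)
+ */
+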
+/**
+ * Rx Config/Status Registers (RCSR)
+ * Rx Config Reg for channel 0 (only channel used)
+ *
+ * Driver must initialize FH49_MEM_RCSR_CHNL0_CONFIG_REG as follows for
+ * normal operation (see bit fields).
+ *
+ * Clearing FH49_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
+ * Driver should poll FH49_MEM_RSSR_RX_STATUS_REG for
+ * FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
+ *
+ * Bit fields:
+ * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
+ *        '10' operate normally
+ * 29-24: reserved
+ * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
+ *        min "5" for 32 RBDs, max "12" for 4096 RBDs.
+ * 19-18: reserved
+ * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
+ *        '10' 12K, '11' 16K.
+ * 15-14: reserved
+ * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
+ * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
+ *        typical value 0x10 (about 1/2 msec)
+ *  3- 0: reserved
+ */
+#define FH49_MEM_RCSR_LOWER_BOUND      (FH49_MEM_LOWER_BOUND + 0xC00)
+#define FH49_MEM_RCSR_UPPER_BOUND      (FH49_MEM_LOWER_BOUND + 0xCC0)
+#define FH49_MEM_RCSR_CHNL0            (FH49_MEM_RCSR_LOWER_BOUND)
+
+#define FH49_MEM_RCSR_CHNL0_CONFIG_REG	(FH49_MEM_RCSR_CHNL0)
+
+#define FH49_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0)	/* bits 4-11 */
+#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK   (0x00001000)	/* bit 12 */
+#define FH49_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000)	/* bit 15 */
+#define FH49_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK   (0x00030000)	/* bits 16-17 */
+#define FH49_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000)	/* bits 20-23 */
+#define FH49_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000)	/* bits 30-31 */
+
+#define FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS	(20)
+#define FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS	(4)
+#define RX_RB_TIMEOUT	(0x10)
+
+#define FH49_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL         (0x00000000)
+#define FH49_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL     (0x40000000)
+#define FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL        (0x80000000)
+
+#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K    (0x00000000)
+#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K    (0x00010000)
+#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K   (0x00020000)
+#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K   (0x00030000)
+
+#define FH49_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY              (0x00000004)
+#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL    (0x00000000)
+#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL  (0x00001000)
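+
+/*
+ * Illustrative usage sketch (not part of the original driver): composing
+ * the channel 0 Rx config value described above, assuming the driver's
+ * il_wr() register-write helper and an RX_QUEUE_SIZE_LOG of 8 (256 RBDs),
+ * both defined elsewhere in iwlegacy:
+ *
+ *	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG,
+ *	      FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+ *	      FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+ *	      FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
+ *	      (RX_QUEUE_SIZE_LOG << FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS) |
+ *	      (RX_RB_TIMEOUT << FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS));
+ */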
+
+/**
+ * Rx Shared Status Registers (RSSR)
+ *
+ * After stopping Rx DMA channel (writing 0 to
+ * FH49_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
+ * FH49_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
+ *
+ * Bit fields:
+ *  24:  1 = Channel 0 is idle
+ *
+ * FH49_MEM_RSSR_SHARED_CTRL_REG and FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
+ * contain default values that should not be altered by the driver.
+ */
+#define FH49_MEM_RSSR_LOWER_BOUND           (FH49_MEM_LOWER_BOUND + 0xC40)
+#define FH49_MEM_RSSR_UPPER_BOUND           (FH49_MEM_LOWER_BOUND + 0xD00)
+
+#define FH49_MEM_RSSR_SHARED_CTRL_REG       (FH49_MEM_RSSR_LOWER_BOUND)
+#define FH49_MEM_RSSR_RX_STATUS_REG	(FH49_MEM_RSSR_LOWER_BOUND + 0x004)
+#define FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
+					(FH49_MEM_RSSR_LOWER_BOUND + 0x008)
+
+#define FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE	(0x01000000)
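+
+/*
+ * Illustrative usage sketch (not part of the original driver): stopping
+ * Rx DMA as described above, assuming the driver's il_wr() and
+ * il_poll_bit() helpers defined elsewhere in iwlegacy:
+ *
+ *	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ *	il_poll_bit(il, FH49_MEM_RSSR_RX_STATUS_REG,
+ *		    FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
+ *		    FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+ */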
+
+#define FH49_MEM_TFDIB_REG1_ADDR_BITSHIFT	28
+
+/* TFDB  Area - TFDs buffer table */
+#define FH49_MEM_TFDIB_DRAM_ADDR_LSB_MSK      (0xFFFFFFFF)
+#define FH49_TFDIB_LOWER_BOUND       (FH49_MEM_LOWER_BOUND + 0x900)
+#define FH49_TFDIB_UPPER_BOUND       (FH49_MEM_LOWER_BOUND + 0x958)
+#define FH49_TFDIB_CTRL0_REG(_chnl)  (FH49_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
+#define FH49_TFDIB_CTRL1_REG(_chnl)  (FH49_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
+
+/**
+ * Transmit DMA Channel Control/Status Registers (TCSR)
+ *
+ * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
+ * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
+ * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
+ *
+ * To use a Tx DMA channel, driver must initialize its
+ * FH49_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
+ *
+ * FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ * FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE
+ *
+ * All other bits should be 0.
+ *
+ * Bit fields:
+ * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
+ *        '10' operate normally
+ * 29- 4: Reserved, set to "0"
+ *     3: Enable internal DMA requests (1, normal operation), disable (0)
+ *  2- 0: Reserved, set to "0"
+ */
+#define FH49_TCSR_LOWER_BOUND  (FH49_MEM_LOWER_BOUND + 0xD00)
+#define FH49_TCSR_UPPER_BOUND  (FH49_MEM_LOWER_BOUND + 0xE60)
+
+/* Number of Tx DMA/FIFO channels */
+#define FH49_TCSR_CHNL_NUM                            (7)
+#define FH50_TCSR_CHNL_NUM                            (8)
+
+/* Find Control/Status reg for given Tx DMA/FIFO channel */
+#define FH49_TCSR_CHNL_TX_CONFIG_REG(_chnl)	\
+		(FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl))
+#define FH49_TCSR_CHNL_TX_CREDIT_REG(_chnl)	\
+		(FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4)
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG(_chnl)	\
+		(FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8)
+
+/* TCSR: tx_config register values */
+#define FH49_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF		(0x00000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV		(0x00000001)
+
+#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE	(0x00000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE	(0x00000008)
+
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT	(0x00000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD	(0x00100000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD	(0x00200000)
+
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT	(0x00000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD	(0x00400000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD	(0x00800000)
+
+#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE	(0x00000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF	(0x40000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE	(0x80000000)
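+
+/*
+ * Illustrative usage sketch (not part of the original driver): enabling a
+ * Tx DMA channel as required by the TCSR comment above, assuming the
+ * driver's il_wr() helper:
+ *
+ *	il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(chnl),
+ *	      FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ *	      FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
+ */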
+
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY	(0x00000000)
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT	(0x00002000)
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID	(0x00000003)
+
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM		(20)
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX		(12)
+
+/**
+ * Tx Shared Status Registers (TSSR)
+ *
+ * After stopping Tx DMA channel (writing 0 to
+ * FH49_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
+ * FH49_TSSR_TX_STATUS_REG until selected Tx channel is idle
+ * (channel's buffers empty | no pending requests).
+ *
+ * Bit fields:
+ * 31-24:  1 = Channel buffers empty (channel 7:0)
+ * 23-16:  1 = No pending requests (channel 7:0)
+ */
+#define FH49_TSSR_LOWER_BOUND		(FH49_MEM_LOWER_BOUND + 0xEA0)
+#define FH49_TSSR_UPPER_BOUND		(FH49_MEM_LOWER_BOUND + 0xEC0)
+
+#define FH49_TSSR_TX_STATUS_REG		(FH49_TSSR_LOWER_BOUND + 0x010)
+
+/**
+ * Bit fields for TSSR(Tx Shared Status & Control) error status register:
+ * 31:  Indicates an address error when accessing internal memory
+ *	uCode/driver must write "1" in order to clear this flag
+ * 30:  Indicates that Host did not send the expected number of dwords to FH
+ *	uCode/driver must write "1" in order to clear this flag
+ * 16-9: Each status bit is for one channel. Indicates that an (Error) ActDMA
+ *	command was received from the scheduler while the TRB was already full
+ *	with previous command
+ *	uCode/driver must write "1" in order to clear this flag
+ * 7-0: Each status bit indicates a channel's TxCredit error. When an error
+ *	bit is set, it indicates that the FH has received a full indication
+ *	from the RTC TxFIFO and the current value of the TxCredit counter was
+ *	not equal to zero. This means that the credit mechanism was not
+ *	synchronized to the TxFIFO status
+ *	uCode/driver must write "1" in order to clear this flag
+ */
+#define FH49_TSSR_TX_ERROR_REG		(FH49_TSSR_LOWER_BOUND + 0x018)
+
+#define FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
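+
+/*
+ * Illustrative usage sketch (not part of the original driver): stopping a
+ * Tx DMA channel and waiting for idle per the TSSR comment above, assuming
+ * the driver's il_wr() and il_poll_bit() helpers:
+ *
+ *	il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
+ *	il_poll_bit(il, FH49_TSSR_TX_STATUS_REG,
+ *		    FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl),
+ *		    FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl), 1000);
+ */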
+
+/* Tx service channels */
+#define FH49_SRVC_CHNL		(9)
+#define FH49_SRVC_LOWER_BOUND	(FH49_MEM_LOWER_BOUND + 0x9C8)
+#define FH49_SRVC_UPPER_BOUND	(FH49_MEM_LOWER_BOUND + 0x9D0)
+#define FH49_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
+		(FH49_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
+
+#define FH49_TX_CHICKEN_BITS_REG	(FH49_MEM_LOWER_BOUND + 0xE98)
+/* Instruct FH to increment the retry count of a packet when
+ * it is brought from the memory to TX-FIFO
+ */
+#define FH49_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN	(0x00000002)
+
+/* Keep Warm Size */
+#define IL_KW_SIZE 0x1000	/* 4k */
+
+#endif /* __il_4965_h__ */
diff --git a/drivers/net/wireless/intel/iwlegacy/Kconfig b/drivers/net/wireless/intel/iwlegacy/Kconfig
new file mode 100644
index 0000000..fb91972
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/Kconfig
@@ -0,0 +1,100 @@
+config IWLEGACY
+	tristate
+	select FW_LOADER
+	select NEW_LEDS
+	select LEDS_CLASS
+	select LEDS_TRIGGERS
+	select MAC80211_LEDS
+
+config IWL4965
+	tristate "Intel Wireless WiFi 4965AGN (iwl4965)"
+	depends on PCI && MAC80211
+	select IWLEGACY
+	---help---
+	  Select to build the driver supporting the:
+
+	  Intel Wireless WiFi Link 4965AGN
+
+	  This driver uses the kernel's mac80211 subsystem.
+
+	  In order to use this driver, you will need a microcode (uCode)
+	  image for it. You can obtain the microcode from:
+
+	          <http://intellinuxwireless.org/>.
+
+	  The microcode is typically installed in /lib/firmware. You can
+	  look in the hotplug script /etc/hotplug/firmware.agent to
+	  determine which directory FIRMWARE_DIR is set to when the script
+	  runs.
+
+	  If you want to compile the driver as a module ( = code which can be
+	  inserted in and removed from the running kernel whenever you want),
+	  say M here and read <file:Documentation/kbuild/modules.txt>.  The
+	  module will be called iwl4965.
+
+config IWL3945
+	tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
+	depends on PCI && MAC80211
+	select IWLEGACY
+	---help---
+	  Select to build the driver supporting the:
+
+	  Intel PRO/Wireless 3945ABG/BG Network Connection
+
+	  This driver uses the kernel's mac80211 subsystem.
+
+	  In order to use this driver, you will need a microcode (uCode)
+	  image for it. You can obtain the microcode from:
+
+	          <http://intellinuxwireless.org/>.
+
+	  The microcode is typically installed in /lib/firmware. You can
+	  look in the hotplug script /etc/hotplug/firmware.agent to
+	  determine which directory FIRMWARE_DIR is set to when the script
+	  runs.
+
+	  If you want to compile the driver as a module ( = code which can be
+	  inserted in and removed from the running kernel whenever you want),
+	  say M here and read <file:Documentation/kbuild/modules.txt>.  The
+	  module will be called iwl3945.
+
+menu "iwl3945 / iwl4965 Debugging Options"
+	depends on IWLEGACY
+
+config IWLEGACY_DEBUG
+	bool "Enable full debugging output in iwlegacy (iwl 3945/4965) drivers"
+	depends on IWLEGACY
+	---help---
+	  This option will enable debug tracing output for the iwlegacy
+	  drivers.
+
+	  This will result in the kernel module being ~100k larger.  You can
+	  control which debug output is sent to the kernel log by setting the
+	  value in
+
+		/sys/class/net/wlan0/device/debug_level
+
+	  This entry will only exist if this option is enabled.
+
+	  To set a value, simply echo a hex bitmask value to the same file:
+
+		  % echo 0x43fff > /sys/class/net/wlan0/device/debug_level
+
+	  You can find the list of debug mask values in:
+		  drivers/net/wireless/intel/iwlegacy/common.h
+
+	  If this is your first time using this driver, you should say Y here
+	  as the debug information can assist others in helping you resolve
+	  any problems you may encounter.
+
+config IWLEGACY_DEBUGFS
+	bool "iwlegacy (iwl 3945/4965) debugfs support"
+	depends on IWLEGACY && MAC80211_DEBUGFS
+	---help---
+	  Enable creation of debugfs files for the iwlegacy drivers. This
+	  is a low-impact option that allows getting insight into the
+	  driver's state at runtime.
+
+endmenu
diff --git a/drivers/net/wireless/intel/iwlegacy/Makefile b/drivers/net/wireless/intel/iwlegacy/Makefile
new file mode 100644
index 0000000..c5ad045
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_IWLEGACY)	+= iwlegacy.o
+iwlegacy-objs 		:= common.o
+iwlegacy-$(CONFIG_IWLEGACY_DEBUGFS) += debug.o
+
+iwlegacy-objs += $(iwlegacy-m)
+
+# 4965
+obj-$(CONFIG_IWL4965)	+= iwl4965.o
+iwl4965-objs		:= 4965.o 4965-mac.o 4965-rs.o 4965-calib.o
+iwl4965-$(CONFIG_IWLEGACY_DEBUGFS) += 4965-debug.o
+
+# 3945
+obj-$(CONFIG_IWL3945)	+= iwl3945.o
+iwl3945-objs		:= 3945-mac.o 3945.o 3945-rs.o
+iwl3945-$(CONFIG_IWLEGACY_DEBUGFS) += 3945-debug.o
diff --git a/drivers/net/wireless/intel/iwlegacy/commands.h b/drivers/net/wireless/intel/iwlegacy/commands.h
new file mode 100644
index 0000000..dd74413
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/commands.h
@@ -0,0 +1,3370 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __il_commands_h__
+#define __il_commands_h__
+
+#include <linux/ieee80211.h>
+
+struct il_priv;
+
+/* uCode version contains 4 values: Major/Minor/API/Serial */
+#define IL_UCODE_MAJOR(ver)	(((ver) & 0xFF000000) >> 24)
+#define IL_UCODE_MINOR(ver)	(((ver) & 0x00FF0000) >> 16)
+#define IL_UCODE_API(ver)	(((ver) & 0x0000FF00) >> 8)
+#define IL_UCODE_SERIAL(ver)	((ver) & 0x000000FF)
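+
+/*
+ * Example (illustrative only): a hypothetical version word 0x02134E07
+ * unpacks as major 0x02, minor 0x13, API 0x4E, serial 0x07:
+ *
+ *	u8 major  = IL_UCODE_MAJOR(ver);
+ *	u8 minor  = IL_UCODE_MINOR(ver);
+ *	u8 api    = IL_UCODE_API(ver);
+ *	u8 serial = IL_UCODE_SERIAL(ver);
+ */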
+
+/* Tx rates */
+#define IL_CCK_RATES	4
+#define IL_OFDM_RATES	8
+#define IL_MAX_RATES	(IL_CCK_RATES + IL_OFDM_RATES)
+
+enum {
+	N_ALIVE = 0x1,
+	N_ERROR = 0x2,
+
+	/* RXON and QOS commands */
+	C_RXON = 0x10,
+	C_RXON_ASSOC = 0x11,
+	C_QOS_PARAM = 0x13,
+	C_RXON_TIMING = 0x14,
+
+	/* Multi-Station support */
+	C_ADD_STA = 0x18,
+	C_REM_STA = 0x19,
+
+	/* Security */
+	C_WEPKEY = 0x20,
+
+	/* RX, TX, LEDs */
+	N_3945_RX = 0x1b,	/* 3945 only */
+	C_TX = 0x1c,
+	C_RATE_SCALE = 0x47,	/* 3945 only */
+	C_LEDS = 0x48,
+	C_TX_LINK_QUALITY_CMD = 0x4e,	/* for 4965 */
+
+	/* 802.11h related */
+	C_CHANNEL_SWITCH = 0x72,
+	N_CHANNEL_SWITCH = 0x73,
+	C_SPECTRUM_MEASUREMENT = 0x74,
+	N_SPECTRUM_MEASUREMENT = 0x75,
+
+	/* Power Management */
+	C_POWER_TBL = 0x77,
+	N_PM_SLEEP = 0x7A,
+	N_PM_DEBUG_STATS = 0x7B,
+
+	/* Scan commands and notifications */
+	C_SCAN = 0x80,
+	C_SCAN_ABORT = 0x81,
+	N_SCAN_START = 0x82,
+	N_SCAN_RESULTS = 0x83,
+	N_SCAN_COMPLETE = 0x84,
+
+	/* IBSS/AP commands */
+	N_BEACON = 0x90,
+	C_TX_BEACON = 0x91,
+
+	/* Miscellaneous commands */
+	C_TX_PWR_TBL = 0x97,
+
+	/* Bluetooth device coexistence config command */
+	C_BT_CONFIG = 0x9b,
+
+	/* Statistics */
+	C_STATS = 0x9c,
+	N_STATS = 0x9d,
+
+	/* RF-KILL commands and notifications */
+	N_CARD_STATE = 0xa1,
+
+	/* Missed beacons notification */
+	N_MISSED_BEACONS = 0xa2,
+
+	C_CT_KILL_CONFIG = 0xa4,
+	C_SENSITIVITY = 0xa8,
+	C_PHY_CALIBRATION = 0xb0,
+	N_RX_PHY = 0xc0,
+	N_RX_MPDU = 0xc1,
+	N_RX = 0xc3,
+	N_COMPRESSED_BA = 0xc5,
+
+	IL_CN_MAX = 0xff
+};
+
+/******************************************************************************
+ * (0)
+ * Commonly used structures and definitions:
+ * Command header, rate_n_flags, txpower
+ *
+ *****************************************************************************/
+
+/* il_cmd_header flags value */
+#define IL_CMD_FAILED_MSK 0x40
+
+#define SEQ_TO_QUEUE(s)	(((s) >> 8) & 0x1f)
+#define QUEUE_TO_SEQ(q)	(((q) & 0x1f) << 8)
+#define SEQ_TO_IDX(s)	((s) & 0xff)
+#define IDX_TO_SEQ(i)	((i) & 0xff)
+#define SEQ_HUGE_FRAME	cpu_to_le16(0x4000)
+#define SEQ_RX_FRAME	cpu_to_le16(0x8000)
+
+/**
+ * struct il_cmd_header
+ *
+ * This header format appears in the beginning of each command sent from the
+ * driver, and each response/notification received from uCode.
+ */
+struct il_cmd_header {
+	u8 cmd;			/* Command ID:  C_RXON, etc. */
+	u8 flags;		/* 0:5 reserved, 6 abort, 7 internal */
+	/*
+	 * The driver sets up the sequence number to values of its choosing.
+	 * uCode does not use this value, but passes it back to the driver
+	 * when sending the response to each driver-originated command, so
+	 * the driver can match the response to the command.  Since the values
+	 * don't get used by uCode, the driver may set up an arbitrary format.
+	 *
+	 * There is one exception:  uCode sets bit 15 when it originates
+	 * the response/notification, i.e. when the response/notification
+	 * is not a direct response to a command sent by the driver.  For
+	 * example, uCode issues N_3945_RX when it sends a received frame
+	 * to the driver; it is not a direct response to any driver command.
+	 *
+	 * The Linux driver uses the following format:
+	 *
+	 *  0:7         tfd idx - position within TX queue
+	 *  8:12        TX queue id
+	 *  13          reserved
+	 *  14          huge - driver sets this to indicate command is in the
+	 *              'huge' storage at the end of the command buffers
+	 *  15          unsolicited RX or uCode-originated notification
+	 */
+	__le16 sequence;
+
+	/* command or response/notification data follows immediately */
+	u8 data[0];
+} __packed;
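+
+/*
+ * Illustrative sketch (not part of the original driver): building the
+ * driver-format sequence value described above from a Tx queue id and a
+ * TFD idx, then recovering both from a response header (cmd and pkt are
+ * placeholder names):
+ *
+ *	cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(txq_id) |
+ *					IDX_TO_SEQ(idx));
+ *
+ *	u16 seq = le16_to_cpu(pkt->hdr.sequence);
+ *	int txq_id = SEQ_TO_QUEUE(seq);
+ *	int idx = SEQ_TO_IDX(seq);
+ */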
+
+/**
+ * struct il3945_tx_power
+ *
+ * Used in C_TX_PWR_TBL, C_SCAN, C_CHANNEL_SWITCH
+ *
+ * Each entry contains two values:
+ * 1)  DSP gain (or sometimes called DSP attenuation).  This is a fine-grained
+ *     linear value that multiplies the output of the digital signal processor,
+ *     before being sent to the analog radio.
+ * 2)  Radio gain.  This sets the analog gain of the radio Tx path.
+ *     It is a coarser setting, and behaves in a logarithmic (dB) fashion.
+ *
+ * Driver obtains values from struct il3945_tx_power power_gain_table[][].
+ */
+struct il3945_tx_power {
+	u8 tx_gain;		/* gain for analog radio */
+	u8 dsp_atten;		/* gain for DSP */
+} __packed;
+
+/**
+ * struct il3945_power_per_rate
+ *
+ * Used in C_TX_PWR_TBL, C_CHANNEL_SWITCH
+ */
+struct il3945_power_per_rate {
+	u8 rate;		/* plcp */
+	struct il3945_tx_power tpc;
+	u8 reserved;
+} __packed;
+
+/**
+ * iwl4965 rate_n_flags bit fields
+ *
+ * rate_n_flags format is used in following iwl4965 commands:
+ *  N_RX (response only)
+ *  N_RX_MPDU (response only)
+ *  C_TX (both command and response)
+ *  C_TX_LINK_QUALITY_CMD
+ *
+ * High-throughput (HT) rate format for bits 7:0 (bit 8 must be "1"):
+ *  2-0:  0)   6 Mbps
+ *        1)  12 Mbps
+ *        2)  18 Mbps
+ *        3)  24 Mbps
+ *        4)  36 Mbps
+ *        5)  48 Mbps
+ *        6)  54 Mbps
+ *        7)  60 Mbps
+ *
+ *  4-3:  0)  Single stream (SISO)
+ *        1)  Dual stream (MIMO)
+ *        2)  Triple stream (MIMO)
+ *
+ *    5:  Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data
+ *
+ * Legacy OFDM rate format for bits 7:0 (bit 8 must be "0", bit 9 "0"):
+ *  3-0:  0xD)   6 Mbps
+ *        0xF)   9 Mbps
+ *        0x5)  12 Mbps
+ *        0x7)  18 Mbps
+ *        0x9)  24 Mbps
+ *        0xB)  36 Mbps
+ *        0x1)  48 Mbps
+ *        0x3)  54 Mbps
+ *
+ * Legacy CCK rate format for bits 7:0 (bit 8 must be "0", bit 9 "1"):
+ *  6-0:   10)  1 Mbps
+ *         20)  2 Mbps
+ *         55)  5.5 Mbps
+ *        110)  11 Mbps
+ */
+#define RATE_MCS_CODE_MSK 0x7
+#define RATE_MCS_SPATIAL_POS 3
+#define RATE_MCS_SPATIAL_MSK 0x18
+#define RATE_MCS_HT_DUP_POS 5
+#define RATE_MCS_HT_DUP_MSK 0x20
+
+/* Bit 8: (1) HT format, (0) legacy format in bits 7:0 */
+#define RATE_MCS_FLAGS_POS 8
+#define RATE_MCS_HT_POS 8
+#define RATE_MCS_HT_MSK 0x100
+
+/* Bit 9: (1) CCK, (0) OFDM.  HT (bit 8) must be "0" for this bit to be valid */
+#define RATE_MCS_CCK_POS 9
+#define RATE_MCS_CCK_MSK 0x200
+
+/* Bit 10: (1) Use Green Field preamble */
+#define RATE_MCS_GF_POS 10
+#define RATE_MCS_GF_MSK 0x400
+
+/* Bit 11: (1) Use 40Mhz HT40 chnl width, (0) use 20 MHz legacy chnl width */
+#define RATE_MCS_HT40_POS 11
+#define RATE_MCS_HT40_MSK 0x800
+
+/* Bit 12: (1) Duplicate data on both 20MHz chnls. HT40 (bit 11) must be set. */
+#define RATE_MCS_DUP_POS 12
+#define RATE_MCS_DUP_MSK 0x1000
+
+/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */
+#define RATE_MCS_SGI_POS 13
+#define RATE_MCS_SGI_MSK 0x2000
+
+/**
+ * rate_n_flags Tx antenna masks
+ * 4965 has 2 transmitters
+ * bits 14:16
+ */
+#define RATE_MCS_ANT_POS	14
+#define RATE_MCS_ANT_A_MSK	0x04000
+#define RATE_MCS_ANT_B_MSK	0x08000
+#define RATE_MCS_ANT_C_MSK	0x10000
+#define RATE_MCS_ANT_AB_MSK	(RATE_MCS_ANT_A_MSK | RATE_MCS_ANT_B_MSK)
+#define RATE_MCS_ANT_ABC_MSK	(RATE_MCS_ANT_AB_MSK | RATE_MCS_ANT_C_MSK)
+#define RATE_ANT_NUM 3
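+
+/*
+ * Example (illustrative only): a SISO HT rate, code 6 (54 Mbps), with
+ * short guard interval, sent on antennas A and B:
+ *
+ *	__le32 rate_n_flags = cpu_to_le32(6 | RATE_MCS_HT_MSK |
+ *					  RATE_MCS_SGI_MSK |
+ *					  RATE_MCS_ANT_AB_MSK);
+ */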
+
+#define POWER_TBL_NUM_ENTRIES			33
+#define POWER_TBL_NUM_HT_OFDM_ENTRIES		32
+#define POWER_TBL_CCK_ENTRY			32
+
+#define IL_PWR_NUM_HT_OFDM_ENTRIES		24
+#define IL_PWR_CCK_ENTRIES			2
+
+/**
+ * union il4965_tx_power_dual_stream
+ *
+ * Host format used for C_TX_PWR_TBL, C_CHANNEL_SWITCH
+ * Use __le32 version (struct tx_power_dual_stream) when building command.
+ *
+ * Driver provides radio gain and DSP attenuation settings to device in pairs,
+ * one value for each transmitter chain.  The first value is for transmitter A,
+ * second for transmitter B.
+ *
+ * For SISO bit rates, both values in a pair should be identical.
+ * For MIMO rates, one value may be different from the other,
+ * in order to balance the Tx output between the two transmitters.
+ *
+ * See more details in doc for TXPOWER in 4965.h.
+ */
+union il4965_tx_power_dual_stream {
+	struct {
+		u8 radio_tx_gain[2];
+		u8 dsp_predis_atten[2];
+	} s;
+	u32 dw;
+};
+
+/**
+ * struct tx_power_dual_stream
+ *
+ * Table entries in C_TX_PWR_TBL, C_CHANNEL_SWITCH
+ *
+ * Same format as il4965_tx_power_dual_stream, but __le32
+ */
+struct tx_power_dual_stream {
+	__le32 dw;
+} __packed;
+
+/**
+ * struct il4965_tx_power_db
+ *
+ * Entire table within C_TX_PWR_TBL, C_CHANNEL_SWITCH
+ */
+struct il4965_tx_power_db {
+	struct tx_power_dual_stream power_tbl[POWER_TBL_NUM_ENTRIES];
+} __packed;
+
+/******************************************************************************
+ * (0a)
+ * Alive and Error Commands & Responses:
+ *
+ *****************************************************************************/
+
+#define UCODE_VALID_OK	cpu_to_le32(0x1)
+#define INITIALIZE_SUBTYPE    (9)
+
+/*
+ * ("Initialize") N_ALIVE = 0x1 (response only, not a command)
+ *
+ * uCode issues this "initialize alive" notification once the initialization
+ * uCode image has completed its work, and is ready to load the runtime image.
+ * This is the *first* "alive" notification that the driver will receive after
+ * rebooting uCode; the "initialize" alive is indicated by subtype field == 9.
+ *
+ * See comments documenting "BSM" (bootstrap state machine).
+ *
+ * For 4965, this notification contains important calibration data for
+ * calculating txpower settings:
+ *
+ * 1)  Power supply voltage indication.  The voltage sensor outputs higher
+ *     values for lower voltage, and vice versa.
+ *
+ * 2)  Temperature measurement parameters, for each of two channel widths
+ *     (20 MHz and 40 MHz) supported by the radios.  Temperature sensing
+ *     is done via one of the receiver chains, and channel width influences
+ *     the results.
+ *
+ * 3)  Tx gain compensation to balance 4965's 2 Tx chains for MIMO operation,
+ *     for each of 5 frequency ranges.
+ */
+struct il_init_alive_resp {
+	u8 ucode_minor;
+	u8 ucode_major;
+	__le16 reserved1;
+	u8 sw_rev[8];
+	u8 ver_type;
+	u8 ver_subtype;		/* "9" for initialize alive */
+	__le16 reserved2;
+	__le32 log_event_table_ptr;
+	__le32 error_event_table_ptr;
+	__le32 timestamp;
+	__le32 is_valid;
+
+	/* calibration values from "initialize" uCode */
+	__le32 voltage;		/* signed, higher value is lower voltage */
+	__le32 therm_r1[2];	/* signed, 1st for normal, 2nd for HT40 */
+	__le32 therm_r2[2];	/* signed */
+	__le32 therm_r3[2];	/* signed */
+	__le32 therm_r4[2];	/* signed */
+	__le32 tx_atten[5][2];	/* signed MIMO gain comp, 5 freq groups,
+				 * 2 Tx chains */
+} __packed;
+
+/**
+ * N_ALIVE = 0x1 (response only, not a command)
+ *
+ * uCode issues this "alive" notification once the runtime image is ready
+ * to receive commands from the driver.  This is the *second* "alive"
+ * notification that the driver will receive after rebooting uCode;
+ * this "alive" is indicated by subtype field != 9.
+ *
+ * See comments documenting "BSM" (bootstrap state machine).
+ *
+ * This response includes two pointers to structures within the device's
+ * data SRAM (access via HBUS_TARG_MEM_* regs) that are useful for debugging:
+ *
+ * 1)  log_event_table_ptr indicates base of the event log.  This traces
+ *     a 256-entry history of uCode execution within a circular buffer.
+ *     Its header format is:
+ *
+ *	__le32 log_size;     log capacity (in number of entries)
+ *	__le32 type;         (1) timestamp with each entry, (0) no timestamp
+ *	__le32 wraps;        # times uCode has wrapped to top of circular buffer
+ *	__le32 write_idx;    next circular buffer entry that uCode would fill
+ *
+ *     The header is followed by the circular buffer of log entries.  Entries
+ *     with timestamps have the following format:
+ *
+ *	__le32 event_id;     range 0 - 1500
+ *	__le32 timestamp;    low 32 bits of TSF (of network, if associated)
+ *	__le32 data;         event_id-specific data value
+ *
+ *     Entries without timestamps contain only event_id and data.
+ *
+ *
+ * 2)  error_event_table_ptr indicates base of the error log.  This contains
+ *     information about any uCode error that occurs.  For 4965, the format
+ *     of the error log is:
+ *
+ *	__le32 valid;        (nonzero) valid, (0) log is empty
+ *	__le32 error_id;     type of error
+ *	__le32 pc;           program counter
+ *	__le32 blink1;       branch link
+ *	__le32 blink2;       branch link
+ *	__le32 ilink1;       interrupt link
+ *	__le32 ilink2;       interrupt link
+ *	__le32 data1;        error-specific data
+ *	__le32 data2;        error-specific data
+ *	__le32 line;         source code line of error
+ *	__le32 bcon_time;    beacon timer
+ *	__le32 tsf_low;      network timestamp function timer
+ *	__le32 tsf_hi;       network timestamp function timer
+ *	__le32 gp1;          GP1 timer register
+ *	__le32 gp2;          GP2 timer register
+ *	__le32 gp3;          GP3 timer register
+ *	__le32 ucode_ver;    uCode version
+ *	__le32 hw_ver;       HW Silicon version
+ *	__le32 brd_ver;      HW board version
+ *	__le32 log_pc;       log program counter
+ *	__le32 frame_ptr;    frame pointer
+ *	__le32 stack_ptr;    stack pointer
+ *	__le32 hcmd;         last host command
+ *	__le32 isr0;         isr status register LMPM_NIC_ISR0: rxtx_flag
+ *	__le32 isr1;         isr status register LMPM_NIC_ISR1: host_flag
+ *	__le32 isr2;         isr status register LMPM_NIC_ISR2: enc_flag
+ *	__le32 isr3;         isr status register LMPM_NIC_ISR3: time_flag
+ *	__le32 isr4;         isr status register LMPM_NIC_ISR4: wico interrupt
+ *	__le32 isr_pref;     isr status register LMPM_NIC_PREF_STAT
+ *	__le32 wait_event;   wait event() caller address
+ *	__le32 l2p_control;  L2pControlField
+ *	__le32 l2p_duration; L2pDurationField
+ *	__le32 l2p_mhvalid;  L2pMhValidBits
+ *	__le32 l2p_addr_match; L2pAddrMatchStat
+ *	__le32 lmpm_pmg_sel; indicate which clocks are turned on (LMPM_PMG_SEL)
+ *	__le32 u_timestamp;  date and time of uCode compilation
+ *	__le32 reserved;
+ *
+ * The Linux driver can print both logs to the system log when a uCode error
+ * occurs.
+ */
+struct il_alive_resp {
+	u8 ucode_minor;
+	u8 ucode_major;
+	__le16 reserved1;
+	u8 sw_rev[8];
+	u8 ver_type;
+	u8 ver_subtype;		/* not "9" for runtime alive */
+	__le16 reserved2;
+	__le32 log_event_table_ptr;	/* SRAM address for event log */
+	__le32 error_event_table_ptr;	/* SRAM address for error log */
+	__le32 timestamp;
+	__le32 is_valid;
+} __packed;
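+
+/*
+ * Illustrative sketch (not part of the original driver): the event log
+ * header documented above, expressed as a C struct for clarity.  The
+ * struct name is hypothetical; the driver reads these words individually
+ * from SRAM:
+ *
+ *	struct il_event_log_hdr {
+ *		__le32 log_size;
+ *		__le32 type;
+ *		__le32 wraps;
+ *		__le32 write_idx;
+ *	};
+ */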
+
+/*
+ * N_ERROR = 0x2 (response only, not a command)
+ */
+struct il_error_resp {
+	__le32 error_type;
+	u8 cmd_id;
+	u8 reserved1;
+	__le16 bad_cmd_seq_num;
+	__le32 error_info;
+	__le64 timestamp;
+} __packed;
+
+/******************************************************************************
+ * (1)
+ * RXON Commands & Responses:
+ *
+ *****************************************************************************/
+
+/*
+ * Rx config defines & structure
+ */
+/* rx_config device types  */
+enum {
+	RXON_DEV_TYPE_AP = 1,
+	RXON_DEV_TYPE_ESS = 3,
+	RXON_DEV_TYPE_IBSS = 4,
+	RXON_DEV_TYPE_SNIFFER = 6,
+};
+
+#define RXON_RX_CHAIN_DRIVER_FORCE_MSK		cpu_to_le16(0x1 << 0)
+#define RXON_RX_CHAIN_DRIVER_FORCE_POS		(0)
+#define RXON_RX_CHAIN_VALID_MSK			cpu_to_le16(0x7 << 1)
+#define RXON_RX_CHAIN_VALID_POS			(1)
+#define RXON_RX_CHAIN_FORCE_SEL_MSK		cpu_to_le16(0x7 << 4)
+#define RXON_RX_CHAIN_FORCE_SEL_POS		(4)
+#define RXON_RX_CHAIN_FORCE_MIMO_SEL_MSK	cpu_to_le16(0x7 << 7)
+#define RXON_RX_CHAIN_FORCE_MIMO_SEL_POS	(7)
+#define RXON_RX_CHAIN_CNT_MSK			cpu_to_le16(0x3 << 10)
+#define RXON_RX_CHAIN_CNT_POS			(10)
+#define RXON_RX_CHAIN_MIMO_CNT_MSK		cpu_to_le16(0x3 << 12)
+#define RXON_RX_CHAIN_MIMO_CNT_POS		(12)
+#define RXON_RX_CHAIN_MIMO_FORCE_MSK		cpu_to_le16(0x1 << 14)
+#define RXON_RX_CHAIN_MIMO_FORCE_POS		(14)
+
+/* rx_config flags */
+/* band & modulation selection */
+#define RXON_FLG_BAND_24G_MSK           cpu_to_le32(1 << 0)
+#define RXON_FLG_CCK_MSK                cpu_to_le32(1 << 1)
+/* auto detection enable */
+#define RXON_FLG_AUTO_DETECT_MSK        cpu_to_le32(1 << 2)
+/* TGg protection when tx */
+#define RXON_FLG_TGG_PROTECT_MSK        cpu_to_le32(1 << 3)
+/* cck short slot & preamble */
+#define RXON_FLG_SHORT_SLOT_MSK          cpu_to_le32(1 << 4)
+#define RXON_FLG_SHORT_PREAMBLE_MSK     cpu_to_le32(1 << 5)
+/* antenna selection */
+#define RXON_FLG_DIS_DIV_MSK            cpu_to_le32(1 << 7)
+#define RXON_FLG_ANT_SEL_MSK            cpu_to_le32(0x0f00)
+#define RXON_FLG_ANT_A_MSK              cpu_to_le32(1 << 8)
+#define RXON_FLG_ANT_B_MSK              cpu_to_le32(1 << 9)
+/* radar detection enable */
+#define RXON_FLG_RADAR_DETECT_MSK       cpu_to_le32(1 << 12)
+#define RXON_FLG_TGJ_NARROW_BAND_MSK    cpu_to_le32(1 << 13)
+/* rx response to host with 8-byte TSF
+ * (according to ON_AIR deassertion) */
+#define RXON_FLG_TSF2HOST_MSK           cpu_to_le32(1 << 15)
+
+/* HT flags */
+#define RXON_FLG_CTRL_CHANNEL_LOC_POS		(22)
+#define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK	cpu_to_le32(0x1 << 22)
+
+#define RXON_FLG_HT_OPERATING_MODE_POS		(23)
+
+#define RXON_FLG_HT_PROT_MSK			cpu_to_le32(0x1 << 23)
+#define RXON_FLG_HT40_PROT_MSK			cpu_to_le32(0x2 << 23)
+
+#define RXON_FLG_CHANNEL_MODE_POS		(25)
+#define RXON_FLG_CHANNEL_MODE_MSK		cpu_to_le32(0x3 << 25)
+
+/* channel mode */
+enum {
+	CHANNEL_MODE_LEGACY = 0,
+	CHANNEL_MODE_PURE_40 = 1,
+	CHANNEL_MODE_MIXED = 2,
+	CHANNEL_MODE_RESERVED = 3,
+};
+#define RXON_FLG_CHANNEL_MODE_LEGACY			\
+	cpu_to_le32(CHANNEL_MODE_LEGACY << RXON_FLG_CHANNEL_MODE_POS)
+#define RXON_FLG_CHANNEL_MODE_PURE_40			\
+	cpu_to_le32(CHANNEL_MODE_PURE_40 << RXON_FLG_CHANNEL_MODE_POS)
+#define RXON_FLG_CHANNEL_MODE_MIXED			\
+	cpu_to_le32(CHANNEL_MODE_MIXED << RXON_FLG_CHANNEL_MODE_POS)
+
+/* CTS to self (if spec allows) flag */
+#define RXON_FLG_SELF_CTS_EN			cpu_to_le32(0x1 << 30)
+
+/* rx_config filter flags */
+/* accept all data frames */
+#define RXON_FILTER_PROMISC_MSK         cpu_to_le32(1 << 0)
+/* pass control & management to host */
+#define RXON_FILTER_CTL2HOST_MSK        cpu_to_le32(1 << 1)
+/* accept multi-cast */
+#define RXON_FILTER_ACCEPT_GRP_MSK      cpu_to_le32(1 << 2)
+/* don't decrypt uni-cast frames */
+#define RXON_FILTER_DIS_DECRYPT_MSK     cpu_to_le32(1 << 3)
+/* don't decrypt multi-cast frames */
+#define RXON_FILTER_DIS_GRP_DECRYPT_MSK cpu_to_le32(1 << 4)
+/* STA is associated */
+#define RXON_FILTER_ASSOC_MSK           cpu_to_le32(1 << 5)
+/* transfer to host non bssid beacons in associated state */
+#define RXON_FILTER_BCON_AWARE_MSK      cpu_to_le32(1 << 6)
+
+/**
+ * C_RXON = 0x10 (command, has simple generic response)
+ *
+ * RXON tunes the radio tuner to a service channel, and sets up a number
+ * of parameters that are used primarily for Rx, but also for Tx operations.
+ *
+ * NOTE:  When tuning to a new channel, driver must set the
+ *        RXON_FILTER_ASSOC_MSK to 0.  This will clear station-dependent
+ *        info within the device, including the station tables, tx retry
+ *        rate tables, and txpower tables.  Driver must build a new station
+ *        table and txpower table before transmitting anything on the RXON
+ *        channel.
+ *
+ * NOTE:  All RXONs wipe clean the internal txpower table.  Driver must
+ *        issue a new C_TX_PWR_TBL after each C_RXON (0x10),
+ *        regardless of whether RXON_FILTER_ASSOC_MSK is set.
+ */
+
+struct il3945_rxon_cmd {
+	u8 node_addr[6];
+	__le16 reserved1;
+	u8 bssid_addr[6];
+	__le16 reserved2;
+	u8 wlap_bssid_addr[6];
+	__le16 reserved3;
+	u8 dev_type;
+	u8 air_propagation;
+	__le16 reserved4;
+	u8 ofdm_basic_rates;
+	u8 cck_basic_rates;
+	__le16 assoc_id;
+	__le32 flags;
+	__le32 filter_flags;
+	__le16 channel;
+	__le16 reserved5;
+} __packed;
+
+struct il4965_rxon_cmd {
+	u8 node_addr[6];
+	__le16 reserved1;
+	u8 bssid_addr[6];
+	__le16 reserved2;
+	u8 wlap_bssid_addr[6];
+	__le16 reserved3;
+	u8 dev_type;
+	u8 air_propagation;
+	__le16 rx_chain;
+	u8 ofdm_basic_rates;
+	u8 cck_basic_rates;
+	__le16 assoc_id;
+	__le32 flags;
+	__le32 filter_flags;
+	__le16 channel;
+	u8 ofdm_ht_single_stream_basic_rates;
+	u8 ofdm_ht_dual_stream_basic_rates;
+} __packed;
+
+/* Create a common rxon cmd which will be typecast into the 3945 or 4965
+ * specific rxon cmd, depending on where it is called from.
+ */
+struct il_rxon_cmd {
+	u8 node_addr[6];
+	__le16 reserved1;
+	u8 bssid_addr[6];
+	__le16 reserved2;
+	u8 wlap_bssid_addr[6];
+	__le16 reserved3;
+	u8 dev_type;
+	u8 air_propagation;
+	__le16 rx_chain;
+	u8 ofdm_basic_rates;
+	u8 cck_basic_rates;
+	__le16 assoc_id;
+	__le32 flags;
+	__le32 filter_flags;
+	__le16 channel;
+	u8 ofdm_ht_single_stream_basic_rates;
+	u8 ofdm_ht_dual_stream_basic_rates;
+	u8 reserved4;
+	u8 reserved5;
+} __packed;
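+
+/*
+ * Illustrative sketch (not part of the original driver): the channel-tuning
+ * sequence required by the NOTEs above.  The staging field and commit
+ * helper names are placeholders for the per-device equivalents:
+ *
+ *	il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ *	il->staging.channel = cpu_to_le16(new_ch);
+ *	commit_rxon(il);	(sends C_RXON; wipes station/txpower tables)
+ *
+ * then re-add stations and send a fresh C_TX_PWR_TBL before any Tx.
+ */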
+
+/*
+ * C_RXON_ASSOC = 0x11 (command, has simple generic response)
+ */
+struct il3945_rxon_assoc_cmd {
+	__le32 flags;
+	__le32 filter_flags;
+	u8 ofdm_basic_rates;
+	u8 cck_basic_rates;
+	__le16 reserved;
+} __packed;
+
+struct il4965_rxon_assoc_cmd {
+	__le32 flags;
+	__le32 filter_flags;
+	u8 ofdm_basic_rates;
+	u8 cck_basic_rates;
+	u8 ofdm_ht_single_stream_basic_rates;
+	u8 ofdm_ht_dual_stream_basic_rates;
+	__le16 rx_chain_select_flags;
+	__le16 reserved;
+} __packed;
+
+#define IL_CONN_MAX_LISTEN_INTERVAL	10
+#define IL_MAX_UCODE_BEACON_INTERVAL	4	/* 4096 */
+#define IL39_MAX_UCODE_BEACON_INTERVAL	1	/* 1024 */
+
+/*
+ * C_RXON_TIMING = 0x14 (command, has simple generic response)
+ */
+struct il_rxon_time_cmd {
+	__le64 timestamp;
+	__le16 beacon_interval;
+	__le16 atim_win;
+	__le32 beacon_init_val;
+	__le16 listen_interval;
+	u8 dtim_period;
+	u8 delta_cp_bss_tbtts;
+} __packed;
+
+/*
+ * C_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
+ */
+struct il3945_channel_switch_cmd {
+	u8 band;
+	u8 expect_beacon;
+	__le16 channel;
+	__le32 rxon_flags;
+	__le32 rxon_filter_flags;
+	__le32 switch_time;
+	struct il3945_power_per_rate power[IL_MAX_RATES];
+} __packed;
+
+struct il4965_channel_switch_cmd {
+	u8 band;
+	u8 expect_beacon;
+	__le16 channel;
+	__le32 rxon_flags;
+	__le32 rxon_filter_flags;
+	__le32 switch_time;
+	struct il4965_tx_power_db tx_power;
+} __packed;
+
+/*
+ * N_CHANNEL_SWITCH = 0x73 (notification only, not a command)
+ */
+struct il_csa_notification {
+	__le16 band;
+	__le16 channel;
+	__le32 status;		/* 0 - OK, 1 - fail */
+} __packed;
+
+/******************************************************************************
+ * (2)
+ * Quality-of-Service (QOS) Commands & Responses:
+ *
+ *****************************************************************************/
+
+/**
+ * struct il_ac_qos -- QOS timing params for C_QOS_PARAM
+ * One for each of 4 EDCA access categories in struct il_qosparam_cmd
+ *
+ * @cw_min: Contention win, start value in numbers of slots.
+ *          Should be a power-of-2, minus 1.  Device's default is 0x0f.
+ * @cw_max: Contention win, max value in numbers of slots.
+ *          Should be a power-of-2, minus 1.  Device's default is 0x3f.
+ * @aifsn:  Number of slots in Arbitration Interframe Space (before
+ *          performing random backoff timing prior to Tx).  Device default 1.
+ * @edca_txop:  Length of Tx opportunity, in uSecs.  Device default is 0.
+ *
+ * Device will automatically increase the contention win to (2*CW) + 1 for each
+ * transmission retry.  Device uses cw_max as a bit mask, ANDed with new CW
+ * value, to cap the CW value.
+ */
+struct il_ac_qos {
+	__le16 cw_min;
+	__le16 cw_max;
+	u8 aifsn;
+	u8 reserved1;
+	__le16 edca_txop;
+} __packed;
+
+/* QoS flags defines */
+#define QOS_PARAM_FLG_UPDATE_EDCA_MSK	cpu_to_le32(0x01)
+#define QOS_PARAM_FLG_TGN_MSK		cpu_to_le32(0x02)
+#define QOS_PARAM_FLG_TXOP_TYPE_MSK	cpu_to_le32(0x10)
+
+/* Number of Access Categories (AC) (EDCA), queues 0..3 */
+#define AC_NUM                4
+
+/*
+ * C_QOS_PARAM = 0x13 (command, has simple generic response)
+ *
+ * This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs
+ * 0: Background, 1: Best Effort, 2: Video, 3: Voice.
+ */
+struct il_qosparam_cmd {
+	__le32 qos_flags;
+	struct il_ac_qos ac[AC_NUM];
+} __packed;
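+
+/*
+ * Illustrative sketch (not part of the original driver): filling one access
+ * category with the device defaults documented in il_ac_qos above:
+ *
+ *	struct il_qosparam_cmd qos = {
+ *		.qos_flags = QOS_PARAM_FLG_UPDATE_EDCA_MSK,
+ *	};
+ *	qos.ac[0].cw_min = cpu_to_le16(0x0f);
+ *	qos.ac[0].cw_max = cpu_to_le16(0x3f);
+ *	qos.ac[0].aifsn = 1;
+ *	qos.ac[0].edca_txop = cpu_to_le16(0);
+ */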
+
+/******************************************************************************
+ * (3)
+ * Add/Modify Stations Commands & Responses:
+ *
+ *****************************************************************************/
+/*
+ * Multi station support
+ */
+
+/* Special, dedicated locations within device's station table */
+#define	IL_AP_ID		0
+#define	IL_STA_ID		2
+#define	IL3945_BROADCAST_ID	24
+#define IL3945_STATION_COUNT	25
+#define IL4965_BROADCAST_ID	31
+#define	IL4965_STATION_COUNT	32
+
+#define	IL_STATION_COUNT	32	/* MAX(3945,4965) */
+#define	IL_INVALID_STATION	255
+
+#define STA_FLG_TX_RATE_MSK		cpu_to_le32(1 << 2)
+#define STA_FLG_PWR_SAVE_MSK		cpu_to_le32(1 << 8)
+#define STA_FLG_RTS_MIMO_PROT_MSK	cpu_to_le32(1 << 17)
+#define STA_FLG_AGG_MPDU_8US_MSK	cpu_to_le32(1 << 18)
+#define STA_FLG_MAX_AGG_SIZE_POS	(19)
+#define STA_FLG_MAX_AGG_SIZE_MSK	cpu_to_le32(3 << 19)
+#define STA_FLG_HT40_EN_MSK		cpu_to_le32(1 << 21)
+#define STA_FLG_MIMO_DIS_MSK		cpu_to_le32(1 << 22)
+#define STA_FLG_AGG_MPDU_DENSITY_POS	(23)
+#define STA_FLG_AGG_MPDU_DENSITY_MSK	cpu_to_le32(7 << 23)
+
+/* Use in mode field.  1: modify existing entry, 0: add new station entry */
+#define STA_CONTROL_MODIFY_MSK		0x01
+
+/* key flags __le16*/
+#define STA_KEY_FLG_ENCRYPT_MSK	cpu_to_le16(0x0007)
+#define STA_KEY_FLG_NO_ENC	cpu_to_le16(0x0000)
+#define STA_KEY_FLG_WEP		cpu_to_le16(0x0001)
+#define STA_KEY_FLG_CCMP	cpu_to_le16(0x0002)
+#define STA_KEY_FLG_TKIP	cpu_to_le16(0x0003)
+
+#define STA_KEY_FLG_KEYID_POS	8
+#define STA_KEY_FLG_INVALID	cpu_to_le16(0x0800)
+/* wep key is either from global key (0) or from station info array (1) */
+#define STA_KEY_FLG_MAP_KEY_MSK	cpu_to_le16(0x0008)
+
+/* wep key in STA: 5-bytes (0) or 13-bytes (1) */
+#define STA_KEY_FLG_KEY_SIZE_MSK	cpu_to_le16(0x1000)
+#define STA_KEY_MULTICAST_MSK		cpu_to_le16(0x4000)
+#define STA_KEY_MAX_NUM		8
+
+/* Flags indicate whether to modify vs. don't change various station params */
+#define	STA_MODIFY_KEY_MASK		0x01
+#define	STA_MODIFY_TID_DISABLE_TX	0x02
+#define	STA_MODIFY_TX_RATE_MSK		0x04
+#define STA_MODIFY_ADDBA_TID_MSK	0x08
+#define STA_MODIFY_DELBA_TID_MSK	0x10
+#define STA_MODIFY_SLEEP_TX_COUNT_MSK	0x20
+
+/* Receiver address (actually, Rx station's idx into station table),
+ * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
+#define BUILD_RAxTID(sta_id, tid)	(((sta_id) << 4) + (tid))
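+
+/*
+ * Example (illustrative only): station idx 3 with TID 5 packs to 0x35:
+ *
+ *	u8 ra_tid = BUILD_RAxTID(3, 5);
+ */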
+
+struct il4965_keyinfo {
+	__le16 key_flags;
+	u8 tkip_rx_tsc_byte2;	/* TSC[2] for key mix ph1 detection */
+	u8 reserved1;
+	__le16 tkip_rx_ttak[5];	/* 10-byte unicast TKIP TTAK */
+	u8 key_offset;
+	u8 reserved2;
+	u8 key[16];		/* 16-byte unicast decryption key */
+} __packed;
+
+/**
+ * struct sta_id_modify
+ * @addr: station's MAC address
+ * @sta_id: idx of station in uCode's station table
+ * @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change
+ *
+ * Driver selects unused table idx when adding new station,
+ * or the idx to a pre-existing station entry when modifying that station.
+ * Some idxes have special purposes (IL_AP_ID, idx 0, is for AP).
+ *
+ * modify_mask flags select which parameters to modify vs. leave alone.
+ */
+struct sta_id_modify {
+	u8 addr[ETH_ALEN];
+	__le16 reserved1;
+	u8 sta_id;
+	u8 modify_mask;
+	__le16 reserved2;
+} __packed;
+
+/*
+ * C_ADD_STA = 0x18 (command)
+ *
+ * The device contains an internal table of per-station information,
+ * with info on security keys, aggregation parameters, and Tx rates for
+ * initial Tx attempt and any retries (4965 devices use
+ * C_TX_LINK_QUALITY_CMD,
+ * 3945 uses C_RATE_SCALE to set up rate tables).
+ *
+ * C_ADD_STA sets up the table entry for one station, either creating
+ * a new entry, or modifying a pre-existing one.
+ *
+ * NOTE:  RXON command (without "associated" bit set) wipes the station table
+ *        clean.  Moving into RF_KILL state does this also.  Driver must set up
+ *        new station table before transmitting anything on the RXON channel
+ *        (except active scans or active measurements; those commands carry
+ *        their own txpower/rate setup data).
+ *
+ *        When getting started on a new channel, driver must set up the
+ *        IL_BROADCAST_ID entry (last entry in the table).  For a client
+ *        station in a BSS, once an AP is selected, driver sets up the AP STA
+ *        in the IL_AP_ID entry (1st entry in the table).  BROADCAST and AP
+ *        are all that are needed for a BSS client station.  If the device is
+ *        used as AP, or in an IBSS network, driver must set up station table
+ *        entries for all STAs in network, starting with idx IL_STA_ID.
+ */
+
+struct il3945_addsta_cmd {
+	u8 mode;		/* 1: modify existing, 0: add new station */
+	u8 reserved[3];
+	struct sta_id_modify sta;
+	struct il4965_keyinfo key;
+	__le32 station_flags;	/* STA_FLG_* */
+	__le32 station_flags_msk;	/* STA_FLG_* */
+
+	/* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
+	 * corresponding to bit (e.g. bit 5 controls TID 5).
+	 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
+	__le16 tid_disable_tx;
+
+	__le16 rate_n_flags;
+
+	/* TID for which to add block-ack support.
+	 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
+	u8 add_immediate_ba_tid;
+
+	/* TID for which to remove block-ack support.
+	 * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
+	u8 remove_immediate_ba_tid;
+
+	/* Starting Sequence Number for added block-ack support.
+	 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
+	__le16 add_immediate_ba_ssn;
+} __packed;
+
+struct il4965_addsta_cmd {
+	u8 mode;		/* 1: modify existing, 0: add new station */
+	u8 reserved[3];
+	struct sta_id_modify sta;
+	struct il4965_keyinfo key;
+	__le32 station_flags;	/* STA_FLG_* */
+	__le32 station_flags_msk;	/* STA_FLG_* */
+
+	/* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
+	 * corresponding to bit (e.g. bit 5 controls TID 5).
+	 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
+	__le16 tid_disable_tx;
+
+	__le16 reserved1;
+
+	/* TID for which to add block-ack support.
+	 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
+	u8 add_immediate_ba_tid;
+
+	/* TID for which to remove block-ack support.
+	 * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
+	u8 remove_immediate_ba_tid;
+
+	/* Starting Sequence Number for added block-ack support.
+	 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
+	__le16 add_immediate_ba_ssn;
+
+	/*
+	 * Number of packets OK to transmit to station even though
+	 * it is asleep -- used to synchronise PS-poll and u-APSD
+	 * responses while ucode keeps track of STA sleep state.
+	 */
+	__le16 sleep_tx_count;
+
+	__le16 reserved2;
+} __packed;
+
+/* Wrapper struct for 3945 and 4965 addsta_cmd structures */
+struct il_addsta_cmd {
+	u8 mode;		/* 1: modify existing, 0: add new station */
+	u8 reserved[3];
+	struct sta_id_modify sta;
+	struct il4965_keyinfo key;
+	__le32 station_flags;	/* STA_FLG_* */
+	__le32 station_flags_msk;	/* STA_FLG_* */
+
+	/* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
+	 * corresponding to bit (e.g. bit 5 controls TID 5).
+	 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
+	__le16 tid_disable_tx;
+
+	__le16 rate_n_flags;	/* 3945 only */
+
+	/* TID for which to add block-ack support.
+	 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
+	u8 add_immediate_ba_tid;
+
+	/* TID for which to remove block-ack support.
+	 * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
+	u8 remove_immediate_ba_tid;
+
+	/* Starting Sequence Number for added block-ack support.
+	 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
+	__le16 add_immediate_ba_ssn;
+
+	/*
+	 * Number of packets OK to transmit to station even though
+	 * it is asleep -- used to synchronise PS-poll and u-APSD
+	 * responses while ucode keeps track of STA sleep state.
+	 */
+	__le16 sleep_tx_count;
+
+	__le16 reserved2;
+} __packed;
+
+#define ADD_STA_SUCCESS_MSK		0x1
+#define ADD_STA_NO_ROOM_IN_TBL		0x2
+#define ADD_STA_NO_BLOCK_ACK_RESOURCE	0x4
+#define ADD_STA_MODIFY_NON_EXIST_STA	0x8
+/*
+ * C_ADD_STA = 0x18 (response)
+ */
+struct il_add_sta_resp {
+	u8 status;		/* ADD_STA_* */
+} __packed;
+
+#define REM_STA_SUCCESS_MSK              0x1
+/*
+ *  C_REM_STA = 0x19 (response)
+ */
+struct il_rem_sta_resp {
+	u8 status;
+} __packed;
+
+/*
+ *  C_REM_STA = 0x19 (command)
+ */
+struct il_rem_sta_cmd {
+	u8 num_sta;		/* number of removed stations */
+	u8 reserved[3];
+	u8 addr[ETH_ALEN];	/* MAC addr of the first station */
+	u8 reserved2[2];
+} __packed;
+
+#define IL_TX_FIFO_BK_MSK		cpu_to_le32(BIT(0))
+#define IL_TX_FIFO_BE_MSK		cpu_to_le32(BIT(1))
+#define IL_TX_FIFO_VI_MSK		cpu_to_le32(BIT(2))
+#define IL_TX_FIFO_VO_MSK		cpu_to_le32(BIT(3))
+#define IL_AGG_TX_QUEUE_MSK		cpu_to_le32(0xffc00)
+
+#define IL_DROP_SINGLE		0
+#define IL_DROP_SELECTED	1
+#define IL_DROP_ALL		2
+
+/*
+ * C_WEPKEY = 0x20 (command)
+ */
+struct il_wep_key {
+	u8 key_idx;
+	u8 key_offset;
+	u8 reserved1[2];
+	u8 key_size;
+	u8 reserved2[3];
+	u8 key[16];
+} __packed;
+
+struct il_wep_cmd {
+	u8 num_keys;
+	u8 global_key_type;
+	u8 flags;
+	u8 reserved;
+	struct il_wep_key key[0];
+} __packed;
+
+#define WEP_KEY_WEP_TYPE 1
+#define WEP_KEYS_MAX 4
+#define WEP_INVALID_OFFSET 0xff
+#define WEP_KEY_LEN_64 5
+#define WEP_KEY_LEN_128 13
+
+/******************************************************************************
+ * (4)
+ * Rx Responses:
+ *
+ *****************************************************************************/
+
+#define RX_RES_STATUS_NO_CRC32_ERROR	cpu_to_le32(1 << 0)
+#define RX_RES_STATUS_NO_RXE_OVERFLOW	cpu_to_le32(1 << 1)
+
+#define RX_RES_PHY_FLAGS_BAND_24_MSK	cpu_to_le16(1 << 0)
+#define RX_RES_PHY_FLAGS_MOD_CCK_MSK		cpu_to_le16(1 << 1)
+#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK	cpu_to_le16(1 << 2)
+#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK	cpu_to_le16(1 << 3)
+#define RX_RES_PHY_FLAGS_ANTENNA_MSK		0x70
+#define RX_RES_PHY_FLAGS_ANTENNA_POS		4
+#define RX_RES_PHY_FLAGS_AGG_MSK	cpu_to_le16(1 << 7)
+
+#define RX_RES_STATUS_SEC_TYPE_MSK	(0x7 << 8)
+#define RX_RES_STATUS_SEC_TYPE_NONE	(0x0 << 8)
+#define RX_RES_STATUS_SEC_TYPE_WEP	(0x1 << 8)
+#define RX_RES_STATUS_SEC_TYPE_CCMP	(0x2 << 8)
+#define RX_RES_STATUS_SEC_TYPE_TKIP	(0x3 << 8)
+#define	RX_RES_STATUS_SEC_TYPE_ERR	(0x7 << 8)
+
+#define RX_RES_STATUS_STATION_FOUND	(1 << 6)
+#define RX_RES_STATUS_NO_STATION_INFO_MISMATCH	(1 << 7)
+
+#define RX_RES_STATUS_DECRYPT_TYPE_MSK	(0x3 << 11)
+#define RX_RES_STATUS_NOT_DECRYPT	(0x0 << 11)
+#define RX_RES_STATUS_DECRYPT_OK	(0x3 << 11)
+#define RX_RES_STATUS_BAD_ICV_MIC	(0x1 << 11)
+#define RX_RES_STATUS_BAD_KEY_TTAK	(0x2 << 11)
+
+#define RX_MPDU_RES_STATUS_ICV_OK	(0x20)
+#define RX_MPDU_RES_STATUS_MIC_OK	(0x40)
+#define RX_MPDU_RES_STATUS_TTAK_OK	(1 << 7)
+#define RX_MPDU_RES_STATUS_DEC_DONE_MSK	(0x800)
+
+struct il3945_rx_frame_stats {
+	u8 phy_count;
+	u8 id;
+	u8 rssi;
+	u8 agc;
+	__le16 sig_avg;
+	__le16 noise_diff;
+	u8 payload[0];
+} __packed;
+
+struct il3945_rx_frame_hdr {
+	__le16 channel;
+	__le16 phy_flags;
+	u8 reserved1;
+	u8 rate;
+	__le16 len;
+	u8 payload[0];
+} __packed;
+
+struct il3945_rx_frame_end {
+	__le32 status;
+	__le64 timestamp;
+	__le32 beacon_timestamp;
+} __packed;
+
+/*
+ * N_3945_RX = 0x1b (response only, not a command)
+ *
+ * NOTE:  DO NOT dereference from casts to this structure
+ * It is provided only for calculating minimum data set size.
+ * The actual offsets of the hdr and end are dynamic based on
+ * stats.phy_count
+ */
+struct il3945_rx_frame {
+	struct il3945_rx_frame_stats stats;
+	struct il3945_rx_frame_hdr hdr;
+	struct il3945_rx_frame_end end;
+} __packed;
+
+#define IL39_RX_FRAME_SIZE	(4 + sizeof(struct il3945_rx_frame))
+
+/* Fixed (non-configurable) rx data from phy */
+
+#define IL49_RX_RES_PHY_CNT 14
+#define IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET	(4)
+#define IL49_RX_PHY_FLAGS_ANTENNAE_MASK	(0x70)
+#define IL49_AGC_DB_MASK			(0x3f80)	/* MASK(7,13) */
+#define IL49_AGC_DB_POS			(7)
+struct il4965_rx_non_cfg_phy {
+	__le16 ant_selection;	/* ant A bit 4, ant B bit 5, ant C bit 6 */
+	__le16 agc_info;	/* agc code 0:6, agc dB 7:13, reserved 14:15 */
+	u8 rssi_info[6];	/* we use even entries, 0/2/4 for A/B/C rssi */
+	u8 pad[0];
+} __packed;
+
+/*
+ * N_RX = 0xc3 (response only, not a command)
+ * Used only for legacy (non 11n) frames.
+ */
+struct il_rx_phy_res {
+	u8 non_cfg_phy_cnt;	/* non configurable DSP phy data byte count */
+	u8 cfg_phy_cnt;		/* configurable DSP phy data byte count */
+	u8 stat_id;		/* configurable DSP phy data set ID */
+	u8 reserved1;
+	__le64 timestamp;	/* TSF at on air rise */
+	__le32 beacon_time_stamp;	/* beacon at on-air rise */
+	__le16 phy_flags;	/* general phy flags: band, modulation, ... */
+	__le16 channel;		/* channel number */
+	u8 non_cfg_phy_buf[32];	/* for various implementations of non_cfg_phy */
+	__le32 rate_n_flags;	/* RATE_MCS_* */
+	__le16 byte_count;	/* frame's byte-count */
+	__le16 frame_time;	/* frame's time on the air */
+} __packed;
+
+struct il_rx_mpdu_res_start {
+	__le16 byte_count;
+	__le16 reserved;
+} __packed;
+
+/******************************************************************************
+ * (5)
+ * Tx Commands & Responses:
+ *
+ * Driver must place each C_TX command into one of the prioritized Tx
+ * queues in host DRAM, shared between driver and device (see comments for
+ * SCD registers and Tx/Rx Queues).  When the device's Tx scheduler and uCode
+ * are preparing to transmit, the device pulls the Tx command over the PCI
+ * bus via one of the device's Tx DMA channels, to fill an internal FIFO
+ * from which data will be transmitted.
+ *
+ * uCode handles all timing and protocol related to control frames
+ * (RTS/CTS/ACK), based on flags in the Tx command.  uCode and Tx scheduler
+ * handle reception of block-acks; uCode updates the host driver via
+ * N_COMPRESSED_BA.
+ *
+ * uCode handles retrying Tx when an ACK is expected but not received.
+ * This includes trying lower data rates than the one requested in the Tx
+ * command, as set up by the C_RATE_SCALE (for 3945) or
+ * C_TX_LINK_QUALITY_CMD (4965).
+ *
+ * Driver sets up transmit power for various rates via C_TX_PWR_TBL.
+ * This command must be executed after every RXON command, before Tx can occur.
+ *****************************************************************************/
+
+/* C_TX Tx flags field */
+
+/*
+ * 1: Use Request-To-Send protocol before this frame.
+ * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK.
+ */
+#define TX_CMD_FLG_RTS_MSK cpu_to_le32(1 << 1)
+
+/*
+ * 1: Transmit Clear-To-Send to self before this frame.
+ * Driver should set this for AUTH/DEAUTH/ASSOC-REQ/REASSOC mgmnt frames.
+ * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK.
+ */
+#define TX_CMD_FLG_CTS_MSK cpu_to_le32(1 << 2)
+
+/* 1: Expect ACK from receiving station
+ * 0: Don't expect ACK (MAC header's duration field should be 0)
+ * Set this for unicast frames, but not broadcast/multicast. */
+#define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3)
+
+/* For 4965 devices:
+ * 1: Use rate scale table (see C_TX_LINK_QUALITY_CMD).
+ *    Tx command's initial_rate_idx indicates first rate to try;
+ *    uCode walks through table for additional Tx attempts.
+ * 0: Use Tx rate/MCS from Tx command's rate_n_flags field.
+ *    This rate will be used for all Tx attempts; it will not be scaled. */
+#define TX_CMD_FLG_STA_RATE_MSK cpu_to_le32(1 << 4)
+
+/* 1: Expect immediate block-ack.
+ * Set when Txing a block-ack request frame.  Also set TX_CMD_FLG_ACK_MSK. */
+#define TX_CMD_FLG_IMM_BA_RSP_MASK  cpu_to_le32(1 << 6)
+
+/*
+ * 1: Frame requires full Tx-Op protection.
+ * Set this if either RTS or CTS Tx Flag gets set.
+ */
+#define TX_CMD_FLG_FULL_TXOP_PROT_MSK cpu_to_le32(1 << 7)
+
+/* Tx antenna selection field; used only for 3945, reserved (0) for 4965 devices.
+ * Set field to "0" to allow 3945 uCode to select antenna (normal usage). */
+#define TX_CMD_FLG_ANT_SEL_MSK cpu_to_le32(0xf00)
+#define TX_CMD_FLG_ANT_A_MSK cpu_to_le32(1 << 8)
+#define TX_CMD_FLG_ANT_B_MSK cpu_to_le32(1 << 9)
+
+/* 1: uCode overrides sequence control field in MAC header.
+ * 0: Driver provides sequence control field in MAC header.
+ * Set this for management frames, non-QOS data frames, non-unicast frames,
+ * and also in Tx command embedded in C_SCAN for active scans. */
+#define TX_CMD_FLG_SEQ_CTL_MSK cpu_to_le32(1 << 13)
+
+/* 1: This frame is non-last MPDU; more fragments are coming.
+ * 0: Last fragment, or not using fragmentation. */
+#define TX_CMD_FLG_MORE_FRAG_MSK cpu_to_le32(1 << 14)
+
+/* 1: uCode calculates and inserts Timestamp Function (TSF) in outgoing frame.
+ * 0: No TSF required in outgoing frame.
+ * Set this for transmitting beacons and probe responses. */
+#define TX_CMD_FLG_TSF_MSK cpu_to_le32(1 << 16)
+
+/* 1: Driver inserted 2 bytes pad after the MAC header, for (required) dword
+ *    alignment of frame's payload data field.
+ * 0: No pad
+ * Set this for MAC headers with 26 or 30 bytes, i.e. those with QOS or ADDR4
+ * field (but not both).  Driver must align frame data (i.e. data following
+ * MAC header) to DWORD boundary. */
+#define TX_CMD_FLG_MH_PAD_MSK cpu_to_le32(1 << 20)
+
+/* accelerate aggregation support
+ * 0 - no CCMP encryption; 1 - CCMP encryption */
+#define TX_CMD_FLG_AGG_CCMP_MSK cpu_to_le32(1 << 22)
+
+/* HCCA-AP - disable duration overwriting. */
+#define TX_CMD_FLG_DUR_MSK cpu_to_le32(1 << 25)
+
+/*
+ * TX command security control
+ */
+#define TX_CMD_SEC_WEP		0x01
+#define TX_CMD_SEC_CCM		0x02
+#define TX_CMD_SEC_TKIP		0x03
+#define TX_CMD_SEC_MSK		0x03
+#define TX_CMD_SEC_SHIFT	6
+#define TX_CMD_SEC_KEY128	0x08
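+
+/*
+ * Illustrative sketch (not part of the original driver): encoding a 104-bit
+ * WEP key in hardware key slot keyidx into a Tx command's sec_ctl field:
+ *
+ *	tx_cmd->sec_ctl = TX_CMD_SEC_WEP | TX_CMD_SEC_KEY128 |
+ *			  ((keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
+ */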
+
+/*
+ * C_TX = 0x1c (command)
+ */
+
+struct il3945_tx_cmd {
+	/*
+	 * MPDU byte count:
+	 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
+	 * + 8 byte IV for CCM or TKIP (not used for WEP)
+	 * + Data payload
+	 * + 8-byte MIC (not used for CCM/WEP)
+	 * NOTE:  Does not include Tx command bytes, post-MAC pad bytes,
+	 *        MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
+	 * Range: 14-2342 bytes.
+	 */
+	__le16 len;
+
+	/*
+	 * MPDU or MSDU byte count for next frame.
+	 * Used for fragmentation and bursting, but not 11n aggregation.
+	 * Same as "len", but for next frame.  Set to 0 if not applicable.
+	 */
+	__le16 next_frame_len;
+
+	__le32 tx_flags;	/* TX_CMD_FLG_* */
+
+	u8 rate;
+
+	/* Index of recipient station in uCode's station table */
+	u8 sta_id;
+	u8 tid_tspec;
+	u8 sec_ctl;
+	u8 key[16];
+	union {
+		u8 byte[8];
+		__le16 word[4];
+		__le32 dw[2];
+	} tkip_mic;
+	__le32 next_frame_info;
+	union {
+		__le32 life_time;
+		__le32 attempt;
+	} stop_time;
+	u8 supp_rates[2];
+	u8 rts_retry_limit;	/*byte 50 */
+	u8 data_retry_limit;	/*byte 51 */
+	union {
+		__le16 pm_frame_timeout;
+		__le16 attempt_duration;
+	} timeout;
+
+	/*
+	 * Duration of EDCA burst Tx Opportunity, in 32-usec units.
+	 * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
+	 */
+	__le16 driver_txop;
+
+	/*
+	 * MAC header goes here, followed by 2 bytes padding if MAC header
+	 * length is 26 or 30 bytes, followed by payload data
+	 */
+	u8 payload[0];
+	struct ieee80211_hdr hdr[0];
+} __packed;
+
+/*
+ * C_TX = 0x1c (response)
+ */
+struct il3945_tx_resp {
+	u8 failure_rts;
+	u8 failure_frame;
+	u8 bt_kill_count;
+	u8 rate;
+	__le32 wireless_media_time;
+	__le32 status;		/* TX status */
+} __packed;
+
+/*
+ * 4965 uCode updates these Tx attempt count values in host DRAM.
+ * Used for managing Tx retries when expecting block-acks.
+ * Driver should set these fields to 0.
+ */
+struct il_dram_scratch {
+	u8 try_cnt;		/* Tx attempts */
+	u8 bt_kill_cnt;		/* Tx attempts blocked by Bluetooth device */
+	__le16 reserved;
+} __packed;
+
+struct il_tx_cmd {
+	/*
+	 * MPDU byte count:
+	 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
+	 * + 8 byte IV for CCM or TKIP (not used for WEP)
+	 * + Data payload
+	 * + 8-byte MIC (not used for CCM/WEP)
+	 * NOTE:  Does not include Tx command bytes, post-MAC pad bytes,
+	 *        MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
+	 * Range: 14-2342 bytes.
+	 */
+	__le16 len;
+
+	/*
+	 * MPDU or MSDU byte count for next frame.
+	 * Used for fragmentation and bursting, but not 11n aggregation.
+	 * Same as "len", but for next frame.  Set to 0 if not applicable.
+	 */
+	__le16 next_frame_len;
+
+	__le32 tx_flags;	/* TX_CMD_FLG_* */
+
+	/* uCode may modify this field of the Tx command (in host DRAM!).
+	 * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */
+	struct il_dram_scratch scratch;
+
+	/* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */
+	__le32 rate_n_flags;	/* RATE_MCS_* */
+
+	/* Index of destination station in uCode's station table */
+	u8 sta_id;
+
+	/* Type of security encryption:  CCM or TKIP */
+	u8 sec_ctl;		/* TX_CMD_SEC_* */
+
+	/*
+	 * Index into rate table (see C_TX_LINK_QUALITY_CMD) for initial
+	 * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set.  Normally "0" for
+	 * data frames, this field may be used to selectively reduce initial
+	 * rate (via non-0 value) for special frames (e.g. management), while
+	 * still supporting rate scaling for all frames.
+	 */
+	u8 initial_rate_idx;
+	u8 reserved;
+	u8 key[16];
+	__le16 next_frame_flags;
+	__le16 reserved2;
+	union {
+		__le32 life_time;
+		__le32 attempt;
+	} stop_time;
+
+	/* Host DRAM physical address pointer to "scratch" in this command.
+	 * Must be dword aligned.  "0" in dram_lsb_ptr disables usage. */
+	__le32 dram_lsb_ptr;
+	u8 dram_msb_ptr;
+
+	u8 rts_retry_limit;	/*byte 50 */
+	u8 data_retry_limit;	/*byte 51 */
+	u8 tid_tspec;
+	union {
+		__le16 pm_frame_timeout;
+		__le16 attempt_duration;
+	} timeout;
+
+	/*
+	 * Duration of EDCA burst Tx Opportunity, in 32-usec units.
+	 * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
+	 */
+	__le16 driver_txop;
+
+	/*
+	 * MAC header goes here, followed by 2 bytes padding if MAC header
+	 * length is 26 or 30 bytes, followed by payload data
+	 */
+	u8 payload[0];
+	struct ieee80211_hdr hdr[0];
+} __packed;
+
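+/*
+ * Illustrative sketch: pointing the device at the "scratch" area of a Tx
+ * command in host DRAM.  scratch_phys is assumed to be the dword-aligned
+ * bus address of the scratch field, computed by the caller.
+ */
+static inline void
+il_example_set_scratch_ptr(struct il_tx_cmd *cmd, dma_addr_t scratch_phys)
+{
+	cmd->dram_lsb_ptr = cpu_to_le32(lower_32_bits(scratch_phys));
+	/* only one byte is available for the high part of the address */
+	cmd->dram_msb_ptr = (u8)(upper_32_bits(scratch_phys) & 0xff);
+}
+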
+/* TX command response is sent after *3945* transmission attempts.
+ *
+ * NOTES:
+ *
+ * TX_STATUS_FAIL_NEXT_FRAG
+ *
+ * If the fragment flag in the MAC header for the frame being transmitted
+ * is set and there is insufficient time to transmit the next frame, the
+ * TX status will be returned with 'TX_STATUS_FAIL_NEXT_FRAG'.
+ *
+ * TX_STATUS_FIFO_UNDERRUN
+ *
+ * Indicates the host did not provide bytes to the FIFO fast enough while
+ * a TX was in progress.
+ *
+ * TX_STATUS_FAIL_MGMNT_ABORT
+ *
+ * This status is only possible if the ABORT ON MGMT RX parameter was
+ * set to true with the TX command.
+ *
+ * If the MSB of the status parameter is set then an abort sequence is
+ * required.  This sequence consists of the host activating the TX Abort
+ * control line, and then waiting for the TX Abort command response.  This
+ * indicates that the device is no longer in a transmit state, and that the
+ * command FIFO has been cleared.  The host must then deactivate the TX Abort
+ * control line.  Receiving is still allowed in this case.
+ */
+enum {
+	TX_3945_STATUS_SUCCESS = 0x01,
+	TX_3945_STATUS_DIRECT_DONE = 0x02,
+	TX_3945_STATUS_FAIL_SHORT_LIMIT = 0x82,
+	TX_3945_STATUS_FAIL_LONG_LIMIT = 0x83,
+	TX_3945_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
+	TX_3945_STATUS_FAIL_MGMNT_ABORT = 0x85,
+	TX_3945_STATUS_FAIL_NEXT_FRAG = 0x86,
+	TX_3945_STATUS_FAIL_LIFE_EXPIRE = 0x87,
+	TX_3945_STATUS_FAIL_DEST_PS = 0x88,
+	TX_3945_STATUS_FAIL_ABORTED = 0x89,
+	TX_3945_STATUS_FAIL_BT_RETRY = 0x8a,
+	TX_3945_STATUS_FAIL_STA_INVALID = 0x8b,
+	TX_3945_STATUS_FAIL_FRAG_DROPPED = 0x8c,
+	TX_3945_STATUS_FAIL_TID_DISABLE = 0x8d,
+	TX_3945_STATUS_FAIL_FRAME_FLUSHED = 0x8e,
+	TX_3945_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
+	TX_3945_STATUS_FAIL_TX_LOCKED = 0x90,
+	TX_3945_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
+};
+
+/*
+ * TX command response is sent after *4965* transmission attempts.
+ *
+ * Both postpone and abort statuses are expected behavior from uCode; no
+ * special handling is required from the driver, except for RFKILL_FLUSH,
+ * which requires a Tx flush host command to flush all Tx frames in the queues.
+ */
+enum {
+	TX_STATUS_SUCCESS = 0x01,
+	TX_STATUS_DIRECT_DONE = 0x02,
+	/* postpone TX */
+	TX_STATUS_POSTPONE_DELAY = 0x40,
+	TX_STATUS_POSTPONE_FEW_BYTES = 0x41,
+	TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43,
+	TX_STATUS_POSTPONE_CALC_TTAK = 0x44,
+	/* abort TX */
+	TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81,
+	TX_STATUS_FAIL_SHORT_LIMIT = 0x82,
+	TX_STATUS_FAIL_LONG_LIMIT = 0x83,
+	TX_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
+	TX_STATUS_FAIL_DRAIN_FLOW = 0x85,
+	TX_STATUS_FAIL_RFKILL_FLUSH = 0x86,
+	TX_STATUS_FAIL_LIFE_EXPIRE = 0x87,
+	TX_STATUS_FAIL_DEST_PS = 0x88,
+	TX_STATUS_FAIL_HOST_ABORTED = 0x89,
+	TX_STATUS_FAIL_BT_RETRY = 0x8a,
+	TX_STATUS_FAIL_STA_INVALID = 0x8b,
+	TX_STATUS_FAIL_FRAG_DROPPED = 0x8c,
+	TX_STATUS_FAIL_TID_DISABLE = 0x8d,
+	TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e,
+	TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
+	TX_STATUS_FAIL_PASSIVE_NO_RX = 0x90,
+	TX_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
+};
+
+#define	TX_PACKET_MODE_REGULAR		0x0000
+#define	TX_PACKET_MODE_BURST_SEQ	0x0100
+#define	TX_PACKET_MODE_BURST_FIRST	0x0200
+
+enum {
+	TX_POWER_PA_NOT_ACTIVE = 0x0,
+};
+
+enum {
+	TX_STATUS_MSK = 0x000000ff,	/* bits 0:7 */
+	TX_STATUS_DELAY_MSK = 0x00000040,
+	TX_STATUS_ABORT_MSK = 0x00000080,
+	TX_PACKET_MODE_MSK = 0x0000ff00,	/* bits 8:15 */
+	TX_FIFO_NUMBER_MSK = 0x00070000,	/* bits 16:18 */
+	TX_RESERVED = 0x00780000,	/* bits 19:22 */
+	TX_POWER_PA_DETECT_MSK = 0x7f800000,	/* bits 23:30 */
+	TX_ABORT_REQUIRED_MSK = 0x80000000,	/* bits 31:31 */
+};
+
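+/*
+ * Illustrative sketch: decoding the 32-bit status word using the masks
+ * above.  The low byte carries the TX_STATUS_* code; bit 31 signals that
+ * the abort handshake described earlier is required.
+ */
+static inline u8 il_example_tx_status_code(__le32 status)
+{
+	return le32_to_cpu(status) & TX_STATUS_MSK;
+}
+
+static inline bool il_example_tx_abort_required(__le32 status)
+{
+	return !!(le32_to_cpu(status) & TX_ABORT_REQUIRED_MSK);
+}
+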
+/* *******************************
+ * TX aggregation status
+ ******************************* */
+
+enum {
+	AGG_TX_STATE_TRANSMITTED = 0x00,
+	AGG_TX_STATE_UNDERRUN_MSK = 0x01,
+	AGG_TX_STATE_FEW_BYTES_MSK = 0x04,
+	AGG_TX_STATE_ABORT_MSK = 0x08,
+	AGG_TX_STATE_LAST_SENT_TTL_MSK = 0x10,
+	AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK = 0x20,
+	AGG_TX_STATE_SCD_QUERY_MSK = 0x80,
+	AGG_TX_STATE_TEST_BAD_CRC32_MSK = 0x100,
+	AGG_TX_STATE_RESPONSE_MSK = 0x1ff,
+	AGG_TX_STATE_DUMP_TX_MSK = 0x200,
+	AGG_TX_STATE_DELAY_TX_MSK = 0x400
+};
+
+#define AGG_TX_STATUS_MSK	0x00000fff	/* bits 0:11 */
+#define AGG_TX_TRY_MSK		0x0000f000	/* bits 12:15 */
+
+#define AGG_TX_STATE_LAST_SENT_MSK  (AGG_TX_STATE_LAST_SENT_TTL_MSK | \
+				     AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK)
+
+/* # tx attempts for first frame in aggregation */
+#define AGG_TX_STATE_TRY_CNT_POS 12
+#define AGG_TX_STATE_TRY_CNT_MSK 0xf000
+
+/* Command ID and sequence number of Tx command for this frame */
+#define AGG_TX_STATE_SEQ_NUM_POS 16
+#define AGG_TX_STATE_SEQ_NUM_MSK 0xffff0000
+
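+/*
+ * Illustrative sketch: pulling the retry count for the first aggregated
+ * frame out of a per-frame __le16 status word (see struct agg_tx_status
+ * below).
+ */
+static inline u8 il_example_agg_try_cnt(__le16 status)
+{
+	return (le16_to_cpu(status) & AGG_TX_STATE_TRY_CNT_MSK) >>
+	       AGG_TX_STATE_TRY_CNT_POS;
+}
+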
+/*
+ * C_TX = 0x1c (response)
+ *
+ * This response may be in one of two slightly different formats, indicated
+ * by the frame_count field:
+ *
+ * 1)  No aggregation (frame_count == 1).  This reports Tx results for
+ *     a single frame.  Multiple attempts, at various bit rates, may have
+ *     been made for this frame.
+ *
+ * 2)  Aggregation (frame_count > 1).  This reports Tx results for
+ *     2 or more frames that used block-acknowledge.  All frames were
+ *     transmitted at same rate.  Rate scaling may have been used if first
+ *     frame in this new agg block failed in previous agg block(s).
+ *
+ *     Note that, for aggregation, ACK (block-ack) status is not delivered here;
+ *     block-ack has not been received by the time the 4965 device records
+ *     this status.
+ *     This status relates to reasons the tx might have been blocked or aborted
+ *     within the sending station (this 4965 device), rather than whether it was
+ *     received successfully by the destination station.
+ */
+struct agg_tx_status {
+	__le16 status;
+	__le16 sequence;
+} __packed;
+
+struct il4965_tx_resp {
+	u8 frame_count;		/* 1 no aggregation, >1 aggregation */
+	u8 bt_kill_count;	/* # blocked by bluetooth (unused for agg) */
+	u8 failure_rts;		/* # failures due to unsuccessful RTS */
+	u8 failure_frame;	/* # failures due to no ACK (unused for agg) */
+
+	/* For non-agg:  Rate at which frame was successful.
+	 * For agg:  Rate at which all frames were transmitted. */
+	__le32 rate_n_flags;	/* RATE_MCS_*  */
+
+	/* For non-agg:  RTS + CTS + frame tx attempts time + ACK.
+	 * For agg:  RTS + CTS + aggregation tx time + block-ack time. */
+	__le16 wireless_media_time;	/* uSecs */
+
+	__le16 reserved;
+	__le32 pa_power1;	/* RF power amplifier measurement (not used) */
+	__le32 pa_power2;
+
+	/*
+	 * For non-agg:  frame status TX_STATUS_*
+	 * For agg:  status of 1st frame, AGG_TX_STATE_*; other frame status
+	 *           fields follow this one, up to frame_count.
+	 *           Bit fields:
+	 *           11- 0:  AGG_TX_STATE_* status code
+	 *           15-12:  Retry count for 1st frame in aggregation (retries
+	 *                   occur if tx failed for this frame when it was a
+	 *                   member of a previous aggregation block).  If rate
+	 *                   scaling is used, retry count indicates the rate
+	 *                   table entry used for all frames in the new agg.
+	 *           31-16:  Sequence # for this frame's Tx cmd (not SSN!)
+	 */
+	union {
+		__le32 status;
+		struct agg_tx_status agg_status[0];	/* for each agg frame */
+	} u;
+} __packed;
+
+/*
+ * N_COMPRESSED_BA = 0xc5 (response only, not a command)
+ *
+ * Reports Block-Acknowledge from recipient station
+ */
+struct il_compressed_ba_resp {
+	__le32 sta_addr_lo32;
+	__le16 sta_addr_hi16;
+	__le16 reserved;
+
+	/* Index of recipient (BA-sending) station in uCode's station table */
+	u8 sta_id;
+	u8 tid;
+	__le16 seq_ctl;
+	__le64 bitmap;
+	__le16 scd_flow;
+	__le16 scd_ssn;
+} __packed;
+
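+/*
+ * Illustrative sketch: counting frames acknowledged in a compressed
+ * block-ack by counting set bits in the bitmap.  The real driver also
+ * aligns the bitmap to the start sequence number and masks it against
+ * the frames actually sent; that bookkeeping is omitted here.
+ */
+static inline int
+il_example_ba_acked_count(const struct il_compressed_ba_resp *ba)
+{
+	return hweight64(le64_to_cpu(ba->bitmap));
+}
+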
+/*
+ * C_TX_PWR_TBL = 0x97 (command, has simple generic response)
+ *
+ * See details under "TXPOWER" in 4965.h.
+ */
+
+struct il3945_txpowertable_cmd {
+	u8 band;		/* 0: 5 GHz, 1: 2.4 GHz */
+	u8 reserved;
+	__le16 channel;
+	struct il3945_power_per_rate power[IL_MAX_RATES];
+} __packed;
+
+struct il4965_txpowertable_cmd {
+	u8 band;		/* 0: 5 GHz, 1: 2.4 GHz */
+	u8 reserved;
+	__le16 channel;
+	struct il4965_tx_power_db tx_power;
+} __packed;
+
+/**
+ * struct il3945_rate_scaling_cmd - Rate Scaling Command & Response
+ *
+ * C_RATE_SCALE = 0x47 (command, has simple generic response)
+ *
+ * NOTE: The table of rates passed to the uCode via the
+ * RATE_SCALE command sets up the corresponding order of
+ * rates used for all related commands, including rate
+ * masks, etc.
+ *
+ * For example, if you set 9MB (PLCP 0x0f) as the first
+ * rate in the rate table, the bit mask for that rate
+ * when passed through ofdm_basic_rates on the C_RXON
+ * command would be bit 0 (1 << 0)
+ */
+struct il3945_rate_scaling_info {
+	__le16 rate_n_flags;
+	u8 try_cnt;
+	u8 next_rate_idx;
+} __packed;
+
+struct il3945_rate_scaling_cmd {
+	u8 table_id;
+	u8 reserved[3];
+	struct il3945_rate_scaling_info table[IL_MAX_RATES];
+} __packed;
+
+/* RS_NEW_API: only TLC_RTS remains and moved to bit 0 */
+#define  LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK	(1 << 0)
+
+/* # of EDCA prioritized tx fifos */
+#define  LINK_QUAL_AC_NUM AC_NUM
+
+/* # entries in rate scale table to support Tx retries */
+#define  LINK_QUAL_MAX_RETRY_NUM 16
+
+/* Tx antenna selection values */
+#define  LINK_QUAL_ANT_A_MSK (1 << 0)
+#define  LINK_QUAL_ANT_B_MSK (1 << 1)
+#define  LINK_QUAL_ANT_MSK   (LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK)
+
+/**
+ * struct il_link_qual_general_params
+ *
+ * Used in C_TX_LINK_QUALITY_CMD
+ */
+struct il_link_qual_general_params {
+	u8 flags;
+
+	/* No entries at or above this (driver chosen) idx contain MIMO */
+	u8 mimo_delimiter;
+
+	/* Best single antenna to use for single stream (legacy, SISO). */
+	u8 single_stream_ant_msk;	/* LINK_QUAL_ANT_* */
+
+	/* Best antennas to use for MIMO (unused for 4965, assumes both). */
+	u8 dual_stream_ant_msk;	/* LINK_QUAL_ANT_* */
+
+	/*
+	 * If driver needs to use different initial rates for different
+	 * EDCA QOS access categories (as implemented by tx fifos 0-3),
+	 * this table will set that up, by indicating the idxes in the
+	 * rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table at which to start.
+	 * Otherwise, driver should set all entries to 0.
+	 *
+	 * Entry usage:
+	 * 0 = Background, 1 = Best Effort (normal), 2 = Video, 3 = Voice
+	 * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3.
+	 */
+	u8 start_rate_idx[LINK_QUAL_AC_NUM];
+} __packed;
+
+#define LINK_QUAL_AGG_TIME_LIMIT_DEF	(4000)	/* 4 milliseconds */
+#define LINK_QUAL_AGG_TIME_LIMIT_MAX	(8000)
+#define LINK_QUAL_AGG_TIME_LIMIT_MIN	(100)
+
+#define LINK_QUAL_AGG_DISABLE_START_DEF	(3)
+#define LINK_QUAL_AGG_DISABLE_START_MAX	(255)
+#define LINK_QUAL_AGG_DISABLE_START_MIN	(0)
+
+#define LINK_QUAL_AGG_FRAME_LIMIT_DEF	(31)
+#define LINK_QUAL_AGG_FRAME_LIMIT_MAX	(63)
+#define LINK_QUAL_AGG_FRAME_LIMIT_MIN	(0)
+
+/**
+ * struct il_link_qual_agg_params
+ *
+ * Used in C_TX_LINK_QUALITY_CMD
+ */
+struct il_link_qual_agg_params {
+
+	/*
+	 * Maximum number of uSec in aggregation.
+	 * Default is 4000 (4 milliseconds) if not configured in .cfg.
+	 */
+	__le16 agg_time_limit;
+
+	/*
+	 * Number of Tx retries allowed for a frame, before that frame will
+	 * no longer be considered for the start of an aggregation sequence
+	 * (scheduler will then try to tx it as single frame).
+	 * Driver should set this to 3.
+	 */
+	u8 agg_dis_start_th;
+
+	/*
+	 * Maximum number of frames in aggregation.
+	 * 0 = no limit (default).  1 = no aggregation.
+	 * Other values = max # frames in aggregation.
+	 */
+	u8 agg_frame_cnt_limit;
+
+	__le32 reserved;
+} __packed;
+
+/*
+ * C_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
+ *
+ * For 4965 devices only; 3945 uses C_RATE_SCALE.
+ *
+ * Each station in the 4965 device's internal station table has its own
+ * table of 16 Tx rates and modulation modes (e.g. legacy/SISO/MIMO) for
+ * retrying Tx when an ACK is not received.  This command replaces the
+ * entire table for one station.
+ *
+ * NOTE:  Station must already be in 4965 device's station table.
+ *	  Use C_ADD_STA.
+ *
+ * The rate scaling procedures described below work well.  Of course, other
+ * procedures are possible, and may work better for particular environments.
+ *
+ *
+ * FILLING THE RATE TBL
+ *
+ * Given a particular initial rate and mode, as determined by the rate
+ * scaling algorithm described below, the Linux driver uses the following
+ * formula to fill the rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table in the
+ * Link Quality command:
+ *
+ *
+ * 1)  If using High-throughput (HT) (SISO or MIMO) initial rate:
+ *     a) Use this same initial rate for first 3 entries.
+ *     b) Find next lower available rate using same mode (SISO or MIMO),
+ *        use for next 3 entries.  If no lower rate available, switch to
+ *        legacy mode (no HT40 channel, no MIMO, no short guard interval).
+ *     c) If using MIMO, set command's mimo_delimiter to number of entries
+ *        using MIMO (3 or 6).
+ *     d) After trying 2 HT rates, switch to legacy mode (no HT40 channel,
+ *        no MIMO, no short guard interval), at the next lower bit rate
+ *        (e.g. if second HT bit rate was 54, try 48 legacy), and follow
+ *        legacy procedure for remaining table entries.
+ *
+ * 2)  If using legacy initial rate:
+ *     a) Use the initial rate for only one entry.
+ *     b) For each following entry, reduce the rate to next lower available
+ *        rate, until reaching the lowest available rate.
+ *     c) When reducing rate, also switch antenna selection.
+ *     d) Once lowest available rate is reached, repeat this rate until
+ *        rate table is filled (16 entries), switching antenna each entry.
+ *
+ *
+ * ACCUMULATING HISTORY
+ *
+ * The rate scaling algorithm for 4965 devices, as implemented in Linux driver,
+ * uses two sets of frame Tx success history:  One for the current/active
+ * modulation mode, and one for a speculative/search mode that is being
+ * attempted. If the speculative mode turns out to be more effective (i.e.
+ * actual transfer rate is better), then the driver continues to use the
+ * speculative mode as the new current active mode.
+ *
+ * Each history set contains, separately for each possible rate, data for a
+ * sliding win of the 62 most recent tx attempts at that rate.  The data
+ * includes a shifting bitmap of success(1)/failure(0), and sums of successful
+ * and attempted frames, from which the driver can additionally calculate a
+ * success ratio (success / attempted) and number of failures
+ * (attempted - success), and control the size of the win (attempted).
+ * The driver uses the bit map to remove successes from the success sum, as
+ * the oldest tx attempts fall out of the win.
+ *
+ * When the 4965 device makes multiple tx attempts for a given frame, each
+ * attempt might be at a different rate, and have different modulation
+ * characteristics (e.g. antenna, fat channel, short guard interval), as set
+ * up in the rate scaling table in the Link Quality command.  The driver must
+ * determine which rate table entry was used for each tx attempt, to determine
+ * which rate-specific history to update, and record only those attempts that
+ * match the modulation characteristics of the history set.
+ *
+ * When using block-ack (aggregation), all frames are transmitted at the same
+ * rate, since there is no per-attempt acknowledgment from the destination
+ * station.  The Tx response struct il_tx_resp indicates the Tx rate in
+ * rate_n_flags field.  After receiving a block-ack, the driver can update
+ * history for the entire block all at once.
+ *
+ *
+ * FINDING BEST STARTING RATE:
+ *
+ * When working with a selected initial modulation mode (see below), the
+ * driver attempts to find a best initial rate.  The initial rate is the
+ * first entry in the Link Quality command's rate table.
+ *
+ * 1)  Calculate actual throughput (success ratio * expected throughput, see
+ *     table below) for current initial rate.  Do this only if enough frames
+ *     have been attempted to make the value meaningful:  at least 6 failed
+ *     tx attempts, or at least 8 successes.  If not enough, don't try rate
+ *     scaling yet.
+ *
+ * 2)  Find available rates adjacent to current initial rate.  Available means:
+ *     a)  supported by hardware &&
+ *     b)  supported by association &&
+ *     c)  within any constraints selected by user
+ *
+ * 3)  Gather measured throughputs for adjacent rates.  These might not have
+ *     enough history to calculate a throughput.  That's okay, we might try
+ *     using one of them anyway!
+ *
+ * 4)  Try decreasing rate if, for current rate:
+ *     a)  success ratio is < 15% ||
+ *     b)  lower adjacent rate has better measured throughput ||
+ *     c)  higher adjacent rate has worse throughput, and lower is unmeasured
+ *
+ *     As a sanity check, if decrease was determined above, leave rate
+ *     unchanged if:
+ *     a)  lower rate unavailable
+ *     b)  success ratio at current rate > 85% (very good)
+ *     c)  current measured throughput is better than expected throughput
+ *         of lower rate (under perfect 100% tx conditions, see table below)
+ *
+ * 5)  Try increasing rate if, for current rate:
+ *     a)  success ratio is < 15% ||
+ *     b)  both adjacent rates' throughputs are unmeasured (try it!) ||
+ *     c)  higher adjacent rate has better measured throughput ||
+ *     d)  lower adjacent rate has worse throughput, and higher is unmeasured
+ *
+ *     As a sanity check, if increase was determined above, leave rate
+ *     unchanged if:
+ *     a)  success ratio at current rate < 70%.  This is not particularly
+ *         good performance; higher rate is sure to have poorer success.
+ *
+ * 6)  Re-evaluate the rate after each tx frame.  If working with block-
+ *     acknowledge, history and stats may be calculated for the entire
+ *     block (including prior history that fits within the history wins),
+ *     before re-evaluation.
+ *
+ * FINDING BEST STARTING MODULATION MODE:
+ *
+ * After working with a modulation mode for a "while" (and doing rate scaling),
+ * the driver searches for a new initial mode in an attempt to improve
+ * throughput.  The "while" is measured by numbers of attempted frames:
+ *
+ * For legacy mode, search for new mode after:
+ *   480 successful frames, or 160 failed frames
+ * For high-throughput modes (SISO or MIMO), search for new mode after:
+ *   4500 successful frames, or 400 failed frames
+ *
+ * Mode switch possibilities are (3 for each mode):
+ *
+ * For legacy:
+ *   Change antenna, try SISO (if HT association), try MIMO (if HT association)
+ * For SISO:
+ *   Change antenna, try MIMO, try shortened guard interval (SGI)
+ * For MIMO:
+ *   Try SISO antenna A, SISO antenna B, try shortened guard interval (SGI)
+ *
+ * When trying a new mode, use the same bit rate as the old/current mode when
+ * trying antenna switches and shortened guard interval.  When switching to
+ * SISO from MIMO or legacy, or to MIMO from SISO or legacy, use a rate
+ * for which the expected throughput (under perfect conditions) is about the
+ * same or slightly better than the actual measured throughput delivered by
+ * the old/current mode.
+ *
+ * Actual throughput can be estimated by multiplying the expected throughput
+ * by the success ratio (successful / attempted tx frames).  Frame size is
+ * not considered in this calculation; it assumes that frame size will average
+ * out to be fairly consistent over several samples.  The following are
+ * metric values for expected throughput assuming 100% success ratio.
+ * Only G band has support for CCK rates:
+ *
+ *           RATE:  1    2    5   11    6   9   12   18   24   36   48   54   60
+ *
+ *              G:  7   13   35   58   40  57   72   98  121  154  177  186  186
+ *              A:  0    0    0    0   40  57   72   98  121  154  177  186  186
+ *     SISO 20MHz:  0    0    0    0   42  42   76  102  124  159  183  193  202
+ * SGI SISO 20MHz:  0    0    0    0   46  46   82  110  132  168  192  202  211
+ *     MIMO 20MHz:  0    0    0    0   74  74  123  155  179  214  236  244  251
+ * SGI MIMO 20MHz:  0    0    0    0   81  81  131  164  188  222  243  251  257
+ *     SISO 40MHz:  0    0    0    0   77  77  127  160  184  220  242  250  257
+ * SGI SISO 40MHz:  0    0    0    0   83  83  135  169  193  229  250  257  264
+ *     MIMO 40MHz:  0    0    0    0  123 123  182  214  235  264  279  285  289
+ * SGI MIMO 40MHz:  0    0    0    0  131 131  191  222  242  270  284  289  293
+ *
+ * After the new mode has been tried for a short while (minimum of 6 failed
+ * frames or 8 successful frames), compare success ratio and actual throughput
+ * estimate of the new mode with the old.  If either is better with the new
+ * mode, continue to use the new mode.
+ *
+ * Continue comparing modes until all 3 possibilities have been tried.
+ * If moving from legacy to HT, try all 3 possibilities from the new HT
+ * mode.  After trying all 3, a best mode is found.  Continue to use this mode
+ * for the longer "while" described above (e.g. 480 successful frames for
+ * legacy), and then repeat the search process.
+ *
+ */
+struct il_link_quality_cmd {
+
+	/* Index of destination/recipient station in uCode's station table */
+	u8 sta_id;
+	u8 reserved1;
+	__le16 control;		/* not used */
+	struct il_link_qual_general_params general_params;
+	struct il_link_qual_agg_params agg_params;
+
+	/*
+	 * Rate info; when using rate-scaling, Tx command's initial_rate_idx
+	 * specifies 1st Tx rate attempted, via idx into this table.
+	 * The 4965 device works its way through the table when retrying Tx.
+	 */
+	struct {
+		__le32 rate_n_flags;	/* RATE_MCS_*, RATE_* */
+	} rs_table[LINK_QUAL_MAX_RETRY_NUM];
+	__le32 reserved2;
+} __packed;
+
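+/*
+ * Illustrative sketch of the legacy fill procedure described above
+ * (case 2): step down through caller-supplied available rates, then
+ * repeat the lowest until all 16 entries are filled.  rates_desc (plain
+ * host-order rate_n_flags values, highest rate first) and initial_idx
+ * are assumptions for the example; the antenna toggling of rule 2c) is
+ * omitted for brevity.
+ */
+static inline void
+il_example_fill_legacy_rs_table(struct il_link_quality_cmd *lq,
+				const u32 *rates_desc, int n_rates,
+				int initial_idx)
+{
+	int i, idx = initial_idx;
+
+	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+		lq->rs_table[i].rate_n_flags = cpu_to_le32(rates_desc[idx]);
+		if (idx < n_rates - 1)
+			idx++;
+	}
+}
+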
+/*
+ * BT configuration enable flags:
+ *   bit 0 - 1: BT channel announcement enabled
+ *           0: disable
+ *   bit 1 - 1: priority of BT device enabled
+ *           0: disable
+ */
+#define BT_COEX_DISABLE (0x0)
+#define BT_ENABLE_CHANNEL_ANNOUNCE BIT(0)
+#define BT_ENABLE_PRIORITY	   BIT(1)
+
+#define BT_COEX_ENABLE  (BT_ENABLE_CHANNEL_ANNOUNCE | BT_ENABLE_PRIORITY)
+
+#define BT_LEAD_TIME_DEF (0x1E)
+
+#define BT_MAX_KILL_DEF (0x5)
+
+/*
+ * C_BT_CONFIG = 0x9b (command, has simple generic response)
+ *
+ * 3945 and 4965 devices support hardware handshake with Bluetooth device on
+ * same platform.  Bluetooth device alerts wireless device when it will Tx;
+ * wireless device can delay or kill its own Tx to accommodate.
+ */
+struct il_bt_cmd {
+	u8 flags;
+	u8 lead_time;
+	u8 max_kill;
+	u8 reserved;
+	__le32 kill_ack_mask;
+	__le32 kill_cts_mask;
+} __packed;
+
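+/*
+ * Illustrative sketch: a typical C_BT_CONFIG payload built from the
+ * defaults above.  Whether coexistence is active would normally come
+ * from driver configuration; here it is a plain parameter.
+ */
+static inline void il_example_fill_bt_cmd(struct il_bt_cmd *bt, bool coex)
+{
+	bt->flags = coex ? BT_COEX_ENABLE : BT_COEX_DISABLE;
+	bt->lead_time = BT_LEAD_TIME_DEF;
+	bt->max_kill = BT_MAX_KILL_DEF;
+	bt->kill_ack_mask = 0;
+	bt->kill_cts_mask = 0;
+}
+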
+/******************************************************************************
+ * (6)
+ * Spectrum Management (802.11h) Commands, Responses, Notifications:
+ *
+ *****************************************************************************/
+
+/*
+ * Spectrum Management
+ */
+#define MEASUREMENT_FILTER_FLAG (RXON_FILTER_PROMISC_MSK         | \
+				 RXON_FILTER_CTL2HOST_MSK        | \
+				 RXON_FILTER_ACCEPT_GRP_MSK      | \
+				 RXON_FILTER_DIS_DECRYPT_MSK     | \
+				 RXON_FILTER_DIS_GRP_DECRYPT_MSK | \
+				 RXON_FILTER_ASSOC_MSK           | \
+				 RXON_FILTER_BCON_AWARE_MSK)
+
+struct il_measure_channel {
+	__le32 duration;	/* measurement duration in extended beacon
+				 * format */
+	u8 channel;		/* channel to measure */
+	u8 type;		/* see enum il_measure_type */
+	__le16 reserved;
+} __packed;
+
+/*
+ * C_SPECTRUM_MEASUREMENT = 0x74 (command)
+ */
+struct il_spectrum_cmd {
+	__le16 len;		/* number of bytes starting from token */
+	u8 token;		/* token id */
+	u8 id;			/* measurement id -- 0 or 1 */
+	u8 origin;		/* 0 = TGh, 1 = other, 2 = TGk */
+	u8 periodic;		/* 1 = periodic */
+	__le16 path_loss_timeout;
+	__le32 start_time;	/* start time in extended beacon format */
+	__le32 reserved2;
+	__le32 flags;		/* rxon flags */
+	__le32 filter_flags;	/* rxon filter flags */
+	__le16 channel_count;	/* minimum 1, maximum 10 */
+	__le16 reserved3;
+	struct il_measure_channel channels[10];
+} __packed;
+
+/*
+ * C_SPECTRUM_MEASUREMENT = 0x74 (response)
+ */
+struct il_spectrum_resp {
+	u8 token;
+	u8 id;			/* id of the prior command replaced, or 0xff */
+	__le16 status;		/* 0 - command will be handled
+				 * 1 - cannot handle (conflicts with another
+				 *     measurement) */
+} __packed;
+
+enum il_measurement_state {
+	IL_MEASUREMENT_START = 0,
+	IL_MEASUREMENT_STOP = 1,
+};
+
+enum il_measurement_status {
+	IL_MEASUREMENT_OK = 0,
+	IL_MEASUREMENT_CONCURRENT = 1,
+	IL_MEASUREMENT_CSA_CONFLICT = 2,
+	IL_MEASUREMENT_TGH_CONFLICT = 3,
+	/* 4-5 reserved */
+	IL_MEASUREMENT_STOPPED = 6,
+	IL_MEASUREMENT_TIMEOUT = 7,
+	IL_MEASUREMENT_PERIODIC_FAILED = 8,
+};
+
+#define NUM_ELEMENTS_IN_HISTOGRAM 8
+
+struct il_measurement_histogram {
+	__le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM];	/* in 0.8usec counts */
+	__le32 cck[NUM_ELEMENTS_IN_HISTOGRAM];	/* in 1usec counts */
+} __packed;
+
+/* clear channel availability counters */
+struct il_measurement_cca_counters {
+	__le32 ofdm;
+	__le32 cck;
+} __packed;
+
+enum il_measure_type {
+	IL_MEASURE_BASIC = (1 << 0),
+	IL_MEASURE_CHANNEL_LOAD = (1 << 1),
+	IL_MEASURE_HISTOGRAM_RPI = (1 << 2),
+	IL_MEASURE_HISTOGRAM_NOISE = (1 << 3),
+	IL_MEASURE_FRAME = (1 << 4),
+	/* bits 5:6 are reserved */
+	IL_MEASURE_IDLE = (1 << 7),
+};
+
+/*
+ * N_SPECTRUM_MEASUREMENT = 0x75 (notification only, not a command)
+ */
+struct il_spectrum_notification {
+	u8 id;			/* measurement id -- 0 or 1 */
+	u8 token;
+	u8 channel_idx;		/* idx in measurement channel list */
+	u8 state;		/* 0 - start, 1 - stop */
+	__le32 start_time;	/* lower 32-bits of TSF */
+	u8 band;		/* 0 - 5.2GHz, 1 - 2.4GHz */
+	u8 channel;
+	u8 type;		/* see enum il_measurement_type */
+	u8 reserved1;
+	/* NOTE:  cca_ofdm, cca_cck, basic_type, and histogram are only
+	 * valid if applicable for measurement type requested. */
+	__le32 cca_ofdm;	/* cca fraction time in 40MHz clock periods */
+	__le32 cca_cck;		/* cca fraction time in 44MHz clock periods */
+	__le32 cca_time;	/* channel load time in usecs */
+	u8 basic_type;		/* 0 - bss, 1 - ofdm preamble, 2 -
+				 * unidentified */
+	u8 reserved2[3];
+	struct il_measurement_histogram histogram;
+	__le32 stop_time;	/* lower 32-bits of TSF */
+	__le32 status;		/* see il_measurement_status */
+} __packed;
+
+/******************************************************************************
+ * (7)
+ * Power Management Commands, Responses, Notifications:
+ *
+ *****************************************************************************/
+
+/**
+ * struct il_powertable_cmd - Power Table Command
+ * @flags: See below:
+ *
+ * C_POWER_TBL = 0x77 (command, has simple generic response)
+ *
+ * PM allow:
+ *   bit 0 - '0' Driver does not allow power management
+ *           '1' Driver allows PM (use rest of parameters)
+ *
+ * uCode sends sleep notifications:
+ *   bit 1 - '0' Don't send sleep notification
+ *           '1' Send sleep notification (SEND_PM_NOTIFICATION)
+ *
+ * Sleep over DTIM
+ *   bit 2 - '0' PM has to wake up every DTIM
+ *           '1' PM may sleep over DTIM until the listen interval.
+ *
+ * PCI power managed
+ *   bit 3 - '0' (PCI_CFG_LINK_CTRL & 0x1)
+ *           '1' !(PCI_CFG_LINK_CTRL & 0x1)
+ *
+ * Fast PD
+ *   bit 4 - '1' Put radio to sleep when receiving frame for others
+ *
+ * Force sleep Modes
+ *   bit 31/30- '00' use both mac/xtal sleeps
+ *              '01' force Mac sleep
+ *              '10' force xtal sleep
+ *              '11' Illegal set
+ *
+ * NOTE: if sleep_interval[SLEEP_INTRVL_TBL_SIZE-1] > DTIM period, then
+ * uCode assumes sleep over DTIM is allowed and we don't need to wake up
+ * for every DTIM.
+ */
+#define IL_POWER_VEC_SIZE 5
+
+#define IL_POWER_DRIVER_ALLOW_SLEEP_MSK		cpu_to_le16(BIT(0))
+#define IL_POWER_SLEEP_OVER_DTIM_MSK		cpu_to_le16(BIT(2))
+#define IL_POWER_PCI_PM_MSK			cpu_to_le16(BIT(3))
+
+struct il3945_powertable_cmd {
+	__le16 flags;
+	u8 reserved[2];
+	__le32 rx_data_timeout;
+	__le32 tx_data_timeout;
+	__le32 sleep_interval[IL_POWER_VEC_SIZE];
+} __packed;
+
+struct il_powertable_cmd {
+	__le16 flags;
+	u8 keep_alive_seconds;	/* 3945 reserved */
+	u8 debug_flags;		/* 3945 reserved */
+	__le32 rx_data_timeout;
+	__le32 tx_data_timeout;
+	__le32 sleep_interval[IL_POWER_VEC_SIZE];
+	__le32 keep_alive_beacons;
+} __packed;
+
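+/*
+ * Illustrative sketch: enabling driver-allowed sleep, optionally over
+ * DTIM, using the flag bits documented above.  Timeouts and sleep
+ * intervals are left to the caller.
+ */
+static inline void
+il_example_enable_sleep(struct il_powertable_cmd *cmd, bool over_dtim)
+{
+	cmd->flags = IL_POWER_DRIVER_ALLOW_SLEEP_MSK;
+	if (over_dtim)
+		cmd->flags |= IL_POWER_SLEEP_OVER_DTIM_MSK;
+}
+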
+/*
+ * N_PM_SLEEP = 0x7A (notification only, not a command)
+ * all devices identical.
+ */
+struct il_sleep_notification {
+	u8 pm_sleep_mode;
+	u8 pm_wakeup_src;
+	__le16 reserved;
+	__le32 sleep_time;
+	__le32 tsf_low;
+	__le32 bcon_timer;
+} __packed;
+
+/* Sleep states.  all devices identical. */
+enum {
+	IL_PM_NO_SLEEP = 0,
+	IL_PM_SLP_MAC = 1,
+	IL_PM_SLP_FULL_MAC_UNASSOCIATE = 2,
+	IL_PM_SLP_FULL_MAC_CARD_STATE = 3,
+	IL_PM_SLP_PHY = 4,
+	IL_PM_SLP_REPENT = 5,
+	IL_PM_WAKEUP_BY_TIMER = 6,
+	IL_PM_WAKEUP_BY_DRIVER = 7,
+	IL_PM_WAKEUP_BY_RFKILL = 8,
+	/* 3 reserved */
+	IL_PM_NUM_OF_MODES = 12,
+};
+
+/*
+ * N_CARD_STATE = 0xa1 (notification only, not a command)
+ */
+struct il_card_state_notif {
+	__le32 flags;
+} __packed;
+
+#define HW_CARD_DISABLED   0x01
+#define SW_CARD_DISABLED   0x02
+#define CT_CARD_DISABLED   0x04
+#define RXON_CARD_DISABLED 0x10
+
+struct il_ct_kill_config {
+	__le32 reserved;
+	__le32 critical_temperature_M;
+	__le32 critical_temperature_R;
+} __packed;
+
+/******************************************************************************
+ * (8)
+ * Scan Commands, Responses, Notifications:
+ *
+ *****************************************************************************/
+
+#define SCAN_CHANNEL_TYPE_PASSIVE cpu_to_le32(0)
+#define SCAN_CHANNEL_TYPE_ACTIVE  cpu_to_le32(1)
+
+/**
+ * struct il_scan_channel - entry in C_SCAN channel table
+ *
+ * One for each channel in the scan list.
+ * Each channel can independently select:
+ * 1)  SSID for directed active scans
+ * 2)  Txpower setting (for rate specified within Tx command)
+ * 3)  How long to stay on-channel (behavior may be modified by quiet_time,
+ *     quiet_plcp_th, good_CRC_th)
+ *
+ * To avoid uCode errors, make sure the following are true (see comments
+ * under struct il_scan_cmd about max_out_time and quiet_time):
+ * 1)  If using passive_dwell (i.e. passive_dwell != 0):
+ *     active_dwell <= passive_dwell (< max_out_time if max_out_time != 0)
+ * 2)  quiet_time <= active_dwell
+ * 3)  If restricting off-channel time (i.e. max_out_time !=0):
+ *     passive_dwell < max_out_time
+ *     active_dwell < max_out_time
+ */
+struct il3945_scan_channel {
+	/*
+	 * type is defined as:
+	 * 0:0 1 = active, 0 = passive
+	 * 1:4 SSID direct bit map; if a bit is set, then corresponding
+	 *     SSID IE is transmitted in probe request.
+	 * 5:7 reserved
+	 */
+	u8 type;
+	u8 channel;		/* band is selected by il3945_scan_cmd "flags" field */
+	struct il3945_tx_power tpc;
+	__le16 active_dwell;	/* in 1024-uSec TU (time units), typ 5-50 */
+	__le16 passive_dwell;	/* in 1024-uSec TU (time units), typ 20-500 */
+} __packed;
+
+/* set number of direct probes in the u8 "type" field */
+#define IL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1))))
+
+struct il_scan_channel {
+	/*
+	 * type is defined as:
+	 * 0:0 1 = active, 0 = passive
+	 * 1:20 SSID direct bit map; if a bit is set, then corresponding
+	 *     SSID IE is transmitted in probe request.
+	 * 21:31 reserved
+	 */
+	__le32 type;
+	__le16 channel;		/* band is selected by il_scan_cmd "flags" field */
+	u8 tx_gain;		/* gain for analog radio */
+	u8 dsp_atten;		/* gain for DSP */
+	__le16 active_dwell;	/* in 1024-uSec TU (time units), typ 5-50 */
+	__le16 passive_dwell;	/* in 1024-uSec TU (time units), typ 20-500 */
+} __packed;
+
+/* set number of direct probes in the __le32 "type" field */
+#define IL_SCAN_PROBE_MASK(n)	cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
+
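+/*
+ * For example, IL_SCAN_PROBE_MASK(2) sets bits 1 and 2 (0x6), i.e.
+ * "transmit the first two direct-scan SSIDs"; it is OR'd into "type"
+ * alongside the active-scan bit 0.
+ */
+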
+/**
+ * struct il_ssid_ie - directed scan network information element
+ *
+ * Up to 20 of these may appear in C_SCAN (Note: Only 4 are in
+ * 3945 SCAN api), selected by "type" bit field in struct il_scan_channel;
+ * each channel may select different ssids from among the 20 (4) entries.
+ * SSID IEs get transmitted in reverse order of entry.
+ */
+struct il_ssid_ie {
+	u8 id;
+	u8 len;
+	u8 ssid[32];
+} __packed;
+
+#define PROBE_OPTION_MAX_3945		4
+#define PROBE_OPTION_MAX		20
+#define TX_CMD_LIFE_TIME_INFINITE	cpu_to_le32(0xFFFFFFFF)
+#define IL_GOOD_CRC_TH_DISABLED	0
+#define IL_GOOD_CRC_TH_DEFAULT		cpu_to_le16(1)
+#define IL_GOOD_CRC_TH_NEVER		cpu_to_le16(0xffff)
+#define IL_MAX_SCAN_SIZE 1024
+#define IL_MAX_CMD_SIZE 4096
+
+/*
+ * C_SCAN = 0x80 (command)
+ *
+ * The hardware scan command is very powerful; the driver can set it up to
+ * maintain (relatively) normal network traffic while doing a scan in the
+ * background.  The max_out_time and suspend_time control the ratio of how
+ * long the device stays on an associated network channel ("service channel")
+ * vs. how long it's away from the service channel, i.e. tuned to other channels
+ * for scanning.
+ *
+ * max_out_time is the max time off-channel (in usec), and suspend_time
+ * is how long (in "extended beacon" format) that the scan is "suspended"
+ * after returning to the service channel.  That is, suspend_time is the
+ * time that we stay on the service channel, doing normal work, between
+ * scan segments.  The driver may set these parameters differently to support
+ * scanning when associated vs. not associated, and light vs. heavy traffic
+ * loads when associated.
+ *
+ * After receiving this command, the device's scan engine does the following:
+ *
+ * 1)  Sends SCAN_START notification to driver
+ * 2)  Checks to see if it has time to do scan for one channel
+ * 3)  Sends NULL packet, with power-save (PS) bit set to 1,
+ *     to tell AP that we're going off-channel
+ * 4)  Tunes to first channel in scan list, does active or passive scan
+ * 5)  Sends SCAN_RESULT notification to driver
+ * 6)  Checks to see if it has time to do scan on *next* channel in list
+ * 7)  Repeats 4-6 until it no longer has time to scan the next channel
+ *     before max_out_time expires
+ * 8)  Returns to service channel
+ * 9)  Sends NULL packet with PS=0 to tell AP that we're back
+ * 10) Stays on service channel until suspend_time expires
+ * 11) Repeats entire process 2-10 until list is complete
+ * 12) Sends SCAN_COMPLETE notification
+ *
+ * For fast, efficient scans, the scan command also has support for staying on
+ * a channel for just a short time, if doing active scanning and getting no
+ * responses to the transmitted probe request.  This time is controlled by
+ * quiet_time, and the number of received packets below which a channel is
+ * considered "quiet" is controlled by quiet_plcp_threshold.
+ *
+ * For active scanning on channels that have regulatory restrictions against
+ * blindly transmitting, the scan can listen before transmitting, to make sure
+ * that there is already legitimate activity on the channel.  If enough
+ * packets are cleanly received on the channel (controlled by good_CRC_th,
+ * typical value 1), the scan engine starts transmitting probe requests.
+ *
+ * Driver must use separate scan commands for 2.4 vs. 5 GHz bands.
+ *
+ * To avoid uCode errors, see timing restrictions described under
+ * struct il_scan_channel.
+ */
+
+struct il3945_scan_cmd {
+	__le16 len;
+	u8 reserved0;
+	u8 channel_count;	/* # channels in channel list */
+	__le16 quiet_time;	/* dwell only this # millisecs on quiet channel
+				 * (only for active scan) */
+	__le16 quiet_plcp_th;	/* quiet chnl is < this # pkts (typ. 1) */
+	__le16 good_CRC_th;	/* passive -> active promotion threshold */
+	__le16 reserved1;
+	__le32 max_out_time;	/* max usec to be away from associated (service)
+				 * channel */
+	__le32 suspend_time;	/* pause scan this long (in "extended beacon
+				 * format") when returning to service channel:
+				 * 3945: 31:24 # beacons, 19:0 additional usec;
+				 * 4965: 31:22 # beacons, 21:0 additional usec.
+				 */
+	__le32 flags;		/* RXON_FLG_* */
+	__le32 filter_flags;	/* RXON_FILTER_* */
+
+	/* For active scans (set to all-0s for passive scans).
+	 * Does not include payload.  Must specify Tx rate; no rate scaling. */
+	struct il3945_tx_cmd tx_cmd;
+
+	/* For directed active scans (set to all-0s otherwise) */
+	struct il_ssid_ie direct_scan[PROBE_OPTION_MAX_3945];
+
+	/*
+	 * Probe request frame, followed by channel list.
+	 *
+	 * Size of probe request frame is specified by byte count in tx_cmd.
+	 * Channel list follows immediately after probe request frame.
+	 * Number of channels in list is specified by channel_count.
+	 * Each channel in list is of type:
+	 *
+	 * struct il3945_scan_channel channels[0];
+	 *
+	 * NOTE:  Only one band of channels can be scanned per pass.  You
+	 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
+	 * for one scan to complete (i.e. receive N_SCAN_COMPLETE)
+	 * before requesting another scan.
+	 */
+	u8 data[0];
+} __packed;
+
+struct il_scan_cmd {
+	__le16 len;
+	u8 reserved0;
+	u8 channel_count;	/* # channels in channel list */
+	__le16 quiet_time;	/* dwell only this # millisecs on quiet channel
+				 * (only for active scan) */
+	__le16 quiet_plcp_th;	/* quiet chnl is < this # pkts (typ. 1) */
+	__le16 good_CRC_th;	/* passive -> active promotion threshold */
+	__le16 rx_chain;	/* RXON_RX_CHAIN_* */
+	__le32 max_out_time;	/* max usec to be away from associated (service)
+				 * channel */
+	__le32 suspend_time;	/* pause scan this long (in "extended beacon
+				 * format") when returning to service chnl:
+				 * 3945: 31:24 # beacons, 19:0 additional usec;
+				 * 4965: 31:22 # beacons, 21:0 additional usec.
+				 */
+	__le32 flags;		/* RXON_FLG_* */
+	__le32 filter_flags;	/* RXON_FILTER_* */
+
+	/* For active scans (set to all-0s for passive scans).
+	 * Does not include payload.  Must specify Tx rate; no rate scaling. */
+	struct il_tx_cmd tx_cmd;
+
+	/* For directed active scans (set to all-0s otherwise) */
+	struct il_ssid_ie direct_scan[PROBE_OPTION_MAX];
+
+	/*
+	 * Probe request frame, followed by channel list.
+	 *
+	 * Size of probe request frame is specified by byte count in tx_cmd.
+	 * Channel list follows immediately after probe request frame.
+	 * Number of channels in list is specified by channel_count.
+	 * Each channel in list is of type:
+	 *
+	 * struct il_scan_channel channels[0];
+	 *
+	 * NOTE:  Only one band of channels can be scanned per pass.  You
+	 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
+	 * for one scan to complete (i.e. receive N_SCAN_COMPLETE)
+	 * before requesting another scan.
+	 */
+	u8 data[0];
+} __packed;
+
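+/*
+ * Illustrative sketch: the total C_SCAN payload is the fixed command,
+ * then the probe request frame and channel list in data[]; drivers
+ * typically check the sum against IL_MAX_SCAN_SIZE before sending.
+ */
+static inline bool
+il_example_scan_fits(u16 probe_req_len, u8 channel_count)
+{
+	return sizeof(struct il_scan_cmd) + probe_req_len +
+	       channel_count * sizeof(struct il_scan_channel) <=
+	       IL_MAX_SCAN_SIZE;
+}
+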
+/* If the scan can be aborted, the driver is notified via the complete
+ * notification carrying the abort status. */
+#define CAN_ABORT_STATUS	cpu_to_le32(0x1)
+/* complete notification statuses */
+#define ABORT_STATUS            0x2
+
+/*
+ * C_SCAN = 0x80 (response)
+ */
+struct il_scanreq_notification {
+	__le32 status;		/* 1: okay, 2: cannot fulfill request */
+} __packed;
+
+/*
+ * N_SCAN_START = 0x82 (notification only, not a command)
+ */
+struct il_scanstart_notification {
+	__le32 tsf_low;
+	__le32 tsf_high;
+	__le32 beacon_timer;
+	u8 channel;
+	u8 band;
+	u8 reserved[2];
+	__le32 status;
+} __packed;
+
+#define  SCAN_OWNER_STATUS 0x1
+#define  MEASURE_OWNER_STATUS 0x2
+
+#define IL_PROBE_STATUS_OK		0
+#define IL_PROBE_STATUS_TX_FAILED	BIT(0)
+/* error statuses combined with TX_FAILED */
+#define IL_PROBE_STATUS_FAIL_TTL	BIT(1)
+#define IL_PROBE_STATUS_FAIL_BT	BIT(2)
+
+#define NUMBER_OF_STATS 1	/* first __le32 is good CRC */
+/*
+ * N_SCAN_RESULTS = 0x83 (notification only, not a command)
+ */
+struct il_scanresults_notification {
+	u8 channel;
+	u8 band;
+	u8 probe_status;
+	u8 num_probe_not_sent;	/* not enough time to send */
+	__le32 tsf_low;
+	__le32 tsf_high;
+	__le32 stats[NUMBER_OF_STATS];
+} __packed;
+
+/*
+ * N_SCAN_COMPLETE = 0x84 (notification only, not a command)
+ */
+struct il_scancomplete_notification {
+	u8 scanned_channels;
+	u8 status;
+	u8 last_channel;
+	__le32 tsf_low;
+	__le32 tsf_high;
+} __packed;
+
+/******************************************************************************
+ * (9)
+ * IBSS/AP Commands and Notifications:
+ *
+ *****************************************************************************/
+
+enum il_ibss_manager {
+	IL_NOT_IBSS_MANAGER = 0,
+	IL_IBSS_MANAGER = 1,
+};
+
+/*
+ * N_BEACON = 0x90 (notification only, not a command)
+ */
+
+struct il3945_beacon_notif {
+	struct il3945_tx_resp beacon_notify_hdr;
+	__le32 low_tsf;
+	__le32 high_tsf;
+	__le32 ibss_mgr_status;
+} __packed;
+
+struct il4965_beacon_notif {
+	struct il4965_tx_resp beacon_notify_hdr;
+	__le32 low_tsf;
+	__le32 high_tsf;
+	__le32 ibss_mgr_status;
+} __packed;
+
+/*
+ * C_TX_BEACON= 0x91 (command, has simple generic response)
+ */
+
+struct il3945_tx_beacon_cmd {
+	struct il3945_tx_cmd tx;
+	__le16 tim_idx;
+	u8 tim_size;
+	u8 reserved1;
+	struct ieee80211_hdr frame[0];	/* beacon frame */
+} __packed;
+
+struct il_tx_beacon_cmd {
+	struct il_tx_cmd tx;
+	__le16 tim_idx;
+	u8 tim_size;
+	u8 reserved1;
+	struct ieee80211_hdr frame[0];	/* beacon frame */
+} __packed;
+
+/******************************************************************************
+ * (10)
+ * Statistics Commands and Notifications:
+ *
+ *****************************************************************************/
+
+#define IL_TEMP_CONVERT 260
+
+#define SUP_RATE_11A_MAX_NUM_CHANNELS  8
+#define SUP_RATE_11B_MAX_NUM_CHANNELS  4
+#define SUP_RATE_11G_MAX_NUM_CHANNELS  12
+
+/* Used for passing to driver number of successes and failures per rate */
+struct rate_histogram {
+	union {
+		__le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
+		__le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
+		__le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
+	} success;
+	union {
+		__le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
+		__le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
+		__le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
+	} failed;
+} __packed;
+
+/* stats command response */
+
+struct iwl39_stats_rx_phy {
+	__le32 ina_cnt;
+	__le32 fina_cnt;
+	__le32 plcp_err;
+	__le32 crc32_err;
+	__le32 overrun_err;
+	__le32 early_overrun_err;
+	__le32 crc32_good;
+	__le32 false_alarm_cnt;
+	__le32 fina_sync_err_cnt;
+	__le32 sfd_timeout;
+	__le32 fina_timeout;
+	__le32 unresponded_rts;
+	__le32 rxe_frame_limit_overrun;
+	__le32 sent_ack_cnt;
+	__le32 sent_cts_cnt;
+} __packed;
+
+struct iwl39_stats_rx_non_phy {
+	__le32 bogus_cts;	/* CTS received when not expecting CTS */
+	__le32 bogus_ack;	/* ACK received when not expecting ACK */
+	__le32 non_bssid_frames;	/* number of frames with BSSID that
+					 * doesn't belong to the STA BSSID */
+	__le32 filtered_frames;	/* count frames that were dumped in the
+				 * filtering process */
+	__le32 non_channel_beacons;	/* beacons with our bss id but not on
+					 * our serving channel */
+} __packed;
+
+struct iwl39_stats_rx {
+	struct iwl39_stats_rx_phy ofdm;
+	struct iwl39_stats_rx_phy cck;
+	struct iwl39_stats_rx_non_phy general;
+} __packed;
+
+struct iwl39_stats_tx {
+	__le32 preamble_cnt;
+	__le32 rx_detected_cnt;
+	__le32 bt_prio_defer_cnt;
+	__le32 bt_prio_kill_cnt;
+	__le32 few_bytes_cnt;
+	__le32 cts_timeout;
+	__le32 ack_timeout;
+	__le32 expected_ack_cnt;
+	__le32 actual_ack_cnt;
+} __packed;
+
+struct stats_dbg {
+	__le32 burst_check;
+	__le32 burst_count;
+	__le32 wait_for_silence_timeout_cnt;
+	__le32 reserved[3];
+} __packed;
+
+struct iwl39_stats_div {
+	__le32 tx_on_a;
+	__le32 tx_on_b;
+	__le32 exec_time;
+	__le32 probe_time;
+} __packed;
+
+struct iwl39_stats_general {
+	__le32 temperature;
+	struct stats_dbg dbg;
+	__le32 sleep_time;
+	__le32 slots_out;
+	__le32 slots_idle;
+	__le32 ttl_timestamp;
+	struct iwl39_stats_div div;
+} __packed;
+
+struct stats_rx_phy {
+	__le32 ina_cnt;
+	__le32 fina_cnt;
+	__le32 plcp_err;
+	__le32 crc32_err;
+	__le32 overrun_err;
+	__le32 early_overrun_err;
+	__le32 crc32_good;
+	__le32 false_alarm_cnt;
+	__le32 fina_sync_err_cnt;
+	__le32 sfd_timeout;
+	__le32 fina_timeout;
+	__le32 unresponded_rts;
+	__le32 rxe_frame_limit_overrun;
+	__le32 sent_ack_cnt;
+	__le32 sent_cts_cnt;
+	__le32 sent_ba_rsp_cnt;
+	__le32 dsp_self_kill;
+	__le32 mh_format_err;
+	__le32 re_acq_main_rssi_sum;
+	__le32 reserved3;
+} __packed;
+
+struct stats_rx_ht_phy {
+	__le32 plcp_err;
+	__le32 overrun_err;
+	__le32 early_overrun_err;
+	__le32 crc32_good;
+	__le32 crc32_err;
+	__le32 mh_format_err;
+	__le32 agg_crc32_good;
+	__le32 agg_mpdu_cnt;
+	__le32 agg_cnt;
+	__le32 unsupport_mcs;
+} __packed;
+
+#define INTERFERENCE_DATA_AVAILABLE      cpu_to_le32(1)
+
+struct stats_rx_non_phy {
+	__le32 bogus_cts;	/* CTS received when not expecting CTS */
+	__le32 bogus_ack;	/* ACK received when not expecting ACK */
+	__le32 non_bssid_frames;	/* number of frames with BSSID that
+					 * doesn't belong to the STA BSSID */
+	__le32 filtered_frames;	/* count frames that were dumped in the
+				 * filtering process */
+	__le32 non_channel_beacons;	/* beacons with our bss id but not on
+					 * our serving channel */
+	__le32 channel_beacons;	/* beacons with our bss id and in our
+				 * serving channel */
+	__le32 num_missed_bcon;	/* number of missed beacons */
+	__le32 adc_rx_saturation_time;	/* count in 0.8us units the time the
+					 * ADC was in saturation */
+	__le32 ina_detection_search_time;	/* total time (in 0.8us) searched
+						 * for INA */
+	__le32 beacon_silence_rssi_a;	/* RSSI silence after beacon frame */
+	__le32 beacon_silence_rssi_b;	/* RSSI silence after beacon frame */
+	__le32 beacon_silence_rssi_c;	/* RSSI silence after beacon frame */
+	__le32 interference_data_flag;	/* flag for interference data
+					 * availability. 1 when data is
+					 * available. */
+	__le32 channel_load;	/* counts RX Enable time in uSec */
+	__le32 dsp_false_alarms;	/* DSP false alarm (both OFDM
+					 * and CCK) counter */
+	__le32 beacon_rssi_a;
+	__le32 beacon_rssi_b;
+	__le32 beacon_rssi_c;
+	__le32 beacon_energy_a;
+	__le32 beacon_energy_b;
+	__le32 beacon_energy_c;
+} __packed;
+
+struct stats_rx {
+	struct stats_rx_phy ofdm;
+	struct stats_rx_phy cck;
+	struct stats_rx_non_phy general;
+	struct stats_rx_ht_phy ofdm_ht;
+} __packed;
+
+/**
+ * struct stats_tx_power - current tx power
+ *
+ * @ant_a: current tx power on chain a in 1/2 dB step
+ * @ant_b: current tx power on chain b in 1/2 dB step
+ * @ant_c: current tx power on chain c in 1/2 dB step
+ */
+struct stats_tx_power {
+	u8 ant_a;
+	u8 ant_b;
+	u8 ant_c;
+	u8 reserved;
+} __packed;
+
+struct stats_tx_non_phy_agg {
+	__le32 ba_timeout;
+	__le32 ba_reschedule_frames;
+	__le32 scd_query_agg_frame_cnt;
+	__le32 scd_query_no_agg;
+	__le32 scd_query_agg;
+	__le32 scd_query_mismatch;
+	__le32 frame_not_ready;
+	__le32 underrun;
+	__le32 bt_prio_kill;
+	__le32 rx_ba_rsp_cnt;
+} __packed;
+
+struct stats_tx {
+	__le32 preamble_cnt;
+	__le32 rx_detected_cnt;
+	__le32 bt_prio_defer_cnt;
+	__le32 bt_prio_kill_cnt;
+	__le32 few_bytes_cnt;
+	__le32 cts_timeout;
+	__le32 ack_timeout;
+	__le32 expected_ack_cnt;
+	__le32 actual_ack_cnt;
+	__le32 dump_msdu_cnt;
+	__le32 burst_abort_next_frame_mismatch_cnt;
+	__le32 burst_abort_missing_next_frame_cnt;
+	__le32 cts_timeout_collision;
+	__le32 ack_or_ba_timeout_collision;
+	struct stats_tx_non_phy_agg agg;
+
+	__le32 reserved1;
+} __packed;
+
+struct stats_div {
+	__le32 tx_on_a;
+	__le32 tx_on_b;
+	__le32 exec_time;
+	__le32 probe_time;
+	__le32 reserved1;
+	__le32 reserved2;
+} __packed;
+
+struct stats_general_common {
+	__le32 temperature;	/* radio temperature */
+	struct stats_dbg dbg;
+	__le32 sleep_time;
+	__le32 slots_out;
+	__le32 slots_idle;
+	__le32 ttl_timestamp;
+	struct stats_div div;
+	__le32 rx_enable_counter;
+	/*
+	 * num_of_sos_states:
+	 *  count the number of times we have to re-tune
+	 *  in order to get out of bad PHY status
+	 */
+	__le32 num_of_sos_states;
+} __packed;
+
+struct stats_general {
+	struct stats_general_common common;
+	__le32 reserved2;
+	__le32 reserved3;
+} __packed;
+
+#define UCODE_STATS_CLEAR_MSK		(0x1 << 0)
+#define UCODE_STATS_FREQUENCY_MSK		(0x1 << 1)
+#define UCODE_STATS_NARROW_BAND_MSK	(0x1 << 2)
+
+/*
+ * C_STATS = 0x9c,
+ * all devices identical.
+ *
+ * This command triggers an immediate response containing uCode stats.
+ * The response is in the same format as N_STATS 0x9d, below.
+ *
+ * If the CLEAR_STATS configuration flag is set, uCode will clear its
+ * internal copy of the stats (counters) after issuing the response.
+ * This flag does not affect N_STATSs after beacons (see below).
+ *
+ * If the DISABLE_NOTIF configuration flag is set, uCode will not issue
+ * N_STATSs after received beacons (see below).  This flag
+ * does not affect the response to the C_STATS 0x9c itself.
+ */
+#define IL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1)	/* see above */
+#define IL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)	/* see above */
+struct il_stats_cmd {
+	__le32 configuration_flags;	/* IL_STATS_CONF_* */
+} __packed;
+
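+/*
+ * Illustrative sketch: requesting stats and optionally asking uCode to
+ * clear its internal counters after responding.
+ */
+static inline void
+il_example_fill_stats_cmd(struct il_stats_cmd *cmd, bool clear)
+{
+	cmd->configuration_flags = clear ? IL_STATS_CONF_CLEAR_STATS : 0;
+}
+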
+/*
+ * N_STATS = 0x9d (notification only, not a command)
+ *
+ * By default, uCode issues this notification after receiving a beacon
+ * while associated.  To disable this behavior, set DISABLE_NOTIF flag in the
+ * C_STATS 0x9c, above.
+ *
+ * Statistics counters continue to increment beacon after beacon, but are
+ * cleared when changing channels or when driver issues C_STATS
+ * 0x9c with CLEAR_STATS bit set (see above).
+ *
+ * uCode also issues this notification during scans.  uCode clears stats
+ * appropriately so that each notification contains stats for only the
+ * one channel that has just been scanned.
+ */
+#define STATS_REPLY_FLG_BAND_24G_MSK         cpu_to_le32(0x2)
+#define STATS_REPLY_FLG_HT40_MODE_MSK        cpu_to_le32(0x8)
+
+struct il3945_notif_stats {
+	__le32 flag;
+	struct iwl39_stats_rx rx;
+	struct iwl39_stats_tx tx;
+	struct iwl39_stats_general general;
+} __packed;
+
+struct il_notif_stats {
+	__le32 flag;
+	struct stats_rx rx;
+	struct stats_tx tx;
+	struct stats_general general;
+} __packed;
+
+/*
+ * N_MISSED_BEACONS = 0xa2 (notification only, not a command)
+ *
+ * uCode sends N_MISSED_BEACONS to the driver whenever it detects missed
+ * beacons, regardless of how many.  Inside the notification the driver can
+ * find all of the beacon information: the total number of missed beacons,
+ * the number of consecutive missed beacons, the number of beacons received,
+ * and the number of beacons expected to be received.
+ *
+ * If uCode detects consecutive_missed_beacons > 5, it resets the radio
+ * to bring the radio/PHY back to a working state; this has no relation
+ * to when the driver performs sensitivity calibration.
+ *
+ * The driver should set its own missed_beacon_threshold to decide when to
+ * perform sensitivity calibration based on the number of consecutive missed
+ * beacons, in order to improve overall performance, especially in noisy
+ * environments.
+ *
+ */
+
+#define IL_MISSED_BEACON_THRESHOLD_MIN	(1)
+#define IL_MISSED_BEACON_THRESHOLD_DEF	(5)
+#define IL_MISSED_BEACON_THRESHOLD_MAX	IL_MISSED_BEACON_THRESHOLD_DEF
+
+struct il_missed_beacon_notif {
+	__le32 consecutive_missed_beacons;
+	__le32 total_missed_becons;
+	__le32 num_expected_beacons;
+	__le32 num_recvd_beacons;
+} __packed;
+
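+/*
+ * Illustrative sketch of the driver-side policy described above: decide
+ * to run sensitivity calibration once the consecutive-miss count crosses
+ * the driver's own threshold.
+ */
+static inline bool
+il_example_should_recalibrate(const struct il_missed_beacon_notif *mb,
+			      u32 threshold)
+{
+	return le32_to_cpu(mb->consecutive_missed_beacons) > threshold;
+}
+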
+/******************************************************************************
+ * (11)
+ * Rx Calibration Commands:
+ *
+ * With the uCode used for open source drivers, most Tx calibration (except
+ * for Tx Power) and most Rx calibration is done by uCode during the
+ * "initialize" phase of uCode boot.  Driver must calibrate only:
+ *
+ * 1)  Tx power (depends on temperature), described elsewhere
+ * 2)  Receiver gain balance (optimize MIMO, and detect disconnected antennas)
+ * 3)  Receiver sensitivity (to optimize signal detection)
+ *
+ *****************************************************************************/
+
+/**
+ * C_SENSITIVITY = 0xa8 (command, has simple generic response)
+ *
+ * This command sets up the Rx signal detector for a sensitivity level that
+ * is high enough to lock onto all signals within the associated network,
+ * but low enough to ignore signals that are below a certain threshold, so as
+ * not to have too many "false alarms".  False alarms are signals that the
+ * Rx DSP tries to lock onto, but then discards after determining that they
+ * are noise.
+ *
+ * The optimum number of false alarms is between 5 and 50 per 200 TUs
+ * (200 * 1024 uSecs, i.e. 204.8 milliseconds) of actual Rx time (i.e.
+ * time listening, not transmitting).  Driver must adjust sensitivity so that
+ * the ratio of actual false alarms to actual Rx time falls within this range.
+ *
+ * While associated, uCode delivers N_STATSs after each
+ * received beacon.  These provide information to the driver to analyze the
+ * sensitivity.  Don't analyze stats that come in from scanning, or any
+ * other non-associated-network source.  Pertinent stats include:
+ *
+ * From "general" stats (struct stats_rx_non_phy):
+ *
+ * (beacon_energy_[abc] & 0x0FF00) >> 8 (unsigned, higher value is lower level)
+ *   Measure of energy of desired signal.  Used for establishing a level
+ *   below which the device does not detect signals.
+ *
+ * (beacon_silence_rssi_[abc] & 0x0FF00) >> 8 (unsigned, units in dB)
+ *   Measure of background noise in silent period after beacon.
+ *
+ * channel_load
+ *   uSecs of actual Rx time during beacon period (varies according to
+ *   how much time was spent transmitting).
+ *
+ * From "cck" and "ofdm" stats (struct stats_rx_phy), separately:
+ *
+ * false_alarm_cnt
+ *   Signal locks abandoned early (before phy-level header).
+ *
+ * plcp_err
+ *   Signal locks abandoned late (during phy-level header).
+ *
+ * NOTE:  Both false_alarm_cnt and plcp_err increment monotonically from
+ *        beacon to beacon, i.e. each value is an accumulation of all errors
+ *        before and including the latest beacon.  Values will wrap around to 0
+ *        after counting up to 2^32 - 1.  Driver must differentiate vs.
+ *        previous beacon's values to determine # false alarms in the current
+ *        beacon period.
+ *
+ * Total number of false alarms = false_alarm_cnt + plcp_err
+ *
+ * For OFDM, adjust the following table entries in struct il_sensitivity_cmd
+ * (notice that the start points for OFDM are at or close to settings for
+ * maximum sensitivity):
+ *
+ *                                             START  /  MIN  /  MAX
+ *   HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX          90   /   85  /  120
+ *   HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX     170   /  170  /  210
+ *   HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX         105   /  105  /  140
+ *   HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX     220   /  220  /  270
+ *
+ *   If actual rate of OFDM false alarms (+ plcp_errors) is too high
+ *   (greater than 50 for each 204.8 msecs listening), reduce sensitivity
+ *   by *adding* 1 to all 4 of the table entries above, up to the max for
+ *   each entry.  Conversely, if false alarm rate is too low (less than 5
+ *   for each 204.8 msecs listening), *subtract* 1 from each entry to
+ *   increase sensitivity.
+ *
+ * For CCK sensitivity, keep track of the following:
+ *
+ *   1).  20-beacon history of maximum background noise, indicated by
+ *        (beacon_silence_rssi_[abc] & 0x0FF00), units in dB, across the
+ *        3 receivers.  For any given beacon, the "silence reference" is
+ *        the maximum of last 60 samples (20 beacons * 3 receivers).
+ *
+ *   2).  10-beacon history of strongest signal level, as indicated
+ *        by (beacon_energy_[abc] & 0x0FF00) >> 8, across the 3 receivers,
+ *        i.e. the strength of the signal through the best receiver at the
+ *        moment.  These measurements are "upside down", with lower values
+ *        for stronger signals, so max energy will be *minimum* value.
+ *
+ *        Then for any given beacon, the driver must determine the *weakest*
+ *        of the strongest signals; this is the minimum level that needs to be
+ *        successfully detected, when using the best receiver at the moment.
+ *        "Max cck energy" is the maximum (higher value means lower energy!)
+ *        of the last 10 minima.  Once this is determined, driver must add
+ *        a little margin by adding "6" to it.
+ *
+ *   3).  Number of consecutive beacon periods with too few false alarms.
+ *        Reset this to 0 at the first beacon period that falls within the
+ *        "good" range (5 to 50 false alarms per 204.8 milliseconds rx).
+ *
+ * Then, adjust the following CCK table entries in struct il_sensitivity_cmd
+ * (notice that the start points for CCK are at maximum sensitivity):
+ *
+ *                                             START  /  MIN  /  MAX
+ *   HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX         125   /  125  /  200
+ *   HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX     200   /  200  /  400
+ *   HD_MIN_ENERGY_CCK_DET_IDX                100   /    0  /  100
+ *
+ *   If actual rate of CCK false alarms (+ plcp_errors) is too high
+ *   (greater than 50 for each 204.8 msecs listening), method for reducing
+ *   sensitivity is:
+ *
+ *   1)  *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX,
+ *       up to max 400.
+ *
+ *   2)  If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX is < 160,
+ *       sensitivity has been reduced a significant amount; bring it up to
+ *       a moderate 161.  Otherwise, *add* 3, up to max 200.
+ *
+ *   3)  a)  If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX is > 160,
+ *       sensitivity has been reduced only a moderate or small amount;
+ *       *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_IDX,
+ *       down to min 0.  Otherwise (if gain has been significantly reduced),
+ *       don't change the HD_MIN_ENERGY_CCK_DET_IDX value.
+ *
+ *       b)  Save a snapshot of the "silence reference".
+ *
+ *   If actual rate of CCK false alarms (+ plcp_errors) is too low
+ *   (less than 5 for each 204.8 msecs listening), method for increasing
+ *   sensitivity is used only if:
+ *
+ *   1a)  Previous beacon did not have too many false alarms
+ *   1b)  AND difference between previous "silence reference" and current
+ *        "silence reference" (prev - current) is 2 or more,
+ *   OR 2)  100 or more consecutive beacon periods have had rate of
+ *          less than 5 false alarms per 204.8 milliseconds rx time.
+ *
+ *   Method for increasing sensitivity:
+ *
+ *   1)  *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX,
+ *       down to min 125.
+ *
+ *   2)  *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX,
+ *       down to min 200.
+ *
+ *   3)  *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_IDX, up to max 100.
+ *
+ *   If actual rate of CCK false alarms (+ plcp_errors) is within good range
+ *   (between 5 and 50 for each 204.8 msecs listening):
+ *
+ *   1)  Save a snapshot of the silence reference.
+ *
+ *   2)  If previous beacon had too many CCK false alarms (+ plcp_errors),
+ *       give some extra margin to energy threshold by *subtracting* 8
+ *       from value in HD_MIN_ENERGY_CCK_DET_IDX.
+ *
+ *   For all cases (too few, too many, good range), make sure that the CCK
+ *   detection threshold (energy) is below the energy level for robust
+ *   detection over the past 10 beacon periods, the "Max cck energy".
+ *   Lower values mean higher energy; this means making sure that the value
+ *   in HD_MIN_ENERGY_CCK_DET_IDX is at or *above* "Max cck energy".
+ *
+ */
+
+/*
+ * Table entries in C_SENSITIVITY (struct il_sensitivity_cmd)
+ */
+#define HD_TBL_SIZE  (11)	/* number of entries */
+#define HD_MIN_ENERGY_CCK_DET_IDX                 (0)	/* table idxes */
+#define HD_MIN_ENERGY_OFDM_DET_IDX                (1)
+#define HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX          (2)
+#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX      (3)
+#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX      (4)
+#define HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX          (5)
+#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX      (6)
+#define HD_BARKER_CORR_TH_ADD_MIN_IDX             (7)
+#define HD_BARKER_CORR_TH_ADD_MIN_MRC_IDX         (8)
+#define HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX          (9)
+#define HD_OFDM_ENERGY_TH_IN_IDX                  (10)
+
+/* Control field in struct il_sensitivity_cmd */
+#define C_SENSITIVITY_CONTROL_DEFAULT_TBL	cpu_to_le16(0)
+#define C_SENSITIVITY_CONTROL_WORK_TBL	cpu_to_le16(1)
+
+/**
+ * struct il_sensitivity_cmd
+ * @control:  (1) updates working table, (0) updates default table
+ * @table:  energy threshold values, use HD_* as idx into table
+ *
+ * Always use "1" in "control" to update uCode's working table and DSP.
+ */
+struct il_sensitivity_cmd {
+	__le16 control;		/* always use "1" */
+	__le16 table[HD_TBL_SIZE];	/* use HD_* as idx */
+} __packed;
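+
+#if 0
+/*
+ * Illustrative sketch only, not part of the original file: one way to
+ * apply the OFDM false-alarm rules above.  delta_fa is the number of new
+ * false alarms plus plcp errors since the previous beacon; rx_usec is the
+ * actual listening time.  The MIN/MAX constants restate the table above;
+ * il_example_* names are hypothetical and div_u64() from
+ * <linux/math64.h> is assumed.
+ */
+static void
+il_example_adjust_ofdm(__le16 *tbl, u32 delta_fa, u32 rx_usec)
+{
+	static const struct { u8 idx; u16 min, max; } ofdm[] = {
+		{ HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX,      85, 120 },
+		{ HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX, 170, 210 },
+		{ HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX,     105, 140 },
+		{ HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX, 220, 270 },
+	};
+	u32 fa_per_200tu;
+	int i;
+
+	if (!rx_usec)
+		return;
+
+	/* Scale false alarms to the reference period of 200 TUs
+	 * (200 * 1024 = 204800 usec) of actual listening time. */
+	fa_per_200tu = div_u64((u64)delta_fa * 204800, rx_usec);
+
+	for (i = 0; i < ARRAY_SIZE(ofdm); i++) {
+		u16 v = le16_to_cpu(tbl[ofdm[i].idx]);
+
+		if (fa_per_200tu > 50 && v < ofdm[i].max)
+			v++;	/* too many alarms: reduce sensitivity */
+		else if (fa_per_200tu < 5 && v > ofdm[i].min)
+			v--;	/* too few alarms: increase sensitivity */
+		tbl[ofdm[i].idx] = cpu_to_le16(v);
+	}
+}
+
+/*
+ * Likewise for the "too many CCK false alarms" branch; as a simplification,
+ * the branch checks here use the pre-adjustment values.
+ */
+static void
+il_example_reduce_cck(__le16 *tbl)
+{
+	u16 mrc = le16_to_cpu(tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX]);
+	u16 corr = le16_to_cpu(tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX]);
+	u16 energy = le16_to_cpu(tbl[HD_MIN_ENERGY_CCK_DET_IDX]);
+
+	/* 1) raise the MRC correlator threshold by 3, up to max 400 */
+	tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
+	    cpu_to_le16(min_t(u16, mrc + 3, 400));
+
+	/* 2) if sensitivity was significantly reduced, jump to a moderate
+	 *    161; otherwise add 3, up to max 200 */
+	tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX] =
+	    cpu_to_le16(corr < 160 ? 161 : min_t(u16, corr + 3, 200));
+
+	/* 3a) if gain was reduced only moderately, lower the energy
+	 *     threshold by 2, down to min 0; 3b) the "silence reference"
+	 *     snapshot is omitted here */
+	if (corr > 160)
+		tbl[HD_MIN_ENERGY_CCK_DET_IDX] =
+		    cpu_to_le16(energy >= 2 ? energy - 2 : 0);
+}
+#endif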
+
+/**
+ * C_PHY_CALIBRATION = 0xb0 (command, has simple generic response)
+ *
+ * This command sets the relative gains of the 4965 device's 3 radio
+ * receiver chains.
+ *
+ * After the first association, driver should accumulate signal and noise
+ * stats from the N_STATSs that follow the first 20
+ * beacons from the associated network (don't collect stats that come
+ * in from scanning, or any other non-network source).
+ *
+ * DISCONNECTED ANTENNA:
+ *
+ * Driver should determine which antennas are actually connected, by comparing
+ * average beacon signal levels for the 3 Rx chains.  Accumulate (add) the
+ * following values over 20 beacons, one accumulator for each of the chains
+ * a/b/c, from struct stats_rx_non_phy:
+ *
+ * beacon_rssi_[abc] & 0x0FF (unsigned, units in dB)
+ *
+ * Find the strongest signal from among a/b/c.  Compare the other two to the
+ * strongest.  If any signal is more than 15 dB (times 20, unless you
+ * divide the accumulated values by 20) below the strongest, the driver
+ * considers that antenna to be disconnected, and should not try to use that
+ * antenna/chain for Rx or Tx.  If both A and B seem to be disconnected,
+ * driver should declare the stronger one as connected, and attempt to use it
+ * (A and B are the only 2 Tx chains!).
+ *
+ *
+ * RX BALANCE:
+ *
+ * Driver should balance the 3 receivers (but just the ones that are connected
+ * to antennas, see above) for gain, by comparing the average signal levels
+ * detected during the silence after each beacon (background noise).
+ * Accumulate (add) the following values over 20 beacons, one accumulator for
+ * each of the chains a/b/c, from struct stats_rx_non_phy:
+ *
+ * beacon_silence_rssi_[abc] & 0x0FF (unsigned, units in dB)
+ *
+ * Find the weakest background noise level from among a/b/c.  This Rx chain
+ * will be the reference, with 0 gain adjustment.  Attenuate the other chains
+ * by finding the noise difference:
+ *
+ * (accum_noise[i] - accum_noise[reference]) / 30
+ *
+ * The "30" adjusts the dB in the 20 accumulated samples to units of 1.5 dB.
+ * For use in the diff_gain_[abc] fields of struct il_calib_diff_gain_cmd,
+ * the driver should limit the difference results to a range of 0-3 (0-4.5 dB),
+ * and set bit 2 to indicate "reduce gain".  The value for the reference
+ * (weakest) chain should be "0".
+ *
+ * diff_gain_[abc] bit fields:
+ *   2: (1) reduce gain, (0) increase gain
+ * 1-0: amount of gain, units of 1.5 dB
+ */
+
+/* PHY calibration command */
+/* The default calibrate table size if not specified by firmware */
+#define IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE	18
+enum {
+	IL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7,
+	IL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19,
+};
+
+#define IL_MAX_PHY_CALIBRATE_TBL_SIZE		(253)
+
+struct il_calib_hdr {
+	u8 op_code;
+	u8 first_group;
+	u8 groups_num;
+	u8 data_valid;
+} __packed;
+
+/* IL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */
+struct il_calib_diff_gain_cmd {
+	struct il_calib_hdr hdr;
+	s8 diff_gain_a;		/* see above */
+	s8 diff_gain_b;
+	s8 diff_gain_c;
+	u8 reserved1;
+} __packed;
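+
+#if 0
+/*
+ * Illustrative sketch only, not part of the original file: filling in the
+ * diff_gain values per the RX BALANCE description above.  accum_noise[]
+ * holds beacon_silence_rssi accumulated over 20 beacons for chains a/b/c;
+ * the il_example_* name is hypothetical.
+ */
+static void
+il_example_fill_diff_gain(struct il_calib_diff_gain_cmd *cmd,
+			  const u32 accum_noise[3])
+{
+	s8 *const gain[3] = { &cmd->diff_gain_a, &cmd->diff_gain_b,
+			      &cmd->diff_gain_c };
+	int i, ref = 0;
+
+	/* The chain with the weakest accumulated noise is the reference,
+	 * with 0 gain adjustment. */
+	for (i = 1; i < 3; i++)
+		if (accum_noise[i] < accum_noise[ref])
+			ref = i;
+
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->hdr.op_code = IL_PHY_CALIBRATE_DIFF_GAIN_CMD;
+
+	for (i = 0; i < 3; i++) {
+		/* "/ 30" turns 20 accumulated dB samples into 1.5 dB units */
+		u32 diff = (accum_noise[i] - accum_noise[ref]) / 30;
+
+		/* limit to 0-3 (0-4.5 dB); bit 2 means "reduce gain" */
+		*gain[i] = diff ? (min_t(u32, diff, 3) | BIT(2)) : 0;
+	}
+}
+#endif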
+
+/******************************************************************************
+ * (12)
+ * Miscellaneous Commands:
+ *
+ *****************************************************************************/
+
+/*
+ * LEDs Command & Response
+ * C_LEDS = 0x48 (command, has simple generic response)
+ *
+ * For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field),
+ * this command turns it on or off, or sets up a periodic blinking cycle.
+ */
+struct il_led_cmd {
+	__le32 interval;	/* "interval" in uSec */
+	u8 id;			/* 1: Activity, 2: Link, 3: Tech */
+	u8 off;			/* # intervals off while blinking;
+				 * "0", with >0 "on" value, turns LED on */
+	u8 on;			/* # intervals on while blinking;
+				 * "0", regardless of "off", turns LED off */
+	u8 reserved;
+} __packed;
+
+/******************************************************************************
+ * (13)
+ * Union of all expected notifications/responses:
+ *
+ *****************************************************************************/
+
+#define IL_RX_FRAME_SIZE_MSK	0x00003fff
+
+struct il_rx_pkt {
+	/*
+	 * The first 4 bytes of the RX frame header contain both the RX frame
+	 * size and some flags.
+	 * Bit fields:
+	 * 31:    flag flush RB request
+	 * 30:    flag ignore TC (terminal counter) request
+	 * 29:    flag fast IRQ request
+	 * 28-14: Reserved
+	 * 13-00: RX frame size
+	 */
+	__le32 len_n_flags;
+	struct il_cmd_header hdr;
+	union {
+		struct il3945_rx_frame rx_frame;
+		struct il3945_tx_resp tx_resp;
+		struct il3945_beacon_notif beacon_status;
+
+		struct il_alive_resp alive_frame;
+		struct il_spectrum_notification spectrum_notif;
+		struct il_csa_notification csa_notif;
+		struct il_error_resp err_resp;
+		struct il_card_state_notif card_state_notif;
+		struct il_add_sta_resp add_sta;
+		struct il_rem_sta_resp rem_sta;
+		struct il_sleep_notification sleep_notif;
+		struct il_spectrum_resp spectrum;
+		struct il_notif_stats stats;
+		struct il_compressed_ba_resp compressed_ba;
+		struct il_missed_beacon_notif missed_beacon;
+		__le32 status;
+		u8 raw[0];
+	} u;
+} __packed;
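+
+#if 0
+/*
+ * Illustrative sketch only, not part of the original file: extracting the
+ * RX frame size from len_n_flags per the bit layout documented above.
+ */
+static inline u32
+il_example_rx_pkt_len(const struct il_rx_pkt *pkt)
+{
+	/* bits 13-0 carry the frame size; bits 31-29 are flags */
+	return le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
+}
+#endif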
+
+#endif /* __il_commands_h__ */
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
new file mode 100644
index 0000000..6514baf
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -0,0 +1,5597 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/lockdep.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/skbuff.h>
+#include <net/mac80211.h>
+
+#include "common.h"
+
+int
+_il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout)
+{
+	const int interval = 10; /* microseconds */
+	int t = 0;
+
+	do {
+		if ((_il_rd(il, addr) & mask) == (bits & mask))
+			return t;
+		udelay(interval);
+		t += interval;
+	} while (t < timeout);
+
+	return -ETIMEDOUT;
+}
+EXPORT_SYMBOL(_il_poll_bit);
+
+void
+il_set_bit(struct il_priv *p, u32 r, u32 m)
+{
+	unsigned long reg_flags;
+
+	spin_lock_irqsave(&p->reg_lock, reg_flags);
+	_il_set_bit(p, r, m);
+	spin_unlock_irqrestore(&p->reg_lock, reg_flags);
+}
+EXPORT_SYMBOL(il_set_bit);
+
+void
+il_clear_bit(struct il_priv *p, u32 r, u32 m)
+{
+	unsigned long reg_flags;
+
+	spin_lock_irqsave(&p->reg_lock, reg_flags);
+	_il_clear_bit(p, r, m);
+	spin_unlock_irqrestore(&p->reg_lock, reg_flags);
+}
+EXPORT_SYMBOL(il_clear_bit);
+
+bool
+_il_grab_nic_access(struct il_priv *il)
+{
+	int ret;
+	u32 val;
+
+	/* this bit wakes up the NIC */
+	_il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+	/*
+	 * These bits say the device is running, and should keep running for
+	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
+	 * but they do not indicate that embedded SRAM is restored yet;
+	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
+	 * to/from host DRAM when sleeping/waking for power-saving.
+	 * Each direction takes approximately 1/4 millisecond; with this
	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQ if a
	 * series of register accesses is expected (e.g. reading Event Log),
	 * to keep the device from sleeping.
+	 *
+	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
+	 * SRAM is okay/restored.  We don't check that here because this call
+	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
+	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
+	 *
+	 */
+	ret =
+	    _il_poll_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+			 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
+			  CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
+	if (unlikely(ret < 0)) {
+		val = _il_rd(il, CSR_GP_CNTRL);
+		WARN_ONCE(1, "Timeout waiting for ucode processor access "
+			     "(CSR_GP_CNTRL 0x%08x)\n", val);
+		_il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
+		return false;
+	}
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(_il_grab_nic_access);
+
+int
+il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout)
+{
+	const int interval = 10; /* microseconds */
+	int t = 0;
+
+	do {
+		if ((il_rd(il, addr) & mask) == mask)
+			return t;
+		udelay(interval);
+		t += interval;
+	} while (t < timeout);
+
+	return -ETIMEDOUT;
+}
+EXPORT_SYMBOL(il_poll_bit);
+
+u32
+il_rd_prph(struct il_priv *il, u32 reg)
+{
+	unsigned long reg_flags;
+	u32 val;
+
+	spin_lock_irqsave(&il->reg_lock, reg_flags);
+	_il_grab_nic_access(il);
+	val = _il_rd_prph(il, reg);
+	_il_release_nic_access(il);
+	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+	return val;
+}
+EXPORT_SYMBOL(il_rd_prph);
+
+void
+il_wr_prph(struct il_priv *il, u32 addr, u32 val)
+{
+	unsigned long reg_flags;
+
+	spin_lock_irqsave(&il->reg_lock, reg_flags);
+	if (likely(_il_grab_nic_access(il))) {
+		_il_wr_prph(il, addr, val);
+		_il_release_nic_access(il);
+	}
+	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+EXPORT_SYMBOL(il_wr_prph);
+
+u32
+il_read_targ_mem(struct il_priv *il, u32 addr)
+{
+	unsigned long reg_flags;
+	u32 value;
+
+	spin_lock_irqsave(&il->reg_lock, reg_flags);
+	_il_grab_nic_access(il);
+
+	_il_wr(il, HBUS_TARG_MEM_RADDR, addr);
+	value = _il_rd(il, HBUS_TARG_MEM_RDAT);
+
+	_il_release_nic_access(il);
+	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+	return value;
+}
+EXPORT_SYMBOL(il_read_targ_mem);
+
+void
+il_write_targ_mem(struct il_priv *il, u32 addr, u32 val)
+{
+	unsigned long reg_flags;
+
+	spin_lock_irqsave(&il->reg_lock, reg_flags);
+	if (likely(_il_grab_nic_access(il))) {
+		_il_wr(il, HBUS_TARG_MEM_WADDR, addr);
+		_il_wr(il, HBUS_TARG_MEM_WDAT, val);
+		_il_release_nic_access(il);
+	}
+	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+EXPORT_SYMBOL(il_write_targ_mem);
+
+const char *
+il_get_cmd_string(u8 cmd)
+{
+	switch (cmd) {
+		IL_CMD(N_ALIVE);
+		IL_CMD(N_ERROR);
+		IL_CMD(C_RXON);
+		IL_CMD(C_RXON_ASSOC);
+		IL_CMD(C_QOS_PARAM);
+		IL_CMD(C_RXON_TIMING);
+		IL_CMD(C_ADD_STA);
+		IL_CMD(C_REM_STA);
+		IL_CMD(C_WEPKEY);
+		IL_CMD(N_3945_RX);
+		IL_CMD(C_TX);
+		IL_CMD(C_RATE_SCALE);
+		IL_CMD(C_LEDS);
+		IL_CMD(C_TX_LINK_QUALITY_CMD);
+		IL_CMD(C_CHANNEL_SWITCH);
+		IL_CMD(N_CHANNEL_SWITCH);
+		IL_CMD(C_SPECTRUM_MEASUREMENT);
+		IL_CMD(N_SPECTRUM_MEASUREMENT);
+		IL_CMD(C_POWER_TBL);
+		IL_CMD(N_PM_SLEEP);
+		IL_CMD(N_PM_DEBUG_STATS);
+		IL_CMD(C_SCAN);
+		IL_CMD(C_SCAN_ABORT);
+		IL_CMD(N_SCAN_START);
+		IL_CMD(N_SCAN_RESULTS);
+		IL_CMD(N_SCAN_COMPLETE);
+		IL_CMD(N_BEACON);
+		IL_CMD(C_TX_BEACON);
+		IL_CMD(C_TX_PWR_TBL);
+		IL_CMD(C_BT_CONFIG);
+		IL_CMD(C_STATS);
+		IL_CMD(N_STATS);
+		IL_CMD(N_CARD_STATE);
+		IL_CMD(N_MISSED_BEACONS);
+		IL_CMD(C_CT_KILL_CONFIG);
+		IL_CMD(C_SENSITIVITY);
+		IL_CMD(C_PHY_CALIBRATION);
+		IL_CMD(N_RX_PHY);
+		IL_CMD(N_RX_MPDU);
+		IL_CMD(N_RX);
+		IL_CMD(N_COMPRESSED_BA);
+	default:
+		return "UNKNOWN";
+	}
+}
+EXPORT_SYMBOL(il_get_cmd_string);
+
+#define HOST_COMPLETE_TIMEOUT (HZ / 2)
+
+static void
+il_generic_cmd_callback(struct il_priv *il, struct il_device_cmd *cmd,
+			struct il_rx_pkt *pkt)
+{
+	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+		IL_ERR("Bad return from %s (0x%08X)\n",
+		       il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
+		return;
+	}
+#ifdef CONFIG_IWLEGACY_DEBUG
+	switch (cmd->hdr.cmd) {
+	case C_TX_LINK_QUALITY_CMD:
+	case C_SENSITIVITY:
+		D_HC_DUMP("back from %s (0x%08X)\n",
+			  il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
+		break;
+	default:
+		D_HC("back from %s (0x%08X)\n", il_get_cmd_string(cmd->hdr.cmd),
+		     pkt->hdr.flags);
+	}
+#endif
+}
+
+static int
+il_send_cmd_async(struct il_priv *il, struct il_host_cmd *cmd)
+{
+	int ret;
+
+	BUG_ON(!(cmd->flags & CMD_ASYNC));
+
+	/* An asynchronous command can not expect an SKB to be set. */
+	BUG_ON(cmd->flags & CMD_WANT_SKB);
+
+	/* Assign a generic callback if one is not provided */
+	if (!cmd->callback)
+		cmd->callback = il_generic_cmd_callback;
+
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		return -EBUSY;
+
+	ret = il_enqueue_hcmd(il, cmd);
+	if (ret < 0) {
+		IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
+		       il_get_cmd_string(cmd->id), ret);
+		return ret;
+	}
+	return 0;
+}
+
+int
+il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd)
+{
+	int cmd_idx;
+	int ret;
+
+	lockdep_assert_held(&il->mutex);
+
+	BUG_ON(cmd->flags & CMD_ASYNC);
+
+	/* A synchronous command can not have a callback set. */
+	BUG_ON(cmd->callback);
+
+	D_INFO("Attempting to send sync command %s\n",
+	       il_get_cmd_string(cmd->id));
+
+	set_bit(S_HCMD_ACTIVE, &il->status);
+	D_INFO("Setting HCMD_ACTIVE for command %s\n",
+	       il_get_cmd_string(cmd->id));
+
+	cmd_idx = il_enqueue_hcmd(il, cmd);
+	if (cmd_idx < 0) {
+		ret = cmd_idx;
+		IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
+		       il_get_cmd_string(cmd->id), ret);
+		goto out;
+	}
+
+	ret = wait_event_timeout(il->wait_command_queue,
+				 !test_bit(S_HCMD_ACTIVE, &il->status),
+				 HOST_COMPLETE_TIMEOUT);
+	if (!ret) {
+		if (test_bit(S_HCMD_ACTIVE, &il->status)) {
+			IL_ERR("Error sending %s: time out after %dms.\n",
+			       il_get_cmd_string(cmd->id),
+			       jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
+
+			clear_bit(S_HCMD_ACTIVE, &il->status);
+			D_INFO("Clearing HCMD_ACTIVE for command %s\n",
+			       il_get_cmd_string(cmd->id));
+			ret = -ETIMEDOUT;
+			goto cancel;
+		}
+	}
+
+	if (test_bit(S_RFKILL, &il->status)) {
+		IL_ERR("Command %s aborted: RF KILL Switch\n",
+		       il_get_cmd_string(cmd->id));
+		ret = -ECANCELED;
+		goto fail;
+	}
+	if (test_bit(S_FW_ERROR, &il->status)) {
+		IL_ERR("Command %s failed: FW Error\n",
+		       il_get_cmd_string(cmd->id));
+		ret = -EIO;
+		goto fail;
+	}
+	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
+		IL_ERR("Error: Response NULL in '%s'\n",
+		       il_get_cmd_string(cmd->id));
+		ret = -EIO;
+		goto cancel;
+	}
+
+	ret = 0;
+	goto out;
+
+cancel:
+	if (cmd->flags & CMD_WANT_SKB) {
+		/*
+		 * Cancel the CMD_WANT_SKB flag for the cmd in the
+		 * TX cmd queue. Otherwise in case the cmd comes
+		 * in later, it will possibly set an invalid
+		 * address (cmd->meta.source).
+		 */
+		il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB;
+	}
+fail:
+	if (cmd->reply_page) {
+		il_free_pages(il, cmd->reply_page);
+		cmd->reply_page = 0;
+	}
+out:
+	return ret;
+}
+EXPORT_SYMBOL(il_send_cmd_sync);
+
+int
+il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd)
+{
+	if (cmd->flags & CMD_ASYNC)
+		return il_send_cmd_async(il, cmd);
+
+	return il_send_cmd_sync(il, cmd);
+}
+EXPORT_SYMBOL(il_send_cmd);
+
+int
+il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len, const void *data)
+{
+	struct il_host_cmd cmd = {
+		.id = id,
+		.len = len,
+		.data = data,
+	};
+
+	return il_send_cmd_sync(il, &cmd);
+}
+EXPORT_SYMBOL(il_send_cmd_pdu);
+
+int
+il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data,
+		      void (*callback) (struct il_priv *il,
+					struct il_device_cmd *cmd,
+					struct il_rx_pkt *pkt))
+{
+	struct il_host_cmd cmd = {
+		.id = id,
+		.len = len,
+		.data = data,
+	};
+
+	cmd.flags |= CMD_ASYNC;
+	cmd.callback = callback;
+
+	return il_send_cmd_async(il, &cmd);
+}
+EXPORT_SYMBOL(il_send_cmd_pdu_async);
+
+/* default: IL_LED_BLINK(0) using blinking idx table */
+static int led_mode;
+module_param(led_mode, int, 0444);
+MODULE_PARM_DESC(led_mode,
+		 "0=system default, " "1=On(RF On)/Off(RF Off), 2=blinking");
+
+/* Throughput		OFF time(ms)	ON time (ms)
+ *	>300			25		25
+ *	>200 to 300		40		40
+ *	>100 to 200		55		55
+ *	>70 to 100		65		65
+ *	>50 to 70		75		75
+ *	>20 to 50		85		85
+ *	>10 to 20		95		95
+ *	>5 to 10		110		110
+ *	>1 to 5			130		130
+ *	>0 to 1			167		167
+ *	<=0					SOLID ON
+ */
+static const struct ieee80211_tpt_blink il_blink[] = {
+	{.throughput = 0,		.blink_time = 334},
+	{.throughput = 1 * 1024 - 1,	.blink_time = 260},
+	{.throughput = 5 * 1024 - 1,	.blink_time = 220},
+	{.throughput = 10 * 1024 - 1,	.blink_time = 190},
+	{.throughput = 20 * 1024 - 1,	.blink_time = 170},
+	{.throughput = 50 * 1024 - 1,	.blink_time = 150},
+	{.throughput = 70 * 1024 - 1,	.blink_time = 130},
+	{.throughput = 100 * 1024 - 1,	.blink_time = 110},
+	{.throughput = 200 * 1024 - 1,	.blink_time = 80},
+	{.throughput = 300 * 1024 - 1,	.blink_time = 50},
+};
+
+/*
+ * Adjust the LED blink rate to compensate for the MAC clock difference on
+ * each HW.  LED blink rate analysis showed an average deviation of 0% on
+ * 3945 and 5% on 4965 HW.
+ * The LED on/off time must be compensated per HW according to the deviation
+ * to achieve the desired LED frequency.
+ * The calculation is: (100-averageDeviation)/100 * blinkTime
+ * For code efficiency the calculation will be:
+ *     compensation = (100 - averageDeviation) * 64 / 100
+ *     NewBlinkTime = (compensation * BlinkTime) / 64
+ */
+static inline u8
+il_blink_compensation(struct il_priv *il, u8 time, u16 compensation)
+{
+	if (!compensation) {
+		IL_ERR("undefined blink compensation: "
+		       "use pre-defined blinking time\n");
+		return time;
+	}
+
+	return (u8) ((time * compensation) >> 6);
+}
+
+/* Set led pattern command */
+static int
+il_led_cmd(struct il_priv *il, unsigned long on, unsigned long off)
+{
+	struct il_led_cmd led_cmd = {
+		.id = IL_LED_LINK,
+		.interval = IL_DEF_LED_INTRVL
+	};
+	int ret;
+
+	if (!test_bit(S_READY, &il->status))
+		return -EBUSY;
+
+	if (il->blink_on == on && il->blink_off == off)
+		return 0;
+
+	if (off == 0) {
+		/* led is SOLID_ON */
+		on = IL_LED_SOLID;
+	}
+
+	D_LED("Led blink time compensation=%u\n",
+	      il->cfg->led_compensation);
+	led_cmd.on =
+	    il_blink_compensation(il, on,
+				  il->cfg->led_compensation);
+	led_cmd.off =
+	    il_blink_compensation(il, off,
+				  il->cfg->led_compensation);
+
+	ret = il->ops->send_led_cmd(il, &led_cmd);
+	if (!ret) {
+		il->blink_on = on;
+		il->blink_off = off;
+	}
+	return ret;
+}
+
+static void
+il_led_brightness_set(struct led_classdev *led_cdev,
+		      enum led_brightness brightness)
+{
+	struct il_priv *il = container_of(led_cdev, struct il_priv, led);
+	unsigned long on = 0;
+
+	if (brightness > 0)
+		on = IL_LED_SOLID;
+
+	il_led_cmd(il, on, 0);
+}
+
+static int
+il_led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on,
+		 unsigned long *delay_off)
+{
+	struct il_priv *il = container_of(led_cdev, struct il_priv, led);
+
+	return il_led_cmd(il, *delay_on, *delay_off);
+}
+
+void
+il_leds_init(struct il_priv *il)
+{
+	int mode = led_mode;
+	int ret;
+
+	if (mode == IL_LED_DEFAULT)
+		mode = il->cfg->led_mode;
+
+	il->led.name =
+	    kasprintf(GFP_KERNEL, "%s-led", wiphy_name(il->hw->wiphy));
+	il->led.brightness_set = il_led_brightness_set;
+	il->led.blink_set = il_led_blink_set;
+	il->led.max_brightness = 1;
+
+	switch (mode) {
+	case IL_LED_DEFAULT:
+		WARN_ON(1);
+		break;
+	case IL_LED_BLINK:
+		il->led.default_trigger =
+		    ieee80211_create_tpt_led_trigger(il->hw,
+						     IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
+						     il_blink,
+						     ARRAY_SIZE(il_blink));
+		break;
+	case IL_LED_RF_STATE:
+		il->led.default_trigger = ieee80211_get_radio_led_name(il->hw);
+		break;
+	}
+
+	ret = led_classdev_register(&il->pci_dev->dev, &il->led);
+	if (ret) {
+		kfree(il->led.name);
+		return;
+	}
+
+	il->led_registered = true;
+}
+EXPORT_SYMBOL(il_leds_init);
+
+void
+il_leds_exit(struct il_priv *il)
+{
+	if (!il->led_registered)
+		return;
+
+	led_classdev_unregister(&il->led);
+	kfree(il->led.name);
+}
+EXPORT_SYMBOL(il_leds_exit);
+
+/************************** EEPROM BANDS ****************************
+ *
+ * The il_eeprom_band definitions below provide the mapping from the
+ * EEPROM contents to the specific channel number supported for each
+ * band.
+ *
+ * For example, il_priv->eeprom.band_3_channels[4] from the band_3
+ * definition below maps to physical channel 42 in the 5.2GHz spectrum.
+ * The specific geography and calibration information for that channel
+ * is contained in the eeprom map itself.
+ *
+ * During init, we copy the eeprom information and channel map
+ * information into il->channel_info_24/52 and il->channel_map_24/52
+ *
+ * channel_map_24/52 provides the idx in the channel_info array for a
+ * given channel.  We have to have two separate maps as there is channel
+ * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
+ * band_2
+ *
+ * A value of 0xff stored in the channel_map indicates that the channel
+ * is not supported by the hardware at all.
+ *
+ * A value of 0xfe in the channel_map indicates that the channel is not
+ * valid for Tx with the current hardware.  This means that
+ * while the system can tune and receive on a given channel, it may not
+ * be able to associate or transmit any frames on that
+ * channel.  There is no corresponding channel information for that
+ * entry.
+ *
+ *********************************************************************/
+
+/* 2.4 GHz */
+const u8 il_eeprom_band_1[14] = {
+	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+};
+
+/* 5.2 GHz bands */
+static const u8 il_eeprom_band_2[] = {	/* 4915-5080MHz */
+	183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
+};
+
+static const u8 il_eeprom_band_3[] = {	/* 5170-5320MHz */
+	34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
+};
+
+static const u8 il_eeprom_band_4[] = {	/* 5500-5700MHz */
+	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
+};
+
+static const u8 il_eeprom_band_5[] = {	/* 5725-5825MHz */
+	145, 149, 153, 157, 161, 165
+};
+
+static const u8 il_eeprom_band_6[] = {	/* 2.4 ht40 channel */
+	1, 2, 3, 4, 5, 6, 7
+};
+
+static const u8 il_eeprom_band_7[] = {	/* 5.2 ht40 channel */
+	36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
+};
+
+/******************************************************************************
+ *
+ * EEPROM related functions
+ *
+******************************************************************************/
+
+static int
+il_eeprom_verify_signature(struct il_priv *il)
+{
+	u32 gp = _il_rd(il, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
+	int ret = 0;
+
+	D_EEPROM("EEPROM signature=0x%08x\n", gp);
+	switch (gp) {
+	case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
+	case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
+		break;
+	default:
+		IL_ERR("bad EEPROM signature," "EEPROM_GP=0x%08x\n", gp);
+		ret = -ENOENT;
+		break;
+	}
+	return ret;
+}
+
+const u8 *
+il_eeprom_query_addr(const struct il_priv *il, size_t offset)
+{
+	BUG_ON(offset >= il->cfg->eeprom_size);
+	return &il->eeprom[offset];
+}
+EXPORT_SYMBOL(il_eeprom_query_addr);
+
+u16
+il_eeprom_query16(const struct il_priv *il, size_t offset)
+{
+	if (!il->eeprom)
+		return 0;
+	return (u16) il->eeprom[offset] | ((u16) il->eeprom[offset + 1] << 8);
+}
+EXPORT_SYMBOL(il_eeprom_query16);
+
+/**
+ * il_eeprom_init - read EEPROM contents
+ *
+ * Load the EEPROM contents from adapter into il->eeprom
+ *
+ * NOTE:  This routine uses the non-debug IO access functions.
+ */
+int
+il_eeprom_init(struct il_priv *il)
+{
+	__le16 *e;
+	u32 gp = _il_rd(il, CSR_EEPROM_GP);
+	int sz;
+	int ret;
+	u16 addr;
+
+	/* allocate eeprom */
+	sz = il->cfg->eeprom_size;
+	D_EEPROM("NVM size = %d\n", sz);
+	il->eeprom = kzalloc(sz, GFP_KERNEL);
+	if (!il->eeprom)
+		return -ENOMEM;
+
+	e = (__le16 *) il->eeprom;
+
+	il->ops->apm_init(il);
+
+	ret = il_eeprom_verify_signature(il);
+	if (ret < 0) {
+		IL_ERR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
+		ret = -ENOENT;
+		goto err;
+	}
+
+	/* Make sure driver (instead of uCode) is allowed to read EEPROM */
+	ret = il->ops->eeprom_acquire_semaphore(il);
+	if (ret < 0) {
+		IL_ERR("Failed to acquire EEPROM semaphore.\n");
+		ret = -ENOENT;
+		goto err;
+	}
+
+	/* eeprom is an array of 16bit values */
+	for (addr = 0; addr < sz; addr += sizeof(u16)) {
+		u32 r;
+
+		_il_wr(il, CSR_EEPROM_REG,
+		       CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
+
+		ret =
+		    _il_poll_bit(il, CSR_EEPROM_REG,
+				 CSR_EEPROM_REG_READ_VALID_MSK,
+				 CSR_EEPROM_REG_READ_VALID_MSK,
+				 IL_EEPROM_ACCESS_TIMEOUT);
+		if (ret < 0) {
+			IL_ERR("Time out reading EEPROM[%d]\n", addr);
+			goto done;
+		}
+		r = _il_rd(il, CSR_EEPROM_REG);
+		e[addr / 2] = cpu_to_le16(r >> 16);
+	}
+
+	D_EEPROM("NVM Type: %s, version: 0x%x\n", "EEPROM",
+		 il_eeprom_query16(il, EEPROM_VERSION));
+
+	ret = 0;
+done:
+	il->ops->eeprom_release_semaphore(il);
+
+err:
+	if (ret)
+		il_eeprom_free(il);
+	/* Reset chip to save power until we load uCode during "up". */
+	il_apm_stop(il);
+	return ret;
+}
+EXPORT_SYMBOL(il_eeprom_init);
+
+void
+il_eeprom_free(struct il_priv *il)
+{
+	kfree(il->eeprom);
+	il->eeprom = NULL;
+}
+EXPORT_SYMBOL(il_eeprom_free);
+
+static void
+il_init_band_reference(const struct il_priv *il, int eep_band,
+		       int *eeprom_ch_count,
+		       const struct il_eeprom_channel **eeprom_ch_info,
+		       const u8 **eeprom_ch_idx)
+{
+	u32 offset = il->cfg->regulatory_bands[eep_band - 1];
+
+	switch (eep_band) {
+	case 1:		/* 2.4GHz band */
+		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_1);
+		*eeprom_ch_info =
+		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+								     offset);
+		*eeprom_ch_idx = il_eeprom_band_1;
+		break;
+	case 2:		/* 4.9GHz band */
+		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_2);
+		*eeprom_ch_info =
+		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+								     offset);
+		*eeprom_ch_idx = il_eeprom_band_2;
+		break;
+	case 3:		/* 5.2GHz band */
+		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_3);
+		*eeprom_ch_info =
+		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+								     offset);
+		*eeprom_ch_idx = il_eeprom_band_3;
+		break;
+	case 4:		/* 5.5GHz band */
+		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_4);
+		*eeprom_ch_info =
+		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+								     offset);
+		*eeprom_ch_idx = il_eeprom_band_4;
+		break;
+	case 5:		/* 5.7GHz band */
+		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_5);
+		*eeprom_ch_info =
+		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+								     offset);
+		*eeprom_ch_idx = il_eeprom_band_5;
+		break;
+	case 6:		/* 2.4GHz ht40 channels */
+		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_6);
+		*eeprom_ch_info =
+		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+								     offset);
+		*eeprom_ch_idx = il_eeprom_band_6;
+		break;
+	case 7:		/* 5 GHz ht40 channels */
+		*eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_7);
+		*eeprom_ch_info =
+		    (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+								     offset);
+		*eeprom_ch_idx = il_eeprom_band_7;
+		break;
+	default:
+		BUG();
+	}
+}
+
+#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
+			    ? # x " " : "")
+/**
+ * il_mod_ht40_chan_info - Copy HT40 channel info into the driver's il_priv
+ *
+ * Does not set up a command or touch hardware.
+ */
+static int
+il_mod_ht40_chan_info(struct il_priv *il, enum nl80211_band band, u16 channel,
+		      const struct il_eeprom_channel *eeprom_ch,
+		      u8 clear_ht40_extension_channel)
+{
+	struct il_channel_info *ch_info;
+
+	ch_info =
+	    (struct il_channel_info *)il_get_channel_info(il, band, channel);
+
+	if (!il_is_channel_valid(ch_info))
+		return -1;
+
+	D_EEPROM("HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
+		 " Ad-Hoc %ssupported\n", ch_info->channel,
+		 il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
+		 CHECK_AND_PRINT(IBSS), CHECK_AND_PRINT(ACTIVE),
+		 CHECK_AND_PRINT(RADAR), CHECK_AND_PRINT(WIDE),
+		 CHECK_AND_PRINT(DFS), eeprom_ch->flags,
+		 eeprom_ch->max_power_avg,
+		 ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) &&
+		  !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? "" : "not ");
+
+	ch_info->ht40_eeprom = *eeprom_ch;
+	ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
+	ch_info->ht40_flags = eeprom_ch->flags;
+	if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
+		ch_info->ht40_extension_channel &=
+		    ~clear_ht40_extension_channel;
+
+	return 0;
+}
+
+#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
+			    ? # x " " : "")
+
+/**
+ * il_init_channel_map - Set up driver's info for all possible channels
+ */
+int
+il_init_channel_map(struct il_priv *il)
+{
+	int eeprom_ch_count = 0;
+	const u8 *eeprom_ch_idx = NULL;
+	const struct il_eeprom_channel *eeprom_ch_info = NULL;
+	int band, ch;
+	struct il_channel_info *ch_info;
+
+	if (il->channel_count) {
+		D_EEPROM("Channel map already initialized.\n");
+		return 0;
+	}
+
+	D_EEPROM("Initializing regulatory info from EEPROM\n");
+
+	il->channel_count =
+	    ARRAY_SIZE(il_eeprom_band_1) + ARRAY_SIZE(il_eeprom_band_2) +
+	    ARRAY_SIZE(il_eeprom_band_3) + ARRAY_SIZE(il_eeprom_band_4) +
+	    ARRAY_SIZE(il_eeprom_band_5);
+
+	D_EEPROM("Parsing data for %d channels.\n", il->channel_count);
+
+	il->channel_info =
+	    kcalloc(il->channel_count, sizeof(struct il_channel_info),
+		    GFP_KERNEL);
+	if (!il->channel_info) {
+		IL_ERR("Could not allocate channel_info\n");
+		il->channel_count = 0;
+		return -ENOMEM;
+	}
+
+	ch_info = il->channel_info;
+
+	/* Loop through the 5 EEPROM bands, adding them in order to the
+	 * channel map we maintain (which contains additional information
+	 * beyond what is in the EEPROM) */
+	for (band = 1; band <= 5; band++) {
+
+		il_init_band_reference(il, band, &eeprom_ch_count,
+				       &eeprom_ch_info, &eeprom_ch_idx);
+
+		/* Loop through each band adding each of the channels */
+		for (ch = 0; ch < eeprom_ch_count; ch++) {
+			ch_info->channel = eeprom_ch_idx[ch];
+			ch_info->band =
+			    (band ==
+			     1) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+
+			/* permanently store EEPROM's channel regulatory flags
+			 *   and max power in channel info database. */
+			ch_info->eeprom = eeprom_ch_info[ch];
+
+			/* Copy the run-time flags so they are there even on
+			 * invalid channels */
+			ch_info->flags = eeprom_ch_info[ch].flags;
+			/* First write that ht40 is not enabled, and then enable
+			 * one by one */
+			ch_info->ht40_extension_channel =
+			    IEEE80211_CHAN_NO_HT40;
+
+			if (!(il_is_channel_valid(ch_info))) {
+				D_EEPROM("Ch. %d Flags %x [%sGHz] - "
+					 "No traffic\n", ch_info->channel,
+					 ch_info->flags,
+					 il_is_channel_a_band(ch_info) ? "5.2" :
+					 "2.4");
+				ch_info++;
+				continue;
+			}
+
+			/* Initialize regulatory-based run-time data */
+			ch_info->max_power_avg = ch_info->curr_txpow =
+			    eeprom_ch_info[ch].max_power_avg;
+			ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
+			ch_info->min_power = 0;
+
+			D_EEPROM("Ch. %d [%sGHz] " "%s%s%s%s%s%s(0x%02x %ddBm):"
+				 " Ad-Hoc %ssupported\n", ch_info->channel,
+				 il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
+				 CHECK_AND_PRINT_I(VALID),
+				 CHECK_AND_PRINT_I(IBSS),
+				 CHECK_AND_PRINT_I(ACTIVE),
+				 CHECK_AND_PRINT_I(RADAR),
+				 CHECK_AND_PRINT_I(WIDE),
+				 CHECK_AND_PRINT_I(DFS),
+				 eeprom_ch_info[ch].flags,
+				 eeprom_ch_info[ch].max_power_avg,
+				 ((eeprom_ch_info[ch].
+				   flags & EEPROM_CHANNEL_IBSS) &&
+				  !(eeprom_ch_info[ch].
+				    flags & EEPROM_CHANNEL_RADAR)) ? "" :
+				 "not ");
+
+			ch_info++;
+		}
+	}
+
+	/* Check if we do have HT40 channels */
+	if (il->cfg->regulatory_bands[5] == EEPROM_REGULATORY_BAND_NO_HT40 &&
+	    il->cfg->regulatory_bands[6] == EEPROM_REGULATORY_BAND_NO_HT40)
+		return 0;
+
+	/* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
+	for (band = 6; band <= 7; band++) {
+		enum nl80211_band ieeeband;
+
+		il_init_band_reference(il, band, &eeprom_ch_count,
+				       &eeprom_ch_info, &eeprom_ch_idx);
+
+		/* EEPROM band 6 is 2.4, band 7 is 5 GHz */
+		ieeeband =
+		    (band == 6) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+
+		/* Loop through each band adding each of the channels */
+		for (ch = 0; ch < eeprom_ch_count; ch++) {
+			/* Set up driver's info for lower half */
+			il_mod_ht40_chan_info(il, ieeeband, eeprom_ch_idx[ch],
+					      &eeprom_ch_info[ch],
+					      IEEE80211_CHAN_NO_HT40PLUS);
+
+			/* Set up driver's info for upper half */
+			il_mod_ht40_chan_info(il, ieeeband,
+					      eeprom_ch_idx[ch] + 4,
+					      &eeprom_ch_info[ch],
+					      IEEE80211_CHAN_NO_HT40MINUS);
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(il_init_channel_map);
+
+/*
+ * il_free_channel_map - undo allocations in il_init_channel_map
+ */
+void
+il_free_channel_map(struct il_priv *il)
+{
+	kfree(il->channel_info);
+	il->channel_count = 0;
+}
+EXPORT_SYMBOL(il_free_channel_map);
+
+/**
+ * il_get_channel_info - Find the driver's private channel info
+ *
+ * Based on band and channel number.
+ */
+const struct il_channel_info *
+il_get_channel_info(const struct il_priv *il, enum nl80211_band band,
+		    u16 channel)
+{
+	int i;
+
+	switch (band) {
+	case NL80211_BAND_5GHZ:
+		for (i = 14; i < il->channel_count; i++) {
+			if (il->channel_info[i].channel == channel)
+				return &il->channel_info[i];
+		}
+		break;
+	case NL80211_BAND_2GHZ:
+		if (channel >= 1 && channel <= 14)
+			return &il->channel_info[channel - 1];
+		break;
+	default:
+		BUG();
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(il_get_channel_info);
+
+/*
+ * Setting power level allows the card to go to sleep when not busy.
+ *
+ * We calculate a sleep command based on the required latency, which
+ * we get from mac80211.
+ */
+
+#define SLP_VEC(X0, X1, X2, X3, X4) { \
+		cpu_to_le32(X0), \
+		cpu_to_le32(X1), \
+		cpu_to_le32(X2), \
+		cpu_to_le32(X3), \
+		cpu_to_le32(X4)  \
+}
+
+static void
+il_build_powertable_cmd(struct il_priv *il, struct il_powertable_cmd *cmd)
+{
+	const __le32 interval[3][IL_POWER_VEC_SIZE] = {
+		SLP_VEC(2, 2, 4, 6, 0xFF),
+		SLP_VEC(2, 4, 7, 10, 10),
+		SLP_VEC(4, 7, 10, 10, 0xFF)
+	};
+	int i, dtim_period, no_dtim;
+	u32 max_sleep;
+	bool skip;
+
+	memset(cmd, 0, sizeof(*cmd));
+
+	if (il->power_data.pci_pm)
+		cmd->flags |= IL_POWER_PCI_PM_MSK;
+
+	/* if no Power Save, we are done */
+	if (il->power_data.ps_disabled)
+		return;
+
+	cmd->flags = IL_POWER_DRIVER_ALLOW_SLEEP_MSK;
+	cmd->keep_alive_seconds = 0;
+	cmd->debug_flags = 0;
+	cmd->rx_data_timeout = cpu_to_le32(25 * 1024);
+	cmd->tx_data_timeout = cpu_to_le32(25 * 1024);
+	cmd->keep_alive_beacons = 0;
+
+	dtim_period = il->vif ? il->vif->bss_conf.dtim_period : 0;
+
+	if (dtim_period <= 2) {
+		memcpy(cmd->sleep_interval, interval[0], sizeof(interval[0]));
+		no_dtim = 2;
+	} else if (dtim_period <= 10) {
+		memcpy(cmd->sleep_interval, interval[1], sizeof(interval[1]));
+		no_dtim = 2;
+	} else {
+		memcpy(cmd->sleep_interval, interval[2], sizeof(interval[2]));
+		no_dtim = 0;
+	}
+
+	if (dtim_period == 0) {
+		dtim_period = 1;
+		skip = false;
+	} else {
+		skip = !!no_dtim;
+	}
+
+	if (skip) {
+		__le32 tmp = cmd->sleep_interval[IL_POWER_VEC_SIZE - 1];
+
+		max_sleep = le32_to_cpu(tmp);
+		if (max_sleep == 0xFF)
+			max_sleep = dtim_period * (skip + 1);
+		else if (max_sleep > dtim_period)
+			max_sleep = (max_sleep / dtim_period) * dtim_period;
+		cmd->flags |= IL_POWER_SLEEP_OVER_DTIM_MSK;
+	} else {
+		max_sleep = dtim_period;
+		cmd->flags &= ~IL_POWER_SLEEP_OVER_DTIM_MSK;
+	}
+
+	for (i = 0; i < IL_POWER_VEC_SIZE; i++)
+		if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep)
+			cmd->sleep_interval[i] = cpu_to_le32(max_sleep);
+}
+
+static int
+il_set_power(struct il_priv *il, struct il_powertable_cmd *cmd)
+{
+	D_POWER("Sending power/sleep command\n");
+	D_POWER("Flags value = 0x%08X\n", cmd->flags);
+	D_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
+	D_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
+	D_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
+		le32_to_cpu(cmd->sleep_interval[0]),
+		le32_to_cpu(cmd->sleep_interval[1]),
+		le32_to_cpu(cmd->sleep_interval[2]),
+		le32_to_cpu(cmd->sleep_interval[3]),
+		le32_to_cpu(cmd->sleep_interval[4]));
+
+	return il_send_cmd_pdu(il, C_POWER_TBL,
+			       sizeof(struct il_powertable_cmd), cmd);
+}
+
+static int
+il_power_set_mode(struct il_priv *il, struct il_powertable_cmd *cmd, bool force)
+{
+	int ret;
+	bool update_chains;
+
+	lockdep_assert_held(&il->mutex);
+
+	/* Don't update the RX chain when chain noise calibration is running */
+	update_chains = il->chain_noise_data.state == IL_CHAIN_NOISE_DONE ||
+	    il->chain_noise_data.state == IL_CHAIN_NOISE_ALIVE;
+
+	if (!memcmp(&il->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
+		return 0;
+
+	if (!il_is_ready_rf(il))
+		return -EIO;
+
+	/* On scan complete, sleep_cmd_next is used, so keep it updated */
+	memcpy(&il->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
+	if (test_bit(S_SCANNING, &il->status) && !force) {
+		D_INFO("Defer power set mode while scanning\n");
+		return 0;
+	}
+
+	if (cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK)
+		set_bit(S_POWER_PMI, &il->status);
+
+	ret = il_set_power(il, cmd);
+	if (!ret) {
+		if (!(cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK))
+			clear_bit(S_POWER_PMI, &il->status);
+
+		if (il->ops->update_chain_flags && update_chains)
+			il->ops->update_chain_flags(il);
+		else if (il->ops->update_chain_flags)
+			D_POWER("Cannot update the power, chain noise "
+				"calibration running: %d\n",
+				il->chain_noise_data.state);
+
+		memcpy(&il->power_data.sleep_cmd, cmd, sizeof(*cmd));
+	} else
+		IL_ERR("set power fail, ret = %d", ret);
+
+	return ret;
+}
+
+int
+il_power_update_mode(struct il_priv *il, bool force)
+{
+	struct il_powertable_cmd cmd;
+
+	il_build_powertable_cmd(il, &cmd);
+
+	return il_power_set_mode(il, &cmd, force);
+}
+EXPORT_SYMBOL(il_power_update_mode);
+
+/* initialize to default */
+void
+il_power_initialize(struct il_priv *il)
+{
+	u16 lctl;
+
+	pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
+	il->power_data.pci_pm = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
+
+	il->power_data.debug_sleep_level_override = -1;
+
+	memset(&il->power_data.sleep_cmd, 0, sizeof(il->power_data.sleep_cmd));
+}
+EXPORT_SYMBOL(il_power_initialize);
+
+/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
+ * sending probe req.  This should be set long enough to hear probe responses
+ * from more than one AP.  */
+#define IL_ACTIVE_DWELL_TIME_24    (30)	/* all times in msec */
+#define IL_ACTIVE_DWELL_TIME_52    (20)
+
+#define IL_ACTIVE_DWELL_FACTOR_24GHZ (3)
+#define IL_ACTIVE_DWELL_FACTOR_52GHZ (2)
+
+/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
+ * Must be set longer than active dwell time.
+ * For the most reliable scan, set > AP beacon interval (typically 100msec). */
+#define IL_PASSIVE_DWELL_TIME_24   (20)	/* all times in msec */
+#define IL_PASSIVE_DWELL_TIME_52   (10)
+#define IL_PASSIVE_DWELL_BASE      (100)
+#define IL_CHANNEL_TUNE_TIME       5
+
+static int
+il_send_scan_abort(struct il_priv *il)
+{
+	int ret;
+	struct il_rx_pkt *pkt;
+	struct il_host_cmd cmd = {
+		.id = C_SCAN_ABORT,
+		.flags = CMD_WANT_SKB,
+	};
+
+	/* Exit instantly with error when device is not ready
+	 * to receive scan abort command or it does not perform
+	 * hardware scan currently */
+	if (!test_bit(S_READY, &il->status) ||
+	    !test_bit(S_GEO_CONFIGURED, &il->status) ||
+	    !test_bit(S_SCAN_HW, &il->status) ||
+	    test_bit(S_FW_ERROR, &il->status) ||
+	    test_bit(S_EXIT_PENDING, &il->status))
+		return -EIO;
+
+	ret = il_send_cmd_sync(il, &cmd);
+	if (ret)
+		return ret;
+
+	pkt = (struct il_rx_pkt *)cmd.reply_page;
+	if (pkt->u.status != CAN_ABORT_STATUS) {
+		/* The scan abort will return 1 for success or
+		 * 2 for "failure".  A failure condition can be
+		 * due to simply not being in an active scan, which
+		 * can occur if we send the scan abort before the
+		 * microcode has notified us that a scan is
+		 * completed. */
+		D_SCAN("SCAN_ABORT ret %d.\n", pkt->u.status);
+		ret = -EIO;
+	}
+
+	il_free_pages(il, cmd.reply_page);
+	return ret;
+}
+
+static void
+il_complete_scan(struct il_priv *il, bool aborted)
+{
+	struct cfg80211_scan_info info = {
+		.aborted = aborted,
+	};
+
+	/* check if scan was requested from mac80211 */
+	if (il->scan_request) {
+		D_SCAN("Complete scan in mac80211\n");
+		ieee80211_scan_completed(il->hw, &info);
+	}
+
+	il->scan_vif = NULL;
+	il->scan_request = NULL;
+}
+
+void
+il_force_scan_end(struct il_priv *il)
+{
+	lockdep_assert_held(&il->mutex);
+
+	if (!test_bit(S_SCANNING, &il->status)) {
+		D_SCAN("Forcing scan end while not scanning\n");
+		return;
+	}
+
+	D_SCAN("Forcing scan end\n");
+	clear_bit(S_SCANNING, &il->status);
+	clear_bit(S_SCAN_HW, &il->status);
+	clear_bit(S_SCAN_ABORTING, &il->status);
+	il_complete_scan(il, true);
+}
+
+static void
+il_do_scan_abort(struct il_priv *il)
+{
+	int ret;
+
+	lockdep_assert_held(&il->mutex);
+
+	if (!test_bit(S_SCANNING, &il->status)) {
+		D_SCAN("Not performing scan to abort\n");
+		return;
+	}
+
+	if (test_and_set_bit(S_SCAN_ABORTING, &il->status)) {
+		D_SCAN("Scan abort in progress\n");
+		return;
+	}
+
+	ret = il_send_scan_abort(il);
+	if (ret) {
+		D_SCAN("Send scan abort failed %d\n", ret);
+		il_force_scan_end(il);
+	} else
+		D_SCAN("Successfully send scan abort\n");
+}
+
+/**
+ * il_scan_cancel - Cancel any currently executing HW scan
+ */
+int
+il_scan_cancel(struct il_priv *il)
+{
+	D_SCAN("Queuing abort scan\n");
+	queue_work(il->workqueue, &il->abort_scan);
+	return 0;
+}
+EXPORT_SYMBOL(il_scan_cancel);
+
+/**
+ * il_scan_cancel_timeout - Cancel any currently executing HW scan
+ * @ms: amount of time to wait (in milliseconds) for scan to abort
+ *
+ */
+int
+il_scan_cancel_timeout(struct il_priv *il, unsigned long ms)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(ms);
+
+	lockdep_assert_held(&il->mutex);
+
+	D_SCAN("Scan cancel timeout\n");
+
+	il_do_scan_abort(il);
+
+	while (time_before_eq(jiffies, timeout)) {
+		if (!test_bit(S_SCAN_HW, &il->status))
+			break;
+		msleep(20);
+	}
+
+	return test_bit(S_SCAN_HW, &il->status);
+}
+EXPORT_SYMBOL(il_scan_cancel_timeout);
+
+/* Service response to C_SCAN (0x80) */
+static void
+il_hdl_scan(struct il_priv *il, struct il_rx_buf *rxb)
+{
+#ifdef CONFIG_IWLEGACY_DEBUG
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	struct il_scanreq_notification *notif =
+	    (struct il_scanreq_notification *)pkt->u.raw;
+
+	D_SCAN("Scan request status = 0x%x\n", notif->status);
+#endif
+}
+
+/* Service N_SCAN_START (0x82) */
+static void
+il_hdl_scan_start(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	struct il_scanstart_notification *notif =
+	    (struct il_scanstart_notification *)pkt->u.raw;
+	il->scan_start_tsf = le32_to_cpu(notif->tsf_low);
+	D_SCAN("Scan start: " "%d [802.11%s] "
+	       "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", notif->channel,
+	       notif->band ? "bg" : "a", le32_to_cpu(notif->tsf_high),
+	       le32_to_cpu(notif->tsf_low), notif->status, notif->beacon_timer);
+}
+
+/* Service N_SCAN_RESULTS (0x83) */
+static void
+il_hdl_scan_results(struct il_priv *il, struct il_rx_buf *rxb)
+{
+#ifdef CONFIG_IWLEGACY_DEBUG
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	struct il_scanresults_notification *notif =
+	    (struct il_scanresults_notification *)pkt->u.raw;
+
+	D_SCAN("Scan ch.res: " "%d [802.11%s] " "(TSF: 0x%08X:%08X) - %d "
+	       "elapsed=%lu usec\n", notif->channel, notif->band ? "bg" : "a",
+	       le32_to_cpu(notif->tsf_high), le32_to_cpu(notif->tsf_low),
+	       le32_to_cpu(notif->stats[0]),
+	       le32_to_cpu(notif->tsf_low) - il->scan_start_tsf);
+#endif
+}
+
+/* Service N_SCAN_COMPLETE (0x84) */
+static void
+il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb)
+{
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	struct il_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
+#endif
+
+	D_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
+	       scan_notif->scanned_channels, scan_notif->tsf_low,
+	       scan_notif->tsf_high, scan_notif->status);
+
+	/* The HW is no longer scanning */
+	clear_bit(S_SCAN_HW, &il->status);
+
+	D_SCAN("Scan on %sGHz took %dms\n",
+	       (il->scan_band == NL80211_BAND_2GHZ) ? "2.4" : "5.2",
+	       jiffies_to_msecs(jiffies - il->scan_start));
+
+	queue_work(il->workqueue, &il->scan_completed);
+}
+
+void
+il_setup_rx_scan_handlers(struct il_priv *il)
+{
+	/* scan handlers */
+	il->handlers[C_SCAN] = il_hdl_scan;
+	il->handlers[N_SCAN_START] = il_hdl_scan_start;
+	il->handlers[N_SCAN_RESULTS] = il_hdl_scan_results;
+	il->handlers[N_SCAN_COMPLETE] = il_hdl_scan_complete;
+}
+EXPORT_SYMBOL(il_setup_rx_scan_handlers);
+
+u16
+il_get_active_dwell_time(struct il_priv *il, enum nl80211_band band,
+			 u8 n_probes)
+{
+	if (band == NL80211_BAND_5GHZ)
+		return IL_ACTIVE_DWELL_TIME_52 +
+		    IL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
+	else
+		return IL_ACTIVE_DWELL_TIME_24 +
+		    IL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
+}
+EXPORT_SYMBOL(il_get_active_dwell_time);
+
+u16
+il_get_passive_dwell_time(struct il_priv *il, enum nl80211_band band,
+			  struct ieee80211_vif *vif)
+{
+	u16 value;
+
+	u16 passive =
+	    (band ==
+	     NL80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE +
+	    IL_PASSIVE_DWELL_TIME_24 : IL_PASSIVE_DWELL_BASE +
+	    IL_PASSIVE_DWELL_TIME_52;
+
+	if (il_is_any_associated(il)) {
+		/*
+		 * If we're associated, we clamp the maximum passive
+		 * dwell time to be 98% of the smallest beacon interval
+		 * (minus 2 * channel tune time)
+		 */
+		value = il->vif ? il->vif->bss_conf.beacon_int : 0;
+		if (value > IL_PASSIVE_DWELL_BASE || !value)
+			value = IL_PASSIVE_DWELL_BASE;
+		value = (value * 98) / 100 - IL_CHANNEL_TUNE_TIME * 2;
+		passive = min(value, passive);
+	}
+
+	return passive;
+}
+EXPORT_SYMBOL(il_get_passive_dwell_time);
+
+void
+il_init_scan_params(struct il_priv *il)
+{
+	u8 ant_idx = fls(il->hw_params.valid_tx_ant) - 1;
+	if (!il->scan_tx_ant[NL80211_BAND_5GHZ])
+		il->scan_tx_ant[NL80211_BAND_5GHZ] = ant_idx;
+	if (!il->scan_tx_ant[NL80211_BAND_2GHZ])
+		il->scan_tx_ant[NL80211_BAND_2GHZ] = ant_idx;
+}
+EXPORT_SYMBOL(il_init_scan_params);
+
+static int
+il_scan_initiate(struct il_priv *il, struct ieee80211_vif *vif)
+{
+	int ret;
+
+	lockdep_assert_held(&il->mutex);
+
+	cancel_delayed_work(&il->scan_check);
+
+	if (!il_is_ready_rf(il)) {
+		IL_WARN("Request scan called when driver not ready.\n");
+		return -EIO;
+	}
+
+	if (test_bit(S_SCAN_HW, &il->status)) {
+		D_SCAN("Multiple concurrent scan requests in parallel.\n");
+		return -EBUSY;
+	}
+
+	if (test_bit(S_SCAN_ABORTING, &il->status)) {
+		D_SCAN("Scan request while abort pending.\n");
+		return -EBUSY;
+	}
+
+	D_SCAN("Starting scan...\n");
+
+	set_bit(S_SCANNING, &il->status);
+	il->scan_start = jiffies;
+
+	ret = il->ops->request_scan(il, vif);
+	if (ret) {
+		clear_bit(S_SCANNING, &il->status);
+		return ret;
+	}
+
+	queue_delayed_work(il->workqueue, &il->scan_check,
+			   IL_SCAN_CHECK_WATCHDOG);
+
+	return 0;
+}
+
+int
+il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+	       struct ieee80211_scan_request *hw_req)
+{
+	struct cfg80211_scan_request *req = &hw_req->req;
+	struct il_priv *il = hw->priv;
+	int ret;
+
+	if (req->n_channels == 0) {
+		IL_ERR("Can not scan on no channels.\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&il->mutex);
+	D_MAC80211("enter\n");
+
+	if (test_bit(S_SCANNING, &il->status)) {
+		D_SCAN("Scan already in progress.\n");
+		ret = -EAGAIN;
+		goto out_unlock;
+	}
+
+	/* mac80211 will only ask for one band at a time */
+	il->scan_request = req;
+	il->scan_vif = vif;
+	il->scan_band = req->channels[0]->band;
+
+	ret = il_scan_initiate(il, vif);
+
+out_unlock:
+	D_MAC80211("leave ret %d\n", ret);
+	mutex_unlock(&il->mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(il_mac_hw_scan);
+
+static void
+il_bg_scan_check(struct work_struct *data)
+{
+	struct il_priv *il =
+	    container_of(data, struct il_priv, scan_check.work);
+
+	D_SCAN("Scan check work\n");
+
+	/* Since we are here, the firmware has not finished the scan and
+	 * is most likely in bad shape, so we don't bother to send an
+	 * abort command; just force scan complete to mac80211 */
+	mutex_lock(&il->mutex);
+	il_force_scan_end(il);
+	mutex_unlock(&il->mutex);
+}
+
+/**
+ * il_fill_probe_req - fill in all required fields and IEs for a probe request
+ */
+u16
+il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
+		  const u8 *ta, const u8 *ies, int ie_len, int left)
+{
+	int len = 0;
+	u8 *pos = NULL;
+
+	/* Make sure there is enough space for the probe request,
+	 * two mandatory IEs and the data */
+	left -= 24;
+	if (left < 0)
+		return 0;
+
+	frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
+	eth_broadcast_addr(frame->da);
+	memcpy(frame->sa, ta, ETH_ALEN);
+	eth_broadcast_addr(frame->bssid);
+	frame->seq_ctrl = 0;
+
+	len += 24;
+
+	/* ...next IE... */
+	pos = &frame->u.probe_req.variable[0];
+
+	/* fill in our wildcard (zero-length) SSID IE */
+	left -= 2;
+	if (left < 0)
+		return 0;
+	*pos++ = WLAN_EID_SSID;
+	*pos++ = 0;
+
+	len += 2;
+
+	if (WARN_ON(left < ie_len))
+		return len;
+
+	if (ies && ie_len) {
+		memcpy(pos, ies, ie_len);
+		len += ie_len;
+	}
+
+	return (u16) len;
+}
+EXPORT_SYMBOL(il_fill_probe_req);
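+
+/*
+ * Usage sketch (illustrative; "buf" and "size" are assumed to be a
+ * caller-owned frame buffer and its length): the return value is the
+ * number of bytes filled in, 0 meaning the 24-byte header plus the
+ * mandatory wildcard SSID IE did not fit.
+ *
+ *	len = il_fill_probe_req(il, (struct ieee80211_mgmt *)buf, ta,
+ *				req->ie, req->ie_len, size);
+ *	if (!len)
+ *		return -ENOMEM;
+ */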
+
+static void
+il_bg_abort_scan(struct work_struct *work)
+{
+	struct il_priv *il = container_of(work, struct il_priv, abort_scan);
+
+	D_SCAN("Abort scan work\n");
+
+	/* We keep the scan_check work queued in case the firmware fails
+	 * to report back a scan-completed notification */
+	mutex_lock(&il->mutex);
+	il_scan_cancel_timeout(il, 200);
+	mutex_unlock(&il->mutex);
+}
+
+static void
+il_bg_scan_completed(struct work_struct *work)
+{
+	struct il_priv *il = container_of(work, struct il_priv, scan_completed);
+	bool aborted;
+
+	D_SCAN("Completed scan.\n");
+
+	cancel_delayed_work(&il->scan_check);
+
+	mutex_lock(&il->mutex);
+
+	aborted = test_and_clear_bit(S_SCAN_ABORTING, &il->status);
+	if (aborted)
+		D_SCAN("Aborted scan completed.\n");
+
+	if (!test_and_clear_bit(S_SCANNING, &il->status)) {
+		D_SCAN("Scan already completed.\n");
+		goto out_settings;
+	}
+
+	il_complete_scan(il, aborted);
+
+out_settings:
+	/* Can we still talk to the firmware? */
+	if (!il_is_ready_rf(il))
+		goto out;
+
+	/*
+	 * We do not commit power settings while scan is pending,
+	 * do it now if the settings changed.
+	 */
+	il_power_set_mode(il, &il->power_data.sleep_cmd_next, false);
+	il_set_tx_power(il, il->tx_power_next, false);
+
+	il->ops->post_scan(il);
+
+out:
+	mutex_unlock(&il->mutex);
+}
+
+void
+il_setup_scan_deferred_work(struct il_priv *il)
+{
+	INIT_WORK(&il->scan_completed, il_bg_scan_completed);
+	INIT_WORK(&il->abort_scan, il_bg_abort_scan);
+	INIT_DELAYED_WORK(&il->scan_check, il_bg_scan_check);
+}
+EXPORT_SYMBOL(il_setup_scan_deferred_work);
+
+void
+il_cancel_scan_deferred_work(struct il_priv *il)
+{
+	cancel_work_sync(&il->abort_scan);
+	cancel_work_sync(&il->scan_completed);
+
+	if (cancel_delayed_work_sync(&il->scan_check)) {
+		mutex_lock(&il->mutex);
+		il_force_scan_end(il);
+		mutex_unlock(&il->mutex);
+	}
+}
+EXPORT_SYMBOL(il_cancel_scan_deferred_work);
+
+/* il->sta_lock must be held */
+static void
+il_sta_ucode_activate(struct il_priv *il, u8 sta_id)
+{
+	if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE))
+		IL_ERR("ACTIVATE a non DRIVER active station id %u addr %pM\n",
+		       sta_id, il->stations[sta_id].sta.sta.addr);
+
+	if (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) {
+		D_ASSOC("STA id %u addr %pM already present"
+			" in uCode (according to driver)\n", sta_id,
+			il->stations[sta_id].sta.sta.addr);
+	} else {
+		il->stations[sta_id].used |= IL_STA_UCODE_ACTIVE;
+		D_ASSOC("Added STA id %u addr %pM to uCode\n", sta_id,
+			il->stations[sta_id].sta.sta.addr);
+	}
+}
+
+static int
+il_process_add_sta_resp(struct il_priv *il, struct il_addsta_cmd *addsta,
+			struct il_rx_pkt *pkt, bool sync)
+{
+	u8 sta_id = addsta->sta.sta_id;
+	unsigned long flags;
+	int ret = -EIO;
+
+	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+		IL_ERR("Bad return from C_ADD_STA (0x%08X)\n", pkt->hdr.flags);
+		return ret;
+	}
+
+	D_INFO("Processing response for adding station %u\n", sta_id);
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+
+	switch (pkt->u.add_sta.status) {
+	case ADD_STA_SUCCESS_MSK:
+		D_INFO("C_ADD_STA PASSED\n");
+		il_sta_ucode_activate(il, sta_id);
+		ret = 0;
+		break;
+	case ADD_STA_NO_ROOM_IN_TBL:
+		IL_ERR("Adding station %d failed, no room in table.\n", sta_id);
+		break;
+	case ADD_STA_NO_BLOCK_ACK_RESOURCE:
+		IL_ERR("Adding station %d failed, no block ack resource.\n",
+		       sta_id);
+		break;
+	case ADD_STA_MODIFY_NON_EXIST_STA:
+		IL_ERR("Attempting to modify non-existing station %d\n",
+		       sta_id);
+		break;
+	default:
+		D_ASSOC("Received C_ADD_STA:(0x%08X)\n", pkt->u.add_sta.status);
+		break;
+	}
+
+	D_INFO("%s station id %u addr %pM\n",
+	       il->stations[sta_id].sta.mode ==
+	       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", sta_id,
+	       il->stations[sta_id].sta.sta.addr);
+
+	/*
+	 * XXX: The MAC address in the command buffer is often changed from
+	 * the original sent to the device. That is, the MAC address
+	 * written to the command buffer often is not the same MAC address
+	 * read from the command buffer when the command returns. This
+	 * issue has not yet been resolved and this debugging is left to
+	 * observe the problem.
+	 */
+	D_INFO("%s station according to cmd buffer %pM\n",
+	       il->stations[sta_id].sta.mode ==
+	       STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", addsta->sta.addr);
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	return ret;
+}
+
+static void
+il_add_sta_callback(struct il_priv *il, struct il_device_cmd *cmd,
+		    struct il_rx_pkt *pkt)
+{
+	struct il_addsta_cmd *addsta = (struct il_addsta_cmd *)cmd->cmd.payload;
+
+	il_process_add_sta_resp(il, addsta, pkt, false);
+}
+
+int
+il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags)
+{
+	struct il_rx_pkt *pkt = NULL;
+	int ret = 0;
+	u8 data[sizeof(*sta)];
+	struct il_host_cmd cmd = {
+		.id = C_ADD_STA,
+		.flags = flags,
+		.data = data,
+	};
+	u8 sta_id __maybe_unused = sta->sta.sta_id;
+
+	D_INFO("Adding sta %u (%pM) %ssynchronously\n", sta_id, sta->sta.addr,
+	       flags & CMD_ASYNC ? "a" : "");
+
+	if (flags & CMD_ASYNC)
+		cmd.callback = il_add_sta_callback;
+	else {
+		cmd.flags |= CMD_WANT_SKB;
+		might_sleep();
+	}
+
+	cmd.len = il->ops->build_addsta_hcmd(sta, data);
+	ret = il_send_cmd(il, &cmd);
+	if (ret)
+		return ret;
+	if (flags & CMD_ASYNC)
+		return 0;
+
+	pkt = (struct il_rx_pkt *)cmd.reply_page;
+	ret = il_process_add_sta_resp(il, sta, pkt, true);
+
+	il_free_pages(il, cmd.reply_page);
+
+	return ret;
+}
+EXPORT_SYMBOL(il_send_add_sta);
+
+static void
+il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta)
+{
+	struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
+	__le32 sta_flags;
+
+	if (!sta || !sta_ht_inf->ht_supported)
+		goto done;
+
+	D_ASSOC("spatial multiplexing power save mode: %s\n",
+		(sta->smps_mode == IEEE80211_SMPS_STATIC) ? "static" :
+		(sta->smps_mode == IEEE80211_SMPS_DYNAMIC) ? "dynamic" :
+		"disabled");
+
+	sta_flags = il->stations[idx].sta.station_flags;
+
+	sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
+
+	switch (sta->smps_mode) {
+	case IEEE80211_SMPS_STATIC:
+		sta_flags |= STA_FLG_MIMO_DIS_MSK;
+		break;
+	case IEEE80211_SMPS_DYNAMIC:
+		sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
+		break;
+	case IEEE80211_SMPS_OFF:
+		break;
+	default:
+		IL_WARN("Invalid MIMO PS mode %d\n", sta->smps_mode);
+		break;
+	}
+
+	sta_flags |=
+	    cpu_to_le32((u32) sta_ht_inf->ampdu_factor <<
+			STA_FLG_MAX_AGG_SIZE_POS);
+
+	sta_flags |=
+	    cpu_to_le32((u32) sta_ht_inf->ampdu_density <<
+			STA_FLG_AGG_MPDU_DENSITY_POS);
+
+	if (il_is_ht40_tx_allowed(il, &sta->ht_cap))
+		sta_flags |= STA_FLG_HT40_EN_MSK;
+	else
+		sta_flags &= ~STA_FLG_HT40_EN_MSK;
+
+	il->stations[idx].sta.station_flags = sta_flags;
+done:
+	return;
+}
+
+/**
+ * il_prep_station - Prepare station information for addition
+ *
+ * Should be called with il->sta_lock held.
+ */
+u8
+il_prep_station(struct il_priv *il, const u8 *addr, bool is_ap,
+		struct ieee80211_sta *sta)
+{
+	struct il_station_entry *station;
+	int i;
+	u8 sta_id = IL_INVALID_STATION;
+	u16 rate;
+
+	if (is_ap)
+		sta_id = IL_AP_ID;
+	else if (is_broadcast_ether_addr(addr))
+		sta_id = il->hw_params.bcast_id;
+	else
+		for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) {
+			if (ether_addr_equal(il->stations[i].sta.sta.addr,
+					     addr)) {
+				sta_id = i;
+				break;
+			}
+
+			if (!il->stations[i].used &&
+			    sta_id == IL_INVALID_STATION)
+				sta_id = i;
+		}
+
+	/* Bail out if neither a matching nor a free entry was found */
+	if (unlikely(sta_id == IL_INVALID_STATION))
+		return sta_id;
+
+	/*
+	 * uCode is not able to deal with multiple requests to add a
+	 * station. Keep track if one is in progress so that we do not send
+	 * another.
+	 */
+	if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
+		D_INFO("STA %d already in process of being added.\n", sta_id);
+		return sta_id;
+	}
+
+	if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
+	    (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) &&
+	    ether_addr_equal(il->stations[sta_id].sta.sta.addr, addr)) {
+		D_ASSOC("STA %d (%pM) already added, not adding again.\n",
+			sta_id, addr);
+		return sta_id;
+	}
+
+	station = &il->stations[sta_id];
+	station->used = IL_STA_DRIVER_ACTIVE;
+	D_ASSOC("Add STA to driver ID %d: %pM\n", sta_id, addr);
+	il->num_stations++;
+
+	/* Set up the C_ADD_STA command to send to device */
+	memset(&station->sta, 0, sizeof(struct il_addsta_cmd));
+	memcpy(station->sta.sta.addr, addr, ETH_ALEN);
+	station->sta.mode = 0;
+	station->sta.sta.sta_id = sta_id;
+	station->sta.station_flags = 0;
+
+	/*
+	 * OK to call unconditionally, since local stations (IBSS BSSID
+	 * STA and broadcast STA) pass in a NULL sta, and mac80211
+	 * doesn't allow HT IBSS.
+	 */
+	il_set_ht_add_station(il, sta_id, sta);
+
+	/* 3945 only */
+	rate = (il->band == NL80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP;
+	/* Turn on both antennas for the station... */
+	station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
+
+	return sta_id;
+}
+EXPORT_SYMBOL_GPL(il_prep_station);
+
+#define STA_WAIT_TIMEOUT (HZ/2)
+
+/**
+ * il_add_station_common - add a station to the driver and uCode tables
+ */
+int
+il_add_station_common(struct il_priv *il, const u8 *addr, bool is_ap,
+		      struct ieee80211_sta *sta, u8 *sta_id_r)
+{
+	unsigned long flags_spin;
+	int ret = 0;
+	u8 sta_id;
+	struct il_addsta_cmd sta_cmd;
+
+	*sta_id_r = 0;
+	spin_lock_irqsave(&il->sta_lock, flags_spin);
+	sta_id = il_prep_station(il, addr, is_ap, sta);
+	if (sta_id == IL_INVALID_STATION) {
+		IL_ERR("Unable to prepare station %pM for addition\n", addr);
+		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+		return -EINVAL;
+	}
+
+	/*
+	 * uCode is not able to deal with multiple requests to add a
+	 * station. Keep track if one is in progress so that we do not send
+	 * another.
+	 */
+	if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
+		D_INFO("STA %d already in process of being added.\n", sta_id);
+		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+		return -EEXIST;
+	}
+
+	if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
+	    (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
+		D_ASSOC("STA %d (%pM) already added, not adding again.\n",
+			sta_id, addr);
+		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+		return -EEXIST;
+	}
+
+	il->stations[sta_id].used |= IL_STA_UCODE_INPROGRESS;
+	memcpy(&sta_cmd, &il->stations[sta_id].sta,
+	       sizeof(struct il_addsta_cmd));
+	spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+
+	/* Add station to device's station table */
+	ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+	if (ret) {
+		spin_lock_irqsave(&il->sta_lock, flags_spin);
+		IL_ERR("Adding station %pM failed.\n",
+		       il->stations[sta_id].sta.sta.addr);
+		il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
+		il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
+		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+	}
+	*sta_id_r = sta_id;
+	return ret;
+}
+EXPORT_SYMBOL(il_add_station_common);
+
+/**
+ * il_sta_ucode_deactivate - deactivate ucode status for a station
+ *
+ * il->sta_lock must be held
+ */
+static void
+il_sta_ucode_deactivate(struct il_priv *il, u8 sta_id)
+{
+	/* uCode must be active and the driver must not be */
+	if ((il->stations[sta_id].used &
+	     (IL_STA_UCODE_ACTIVE | IL_STA_DRIVER_ACTIVE)) !=
+	    IL_STA_UCODE_ACTIVE)
+		IL_ERR("removed non-active STA %u\n", sta_id);
+
+	il->stations[sta_id].used &= ~IL_STA_UCODE_ACTIVE;
+
+	memset(&il->stations[sta_id], 0, sizeof(struct il_station_entry));
+	D_ASSOC("Removed STA %u\n", sta_id);
+}
+
+static int
+il_send_remove_station(struct il_priv *il, const u8 *addr, int sta_id,
+		       bool temporary)
+{
+	struct il_rx_pkt *pkt;
+	int ret;
+
+	unsigned long flags_spin;
+	struct il_rem_sta_cmd rm_sta_cmd;
+
+	struct il_host_cmd cmd = {
+		.id = C_REM_STA,
+		.len = sizeof(struct il_rem_sta_cmd),
+		.flags = CMD_SYNC,
+		.data = &rm_sta_cmd,
+	};
+
+	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
+	rm_sta_cmd.num_sta = 1;
+	memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);
+
+	cmd.flags |= CMD_WANT_SKB;
+
+	ret = il_send_cmd(il, &cmd);
+
+	if (ret)
+		return ret;
+
+	pkt = (struct il_rx_pkt *)cmd.reply_page;
+	if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+		IL_ERR("Bad return from C_REM_STA (0x%08X)\n", pkt->hdr.flags);
+		ret = -EIO;
+	}
+
+	if (!ret) {
+		switch (pkt->u.rem_sta.status) {
+		case REM_STA_SUCCESS_MSK:
+			if (!temporary) {
+				spin_lock_irqsave(&il->sta_lock, flags_spin);
+				il_sta_ucode_deactivate(il, sta_id);
+				spin_unlock_irqrestore(&il->sta_lock,
+						       flags_spin);
+			}
+			D_ASSOC("C_REM_STA PASSED\n");
+			break;
+		default:
+			ret = -EIO;
+			IL_ERR("C_REM_STA failed\n");
+			break;
+		}
+	}
+	il_free_pages(il, cmd.reply_page);
+
+	return ret;
+}
+
+/**
+ * il_remove_station - Remove driver's knowledge of station.
+ */
+int
+il_remove_station(struct il_priv *il, const u8 sta_id, const u8 *addr)
+{
+	unsigned long flags;
+
+	if (!il_is_ready(il)) {
+		D_INFO("Unable to remove station %pM, device not ready.\n",
+		       addr);
+		/*
+		 * It is typical for stations to be removed when we are
+		 * going down. Return success since device will be down
+		 * soon anyway
+		 */
+		return 0;
+	}
+
+	D_ASSOC("Removing STA from driver:%d  %pM\n", sta_id, addr);
+
+	if (WARN_ON(sta_id == IL_INVALID_STATION))
+		return -EINVAL;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+
+	if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) {
+		D_INFO("Removing %pM but non DRIVER active\n", addr);
+		goto out_err;
+	}
+
+	if (!(il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
+		D_INFO("Removing %pM but non UCODE active\n", addr);
+		goto out_err;
+	}
+
+	if (il->stations[sta_id].used & IL_STA_LOCAL) {
+		kfree(il->stations[sta_id].lq);
+		il->stations[sta_id].lq = NULL;
+	}
+
+	il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
+
+	il->num_stations--;
+
+	BUG_ON(il->num_stations < 0);
+
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+
+	return il_send_remove_station(il, addr, sta_id, false);
+out_err:
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(il_remove_station);
+
+/**
+ * il_clear_ucode_stations - clear ucode station table bits
+ *
+ * This function clears all the bits in the driver indicating
+ * which stations are active in the ucode. Call when something
+ * other than explicit station management would cause this in
+ * the ucode, e.g. unassociated RXON.
+ */
+void
+il_clear_ucode_stations(struct il_priv *il)
+{
+	int i;
+	unsigned long flags_spin;
+	bool cleared = false;
+
+	D_INFO("Clearing ucode stations in driver\n");
+
+	spin_lock_irqsave(&il->sta_lock, flags_spin);
+	for (i = 0; i < il->hw_params.max_stations; i++) {
+		if (il->stations[i].used & IL_STA_UCODE_ACTIVE) {
+			D_INFO("Clearing ucode active for station %d\n", i);
+			il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
+			cleared = true;
+		}
+	}
+	spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+
+	if (!cleared)
+		D_INFO("No active stations found to be cleared\n");
+}
+EXPORT_SYMBOL(il_clear_ucode_stations);
+
+/**
+ * il_restore_stations() - Restore driver known stations to device
+ *
+ * All stations considered active by the driver, but not present in the
+ * uCode, are restored.
+ *
+ * This function sleeps.
+ */
+void
+il_restore_stations(struct il_priv *il)
+{
+	struct il_addsta_cmd sta_cmd;
+	struct il_link_quality_cmd lq;
+	unsigned long flags_spin;
+	int i;
+	bool found = false;
+	int ret;
+	bool send_lq;
+
+	if (!il_is_ready(il)) {
+		D_INFO("Not ready yet, not restoring any stations.\n");
+		return;
+	}
+
+	D_ASSOC("Restoring all known stations ... start.\n");
+	spin_lock_irqsave(&il->sta_lock, flags_spin);
+	for (i = 0; i < il->hw_params.max_stations; i++) {
+		if ((il->stations[i].used & IL_STA_DRIVER_ACTIVE) &&
+		    !(il->stations[i].used & IL_STA_UCODE_ACTIVE)) {
+			D_ASSOC("Restoring sta %pM\n",
+				il->stations[i].sta.sta.addr);
+			il->stations[i].sta.mode = 0;
+			il->stations[i].used |= IL_STA_UCODE_INPROGRESS;
+			found = true;
+		}
+	}
+
+	for (i = 0; i < il->hw_params.max_stations; i++) {
+		if ((il->stations[i].used & IL_STA_UCODE_INPROGRESS)) {
+			memcpy(&sta_cmd, &il->stations[i].sta,
+			       sizeof(struct il_addsta_cmd));
+			send_lq = false;
+			if (il->stations[i].lq) {
+				memcpy(&lq, il->stations[i].lq,
+				       sizeof(struct il_link_quality_cmd));
+				send_lq = true;
+			}
+			spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+			ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+			if (ret) {
+				spin_lock_irqsave(&il->sta_lock, flags_spin);
+				IL_ERR("Adding station %pM failed.\n",
+				       il->stations[i].sta.sta.addr);
+				il->stations[i].used &= ~IL_STA_DRIVER_ACTIVE;
+				il->stations[i].used &=
+				    ~IL_STA_UCODE_INPROGRESS;
+				spin_unlock_irqrestore(&il->sta_lock,
+						       flags_spin);
+			}
+			/*
+			 * Rate scaling has already been initialized, send
+			 * current LQ command
+			 */
+			if (send_lq)
+				il_send_lq_cmd(il, &lq, CMD_SYNC, true);
+			spin_lock_irqsave(&il->sta_lock, flags_spin);
+			il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS;
+		}
+	}
+
+	spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+	if (!found)
+		D_INFO("Restoring all known stations"
+		       " .... no stations to be restored.\n");
+	else
+		D_INFO("Restoring all known stations" " .... complete.\n");
+}
+EXPORT_SYMBOL(il_restore_stations);
+
+int
+il_get_free_ucode_key_idx(struct il_priv *il)
+{
+	int i;
+
+	for (i = 0; i < il->sta_key_max_num; i++)
+		if (!test_and_set_bit(i, &il->ucode_key_table))
+			return i;
+
+	return WEP_INVALID_OFFSET;
+}
+EXPORT_SYMBOL(il_get_free_ucode_key_idx);
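+
+/*
+ * Illustrative pairing (assumed caller pattern, not shown in this file):
+ * an idx allocated here stays reserved until the caller releases it with
+ * clear_bit(idx, &il->ucode_key_table); a WEP_INVALID_OFFSET return means
+ * all il->sta_key_max_num slots are in use.
+ */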
+
+void
+il_dealloc_bcast_stations(struct il_priv *il)
+{
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	for (i = 0; i < il->hw_params.max_stations; i++) {
+		if (!(il->stations[i].used & IL_STA_BCAST))
+			continue;
+
+		il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
+		il->num_stations--;
+		BUG_ON(il->num_stations < 0);
+		kfree(il->stations[i].lq);
+		il->stations[i].lq = NULL;
+	}
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+}
+EXPORT_SYMBOL_GPL(il_dealloc_bcast_stations);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+static void
+il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
+{
+	int i;
+	D_RATE("lq station id 0x%x\n", lq->sta_id);
+	D_RATE("lq ant 0x%X 0x%X\n", lq->general_params.single_stream_ant_msk,
+	       lq->general_params.dual_stream_ant_msk);
+
+	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
+		D_RATE("lq idx %d 0x%X\n", i, lq->rs_table[i].rate_n_flags);
+}
+#else
+static inline void
+il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
+{
+}
+#endif
+
+/**
+ * il_is_lq_table_valid() - Test one aspect of LQ cmd for validity
+ *
+ * It sometimes happens that when an HT rate has been in use and we
+ * lose connectivity with the AP, mac80211 first tells us that the
+ * current channel is not HT anymore before removing the station. In such a
+ * scenario the RXON flags will be updated to indicate we are not
+ * communicating HT anymore, but the LQ command may still contain HT rates.
+ * Test for this to prevent the driver from sending the LQ command between
+ * the time the RXON flags are updated and the LQ command is updated.
+ */
+static bool
+il_is_lq_table_valid(struct il_priv *il, struct il_link_quality_cmd *lq)
+{
+	int i;
+
+	if (il->ht.enabled)
+		return true;
+
+	D_INFO("Channel %u is not an HT channel\n", il->active.channel);
+	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+		if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) {
+			D_INFO("idx %d of LQ expects HT channel\n", i);
+			return false;
+		}
+	}
+	return true;
+}
+
+/**
+ * il_send_lq_cmd() - Send link quality command
+ * @init: This command is sent as part of station initialization right
+ *        after station has been added.
+ *
+ * The link quality command is sent as the last step of station creation.
+ * This is the special case in which init is set and we call a callback in
+ * this case to clear the state indicating that station creation is in
+ * progress.
+ */
+int
+il_send_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq,
+	       u8 flags, bool init)
+{
+	int ret = 0;
+	unsigned long flags_spin;
+
+	struct il_host_cmd cmd = {
+		.id = C_TX_LINK_QUALITY_CMD,
+		.len = sizeof(struct il_link_quality_cmd),
+		.flags = flags,
+		.data = lq,
+	};
+
+	if (WARN_ON(lq->sta_id == IL_INVALID_STATION))
+		return -EINVAL;
+
+	spin_lock_irqsave(&il->sta_lock, flags_spin);
+	if (!(il->stations[lq->sta_id].used & IL_STA_DRIVER_ACTIVE)) {
+		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+		return -EINVAL;
+	}
+	spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+
+	il_dump_lq_cmd(il, lq);
+	BUG_ON(init && (cmd.flags & CMD_ASYNC));
+
+	if (il_is_lq_table_valid(il, lq))
+		ret = il_send_cmd(il, &cmd);
+	else
+		ret = -EINVAL;
+
+	if (cmd.flags & CMD_ASYNC)
+		return ret;
+
+	if (init) {
+		D_INFO("init LQ command complete,"
+		       " clearing sta addition status for sta %d\n",
+		       lq->sta_id);
+		spin_lock_irqsave(&il->sta_lock, flags_spin);
+		il->stations[lq->sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
+		spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(il_send_lq_cmd);
+
+int
+il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		  struct ieee80211_sta *sta)
+{
+	struct il_priv *il = hw->priv;
+	struct il_station_priv_common *sta_common = (void *)sta->drv_priv;
+	int ret;
+
+	mutex_lock(&il->mutex);
+	D_MAC80211("enter station %pM\n", sta->addr);
+
+	ret = il_remove_station(il, sta_common->sta_id, sta->addr);
+	if (ret)
+		IL_ERR("Error removing station %pM\n", sta->addr);
+
+	D_MAC80211("leave ret %d\n", ret);
+	mutex_unlock(&il->mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(il_mac_sta_remove);
+
+/************************** RX-FUNCTIONS ****************************/
+/*
+ * Rx theory of operation
+ *
+ * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
+ * each of which point to Receive Buffers to be filled by the NIC.  These get
+ * used not only for Rx frames, but for any command response or notification
+ * from the NIC.  The driver and NIC manage the Rx buffers by means
+ * of idxes into the circular buffer.
+ *
+ * Rx Queue Indexes
+ * The host/firmware share two idx registers for managing the Rx buffers.
+ *
+ * The READ idx maps to the first position that the firmware may be writing
+ * to -- the driver can read up to (but not including) this position and get
+ * good data.
+ * The READ idx is managed by the firmware once the card is enabled.
+ *
+ * The WRITE idx maps to the last position the driver has read from -- the
+ * position preceding WRITE is the last slot the firmware can place a packet.
+ *
+ * The queue is empty (no good data) if WRITE = READ - 1, and is full if
+ * WRITE = READ.
+ *
+ * During initialization, the host sets up the READ queue position to the first
+ * IDX position, and WRITE to the last (READ - 1 wrapped)
+ *
+ * When the firmware places a packet in a buffer, it will advance the READ idx
+ * and fire the RX interrupt.  The driver can then query the READ idx and
+ * process as many packets as possible, moving the WRITE idx forward as it
+ * resets the Rx queue buffers with new memory.
+ *
+ * The management in the driver is as follows:
+ * + A list of pre-allocated SKBs is stored in il->rxq->rx_free.  When
+ *   il->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
+ *   to replenish il->rxq->rx_free.
+ * + In il_rx_replenish (scheduled) if 'processed' != 'read' then the
+ *   il->rxq is replenished and the READ IDX is updated (updating the
+ *   'processed' and 'read' driver idxes as well)
+ * + A received packet is processed and handed to the kernel network stack,
+ *   detached from the il->rxq.  The driver 'processed' idx is updated.
+ * + The Host/Firmware il->rxq is replenished at tasklet time from the rx_free
+ *   list. If there are no allocated buffers in il->rxq->rx_free, the READ
+ *   IDX is not incremented and il->status(RX_STALLED) is set.  If there
+ *   were enough free buffers and RX_STALLED is set it is cleared.
+ *
+ *
+ * Driver sequence:
+ *
+ * il_rx_queue_alloc()   Allocates rx_free
+ * il_rx_replenish()     Replenishes rx_free list from rx_used, and calls
+ *                            il_rx_queue_restock
+ * il_rx_queue_restock() Moves available buffers from rx_free into Rx
+ *                            queue, updates firmware pointers, and updates
+ *                            the WRITE idx.  If insufficient rx_free buffers
+ *                            are available, schedules il_rx_replenish
+ *
+ * -- enable interrupts --
+ * ISR - il_rx()         Detaches il_rx_bufs (and their SKBs) from the
+ *                            pool, up to the READ IDX.
+ *                            Moves the packet buffer from queue to rx_used.
+ *                            Calls il_rx_queue_restock to refill any empty
+ *                            slots.
+ * ...
+ *
+ */
+
+/**
+ * il_rx_queue_space - Return number of free slots available in queue.
+ */
+int
+il_rx_queue_space(const struct il_rx_queue *q)
+{
+	int s = q->read - q->write;
+	if (s <= 0)
+		s += RX_QUEUE_SIZE;
+	/* keep some buffer to not confuse full and empty queue */
+	s -= 2;
+	if (s < 0)
+		s = 0;
+	return s;
+}
+EXPORT_SYMBOL(il_rx_queue_space);
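+
+/*
+ * Mechanical trace of the computation above, assuming RX_QUEUE_SIZE is
+ * 256: with read = 10 and write = 20, s starts at -10, wraps to 246 and
+ * the 2-slot guard leaves 244 free slots; with read == write the result
+ * is 254, the guard keeping "full" distinguishable from "empty".
+ */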
+
+/**
+ * il_rx_queue_update_write_ptr - Update the write pointer for the RX queue
+ */
+void
+il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q)
+{
+	unsigned long flags;
+	u32 rx_wrt_ptr_reg = il->hw_params.rx_wrt_ptr_reg;
+	u32 reg;
+
+	spin_lock_irqsave(&q->lock, flags);
+
+	if (q->need_update == 0)
+		goto exit_unlock;
+
+	/* If power-saving is in use, make sure device is awake */
+	if (test_bit(S_POWER_PMI, &il->status)) {
+		reg = _il_rd(il, CSR_UCODE_DRV_GP1);
+
+		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+			D_INFO("Rx queue requesting wakeup," " GP1 = 0x%x\n",
+			       reg);
+			il_set_bit(il, CSR_GP_CNTRL,
+				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+			goto exit_unlock;
+		}
+
+		q->write_actual = (q->write & ~0x7);
+		il_wr(il, rx_wrt_ptr_reg, q->write_actual);
+
+		/* Else device is assumed to be awake */
+	} else {
+		/* Device expects a multiple of 8 */
+		q->write_actual = (q->write & ~0x7);
+		il_wr(il, rx_wrt_ptr_reg, q->write_actual);
+	}
+
+	q->need_update = 0;
+
+exit_unlock:
+	spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL(il_rx_queue_update_write_ptr);
+
+int
+il_rx_queue_alloc(struct il_priv *il)
+{
+	struct il_rx_queue *rxq = &il->rxq;
+	struct device *dev = &il->pci_dev->dev;
+	int i;
+
+	spin_lock_init(&rxq->lock);
+	INIT_LIST_HEAD(&rxq->rx_free);
+	INIT_LIST_HEAD(&rxq->rx_used);
+
+	/* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
+	rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
+				     GFP_KERNEL);
+	if (!rxq->bd)
+		goto err_bd;
+
+	rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct il_rb_status),
+					  &rxq->rb_stts_dma, GFP_KERNEL);
+	if (!rxq->rb_stts)
+		goto err_rb;
+
+	/* Fill the rx_used queue with _all_ of the Rx buffers */
+	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
+		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+
+	/* Set us so that we have processed and used all buffers, but have
+	 * not restocked the Rx queue with fresh buffers */
+	rxq->read = rxq->write = 0;
+	rxq->write_actual = 0;
+	rxq->free_count = 0;
+	rxq->need_update = 0;
+	return 0;
+
+err_rb:
+	dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+			  rxq->bd_dma);
+err_bd:
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(il_rx_queue_alloc);
+
+void
+il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	struct il_spectrum_notification *report = &(pkt->u.spectrum_notif);
+
+	if (!report->state) {
+		D_11H("Spectrum Measure Notification: Start\n");
+		return;
+	}
+
+	memcpy(&il->measure_report, report, sizeof(*report));
+	il->measurement_status |= MEASUREMENT_READY;
+}
+EXPORT_SYMBOL(il_hdl_spectrum_measurement);
+
+/*
+ * returns non-zero if packet should be dropped
+ */
+int
+il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
+		      u32 decrypt_res, struct ieee80211_rx_status *stats)
+{
+	u16 fc = le16_to_cpu(hdr->frame_control);
+
+	/*
+	 * All contexts have the same setting here due to it being
+	 * a module parameter, so OK to check any context.
+	 */
+	if (il->active.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
+		return 0;
+
+	if (!(fc & IEEE80211_FCTL_PROTECTED))
+		return 0;
+
+	D_RX("decrypt_res:0x%x\n", decrypt_res);
+	switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
+	case RX_RES_STATUS_SEC_TYPE_TKIP:
+		/* The uCode got a bad phase-1 key: it pushes the packet up
+		 * anyway and decryption will be done in SW. */
+		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+		    RX_RES_STATUS_BAD_KEY_TTAK)
+			break;
+
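+		/* fall through */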
+	case RX_RES_STATUS_SEC_TYPE_WEP:
+		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+		    RX_RES_STATUS_BAD_ICV_MIC) {
+			/* bad ICV, the packet is destroyed since the
+			 * decryption is inplace, drop it */
+			D_RX("Packet destroyed\n");
+			return -1;
+		}
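+		/* fall through */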
+	case RX_RES_STATUS_SEC_TYPE_CCMP:
+		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+		    RX_RES_STATUS_DECRYPT_OK) {
+			D_RX("hw decrypt successfully!!!\n");
+			stats->flag |= RX_FLAG_DECRYPTED;
+		}
+		break;
+
+	default:
+		break;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(il_set_decrypted_flag);
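+
+/*
+ * Summary of the switch above (mechanical reading): a TKIP frame with a
+ * bad phase-1 key (TTAK) is passed up for SW decryption; a frame with a
+ * bad ICV/MIC is dropped (return -1) since in-place decryption destroyed
+ * it; a frame reported as RX_RES_STATUS_DECRYPT_OK gets RX_FLAG_DECRYPTED
+ * set and is passed up.
+ */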
+
+/**
+ * il_txq_update_write_ptr - Send new write idx to hardware
+ */
+void
+il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq)
+{
+	u32 reg = 0;
+	int txq_id = txq->q.id;
+
+	if (txq->need_update == 0)
+		return;
+
+	/* if we're trying to save power */
+	if (test_bit(S_POWER_PMI, &il->status)) {
+		/* wake up nic if it's powered down ...
+		 * uCode will wake up, and interrupt us again, so next
+		 * time we'll skip this part. */
+		reg = _il_rd(il, CSR_UCODE_DRV_GP1);
+
+		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+			D_INFO("Tx queue %d requesting wakeup," " GP1 = 0x%x\n",
+			       txq_id, reg);
+			il_set_bit(il, CSR_GP_CNTRL,
+				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+			return;
+		}
+
+		il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
+
+		/*
+		 * else not in power-save mode,
+		 * uCode will never sleep when we're
+		 * trying to tx (during RFKILL, we're not trying to tx).
+		 */
+	} else
+		_il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
+	txq->need_update = 0;
+}
+EXPORT_SYMBOL(il_txq_update_write_ptr);
+
+/**
+ * il_tx_queue_unmap - Unmap any remaining DMA mappings and free skbs
+ */
+void
+il_tx_queue_unmap(struct il_priv *il, int txq_id)
+{
+	struct il_tx_queue *txq = &il->txq[txq_id];
+	struct il_queue *q = &txq->q;
+
+	if (q->n_bd == 0)
+		return;
+
+	while (q->write_ptr != q->read_ptr) {
+		il->ops->txq_free_tfd(il, txq);
+		q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
+	}
+}
+EXPORT_SYMBOL(il_tx_queue_unmap);
+
+/**
+ * il_tx_queue_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BDs.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+void
+il_tx_queue_free(struct il_priv *il, int txq_id)
+{
+	struct il_tx_queue *txq = &il->txq[txq_id];
+	struct device *dev = &il->pci_dev->dev;
+	int i;
+
+	il_tx_queue_unmap(il, txq_id);
+
+	/* De-alloc array of command/tx buffers */
+	if (txq->cmd) {
+		for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
+			kfree(txq->cmd[i]);
+	}
+
+	/* De-alloc circular buffer of TFDs */
+	if (txq->q.n_bd)
+		dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
+				  txq->tfds, txq->q.dma_addr);
+
+	/* De-alloc array of per-TFD driver data */
+	kfree(txq->skbs);
+	txq->skbs = NULL;
+
+	/* deallocate arrays */
+	kfree(txq->cmd);
+	kfree(txq->meta);
+	txq->cmd = NULL;
+	txq->meta = NULL;
+
+	/* 0-fill queue descriptor structure */
+	memset(txq, 0, sizeof(*txq));
+}
+EXPORT_SYMBOL(il_tx_queue_free);
+
+/**
+ * il_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
+ */
+void
+il_cmd_queue_unmap(struct il_priv *il)
+{
+	struct il_tx_queue *txq = &il->txq[il->cmd_queue];
+	struct il_queue *q = &txq->q;
+	int i;
+
+	if (q->n_bd == 0)
+		return;
+
+	while (q->read_ptr != q->write_ptr) {
+		i = il_get_cmd_idx(q, q->read_ptr, 0);
+
+		if (txq->meta[i].flags & CMD_MAPPED) {
+			pci_unmap_single(il->pci_dev,
+					 dma_unmap_addr(&txq->meta[i], mapping),
+					 dma_unmap_len(&txq->meta[i], len),
+					 PCI_DMA_BIDIRECTIONAL);
+			txq->meta[i].flags = 0;
+		}
+
+		q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
+	}
+
+	i = q->n_win;
+	if (txq->meta[i].flags & CMD_MAPPED) {
+		pci_unmap_single(il->pci_dev,
+				 dma_unmap_addr(&txq->meta[i], mapping),
+				 dma_unmap_len(&txq->meta[i], len),
+				 PCI_DMA_BIDIRECTIONAL);
+		txq->meta[i].flags = 0;
+	}
+}
+EXPORT_SYMBOL(il_cmd_queue_unmap);
+
+/**
+ * il_cmd_queue_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BDs.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+void
+il_cmd_queue_free(struct il_priv *il)
+{
+	struct il_tx_queue *txq = &il->txq[il->cmd_queue];
+	struct device *dev = &il->pci_dev->dev;
+	int i;
+
+	il_cmd_queue_unmap(il);
+
+	/* De-alloc array of command/tx buffers */
+	if (txq->cmd) {
+		for (i = 0; i <= TFD_CMD_SLOTS; i++)
+			kfree(txq->cmd[i]);
+	}
+
+	/* De-alloc circular buffer of TFDs */
+	if (txq->q.n_bd)
+		dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
+				  txq->tfds, txq->q.dma_addr);
+
+	/* deallocate arrays */
+	kfree(txq->cmd);
+	kfree(txq->meta);
+	txq->cmd = NULL;
+	txq->meta = NULL;
+
+	/* 0-fill queue descriptor structure */
+	memset(txq, 0, sizeof(*txq));
+}
+EXPORT_SYMBOL(il_cmd_queue_free);
+
+/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
+ * DMA services
+ *
+ * Theory of operation
+ *
+ * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
+ * of buffer descriptors, each of which points to one or more data buffers for
+ * the device to read from or fill.  Driver and device exchange status of each
+ * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
+ * entries in each circular buffer, to protect against confusing empty and full
+ * queue states.
+ *
+ * The device reads or writes the data in the queues via the device's several
+ * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
+ *
+ * For Tx queues, there are low mark and high mark limits. If, after queuing
+ * the packet for Tx, free space becomes < low mark, the Tx queue is stopped.
+ * When reclaiming packets (on the 'tx done' IRQ), if free space becomes
+ * > high mark, the Tx queue is resumed.
+ *
+ * See more detailed info in 4965.h.
+ ***************************************************/
+
+int
+il_queue_space(const struct il_queue *q)
+{
+	int s = q->read_ptr - q->write_ptr;
+
+	if (q->read_ptr > q->write_ptr)
+		s -= q->n_bd;
+
+	if (s <= 0)
+		s += q->n_win;
+	/* keep some reserve to not confuse empty and full situations */
+	s -= 2;
+	if (s < 0)
+		s = 0;
+	return s;
+}
+EXPORT_SYMBOL(il_queue_space);
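+
+/*
+ * Mechanical trace of il_queue_space(), assuming q->n_bd = 256 and a
+ * window q->n_win = 64: with read_ptr = 10 and write_ptr = 20 there are
+ * 10 TFDs in flight, s = -10 wraps to 54 within the window, and the
+ * 2-entry reserve leaves 52 usable slots.
+ */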
+
+
+/**
+ * il_queue_init - Initialize queue's high/low-water and read/write idxes
+ */
+static int
+il_queue_init(struct il_priv *il, struct il_queue *q, int slots, u32 id)
+{
+	/*
+	 * TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
+	 * il_queue_inc_wrap and il_queue_dec_wrap are broken.
+	 */
+	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
+	/* FIXME: remove q->n_bd */
+	q->n_bd = TFD_QUEUE_SIZE_MAX;
+
+	q->n_win = slots;
+	q->id = id;
+
+	/* slots must be a power-of-two size, otherwise
+	 * il_get_cmd_idx is broken. */
+	BUG_ON(!is_power_of_2(slots));
+
+	q->low_mark = q->n_win / 4;
+	if (q->low_mark < 4)
+		q->low_mark = 4;
+
+	q->high_mark = q->n_win / 8;
+	if (q->high_mark < 2)
+		q->high_mark = 2;
+
+	q->write_ptr = q->read_ptr = 0;
+
+	return 0;
+}
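+
+/*
+ * Worked example with purely illustrative numbers: a 64-slot window
+ * yields low_mark = 16 and high_mark = 8, while a hypothetical 4-slot
+ * window would be clamped up to low_mark = 4 and high_mark = 2.
+ */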
+
+/**
+ * il_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
+ */
+static int
+il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id)
+{
+	struct device *dev = &il->pci_dev->dev;
+	size_t tfd_sz = il->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
+
+	/* Driver private data, only for Tx (not command) queues,
+	 * not shared with device. */
+	if (id != il->cmd_queue) {
+		txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX,
+				    sizeof(struct sk_buff *),
+				    GFP_KERNEL);
+		if (!txq->skbs) {
+			IL_ERR("Fail to alloc skbs\n");
+			goto error;
+		}
+	} else
+		txq->skbs = NULL;
+
+	/* Circular buffer of transmit frame descriptors (TFDs),
+	 * shared with device */
+	txq->tfds =
+	    dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL);
+	if (!txq->tfds)
+		goto error;
+
+	txq->q.id = id;
+
+	return 0;
+
+error:
+	kfree(txq->skbs);
+	txq->skbs = NULL;
+
+	return -ENOMEM;
+}
+
+/**
+ * il_tx_queue_init - Allocate and initialize one tx/cmd queue
+ */
+int
+il_tx_queue_init(struct il_priv *il, u32 txq_id)
+{
+	int i, len, ret;
+	int slots, actual_slots;
+	struct il_tx_queue *txq = &il->txq[txq_id];
+
+	/*
+	 * Alloc buffer array for commands (Tx or other types of commands).
+	 * For the command queue (#4/#9), allocate command space + one big
+	 * command for scan, since the scan command is very large; the system
+	 * will not run two scans at the same time, so only one is needed.
+	 * For normal Tx queues (all other queues), no super-size command
+	 * space is needed.
+	 */
+	if (txq_id == il->cmd_queue) {
+		slots = TFD_CMD_SLOTS;
+		actual_slots = slots + 1;
+	} else {
+		slots = TFD_TX_CMD_SLOTS;
+		actual_slots = slots;
+	}
+
+	txq->meta =
+	    kcalloc(actual_slots, sizeof(struct il_cmd_meta), GFP_KERNEL);
+	txq->cmd =
+	    kcalloc(actual_slots, sizeof(struct il_device_cmd *), GFP_KERNEL);
+
+	if (!txq->meta || !txq->cmd)
+		goto out_free_arrays;
+
+	len = sizeof(struct il_device_cmd);
+	for (i = 0; i < actual_slots; i++) {
+		/* only happens for cmd queue */
+		if (i == slots)
+			len = IL_MAX_CMD_SIZE;
+
+		txq->cmd[i] = kmalloc(len, GFP_KERNEL);
+		if (!txq->cmd[i])
+			goto err;
+	}
+
+	/* Alloc driver data array and TFD circular buffer */
+	ret = il_tx_queue_alloc(il, txq, txq_id);
+	if (ret)
+		goto err;
+
+	txq->need_update = 0;
+
+	/*
+	 * For the default queues 0-3, set up the swq_id
+	 * already -- all others need to get one later
+	 * (if they need one at all).
+	 */
+	if (txq_id < 4)
+		il_set_swq_id(txq, txq_id, txq_id);
+
+	/* Initialize queue's high/low-water marks, and head/tail idxes */
+	il_queue_init(il, &txq->q, slots, txq_id);
+
+	/* Tell device where to find queue */
+	il->ops->txq_init(il, txq);
+
+	return 0;
+err:
+	for (i = 0; i < actual_slots; i++)
+		kfree(txq->cmd[i]);
+out_free_arrays:
+	kfree(txq->meta);
+	txq->meta = NULL;
+	kfree(txq->cmd);
+	txq->cmd = NULL;
+
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(il_tx_queue_init);
+
+void
+il_tx_queue_reset(struct il_priv *il, u32 txq_id)
+{
+	int slots, actual_slots;
+	struct il_tx_queue *txq = &il->txq[txq_id];
+
+	if (txq_id == il->cmd_queue) {
+		slots = TFD_CMD_SLOTS;
+		actual_slots = TFD_CMD_SLOTS + 1;
+	} else {
+		slots = TFD_TX_CMD_SLOTS;
+		actual_slots = TFD_TX_CMD_SLOTS;
+	}
+
+	memset(txq->meta, 0, sizeof(struct il_cmd_meta) * actual_slots);
+	txq->need_update = 0;
+
+	/* Initialize queue's high/low-water marks, and head/tail idxes */
+	il_queue_init(il, &txq->q, slots, txq_id);
+
+	/* Tell device where to find queue */
+	il->ops->txq_init(il, txq);
+}
+EXPORT_SYMBOL(il_tx_queue_reset);
+
+/*************** HOST COMMAND QUEUE FUNCTIONS   *****/
+
+/**
+ * il_enqueue_hcmd - enqueue a uCode command
+ * @il: device private data pointer
+ * @cmd: a pointer to the uCode command structure
+ *
+ * The function returns a value < 0 to indicate that the operation
+ * failed. On success, it returns the non-negative idx of the command in
+ * the command queue.
+ */
+int
+il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
+{
+	struct il_tx_queue *txq = &il->txq[il->cmd_queue];
+	struct il_queue *q = &txq->q;
+	struct il_device_cmd *out_cmd;
+	struct il_cmd_meta *out_meta;
+	dma_addr_t phys_addr;
+	unsigned long flags;
+	int len;
+	u32 idx;
+	u16 fix_size;
+
+	cmd->len = il->ops->get_hcmd_size(cmd->id, cmd->len);
+	fix_size = (u16) (cmd->len + sizeof(out_cmd->hdr));
+
+	/* If any of the command structures ends up being larger than
+	 * TFD_MAX_PAYLOAD_SIZE and it is sent as a 'small' command, then
+	 * we will need to increase the size of the TFD entries.
+	 * Also check that the command buffer does not exceed the size of
+	 * struct il_device_cmd and IL_MAX_CMD_SIZE. */
+	BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
+	       !(cmd->flags & CMD_SIZE_HUGE));
+	BUG_ON(fix_size > IL_MAX_CMD_SIZE);
+
+	if (il_is_rfkill(il) || il_is_ctkill(il)) {
+		IL_WARN("Not sending command - %s KILL\n",
+			il_is_rfkill(il) ? "RF" : "CT");
+		return -EIO;
+	}
+
+	spin_lock_irqsave(&il->hcmd_lock, flags);
+
+	if (il_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+		spin_unlock_irqrestore(&il->hcmd_lock, flags);
+
+		IL_ERR("Restarting adapter due to command queue full\n");
+		queue_work(il->workqueue, &il->restart);
+		return -ENOSPC;
+	}
+
+	idx = il_get_cmd_idx(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
+	out_cmd = txq->cmd[idx];
+	out_meta = &txq->meta[idx];
+
+	if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
+		spin_unlock_irqrestore(&il->hcmd_lock, flags);
+		return -ENOSPC;
+	}
+
+	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to 0 */
+	out_meta->flags = cmd->flags | CMD_MAPPED;
+	if (cmd->flags & CMD_WANT_SKB)
+		out_meta->source = cmd;
+	if (cmd->flags & CMD_ASYNC)
+		out_meta->callback = cmd->callback;
+
+	out_cmd->hdr.cmd = cmd->id;
+	memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
+
+	/* At this point, the out_cmd now has all of the incoming cmd
+	 * information */
+
+	out_cmd->hdr.flags = 0;
+	out_cmd->hdr.sequence =
+	    cpu_to_le16(QUEUE_TO_SEQ(il->cmd_queue) | IDX_TO_SEQ(q->write_ptr));
+	if (cmd->flags & CMD_SIZE_HUGE)
+		out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
+	len = sizeof(struct il_device_cmd);
+	if (idx == TFD_CMD_SLOTS)
+		len = IL_MAX_CMD_SIZE;
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+	switch (out_cmd->hdr.cmd) {
+	case C_TX_LINK_QUALITY_CMD:
+	case C_SENSITIVITY:
+		D_HC_DUMP("Sending command %s (#%x), seq: 0x%04X, "
+			  "%d bytes at %d[%d]:%d\n",
+			  il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
+			  le16_to_cpu(out_cmd->hdr.sequence), fix_size,
+			  q->write_ptr, idx, il->cmd_queue);
+		break;
+	default:
+		D_HC("Sending command %s (#%x), seq: 0x%04X, "
+		     "%d bytes at %d[%d]:%d\n",
+		     il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
+		     le16_to_cpu(out_cmd->hdr.sequence), fix_size, q->write_ptr,
+		     idx, il->cmd_queue);
+	}
+#endif
+
+	phys_addr =
+	    pci_map_single(il->pci_dev, &out_cmd->hdr, fix_size,
+			   PCI_DMA_BIDIRECTIONAL);
+	if (unlikely(pci_dma_mapping_error(il->pci_dev, phys_addr))) {
+		idx = -ENOMEM;
+		goto out;
+	}
+	dma_unmap_addr_set(out_meta, mapping, phys_addr);
+	dma_unmap_len_set(out_meta, len, fix_size);
+
+	txq->need_update = 1;
+
+	if (il->ops->txq_update_byte_cnt_tbl)
+		/* Set up entry in queue's byte count circular buffer */
+		il->ops->txq_update_byte_cnt_tbl(il, txq, 0);
+
+	il->ops->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size, 1,
+					    U32_PAD(cmd->len));
+
+	/* Increment and update queue's write idx */
+	q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
+	il_txq_update_write_ptr(il, txq);
+
+out:
+	spin_unlock_irqrestore(&il->hcmd_lock, flags);
+	return idx;
+}
+
+/**
+ * il_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
+ *
+ * When FW advances the 'R' idx, all entries between the old and new 'R' idx
+ * need to be reclaimed. As a result, some free space forms. If there is
+ * enough free space (> low mark), wake the stack that feeds us.
+ */
+static void
+il_hcmd_queue_reclaim(struct il_priv *il, int txq_id, int idx, int cmd_idx)
+{
+	struct il_tx_queue *txq = &il->txq[txq_id];
+	struct il_queue *q = &txq->q;
+	int nfreed = 0;
+
+	if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
+		IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
+		       "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
+		       q->write_ptr, q->read_ptr);
+		return;
+	}
+
+	for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
+	     q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+		if (nfreed++ > 0) {
+			IL_ERR("HCMD skipped: idx (%d) %d %d\n", idx,
+			       q->write_ptr, q->read_ptr);
+			queue_work(il->workqueue, &il->restart);
+		}
+	}
+}
+
+/**
+ * il_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
+ * @rxb: Rx buffer to reclaim
+ *
+ * If an Rx buffer has an async callback associated with it, the callback
+ * will be executed.  The attached skb (if present) will only be freed
+ * if the callback returns 1.
+ */
+void
+il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+	int txq_id = SEQ_TO_QUEUE(sequence);
+	int idx = SEQ_TO_IDX(sequence);
+	int cmd_idx;
+	bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
+	struct il_device_cmd *cmd;
+	struct il_cmd_meta *meta;
+	struct il_tx_queue *txq = &il->txq[il->cmd_queue];
+	unsigned long flags;
+
+	/* If a Tx command is being handled and it isn't in the actual
+	 * command queue then a command routing bug has been introduced
+	 * in the queue management code. */
+	if (WARN
+	    (txq_id != il->cmd_queue,
+	     "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
+	     txq_id, il->cmd_queue, sequence, il->txq[il->cmd_queue].q.read_ptr,
+	     il->txq[il->cmd_queue].q.write_ptr)) {
+		il_print_hex_error(il, pkt, 32);
+		return;
+	}
+
+	cmd_idx = il_get_cmd_idx(&txq->q, idx, huge);
+	cmd = txq->cmd[cmd_idx];
+	meta = &txq->meta[cmd_idx];
+
+	txq->time_stamp = jiffies;
+
+	pci_unmap_single(il->pci_dev, dma_unmap_addr(meta, mapping),
+			 dma_unmap_len(meta, len), PCI_DMA_BIDIRECTIONAL);
+
+	/* Input error checking is done when commands are added to queue. */
+	if (meta->flags & CMD_WANT_SKB) {
+		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
+		rxb->page = NULL;
+	} else if (meta->callback)
+		meta->callback(il, cmd, pkt);
+
+	spin_lock_irqsave(&il->hcmd_lock, flags);
+
+	il_hcmd_queue_reclaim(il, txq_id, idx, cmd_idx);
+
+	if (!(meta->flags & CMD_ASYNC)) {
+		clear_bit(S_HCMD_ACTIVE, &il->status);
+		D_INFO("Clearing HCMD_ACTIVE for command %s\n",
+		       il_get_cmd_string(cmd->hdr.cmd));
+		wake_up(&il->wait_command_queue);
+	}
+
+	/* Mark as unmapped */
+	meta->flags = 0;
+
+	spin_unlock_irqrestore(&il->hcmd_lock, flags);
+}
+EXPORT_SYMBOL(il_tx_cmd_complete);
+
+MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
+MODULE_VERSION(IWLWIFI_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+
+/*
+ * If bt_coex_active is true, uCode will do kill/defer every time the
+ * priority line is asserted (BT is sending signals on the priority line
+ * in the PCIx).
+ * If bt_coex_active is false, uCode will ignore the BT activity and
+ * perform the normal operation.
+ *
+ * Users might experience transmit issues on some platforms due to this
+ * WiFi/BT co-exist problem. The possible behaviors are:
+ *   Able to scan and find all the available APs
+ *   Not able to associate with any AP
+ * On those platforms, WiFi communication can be restored by setting the
+ * "bt_coex_active" module parameter to "false".
+ *
+ * default: bt_coex_active = true (BT_COEX_ENABLE)
+ */
+static bool bt_coex_active = true;
+module_param(bt_coex_active, bool, 0444);
+MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
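+
+/*
+ * For example (assuming this code is built as the "iwlegacy" module),
+ * BT coexistence handling can be disabled at load time with:
+ *
+ *	modprobe iwlegacy bt_coex_active=0
+ *
+ * or from the kernel command line with iwlegacy.bt_coex_active=0.
+ */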
+
+u32 il_debug_level;
+EXPORT_SYMBOL(il_debug_level);
+
+const u8 il_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+EXPORT_SYMBOL(il_bcast_addr);
+
+#define MAX_BIT_RATE_40_MHZ 150	/* Mbps */
+#define MAX_BIT_RATE_20_MHZ 72	/* Mbps */
+static void
+il_init_ht_hw_capab(const struct il_priv *il,
+		    struct ieee80211_sta_ht_cap *ht_info,
+		    enum nl80211_band band)
+{
+	u16 max_bit_rate = 0;
+	u8 rx_chains_num = il->hw_params.rx_chains_num;
+	u8 tx_chains_num = il->hw_params.tx_chains_num;
+
+	ht_info->cap = 0;
+	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
+
+	ht_info->ht_supported = true;
+
+	ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
+	max_bit_rate = MAX_BIT_RATE_20_MHZ;
+	if (il->hw_params.ht40_channel & BIT(band)) {
+		ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+		ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
+		ht_info->mcs.rx_mask[4] = 0x01;
+		max_bit_rate = MAX_BIT_RATE_40_MHZ;
+	}
+
+	if (il->cfg->mod_params->amsdu_size_8K)
+		ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+
+	ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
+	ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
+
+	ht_info->mcs.rx_mask[0] = 0xFF;
+	if (rx_chains_num >= 2)
+		ht_info->mcs.rx_mask[1] = 0xFF;
+	if (rx_chains_num >= 3)
+		ht_info->mcs.rx_mask[2] = 0xFF;
+
+	/* Highest supported Rx data rate */
+	max_bit_rate *= rx_chains_num;
+	WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
+	ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
+
+	/* Tx MCS capabilities */
+	ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+	if (tx_chains_num != rx_chains_num) {
+		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
+		ht_info->mcs.tx_params |=
+		    ((tx_chains_num -
+		      1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+	}
+}
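+
+/*
+ * Worked example of the capability math above: a 2x2 device limited to
+ * 20 MHz advertises rx_mask[0] = rx_mask[1] = 0xFF (MCS 0-15) and
+ * rx_highest = 2 * 72 = 144 Mbps; with equal Tx and Rx chain counts no
+ * IEEE80211_HT_MCS_TX_RX_DIFF parameters are set.
+ */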
+
+/**
+ * il_init_geos - Initialize mac80211's geo/channel info based from eeprom
+ */
+int
+il_init_geos(struct il_priv *il)
+{
+	struct il_channel_info *ch;
+	struct ieee80211_supported_band *sband;
+	struct ieee80211_channel *channels;
+	struct ieee80211_channel *geo_ch;
+	struct ieee80211_rate *rates;
+	int i = 0;
+	s8 max_tx_power = 0;
+
+	if (il->bands[NL80211_BAND_2GHZ].n_bitrates ||
+	    il->bands[NL80211_BAND_5GHZ].n_bitrates) {
+		D_INFO("Geography modes already initialized.\n");
+		set_bit(S_GEO_CONFIGURED, &il->status);
+		return 0;
+	}
+
+	channels =
+	    kcalloc(il->channel_count, sizeof(struct ieee80211_channel),
+		    GFP_KERNEL);
+	if (!channels)
+		return -ENOMEM;
+
+	rates =
+	    kzalloc((sizeof(struct ieee80211_rate) * RATE_COUNT_LEGACY),
+		    GFP_KERNEL);
+	if (!rates) {
+		kfree(channels);
+		return -ENOMEM;
+	}
+
+	/* 5.2GHz channels start after the 2.4GHz channels */
+	sband = &il->bands[NL80211_BAND_5GHZ];
+	sband->channels = &channels[ARRAY_SIZE(il_eeprom_band_1)];
+	/* just OFDM */
+	sband->bitrates = &rates[IL_FIRST_OFDM_RATE];
+	sband->n_bitrates = RATE_COUNT_LEGACY - IL_FIRST_OFDM_RATE;
+
+	if (il->cfg->sku & IL_SKU_N)
+		il_init_ht_hw_capab(il, &sband->ht_cap, NL80211_BAND_5GHZ);
+
+	sband = &il->bands[NL80211_BAND_2GHZ];
+	sband->channels = channels;
+	/* OFDM & CCK */
+	sband->bitrates = rates;
+	sband->n_bitrates = RATE_COUNT_LEGACY;
+
+	if (il->cfg->sku & IL_SKU_N)
+		il_init_ht_hw_capab(il, &sband->ht_cap, NL80211_BAND_2GHZ);
+
+	il->ieee_channels = channels;
+	il->ieee_rates = rates;
+
+	for (i = 0; i < il->channel_count; i++) {
+		ch = &il->channel_info[i];
+
+		if (!il_is_channel_valid(ch))
+			continue;
+
+		sband = &il->bands[ch->band];
+
+		geo_ch = &sband->channels[sband->n_channels++];
+
+		geo_ch->center_freq =
+		    ieee80211_channel_to_frequency(ch->channel, ch->band);
+		geo_ch->max_power = ch->max_power_avg;
+		geo_ch->max_antenna_gain = 0xff;
+		geo_ch->hw_value = ch->channel;
+
+		if (il_is_channel_valid(ch)) {
+			if (!(ch->flags & EEPROM_CHANNEL_IBSS))
+				geo_ch->flags |= IEEE80211_CHAN_NO_IR;
+
+			if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
+				geo_ch->flags |= IEEE80211_CHAN_NO_IR;
+
+			if (ch->flags & EEPROM_CHANNEL_RADAR)
+				geo_ch->flags |= IEEE80211_CHAN_RADAR;
+
+			geo_ch->flags |= ch->ht40_extension_channel;
+
+			if (ch->max_power_avg > max_tx_power)
+				max_tx_power = ch->max_power_avg;
+		} else {
+			geo_ch->flags |= IEEE80211_CHAN_DISABLED;
+		}
+
+		D_INFO("Channel %d Freq=%d[%sGHz] %s flag=0x%X\n", ch->channel,
+		       geo_ch->center_freq,
+		       il_is_channel_a_band(ch) ? "5.2" : "2.4",
+		       geo_ch->
+		       flags & IEEE80211_CHAN_DISABLED ? "restricted" : "valid",
+		       geo_ch->flags);
+	}
+
+	il->tx_power_device_lmt = max_tx_power;
+	il->tx_power_user_lmt = max_tx_power;
+	il->tx_power_next = max_tx_power;
+
+	if (il->bands[NL80211_BAND_5GHZ].n_channels == 0 &&
+	    (il->cfg->sku & IL_SKU_A)) {
+		IL_INFO("Incorrectly detected BG card as ABG. "
+			"Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
+			il->pci_dev->device, il->pci_dev->subsystem_device);
+		il->cfg->sku &= ~IL_SKU_A;
+	}
+
+	IL_INFO("Tunable channels: %d 802.11bg, %d 802.11a channels\n",
+		il->bands[NL80211_BAND_2GHZ].n_channels,
+		il->bands[NL80211_BAND_5GHZ].n_channels);
+
+	set_bit(S_GEO_CONFIGURED, &il->status);
+
+	return 0;
+}
+EXPORT_SYMBOL(il_init_geos);
+
+/*
+ * il_free_geos - undo allocations in il_init_geos
+ */
+void
+il_free_geos(struct il_priv *il)
+{
+	kfree(il->ieee_channels);
+	kfree(il->ieee_rates);
+	clear_bit(S_GEO_CONFIGURED, &il->status);
+}
+EXPORT_SYMBOL(il_free_geos);
+
+static bool
+il_is_channel_extension(struct il_priv *il, enum nl80211_band band,
+			u16 channel, u8 extension_chan_offset)
+{
+	const struct il_channel_info *ch_info;
+
+	ch_info = il_get_channel_info(il, band, channel);
+	if (!il_is_channel_valid(ch_info))
+		return false;
+
+	if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
+		return !(ch_info->ht40_extension_channel &
+			 IEEE80211_CHAN_NO_HT40PLUS);
+	else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
+		return !(ch_info->ht40_extension_channel &
+			 IEEE80211_CHAN_NO_HT40MINUS);
+
+	return false;
+}
+
+bool
+il_is_ht40_tx_allowed(struct il_priv *il, struct ieee80211_sta_ht_cap *ht_cap)
+{
+	if (!il->ht.enabled || !il->ht.is_40mhz)
+		return false;
+
+	/*
+	 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 because
+	 * the bit will not be set in the pure 40 MHz case.
+	 */
+	if (ht_cap && !ht_cap->ht_supported)
+		return false;
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+	if (il->disable_ht40)
+		return false;
+#endif
+
+	return il_is_channel_extension(il, il->band,
+				       le16_to_cpu(il->staging.channel),
+				       il->ht.extension_chan_offset);
+}
+EXPORT_SYMBOL(il_is_ht40_tx_allowed);
+
+static u16 noinline
+il_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
+{
+	u16 new_val;
+	u16 beacon_factor;
+
+	/*
+	 * If mac80211 hasn't given us a beacon interval, program
+	 * the default into the device.
+	 */
+	if (!beacon_val)
+		return DEFAULT_BEACON_INTERVAL;
+
+	/*
+	 * If the beacon interval we obtained from the peer
+	 * is too large, we'll have to wake up more often
+	 * (and in IBSS case, we'll beacon too much)
+	 *
+	 * For example, if max_beacon_val is 4096, and the
+	 * requested beacon interval is 7000, we'll have to
+	 * use 3500 to be able to wake up on the beacons.
+	 *
+	 * This could badly influence beacon detection stats.
+	 */
+
+	beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
+	new_val = beacon_val / beacon_factor;
+
+	if (!new_val)
+		new_val = max_beacon_val;
+
+	return new_val;
+}
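+
+/*
+ * Trace of the example in the comment above: beacon_val = 7000 with
+ * max_beacon_val = 4096 gives beacon_factor = (7000 + 4096) / 4096 = 2,
+ * so the programmed interval becomes 7000 / 2 = 3500; beacon_val = 0
+ * falls back to DEFAULT_BEACON_INTERVAL.
+ */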
+
+int
+il_send_rxon_timing(struct il_priv *il)
+{
+	u64 tsf;
+	s32 interval_tm, rem;
+	struct ieee80211_conf *conf = NULL;
+	u16 beacon_int;
+	struct ieee80211_vif *vif = il->vif;
+
+	conf = &il->hw->conf;
+
+	lockdep_assert_held(&il->mutex);
+
+	memset(&il->timing, 0, sizeof(struct il_rxon_time_cmd));
+
+	il->timing.timestamp = cpu_to_le64(il->timestamp);
+	il->timing.listen_interval = cpu_to_le16(conf->listen_interval);
+
+	beacon_int = vif ? vif->bss_conf.beacon_int : 0;
+
+	/*
+	 * TODO: For IBSS we need to get atim_win from mac80211,
+	 *       for now just always use 0
+	 */
+	il->timing.atim_win = 0;
+
+	beacon_int =
+	    il_adjust_beacon_interval(beacon_int,
+				      il->hw_params.max_beacon_itrvl *
+				      TIME_UNIT);
+	il->timing.beacon_interval = cpu_to_le16(beacon_int);
+
+	tsf = il->timestamp;	/* tsf is modified by do_div: copy it */
+	interval_tm = beacon_int * TIME_UNIT;
+	rem = do_div(tsf, interval_tm);
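+	/* Time remaining from the TSF until the next beacon boundary */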
+	il->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
+
+	il->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ? : 1) : 1;
+
+	D_ASSOC("beacon interval %d beacon timer %d beacon tim %d\n",
+		le16_to_cpu(il->timing.beacon_interval),
+		le32_to_cpu(il->timing.beacon_init_val),
+		le16_to_cpu(il->timing.atim_win));
+
+	return il_send_cmd_pdu(il, C_RXON_TIMING, sizeof(il->timing),
+			       &il->timing);
+}
+EXPORT_SYMBOL(il_send_rxon_timing);
+
+void
+il_set_rxon_hwcrypto(struct il_priv *il, int hw_decrypt)
+{
+	struct il_rxon_cmd *rxon = &il->staging;
+
+	if (hw_decrypt)
+		rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
+	else
+		rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
+
+}
+EXPORT_SYMBOL(il_set_rxon_hwcrypto);
+
+/* validate RXON structure is valid */
+int
+il_check_rxon_cmd(struct il_priv *il)
+{
+	struct il_rxon_cmd *rxon = &il->staging;
+	bool error = false;
+
+	if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
+		if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
+			IL_WARN("check 2.4G: wrong narrow\n");
+			error = true;
+		}
+		if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
+			IL_WARN("check 2.4G: wrong radar\n");
+			error = true;
+		}
+	} else {
+		if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
+			IL_WARN("check 5.2G: not short slot!\n");
+			error = true;
+		}
+		if (rxon->flags & RXON_FLG_CCK_MSK) {
+			IL_WARN("check 5.2G: CCK!\n");
+			error = true;
+		}
+	}
+	if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
+		IL_WARN("mac/bssid mcast!\n");
+		error = true;
+	}
+
+	/* make sure basic rates 6Mbps and 1Mbps are supported */
+	if ((rxon->ofdm_basic_rates & RATE_6M_MASK) == 0 &&
+	    (rxon->cck_basic_rates & RATE_1M_MASK) == 0) {
+		IL_WARN("neither 1 nor 6 are basic\n");
+		error = true;
+	}
+
+	if (le16_to_cpu(rxon->assoc_id) > 2007) {
+		IL_WARN("aid > 2007\n");
+		error = true;
+	}
+
+	if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) ==
+	    (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
+		IL_WARN("CCK and short slot\n");
+		error = true;
+	}
+
+	if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) ==
+	    (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
+		IL_WARN("CCK and auto detect");
+		error = true;
+	}
+
+	if ((rxon->flags &
+	     (RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK)) ==
+	    RXON_FLG_TGG_PROTECT_MSK) {
+		IL_WARN("TGg but no auto-detect\n");
+		error = true;
+	}
+
+	if (error) {
+		IL_WARN("Tuning to channel %d\n", le16_to_cpu(rxon->channel));
+		IL_ERR("Invalid RXON\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(il_check_rxon_cmd);
+
+/**
+ * il_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
+ * @il: staging_rxon is compared to active_rxon
+ *
+ * If the RXON structure is changing enough to require a new tune,
+ * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
+ * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
+ */
+int
+il_full_rxon_required(struct il_priv *il)
+{
+	const struct il_rxon_cmd *staging = &il->staging;
+	const struct il_rxon_cmd *active = &il->active;
+
+#define CHK(cond)							\
+	if ((cond)) {							\
+		D_INFO("need full RXON - " #cond "\n");	\
+		return 1;						\
+	}
+
+#define CHK_NEQ(c1, c2)						\
+	if ((c1) != (c2)) {					\
+		D_INFO("need full RXON - "	\
+			       #c1 " != " #c2 " - %d != %d\n",	\
+			       (c1), (c2));			\
+		return 1;					\
+	}
+
+	/* These items are only settable from the full RXON command */
+	CHK(!il_is_associated(il));
+	CHK(!ether_addr_equal_64bits(staging->bssid_addr, active->bssid_addr));
+	CHK(!ether_addr_equal_64bits(staging->node_addr, active->node_addr));
+	CHK(!ether_addr_equal_64bits(staging->wlap_bssid_addr,
+				     active->wlap_bssid_addr));
+	CHK_NEQ(staging->dev_type, active->dev_type);
+	CHK_NEQ(staging->channel, active->channel);
+	CHK_NEQ(staging->air_propagation, active->air_propagation);
+	CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
+		active->ofdm_ht_single_stream_basic_rates);
+	CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
+		active->ofdm_ht_dual_stream_basic_rates);
+	CHK_NEQ(staging->assoc_id, active->assoc_id);
+
+	/* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
+	 * be updated with the RXON_ASSOC command -- however only some
+	 * flag transitions are allowed using RXON_ASSOC */
+
+	/* Check if we are not switching bands */
+	CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
+		active->flags & RXON_FLG_BAND_24G_MSK);
+
+	/* Check if we are switching association toggle */
+	CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
+		active->filter_flags & RXON_FILTER_ASSOC_MSK);
+
+#undef CHK
+#undef CHK_NEQ
+
+	return 0;
+}
+EXPORT_SYMBOL(il_full_rxon_required);
+
+u8
+il_get_lowest_plcp(struct il_priv *il)
+{
+	/*
+	 * Assign the lowest rate -- should really get this from
+	 * the beacon skb from mac80211.
+	 */
+	if (il->staging.flags & RXON_FLG_BAND_24G_MSK)
+		return RATE_1M_PLCP;
+	else
+		return RATE_6M_PLCP;
+}
+EXPORT_SYMBOL(il_get_lowest_plcp);
+
+static void
+_il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf)
+{
+	struct il_rxon_cmd *rxon = &il->staging;
+
+	if (!il->ht.enabled) {
+		rxon->flags &=
+		    ~(RXON_FLG_CHANNEL_MODE_MSK |
+		      RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | RXON_FLG_HT40_PROT_MSK
+		      | RXON_FLG_HT_PROT_MSK);
+		return;
+	}
+
+	rxon->flags |=
+	    cpu_to_le32(il->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS);
+
+	/* Set up channel bandwidth:
+	 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
+	/* clear the HT channel mode before set the mode */
+	rxon->flags &=
+	    ~(RXON_FLG_CHANNEL_MODE_MSK | RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
+	if (il_is_ht40_tx_allowed(il, NULL)) {
+		/* pure ht40 */
+		if (il->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
+			rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
+			/* Note: control channel is opposite of extension channel */
+			switch (il->ht.extension_chan_offset) {
+			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+				rxon->flags &=
+				    ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+				break;
+			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+				break;
+			}
+		} else {
+			/* Note: control channel is opposite of extension channel */
+			switch (il->ht.extension_chan_offset) {
+			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+				rxon->flags &=
+				    ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
+				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
+				break;
+			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
+				break;
+			case IEEE80211_HT_PARAM_CHA_SEC_NONE:
+			default:
+				/* channel location only valid if in Mixed mode */
+				IL_ERR("invalid extension channel offset\n");
+				break;
+			}
+		}
+	} else {
+		rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
+	}
+
+	if (il->ops->set_rxon_chain)
+		il->ops->set_rxon_chain(il);
+
+	D_ASSOC("rxon flags 0x%X operation mode :0x%X "
+		"extension channel offset 0x%x\n", le32_to_cpu(rxon->flags),
+		il->ht.protection, il->ht.extension_chan_offset);
+}
+
+void
+il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf)
+{
+	_il_set_rxon_ht(il, ht_conf);
+}
+EXPORT_SYMBOL(il_set_rxon_ht);
+
+/* Return valid, unused, channel for a passive scan to reset the RF */
+u8
+il_get_single_channel_number(struct il_priv *il, enum nl80211_band band)
+{
+	const struct il_channel_info *ch_info;
+	int i;
+	u8 channel = 0;
+	u8 min, max;
+
+	if (band == NL80211_BAND_5GHZ) {
+		min = 14;
+		max = il->channel_count;
+	} else {
+		min = 0;
+		max = 14;
+	}
+
+	for (i = min; i < max; i++) {
+		channel = il->channel_info[i].channel;
+		if (channel == le16_to_cpu(il->staging.channel))
+			continue;
+
+		ch_info = il_get_channel_info(il, band, channel);
+		if (il_is_channel_valid(ch_info))
+			break;
+	}
+
+	return channel;
+}
+EXPORT_SYMBOL(il_get_single_channel_number);
+
+/**
+ * il_set_rxon_channel - Set the band and channel values in staging RXON
+ * @ch: requested channel as a pointer to struct ieee80211_channel
+ *
+ * NOTE:  Does not commit to the hardware; it sets appropriate bit fields
+ * in the staging RXON flag structure based on the ch->band
+ */
+int
+il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch)
+{
+	enum nl80211_band band = ch->band;
+	u16 channel = ch->hw_value;
+
+	if (le16_to_cpu(il->staging.channel) == channel && il->band == band)
+		return 0;
+
+	il->staging.channel = cpu_to_le16(channel);
+	if (band == NL80211_BAND_5GHZ)
+		il->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
+	else
+		il->staging.flags |= RXON_FLG_BAND_24G_MSK;
+
+	il->band = band;
+
+	D_INFO("Staging channel set to %d [%d]\n", channel, band);
+
+	return 0;
+}
+EXPORT_SYMBOL(il_set_rxon_channel);
+
+void
+il_set_flags_for_band(struct il_priv *il, enum nl80211_band band,
+		      struct ieee80211_vif *vif)
+{
+	if (band == NL80211_BAND_5GHZ) {
+		il->staging.flags &=
+		    ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
+		      RXON_FLG_CCK_MSK);
+		il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+	} else {
+		/* Copied from il_post_associate() */
+		if (vif && vif->bss_conf.use_short_slot)
+			il->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+		else
+			il->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+
+		il->staging.flags |= RXON_FLG_BAND_24G_MSK;
+		il->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
+		il->staging.flags &= ~RXON_FLG_CCK_MSK;
+	}
+}
+EXPORT_SYMBOL(il_set_flags_for_band);
+
+/*
+ * initialize rxon structure with default values from eeprom
+ */
+void
+il_connection_init_rx_config(struct il_priv *il)
+{
+	const struct il_channel_info *ch_info;
+
+	memset(&il->staging, 0, sizeof(il->staging));
+
+	switch (il->iw_mode) {
+	case NL80211_IFTYPE_UNSPECIFIED:
+		il->staging.dev_type = RXON_DEV_TYPE_ESS;
+		break;
+	case NL80211_IFTYPE_STATION:
+		il->staging.dev_type = RXON_DEV_TYPE_ESS;
+		il->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
+		break;
+	case NL80211_IFTYPE_ADHOC:
+		il->staging.dev_type = RXON_DEV_TYPE_IBSS;
+		il->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
+		il->staging.filter_flags =
+		    RXON_FILTER_BCON_AWARE_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
+		break;
+	default:
+		IL_ERR("Unsupported interface type %d\n", il->vif->type);
+		return;
+	}
+
+#if 0
+	/* TODO:  Figure out when short_preamble would be set and cache from
+	 * that */
+	if (!hw_to_local(il->hw)->short_preamble)
+		il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+	else
+		il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+#endif
+
+	ch_info =
+	    il_get_channel_info(il, il->band, le16_to_cpu(il->active.channel));
+
+	if (!ch_info)
+		ch_info = &il->channel_info[0];
+
+	il->staging.channel = cpu_to_le16(ch_info->channel);
+	il->band = ch_info->band;
+
+	il_set_flags_for_band(il, il->band, il->vif);
+
+	il->staging.ofdm_basic_rates =
+	    (IL_OFDM_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
+	il->staging.cck_basic_rates =
+	    (IL_CCK_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;
+
+	/* clear both MIX and PURE40 mode flag */
+	il->staging.flags &=
+	    ~(RXON_FLG_CHANNEL_MODE_MIXED | RXON_FLG_CHANNEL_MODE_PURE_40);
+	if (il->vif)
+		memcpy(il->staging.node_addr, il->vif->addr, ETH_ALEN);
+
+	il->staging.ofdm_ht_single_stream_basic_rates = 0xff;
+	il->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
+}
+EXPORT_SYMBOL(il_connection_init_rx_config);
+
+void
+il_set_rate(struct il_priv *il)
+{
+	const struct ieee80211_supported_band *hw = NULL;
+	struct ieee80211_rate *rate;
+	int i;
+
+	hw = il_get_hw_mode(il, il->band);
+	if (!hw) {
+		IL_ERR("Failed to set rate: unable to get hw mode\n");
+		return;
+	}
+
+	il->active_rate = 0;
+
+	for (i = 0; i < hw->n_bitrates; i++) {
+		rate = &(hw->bitrates[i]);
+		if (rate->hw_value < RATE_COUNT_LEGACY)
+			il->active_rate |= (1 << rate->hw_value);
+	}
+
+	D_RATE("Set active_rate = %0x\n", il->active_rate);
+
+	il->staging.cck_basic_rates =
+	    (IL_CCK_BASIC_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;
+
+	il->staging.ofdm_basic_rates =
+	    (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
+}
+EXPORT_SYMBOL(il_set_rate);
+
+void
+il_chswitch_done(struct il_priv *il, bool is_success)
+{
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		return;
+
+	if (test_and_clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
+		ieee80211_chswitch_done(il->vif, is_success);
+}
+EXPORT_SYMBOL(il_chswitch_done);
+
+void
+il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	struct il_csa_notification *csa = &(pkt->u.csa_notif);
+	struct il_rxon_cmd *rxon = (void *)&il->active;
+
+	if (!test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
+		return;
+
+	if (!le32_to_cpu(csa->status) && csa->channel == il->switch_channel) {
+		rxon->channel = csa->channel;
+		il->staging.channel = csa->channel;
+		D_11H("CSA notif: channel %d\n", le16_to_cpu(csa->channel));
+		il_chswitch_done(il, true);
+	} else {
+		IL_ERR("CSA notif (fail) : channel %d\n",
+		       le16_to_cpu(csa->channel));
+		il_chswitch_done(il, false);
+	}
+}
+EXPORT_SYMBOL(il_hdl_csa);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+void
+il_print_rx_config_cmd(struct il_priv *il)
+{
+	struct il_rxon_cmd *rxon = &il->staging;
+
+	D_RADIO("RX CONFIG:\n");
+	il_print_hex_dump(il, IL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
+	D_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
+	D_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
+	D_RADIO("u32 filter_flags: 0x%08x\n", le32_to_cpu(rxon->filter_flags));
+	D_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
+	D_RADIO("u8 ofdm_basic_rates: 0x%02x\n", rxon->ofdm_basic_rates);
+	D_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
+	D_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr);
+	D_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
+	D_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
+}
+EXPORT_SYMBOL(il_print_rx_config_cmd);
+#endif
+/**
+ * il_irq_handle_error - called for HW or SW error interrupt from card
+ */
+void
+il_irq_handle_error(struct il_priv *il)
+{
+	/* Set the FW error flag -- cleared on il_down */
+	set_bit(S_FW_ERROR, &il->status);
+
+	/* Cancel currently queued command. */
+	clear_bit(S_HCMD_ACTIVE, &il->status);
+
+	IL_ERR("Loaded firmware version: %s\n", il->hw->wiphy->fw_version);
+
+	il->ops->dump_nic_error_log(il);
+	if (il->ops->dump_fh)
+		il->ops->dump_fh(il, NULL, false);
+#ifdef CONFIG_IWLEGACY_DEBUG
+	if (il_get_debug_level(il) & IL_DL_FW_ERRORS)
+		il_print_rx_config_cmd(il);
+#endif
+
+	wake_up(&il->wait_command_queue);
+
+	/* Keep the restart process from trying to send host
+	 * commands by clearing the INIT status bit */
+	clear_bit(S_READY, &il->status);
+
+	if (!test_bit(S_EXIT_PENDING, &il->status)) {
+		IL_DBG(IL_DL_FW_ERRORS,
+		       "Restarting adapter due to uCode error.\n");
+
+		if (il->cfg->mod_params->restart_fw)
+			queue_work(il->workqueue, &il->restart);
+	}
+}
+EXPORT_SYMBOL(il_irq_handle_error);
+
+static int
+_il_apm_stop_master(struct il_priv *il)
+{
+	int ret = 0;
+
+	/* stop device's busmaster DMA activity */
+	_il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
+
+	ret =
+	    _il_poll_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
+			 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
+	if (ret < 0)
+		IL_WARN("Master Disable Timed Out, 100 usec\n");
+
+	D_INFO("stop master\n");
+
+	return ret;
+}
+
+void
+_il_apm_stop(struct il_priv *il)
+{
+	lockdep_assert_held(&il->reg_lock);
+
+	D_INFO("Stop card, put in low power state\n");
+
+	/* Stop device's DMA activity */
+	_il_apm_stop_master(il);
+
+	/* Reset the entire device */
+	_il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+	udelay(10);
+
+	/*
+	 * Clear "initialization complete" bit to move adapter from
+	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+	 */
+	_il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+}
+EXPORT_SYMBOL(_il_apm_stop);
+
+void
+il_apm_stop(struct il_priv *il)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&il->reg_lock, flags);
+	_il_apm_stop(il);
+	spin_unlock_irqrestore(&il->reg_lock, flags);
+}
+EXPORT_SYMBOL(il_apm_stop);
+
+/*
+ * Start up NIC's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via il_apm_stop())
+ * NOTE:  This does not load uCode nor start the embedded processor
+ */
+int
+il_apm_init(struct il_priv *il)
+{
+	int ret = 0;
+	u16 lctl;
+
+	D_INFO("Init card's basic functions\n");
+
+	/*
+	 * Use "set_bit" below rather than "write", to preserve any hardware
+	 * bits already set by default after reset.
+	 */
+
+	/* Disable L0S exit timer (platform NMI Work/Around) */
+	il_set_bit(il, CSR_GIO_CHICKEN_BITS,
+		   CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+
+	/*
+	 * Disable L0s without affecting L1;
+	 *  don't wait for ICH L0s (ICH bug W/A)
+	 */
+	il_set_bit(il, CSR_GIO_CHICKEN_BITS,
+		   CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
+
+	/* Set FH wait threshold to maximum (HW error during stress W/A) */
+	il_set_bit(il, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
+
+	/*
+	 * Enable HAP INTA (interrupt from management bus) to
+	 * wake device's PCI Express link L1a -> L0s
+	 * NOTE:  This is a no-op for 3945 (the bit does not exist there)
+	 */
+	il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+		   CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
+
+	/*
+	 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
+	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
+	 * If so (likely), disable L0S, so device moves directly L0->L1;
+	 *    costs negligible amount of power savings.
+	 * If not (unlikely), enable L0S, so there is at least some
+	 *    power savings, even without L1.
+	 */
+	if (il->cfg->set_l0s) {
+		pcie_capability_read_word(il->pci_dev, PCI_EXP_LNKCTL, &lctl);
+		if (lctl & PCI_EXP_LNKCTL_ASPM_L1) {
+			/* L1-ASPM enabled; disable(!) L0S  */
+			il_set_bit(il, CSR_GIO_REG,
+				   CSR_GIO_REG_VAL_L0S_ENABLED);
+			D_POWER("L1 Enabled; Disabling L0S\n");
+		} else {
+			/* L1-ASPM disabled; enable(!) L0S */
+			il_clear_bit(il, CSR_GIO_REG,
+				     CSR_GIO_REG_VAL_L0S_ENABLED);
+			D_POWER("L1 Disabled; Enabling L0S\n");
+		}
+	}
+
+	/* Configure analog phase-lock-loop before activating to D0A */
+	if (il->cfg->pll_cfg_val)
+		il_set_bit(il, CSR_ANA_PLL_CFG,
+			   il->cfg->pll_cfg_val);
+
+	/*
+	 * Set "initialization complete" bit to move adapter from
+	 * D0U* --> D0A* (powered-up active) state.
+	 */
+	il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+	/*
+	 * Wait for clock stabilization; once stabilized, access to
+	 * device-internal resources is supported, e.g. il_wr_prph()
+	 * and accesses to uCode SRAM.
+	 */
+	ret =
+	    _il_poll_bit(il, CSR_GP_CNTRL,
+			 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+			 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
+	if (ret < 0) {
+		D_INFO("Failed to init the card\n");
+		goto out;
+	}
+
+	/*
+	 * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
+	 * BSM (Bootstrap State Machine) is only in 3945 and 4965.
+	 *
+	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
+	 * do not disable clocks.  This preserves any hardware bits already
+	 * set by default in "CLK_CTRL_REG" after reset.
+	 */
+	if (il->cfg->use_bsm)
+		il_wr_prph(il, APMG_CLK_EN_REG,
+			   APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
+	else
+		il_wr_prph(il, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+	udelay(20);
+
+	/* Disable L1-Active */
+	il_set_bits_prph(il, APMG_PCIDEV_STT_REG,
+			 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+
+out:
+	return ret;
+}
+EXPORT_SYMBOL(il_apm_init);
+
+int
+il_set_tx_power(struct il_priv *il, s8 tx_power, bool force)
+{
+	int ret;
+	s8 prev_tx_power;
+	bool defer;
+
+	lockdep_assert_held(&il->mutex);
+
+	if (il->tx_power_user_lmt == tx_power && !force)
+		return 0;
+
+	if (!il->ops->send_tx_power)
+		return -EOPNOTSUPP;
+
+	/* 0 dBm means 1 milliwatt */
+	if (tx_power < 0) {
+		IL_WARN("Requested user TXPOWER %d below 1 mW.\n", tx_power);
+		return -EINVAL;
+	}
+
+	if (tx_power > il->tx_power_device_lmt) {
+		IL_WARN("Requested user TXPOWER %d above upper limit %d.\n",
+			tx_power, il->tx_power_device_lmt);
+		return -EINVAL;
+	}
+
+	if (!il_is_ready_rf(il))
+		return -EIO;
+
+	/* Scan complete and commit_rxon use the tx_power_next value;
+	 * it always needs to be updated with the newest request. */
+	il->tx_power_next = tx_power;
+
+	/* do not set tx power when scanning or channel changing */
+	defer = test_bit(S_SCANNING, &il->status) ||
+	    memcmp(&il->active, &il->staging, sizeof(il->staging));
+	if (defer && !force) {
+		D_INFO("Deferring tx power set\n");
+		return 0;
+	}
+
+	prev_tx_power = il->tx_power_user_lmt;
+	il->tx_power_user_lmt = tx_power;
+
+	ret = il->ops->send_tx_power(il);
+
+	/* if we fail to set tx_power, restore the original tx power */
+	if (ret) {
+		il->tx_power_user_lmt = prev_tx_power;
+		il->tx_power_next = prev_tx_power;
+	}
+	return ret;
+}
+EXPORT_SYMBOL(il_set_tx_power);
+
+void
+il_send_bt_config(struct il_priv *il)
+{
+	struct il_bt_cmd bt_cmd = {
+		.lead_time = BT_LEAD_TIME_DEF,
+		.max_kill = BT_MAX_KILL_DEF,
+		.kill_ack_mask = 0,
+		.kill_cts_mask = 0,
+	};
+
+	if (!bt_coex_active)
+		bt_cmd.flags = BT_COEX_DISABLE;
+	else
+		bt_cmd.flags = BT_COEX_ENABLE;
+
+	D_INFO("BT coex %s\n",
+	       (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
+
+	if (il_send_cmd_pdu(il, C_BT_CONFIG, sizeof(struct il_bt_cmd), &bt_cmd))
+		IL_ERR("failed to send BT Coex Config\n");
+}
+EXPORT_SYMBOL(il_send_bt_config);
+
+int
+il_send_stats_request(struct il_priv *il, u8 flags, bool clear)
+{
+	struct il_stats_cmd stats_cmd = {
+		.configuration_flags = clear ? IL_STATS_CONF_CLEAR_STATS : 0,
+	};
+
+	if (flags & CMD_ASYNC)
+		return il_send_cmd_pdu_async(il, C_STATS, sizeof(struct il_stats_cmd),
+					     &stats_cmd, NULL);
+	else
+		return il_send_cmd_pdu(il, C_STATS, sizeof(struct il_stats_cmd),
+				       &stats_cmd);
+}
+EXPORT_SYMBOL(il_send_stats_request);
+
+void
+il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb)
+{
+#ifdef CONFIG_IWLEGACY_DEBUG
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	struct il_sleep_notification *sleep = &(pkt->u.sleep_notif);
+	D_RX("sleep mode: %d, src: %d\n",
+	     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
+#endif
+}
+EXPORT_SYMBOL(il_hdl_pm_sleep);
+
+void
+il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+	u32 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
+	D_RADIO("Dumping %d bytes of unhandled notification for %s:\n", len,
+		il_get_cmd_string(pkt->hdr.cmd));
+	il_print_hex_dump(il, IL_DL_RADIO, pkt->u.raw, len);
+}
+EXPORT_SYMBOL(il_hdl_pm_debug_stats);
+
+void
+il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb)
+{
+	struct il_rx_pkt *pkt = rxb_addr(rxb);
+
+	IL_ERR("Error Reply type 0x%08X cmd %s (0x%02X) "
+	       "seq 0x%04X ser 0x%08X\n",
+	       le32_to_cpu(pkt->u.err_resp.error_type),
+	       il_get_cmd_string(pkt->u.err_resp.cmd_id),
+	       pkt->u.err_resp.cmd_id,
+	       le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
+	       le32_to_cpu(pkt->u.err_resp.error_info));
+}
+EXPORT_SYMBOL(il_hdl_error);
+
+void
+il_clear_isr_stats(struct il_priv *il)
+{
+	memset(&il->isr_stats, 0, sizeof(il->isr_stats));
+}
+
+int
+il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+	       const struct ieee80211_tx_queue_params *params)
+{
+	struct il_priv *il = hw->priv;
+	unsigned long flags;
+	int q;
+
+	D_MAC80211("enter\n");
+
+	if (!il_is_ready_rf(il)) {
+		D_MAC80211("leave - RF not ready\n");
+		return -EIO;
+	}
+
+	if (queue >= AC_NUM) {
+		D_MAC80211("leave - queue >= AC_NUM %d\n", queue);
+		return 0;
+	}
+
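+	/* mac80211 numbers queues highest-priority (VO) first; the firmware
+	 * QoS table is ordered the other way around, so invert the idx */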
+	q = AC_NUM - 1 - queue;
+
+	spin_lock_irqsave(&il->lock, flags);
+
+	il->qos_data.def_qos_parm.ac[q].cw_min =
+	    cpu_to_le16(params->cw_min);
+	il->qos_data.def_qos_parm.ac[q].cw_max =
+	    cpu_to_le16(params->cw_max);
+	il->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
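+	/* txop is in units of 32 usec; the firmware expects usec */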
+	il->qos_data.def_qos_parm.ac[q].edca_txop =
+	    cpu_to_le16((params->txop * 32));
+
+	il->qos_data.def_qos_parm.ac[q].reserved1 = 0;
+
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	D_MAC80211("leave\n");
+	return 0;
+}
+EXPORT_SYMBOL(il_mac_conf_tx);
+
+int
+il_mac_tx_last_beacon(struct ieee80211_hw *hw)
+{
+	struct il_priv *il = hw->priv;
+	int ret;
+
+	D_MAC80211("enter\n");
+
+	ret = (il->ibss_manager == IL_IBSS_MANAGER);
+
+	D_MAC80211("leave ret %d\n", ret);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(il_mac_tx_last_beacon);
+
+static int
+il_set_mode(struct il_priv *il)
+{
+	il_connection_init_rx_config(il);
+
+	if (il->ops->set_rxon_chain)
+		il->ops->set_rxon_chain(il);
+
+	return il_commit_rxon(il);
+}
+
+int
+il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+	struct il_priv *il = hw->priv;
+	int err;
+	bool reset;
+
+	mutex_lock(&il->mutex);
+	D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);
+
+	if (!il_is_ready_rf(il)) {
+		IL_WARN("Try to add interface when device not ready\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * We do not support multiple virtual interfaces, but on hardware reset
+	 * we have to add the same interface again.
+	 */
+	reset = (il->vif == vif);
+	if (il->vif && !reset) {
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+
+	il->vif = vif;
+	il->iw_mode = vif->type;
+
+	err = il_set_mode(il);
+	if (err) {
+		IL_WARN("Fail to set mode %d\n", vif->type);
+		if (!reset) {
+			il->vif = NULL;
+			il->iw_mode = NL80211_IFTYPE_STATION;
+		}
+	}
+
+out:
+	D_MAC80211("leave err %d\n", err);
+	mutex_unlock(&il->mutex);
+
+	return err;
+}
+EXPORT_SYMBOL(il_mac_add_interface);
+
+static void
+il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif)
+{
+	lockdep_assert_held(&il->mutex);
+
+	if (il->scan_vif == vif) {
+		il_scan_cancel_timeout(il, 200);
+		il_force_scan_end(il);
+	}
+
+	il_set_mode(il);
+}
+
+void
+il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+	struct il_priv *il = hw->priv;
+
+	mutex_lock(&il->mutex);
+	D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);
+
+	WARN_ON(il->vif != vif);
+	il->vif = NULL;
+	il->iw_mode = NL80211_IFTYPE_UNSPECIFIED;
+	il_teardown_interface(il, vif);
+	eth_zero_addr(il->bssid);
+
+	D_MAC80211("leave\n");
+	mutex_unlock(&il->mutex);
+}
+EXPORT_SYMBOL(il_mac_remove_interface);
+
+int
+il_alloc_txq_mem(struct il_priv *il)
+{
+	if (!il->txq)
+		il->txq =
+		    kcalloc(il->cfg->num_of_queues,
+			    sizeof(struct il_tx_queue),
+			    GFP_KERNEL);
+	if (!il->txq) {
+		IL_ERR("Not enough memory for txq\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(il_alloc_txq_mem);
+
+void
+il_free_txq_mem(struct il_priv *il)
+{
+	kfree(il->txq);
+	il->txq = NULL;
+}
+EXPORT_SYMBOL(il_free_txq_mem);
+
+int
+il_force_reset(struct il_priv *il, bool external)
+{
+	struct il_force_reset *force_reset;
+
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		return -EINVAL;
+
+	force_reset = &il->force_reset;
+	force_reset->reset_request_count++;
+	if (!external) {
+		if (force_reset->last_force_reset_jiffies &&
+		    time_after(force_reset->last_force_reset_jiffies +
+			       force_reset->reset_duration, jiffies)) {
+			D_INFO("force reset rejected\n");
+			force_reset->reset_reject_count++;
+			return -EAGAIN;
+		}
+	}
+	force_reset->reset_success_count++;
+	force_reset->last_force_reset_jiffies = jiffies;
+
+	/*
+	 * If the request is external (e.g. from debugfs), always perform
+	 * it, regardless of the module parameter setting.
+	 * If the request is internal (uCode error or driver-detected
+	 * failure), the fw_restart module parameter needs to be checked
+	 * before reloading the firmware.
+	 */
+
+	if (!external && !il->cfg->mod_params->restart_fw) {
+		D_INFO("Cancel firmware reload based on "
+		       "module parameter setting\n");
+		return 0;
+	}
+
+	IL_ERR("On demand firmware reload\n");
+
+	/* Set the FW error flag -- cleared on il_down */
+	set_bit(S_FW_ERROR, &il->status);
+	wake_up(&il->wait_command_queue);
+	/*
+	 * Keep the restart process from trying to send host
+	 * commands by clearing the INIT status bit
+	 */
+	clear_bit(S_READY, &il->status);
+	queue_work(il->workqueue, &il->restart);
+
+	return 0;
+}
+EXPORT_SYMBOL(il_force_reset);
+
+int
+il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			enum nl80211_iftype newtype, bool newp2p)
+{
+	struct il_priv *il = hw->priv;
+	int err;
+
+	mutex_lock(&il->mutex);
+	D_MAC80211("enter: type %d, addr %pM newtype %d newp2p %d\n",
+		    vif->type, vif->addr, newtype, newp2p);
+
+	if (newp2p) {
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+
+	if (!il->vif || !il_is_ready_rf(il)) {
+		/*
+		 * Huh? But wait ... this can maybe happen when
+		 * we're in the middle of a firmware restart!
+		 */
+		err = -EBUSY;
+		goto out;
+	}
+
+	/* success */
+	vif->type = newtype;
+	vif->p2p = false;
+	il->iw_mode = newtype;
+	il_teardown_interface(il, vif);
+	err = 0;
+
+out:
+	D_MAC80211("leave err %d\n", err);
+	mutex_unlock(&il->mutex);
+
+	return err;
+}
+EXPORT_SYMBOL(il_mac_change_interface);
+
+void il_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		  u32 queues, bool drop)
+{
+	struct il_priv *il = hw->priv;
+	unsigned long timeout = jiffies + msecs_to_jiffies(500);
+	int i;
+
+	mutex_lock(&il->mutex);
+	D_MAC80211("enter\n");
+
+	if (il->txq == NULL)
+		goto out;
+
+	for (i = 0; i < il->hw_params.max_txq_num; i++) {
+		struct il_queue *q;
+
+		if (i == il->cmd_queue)
+			continue;
+
+		q = &il->txq[i].q;
+		if (q->read_ptr == q->write_ptr)
+			continue;
+
+		if (time_after(jiffies, timeout)) {
+			IL_ERR("Failed to flush queue %d\n", q->id);
+			break;
+		}
+
+		msleep(20);
+	}
+out:
+	D_MAC80211("leave\n");
+	mutex_unlock(&il->mutex);
+}
+EXPORT_SYMBOL(il_mac_flush);
+
+/*
+ * On every watchdog tick we check the (latest) time stamp. If it has not
+ * changed during the timeout period and the queue is not empty, we reset
+ * the firmware.
+ */
+static int
+il_check_stuck_queue(struct il_priv *il, int cnt)
+{
+	struct il_tx_queue *txq = &il->txq[cnt];
+	struct il_queue *q = &txq->q;
+	unsigned long timeout;
+	unsigned long now = jiffies;
+	int ret;
+
+	if (q->read_ptr == q->write_ptr) {
+		txq->time_stamp = now;
+		return 0;
+	}
+
+	timeout = txq->time_stamp + msecs_to_jiffies(il->cfg->wd_timeout);
+
+	if (time_after(now, timeout)) {
+		IL_ERR("Queue %d stuck for %u ms.\n", q->id,
+		       jiffies_to_msecs(now - txq->time_stamp));
+		ret = il_force_reset(il, false);
+		return (ret == -EAGAIN) ? 0 : 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Making the watchdog tick a quarter of the timeout ensures we discover
+ * a hung queue between timeout and 1.25 * timeout.
+ */
+#define IL_WD_TICK(timeout) ((timeout) / 4)
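+/*
+ * Example: wd_timeout == 2000 ms gives a 500 ms tick; a queue that hangs
+ * right after a tick is declared stuck at most 2500 ms later.
+ */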
+
+/*
+ * Watchdog timer callback: we check each tx queue for a stuck condition;
+ * if one is hung, we reset the firmware. If everything is fine, just
+ * rearm the timer.
+ */
+void
+il_bg_watchdog(struct timer_list *t)
+{
+	struct il_priv *il = from_timer(il, t, watchdog);
+	int cnt;
+	unsigned long timeout;
+
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		return;
+
+	timeout = il->cfg->wd_timeout;
+	if (timeout == 0)
+		return;
+
+	/* monitor and check for stuck cmd queue */
+	if (il_check_stuck_queue(il, il->cmd_queue))
+		return;
+
+	/* monitor and check for other stuck queues */
+	for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
+		/* skip as we already checked the command queue */
+		if (cnt == il->cmd_queue)
+			continue;
+		if (il_check_stuck_queue(il, cnt))
+			return;
+	}
+
+	mod_timer(&il->watchdog,
+		  jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
+}
+EXPORT_SYMBOL(il_bg_watchdog);
+
+void
+il_setup_watchdog(struct il_priv *il)
+{
+	unsigned int timeout = il->cfg->wd_timeout;
+
+	if (timeout)
+		mod_timer(&il->watchdog,
+			  jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
+	else
+		del_timer(&il->watchdog);
+}
+EXPORT_SYMBOL(il_setup_watchdog);
+
+/*
+ * extended beacon time format
+ * time in usec will be changed into a 32-bit value in extended:internal format
+ * the extended part is the beacon counts
+ * the internal part is the time in usec within one beacon interval
+ */
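+/*
+ * Worked example (assuming beacon_time_tsf_bits == 22): with a 100 TU
+ * beacon interval (102400 usec), usec == 250000 packs as quot == 2
+ * beacons and rem == 45200 usec, i.e. (2 << 22) + 45200.
+ */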
+u32
+il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval)
+{
+	u32 quot;
+	u32 rem;
+	u32 interval = beacon_interval * TIME_UNIT;
+
+	if (!interval || !usec)
+		return 0;
+
+	quot = (usec / interval) &
+	       (il_beacon_time_mask_high(il, il->hw_params.beacon_time_tsf_bits) >>
+		il->hw_params.beacon_time_tsf_bits);
+	rem = (usec % interval) &
+	      il_beacon_time_mask_low(il, il->hw_params.beacon_time_tsf_bits);
+
+	return (quot << il->hw_params.beacon_time_tsf_bits) + rem;
+}
+EXPORT_SYMBOL(il_usecs_to_beacons);
+
+/* base is usually what we get from ucode with each received frame,
+ * the same as HW timer counter counting down
+ */
+__le32
+il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
+		   u32 beacon_interval)
+{
+	u32 base_low = base &
+	    il_beacon_time_mask_low(il, il->hw_params.beacon_time_tsf_bits);
+	u32 addon_low = addon &
+	    il_beacon_time_mask_low(il, il->hw_params.beacon_time_tsf_bits);
+	u32 interval = beacon_interval * TIME_UNIT;
+	u32 res = (base &
+		   il_beacon_time_mask_high(il, il->hw_params.beacon_time_tsf_bits)) +
+	    (addon &
+	     il_beacon_time_mask_high(il, il->hw_params.beacon_time_tsf_bits));
+
+	if (base_low > addon_low)
+		res += base_low - addon_low;
+	else if (base_low < addon_low) {
+		res += interval + base_low - addon_low;
+		res += (1 << il->hw_params.beacon_time_tsf_bits);
+	} else
+		res += (1 << il->hw_params.beacon_time_tsf_bits);
+
+	return cpu_to_le32(res);
+}
+EXPORT_SYMBOL(il_add_beacon_time);
+
+#ifdef CONFIG_PM_SLEEP
+
+static int
+il_pci_suspend(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct il_priv *il = pci_get_drvdata(pdev);
+
+	/*
+	 * This function is called when the system goes into suspend state.
+	 * mac80211 will have called il_mac_stop() from its suspend handler
+	 * first, but since il_mac_stop() has no knowledge of who the caller
+	 * is, it does not call apm_ops.stop() to stop the DMA operation.
+	 * Call apm_ops.stop here to make sure the DMA is stopped.
+	 */
+	il_apm_stop(il);
+
+	return 0;
+}
+
+static int
+il_pci_resume(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct il_priv *il = pci_get_drvdata(pdev);
+	bool hw_rfkill = false;
+
+	/*
+	 * We disable the RETRY_TIMEOUT register (0x41) to keep
+	 * PCI Tx retries from interfering with C3 CPU state.
+	 */
+	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
+	il_enable_interrupts(il);
+
+	if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
+		hw_rfkill = true;
+
+	if (hw_rfkill)
+		set_bit(S_RFKILL, &il->status);
+	else
+		clear_bit(S_RFKILL, &il->status);
+
+	wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rfkill);
+
+	return 0;
+}
+
+SIMPLE_DEV_PM_OPS(il_pm_ops, il_pci_suspend, il_pci_resume);
+EXPORT_SYMBOL(il_pm_ops);
+
+#endif /* CONFIG_PM_SLEEP */
+
+static void
+il_update_qos(struct il_priv *il)
+{
+	if (test_bit(S_EXIT_PENDING, &il->status))
+		return;
+
+	il->qos_data.def_qos_parm.qos_flags = 0;
+
+	if (il->qos_data.qos_active)
+		il->qos_data.def_qos_parm.qos_flags |=
+		    QOS_PARAM_FLG_UPDATE_EDCA_MSK;
+
+	if (il->ht.enabled)
+		il->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
+
+	D_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n",
+	      il->qos_data.qos_active, il->qos_data.def_qos_parm.qos_flags);
+
+	il_send_cmd_pdu_async(il, C_QOS_PARAM, sizeof(struct il_qosparam_cmd),
+			      &il->qos_data.def_qos_parm, NULL);
+}
+
+/**
+ * il_mac_config - mac80211 config callback
+ */
+int
+il_mac_config(struct ieee80211_hw *hw, u32 changed)
+{
+	struct il_priv *il = hw->priv;
+	const struct il_channel_info *ch_info;
+	struct ieee80211_conf *conf = &hw->conf;
+	struct ieee80211_channel *channel = conf->chandef.chan;
+	struct il_ht_config *ht_conf = &il->current_ht_config;
+	unsigned long flags = 0;
+	int ret = 0;
+	u16 ch;
+	int scan_active = 0;
+	bool ht_changed = false;
+
+	mutex_lock(&il->mutex);
+	D_MAC80211("enter: channel %d changed 0x%X\n", channel->hw_value,
+		   changed);
+
+	if (unlikely(test_bit(S_SCANNING, &il->status))) {
+		scan_active = 1;
+		D_MAC80211("scan active\n");
+	}
+
+	if (changed &
+	    (IEEE80211_CONF_CHANGE_SMPS | IEEE80211_CONF_CHANGE_CHANNEL)) {
+		/* mac80211 uses static for non-HT which is what we want */
+		il->current_ht_config.smps = conf->smps_mode;
+
+		/*
+		 * Recalculate chain counts.
+		 *
+		 * If monitor mode is enabled then mac80211 will
+		 * set up the SM PS mode to OFF if an HT channel is
+		 * configured.
+		 */
+		if (il->ops->set_rxon_chain)
+			il->ops->set_rxon_chain(il);
+	}
+
+	/* During scanning, mac80211 delays the channel setting until the
+	 * scan finishes, then calls us again with changed = 0
+	 */
+	if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
+
+		if (scan_active)
+			goto set_ch_out;
+
+		ch = channel->hw_value;
+		ch_info = il_get_channel_info(il, channel->band, ch);
+		if (!il_is_channel_valid(ch_info)) {
+			D_MAC80211("leave - invalid channel\n");
+			ret = -EINVAL;
+			goto set_ch_out;
+		}
+
+		if (il->iw_mode == NL80211_IFTYPE_ADHOC &&
+		    !il_is_channel_ibss(ch_info)) {
+			D_MAC80211("leave - not IBSS channel\n");
+			ret = -EINVAL;
+			goto set_ch_out;
+		}
+
+		spin_lock_irqsave(&il->lock, flags);
+
+		/* Configure HT40 channels */
+		if (il->ht.enabled != conf_is_ht(conf)) {
+			il->ht.enabled = conf_is_ht(conf);
+			ht_changed = true;
+		}
+		if (il->ht.enabled) {
+			if (conf_is_ht40_minus(conf)) {
+				il->ht.extension_chan_offset =
+				    IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+				il->ht.is_40mhz = true;
+			} else if (conf_is_ht40_plus(conf)) {
+				il->ht.extension_chan_offset =
+				    IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+				il->ht.is_40mhz = true;
+			} else {
+				il->ht.extension_chan_offset =
+				    IEEE80211_HT_PARAM_CHA_SEC_NONE;
+				il->ht.is_40mhz = false;
+			}
+		} else
+			il->ht.is_40mhz = false;
+
+		/*
+		 * Default to no protection. Protection mode will
+		 * later be set from BSS config in il_ht_conf
+		 */
+		il->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
+
+		/* if we are switching channels, clear the flags: stale
+		 * HT-related settings do not apply on the new channel */
+		if ((le16_to_cpu(il->staging.channel) != ch))
+			il->staging.flags = 0;
+
+		il_set_rxon_channel(il, channel);
+		il_set_rxon_ht(il, ht_conf);
+
+		il_set_flags_for_band(il, channel->band, il->vif);
+
+		spin_unlock_irqrestore(&il->lock, flags);
+
+		if (il->ops->update_bcast_stations)
+			ret = il->ops->update_bcast_stations(il);
+
+set_ch_out:
+		/* The list of supported rates and rate mask can be different
+		 * for each band; since the band may have changed, reset
+		 * the rate mask to what mac80211 lists */
+		il_set_rate(il);
+	}
+
+	if (changed & (IEEE80211_CONF_CHANGE_PS | IEEE80211_CONF_CHANGE_IDLE)) {
+		il->power_data.ps_disabled = !(conf->flags & IEEE80211_CONF_PS);
+		if (!il->power_data.ps_disabled)
+			IL_WARN_ONCE("Enabling power save might cause firmware crashes\n");
+		ret = il_power_update_mode(il, false);
+		if (ret)
+			D_MAC80211("Error setting sleep level\n");
+	}
+
+	if (changed & IEEE80211_CONF_CHANGE_POWER) {
+		D_MAC80211("TX Power old=%d new=%d\n", il->tx_power_user_lmt,
+			   conf->power_level);
+
+		il_set_tx_power(il, conf->power_level, false);
+	}
+
+	if (!il_is_ready(il)) {
+		D_MAC80211("leave - not ready\n");
+		goto out;
+	}
+
+	if (scan_active)
+		goto out;
+
+	if (memcmp(&il->active, &il->staging, sizeof(il->staging)))
+		il_commit_rxon(il);
+	else
+		D_INFO("Not re-sending same RXON configuration.\n");
+	if (ht_changed)
+		il_update_qos(il);
+
+out:
+	D_MAC80211("leave ret %d\n", ret);
+	mutex_unlock(&il->mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(il_mac_config);
+
+void
+il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+	struct il_priv *il = hw->priv;
+	unsigned long flags;
+
+	mutex_lock(&il->mutex);
+	D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);
+
+	spin_lock_irqsave(&il->lock, flags);
+
+	memset(&il->current_ht_config, 0, sizeof(struct il_ht_config));
+
+	/* new association get rid of ibss beacon skb */
+	if (il->beacon_skb)
+		dev_kfree_skb(il->beacon_skb);
+	il->beacon_skb = NULL;
+	il->timestamp = 0;
+
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	il_scan_cancel_timeout(il, 100);
+	if (!il_is_ready_rf(il)) {
+		D_MAC80211("leave - not ready\n");
+		mutex_unlock(&il->mutex);
+		return;
+	}
+
+	/* we are restarting association process */
+	il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+	il_commit_rxon(il);
+
+	il_set_rate(il);
+
+	D_MAC80211("leave\n");
+	mutex_unlock(&il->mutex);
+}
+EXPORT_SYMBOL(il_mac_reset_tsf);
+
+static void
+il_ht_conf(struct il_priv *il, struct ieee80211_vif *vif)
+{
+	struct il_ht_config *ht_conf = &il->current_ht_config;
+	struct ieee80211_sta *sta;
+	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+
+	D_ASSOC("enter:\n");
+
+	if (!il->ht.enabled)
+		return;
+
+	il->ht.protection =
+	    bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
+	il->ht.non_gf_sta_present =
+	    !!(bss_conf->ht_operation_mode &
+	       IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+
+	ht_conf->single_chain_sufficient = false;
+
+	switch (vif->type) {
+	case NL80211_IFTYPE_STATION:
+		rcu_read_lock();
+		sta = ieee80211_find_sta(vif, bss_conf->bssid);
+		if (sta) {
+			struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+			int maxstreams;
+
+			maxstreams =
+			    ((ht_cap->mcs.tx_params &
+			      IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK) >>
+			     IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT) + 1;
+
+			if (ht_cap->mcs.rx_mask[1] == 0 &&
+			    ht_cap->mcs.rx_mask[2] == 0)
+				ht_conf->single_chain_sufficient = true;
+			if (maxstreams <= 1)
+				ht_conf->single_chain_sufficient = true;
+		} else {
+			/*
+			 * If at all, this can only happen through a race
+			 * when the AP disconnects us while we're still
+			 * setting up the connection, in that case mac80211
+			 * will soon tell us about that.
+			 */
+			ht_conf->single_chain_sufficient = true;
+		}
+		rcu_read_unlock();
+		break;
+	case NL80211_IFTYPE_ADHOC:
+		ht_conf->single_chain_sufficient = true;
+		break;
+	default:
+		break;
+	}
+
+	D_ASSOC("leave\n");
+}
+
+static inline void
+il_set_no_assoc(struct il_priv *il, struct ieee80211_vif *vif)
+{
+	/*
+	 * inform the ucode that there is no longer an
+	 * association and that no more packets should be
+	 * sent
+	 */
+	il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+	il->staging.assoc_id = 0;
+	il_commit_rxon(il);
+}
+
+static void
+il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+	struct il_priv *il = hw->priv;
+	unsigned long flags;
+	__le64 timestamp;
+	struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
+
+	if (!skb)
+		return;
+
+	D_MAC80211("enter\n");
+
+	lockdep_assert_held(&il->mutex);
+
+	if (!il->beacon_enabled) {
+		IL_ERR("update beacon with no beaconing enabled\n");
+		dev_kfree_skb(skb);
+		return;
+	}
+
+	spin_lock_irqsave(&il->lock, flags);
+
+	if (il->beacon_skb)
+		dev_kfree_skb(il->beacon_skb);
+
+	il->beacon_skb = skb;
+
+	timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
+	il->timestamp = le64_to_cpu(timestamp);
+
+	D_MAC80211("leave\n");
+	spin_unlock_irqrestore(&il->lock, flags);
+
+	if (!il_is_ready_rf(il)) {
+		D_MAC80211("leave - RF not ready\n");
+		return;
+	}
+
+	il->ops->post_associate(il);
+}
+
+void
+il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			struct ieee80211_bss_conf *bss_conf, u32 changes)
+{
+	struct il_priv *il = hw->priv;
+	int ret;
+
+	mutex_lock(&il->mutex);
+	D_MAC80211("enter: changes 0x%x\n", changes);
+
+	if (!il_is_alive(il)) {
+		D_MAC80211("leave - not alive\n");
+		mutex_unlock(&il->mutex);
+		return;
+	}
+
+	if (changes & BSS_CHANGED_QOS) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&il->lock, flags);
+		il->qos_data.qos_active = bss_conf->qos;
+		il_update_qos(il);
+		spin_unlock_irqrestore(&il->lock, flags);
+	}
+
+	if (changes & BSS_CHANGED_BEACON_ENABLED) {
+		/* FIXME: can we remove beacon_enabled ? */
+		if (vif->bss_conf.enable_beacon)
+			il->beacon_enabled = true;
+		else
+			il->beacon_enabled = false;
+	}
+
+	if (changes & BSS_CHANGED_BSSID) {
+		D_MAC80211("BSSID %pM\n", bss_conf->bssid);
+
+		/*
+		 * On a passive channel we wait with blocked queues to see if
+		 * there is traffic on that channel. If no frame is received
+		 * (very unlikely, since the scan detected an AP on that
+		 * channel, but theoretically possible), the mac80211
+		 * association procedure will time out and mac80211 will call
+		 * us with a NULL bssid. We have to unblock the queues in
+		 * that case.
+		 */
+		if (is_zero_ether_addr(bss_conf->bssid))
+			il_wake_queues_by_reason(il, IL_STOP_REASON_PASSIVE);
+
+		/*
+		 * If there is currently a HW scan going on in the background,
+		 * then we need to cancel it, otherwise sometimes we are not
+		 * able to authenticate (FIXME: why ?)
+		 */
+		if (il_scan_cancel_timeout(il, 100)) {
+			D_MAC80211("leave - scan abort failed\n");
+			mutex_unlock(&il->mutex);
+			return;
+		}
+
+		/* mac80211 only sets assoc when in STATION mode */
+		memcpy(il->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);
+
+		/* FIXME: currently needed in a few places */
+		memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
+	}
+
+	/*
+	 * This needs to be after setting the BSSID in case
+	 * mac80211 decides to do both changes at once because
+	 * it will invoke post_associate.
+	 */
+	if (vif->type == NL80211_IFTYPE_ADHOC && (changes & BSS_CHANGED_BEACON))
+		il_beacon_update(hw, vif);
+
+	if (changes & BSS_CHANGED_ERP_PREAMBLE) {
+		D_MAC80211("ERP_PREAMBLE %d\n", bss_conf->use_short_preamble);
+		if (bss_conf->use_short_preamble)
+			il->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+		else
+			il->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+	}
+
+	if (changes & BSS_CHANGED_ERP_CTS_PROT) {
+		D_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot);
+		if (bss_conf->use_cts_prot && il->band != NL80211_BAND_5GHZ)
+			il->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
+		else
+			il->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
+		if (bss_conf->use_cts_prot)
+			il->staging.flags |= RXON_FLG_SELF_CTS_EN;
+		else
+			il->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
+	}
+
+	if (changes & BSS_CHANGED_BASIC_RATES) {
+		/* XXX use this information
+		 *
+		 * To do that, remove code from il_set_rate() and put something
+		 * like this here:
+		 *
+		 if (A-band)
+		 il->staging.ofdm_basic_rates =
+		 bss_conf->basic_rates;
+		 else
+		 il->staging.ofdm_basic_rates =
+		 bss_conf->basic_rates >> 4;
+		 il->staging.cck_basic_rates =
+		 bss_conf->basic_rates & 0xF;
+		 */
+	}
+
+	if (changes & BSS_CHANGED_HT) {
+		il_ht_conf(il, vif);
+
+		if (il->ops->set_rxon_chain)
+			il->ops->set_rxon_chain(il);
+	}
+
+	if (changes & BSS_CHANGED_ASSOC) {
+		D_MAC80211("ASSOC %d\n", bss_conf->assoc);
+		if (bss_conf->assoc) {
+			il->timestamp = bss_conf->sync_tsf;
+
+			if (!il_is_rfkill(il))
+				il->ops->post_associate(il);
+		} else
+			il_set_no_assoc(il, vif);
+	}
+
+	if (changes && il_is_associated(il) && bss_conf->aid) {
+		D_MAC80211("Changes (%#x) while associated\n", changes);
+		ret = il_send_rxon_assoc(il);
+		if (!ret) {
+			/* Sync active_rxon with latest change. */
+			memcpy((void *)&il->active, &il->staging,
+			       sizeof(struct il_rxon_cmd));
+		}
+	}
+
+	if (changes & BSS_CHANGED_BEACON_ENABLED) {
+		if (vif->bss_conf.enable_beacon) {
+			memcpy(il->staging.bssid_addr, bss_conf->bssid,
+			       ETH_ALEN);
+			memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
+			il->ops->config_ap(il);
+		} else
+			il_set_no_assoc(il, vif);
+	}
+
+	if (changes & BSS_CHANGED_IBSS) {
+		ret = il->ops->manage_ibss_station(il, vif,
+						   bss_conf->ibss_joined);
+		if (ret)
+			IL_ERR("failed to %s IBSS station %pM\n",
+			       bss_conf->ibss_joined ? "add" : "remove",
+			       bss_conf->bssid);
+	}
+
+	D_MAC80211("leave\n");
+	mutex_unlock(&il->mutex);
+}
+EXPORT_SYMBOL(il_mac_bss_info_changed);
+
+irqreturn_t
+il_isr(int irq, void *data)
+{
+	struct il_priv *il = data;
+	u32 inta, inta_mask;
+	u32 inta_fh;
+	unsigned long flags;
+
+	if (!il)
+		return IRQ_NONE;
+
+	spin_lock_irqsave(&il->lock, flags);
+
+	/* Disable (but don't clear!) interrupts here to avoid
+	 *    back-to-back ISRs and sporadic interrupts from our NIC.
+	 * If we have something to service, the tasklet will re-enable ints.
+	 * If we *don't* have something, we'll re-enable before leaving here. */
+	inta_mask = _il_rd(il, CSR_INT_MASK);	/* just for debug */
+	_il_wr(il, CSR_INT_MASK, 0x00000000);
+
+	/* Discover which interrupts are active/pending */
+	inta = _il_rd(il, CSR_INT);
+	inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
+
+	/* Ignore interrupt if there's nothing in NIC to service.
+	 * This may be due to IRQ shared with another device,
+	 * or due to sporadic interrupts thrown from our NIC. */
+	if (!inta && !inta_fh) {
+		D_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
+		goto none;
+	}
+
+	if (inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0) {
+		/* Hardware disappeared. It might have already raised
+		 * an interrupt */
+		IL_WARN("HARDWARE GONE?? INTA == 0x%08x\n", inta);
+		goto unplugged;
+	}
+
+	D_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta, inta_mask,
+	      inta_fh);
+
+	inta &= ~CSR_INT_BIT_SCD;
+
+	/* il_irq_tasklet() will service interrupts and re-enable them */
+	if (likely(inta || inta_fh))
+		tasklet_schedule(&il->irq_tasklet);
+
+unplugged:
+	spin_unlock_irqrestore(&il->lock, flags);
+	return IRQ_HANDLED;
+
+none:
+	/* Re-enable interrupts here since we don't have anything to service;
+	 * only re-enable if they were disabled by this irq. */
+	if (test_bit(S_INT_ENABLED, &il->status))
+		il_enable_interrupts(il);
+	spin_unlock_irqrestore(&il->lock, flags);
+	return IRQ_NONE;
+}
+EXPORT_SYMBOL(il_isr);
+
+/*
+ * il_tx_cmd_protection: Set RTS/CTS. Only 3945 and 4965 share this
+ * function.
+ */
+void
+il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
+		     __le16 fc, __le32 *tx_flags)
+{
+	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
+		*tx_flags |= TX_CMD_FLG_RTS_MSK;
+		*tx_flags &= ~TX_CMD_FLG_CTS_MSK;
+		*tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+
+		if (!ieee80211_is_mgmt(fc))
+			return;
+
+		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+		case cpu_to_le16(IEEE80211_STYPE_AUTH):
+		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
+		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
+			*tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+			*tx_flags |= TX_CMD_FLG_CTS_MSK;
+			break;
+		}
+	} else if (info->control.rates[0].flags &
+		   IEEE80211_TX_RC_USE_CTS_PROTECT) {
+		*tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+		*tx_flags |= TX_CMD_FLG_CTS_MSK;
+		*tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+	}
+}
+EXPORT_SYMBOL(il_tx_cmd_protection);
diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h
new file mode 100644
index 0000000..dc6a74a
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/common.h
@@ -0,0 +1,3085 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#ifndef __il_core_h__
+#define __il_core_h__
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>		/* for struct pci_device_id */
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/wait.h>
+#include <linux/io.h>
+#include <net/mac80211.h>
+#include <net/ieee80211_radiotap.h>
+
+#include "commands.h"
+#include "csr.h"
+#include "prph.h"
+
+struct il_host_cmd;
+struct il_cmd;
+struct il_tx_queue;
+
+#define IL_ERR(f, a...) dev_err(&il->pci_dev->dev, f, ## a)
+#define IL_WARN(f, a...) dev_warn(&il->pci_dev->dev, f, ## a)
+#define IL_WARN_ONCE(f, a...) dev_warn_once(&il->pci_dev->dev, f, ## a)
+#define IL_INFO(f, a...) dev_info(&il->pci_dev->dev, f, ## a)
+
+#define RX_QUEUE_SIZE                         256
+#define RX_QUEUE_MASK                         255
+#define RX_QUEUE_SIZE_LOG                     8
+
+/*
+ * RX related structures and functions
+ */
+#define RX_FREE_BUFFERS 64
+#define RX_LOW_WATERMARK 8
+
+#define U32_PAD(n)		((4-(n))&0x3)
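+/* Bytes of padding needed to round n up to a 32-bit boundary,
+ * e.g. U32_PAD(5) == 3, U32_PAD(8) == 0 */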
+
+/* CT-KILL constants */
+#define CT_KILL_THRESHOLD_LEGACY   110	/* in Celsius */
+
+/* Default noise level to report when noise measurement is not available.
+ *   This may be because we're:
+ *   1)  Not associated (4965, no beacon stats being sent to driver)
+ *   2)  Scanning (noise measurement does not apply to associated channel)
+ *   3)  Receiving CCK (3945 delivers noise info only for OFDM frames)
+ * Use default noise value of -127 ... this is below the range of measurable
+ *   Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
+ *   Also, -127 works better than 0 when averaging frames with/without
+ *   noise info (e.g. averaging might be done in app); measured dBm values are
+ *   always negative ... using a negative value as the default keeps all
+ *   averages within an s8's (used in some apps) range of negative values. */
+#define IL_NOISE_MEAS_NOT_AVAILABLE (-127)
+
+/*
+ * RTS threshold here is total size [2347] minus 4 FCS bytes
+ * Per spec:
+ *   a value of 0 means RTS on all data/management packets
+ *   a value > max MSDU size means no RTS
+ * else RTS for data/management frames where MPDU is larger
+ *   than RTS value.
+ */
+#define DEFAULT_RTS_THRESHOLD     2347U
+#define MIN_RTS_THRESHOLD         0U
+#define MAX_RTS_THRESHOLD         2347U
+#define MAX_MSDU_SIZE		  2304U
+#define MAX_MPDU_SIZE		  2346U
+#define DEFAULT_BEACON_INTERVAL   100U
+#define	DEFAULT_SHORT_RETRY_LIMIT 7U
+#define	DEFAULT_LONG_RETRY_LIMIT  4U
+
+struct il_rx_buf {
+	dma_addr_t page_dma;
+	struct page *page;
+	struct list_head list;
+};
+
+#define rxb_addr(r) page_address(r->page)
+
+/* defined below */
+struct il_device_cmd;
+
+struct il_cmd_meta {
+	/* only for SYNC commands, iff the reply skb is wanted */
+	struct il_host_cmd *source;
+	/*
+	 * only for ASYNC commands
+	 * (which is somewhat stupid -- look at common.c for instance
+	 * which duplicates a bunch of code because the callback isn't
+	 * invoked for SYNC commands, if it were and its result passed
+	 * through it would be simpler...)
+	 */
+	void (*callback) (struct il_priv *il, struct il_device_cmd *cmd,
+			  struct il_rx_pkt *pkt);
+
+	/* The CMD_SIZE_HUGE flag bit indicates that the command
+	 * structure is stored at the end of the shared queue memory. */
+	u32 flags;
+
+	 DEFINE_DMA_UNMAP_ADDR(mapping);
+	 DEFINE_DMA_UNMAP_LEN(len);
+};
+
+/*
+ * Generic queue structure
+ *
+ * Contains common data for Rx and Tx queues
+ */
+struct il_queue {
+	int n_bd;		/* number of BDs in this queue */
+	int write_ptr;		/* first empty entry (idx) host_w */
+	int read_ptr;		/* last used entry (idx) host_r */
+	/* use for monitoring and recovering the stuck queue */
+	dma_addr_t dma_addr;	/* physical addr for BD's */
+	int n_win;		/* safe queue win */
+	u32 id;
+	int low_mark;		/* low watermark, resume queue if free
+				 * space more than this */
+	int high_mark;		/* high watermark, stop queue if free
+				 * space less than this */
+};
+
+/**
+ * struct il_tx_queue - Tx Queue for DMA
+ * @q: generic Rx/Tx queue descriptor
+ * @tfds: base of circular buffer of TFDs
+ * @cmd: array of command/TX buffer pointers
+ * @meta: array of meta data for each command/tx buffer
+ * @skbs: array of per-TFD socket buffer pointers
+ * @time_stamp: time (in jiffies) of last read_ptr change
+ * @need_update: indicates need to update read/write idx
+ * @sched_retry: indicates queue is HT AGG (aggregation) enabled
+ * @active: queue is in use
+ * @swq_id: software queue id
+ *
+ * A Tx queue consists of a circular buffer of BDs (a.k.a. TFDs, transmit
+ * frame descriptors) and the required locking structures.
+ */
+#define TFD_TX_CMD_SLOTS 256
+#define TFD_CMD_SLOTS 32
+
+struct il_tx_queue {
+	struct il_queue q;
+	void *tfds;
+	struct il_device_cmd **cmd;
+	struct il_cmd_meta *meta;
+	struct sk_buff **skbs;
+	unsigned long time_stamp;
+	u8 need_update;
+	u8 sched_retry;
+	u8 active;
+	u8 swq_id;
+};
+
+/*
+ * EEPROM access time values:
+ *
+ * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
+ * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
+ * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
+ * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
+ */
+#define IL_EEPROM_ACCESS_TIMEOUT	5000	/* uSec */
+
+#define IL_EEPROM_SEM_TIMEOUT		10	/* microseconds */
+#define IL_EEPROM_SEM_RETRY_LIMIT	1000	/* number of attempts (not time) */
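+
+/*
+ * Minimal read sketch following the timing above (the full loop lives
+ * in il_eeprom_init(); il_poll_bit(), _il_wr() and _il_rd() are
+ * declared/defined later in this header):
+ *
+ *	_il_wr(il, CSR_EEPROM_REG, addr << 1);
+ *	if (il_poll_bit(il, CSR_EEPROM_REG, CSR_EEPROM_REG_READ_VALID_MSK,
+ *			IL_EEPROM_ACCESS_TIMEOUT) >= 0)
+ *		value = (u16)(_il_rd(il, CSR_EEPROM_REG) >> 16);
+ */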
+
+/*
+ * Regulatory channel usage flags in EEPROM struct il4965_eeprom_channel.flags.
+ *
+ * IBSS and/or AP operation is allowed *only* on those channels with
+ * (VALID && IBSS && ACTIVE && !RADAR).  This restriction is in place because
+ * RADAR detection is not supported by the 4965 driver, but is a
+ * requirement for establishing a new network for legal operation on channels
+ * requiring RADAR detection or restricting ACTIVE scanning.
+ *
+ * NOTE:  "WIDE" flag does not indicate anything about "HT40" 40 MHz channels.
+ *        It only indicates that 20 MHz channel use is supported; HT40 channel
+ *        usage is indicated by a separate set of regulatory flags for each
+ *        HT40 channel pair.
+ *
+ * NOTE:  Using a channel inappropriately will result in a uCode error!
+ */
+#define IL_NUM_TX_CALIB_GROUPS 5
+enum {
+	EEPROM_CHANNEL_VALID = (1 << 0),	/* usable for this SKU/geo */
+	EEPROM_CHANNEL_IBSS = (1 << 1),	/* usable as an IBSS channel */
+	/* Bit 2 Reserved */
+	EEPROM_CHANNEL_ACTIVE = (1 << 3),	/* active scanning allowed */
+	EEPROM_CHANNEL_RADAR = (1 << 4),	/* radar detection required */
+	EEPROM_CHANNEL_WIDE = (1 << 5),	/* 20 MHz channel okay */
+	/* Bit 6 Reserved (was Narrow Channel) */
+	EEPROM_CHANNEL_DFS = (1 << 7),	/* dynamic freq selection candidate */
+};
+
+/* SKU Capabilities */
+/* 3945 only */
+#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE                (1 << 0)
+#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE                (1 << 1)
+
+/* *regulatory* channel data format in eeprom, one for each channel.
+ * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
+struct il_eeprom_channel {
+	u8 flags;		/* EEPROM_CHANNEL_* flags copied from EEPROM */
+	s8 max_power_avg;	/* max power (dBm) on this chnl, limit 31 */
+} __packed;
+
+/* 3945 Specific */
+#define EEPROM_3945_EEPROM_VERSION	(0x2f)
+
+/* 4965 has two radio transmitters (and 3 radio receivers) */
+#define EEPROM_TX_POWER_TX_CHAINS      (2)
+
+/* 4965 has room for up to 8 sets of txpower calibration data */
+#define EEPROM_TX_POWER_BANDS          (8)
+
+/* 4965 factory calibration measures txpower gain settings for
+ * each of 3 target output levels */
+#define EEPROM_TX_POWER_MEASUREMENTS   (3)
+
+/* 4965 Specific */
+/* 4965 driver does not work with txpower calibration version < 5 */
+#define EEPROM_4965_TX_POWER_VERSION    (5)
+#define EEPROM_4965_EEPROM_VERSION	(0x2f)
+#define EEPROM_4965_CALIB_VERSION_OFFSET       (2*0xB6)	/* 2 bytes */
+#define EEPROM_4965_CALIB_TXPOWER_OFFSET       (2*0xE8)	/* 48  bytes */
+#define EEPROM_4965_BOARD_REVISION             (2*0x4F)	/* 2 bytes */
+#define EEPROM_4965_BOARD_PBA                  (2*0x56+1)	/* 9 bytes */
+
+/* 2.4 GHz */
+extern const u8 il_eeprom_band_1[14];
+
+/*
+ * factory calibration data for one txpower level, on one channel,
+ * measured on one of the 2 tx chains (radio transmitter and associated
+ * antenna).  EEPROM contains:
+ *
+ * 1)  Temperature (degrees Celsius) of device when measurement was made.
+ *
+ * 2)  Gain table idx used to achieve the target measurement power.
+ *     This refers to the "well-known" gain tables (see 4965.h).
+ *
+ * 3)  Actual measured output power, in half-dBm ("34" = 17 dBm).
+ *
+ * 4)  RF power amplifier detector level measurement (not used).
+ */
+struct il_eeprom_calib_measure {
+	u8 temperature;		/* Device temperature (Celsius) */
+	u8 gain_idx;		/* Index into gain table */
+	u8 actual_pow;		/* Measured RF output power, half-dBm */
+	s8 pa_det;		/* Power amp detector level (not used) */
+} __packed;
+
+/*
+ * measurement set for one channel.  EEPROM contains:
+ *
+ * 1)  Channel number measured
+ *
+ * 2)  Measurements for each of 3 power levels for each of 2 radio transmitters
+ *     (a.k.a. "tx chains") (6 measurements altogether)
+ */
+struct il_eeprom_calib_ch_info {
+	u8 ch_num;
+	struct il_eeprom_calib_measure
+	    measurements[EEPROM_TX_POWER_TX_CHAINS]
+	    [EEPROM_TX_POWER_MEASUREMENTS];
+} __packed;
+
+/*
+ * txpower subband info.
+ *
+ * For each frequency subband, EEPROM contains the following:
+ *
+ * 1)  First and last channels within range of the subband.  "0" values
+ *     indicate that this sample set is not being used.
+ *
+ * 2)  Sample measurement sets for 2 channels close to the range endpoints.
+ */
+struct il_eeprom_calib_subband_info {
+	u8 ch_from;		/* channel number of lowest channel in subband */
+	u8 ch_to;		/* channel number of highest channel in subband */
+	struct il_eeprom_calib_ch_info ch1;
+	struct il_eeprom_calib_ch_info ch2;
+} __packed;
+
+/*
+ * txpower calibration info.  EEPROM contains:
+ *
+ * 1)  Factory-measured saturation power levels (maximum levels at which
+ *     tx power amplifier can output a signal without too much distortion).
+ *     There is one level for 2.4 GHz band and one for 5 GHz band.  These
+ *     values apply to all channels within each of the bands.
+ *
+ * 2)  Factory-measured power supply voltage level.  This is assumed to be
+ *     constant (i.e. same value applies to all channels/bands) while the
+ *     factory measurements are being made.
+ *
+ * 3)  Up to 8 sets of factory-measured txpower calibration values.
+ *     These are for different frequency ranges, since txpower gain
+ *     characteristics of the analog radio circuitry vary with frequency.
+ *
+ *     Not all sets need to be filled with data;
+ *     struct il_eeprom_calib_subband_info contains range of channels
+ *     (0 if unused) for each set of data.
+ */
+struct il_eeprom_calib_info {
+	u8 saturation_power24;	/* half-dBm (e.g. "34" = 17 dBm) */
+	u8 saturation_power52;	/* half-dBm */
+	__le16 voltage;		/* signed */
+	struct il_eeprom_calib_subband_info band_info[EEPROM_TX_POWER_BANDS];
+} __packed;
+
+/* General */
+#define EEPROM_DEVICE_ID                    (2*0x08)	/* 2 bytes */
+#define EEPROM_MAC_ADDRESS                  (2*0x15)	/* 6  bytes */
+#define EEPROM_BOARD_REVISION               (2*0x35)	/* 2  bytes */
+#define EEPROM_BOARD_PBA_NUMBER             (2*0x3B+1)	/* 9  bytes */
+#define EEPROM_VERSION                      (2*0x44)	/* 2  bytes */
+#define EEPROM_SKU_CAP                      (2*0x45)	/* 2  bytes */
+#define EEPROM_OEM_MODE                     (2*0x46)	/* 2  bytes */
+#define EEPROM_WOWLAN_MODE                  (2*0x47)	/* 2  bytes */
+#define EEPROM_RADIO_CONFIG                 (2*0x48)	/* 2  bytes */
+#define EEPROM_NUM_MAC_ADDRESS              (2*0x4C)	/* 2  bytes */
+
+/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
+#define EEPROM_RF_CFG_TYPE_MSK(x)   (x & 0x3)	/* bits 0-1   */
+#define EEPROM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3)	/* bits 2-3   */
+#define EEPROM_RF_CFG_DASH_MSK(x)   ((x >> 4)  & 0x3)	/* bits 4-5   */
+#define EEPROM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3)	/* bits 6-7   */
+#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF)	/* bits 8-11  */
+#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF)	/* bits 12-15 */
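+
+/* Decoding example (0x3201 is a made-up value): for radio_cfg = 0x3201,
+ * EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == 0x1,
+ * EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg) == 0x2 and
+ * EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg) == 0x3. */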
+
+#define EEPROM_3945_RF_CFG_TYPE_MAX  0x0
+#define EEPROM_4965_RF_CFG_TYPE_MAX  0x1
+
+/*
+ * Per-channel regulatory data.
+ *
+ * Each channel that *might* be supported by iwl has a fixed location
+ * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
+ * txpower (MSB).
+ *
+ * Entries immediately below are for 20 MHz channel width.  HT40 (40 MHz)
+ * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
+ *
+ * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+ */
+#define EEPROM_REGULATORY_SKU_ID            (2*0x60)	/* 4  bytes */
+#define EEPROM_REGULATORY_BAND_1            (2*0x62)	/* 2  bytes */
+#define EEPROM_REGULATORY_BAND_1_CHANNELS   (2*0x63)	/* 28 bytes */
+
+/*
+ * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
+ * 5.0 GHz channels 7, 8, 11, 12, 16
+ * (4915-5080MHz) (none of these is ever supported)
+ */
+#define EEPROM_REGULATORY_BAND_2            (2*0x71)	/* 2  bytes */
+#define EEPROM_REGULATORY_BAND_2_CHANNELS   (2*0x72)	/* 26 bytes */
+
+/*
+ * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
+ * (5170-5320MHz)
+ */
+#define EEPROM_REGULATORY_BAND_3            (2*0x7F)	/* 2  bytes */
+#define EEPROM_REGULATORY_BAND_3_CHANNELS   (2*0x80)	/* 24 bytes */
+
+/*
+ * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
+ * (5500-5700MHz)
+ */
+#define EEPROM_REGULATORY_BAND_4            (2*0x8C)	/* 2  bytes */
+#define EEPROM_REGULATORY_BAND_4_CHANNELS   (2*0x8D)	/* 22 bytes */
+
+/*
+ * 5.7 GHz channels 145, 149, 153, 157, 161, 165
+ * (5725-5825MHz)
+ */
+#define EEPROM_REGULATORY_BAND_5            (2*0x98)	/* 2  bytes */
+#define EEPROM_REGULATORY_BAND_5_CHANNELS   (2*0x99)	/* 12 bytes */
+
+/*
+ * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11)
+ *
+ * The channel listed is the center of the lower 20 MHz half of the channel.
+ * The overall center frequency is actually 2 channels (10 MHz) above that,
+ * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away
+ * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5,
+ * and the overall HT40 channel width centers on channel 3.
+ *
+ * NOTE:  The RXON command uses 20 MHz channel numbers to specify the
+ *        control channel to which to tune.  RXON also specifies whether the
+ *        control channel is the upper or lower half of a HT40 channel.
+ *
+ * NOTE:  4965 does not support HT40 channels on 2.4 GHz.
+ */
+#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0)	/* 14 bytes */
+
+/*
+ * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64),
+ * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161)
+ */
+#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8)	/* 22 bytes */
+
+#define EEPROM_REGULATORY_BAND_NO_HT40			(0)
+
+int il_eeprom_init(struct il_priv *il);
+void il_eeprom_free(struct il_priv *il);
+const u8 *il_eeprom_query_addr(const struct il_priv *il, size_t offset);
+u16 il_eeprom_query16(const struct il_priv *il, size_t offset);
+int il_init_channel_map(struct il_priv *il);
+void il_free_channel_map(struct il_priv *il);
+const struct il_channel_info *il_get_channel_info(const struct il_priv *il,
+						  enum nl80211_band band,
+						  u16 channel);
+
+#define IL_NUM_SCAN_RATES         (2)
+
+struct il4965_channel_tgd_info {
+	u8 type;
+	s8 max_power;
+};
+
+struct il4965_channel_tgh_info {
+	s64 last_radar_time;
+};
+
+#define IL4965_MAX_RATE (33)
+
+struct il3945_clip_group {
+	/* maximum power level to prevent clipping for each rate, derived by
+	 *   us from this band's saturation power in EEPROM */
+	const s8 clip_powers[IL_MAX_RATES];
+};
+
+/* current Tx power values to use, one for each rate for each channel.
+ * requested power is limited by:
+ * -- regulatory EEPROM limits for this channel
+ * -- hardware capabilities (clip-powers)
+ * -- spectrum management
+ * -- user preference (e.g. iwconfig)
+ * when requested power is set, base power idx must also be set. */
+struct il3945_channel_power_info {
+	struct il3945_tx_power tpc;	/* actual radio and DSP gain settings */
+	s8 power_table_idx;	/* actual (compensated) idx into gain table */
+	s8 base_power_idx;	/* gain idx for power at factory temp. */
+	s8 requested_power;	/* power (dBm) requested for this chnl/rate */
+};
+
+/* current scan Tx power values to use, one for each scan rate for each
+ * channel. */
+struct il3945_scan_power_info {
+	struct il3945_tx_power tpc;	/* actual radio and DSP gain settings */
+	s8 power_table_idx;	/* actual (compensated) idx into gain table */
+	s8 requested_power;	/* scan pwr (dBm) requested for chnl/rate */
+};
+
+/*
+ * One for each channel, holds all channel setup data
+ * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
+ *     with one another!
+ */
+struct il_channel_info {
+	struct il4965_channel_tgd_info tgd;
+	struct il4965_channel_tgh_info tgh;
+	struct il_eeprom_channel eeprom;	/* EEPROM regulatory limit */
+	struct il_eeprom_channel ht40_eeprom;	/* EEPROM regulatory limit for
+						 * HT40 channel */
+
+	u8 channel;		/* channel number */
+	u8 flags;		/* flags copied from EEPROM */
+	s8 max_power_avg;	/* (dBm) regul. eeprom, normal Tx, any rate */
+	s8 curr_txpow;		/* (dBm) regulatory/spectrum/user (not h/w) limit */
+	s8 min_power;		/* always 0 */
+	s8 scan_power;		/* (dBm) regul. eeprom, direct scans, any rate */
+
+	u8 group_idx;		/* 0-4, maps channel to group1/2/3/4/5 */
+	u8 band_idx;		/* 0-4, maps channel to band1/2/3/4/5 */
+	enum nl80211_band band;
+
+	/* HT40 channel info */
+	s8 ht40_max_power_avg;	/* (dBm) regul. eeprom, normal Tx, any rate */
+	u8 ht40_flags;		/* flags copied from EEPROM */
+	u8 ht40_extension_channel;	/* HT_IE_EXT_CHANNEL_* */
+
+	/* Radio/DSP gain settings for each "normal" data Tx rate.
+	 * These include, in addition to RF and DSP gain, a few fields for
+	 *   remembering/modifying gain settings (idxes). */
+	struct il3945_channel_power_info power_info[IL4965_MAX_RATE];
+
+	/* Radio/DSP gain settings for each scan rate, for directed scans. */
+	struct il3945_scan_power_info scan_pwr_info[IL_NUM_SCAN_RATES];
+};
+
+#define IL_TX_FIFO_BK		0	/* shared */
+#define IL_TX_FIFO_BE		1
+#define IL_TX_FIFO_VI		2	/* shared */
+#define IL_TX_FIFO_VO		3
+#define IL_TX_FIFO_UNUSED	-1
+
+/* Minimum number of queues. MAX_NUM is defined in hw specific files.
+ * Set the minimum to accommodate the 4 standard TX queues, 1 command
+ * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
+#define IL_MIN_NUM_QUEUES	10
+
+#define IL_DEFAULT_CMD_QUEUE_NUM	4
+
+#define IEEE80211_DATA_LEN              2304
+#define IEEE80211_4ADDR_LEN             30
+#define IEEE80211_HLEN                  (IEEE80211_4ADDR_LEN)
+#define IEEE80211_FRAME_LEN             (IEEE80211_DATA_LEN + IEEE80211_HLEN)
+
+struct il_frame {
+	union {
+		struct ieee80211_hdr frame;
+		struct il_tx_beacon_cmd beacon;
+		u8 raw[IEEE80211_FRAME_LEN];
+		u8 cmd[360];
+	} u;
+	struct list_head list;
+};
+
+enum {
+	CMD_SYNC = 0,
+	CMD_SIZE_NORMAL = 0,
+	CMD_NO_SKB = 0,
+	CMD_SIZE_HUGE = (1 << 0),
+	CMD_ASYNC = (1 << 1),
+	CMD_WANT_SKB = (1 << 2),
+	CMD_MAPPED = (1 << 3),
+};
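+
+/*
+ * Typical combinations (illustrative): a synchronous command whose reply
+ * page is wanted uses CMD_SYNC | CMD_WANT_SKB; the oversized scan command
+ * additionally sets CMD_SIZE_HUGE (see il_get_cmd_idx() below).
+ */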
+
+#define DEF_CMD_PAYLOAD_SIZE 320
+
+/**
+ * struct il_device_cmd
+ *
+ * For allocation of the command and tx queues, this establishes the overall
+ * size of the largest command we send to uCode, except for a scan command
+ * (which is relatively huge; space is allocated separately).
+ */
+struct il_device_cmd {
+	struct il_cmd_header hdr;	/* uCode API */
+	union {
+		u32 flags;
+		u8 val8;
+		u16 val16;
+		u32 val32;
+		struct il_tx_cmd tx;
+		u8 payload[DEF_CMD_PAYLOAD_SIZE];
+	} __packed cmd;
+} __packed;
+
+#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct il_device_cmd))
+
+struct il_host_cmd {
+	const void *data;
+	unsigned long reply_page;
+	void (*callback) (struct il_priv *il, struct il_device_cmd *cmd,
+			  struct il_rx_pkt *pkt);
+	u32 flags;
+	u16 len;
+	u8 id;
+};
+
+#define SUP_RATE_11A_MAX_NUM_CHANNELS  8
+#define SUP_RATE_11B_MAX_NUM_CHANNELS  4
+#define SUP_RATE_11G_MAX_NUM_CHANNELS  12
+
+/**
+ * struct il_rx_queue - Rx queue
+ * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
+ * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
+ * @pool: pool of rx buffers backing @queue and the rx_free/rx_used lists
+ * @queue: per-slot pointers into @pool for buffers handed to the device
+ * @read: Shared idx to newest available Rx buffer
+ * @write: Shared idx to oldest written Rx packet
+ * @free_count: Number of pre-allocated buffers in rx_free
+ * @write_actual: last write idx actually written to the device
+ * @rx_free: list of free SKBs for use
+ * @rx_used: List of Rx buffers with no SKB
+ * @need_update: flag to indicate we need to update read/write idx
+ * @rb_stts: driver's pointer to receive buffer status
+ * @rb_stts_dma: bus address of receive buffer status
+ * @lock: protects the queue and its lists
+ *
+ * NOTE:  rx_free and rx_used are used as a FIFO for il_rx_bufs
+ */
+struct il_rx_queue {
+	__le32 *bd;
+	dma_addr_t bd_dma;
+	struct il_rx_buf pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
+	struct il_rx_buf *queue[RX_QUEUE_SIZE];
+	u32 read;
+	u32 write;
+	u32 free_count;
+	u32 write_actual;
+	struct list_head rx_free;
+	struct list_head rx_used;
+	int need_update;
+	struct il_rb_status *rb_stts;
+	dma_addr_t rb_stts_dma;
+	spinlock_t lock;
+};
+
+#define IL_SUPPORTED_RATES_IE_LEN         8
+
+#define MAX_TID_COUNT        9
+
+#define IL_INVALID_RATE     0xFF
+#define IL_INVALID_VALUE    -1
+
+/**
+ * struct il_ht_agg -- aggregation status while waiting for block-ack
+ * @txq_id: Tx queue used for Tx attempt
+ * @frame_count: # frames attempted by Tx command
+ * @wait_for_ba: Expect block-ack before next Tx reply
+ * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx win
+ * @bitmap: one bit for each frame pending ACK in Tx win
+ * @rate_n_flags: Rate at which Tx was attempted
+ * @state: aggregation state (IL_AGG_OFF/ON, IL_EMPTYING_HW_QUEUE_*)
+ *
+ * If C_TX indicates that aggregation was attempted, driver must wait
+ * for block ack (N_COMPRESSED_BA).  This struct stores tx reply info
+ * until block ack arrives.
+ */
+struct il_ht_agg {
+	u16 txq_id;
+	u16 frame_count;
+	u16 wait_for_ba;
+	u16 start_idx;
+	u64 bitmap;
+	u32 rate_n_flags;
+#define IL_AGG_OFF 0
+#define IL_AGG_ON 1
+#define IL_EMPTYING_HW_QUEUE_ADDBA 2
+#define IL_EMPTYING_HW_QUEUE_DELBA 3
+	u8 state;
+};
+
+struct il_tid_data {
+	u16 seq_number;		/* 4965 only */
+	u16 tfds_in_queue;
+	struct il_ht_agg agg;
+};
+
+struct il_hw_key {
+	u32 cipher;
+	int keylen;
+	u8 keyidx;
+	u8 key[32];
+};
+
+union il_ht_rate_supp {
+	u16 rates;
+	struct {
+		u8 siso_rate;
+		u8 mimo_rate;
+	};
+};
+
+#define CFG_HT_RX_AMPDU_FACTOR_8K   (0x0)
+#define CFG_HT_RX_AMPDU_FACTOR_16K  (0x1)
+#define CFG_HT_RX_AMPDU_FACTOR_32K  (0x2)
+#define CFG_HT_RX_AMPDU_FACTOR_64K  (0x3)
+#define CFG_HT_RX_AMPDU_FACTOR_DEF  CFG_HT_RX_AMPDU_FACTOR_64K
+#define CFG_HT_RX_AMPDU_FACTOR_MAX  CFG_HT_RX_AMPDU_FACTOR_64K
+#define CFG_HT_RX_AMPDU_FACTOR_MIN  CFG_HT_RX_AMPDU_FACTOR_8K
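+
+/* Per 802.11n, A-MPDU length exponent n corresponds to a maximum A-MPDU
+ * length of 2^(13 + n) - 1 octets, hence the 8K/16K/32K/64K names. */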
+
+/*
+ * Maximum MPDU density (minimum spacing between MPDU starts) for TX
+ * aggregation:
+ * 4 - 2us density
+ * 5 - 4us density
+ * 6 - 8us density
+ * 7 - 16us density
+ */
+#define CFG_HT_MPDU_DENSITY_2USEC   (0x4)
+#define CFG_HT_MPDU_DENSITY_4USEC   (0x5)
+#define CFG_HT_MPDU_DENSITY_8USEC   (0x6)
+#define CFG_HT_MPDU_DENSITY_16USEC  (0x7)
+#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
+#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC
+#define CFG_HT_MPDU_DENSITY_MIN     (0x1)
+
+struct il_ht_config {
+	bool single_chain_sufficient;
+	enum ieee80211_smps_mode smps;	/* current smps mode */
+};
+
+/* QoS structures */
+struct il_qos_info {
+	int qos_active;
+	struct il_qosparam_cmd def_qos_parm;
+};
+
+/*
+ * Structure should be accessed with sta_lock held. When station addition
+ * is in progress (IL_STA_UCODE_INPROGRESS) it is possible to access only
+ * the commands (il_addsta_cmd and il_link_quality_cmd) without
+ * sta_lock held.
+ */
+struct il_station_entry {
+	struct il_addsta_cmd sta;
+	struct il_tid_data tid[MAX_TID_COUNT];
+	u8 used;
+	struct il_hw_key keyinfo;
+	struct il_link_quality_cmd *lq;
+};
+
+struct il_station_priv_common {
+	u8 sta_id;
+};
+
+/**
+ * struct il_vif_priv - driver's private per-interface information
+ *
+ * When mac80211 allocates a virtual interface, it can allocate
+ * space for us to put data into.
+ */
+struct il_vif_priv {
+	u8 ibss_bssid_sta_id;
+};
+
+/* one for each uCode image (inst/data, boot/init/runtime) */
+struct fw_desc {
+	void *v_addr;		/* access by driver */
+	dma_addr_t p_addr;	/* access by card's busmaster DMA */
+	u32 len;		/* bytes */
+};
+
+/* uCode file layout */
+struct il_ucode_header {
+	__le32 ver;		/* major/minor/API/serial */
+	struct {
+		__le32 inst_size;	/* bytes of runtime code */
+		__le32 data_size;	/* bytes of runtime data */
+		__le32 init_size;	/* bytes of init code */
+		__le32 init_data_size;	/* bytes of init data */
+		__le32 boot_size;	/* bytes of bootstrap code */
+		u8 data[0];	/* in same order as sizes */
+	} v1;
+};
+
+struct il4965_ibss_seq {
+	u8 mac[ETH_ALEN];
+	u16 seq_num;
+	u16 frag_num;
+	unsigned long packet_time;
+	struct list_head list;
+};
+
+struct il_sensitivity_ranges {
+	u16 min_nrg_cck;
+	u16 max_nrg_cck;
+
+	u16 nrg_th_cck;
+	u16 nrg_th_ofdm;
+
+	u16 auto_corr_min_ofdm;
+	u16 auto_corr_min_ofdm_mrc;
+	u16 auto_corr_min_ofdm_x1;
+	u16 auto_corr_min_ofdm_mrc_x1;
+
+	u16 auto_corr_max_ofdm;
+	u16 auto_corr_max_ofdm_mrc;
+	u16 auto_corr_max_ofdm_x1;
+	u16 auto_corr_max_ofdm_mrc_x1;
+
+	u16 auto_corr_max_cck;
+	u16 auto_corr_max_cck_mrc;
+	u16 auto_corr_min_cck;
+	u16 auto_corr_min_cck_mrc;
+
+	u16 barker_corr_th_min;
+	u16 barker_corr_th_min_mrc;
+	u16 nrg_th_cca;
+};
+
+#define KELVIN_TO_CELSIUS(x) ((x)-273)
+#define CELSIUS_TO_KELVIN(x) ((x)+273)
+
+/**
+ * struct il_hw_params
+ * @bcast_id: f/w broadcast station ID
+ * @max_txq_num: Max # Tx queues supported
+ * @dma_chnl_num: Number of Tx DMA/FIFO channels
+ * @scd_bc_tbls_size: size of scheduler byte count tables
+ * @tfd_size: TFD size
+ * @tx_chains_num: Number of TX chains
+ * @rx_chains_num: Number of RX chains
+ * @valid_tx_ant: usable TX antennas
+ * @valid_rx_ant: usable RX antennas
+ * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
+ * @max_rxq_log: Log-base-2 of max_rxq_size
+ * @rx_page_order: Rx buffer page order
+ * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
+ * @max_stations: max number of stations supported
+ * @ht40_channel: bitmap of bands where 40 MHz channels are possible:
+ *	BIT(NL80211_BAND_2GHZ) and/or BIT(NL80211_BAND_5GHZ)
+ * @max_beacon_itrvl: max beacon interval, in units of 1024 ms
+ * @max_inst_size: max uCode instruction memory size
+ * @max_data_size: max uCode data memory size
+ * @max_bsm_size: max bootstrap state machine (BSM) image size
+ * @ct_kill_threshold: CT-kill temperature threshold (hw-dependent units)
+ * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
+ * @sens: range of sensitivity values
+ */
+struct il_hw_params {
+	u8 bcast_id;
+	u8 max_txq_num;
+	u8 dma_chnl_num;
+	u16 scd_bc_tbls_size;
+	u32 tfd_size;
+	u8 tx_chains_num;
+	u8 rx_chains_num;
+	u8 valid_tx_ant;
+	u8 valid_rx_ant;
+	u16 max_rxq_size;
+	u16 max_rxq_log;
+	u32 rx_page_order;
+	u32 rx_wrt_ptr_reg;
+	u8 max_stations;
+	u8 ht40_channel;
+	u8 max_beacon_itrvl;	/* in 1024 ms */
+	u32 max_inst_size;
+	u32 max_data_size;
+	u32 max_bsm_size;
+	u32 ct_kill_threshold;	/* value in hw-dependent units */
+	u16 beacon_time_tsf_bits;
+	const struct il_sensitivity_ranges *sens;
+};
+
+/******************************************************************************
+ *
+ * Functions implemented in the core module and forward declared here
+ * for use by the hardware-specific sources (e.g. 3945.c, 4965.c)
+ *
+ * NOTE:  The implementations of these functions are not hardware
+ * specific, which is why they live in the core module files.
+ *
+ * Naming convention --
+ * il_             <-- part of the iwlegacy core
+ * ilXXXX_         <-- hardware specific (implemented in XXXX.c)
+ * il4965_bg_      <-- called from work queue context
+ * il4965_mac_     <-- mac80211 callback
+ *
+ ****************************************************************************/
+void il4965_update_chain_flags(struct il_priv *il);
+extern const u8 il_bcast_addr[ETH_ALEN];
+int il_queue_space(const struct il_queue *q);
+static inline int
+il_queue_used(const struct il_queue *q, int i)
+{
+	return q->write_ptr >= q->read_ptr ?
+		(i >= q->read_ptr && i < q->write_ptr) :
+		!(i < q->read_ptr && i >= q->write_ptr);
+}
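+
+/*
+ * Example: with n_bd = 8, read_ptr = 6 and write_ptr = 2 (wrapped),
+ * il_queue_used() is true for idx 6, 7, 0 and 1.
+ */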
+
+static inline u8
+il_get_cmd_idx(struct il_queue *q, u32 idx, int is_huge)
+{
+	/*
+	 * Commands needing a buffer larger than TFD_MAX_PAYLOAD_SIZE
+	 * (the init calibration result and the scan command) use the
+	 * single big buffer at the end of the command array.
+	 */
+	if (is_huge)
+		return q->n_win;	/* the extra slot past the win */
+
+	/* Otherwise, mask into the win; n_win must be a power of 2 */
+	return idx & (q->n_win - 1);
+}
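+
+/*
+ * Example: assuming the command queue window is TFD_CMD_SLOTS (32),
+ * il_get_cmd_idx(q, 37, 0) == 5, while a huge command always maps to
+ * the single extra slot at idx q->n_win.
+ */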
+
+struct il_dma_ptr {
+	dma_addr_t dma;
+	void *addr;
+	size_t size;
+};
+
+#define IL_OPERATION_MODE_AUTO     0
+#define IL_OPERATION_MODE_HT_ONLY  1
+#define IL_OPERATION_MODE_MIXED    2
+#define IL_OPERATION_MODE_20MHZ    3
+
+#define IL_TX_CRC_SIZE 4
+#define IL_TX_DELIMITER_SIZE 4
+
+#define TX_POWER_IL_ILLEGAL_VOLTAGE -10000
+
+/* Sensitivity and chain noise calibration */
+#define INITIALIZATION_VALUE		0xFFFF
+#define IL4965_CAL_NUM_BEACONS		20
+#define IL_CAL_NUM_BEACONS		16
+#define MAXIMUM_ALLOWED_PATHLOSS	15
+
+#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3
+
+#define MAX_FA_OFDM  50
+#define MIN_FA_OFDM  5
+#define MAX_FA_CCK   50
+#define MIN_FA_CCK   5
+
+#define AUTO_CORR_STEP_OFDM       1
+
+#define AUTO_CORR_STEP_CCK     3
+#define AUTO_CORR_MAX_TH_CCK   160
+
+#define NRG_DIFF               2
+#define NRG_STEP_CCK           2
+#define NRG_MARGIN             8
+#define MAX_NUMBER_CCK_NO_FA 100
+
+#define AUTO_CORR_CCK_MIN_VAL_DEF    (125)
+
+#define CHAIN_A             0
+#define CHAIN_B             1
+#define CHAIN_C             2
+#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4
+#define ALL_BAND_FILTER			0xFF00
+#define IN_BAND_FILTER			0xFF
+#define MIN_AVERAGE_NOISE_MAX_VALUE	0xFFFFFFFF
+
+#define NRG_NUM_PREV_STAT_L     20
+#define NUM_RX_CHAINS           3
+
+enum il4965_false_alarm_state {
+	IL_FA_TOO_MANY = 0,
+	IL_FA_TOO_FEW = 1,
+	IL_FA_GOOD_RANGE = 2,
+};
+
+enum il4965_chain_noise_state {
+	IL_CHAIN_NOISE_ALIVE = 0,	/* must be 0 */
+	IL_CHAIN_NOISE_ACCUMULATE,
+	IL_CHAIN_NOISE_CALIBRATED,
+	IL_CHAIN_NOISE_DONE,
+};
+
+enum ucode_type {
+	UCODE_NONE = 0,
+	UCODE_INIT,
+	UCODE_RT
+};
+
+/* Sensitivity calib data */
+struct il_sensitivity_data {
+	u32 auto_corr_ofdm;
+	u32 auto_corr_ofdm_mrc;
+	u32 auto_corr_ofdm_x1;
+	u32 auto_corr_ofdm_mrc_x1;
+	u32 auto_corr_cck;
+	u32 auto_corr_cck_mrc;
+
+	u32 last_bad_plcp_cnt_ofdm;
+	u32 last_fa_cnt_ofdm;
+	u32 last_bad_plcp_cnt_cck;
+	u32 last_fa_cnt_cck;
+
+	u32 nrg_curr_state;
+	u32 nrg_prev_state;
+	u32 nrg_value[10];
+	u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L];
+	u32 nrg_silence_ref;
+	u32 nrg_energy_idx;
+	u32 nrg_silence_idx;
+	u32 nrg_th_cck;
+	s32 nrg_auto_corr_silence_diff;
+	u32 num_in_cck_no_fa;
+	u32 nrg_th_ofdm;
+
+	u16 barker_corr_th_min;
+	u16 barker_corr_th_min_mrc;
+	u16 nrg_th_cca;
+};
+
+/* Chain noise (differential Rx gain) calib data */
+struct il_chain_noise_data {
+	u32 active_chains;
+	u32 chain_noise_a;
+	u32 chain_noise_b;
+	u32 chain_noise_c;
+	u32 chain_signal_a;
+	u32 chain_signal_b;
+	u32 chain_signal_c;
+	u16 beacon_count;
+	u8 disconn_array[NUM_RX_CHAINS];
+	u8 delta_gain_code[NUM_RX_CHAINS];
+	u8 radio_write;
+	u8 state;
+};
+
+#define	EEPROM_SEM_TIMEOUT 10	/* milliseconds */
+#define EEPROM_SEM_RETRY_LIMIT 1000	/* number of attempts (not time) */
+
+#define IL_TRAFFIC_ENTRIES	(256)
+#define IL_TRAFFIC_ENTRY_SIZE  (64)
+
+enum {
+	MEASUREMENT_READY = (1 << 0),
+	MEASUREMENT_ACTIVE = (1 << 1),
+};
+
+/* interrupt stats */
+struct isr_stats {
+	u32 hw;
+	u32 sw;
+	u32 err_code;
+	u32 sch;
+	u32 alive;
+	u32 rfkill;
+	u32 ctkill;
+	u32 wakeup;
+	u32 rx;
+	u32 handlers[IL_CN_MAX];
+	u32 tx;
+	u32 unhandled;
+};
+
+/* management stats */
+enum il_mgmt_stats {
+	MANAGEMENT_ASSOC_REQ = 0,
+	MANAGEMENT_ASSOC_RESP,
+	MANAGEMENT_REASSOC_REQ,
+	MANAGEMENT_REASSOC_RESP,
+	MANAGEMENT_PROBE_REQ,
+	MANAGEMENT_PROBE_RESP,
+	MANAGEMENT_BEACON,
+	MANAGEMENT_ATIM,
+	MANAGEMENT_DISASSOC,
+	MANAGEMENT_AUTH,
+	MANAGEMENT_DEAUTH,
+	MANAGEMENT_ACTION,
+	MANAGEMENT_MAX,
+};
+/* control stats */
+enum il_ctrl_stats {
+	CONTROL_BACK_REQ = 0,
+	CONTROL_BACK,
+	CONTROL_PSPOLL,
+	CONTROL_RTS,
+	CONTROL_CTS,
+	CONTROL_ACK,
+	CONTROL_CFEND,
+	CONTROL_CFENDACK,
+	CONTROL_MAX,
+};
+
+struct traffic_stats {
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+	u32 mgmt[MANAGEMENT_MAX];
+	u32 ctrl[CONTROL_MAX];
+	u32 data_cnt;
+	u64 data_bytes;
+#endif
+};
+
+/*
+ * Host interrupt timeout value, used when setting the interrupt
+ * coalescing timer.  CSR_INT_COALESCING is an 8-bit register counting
+ * in 32-usec units.
+ *
+ * Default interrupt coalescing timer: 64 x 32 = 2048 usecs.
+ * Default interrupt coalescing calibration timer: 16 x 32 = 512 usecs.
+ */
+#define IL_HOST_INT_TIMEOUT_MAX	(0xFF)
+#define IL_HOST_INT_TIMEOUT_DEF	(0x40)
+#define IL_HOST_INT_TIMEOUT_MIN	(0x0)
+#define IL_HOST_INT_CALIB_TIMEOUT_MAX	(0xFF)
+#define IL_HOST_INT_CALIB_TIMEOUT_DEF	(0x10)
+#define IL_HOST_INT_CALIB_TIMEOUT_MIN	(0x0)
+
+#define IL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
+
+/* TX queue watchdog timeouts in msecs */
+#define IL_DEF_WD_TIMEOUT	(2000)
+#define IL_LONG_WD_TIMEOUT	(10000)
+#define IL_MAX_WD_TIMEOUT	(120000)
+
+struct il_force_reset {
+	int reset_request_count;
+	int reset_success_count;
+	int reset_reject_count;
+	unsigned long reset_duration;
+	unsigned long last_force_reset_jiffies;
+};
+
+/* extend beacon time format bit shifting  */
+/*
+ * for _3945 devices
+ * bits 31:24 - extended
+ * bits 23:0  - interval
+ */
+#define IL3945_EXT_BEACON_TIME_POS	24
+/*
+ * for _4965 devices
+ * bits 31:22 - extended
+ * bits 21:0  - interval
+ */
+#define IL4965_EXT_BEACON_TIME_POS	22
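+
+/*
+ * Packing sketch implied by the layouts above (4965 case; the generic
+ * helpers il_usecs_to_beacons() and il_add_beacon_time() are declared
+ * later in this header):
+ *
+ *	u32 packed = (extended << IL4965_EXT_BEACON_TIME_POS) |
+ *		     (interval & ((1 << IL4965_EXT_BEACON_TIME_POS) - 1));
+ */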
+
+struct il_rxon_context {
+	struct ieee80211_vif *vif;
+};
+
+struct il_power_mgr {
+	struct il_powertable_cmd sleep_cmd;
+	struct il_powertable_cmd sleep_cmd_next;
+	int debug_sleep_level_override;
+	bool pci_pm;
+	bool ps_disabled;
+};
+
+struct il_priv {
+	struct ieee80211_hw *hw;
+	struct ieee80211_channel *ieee_channels;
+	struct ieee80211_rate *ieee_rates;
+
+	struct il_cfg *cfg;
+	const struct il_ops *ops;
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+	const struct il_debugfs_ops *debugfs_ops;
+#endif
+
+	/* temporary frame storage list */
+	struct list_head free_frames;
+	int frames_count;
+
+	enum nl80211_band band;
+	int alloc_rxb_page;
+
+	void (*handlers[IL_CN_MAX]) (struct il_priv *il,
+				     struct il_rx_buf *rxb);
+
+	struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
+
+	/* spectrum measurement report caching */
+	struct il_spectrum_notification measure_report;
+	u8 measurement_status;
+
+	/* ucode beacon time */
+	u32 ucode_beacon_time;
+	int missed_beacon_threshold;
+
+	/* track IBSS manager (last beacon) status */
+	u32 ibss_manager;
+
+	/* force reset */
+	struct il_force_reset force_reset;
+
+	/* we allocate array of il_channel_info for NIC's valid channels.
+	 *    Access via channel # using indirect idx array */
+	struct il_channel_info *channel_info;	/* channel info array */
+	u8 channel_count;	/* # of channels */
+
+	/* thermal calibration */
+	s32 temperature;	/* degrees Kelvin */
+	s32 last_temperature;
+
+	/* Scan related variables */
+	unsigned long scan_start;
+	unsigned long scan_start_tsf;
+	void *scan_cmd;
+	enum nl80211_band scan_band;
+	struct cfg80211_scan_request *scan_request;
+	struct ieee80211_vif *scan_vif;
+	u8 scan_tx_ant[NUM_NL80211_BANDS];
+	u8 mgmt_tx_ant;
+
+	/* spinlock */
+	spinlock_t lock;	/* protect general shared data */
+	spinlock_t hcmd_lock;	/* protect hcmd */
+	spinlock_t reg_lock;	/* protect hw register access */
+	struct mutex mutex;
+
+	/* basic pci-network driver stuff */
+	struct pci_dev *pci_dev;
+
+	/* pci hardware address support */
+	void __iomem *hw_base;
+	u32 hw_rev;
+	u32 hw_wa_rev;
+	u8 rev_id;
+
+	/* command queue number */
+	u8 cmd_queue;
+
+	/* max number of station keys */
+	u8 sta_key_max_num;
+
+	/* EEPROM MAC addresses */
+	struct mac_address addresses[1];
+
+	/* uCode images, save to reload in case of failure */
+	int fw_idx;		/* firmware we're trying to load */
+	u32 ucode_ver;		/* version of ucode, copy of
+				   il_ucode.ver */
+	struct fw_desc ucode_code;	/* runtime inst */
+	struct fw_desc ucode_data;	/* runtime data original */
+	struct fw_desc ucode_data_backup;	/* runtime data save/restore */
+	struct fw_desc ucode_init;	/* initialization inst */
+	struct fw_desc ucode_init_data;	/* initialization data */
+	struct fw_desc ucode_boot;	/* bootstrap inst */
+	enum ucode_type ucode_type;
+	u8 ucode_write_complete;	/* the image write is complete */
+	char firmware_name[25];
+
+	struct ieee80211_vif *vif;
+
+	struct il_qos_info qos_data;
+
+	struct {
+		bool enabled;
+		bool is_40mhz;
+		bool non_gf_sta_present;
+		u8 protection;
+		u8 extension_chan_offset;
+	} ht;
+
+	/*
+	 * We declare this const so it can only be
+	 * changed via explicit cast within the
+	 * routines that actually update the physical
+	 * hardware.
+	 */
+	const struct il_rxon_cmd active;
+	struct il_rxon_cmd staging;
+
+	struct il_rxon_time_cmd timing;
+
+	__le16 switch_channel;
+
+	/* 1st responses from initialize and runtime uCode images.
+	 * _4965's initialize alive response contains some calibration data. */
+	struct il_init_alive_resp card_alive_init;
+	struct il_alive_resp card_alive;
+
+	u16 active_rate;
+
+	u8 start_calib;
+	struct il_sensitivity_data sensitivity_data;
+	struct il_chain_noise_data chain_noise_data;
+	__le16 sensitivity_tbl[HD_TBL_SIZE];
+
+	struct il_ht_config current_ht_config;
+
+	/* Rate scaling data */
+	u8 retry_rate;
+
+	wait_queue_head_t wait_command_queue;
+
+	int activity_timer_active;
+
+	/* Rx and Tx DMA processing queues */
+	struct il_rx_queue rxq;
+	struct il_tx_queue *txq;
+	unsigned long txq_ctx_active_msk;
+	struct il_dma_ptr kw;	/* keep warm address */
+	struct il_dma_ptr scd_bc_tbls;
+
+	u32 scd_base_addr;	/* scheduler sram base address */
+
+	unsigned long status;
+
+	/* counts mgmt, ctl, and data packets */
+	struct traffic_stats tx_stats;
+	struct traffic_stats rx_stats;
+
+	/* counts interrupts */
+	struct isr_stats isr_stats;
+
+	struct il_power_mgr power_data;
+
+	/* context information */
+	u8 bssid[ETH_ALEN];	/* used only on 3945 but filled by core */
+
+	/* station table variables */
+
+	/* Note: if lock and sta_lock are needed, lock must be acquired first */
+	spinlock_t sta_lock;
+	int num_stations;
+	struct il_station_entry stations[IL_STATION_COUNT];
+	unsigned long ucode_key_table;
+
+	/* queue refcounts */
+#define IL_MAX_HW_QUEUES	32
+	unsigned long queue_stopped[BITS_TO_LONGS(IL_MAX_HW_QUEUES)];
+#define IL_STOP_REASON_PASSIVE	0
+	unsigned long stop_reason;
+	/* for each AC */
+	atomic_t queue_stop_count[4];
+
+	/* Indication if ieee80211_ops->open has been called */
+	u8 is_open;
+
+	u8 mac80211_registered;
+
+	/* eeprom -- this is in the card's little endian byte order */
+	u8 *eeprom;
+	struct il_eeprom_calib_info *calib_info;
+
+	enum nl80211_iftype iw_mode;
+
+	/* Last Rx'd beacon timestamp */
+	u64 timestamp;
+
+	union {
+#if IS_ENABLED(CONFIG_IWL3945)
+		struct {
+			void *shared_virt;
+			dma_addr_t shared_phys;
+
+			struct delayed_work thermal_periodic;
+			struct delayed_work rfkill_poll;
+
+			struct il3945_notif_stats stats;
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+			struct il3945_notif_stats accum_stats;
+			struct il3945_notif_stats delta_stats;
+			struct il3945_notif_stats max_delta;
+#endif
+
+			u32 sta_supp_rates;
+			int last_rx_rssi;	/* From Rx packet stats */
+
+			/* Rx'd packet timing information */
+			u32 last_beacon_time;
+			u64 last_tsf;
+
+			/*
+			 * each calibration channel group in the
+			 * EEPROM has a derived clip setting for
+			 * each rate.
+			 */
+			const struct il3945_clip_group clip_groups[5];
+
+		} _3945;
+#endif
+#if IS_ENABLED(CONFIG_IWL4965)
+		struct {
+			struct il_rx_phy_res last_phy_res;
+			bool last_phy_res_valid;
+			u32 ampdu_ref;
+
+			struct completion firmware_loading_complete;
+
+			/*
+			 * The chain noise reset and gain commands are the
+			 * two extra calibration commands that follow the
+			 * standard phy calibration commands.
+			 */
+			u8 phy_calib_chain_noise_reset_cmd;
+			u8 phy_calib_chain_noise_gain_cmd;
+
+			u8 key_mapping_keys;
+			struct il_wep_key wep_keys[WEP_KEYS_MAX];
+
+			struct il_notif_stats stats;
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+			struct il_notif_stats accum_stats;
+			struct il_notif_stats delta_stats;
+			struct il_notif_stats max_delta;
+#endif
+
+		} _4965;
+#endif
+	};
+
+	struct il_hw_params hw_params;
+
+	u32 inta_mask;
+
+	struct workqueue_struct *workqueue;
+
+	struct work_struct restart;
+	struct work_struct scan_completed;
+	struct work_struct rx_replenish;
+	struct work_struct abort_scan;
+
+	bool beacon_enabled;
+	struct sk_buff *beacon_skb;
+
+	struct work_struct tx_flush;
+
+	struct tasklet_struct irq_tasklet;
+
+	struct delayed_work init_alive_start;
+	struct delayed_work alive_start;
+	struct delayed_work scan_check;
+
+	/* TX Power */
+	s8 tx_power_user_lmt;
+	s8 tx_power_device_lmt;
+	s8 tx_power_next;
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+	/* debugging info */
+	u32 debug_level;	/* per device debugging will override global
+				   il_debug_level if set */
+#endif				/* CONFIG_IWLEGACY_DEBUG */
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+	/* debugfs */
+	u16 tx_traffic_idx;
+	u16 rx_traffic_idx;
+	u8 *tx_traffic;
+	u8 *rx_traffic;
+	struct dentry *debugfs_dir;
+	u32 dbgfs_sram_offset, dbgfs_sram_len;
+	bool disable_ht40;
+#endif				/* CONFIG_IWLEGACY_DEBUGFS */
+
+	struct work_struct txpower_work;
+	bool disable_sens_cal;
+	bool disable_chain_noise_cal;
+	bool disable_tx_power_cal;
+	struct work_struct run_time_calib_work;
+	struct timer_list stats_periodic;
+	struct timer_list watchdog;
+	bool hw_ready;
+
+	struct led_classdev led;
+	unsigned long blink_on, blink_off;
+	bool led_registered;
+};				/*il_priv */
+
+static inline void
+il_txq_ctx_activate(struct il_priv *il, int txq_id)
+{
+	set_bit(txq_id, &il->txq_ctx_active_msk);
+}
+
+static inline void
+il_txq_ctx_deactivate(struct il_priv *il, int txq_id)
+{
+	clear_bit(txq_id, &il->txq_ctx_active_msk);
+}
+
+static inline int
+il_is_associated(struct il_priv *il)
+{
+	return (il->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
+}
+
+static inline int
+il_is_any_associated(struct il_priv *il)
+{
+	return il_is_associated(il);
+}
+
+static inline int
+il_is_channel_valid(const struct il_channel_info *ch_info)
+{
+	if (ch_info == NULL)
+		return 0;
+	return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
+}
+
+static inline int
+il_is_channel_radar(const struct il_channel_info *ch_info)
+{
+	return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
+}
+
+static inline u8
+il_is_channel_a_band(const struct il_channel_info *ch_info)
+{
+	return ch_info->band == NL80211_BAND_5GHZ;
+}
+
+static inline int
+il_is_channel_passive(const struct il_channel_info *ch)
+{
+	return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
+}
+
+static inline int
+il_is_channel_ibss(const struct il_channel_info *ch)
+{
+	return (ch->flags & EEPROM_CHANNEL_IBSS) ? 1 : 0;
+}
+
+static inline void
+__il_free_pages(struct il_priv *il, struct page *page)
+{
+	__free_pages(page, il->hw_params.rx_page_order);
+	il->alloc_rxb_page--;
+}
+
+static inline void
+il_free_pages(struct il_priv *il, unsigned long page)
+{
+	free_pages(page, il->hw_params.rx_page_order);
+	il->alloc_rxb_page--;
+}
+
+#define IWLWIFI_VERSION "in-tree:"
+#define DRV_COPYRIGHT	"Copyright(c) 2003-2011 Intel Corporation"
+#define DRV_AUTHOR     "<ilw@linux.intel.com>"
+
+#define IL_PCI_DEVICE(dev, subdev, cfg) \
+	.vendor = PCI_VENDOR_ID_INTEL,  .device = (dev), \
+	.subvendor = PCI_ANY_ID, .subdevice = (subdev), \
+	.driver_data = (kernel_ulong_t)&(cfg)
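+
+/*
+ * Usage sketch (device IDs and cfg name hypothetical):
+ *
+ *	static const struct pci_device_id il_hw_card_ids[] = {
+ *		{ IL_PCI_DEVICE(0x4229, PCI_ANY_ID, il4965_cfg) },
+ *		{ 0 }
+ *	};
+ */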
+
+#define TIME_UNIT		1024
+
+#define IL_SKU_G       0x1
+#define IL_SKU_A       0x2
+#define IL_SKU_N       0x8
+
+#define IL_CMD(x) case x: return #x
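+
+/* IL_CMD() expands a command ID to its name inside a switch; e.g. in
+ * il_get_cmd_string() (declared below), IL_CMD(C_TX) becomes
+ * 'case C_TX: return "C_TX";'. */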
+
+/* Size of one Rx buffer in host DRAM */
+#define IL_RX_BUF_SIZE_3K (3 * 1000)	/* 3945 only */
+#define IL_RX_BUF_SIZE_4K (4 * 1024)
+#define IL_RX_BUF_SIZE_8K (8 * 1024)
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+struct il_debugfs_ops {
+	ssize_t(*rx_stats_read) (struct file *file, char __user *user_buf,
+				 size_t count, loff_t *ppos);
+	ssize_t(*tx_stats_read) (struct file *file, char __user *user_buf,
+				 size_t count, loff_t *ppos);
+	ssize_t(*general_stats_read) (struct file *file,
+				      char __user *user_buf, size_t count,
+				      loff_t *ppos);
+};
+#endif
+
+struct il_ops {
+	/* Handling TX */
+	void (*txq_update_byte_cnt_tbl) (struct il_priv *il,
+					 struct il_tx_queue *txq,
+					 u16 byte_cnt);
+	int (*txq_attach_buf_to_tfd) (struct il_priv *il,
+				      struct il_tx_queue *txq, dma_addr_t addr,
+				      u16 len, u8 reset, u8 pad);
+	void (*txq_free_tfd) (struct il_priv *il, struct il_tx_queue *txq);
+	int (*txq_init) (struct il_priv *il, struct il_tx_queue *txq);
+	/* alive notification after init uCode load */
+	void (*init_alive_start) (struct il_priv *il);
+	/* check validity of rtc data address */
+	int (*is_valid_rtc_data_addr) (u32 addr);
+	/* 1st ucode load */
+	int (*load_ucode) (struct il_priv *il);
+
+	void (*dump_nic_error_log) (struct il_priv *il);
+	int (*dump_fh) (struct il_priv *il, char **buf, bool display);
+	int (*set_channel_switch) (struct il_priv *il,
+				   struct ieee80211_channel_switch *ch_switch);
+	/* power management */
+	int (*apm_init) (struct il_priv *il);
+
+	/* tx power */
+	int (*send_tx_power) (struct il_priv *il);
+	void (*update_chain_flags) (struct il_priv *il);
+
+	/* eeprom operations */
+	int (*eeprom_acquire_semaphore) (struct il_priv *il);
+	void (*eeprom_release_semaphore) (struct il_priv *il);
+
+	int (*rxon_assoc) (struct il_priv *il);
+	int (*commit_rxon) (struct il_priv *il);
+	void (*set_rxon_chain) (struct il_priv *il);
+
+	u16(*get_hcmd_size) (u8 cmd_id, u16 len);
+	u16(*build_addsta_hcmd) (const struct il_addsta_cmd *cmd, u8 *data);
+
+	int (*request_scan) (struct il_priv *il, struct ieee80211_vif *vif);
+	void (*post_scan) (struct il_priv *il);
+	void (*post_associate) (struct il_priv *il);
+	void (*config_ap) (struct il_priv *il);
+	/* station management */
+	int (*update_bcast_stations) (struct il_priv *il);
+	int (*manage_ibss_station) (struct il_priv *il,
+				    struct ieee80211_vif *vif, bool add);
+
+	int (*send_led_cmd) (struct il_priv *il, struct il_led_cmd *led_cmd);
+};
+
+struct il_mod_params {
+	int sw_crypto;		/* def: 0 = using hardware encryption */
+	int disable_hw_scan;	/* def: 0 = use h/w scan */
+	int num_of_queues;	/* def: HW dependent */
+	int disable_11n;	/* def: 0 = 11n capabilities enabled */
+	int amsdu_size_8K;	/* def: 0 = disable 8K amsdu size */
+	int antenna;		/* def: 0 = both antennas (use diversity) */
+	int restart_fw;		/* def: 1 = restart firmware */
+};
+
+#define IL_LED_SOLID 11
+#define IL_DEF_LED_INTRVL cpu_to_le32(1000)
+
+#define IL_LED_ACTIVITY       (0<<1)
+#define IL_LED_LINK           (1<<1)
+
+/*
+ * LED mode
+ *    IL_LED_DEFAULT:  use device default
+ *    IL_LED_RF_STATE: turn LED on/off based on RF state
+ *			LED ON  = RF ON
+ *			LED OFF = RF OFF
+ *    IL_LED_BLINK:    adjust led blink rate based on blink table
+ */
+enum il_led_mode {
+	IL_LED_DEFAULT,
+	IL_LED_RF_STATE,
+	IL_LED_BLINK,
+};
+
+void il_leds_init(struct il_priv *il);
+void il_leds_exit(struct il_priv *il);
+
+/**
+ * struct il_cfg
+ * @fw_name_pre: Firmware filename prefix. The api version and extension
+ *	(.ucode) will be added to filename before loading from disk. The
+ *	filename is constructed as fw_name_pre<api>.ucode.
+ * @ucode_api_max: Highest version of uCode API supported by driver.
+ * @ucode_api_min: Lowest version of uCode API supported by driver.
+ * @scan_antennas: available antenna for scan operation
+ * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
+ *
+ * We enable the driver to be backward compatible wrt API version. The
+ * driver specifies which APIs it supports (with @ucode_api_max being the
+ * highest and @ucode_api_min the lowest). Firmware will only be loaded if
+ * it has a supported API version. The firmware's API version will be
+ * stored in @il_priv, enabling the driver to make runtime changes based
+ * on firmware version used.
+ *
+ * For example,
+ * if (IL_UCODE_API(il->ucode_ver) >= 2) {
+ *	Driver interacts with Firmware API version >= 2.
+ * } else {
+ *	Driver interacts with Firmware API version 1.
+ * }
+ *
+ * The ideal usage of this infrastructure is to treat a new ucode API
+ * release as a new hardware revision. That is, through the il_ops
+ * hooks we accommodate different command structures and flows between
+ * hardware versions as well as their API versions.
+ *
+ */
+struct il_cfg {
+	/* params specific to an individual device within a device family */
+	const char *name;
+	const char *fw_name_pre;
+	const unsigned int ucode_api_max;
+	const unsigned int ucode_api_min;
+	u8 valid_tx_ant;
+	u8 valid_rx_ant;
+	unsigned int sku;
+	u16 eeprom_ver;
+	u16 eeprom_calib_ver;
+	/* module based parameters which can be set from modprobe cmd */
+	const struct il_mod_params *mod_params;
+	/* params not likely to change within a device family */
+	struct il_base_params *base_params;
+	/* params likely to change within a device family */
+	u8 scan_rx_antennas[NUM_NL80211_BANDS];
+	enum il_led_mode led_mode;
+
+	int eeprom_size;
+	int num_of_queues;		/* def: HW dependent */
+	int num_of_ampdu_queues;	/* def: HW dependent */
+	/* for il_apm_init() */
+	u32 pll_cfg_val;
+	bool set_l0s;
+	bool use_bsm;
+
+	u16 led_compensation;
+	int chain_noise_num_beacons;
+	unsigned int wd_timeout;
+	bool temperature_kelvin;
+	const bool ucode_tracing;
+	const bool sensitivity_calib_by_driver;
+	const bool chain_noise_calib_by_driver;
+
+	const u32 regulatory_bands[7];
+};
+
+/***************************
+ *   L i b                 *
+ ***************************/
+
+int il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		   u16 queue, const struct ieee80211_tx_queue_params *params);
+int il_mac_tx_last_beacon(struct ieee80211_hw *hw);
+
+void il_set_rxon_hwcrypto(struct il_priv *il, int hw_decrypt);
+int il_check_rxon_cmd(struct il_priv *il);
+int il_full_rxon_required(struct il_priv *il);
+int il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch);
+void il_set_flags_for_band(struct il_priv *il, enum nl80211_band band,
+			   struct ieee80211_vif *vif);
+u8 il_get_single_channel_number(struct il_priv *il, enum nl80211_band band);
+void il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf);
+bool il_is_ht40_tx_allowed(struct il_priv *il,
+			   struct ieee80211_sta_ht_cap *ht_cap);
+void il_connection_init_rx_config(struct il_priv *il);
+void il_set_rate(struct il_priv *il);
+int il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
+			  u32 decrypt_res, struct ieee80211_rx_status *stats);
+void il_irq_handle_error(struct il_priv *il);
+int il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void il_mac_remove_interface(struct ieee80211_hw *hw,
+			     struct ieee80211_vif *vif);
+int il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			    enum nl80211_iftype newtype, bool newp2p);
+void il_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		  u32 queues, bool drop);
+int il_alloc_txq_mem(struct il_priv *il);
+void il_free_txq_mem(struct il_priv *il);
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+void il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len);
+#else
+static inline void
+il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len)
+{
+}
+#endif
+
+/*****************************************************
+ * Handlers
+ ***************************************************/
+void il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb);
+void il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb);
+void il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb);
+void il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb);
+
+/*****************************************************
+* RX
+******************************************************/
+void il_cmd_queue_unmap(struct il_priv *il);
+void il_cmd_queue_free(struct il_priv *il);
+int il_rx_queue_alloc(struct il_priv *il);
+void il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q);
+int il_rx_queue_space(const struct il_rx_queue *q);
+void il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb);
+
+void il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb);
+void il_recover_from_stats(struct il_priv *il, struct il_rx_pkt *pkt);
+void il_chswitch_done(struct il_priv *il, bool is_success);
+
+/*****************************************************
+* TX
+******************************************************/
+void il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq);
+int il_tx_queue_init(struct il_priv *il, u32 txq_id);
+void il_tx_queue_reset(struct il_priv *il, u32 txq_id);
+void il_tx_queue_unmap(struct il_priv *il, int txq_id);
+void il_tx_queue_free(struct il_priv *il, int txq_id);
+void il_setup_watchdog(struct il_priv *il);
+/*****************************************************
+ * TX power
+ ****************************************************/
+int il_set_tx_power(struct il_priv *il, s8 tx_power, bool force);
+
+/*******************************************************************************
+ * Rate
+ ******************************************************************************/
+
+u8 il_get_lowest_plcp(struct il_priv *il);
+
+/*******************************************************************************
+ * Scanning
+ ******************************************************************************/
+void il_init_scan_params(struct il_priv *il);
+int il_scan_cancel(struct il_priv *il);
+int il_scan_cancel_timeout(struct il_priv *il, unsigned long ms);
+void il_force_scan_end(struct il_priv *il);
+int il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		   struct ieee80211_scan_request *hw_req);
+void il_internal_short_hw_scan(struct il_priv *il);
+int il_force_reset(struct il_priv *il, bool external);
+u16 il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
+		      const u8 *ta, const u8 *ie, int ie_len, int left);
+void il_setup_rx_scan_handlers(struct il_priv *il);
+u16 il_get_active_dwell_time(struct il_priv *il, enum nl80211_band band,
+			     u8 n_probes);
+u16 il_get_passive_dwell_time(struct il_priv *il, enum nl80211_band band,
+			      struct ieee80211_vif *vif);
+void il_setup_scan_deferred_work(struct il_priv *il);
+void il_cancel_scan_deferred_work(struct il_priv *il);
+
+/* For faster active scanning, scan will move to the next channel if fewer than
+ * PLCP_QUIET_THRESH packets are heard on this channel within
+ * ACTIVE_QUIET_TIME after sending probe request.  This shortens the dwell
+ * time if it's a quiet channel (nothing responded to our probe, and there's
+ * no other traffic).
+ * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
+#define IL_ACTIVE_QUIET_TIME       cpu_to_le16(10)	/* msec */
+#define IL_PLCP_QUIET_THRESH       cpu_to_le16(1)	/* packets */
+
+#define IL_SCAN_CHECK_WATCHDOG		(HZ * 7)
+
+/*****************************************************
+ *   S e n d i n g     H o s t     C o m m a n d s   *
+ *****************************************************/
+
+const char *il_get_cmd_string(u8 cmd);
+int __must_check il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd);
+int il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd);
+int __must_check il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len,
+				 const void *data);
+int il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data,
+			  void (*callback) (struct il_priv *il,
+					    struct il_device_cmd *cmd,
+					    struct il_rx_pkt *pkt));
+
+int il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd);
+
+/*****************************************************
+ * PCI						     *
+ *****************************************************/
+
+void il_bg_watchdog(struct timer_list *t);
+u32 il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval);
+__le32 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
+			  u32 beacon_interval);
+
+#ifdef CONFIG_PM_SLEEP
+extern const struct dev_pm_ops il_pm_ops;
+
+#define IL_LEGACY_PM_OPS	(&il_pm_ops)
+
+#else /* !CONFIG_PM_SLEEP */
+
+#define IL_LEGACY_PM_OPS	NULL
+
+#endif /* !CONFIG_PM_SLEEP */
+
+/*****************************************************
+*  Error Handling Debugging
+******************************************************/
+void il4965_dump_nic_error_log(struct il_priv *il);
+#ifdef CONFIG_IWLEGACY_DEBUG
+void il_print_rx_config_cmd(struct il_priv *il);
+#else
+static inline void
+il_print_rx_config_cmd(struct il_priv *il)
+{
+}
+#endif
+
+void il_clear_isr_stats(struct il_priv *il);
+
+/*****************************************************
+*  GEOS
+******************************************************/
+int il_init_geos(struct il_priv *il);
+void il_free_geos(struct il_priv *il);
+
+/*************** DRIVER STATUS FUNCTIONS   *****/
+
+#define S_HCMD_ACTIVE	0	/* host command in progress */
+/* 1 is unused (used to be S_HCMD_SYNC_ACTIVE) */
+#define S_INT_ENABLED	2
+#define S_RFKILL	3
+#define S_CT_KILL		4
+#define S_INIT		5
+#define S_ALIVE		6
+#define S_READY		7
+#define S_TEMPERATURE	8
+#define S_GEO_CONFIGURED	9
+#define S_EXIT_PENDING	10
+#define S_STATS		12
+#define S_SCANNING		13
+#define S_SCAN_ABORTING	14
+#define S_SCAN_HW		15
+#define S_POWER_PMI	16
+#define S_FW_ERROR		17
+#define S_CHANNEL_SWITCH_PENDING 18
+
+static inline int
+il_is_ready(struct il_priv *il)
+{
+	/* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
+	 * set but EXIT_PENDING is not */
+	return test_bit(S_READY, &il->status) &&
+	    test_bit(S_GEO_CONFIGURED, &il->status) &&
+	    !test_bit(S_EXIT_PENDING, &il->status);
+}
+
+static inline int
+il_is_alive(struct il_priv *il)
+{
+	return test_bit(S_ALIVE, &il->status);
+}
+
+static inline int
+il_is_init(struct il_priv *il)
+{
+	return test_bit(S_INIT, &il->status);
+}
+
+static inline int
+il_is_rfkill(struct il_priv *il)
+{
+	return test_bit(S_RFKILL, &il->status);
+}
+
+static inline int
+il_is_ctkill(struct il_priv *il)
+{
+	return test_bit(S_CT_KILL, &il->status);
+}
+
+static inline int
+il_is_ready_rf(struct il_priv *il)
+{
+
+	if (il_is_rfkill(il))
+		return 0;
+
+	return il_is_ready(il);
+}
+
+void il_send_bt_config(struct il_priv *il);
+int il_send_stats_request(struct il_priv *il, u8 flags, bool clear);
+void il_apm_stop(struct il_priv *il);
+void _il_apm_stop(struct il_priv *il);
+
+int il_apm_init(struct il_priv *il);
+
+int il_send_rxon_timing(struct il_priv *il);
+
+static inline int
+il_send_rxon_assoc(struct il_priv *il)
+{
+	return il->ops->rxon_assoc(il);
+}
+
+static inline int
+il_commit_rxon(struct il_priv *il)
+{
+	return il->ops->commit_rxon(il);
+}
+
+static inline const struct ieee80211_supported_band *
+il_get_hw_mode(struct il_priv *il, enum nl80211_band band)
+{
+	return il->hw->wiphy->bands[band];
+}
+
+/* mac80211 handlers */
+int il_mac_config(struct ieee80211_hw *hw, u32 changed);
+void il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			     struct ieee80211_bss_conf *bss_conf, u32 changes);
+void il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
+			  __le16 fc, __le32 *tx_flags);
+
+irqreturn_t il_isr(int irq, void *data);
+
+void il_set_bit(struct il_priv *p, u32 r, u32 m);
+void il_clear_bit(struct il_priv *p, u32 r, u32 m);
+bool _il_grab_nic_access(struct il_priv *il);
+int _il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout);
+int il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout);
+u32 il_rd_prph(struct il_priv *il, u32 reg);
+void il_wr_prph(struct il_priv *il, u32 addr, u32 val);
+u32 il_read_targ_mem(struct il_priv *il, u32 addr);
+void il_write_targ_mem(struct il_priv *il, u32 addr, u32 val);
+
+static inline bool il_need_reclaim(struct il_priv *il, struct il_rx_pkt *pkt)
+{
+	/* Reclaim a command buffer only if this packet is a response
+	 * to a (driver-originated) command. If the packet (e.g. Rx frame)
+	 * originated from uCode, there is no command buffer to reclaim.
+	 * Ucode should set SEQ_RX_FRAME bit if ucode-originated, but
+	 * apparently a few don't get set; catch them here.
+	 */
+	return !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
+	       pkt->hdr.cmd != N_STATS && pkt->hdr.cmd != C_TX &&
+	       pkt->hdr.cmd != N_RX_PHY && pkt->hdr.cmd != N_RX &&
+	       pkt->hdr.cmd != N_RX_MPDU && pkt->hdr.cmd != N_COMPRESSED_BA;
+}
+
+static inline void
+_il_write8(struct il_priv *il, u32 ofs, u8 val)
+{
+	writeb(val, il->hw_base + ofs);
+}
+#define il_write8(il, ofs, val) _il_write8(il, ofs, val)
+
+static inline void
+_il_wr(struct il_priv *il, u32 ofs, u32 val)
+{
+	writel(val, il->hw_base + ofs);
+}
+
+static inline u32
+_il_rd(struct il_priv *il, u32 ofs)
+{
+	return readl(il->hw_base + ofs);
+}
+
+static inline void
+_il_clear_bit(struct il_priv *il, u32 reg, u32 mask)
+{
+	_il_wr(il, reg, _il_rd(il, reg) & ~mask);
+}
+
+static inline void
+_il_set_bit(struct il_priv *il, u32 reg, u32 mask)
+{
+	_il_wr(il, reg, _il_rd(il, reg) | mask);
+}
+
+static inline void
+_il_release_nic_access(struct il_priv *il)
+{
+	_il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+	/*
+	 * Above we read the CSR_GP_CNTRL register, which flushes any
+	 * previous writes, but we still want the write that clears the
+	 * MAC_ACCESS_REQ bit to reach the PCI bus before any other writes
+	 * scheduled on different CPUs (after we drop reg_lock).
+	 */
+	mmiowb();
+}
+
+static inline u32
+il_rd(struct il_priv *il, u32 reg)
+{
+	u32 value;
+	unsigned long reg_flags;
+
+	spin_lock_irqsave(&il->reg_lock, reg_flags);
+	_il_grab_nic_access(il);
+	value = _il_rd(il, reg);
+	_il_release_nic_access(il);
+	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+	return value;
+}
+
+static inline void
+il_wr(struct il_priv *il, u32 reg, u32 value)
+{
+	unsigned long reg_flags;
+
+	spin_lock_irqsave(&il->reg_lock, reg_flags);
+	if (likely(_il_grab_nic_access(il))) {
+		_il_wr(il, reg, value);
+		_il_release_nic_access(il);
+	}
+	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+
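+/*
+ * Peripheral register access goes through the HBUS_TARG_PRPH_* address/
+ * data register pairs.  Bits 24-25 of the address register encode the
+ * access size as (number of bytes - 1), so (3 << 24) below selects a
+ * full dword access (see the HBUS register documentation in csr.h).
+ */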
+static inline u32
+_il_rd_prph(struct il_priv *il, u32 reg)
+{
+	_il_wr(il, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
+	return _il_rd(il, HBUS_TARG_PRPH_RDAT);
+}
+
+static inline void
+_il_wr_prph(struct il_priv *il, u32 addr, u32 val)
+{
+	_il_wr(il, HBUS_TARG_PRPH_WADDR, ((addr & 0x0000FFFF) | (3 << 24)));
+	_il_wr(il, HBUS_TARG_PRPH_WDAT, val);
+}
+
+static inline void
+il_set_bits_prph(struct il_priv *il, u32 reg, u32 mask)
+{
+	unsigned long reg_flags;
+
+	spin_lock_irqsave(&il->reg_lock, reg_flags);
+	if (likely(_il_grab_nic_access(il))) {
+		_il_wr_prph(il, reg, (_il_rd_prph(il, reg) | mask));
+		_il_release_nic_access(il);
+	}
+	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+
+static inline void
+il_set_bits_mask_prph(struct il_priv *il, u32 reg, u32 bits, u32 mask)
+{
+	unsigned long reg_flags;
+
+	spin_lock_irqsave(&il->reg_lock, reg_flags);
+	if (likely(_il_grab_nic_access(il))) {
+		_il_wr_prph(il, reg, ((_il_rd_prph(il, reg) & mask) | bits));
+		_il_release_nic_access(il);
+	}
+	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+
+static inline void
+il_clear_bits_prph(struct il_priv *il, u32 reg, u32 mask)
+{
+	unsigned long reg_flags;
+	u32 val;
+
+	spin_lock_irqsave(&il->reg_lock, reg_flags);
+	if (likely(_il_grab_nic_access(il))) {
+		val = _il_rd_prph(il, reg);
+		_il_wr_prph(il, reg, (val & ~mask));
+		_il_release_nic_access(il);
+	}
+	spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+
+#define HW_KEY_DYNAMIC 0
+#define HW_KEY_DEFAULT 1
+
+#define IL_STA_DRIVER_ACTIVE BIT(0)	/* driver entry is active */
+#define IL_STA_UCODE_ACTIVE  BIT(1)	/* ucode entry is active */
+#define IL_STA_UCODE_INPROGRESS  BIT(2)	/* ucode entry is in process of
+					   being activated */
+#define IL_STA_LOCAL BIT(3)	/* station state not directed by mac80211;
+				   (this is for the IBSS BSSID stations) */
+#define IL_STA_BCAST BIT(4)	/* this station is the special bcast station */
+
+void il_restore_stations(struct il_priv *il);
+void il_clear_ucode_stations(struct il_priv *il);
+void il_dealloc_bcast_stations(struct il_priv *il);
+int il_get_free_ucode_key_idx(struct il_priv *il);
+int il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags);
+int il_add_station_common(struct il_priv *il, const u8 *addr, bool is_ap,
+			  struct ieee80211_sta *sta, u8 *sta_id_r);
+int il_remove_station(struct il_priv *il, const u8 sta_id, const u8 *addr);
+int il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		      struct ieee80211_sta *sta);
+
+u8 il_prep_station(struct il_priv *il, const u8 *addr, bool is_ap,
+		   struct ieee80211_sta *sta);
+
+int il_send_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq,
+		   u8 flags, bool init);
+
+/**
+ * il_clear_driver_stations - clear knowledge of all stations from driver
+ * @il: iwl il struct
+ *
+ * This is called during il_down() to make sure that, if we get there
+ * from a hardware restart, mac80211 will be able to reconfigure
+ * stations; if we get there via the normal down flow, the stations
+ * will already be cleared.
+ */
+static inline void
+il_clear_driver_stations(struct il_priv *il)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&il->sta_lock, flags);
+	memset(il->stations, 0, sizeof(il->stations));
+	il->num_stations = 0;
+	il->ucode_key_table = 0;
+	spin_unlock_irqrestore(&il->sta_lock, flags);
+}
+
+static inline int
+il_sta_id(struct ieee80211_sta *sta)
+{
+	if (WARN_ON(!sta))
+		return IL_INVALID_STATION;
+
+	return ((struct il_station_priv_common *)sta->drv_priv)->sta_id;
+}
+
+/**
+ * il_sta_id_or_broadcast - return sta_id or broadcast sta
+ * @il: iwl il
+ * @sta: mac80211 station
+ *
+ * In certain circumstances mac80211 passes a station pointer
+ * that may be %NULL, for example during TX or key setup. In
+ * that case, we need to use the broadcast station, so this
+ * inline wraps that pattern.
+ */
+static inline int
+il_sta_id_or_broadcast(struct il_priv *il, struct ieee80211_sta *sta)
+{
+	int sta_id;
+
+	if (!sta)
+		return il->hw_params.bcast_id;
+
+	sta_id = il_sta_id(sta);
+
+	/*
+	 * mac80211 should not be passing a partially
+	 * initialised station!
+	 */
+	WARN_ON(sta_id == IL_INVALID_STATION);
+
+	return sta_id;
+}
+
+/**
+ * il_queue_inc_wrap - increment queue idx, wrap back to beginning
+ * @idx: current idx
+ * @n_bd: total number of entries in queue (must be power of 2)
+ */
+static inline int
+il_queue_inc_wrap(int idx, int n_bd)
+{
+	return ++idx & (n_bd - 1);
+}
+
+/**
+ * il_queue_dec_wrap - decrement queue idx, wrap back to end
+ * @idx: current idx
+ * @n_bd: total number of entries in queue (must be power of 2)
+ */
+static inline int
+il_queue_dec_wrap(int idx, int n_bd)
+{
+	return --idx & (n_bd - 1);
+}
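+
+/*
+ * For example, with n_bd = 256: il_queue_inc_wrap(255, 256) wraps to 0
+ * and il_queue_dec_wrap(0, 256) wraps to 255; the bitwise AND with
+ * (n_bd - 1) is a cheap modulo for power-of-2 queue sizes.
+ */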
+
+/* TODO: Move fw_desc functions to iwl-pci.ko */
+static inline void
+il_free_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc)
+{
+	if (desc->v_addr)
+		dma_free_coherent(&pci_dev->dev, desc->len, desc->v_addr,
+				  desc->p_addr);
+	desc->v_addr = NULL;
+	desc->len = 0;
+}
+
+static inline int
+il_alloc_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc)
+{
+	if (!desc->len) {
+		desc->v_addr = NULL;
+		return -EINVAL;
+	}
+
+	desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
+					  &desc->p_addr, GFP_KERNEL);
+	return (desc->v_addr != NULL) ? 0 : -ENOMEM;
+}
+
+/*
+ * we have 8 bits used like this:
+ *
+ * 7 6 5 4 3 2 1 0
+ * | | | | | | | |
+ * | | | | | | +-+-------- AC queue (0-3)
+ * | | | | | |
+ * | +-+-+-+-+------------ HW queue ID
+ * |
+ * +---------------------- unused
+ */
+static inline void
+il_set_swq_id(struct il_tx_queue *txq, u8 ac, u8 hwq)
+{
+	BUG_ON(ac > 3);		/* only have 2 bits */
+	BUG_ON(hwq > 31);	/* only use 5 bits */
+
+	txq->swq_id = (hwq << 2) | ac;
+}
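+
+/*
+ * Illustration: il_set_swq_id(txq, 2, 5) yields swq_id = (5 << 2) | 2 =
+ * 0x16; il_wake_queue()/il_stop_queue() below recover ac = 0x16 & 3 and
+ * hwq = (0x16 >> 2) & 0x1f from it.
+ */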
+
+static inline void
+_il_wake_queue(struct il_priv *il, u8 ac)
+{
+	if (atomic_dec_return(&il->queue_stop_count[ac]) <= 0)
+		ieee80211_wake_queue(il->hw, ac);
+}
+
+static inline void
+_il_stop_queue(struct il_priv *il, u8 ac)
+{
+	if (atomic_inc_return(&il->queue_stop_count[ac]) > 0)
+		ieee80211_stop_queue(il->hw, ac);
+}
+
+static inline void
+il_wake_queue(struct il_priv *il, struct il_tx_queue *txq)
+{
+	u8 queue = txq->swq_id;
+	u8 ac = queue & 3;
+	u8 hwq = (queue >> 2) & 0x1f;
+
+	if (test_and_clear_bit(hwq, il->queue_stopped))
+		_il_wake_queue(il, ac);
+}
+
+static inline void
+il_stop_queue(struct il_priv *il, struct il_tx_queue *txq)
+{
+	u8 queue = txq->swq_id;
+	u8 ac = queue & 3;
+	u8 hwq = (queue >> 2) & 0x1f;
+
+	if (!test_and_set_bit(hwq, il->queue_stopped))
+		_il_stop_queue(il, ac);
+}
+
+static inline void
+il_wake_queues_by_reason(struct il_priv *il, int reason)
+{
+	u8 ac;
+
+	if (test_and_clear_bit(reason, &il->stop_reason))
+		for (ac = 0; ac < 4; ac++)
+			_il_wake_queue(il, ac);
+}
+
+static inline void
+il_stop_queues_by_reason(struct il_priv *il, int reason)
+{
+	u8 ac;
+
+	if (!test_and_set_bit(reason, &il->stop_reason))
+		for (ac = 0; ac < 4; ac++)
+			_il_stop_queue(il, ac);
+}
+
+#ifdef ieee80211_stop_queue
+#undef ieee80211_stop_queue
+#endif
+
+#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
+
+#ifdef ieee80211_wake_queue
+#undef ieee80211_wake_queue
+#endif
+
+#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
+
+static inline void
+il_disable_interrupts(struct il_priv *il)
+{
+	clear_bit(S_INT_ENABLED, &il->status);
+
+	/* disable interrupts from uCode/NIC to host */
+	_il_wr(il, CSR_INT_MASK, 0x00000000);
+
+	/* acknowledge/clear/reset any interrupts still pending
+	 * from uCode or flow handler (Rx/Tx DMA) */
+	_il_wr(il, CSR_INT, 0xffffffff);
+	_il_wr(il, CSR_FH_INT_STATUS, 0xffffffff);
+}
+
+static inline void
+il_enable_rfkill_int(struct il_priv *il)
+{
+	_il_wr(il, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
+}
+
+static inline void
+il_enable_interrupts(struct il_priv *il)
+{
+	set_bit(S_INT_ENABLED, &il->status);
+	_il_wr(il, CSR_INT_MASK, il->inta_mask);
+}
+
+/**
+ * il_beacon_time_mask_low - mask for the low part of the 32-bit beacon time
+ * @il: pointer to il_priv data structure
+ * @tsf_bits: number of low-order bits covered by the mask
+ */
+static inline u32
+il_beacon_time_mask_low(struct il_priv *il, u16 tsf_bits)
+{
+	return (1 << tsf_bits) - 1;
+}
+
+/**
+ * il_beacon_time_mask_high - mask for the high part of the 32-bit beacon time
+ * @il: pointer to il_priv data structure
+ * @tsf_bits: number of low-order bits excluded from the mask
+ */
+static inline u32
+il_beacon_time_mask_high(struct il_priv *il, u16 tsf_bits)
+{
+	return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
+}
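+
+/*
+ * Worked example: for tsf_bits = 22 the low mask is (1 << 22) - 1 =
+ * 0x003FFFFF and the high mask is the complementary 0xFFC00000, so the
+ * two masks split a 32-bit beacon time into its low and high parts.
+ */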
+
+/**
+ * struct il_rb_status - receive buffer status host memory mapped FH registers
+ *
+ * @closed_rb_num [0:11] - Indicates the idx of the RB which was closed
+ * @closed_fr_num [0:11] - Indicates the idx of the RX Frame which was closed
+ * @finished_rb_num [0:11] - Indicates the idx of the current RB
+ *			     in which the last frame was written to
+ * @finished_fr_num [0:11] - Indicates the idx of the RX Frame
+ *			     which was transferred
+ */
+struct il_rb_status {
+	__le16 closed_rb_num;
+	__le16 closed_fr_num;
+	__le16 finished_rb_num;
+	__le16 finished_fr_num;
+	__le32 __unused;	/* 3945 only */
+} __packed;
+
+#define TFD_QUEUE_SIZE_MAX      256
+#define TFD_QUEUE_SIZE_BC_DUP	64
+#define TFD_QUEUE_BC_SIZE	(TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
+#define IL_TX_DMA_MASK		DMA_BIT_MASK(36)
+#define IL_NUM_OF_TBS		20
+
+static inline u8
+il_get_dma_hi_addr(dma_addr_t addr)
+{
+	return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
+}
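+
+/*
+ * Example: for a 36-bit DMA address 0x812345678, il_get_dma_hi_addr()
+ * returns 0x8 (bits [35:32]); on configurations where dma_addr_t is
+ * 32 bits wide it always returns 0.
+ */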
+
+/**
+ * struct il_tfd_tb - transmit buffer descriptor within transmit frame descriptor
+ *
+ * This structure contains the DMA address and length of one Tx buffer
+ *
+ * @lo: low [31:0] portion of the DMA address of the Tx buffer; every
+ *	even entry is unaligned on a 16 bit boundary
+ * @hi_n_len: bits 0-3: [35:32] portion of the DMA address
+ *	      bits 4-15: length of the Tx buffer
+ */
+struct il_tfd_tb {
+	__le32 lo;
+	__le16 hi_n_len;
+} __packed;
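+
+/*
+ * A minimal sketch of how one TB could be filled, assuming the address
+ * fits in 36 bits and the length in 12 bits (the function name here is
+ * illustrative only):
+ *
+ *	static void tfd_set_tb(struct il_tfd_tb *tb, dma_addr_t addr, u16 len)
+ *	{
+ *		tb->lo = cpu_to_le32(addr & 0xffffffff);
+ *		tb->hi_n_len = cpu_to_le16(il_get_dma_hi_addr(addr) |
+ *					   (len << 4));
+ *	}
+ */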
+
+/**
+ * struct il_tfd
+ *
+ * Transmit Frame Descriptor (TFD)
+ *
+ * @ __reserved1[3] reserved
+ * @ num_tbs 0-4 number of active tbs
+ *	     5   reserved
+ * 	     6-7 padding (not used)
+ * @ tbs[20]	transmit frame buffer descriptors
+ * @ __pad	padding
+ *
+ * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
+ * Both driver and device share these circular buffers, each of which must be
+ * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
+ *
+ * Driver must indicate the physical address of the base of each
+ * circular buffer via the FH49_MEM_CBBC_QUEUE registers.
+ *
+ * Each TFD contains pointer/size information for up to 20 data buffers
+ * in host DRAM.  These buffers collectively contain the (one) frame described
+ * by the TFD.  Each buffer must be a single contiguous block of memory within
+ * itself, but buffers may be scattered in host DRAM.  Each buffer has max size
+ * of (4K - 4).  The device concatenates all of a TFD's buffers into a
+ * single Tx frame, up to 8 KBytes in size.
+ *
+ * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
+ */
+struct il_tfd {
+	u8 __reserved1[3];
+	u8 num_tbs;
+	struct il_tfd_tb tbs[IL_NUM_OF_TBS];
+	__le32 __pad;
+} __packed;
+
+/* PCI registers */
+#define PCI_CFG_RETRY_TIMEOUT	0x041
+
+struct il_rate_info {
+	u8 plcp;		/* uCode API:  RATE_6M_PLCP, etc. */
+	u8 plcp_siso;		/* uCode API:  RATE_SISO_6M_PLCP, etc. */
+	u8 plcp_mimo2;		/* uCode API:  RATE_MIMO2_6M_PLCP, etc. */
+	u8 ieee;		/* MAC header:  RATE_6M_IEEE, etc. */
+	u8 prev_ieee;		/* previous rate in IEEE speeds */
+	u8 next_ieee;		/* next rate in IEEE speeds */
+	u8 prev_rs;		/* previous rate used in rs algo */
+	u8 next_rs;		/* next rate used in rs algo */
+	u8 prev_rs_tgg;		/* previous rate used in TGG rs algo */
+	u8 next_rs_tgg;		/* next rate used in TGG rs algo */
+};
+
+struct il3945_rate_info {
+	u8 plcp;		/* uCode API:  RATE_6M_PLCP, etc. */
+	u8 ieee;		/* MAC header:  RATE_6M_IEEE, etc. */
+	u8 prev_ieee;		/* previous rate in IEEE speeds */
+	u8 next_ieee;		/* next rate in IEEE speeds */
+	u8 prev_rs;		/* previous rate used in rs algo */
+	u8 next_rs;		/* next rate used in rs algo */
+	u8 prev_rs_tgg;		/* previous rate used in TGG rs algo */
+	u8 next_rs_tgg;		/* next rate used in TGG rs algo */
+	u8 table_rs_idx;	/* idx in rate scale table cmd */
+	u8 prev_table_rs;	/* prev in rate table cmd */
+};
+
+/*
+ * These serve as idxes into
+ * struct il_rate_info il_rates[RATE_COUNT];
+ */
+enum {
+	RATE_1M_IDX = 0,
+	RATE_2M_IDX,
+	RATE_5M_IDX,
+	RATE_11M_IDX,
+	RATE_6M_IDX,
+	RATE_9M_IDX,
+	RATE_12M_IDX,
+	RATE_18M_IDX,
+	RATE_24M_IDX,
+	RATE_36M_IDX,
+	RATE_48M_IDX,
+	RATE_54M_IDX,
+	RATE_60M_IDX,
+	RATE_COUNT,
+	RATE_COUNT_LEGACY = RATE_COUNT - 1,	/* Excluding 60M */
+	RATE_COUNT_3945 = RATE_COUNT - 1,
+	RATE_INVM_IDX = RATE_COUNT,
+	RATE_INVALID = RATE_COUNT,
+};
+
+enum {
+	RATE_6M_IDX_TBL = 0,
+	RATE_9M_IDX_TBL,
+	RATE_12M_IDX_TBL,
+	RATE_18M_IDX_TBL,
+	RATE_24M_IDX_TBL,
+	RATE_36M_IDX_TBL,
+	RATE_48M_IDX_TBL,
+	RATE_54M_IDX_TBL,
+	RATE_1M_IDX_TBL,
+	RATE_2M_IDX_TBL,
+	RATE_5M_IDX_TBL,
+	RATE_11M_IDX_TBL,
+	RATE_INVM_IDX_TBL = RATE_INVM_IDX - 1,
+};
+
+enum {
+	IL_FIRST_OFDM_RATE = RATE_6M_IDX,
+	IL39_LAST_OFDM_RATE = RATE_54M_IDX,
+	IL_LAST_OFDM_RATE = RATE_60M_IDX,
+	IL_FIRST_CCK_RATE = RATE_1M_IDX,
+	IL_LAST_CCK_RATE = RATE_11M_IDX,
+};
+
+/* #define vs. enum to keep from defaulting to 'large integer' */
+#define	RATE_6M_MASK   (1 << RATE_6M_IDX)
+#define	RATE_9M_MASK   (1 << RATE_9M_IDX)
+#define	RATE_12M_MASK  (1 << RATE_12M_IDX)
+#define	RATE_18M_MASK  (1 << RATE_18M_IDX)
+#define	RATE_24M_MASK  (1 << RATE_24M_IDX)
+#define	RATE_36M_MASK  (1 << RATE_36M_IDX)
+#define	RATE_48M_MASK  (1 << RATE_48M_IDX)
+#define	RATE_54M_MASK  (1 << RATE_54M_IDX)
+#define RATE_60M_MASK  (1 << RATE_60M_IDX)
+#define	RATE_1M_MASK   (1 << RATE_1M_IDX)
+#define	RATE_2M_MASK   (1 << RATE_2M_IDX)
+#define	RATE_5M_MASK   (1 << RATE_5M_IDX)
+#define	RATE_11M_MASK  (1 << RATE_11M_IDX)
+
+/* uCode API values for legacy bit rates, both OFDM and CCK */
+enum {
+	RATE_6M_PLCP = 13,
+	RATE_9M_PLCP = 15,
+	RATE_12M_PLCP = 5,
+	RATE_18M_PLCP = 7,
+	RATE_24M_PLCP = 9,
+	RATE_36M_PLCP = 11,
+	RATE_48M_PLCP = 1,
+	RATE_54M_PLCP = 3,
+	RATE_60M_PLCP = 3,	/*FIXME:RS:should be removed */
+	RATE_1M_PLCP = 10,
+	RATE_2M_PLCP = 20,
+	RATE_5M_PLCP = 55,
+	RATE_11M_PLCP = 110,
+	/*FIXME:RS:add RATE_LEGACY_INVM_PLCP = 0, */
+};
+
+/* uCode API values for OFDM high-throughput (HT) bit rates */
+enum {
+	RATE_SISO_6M_PLCP = 0,
+	RATE_SISO_12M_PLCP = 1,
+	RATE_SISO_18M_PLCP = 2,
+	RATE_SISO_24M_PLCP = 3,
+	RATE_SISO_36M_PLCP = 4,
+	RATE_SISO_48M_PLCP = 5,
+	RATE_SISO_54M_PLCP = 6,
+	RATE_SISO_60M_PLCP = 7,
+	RATE_MIMO2_6M_PLCP = 0x8,
+	RATE_MIMO2_12M_PLCP = 0x9,
+	RATE_MIMO2_18M_PLCP = 0xa,
+	RATE_MIMO2_24M_PLCP = 0xb,
+	RATE_MIMO2_36M_PLCP = 0xc,
+	RATE_MIMO2_48M_PLCP = 0xd,
+	RATE_MIMO2_54M_PLCP = 0xe,
+	RATE_MIMO2_60M_PLCP = 0xf,
+	RATE_SISO_INVM_PLCP,
+	RATE_MIMO2_INVM_PLCP = RATE_SISO_INVM_PLCP,
+};
+
+/* MAC header values for bit rates */
+enum {
+	RATE_6M_IEEE = 12,
+	RATE_9M_IEEE = 18,
+	RATE_12M_IEEE = 24,
+	RATE_18M_IEEE = 36,
+	RATE_24M_IEEE = 48,
+	RATE_36M_IEEE = 72,
+	RATE_48M_IEEE = 96,
+	RATE_54M_IEEE = 108,
+	RATE_60M_IEEE = 120,
+	RATE_1M_IEEE = 2,
+	RATE_2M_IEEE = 4,
+	RATE_5M_IEEE = 11,
+	RATE_11M_IEEE = 22,
+};
+
+#define IL_CCK_BASIC_RATES_MASK    \
+	(RATE_1M_MASK          | \
+	RATE_2M_MASK)
+
+#define IL_CCK_RATES_MASK          \
+	(IL_CCK_BASIC_RATES_MASK  | \
+	RATE_5M_MASK          | \
+	RATE_11M_MASK)
+
+#define IL_OFDM_BASIC_RATES_MASK   \
+	(RATE_6M_MASK         | \
+	RATE_12M_MASK         | \
+	RATE_24M_MASK)
+
+#define IL_OFDM_RATES_MASK         \
+	(IL_OFDM_BASIC_RATES_MASK | \
+	RATE_9M_MASK          | \
+	RATE_18M_MASK         | \
+	RATE_36M_MASK         | \
+	RATE_48M_MASK         | \
+	RATE_54M_MASK)
+
+#define IL_BASIC_RATES_MASK         \
+	(IL_OFDM_BASIC_RATES_MASK | \
+	 IL_CCK_BASIC_RATES_MASK)
+
+#define RATES_MASK ((1 << RATE_COUNT) - 1)
+#define RATES_MASK_3945 ((1 << RATE_COUNT_3945) - 1)
+
+#define IL_INVALID_VALUE    -1
+
+#define IL_MIN_RSSI_VAL                 -100
+#define IL_MAX_RSSI_VAL                    0
+
+/* These values specify how many Tx frame attempts are made before
+ * searching for a new modulation mode */
+#define IL_LEGACY_FAILURE_LIMIT	160
+#define IL_LEGACY_SUCCESS_LIMIT	480
+#define IL_LEGACY_TBL_COUNT		160
+
+#define IL_NONE_LEGACY_FAILURE_LIMIT	400
+#define IL_NONE_LEGACY_SUCCESS_LIMIT	4500
+#define IL_NONE_LEGACY_TBL_COUNT	1500
+
+/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
+#define IL_RS_GOOD_RATIO		12800	/* 100% */
+#define RATE_SCALE_SWITCH		10880	/*  85% */
+#define RATE_HIGH_TH		10880	/*  85% */
+#define RATE_INCREASE_TH		6400	/*  50% */
+#define RATE_DECREASE_TH		1920	/*  15% */
+
+/* possible actions when in legacy mode */
+#define IL_LEGACY_SWITCH_ANTENNA1      0
+#define IL_LEGACY_SWITCH_ANTENNA2      1
+#define IL_LEGACY_SWITCH_SISO          2
+#define IL_LEGACY_SWITCH_MIMO2_AB      3
+#define IL_LEGACY_SWITCH_MIMO2_AC      4
+#define IL_LEGACY_SWITCH_MIMO2_BC      5
+
+/* possible actions when in siso mode */
+#define IL_SISO_SWITCH_ANTENNA1        0
+#define IL_SISO_SWITCH_ANTENNA2        1
+#define IL_SISO_SWITCH_MIMO2_AB        2
+#define IL_SISO_SWITCH_MIMO2_AC        3
+#define IL_SISO_SWITCH_MIMO2_BC        4
+#define IL_SISO_SWITCH_GI              5
+
+/* possible actions when in mimo mode */
+#define IL_MIMO2_SWITCH_ANTENNA1       0
+#define IL_MIMO2_SWITCH_ANTENNA2       1
+#define IL_MIMO2_SWITCH_SISO_A         2
+#define IL_MIMO2_SWITCH_SISO_B         3
+#define IL_MIMO2_SWITCH_SISO_C         4
+#define IL_MIMO2_SWITCH_GI             5
+
+#define IL_MAX_SEARCH IL_MIMO2_SWITCH_GI
+
+#define IL_ACTION_LIMIT		3	/* # possible actions */
+
+#define LQ_SIZE		2	/* 2 mode tables:  "Active" and "Search" */
+
+/* load per tid defines for A-MPDU activation */
+#define IL_AGG_TPT_THREHOLD	0
+#define IL_AGG_LOAD_THRESHOLD	10
+#define IL_AGG_ALL_TID		0xff
+#define TID_QUEUE_CELL_SPACING	50	/* ms */
+#define TID_QUEUE_MAX_SIZE	20
+#define TID_ROUND_VALUE		5	/* ms */
+#define TID_MAX_LOAD_COUNT	8
+
+#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
+#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
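+
+/*
+ * TIME_WRAP_AROUND() returns the elapsed time from x to y even when the
+ * counter has wrapped: e.g. on a 32-bit counter with x = 0xFFFFFFFE and
+ * y = 2 it yields (0 - x) + y = 4 instead of a bogus negative delta.
+ */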
+
+extern const struct il_rate_info il_rates[RATE_COUNT];
+
+enum il_table_type {
+	LQ_NONE,
+	LQ_G,			/* legacy types */
+	LQ_A,
+	LQ_SISO,		/* high-throughput types */
+	LQ_MIMO2,
+	LQ_MAX,
+};
+
+#define is_legacy(tbl) ((tbl) == LQ_G || (tbl) == LQ_A)
+#define is_siso(tbl) ((tbl) == LQ_SISO)
+#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
+#define is_mimo(tbl) (is_mimo2(tbl))
+#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
+#define is_a_band(tbl) ((tbl) == LQ_A)
+#define is_g_and(tbl) ((tbl) == LQ_G)
+
+#define	ANT_NONE	0x0
+#define	ANT_A		BIT(0)
+#define	ANT_B		BIT(1)
+#define	ANT_AB		(ANT_A | ANT_B)
+#define ANT_C		BIT(2)
+#define	ANT_AC		(ANT_A | ANT_C)
+#define ANT_BC		(ANT_B | ANT_C)
+#define ANT_ABC		(ANT_AB | ANT_C)
+
+#define IL_MAX_MCS_DISPLAY_SIZE	12
+
+struct il_rate_mcs_info {
+	char mbps[IL_MAX_MCS_DISPLAY_SIZE];
+	char mcs[IL_MAX_MCS_DISPLAY_SIZE];
+};
+
+/**
+ * struct il_rate_scale_data -- tx success history for one rate
+ */
+struct il_rate_scale_data {
+	u64 data;		/* bitmap of successful frames */
+	s32 success_counter;	/* number of frames successful */
+	s32 success_ratio;	/* per-cent * 128  */
+	s32 counter;		/* number of frames attempted */
+	s32 average_tpt;	/* success ratio * expected throughput */
+	unsigned long stamp;
+};
+
+/**
+ * struct il_scale_tbl_info -- tx params and success history for all rates
+ *
+ * There are two of these in struct il_lq_sta,
+ * one for "active", and one for "search".
+ */
+struct il_scale_tbl_info {
+	enum il_table_type lq_type;
+	u8 ant_type;
+	u8 is_SGI;		/* 1 = short guard interval */
+	u8 is_ht40;		/* 1 = 40 MHz channel width */
+	u8 is_dup;		/* 1 = duplicated data streams */
+	u8 action;		/* change modulation; IL_[LEGACY/SISO/MIMO]_SWITCH_* */
+	u8 max_search;		/* maximum number of tables we can search */
+	s32 *expected_tpt;	/* throughput metrics; expected_tpt_G, etc. */
+	u32 current_rate;	/* rate_n_flags, uCode API format */
+	struct il_rate_scale_data win[RATE_COUNT];	/* rate histories */
+};
+
+struct il_traffic_load {
+	unsigned long time_stamp;	/* age of the oldest stats */
+	u32 packet_count[TID_QUEUE_MAX_SIZE];	/* packet count in this time
+						 * slice */
+	u32 total;		/* total num of packets during the
+				 * last TID_MAX_TIME_DIFF */
+	u8 queue_count;		/* number of queues that have
+				 * been used since the last cleanup */
+	u8 head;		/* start of the circular buffer */
+};
+
+/**
+ * struct il_lq_sta -- driver's rate scaling private structure
+ *
+ * Pointer to this gets passed back and forth between driver and mac80211.
+ */
+struct il_lq_sta {
+	u8 active_tbl;		/* idx of active table, range 0-1 */
+	u8 enable_counter;	/* indicates HT mode */
+	u8 stay_in_tbl;		/* 1: disallow, 0: allow search for new mode */
+	u8 search_better_tbl;	/* 1: currently trying alternate mode */
+	s32 last_tpt;
+
+	/* The following determine when to search for a new mode */
+	u32 table_count_limit;
+	u32 max_failure_limit;	/* # failed frames before new search */
+	u32 max_success_limit;	/* # successful frames before new search */
+	u32 table_count;
+	u32 total_failed;	/* total failed frames, any/all rates */
+	u32 total_success;	/* total successful frames, any/all rates */
+	u64 flush_timer;	/* time staying in mode before new search */
+
+	u8 action_counter;	/* # mode-switch actions tried */
+	u8 is_green;
+	u8 is_dup;
+	enum nl80211_band band;
+
+	/* The following are bitmaps of rates; RATE_6M_MASK, etc. */
+	u32 supp_rates;
+	u16 active_legacy_rate;
+	u16 active_siso_rate;
+	u16 active_mimo2_rate;
+	s8 max_rate_idx;	/* Max rate set by user */
+	u8 missed_rate_counter;
+
+	struct il_link_quality_cmd lq;
+	struct il_scale_tbl_info lq_info[LQ_SIZE];	/* "active", "search" */
+	struct il_traffic_load load[TID_MAX_LOAD_COUNT];
+	u8 tx_agg_tid_en;
+#ifdef CONFIG_MAC80211_DEBUGFS
+	struct dentry *rs_sta_dbgfs_scale_table_file;
+	struct dentry *rs_sta_dbgfs_stats_table_file;
+	struct dentry *rs_sta_dbgfs_rate_scale_data_file;
+	struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
+	u32 dbg_fixed_rate;
+#endif
+	struct il_priv *drv;
+
+	/* used to be in sta_info */
+	int last_txrate_idx;
+	/* last tx rate_n_flags */
+	u32 last_rate_n_flags;
+	/* packets destined for this STA are aggregated */
+	u8 is_agg;
+};
+
+/*
+ * il_station_priv: Driver's private station information
+ *
+ * When mac80211 creates a station it reserves some space (hw->sta_data_size)
+ * in the structure for use by the driver. This structure is placed in that
+ * space.
+ *
+ * The common struct MUST be first because it is shared between
+ * 3945 and 4965!
+ */
+struct il_station_priv {
+	struct il_station_priv_common common;
+	struct il_lq_sta lq_sta;
+	atomic_t pending_frames;
+	bool client;
+	bool asleep;
+};
+
+static inline u8
+il4965_num_of_ant(u8 m)
+{
+	return !!(m & ANT_A) + !!(m & ANT_B) + !!(m & ANT_C);
+}
+
+static inline u8
+il4965_first_antenna(u8 mask)
+{
+	if (mask & ANT_A)
+		return ANT_A;
+	if (mask & ANT_B)
+		return ANT_B;
+	return ANT_C;
+}
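+
+/*
+ * For example, il4965_num_of_ant(ANT_ABC) is 3, and
+ * il4965_first_antenna(ANT_BC) returns ANT_B since ANT_A is not set.
+ */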
+
+/**
+ * il3945_rate_scale_init - Initialize the rate scale table based on assoc info
+ *
+ * The specific throughput table used is based on the type of network
+ * the station is associated with, including A, B, G, and G w/ TGG protection
+ */
+void il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
+
+/* Initialize station's rate scaling information after adding station */
+void il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
+			 u8 sta_id);
+void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
+			 u8 sta_id);
+
+/**
+ * il_rate_control_register - Register the rate control algorithm callbacks
+ *
+ * Since the rate control algorithm is hardware specific, there is no need
+ * or reason to place it as a stand-alone module.  The driver can call
+ * il_rate_control_register in order to register the rate control callbacks
+ * with the mac80211 subsystem.  This should be performed prior to calling
+ * ieee80211_register_hw.
+ */
+int il4965_rate_control_register(void);
+int il3945_rate_control_register(void);
+
+/**
+ * il_rate_control_unregister - Unregister the rate control callbacks
+ *
+ * This should be called after calling ieee80211_unregister_hw, but before
+ * the driver is unloaded.
+ */
+void il4965_rate_control_unregister(void);
+void il3945_rate_control_unregister(void);
+
+int il_power_update_mode(struct il_priv *il, bool force);
+void il_power_initialize(struct il_priv *il);
+
+extern u32 il_debug_level;
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+/*
+ * il_get_debug_level: Return active debug level for device
+ *
+ * Using sysfs it is possible to set a per-device debug level. This debug
+ * level will be used if set; otherwise the global debug level, which can
+ * be set via module parameter, is used.
+ */
+static inline u32
+il_get_debug_level(struct il_priv *il)
+{
+	if (il->debug_level)
+		return il->debug_level;
+	else
+		return il_debug_level;
+}
+#else
+static inline u32
+il_get_debug_level(struct il_priv *il)
+{
+	return il_debug_level;
+}
+#endif
+
+#define il_print_hex_error(il, p, len)					\
+do {									\
+	print_hex_dump(KERN_ERR, "iwl data: ",				\
+		       DUMP_PREFIX_OFFSET, 16, 1, p, len, 1);		\
+} while (0)
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+#define IL_DBG(level, fmt, args...)					\
+do {									\
+	if (il_get_debug_level(il) & level)				\
+		dev_err(&il->hw->wiphy->dev, "%c %s " fmt,		\
+			in_interrupt() ? 'I' : 'U', __func__ , ##args); \
+} while (0)
+
+#define il_print_hex_dump(il, level, p, len)				\
+do {									\
+	if (il_get_debug_level(il) & level)				\
+		print_hex_dump(KERN_DEBUG, "iwl data: ",		\
+			       DUMP_PREFIX_OFFSET, 16, 1, p, len, 1);	\
+} while (0)
+
+#else
+#define IL_DBG(level, fmt, args...)
+static inline void
+il_print_hex_dump(struct il_priv *il, int level, const void *p, u32 len)
+{
+}
+#endif /* CONFIG_IWLEGACY_DEBUG */
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+int il_dbgfs_register(struct il_priv *il, const char *name);
+void il_dbgfs_unregister(struct il_priv *il);
+#else
+static inline int
+il_dbgfs_register(struct il_priv *il, const char *name)
+{
+	return 0;
+}
+
+static inline void
+il_dbgfs_unregister(struct il_priv *il)
+{
+}
+#endif /* CONFIG_IWLEGACY_DEBUGFS */
+
+/*
+ * To use the debug system:
+ *
+ * If you are defining a new debug classification, simply add it to the #define
+ * list here in the form of
+ *
+ * #define IL_DL_xxxx VALUE
+ *
+ * where xxxx should be the name of the classification (for example, WEP).
+ *
+ * You then need to either add a IL_xxxx_DEBUG() macro definition for your
+ * classification, or use IL_DBG(IL_DL_xxxx, ...) whenever you want
+ * to send output to that classification.
+ *
+ * The active debug levels can be accessed via files
+ *
+ *	/sys/module/iwl4965/parameters/debug
+ *	/sys/module/iwl3945/parameters/debug
+ *	/sys/class/net/wlan0/device/debug_level
+ *
+ * when CONFIG_IWLEGACY_DEBUG=y.
+ */
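+
+/*
+ * For instance, to trace only host commands and scanning at runtime,
+ * one could combine IL_DL_HCMD (1 << 2) and IL_DL_SCAN (1 << 11):
+ *
+ *	echo 0x804 > /sys/class/net/wlan0/device/debug_level
+ */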
+
+/* 0x0000000F - 0x00000001 */
+#define IL_DL_INFO		(1 << 0)
+#define IL_DL_MAC80211		(1 << 1)
+#define IL_DL_HCMD		(1 << 2)
+#define IL_DL_STATE		(1 << 3)
+/* 0x000000F0 - 0x00000010 */
+#define IL_DL_MACDUMP		(1 << 4)
+#define IL_DL_HCMD_DUMP		(1 << 5)
+#define IL_DL_EEPROM		(1 << 6)
+#define IL_DL_RADIO		(1 << 7)
+/* 0x00000F00 - 0x00000100 */
+#define IL_DL_POWER		(1 << 8)
+#define IL_DL_TEMP		(1 << 9)
+#define IL_DL_NOTIF		(1 << 10)
+#define IL_DL_SCAN		(1 << 11)
+/* 0x0000F000 - 0x00001000 */
+#define IL_DL_ASSOC		(1 << 12)
+#define IL_DL_DROP		(1 << 13)
+#define IL_DL_TXPOWER		(1 << 14)
+#define IL_DL_AP		(1 << 15)
+/* 0x000F0000 - 0x00010000 */
+#define IL_DL_FW		(1 << 16)
+#define IL_DL_RF_KILL		(1 << 17)
+#define IL_DL_FW_ERRORS		(1 << 18)
+#define IL_DL_LED		(1 << 19)
+/* 0x00F00000 - 0x00100000 */
+#define IL_DL_RATE		(1 << 20)
+#define IL_DL_CALIB		(1 << 21)
+#define IL_DL_WEP		(1 << 22)
+#define IL_DL_TX		(1 << 23)
+/* 0x0F000000 - 0x01000000 */
+#define IL_DL_RX		(1 << 24)
+#define IL_DL_ISR		(1 << 25)
+#define IL_DL_HT		(1 << 26)
+/* 0xF0000000 - 0x10000000 */
+#define IL_DL_11H		(1 << 28)
+#define IL_DL_STATS		(1 << 29)
+#define IL_DL_TX_REPLY		(1 << 30)
+#define IL_DL_QOS		(1 << 31)
+
+#define D_INFO(f, a...)		IL_DBG(IL_DL_INFO, f, ## a)
+#define D_MAC80211(f, a...)	IL_DBG(IL_DL_MAC80211, f, ## a)
+#define D_MACDUMP(f, a...)	IL_DBG(IL_DL_MACDUMP, f, ## a)
+#define D_TEMP(f, a...)		IL_DBG(IL_DL_TEMP, f, ## a)
+#define D_SCAN(f, a...)		IL_DBG(IL_DL_SCAN, f, ## a)
+#define D_RX(f, a...)		IL_DBG(IL_DL_RX, f, ## a)
+#define D_TX(f, a...)		IL_DBG(IL_DL_TX, f, ## a)
+#define D_ISR(f, a...)		IL_DBG(IL_DL_ISR, f, ## a)
+#define D_LED(f, a...)		IL_DBG(IL_DL_LED, f, ## a)
+#define D_WEP(f, a...)		IL_DBG(IL_DL_WEP, f, ## a)
+#define D_HC(f, a...)		IL_DBG(IL_DL_HCMD, f, ## a)
+#define D_HC_DUMP(f, a...)	IL_DBG(IL_DL_HCMD_DUMP, f, ## a)
+#define D_EEPROM(f, a...)	IL_DBG(IL_DL_EEPROM, f, ## a)
+#define D_CALIB(f, a...)	IL_DBG(IL_DL_CALIB, f, ## a)
+#define D_FW(f, a...)		IL_DBG(IL_DL_FW, f, ## a)
+#define D_RF_KILL(f, a...)	IL_DBG(IL_DL_RF_KILL, f, ## a)
+#define D_DROP(f, a...)		IL_DBG(IL_DL_DROP, f, ## a)
+#define D_AP(f, a...)		IL_DBG(IL_DL_AP, f, ## a)
+#define D_TXPOWER(f, a...)	IL_DBG(IL_DL_TXPOWER, f, ## a)
+#define D_RATE(f, a...)		IL_DBG(IL_DL_RATE, f, ## a)
+#define D_NOTIF(f, a...)	IL_DBG(IL_DL_NOTIF, f, ## a)
+#define D_ASSOC(f, a...)	IL_DBG(IL_DL_ASSOC, f, ## a)
+#define D_HT(f, a...)		IL_DBG(IL_DL_HT, f, ## a)
+#define D_STATS(f, a...)	IL_DBG(IL_DL_STATS, f, ## a)
+#define D_TX_REPLY(f, a...)	IL_DBG(IL_DL_TX_REPLY, f, ## a)
+#define D_QOS(f, a...)		IL_DBG(IL_DL_QOS, f, ## a)
+#define D_RADIO(f, a...)	IL_DBG(IL_DL_RADIO, f, ## a)
+#define D_POWER(f, a...)	IL_DBG(IL_DL_POWER, f, ## a)
+#define D_11H(f, a...)		IL_DBG(IL_DL_11H, f, ## a)
+
+#endif /* __il_core_h__ */
diff --git a/drivers/net/wireless/intel/iwlegacy/csr.h b/drivers/net/wireless/intel/iwlegacy/csr.h
new file mode 100644
index 0000000..9138e15
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/csr.h
@@ -0,0 +1,419 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __il_csr_h__
+#define __il_csr_h__
+/*
+ * CSR (control and status registers)
+ *
+ * CSR registers are mapped directly into PCI bus space, and are accessible
+ * whenever platform supplies power to device, even when device is in
+ * low power states due to driver-invoked device resets
+ * (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes.
+ *
+ * Use _il_wr() and _il_rd() family to access these registers;
+ * these provide simple PCI bus access, without waking up the MAC.
+ * Do not use il_wr() family for these registers;
+ * no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ.
+ * The MAC (uCode processor, etc.) does not need to be powered up for accessing
+ * the CSR registers.
+ *
+ * NOTE:  Device does need to be awake in order to read this memory
+ *        via CSR_EEPROM register
+ */
+#define CSR_BASE    (0x000)
+
+#define CSR_HW_IF_CONFIG_REG    (CSR_BASE+0x000)	/* hardware interface config */
+#define CSR_INT_COALESCING      (CSR_BASE+0x004)	/* accum ints, 32-usec units */
+#define CSR_INT                 (CSR_BASE+0x008)	/* host interrupt status/ack */
+#define CSR_INT_MASK            (CSR_BASE+0x00c)	/* host interrupt enable */
+#define CSR_FH_INT_STATUS       (CSR_BASE+0x010)	/* busmaster int status/ack */
+#define CSR_GPIO_IN             (CSR_BASE+0x018)	/* read external chip pins */
+#define CSR_RESET               (CSR_BASE+0x020)	/* busmaster enable, NMI, etc */
+#define CSR_GP_CNTRL            (CSR_BASE+0x024)
+
+/* 2nd byte of CSR_INT_COALESCING, not accessible via _il_wr()! */
+#define CSR_INT_PERIODIC_REG	(CSR_BASE+0x005)
+
+/*
+ * Hardware revision info
+ * Bit fields:
+ * 31-8:  Reserved
+ *  7-4:  Type of device:  see CSR_HW_REV_TYPE_xxx definitions
+ *  3-2:  Revision step:  0 = A, 1 = B, 2 = C, 3 = D
+ *  1-0:  "Dash" (-) value, as in A-1, etc.
+ *
+ * NOTE:  Revision step affects calculation of CCK txpower for 4965.
+ * NOTE:  See also CSR_HW_REV_WA_REG (work-around for bug in 4965).
+ */
+#define CSR_HW_REV              (CSR_BASE+0x028)
+
+/*
+ * EEPROM memory reads
+ *
+ * NOTE:  Device must be awake, initialized via apm_ops.init(),
+ *        in order to read.
+ */
+#define CSR_EEPROM_REG          (CSR_BASE+0x02c)
+#define CSR_EEPROM_GP           (CSR_BASE+0x030)
+
+#define CSR_GIO_REG		(CSR_BASE+0x03C)
+#define CSR_GP_UCODE_REG	(CSR_BASE+0x048)
+#define CSR_GP_DRIVER_REG	(CSR_BASE+0x050)
+
+/*
+ * UCODE-DRIVER GP (general purpose) mailbox registers.
+ * SET/CLR registers set/clear bit(s) if "1" is written.
+ */
+#define CSR_UCODE_DRV_GP1       (CSR_BASE+0x054)
+#define CSR_UCODE_DRV_GP1_SET   (CSR_BASE+0x058)
+#define CSR_UCODE_DRV_GP1_CLR   (CSR_BASE+0x05c)
+#define CSR_UCODE_DRV_GP2       (CSR_BASE+0x060)
+
+#define CSR_LED_REG             (CSR_BASE+0x094)
+#define CSR_DRAM_INT_TBL_REG	(CSR_BASE+0x0A0)
+
+/* GIO Chicken Bits (PCI Express bus link power management) */
+#define CSR_GIO_CHICKEN_BITS    (CSR_BASE+0x100)
+
+/* Analog phase-lock-loop configuration  */
+#define CSR_ANA_PLL_CFG         (CSR_BASE+0x20c)
+
+/*
+ * CSR Hardware Revision Workaround Register.  Indicates hardware rev;
+ * "step" determines CCK backoff for txpower calculation.  Used for 4965 only.
+ * See also CSR_HW_REV register.
+ * Bit fields:
+ *  3-2:  0 = A, 1 = B, 2 = C, 3 = D step
+ *  1-0:  "Dash" (-) value, as in C-1, etc.
+ */
+#define CSR_HW_REV_WA_REG		(CSR_BASE+0x22C)
+
+#define CSR_DBG_HPET_MEM_REG		(CSR_BASE+0x240)
+#define CSR_DBG_LINK_PWR_MGMT_REG	(CSR_BASE+0x250)
+
+/* Bits for CSR_HW_IF_CONFIG_REG */
+#define CSR49_HW_IF_CONFIG_REG_BIT_4965_R	(0x00000010)
+#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER	(0x00000C00)
+#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI 	(0x00000100)
+#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI	(0x00000200)
+
+#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MB         (0x00000100)
+#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MM         (0x00000200)
+#define CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC            (0x00000400)
+#define CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE         (0x00000800)
+#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A    (0x00000000)
+#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B    (0x00001000)
+
+#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A	(0x00080000)
+#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM	(0x00200000)
+#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY	(0x00400000)	/* PCI_OWN_SEM */
+#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000)	/* ME_OWN */
+#define CSR_HW_IF_CONFIG_REG_PREPARE		  (0x08000000)	/* WAKE_ME */
+
+#define CSR_INT_PERIODIC_DIS			(0x00)	/* disable periodic int */
+#define CSR_INT_PERIODIC_ENA			(0xFF)	/* 255*32 usec ~ 8 msec */
+
+/* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
+ * acknowledged (reset) by host writing "1" to flagged bits. */
+#define CSR_INT_BIT_FH_RX        (1 << 31)	/* Rx DMA, cmd responses, FH_INT[17:16] */
+#define CSR_INT_BIT_HW_ERR       (1 << 29)	/* DMA hardware error FH_INT[31] */
+#define CSR_INT_BIT_RX_PERIODIC	 (1 << 28)	/* Rx periodic */
+#define CSR_INT_BIT_FH_TX        (1 << 27)	/* Tx DMA FH_INT[1:0] */
+#define CSR_INT_BIT_SCD          (1 << 26)	/* TXQ pointer advanced */
+#define CSR_INT_BIT_SW_ERR       (1 << 25)	/* uCode error */
+#define CSR_INT_BIT_RF_KILL      (1 << 7)	/* HW RFKILL switch GP_CNTRL[27] toggled */
+#define CSR_INT_BIT_CT_KILL      (1 << 6)	/* Critical temp (chip too hot) rfkill */
+#define CSR_INT_BIT_SW_RX        (1 << 3)	/* Rx, command responses, 3945 */
+#define CSR_INT_BIT_WAKEUP       (1 << 1)	/* NIC controller waking up (pwr mgmt) */
+#define CSR_INT_BIT_ALIVE        (1 << 0)	/* uCode interrupts once it initializes */
+
+#define CSR_INI_SET_MASK	(CSR_INT_BIT_FH_RX   | \
+				 CSR_INT_BIT_HW_ERR  | \
+				 CSR_INT_BIT_FH_TX   | \
+				 CSR_INT_BIT_SW_ERR  | \
+				 CSR_INT_BIT_RF_KILL | \
+				 CSR_INT_BIT_SW_RX   | \
+				 CSR_INT_BIT_WAKEUP  | \
+				 CSR_INT_BIT_ALIVE)
+
+/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
+#define CSR_FH_INT_BIT_ERR       (1 << 31)	/* Error */
+#define CSR_FH_INT_BIT_HI_PRIOR  (1 << 30)	/* High priority Rx, bypass coalescing */
+#define CSR39_FH_INT_BIT_RX_CHNL2  (1 << 18)	/* Rx channel 2 (3945 only) */
+#define CSR_FH_INT_BIT_RX_CHNL1  (1 << 17)	/* Rx channel 1 */
+#define CSR_FH_INT_BIT_RX_CHNL0  (1 << 16)	/* Rx channel 0 */
+#define CSR39_FH_INT_BIT_TX_CHNL6  (1 << 6)	/* Tx channel 6 (3945 only) */
+#define CSR_FH_INT_BIT_TX_CHNL1  (1 << 1)	/* Tx channel 1 */
+#define CSR_FH_INT_BIT_TX_CHNL0  (1 << 0)	/* Tx channel 0 */
+
+#define CSR39_FH_INT_RX_MASK	(CSR_FH_INT_BIT_HI_PRIOR | \
+				 CSR39_FH_INT_BIT_RX_CHNL2 | \
+				 CSR_FH_INT_BIT_RX_CHNL1 | \
+				 CSR_FH_INT_BIT_RX_CHNL0)
+
+#define CSR39_FH_INT_TX_MASK	(CSR39_FH_INT_BIT_TX_CHNL6 | \
+				 CSR_FH_INT_BIT_TX_CHNL1 | \
+				 CSR_FH_INT_BIT_TX_CHNL0)
+
+#define CSR49_FH_INT_RX_MASK	(CSR_FH_INT_BIT_HI_PRIOR | \
+				 CSR_FH_INT_BIT_RX_CHNL1 | \
+				 CSR_FH_INT_BIT_RX_CHNL0)
+
+#define CSR49_FH_INT_TX_MASK	(CSR_FH_INT_BIT_TX_CHNL1 | \
+				 CSR_FH_INT_BIT_TX_CHNL0)
+
+/* GPIO */
+#define CSR_GPIO_IN_BIT_AUX_POWER                   (0x00000200)
+#define CSR_GPIO_IN_VAL_VAUX_PWR_SRC                (0x00000000)
+#define CSR_GPIO_IN_VAL_VMAIN_PWR_SRC               (0x00000200)
+
+/* RESET */
+#define CSR_RESET_REG_FLAG_NEVO_RESET                (0x00000001)
+#define CSR_RESET_REG_FLAG_FORCE_NMI                 (0x00000002)
+#define CSR_RESET_REG_FLAG_SW_RESET                  (0x00000080)
+#define CSR_RESET_REG_FLAG_MASTER_DISABLED           (0x00000100)
+#define CSR_RESET_REG_FLAG_STOP_MASTER               (0x00000200)
+#define CSR_RESET_LINK_PWR_MGMT_DISABLED             (0x80000000)
+
+/*
+ * GP (general purpose) CONTROL REGISTER
+ * Bit fields:
+ *    27:  HW_RF_KILL_SW
+ *         Indicates state of (platform's) hardware RF-Kill switch
+ * 26-24:  POWER_SAVE_TYPE
+ *         Indicates current power-saving mode:
+ *         000 -- No power saving
+ *         001 -- MAC power-down
+ *         010 -- PHY (radio) power-down
+ *         011 -- Error
+ *   9-6:  SYS_CONFIG
+ *         Indicates current system configuration, reflecting pins on chip
+ *         as forced high/low by device circuit board.
+ *     4:  GOING_TO_SLEEP
+ *         Indicates MAC is entering a power-saving sleep power-down.
+ *         Not a good time to access device-internal resources.
+ *     3:  MAC_ACCESS_REQ
+ *         Host sets this to request and maintain MAC wakeup, to allow host
+ *         access to device-internal resources.  Host must wait for
+ *         MAC_CLOCK_READY (and !GOING_TO_SLEEP) before accessing non-CSR
+ *         device registers.
+ *     2:  INIT_DONE
+ *         Host sets this to put device into fully operational D0 power mode.
+ *         Host resets this after SW_RESET to put device into low power mode.
+ *     0:  MAC_CLOCK_READY
+ *         Indicates MAC (ucode processor, etc.) is powered up and can run.
+ *         Internal resources are accessible.
+ *         NOTE:  This does not indicate that the processor is actually running.
+ *         NOTE:  This does not indicate that 4965 or 3945 has completed
+ *                init or post-power-down restore of internal SRAM memory.
+ *                Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that
+ *                SRAM is restored and uCode is in normal operation mode.
+ *                Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
+ *                do not need to save/restore it.
+ *         NOTE:  After device reset, this bit remains "0" until host sets
+ *                INIT_DONE
+ */
+#define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY        (0x00000001)
+#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE              (0x00000004)
+#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ         (0x00000008)
+#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP         (0x00000010)
+
+#define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN           (0x00000001)
+
+#define CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE         (0x07000000)
+#define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE         (0x04000000)
+#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW          (0x08000000)
+
+/* EEPROM REG */
+#define CSR_EEPROM_REG_READ_VALID_MSK	(0x00000001)
+#define CSR_EEPROM_REG_BIT_CMD		(0x00000002)
+#define CSR_EEPROM_REG_MSK_ADDR		(0x0000FFFC)
+#define CSR_EEPROM_REG_MSK_DATA		(0xFFFF0000)
+
+/* EEPROM GP */
+#define CSR_EEPROM_GP_VALID_MSK		(0x00000007)	/* signature */
+#define CSR_EEPROM_GP_IF_OWNER_MSK	(0x00000180)
+#define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K		(0x00000002)
+#define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K		(0x00000004)
+
+/* GP REG */
+#define CSR_GP_REG_POWER_SAVE_STATUS_MSK            (0x03000000)	/* bit 24/25 */
+#define CSR_GP_REG_NO_POWER_SAVE            (0x00000000)
+#define CSR_GP_REG_MAC_POWER_SAVE           (0x01000000)
+#define CSR_GP_REG_PHY_POWER_SAVE           (0x02000000)
+#define CSR_GP_REG_POWER_SAVE_ERROR         (0x03000000)
+
+/* CSR GIO */
+#define CSR_GIO_REG_VAL_L0S_ENABLED	(0x00000002)
+
+/*
+ * UCODE-DRIVER GP (general purpose) mailbox register 1
+ * Host driver and uCode write and/or read this register to communicate with
+ * each other.
+ * Bit fields:
+ *     4:  UCODE_DISABLE
+ *         Host sets this to request permanent halt of uCode, same as
+ *         sending CARD_STATE command with "halt" bit set.
+ *     3:  CT_KILL_EXIT
+ *         Host sets this to request exit from CT_KILL state, i.e. host thinks
+ *         device temperature is low enough to continue normal operation.
+ *     2:  CMD_BLOCKED
+ *         Host sets this during RF KILL power-down sequence (HW, SW, CT KILL)
+ *         to release uCode to clear all Tx and command queues, enter
+ *         unassociated mode, and power down.
+ *         NOTE:  Some devices also use HBUS_TARG_MBX_C register for this bit.
+ *     1:  SW_BIT_RFKILL
+ *         Host sets this when issuing CARD_STATE command to request
+ *         device sleep.
+ *     0:  MAC_SLEEP
+ *         uCode sets this when preparing a power-saving power-down.
+ *         uCode resets this when power-up is complete and SRAM is sane.
+ *         NOTE:  3945/4965 saves internal SRAM data to host when powering down,
+ *                and must restore this data after powering back up.
+ *                MAC_SLEEP is the best indication that restore is complete.
+ *                Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
+ *                do not need to save/restore it.
+ */
+#define CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP             (0x00000001)
+#define CSR_UCODE_SW_BIT_RFKILL                     (0x00000002)
+#define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED           (0x00000004)
+#define CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT      (0x00000008)
+
+/* GIO Chicken Bits (PCI Express bus link power management) */
+#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX  (0x00800000)
+#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER  (0x20000000)
+
+/* LED */
+#define CSR_LED_BSM_CTRL_MSK (0xFFFFFFDF)
+#define CSR_LED_REG_TRUN_ON (0x78)
+#define CSR_LED_REG_TRUN_OFF (0x38)
+
+/* ANA_PLL */
+#define CSR39_ANA_PLL_CFG_VAL        (0x01000000)
+
+/* HPET MEM debug */
+#define CSR_DBG_HPET_MEM_REG_VAL	(0xFFFF0000)
+
+/* DRAM INT TBL */
+#define CSR_DRAM_INT_TBL_ENABLE		(1 << 31)
+#define CSR_DRAM_INIT_TBL_WRAP_CHECK	(1 << 27)
+
+/*
+ * HBUS (Host-side Bus)
+ *
+ * HBUS registers are mapped directly into PCI bus space, but are used
+ * to indirectly access device's internal memory or registers that
+ * may be powered-down.
+ *
+ * Use the il_wr()/il_rd() family for these registers;
+ * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ
+ * to make sure the MAC (uCode processor, etc.) is powered up for accessing
+ * internal resources.
+ *
+ * Do not use _il_wr()/_il_rd() family to access these registers;
+ * these provide only simple PCI bus access, without waking up the MAC.
+ */
+#define HBUS_BASE	(0x400)
+
+/*
+ * Registers for accessing device's internal SRAM memory (e.g. SCD SRAM
+ * structures, error log, event log, verifying uCode load).
+ * First write to address register, then read from or write to data register
+ * to complete the job.  Once the address register is set up, accesses to
+ * data registers auto-increment the address by one dword.
+ * Bit usage for address registers (read or write):
+ *  0-31:  memory address within device
+ */
+#define HBUS_TARG_MEM_RADDR     (HBUS_BASE+0x00c)
+#define HBUS_TARG_MEM_WADDR     (HBUS_BASE+0x010)
+#define HBUS_TARG_MEM_WDAT      (HBUS_BASE+0x018)
+#define HBUS_TARG_MEM_RDAT      (HBUS_BASE+0x01c)
+
+/* Mailbox C, used as workaround alternative to CSR_UCODE_DRV_GP1 mailbox */
+#define HBUS_TARG_MBX_C         (HBUS_BASE+0x030)
+#define HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED         (0x00000004)
+
+/*
+ * Registers for accessing device's internal peripheral registers
+ * (e.g. SCD, BSM, etc.).  First write to address register,
+ * then read from or write to data register to complete the job.
+ * Bit usage for address registers (read or write):
+ *  0-15:  register address (offset) within device
+ * 24-25:  (# bytes - 1) to read or write (e.g. 3 for dword)
+ */
+#define HBUS_TARG_PRPH_WADDR    (HBUS_BASE+0x044)
+#define HBUS_TARG_PRPH_RADDR    (HBUS_BASE+0x048)
+#define HBUS_TARG_PRPH_WDAT     (HBUS_BASE+0x04c)
+#define HBUS_TARG_PRPH_RDAT     (HBUS_BASE+0x050)
+
+/*
+ * Per-Tx-queue write pointer (idx, really!)
+ * Indicates idx to next TFD that driver will fill (1 past latest filled).
+ * Bit usage:
+ *  0-7:  queue write idx
+ * 11-8:  queue selector
+ */
+#define HBUS_TARG_WRPTR         (HBUS_BASE+0x060)
+
+#endif /* !__il_csr_h__ */
diff --git a/drivers/net/wireless/intel/iwlegacy/debug.c b/drivers/net/wireless/intel/iwlegacy/debug.c
new file mode 100644
index 0000000..d76073d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/debug.c
@@ -0,0 +1,1424 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+#include <linux/ieee80211.h>
+#include <linux/export.h>
+#include <net/mac80211.h>
+
+#include "common.h"
+
+static void
+il_clear_traffic_stats(struct il_priv *il)
+{
+	memset(&il->tx_stats, 0, sizeof(struct traffic_stats));
+	memset(&il->rx_stats, 0, sizeof(struct traffic_stats));
+}
+
+/*
+ * il_update_stats() records all the MGMT, CTRL and DATA packets for
+ * both Tx and Rx. Use debugfs to display the tx/rx stats.
+ */
+void
+il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len)
+{
+	struct traffic_stats *stats;
+
+	if (is_tx)
+		stats = &il->tx_stats;
+	else
+		stats = &il->rx_stats;
+
+	if (ieee80211_is_mgmt(fc)) {
+		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
+			stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
+			break;
+		case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
+			stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
+			break;
+		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
+			stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
+			break;
+		case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
+			stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
+			break;
+		case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
+			stats->mgmt[MANAGEMENT_PROBE_REQ]++;
+			break;
+		case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
+			stats->mgmt[MANAGEMENT_PROBE_RESP]++;
+			break;
+		case cpu_to_le16(IEEE80211_STYPE_BEACON):
+			stats->mgmt[MANAGEMENT_BEACON]++;
+			break;
+		case cpu_to_le16(IEEE80211_STYPE_ATIM):
+			stats->mgmt[MANAGEMENT_ATIM]++;
+			break;
+		case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
+			stats->mgmt[MANAGEMENT_DISASSOC]++;
+			break;
+		case cpu_to_le16(IEEE80211_STYPE_AUTH):
+			stats->mgmt[MANAGEMENT_AUTH]++;
+			break;
+		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+			stats->mgmt[MANAGEMENT_DEAUTH]++;
+			break;
+		case cpu_to_le16(IEEE80211_STYPE_ACTION):
+			stats->mgmt[MANAGEMENT_ACTION]++;
+			break;
+		}
+	} else if (ieee80211_is_ctl(fc)) {
+		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+		case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
+			stats->ctrl[CONTROL_BACK_REQ]++;
+			break;
+		case cpu_to_le16(IEEE80211_STYPE_BACK):
+			stats->ctrl[CONTROL_BACK]++;
+			break;
+		case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
+			stats->ctrl[CONTROL_PSPOLL]++;
+			break;
+		case cpu_to_le16(IEEE80211_STYPE_RTS):
+			stats->ctrl[CONTROL_RTS]++;
+			break;
+		case cpu_to_le16(IEEE80211_STYPE_CTS):
+			stats->ctrl[CONTROL_CTS]++;
+			break;
+		case cpu_to_le16(IEEE80211_STYPE_ACK):
+			stats->ctrl[CONTROL_ACK]++;
+			break;
+		case cpu_to_le16(IEEE80211_STYPE_CFEND):
+			stats->ctrl[CONTROL_CFEND]++;
+			break;
+		case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
+			stats->ctrl[CONTROL_CFENDACK]++;
+			break;
+		}
+	} else {
+		/* data */
+		stats->data_cnt++;
+		stats->data_bytes += len;
+	}
+}
+EXPORT_SYMBOL(il_update_stats);
+
+/* create and remove of files */
+#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
+	if (!debugfs_create_file(#name, mode, parent, il,		\
+			 &il_dbgfs_##name##_ops))		\
+		goto err;						\
+} while (0)
+
+#define DEBUGFS_ADD_BOOL(name, parent, ptr) do {			\
+	struct dentry *__tmp;						\
+	__tmp = debugfs_create_bool(#name, 0600, parent, ptr);		\
+	if (IS_ERR(__tmp) || !__tmp)					\
+		goto err;						\
+} while (0)
+
+#define DEBUGFS_ADD_X32(name, parent, ptr) do {				\
+	struct dentry *__tmp;						\
+	__tmp = debugfs_create_x32(#name, 0600, parent, ptr);		\
+	if (IS_ERR(__tmp) || !__tmp)					\
+		goto err;						\
+} while (0)
+
+/* file operation */
+#define DEBUGFS_READ_FUNC(name)                                         \
+static ssize_t il_dbgfs_##name##_read(struct file *file,               \
+					char __user *user_buf,          \
+					size_t count, loff_t *ppos);
+
+#define DEBUGFS_WRITE_FUNC(name)                                        \
+static ssize_t il_dbgfs_##name##_write(struct file *file,              \
+					const char __user *user_buf,    \
+					size_t count, loff_t *ppos);
+
+
+#define DEBUGFS_READ_FILE_OPS(name)				\
+	DEBUGFS_READ_FUNC(name);				\
+static const struct file_operations il_dbgfs_##name##_ops = {	\
+	.read = il_dbgfs_##name##_read,				\
+	.open = simple_open,					\
+	.llseek = generic_file_llseek,				\
+};
+
+#define DEBUGFS_WRITE_FILE_OPS(name)				\
+	DEBUGFS_WRITE_FUNC(name);				\
+static const struct file_operations il_dbgfs_##name##_ops = {	\
+	.write = il_dbgfs_##name##_write,			\
+	.open = simple_open,					\
+	.llseek = generic_file_llseek,				\
+};
+
+#define DEBUGFS_READ_WRITE_FILE_OPS(name)			\
+	DEBUGFS_READ_FUNC(name);				\
+	DEBUGFS_WRITE_FUNC(name);				\
+static const struct file_operations il_dbgfs_##name##_ops = {	\
+	.write = il_dbgfs_##name##_write,			\
+	.read = il_dbgfs_##name##_read,				\
+	.open = simple_open,					\
+	.llseek = generic_file_llseek,				\
+};
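+
+/*
+ * For reference, DEBUGFS_READ_FILE_OPS(nvm) expands (via ##name## token
+ * pasting) to roughly:
+ *
+ *	static ssize_t il_dbgfs_nvm_read(struct file *file,
+ *					 char __user *user_buf,
+ *					 size_t count, loff_t *ppos);
+ *	static const struct file_operations il_dbgfs_nvm_ops = {
+ *		.read = il_dbgfs_nvm_read,
+ *		.open = simple_open,
+ *		.llseek = generic_file_llseek,
+ *	};
+ *
+ * so DEBUGFS_ADD_FILE(nvm, ...) can then hook il_dbgfs_nvm_ops up to a
+ * debugfs file named "nvm".
+ */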
+
+static const char *
+il_get_mgmt_string(int cmd)
+{
+	switch (cmd) {
+	IL_CMD(MANAGEMENT_ASSOC_REQ);
+	IL_CMD(MANAGEMENT_ASSOC_RESP);
+	IL_CMD(MANAGEMENT_REASSOC_REQ);
+	IL_CMD(MANAGEMENT_REASSOC_RESP);
+	IL_CMD(MANAGEMENT_PROBE_REQ);
+	IL_CMD(MANAGEMENT_PROBE_RESP);
+	IL_CMD(MANAGEMENT_BEACON);
+	IL_CMD(MANAGEMENT_ATIM);
+	IL_CMD(MANAGEMENT_DISASSOC);
+	IL_CMD(MANAGEMENT_AUTH);
+	IL_CMD(MANAGEMENT_DEAUTH);
+	IL_CMD(MANAGEMENT_ACTION);
+	default:
+		return "UNKNOWN";
+	}
+}
+
+static const char *
+il_get_ctrl_string(int cmd)
+{
+	switch (cmd) {
+	IL_CMD(CONTROL_BACK_REQ);
+	IL_CMD(CONTROL_BACK);
+	IL_CMD(CONTROL_PSPOLL);
+	IL_CMD(CONTROL_RTS);
+	IL_CMD(CONTROL_CTS);
+	IL_CMD(CONTROL_ACK);
+	IL_CMD(CONTROL_CFEND);
+	IL_CMD(CONTROL_CFENDACK);
+	default:
+		return "UNKNOWN";
+	}
+}
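+
+/*
+ * The IL_CMD() macro used in the two switches above is defined elsewhere
+ * in this driver; the switch bodies only make sense if it follows the
+ * usual stringify pattern, assumed here to be:
+ *
+ *	#define IL_CMD(x) case x: return #x
+ *
+ * so that each case label returns its own enumerator name as a string.
+ */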
+
+static ssize_t
+il_dbgfs_tx_stats_read(struct file *file, char __user *user_buf, size_t count,
+		       loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	char *buf;
+	int pos = 0;
+
+	int cnt;
+	ssize_t ret;
+	const size_t bufsz =
+	    100 + sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
+	for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
+			      il_get_mgmt_string(cnt), il->tx_stats.mgmt[cnt]);
+	}
+	pos += scnprintf(buf + pos, bufsz - pos, "Control\n");
+	for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
+			      il_get_ctrl_string(cnt), il->tx_stats.ctrl[cnt]);
+	}
+	pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
+		      il->tx_stats.data_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
+		      il->tx_stats.data_bytes);
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t
+il_dbgfs_clear_traffic_stats_write(struct file *file,
+				   const char __user *user_buf, size_t count,
+				   loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	u32 clear_flag;
+	char buf[8];
+	int buf_size;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	if (sscanf(buf, "%x", &clear_flag) != 1)
+		return -EINVAL;
+	il_clear_traffic_stats(il);
+
+	return count;
+}
+
+static ssize_t
+il_dbgfs_rx_stats_read(struct file *file, char __user *user_buf, size_t count,
+		       loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	char *buf;
+	int pos = 0;
+	int cnt;
+	ssize_t ret;
+	const size_t bufsz =
+	    100 + sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
+	for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
+			      il_get_mgmt_string(cnt), il->rx_stats.mgmt[cnt]);
+	}
+	pos += scnprintf(buf + pos, bufsz - pos, "Control:\n");
+	for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
+			      il_get_ctrl_string(cnt), il->rx_stats.ctrl[cnt]);
+	}
+	pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
+		      il->rx_stats.data_cnt);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
+		      il->rx_stats.data_bytes);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+#define BYTE1_MASK 0x000000ff
+#define BYTE2_MASK 0x0000ffff
+#define BYTE3_MASK 0x00ffffff
+static ssize_t
+il_dbgfs_sram_read(struct file *file, char __user *user_buf, size_t count,
+		   loff_t *ppos)
+{
+	u32 val;
+	char *buf;
+	ssize_t ret;
+	int i;
+	int pos = 0;
+	struct il_priv *il = file->private_data;
+	size_t bufsz;
+
+	/* default is to dump the entire data segment */
+	if (!il->dbgfs_sram_offset && !il->dbgfs_sram_len) {
+		il->dbgfs_sram_offset = 0x800000;
+		if (il->ucode_type == UCODE_INIT)
+			il->dbgfs_sram_len = il->ucode_init_data.len;
+		else
+			il->dbgfs_sram_len = il->ucode_data.len;
+	}
+	bufsz = 30 + il->dbgfs_sram_len * sizeof(char) * 10;
+	buf = kmalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
+		      il->dbgfs_sram_len);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
+		      il->dbgfs_sram_offset);
+	for (i = il->dbgfs_sram_len; i > 0; i -= 4) {
+		val =
+		    il_read_targ_mem(il,
+				     il->dbgfs_sram_offset +
+				     il->dbgfs_sram_len - i);
+		if (i < 4) {
+			switch (i) {
+			case 1:
+				val &= BYTE1_MASK;
+				break;
+			case 2:
+				val &= BYTE2_MASK;
+				break;
+			case 3:
+				val &= BYTE3_MASK;
+				break;
+			}
+		}
+		if (!(i % 16))
+			pos += scnprintf(buf + pos, bufsz - pos, "\n");
+		pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
+	}
+	pos += scnprintf(buf + pos, bufsz - pos, "\n");
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t
+il_dbgfs_sram_write(struct file *file, const char __user *user_buf,
+		    size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	char buf[64];
+	int buf_size;
+	u32 offset, len;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+
+	if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
+		il->dbgfs_sram_offset = offset;
+		il->dbgfs_sram_len = len;
+	} else {
+		il->dbgfs_sram_offset = 0;
+		il->dbgfs_sram_len = 0;
+	}
+
+	return count;
+}
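+
+/*
+ * Usage sketch (paths shortened; the file lives under the "data"
+ * subdirectory created by il_dbgfs_register() below):
+ *
+ *	echo "0x800000,0x40" > .../data/sram	# select offset,len (hex)
+ *	cat .../data/sram			# dump the selected window
+ *	echo 0 > .../data/sram			# revert to full data segment
+ */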
+
+static ssize_t
+il_dbgfs_stations_read(struct file *file, char __user *user_buf, size_t count,
+		       loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	struct il_station_entry *station;
+	int max_sta = il->hw_params.max_stations;
+	char *buf;
+	int i, j, pos = 0;
+	ssize_t ret;
+	/* Add 30 for initial string */
+	const size_t bufsz = 30 + sizeof(char) * 500 * (il->num_stations);
+
+	buf = kmalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
+		      il->num_stations);
+
+	for (i = 0; i < max_sta; i++) {
+		station = &il->stations[i];
+		if (!station->used)
+			continue;
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos,
+			      "station %d - addr: %pM, flags: %#x\n", i,
+			      station->sta.sta.addr,
+			      station->sta.station_flags_msk);
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos,
+			      "TID\tseq_num\ttxq_id\tframes\ttfds\t");
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos,
+			      "start_idx\tbitmap\t\t\trate_n_flags\n");
+
+		for (j = 0; j < MAX_TID_COUNT; j++) {
+			pos +=
+			    scnprintf(buf + pos, bufsz - pos,
+				      "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x",
+				      j, station->tid[j].seq_number,
+				      station->tid[j].agg.txq_id,
+				      station->tid[j].agg.frame_count,
+				      station->tid[j].tfds_in_queue,
+				      station->tid[j].agg.start_idx,
+				      station->tid[j].agg.bitmap,
+				      station->tid[j].agg.rate_n_flags);
+
+			if (station->tid[j].agg.wait_for_ba)
+				pos +=
+				    scnprintf(buf + pos, bufsz - pos,
+					      " - waitforba");
+			pos += scnprintf(buf + pos, bufsz - pos, "\n");
+		}
+
+		pos += scnprintf(buf + pos, bufsz - pos, "\n");
+	}
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t
+il_dbgfs_nvm_read(struct file *file, char __user *user_buf, size_t count,
+		  loff_t *ppos)
+{
+	ssize_t ret;
+	struct il_priv *il = file->private_data;
+	int pos = 0, ofs = 0, buf_size = 0;
+	const u8 *ptr;
+	char *buf;
+	u16 eeprom_ver;
+	size_t eeprom_len = il->cfg->eeprom_size;
+	buf_size = 4 * eeprom_len + 256;	/* 4 characters for byte 0xYY */
+
+	if (eeprom_len % 16) {
+		IL_ERR("NVM size is not multiple of 16.\n");
+		return -ENODATA;
+	}
+
+	ptr = il->eeprom;
+	if (!ptr) {
+		IL_ERR("Invalid EEPROM memory\n");
+		return -ENOMEM;
+	}
+
+	buf = kzalloc(buf_size, GFP_KERNEL);
+	if (!buf) {
+		IL_ERR("Can not allocate Buffer\n");
+		return -ENOMEM;
+	}
+	eeprom_ver = il_eeprom_query16(il, EEPROM_VERSION);
+	pos +=
+	    scnprintf(buf + pos, buf_size - pos, "EEPROM " "version: 0x%x\n",
+		      eeprom_ver);
+	for (ofs = 0; ofs < eeprom_len; ofs += 16) {
+		pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x %16ph\n",
+				 ofs, ptr + ofs);
+	}
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t
+il_dbgfs_channels_read(struct file *file, char __user *user_buf, size_t count,
+		       loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	struct ieee80211_channel *channels = NULL;
+	const struct ieee80211_supported_band *supp_band = NULL;
+	int pos = 0, i, bufsz = PAGE_SIZE;
+	char *buf;
+	ssize_t ret;
+
+	if (!test_bit(S_GEO_CONFIGURED, &il->status))
+		return -EAGAIN;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf) {
+		IL_ERR("Can not allocate Buffer\n");
+		return -ENOMEM;
+	}
+
+	supp_band = il_get_hw_mode(il, NL80211_BAND_2GHZ);
+	if (supp_band) {
+		channels = supp_band->channels;
+
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos,
+			      "Displaying %d channels in 2.4GHz band (802.11bg):\n",
+			      supp_band->n_channels);
+
+		for (i = 0; i < supp_band->n_channels; i++)
+			pos +=
+			    scnprintf(buf + pos, bufsz - pos,
+				      "%d: %ddBm: BSS%s%s, %s.\n",
+				      channels[i].hw_value,
+				      channels[i].max_power,
+				      channels[i].flags & IEEE80211_CHAN_RADAR ?
+				      " (IEEE 802.11h required)" : "",
+				      ((channels[i].flags & IEEE80211_CHAN_NO_IR)
+				       || (channels[i].flags &
+					   IEEE80211_CHAN_RADAR)) ? "" : ", IBSS",
+				      channels[i].flags & IEEE80211_CHAN_NO_IR ?
+				      "passive only" : "active/passive");
+	}
+	supp_band = il_get_hw_mode(il, NL80211_BAND_5GHZ);
+	if (supp_band) {
+		channels = supp_band->channels;
+
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos,
+			      "Displaying %d channels in 5.2GHz band (802.11a):\n",
+			      supp_band->n_channels);
+
+		for (i = 0; i < supp_band->n_channels; i++)
+			pos +=
+			    scnprintf(buf + pos, bufsz - pos,
+				      "%d: %ddBm: BSS%s%s, %s.\n",
+				      channels[i].hw_value,
+				      channels[i].max_power,
+				      channels[i].flags & IEEE80211_CHAN_RADAR ?
+				      " (IEEE 802.11h required)" : "",
+				      ((channels[i].flags & IEEE80211_CHAN_NO_IR)
+				       || (channels[i].flags &
+					   IEEE80211_CHAN_RADAR)) ? "" : ", IBSS",
+				      channels[i].flags & IEEE80211_CHAN_NO_IR ?
+				      "passive only" : "active/passive");
+	}
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t
+il_dbgfs_status_read(struct file *file, char __user *user_buf, size_t count,
+		     loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	char buf[512];
+	int pos = 0;
+	const size_t bufsz = sizeof(buf);
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "S_HCMD_ACTIVE:\t %d\n",
+		      test_bit(S_HCMD_ACTIVE, &il->status));
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "S_INT_ENABLED:\t %d\n",
+		      test_bit(S_INT_ENABLED, &il->status));
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "S_RFKILL:\t %d\n",
+		      test_bit(S_RFKILL, &il->status));
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "S_CT_KILL:\t\t %d\n",
+		      test_bit(S_CT_KILL, &il->status));
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "S_INIT:\t\t %d\n",
+		      test_bit(S_INIT, &il->status));
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "S_ALIVE:\t\t %d\n",
+		      test_bit(S_ALIVE, &il->status));
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "S_READY:\t\t %d\n",
+		      test_bit(S_READY, &il->status));
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "S_TEMPERATURE:\t %d\n",
+		      test_bit(S_TEMPERATURE, &il->status));
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "S_GEO_CONFIGURED:\t %d\n",
+		      test_bit(S_GEO_CONFIGURED, &il->status));
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "S_EXIT_PENDING:\t %d\n",
+		      test_bit(S_EXIT_PENDING, &il->status));
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "S_STATS:\t %d\n",
+		      test_bit(S_STATS, &il->status));
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "S_SCANNING:\t %d\n",
+		      test_bit(S_SCANNING, &il->status));
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "S_SCAN_ABORTING:\t %d\n",
+		      test_bit(S_SCAN_ABORTING, &il->status));
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "S_SCAN_HW:\t\t %d\n",
+		      test_bit(S_SCAN_HW, &il->status));
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "S_POWER_PMI:\t %d\n",
+		      test_bit(S_POWER_PMI, &il->status));
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "S_FW_ERROR:\t %d\n",
+		      test_bit(S_FW_ERROR, &il->status));
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_interrupt_read(struct file *file, char __user *user_buf, size_t count,
+			loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	int pos = 0;
+	int cnt = 0;
+	char *buf;
+	int bufsz = 24 * 64;	/* 24 items * 64 char per item */
+	ssize_t ret;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf) {
+		IL_ERR("Can not allocate Buffer\n");
+		return -ENOMEM;
+	}
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "Interrupt Statistics Report:\n");
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
+		      il->isr_stats.hw);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
+		      il->isr_stats.sw);
+	if (il->isr_stats.sw || il->isr_stats.hw) {
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos,
+			      "\tLast Restarting Code:  0x%X\n",
+			      il->isr_stats.err_code);
+	}
+#ifdef CONFIG_IWLEGACY_DEBUG
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
+		      il->isr_stats.sch);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
+		      il->isr_stats.alive);
+#endif
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "HW RF KILL switch toggled:\t %u\n",
+		      il->isr_stats.rfkill);
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
+		      il->isr_stats.ctkill);
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
+		      il->isr_stats.wakeup);
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "Rx command responses:\t\t %u\n",
+		      il->isr_stats.rx);
+	for (cnt = 0; cnt < IL_CN_MAX; cnt++) {
+		if (il->isr_stats.handlers[cnt] > 0)
+			pos +=
+			    scnprintf(buf + pos, bufsz - pos,
+				      "\tRx handler[%36s]:\t\t %u\n",
+				      il_get_cmd_string(cnt),
+				      il->isr_stats.handlers[cnt]);
+	}
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
+		      il->isr_stats.tx);
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
+		      il->isr_stats.unhandled);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t
+il_dbgfs_interrupt_write(struct file *file, const char __user *user_buf,
+			 size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	char buf[8];
+	int buf_size;
+	u32 reset_flag;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	if (sscanf(buf, "%x", &reset_flag) != 1)
+		return -EINVAL;
+	if (reset_flag == 0)
+		il_clear_isr_stats(il);
+
+	return count;
+}
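+
+/*
+ * Usage sketch: writing the hex value 0 clears the interrupt counters
+ * reported by the read handler above; any other value is accepted but
+ * leaves the counters untouched:
+ *
+ *	echo 0 > .../data/interrupt
+ */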
+
+static ssize_t
+il_dbgfs_qos_read(struct file *file, char __user *user_buf, size_t count,
+		  loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	int pos = 0, i;
+	char buf[256];
+	const size_t bufsz = sizeof(buf);
+
+	for (i = 0; i < AC_NUM; i++) {
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos,
+			      "\tcw_min\tcw_max\taifsn\ttxop\n");
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos,
+			      "AC[%d]\t%u\t%u\t%u\t%u\n", i,
+			      il->qos_data.def_qos_parm.ac[i].cw_min,
+			      il->qos_data.def_qos_parm.ac[i].cw_max,
+			      il->qos_data.def_qos_parm.ac[i].aifsn,
+			      il->qos_data.def_qos_parm.ac[i].edca_txop);
+	}
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_disable_ht40_write(struct file *file, const char __user *user_buf,
+			    size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	char buf[8];
+	int buf_size;
+	int ht40;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	if (sscanf(buf, "%d", &ht40) != 1)
+		return -EINVAL;
+	if (!il_is_any_associated(il))
+		il->disable_ht40 = ht40 ? true : false;
+	else {
+		IL_ERR("Sta associated with AP - "
+		       "Change to 40MHz channel support is not allowed\n");
+		return -EINVAL;
+	}
+
+	return count;
+}
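+
+/*
+ * Usage sketch: the setting is only accepted while no station is
+ * associated; otherwise the write fails with -EINVAL:
+ *
+ *	echo 1 > .../data/disable_ht40	# forbid HT40 (40MHz) channels
+ *	echo 0 > .../data/disable_ht40	# allow HT40 again
+ */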
+
+static ssize_t
+il_dbgfs_disable_ht40_read(struct file *file, char __user *user_buf,
+			   size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	char buf[100];
+	int pos = 0;
+	const size_t bufsz = sizeof(buf);
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "11n 40MHz Mode: %s\n",
+		      il->disable_ht40 ? "Disabled" : "Enabled");
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+DEBUGFS_READ_WRITE_FILE_OPS(sram);
+DEBUGFS_READ_FILE_OPS(nvm);
+DEBUGFS_READ_FILE_OPS(stations);
+DEBUGFS_READ_FILE_OPS(channels);
+DEBUGFS_READ_FILE_OPS(status);
+DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
+DEBUGFS_READ_FILE_OPS(qos);
+DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
+
+static ssize_t
+il_dbgfs_tx_queue_read(struct file *file, char __user *user_buf, size_t count,
+		       loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	struct il_tx_queue *txq;
+	struct il_queue *q;
+	char *buf;
+	int pos = 0;
+	int cnt;
+	int ret;
+	const size_t bufsz =
+	    sizeof(char) * 64 * il->cfg->num_of_queues;
+
+	if (!il->txq) {
+		IL_ERR("txq not ready\n");
+		return -EAGAIN;
+	}
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
+		txq = &il->txq[cnt];
+		q = &txq->q;
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos,
+			      "hwq %.2d: read=%u write=%u stop=%d"
+			      " swq_id=%#.2x (ac %d/hwq %d)\n", cnt,
+			      q->read_ptr, q->write_ptr,
+			      !!test_bit(cnt, il->queue_stopped),
+			      txq->swq_id, txq->swq_id & 3,
+			      (txq->swq_id >> 2) & 0x1f);
+		if (cnt >= 4)
+			continue;
+		/* for the ACs, display the stop count too */
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos,
+			      "        stop-count: %d\n",
+			      atomic_read(&il->queue_stop_count[cnt]));
+	}
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t
+il_dbgfs_rx_queue_read(struct file *file, char __user *user_buf, size_t count,
+		       loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	struct il_rx_queue *rxq = &il->rxq;
+	char buf[256];
+	int pos = 0;
+	const size_t bufsz = sizeof(buf);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n", rxq->read);
+	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n", rxq->write);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
+		      rxq->free_count);
+	if (rxq->rb_stts) {
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
+			      le16_to_cpu(rxq->rb_stts->
+					  closed_rb_num) & 0x0FFF);
+	} else {
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos,
+			      "closed_rb_num: Not Allocated\n");
+	}
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+			     size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+
+	return il->debugfs_ops->rx_stats_read(file, user_buf, count, ppos);
+}
+
+static ssize_t
+il_dbgfs_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+			     size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+
+	return il->debugfs_ops->tx_stats_read(file, user_buf, count, ppos);
+}
+
+static ssize_t
+il_dbgfs_ucode_general_stats_read(struct file *file, char __user *user_buf,
+				  size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+
+	return il->debugfs_ops->general_stats_read(file, user_buf, count, ppos);
+}
+
+static ssize_t
+il_dbgfs_sensitivity_read(struct file *file, char __user *user_buf,
+			  size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	int pos = 0;
+	int cnt = 0;
+	char *buf;
+	int bufsz = sizeof(struct il_sensitivity_data) * 4 + 100;
+	ssize_t ret;
+	struct il_sensitivity_data *data;
+
+	data = &il->sensitivity_data;
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf) {
+		IL_ERR("Can not allocate Buffer\n");
+		return -ENOMEM;
+	}
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n",
+		      data->auto_corr_ofdm);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_mrc:\t\t %u\n",
+		      data->auto_corr_ofdm_mrc);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n",
+		      data->auto_corr_ofdm_x1);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_mrc_x1:\t\t %u\n",
+		      data->auto_corr_ofdm_mrc_x1);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n",
+		      data->auto_corr_cck);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n",
+		      data->auto_corr_cck_mrc);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "last_bad_plcp_cnt_ofdm:\t\t %u\n",
+		      data->last_bad_plcp_cnt_ofdm);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n",
+		      data->last_fa_cnt_ofdm);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "last_bad_plcp_cnt_cck:\t\t %u\n",
+		      data->last_bad_plcp_cnt_cck);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n",
+		      data->last_fa_cnt_cck);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n",
+		      data->nrg_curr_state);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n",
+		      data->nrg_prev_state);
+	pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t");
+	for (cnt = 0; cnt < 10; cnt++) {
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos, " %u",
+			      data->nrg_value[cnt]);
+	}
+	pos += scnprintf(buf + pos, bufsz - pos, "\n");
+	pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t");
+	for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) {
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos, " %u",
+			      data->nrg_silence_rssi[cnt]);
+	}
+	pos += scnprintf(buf + pos, bufsz - pos, "\n");
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n",
+		      data->nrg_silence_ref);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n",
+		      data->nrg_energy_idx);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n",
+		      data->nrg_silence_idx);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n",
+		      data->nrg_th_cck);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "nrg_auto_corr_silence_diff:\t %u\n",
+		      data->nrg_auto_corr_silence_diff);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n",
+		      data->num_in_cck_no_fa);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n",
+		      data->nrg_th_ofdm);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t
+il_dbgfs_chain_noise_read(struct file *file, char __user *user_buf,
+			  size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	int pos = 0;
+	int cnt = 0;
+	char *buf;
+	int bufsz = sizeof(struct il_chain_noise_data) * 4 + 100;
+	ssize_t ret;
+	struct il_chain_noise_data *data;
+
+	data = &il->chain_noise_data;
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf) {
+		IL_ERR("Can not allocate Buffer\n");
+		return -ENOMEM;
+	}
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n",
+		      data->active_chains);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n",
+		      data->chain_noise_a);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n",
+		      data->chain_noise_b);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n",
+		      data->chain_noise_c);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n",
+		      data->chain_signal_a);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n",
+		      data->chain_signal_b);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n",
+		      data->chain_signal_c);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n",
+		      data->beacon_count);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t");
+	for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos, " %u",
+			      data->disconn_array[cnt]);
+	}
+	pos += scnprintf(buf + pos, bufsz - pos, "\n");
+	pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t");
+	for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
+		pos +=
+		    scnprintf(buf + pos, bufsz - pos, " %u",
+			      data->delta_gain_code[cnt]);
+	}
+	pos += scnprintf(buf + pos, bufsz - pos, "\n");
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n",
+		      data->radio_write);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n",
+		      data->state);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t
+il_dbgfs_power_save_status_read(struct file *file, char __user *user_buf,
+				size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	char buf[60];
+	int pos = 0;
+	const size_t bufsz = sizeof(buf);
+	u32 pwrsave_status;
+
+	pwrsave_status =
+	    _il_rd(il, CSR_GP_CNTRL) & CSR_GP_REG_POWER_SAVE_STATUS_MSK;
+
+	pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "%s\n",
+		      (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
+		      (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
+		      (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
+		      "error");
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_clear_ucode_stats_write(struct file *file,
+				 const char __user *user_buf, size_t count,
+				 loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	char buf[8];
+	int buf_size;
+	int clear;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	if (sscanf(buf, "%d", &clear) != 1)
+		return -EINVAL;
+
+	/* make request to uCode to retrieve stats information */
+	mutex_lock(&il->mutex);
+	il_send_stats_request(il, CMD_SYNC, true);
+	mutex_unlock(&il->mutex);
+
+	return count;
+}
+
+static ssize_t
+il_dbgfs_rxon_flags_read(struct file *file, char __user *user_buf,
+			 size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	int len = 0;
+	char buf[20];
+
+	len = sprintf(buf, "0x%04X\n", le32_to_cpu(il->active.flags));
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t
+il_dbgfs_rxon_filter_flags_read(struct file *file, char __user *user_buf,
+				size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	int len = 0;
+	char buf[20];
+
+	len =
+	    sprintf(buf, "0x%04X\n", le32_to_cpu(il->active.filter_flags));
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t
+il_dbgfs_fh_reg_read(struct file *file, char __user *user_buf, size_t count,
+		     loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	char *buf;
+	int pos = 0;
+	ssize_t ret = -EFAULT;
+
+	if (il->ops->dump_fh) {
+		ret = pos = il->ops->dump_fh(il, &buf, true);
+		if (buf) {
+			ret =
+			    simple_read_from_buffer(user_buf, count, ppos, buf,
+						    pos);
+			kfree(buf);
+		}
+	}
+
+	return ret;
+}
+
+static ssize_t
+il_dbgfs_missed_beacon_read(struct file *file, char __user *user_buf,
+			    size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	int pos = 0;
+	char buf[12];
+	const size_t bufsz = sizeof(buf);
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "%d\n",
+		      il->missed_beacon_threshold);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_missed_beacon_write(struct file *file, const char __user *user_buf,
+			     size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	char buf[8];
+	int buf_size;
+	int missed;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	if (sscanf(buf, "%d", &missed) != 1)
+		return -EINVAL;
+
+	if (missed < IL_MISSED_BEACON_THRESHOLD_MIN ||
+	    missed > IL_MISSED_BEACON_THRESHOLD_MAX)
+		il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
+	else
+		il->missed_beacon_threshold = missed;
+
+	return count;
+}
+
+static ssize_t
+il_dbgfs_force_reset_read(struct file *file, char __user *user_buf,
+			  size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	int pos = 0;
+	char buf[300];
+	const size_t bufsz = sizeof(buf);
+	struct il_force_reset *force_reset;
+
+	force_reset = &il->force_reset;
+
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "\tnumber of reset requests: %d\n",
+		      force_reset->reset_request_count);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "\tnumber of successful reset requests: %d\n",
+		      force_reset->reset_success_count);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos,
+		      "\tnumber of rejected reset requests: %d\n",
+		      force_reset->reset_reject_count);
+	pos +=
+	    scnprintf(buf + pos, bufsz - pos, "\treset duration: %lu\n",
+		      force_reset->reset_duration);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_force_reset_write(struct file *file, const char __user *user_buf,
+			   size_t count, loff_t *ppos)
+{
+	int ret;
+	struct il_priv *il = file->private_data;
+
+	ret = il_force_reset(il, true);
+
+	return ret ? ret : count;
+}
+
+static ssize_t
+il_dbgfs_wd_timeout_write(struct file *file, const char __user *user_buf,
+			  size_t count, loff_t *ppos)
+{
+	struct il_priv *il = file->private_data;
+	char buf[8];
+	int buf_size;
+	int timeout;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	if (sscanf(buf, "%d", &timeout) != 1)
+		return -EINVAL;
+	if (timeout < 0 || timeout > IL_MAX_WD_TIMEOUT)
+		timeout = IL_DEF_WD_TIMEOUT;
+
+	il->cfg->wd_timeout = timeout;
+	il_setup_watchdog(il);
+	return count;
+}
+
+DEBUGFS_READ_FILE_OPS(rx_stats);
+DEBUGFS_READ_FILE_OPS(tx_stats);
+DEBUGFS_READ_FILE_OPS(rx_queue);
+DEBUGFS_READ_FILE_OPS(tx_queue);
+DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
+DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
+DEBUGFS_READ_FILE_OPS(ucode_general_stats);
+DEBUGFS_READ_FILE_OPS(sensitivity);
+DEBUGFS_READ_FILE_OPS(chain_noise);
+DEBUGFS_READ_FILE_OPS(power_save_status);
+DEBUGFS_WRITE_FILE_OPS(clear_ucode_stats);
+DEBUGFS_WRITE_FILE_OPS(clear_traffic_stats);
+DEBUGFS_READ_FILE_OPS(fh_reg);
+DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
+DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
+DEBUGFS_READ_FILE_OPS(rxon_flags);
+DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
+DEBUGFS_WRITE_FILE_OPS(wd_timeout);
+
+/*
+ * Create the debugfs files and directories.
+ */
+int
+il_dbgfs_register(struct il_priv *il, const char *name)
+{
+	struct dentry *phyd = il->hw->wiphy->debugfsdir;
+	struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
+
+	dir_drv = debugfs_create_dir(name, phyd);
+	if (!dir_drv)
+		return -ENOMEM;
+
+	il->debugfs_dir = dir_drv;
+
+	dir_data = debugfs_create_dir("data", dir_drv);
+	if (!dir_data)
+		goto err;
+	dir_rf = debugfs_create_dir("rf", dir_drv);
+	if (!dir_rf)
+		goto err;
+	dir_debug = debugfs_create_dir("debug", dir_drv);
+	if (!dir_debug)
+		goto err;
+
+	DEBUGFS_ADD_FILE(nvm, dir_data, 0400);
+	DEBUGFS_ADD_FILE(sram, dir_data, 0600);
+	DEBUGFS_ADD_FILE(stations, dir_data, 0400);
+	DEBUGFS_ADD_FILE(channels, dir_data, 0400);
+	DEBUGFS_ADD_FILE(status, dir_data, 0400);
+	DEBUGFS_ADD_FILE(interrupt, dir_data, 0600);
+	DEBUGFS_ADD_FILE(qos, dir_data, 0400);
+	DEBUGFS_ADD_FILE(disable_ht40, dir_data, 0600);
+	DEBUGFS_ADD_FILE(rx_stats, dir_debug, 0400);
+	DEBUGFS_ADD_FILE(tx_stats, dir_debug, 0400);
+	DEBUGFS_ADD_FILE(rx_queue, dir_debug, 0400);
+	DEBUGFS_ADD_FILE(tx_queue, dir_debug, 0400);
+	DEBUGFS_ADD_FILE(power_save_status, dir_debug, 0400);
+	DEBUGFS_ADD_FILE(clear_ucode_stats, dir_debug, 0200);
+	DEBUGFS_ADD_FILE(clear_traffic_stats, dir_debug, 0200);
+	DEBUGFS_ADD_FILE(fh_reg, dir_debug, 0400);
+	DEBUGFS_ADD_FILE(missed_beacon, dir_debug, 0600);
+	DEBUGFS_ADD_FILE(force_reset, dir_debug, 0600);
+	DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, 0400);
+	DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, 0400);
+	DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, 0400);
+
+	if (il->cfg->sensitivity_calib_by_driver)
+		DEBUGFS_ADD_FILE(sensitivity, dir_debug, 0400);
+	if (il->cfg->chain_noise_calib_by_driver)
+		DEBUGFS_ADD_FILE(chain_noise, dir_debug, 0400);
+	DEBUGFS_ADD_FILE(rxon_flags, dir_debug, 0400);
+	DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, 0400);
+	DEBUGFS_ADD_FILE(wd_timeout, dir_debug, 0200);
+	if (il->cfg->sensitivity_calib_by_driver)
+		DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
+				 &il->disable_sens_cal);
+	if (il->cfg->chain_noise_calib_by_driver)
+		DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
+				 &il->disable_chain_noise_cal);
+	DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf, &il->disable_tx_power_cal);
+	return 0;
+
+err:
+	IL_ERR("Can't create the debugfs directory\n");
+	il_dbgfs_unregister(il);
+	return -ENOMEM;
+}
+EXPORT_SYMBOL(il_dbgfs_register);
+
+/*
+ * Remove the debugfs files and directories.
+ */
+void
+il_dbgfs_unregister(struct il_priv *il)
+{
+	if (!il->debugfs_dir)
+		return;
+
+	debugfs_remove_recursive(il->debugfs_dir);
+	il->debugfs_dir = NULL;
+}
+EXPORT_SYMBOL(il_dbgfs_unregister);
diff --git a/drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h b/drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h
new file mode 100644
index 0000000..85fe48e
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/iwl-spectrum.h
@@ -0,0 +1,92 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __il_spectrum_h__
+#define __il_spectrum_h__
+enum {				/* ieee80211_basic_report.map */
+	IEEE80211_BASIC_MAP_BSS = (1 << 0),
+	IEEE80211_BASIC_MAP_OFDM = (1 << 1),
+	IEEE80211_BASIC_MAP_UNIDENTIFIED = (1 << 2),
+	IEEE80211_BASIC_MAP_RADAR = (1 << 3),
+	IEEE80211_BASIC_MAP_UNMEASURED = (1 << 4),
+	/* Bits 5-7 are reserved */
+};
+
+struct ieee80211_basic_report {
+	u8 channel;
+	__le64 start_time;
+	__le16 duration;
+	u8 map;
+} __packed;
+
+enum {				/* ieee80211_measurement_request.mode */
+	/* Bit 0 is reserved */
+	IEEE80211_MEASUREMENT_ENABLE = (1 << 1),
+	IEEE80211_MEASUREMENT_REQUEST = (1 << 2),
+	IEEE80211_MEASUREMENT_REPORT = (1 << 3),
+	/* Bits 4-7 are reserved */
+};
+
+enum {
+	IEEE80211_REPORT_BASIC = 0,	/* required */
+	IEEE80211_REPORT_CCA = 1,	/* optional */
+	IEEE80211_REPORT_RPI = 2,	/* optional */
+	/* 3-255 reserved */
+};
+
+struct ieee80211_measurement_params {
+	u8 channel;
+	__le64 start_time;
+	__le16 duration;
+} __packed;
+
+struct ieee80211_info_element {
+	u8 id;
+	u8 len;
+	u8 data[0];
+} __packed;
+
+struct ieee80211_measurement_request {
+	struct ieee80211_info_element ie;
+	u8 token;
+	u8 mode;
+	u8 type;
+	struct ieee80211_measurement_params params[0];
+} __packed;
+
+struct ieee80211_measurement_report {
+	struct ieee80211_info_element ie;
+	u8 token;
+	u8 mode;
+	u8 type;
+	union {
+		struct ieee80211_basic_report basic[0];
+	} u;
+} __packed;
+
+#endif
diff --git a/drivers/net/wireless/intel/iwlegacy/prph.h b/drivers/net/wireless/intel/iwlegacy/prph.h
new file mode 100644
index 0000000..ffec4b4
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlegacy/prph.h
@@ -0,0 +1,522 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef	__il_prph_h__
+#define __il_prph_h__
+
+/*
+ * Registers in this file are internal, not PCI bus memory mapped.
+ * Driver accesses these via HBUS_TARG_PRPH_* registers.
+ */
+#define PRPH_BASE	(0x00000)
+#define PRPH_END	(0xFFFFF)
+
+/* APMG (power management) constants */
+#define APMG_BASE			(PRPH_BASE + 0x3000)
+#define APMG_CLK_CTRL_REG		(APMG_BASE + 0x0000)
+#define APMG_CLK_EN_REG			(APMG_BASE + 0x0004)
+#define APMG_CLK_DIS_REG		(APMG_BASE + 0x0008)
+#define APMG_PS_CTRL_REG		(APMG_BASE + 0x000c)
+#define APMG_PCIDEV_STT_REG		(APMG_BASE + 0x0010)
+#define APMG_RFKILL_REG			(APMG_BASE + 0x0014)
+#define APMG_RTC_INT_STT_REG		(APMG_BASE + 0x001c)
+#define APMG_RTC_INT_MSK_REG		(APMG_BASE + 0x0020)
+#define APMG_DIGITAL_SVR_REG		(APMG_BASE + 0x0058)
+#define APMG_ANALOG_SVR_REG		(APMG_BASE + 0x006C)
+
+#define APMS_CLK_VAL_MRB_FUNC_MODE	(0x00000001)
+#define APMG_CLK_VAL_DMA_CLK_RQT	(0x00000200)
+#define APMG_CLK_VAL_BSM_CLK_RQT	(0x00000800)
+
+#define APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS	(0x00400000)
+#define APMG_PS_CTRL_VAL_RESET_REQ		(0x04000000)
+#define APMG_PS_CTRL_MSK_PWR_SRC		(0x03000000)
+#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN		(0x00000000)
+#define APMG_PS_CTRL_VAL_PWR_SRC_MAX		(0x01000000)	/* 3945 only */
+#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX		(0x02000000)
+#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK	(0x000001E0)	/* bit 8:5 */
+#define APMG_SVR_DIGITAL_VOLTAGE_1_32		(0x00000060)
+
+#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS		(0x00000800)
+
+/**
+ * BSM (Bootstrap State Machine)
+ *
+ * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
+ * in special SRAM that does not power down when the embedded control
+ * processor is sleeping (e.g. for periodic power-saving shutdowns of radio).
+ *
+ * When powering back up after sleeps (or during initial uCode load), the BSM
+ * internally loads the short bootstrap program from the special SRAM into the
+ * embedded processor's instruction SRAM, and starts the processor so it runs
+ * the bootstrap program.
+ *
+ * This bootstrap program loads (via PCI busmaster DMA) instructions and data
+ * images for a uCode program from host DRAM locations.  The host driver
+ * indicates DRAM locations and sizes for instruction and data images via the
+ * four BSM_DRAM_* registers.  Once the bootstrap program loads the new program,
+ * the new program starts automatically.
+ *
+ * The uCode used for open-source drivers includes two programs:
+ *
+ * 1)  Initialization -- performs hardware calibration and sets up some
+ *     internal data, then notifies host via "initialize alive" notification
+ *     (struct il_init_alive_resp) that it has completed all of its work.
+ *     After signal from host, it then loads and starts the runtime program.
+ *     The initialization program must be used when initially setting up the
+ *     NIC after loading the driver.
+ *
+ * 2)  Runtime/Protocol -- performs all normal runtime operations.  This
+ *     notifies host via "alive" notification (struct il_alive_resp) that it
+ *     is ready to be used.
+ *
+ * When initializing the NIC, the host driver does the following procedure:
+ *
+ * 1)  Load bootstrap program (instructions only, no data image for bootstrap)
+ *     into bootstrap memory.  Use dword writes starting at BSM_SRAM_LOWER_BOUND
+ *
+ * 2)  Point (via BSM_DRAM_*) to the "initialize" uCode data and instruction
+ *     images in host DRAM.
+ *
+ * 3)  Set up BSM to copy from BSM SRAM into uCode instruction SRAM when asked:
+ *     BSM_WR_MEM_SRC_REG = 0
+ *     BSM_WR_MEM_DST_REG = RTC_INST_LOWER_BOUND
+ *     BSM_WR_DWCOUNT_REG = # dwords in bootstrap instruction image
+ *
+ * 4)  Load bootstrap into instruction SRAM:
+ *     BSM_WR_CTRL_REG = BSM_WR_CTRL_REG_BIT_START
+ *
+ * 5)  Wait for load completion:
+ *     Poll BSM_WR_CTRL_REG for BSM_WR_CTRL_REG_BIT_START = 0
+ *
+ * 6)  Enable future boot loads whenever NIC's power management triggers it:
+ *     BSM_WR_CTRL_REG = BSM_WR_CTRL_REG_BIT_START_EN
+ *
+ * 7)  Start the NIC by removing all reset bits:
+ *     CSR_RESET = 0
+ *
+ *     The bootstrap uCode (already in instruction SRAM) loads initialization
+ *     uCode.  Initialization uCode performs data initialization, sends
+ *     "initialize alive" notification to host, and waits for a signal from
+ *     host to load runtime code.
+ *
+ * 8)  Point (via BSM_DRAM_*) to the "runtime" uCode data and instruction
+ *     images in host DRAM.  The last register loaded must be the instruction
+ *     byte count register ("1" in MSbit tells initialization uCode to load
+ *     the runtime uCode):
+ *     BSM_DRAM_INST_BYTECOUNT_REG = byte count | BSM_DRAM_INST_LOAD
+ *
+ * 9)  Wait for "alive" notification, then issue normal runtime commands.
+ *
+ * Data caching during power-downs:
+ *
+ * Just before the embedded controller powers down (e.g for automatic
+ * power-saving modes, or for RFKILL), uCode stores (via PCI busmaster DMA)
+ * a current snapshot of the embedded processor's data SRAM into host DRAM.
+ * This caches the data while the embedded processor's memory is powered down.
+ * Location and size are controlled by BSM_DRAM_DATA_* registers.
+ *
+ * NOTE:  Instruction SRAM does not need to be saved, since that doesn't
+ *        change during operation; the original image (from uCode distribution
+ *        file) can be used for reload.
+ *
+ * When powering back up, the BSM loads the bootstrap program.  Bootstrap looks
+ * at the BSM_DRAM_* registers, which now point to the runtime instruction
+ * image and the cached (modified) runtime data (*not* the initialization
+ * uCode).  Bootstrap reloads these runtime images into SRAM, and restarts the
+ * uCode from where it left off before the power-down.
+ *
+ * NOTE:  Initialization uCode does *not* run as part of the save/restore
+ *        procedure.
+ *
+ * This save/restore method is mostly for autonomous power management during
+ * normal operation (result of C_POWER_TBL).  Platform suspend/resume and
+ * RFKILL should use complete restarts (with total re-initialization) of uCode,
+ * allowing total shutdown (including BSM memory).
+ *
+ * Note that, during normal operation, the host DRAM that held the initial
+ * startup data for the runtime code is now being used as a backup data cache
+ * for modified data!  If you need to completely re-initialize the NIC, make
+ * sure that you use the runtime data image from the uCode distribution file,
+ * not the modified/saved runtime data.  You may want to store a separate
+ * "clean" runtime data image in DRAM to avoid disk reads of distribution file.
+ */
+
+/* BSM bit fields */
+#define BSM_WR_CTRL_REG_BIT_START     (0x80000000)	/* start boot load now */
+#define BSM_WR_CTRL_REG_BIT_START_EN  (0x40000000)	/* enable boot after pwrup */
+#define BSM_DRAM_INST_LOAD            (0x80000000)	/* start program load now */
+
+/* BSM addresses */
+#define BSM_BASE                     (PRPH_BASE + 0x3400)
+#define BSM_END                      (PRPH_BASE + 0x3800)
+
+#define BSM_WR_CTRL_REG              (BSM_BASE + 0x000)	/* ctl and status */
+#define BSM_WR_MEM_SRC_REG           (BSM_BASE + 0x004)	/* source in BSM mem */
+#define BSM_WR_MEM_DST_REG           (BSM_BASE + 0x008)	/* dest in SRAM mem */
+#define BSM_WR_DWCOUNT_REG           (BSM_BASE + 0x00C)	/* dword count */
+#define BSM_WR_STATUS_REG            (BSM_BASE + 0x010)	/* bit 0:  1 == done */
+
+/*
+ * Pointers and size regs for bootstrap load and data SRAM save/restore.
+ * NOTE:  3945 pointers use bits 31:0 of DRAM address.
+ *        4965 pointers use bits 35:4 of DRAM address.
+ */
+#define BSM_DRAM_INST_PTR_REG        (BSM_BASE + 0x090)
+#define BSM_DRAM_INST_BYTECOUNT_REG  (BSM_BASE + 0x094)
+#define BSM_DRAM_DATA_PTR_REG        (BSM_BASE + 0x098)
+#define BSM_DRAM_DATA_BYTECOUNT_REG  (BSM_BASE + 0x09C)
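+
+/*
+ * A minimal sketch of pointing the BSM at uCode images in host DRAM,
+ * assuming the driver's il_wr_prph() accessor and 4965-style pointers
+ * (bits 35:4 of the DMA address, i.e. addr >> 4; 3945 would write the
+ * full 32-bit address instead):
+ *
+ *	il_wr_prph(il, BSM_DRAM_INST_PTR_REG, inst_dma_addr >> 4);
+ *	il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, data_dma_addr >> 4);
+ *	il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
+ *	il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG,
+ *		   inst_len | BSM_DRAM_INST_LOAD);
+ *
+ * The instruction byte-count register is written last; BSM_DRAM_INST_LOAD
+ * in its MSbit tells the initialization uCode to load the new images.
+ */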
+
+/*
+ * BSM special memory, stays powered on during power-save sleeps.
+ * Read/write, address range from LOWER_BOUND to (LOWER_BOUND + SIZE -1)
+ */
+#define BSM_SRAM_LOWER_BOUND         (PRPH_BASE + 0x3800)
+#define BSM_SRAM_SIZE			(1024)	/* bytes */
+
+/* 3945 Tx scheduler registers */
+#define ALM_SCD_BASE                        (PRPH_BASE + 0x2E00)
+#define ALM_SCD_MODE_REG                    (ALM_SCD_BASE + 0x000)
+#define ALM_SCD_ARASTAT_REG                 (ALM_SCD_BASE + 0x004)
+#define ALM_SCD_TXFACT_REG                  (ALM_SCD_BASE + 0x010)
+#define ALM_SCD_TXF4MF_REG                  (ALM_SCD_BASE + 0x014)
+#define ALM_SCD_TXF5MF_REG                  (ALM_SCD_BASE + 0x020)
+#define ALM_SCD_SBYP_MODE_1_REG             (ALM_SCD_BASE + 0x02C)
+#define ALM_SCD_SBYP_MODE_2_REG             (ALM_SCD_BASE + 0x030)
+
+/**
+ * Tx Scheduler
+ *
+ * The Tx Scheduler selects the next frame to be transmitted, choosing TFDs
+ * (Transmit Frame Descriptors) from up to 16 circular Tx queues resident in
+ * host DRAM.  It steers each frame's Tx command (which contains the frame
+ * data) into one of up to 7 prioritized Tx DMA FIFO channels within the
+ * device.  A queue maps to only one (selectable by driver) Tx DMA channel,
+ * but one DMA channel may take input from several queues.
+ *
+ * Tx DMA FIFOs have dedicated purposes.  For 4965, they are used as follows
+ * (cf. default_queue_to_tx_fifo in 4965.c):
+ *
+ * 0 -- EDCA BK (background) frames, lowest priority
+ * 1 -- EDCA BE (best effort) frames, normal priority
+ * 2 -- EDCA VI (video) frames, higher priority
+ * 3 -- EDCA VO (voice) and management frames, highest priority
+ * 4 -- Commands (e.g. RXON, etc.)
+ * 5 -- unused (HCCA)
+ * 6 -- unused (HCCA)
+ * 7 -- not used by driver (device-internal only)
+ *
+ *
+ * Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6.
+ * In addition, driver can map the remaining queues to Tx DMA/FIFO
+ * channels 0-3 to support 11n aggregation via EDCA DMA channels.
+ *
+ * The driver sets up each queue to work in one of two modes:
+ *
+ * 1)  Scheduler-Ack, in which the scheduler automatically supports a
+ *     block-ack (BA) win of up to 64 TFDs.  In this mode, each queue
+ *     contains TFDs for a unique combination of Recipient Address (RA)
+ *     and Traffic Identifier (TID), that is, traffic of a given
+ *     Quality-Of-Service (QOS) priority, destined for a single station.
+ *
+ *     In scheduler-ack mode, the scheduler keeps track of the Tx status of
+ *     each frame within the BA win, including whether it's been transmitted,
+ *     and whether it's been acknowledged by the receiving station.  The device
+ *     automatically processes block-acks received from the receiving STA,
+ *     and reschedules un-acked frames to be retransmitted (successful
+ *     Tx completion may end up being out-of-order).
+ *
+ *     The driver must maintain the queue's Byte Count table in host DRAM
+ *     (struct il4965_sched_queue_byte_cnt_tbl) for this mode.
+ *     This mode does not support fragmentation.
+ *
+ * 2)  FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order.
+ *     The device may automatically retry Tx, but will retry only one frame
+ *     at a time, until receiving ACK from receiving station, or reaching
+ *     retry limit and giving up.
+ *
+ *     The command queue (#4/#9) must use this mode!
+ *     This mode does not require use of the Byte Count table in host DRAM.
+ *
+ * Driver controls scheduler operation via 3 means:
+ * 1)  Scheduler registers
+ * 2)  Shared scheduler data base in internal 4965 SRAM
+ * 3)  Shared data in host DRAM
+ *
+ * Initialization:
+ *
+ * When loading, driver should allocate memory for:
+ * 1)  16 TFD circular buffers, each with space for (typically) 256 TFDs.
+ * 2)  16 Byte Count circular buffers in 16 KBytes contiguous memory
+ *     (1024 bytes for each queue).
+ *
+ * After receiving "Alive" response from uCode, driver must initialize
+ * the scheduler (especially for queue #4/#9, the command queue, otherwise
+ * the driver can't issue commands!):
+ */
+
+/**
+ * Max Tx win size is the max number of contiguous TFDs that the scheduler
+ * can keep track of at one time when creating block-ack chains of frames.
+ * Note that "64" matches the number of ack bits in a block-ack packet.
+ * Driver should use SCD_WIN_SIZE and SCD_FRAME_LIMIT values to initialize
+ * IL49_SCD_CONTEXT_QUEUE_OFFSET(x) values.
+ */
+#define SCD_WIN_SIZE				64
+#define SCD_FRAME_LIMIT				64
+
+/* SCD registers are internal, must be accessed via HBUS_TARG_PRPH regs */
+#define IL49_SCD_START_OFFSET		0xa02c00
+
+/*
+ * 4965 tells driver SRAM address for internal scheduler structs via this reg.
+ * Value is valid only after "Alive" response from uCode.
+ */
+#define IL49_SCD_SRAM_BASE_ADDR           (IL49_SCD_START_OFFSET + 0x0)
+
+/*
+ * Driver may need to update queue-empty bits after changing queue's
+ * write and read pointers (idxes) during (re-)initialization (i.e. when
+ * scheduler is not tracking what's happening).
+ * Bit fields:
+ * 31-16:  Write mask -- 1: update empty bit, 0: don't change empty bit
+ * 15-00:  Empty state, one for each queue -- 1: empty, 0: non-empty
+ * NOTE:  This register is not used by Linux driver.
+ */
+#define IL49_SCD_EMPTY_BITS               (IL49_SCD_START_OFFSET + 0x4)
+
+/*
+ * Physical base address of array of byte count (BC) circular buffers (CBs).
+ * Each Tx queue has a BC CB in host DRAM to support Scheduler-ACK mode.
+ * This register points to BC CB for queue 0, must be on 1024-byte boundary.
+ * Others are spaced by 1024 bytes.
+ * Each BC CB is 2 bytes * (256 + 64) = 640 bytes, followed by 384 bytes pad.
+ * (Index into a queue's BC CB) = (idx into queue's TFD CB) = (SSN & 0xff).
+ * Bit fields:
+ * 25-00:  Byte Count CB physical address [35:10], must be 1024-byte aligned.
+ */
+#define IL49_SCD_DRAM_BASE_ADDR           (IL49_SCD_START_OFFSET + 0x10)
+
+/*
+ * Enables any/all Tx DMA/FIFO channels.
+ * Scheduler generates requests for only the active channels.
+ * Set this to 0xff to enable all 8 channels (normal usage).
+ * Bit fields:
+ *  7- 0:  Enable (1), disable (0), one bit for each channel 0-7
+ */
+#define IL49_SCD_TXFACT                   (IL49_SCD_START_OFFSET + 0x1c)
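+
+/*
+ * For example (assuming the driver's il_wr_prph() accessor), the normal
+ * usage noted above would be:
+ *
+ *	il_wr_prph(il, IL49_SCD_TXFACT, 0xff);
+ */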
+/*
+ * Queue (x) Write Pointers (idxes, really!), one for each Tx queue.
+ * Initialized and updated by driver as new TFDs are added to queue.
+ * NOTE:  If using Block Ack, idx must correspond to frame's
+ *        Start Sequence Number; idx = (SSN & 0xff)
+ * NOTE:  Alternative to HBUS_TARG_WRPTR, which is what Linux driver uses?
+ */
+#define IL49_SCD_QUEUE_WRPTR(x)  (IL49_SCD_START_OFFSET + 0x24 + (x) * 4)
+
+/*
+ * Queue (x) Read Pointers (idxes, really!), one for each Tx queue.
+ * For FIFO mode, idx indicates next frame to transmit.
+ * For Scheduler-ACK mode, idx indicates first frame in Tx win.
+ * Initialized by driver, updated by scheduler.
+ */
+#define IL49_SCD_QUEUE_RDPTR(x)  (IL49_SCD_START_OFFSET + 0x64 + (x) * 4)
+
+/*
+ * Select which queues work in chain mode (1) vs. not (0).
+ * Use chain mode to build chains of aggregated frames.
+ * Bit fields:
+ * 31-16:  Reserved
+ * 15-00:  Mode, one bit for each queue -- 1: Chain mode, 0: one-at-a-time
+ * NOTE:  If driver sets up a queue for chain mode, it should also set up
+ *        Scheduler-ACK mode for that queue, via SCD_QUEUE_STATUS_BITS(x).
+ */
+#define IL49_SCD_QUEUECHAIN_SEL  (IL49_SCD_START_OFFSET + 0xd0)
+
+/*
+ * Select which queues interrupt driver when scheduler increments
+ * a queue's read pointer (idx).
+ * Bit fields:
+ * 31-16:  Reserved
+ * 15-00:  Interrupt enable, one bit for each queue -- 1: enabled, 0: disabled
+ * NOTE:  This functionality is apparently a no-op; driver relies on interrupts
+ *        from Rx queue to read Tx command responses and update Tx queues.
+ */
+#define IL49_SCD_INTERRUPT_MASK  (IL49_SCD_START_OFFSET + 0xe4)
+
+/*
+ * Queue search status registers.  One for each queue.
+ * Sets up queue mode and assigns queue to Tx DMA channel.
+ * Bit fields:
+ * 19-10: Write mask/enable bits for bits 0-9
+ *     9: Driver should init to "0"
+ *     8: Scheduler-ACK mode (1), non-Scheduler-ACK (i.e. FIFO) mode (0).
+ *        Driver should init to "1" for aggregation mode, or "0" otherwise.
+ *   7-6: Driver should init to "0"
+ *     5: Window Size Left; indicates whether scheduler can request
+ *        another TFD, based on win size, etc.  Driver should init
+ *        this bit to "1" for aggregation mode, or "0" for non-agg.
+ *   4-1: Tx FIFO to use (range 0-7).
+ *     0: Queue is active (1), not active (0).
+ * Other bits should be written as "0"
+ *
+ * NOTE:  If enabling Scheduler-ACK mode, chain mode should also be enabled
+ *        via SCD_QUEUECHAIN_SEL.
+ */
+#define IL49_SCD_QUEUE_STATUS_BITS(x)\
+	(IL49_SCD_START_OFFSET + 0x104 + (x) * 4)
+
+/* Bit field positions */
+#define IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE	(0)
+#define IL49_SCD_QUEUE_STTS_REG_POS_TXF	(1)
+#define IL49_SCD_QUEUE_STTS_REG_POS_WSL	(5)
+#define IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK	(8)
+
+/* Write masks */
+#define IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN	(10)
+#define IL49_SCD_QUEUE_STTS_REG_MSK		(0x0007FC00)
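+
+/*
+ * Putting the above together -- a sketch of activating queue txq_id on
+ * Tx FIFO tx_fifo, with scd_ack = 1 for aggregation mode (roughly what
+ * the 4965 queue-status setup code does):
+ *
+ *	il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
+ *		   (1 << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+ *		   (tx_fifo << IL49_SCD_QUEUE_STTS_REG_POS_TXF) |
+ *		   (scd_ack << IL49_SCD_QUEUE_STTS_REG_POS_WSL) |
+ *		   (scd_ack << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
+ *		   IL49_SCD_QUEUE_STTS_REG_MSK);
+ */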
+
+/**
+ * 4965 internal SRAM structures for scheduler, shared with driver ...
+ *
+ * Driver should clear and initialize the following areas after receiving
+ * "Alive" response from 4965 uCode, i.e. after initial
+ * uCode load, or after a uCode load done for error recovery:
+ *
+ * SCD_CONTEXT_DATA_OFFSET (size 128 bytes)
+ * SCD_TX_STTS_BITMAP_OFFSET (size 256 bytes)
+ * SCD_TRANSLATE_TBL_OFFSET (size 32 bytes)
+ *
+ * Driver accesses SRAM via HBUS_TARG_MEM_* registers.
+ * Driver reads base address of this scheduler area from SCD_SRAM_BASE_ADDR.
+ * All OFFSET values must be added to this base address.
+ */
+
+/*
+ * Queue context.  One 8-byte entry for each of 16 queues.
+ *
+ * Driver should clear this entire area (size 0x80) to 0 after receiving
+ * "Alive" notification from uCode.  Additionally, driver should init
+ * each queue's entry as follows:
+ *
+ * LS Dword bit fields:
+ *  0-06:  Max Tx win size for Scheduler-ACK.  Driver should init to 64.
+ *
+ * MS Dword bit fields:
+ * 16-22:  Frame limit.  Driver should init to 10 (0xa).
+ *
+ * Driver should init all other bits to 0.
+ *
+ * Init must be done after driver receives "Alive" response from 4965 uCode,
+ * and when setting up queue for aggregation.
+ */
+#define IL49_SCD_CONTEXT_DATA_OFFSET			0x380
+#define IL49_SCD_CONTEXT_QUEUE_OFFSET(x) \
+			(IL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
+
+#define IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS		(0)
+#define IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK		(0x0000007F)
+#define IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS	(16)
+#define IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK	(0x007F0000)
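+
+/*
+ * Sketch of the aggregation-mode init described above, using the
+ * HBUS_TARG_MEM accessors; il->scd_base_addr is the value read back
+ * from IL49_SCD_SRAM_BASE_ADDR after "Alive":
+ *
+ *	u32 a = il->scd_base_addr + IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id);
+ *
+ *	il_write_targ_mem(il, a,
+ *		(SCD_WIN_SIZE << IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
+ *		IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
+ *	il_write_targ_mem(il, a + sizeof(u32),
+ *		(SCD_FRAME_LIMIT << IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+ *		IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
+ */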
+
+/*
+ * Tx Status Bitmap
+ *
+ * Driver should clear this entire area (size 0x100) to 0 after receiving
+ * "Alive" notification from uCode.  Area is used only by device itself;
+ * no other support (besides clearing) is required from driver.
+ */
+#define IL49_SCD_TX_STTS_BITMAP_OFFSET		0x400
+
+/*
+ * RAxTID to queue translation mapping.
+ *
+ * When queue is in Scheduler-ACK mode, frames placed in that queue must be
+ * for only one combination of receiver address (RA) and traffic ID (TID), i.e.
+ * one QOS priority level destined for one station (for this wireless link,
+ * not final destination).  The SCD_TRANSLATE_TBL area provides 16 16-bit
+ * mappings, one for each of the 16 queues.  If queue is not in Scheduler-ACK
+ * mode, the device ignores the mapping value.
+ *
+ * Bit fields, for each 16-bit map:
+ * 15-9:  Reserved, set to 0
+ *  8-4:  Index into device's station table for recipient station
+ *  3-0:  Traffic ID (tid), range 0-15
+ *
+ * Driver should clear this entire area (size 32 bytes) to 0 after receiving
+ * "Alive" notification from uCode.  To update a 16-bit map value, driver
+ * must read a dword-aligned value from device SRAM, replace the 16-bit map
+ * value of interest, and write the dword value back into device SRAM.
+ */
+#define IL49_SCD_TRANSLATE_TBL_OFFSET		0x500
+
+/* Find translation table dword to read/write for given queue */
+#define IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
+	((IL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc)
+
+#define IL_SCD_TXFIFO_POS_TID			(0)
+#define IL_SCD_TXFIFO_POS_RA			(4)
+#define IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK	(0x01FF)
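+
+/*
+ * Sketch of the dword read-modify-write described above; each dword of
+ * the translate table holds two 16-bit maps, odd-numbered queues in the
+ * high half (sta_id and tid come from the caller):
+ *
+ *	u32 addr = il->scd_base_addr +
+ *		   IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
+ *	u16 map = ((sta_id << IL_SCD_TXFIFO_POS_RA) |
+ *		   (tid << IL_SCD_TXFIFO_POS_TID)) &
+ *		  IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
+ *	u32 dw = il_read_targ_mem(il, addr);
+ *
+ *	if (txq_id & 0x1)
+ *		dw = (map << 16) | (dw & 0x0000ffff);
+ *	else
+ *		dw = map | (dw & 0xffff0000);
+ *	il_write_targ_mem(il, addr, dw);
+ */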
+
+/*********************** END TX SCHEDULER *************************************/
+
+#endif /* __il_prph_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
new file mode 100644
index 0000000..e5a2fc7
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -0,0 +1,158 @@
+config IWLWIFI
+	tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi)"
+	depends on PCI && MAC80211 && HAS_IOMEM
+	select FW_LOADER
+	---help---
+	  Select to build the driver supporting the:
+
+	  Intel Wireless WiFi Link Next-Gen AGN
+
+	  This option enables support for use with the following hardware:
+		Intel Wireless WiFi Link 6250AGN Adapter
+		Intel 6000 Series Wi-Fi Adapters (6200AGN and 6300AGN)
+		Intel WiFi Link 1000BGN
+		Intel Wireless WiFi 5150AGN
+		Intel Wireless WiFi 5100AGN, 5300AGN, and 5350AGN
+		Intel 6005 Series Wi-Fi Adapters
+		Intel 6030 Series Wi-Fi Adapters
+		Intel Wireless WiFi Link 6150BGN 2 Adapter
+		Intel 100 Series Wi-Fi Adapters (100BGN and 130BGN)
+		Intel 2000 Series Wi-Fi Adapters
+		Intel 7260 Wi-Fi Adapter
+		Intel 3160 Wi-Fi Adapter
+		Intel 7265 Wi-Fi Adapter
+		Intel 8260 Wi-Fi Adapter
+		Intel 3165 Wi-Fi Adapter
+
+
+	  This driver uses the kernel's mac80211 subsystem.
+
+	  In order to use this driver, you will need a firmware
+	  image for it. You can obtain the microcode from:
+
+	          <http://wireless.kernel.org/en/users/Drivers/iwlwifi>.
+
+	  The firmware is typically installed in /lib/firmware. You can
+	  look in the hotplug script /etc/hotplug/firmware.agent to
+	  determine which directory FIRMWARE_DIR is set to when the script
+	  runs.
+
+	  If you want to compile the driver as a module ( = code which can be
+	  inserted in and removed from the running kernel whenever you want),
+	  say M here and read <file:Documentation/kbuild/modules.txt>.  The
+	  module will be called iwlwifi.
+
+if IWLWIFI
+
+config IWLWIFI_LEDS
+	bool
+	depends on LEDS_CLASS=y || LEDS_CLASS=IWLWIFI
+	select LEDS_TRIGGERS
+	select MAC80211_LEDS
+	default y
+
+config IWLDVM
+	tristate "Intel Wireless WiFi DVM Firmware support"
+	help
+	  This is the driver that supports the DVM firmware. The list
+	  of the devices that use this firmware is available here:
+	  https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi#firmware
+
+config IWLMVM
+	tristate "Intel Wireless WiFi MVM Firmware support"
+	select WANT_DEV_COREDUMP
+	help
+	  This is the driver that supports the MVM firmware. The list
+	  of the devices that use this firmware is available here:
+	  https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi#firmware
+
+# don't call it _MODULE -- will confuse Kconfig/fixdep/...
+config IWLWIFI_OPMODE_MODULAR
+	bool
+	default y if IWLDVM=m
+	default y if IWLMVM=m
+
+comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM"
+	depends on IWLDVM=n && IWLMVM=n
+
+config IWLWIFI_BCAST_FILTERING
+	bool "Enable broadcast filtering"
+	depends on IWLMVM
+	help
+	  Say Y here to enable default bcast filtering configuration.
+
+	  Enabling broadcast filtering will drop any incoming wireless
+	  broadcast frames, except some very specific predefined
+	  patterns (e.g. incoming arp requests).
+
+	  If unsure, don't enable this option, as some programs might
+	  expect incoming broadcasts for their normal operations.
+
+config IWLWIFI_PCIE_RTPM
+	bool "Enable runtime power management mode for PCIe devices"
+	depends on IWLMVM && PM && EXPERT
+	help
+	  Say Y here to enable runtime power management for PCIe
+	  devices.  If enabled, the device will go into low power mode
+	  when idle for a short period of time, allowing for improved
+	  power saving during runtime. Note that this feature requires
+	  a tight integration with the platform. It is not recommended
+	  to enable this feature without proper validation with the
+	  specific target platform.
+
+	  If unsure, say N.
+
+menu "Debugging Options"
+
+config IWLWIFI_DEBUG
+	bool "Enable full debugging output in the iwlwifi driver"
+	---help---
+	  This option will enable debug tracing output for the iwlwifi drivers
+
+	  This will result in the kernel module being ~100k larger.  You can
+	  control which debug output is sent to the kernel log by setting the
+	  value in
+
+		/sys/module/iwlwifi/parameters/debug
+
+	  This entry will only exist if this option is enabled.
+
+	  To set a value, simply echo a hex bitmask value to the same file:
+
+		  % echo 0x43fff > /sys/module/iwlwifi/parameters/debug
+
+	  You can find the list of debug mask values in:
+		  drivers/net/wireless/intel/iwlwifi/iwl-debug.h
+
+	  If this is your first time using this driver, you should say Y here
+	  as the debug information can assist others in helping you resolve
+	  any problems you may encounter.
+
+config IWLWIFI_DEBUGFS
+	bool "iwlwifi debugfs support"
+	depends on MAC80211_DEBUGFS
+	---help---
+	  Enable creation of debugfs files for the iwlwifi drivers. This
+	  is a low-impact option that allows getting insight into the
+	  driver's state at runtime.
+
+config IWLWIFI_DEVICE_TRACING
+	bool "iwlwifi device access tracing"
+	depends on EVENT_TRACING
+	default y
+	help
+	  Say Y here to trace all commands, including TX frames and IO
+	  accesses, sent to the device. If you say yes, iwlwifi will
+	  register with the ftrace framework for event tracing and dump
+	  all this information to the ring buffer; you may need to
+	  increase the ring buffer size. See the ftrace documentation
+	  for more information.
+
+	  When tracing is not enabled, this option still has some
+	  (though rather small) overhead.
+
+	  If unsure, say Y so we can help you better when problems
+	  occur.
+endmenu
+
+endif
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
new file mode 100644
index 0000000..04e376c
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0
+# common
+obj-$(CONFIG_IWLWIFI)	+= iwlwifi.o
+iwlwifi-objs		+= iwl-io.o
+iwlwifi-objs		+= iwl-drv.o
+iwlwifi-objs		+= iwl-debug.o
+iwlwifi-objs		+= iwl-eeprom-read.o iwl-eeprom-parse.o
+iwlwifi-objs		+= iwl-phy-db.o iwl-nvm-parse.o
+iwlwifi-objs		+= pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
+iwlwifi-objs		+= pcie/ctxt-info.o pcie/ctxt-info-gen3.o
+iwlwifi-objs		+= pcie/trans-gen2.o pcie/tx-gen2.o
+iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o
+iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
+iwlwifi-objs		+= iwl-trans.o
+iwlwifi-objs		+= fw/notif-wait.o
+iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o
+iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
+iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o
+
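+# Objects selected via "iwlwifi-$(CONFIG_FOO) += bar.o" land in $(iwlwifi-m)
+# when CONFIG_FOO=m; fold them into the composite object too (standard
+# kbuild idiom).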
+iwlwifi-objs += $(iwlwifi-m)
+
+iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
+
+ccflags-y += -I$(src)
+
+obj-$(CONFIG_IWLDVM)	+= dvm/
+obj-$(CONFIG_IWLMVM)	+= mvm/
+
+CFLAGS_iwl-devtrace.o := -I$(src)
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
new file mode 100644
index 0000000..497fd76
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
@@ -0,0 +1,137 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include "iwl-config.h"
+#include "iwl-agn-hw.h"
+
+/* Highest firmware API version supported */
+#define IWL1000_UCODE_API_MAX 5
+#define IWL100_UCODE_API_MAX 5
+
+/* Lowest firmware API version supported */
+#define IWL1000_UCODE_API_MIN 1
+#define IWL100_UCODE_API_MIN 5
+
+/* EEPROM version */
+#define EEPROM_1000_TX_POWER_VERSION	(4)
+#define EEPROM_1000_EEPROM_VERSION	(0x15C)
+
+#define IWL1000_FW_PRE "iwlwifi-1000-"
+#define IWL1000_MODULE_FIRMWARE(api) IWL1000_FW_PRE __stringify(api) ".ucode"
+
+#define IWL100_FW_PRE "iwlwifi-100-"
+#define IWL100_MODULE_FIRMWARE(api) IWL100_FW_PRE __stringify(api) ".ucode"
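+
+/*
+ * These macros build the firmware file name requested at load time,
+ * e.g. IWL1000_MODULE_FIRMWARE(5) expands via __stringify() to
+ * "iwlwifi-1000-5.ucode".
+ */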
+
+
+static const struct iwl_base_params iwl1000_base_params = {
+	.num_of_queues = IWLAGN_NUM_QUEUES,
+	.max_tfd_queue_size = 256,
+	.eeprom_size = OTP_LOW_IMAGE_SIZE,
+	.pll_cfg = true,
+	.max_ll_items = OTP_MAX_LL_ITEMS_1000,
+	.shadow_ram_support = false,
+	.led_compensation = 51,
+	.wd_timeout = IWL_WATCHDOG_DISABLED,
+	.max_event_log_size = 128,
+	.scd_chain_ext_wa = true,
+};
+
+static const struct iwl_ht_params iwl1000_ht_params = {
+	.ht_greenfield_support = true,
+	.use_rts_for_aggregation = true, /* use rts/cts protection */
+	.ht40_bands = BIT(NL80211_BAND_2GHZ),
+};
+
+static const struct iwl_eeprom_params iwl1000_eeprom_params = {
+	.regulatory_bands = {
+		EEPROM_REG_BAND_1_CHANNELS,
+		EEPROM_REG_BAND_2_CHANNELS,
+		EEPROM_REG_BAND_3_CHANNELS,
+		EEPROM_REG_BAND_4_CHANNELS,
+		EEPROM_REG_BAND_5_CHANNELS,
+		EEPROM_REG_BAND_24_HT40_CHANNELS,
+		EEPROM_REGULATORY_BAND_NO_HT40,
+	}
+};
+
+#define IWL_DEVICE_1000						\
+	.fw_name_pre = IWL1000_FW_PRE,				\
+	.ucode_api_max = IWL1000_UCODE_API_MAX,			\
+	.ucode_api_min = IWL1000_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_1000,		\
+	.max_inst_size = IWLAGN_RTC_INST_SIZE,			\
+	.max_data_size = IWLAGN_RTC_DATA_SIZE,			\
+	.nvm_ver = EEPROM_1000_EEPROM_VERSION,		\
+	.nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION,	\
+	.base_params = &iwl1000_base_params,			\
+	.eeprom_params = &iwl1000_eeprom_params,		\
+	.led_mode = IWL_LED_BLINK,				\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,	\
+	.csr = &iwl_csr_v1
+
+const struct iwl_cfg iwl1000_bgn_cfg = {
+	.name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
+	IWL_DEVICE_1000,
+	.ht_params = &iwl1000_ht_params,
+};
+
+const struct iwl_cfg iwl1000_bg_cfg = {
+	.name = "Intel(R) Centrino(R) Wireless-N 1000 BG",
+	IWL_DEVICE_1000,
+};
+
+#define IWL_DEVICE_100						\
+	.fw_name_pre = IWL100_FW_PRE,				\
+	.ucode_api_max = IWL100_UCODE_API_MAX,			\
+	.ucode_api_min = IWL100_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_100,			\
+	.max_inst_size = IWLAGN_RTC_INST_SIZE,			\
+	.max_data_size = IWLAGN_RTC_DATA_SIZE,			\
+	.nvm_ver = EEPROM_1000_EEPROM_VERSION,		\
+	.nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION,	\
+	.base_params = &iwl1000_base_params,			\
+	.eeprom_params = &iwl1000_eeprom_params,		\
+	.led_mode = IWL_LED_RF_STATE,				\
+	.rx_with_siso_diversity = true,				\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,	\
+	.csr = &iwl_csr_v1
+
+const struct iwl_cfg iwl100_bgn_cfg = {
+	.name = "Intel(R) Centrino(R) Wireless-N 100 BGN",
+	IWL_DEVICE_100,
+	.ht_params = &iwl1000_ht_params,
+};
+
+const struct iwl_cfg iwl100_bg_cfg = {
+	.name = "Intel(R) Centrino(R) Wireless-N 100 BG",
+	IWL_DEVICE_100,
+};
+
+MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
new file mode 100644
index 0000000..fedb108
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
@@ -0,0 +1,211 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include "iwl-config.h"
+#include "iwl-agn-hw.h"
+#include "dvm/commands.h" /* needed for BT for now */
+
+/* Highest firmware API version supported */
+#define IWL2030_UCODE_API_MAX 6
+#define IWL2000_UCODE_API_MAX 6
+#define IWL105_UCODE_API_MAX 6
+#define IWL135_UCODE_API_MAX 6
+
+/* Lowest firmware API version supported */
+#define IWL2030_UCODE_API_MIN 5
+#define IWL2000_UCODE_API_MIN 5
+#define IWL105_UCODE_API_MIN 5
+#define IWL135_UCODE_API_MIN 5
+
+/* EEPROM version */
+#define EEPROM_2000_TX_POWER_VERSION	(6)
+#define EEPROM_2000_EEPROM_VERSION	(0x805)
+
+
+#define IWL2030_FW_PRE "iwlwifi-2030-"
+#define IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE __stringify(api) ".ucode"
+
+#define IWL2000_FW_PRE "iwlwifi-2000-"
+#define IWL2000_MODULE_FIRMWARE(api) IWL2000_FW_PRE __stringify(api) ".ucode"
+
+#define IWL105_FW_PRE "iwlwifi-105-"
+#define IWL105_MODULE_FIRMWARE(api) IWL105_FW_PRE __stringify(api) ".ucode"
+
+#define IWL135_FW_PRE "iwlwifi-135-"
+#define IWL135_MODULE_FIRMWARE(api) IWL135_FW_PRE __stringify(api) ".ucode"
+
+static const struct iwl_base_params iwl2000_base_params = {
+	.eeprom_size = OTP_LOW_IMAGE_SIZE,
+	.num_of_queues = IWLAGN_NUM_QUEUES,
+	.max_tfd_queue_size = 256,
+	.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
+	.shadow_ram_support = true,
+	.led_compensation = 51,
+	.wd_timeout = IWL_DEF_WD_TIMEOUT,
+	.max_event_log_size = 512,
+	.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+	.scd_chain_ext_wa = true,
+};
+
+
+static const struct iwl_base_params iwl2030_base_params = {
+	.eeprom_size = OTP_LOW_IMAGE_SIZE,
+	.num_of_queues = IWLAGN_NUM_QUEUES,
+	.max_tfd_queue_size = 256,
+	.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
+	.shadow_ram_support = true,
+	.led_compensation = 57,
+	.wd_timeout = IWL_LONG_WD_TIMEOUT,
+	.max_event_log_size = 512,
+	.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+	.scd_chain_ext_wa = true,
+};
+
+static const struct iwl_ht_params iwl2000_ht_params = {
+	.ht_greenfield_support = true,
+	.use_rts_for_aggregation = true, /* use rts/cts protection */
+	.ht40_bands = BIT(NL80211_BAND_2GHZ),
+};
+
+static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
+	.regulatory_bands = {
+		EEPROM_REG_BAND_1_CHANNELS,
+		EEPROM_REG_BAND_2_CHANNELS,
+		EEPROM_REG_BAND_3_CHANNELS,
+		EEPROM_REG_BAND_4_CHANNELS,
+		EEPROM_REG_BAND_5_CHANNELS,
+		EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
+		EEPROM_REGULATORY_BAND_NO_HT40,
+	},
+	.enhanced_txpower = true,
+};
+
+#define IWL_DEVICE_2000						\
+	.fw_name_pre = IWL2000_FW_PRE,				\
+	.ucode_api_max = IWL2000_UCODE_API_MAX,			\
+	.ucode_api_min = IWL2000_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_2000,		\
+	.max_inst_size = IWL60_RTC_INST_SIZE,			\
+	.max_data_size = IWL60_RTC_DATA_SIZE,			\
+	.nvm_ver = EEPROM_2000_EEPROM_VERSION,			\
+	.nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION,		\
+	.base_params = &iwl2000_base_params,			\
+	.eeprom_params = &iwl20x0_eeprom_params,		\
+	.led_mode = IWL_LED_RF_STATE,				\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,	\
+	.csr = &iwl_csr_v1
+
+
+const struct iwl_cfg iwl2000_2bgn_cfg = {
+	.name = "Intel(R) Centrino(R) Wireless-N 2200 BGN",
+	IWL_DEVICE_2000,
+	.ht_params = &iwl2000_ht_params,
+};
+
+const struct iwl_cfg iwl2000_2bgn_d_cfg = {
+	.name = "Intel(R) Centrino(R) Wireless-N 2200D BGN",
+	IWL_DEVICE_2000,
+	.ht_params = &iwl2000_ht_params,
+};
+
+#define IWL_DEVICE_2030						\
+	.fw_name_pre = IWL2030_FW_PRE,				\
+	.ucode_api_max = IWL2030_UCODE_API_MAX,			\
+	.ucode_api_min = IWL2030_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_2030,		\
+	.max_inst_size = IWL60_RTC_INST_SIZE,			\
+	.max_data_size = IWL60_RTC_DATA_SIZE,			\
+	.nvm_ver = EEPROM_2000_EEPROM_VERSION,		\
+	.nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION,	\
+	.base_params = &iwl2030_base_params,			\
+	.eeprom_params = &iwl20x0_eeprom_params,		\
+	.led_mode = IWL_LED_RF_STATE,				\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,	\
+	.csr = &iwl_csr_v1
+
+const struct iwl_cfg iwl2030_2bgn_cfg = {
+	.name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
+	IWL_DEVICE_2030,
+	.ht_params = &iwl2000_ht_params,
+};
+
+#define IWL_DEVICE_105						\
+	.fw_name_pre = IWL105_FW_PRE,				\
+	.ucode_api_max = IWL105_UCODE_API_MAX,			\
+	.ucode_api_min = IWL105_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_105,			\
+	.max_inst_size = IWL60_RTC_INST_SIZE,			\
+	.max_data_size = IWL60_RTC_DATA_SIZE,			\
+	.nvm_ver = EEPROM_2000_EEPROM_VERSION,		\
+	.nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION,	\
+	.base_params = &iwl2000_base_params,			\
+	.eeprom_params = &iwl20x0_eeprom_params,		\
+	.led_mode = IWL_LED_RF_STATE,				\
+	.rx_with_siso_diversity = true,				\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,	\
+	.csr = &iwl_csr_v1
+
+const struct iwl_cfg iwl105_bgn_cfg = {
+	.name = "Intel(R) Centrino(R) Wireless-N 105 BGN",
+	IWL_DEVICE_105,
+	.ht_params = &iwl2000_ht_params,
+};
+
+const struct iwl_cfg iwl105_bgn_d_cfg = {
+	.name = "Intel(R) Centrino(R) Wireless-N 105D BGN",
+	IWL_DEVICE_105,
+	.ht_params = &iwl2000_ht_params,
+};
+
+#define IWL_DEVICE_135						\
+	.fw_name_pre = IWL135_FW_PRE,				\
+	.ucode_api_max = IWL135_UCODE_API_MAX,			\
+	.ucode_api_min = IWL135_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_135,			\
+	.max_inst_size = IWL60_RTC_INST_SIZE,			\
+	.max_data_size = IWL60_RTC_DATA_SIZE,			\
+	.nvm_ver = EEPROM_2000_EEPROM_VERSION,		\
+	.nvm_calib_ver = EEPROM_2000_TX_POWER_VERSION,	\
+	.base_params = &iwl2030_base_params,			\
+	.eeprom_params = &iwl20x0_eeprom_params,		\
+	.led_mode = IWL_LED_RF_STATE,				\
+	.rx_with_siso_diversity = true,				\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,	\
+	.csr = &iwl_csr_v1
+
+const struct iwl_cfg iwl135_bgn_cfg = {
+	.name = "Intel(R) Centrino(R) Wireless-N 135 BGN",
+	IWL_DEVICE_135,
+	.ht_params = &iwl2000_ht_params,
+};
+
+MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
new file mode 100644
index 0000000..91ca77c
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -0,0 +1,270 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include "iwl-config.h"
+
+/* Highest firmware API version supported */
+#define IWL_22000_UCODE_API_MAX	38
+
+/* Lowest firmware API version supported */
+#define IWL_22000_UCODE_API_MIN	24
+
+/* NVM versions */
+#define IWL_22000_NVM_VERSION		0x0a1d
+#define IWL_22000_TX_POWER_VERSION	0xffff /* meaningless */
+
+/* Memory offsets and lengths */
+#define IWL_22000_DCCM_OFFSET		0x800000 /* LMAC1 */
+#define IWL_22000_DCCM_LEN		0x10000 /* LMAC1 */
+#define IWL_22000_DCCM2_OFFSET		0x880000
+#define IWL_22000_DCCM2_LEN		0x8000
+#define IWL_22000_SMEM_OFFSET		0x400000
+#define IWL_22000_SMEM_LEN		0xD0000
+
+#define IWL_22000_JF_FW_PRE		"iwlwifi-Qu-a0-jf-b0-"
+#define IWL_22000_HR_FW_PRE		"iwlwifi-Qu-a0-hr-a0-"
+#define IWL_22000_HR_CDB_FW_PRE		"iwlwifi-QuIcp-z0-hrcdb-a0-"
+#define IWL_22000_HR_A_F0_FW_PRE	"iwlwifi-QuQnj-f0-hr-a0-"
+#define IWL_22000_HR_B_FW_PRE		"iwlwifi-Qu-b0-hr-b0-"
+#define IWL_22000_JF_B0_FW_PRE		"iwlwifi-QuQnj-a0-jf-b0-"
+#define IWL_22000_HR_A0_FW_PRE		"iwlwifi-QuQnj-a0-hr-a0-"
+#define IWL_22000_SU_Z0_FW_PRE		"iwlwifi-su-z0-"
+
+#define IWL_22000_HR_MODULE_FIRMWARE(api) \
+	IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_JF_MODULE_FIRMWARE(api) \
+	IWL_22000_JF_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(api) \
+	IWL_22000_HR_A_F0_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api) \
+	IWL_22000_HR_B_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(api) \
+	IWL_22000_JF_B0_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(api) \
+	IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
+#define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \
+	IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode"
+
+#define NVM_HW_SECTION_NUM_FAMILY_22000		10
+
+static const struct iwl_base_params iwl_22000_base_params = {
+	.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_22000,
+	.num_of_queues = 512,
+	.max_tfd_queue_size = 256,
+	.shadow_ram_support = true,
+	.led_compensation = 57,
+	.wd_timeout = IWL_LONG_WD_TIMEOUT,
+	.max_event_log_size = 512,
+	.shadow_reg_enable = true,
+	.pcie_l1_allowed = true,
+};
+
+static const struct iwl_base_params iwl_22560_base_params = {
+	.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_22000,
+	.num_of_queues = 512,
+	.max_tfd_queue_size = 65536,
+	.shadow_ram_support = true,
+	.led_compensation = 57,
+	.wd_timeout = IWL_LONG_WD_TIMEOUT,
+	.max_event_log_size = 512,
+	.shadow_reg_enable = true,
+	.pcie_l1_allowed = true,
+};
+
+static const struct iwl_ht_params iwl_22000_ht_params = {
+	.stbc = true,
+	.ldpc = true,
+	.ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+};
+
+#define IWL_DEVICE_22000_COMMON						\
+	.ucode_api_max = IWL_22000_UCODE_API_MAX,			\
+	.ucode_api_min = IWL_22000_UCODE_API_MIN,			\
+	.led_mode = IWL_LED_RF_STATE,					\
+	.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_22000,		\
+	.non_shared_ant = ANT_A,					\
+	.dccm_offset = IWL_22000_DCCM_OFFSET,				\
+	.dccm_len = IWL_22000_DCCM_LEN,					\
+	.dccm2_offset = IWL_22000_DCCM2_OFFSET,				\
+	.dccm2_len = IWL_22000_DCCM2_LEN,				\
+	.smem_offset = IWL_22000_SMEM_OFFSET,				\
+	.smem_len = IWL_22000_SMEM_LEN,					\
+	.features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,		\
+	.apmg_not_supported = true,					\
+	.mq_rx_supported = true,					\
+	.vht_mu_mimo_supported = true,					\
+	.mac_addr_from_csr = true,					\
+	.ht_params = &iwl_22000_ht_params,				\
+	.nvm_ver = IWL_22000_NVM_VERSION,				\
+	.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,			\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,		\
+	.use_tfh = true,						\
+	.rf_id = true,							\
+	.gen2 = true,							\
+	.nvm_type = IWL_NVM_EXT,					\
+	.dbgc_supported = true,						\
+	.min_umac_error_event_table = 0x400000
+
+#define IWL_DEVICE_22500						\
+	IWL_DEVICE_22000_COMMON,					\
+	.device_family = IWL_DEVICE_FAMILY_22000,			\
+	.base_params = &iwl_22000_base_params,				\
+	.csr = &iwl_csr_v1
+
+#define IWL_DEVICE_22560						\
+	IWL_DEVICE_22000_COMMON,					\
+	.device_family = IWL_DEVICE_FAMILY_22560,			\
+	.base_params = &iwl_22560_base_params,				\
+	.csr = &iwl_csr_v2
+
+const struct iwl_cfg iwl22000_2ac_cfg_hr = {
+	.name = "Intel(R) Dual Band Wireless AC 22000",
+	.fw_name_pre = IWL_22000_HR_FW_PRE,
+	IWL_DEVICE_22500,
+};
+
+const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb = {
+	.name = "Intel(R) Dual Band Wireless AC 22000",
+	.fw_name_pre = IWL_22000_HR_CDB_FW_PRE,
+	IWL_DEVICE_22500,
+	.cdb = true,
+};
+
+const struct iwl_cfg iwl22000_2ac_cfg_jf = {
+	.name = "Intel(R) Dual Band Wireless AC 22000",
+	.fw_name_pre = IWL_22000_JF_FW_PRE,
+	IWL_DEVICE_22500,
+};
+
+const struct iwl_cfg iwl22000_2ax_cfg_hr = {
+	.name = "Intel(R) Dual Band Wireless AX 22000",
+	.fw_name_pre = IWL_22000_HR_FW_PRE,
+	IWL_DEVICE_22500,
+	/*
+	 * This device doesn't support receiving BlockAck with a large bitmap
+	 * so we need to restrict the size of transmitted aggregation to the
+	 * HT size; mac80211 would otherwise pick the HE max (256) by default.
+	 */
+	.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0 = {
+	.name = "Intel(R) Dual Band Wireless AX 22000",
+	.fw_name_pre = IWL_22000_HR_A_F0_FW_PRE,
+	IWL_DEVICE_22500,
+	/*
+	 * This device doesn't support receiving BlockAck with a large bitmap
+	 * so we need to restrict the size of transmitted aggregation to the
+	 * HT size; mac80211 would otherwise pick the HE max (256) by default.
+	 */
+	.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0 = {
+	.name = "Intel(R) Dual Band Wireless AX 22000",
+	.fw_name_pre = IWL_22000_HR_B_FW_PRE,
+	IWL_DEVICE_22500,
+	/*
+	 * This device doesn't support receiving BlockAck with a large bitmap
+	 * so we need to restrict the size of transmitted aggregation to the
+	 * HT size; mac80211 would otherwise pick the HE max (256) by default.
+	 */
+	.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0 = {
+	.name = "Intel(R) Dual Band Wireless AX 22000",
+	.fw_name_pre = IWL_22000_JF_B0_FW_PRE,
+	IWL_DEVICE_22500,
+	/*
+	 * This device doesn't support receiving BlockAck with a large bitmap
+	 * so we need to restrict the size of transmitted aggregation to the
+	 * HT size; mac80211 would otherwise pick the HE max (256) by default.
+	 */
+	.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = {
+	.name = "Intel(R) Dual Band Wireless AX 22000",
+	.fw_name_pre = IWL_22000_HR_A0_FW_PRE,
+	IWL_DEVICE_22500,
+	/*
+	 * This device doesn't support receiving BlockAck with a large bitmap
+	 * so we need to restrict the size of transmitted aggregation to the
+	 * HT size; mac80211 would otherwise pick the HE max (256) by default.
+	 */
+	.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+const struct iwl_cfg iwl22560_2ax_cfg_su_cdb = {
+	.name = "Intel(R) Dual Band Wireless AX 22560",
+	.fw_name_pre = IWL_22000_SU_Z0_FW_PRE,
+	IWL_DEVICE_22560,
+	.cdb = true,
+	/*
+	 * This device doesn't support receiving BlockAck with a large bitmap
+	 * so we need to restrict the size of transmitted aggregation to the
+	 * HT size; mac80211 would otherwise pick the HE max (256) by default.
+	 */
+	.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
new file mode 100644
index 0000000..36151e6
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
@@ -0,0 +1,174 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include "iwl-config.h"
+#include "iwl-agn-hw.h"
+
+/* Highest firmware API version supported */
+#define IWL5000_UCODE_API_MAX 5
+#define IWL5150_UCODE_API_MAX 2
+
+/* Lowest firmware API version supported */
+#define IWL5000_UCODE_API_MIN 1
+#define IWL5150_UCODE_API_MIN 1
+
+/* EEPROM versions */
+#define EEPROM_5000_TX_POWER_VERSION	(4)
+#define EEPROM_5000_EEPROM_VERSION	(0x11A)
+#define EEPROM_5050_TX_POWER_VERSION	(4)
+#define EEPROM_5050_EEPROM_VERSION	(0x21E)
+
+#define IWL5000_FW_PRE "iwlwifi-5000-"
+#define IWL5000_MODULE_FIRMWARE(api) IWL5000_FW_PRE __stringify(api) ".ucode"
+
+#define IWL5150_FW_PRE "iwlwifi-5150-"
+#define IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE __stringify(api) ".ucode"
+
+static const struct iwl_base_params iwl5000_base_params = {
+	.eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
+	.num_of_queues = IWLAGN_NUM_QUEUES,
+	.max_tfd_queue_size = 256,
+	.pll_cfg = true,
+	.led_compensation = 51,
+	.wd_timeout = IWL_WATCHDOG_DISABLED,
+	.max_event_log_size = 512,
+	.scd_chain_ext_wa = true,
+};
+
+static const struct iwl_ht_params iwl5000_ht_params = {
+	.ht_greenfield_support = true,
+	.ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+};
+
+static const struct iwl_eeprom_params iwl5000_eeprom_params = {
+	.regulatory_bands = {
+		EEPROM_REG_BAND_1_CHANNELS,
+		EEPROM_REG_BAND_2_CHANNELS,
+		EEPROM_REG_BAND_3_CHANNELS,
+		EEPROM_REG_BAND_4_CHANNELS,
+		EEPROM_REG_BAND_5_CHANNELS,
+		EEPROM_REG_BAND_24_HT40_CHANNELS,
+		EEPROM_REG_BAND_52_HT40_CHANNELS
+	},
+};
+
+#define IWL_DEVICE_5000						\
+	.fw_name_pre = IWL5000_FW_PRE,				\
+	.ucode_api_max = IWL5000_UCODE_API_MAX,			\
+	.ucode_api_min = IWL5000_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_5000,		\
+	.max_inst_size = IWLAGN_RTC_INST_SIZE,			\
+	.max_data_size = IWLAGN_RTC_DATA_SIZE,			\
+	.nvm_ver = EEPROM_5000_EEPROM_VERSION,		\
+	.nvm_calib_ver = EEPROM_5000_TX_POWER_VERSION,	\
+	.base_params = &iwl5000_base_params,			\
+	.eeprom_params = &iwl5000_eeprom_params,		\
+	.led_mode = IWL_LED_BLINK,				\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,	\
+	.csr = &iwl_csr_v1
+
+const struct iwl_cfg iwl5300_agn_cfg = {
+	.name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
+	IWL_DEVICE_5000,
+	/* at least EEPROM 0x11A has wrong info */
+	.valid_tx_ant = ANT_ABC,	/* .cfg overwrite */
+	.valid_rx_ant = ANT_ABC,	/* .cfg overwrite */
+	.ht_params = &iwl5000_ht_params,
+};
+
+const struct iwl_cfg iwl5100_bgn_cfg = {
+	.name = "Intel(R) WiFi Link 5100 BGN",
+	IWL_DEVICE_5000,
+	.valid_tx_ant = ANT_B,		/* .cfg overwrite */
+	.valid_rx_ant = ANT_AB,		/* .cfg overwrite */
+	.ht_params = &iwl5000_ht_params,
+};
+
+const struct iwl_cfg iwl5100_abg_cfg = {
+	.name = "Intel(R) WiFi Link 5100 ABG",
+	IWL_DEVICE_5000,
+	.valid_tx_ant = ANT_B,		/* .cfg overwrite */
+	.valid_rx_ant = ANT_AB,		/* .cfg overwrite */
+};
+
+const struct iwl_cfg iwl5100_agn_cfg = {
+	.name = "Intel(R) WiFi Link 5100 AGN",
+	IWL_DEVICE_5000,
+	.valid_tx_ant = ANT_B,		/* .cfg overwrite */
+	.valid_rx_ant = ANT_AB,		/* .cfg overwrite */
+	.ht_params = &iwl5000_ht_params,
+};
+
+const struct iwl_cfg iwl5350_agn_cfg = {
+	.name = "Intel(R) WiMAX/WiFi Link 5350 AGN",
+	.fw_name_pre = IWL5000_FW_PRE,
+	.ucode_api_max = IWL5000_UCODE_API_MAX,
+	.ucode_api_min = IWL5000_UCODE_API_MIN,
+	.device_family = IWL_DEVICE_FAMILY_5000,
+	.max_inst_size = IWLAGN_RTC_INST_SIZE,
+	.max_data_size = IWLAGN_RTC_DATA_SIZE,
+	.nvm_ver = EEPROM_5050_EEPROM_VERSION,
+	.nvm_calib_ver = EEPROM_5050_TX_POWER_VERSION,
+	.base_params = &iwl5000_base_params,
+	.eeprom_params = &iwl5000_eeprom_params,
+	.ht_params = &iwl5000_ht_params,
+	.led_mode = IWL_LED_BLINK,
+	.internal_wimax_coex = true,
+};
+
+#define IWL_DEVICE_5150						\
+	.fw_name_pre = IWL5150_FW_PRE,				\
+	.ucode_api_max = IWL5150_UCODE_API_MAX,			\
+	.ucode_api_min = IWL5150_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_5150,		\
+	.max_inst_size = IWLAGN_RTC_INST_SIZE,			\
+	.max_data_size = IWLAGN_RTC_DATA_SIZE,			\
+	.nvm_ver = EEPROM_5050_EEPROM_VERSION,		\
+	.nvm_calib_ver = EEPROM_5050_TX_POWER_VERSION,	\
+	.base_params = &iwl5000_base_params,			\
+	.eeprom_params = &iwl5000_eeprom_params,		\
+	.led_mode = IWL_LED_BLINK,				\
+	.internal_wimax_coex = true,				\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,	\
+	.csr = &iwl_csr_v1
+
+const struct iwl_cfg iwl5150_agn_cfg = {
+	.name = "Intel(R) WiMAX/WiFi Link 5150 AGN",
+	IWL_DEVICE_5150,
+	.ht_params = &iwl5000_ht_params,
+};
+
+const struct iwl_cfg iwl5150_abg_cfg = {
+	.name = "Intel(R) WiMAX/WiFi Link 5150 ABG",
+	IWL_DEVICE_5150,
+};
+
+MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
new file mode 100644
index 0000000..b5d8274
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
@@ -0,0 +1,385 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include "iwl-config.h"
+#include "iwl-agn-hw.h"
+#include "dvm/commands.h" /* needed for BT for now */
+
+/* Highest firmware API version supported */
+#define IWL6000_UCODE_API_MAX 6
+#define IWL6050_UCODE_API_MAX 5
+#define IWL6000G2_UCODE_API_MAX 6
+#define IWL6035_UCODE_API_MAX 6
+
+/* Lowest firmware API version supported */
+#define IWL6000_UCODE_API_MIN 4
+#define IWL6050_UCODE_API_MIN 4
+#define IWL6000G2_UCODE_API_MIN 5
+#define IWL6035_UCODE_API_MIN 6
+
+/* EEPROM versions */
+#define EEPROM_6000_TX_POWER_VERSION	(4)
+#define EEPROM_6000_EEPROM_VERSION	(0x423)
+#define EEPROM_6050_TX_POWER_VERSION	(4)
+#define EEPROM_6050_EEPROM_VERSION	(0x532)
+#define EEPROM_6150_TX_POWER_VERSION	(6)
+#define EEPROM_6150_EEPROM_VERSION	(0x553)
+#define EEPROM_6005_TX_POWER_VERSION	(6)
+#define EEPROM_6005_EEPROM_VERSION	(0x709)
+#define EEPROM_6030_TX_POWER_VERSION	(6)
+#define EEPROM_6030_EEPROM_VERSION	(0x709)
+#define EEPROM_6035_TX_POWER_VERSION	(6)
+#define EEPROM_6035_EEPROM_VERSION	(0x753)
+
+#define IWL6000_FW_PRE "iwlwifi-6000-"
+#define IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE __stringify(api) ".ucode"
+
+#define IWL6050_FW_PRE "iwlwifi-6050-"
+#define IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE __stringify(api) ".ucode"
+
+#define IWL6005_FW_PRE "iwlwifi-6000g2a-"
+#define IWL6005_MODULE_FIRMWARE(api) IWL6005_FW_PRE __stringify(api) ".ucode"
+
+#define IWL6030_FW_PRE "iwlwifi-6000g2b-"
+#define IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE __stringify(api) ".ucode"
+
+static const struct iwl_base_params iwl6000_base_params = {
+	.eeprom_size = OTP_LOW_IMAGE_SIZE,
+	.num_of_queues = IWLAGN_NUM_QUEUES,
+	.max_tfd_queue_size = 256,
+	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
+	.shadow_ram_support = true,
+	.led_compensation = 51,
+	.wd_timeout = IWL_DEF_WD_TIMEOUT,
+	.max_event_log_size = 512,
+	.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+	.scd_chain_ext_wa = true,
+};
+
+static const struct iwl_base_params iwl6050_base_params = {
+	.eeprom_size = OTP_LOW_IMAGE_SIZE,
+	.num_of_queues = IWLAGN_NUM_QUEUES,
+	.max_tfd_queue_size = 256,
+	.max_ll_items = OTP_MAX_LL_ITEMS_6x50,
+	.shadow_ram_support = true,
+	.led_compensation = 51,
+	.wd_timeout = IWL_DEF_WD_TIMEOUT,
+	.max_event_log_size = 1024,
+	.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+	.scd_chain_ext_wa = true,
+};
+
+static const struct iwl_base_params iwl6000_g2_base_params = {
+	.eeprom_size = OTP_LOW_IMAGE_SIZE,
+	.num_of_queues = IWLAGN_NUM_QUEUES,
+	.max_tfd_queue_size = 256,
+	.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
+	.shadow_ram_support = true,
+	.led_compensation = 57,
+	.wd_timeout = IWL_LONG_WD_TIMEOUT,
+	.max_event_log_size = 512,
+	.shadow_reg_enable = false, /* TODO: fix bugs using this feature */
+	.scd_chain_ext_wa = true,
+};
+
+static const struct iwl_ht_params iwl6000_ht_params = {
+	.ht_greenfield_support = true,
+	.use_rts_for_aggregation = true, /* use rts/cts protection */
+	.ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+};
+
+static const struct iwl_eeprom_params iwl6000_eeprom_params = {
+	.regulatory_bands = {
+		EEPROM_REG_BAND_1_CHANNELS,
+		EEPROM_REG_BAND_2_CHANNELS,
+		EEPROM_REG_BAND_3_CHANNELS,
+		EEPROM_REG_BAND_4_CHANNELS,
+		EEPROM_REG_BAND_5_CHANNELS,
+		EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
+		EEPROM_REG_BAND_52_HT40_CHANNELS
+	},
+	.enhanced_txpower = true,
+};
+
+#define IWL_DEVICE_6005						\
+	.fw_name_pre = IWL6005_FW_PRE,				\
+	.ucode_api_max = IWL6000G2_UCODE_API_MAX,		\
+	.ucode_api_min = IWL6000G2_UCODE_API_MIN,		\
+	.device_family = IWL_DEVICE_FAMILY_6005,		\
+	.max_inst_size = IWL60_RTC_INST_SIZE,			\
+	.max_data_size = IWL60_RTC_DATA_SIZE,			\
+	.nvm_ver = EEPROM_6005_EEPROM_VERSION,		\
+	.nvm_calib_ver = EEPROM_6005_TX_POWER_VERSION,	\
+	.base_params = &iwl6000_g2_base_params,			\
+	.eeprom_params = &iwl6000_eeprom_params,		\
+	.led_mode = IWL_LED_RF_STATE,				\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,	\
+	.csr = &iwl_csr_v1
+
+const struct iwl_cfg iwl6005_2agn_cfg = {
+	.name = "Intel(R) Centrino(R) Advanced-N 6205 AGN",
+	IWL_DEVICE_6005,
+	.ht_params = &iwl6000_ht_params,
+};
+
+const struct iwl_cfg iwl6005_2abg_cfg = {
+	.name = "Intel(R) Centrino(R) Advanced-N 6205 ABG",
+	IWL_DEVICE_6005,
+};
+
+const struct iwl_cfg iwl6005_2bg_cfg = {
+	.name = "Intel(R) Centrino(R) Advanced-N 6205 BG",
+	IWL_DEVICE_6005,
+};
+
+const struct iwl_cfg iwl6005_2agn_sff_cfg = {
+	.name = "Intel(R) Centrino(R) Advanced-N 6205S AGN",
+	IWL_DEVICE_6005,
+	.ht_params = &iwl6000_ht_params,
+};
+
+const struct iwl_cfg iwl6005_2agn_d_cfg = {
+	.name = "Intel(R) Centrino(R) Advanced-N 6205D AGN",
+	IWL_DEVICE_6005,
+	.ht_params = &iwl6000_ht_params,
+};
+
+const struct iwl_cfg iwl6005_2agn_mow1_cfg = {
+	.name = "Intel(R) Centrino(R) Advanced-N 6206 AGN",
+	IWL_DEVICE_6005,
+	.ht_params = &iwl6000_ht_params,
+};
+
+const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
+	.name = "Intel(R) Centrino(R) Advanced-N 6207 AGN",
+	IWL_DEVICE_6005,
+	.ht_params = &iwl6000_ht_params,
+};
+
+#define IWL_DEVICE_6030						\
+	.fw_name_pre = IWL6030_FW_PRE,				\
+	.ucode_api_max = IWL6000G2_UCODE_API_MAX,		\
+	.ucode_api_min = IWL6000G2_UCODE_API_MIN,		\
+	.device_family = IWL_DEVICE_FAMILY_6030,		\
+	.max_inst_size = IWL60_RTC_INST_SIZE,			\
+	.max_data_size = IWL60_RTC_DATA_SIZE,			\
+	.nvm_ver = EEPROM_6030_EEPROM_VERSION,		\
+	.nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION,	\
+	.base_params = &iwl6000_g2_base_params,			\
+	.eeprom_params = &iwl6000_eeprom_params,		\
+	.led_mode = IWL_LED_RF_STATE,				\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,	\
+	.csr = &iwl_csr_v1
+
+const struct iwl_cfg iwl6030_2agn_cfg = {
+	.name = "Intel(R) Centrino(R) Advanced-N 6230 AGN",
+	IWL_DEVICE_6030,
+	.ht_params = &iwl6000_ht_params,
+};
+
+const struct iwl_cfg iwl6030_2abg_cfg = {
+	.name = "Intel(R) Centrino(R) Advanced-N 6230 ABG",
+	IWL_DEVICE_6030,
+};
+
+const struct iwl_cfg iwl6030_2bgn_cfg = {
+	.name = "Intel(R) Centrino(R) Advanced-N 6230 BGN",
+	IWL_DEVICE_6030,
+	.ht_params = &iwl6000_ht_params,
+};
+
+const struct iwl_cfg iwl6030_2bg_cfg = {
+	.name = "Intel(R) Centrino(R) Advanced-N 6230 BG",
+	IWL_DEVICE_6030,
+};
+
+#define IWL_DEVICE_6035						\
+	.fw_name_pre = IWL6030_FW_PRE,				\
+	.ucode_api_max = IWL6035_UCODE_API_MAX,			\
+	.ucode_api_min = IWL6035_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_6030,		\
+	.max_inst_size = IWL60_RTC_INST_SIZE,			\
+	.max_data_size = IWL60_RTC_DATA_SIZE,			\
+	.nvm_ver = EEPROM_6030_EEPROM_VERSION,		\
+	.nvm_calib_ver = EEPROM_6030_TX_POWER_VERSION,	\
+	.base_params = &iwl6000_g2_base_params,			\
+	.eeprom_params = &iwl6000_eeprom_params,		\
+	.led_mode = IWL_LED_RF_STATE,				\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,	\
+	.csr = &iwl_csr_v1
+
+const struct iwl_cfg iwl6035_2agn_cfg = {
+	.name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
+	IWL_DEVICE_6035,
+	.ht_params = &iwl6000_ht_params,
+};
+
+const struct iwl_cfg iwl6035_2agn_sff_cfg = {
+	.name = "Intel(R) Centrino(R) Ultimate-N 6235 AGN",
+	IWL_DEVICE_6035,
+	.ht_params = &iwl6000_ht_params,
+};
+
+const struct iwl_cfg iwl1030_bgn_cfg = {
+	.name = "Intel(R) Centrino(R) Wireless-N 1030 BGN",
+	IWL_DEVICE_6030,
+	.ht_params = &iwl6000_ht_params,
+};
+
+const struct iwl_cfg iwl1030_bg_cfg = {
+	.name = "Intel(R) Centrino(R) Wireless-N 1030 BG",
+	IWL_DEVICE_6030,
+};
+
+const struct iwl_cfg iwl130_bgn_cfg = {
+	.name = "Intel(R) Centrino(R) Wireless-N 130 BGN",
+	IWL_DEVICE_6030,
+	.ht_params = &iwl6000_ht_params,
+	.rx_with_siso_diversity = true,
+};
+
+const struct iwl_cfg iwl130_bg_cfg = {
+	.name = "Intel(R) Centrino(R) Wireless-N 130 BG",
+	IWL_DEVICE_6030,
+	.rx_with_siso_diversity = true,
+};
+
+/*
+ * "i": Internal configuration, use internal Power Amplifier
+ */
+#define IWL_DEVICE_6000i					\
+	.fw_name_pre = IWL6000_FW_PRE,				\
+	.ucode_api_max = IWL6000_UCODE_API_MAX,			\
+	.ucode_api_min = IWL6000_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_6000i,		\
+	.max_inst_size = IWL60_RTC_INST_SIZE,			\
+	.max_data_size = IWL60_RTC_DATA_SIZE,			\
+	.valid_tx_ant = ANT_BC,		/* .cfg overwrite */	\
+	.valid_rx_ant = ANT_BC,		/* .cfg overwrite */	\
+	.nvm_ver = EEPROM_6000_EEPROM_VERSION,		\
+	.nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION,	\
+	.base_params = &iwl6000_base_params,			\
+	.eeprom_params = &iwl6000_eeprom_params,		\
+	.led_mode = IWL_LED_BLINK,				\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,	\
+	.csr = &iwl_csr_v1
+
+const struct iwl_cfg iwl6000i_2agn_cfg = {
+	.name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
+	IWL_DEVICE_6000i,
+	.ht_params = &iwl6000_ht_params,
+};
+
+const struct iwl_cfg iwl6000i_2abg_cfg = {
+	.name = "Intel(R) Centrino(R) Advanced-N 6200 ABG",
+	IWL_DEVICE_6000i,
+};
+
+const struct iwl_cfg iwl6000i_2bg_cfg = {
+	.name = "Intel(R) Centrino(R) Advanced-N 6200 BG",
+	IWL_DEVICE_6000i,
+};
+
+#define IWL_DEVICE_6050						\
+	.fw_name_pre = IWL6050_FW_PRE,				\
+	.ucode_api_max = IWL6050_UCODE_API_MAX,			\
+	.ucode_api_min = IWL6050_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_6050,		\
+	.max_inst_size = IWL60_RTC_INST_SIZE,			\
+	.max_data_size = IWL60_RTC_DATA_SIZE,			\
+	.valid_tx_ant = ANT_AB,		/* .cfg overwrite */	\
+	.valid_rx_ant = ANT_AB,		/* .cfg overwrite */	\
+	.nvm_ver = EEPROM_6050_EEPROM_VERSION,		\
+	.nvm_calib_ver = EEPROM_6050_TX_POWER_VERSION,	\
+	.base_params = &iwl6050_base_params,			\
+	.eeprom_params = &iwl6000_eeprom_params,		\
+	.led_mode = IWL_LED_BLINK,				\
+	.internal_wimax_coex = true,				\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,	\
+	.csr = &iwl_csr_v1
+
+const struct iwl_cfg iwl6050_2agn_cfg = {
+	.name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
+	IWL_DEVICE_6050,
+	.ht_params = &iwl6000_ht_params,
+};
+
+const struct iwl_cfg iwl6050_2abg_cfg = {
+	.name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 ABG",
+	IWL_DEVICE_6050,
+};
+
+#define IWL_DEVICE_6150						\
+	.fw_name_pre = IWL6050_FW_PRE,				\
+	.ucode_api_max = IWL6050_UCODE_API_MAX,			\
+	.ucode_api_min = IWL6050_UCODE_API_MIN,			\
+	.device_family = IWL_DEVICE_FAMILY_6150,		\
+	.max_inst_size = IWL60_RTC_INST_SIZE,			\
+	.max_data_size = IWL60_RTC_DATA_SIZE,			\
+	.nvm_ver = EEPROM_6150_EEPROM_VERSION,		\
+	.nvm_calib_ver = EEPROM_6150_TX_POWER_VERSION,	\
+	.base_params = &iwl6050_base_params,			\
+	.eeprom_params = &iwl6000_eeprom_params,		\
+	.led_mode = IWL_LED_BLINK,				\
+	.internal_wimax_coex = true,				\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,	\
+	.csr = &iwl_csr_v1
+
+const struct iwl_cfg iwl6150_bgn_cfg = {
+	.name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
+	IWL_DEVICE_6150,
+	.ht_params = &iwl6000_ht_params,
+};
+
+const struct iwl_cfg iwl6150_bg_cfg = {
+	.name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BG",
+	IWL_DEVICE_6150,
+};
+
+const struct iwl_cfg iwl6000_3agn_cfg = {
+	.name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN",
+	.fw_name_pre = IWL6000_FW_PRE,
+	.ucode_api_max = IWL6000_UCODE_API_MAX,
+	.ucode_api_min = IWL6000_UCODE_API_MIN,
+	.device_family = IWL_DEVICE_FAMILY_6000,
+	.max_inst_size = IWL60_RTC_INST_SIZE,
+	.max_data_size = IWL60_RTC_DATA_SIZE,
+	.nvm_ver = EEPROM_6000_EEPROM_VERSION,
+	.nvm_calib_ver = EEPROM_6000_TX_POWER_VERSION,
+	.base_params = &iwl6000_base_params,
+	.eeprom_params = &iwl6000_eeprom_params,
+	.ht_params = &iwl6000_ht_params,
+	.led_mode = IWL_LED_BLINK,
+	.csr = &iwl_csr_v1,
+};
+
+MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
new file mode 100644
index 0000000..a62c834
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c
@@ -0,0 +1,386 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015        Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015        Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include "iwl-config.h"
+
+/* Highest firmware API version supported */
+#define IWL7260_UCODE_API_MAX	17
+#define IWL7265_UCODE_API_MAX	17
+#define IWL7265D_UCODE_API_MAX	29
+#define IWL3168_UCODE_API_MAX	29
+
+/* Lowest firmware API version supported */
+#define IWL7260_UCODE_API_MIN	17
+#define IWL7265_UCODE_API_MIN	17
+#define IWL7265D_UCODE_API_MIN	22
+#define IWL3168_UCODE_API_MIN	22
+
+/* NVM versions */
+#define IWL7260_NVM_VERSION		0x0a1d
+#define IWL7260_TX_POWER_VERSION	0xffff /* meaningless */
+#define IWL3160_NVM_VERSION		0x709
+#define IWL3160_TX_POWER_VERSION	0xffff /* meaningless */
+#define IWL3165_NVM_VERSION		0x709
+#define IWL3165_TX_POWER_VERSION	0xffff /* meaningless */
+#define IWL3168_NVM_VERSION		0xd01
+#define IWL3168_TX_POWER_VERSION	0xffff /* meaningless */
+#define IWL7265_NVM_VERSION		0x0a1d
+#define IWL7265_TX_POWER_VERSION	0xffff /* meaningless */
+#define IWL7265D_NVM_VERSION		0x0c11
+#define IWL7265D_TX_POWER_VERSION	0xffff /* meaningless */
+
+/* DCCM offsets and lengths */
+#define IWL7000_DCCM_OFFSET		0x800000
+#define IWL7260_DCCM_LEN		0x14000
+#define IWL3160_DCCM_LEN		0x10000
+#define IWL7265_DCCM_LEN		0x17A00
+
+#define IWL7260_FW_PRE "iwlwifi-7260-"
+#define IWL7260_MODULE_FIRMWARE(api) IWL7260_FW_PRE __stringify(api) ".ucode"
+
+#define IWL3160_FW_PRE "iwlwifi-3160-"
+#define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode"
+
+#define IWL3168_FW_PRE "iwlwifi-3168-"
+#define IWL3168_MODULE_FIRMWARE(api) IWL3168_FW_PRE __stringify(api) ".ucode"
+
+#define IWL7265_FW_PRE "iwlwifi-7265-"
+#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
+
+#define IWL7265D_FW_PRE "iwlwifi-7265D-"
+#define IWL7265D_MODULE_FIRMWARE(api) IWL7265D_FW_PRE __stringify(api) ".ucode"
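+
+/*
+ * These macros build the firmware file names handed to the firmware
+ * loader; e.g. IWL7265D_MODULE_FIRMWARE(IWL7265D_UCODE_API_MAX)
+ * stringifies to "iwlwifi-7265D-29.ucode".  The driver requests the
+ * _API_MAX version first and falls back one version at a time, never
+ * going below _API_MIN.
+ */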
+
+#define NVM_HW_SECTION_NUM_FAMILY_7000		0
+
+static const struct iwl_base_params iwl7000_base_params = {
+	.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_7000,
+	.num_of_queues = 31,
+	.max_tfd_queue_size = 256,
+	.shadow_ram_support = true,
+	.led_compensation = 57,
+	.wd_timeout = IWL_LONG_WD_TIMEOUT,
+	.max_event_log_size = 512,
+	.shadow_reg_enable = true,
+	.pcie_l1_allowed = true,
+	.apmg_wake_up_wa = true,
+};
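+
+/* All 7000-family devices share 31 Tx queues with 256-entry TFD rings. */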
+
+static const struct iwl_tt_params iwl7000_high_temp_tt_params = {
+	.ct_kill_entry = 118,
+	.ct_kill_exit = 96,
+	.ct_kill_duration = 5,
+	.dynamic_smps_entry = 114,
+	.dynamic_smps_exit = 110,
+	.tx_protection_entry = 114,
+	.tx_protection_exit = 108,
+	.tx_backoff = {
+		{.temperature = 112, .backoff = 300},
+		{.temperature = 113, .backoff = 800},
+		{.temperature = 114, .backoff = 1500},
+		{.temperature = 115, .backoff = 3000},
+		{.temperature = 116, .backoff = 5000},
+		{.temperature = 117, .backoff = 10000},
+	},
+	.support_ct_kill = true,
+	.support_dynamic_smps = true,
+	.support_tx_protection = true,
+	.support_tx_backoff = true,
+};
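+
+#if 0	/* Editor's illustrative sketch -- not part of the driver. */
+/*
+ * The thresholds above ascend with temperature; the applicable Tx
+ * backoff is that of the hottest threshold already crossed.  The
+ * helper name and the zero-terminated tail are assumptions of this
+ * example, not driver API.
+ */
+static u32 example_tt_tx_backoff(const struct iwl_tt_params *tt, s32 temp)
+{
+	u32 backoff = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(tt->tx_backoff); i++) {
+		if (!tt->tx_backoff[i].backoff)
+			break;	/* zeroed tail of the table */
+		if (temp >= tt->tx_backoff[i].temperature)
+			backoff = tt->tx_backoff[i].backoff;
+	}
+	return backoff;
+}
+#endif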
+
+static const struct iwl_ht_params iwl7000_ht_params = {
+	.stbc = true,
+	.ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+};
+
+#define IWL_DEVICE_7000_COMMON					\
+	.device_family = IWL_DEVICE_FAMILY_7000,		\
+	.base_params = &iwl7000_base_params,			\
+	.led_mode = IWL_LED_RF_STATE,				\
+	.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_7000,	\
+	.non_shared_ant = ANT_A,				\
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,	\
+	.dccm_offset = IWL7000_DCCM_OFFSET,			\
+	.csr = &iwl_csr_v1
+
+#define IWL_DEVICE_7000						\
+	IWL_DEVICE_7000_COMMON,					\
+	.ucode_api_max = IWL7260_UCODE_API_MAX,			\
+	.ucode_api_min = IWL7260_UCODE_API_MIN
+
+#define IWL_DEVICE_7005						\
+	IWL_DEVICE_7000_COMMON,					\
+	.ucode_api_max = IWL7265_UCODE_API_MAX,			\
+	.ucode_api_min = IWL7265_UCODE_API_MIN
+
+#define IWL_DEVICE_3008						\
+	IWL_DEVICE_7000_COMMON,					\
+	.ucode_api_max = IWL3168_UCODE_API_MAX,			\
+	.ucode_api_min = IWL3168_UCODE_API_MIN
+
+#define IWL_DEVICE_7005D					\
+	IWL_DEVICE_7000_COMMON,					\
+	.ucode_api_max = IWL7265D_UCODE_API_MAX,		\
+	.ucode_api_min = IWL7265D_UCODE_API_MIN
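+
+/*
+ * Each IWL_DEVICE_* macro expands to a run of designated initializers,
+ * letting the configs below inherit every family-wide field from a
+ * single line while still overriding individual members; when a field
+ * is initialized twice, C gives the last initializer precedence.
+ */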
+
+const struct iwl_cfg iwl7260_2ac_cfg = {
+	.name = "Intel(R) Dual Band Wireless AC 7260",
+	.fw_name_pre = IWL7260_FW_PRE,
+	IWL_DEVICE_7000,
+	.ht_params = &iwl7000_ht_params,
+	.nvm_ver = IWL7260_NVM_VERSION,
+	.nvm_calib_ver = IWL7260_TX_POWER_VERSION,
+	.host_interrupt_operation_mode = true,
+	.lp_xtal_workaround = true,
+	.dccm_len = IWL7260_DCCM_LEN,
+};
+
+const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
+	.name = "Intel(R) Dual Band Wireless AC 7260",
+	.fw_name_pre = IWL7260_FW_PRE,
+	IWL_DEVICE_7000,
+	.ht_params = &iwl7000_ht_params,
+	.nvm_ver = IWL7260_NVM_VERSION,
+	.nvm_calib_ver = IWL7260_TX_POWER_VERSION,
+	.high_temp = true,
+	.host_interrupt_operation_mode = true,
+	.lp_xtal_workaround = true,
+	.dccm_len = IWL7260_DCCM_LEN,
+	.thermal_params = &iwl7000_high_temp_tt_params,
+};
+
+const struct iwl_cfg iwl7260_2n_cfg = {
+	.name = "Intel(R) Dual Band Wireless N 7260",
+	.fw_name_pre = IWL7260_FW_PRE,
+	IWL_DEVICE_7000,
+	.ht_params = &iwl7000_ht_params,
+	.nvm_ver = IWL7260_NVM_VERSION,
+	.nvm_calib_ver = IWL7260_TX_POWER_VERSION,
+	.host_interrupt_operation_mode = true,
+	.lp_xtal_workaround = true,
+	.dccm_len = IWL7260_DCCM_LEN,
+};
+
+const struct iwl_cfg iwl7260_n_cfg = {
+	.name = "Intel(R) Wireless N 7260",
+	.fw_name_pre = IWL7260_FW_PRE,
+	IWL_DEVICE_7000,
+	.ht_params = &iwl7000_ht_params,
+	.nvm_ver = IWL7260_NVM_VERSION,
+	.nvm_calib_ver = IWL7260_TX_POWER_VERSION,
+	.host_interrupt_operation_mode = true,
+	.lp_xtal_workaround = true,
+	.dccm_len = IWL7260_DCCM_LEN,
+};
+
+const struct iwl_cfg iwl3160_2ac_cfg = {
+	.name = "Intel(R) Dual Band Wireless AC 3160",
+	.fw_name_pre = IWL3160_FW_PRE,
+	IWL_DEVICE_7000,
+	.ht_params = &iwl7000_ht_params,
+	.nvm_ver = IWL3160_NVM_VERSION,
+	.nvm_calib_ver = IWL3160_TX_POWER_VERSION,
+	.host_interrupt_operation_mode = true,
+	.dccm_len = IWL3160_DCCM_LEN,
+};
+
+const struct iwl_cfg iwl3160_2n_cfg = {
+	.name = "Intel(R) Dual Band Wireless N 3160",
+	.fw_name_pre = IWL3160_FW_PRE,
+	IWL_DEVICE_7000,
+	.ht_params = &iwl7000_ht_params,
+	.nvm_ver = IWL3160_NVM_VERSION,
+	.nvm_calib_ver = IWL3160_TX_POWER_VERSION,
+	.host_interrupt_operation_mode = true,
+	.dccm_len = IWL3160_DCCM_LEN,
+};
+
+const struct iwl_cfg iwl3160_n_cfg = {
+	.name = "Intel(R) Wireless N 3160",
+	.fw_name_pre = IWL3160_FW_PRE,
+	IWL_DEVICE_7000,
+	.ht_params = &iwl7000_ht_params,
+	.nvm_ver = IWL3160_NVM_VERSION,
+	.nvm_calib_ver = IWL3160_TX_POWER_VERSION,
+	.host_interrupt_operation_mode = true,
+	.dccm_len = IWL3160_DCCM_LEN,
+};
+
+static const struct iwl_pwr_tx_backoff iwl7265_pwr_tx_backoffs[] = {
+	{.pwr = 1600, .backoff = 0},
+	{.pwr = 1300, .backoff = 467},
+	{.pwr = 900,  .backoff = 1900},
+	{.pwr = 800, .backoff = 2630},
+	{.pwr = 700, .backoff = 3720},
+	{.pwr = 600, .backoff = 5550},
+	{.pwr = 500, .backoff = 9350},
+	{0},
+};
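+
+#if 0	/* Editor's illustrative sketch -- not part of the driver. */
+/*
+ * The {0}-terminated table above runs from the highest platform power
+ * limit down; the matching entry is the first whose limit the reported
+ * limit meets or exceeds.  The helper name is an assumption of this
+ * example, not driver API.
+ */
+static u32 example_pwr_tx_backoff(const struct iwl_pwr_tx_backoff *tbl,
+				  u32 pwr_limit)
+{
+	for (; tbl->pwr; tbl++)
+		if (pwr_limit >= tbl->pwr)
+			return tbl->backoff;
+	return 0;	/* no entry matched */
+}
+#endif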
+
+static const struct iwl_ht_params iwl7265_ht_params = {
+	.stbc = true,
+	.ldpc = true,
+	.ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+};
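+
+/* Relative to iwl7000_ht_params, the 7265 parts also support LDPC coding. */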
+
+const struct iwl_cfg iwl3165_2ac_cfg = {
+	.name = "Intel(R) Dual Band Wireless AC 3165",
+	.fw_name_pre = IWL7265D_FW_PRE,
+	IWL_DEVICE_7005D,
+	.ht_params = &iwl7000_ht_params,
+	.nvm_ver = IWL3165_NVM_VERSION,
+	.nvm_calib_ver = IWL3165_TX_POWER_VERSION,
+	.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
+	.dccm_len = IWL7265_DCCM_LEN,
+};
+
+const struct iwl_cfg iwl3168_2ac_cfg = {
+	.name = "Intel(R) Dual Band Wireless AC 3168",
+	.fw_name_pre = IWL3168_FW_PRE,
+	IWL_DEVICE_3008,
+	.ht_params = &iwl7000_ht_params,
+	.nvm_ver = IWL3168_NVM_VERSION,
+	.nvm_calib_ver = IWL3168_TX_POWER_VERSION,
+	.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
+	.dccm_len = IWL7265_DCCM_LEN,
+	.nvm_type = IWL_NVM_SDP,
+};
+
+const struct iwl_cfg iwl7265_2ac_cfg = {
+	.name = "Intel(R) Dual Band Wireless AC 7265",
+	.fw_name_pre = IWL7265_FW_PRE,
+	IWL_DEVICE_7005,
+	.ht_params = &iwl7265_ht_params,
+	.nvm_ver = IWL7265_NVM_VERSION,
+	.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
+	.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
+	.dccm_len = IWL7265_DCCM_LEN,
+};
+
+const struct iwl_cfg iwl7265_2n_cfg = {
+	.name = "Intel(R) Dual Band Wireless N 7265",
+	.fw_name_pre = IWL7265_FW_PRE,
+	IWL_DEVICE_7005,
+	.ht_params = &iwl7265_ht_params,
+	.nvm_ver = IWL7265_NVM_VERSION,
+	.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
+	.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
+	.dccm_len = IWL7265_DCCM_LEN,
+};
+
+const struct iwl_cfg iwl7265_n_cfg = {
+	.name = "Intel(R) Wireless N 7265",
+	.fw_name_pre = IWL7265_FW_PRE,
+	IWL_DEVICE_7005,
+	.ht_params = &iwl7265_ht_params,
+	.nvm_ver = IWL7265_NVM_VERSION,
+	.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
+	.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
+	.dccm_len = IWL7265_DCCM_LEN,
+};
+
+const struct iwl_cfg iwl7265d_2ac_cfg = {
+	.name = "Intel(R) Dual Band Wireless AC 7265",
+	.fw_name_pre = IWL7265D_FW_PRE,
+	IWL_DEVICE_7005D,
+	.ht_params = &iwl7265_ht_params,
+	.nvm_ver = IWL7265D_NVM_VERSION,
+	.nvm_calib_ver = IWL7265D_TX_POWER_VERSION,
+	.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
+	.dccm_len = IWL7265_DCCM_LEN,
+};
+
+const struct iwl_cfg iwl7265d_2n_cfg = {
+	.name = "Intel(R) Dual Band Wireless N 7265",
+	.fw_name_pre = IWL7265D_FW_PRE,
+	IWL_DEVICE_7005D,
+	.ht_params = &iwl7265_ht_params,
+	.nvm_ver = IWL7265D_NVM_VERSION,
+	.nvm_calib_ver = IWL7265D_TX_POWER_VERSION,
+	.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
+	.dccm_len = IWL7265_DCCM_LEN,
+};
+
+const struct iwl_cfg iwl7265d_n_cfg = {
+	.name = "Intel(R) Wireless N 7265",
+	.fw_name_pre = IWL7265D_FW_PRE,
+	IWL_DEVICE_7005D,
+	.ht_params = &iwl7265_ht_params,
+	.nvm_ver = IWL7265D_NVM_VERSION,
+	.nvm_calib_ver = IWL7265D_TX_POWER_VERSION,
+	.pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
+	.dccm_len = IWL7265_DCCM_LEN,
+};
+
+MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL3168_MODULE_FIRMWARE(IWL3168_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7265_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7265D_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
new file mode 100644
index 0000000..c46fa71
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c
@@ -0,0 +1,231 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016        Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2018        Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include "iwl-config.h"
+
+/* Highest firmware API version supported */
+#define IWL8000_UCODE_API_MAX	36
+#define IWL8265_UCODE_API_MAX	36
+
+/* Lowest firmware API version supported */
+#define IWL8000_UCODE_API_MIN	22
+#define IWL8265_UCODE_API_MIN	22
+
+/* NVM versions */
+#define IWL8000_NVM_VERSION		0x0a1d
+#define IWL8000_TX_POWER_VERSION	0xffff /* meaningless */
+
+/* Memory offsets and lengths */
+#define IWL8260_DCCM_OFFSET		0x800000
+#define IWL8260_DCCM_LEN		0x18000
+#define IWL8260_DCCM2_OFFSET		0x880000
+#define IWL8260_DCCM2_LEN		0x8000
+#define IWL8260_SMEM_OFFSET		0x400000
+#define IWL8260_SMEM_LEN		0x68000
+
+#define IWL8000_FW_PRE "iwlwifi-8000C-"
+#define IWL8000_MODULE_FIRMWARE(api) \
+	IWL8000_FW_PRE __stringify(api) ".ucode"
+
+#define IWL8265_FW_PRE "iwlwifi-8265-"
+#define IWL8265_MODULE_FIRMWARE(api) \
+	IWL8265_FW_PRE __stringify(api) ".ucode"
+
+#define NVM_HW_SECTION_NUM_FAMILY_8000		10
+#define DEFAULT_NVM_FILE_FAMILY_8000C		"nvmData-8000C"
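+
+/*
+ * On C-step parts the NVM image may also be loaded from an external
+ * file (DEFAULT_NVM_FILE_FAMILY_8000C) instead of being read from OTP;
+ * see the default_nvm_file_C_step field in IWL_DEVICE_8000_COMMON below.
+ */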
+
+static const struct iwl_base_params iwl8000_base_params = {
+	.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000,
+	.num_of_queues = 31,
+	.max_tfd_queue_size = 256,
+	.shadow_ram_support = true,
+	.led_compensation = 57,
+	.wd_timeout = IWL_LONG_WD_TIMEOUT,
+	.max_event_log_size = 512,
+	.shadow_reg_enable = true,
+	.pcie_l1_allowed = true,
+};
+
+static const struct iwl_ht_params iwl8000_ht_params = {
+	.stbc = true,
+	.ldpc = true,
+	.ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+};
+
+static const struct iwl_tt_params iwl8000_tt_params = {
+	.ct_kill_entry = 115,
+	.ct_kill_exit = 93,
+	.ct_kill_duration = 5,
+	.dynamic_smps_entry = 111,
+	.dynamic_smps_exit = 107,
+	.tx_protection_entry = 112,
+	.tx_protection_exit = 105,
+	.tx_backoff = {
+		{.temperature = 110, .backoff = 200},
+		{.temperature = 111, .backoff = 600},
+		{.temperature = 112, .backoff = 1200},
+		{.temperature = 113, .backoff = 2000},
+		{.temperature = 114, .backoff = 4000},
+	},
+	.support_ct_kill = true,
+	.support_dynamic_smps = true,
+	.support_tx_protection = true,
+	.support_tx_backoff = true,
+};
+
+#define IWL_DEVICE_8000_COMMON						\
+	.device_family = IWL_DEVICE_FAMILY_8000,			\
+	.base_params = &iwl8000_base_params,				\
+	.led_mode = IWL_LED_RF_STATE,					\
+	.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000,		\
+	.features = NETIF_F_RXCSUM,					\
+	.non_shared_ant = ANT_A,					\
+	.dccm_offset = IWL8260_DCCM_OFFSET,				\
+	.dccm_len = IWL8260_DCCM_LEN,					\
+	.dccm2_offset = IWL8260_DCCM2_OFFSET,				\
+	.dccm2_len = IWL8260_DCCM2_LEN,					\
+	.smem_offset = IWL8260_SMEM_OFFSET,				\
+	.smem_len = IWL8260_SMEM_LEN,					\
+	.default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C,	\
+	.thermal_params = &iwl8000_tt_params,				\
+	.apmg_not_supported = true,					\
+	.nvm_type = IWL_NVM_EXT,					\
+	.dbgc_supported = true,						\
+	.min_umac_error_event_table = 0x800000,				\
+	.csr = &iwl_csr_v1
+
+#define IWL_DEVICE_8000							\
+	IWL_DEVICE_8000_COMMON,						\
+	.ucode_api_max = IWL8000_UCODE_API_MAX,				\
+	.ucode_api_min = IWL8000_UCODE_API_MIN
+
+#define IWL_DEVICE_8260							\
+	IWL_DEVICE_8000_COMMON,						\
+	.ucode_api_max = IWL8000_UCODE_API_MAX,				\
+	.ucode_api_min = IWL8000_UCODE_API_MIN
+
+#define IWL_DEVICE_8265							\
+	IWL_DEVICE_8000_COMMON,						\
+	.ucode_api_max = IWL8265_UCODE_API_MAX,				\
+	.ucode_api_min = IWL8265_UCODE_API_MIN
+
+const struct iwl_cfg iwl8260_2n_cfg = {
+	.name = "Intel(R) Dual Band Wireless N 8260",
+	.fw_name_pre = IWL8000_FW_PRE,
+	IWL_DEVICE_8260,
+	.ht_params = &iwl8000_ht_params,
+	.nvm_ver = IWL8000_NVM_VERSION,
+	.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
+};
+
+const struct iwl_cfg iwl8260_2ac_cfg = {
+	.name = "Intel(R) Dual Band Wireless AC 8260",
+	.fw_name_pre = IWL8000_FW_PRE,
+	IWL_DEVICE_8260,
+	.ht_params = &iwl8000_ht_params,
+	.nvm_ver = IWL8000_NVM_VERSION,
+	.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl8265_2ac_cfg = {
+	.name = "Intel(R) Dual Band Wireless AC 8265",
+	.fw_name_pre = IWL8265_FW_PRE,
+	IWL_DEVICE_8265,
+	.ht_params = &iwl8000_ht_params,
+	.nvm_ver = IWL8000_NVM_VERSION,
+	.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+	.vht_mu_mimo_supported = true,
+};
+
+const struct iwl_cfg iwl8275_2ac_cfg = {
+	.name = "Intel(R) Dual Band Wireless AC 8275",
+	.fw_name_pre = IWL8265_FW_PRE,
+	IWL_DEVICE_8265,
+	.ht_params = &iwl8000_ht_params,
+	.nvm_ver = IWL8000_NVM_VERSION,
+	.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+	.vht_mu_mimo_supported = true,
+};
+
+const struct iwl_cfg iwl4165_2ac_cfg = {
+	.name = "Intel(R) Dual Band Wireless AC 4165",
+	.fw_name_pre = IWL8000_FW_PRE,
+	IWL_DEVICE_8000,
+	.ht_params = &iwl8000_ht_params,
+	.nvm_ver = IWL8000_NVM_VERSION,
+	.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL8265_MODULE_FIRMWARE(IWL8265_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
new file mode 100644
index 0000000..24b2f7c
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -0,0 +1,404 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015-2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include "iwl-config.h"
+#include "fw/file.h"
+
+/* Highest firmware API version supported */
+#define IWL9000_UCODE_API_MAX	38
+
+/* Lowest firmware API version supported */
+#define IWL9000_UCODE_API_MIN	30
+
+/* NVM versions */
+#define IWL9000_NVM_VERSION		0x0a1d
+#define IWL9000_TX_POWER_VERSION	0xffff /* meaningless */
+
+/* Memory offsets and lengths */
+#define IWL9000_DCCM_OFFSET		0x800000
+#define IWL9000_DCCM_LEN		0x18000
+#define IWL9000_DCCM2_OFFSET		0x880000
+#define IWL9000_DCCM2_LEN		0x8000
+#define IWL9000_SMEM_OFFSET		0x400000
+#define IWL9000_SMEM_LEN		0x68000
+
+#define IWL9000A_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
+#define IWL9000B_FW_PRE "iwlwifi-9000-pu-b0-jf-b0-"
+#define IWL9000RFB_FW_PRE "iwlwifi-9000-pu-a0-jf-b0-"
+#define IWL9260A_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"
+#define IWL9260B_FW_PRE "iwlwifi-9260-th-b0-jf-b0-"
+#define IWL9000A_MODULE_FIRMWARE(api) \
+	IWL9000A_FW_PRE __stringify(api) ".ucode"
+#define IWL9000B_MODULE_FIRMWARE(api) \
+	IWL9000B_FW_PRE __stringify(api) ".ucode"
+#define IWL9000RFB_MODULE_FIRMWARE(api) \
+	IWL9000RFB_FW_PRE __stringify(api) ".ucode"
+#define IWL9260A_MODULE_FIRMWARE(api) \
+	IWL9260A_FW_PRE __stringify(api) ".ucode"
+#define IWL9260B_MODULE_FIRMWARE(api) \
+	IWL9260B_FW_PRE __stringify(api) ".ucode"
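+
+/*
+ * The prefixes above encode the MAC/RF module pairing and silicon
+ * steps (e.g. "pu-a0-jf-b0").  At probe time the driver chooses
+ * between fw_name_pre, fw_name_pre_b_or_c_step and
+ * fw_name_pre_rf_next_step based on the detected stepping, then
+ * appends the firmware API version as above.
+ */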
+
+#define NVM_HW_SECTION_NUM_FAMILY_9000		10
+
+static const struct iwl_base_params iwl9000_base_params = {
+	.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_9000,
+	.num_of_queues = 31,
+	.max_tfd_queue_size = 256,
+	.shadow_ram_support = true,
+	.led_compensation = 57,
+	.wd_timeout = IWL_LONG_WD_TIMEOUT,
+	.max_event_log_size = 512,
+	.shadow_reg_enable = true,
+	.pcie_l1_allowed = true,
+};
+
+static const struct iwl_ht_params iwl9000_ht_params = {
+	.stbc = true,
+	.ldpc = true,
+	.ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
+};
+
+static const struct iwl_tt_params iwl9000_tt_params = {
+	.ct_kill_entry = 115,
+	.ct_kill_exit = 93,
+	.ct_kill_duration = 5,
+	.dynamic_smps_entry = 111,
+	.dynamic_smps_exit = 107,
+	.tx_protection_entry = 112,
+	.tx_protection_exit = 105,
+	.tx_backoff = {
+		{.temperature = 110, .backoff = 200},
+		{.temperature = 111, .backoff = 600},
+		{.temperature = 112, .backoff = 1200},
+		{.temperature = 113, .backoff = 2000},
+		{.temperature = 114, .backoff = 4000},
+	},
+	.support_ct_kill = true,
+	.support_dynamic_smps = true,
+	.support_tx_protection = true,
+	.support_tx_backoff = true,
+};
+
+#define IWL_DEVICE_9000							\
+	.ucode_api_max = IWL9000_UCODE_API_MAX,				\
+	.ucode_api_min = IWL9000_UCODE_API_MIN,				\
+	.device_family = IWL_DEVICE_FAMILY_9000,			\
+	.base_params = &iwl9000_base_params,				\
+	.led_mode = IWL_LED_RF_STATE,					\
+	.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_9000,		\
+	.non_shared_ant = ANT_B,					\
+	.dccm_offset = IWL9000_DCCM_OFFSET,				\
+	.dccm_len = IWL9000_DCCM_LEN,					\
+	.dccm2_offset = IWL9000_DCCM2_OFFSET,				\
+	.dccm2_len = IWL9000_DCCM2_LEN,					\
+	.smem_offset = IWL9000_SMEM_OFFSET,				\
+	.smem_len = IWL9000_SMEM_LEN,					\
+	.features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM,		\
+	.thermal_params = &iwl9000_tt_params,				\
+	.apmg_not_supported = true,					\
+	.mq_rx_supported = true,					\
+	.vht_mu_mimo_supported = true,					\
+	.mac_addr_from_csr = true,					\
+	.rf_id = true,							\
+	.nvm_type = IWL_NVM_EXT,					\
+	.dbgc_supported = true,						\
+	.min_umac_error_event_table = 0x800000,				\
+	.csr = &iwl_csr_v1
+
+const struct iwl_cfg iwl9160_2ac_cfg = {
+	.name = "Intel(R) Dual Band Wireless AC 9160",
+	.fw_name_pre = IWL9260A_FW_PRE,
+	.fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
+	IWL_DEVICE_9000,
+	.ht_params = &iwl9000_ht_params,
+	.nvm_ver = IWL9000_NVM_VERSION,
+	.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl9260_2ac_cfg = {
+	.name = "Intel(R) Dual Band Wireless AC 9260",
+	.fw_name_pre = IWL9260A_FW_PRE,
+	.fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
+	IWL_DEVICE_9000,
+	.ht_params = &iwl9000_ht_params,
+	.nvm_ver = IWL9000_NVM_VERSION,
+	.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl9260_killer_2ac_cfg = {
+	.name = "Killer (R) Wireless-AC 1550 Wireless Network Adapter (9260NGW)",
+	.fw_name_pre = IWL9260A_FW_PRE,
+	.fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
+	IWL_DEVICE_9000,
+	.ht_params = &iwl9000_ht_params,
+	.nvm_ver = IWL9000_NVM_VERSION,
+	.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl9270_2ac_cfg = {
+	.name = "Intel(R) Dual Band Wireless AC 9270",
+	.fw_name_pre = IWL9260A_FW_PRE,
+	.fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
+	IWL_DEVICE_9000,
+	.ht_params = &iwl9000_ht_params,
+	.nvm_ver = IWL9000_NVM_VERSION,
+	.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl9460_2ac_cfg = {
+	.name = "Intel(R) Dual Band Wireless AC 9460",
+	.fw_name_pre = IWL9260A_FW_PRE,
+	.fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
+	IWL_DEVICE_9000,
+	.ht_params = &iwl9000_ht_params,
+	.nvm_ver = IWL9000_NVM_VERSION,
+	.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl9460_2ac_cfg_soc = {
+	.name = "Intel(R) Dual Band Wireless AC 9460",
+	.fw_name_pre = IWL9000A_FW_PRE,
+	.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+	.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+	IWL_DEVICE_9000,
+	.ht_params = &iwl9000_ht_params,
+	.nvm_ver = IWL9000_NVM_VERSION,
+	.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+	.integrated = true,
+	.soc_latency = 5000,
+};
+
+const struct iwl_cfg iwl9461_2ac_cfg_soc = {
+		.name = "Intel(R) Dual Band Wireless AC 9461",
+		.fw_name_pre = IWL9000A_FW_PRE,
+		.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+		.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+		IWL_DEVICE_9000,
+		.ht_params = &iwl9000_ht_params,
+		.nvm_ver = IWL9000_NVM_VERSION,
+		.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+		.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+		.integrated = true,
+		.soc_latency = 5000,
+};
+
+const struct iwl_cfg iwl9462_2ac_cfg_soc = {
+		.name = "Intel(R) Dual Band Wireless AC 9462",
+		.fw_name_pre = IWL9000A_FW_PRE,
+		.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+		.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+		IWL_DEVICE_9000,
+		.ht_params = &iwl9000_ht_params,
+		.nvm_ver = IWL9000_NVM_VERSION,
+		.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+		.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+		.integrated = true,
+		.soc_latency = 5000,
+};
+
+const struct iwl_cfg iwl9560_2ac_cfg = {
+	.name = "Intel(R) Dual Band Wireless AC 9560",
+	.fw_name_pre = IWL9260A_FW_PRE,
+	.fw_name_pre_b_or_c_step = IWL9260B_FW_PRE,
+	IWL_DEVICE_9000,
+	.ht_params = &iwl9000_ht_params,
+	.nvm_ver = IWL9000_NVM_VERSION,
+	.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl9560_2ac_cfg_soc = {
+	.name = "Intel(R) Dual Band Wireless AC 9560",
+	.fw_name_pre = IWL9000A_FW_PRE,
+	.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+	.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+	IWL_DEVICE_9000,
+	.ht_params = &iwl9000_ht_params,
+	.nvm_ver = IWL9000_NVM_VERSION,
+	.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+	.integrated = true,
+	.soc_latency = 5000,
+};
+
+const struct iwl_cfg iwl9560_killer_2ac_cfg_soc = {
+	.name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
+	.fw_name_pre = IWL9000A_FW_PRE,
+	.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+	.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+	IWL_DEVICE_9000,
+	.ht_params = &iwl9000_ht_params,
+	.nvm_ver = IWL9000_NVM_VERSION,
+	.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+	.integrated = true,
+	.soc_latency = 5000,
+};
+
+const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc = {
+	.name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
+	.fw_name_pre = IWL9000A_FW_PRE,
+	.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+	.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+	IWL_DEVICE_9000,
+	.ht_params = &iwl9000_ht_params,
+	.nvm_ver = IWL9000_NVM_VERSION,
+	.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+	.integrated = true,
+	.soc_latency = 5000,
+};
+
+const struct iwl_cfg iwl9460_2ac_cfg_shared_clk = {
+	.name = "Intel(R) Dual Band Wireless AC 9460",
+	.fw_name_pre = IWL9000A_FW_PRE,
+	.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+	.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+	IWL_DEVICE_9000,
+	.ht_params = &iwl9000_ht_params,
+	.nvm_ver = IWL9000_NVM_VERSION,
+	.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+	.integrated = true,
+	.soc_latency = 5000,
+	.extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
+};
+
+const struct iwl_cfg iwl9461_2ac_cfg_shared_clk = {
+	.name = "Intel(R) Dual Band Wireless AC 9461",
+	.fw_name_pre = IWL9000A_FW_PRE,
+	.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+	.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+	IWL_DEVICE_9000,
+	.ht_params = &iwl9000_ht_params,
+	.nvm_ver = IWL9000_NVM_VERSION,
+	.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+	.integrated = true,
+	.soc_latency = 5000,
+	.extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
+};
+
+const struct iwl_cfg iwl9462_2ac_cfg_shared_clk = {
+	.name = "Intel(R) Dual Band Wireless AC 9462",
+	.fw_name_pre = IWL9000A_FW_PRE,
+	.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+	.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+	IWL_DEVICE_9000,
+	.ht_params = &iwl9000_ht_params,
+	.nvm_ver = IWL9000_NVM_VERSION,
+	.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+	.integrated = true,
+	.soc_latency = 5000,
+	.extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
+};
+
+const struct iwl_cfg iwl9560_2ac_cfg_shared_clk = {
+	.name = "Intel(R) Dual Band Wireless AC 9560",
+	.fw_name_pre = IWL9000A_FW_PRE,
+	.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+	.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+	IWL_DEVICE_9000,
+	.ht_params = &iwl9000_ht_params,
+	.nvm_ver = IWL9000_NVM_VERSION,
+	.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+	.integrated = true,
+	.soc_latency = 5000,
+	.extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
+};
+
+const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk = {
+	.name = "Killer (R) Wireless-AC 1550i Wireless Network Adapter (9560NGW)",
+	.fw_name_pre = IWL9000A_FW_PRE,
+	.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+	.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+	IWL_DEVICE_9000,
+	.ht_params = &iwl9000_ht_params,
+	.nvm_ver = IWL9000_NVM_VERSION,
+	.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+	.integrated = true,
+	.soc_latency = 5000,
+	.extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
+};
+
+const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk = {
+	.name = "Killer (R) Wireless-AC 1550s Wireless Network Adapter (9560NGW)",
+	.fw_name_pre = IWL9000A_FW_PRE,
+	.fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+	.fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+	IWL_DEVICE_9000,
+	.ht_params = &iwl9000_ht_params,
+	.nvm_ver = IWL9000_NVM_VERSION,
+	.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+	.integrated = true,
+	.soc_latency = 5000,
+	.extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
+};
+
+MODULE_FIRMWARE(IWL9000A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL9000B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL9260A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL9260B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/Makefile b/drivers/net/wireless/intel/iwlwifi/dvm/Makefile
new file mode 100644
index 0000000..702d42b
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+# DVM
+obj-$(CONFIG_IWLDVM)	+= iwldvm.o
+iwldvm-objs		+= main.o rs.o mac80211.o ucode.o tx.o
+iwldvm-objs		+= lib.o calib.o tt.o sta.o rx.o
+
+iwldvm-objs		+= power.o
+iwldvm-objs		+= scan.o
+iwldvm-objs		+= rxon.o devices.o
+
+iwldvm-$(CONFIG_IWLWIFI_LEDS) += led.o
+iwldvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
+
+ccflags-y += -I$(src)/../
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
new file mode 100644
index 0000000..b79e387
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
@@ -0,0 +1,476 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __iwl_agn_h__
+#define __iwl_agn_h__
+
+#include "iwl-config.h"
+
+#include "dev.h"
+
+/* Queues 0-10 carry non-aggregation traffic; AMPDU queues start at 11 */
+#define IWLAGN_FIRST_AMPDU_QUEUE	11
+
+/* AUX (TX during scan dwell) queue */
+#define IWL_AUX_QUEUE		10
+
+#define IWL_INVALID_STATION	255
+
+/* device operations */
+extern const struct iwl_dvm_cfg iwl_dvm_1000_cfg;
+extern const struct iwl_dvm_cfg iwl_dvm_2000_cfg;
+extern const struct iwl_dvm_cfg iwl_dvm_105_cfg;
+extern const struct iwl_dvm_cfg iwl_dvm_2030_cfg;
+extern const struct iwl_dvm_cfg iwl_dvm_5000_cfg;
+extern const struct iwl_dvm_cfg iwl_dvm_5150_cfg;
+extern const struct iwl_dvm_cfg iwl_dvm_6000_cfg;
+extern const struct iwl_dvm_cfg iwl_dvm_6005_cfg;
+extern const struct iwl_dvm_cfg iwl_dvm_6050_cfg;
+extern const struct iwl_dvm_cfg iwl_dvm_6030_cfg;
+
+
+#define TIME_UNIT		1024
+
+/*****************************************************
+* DRIVER STATUS FUNCTIONS
+******************************************************/
+#define STATUS_RF_KILL_HW	0
+#define STATUS_CT_KILL		1
+#define STATUS_ALIVE		2
+#define STATUS_READY		3
+#define STATUS_EXIT_PENDING	5
+#define STATUS_STATISTICS	6
+#define STATUS_SCANNING		7
+#define STATUS_SCAN_ABORTING	8
+#define STATUS_SCAN_HW		9
+#define STATUS_FW_ERROR		10
+#define STATUS_CHANNEL_SWITCH_PENDING 11
+#define STATUS_SCAN_COMPLETE	12
+#define STATUS_POWER_PMI	13
+
+struct iwl_ucode_capabilities;
+
+extern const struct ieee80211_ops iwlagn_hw_ops;
+
+static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
+{
+	hdr->op_code = cmd;
+	hdr->first_group = 0;
+	hdr->groups_num = 1;
+	hdr->data_valid = 1;
+}
+
+void iwl_down(struct iwl_priv *priv);
+void iwl_cancel_deferred_work(struct iwl_priv *priv);
+void iwlagn_prepare_restart(struct iwl_priv *priv);
+void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+		     struct iwl_rx_cmd_buffer *rxb);
+
+bool iwl_check_for_ct_kill(struct iwl_priv *priv);
+
+void iwlagn_lift_passive_no_rx(struct iwl_priv *priv);
+
+/* MAC80211 */
+struct ieee80211_hw *iwl_alloc_all(void);
+int iwlagn_mac_setup_register(struct iwl_priv *priv,
+			      const struct iwl_ucode_capabilities *capa);
+void iwlagn_mac_unregister(struct iwl_priv *priv);
+
+/* commands */
+int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
+int iwl_dvm_send_cmd_pdu(struct iwl_priv *priv, u8 id,
+			 u32 flags, u16 len, const void *data);
+
+/* RXON */
+void iwl_connection_init_rx_config(struct iwl_priv *priv,
+				   struct iwl_rxon_context *ctx);
+int iwlagn_set_pan_params(struct iwl_priv *priv);
+int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
+void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
+int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed);
+void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
+			     struct ieee80211_vif *vif,
+			     struct ieee80211_bss_conf *bss_conf,
+			     u32 changes);
+void iwlagn_config_ht40(struct ieee80211_conf *conf,
+			struct iwl_rxon_context *ctx);
+void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf);
+void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
+			 struct iwl_rxon_context *ctx);
+void iwl_set_flags_for_band(struct iwl_priv *priv,
+			    struct iwl_rxon_context *ctx,
+			    enum nl80211_band band,
+			    struct ieee80211_vif *vif);
+
+/* uCode */
+int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type);
+void iwl_send_prio_tbl(struct iwl_priv *priv);
+int iwl_init_alive_start(struct iwl_priv *priv);
+int iwl_run_init_ucode(struct iwl_priv *priv);
+int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
+			      enum iwl_ucode_type ucode_type);
+int iwl_send_calib_results(struct iwl_priv *priv);
+int iwl_calib_set(struct iwl_priv *priv,
+		  const struct iwl_calib_hdr *cmd, int len);
+void iwl_calib_free_results(struct iwl_priv *priv);
+int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
+			    char **buf);
+int iwlagn_hw_valid_rtc_data_addr(u32 addr);
+
+/* lib */
+int iwlagn_send_tx_power(struct iwl_priv *priv);
+void iwlagn_temperature(struct iwl_priv *priv);
+int iwlagn_txfifo_flush(struct iwl_priv *priv, u32 scd_q_msk);
+void iwlagn_dev_txfifo_flush(struct iwl_priv *priv);
+int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
+int iwl_send_statistics_request(struct iwl_priv *priv,
+				u8 flags, bool clear);
+
+static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
+			struct iwl_priv *priv, enum nl80211_band band)
+{
+	return priv->hw->wiphy->bands[band];
+}
+
+#ifdef CONFIG_PM_SLEEP
+int iwlagn_send_patterns(struct iwl_priv *priv,
+			 struct cfg80211_wowlan *wowlan);
+int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan);
+#endif
+
+/* rx */
+int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band);
+void iwl_setup_rx_handlers(struct iwl_priv *priv);
+void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);
+
+
+/* tx */
+int iwlagn_tx_skb(struct iwl_priv *priv,
+		  struct ieee80211_sta *sta,
+		  struct sk_buff *skb);
+int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
+			struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
+			struct ieee80211_sta *sta, u16 tid, u8 buf_size);
+int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
+		       struct ieee80211_sta *sta, u16 tid);
+int iwlagn_tx_agg_flush(struct iwl_priv *priv, struct ieee80211_vif *vif,
+			struct ieee80211_sta *sta, u16 tid);
+void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+				   struct iwl_rx_cmd_buffer *rxb);
+void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb);
+
+static inline u32 iwl_tx_status_to_mac80211(u32 status)
+{
+	status &= TX_STATUS_MSK;
+
+	switch (status) {
+	case TX_STATUS_SUCCESS:
+	case TX_STATUS_DIRECT_DONE:
+		return IEEE80211_TX_STAT_ACK;
+	case TX_STATUS_FAIL_DEST_PS:
+	case TX_STATUS_FAIL_PASSIVE_NO_RX:
+		return IEEE80211_TX_STAT_TX_FILTERED;
+	default:
+		return 0;
+	}
+}
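+
+/*
+ * In the mapping above, PS/passive-channel failures are reported as
+ * IEEE80211_TX_STAT_TX_FILTERED so mac80211 retries the frame later
+ * (e.g. once the peer wakes up) instead of counting a hard failure.
+ */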
+
+static inline bool iwl_is_tx_success(u32 status)
+{
+	status &= TX_STATUS_MSK;
+	return (status == TX_STATUS_SUCCESS) ||
+	       (status == TX_STATUS_DIRECT_DONE);
+}
+
+u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
+
+/* scan */
+void iwlagn_post_scan(struct iwl_priv *priv);
+int iwl_force_rf_reset(struct iwl_priv *priv, bool external);
+void iwl_init_scan_params(struct iwl_priv *priv);
+int iwl_scan_cancel(struct iwl_priv *priv);
+void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
+void iwl_force_scan_end(struct iwl_priv *priv);
+void iwl_internal_short_hw_scan(struct iwl_priv *priv);
+void iwl_setup_rx_scan_handlers(struct iwl_priv *priv);
+void iwl_setup_scan_deferred_work(struct iwl_priv *priv);
+void iwl_cancel_scan_deferred_work(struct iwl_priv *priv);
+int __must_check iwl_scan_initiate(struct iwl_priv *priv,
+				   struct ieee80211_vif *vif,
+				   enum iwl_scan_type scan_type,
+				   enum nl80211_band band);
+
+/* For faster active scanning, scan will move to the next channel if fewer than
+ * PLCP_QUIET_THRESH packets are heard on this channel within
+ * ACTIVE_QUIET_TIME after sending probe request.  This shortens the dwell
+ * time if it's a quiet channel (nothing responded to our probe, and there's
+ * no other traffic).
+ * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
+#define IWL_ACTIVE_QUIET_TIME       cpu_to_le16(10)  /* msec */
+#define IWL_PLCP_QUIET_THRESH       cpu_to_le16(1)  /* packets */
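+
+/*
+ * With the defaults above, an active scan abandons a channel after
+ * ~10 ms of silence (not a single frame heard after the probe),
+ * rather than waiting out the full dwell time.
+ */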
+
+#define IWL_SCAN_CHECK_WATCHDOG		(HZ * 15)
+
+
+/* bt coex */
+void iwlagn_send_advance_bt_config(struct iwl_priv *priv);
+void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv);
+void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv);
+void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv);
+void iwlagn_bt_coex_rssi_monitor(struct iwl_priv *priv);
+void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena);
+
+static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv)
+{
+	return priv->lib->bt_params &&
+	       priv->lib->bt_params->advanced_bt_coexist;
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+const char *iwl_get_tx_fail_reason(u32 status);
+const char *iwl_get_agg_tx_fail_reason(u16 status);
+#else
+static inline const char *iwl_get_tx_fail_reason(u32 status) { return ""; }
+static inline const char *iwl_get_agg_tx_fail_reason(u16 status) { return ""; }
+#endif
+
+
+/* station management */
+int iwlagn_manage_ibss_station(struct iwl_priv *priv,
+			       struct ieee80211_vif *vif, bool add);
+#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
+#define IWL_STA_UCODE_ACTIVE  BIT(1) /* ucode entry is active */
+#define IWL_STA_UCODE_INPROGRESS  BIT(2) /* ucode entry is in process of
+					    being activated */
+#define IWL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
+				(this is for the IBSS BSSID stations) */
+#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */
+
+
+void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
+void iwl_clear_ucode_stations(struct iwl_priv *priv,
+			      struct iwl_rxon_context *ctx);
+void iwl_dealloc_bcast_stations(struct iwl_priv *priv);
+int iwl_get_free_ucode_key_offset(struct iwl_priv *priv);
+int iwl_send_add_sta(struct iwl_priv *priv,
+		     struct iwl_addsta_cmd *sta, u8 flags);
+int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+			   const u8 *addr, bool is_ap,
+			   struct ieee80211_sta *sta, u8 *sta_id_r);
+int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
+		       const u8 *addr);
+void iwl_deactivate_station(struct iwl_priv *priv, const u8 sta_id,
+			    const u8 *addr);
+u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+		    const u8 *addr, bool is_ap, struct ieee80211_sta *sta);
+
+int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+		    struct iwl_link_quality_cmd *lq, u8 flags, bool init);
+void iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb);
+int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+		      struct ieee80211_sta *sta);
+
+bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
+			    struct iwl_rxon_context *ctx,
+			    struct ieee80211_sta *sta);
+
+static inline int iwl_sta_id(struct ieee80211_sta *sta)
+{
+	if (WARN_ON(!sta))
+		return IWL_INVALID_STATION;
+
+	return ((struct iwl_station_priv *)sta->drv_priv)->sta_id;
+}
+
+int iwlagn_alloc_bcast_station(struct iwl_priv *priv,
+			       struct iwl_rxon_context *ctx);
+int iwlagn_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+			     const u8 *addr, u8 *sta_id_r);
+int iwl_remove_default_wep_key(struct iwl_priv *priv,
+			       struct iwl_rxon_context *ctx,
+			       struct ieee80211_key_conf *key);
+int iwl_set_default_wep_key(struct iwl_priv *priv,
+			    struct iwl_rxon_context *ctx,
+			    struct ieee80211_key_conf *key);
+int iwl_restore_default_wep_keys(struct iwl_priv *priv,
+				 struct iwl_rxon_context *ctx);
+int iwl_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+			struct ieee80211_key_conf *key,
+			struct ieee80211_sta *sta);
+int iwl_remove_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+			   struct ieee80211_key_conf *key,
+			   struct ieee80211_sta *sta);
+void iwl_update_tkip_key(struct iwl_priv *priv,
+			 struct ieee80211_vif *vif,
+			 struct ieee80211_key_conf *keyconf,
+			 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key);
+int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid);
+int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
+			 int tid, u16 ssn);
+int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
+			int tid);
+void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt);
+int iwl_update_bcast_station(struct iwl_priv *priv,
+			     struct iwl_rxon_context *ctx);
+int iwl_update_bcast_stations(struct iwl_priv *priv);
+
+/* rate */
+static inline u32 iwl_ant_idx_to_flags(u8 ant_idx)
+{
+	return BIT(ant_idx) << RATE_MCS_ANT_POS;
+}
+
+static inline u8 iwl_hw_get_rate(__le32 rate_n_flags)
+{
+	return le32_to_cpu(rate_n_flags) & RATE_MCS_RATE_MSK;
+}
+
+static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
+{
+	return cpu_to_le32(flags | (u32)rate);
+}
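+
+/*
+ * Example: iwl_hw_set_rate_n_flags(rate, iwl_ant_idx_to_flags(0))
+ * builds a little-endian rate_n_flags word carrying the rate index
+ * plus the antenna-A selection bit in the RATE_MCS_ANT field.
+ */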
+
+int iwl_alive_start(struct iwl_priv *priv);
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+void iwl_print_rx_config_cmd(struct iwl_priv *priv,
+			     enum iwl_rxon_context_id ctxid);
+#else
+static inline void iwl_print_rx_config_cmd(struct iwl_priv *priv,
+					   enum iwl_rxon_context_id ctxid)
+{
+}
+#endif
+
+/* status checks */
+
+static inline int iwl_is_ready(struct iwl_priv *priv)
+{
+	/* The adapter is 'ready' if READY is set and EXIT_PENDING is not */
+	return test_bit(STATUS_READY, &priv->status) &&
+	       !test_bit(STATUS_EXIT_PENDING, &priv->status);
+}
+
+static inline int iwl_is_alive(struct iwl_priv *priv)
+{
+	return test_bit(STATUS_ALIVE, &priv->status);
+}
+
+static inline int iwl_is_rfkill(struct iwl_priv *priv)
+{
+	return test_bit(STATUS_RF_KILL_HW, &priv->status);
+}
+
+static inline int iwl_is_ctkill(struct iwl_priv *priv)
+{
+	return test_bit(STATUS_CT_KILL, &priv->status);
+}
+
+static inline int iwl_is_ready_rf(struct iwl_priv *priv)
+{
+	if (iwl_is_rfkill(priv))
+		return 0;
+
+	return iwl_is_ready(priv);
+}
+
+static inline void iwl_dvm_set_pmi(struct iwl_priv *priv, bool state)
+{
+	if (state)
+		set_bit(STATUS_POWER_PMI, &priv->status);
+	else
+		clear_bit(STATUS_POWER_PMI, &priv->status);
+	iwl_trans_set_pmi(priv->trans, state);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir);
+#else
+static inline int iwl_dbgfs_register(struct iwl_priv *priv,
+				     struct dentry *dbgfs_dir)
+{
+	return 0;
+}
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+#define IWL_DEBUG_QUIET_RFKILL(m, fmt, args...)	\
+do {									\
+	if (!iwl_is_rfkill((m)))					\
+		IWL_ERR(m, fmt, ##args);				\
+	else								\
+		__iwl_err((m)->dev, true,				\
+			  !iwl_have_debug_level(IWL_DL_RADIO),		\
+			  fmt, ##args);					\
+} while (0)
+#else
+#define IWL_DEBUG_QUIET_RFKILL(m, fmt, args...)	\
+do {									\
+	if (!iwl_is_rfkill((m)))					\
+		IWL_ERR(m, fmt, ##args);				\
+	else								\
+		__iwl_err((m)->dev, true, true, fmt, ##args);	\
+} while (0)
+#endif				/* CONFIG_IWLWIFI_DEBUG */
+
+#endif /* __iwl_agn_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/calib.c b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c
new file mode 100644
index 0000000..c96f9b1
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c
@@ -0,0 +1,1112 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include "iwl-trans.h"
+
+#include "dev.h"
+#include "calib.h"
+#include "agn.h"
+
+/*****************************************************************************
+ * INIT calibrations framework
+ *****************************************************************************/
+
+/* Opaque calibration results */
+struct iwl_calib_result {
+	struct list_head list;
+	size_t cmd_len;
+	struct iwl_calib_hdr hdr;
+	/* data follows */
+};
+
+struct statistics_general_data {
+	u32 beacon_silence_rssi_a;
+	u32 beacon_silence_rssi_b;
+	u32 beacon_silence_rssi_c;
+	u32 beacon_energy_a;
+	u32 beacon_energy_b;
+	u32 beacon_energy_c;
+};
+
+int iwl_send_calib_results(struct iwl_priv *priv)
+{
+	struct iwl_host_cmd hcmd = {
+		.id = REPLY_PHY_CALIBRATION_CMD,
+	};
+	struct iwl_calib_result *res;
+
+	list_for_each_entry(res, &priv->calib_results, list) {
+		int ret;
+
+		hcmd.len[0] = res->cmd_len;
+		hcmd.data[0] = &res->hdr;
+		hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
+		ret = iwl_dvm_send_cmd(priv, &hcmd);
+		if (ret) {
+			IWL_ERR(priv, "Error %d on calib cmd %d\n",
+				ret, res->hdr.op_code);
+			return ret;
+		}
+	}
+
+	return 0;
+}
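+/*
+ * Note that IWL_HCMD_DFL_NOCOPY above makes the transport use res->hdr in
+ * place rather than copying it into the command queue, so each stored
+ * result must stay allocated until the command completes; the entries are
+ * only released by iwl_calib_free_results() below.
+ */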
+
+int iwl_calib_set(struct iwl_priv *priv,
+		  const struct iwl_calib_hdr *cmd, int len)
+{
+	struct iwl_calib_result *res, *tmp;
+
+	res = kmalloc(sizeof(*res) + len - sizeof(struct iwl_calib_hdr),
+		      GFP_ATOMIC);
+	if (!res)
+		return -ENOMEM;
+	memcpy(&res->hdr, cmd, len);
+	res->cmd_len = len;
+
+	list_for_each_entry(tmp, &priv->calib_results, list) {
+		if (tmp->hdr.op_code == res->hdr.op_code) {
+			list_replace(&tmp->list, &res->list);
+			kfree(tmp);
+			return 0;
+		}
+	}
+
+	/* wasn't in list already */
+	list_add_tail(&res->list, &priv->calib_results);
+
+	return 0;
+}
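+/*
+ * A sketch of how the pieces above fit together: the init uCode reports
+ * each result in a CALIBRATION_RES_NOTIFICATION, the driver stores it via
+ * iwl_calib_set() (the replace-or-append logic keeps at most one entry
+ * per op_code), and iwl_send_calib_results() later replays the whole list
+ * to the runtime uCode.
+ */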
+
+void iwl_calib_free_results(struct iwl_priv *priv)
+{
+	struct iwl_calib_result *res, *tmp;
+
+	list_for_each_entry_safe(res, tmp, &priv->calib_results, list) {
+		list_del(&res->list);
+		kfree(res);
+	}
+}
+
+/*****************************************************************************
+ * RUNTIME calibrations framework
+ *****************************************************************************/
+
+/* "false alarms" are signals that our DSP tries to lock onto,
+ *   but then determines that they are either noise, or transmissions
+ *   from a distant wireless network (also "noise", really) that get
+ *   "stepped on" by stronger transmissions within our own network.
+ * This algorithm attempts to set a sensitivity level that is high
+ *   enough to receive all of our own network traffic, but not so
+ *   high that our DSP gets too busy trying to lock onto non-network
+ *   activity/noise. */
+static int iwl_sens_energy_cck(struct iwl_priv *priv,
+				   u32 norm_fa,
+				   u32 rx_enable_time,
+				   struct statistics_general_data *rx_info)
+{
+	u32 max_nrg_cck = 0;
+	int i = 0;
+	u8 max_silence_rssi = 0;
+	u32 silence_ref = 0;
+	u8 silence_rssi_a = 0;
+	u8 silence_rssi_b = 0;
+	u8 silence_rssi_c = 0;
+	u32 val;
+
+	/* "false_alarms" values below are cross-multiplications to assess the
+	 *   numbers of false alarms within the measured period of actual Rx
+	 *   (Rx is off when we're txing), vs the min/max expected false alarms
+	 *   (some should be expected if rx is sensitive enough) in a
+	 *   hypothetical listening period of 200 time units (TU), 204.8 msec:
+	 *
+	 * MIN_FA/fixed-time < false_alarms/actual-rx-time < MAX_FA/beacon-time
+	 */
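+	/*
+	 * Worked example (illustrative numbers; MIN_FA_CCK and MAX_FA_CCK
+	 * are defined elsewhere in the driver): with norm_fa = 10 false
+	 * alarms seen during rx_enable_time = 100000 usec of actual Rx,
+	 * false_alarms = 10 * 204800 = 2048000.  Assuming MIN_FA_CCK = 5
+	 * and MAX_FA_CCK = 50, the bounds below are 500000 and 5000000,
+	 * so this period falls in the "healthy" range.
+	 */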
+	u32 false_alarms = norm_fa * 200 * 1024;
+	u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
+	u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
+	struct iwl_sensitivity_data *data = NULL;
+	const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+
+	data = &(priv->sensitivity_data);
+
+	data->nrg_auto_corr_silence_diff = 0;
+
+	/* Find max silence rssi among all 3 receivers.
+	 * This is background noise, which may include transmissions from other
+	 *    networks, measured during silence before our network's beacon */
+	silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a &
+			    ALL_BAND_FILTER) >> 8);
+	silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b &
+			    ALL_BAND_FILTER) >> 8);
+	silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c &
+			    ALL_BAND_FILTER) >> 8);
+
+	val = max(silence_rssi_b, silence_rssi_c);
+	max_silence_rssi = max(silence_rssi_a, (u8) val);
+
+	/* Store silence rssi in 20-beacon history table */
+	data->nrg_silence_rssi[data->nrg_silence_idx] = max_silence_rssi;
+	data->nrg_silence_idx++;
+	if (data->nrg_silence_idx >= NRG_NUM_PREV_STAT_L)
+		data->nrg_silence_idx = 0;
+
+	/* Find max silence rssi across 20 beacon history */
+	for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) {
+		val = data->nrg_silence_rssi[i];
+		silence_ref = max(silence_ref, val);
+	}
+	IWL_DEBUG_CALIB(priv, "silence a %u, b %u, c %u, 20-bcn max %u\n",
+			silence_rssi_a, silence_rssi_b, silence_rssi_c,
+			silence_ref);
+
+	/* Find max rx energy (min value!) among all 3 receivers,
+	 *   measured during beacon frame.
+	 * Save it in 10-beacon history table. */
+	i = data->nrg_energy_idx;
+	val = min(rx_info->beacon_energy_b, rx_info->beacon_energy_c);
+	data->nrg_value[i] = min(rx_info->beacon_energy_a, val);
+
+	data->nrg_energy_idx++;
+	if (data->nrg_energy_idx >= 10)
+		data->nrg_energy_idx = 0;
+
+	/* Find min rx energy (max value) across 10 beacon history.
+	 * This is the minimum signal level that we want to receive well.
+	 * Add backoff (margin so we don't miss slightly lower energy frames).
+	 * This establishes an upper bound (min value) for energy threshold. */
+	max_nrg_cck = data->nrg_value[0];
+	for (i = 1; i < 10; i++)
+		max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
+	max_nrg_cck += 6;
+
+	IWL_DEBUG_CALIB(priv, "rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
+			rx_info->beacon_energy_a, rx_info->beacon_energy_b,
+			rx_info->beacon_energy_c, max_nrg_cck - 6);
+
+	/* Count number of consecutive beacons with fewer-than-desired
+	 *   false alarms. */
+	if (false_alarms < min_false_alarms)
+		data->num_in_cck_no_fa++;
+	else
+		data->num_in_cck_no_fa = 0;
+	IWL_DEBUG_CALIB(priv, "consecutive bcns with few false alarms = %u\n",
+			data->num_in_cck_no_fa);
+
+	/* If we got too many false alarms this time, reduce sensitivity */
+	if ((false_alarms > max_false_alarms) &&
+		(data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK)) {
+		IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u\n",
+		     false_alarms, max_false_alarms);
+		IWL_DEBUG_CALIB(priv, "... reducing sensitivity\n");
+		data->nrg_curr_state = IWL_FA_TOO_MANY;
+		/* Store for "fewer than desired" on later beacon */
+		data->nrg_silence_ref = silence_ref;
+
+		/* increase energy threshold (reduce nrg value)
+		 *   to decrease sensitivity */
+		data->nrg_th_cck = data->nrg_th_cck - NRG_STEP_CCK;
+	/* Else if we got fewer than desired, increase sensitivity */
+	} else if (false_alarms < min_false_alarms) {
+		data->nrg_curr_state = IWL_FA_TOO_FEW;
+
+		/* Compare the current silence level with the one stored from
+		 *   the most recent beacon that had a healthy number of, or
+		 *   too many, false alarms */
+		data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref -
+						   (s32)silence_ref;
+
+		IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u, silence diff %d\n",
+			 false_alarms, min_false_alarms,
+			 data->nrg_auto_corr_silence_diff);
+
+		/* Increase value to increase sensitivity, but only if:
+		 * 1a) previous beacon did *not* have *too many* false alarms
+		 * 1b) AND there's a significant difference in Rx levels
+		 *      from a previous beacon with too many, or healthy # FAs
+		 * OR 2) We've seen a lot of beacons (100) with too few
+		 *       false alarms */
+		if ((data->nrg_prev_state != IWL_FA_TOO_MANY) &&
+			((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
+			(data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
+
+			IWL_DEBUG_CALIB(priv, "... increasing sensitivity\n");
+			/* Increase nrg value to increase sensitivity */
+			val = data->nrg_th_cck + NRG_STEP_CCK;
+			data->nrg_th_cck = min((u32)ranges->min_nrg_cck, val);
+		} else {
+			IWL_DEBUG_CALIB(priv, "... but not changing sensitivity\n");
+		}
+
+	/* Else we got a healthy number of false alarms, keep status quo */
+	} else {
+		IWL_DEBUG_CALIB(priv, " FA in safe zone\n");
+		data->nrg_curr_state = IWL_FA_GOOD_RANGE;
+
+		/* Store for use in "fewer than desired" with later beacon */
+		data->nrg_silence_ref = silence_ref;
+
+		/* If previous beacon had too many false alarms,
+		 *   give it some extra margin by reducing sensitivity again
+		 *   (but don't go below measured energy of desired Rx) */
+		if (data->nrg_prev_state == IWL_FA_TOO_MANY) {
+			IWL_DEBUG_CALIB(priv, "... increasing margin\n");
+			if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN))
+				data->nrg_th_cck -= NRG_MARGIN;
+			else
+				data->nrg_th_cck = max_nrg_cck;
+		}
+	}
+
+	/* Make sure the energy threshold does not go above the measured
+	 * energy of the desired Rx signals (reduced by backoff margin),
+	 * or else we might start missing Rx frames.
+	 * Lower value is higher energy, so we use max()!
+	 */
+	data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
+	IWL_DEBUG_CALIB(priv, "new nrg_th_cck %u\n", data->nrg_th_cck);
+
+	data->nrg_prev_state = data->nrg_curr_state;
+
+	/* Auto-correlation CCK algorithm */
+	if (false_alarms > min_false_alarms) {
+
+		/* increase auto_corr values to decrease sensitivity
+		 * so the DSP won't be disturbed by the noise
+		 */
+		if (data->auto_corr_cck < AUTO_CORR_MAX_TH_CCK) {
+			data->auto_corr_cck = AUTO_CORR_MAX_TH_CCK + 1;
+		} else {
+			val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
+			data->auto_corr_cck =
+				min((u32)ranges->auto_corr_max_cck, val);
+		}
+		val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
+		data->auto_corr_cck_mrc =
+			min((u32)ranges->auto_corr_max_cck_mrc, val);
+	} else if ((false_alarms < min_false_alarms) &&
+	   ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
+	   (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
+
+		/* Decrease auto_corr values to increase sensitivity */
+		val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
+		data->auto_corr_cck =
+			max((u32)ranges->auto_corr_min_cck, val);
+		val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
+		data->auto_corr_cck_mrc =
+			max((u32)ranges->auto_corr_min_cck_mrc, val);
+	}
+
+	return 0;
+}
+
+
+static int iwl_sens_auto_corr_ofdm(struct iwl_priv *priv,
+				       u32 norm_fa,
+				       u32 rx_enable_time)
+{
+	u32 val;
+	u32 false_alarms = norm_fa * 200 * 1024;
+	u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
+	u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
+	struct iwl_sensitivity_data *data = NULL;
+	const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+
+	data = &(priv->sensitivity_data);
+
+	/* If we got too many false alarms this time, reduce sensitivity */
+	if (false_alarms > max_false_alarms) {
+
+		IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u)\n",
+			     false_alarms, max_false_alarms);
+
+		val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
+		data->auto_corr_ofdm =
+			min((u32)ranges->auto_corr_max_ofdm, val);
+
+		val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
+		data->auto_corr_ofdm_mrc =
+			min((u32)ranges->auto_corr_max_ofdm_mrc, val);
+
+		val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
+		data->auto_corr_ofdm_x1 =
+			min((u32)ranges->auto_corr_max_ofdm_x1, val);
+
+		val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
+		data->auto_corr_ofdm_mrc_x1 =
+			min((u32)ranges->auto_corr_max_ofdm_mrc_x1, val);
+	}
+
+	/* Else if we got fewer than desired, increase sensitivity */
+	else if (false_alarms < min_false_alarms) {
+
+		IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u\n",
+			     false_alarms, min_false_alarms);
+
+		val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
+		data->auto_corr_ofdm =
+			max((u32)ranges->auto_corr_min_ofdm, val);
+
+		val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
+		data->auto_corr_ofdm_mrc =
+			max((u32)ranges->auto_corr_min_ofdm_mrc, val);
+
+		val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
+		data->auto_corr_ofdm_x1 =
+			max((u32)ranges->auto_corr_min_ofdm_x1, val);
+
+		val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
+		data->auto_corr_ofdm_mrc_x1 =
+			max((u32)ranges->auto_corr_min_ofdm_mrc_x1, val);
+	} else {
+		IWL_DEBUG_CALIB(priv, "min FA %u < norm FA %u < max FA %u OK\n",
+			 min_false_alarms, false_alarms, max_false_alarms);
+	}
+	return 0;
+}
+
+static void iwl_prepare_legacy_sensitivity_tbl(struct iwl_priv *priv,
+				struct iwl_sensitivity_data *data,
+				__le16 *tbl)
+{
+	tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
+				cpu_to_le16((u16)data->auto_corr_ofdm);
+	tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
+				cpu_to_le16((u16)data->auto_corr_ofdm_mrc);
+	tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
+				cpu_to_le16((u16)data->auto_corr_ofdm_x1);
+	tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
+				cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1);
+
+	tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] =
+				cpu_to_le16((u16)data->auto_corr_cck);
+	tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] =
+				cpu_to_le16((u16)data->auto_corr_cck_mrc);
+
+	tbl[HD_MIN_ENERGY_CCK_DET_INDEX] =
+				cpu_to_le16((u16)data->nrg_th_cck);
+	tbl[HD_MIN_ENERGY_OFDM_DET_INDEX] =
+				cpu_to_le16((u16)data->nrg_th_ofdm);
+
+	tbl[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
+				cpu_to_le16(data->barker_corr_th_min);
+	tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
+				cpu_to_le16(data->barker_corr_th_min_mrc);
+	tbl[HD_OFDM_ENERGY_TH_IN_INDEX] =
+				cpu_to_le16(data->nrg_th_cca);
+
+	IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
+			data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
+			data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
+			data->nrg_th_ofdm);
+
+	IWL_DEBUG_CALIB(priv, "cck: ac %u mrc %u thresh %u\n",
+			data->auto_corr_cck, data->auto_corr_cck_mrc,
+			data->nrg_th_cck);
+}
+
+/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
+static int iwl_sensitivity_write(struct iwl_priv *priv)
+{
+	struct iwl_sensitivity_cmd cmd;
+	struct iwl_sensitivity_data *data = NULL;
+	struct iwl_host_cmd cmd_out = {
+		.id = SENSITIVITY_CMD,
+		.len = { sizeof(struct iwl_sensitivity_cmd), },
+		.flags = CMD_ASYNC,
+		.data = { &cmd, },
+	};
+
+	data = &(priv->sensitivity_data);
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.table[0]);
+
+	/* Update uCode's "work" table, and copy it to DSP */
+	cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
+
+	/* Don't send command to uCode if nothing has changed */
+	if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]),
+		    sizeof(u16)*HD_TABLE_SIZE)) {
+		IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n");
+		return 0;
+	}
+
+	/* Copy table for comparison next time */
+	memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
+	       sizeof(u16)*HD_TABLE_SIZE);
+
+	return iwl_dvm_send_cmd(priv, &cmd_out);
+}
+
+/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
+static int iwl_enhance_sensitivity_write(struct iwl_priv *priv)
+{
+	struct iwl_enhance_sensitivity_cmd cmd;
+	struct iwl_sensitivity_data *data = NULL;
+	struct iwl_host_cmd cmd_out = {
+		.id = SENSITIVITY_CMD,
+		.len = { sizeof(struct iwl_enhance_sensitivity_cmd), },
+		.flags = CMD_ASYNC,
+		.data = { &cmd, },
+	};
+
+	data = &(priv->sensitivity_data);
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.enhance_table[0]);
+
+	if (priv->lib->hd_v2) {
+		cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] =
+			HD_INA_NON_SQUARE_DET_OFDM_DATA_V2;
+		cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] =
+			HD_INA_NON_SQUARE_DET_CCK_DATA_V2;
+		cmd.enhance_table[HD_CORR_11_INSTEAD_OF_CORR_9_EN_INDEX] =
+			HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA_V2;
+		cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_INDEX] =
+			HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA_V2;
+		cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] =
+			HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V2;
+		cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_INDEX] =
+			HD_OFDM_NON_SQUARE_DET_SLOPE_DATA_V2;
+		cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_INDEX] =
+			HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA_V2;
+		cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_MRC_INDEX] =
+			HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA_V2;
+		cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] =
+			HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V2;
+		cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_INDEX] =
+			HD_CCK_NON_SQUARE_DET_SLOPE_DATA_V2;
+		cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_INDEX] =
+			HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA_V2;
+	} else {
+		cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] =
+			HD_INA_NON_SQUARE_DET_OFDM_DATA_V1;
+		cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] =
+			HD_INA_NON_SQUARE_DET_CCK_DATA_V1;
+		cmd.enhance_table[HD_CORR_11_INSTEAD_OF_CORR_9_EN_INDEX] =
+			HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA_V1;
+		cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_INDEX] =
+			HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA_V1;
+		cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] =
+			HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V1;
+		cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_SLOPE_INDEX] =
+			HD_OFDM_NON_SQUARE_DET_SLOPE_DATA_V1;
+		cmd.enhance_table[HD_OFDM_NON_SQUARE_DET_INTERCEPT_INDEX] =
+			HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA_V1;
+		cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_MRC_INDEX] =
+			HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA_V1;
+		cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_INDEX] =
+			HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V1;
+		cmd.enhance_table[HD_CCK_NON_SQUARE_DET_SLOPE_INDEX] =
+			HD_CCK_NON_SQUARE_DET_SLOPE_DATA_V1;
+		cmd.enhance_table[HD_CCK_NON_SQUARE_DET_INTERCEPT_INDEX] =
+			HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA_V1;
+	}
+
+	/* Update uCode's "work" table, and copy it to DSP */
+	cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
+
+	/* Don't send command to uCode if nothing has changed */
+	if (!memcmp(&cmd.enhance_table[0], &(priv->sensitivity_tbl[0]),
+		    sizeof(u16)*HD_TABLE_SIZE) &&
+	    !memcmp(&cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX],
+		    &(priv->enhance_sensitivity_tbl[0]),
+		    sizeof(u16)*ENHANCE_HD_TABLE_ENTRIES)) {
+		IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n");
+		return 0;
+	}
+
+	/* Copy table for comparison next time */
+	memcpy(&(priv->sensitivity_tbl[0]), &(cmd.enhance_table[0]),
+	       sizeof(u16)*HD_TABLE_SIZE);
+	memcpy(&(priv->enhance_sensitivity_tbl[0]),
+	       &(cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX]),
+	       sizeof(u16)*ENHANCE_HD_TABLE_ENTRIES);
+
+	return iwl_dvm_send_cmd(priv, &cmd_out);
+}
+
+void iwl_init_sensitivity(struct iwl_priv *priv)
+{
+	int ret = 0;
+	int i;
+	struct iwl_sensitivity_data *data = NULL;
+	const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+
+	if (priv->calib_disabled & IWL_SENSITIVITY_CALIB_DISABLED)
+		return;
+
+	IWL_DEBUG_CALIB(priv, "Start iwl_init_sensitivity\n");
+
+	/* Clear driver's sensitivity algo data */
+	data = &(priv->sensitivity_data);
+
+	if (ranges == NULL)
+		return;
+
+	memset(data, 0, sizeof(struct iwl_sensitivity_data));
+
+	data->num_in_cck_no_fa = 0;
+	data->nrg_curr_state = IWL_FA_TOO_MANY;
+	data->nrg_prev_state = IWL_FA_TOO_MANY;
+	data->nrg_silence_ref = 0;
+	data->nrg_silence_idx = 0;
+	data->nrg_energy_idx = 0;
+
+	for (i = 0; i < 10; i++)
+		data->nrg_value[i] = 0;
+
+	for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
+		data->nrg_silence_rssi[i] = 0;
+
+	data->auto_corr_ofdm =  ranges->auto_corr_min_ofdm;
+	data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc;
+	data->auto_corr_ofdm_x1  = ranges->auto_corr_min_ofdm_x1;
+	data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1;
+	data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
+	data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc;
+	data->nrg_th_cck = ranges->nrg_th_cck;
+	data->nrg_th_ofdm = ranges->nrg_th_ofdm;
+	data->barker_corr_th_min = ranges->barker_corr_th_min;
+	data->barker_corr_th_min_mrc = ranges->barker_corr_th_min_mrc;
+	data->nrg_th_cca = ranges->nrg_th_cca;
+
+	data->last_bad_plcp_cnt_ofdm = 0;
+	data->last_fa_cnt_ofdm = 0;
+	data->last_bad_plcp_cnt_cck = 0;
+	data->last_fa_cnt_cck = 0;
+
+	if (priv->fw->enhance_sensitivity_table)
+		ret |= iwl_enhance_sensitivity_write(priv);
+	else
+		ret |= iwl_sensitivity_write(priv);
+	IWL_DEBUG_CALIB(priv, "<<return 0x%X\n", ret);
+}
+
+void iwl_sensitivity_calibration(struct iwl_priv *priv)
+{
+	u32 rx_enable_time;
+	u32 fa_cck;
+	u32 fa_ofdm;
+	u32 bad_plcp_cck;
+	u32 bad_plcp_ofdm;
+	u32 norm_fa_ofdm;
+	u32 norm_fa_cck;
+	struct iwl_sensitivity_data *data = NULL;
+	struct statistics_rx_non_phy *rx_info;
+	struct statistics_rx_phy *ofdm, *cck;
+	struct statistics_general_data statis;
+
+	if (priv->calib_disabled & IWL_SENSITIVITY_CALIB_DISABLED)
+		return;
+
+	data = &(priv->sensitivity_data);
+
+	if (!iwl_is_any_associated(priv)) {
+		IWL_DEBUG_CALIB(priv, "<< - not associated\n");
+		return;
+	}
+
+	spin_lock_bh(&priv->statistics.lock);
+	rx_info = &priv->statistics.rx_non_phy;
+	ofdm = &priv->statistics.rx_ofdm;
+	cck = &priv->statistics.rx_cck;
+	if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
+		IWL_DEBUG_CALIB(priv, "<< invalid data.\n");
+		spin_unlock_bh(&priv->statistics.lock);
+		return;
+	}
+
+	/* Extract Statistics: */
+	rx_enable_time = le32_to_cpu(rx_info->channel_load);
+	fa_cck = le32_to_cpu(cck->false_alarm_cnt);
+	fa_ofdm = le32_to_cpu(ofdm->false_alarm_cnt);
+	bad_plcp_cck = le32_to_cpu(cck->plcp_err);
+	bad_plcp_ofdm = le32_to_cpu(ofdm->plcp_err);
+
+	statis.beacon_silence_rssi_a =
+			le32_to_cpu(rx_info->beacon_silence_rssi_a);
+	statis.beacon_silence_rssi_b =
+			le32_to_cpu(rx_info->beacon_silence_rssi_b);
+	statis.beacon_silence_rssi_c =
+			le32_to_cpu(rx_info->beacon_silence_rssi_c);
+	statis.beacon_energy_a =
+			le32_to_cpu(rx_info->beacon_energy_a);
+	statis.beacon_energy_b =
+			le32_to_cpu(rx_info->beacon_energy_b);
+	statis.beacon_energy_c =
+			le32_to_cpu(rx_info->beacon_energy_c);
+
+	spin_unlock_bh(&priv->statistics.lock);
+
+	IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time);
+
+	if (!rx_enable_time) {
+		IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0!\n");
+		return;
+	}
+
+	/* These statistics increase monotonically, and do not reset
+	 *   at each beacon.  Calculate difference from last value, or just
+	 *   use the new statistics value if it has reset or wrapped around. */
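+	/*
+	 * Illustrative example: if the stored count is 100 and the new
+	 * cumulative count is 130, the delta for this period is 30 and
+	 * the stored count becomes 130.  If the new count is 20, the
+	 * uCode counter must have reset or wrapped, so 20 is taken as
+	 * both the new baseline and this period's value.
+	 */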
+	if (data->last_bad_plcp_cnt_cck > bad_plcp_cck) {
+		data->last_bad_plcp_cnt_cck = bad_plcp_cck;
+	} else {
+		bad_plcp_cck -= data->last_bad_plcp_cnt_cck;
+		data->last_bad_plcp_cnt_cck += bad_plcp_cck;
+	}
+
+	if (data->last_bad_plcp_cnt_ofdm > bad_plcp_ofdm) {
+		data->last_bad_plcp_cnt_ofdm = bad_plcp_ofdm;
+	} else {
+		bad_plcp_ofdm -= data->last_bad_plcp_cnt_ofdm;
+		data->last_bad_plcp_cnt_ofdm += bad_plcp_ofdm;
+	}
+
+	if (data->last_fa_cnt_ofdm > fa_ofdm) {
+		data->last_fa_cnt_ofdm = fa_ofdm;
+	} else {
+		fa_ofdm -= data->last_fa_cnt_ofdm;
+		data->last_fa_cnt_ofdm += fa_ofdm;
+	}
+
+	if (data->last_fa_cnt_cck > fa_cck) {
+		data->last_fa_cnt_cck = fa_cck;
+	} else {
+		fa_cck -= data->last_fa_cnt_cck;
+		data->last_fa_cnt_cck += fa_cck;
+	}
+
+	/* Total aborted signal locks */
+	norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
+	norm_fa_cck = fa_cck + bad_plcp_cck;
+
+	IWL_DEBUG_CALIB(priv, "cck: fa %u badp %u  ofdm: fa %u badp %u\n", fa_cck,
+			bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
+
+	iwl_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
+	iwl_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
+	if (priv->fw->enhance_sensitivity_table)
+		iwl_enhance_sensitivity_write(priv);
+	else
+		iwl_sensitivity_write(priv);
+}
+
+static inline u8 find_first_chain(u8 mask)
+{
+	if (mask & ANT_A)
+		return CHAIN_A;
+	if (mask & ANT_B)
+		return CHAIN_B;
+	return CHAIN_C;
+}
+
+/**
+ * iwl_find_disconn_antenna - run the disconnected antenna algorithm
+ *
+ * Find out which antennas are disconnected by comparing the average
+ * signal strength measured on each receive chain.
+ */
+static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32 *average_sig,
+				     struct iwl_chain_noise_data *data)
+{
+	u32 active_chains = 0;
+	u32 max_average_sig;
+	u16 max_average_sig_antenna_i;
+	u8 num_tx_chains;
+	u8 first_chain;
+	u16 i = 0;
+
+	average_sig[0] = data->chain_signal_a / IWL_CAL_NUM_BEACONS;
+	average_sig[1] = data->chain_signal_b / IWL_CAL_NUM_BEACONS;
+	average_sig[2] = data->chain_signal_c / IWL_CAL_NUM_BEACONS;
+
+	if (average_sig[0] >= average_sig[1]) {
+		max_average_sig = average_sig[0];
+		max_average_sig_antenna_i = 0;
+		active_chains = (1 << max_average_sig_antenna_i);
+	} else {
+		max_average_sig = average_sig[1];
+		max_average_sig_antenna_i = 1;
+		active_chains = (1 << max_average_sig_antenna_i);
+	}
+
+	if (average_sig[2] >= max_average_sig) {
+		max_average_sig = average_sig[2];
+		max_average_sig_antenna_i = 2;
+		active_chains = (1 << max_average_sig_antenna_i);
+	}
+
+	IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n",
+		     average_sig[0], average_sig[1], average_sig[2]);
+	IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n",
+		     max_average_sig, max_average_sig_antenna_i);
+
+	/* Compare signal strengths for all 3 receivers. */
+	for (i = 0; i < NUM_RX_CHAINS; i++) {
+		if (i != max_average_sig_antenna_i) {
+			s32 rssi_delta = (max_average_sig - average_sig[i]);
+
+			/* If signal is very weak, compared with
+			 * strongest, mark it as disconnected. */
+			if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
+				data->disconn_array[i] = 1;
+			else
+				active_chains |= (1 << i);
+			IWL_DEBUG_CALIB(priv, "i = %d  rssiDelta = %d  "
+			     "disconn_array[i] = %d\n",
+			     i, rssi_delta, data->disconn_array[i]);
+		}
+	}
+
+	/*
+	 * The above algorithm sometimes fails when the ucode
+	 * reports 0 for all chains. It's not clear why that
+	 * happens to start with, but it is then causing trouble
+	 * because this can make us enable more chains than the
+	 * hardware really has.
+	 *
+	 * To be safe, simply mask out any chains that we know
+	 * are not on the device.
+	 */
+	active_chains &= priv->nvm_data->valid_rx_ant;
+
+	num_tx_chains = 0;
+	for (i = 0; i < NUM_RX_CHAINS; i++) {
+		/* loops on all the bits of
+		 * priv->hw_setting.valid_tx_ant */
+		u8 ant_msk = (1 << i);
+		if (!(priv->nvm_data->valid_tx_ant & ant_msk))
+			continue;
+
+		num_tx_chains++;
+		if (data->disconn_array[i] == 0)
+			/* there is a Tx antenna connected */
+			break;
+		if (num_tx_chains == priv->hw_params.tx_chains_num &&
+		    data->disconn_array[i]) {
+			/*
+			 * If all chains are disconnected
+			 * connect the first valid tx chain
+			 */
+			first_chain =
+				find_first_chain(priv->nvm_data->valid_tx_ant);
+			data->disconn_array[first_chain] = 0;
+			active_chains |= BIT(first_chain);
+			IWL_DEBUG_CALIB(priv,
+					"All Tx chains are disconnected W/A - declare %d as connected\n",
+					first_chain);
+			break;
+		}
+	}
+
+	if (active_chains != priv->nvm_data->valid_rx_ant &&
+	    active_chains != priv->chain_noise_data.active_chains)
+		IWL_DEBUG_CALIB(priv,
+				"Detected that not all antennas are connected! "
+				"Connected: %#x, valid: %#x.\n",
+				active_chains,
+				priv->nvm_data->valid_rx_ant);
+
+	/* Save for use within RXON, TX, SCAN commands, etc. */
+	data->active_chains = active_chains;
+	IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n",
+			active_chains);
+}
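+/*
+ * Illustrative example (numbers assumed): with average_sig = {45, 38, 12},
+ * chain A is strongest.  If MAXIMUM_ALLOWED_PATHLOSS (defined elsewhere in
+ * the driver) lies between the deltas 7 and 33, chain B stays active while
+ * chain C is marked disconnected.
+ */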
+
+static void iwlagn_gain_computation(struct iwl_priv *priv,
+				    u32 average_noise[NUM_RX_CHAINS],
+				    u8 default_chain)
+{
+	int i;
+	s32 delta_g;
+	struct iwl_chain_noise_data *data = &priv->chain_noise_data;
+
+	/*
+	 * Find Gain Code for the chains based on "default chain"
+	 */
+	for (i = default_chain + 1; i < NUM_RX_CHAINS; i++) {
+		if ((data->disconn_array[i])) {
+			data->delta_gain_code[i] = 0;
+			continue;
+		}
+
+		delta_g = (priv->lib->chain_noise_scale *
+			((s32)average_noise[default_chain] -
+			(s32)average_noise[i])) / 1500;
+
+		/* bound gain by 2 bits value max, 3rd bit is sign */
+		data->delta_gain_code[i] =
+			min(abs(delta_g), CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
+
+		if (delta_g < 0)
+			/*
+			 * set negative sign ...
+			 * note to Intel developers:  This is uCode API format,
+			 *   not the format of any internal device registers.
+			 *   Do not change this format for e.g. 6050 or similar
+			 *   devices.  Change format only if more resolution
+			 *   (i.e. more than 2 bits magnitude) is needed.
+			 */
+			data->delta_gain_code[i] |= (1 << 2);
+	}
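+	/*
+	 * Illustrative example (values assumed): with chain_noise_scale =
+	 * 1000, average_noise[default_chain] = 1010 and average_noise[i] =
+	 * 1000, delta_g = 1000 * 10 / 1500 = 6, which is then clamped to
+	 * CHAIN_NOISE_MAX_DELTA_GAIN_CODE; a negative delta_g would also
+	 * set the sign bit (1 << 2).
+	 */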
+
+	IWL_DEBUG_CALIB(priv, "Delta gains: ANT_B = %d  ANT_C = %d\n",
+			data->delta_gain_code[1], data->delta_gain_code[2]);
+
+	if (!data->radio_write) {
+		struct iwl_calib_chain_noise_gain_cmd cmd;
+
+		memset(&cmd, 0, sizeof(cmd));
+
+		iwl_set_calib_hdr(&cmd.hdr,
+			priv->phy_calib_chain_noise_gain_cmd);
+		cmd.delta_gain_1 = data->delta_gain_code[1];
+		cmd.delta_gain_2 = data->delta_gain_code[2];
+		iwl_dvm_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
+			CMD_ASYNC, sizeof(cmd), &cmd);
+
+		data->radio_write = 1;
+		data->state = IWL_CHAIN_NOISE_CALIBRATED;
+	}
+}
+
+/*
+ * Accumulate 16 beacons of signal and noise statistics for each of
+ *   3 receivers/antennas/rx-chains, then figure out:
+ * 1)  Which antennas are connected.
+ * 2)  Differential rx gain settings to balance the 3 receivers.
+ */
+void iwl_chain_noise_calibration(struct iwl_priv *priv)
+{
+	struct iwl_chain_noise_data *data = NULL;
+
+	u32 chain_noise_a;
+	u32 chain_noise_b;
+	u32 chain_noise_c;
+	u32 chain_sig_a;
+	u32 chain_sig_b;
+	u32 chain_sig_c;
+	u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
+	u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
+	u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
+	u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
+	u16 i = 0;
+	u16 rxon_chnum = INITIALIZATION_VALUE;
+	u16 stat_chnum = INITIALIZATION_VALUE;
+	u8 rxon_band24;
+	u8 stat_band24;
+	struct statistics_rx_non_phy *rx_info;
+
+	/*
+	 * MULTI-FIXME:
+	 * When we support multiple interfaces on different channels,
+	 * this must be modified/fixed.
+	 */
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+	if (priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED)
+		return;
+
+	data = &(priv->chain_noise_data);
+
+	/*
+	 * Accumulate just the first "chain_noise_num_beacons" after
+	 * the first association, then we're done forever.
+	 */
+	if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
+		if (data->state == IWL_CHAIN_NOISE_ALIVE)
+			IWL_DEBUG_CALIB(priv, "Wait for noise calib reset\n");
+		return;
+	}
+
+	spin_lock_bh(&priv->statistics.lock);
+
+	rx_info = &priv->statistics.rx_non_phy;
+
+	if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
+		IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n");
+		spin_unlock_bh(&priv->statistics.lock);
+		return;
+	}
+
+	rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
+	rxon_chnum = le16_to_cpu(ctx->staging.channel);
+	stat_band24 =
+		!!(priv->statistics.flag & STATISTICS_REPLY_FLG_BAND_24G_MSK);
+	stat_chnum = le32_to_cpu(priv->statistics.flag) >> 16;
+
+	/* Make sure we accumulate data for just the associated channel
+	 *   (even if scanning). */
+	if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) {
+		IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n",
+				rxon_chnum, rxon_band24);
+		spin_unlock_bh(&priv->statistics.lock);
+		return;
+	}
+
+	/*
+	 *  Accumulate beacon statistics values across
+	 * "chain_noise_num_beacons"
+	 */
+	chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
+				IN_BAND_FILTER;
+	chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
+				IN_BAND_FILTER;
+	chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) &
+				IN_BAND_FILTER;
+
+	chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
+	chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
+	chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
+
+	spin_unlock_bh(&priv->statistics.lock);
+
+	data->beacon_count++;
+
+	data->chain_noise_a = (chain_noise_a + data->chain_noise_a);
+	data->chain_noise_b = (chain_noise_b + data->chain_noise_b);
+	data->chain_noise_c = (chain_noise_c + data->chain_noise_c);
+
+	data->chain_signal_a = (chain_sig_a + data->chain_signal_a);
+	data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
+	data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
+
+	IWL_DEBUG_CALIB(priv, "chan=%d, band24=%d, beacon=%d\n",
+			rxon_chnum, rxon_band24, data->beacon_count);
+	IWL_DEBUG_CALIB(priv, "chain_sig: a %d b %d c %d\n",
+			chain_sig_a, chain_sig_b, chain_sig_c);
+	IWL_DEBUG_CALIB(priv, "chain_noise: a %d b %d c %d\n",
+			chain_noise_a, chain_noise_b, chain_noise_c);
+
+	/* Once "chain_noise_num_beacons" beacons have accumulated, determine:
+	 * 1)  Disconnected antennas (using signal strengths)
+	 * 2)  Differential gain (using silence noise) to balance receivers */
+	if (data->beacon_count != IWL_CAL_NUM_BEACONS)
+		return;
+
+	/* Analyze signal for disconnected antenna */
+	if (priv->lib->bt_params &&
+	    priv->lib->bt_params->advanced_bt_coexist) {
+		/* Disable the disconnected antenna algorithm for advanced
+		 * bt coex, assuming valid antennas are connected */
+		data->active_chains = priv->nvm_data->valid_rx_ant;
+		for (i = 0; i < NUM_RX_CHAINS; i++)
+			if (!(data->active_chains & (1<<i)))
+				data->disconn_array[i] = 1;
+	} else {
+		iwl_find_disconn_antenna(priv, average_sig, data);
+	}
+
+	/* Analyze noise for rx balance */
+	average_noise[0] = data->chain_noise_a / IWL_CAL_NUM_BEACONS;
+	average_noise[1] = data->chain_noise_b / IWL_CAL_NUM_BEACONS;
+	average_noise[2] = data->chain_noise_c / IWL_CAL_NUM_BEACONS;
+
+	for (i = 0; i < NUM_RX_CHAINS; i++) {
+		if (!(data->disconn_array[i]) &&
+		   (average_noise[i] <= min_average_noise)) {
+			/* This means that chain i is active and has
+			 * lower noise values so far: */
+			min_average_noise = average_noise[i];
+			min_average_noise_antenna_i = i;
+		}
+	}
+
+	IWL_DEBUG_CALIB(priv, "average_noise: a %d b %d c %d\n",
+			average_noise[0], average_noise[1],
+			average_noise[2]);
+
+	IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n",
+			min_average_noise, min_average_noise_antenna_i);
+
+	iwlagn_gain_computation(
+		priv, average_noise,
+		find_first_chain(priv->nvm_data->valid_rx_ant));
+
+	/* Some power changes may have been made during the calibration.
+	 * Update and commit the RXON
+	 */
+	iwl_update_chain_flags(priv);
+
+	data->state = IWL_CHAIN_NOISE_DONE;
+	iwl_power_update_mode(priv, false);
+}
+
+void iwl_reset_run_time_calib(struct iwl_priv *priv)
+{
+	int i;
+	memset(&(priv->sensitivity_data), 0,
+	       sizeof(struct iwl_sensitivity_data));
+	memset(&(priv->chain_noise_data), 0,
+	       sizeof(struct iwl_chain_noise_data));
+	for (i = 0; i < NUM_RX_CHAINS; i++)
+		priv->chain_noise_data.delta_gain_code[i] =
+				CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
+
+	/* Ask for statistics now, the uCode will send notification
+	 * periodically after association */
+	iwl_send_statistics_request(priv, CMD_ASYNC, true);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/calib.h b/drivers/net/wireless/intel/iwlwifi/dvm/calib.h
new file mode 100644
index 0000000..099e3ce
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/calib.h
@@ -0,0 +1,74 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+#ifndef __iwl_calib_h__
+#define __iwl_calib_h__
+
+#include "dev.h"
+#include "commands.h"
+
+void iwl_chain_noise_calibration(struct iwl_priv *priv);
+void iwl_sensitivity_calibration(struct iwl_priv *priv);
+
+void iwl_init_sensitivity(struct iwl_priv *priv);
+void iwl_reset_run_time_calib(struct iwl_priv *priv);
+
+#endif /* __iwl_calib_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h
new file mode 100644
index 0000000..f89736d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h
@@ -0,0 +1,3986 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+/*
+ * Please use this file (commands.h) only for uCode API definitions.
+ * Please use iwl-xxxx-hw.h for hardware-related definitions.
+ * Please use dev.h for driver implementation definitions.
+ */
+
+#ifndef __iwl_commands_h__
+#define __iwl_commands_h__
+
+#include <linux/ieee80211.h>
+#include <linux/types.h>
+
+
+enum {
+	REPLY_ALIVE = 0x1,
+	REPLY_ERROR = 0x2,
+	REPLY_ECHO = 0x3,		/* test command */
+
+	/* RXON and QOS commands */
+	REPLY_RXON = 0x10,
+	REPLY_RXON_ASSOC = 0x11,
+	REPLY_QOS_PARAM = 0x13,
+	REPLY_RXON_TIMING = 0x14,
+
+	/* Multi-Station support */
+	REPLY_ADD_STA = 0x18,
+	REPLY_REMOVE_STA = 0x19,
+	REPLY_REMOVE_ALL_STA = 0x1a,	/* not used */
+	REPLY_TXFIFO_FLUSH = 0x1e,
+
+	/* Security */
+	REPLY_WEPKEY = 0x20,
+
+	/* RX, TX, LEDs */
+	REPLY_TX = 0x1c,
+	REPLY_LEDS_CMD = 0x48,
+	REPLY_TX_LINK_QUALITY_CMD = 0x4e,
+
+	/* WiMAX coexistence */
+	COEX_PRIORITY_TABLE_CMD = 0x5a,
+	COEX_MEDIUM_NOTIFICATION = 0x5b,
+	COEX_EVENT_CMD = 0x5c,
+
+	/* Calibration */
+	TEMPERATURE_NOTIFICATION = 0x62,
+	CALIBRATION_CFG_CMD = 0x65,
+	CALIBRATION_RES_NOTIFICATION = 0x66,
+	CALIBRATION_COMPLETE_NOTIFICATION = 0x67,
+
+	/* 802.11h related */
+	REPLY_QUIET_CMD = 0x71,		/* not used */
+	REPLY_CHANNEL_SWITCH = 0x72,
+	CHANNEL_SWITCH_NOTIFICATION = 0x73,
+	REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74,
+	SPECTRUM_MEASURE_NOTIFICATION = 0x75,
+
+	/* Power Management */
+	POWER_TABLE_CMD = 0x77,
+	PM_SLEEP_NOTIFICATION = 0x7A,
+	PM_DEBUG_STATISTIC_NOTIFIC = 0x7B,
+
+	/* Scan commands and notifications */
+	REPLY_SCAN_CMD = 0x80,
+	REPLY_SCAN_ABORT_CMD = 0x81,
+	SCAN_START_NOTIFICATION = 0x82,
+	SCAN_RESULTS_NOTIFICATION = 0x83,
+	SCAN_COMPLETE_NOTIFICATION = 0x84,
+
+	/* IBSS/AP commands */
+	BEACON_NOTIFICATION = 0x90,
+	REPLY_TX_BEACON = 0x91,
+	WHO_IS_AWAKE_NOTIFICATION = 0x94,	/* not used */
+
+	/* Miscellaneous commands */
+	REPLY_TX_POWER_DBM_CMD = 0x95,
+	QUIET_NOTIFICATION = 0x96,		/* not used */
+	REPLY_TX_PWR_TABLE_CMD = 0x97,
+	REPLY_TX_POWER_DBM_CMD_V1 = 0x98,	/* old version of API */
+	TX_ANT_CONFIGURATION_CMD = 0x98,
+	MEASURE_ABORT_NOTIFICATION = 0x99,	/* not used */
+
+	/* Bluetooth device coexistence config command */
+	REPLY_BT_CONFIG = 0x9b,
+
+	/* Statistics */
+	REPLY_STATISTICS_CMD = 0x9c,
+	STATISTICS_NOTIFICATION = 0x9d,
+
+	/* RF-KILL commands and notifications */
+	REPLY_CARD_STATE_CMD = 0xa0,
+	CARD_STATE_NOTIFICATION = 0xa1,
+
+	/* Missed beacons notification */
+	MISSED_BEACONS_NOTIFICATION = 0xa2,
+
+	REPLY_CT_KILL_CONFIG_CMD = 0xa4,
+	SENSITIVITY_CMD = 0xa8,
+	REPLY_PHY_CALIBRATION_CMD = 0xb0,
+	REPLY_RX_PHY_CMD = 0xc0,
+	REPLY_RX_MPDU_CMD = 0xc1,
+	REPLY_RX = 0xc3,
+	REPLY_COMPRESSED_BA = 0xc5,
+
+	/* BT Coex */
+	REPLY_BT_COEX_PRIO_TABLE = 0xcc,
+	REPLY_BT_COEX_PROT_ENV = 0xcd,
+	REPLY_BT_COEX_PROFILE_NOTIF = 0xce,
+
+	/* PAN commands */
+	REPLY_WIPAN_PARAMS = 0xb2,
+	REPLY_WIPAN_RXON = 0xb3,	/* use REPLY_RXON structure */
+	REPLY_WIPAN_RXON_TIMING = 0xb4,	/* use REPLY_RXON_TIMING structure */
+	REPLY_WIPAN_RXON_ASSOC = 0xb6,	/* use REPLY_RXON_ASSOC structure */
+	REPLY_WIPAN_QOS_PARAM = 0xb7,	/* use REPLY_QOS_PARAM structure */
+	REPLY_WIPAN_WEPKEY = 0xb8,	/* use REPLY_WEPKEY structure */
+	REPLY_WIPAN_P2P_CHANNEL_SWITCH = 0xb9,
+	REPLY_WIPAN_NOA_NOTIFICATION = 0xbc,
+	REPLY_WIPAN_DEACTIVATION_COMPLETE = 0xbd,
+
+	REPLY_WOWLAN_PATTERNS = 0xe0,
+	REPLY_WOWLAN_WAKEUP_FILTER = 0xe1,
+	REPLY_WOWLAN_TSC_RSC_PARAMS = 0xe2,
+	REPLY_WOWLAN_TKIP_PARAMS = 0xe3,
+	REPLY_WOWLAN_KEK_KCK_MATERIAL = 0xe4,
+	REPLY_WOWLAN_GET_STATUS = 0xe5,
+	REPLY_D3_CONFIG = 0xd3,
+
+	REPLY_MAX = 0xff
+};
+
+/*
+ * Minimum number of queues. MAX_NUM is defined in hw specific files.
+ * Set the minimum to accommodate
+ *  - 4 standard TX queues
+ *  - the command queue
+ *  - 4 PAN TX queues
+ *  - the PAN multicast queue, and
+ *  - the AUX (TX during scan dwell) queue.
+ */
+#define IWL_MIN_NUM_QUEUES	11
+
+/*
+ * Command queue depends on iPAN support.
+ */
+#define IWL_DEFAULT_CMD_QUEUE_NUM	4
+#define IWL_IPAN_CMD_QUEUE_NUM		9
+
+#define IWL_TX_FIFO_BK		0	/* shared */
+#define IWL_TX_FIFO_BE		1
+#define IWL_TX_FIFO_VI		2	/* shared */
+#define IWL_TX_FIFO_VO		3
+#define IWL_TX_FIFO_BK_IPAN	IWL_TX_FIFO_BK
+#define IWL_TX_FIFO_BE_IPAN	4
+#define IWL_TX_FIFO_VI_IPAN	IWL_TX_FIFO_VI
+#define IWL_TX_FIFO_VO_IPAN	5
+/* re-uses the VO FIFO, uCode will properly flush/schedule */
+#define IWL_TX_FIFO_AUX		5
+#define IWL_TX_FIFO_UNUSED	255
+
+#define IWLAGN_CMD_FIFO_NUM	7
+
+/*
+ * This queue number is required for proper operation
+ * because the ucode will stop/start the scheduler as
+ * required.
+ */
+#define IWL_IPAN_MCAST_QUEUE	8
+
+/******************************************************************************
+ * (0)
+ * Commonly used structures and definitions:
+ * Command header, rate_n_flags, txpower
+ *
+ *****************************************************************************/
+
+/**
+ * iwlagn rate_n_flags bit fields
+ *
+ * rate_n_flags format is used in following iwlagn commands:
+ *  REPLY_RX (response only)
+ *  REPLY_RX_MPDU (response only)
+ *  REPLY_TX (both command and response)
+ *  REPLY_TX_LINK_QUALITY_CMD
+ *
+ * High-throughput (HT) rate format for bits 7:0 (bit 8 must be "1"):
+ *  2-0:  0)   6 Mbps
+ *        1)  12 Mbps
+ *        2)  18 Mbps
+ *        3)  24 Mbps
+ *        4)  36 Mbps
+ *        5)  48 Mbps
+ *        6)  54 Mbps
+ *        7)  60 Mbps
+ *
+ *  4-3:  0)  Single stream (SISO)
+ *        1)  Dual stream (MIMO)
+ *        2)  Triple stream (MIMO)
+ *
+ *    5:  Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data
+ *
+ * Legacy OFDM rate format for bits 7:0 (bit 8 must be "0", bit 9 "0"):
+ *  3-0:  0xD)   6 Mbps
+ *        0xF)   9 Mbps
+ *        0x5)  12 Mbps
+ *        0x7)  18 Mbps
+ *        0x9)  24 Mbps
+ *        0xB)  36 Mbps
+ *        0x1)  48 Mbps
+ *        0x3)  54 Mbps
+ *
+ * Legacy CCK rate format for bits 7:0 (bit 8 must be "0", bit 9 "1"):
+ *  6-0:   10)  1 Mbps
+ *         20)  2 Mbps
+ *         55)  5.5 Mbps
+ *        110)  11 Mbps
+ */
+#define RATE_MCS_CODE_MSK 0x7
+#define RATE_MCS_SPATIAL_POS 3
+#define RATE_MCS_SPATIAL_MSK 0x18
+#define RATE_MCS_HT_DUP_POS 5
+#define RATE_MCS_HT_DUP_MSK 0x20
+/* Both legacy and HT use bits 7:0 as the CCK/OFDM rate or HT MCS */
+#define RATE_MCS_RATE_MSK 0xff
+
+/* Bit 8: (1) HT format, (0) legacy format in bits 7:0 */
+#define RATE_MCS_FLAGS_POS 8
+#define RATE_MCS_HT_POS 8
+#define RATE_MCS_HT_MSK 0x100
+
+/* Bit 9: (1) CCK, (0) OFDM.  HT (bit 8) must be "0" for this bit to be valid */
+#define RATE_MCS_CCK_POS 9
+#define RATE_MCS_CCK_MSK 0x200
+
+/* Bit 10: (1) Use Green Field preamble */
+#define RATE_MCS_GF_POS 10
+#define RATE_MCS_GF_MSK 0x400
+
+/* Bit 11: (1) Use 40Mhz HT40 chnl width, (0) use 20 MHz legacy chnl width */
+#define RATE_MCS_HT40_POS 11
+#define RATE_MCS_HT40_MSK 0x800
+
+/* Bit 12: (1) Duplicate data on both 20MHz chnls. HT40 (bit 11) must be set. */
+#define RATE_MCS_DUP_POS 12
+#define RATE_MCS_DUP_MSK 0x1000
+
+/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */
+#define RATE_MCS_SGI_POS 13
+#define RATE_MCS_SGI_MSK 0x2000
+
+/**
+ * rate_n_flags Tx antenna masks
+ * bits 14:16
+ */
+#define RATE_MCS_ANT_POS	14
+#define RATE_MCS_ANT_A_MSK	0x04000
+#define RATE_MCS_ANT_B_MSK	0x08000
+#define RATE_MCS_ANT_C_MSK	0x10000
+#define RATE_MCS_ANT_AB_MSK	(RATE_MCS_ANT_A_MSK | RATE_MCS_ANT_B_MSK)
+#define RATE_MCS_ANT_ABC_MSK	(RATE_MCS_ANT_AB_MSK | RATE_MCS_ANT_C_MSK)
+#define RATE_ANT_NUM 3
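+/*
+ * Example (illustrative): a legacy OFDM 54 Mbps frame on antenna B is
+ * encoded as 0x3 | RATE_MCS_ANT_B_MSK = 0x08003 (bits 8 (HT) and 9 (CCK)
+ * clear select legacy OFDM; 0x3 is the 54 Mbps rate code from the table
+ * above).
+ */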
+
+#define POWER_TABLE_NUM_ENTRIES			33
+#define POWER_TABLE_NUM_HT_OFDM_ENTRIES		32
+#define POWER_TABLE_CCK_ENTRY			32
+
+#define IWL_PWR_NUM_HT_OFDM_ENTRIES		24
+#define IWL_PWR_CCK_ENTRIES			2
+
+/**
+ * struct tx_power_dual_stream
+ *
+ * Table entries in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ *
+ * Same format as iwl_tx_power_dual_stream, but __le32
+ */
+struct tx_power_dual_stream {
+	__le32 dw;
+} __packed;
+
+/**
+ * Command REPLY_TX_POWER_DBM_CMD = 0x95 (see the command enum above)
+ * struct iwlagn_tx_power_dbm_cmd
+ */
+#define IWLAGN_TX_POWER_AUTO 0x7f
+#define IWLAGN_TX_POWER_NO_CLOSED (0x1 << 6)
+
+struct iwlagn_tx_power_dbm_cmd {
+	s8 global_lmt; /*in half-dBm (e.g. 30 = 15 dBm) */
+	u8 flags;
+	s8 srv_chan_lmt; /*in half-dBm (e.g. 30 = 15 dBm) */
+	u8 reserved;
+} __packed;
+
+/**
+ * Command TX_ANT_CONFIGURATION_CMD = 0x98
+ * This command is used to configure the valid Tx antennas.
+ * By default, uCode concludes the valid antennas according to the radio
+ * flavor. This command enables the driver to override/modify this conclusion.
+ */
+struct iwl_tx_ant_config_cmd {
+	__le32 valid;
+} __packed;
+
+/******************************************************************************
+ * (0a)
+ * Alive and Error Commands & Responses:
+ *
+ *****************************************************************************/
+
+#define UCODE_VALID_OK	cpu_to_le32(0x1)
+
+/**
+ * REPLY_ALIVE = 0x1 (response only, not a command)
+ *
+ * uCode issues this "alive" notification once the runtime image is ready
+ * to receive commands from the driver.  This is the *second* "alive"
+ * notification that the driver will receive after rebooting uCode;
+ * this "alive" is indicated by subtype field != 9.
+ *
+ * See comments documenting "BSM" (bootstrap state machine).
+ *
+ * This response includes two pointers to structures within the device's
+ * data SRAM (access via HBUS_TARG_MEM_* regs) that are useful for debugging:
+ *
+ * 1)  log_event_table_ptr indicates base of the event log.  This traces
+ *     a 256-entry history of uCode execution within a circular buffer.
+ *     Its header format is:
+ *
+ *	__le32 log_size;     log capacity (in number of entries)
+ *	__le32 type;         (1) timestamp with each entry, (0) no timestamp
+ *	__le32 wraps;        # times uCode has wrapped to top of circular buffer
+ *	__le32 write_index;  next circular buffer entry that uCode would fill
+ *
+ *     The header is followed by the circular buffer of log entries.  Entries
+ *     with timestamps have the following format:
+ *
+ *	__le32 event_id;     range 0 - 1500
+ *	__le32 timestamp;    low 32 bits of TSF (of network, if associated)
+ *	__le32 data;         event_id-specific data value
+ *
+ *     Entries without timestamps contain only event_id and data.
+ *
+ *
+ * 2)  error_event_table_ptr indicates base of the error log.  This contains
+ *     information about any uCode error that occurs.  For agn, the format
+ *     of the error log is defined by struct iwl_error_event_table.
+ *
+ * The Linux driver can print both logs to the system log when a uCode error
+ * occurs.
+ */
+
+/*
+ * Note: This structure is read from the device with IO accesses,
+ * and the reading already does the endian conversion. As it is
+ * read with u32-sized accesses, any members with a different size
+ * need to be ordered correctly though!
+ */
+struct iwl_error_event_table {
+	u32 valid;		/* (nonzero) valid, (0) log is empty */
+	u32 error_id;		/* type of error */
+	u32 pc;			/* program counter */
+	u32 blink1;		/* branch link */
+	u32 blink2;		/* branch link */
+	u32 ilink1;		/* interrupt link */
+	u32 ilink2;		/* interrupt link */
+	u32 data1;		/* error-specific data */
+	u32 data2;		/* error-specific data */
+	u32 line;		/* source code line of error */
+	u32 bcon_time;		/* beacon timer */
+	u32 tsf_low;		/* network timing synchronization function
+				 * (TSF) timer, low word */
+	u32 tsf_hi;		/* network timing synchronization function
+				 * (TSF) timer, high word */
+	u32 gp1;		/* GP1 timer register */
+	u32 gp2;		/* GP2 timer register */
+	u32 gp3;		/* GP3 timer register */
+	u32 ucode_ver;		/* uCode version */
+	u32 hw_ver;		/* HW Silicon version */
+	u32 brd_ver;		/* HW board version */
+	u32 log_pc;		/* log program counter */
+	u32 frame_ptr;		/* frame pointer */
+	u32 stack_ptr;		/* stack pointer */
+	u32 hcmd;		/* last host command header */
+	u32 isr0;		/* isr status register LMPM_NIC_ISR0:
+				 * rxtx_flag */
+	u32 isr1;		/* isr status register LMPM_NIC_ISR1:
+				 * host_flag */
+	u32 isr2;		/* isr status register LMPM_NIC_ISR2:
+				 * enc_flag */
+	u32 isr3;		/* isr status register LMPM_NIC_ISR3:
+				 * time_flag */
+	u32 isr4;		/* isr status register LMPM_NIC_ISR4:
+				 * wico interrupt */
+	u32 isr_pref;		/* isr status register LMPM_NIC_PREF_STAT */
+	u32 wait_event;		/* wait event() caller address */
+	u32 l2p_control;	/* L2pControlField */
+	u32 l2p_duration;	/* L2pDurationField */
+	u32 l2p_mhvalid;	/* L2pMhValidBits */
+	u32 l2p_addr_match;	/* L2pAddrMatchStat */
+	u32 lmpm_pmg_sel;	/* indicates which clocks are turned on
+				 * (LMPM_PMG_SEL) */
+	u32 u_timestamp;	/* date and time of the uCode compilation */
+	u32 flow_handler;	/* FH read/write pointers, RX credit */
+} __packed;
+
+struct iwl_alive_resp {
+	u8 ucode_minor;
+	u8 ucode_major;
+	__le16 reserved1;
+	u8 sw_rev[8];
+	u8 ver_type;
+	u8 ver_subtype;			/* not "9" for runtime alive */
+	__le16 reserved2;
+	__le32 log_event_table_ptr;	/* SRAM address for event log */
+	__le32 error_event_table_ptr;	/* SRAM address for error log */
+	__le32 timestamp;
+	__le32 is_valid;
+} __packed;
+
+/*
+ * REPLY_ERROR = 0x2 (response only, not a command)
+ */
+struct iwl_error_resp {
+	__le32 error_type;
+	u8 cmd_id;
+	u8 reserved1;
+	__le16 bad_cmd_seq_num;
+	__le32 error_info;
+	__le64 timestamp;
+} __packed;
+
+/******************************************************************************
+ * (1)
+ * RXON Commands & Responses:
+ *
+ *****************************************************************************/
+
+/*
+ * Rx config defines & structure
+ */
+/* rx_config device types  */
+enum {
+	RXON_DEV_TYPE_AP = 1,
+	RXON_DEV_TYPE_ESS = 3,
+	RXON_DEV_TYPE_IBSS = 4,
+	RXON_DEV_TYPE_SNIFFER = 6,
+	RXON_DEV_TYPE_CP = 7,
+	RXON_DEV_TYPE_2STA = 8,
+	RXON_DEV_TYPE_P2P = 9,
+};
+
+
+#define RXON_RX_CHAIN_DRIVER_FORCE_MSK		cpu_to_le16(0x1 << 0)
+#define RXON_RX_CHAIN_DRIVER_FORCE_POS		(0)
+#define RXON_RX_CHAIN_VALID_MSK			cpu_to_le16(0x7 << 1)
+#define RXON_RX_CHAIN_VALID_POS			(1)
+#define RXON_RX_CHAIN_FORCE_SEL_MSK		cpu_to_le16(0x7 << 4)
+#define RXON_RX_CHAIN_FORCE_SEL_POS		(4)
+#define RXON_RX_CHAIN_FORCE_MIMO_SEL_MSK	cpu_to_le16(0x7 << 7)
+#define RXON_RX_CHAIN_FORCE_MIMO_SEL_POS	(7)
+#define RXON_RX_CHAIN_CNT_MSK			cpu_to_le16(0x3 << 10)
+#define RXON_RX_CHAIN_CNT_POS			(10)
+#define RXON_RX_CHAIN_MIMO_CNT_MSK		cpu_to_le16(0x3 << 12)
+#define RXON_RX_CHAIN_MIMO_CNT_POS		(12)
+#define RXON_RX_CHAIN_MIMO_FORCE_MSK		cpu_to_le16(0x1 << 14)
+#define RXON_RX_CHAIN_MIMO_FORCE_POS		(14)
+
+/* rx_config flags */
+/* band & modulation selection */
+#define RXON_FLG_BAND_24G_MSK           cpu_to_le32(1 << 0)
+#define RXON_FLG_CCK_MSK                cpu_to_le32(1 << 1)
+/* auto detection enable */
+#define RXON_FLG_AUTO_DETECT_MSK        cpu_to_le32(1 << 2)
+/* TGg protection when tx */
+#define RXON_FLG_TGG_PROTECT_MSK        cpu_to_le32(1 << 3)
+/* cck short slot & preamble */
+#define RXON_FLG_SHORT_SLOT_MSK          cpu_to_le32(1 << 4)
+#define RXON_FLG_SHORT_PREAMBLE_MSK     cpu_to_le32(1 << 5)
+/* antenna selection */
+#define RXON_FLG_DIS_DIV_MSK            cpu_to_le32(1 << 7)
+#define RXON_FLG_ANT_SEL_MSK            cpu_to_le32(0x0f00)
+#define RXON_FLG_ANT_A_MSK              cpu_to_le32(1 << 8)
+#define RXON_FLG_ANT_B_MSK              cpu_to_le32(1 << 9)
+/* radar detection enable */
+#define RXON_FLG_RADAR_DETECT_MSK       cpu_to_le32(1 << 12)
+#define RXON_FLG_TGJ_NARROW_BAND_MSK    cpu_to_le32(1 << 13)
+/* rx response to host with 8-byte TSF
+ * (according to ON_AIR deassertion) */
+#define RXON_FLG_TSF2HOST_MSK           cpu_to_le32(1 << 15)
+
+
+/* HT flags */
+#define RXON_FLG_CTRL_CHANNEL_LOC_POS		(22)
+#define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK	cpu_to_le32(0x1 << 22)
+
+#define RXON_FLG_HT_OPERATING_MODE_POS		(23)
+
+#define RXON_FLG_HT_PROT_MSK			cpu_to_le32(0x1 << 23)
+#define RXON_FLG_HT40_PROT_MSK			cpu_to_le32(0x2 << 23)
+
+#define RXON_FLG_CHANNEL_MODE_POS		(25)
+#define RXON_FLG_CHANNEL_MODE_MSK		cpu_to_le32(0x3 << 25)
+
+/* channel mode */
+enum {
+	CHANNEL_MODE_LEGACY = 0,
+	CHANNEL_MODE_PURE_40 = 1,
+	CHANNEL_MODE_MIXED = 2,
+	CHANNEL_MODE_RESERVED = 3,
+};
+#define RXON_FLG_CHANNEL_MODE_LEGACY	cpu_to_le32(CHANNEL_MODE_LEGACY << RXON_FLG_CHANNEL_MODE_POS)
+#define RXON_FLG_CHANNEL_MODE_PURE_40	cpu_to_le32(CHANNEL_MODE_PURE_40 << RXON_FLG_CHANNEL_MODE_POS)
+#define RXON_FLG_CHANNEL_MODE_MIXED	cpu_to_le32(CHANNEL_MODE_MIXED << RXON_FLG_CHANNEL_MODE_POS)
+
+/* CTS to self (if spec allows) flag */
+#define RXON_FLG_SELF_CTS_EN			cpu_to_le32(0x1<<30)
+
+/* rx_config filter flags */
+/* accept all data frames */
+#define RXON_FILTER_PROMISC_MSK         cpu_to_le32(1 << 0)
+/* pass control & management to host */
+#define RXON_FILTER_CTL2HOST_MSK        cpu_to_le32(1 << 1)
+/* accept multi-cast */
+#define RXON_FILTER_ACCEPT_GRP_MSK      cpu_to_le32(1 << 2)
+/* don't decrypt uni-cast frames */
+#define RXON_FILTER_DIS_DECRYPT_MSK     cpu_to_le32(1 << 3)
+/* don't decrypt multi-cast frames */
+#define RXON_FILTER_DIS_GRP_DECRYPT_MSK cpu_to_le32(1 << 4)
+/* STA is associated */
+#define RXON_FILTER_ASSOC_MSK           cpu_to_le32(1 << 5)
+/* transfer to host non bssid beacons in associated state */
+#define RXON_FILTER_BCON_AWARE_MSK      cpu_to_le32(1 << 6)
+
+/**
+ * REPLY_RXON = 0x10 (command, has simple generic response)
+ *
+ * RXON tunes the radio to a service channel, and sets up a number
+ * of parameters that are used primarily for Rx, but also for Tx operations.
+ *
+ * NOTE:  When tuning to a new channel, driver must set the
+ *        RXON_FILTER_ASSOC_MSK to 0.  This will clear station-dependent
+ *        info within the device, including the station tables, tx retry
+ *        rate tables, and txpower tables.  Driver must build a new station
+ *        table and txpower table before transmitting anything on the RXON
+ *        channel.
+ *
+ * NOTE:  All RXONs wipe clean the internal txpower table.  Driver must
+ *        issue a new REPLY_TX_PWR_TABLE_CMD after each REPLY_RXON (0x10),
+ *        regardless of whether RXON_FILTER_ASSOC_MSK is set.
+ */
+
+struct iwl_rxon_cmd {
+	u8 node_addr[6];
+	__le16 reserved1;
+	u8 bssid_addr[6];
+	__le16 reserved2;
+	u8 wlap_bssid_addr[6];
+	__le16 reserved3;
+	u8 dev_type;
+	u8 air_propagation;
+	__le16 rx_chain;
+	u8 ofdm_basic_rates;
+	u8 cck_basic_rates;
+	__le16 assoc_id;
+	__le32 flags;
+	__le32 filter_flags;
+	__le16 channel;
+	u8 ofdm_ht_single_stream_basic_rates;
+	u8 ofdm_ht_dual_stream_basic_rates;
+	u8 ofdm_ht_triple_stream_basic_rates;
+	u8 reserved5;
+	__le16 acquisition_data;
+	__le16 reserved6;
+} __packed;
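+
+/*
+ * Illustrative sketch (not part of the original header), following the
+ * NOTE above: before committing a new channel, clear the association
+ * filter bit in a staging copy of the RXON command.  The caller remains
+ * responsible for rebuilding the station table and sending a new
+ * REPLY_TX_PWR_TABLE_CMD before any Tx on the new channel.
+ */
+static inline void iwl_example_stage_channel(struct iwl_rxon_cmd *staging,
+					     u16 new_channel)
+{
+	/* clearing ASSOC wipes station/txpower state in the device */
+	staging->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+	staging->channel = cpu_to_le16(new_channel);
+}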
+
+/*
+ * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response)
+ */
+struct iwl_rxon_assoc_cmd {
+	__le32 flags;
+	__le32 filter_flags;
+	u8 ofdm_basic_rates;
+	u8 cck_basic_rates;
+	__le16 reserved1;
+	u8 ofdm_ht_single_stream_basic_rates;
+	u8 ofdm_ht_dual_stream_basic_rates;
+	u8 ofdm_ht_triple_stream_basic_rates;
+	u8 reserved2;
+	__le16 rx_chain_select_flags;
+	__le16 acquisition_data;
+	__le32 reserved3;
+} __packed;
+
+#define IWL_CONN_MAX_LISTEN_INTERVAL	10
+#define IWL_MAX_UCODE_BEACON_INTERVAL	4 /* 4096 */
+
+/*
+ * REPLY_RXON_TIMING = 0x14 (command, has simple generic response)
+ */
+struct iwl_rxon_time_cmd {
+	__le64 timestamp;
+	__le16 beacon_interval;
+	__le16 atim_window;
+	__le32 beacon_init_val;
+	__le16 listen_interval;
+	u8 dtim_period;
+	u8 delta_cp_bss_tbtts;
+} __packed;
+
+/*
+ * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
+ */
+/**
+ * struct iwl5000_channel_switch_cmd
+ * @band: 0- 5.2GHz, 1- 2.4GHz
+ * @expect_beacon: 0- resume transmits after channel switch
+ *		   1- wait for beacon to resume transmits
+ * @channel: new channel number
+ * @rxon_flags: Rx on flags
+ * @rxon_filter_flags: filtering parameters
+ * @switch_time: switch time in extended beacon format
+ * @reserved: reserved bytes
+ */
+struct iwl5000_channel_switch_cmd {
+	u8 band;
+	u8 expect_beacon;
+	__le16 channel;
+	__le32 rxon_flags;
+	__le32 rxon_filter_flags;
+	__le32 switch_time;
+	__le32 reserved[2][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES];
+} __packed;
+
+/**
+ * struct iwl6000_channel_switch_cmd
+ * @band: 0- 5.2GHz, 1- 2.4GHz
+ * @expect_beacon: 0- resume transmits after channel switch
+ *		   1- wait for beacon to resume transmits
+ * @channel: new channel number
+ * @rxon_flags: Rx on flags
+ * @rxon_filter_flags: filtering parameters
+ * @switch_time: switch time in extended beacon format
+ * @reserved: reserved bytes
+ */
+struct iwl6000_channel_switch_cmd {
+	u8 band;
+	u8 expect_beacon;
+	__le16 channel;
+	__le32 rxon_flags;
+	__le32 rxon_filter_flags;
+	__le32 switch_time;
+	__le32 reserved[3][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES];
+} __packed;
+
+/*
+ * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command)
+ */
+struct iwl_csa_notification {
+	__le16 band;
+	__le16 channel;
+	__le32 status;		/* 0 - OK, 1 - fail */
+} __packed;
+
+/******************************************************************************
+ * (2)
+ * Quality-of-Service (QOS) Commands & Responses:
+ *
+ *****************************************************************************/
+
+/**
+ * struct iwl_ac_qos -- QOS timing params for REPLY_QOS_PARAM
+ * One for each of 4 EDCA access categories in struct iwl_qosparam_cmd
+ *
+ * @cw_min: Contention window, start value in numbers of slots.
+ *          Should be a power-of-2, minus 1.  Device's default is 0x0f.
+ * @cw_max: Contention window, max value in numbers of slots.
+ *          Should be a power-of-2, minus 1.  Device's default is 0x3f.
+ * @aifsn:  Number of slots in Arbitration Interframe Space (before
+ *          performing random backoff timing prior to Tx).  Device default 1.
+ * @edca_txop:  Length of Tx opportunity, in uSecs.  Device default is 0.
+ *
+ * Device will automatically increase contention window by (2*CW) + 1 for each
+ * transmission retry.  Device uses cw_max as a bit mask, ANDed with new CW
+ * value, to cap the CW value.
+ */
+struct iwl_ac_qos {
+	__le16 cw_min;
+	__le16 cw_max;
+	u8 aifsn;
+	u8 reserved1;
+	__le16 edca_txop;
+} __packed;
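+
+/*
+ * Illustrative sketch (not part of the original header): fill one EDCA
+ * access-category entry with the device defaults quoted above.
+ */
+static inline void iwl_example_ac_defaults(struct iwl_ac_qos *ac)
+{
+	ac->cw_min = cpu_to_le16(0x0f);		/* 2^4 - 1 */
+	ac->cw_max = cpu_to_le16(0x3f);		/* 2^6 - 1 */
+	ac->aifsn = 1;
+	ac->reserved1 = 0;
+	ac->edca_txop = cpu_to_le16(0);
+}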
+
+/* QoS flags defines */
+#define QOS_PARAM_FLG_UPDATE_EDCA_MSK	cpu_to_le32(0x01)
+#define QOS_PARAM_FLG_TGN_MSK		cpu_to_le32(0x02)
+#define QOS_PARAM_FLG_TXOP_TYPE_MSK	cpu_to_le32(0x10)
+
+/* Number of Access Categories (AC) (EDCA), queues 0..3 */
+#define AC_NUM                4
+
+/*
+ * REPLY_QOS_PARAM = 0x13 (command, has simple generic response)
+ *
+ * This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs
+ * 0: Background, 1: Best Effort, 2: Video, 3: Voice.
+ */
+struct iwl_qosparam_cmd {
+	__le32 qos_flags;
+	struct iwl_ac_qos ac[AC_NUM];
+} __packed;
+
+/******************************************************************************
+ * (3)
+ * Add/Modify Stations Commands & Responses:
+ *
+ *****************************************************************************/
+/*
+ * Multi station support
+ */
+
+/* Special, dedicated locations within device's station table */
+#define	IWL_AP_ID		0
+#define	IWL_AP_ID_PAN		1
+#define	IWL_STA_ID		2
+#define IWLAGN_PAN_BCAST_ID	14
+#define IWLAGN_BROADCAST_ID	15
+#define	IWLAGN_STATION_COUNT	16
+
+#define IWL_TID_NON_QOS IWL_MAX_TID_COUNT
+
+#define STA_FLG_TX_RATE_MSK		cpu_to_le32(1 << 2)
+#define STA_FLG_PWR_SAVE_MSK		cpu_to_le32(1 << 8)
+#define STA_FLG_PAN_STATION		cpu_to_le32(1 << 13)
+#define STA_FLG_RTS_MIMO_PROT_MSK	cpu_to_le32(1 << 17)
+#define STA_FLG_AGG_MPDU_8US_MSK	cpu_to_le32(1 << 18)
+#define STA_FLG_MAX_AGG_SIZE_POS	(19)
+#define STA_FLG_MAX_AGG_SIZE_MSK	cpu_to_le32(3 << 19)
+#define STA_FLG_HT40_EN_MSK		cpu_to_le32(1 << 21)
+#define STA_FLG_MIMO_DIS_MSK		cpu_to_le32(1 << 22)
+#define STA_FLG_AGG_MPDU_DENSITY_POS	(23)
+#define STA_FLG_AGG_MPDU_DENSITY_MSK	cpu_to_le32(7 << 23)
+
+/* Use in mode field.  1: modify existing entry, 0: add new station entry */
+#define STA_CONTROL_MODIFY_MSK		0x01
+
+/* key flags __le16 */
+#define STA_KEY_FLG_ENCRYPT_MSK	cpu_to_le16(0x0007)
+#define STA_KEY_FLG_NO_ENC	cpu_to_le16(0x0000)
+#define STA_KEY_FLG_WEP		cpu_to_le16(0x0001)
+#define STA_KEY_FLG_CCMP	cpu_to_le16(0x0002)
+#define STA_KEY_FLG_TKIP	cpu_to_le16(0x0003)
+
+#define STA_KEY_FLG_KEYID_POS	8
+#define STA_KEY_FLG_INVALID 	cpu_to_le16(0x0800)
+/* wep key is either from global key (0) or from station info array (1) */
+#define STA_KEY_FLG_MAP_KEY_MSK	cpu_to_le16(0x0008)
+
+/* wep key in STA: 5-bytes (0) or 13-bytes (1) */
+#define STA_KEY_FLG_KEY_SIZE_MSK     cpu_to_le16(0x1000)
+#define STA_KEY_MULTICAST_MSK        cpu_to_le16(0x4000)
+#define STA_KEY_MAX_NUM		8
+#define STA_KEY_MAX_NUM_PAN	16
+/* must not match WEP_INVALID_OFFSET */
+#define IWLAGN_HW_KEY_DEFAULT	0xfe
+
+/* Flags indicate whether to modify vs. don't change various station params */
+#define	STA_MODIFY_KEY_MASK		0x01
+#define	STA_MODIFY_TID_DISABLE_TX	0x02
+#define	STA_MODIFY_TX_RATE_MSK		0x04
+#define STA_MODIFY_ADDBA_TID_MSK	0x08
+#define STA_MODIFY_DELBA_TID_MSK	0x10
+#define STA_MODIFY_SLEEP_TX_COUNT_MSK	0x20
+
+/* agn */
+struct iwl_keyinfo {
+	__le16 key_flags;
+	u8 tkip_rx_tsc_byte2;	/* TSC[2] for key mix ph1 detection */
+	u8 reserved1;
+	__le16 tkip_rx_ttak[5];	/* 10-byte unicast TKIP TTAK */
+	u8 key_offset;
+	u8 reserved2;
+	u8 key[16];		/* 16-byte unicast decryption key */
+	__le64 tx_secur_seq_cnt;
+	__le64 hw_tkip_mic_rx_key;
+	__le64 hw_tkip_mic_tx_key;
+} __packed;
+
+/**
+ * struct sta_id_modify
+ * @addr[ETH_ALEN]: station's MAC address
+ * @sta_id: index of station in uCode's station table
+ * @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change
+ *
+ * Driver selects unused table index when adding new station,
+ * or the index to a pre-existing station entry when modifying that station.
+ * Some indexes have special purposes (IWL_AP_ID, index 0, is for AP).
+ *
+ * modify_mask flags select which parameters to modify vs. leave alone.
+ */
+struct sta_id_modify {
+	u8 addr[ETH_ALEN];
+	__le16 reserved1;
+	u8 sta_id;
+	u8 modify_mask;
+	__le16 reserved2;
+} __packed;
+
+/*
+ * REPLY_ADD_STA = 0x18 (command)
+ *
+ * The device contains an internal table of per-station information,
+ * with info on security keys, aggregation parameters, and Tx rates for
+ * initial Tx attempt and any retries (agn devices use
+ * REPLY_TX_LINK_QUALITY_CMD).
+ *
+ * REPLY_ADD_STA sets up the table entry for one station, either creating
+ * a new entry, or modifying a pre-existing one.
+ *
+ * NOTE:  RXON command (without "associated" bit set) wipes the station table
+ *        clean.  Moving into RF_KILL state does this also.  Driver must set up
+ *        new station table before transmitting anything on the RXON channel
+ *        (except active scans or active measurements; those commands carry
+ *        their own txpower/rate setup data).
+ *
+ *        When getting started on a new channel, driver must set up the
+ *        IWL_BROADCAST_ID entry (last entry in the table).  For a client
+ *        station in a BSS, once an AP is selected, driver sets up the AP STA
+ *        in the IWL_AP_ID entry (1st entry in the table).  BROADCAST and AP
+ *        are all that are needed for a BSS client station.  If the device is
+ *        used as AP, or in an IBSS network, driver must set up station table
+ *        entries for all STAs in network, starting with index IWL_STA_ID.
+ */
+
+struct iwl_addsta_cmd {
+	u8 mode;		/* 1: modify existing, 0: add new station */
+	u8 reserved[3];
+	struct sta_id_modify sta;
+	struct iwl_keyinfo key;
+	__le32 station_flags;		/* STA_FLG_* */
+	__le32 station_flags_msk;	/* STA_FLG_* */
+
+	/* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
+	 * corresponding to bit (e.g. bit 5 controls TID 5).
+	 * Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
+	__le16 tid_disable_tx;
+	__le16 legacy_reserved;
+
+	/* TID for which to add block-ack support.
+	 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
+	u8 add_immediate_ba_tid;
+
+	/* TID for which to remove block-ack support.
+	 * Set modify_mask bit STA_MODIFY_DELBA_TID_MSK to use this field. */
+	u8 remove_immediate_ba_tid;
+
+	/* Starting Sequence Number for added block-ack support.
+	 * Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
+	__le16 add_immediate_ba_ssn;
+
+	/*
+	 * Number of packets OK to transmit to station even though
+	 * it is asleep -- used to synchronise PS-poll and u-APSD
+	 * responses while ucode keeps track of STA sleep state.
+	 */
+	__le16 sleep_tx_count;
+
+	__le16 reserved2;
+} __packed;
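+
+/*
+ * Illustrative sketch (not part of the original header): minimal setup of
+ * a REPLY_ADD_STA payload for a new station entry.  Assumes linux/string.h
+ * for memset()/memcpy(); "addr" supplies the station's MAC address.
+ */
+static inline void iwl_example_new_sta(struct iwl_addsta_cmd *cmd,
+				       const u8 *addr, u8 sta_id)
+{
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->mode = 0;			/* 0: add new station */
+	memcpy(cmd->sta.addr, addr, ETH_ALEN);
+	cmd->sta.sta_id = sta_id;
+	cmd->station_flags = 0;		/* STA_FLG_* as needed */
+}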
+
+
+#define ADD_STA_SUCCESS_MSK		0x1
+#define ADD_STA_NO_ROOM_IN_TABLE	0x2
+#define ADD_STA_NO_BLOCK_ACK_RESOURCE	0x4
+#define ADD_STA_MODIFY_NON_EXIST_STA	0x8
+/*
+ * REPLY_ADD_STA = 0x18 (response)
+ */
+struct iwl_add_sta_resp {
+	u8 status;	/* ADD_STA_* */
+} __packed;
+
+#define REM_STA_SUCCESS_MSK              0x1
+/*
+ *  REPLY_REM_STA = 0x19 (response)
+ */
+struct iwl_rem_sta_resp {
+	u8 status;
+} __packed;
+
+/*
+ *  REPLY_REM_STA = 0x19 (command)
+ */
+struct iwl_rem_sta_cmd {
+	u8 num_sta;     /* number of removed stations */
+	u8 reserved[3];
+	u8 addr[ETH_ALEN]; /* MAC addr of the first station */
+	u8 reserved2[2];
+} __packed;
+
+
+/* WiFi queues mask */
+#define IWL_SCD_BK_MSK			BIT(0)
+#define IWL_SCD_BE_MSK			BIT(1)
+#define IWL_SCD_VI_MSK			BIT(2)
+#define IWL_SCD_VO_MSK			BIT(3)
+#define IWL_SCD_MGMT_MSK		BIT(3)
+
+/* PAN queues mask */
+#define IWL_PAN_SCD_BK_MSK		BIT(4)
+#define IWL_PAN_SCD_BE_MSK		BIT(5)
+#define IWL_PAN_SCD_VI_MSK		BIT(6)
+#define IWL_PAN_SCD_VO_MSK		BIT(7)
+#define IWL_PAN_SCD_MGMT_MSK		BIT(7)
+#define IWL_PAN_SCD_MULTICAST_MSK	BIT(8)
+
+#define IWL_AGG_TX_QUEUE_MSK		0xffc00
+
+#define IWL_DROP_ALL			BIT(1)
+
+/*
+ * REPLY_TXFIFO_FLUSH = 0x1e (command and response)
+ *
+ * When using a full FIFO flush, this command checks the scheduler HW block
+ * WR/RD pointers to verify that all the frames were transferred by DMA into
+ * the relevant TX FIFO queue.  Only when the DMA is finished and the queue
+ * is empty can the command finish.
+ * This command is used to flush transmit frames from the TXFIFO.  It may
+ * operate on a single queue or on multiple queues; the command queue cannot
+ * be flushed by this command.  The command response is returned when all the
+ * queue flush operations are done.  Each flushed TX command returns a
+ * response with the FLUSH status set in the TX response status.  When the
+ * FIFO flush operation is used, the flush ends when both scheduler DMA done
+ * and TXFIFO empty are set.
+ *
+ * @queue_control: bit mask for which queues to flush
+ * @flush_control: flush controls
+ *	0: Dump single MSDU
+ *	1: Dump multiple MSDU according to PS, INVALID STA, TTL, TID disable.
+ *	2: Dump all FIFO
+ */
+struct iwl_txfifo_flush_cmd_v3 {
+	__le32 queue_control;
+	__le16 flush_control;
+	__le16 reserved;
+} __packed;
+
+struct iwl_txfifo_flush_cmd_v2 {
+	__le16 queue_control;
+	__le16 flush_control;
+} __packed;
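+
+/*
+ * Illustrative sketch (not part of the original header): flush the four
+ * EDCA queues with the v2 layout, dumping all FIFO contents (IWL_DROP_ALL
+ * matches the "2: Dump all FIFO" flush control above).
+ */
+static inline void iwl_example_flush_ac_queues(struct iwl_txfifo_flush_cmd_v2 *cmd)
+{
+	cmd->queue_control = cpu_to_le16(IWL_SCD_BK_MSK | IWL_SCD_BE_MSK |
+					 IWL_SCD_VI_MSK | IWL_SCD_VO_MSK);
+	cmd->flush_control = cpu_to_le16(IWL_DROP_ALL);
+}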
+
+/*
+ * REPLY_WEP_KEY = 0x20
+ */
+struct iwl_wep_key {
+	u8 key_index;
+	u8 key_offset;
+	u8 reserved1[2];
+	u8 key_size;
+	u8 reserved2[3];
+	u8 key[16];
+} __packed;
+
+struct iwl_wep_cmd {
+	u8 num_keys;
+	u8 global_key_type;
+	u8 flags;
+	u8 reserved;
+	struct iwl_wep_key key[0];
+} __packed;
+
+#define WEP_KEY_WEP_TYPE 1
+#define WEP_KEYS_MAX 4
+#define WEP_INVALID_OFFSET 0xff
+#define WEP_KEY_LEN_64 5
+#define WEP_KEY_LEN_128 13
+
+/******************************************************************************
+ * (4)
+ * Rx Responses:
+ *
+ *****************************************************************************/
+
+#define RX_RES_STATUS_NO_CRC32_ERROR	cpu_to_le32(1 << 0)
+#define RX_RES_STATUS_NO_RXE_OVERFLOW	cpu_to_le32(1 << 1)
+
+#define RX_RES_PHY_FLAGS_BAND_24_MSK	cpu_to_le16(1 << 0)
+#define RX_RES_PHY_FLAGS_MOD_CCK_MSK		cpu_to_le16(1 << 1)
+#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK	cpu_to_le16(1 << 2)
+#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK	cpu_to_le16(1 << 3)
+#define RX_RES_PHY_FLAGS_ANTENNA_MSK		0x70
+#define RX_RES_PHY_FLAGS_ANTENNA_POS		4
+#define RX_RES_PHY_FLAGS_AGG_MSK		cpu_to_le16(1 << 7)
+
+#define RX_RES_STATUS_SEC_TYPE_MSK	(0x7 << 8)
+#define RX_RES_STATUS_SEC_TYPE_NONE	(0x0 << 8)
+#define RX_RES_STATUS_SEC_TYPE_WEP	(0x1 << 8)
+#define RX_RES_STATUS_SEC_TYPE_CCMP	(0x2 << 8)
+#define RX_RES_STATUS_SEC_TYPE_TKIP	(0x3 << 8)
+#define	RX_RES_STATUS_SEC_TYPE_ERR	(0x7 << 8)
+
+#define RX_RES_STATUS_STATION_FOUND	(1<<6)
+#define RX_RES_STATUS_NO_STATION_INFO_MISMATCH	(1<<7)
+
+#define RX_RES_STATUS_DECRYPT_TYPE_MSK	(0x3 << 11)
+#define RX_RES_STATUS_NOT_DECRYPT	(0x0 << 11)
+#define RX_RES_STATUS_DECRYPT_OK	(0x3 << 11)
+#define RX_RES_STATUS_BAD_ICV_MIC	(0x1 << 11)
+#define RX_RES_STATUS_BAD_KEY_TTAK	(0x2 << 11)
+
+#define RX_MPDU_RES_STATUS_ICV_OK	(0x20)
+#define RX_MPDU_RES_STATUS_MIC_OK	(0x40)
+#define RX_MPDU_RES_STATUS_TTAK_OK	(1 << 7)
+#define RX_MPDU_RES_STATUS_DEC_DONE_MSK	(0x800)
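+
+/*
+ * Illustrative sketch (not part of the original header): check whether a
+ * legacy Rx frame decrypted cleanly, given the CPU-endian status word.
+ */
+static inline bool iwl_example_decrypt_ok(u32 rx_status)
+{
+	if ((rx_status & RX_RES_STATUS_SEC_TYPE_MSK) ==
+	    RX_RES_STATUS_SEC_TYPE_NONE)
+		return true;	/* frame was not encrypted */
+
+	return (rx_status & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+	       RX_RES_STATUS_DECRYPT_OK;
+}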
+
+
+#define IWLAGN_RX_RES_PHY_CNT 8
+#define IWLAGN_RX_RES_AGC_IDX     1
+#define IWLAGN_RX_RES_RSSI_AB_IDX 2
+#define IWLAGN_RX_RES_RSSI_C_IDX  3
+#define IWLAGN_OFDM_AGC_MSK 0xfe00
+#define IWLAGN_OFDM_AGC_BIT_POS 9
+#define IWLAGN_OFDM_RSSI_INBAND_A_BITMSK 0x00ff
+#define IWLAGN_OFDM_RSSI_ALLBAND_A_BITMSK 0xff00
+#define IWLAGN_OFDM_RSSI_A_BIT_POS 0
+#define IWLAGN_OFDM_RSSI_INBAND_B_BITMSK 0xff0000
+#define IWLAGN_OFDM_RSSI_ALLBAND_B_BITMSK 0xff000000
+#define IWLAGN_OFDM_RSSI_B_BIT_POS 16
+#define IWLAGN_OFDM_RSSI_INBAND_C_BITMSK 0x00ff
+#define IWLAGN_OFDM_RSSI_ALLBAND_C_BITMSK 0xff00
+#define IWLAGN_OFDM_RSSI_C_BIT_POS 0
+
+struct iwlagn_non_cfg_phy {
+	__le32 non_cfg_phy[IWLAGN_RX_RES_PHY_CNT];  /* up to 8 phy entries */
+} __packed;
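+
+/*
+ * Illustrative sketch (not part of the original header): pull the in-band
+ * RSSI for chains A and B out of the OFDM phy words, using the index and
+ * bitmask definitions above.
+ */
+static inline void iwl_example_rssi_ab(const struct iwlagn_non_cfg_phy *phy,
+				       u32 *rssi_a, u32 *rssi_b)
+{
+	u32 val = le32_to_cpu(phy->non_cfg_phy[IWLAGN_RX_RES_RSSI_AB_IDX]);
+
+	*rssi_a = (val & IWLAGN_OFDM_RSSI_INBAND_A_BITMSK) >>
+		  IWLAGN_OFDM_RSSI_A_BIT_POS;
+	*rssi_b = (val & IWLAGN_OFDM_RSSI_INBAND_B_BITMSK) >>
+		  IWLAGN_OFDM_RSSI_B_BIT_POS;
+}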
+
+
+/*
+ * REPLY_RX = 0xc3 (response only, not a command)
+ * Used only for legacy (non 11n) frames.
+ */
+struct iwl_rx_phy_res {
+	u8 non_cfg_phy_cnt;     /* non configurable DSP phy data byte count */
+	u8 cfg_phy_cnt;		/* configurable DSP phy data byte count */
+	u8 stat_id;		/* configurable DSP phy data set ID */
+	u8 reserved1;
+	__le64 timestamp;	/* TSF at on air rise */
+	__le32 beacon_time_stamp; /* beacon at on-air rise */
+	__le16 phy_flags;	/* general phy flags: band, modulation, ... */
+	__le16 channel;		/* channel number */
+	u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */
+	__le32 rate_n_flags;	/* RATE_MCS_* */
+	__le16 byte_count;	/* frame's byte-count */
+	__le16 frame_time;	/* frame's time on the air */
+} __packed;
+
+struct iwl_rx_mpdu_res_start {
+	__le16 byte_count;
+	__le16 reserved;
+} __packed;
+
+
+/******************************************************************************
+ * (5)
+ * Tx Commands & Responses:
+ *
+ * Driver must place each REPLY_TX command into one of the prioritized Tx
+ * queues in host DRAM, shared between driver and device (see comments for
+ * SCD registers and Tx/Rx Queues).  When the device's Tx scheduler and uCode
+ * are preparing to transmit, the device pulls the Tx command over the PCI
+ * bus via one of the device's Tx DMA channels, to fill an internal FIFO
+ * from which data will be transmitted.
+ *
+ * uCode handles all timing and protocol related to control frames
+ * (RTS/CTS/ACK), based on flags in the Tx command.  uCode and Tx scheduler
+ * handle reception of block-acks; uCode updates the host driver via
+ * REPLY_COMPRESSED_BA.
+ *
+ * uCode handles retrying Tx when an ACK is expected but not received.
+ * This includes trying lower data rates than the one requested in the Tx
+ * command, as set up by the REPLY_TX_LINK_QUALITY_CMD (agn).
+ *
+ * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD.
+ * This command must be executed after every RXON command, before Tx can occur.
+ *****************************************************************************/
+
+/* REPLY_TX Tx flags field */
+
+/*
+ * 1: Use RTS/CTS protocol or CTS-to-self if spec allows it
+ * before this frame. if CTS-to-self required check
+ * RXON_FLG_SELF_CTS_EN status.
+ */
+#define TX_CMD_FLG_PROT_REQUIRE_MSK cpu_to_le32(1 << 0)
+
+/* 1: Expect ACK from receiving station
+ * 0: Don't expect ACK (MAC header's duration field s/b 0)
+ * Set this for unicast frames, but not broadcast/multicast. */
+#define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3)
+
+/* For agn devices:
+ * 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD).
+ *    Tx command's initial_rate_index indicates first rate to try;
+ *    uCode walks through table for additional Tx attempts.
+ * 0: Use Tx rate/MCS from Tx command's rate_n_flags field.
+ *    This rate will be used for all Tx attempts; it will not be scaled. */
+#define TX_CMD_FLG_STA_RATE_MSK cpu_to_le32(1 << 4)
+
+/* 1: Expect immediate block-ack.
+ * Set when Txing a block-ack request frame.  Also set TX_CMD_FLG_ACK_MSK. */
+#define TX_CMD_FLG_IMM_BA_RSP_MASK  cpu_to_le32(1 << 6)
+
+/* Tx antenna selection field; reserved (0) for agn devices. */
+#define TX_CMD_FLG_ANT_SEL_MSK cpu_to_le32(0xf00)
+
+/* 1: Ignore Bluetooth priority for this frame.
+ * 0: Delay Tx until Bluetooth device is done (normal usage). */
+#define TX_CMD_FLG_IGNORE_BT cpu_to_le32(1 << 12)
+
+/* 1: uCode overrides sequence control field in MAC header.
+ * 0: Driver provides sequence control field in MAC header.
+ * Set this for management frames, non-QOS data frames, non-unicast frames,
+ * and also in Tx command embedded in REPLY_SCAN_CMD for active scans. */
+#define TX_CMD_FLG_SEQ_CTL_MSK cpu_to_le32(1 << 13)
+
+/* 1: This frame is non-last MPDU; more fragments are coming.
+ * 0: Last fragment, or not using fragmentation. */
+#define TX_CMD_FLG_MORE_FRAG_MSK cpu_to_le32(1 << 14)
+
+/* 1: uCode calculates and inserts Timestamp Function (TSF) in outgoing frame.
+ * 0: No TSF required in outgoing frame.
+ * Set this for transmitting beacons and probe responses. */
+#define TX_CMD_FLG_TSF_MSK cpu_to_le32(1 << 16)
+
+/* 1: Driver inserted 2 bytes pad after the MAC header, for (required) dword
+ *    alignment of frame's payload data field.
+ * 0: No pad
+ * Set this for MAC headers with 26 or 30 bytes, i.e. those with QOS or ADDR4
+ * field (but not both).  Driver must align frame data (i.e. data following
+ * MAC header) to DWORD boundary. */
+#define TX_CMD_FLG_MH_PAD_MSK cpu_to_le32(1 << 20)
+
+/* accelerate aggregation support
+ * 0 - no CCMP encryption; 1 - CCMP encryption */
+#define TX_CMD_FLG_AGG_CCMP_MSK cpu_to_le32(1 << 22)
+
+/* HCCA-AP - disable duration overwriting. */
+#define TX_CMD_FLG_DUR_MSK cpu_to_le32(1 << 25)
+
+
+/*
+ * TX command security control
+ */
+#define TX_CMD_SEC_WEP  	0x01
+#define TX_CMD_SEC_CCM  	0x02
+#define TX_CMD_SEC_TKIP		0x03
+#define TX_CMD_SEC_MSK		0x03
+#define TX_CMD_SEC_SHIFT	6
+#define TX_CMD_SEC_KEY128	0x08
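+
+/*
+ * Illustrative sketch (not part of the original header): compose the Tx
+ * command sec_ctl byte for WEP, placing the key index per TX_CMD_SEC_SHIFT
+ * and flagging 104-bit keys with TX_CMD_SEC_KEY128.
+ */
+static inline u8 iwl_example_wep_sec_ctl(u8 key_idx, bool is_104_bit)
+{
+	u8 sec_ctl = TX_CMD_SEC_WEP |
+		     ((key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
+
+	if (is_104_bit)
+		sec_ctl |= TX_CMD_SEC_KEY128;
+	return sec_ctl;
+}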
+
+/*
+ * REPLY_TX = 0x1c (command)
+ */
+
+/*
+ * Used for managing Tx retries when expecting block-acks.
+ * Driver should set these fields to 0.
+ */
+struct iwl_dram_scratch {
+	u8 try_cnt;		/* Tx attempts */
+	u8 bt_kill_cnt;		/* Tx attempts blocked by Bluetooth device */
+	__le16 reserved;
+} __packed;
+
+struct iwl_tx_cmd {
+	/*
+	 * MPDU byte count:
+	 * MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
+	 * + 8 byte IV for CCM or TKIP (not used for WEP)
+	 * + Data payload
+	 * + 8-byte MIC (not used for CCM/WEP)
+	 * NOTE:  Does not include Tx command bytes, post-MAC pad bytes,
+	 *        MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
+	 * Range: 14-2342 bytes.
+	 */
+	__le16 len;
+
+	/*
+	 * MPDU or MSDU byte count for next frame.
+	 * Used for fragmentation and bursting, but not 11n aggregation.
+	 * Same as "len", but for next frame.  Set to 0 if not applicable.
+	 */
+	__le16 next_frame_len;
+
+	__le32 tx_flags;	/* TX_CMD_FLG_* */
+
+	/* uCode may modify this field of the Tx command (in host DRAM!).
+	 * Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */
+	struct iwl_dram_scratch scratch;
+
+	/* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */
+	__le32 rate_n_flags;	/* RATE_MCS_* */
+
+	/* Index of destination station in uCode's station table */
+	u8 sta_id;
+
+	/* Type of security encryption:  CCM or TKIP */
+	u8 sec_ctl;		/* TX_CMD_SEC_* */
+
+	/*
+	 * Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial
+	 * Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set.  Normally "0" for
+	 * data frames, this field may be used to selectively reduce initial
+	 * rate (via non-0 value) for special frames (e.g. management), while
+	 * still supporting rate scaling for all frames.
+	 */
+	u8 initial_rate_index;
+	u8 reserved;
+	u8 key[16];
+	__le16 next_frame_flags;
+	__le16 reserved2;
+	union {
+		__le32 life_time;
+		__le32 attempt;
+	} stop_time;
+
+	/* Host DRAM physical address pointer to "scratch" in this command.
+	 * Must be dword aligned.  "0" in dram_lsb_ptr disables usage. */
+	__le32 dram_lsb_ptr;
+	u8 dram_msb_ptr;
+
+	u8 rts_retry_limit;	/*byte 50 */
+	u8 data_retry_limit;	/*byte 51 */
+	u8 tid_tspec;
+	union {
+		__le16 pm_frame_timeout;
+		__le16 attempt_duration;
+	} timeout;
+
+	/*
+	 * Duration of EDCA burst Tx Opportunity, in 32-usec units.
+	 * Set this if txop time is not specified by HCCA protocol (e.g. by AP).
+	 */
+	__le16 driver_txop;
+
+	/*
+	 * MAC header goes here, followed by 2 bytes padding if MAC header
+	 * length is 26 or 30 bytes, followed by payload data
+	 */
+	u8 payload[0];
+	struct ieee80211_hdr hdr[0];
+} __packed;
+
+/*
+ * TX command response is sent after *agn* transmission attempts.
+ *
+ * Both postpone and abort status are expected behavior from uCode.  There is
+ * no special operation required from the driver, except for RFKILL_FLUSH,
+ * which requires a tx flush host command to flush all the tx frames in queues.
+ */
+enum {
+	TX_STATUS_SUCCESS = 0x01,
+	TX_STATUS_DIRECT_DONE = 0x02,
+	/* postpone TX */
+	TX_STATUS_POSTPONE_DELAY = 0x40,
+	TX_STATUS_POSTPONE_FEW_BYTES = 0x41,
+	TX_STATUS_POSTPONE_BT_PRIO = 0x42,
+	TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43,
+	TX_STATUS_POSTPONE_CALC_TTAK = 0x44,
+	/* abort TX */
+	TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81,
+	TX_STATUS_FAIL_SHORT_LIMIT = 0x82,
+	TX_STATUS_FAIL_LONG_LIMIT = 0x83,
+	TX_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
+	TX_STATUS_FAIL_DRAIN_FLOW = 0x85,
+	TX_STATUS_FAIL_RFKILL_FLUSH = 0x86,
+	TX_STATUS_FAIL_LIFE_EXPIRE = 0x87,
+	TX_STATUS_FAIL_DEST_PS = 0x88,
+	TX_STATUS_FAIL_HOST_ABORTED = 0x89,
+	TX_STATUS_FAIL_BT_RETRY = 0x8a,
+	TX_STATUS_FAIL_STA_INVALID = 0x8b,
+	TX_STATUS_FAIL_FRAG_DROPPED = 0x8c,
+	TX_STATUS_FAIL_TID_DISABLE = 0x8d,
+	TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e,
+	TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
+	TX_STATUS_FAIL_PASSIVE_NO_RX = 0x90,
+	TX_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
+};
+
+#define	TX_PACKET_MODE_REGULAR		0x0000
+#define	TX_PACKET_MODE_BURST_SEQ	0x0100
+#define	TX_PACKET_MODE_BURST_FIRST	0x0200
+
+enum {
+	TX_POWER_PA_NOT_ACTIVE = 0x0,
+};
+
+enum {
+	TX_STATUS_MSK = 0x000000ff,		/* bits 0:7 */
+	TX_STATUS_DELAY_MSK = 0x00000040,
+	TX_STATUS_ABORT_MSK = 0x00000080,
+	TX_PACKET_MODE_MSK = 0x0000ff00,	/* bits 8:15 */
+	TX_FIFO_NUMBER_MSK = 0x00070000,	/* bits 16:18 */
+	TX_RESERVED = 0x00780000,		/* bits 19:22 */
+	TX_POWER_PA_DETECT_MSK = 0x7f800000,	/* bits 23:30 */
+	TX_ABORT_REQUIRED_MSK = 0x80000000,	/* bits 31:31 */
+};
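+
+/*
+ * Illustrative sketch (not part of the original header): a frame counts as
+ * successfully transmitted when the low status bits carry one of the two
+ * success codes above.
+ */
+static inline bool iwl_example_tx_success(u32 status)
+{
+	status &= TX_STATUS_MSK;
+	return status == TX_STATUS_SUCCESS ||
+	       status == TX_STATUS_DIRECT_DONE;
+}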
+
+/* *******************************
+ * TX aggregation status
+ ******************************* */
+
+enum {
+	AGG_TX_STATE_TRANSMITTED = 0x00,
+	AGG_TX_STATE_UNDERRUN_MSK = 0x01,
+	AGG_TX_STATE_BT_PRIO_MSK = 0x02,
+	AGG_TX_STATE_FEW_BYTES_MSK = 0x04,
+	AGG_TX_STATE_ABORT_MSK = 0x08,
+	AGG_TX_STATE_LAST_SENT_TTL_MSK = 0x10,
+	AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK = 0x20,
+	AGG_TX_STATE_LAST_SENT_BT_KILL_MSK = 0x40,
+	AGG_TX_STATE_SCD_QUERY_MSK = 0x80,
+	AGG_TX_STATE_TEST_BAD_CRC32_MSK = 0x100,
+	AGG_TX_STATE_RESPONSE_MSK = 0x1ff,
+	AGG_TX_STATE_DUMP_TX_MSK = 0x200,
+	AGG_TX_STATE_DELAY_TX_MSK = 0x400
+};
+
+#define AGG_TX_STATUS_MSK	0x00000fff	/* bits 0:11 */
+#define AGG_TX_TRY_MSK		0x0000f000	/* bits 12:15 */
+#define AGG_TX_TRY_POS		12
+
+#define AGG_TX_STATE_LAST_SENT_MSK  (AGG_TX_STATE_LAST_SENT_TTL_MSK | \
+				     AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK | \
+				     AGG_TX_STATE_LAST_SENT_BT_KILL_MSK)
+
+/* # tx attempts for first frame in aggregation */
+#define AGG_TX_STATE_TRY_CNT_POS 12
+#define AGG_TX_STATE_TRY_CNT_MSK 0xf000
+
+/* Command ID and sequence number of Tx command for this frame */
+#define AGG_TX_STATE_SEQ_NUM_POS 16
+#define AGG_TX_STATE_SEQ_NUM_MSK 0xffff0000
+
+/*
+ * REPLY_TX = 0x1c (response)
+ *
+ * This response may be in one of two slightly different formats, indicated
+ * by the frame_count field:
+ *
+ * 1)  No aggregation (frame_count == 1).  This reports Tx results for
+ *     a single frame.  Multiple attempts, at various bit rates, may have
+ *     been made for this frame.
+ *
+ * 2)  Aggregation (frame_count > 1).  This reports Tx results for
+ *     2 or more frames that used block-acknowledge.  All frames were
+ *     transmitted at same rate.  Rate scaling may have been used if first
+ *     frame in this new agg block failed in previous agg block(s).
+ *
+ *     Note that, for aggregation, ACK (block-ack) status is not delivered here;
+ *     block-ack has not been received by the time the agn device records
+ *     this status.
+ *     This status relates to reasons the tx might have been blocked or aborted
+ *     within the sending station (this agn device), rather than whether it was
+ *     received successfully by the destination station.
+ */
+struct agg_tx_status {
+	__le16 status;
+	__le16 sequence;
+} __packed;
+
+/* refer to ra_tid */
+#define IWLAGN_TX_RES_TID_POS	0
+#define IWLAGN_TX_RES_TID_MSK	0x0f
+#define IWLAGN_TX_RES_RA_POS	4
+#define IWLAGN_TX_RES_RA_MSK	0xf0
+
+struct iwlagn_tx_resp {
+	u8 frame_count;		/* 1 no aggregation, >1 aggregation */
+	u8 bt_kill_count;	/* # blocked by bluetooth (unused for agg) */
+	u8 failure_rts;		/* # failures due to unsuccessful RTS */
+	u8 failure_frame;	/* # failures due to no ACK (unused for agg) */
+
+	/* For non-agg:  Rate at which frame was successful.
+	 * For agg:  Rate at which all frames were transmitted. */
+	__le32 rate_n_flags;	/* RATE_MCS_*  */
+
+	/* For non-agg:  RTS + CTS + frame tx attempts time + ACK.
+	 * For agg:  RTS + CTS + aggregation tx time + block-ack time. */
+	__le16 wireless_media_time;	/* uSecs */
+
+	u8 pa_status;		/* RF power amplifier measurement (not used) */
+	u8 pa_integ_res_a[3];
+	u8 pa_integ_res_b[3];
+	u8 pa_integ_res_C[3];
+
+	__le32 tfd_info;
+	__le16 seq_ctl;
+	__le16 byte_cnt;
+	u8 tlc_info;
+	u8 ra_tid;		/* tid (0:3), sta_id (4:7) */
+	__le16 frame_ctrl;
+	/*
+	 * For non-agg:  frame status TX_STATUS_*
+	 * For agg:  status of 1st frame, AGG_TX_STATE_*; other frame status
+	 *           fields follow this one, up to frame_count.
+	 *           Bit fields:
+	 *           11- 0:  AGG_TX_STATE_* status code
+	 *           15-12:  Retry count for 1st frame in aggregation (retries
+	 *                   occur if tx failed for this frame when it was a
+	 *                   member of a previous aggregation block).  If rate
+	 *                   scaling is used, retry count indicates the rate
+	 *                   table entry used for all frames in the new agg.
+	 *           31-16:  Sequence # for this frame's Tx cmd (not SSN!)
+	 */
+	struct agg_tx_status status;	/* TX status (in aggregation -
+					 * status of 1st frame) */
+} __packed;
+/*
+ * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command)
+ *
+ * Reports Block-Acknowledge from recipient station
+ */
+struct iwl_compressed_ba_resp {
+	__le32 sta_addr_lo32;
+	__le16 sta_addr_hi16;
+	__le16 reserved;
+
+	/* Index of recipient (BA-sending) station in uCode's station table */
+	u8 sta_id;
+	u8 tid;
+	__le16 seq_ctl;
+	__le64 bitmap;
+	__le16 scd_flow;
+	__le16 scd_ssn;
+	u8 txed;	/* number of frames sent */
+	u8 txed_2_done; /* number of frames acked */
+	__le16 reserved1;
+} __packed;
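+
+/*
+ * Illustrative sketch (not part of the original header): count the frames
+ * acknowledged in the block-ack bitmap.  Assumes linux/bitops.h for
+ * hweight64().
+ */
+static inline u8 iwl_example_ba_acked_count(const struct iwl_compressed_ba_resp *ba)
+{
+	return hweight64(le64_to_cpu(ba->bitmap));
+}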
+
+/*
+ * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response)
+ *
+ */
+/* RS_NEW_API: only TLC_RTS remains and moved to bit 0 */
+#define  LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK	(1 << 0)
+
+/* # of EDCA prioritized tx fifos */
+#define  LINK_QUAL_AC_NUM AC_NUM
+
+/* # entries in rate scale table to support Tx retries */
+#define  LINK_QUAL_MAX_RETRY_NUM 16
+
+/* Tx antenna selection values */
+#define  LINK_QUAL_ANT_A_MSK (1 << 0)
+#define  LINK_QUAL_ANT_B_MSK (1 << 1)
+#define  LINK_QUAL_ANT_MSK   (LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK)
+
+
+/**
+ * struct iwl_link_qual_general_params
+ *
+ * Used in REPLY_TX_LINK_QUALITY_CMD
+ */
+struct iwl_link_qual_general_params {
+	u8 flags;
+
+	/* No entries at or above this (driver chosen) index contain MIMO */
+	u8 mimo_delimiter;
+
+	/* Best single antenna to use for single stream (legacy, SISO). */
+	u8 single_stream_ant_msk;	/* LINK_QUAL_ANT_* */
+
+	/* Best antennas to use for MIMO */
+	u8 dual_stream_ant_msk;		/* LINK_QUAL_ANT_* */
+
+	/*
+	 * If driver needs to use different initial rates for different
+	 * EDCA QOS access categories (as implemented by tx fifos 0-3),
+	 * this table will set that up, by indicating the indexes in the
+	 * rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table at which to start.
+	 * Otherwise, driver should set all entries to 0.
+	 *
+	 * Entry usage:
+	 * 0 = Background, 1 = Best Effort (normal), 2 = Video, 3 = Voice
+	 * TX FIFOs above 3 use same value (typically 0) as TX FIFO 3.
+	 */
+	u8 start_rate_index[LINK_QUAL_AC_NUM];
+} __packed;
+
+#define LINK_QUAL_AGG_TIME_LIMIT_DEF	(4000) /* 4 milliseconds */
+#define LINK_QUAL_AGG_TIME_LIMIT_MAX	(8000)
+#define LINK_QUAL_AGG_TIME_LIMIT_MIN	(100)
+
+#define LINK_QUAL_AGG_DISABLE_START_DEF	(3)
+#define LINK_QUAL_AGG_DISABLE_START_MAX	(255)
+#define LINK_QUAL_AGG_DISABLE_START_MIN	(0)
+
+#define LINK_QUAL_AGG_FRAME_LIMIT_DEF	(63)
+#define LINK_QUAL_AGG_FRAME_LIMIT_MAX	(63)
+#define LINK_QUAL_AGG_FRAME_LIMIT_MIN	(0)
+
+/**
+ * struct iwl_link_qual_agg_params
+ *
+ * Used in REPLY_TX_LINK_QUALITY_CMD
+ */
+struct iwl_link_qual_agg_params {
+
+	/*
+	 * Maximum number of uSec in aggregation.
+	 * Default is 4000 (4 milliseconds) if not configured in .cfg.
+	 */
+	__le16 agg_time_limit;
+
+	/*
+	 * Number of Tx retries allowed for a frame, before that frame will
+	 * no longer be considered for the start of an aggregation sequence
+	 * (scheduler will then try to tx it as single frame).
+	 * Driver should set this to 3.
+	 */
+	u8 agg_dis_start_th;
+
+	/*
+	 * Maximum number of frames in aggregation.
+	 * 0 = no limit (default).  1 = no aggregation.
+	 * Other values = max # frames in aggregation.
+	 */
+	u8 agg_frame_cnt_limit;
+
+	__le32 reserved;
+} __packed;
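+
+/*
+ * Illustrative sketch (not part of the original header): populate the
+ * aggregation parameters with the default limits defined above.
+ */
+static inline void iwl_example_agg_defaults(struct iwl_link_qual_agg_params *agg)
+{
+	agg->agg_time_limit = cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+	agg->agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+	agg->agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+	agg->reserved = 0;
+}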
+
+/*
+ * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
+ *
+ * For agn devices
+ *
+ * Each station in the agn device's internal station table has its own table
+ * of 16 Tx rates and modulation modes (e.g. legacy/SISO/MIMO) for retrying
+ * Tx when an ACK is not received.  This command replaces the entire table
+ * for one station.
+ *
+ * NOTE:  Station must already be in agn device's station table.
+ *	  Use REPLY_ADD_STA.
+ *
+ * The rate scaling procedures described below work well.  Of course, other
+ * procedures are possible, and may work better for particular environments.
+ *
+ *
+ * FILLING THE RATE TABLE
+ *
+ * Given a particular initial rate and mode, as determined by the rate
+ * scaling algorithm described below, the Linux driver uses the following
+ * formula to fill the rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table in the
+ * Link Quality command:
+ *
+ *
+ * 1)  If using High-throughput (HT) (SISO or MIMO) initial rate:
+ *     a) Use this same initial rate for first 3 entries.
+ *     b) Find next lower available rate using same mode (SISO or MIMO),
+ *        use for next 3 entries.  If no lower rate available, switch to
+ *        legacy mode (no HT40 channel, no MIMO, no short guard interval).
+ *     c) If using MIMO, set command's mimo_delimiter to number of entries
+ *        using MIMO (3 or 6).
+ *     d) After trying 2 HT rates, switch to legacy mode (no HT40 channel,
+ *        no MIMO, no short guard interval), at the next lower bit rate
+ *        (e.g. if second HT bit rate was 54, try 48 legacy), and follow
+ *        legacy procedure for remaining table entries.
+ *
+ * 2)  If using legacy initial rate:
+ *     a) Use the initial rate for only one entry.
+ *     b) For each following entry, reduce the rate to next lower available
+ *        rate, until reaching the lowest available rate.
+ *     c) When reducing rate, also switch antenna selection.
+ *     d) Once lowest available rate is reached, repeat this rate until
+ *        rate table is filled (16 entries), switching antenna each entry.
+ *
+ *
+ * ACCUMULATING HISTORY
+ *
+ * The rate scaling algorithm for agn devices, as implemented in Linux driver,
+ * uses two sets of frame Tx success history:  One for the current/active
+ * modulation mode, and one for a speculative/search mode that is being
+ * attempted. If the speculative mode turns out to be more effective (i.e.
+ * actual transfer rate is better), then the driver continues to use the
+ * speculative mode as the new current active mode.
+ *
+ * Each history set contains, separately for each possible rate, data for a
+ * sliding window of the 62 most recent tx attempts at that rate.  The data
+ * includes a shifting bitmap of success(1)/failure(0), and sums of successful
+ * and attempted frames, from which the driver can additionally calculate a
+ * success ratio (success / attempted) and number of failures
+ * (attempted - success), and control the size of the window (attempted).
+ * The driver uses the bit map to remove successes from the success sum, as
+ * the oldest tx attempts fall out of the window.
+ *
+ * When the agn device makes multiple tx attempts for a given frame, each
+ * attempt might be at a different rate, and have different modulation
+ * characteristics (e.g. antenna, fat channel, short guard interval), as set
+ * up in the rate scaling table in the Link Quality command.  The driver must
+ * determine which rate table entry was used for each tx attempt, to determine
+ * which rate-specific history to update, and record only those attempts that
+ * match the modulation characteristics of the history set.
+ *
+ * When using block-ack (aggregation), all frames are transmitted at the same
+ * rate, since there is no per-attempt acknowledgment from the destination
+ * station.  The Tx response struct iwl_tx_resp indicates the Tx rate in
+ * rate_n_flags field.  After receiving a block-ack, the driver can update
+ * history for the entire block all at once.
+ *
+ *
+ * FINDING BEST STARTING RATE:
+ *
+ * When working with a selected initial modulation mode (see below), the
+ * driver attempts to find a best initial rate.  The initial rate is the
+ * first entry in the Link Quality command's rate table.
+ *
+ * 1)  Calculate actual throughput (success ratio * expected throughput, see
+ *     table below) for current initial rate.  Do this only if enough frames
+ *     have been attempted to make the value meaningful:  at least 6 failed
+ *     tx attempts, or at least 8 successes.  If not enough, don't try rate
+ *     scaling yet.
+ *
+ * 2)  Find available rates adjacent to current initial rate.  Available means:
+ *     a)  supported by hardware &&
+ *     b)  supported by association &&
+ *     c)  within any constraints selected by user
+ *
+ * 3)  Gather measured throughputs for adjacent rates.  These might not have
+ *     enough history to calculate a throughput.  That's okay, we might try
+ *     using one of them anyway!
+ *
+ * 4)  Try decreasing rate if, for current rate:
+ *     a)  success ratio is < 15% ||
+ *     b)  lower adjacent rate has better measured throughput ||
+ *     c)  higher adjacent rate has worse throughput, and lower is unmeasured
+ *
+ *     As a sanity check, if decrease was determined above, leave rate
+ *     unchanged if:
+ *     a)  lower rate unavailable
+ *     b)  success ratio at current rate > 85% (very good)
+ *     c)  current measured throughput is better than expected throughput
+ *         of lower rate (under perfect 100% tx conditions, see table below)
+ *
+ * 5)  Try increasing rate if, for current rate:
+ *     a)  success ratio is < 15% ||
+ *     b)  both adjacent rates' throughputs are unmeasured (try it!) ||
+ *     c)  higher adjacent rate has better measured throughput ||
+ *     d)  lower adjacent rate has worse throughput, and higher is unmeasured
+ *
+ *     As a sanity check, if increase was determined above, leave rate
+ *     unchanged if:
+ *     a)  success ratio at current rate < 70%.  This is not particularly
+ *         good performance; higher rate is sure to have poorer success.
+ *
+ * 6)  Re-evaluate the rate after each tx frame.  If working with block-
+ *     acknowledge, history and statistics may be calculated for the entire
+ *     block (including prior history that fits within the history windows),
+ *     before re-evaluation.
+ *
+ * FINDING BEST STARTING MODULATION MODE:
+ *
+ * After working with a modulation mode for a "while" (and doing rate scaling),
+ * the driver searches for a new initial mode in an attempt to improve
+ * throughput.  The "while" is measured by numbers of attempted frames:
+ *
+ * For legacy mode, search for new mode after:
+ *   480 successful frames, or 160 failed frames
+ * For high-throughput modes (SISO or MIMO), search for new mode after:
+ *   4500 successful frames, or 400 failed frames
+ *
+ * Mode switch possibilities are (3 for each mode):
+ *
+ * For legacy:
+ *   Change antenna, try SISO (if HT association), try MIMO (if HT association)
+ * For SISO:
+ *   Change antenna, try MIMO, try shortened guard interval (SGI)
+ * For MIMO:
+ *   Try SISO antenna A, SISO antenna B, try shortened guard interval (SGI)
+ *
+ * When trying a new mode, use the same bit rate as the old/current mode when
+ * trying antenna switches and shortened guard interval.  When switching to
+ * SISO from MIMO or legacy, or to MIMO from SISO or legacy, use a rate
+ * for which the expected throughput (under perfect conditions) is about the
+ * same or slightly better than the actual measured throughput delivered by
+ * the old/current mode.
+ *
+ * Actual throughput can be estimated by multiplying the expected throughput
+ * by the success ratio (successful / attempted tx frames).  Frame size is
+ * not considered in this calculation; it assumes that frame size will average
+ * out to be fairly consistent over several samples.  The following are
+ * metric values for expected throughput assuming 100% success ratio.
+ * Only G band has support for CCK rates:
+ *
+ *           RATE:  1    2    5   11    6   9   12   18   24   36   48   54   60
+ *
+ *              G:  7   13   35   58   40  57   72   98  121  154  177  186  186
+ *              A:  0    0    0    0   40  57   72   98  121  154  177  186  186
+ *     SISO 20MHz:  0    0    0    0   42  42   76  102  124  159  183  193  202
+ * SGI SISO 20MHz:  0    0    0    0   46  46   82  110  132  168  192  202  211
+ *     MIMO 20MHz:  0    0    0    0   74  74  123  155  179  214  236  244  251
+ * SGI MIMO 20MHz:  0    0    0    0   81  81  131  164  188  222  243  251  257
+ *     SISO 40MHz:  0    0    0    0   77  77  127  160  184  220  242  250  257
+ * SGI SISO 40MHz:  0    0    0    0   83  83  135  169  193  229  250  257  264
+ *     MIMO 40MHz:  0    0    0    0  123 123  182  214  235  264  279  285  289
+ * SGI MIMO 40MHz:  0    0    0    0  131 131  191  222  242  270  284  289  293
+ *
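+ * Worked example (illustrative): with a SISO 20MHz rate of 48 (expected
+ * metric 183 above) and a measured success ratio of 60%, the estimated
+ * actual throughput is 0.60 * 183 ~= 110.  A switch to MIMO 20MHz would
+ * then start at rate 12 (expected 123), the lowest entry at or slightly
+ * above that estimate.
+ *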
+ * After the new mode has been tried for a short while (minimum of 6 failed
+ * frames or 8 successful frames), compare success ratio and actual throughput
+ * estimate of the new mode with the old.  If either is better with the new
+ * mode, continue to use the new mode.
+ *
+ * Continue comparing modes until all 3 possibilities have been tried.
+ * If moving from legacy to HT, try all 3 possibilities from the new HT
+ * mode.  After trying all 3, a best mode is found.  Continue to use this mode
+ * for the longer "while" described above (e.g. 480 successful frames for
+ * legacy), and then repeat the search process.
+ *
+ */
+struct iwl_link_quality_cmd {
+
+	/* Index of destination/recipient station in uCode's station table */
+	u8 sta_id;
+	u8 reserved1;
+	__le16 control;		/* not used */
+	struct iwl_link_qual_general_params general_params;
+	struct iwl_link_qual_agg_params agg_params;
+
+	/*
+	 * Rate info; when using rate-scaling, Tx command's initial_rate_index
+	 * specifies 1st Tx rate attempted, via index into this table.
+	 * The agn device works its way through the table when retrying Tx.
+	 */
+	struct {
+		__le32 rate_n_flags;	/* RATE_MCS_*, IWL_RATE_* */
+	} rs_table[LINK_QUAL_MAX_RETRY_NUM];
+	__le32 reserved2;
+} __packed;
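+
+/*
+ * Illustrative sketch (not part of the original header): per step 2d of
+ * "FILLING THE RATE TABLE" above, repeat the lowest legacy rate through the
+ * tail of rs_table once it has been reached (the per-entry antenna toggling
+ * described there is omitted here for brevity).
+ */
+static inline void iwl_example_fill_table_tail(struct iwl_link_quality_cmd *lq,
+					       int first_idx,
+					       __le32 lowest_rate_n_flags)
+{
+	int i;
+
+	for (i = first_idx; i < LINK_QUAL_MAX_RETRY_NUM; i++)
+		lq->rs_table[i].rate_n_flags = lowest_rate_n_flags;
+}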
+
+/*
+ * BT configuration enable flags:
+ *   bit 0 - 1: BT channel announcement enabled
+ *           0: disable
+ *   bit 1 - 1: priority of BT device enabled
+ *           0: disable
+ *   bit 2 - 1: BT 2 wire support enabled
+ *           0: disable
+ */
+#define BT_COEX_DISABLE (0x0)
+#define BT_ENABLE_CHANNEL_ANNOUNCE BIT(0)
+#define BT_ENABLE_PRIORITY	   BIT(1)
+#define BT_ENABLE_2_WIRE	   BIT(2)
+
+#define BT_COEX_ENABLE  (BT_ENABLE_CHANNEL_ANNOUNCE | BT_ENABLE_PRIORITY)
+
+#define BT_LEAD_TIME_MIN (0x0)
+#define BT_LEAD_TIME_DEF (0x1E)
+#define BT_LEAD_TIME_MAX (0xFF)
+
+#define BT_MAX_KILL_MIN (0x1)
+#define BT_MAX_KILL_DEF (0x5)
+#define BT_MAX_KILL_MAX (0xFF)
+
+#define BT_DURATION_LIMIT_DEF	625
+#define BT_DURATION_LIMIT_MAX	1250
+#define BT_DURATION_LIMIT_MIN	625
+
+#define BT_ON_THRESHOLD_DEF	4
+#define BT_ON_THRESHOLD_MAX	1000
+#define BT_ON_THRESHOLD_MIN	1
+
+#define BT_FRAG_THRESHOLD_DEF	0
+#define BT_FRAG_THRESHOLD_MAX	0
+#define BT_FRAG_THRESHOLD_MIN	0
+
+#define BT_AGG_THRESHOLD_DEF	1200
+#define BT_AGG_THRESHOLD_MAX	8000
+#define BT_AGG_THRESHOLD_MIN	400
+
+/*
+ * REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
+ *
+ * agn devices support hardware handshake with Bluetooth device on
+ * same platform.  Bluetooth device alerts wireless device when it will Tx;
+ * wireless device can delay or kill its own Tx to accommodate.
+ */
+struct iwl_bt_cmd {
+	u8 flags;
+	u8 lead_time;
+	u8 max_kill;
+	u8 reserved;
+	__le32 kill_ack_mask;
+	__le32 kill_cts_mask;
+} __packed;
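+
+/*
+ * Illustrative sketch (not part of the original header): a default
+ * REPLY_BT_CONFIG payload built from the limits above, enabling channel
+ * announcement and BT priority.
+ */
+static inline void iwl_example_bt_defaults(struct iwl_bt_cmd *bt)
+{
+	bt->flags = BT_COEX_ENABLE;
+	bt->lead_time = BT_LEAD_TIME_DEF;
+	bt->max_kill = BT_MAX_KILL_DEF;
+	bt->reserved = 0;
+	bt->kill_ack_mask = cpu_to_le32(0);
+	bt->kill_cts_mask = cpu_to_le32(0);
+}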
+
+#define IWLAGN_BT_FLAG_CHANNEL_INHIBITION	BIT(0)
+
+#define IWLAGN_BT_FLAG_COEX_MODE_MASK		(BIT(3)|BIT(4)|BIT(5))
+#define IWLAGN_BT_FLAG_COEX_MODE_SHIFT		3
+#define IWLAGN_BT_FLAG_COEX_MODE_DISABLED	0
+#define IWLAGN_BT_FLAG_COEX_MODE_LEGACY_2W	1
+#define IWLAGN_BT_FLAG_COEX_MODE_3W		2
+#define IWLAGN_BT_FLAG_COEX_MODE_4W		3
+
+#define IWLAGN_BT_FLAG_UCODE_DEFAULT		BIT(6)
+/* Disable Sync PSPoll on SCO/eSCO */
+#define IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE	BIT(7)
+
+#define IWLAGN_BT_PSP_MIN_RSSI_THRESHOLD	-75 /* dBm */
+#define IWLAGN_BT_PSP_MAX_RSSI_THRESHOLD	-65 /* dBm */
+
+#define IWLAGN_BT_PRIO_BOOST_MAX	0xFF
+#define IWLAGN_BT_PRIO_BOOST_MIN	0x00
+#define IWLAGN_BT_PRIO_BOOST_DEFAULT	0xF0
+#define IWLAGN_BT_PRIO_BOOST_DEFAULT32	0xF0F0F0F0
+
+#define IWLAGN_BT_MAX_KILL_DEFAULT	5
+
+#define IWLAGN_BT3_T7_DEFAULT		1
+
+enum iwl_bt_kill_idx {
+	IWL_BT_KILL_DEFAULT = 0,
+	IWL_BT_KILL_OVERRIDE = 1,
+	IWL_BT_KILL_REDUCE = 2,
+};
+
+#define IWLAGN_BT_KILL_ACK_MASK_DEFAULT	cpu_to_le32(0xffff0000)
+#define IWLAGN_BT_KILL_CTS_MASK_DEFAULT	cpu_to_le32(0xffff0000)
+#define IWLAGN_BT_KILL_ACK_CTS_MASK_SCO	cpu_to_le32(0xffffffff)
+#define IWLAGN_BT_KILL_ACK_CTS_MASK_REDUCE	cpu_to_le32(0)
+
+#define IWLAGN_BT3_PRIO_SAMPLE_DEFAULT	2
+
+#define IWLAGN_BT3_T2_DEFAULT		0xc
+
+#define IWLAGN_BT_VALID_ENABLE_FLAGS	cpu_to_le16(BIT(0))
+#define IWLAGN_BT_VALID_BOOST		cpu_to_le16(BIT(1))
+#define IWLAGN_BT_VALID_MAX_KILL	cpu_to_le16(BIT(2))
+#define IWLAGN_BT_VALID_3W_TIMERS	cpu_to_le16(BIT(3))
+#define IWLAGN_BT_VALID_KILL_ACK_MASK	cpu_to_le16(BIT(4))
+#define IWLAGN_BT_VALID_KILL_CTS_MASK	cpu_to_le16(BIT(5))
+#define IWLAGN_BT_VALID_REDUCED_TX_PWR	cpu_to_le16(BIT(6))
+#define IWLAGN_BT_VALID_3W_LUT		cpu_to_le16(BIT(7))
+
+#define IWLAGN_BT_ALL_VALID_MSK		(IWLAGN_BT_VALID_ENABLE_FLAGS | \
+					IWLAGN_BT_VALID_BOOST | \
+					IWLAGN_BT_VALID_MAX_KILL | \
+					IWLAGN_BT_VALID_3W_TIMERS | \
+					IWLAGN_BT_VALID_KILL_ACK_MASK | \
+					IWLAGN_BT_VALID_KILL_CTS_MASK | \
+					IWLAGN_BT_VALID_REDUCED_TX_PWR | \
+					IWLAGN_BT_VALID_3W_LUT)
+
+#define IWLAGN_BT_REDUCED_TX_PWR	BIT(0)
+
+#define IWLAGN_BT_DECISION_LUT_SIZE	12
+
+struct iwl_basic_bt_cmd {
+	u8 flags;
+	u8 ledtime; /* unused */
+	u8 max_kill;
+	u8 bt3_timer_t7_value;
+	__le32 kill_ack_mask;
+	__le32 kill_cts_mask;
+	u8 bt3_prio_sample_time;
+	u8 bt3_timer_t2_value;
+	__le16 bt4_reaction_time; /* unused */
+	__le32 bt3_lookup_table[IWLAGN_BT_DECISION_LUT_SIZE];
+	/*
+	 * bit 0: use reduced tx power for control frame
+	 * bit 1 - 7: reserved
+	 */
+	u8 reduce_txpower;
+	u8 reserved;
+	__le16 valid;
+};
+
+struct iwl_bt_cmd_v1 {
+	struct iwl_basic_bt_cmd basic;
+	u8 prio_boost;
+	/*
+	 * set IWLAGN_BT_VALID_BOOST to "1" in "valid" bitmask
+	 * when configuring the following fields
+	 */
+	u8 tx_prio_boost;	/* SW boost of WiFi tx priority */
+	__le16 rx_prio_boost;	/* SW boost of WiFi rx priority */
+};
+
+struct iwl_bt_cmd_v2 {
+	struct iwl_basic_bt_cmd basic;
+	__le32 prio_boost;
+	/*
+	 * set IWLAGN_BT_VALID_BOOST to "1" in "valid" bitmask
+	 * when configuring the following fields
+	 */
+	u8 reserved;
+	u8 tx_prio_boost;	/* SW boost of WiFi tx priority */
+	__le16 rx_prio_boost;	/* SW boost of WiFi rx priority */
+};
+
+#define IWLAGN_BT_SCO_ACTIVE	cpu_to_le32(BIT(0))
+
+struct iwlagn_bt_sco_cmd {
+	__le32 flags;
+};
+
+/******************************************************************************
+ * (6)
+ * Spectrum Management (802.11h) Commands, Responses, Notifications:
+ *
+ *****************************************************************************/
+
+/*
+ * Spectrum Management
+ */
+#define MEASUREMENT_FILTER_FLAG (RXON_FILTER_PROMISC_MSK         | \
+				 RXON_FILTER_CTL2HOST_MSK        | \
+				 RXON_FILTER_ACCEPT_GRP_MSK      | \
+				 RXON_FILTER_DIS_DECRYPT_MSK     | \
+				 RXON_FILTER_DIS_GRP_DECRYPT_MSK | \
+				 RXON_FILTER_ASSOC_MSK           | \
+				 RXON_FILTER_BCON_AWARE_MSK)
+
+struct iwl_measure_channel {
+	__le32 duration;	/* measurement duration in extended beacon
+				 * format */
+	u8 channel;		/* channel to measure */
+	u8 type;		/* see enum iwl_measure_type */
+	__le16 reserved;
+} __packed;
+
+/*
+ * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command)
+ */
+struct iwl_spectrum_cmd {
+	__le16 len;		/* number of bytes starting from token */
+	u8 token;		/* token id */
+	u8 id;			/* measurement id -- 0 or 1 */
+	u8 origin;		/* 0 = TGh, 1 = other, 2 = TGk */
+	u8 periodic;		/* 1 = periodic */
+	__le16 path_loss_timeout;
+	__le32 start_time;	/* start time in extended beacon format */
+	__le32 reserved2;
+	__le32 flags;		/* rxon flags */
+	__le32 filter_flags;	/* rxon filter flags */
+	__le16 channel_count;	/* minimum 1, maximum 10 */
+	__le16 reserved3;
+	struct iwl_measure_channel channels[10];
+} __packed;
+
+/*
+ * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response)
+ */
+struct iwl_spectrum_resp {
+	u8 token;
+	u8 id;			/* id of the prior command replaced, or 0xff */
+	__le16 status;		/* 0 - command will be handled
+				 * 1 - cannot handle (conflicts with another
+				 *     measurement) */
+} __packed;
+
+enum iwl_measurement_state {
+	IWL_MEASUREMENT_START = 0,
+	IWL_MEASUREMENT_STOP = 1,
+};
+
+enum iwl_measurement_status {
+	IWL_MEASUREMENT_OK = 0,
+	IWL_MEASUREMENT_CONCURRENT = 1,
+	IWL_MEASUREMENT_CSA_CONFLICT = 2,
+	IWL_MEASUREMENT_TGH_CONFLICT = 3,
+	/* 4-5 reserved */
+	IWL_MEASUREMENT_STOPPED = 6,
+	IWL_MEASUREMENT_TIMEOUT = 7,
+	IWL_MEASUREMENT_PERIODIC_FAILED = 8,
+};
+
+#define NUM_ELEMENTS_IN_HISTOGRAM 8
+
+struct iwl_measurement_histogram {
+	__le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM];	/* in 0.8usec counts */
+	__le32 cck[NUM_ELEMENTS_IN_HISTOGRAM];	/* in 1usec counts */
+} __packed;
+
+/* clear channel availability counters */
+struct iwl_measurement_cca_counters {
+	__le32 ofdm;
+	__le32 cck;
+} __packed;
+
+enum iwl_measure_type {
+	IWL_MEASURE_BASIC = (1 << 0),
+	IWL_MEASURE_CHANNEL_LOAD = (1 << 1),
+	IWL_MEASURE_HISTOGRAM_RPI = (1 << 2),
+	IWL_MEASURE_HISTOGRAM_NOISE = (1 << 3),
+	IWL_MEASURE_FRAME = (1 << 4),
+	/* bits 5:6 are reserved */
+	IWL_MEASURE_IDLE = (1 << 7),
+};
+
+/*
+ * SPECTRUM_MEASURE_NOTIFICATION = 0x75 (notification only, not a command)
+ */
+struct iwl_spectrum_notification {
+	u8 id;			/* measurement id -- 0 or 1 */
+	u8 token;
+	u8 channel_index;	/* index in measurement channel list */
+	u8 state;		/* 0 - start, 1 - stop */
+	__le32 start_time;	/* lower 32-bits of TSF */
+	u8 band;		/* 0 - 5.2GHz, 1 - 2.4GHz */
+	u8 channel;
+	u8 type;		/* see enum iwl_measurement_type */
+	u8 reserved1;
+	/* NOTE:  cca_ofdm, cca_cck, basic_type, and histogram are only
+	 * valid if applicable for measurement type requested. */
+	__le32 cca_ofdm;	/* cca fraction time in 40MHz clock periods */
+	__le32 cca_cck;		/* cca fraction time in 44MHz clock periods */
+	__le32 cca_time;	/* channel load time in usecs */
+	u8 basic_type;		/* 0 - bss, 1 - ofdm preamble, 2 -
+				 * unidentified */
+	u8 reserved2[3];
+	struct iwl_measurement_histogram histogram;
+	__le32 stop_time;	/* lower 32-bits of TSF */
+	__le32 status;		/* see iwl_measurement_status */
+} __packed;
+
+/******************************************************************************
+ * (7)
+ * Power Management Commands, Responses, Notifications:
+ *
+ *****************************************************************************/
+
+/**
+ * struct iwl_powertable_cmd - Power Table Command
+ * @flags: See below:
+ *
+ * POWER_TABLE_CMD = 0x77 (command, has simple generic response)
+ *
+ * PM allow:
+ *   bit 0 - '0' Driver does not allow power management
+ *           '1' Driver allows PM (use rest of parameters)
+ *
+ * uCode sends sleep notifications:
+ *   bit 1 - '0' Don't send sleep notification
+ *           '1' send sleep notification (SEND_PM_NOTIFICATION)
+ *
+ * Sleep over DTIM
+ *   bit 2 - '0' PM has to wake up every DTIM
+ *           '1' PM may sleep over DTIM until the listen interval.
+ *
+ * PCI power managed
+ *   bit 3 - '0' (PCI_CFG_LINK_CTRL & 0x1)
+ *           '1' !(PCI_CFG_LINK_CTRL & 0x1)
+ *
+ * Fast PD
+ *   bit 4 - '1' Put radio to sleep when receiving frame for others
+ *
+ * Force sleep Modes
+ *   bit 31/30- '00' use both mac/xtal sleeps
+ *              '01' force Mac sleep
+ *              '10' force xtal sleep
+ *              '11' Illegal set
+ *
+ * NOTE: if sleep_interval[SLEEP_INTRVL_TABLE_SIZE-1] > DTIM period then
+ * ucode assumes sleep over DTIM is allowed and we don't need to wake up
+ * for every DTIM.
+ */
+#define IWL_POWER_VEC_SIZE 5
+
+#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK	cpu_to_le16(BIT(0))
+#define IWL_POWER_POWER_SAVE_ENA_MSK		cpu_to_le16(BIT(0))
+#define IWL_POWER_POWER_MANAGEMENT_ENA_MSK	cpu_to_le16(BIT(1))
+#define IWL_POWER_SLEEP_OVER_DTIM_MSK		cpu_to_le16(BIT(2))
+#define IWL_POWER_PCI_PM_MSK			cpu_to_le16(BIT(3))
+#define IWL_POWER_FAST_PD			cpu_to_le16(BIT(4))
+#define IWL_POWER_BEACON_FILTERING		cpu_to_le16(BIT(5))
+#define IWL_POWER_SHADOW_REG_ENA		cpu_to_le16(BIT(6))
+#define IWL_POWER_CT_KILL_SET			cpu_to_le16(BIT(7))
+#define IWL_POWER_BT_SCO_ENA			cpu_to_le16(BIT(8))
+#define IWL_POWER_ADVANCE_PM_ENA_MSK		cpu_to_le16(BIT(9))
+
+struct iwl_powertable_cmd {
+	__le16 flags;
+	u8 keep_alive_seconds;
+	u8 debug_flags;
+	__le32 rx_data_timeout;
+	__le32 tx_data_timeout;
+	__le32 sleep_interval[IWL_POWER_VEC_SIZE];
+	__le32 keep_alive_beacons;
+} __packed;
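+
+/*
+ * Illustrative sketch (not part of the original header): a minimal
+ * POWER_TABLE_CMD allowing sleep, including sleep over DTIM (bits 0 and 2
+ * above).  Assumes linux/string.h for memset().
+ */
+static inline void iwl_example_allow_sleep(struct iwl_powertable_cmd *cmd)
+{
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->flags = IWL_POWER_DRIVER_ALLOW_SLEEP_MSK |
+		     IWL_POWER_SLEEP_OVER_DTIM_MSK;
+}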
+
+/*
+ * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command)
+ * All devices identical.
+ */
+struct iwl_sleep_notification {
+	u8 pm_sleep_mode;
+	u8 pm_wakeup_src;
+	__le16 reserved;
+	__le32 sleep_time;
+	__le32 tsf_low;
+	__le32 bcon_timer;
+} __packed;
+
+/* Sleep states.  All devices identical. */
+enum {
+	IWL_PM_NO_SLEEP = 0,
+	IWL_PM_SLP_MAC = 1,
+	IWL_PM_SLP_FULL_MAC_UNASSOCIATE = 2,
+	IWL_PM_SLP_FULL_MAC_CARD_STATE = 3,
+	IWL_PM_SLP_PHY = 4,
+	IWL_PM_SLP_REPENT = 5,
+	IWL_PM_WAKEUP_BY_TIMER = 6,
+	IWL_PM_WAKEUP_BY_DRIVER = 7,
+	IWL_PM_WAKEUP_BY_RFKILL = 8,
+	/* 3 reserved */
+	IWL_PM_NUM_OF_MODES = 12,
+};
+
+/*
+ * REPLY_CARD_STATE_CMD = 0xa0 (command, has simple generic response)
+ */
+#define CARD_STATE_CMD_DISABLE 0x00	/* Put card to sleep */
+#define CARD_STATE_CMD_ENABLE  0x01	/* Wake up card */
+#define CARD_STATE_CMD_HALT    0x02	/* Power down permanently */
+struct iwl_card_state_cmd {
+	__le32 status;		/* CARD_STATE_CMD_* request new power state */
+} __packed;
+
+/*
+ * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command)
+ */
+struct iwl_card_state_notif {
+	__le32 flags;
+} __packed;
+
+#define HW_CARD_DISABLED   0x01
+#define SW_CARD_DISABLED   0x02
+#define CT_CARD_DISABLED   0x04
+#define RXON_CARD_DISABLED 0x10
+
+struct iwl_ct_kill_config {
+	__le32   reserved;
+	__le32   critical_temperature_M;
+	__le32   critical_temperature_R;
+}  __packed;
+
+/* 1000 and 6x00 series */
+struct iwl_ct_kill_throttling_config {
+	__le32   critical_temperature_exit;
+	__le32   reserved;
+	__le32   critical_temperature_enter;
+}  __packed;
+
+/******************************************************************************
+ * (8)
+ * Scan Commands, Responses, Notifications:
+ *
+ *****************************************************************************/
+
+#define SCAN_CHANNEL_TYPE_PASSIVE cpu_to_le32(0)
+#define SCAN_CHANNEL_TYPE_ACTIVE  cpu_to_le32(1)
+
+/**
+ * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table
+ *
+ * One for each channel in the scan list.
+ * Each channel can independently select:
+ * 1)  SSID for directed active scans
+ * 2)  Txpower setting (for rate specified within Tx command)
+ * 3)  How long to stay on-channel (behavior may be modified by quiet_time,
+ *     quiet_plcp_th, good_CRC_th)
+ *
+ * To avoid uCode errors, make sure the following are true (see comments
+ * under struct iwl_scan_cmd about max_out_time and quiet_time):
+ * 1)  If using passive_dwell (i.e. passive_dwell != 0):
+ *     active_dwell <= passive_dwell (< max_out_time if max_out_time != 0)
+ * 2)  quiet_time <= active_dwell
+ * 3)  If restricting off-channel time (i.e. max_out_time !=0):
+ *     passive_dwell < max_out_time
+ *     active_dwell < max_out_time
+ */
+
+struct iwl_scan_channel {
+	/*
+	 * type is defined as:
+	 * 0:0 1 = active, 0 = passive
+	 * 1:20 SSID direct bit map; if a bit is set, then corresponding
+	 *     SSID IE is transmitted in probe request.
+	 * 21:31 reserved
+	 */
+	__le32 type;
+	__le16 channel;	/* band is selected by iwl_scan_cmd "flags" field */
+	u8 tx_gain;		/* gain for analog radio */
+	u8 dsp_atten;		/* gain for DSP */
+	__le16 active_dwell;	/* in 1024-uSec TU (time units), typ 5-50 */
+	__le16 passive_dwell;	/* in 1024-uSec TU (time units), typ 20-500 */
+} __packed;
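+
+/*
+ * Illustrative sketch (not part of the original header): checking the
+ * dwell-time constraints documented above before submitting a channel
+ * entry.  Returns true when the entry satisfies the uCode timing rules.
+ */
+static inline bool iwl_example_scan_channel_valid(
+				const struct iwl_scan_channel *chan,
+				__le32 max_out_time, __le16 quiet_time)
+{
+	u16 active = le16_to_cpu(chan->active_dwell);
+	u16 passive = le16_to_cpu(chan->passive_dwell);
+	u32 max_out = le32_to_cpu(max_out_time);
+
+	if (passive && active > passive)
+		return false;		/* rule 1: active <= passive */
+	if (le16_to_cpu(quiet_time) > active)
+		return false;		/* rule 2: quiet <= active */
+	if (max_out && (active >= max_out || passive >= max_out))
+		return false;		/* rule 3: both dwells < max_out */
+	return true;
+}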
+
+/* mask of SSID direct-probe bits 1..n in the __le32 "type" field */
+#define IWL_SCAN_PROBE_MASK(n)	cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
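+/*
+ * Worked example (illustrative): IWL_SCAN_PROBE_MASK(3) expands to
+ * BIT(3) | (BIT(3) - BIT(1)) = 0x8 | 0x6 = 0xe, i.e. SSID bits 1..3
+ * of the "type" field, requesting direct probes for entries 1-3.
+ */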
+
+/**
+ * struct iwl_ssid_ie - directed scan network information element
+ *
+ * Up to 20 of these may appear in REPLY_SCAN_CMD,
+ * selected by "type" bit field in struct iwl_scan_channel;
+ * each channel may select different ssids from among the 20 entries.
+ * SSID IEs get transmitted in reverse order of entry.
+ */
+struct iwl_ssid_ie {
+	u8 id;
+	u8 len;
+	u8 ssid[32];
+} __packed;
+
+#define PROBE_OPTION_MAX		20
+#define TX_CMD_LIFE_TIME_INFINITE	cpu_to_le32(0xFFFFFFFF)
+#define IWL_GOOD_CRC_TH_DISABLED	0
+#define IWL_GOOD_CRC_TH_DEFAULT		cpu_to_le16(1)
+#define IWL_GOOD_CRC_TH_NEVER		cpu_to_le16(0xffff)
+#define IWL_MAX_CMD_SIZE 4096
+
+/*
+ * REPLY_SCAN_CMD = 0x80 (command)
+ *
+ * The hardware scan command is very powerful; the driver can set it up to
+ * maintain (relatively) normal network traffic while doing a scan in the
+ * background.  The max_out_time and suspend_time control the ratio of how
+ * long the device stays on an associated network channel ("service channel")
+ * vs. how long it's away from the service channel, i.e. tuned to other channels
+ * for scanning.
+ *
+ * max_out_time is the max time off-channel (in usec), and suspend_time
+ * is how long (in "extended beacon" format) that the scan is "suspended"
+ * after returning to the service channel.  That is, suspend_time is the
+ * time that we stay on the service channel, doing normal work, between
+ * scan segments.  The driver may set these parameters differently to support
+ * scanning when associated vs. not associated, and light vs. heavy traffic
+ * loads when associated.
+ *
+ * After receiving this command, the device's scan engine does the following:
+ *
+ * 1)  Sends SCAN_START notification to driver
+ * 2)  Checks to see if it has time to do scan for one channel
+ * 3)  Sends NULL packet, with power-save (PS) bit set to 1,
+ *     to tell AP that we're going off-channel
+ * 4)  Tunes to first channel in scan list, does active or passive scan
+ * 5)  Sends SCAN_RESULT notification to driver
+ * 6)  Checks to see if it has time to do scan on *next* channel in list
+ * 7)  Repeats 4-6 until it no longer has time to scan the next channel
+ *     before max_out_time expires
+ * 8)  Returns to service channel
+ * 9)  Sends NULL packet with PS=0 to tell AP that we're back
+ * 10) Stays on service channel until suspend_time expires
+ * 11) Repeats entire process 2-10 until list is complete
+ * 12) Sends SCAN_COMPLETE notification
+ *
+ * For fast, efficient scans, the scan command also has support for staying on
+ * a channel for just a short time, if doing active scanning and getting no
+ * responses to the transmitted probe request.  This time is controlled by
+ * quiet_time, and the number of received packets below which a channel is
+ * considered "quiet" is controlled by quiet_plcp_threshold.
+ *
+ * For active scanning on channels that have regulatory restrictions against
+ * blindly transmitting, the scan can listen before transmitting, to make sure
+ * that there is already legitimate activity on the channel.  If enough
+ * packets are cleanly received on the channel (controlled by good_CRC_th,
+ * typical value 1), the scan engine starts transmitting probe requests.
+ *
+ * Driver must use separate scan commands for 2.4 vs. 5 GHz bands.
+ *
+ * To avoid uCode errors, see timing restrictions described under
+ * struct iwl_scan_channel.
+ */
+
+enum iwl_scan_flags {
+	/* BIT(0) currently unused */
+	IWL_SCAN_FLAGS_ACTION_FRAME_TX	= BIT(1),
+	/* bits 2-7 reserved */
+};
+
+struct iwl_scan_cmd {
+	__le16 len;
+	u8 scan_flags;		/* scan flags: see enum iwl_scan_flags */
+	u8 channel_count;	/* # channels in channel list */
+	__le16 quiet_time;	/* dwell only this # millisecs on quiet channel
+				 * (only for active scan) */
+	__le16 quiet_plcp_th;	/* quiet chnl is < this # pkts (typ. 1) */
+	__le16 good_CRC_th;	/* passive -> active promotion threshold */
+	__le16 rx_chain;	/* RXON_RX_CHAIN_* */
+	__le32 max_out_time;	/* max usec to be away from associated (service)
+				 * channel */
+	__le32 suspend_time;	/* pause scan this long (in "extended beacon
+				 * format") when returning to service chnl */
+	__le32 flags;		/* RXON_FLG_* */
+	__le32 filter_flags;	/* RXON_FILTER_* */
+
+	/* For active scans (set to all-0s for passive scans).
+	 * Does not include payload.  Must specify Tx rate; no rate scaling. */
+	struct iwl_tx_cmd tx_cmd;
+
+	/* For directed active scans (set to all-0s otherwise) */
+	struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
+
+	/*
+	 * Probe request frame, followed by channel list.
+	 *
+	 * Size of probe request frame is specified by byte count in tx_cmd.
+	 * Channel list follows immediately after probe request frame.
+	 * Number of channels in list is specified by channel_count.
+	 * Each channel in list is of type:
+	 *
+	 * struct iwl_scan_channel channels[0];
+	 *
+	 * NOTE:  Only one band of channels can be scanned per pass.  You
+	 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
+	 * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
+	 * before requesting another scan.
+	 */
+	u8 data[0];
+} __packed;
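+
+/*
+ * Illustrative sketch (not part of the original header): locating the
+ * channel table in the variable-length tail of REPLY_SCAN_CMD.  The
+ * probe request frame (length given by the byte count in tx_cmd) comes
+ * first; channel_count entries of struct iwl_scan_channel follow it.
+ */
+static inline struct iwl_scan_channel *
+iwl_example_scan_channels(struct iwl_scan_cmd *cmd, size_t probe_len)
+{
+	return (struct iwl_scan_channel *)(cmd->data + probe_len);
+}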
+
+/* Scan can be aborted; the abort is reported by the complete
+ * notification carrying the abort status. */
+#define CAN_ABORT_STATUS	cpu_to_le32(0x1)
+/* complete notification statuses */
+#define ABORT_STATUS            0x2
+
+/*
+ * REPLY_SCAN_CMD = 0x80 (response)
+ */
+struct iwl_scanreq_notification {
+	__le32 status;		/* 1: okay, 2: cannot fulfill request */
+} __packed;
+
+/*
+ * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command)
+ */
+struct iwl_scanstart_notification {
+	__le32 tsf_low;
+	__le32 tsf_high;
+	__le32 beacon_timer;
+	u8 channel;
+	u8 band;
+	u8 reserved[2];
+	__le32 status;
+} __packed;
+
+#define  SCAN_OWNER_STATUS 0x1
+#define  MEASURE_OWNER_STATUS 0x2
+
+#define IWL_PROBE_STATUS_OK		0
+#define IWL_PROBE_STATUS_TX_FAILED	BIT(0)
+/* error statuses combined with TX_FAILED */
+#define IWL_PROBE_STATUS_FAIL_TTL	BIT(1)
+#define IWL_PROBE_STATUS_FAIL_BT	BIT(2)
+
+#define NUMBER_OF_STATISTICS 1	/* first __le32 is good CRC */
+/*
+ * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command)
+ */
+struct iwl_scanresults_notification {
+	u8 channel;
+	u8 band;
+	u8 probe_status;
+	u8 num_probe_not_sent; /* not enough time to send */
+	__le32 tsf_low;
+	__le32 tsf_high;
+	__le32 statistics[NUMBER_OF_STATISTICS];
+} __packed;
+
+/*
+ * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command)
+ */
+struct iwl_scancomplete_notification {
+	u8 scanned_channels;
+	u8 status;
+	u8 bt_status;	/* BT On/Off status */
+	u8 last_channel;
+	__le32 tsf_low;
+	__le32 tsf_high;
+} __packed;
+
+
+/******************************************************************************
+ * (9)
+ * IBSS/AP Commands and Notifications:
+ *
+ *****************************************************************************/
+
+enum iwl_ibss_manager {
+	IWL_NOT_IBSS_MANAGER = 0,
+	IWL_IBSS_MANAGER = 1,
+};
+
+/*
+ * BEACON_NOTIFICATION = 0x90 (notification only, not a command)
+ */
+
+struct iwlagn_beacon_notif {
+	struct iwlagn_tx_resp beacon_notify_hdr;
+	__le32 low_tsf;
+	__le32 high_tsf;
+	__le32 ibss_mgr_status;
+} __packed;
+
+/*
+ * REPLY_TX_BEACON = 0x91 (command, has simple generic response)
+ */
+
+struct iwl_tx_beacon_cmd {
+	struct iwl_tx_cmd tx;
+	__le16 tim_idx;
+	u8 tim_size;
+	u8 reserved1;
+	struct ieee80211_hdr frame[0];	/* beacon frame */
+} __packed;
+
+/******************************************************************************
+ * (10)
+ * Statistics Commands and Notifications:
+ *
+ *****************************************************************************/
+
+#define IWL_TEMP_CONVERT 260
+
+#define SUP_RATE_11A_MAX_NUM_CHANNELS  8
+#define SUP_RATE_11B_MAX_NUM_CHANNELS  4
+#define SUP_RATE_11G_MAX_NUM_CHANNELS  12
+
+/* Used to pass the driver the number of successes and failures per rate */
+struct rate_histogram {
+	union {
+		__le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
+		__le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
+		__le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
+	} success;
+	union {
+		__le32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
+		__le32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
+		__le32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
+	} failed;
+} __packed;
+
+/* statistics command response */
+
+struct statistics_dbg {
+	__le32 burst_check;
+	__le32 burst_count;
+	__le32 wait_for_silence_timeout_cnt;
+	__le32 reserved[3];
+} __packed;
+
+struct statistics_rx_phy {
+	__le32 ina_cnt;
+	__le32 fina_cnt;
+	__le32 plcp_err;
+	__le32 crc32_err;
+	__le32 overrun_err;
+	__le32 early_overrun_err;
+	__le32 crc32_good;
+	__le32 false_alarm_cnt;
+	__le32 fina_sync_err_cnt;
+	__le32 sfd_timeout;
+	__le32 fina_timeout;
+	__le32 unresponded_rts;
+	__le32 rxe_frame_limit_overrun;
+	__le32 sent_ack_cnt;
+	__le32 sent_cts_cnt;
+	__le32 sent_ba_rsp_cnt;
+	__le32 dsp_self_kill;
+	__le32 mh_format_err;
+	__le32 re_acq_main_rssi_sum;
+	__le32 reserved3;
+} __packed;
+
+struct statistics_rx_ht_phy {
+	__le32 plcp_err;
+	__le32 overrun_err;
+	__le32 early_overrun_err;
+	__le32 crc32_good;
+	__le32 crc32_err;
+	__le32 mh_format_err;
+	__le32 agg_crc32_good;
+	__le32 agg_mpdu_cnt;
+	__le32 agg_cnt;
+	__le32 unsupport_mcs;
+} __packed;
+
+#define INTERFERENCE_DATA_AVAILABLE      cpu_to_le32(1)
+
+struct statistics_rx_non_phy {
+	__le32 bogus_cts;	/* CTS received when not expecting CTS */
+	__le32 bogus_ack;	/* ACK received when not expecting ACK */
+	__le32 non_bssid_frames;	/* number of frames whose BSSID does
+					 * not match the STA's BSSID */
+	__le32 filtered_frames;	/* count frames that were dumped in the
+				 * filtering process */
+	__le32 non_channel_beacons;	/* beacons with our bss id but not on
+					 * our serving channel */
+	__le32 channel_beacons;	/* beacons with our bss id and in our
+				 * serving channel */
+	__le32 num_missed_bcon;	/* number of missed beacons */
+	__le32 adc_rx_saturation_time;	/* count in 0.8us units the time the
+					 * ADC was in saturation */
+	__le32 ina_detection_search_time;/* total time (in 0.8us) searched
+					  * for INA */
+	__le32 beacon_silence_rssi_a;	/* RSSI silence after beacon frame */
+	__le32 beacon_silence_rssi_b;	/* RSSI silence after beacon frame */
+	__le32 beacon_silence_rssi_c;	/* RSSI silence after beacon frame */
+	__le32 interference_data_flag;	/* flag for interference data
+					 * availability. 1 when data is
+					 * available. */
+	__le32 channel_load;		/* counts RX Enable time in uSec */
+	__le32 dsp_false_alarms;	/* DSP false alarm (both OFDM
+					 * and CCK) counter */
+	__le32 beacon_rssi_a;
+	__le32 beacon_rssi_b;
+	__le32 beacon_rssi_c;
+	__le32 beacon_energy_a;
+	__le32 beacon_energy_b;
+	__le32 beacon_energy_c;
+} __packed;
+
+struct statistics_rx_non_phy_bt {
+	struct statistics_rx_non_phy common;
+	/* additional stats for bt */
+	__le32 num_bt_kills;
+	__le32 reserved[2];
+} __packed;
+
+struct statistics_rx {
+	struct statistics_rx_phy ofdm;
+	struct statistics_rx_phy cck;
+	struct statistics_rx_non_phy general;
+	struct statistics_rx_ht_phy ofdm_ht;
+} __packed;
+
+struct statistics_rx_bt {
+	struct statistics_rx_phy ofdm;
+	struct statistics_rx_phy cck;
+	struct statistics_rx_non_phy_bt general;
+	struct statistics_rx_ht_phy ofdm_ht;
+} __packed;
+
+/**
+ * struct statistics_tx_power - current tx power
+ *
+ * @ant_a: current tx power on chain a in 1/2 dB step
+ * @ant_b: current tx power on chain b in 1/2 dB step
+ * @ant_c: current tx power on chain c in 1/2 dB step
+ */
+struct statistics_tx_power {
+	u8 ant_a;
+	u8 ant_b;
+	u8 ant_c;
+	u8 reserved;
+} __packed;
+
+struct statistics_tx_non_phy_agg {
+	__le32 ba_timeout;
+	__le32 ba_reschedule_frames;
+	__le32 scd_query_agg_frame_cnt;
+	__le32 scd_query_no_agg;
+	__le32 scd_query_agg;
+	__le32 scd_query_mismatch;
+	__le32 frame_not_ready;
+	__le32 underrun;
+	__le32 bt_prio_kill;
+	__le32 rx_ba_rsp_cnt;
+} __packed;
+
+struct statistics_tx {
+	__le32 preamble_cnt;
+	__le32 rx_detected_cnt;
+	__le32 bt_prio_defer_cnt;
+	__le32 bt_prio_kill_cnt;
+	__le32 few_bytes_cnt;
+	__le32 cts_timeout;
+	__le32 ack_timeout;
+	__le32 expected_ack_cnt;
+	__le32 actual_ack_cnt;
+	__le32 dump_msdu_cnt;
+	__le32 burst_abort_next_frame_mismatch_cnt;
+	__le32 burst_abort_missing_next_frame_cnt;
+	__le32 cts_timeout_collision;
+	__le32 ack_or_ba_timeout_collision;
+	struct statistics_tx_non_phy_agg agg;
+	/*
+	 * "tx_power" is an optional parameter provided by uCode.
+	 * The 6000 series is the only device family that provides
+	 * this information; these are reserved fields on all other
+	 * devices.
+	 */
+	struct statistics_tx_power tx_power;
+	__le32 reserved1;
+} __packed;
+
+
+struct statistics_div {
+	__le32 tx_on_a;
+	__le32 tx_on_b;
+	__le32 exec_time;
+	__le32 probe_time;
+	__le32 reserved1;
+	__le32 reserved2;
+} __packed;
+
+struct statistics_general_common {
+	__le32 temperature;   /* radio temperature */
+	__le32 temperature_m; /* radio voltage */
+	struct statistics_dbg dbg;
+	__le32 sleep_time;
+	__le32 slots_out;
+	__le32 slots_idle;
+	__le32 ttl_timestamp;
+	struct statistics_div div;
+	__le32 rx_enable_counter;
+	/*
+	 * num_of_sos_states:
+	 *  count the number of times we have to re-tune
+	 *  in order to get out of bad PHY status
+	 */
+	__le32 num_of_sos_states;
+} __packed;
+
+struct statistics_bt_activity {
+	/* Tx statistics */
+	__le32 hi_priority_tx_req_cnt;
+	__le32 hi_priority_tx_denied_cnt;
+	__le32 lo_priority_tx_req_cnt;
+	__le32 lo_priority_tx_denied_cnt;
+	/* Rx statistics */
+	__le32 hi_priority_rx_req_cnt;
+	__le32 hi_priority_rx_denied_cnt;
+	__le32 lo_priority_rx_req_cnt;
+	__le32 lo_priority_rx_denied_cnt;
+} __packed;
+
+struct statistics_general {
+	struct statistics_general_common common;
+	__le32 reserved2;
+	__le32 reserved3;
+} __packed;
+
+struct statistics_general_bt {
+	struct statistics_general_common common;
+	struct statistics_bt_activity activity;
+	__le32 reserved2;
+	__le32 reserved3;
+} __packed;
+
+#define UCODE_STATISTICS_CLEAR_MSK		(0x1 << 0)
+#define UCODE_STATISTICS_FREQUENCY_MSK		(0x1 << 1)
+#define UCODE_STATISTICS_NARROW_BAND_MSK	(0x1 << 2)
+
+/*
+ * REPLY_STATISTICS_CMD = 0x9c,
+ * all devices identical.
+ *
+ * This command triggers an immediate response containing uCode statistics.
+ * The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below.
+ *
+ * If the CLEAR_STATS configuration flag is set, uCode will clear its
+ * internal copy of the statistics (counters) after issuing the response.
+ * This flag does not affect STATISTICS_NOTIFICATIONs after beacons (see below).
+ *
+ * If the DISABLE_NOTIF configuration flag is set, uCode will not issue
+ * STATISTICS_NOTIFICATIONs after received beacons (see below).  This flag
+ * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself.
+ */
+#define IWL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1)	/* see above */
+#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */
+struct iwl_statistics_cmd {
+	__le32 configuration_flags;	/* IWL_STATS_CONF_* */
+} __packed;
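+
+/*
+ * Illustrative sketch (not part of the original header): requesting an
+ * immediate statistics dump, optionally clearing the uCode counters
+ * after the response.
+ */
+static inline void
+iwl_example_fill_statistics_cmd(struct iwl_statistics_cmd *cmd, bool clear)
+{
+	cmd->configuration_flags = clear ? IWL_STATS_CONF_CLEAR_STATS :
+					   cpu_to_le32(0);
+}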
+
+/*
+ * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
+ *
+ * By default, uCode issues this notification after receiving a beacon
+ * while associated.  To disable this behavior, set DISABLE_NOTIF flag in the
+ * REPLY_STATISTICS_CMD 0x9c, above.
+ *
+ * Statistics counters continue to increment beacon after beacon, but are
+ * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD
+ * 0x9c with CLEAR_STATS bit set (see above).
+ *
+ * uCode also issues this notification during scans.  uCode clears statistics
+ * appropriately so that each notification contains statistics for only the
+ * one channel that has just been scanned.
+ */
+#define STATISTICS_REPLY_FLG_BAND_24G_MSK         cpu_to_le32(0x2)
+#define STATISTICS_REPLY_FLG_HT40_MODE_MSK        cpu_to_le32(0x8)
+
+struct iwl_notif_statistics {
+	__le32 flag;
+	struct statistics_rx rx;
+	struct statistics_tx tx;
+	struct statistics_general general;
+} __packed;
+
+struct iwl_bt_notif_statistics {
+	__le32 flag;
+	struct statistics_rx_bt rx;
+	struct statistics_tx tx;
+	struct statistics_general_bt general;
+} __packed;
+
+/*
+ * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command)
+ *
+ * uCode sends MISSED_BEACONS_NOTIFICATION to the driver whenever it
+ * detects missed beacons, regardless of how many.  The notification
+ * carries the full beacon accounting: total missed beacons, consecutive
+ * missed beacons, beacons received, and beacons expected.
+ *
+ * If uCode detects consecutive_missed_beacons > 5, it resets the radio
+ * to bring the radio/PHY back to a working state; this is unrelated to
+ * when the driver performs sensitivity calibration.
+ *
+ * The driver should set its own missed_beacon_threshold to decide when
+ * to perform sensitivity calibration based on the number of consecutive
+ * missed beacons, to improve overall performance, especially in noisy
+ * environments.
+ */
+
+#define IWL_MISSED_BEACON_THRESHOLD_MIN	(1)
+#define IWL_MISSED_BEACON_THRESHOLD_DEF	(5)
+#define IWL_MISSED_BEACON_THRESHOLD_MAX	IWL_MISSED_BEACON_THRESHOLD_DEF
+
+struct iwl_missed_beacon_notif {
+	__le32 consecutive_missed_beacons;
+	__le32 total_missed_becons;
+	__le32 num_expected_beacons;
+	__le32 num_recvd_beacons;
+} __packed;
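+
+/*
+ * Illustrative sketch (not part of the original header): deciding from a
+ * missed-beacon notification whether the driver's own threshold (see the
+ * note above) calls for sensitivity recalibration.
+ */
+static inline bool
+iwl_example_missed_beacon_recalib(const struct iwl_missed_beacon_notif *n,
+				  u32 threshold)
+{
+	return le32_to_cpu(n->consecutive_missed_beacons) > threshold;
+}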
+
+
+/******************************************************************************
+ * (11)
+ * Rx Calibration Commands:
+ *
+ * With the uCode used for open source drivers, most Tx calibration (except
+ * for Tx Power) and most Rx calibration is done by uCode during the
+ * "initialize" phase of uCode boot.  Driver must calibrate only:
+ *
+ * 1)  Tx power (depends on temperature), described elsewhere
+ * 2)  Receiver gain balance (optimize MIMO, and detect disconnected antennas)
+ * 3)  Receiver sensitivity (to optimize signal detection)
+ *
+ *****************************************************************************/
+
+/**
+ * SENSITIVITY_CMD = 0xa8 (command, has simple generic response)
+ *
+ * This command sets up the Rx signal detector for a sensitivity level that
+ * is high enough to lock onto all signals within the associated network,
+ * but low enough to ignore signals that are below a certain threshold, so as
+ * not to have too many "false alarms".  False alarms are signals that the
+ * Rx DSP tries to lock onto, but then discards after determining that they
+ * are noise.
+ *
+ * The optimum number of false alarms is between 5 and 50 per 200 TUs
+ * (200 * 1024 uSecs, i.e. 204.8 milliseconds) of actual Rx time (i.e.
+ * time listening, not transmitting).  Driver must adjust sensitivity so that
+ * the ratio of actual false alarms to actual Rx time falls within this range.
+ *
+ * While associated, uCode delivers STATISTICS_NOTIFICATIONs after each
+ * received beacon.  These provide information to the driver to analyze the
+ * sensitivity.  Don't analyze statistics that come in from scanning, or any
+ * other non-associated-network source.  Pertinent statistics include:
+ *
+ * From "general" statistics (struct statistics_rx_non_phy):
+ *
+ * (beacon_energy_[abc] & 0x0FF00) >> 8 (unsigned, higher value is lower level)
+ *   Measure of energy of desired signal.  Used for establishing a level
+ *   below which the device does not detect signals.
+ *
+ * (beacon_silence_rssi_[abc] & 0x0FF00) >> 8 (unsigned, units in dB)
+ *   Measure of background noise in silent period after beacon.
+ *
+ * channel_load
+ *   uSecs of actual Rx time during beacon period (varies according to
+ *   how much time was spent transmitting).
+ *
+ * From "cck" and "ofdm" statistics (struct statistics_rx_phy), separately:
+ *
+ * false_alarm_cnt
+ *   Signal locks abandoned early (before phy-level header).
+ *
+ * plcp_err
+ *   Signal locks abandoned late (during phy-level header).
+ *
+ * NOTE:  Both false_alarm_cnt and plcp_err increment monotonically from
+ *        beacon to beacon, i.e. each value is an accumulation of all errors
+ *        before and including the latest beacon.  Values will wrap around to 0
+ *        after counting up to 2^32 - 1.  Driver must differentiate vs.
+ *        previous beacon's values to determine # false alarms in the current
+ *        beacon period.
+ *
+ * Total number of false alarms = false_alarm_cnt + plcp_err
+ *
+ * For OFDM, adjust the following table entries in struct iwl_sensitivity_cmd
+ * (notice that the start points for OFDM are at or close to settings for
+ * maximum sensitivity):
+ *
+ *                                             START  /  MIN  /  MAX
+ *   HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX          90   /   85  /  120
+ *   HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX     170   /  170  /  210
+ *   HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX         105   /  105  /  140
+ *   HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX     220   /  220  /  270
+ *
+ *   If actual rate of OFDM false alarms (+ plcp_errors) is too high
+ *   (greater than 50 for each 204.8 msecs listening), reduce sensitivity
+ *   by *adding* 1 to all 4 of the table entries above, up to the max for
+ *   each entry.  Conversely, if false alarm rate is too low (less than 5
+ *   for each 204.8 msecs listening), *subtract* 1 from each entry to
+ *   increase sensitivity.
+ *
+ * For CCK sensitivity, keep track of the following:
+ *
+ *   1).  20-beacon history of maximum background noise, indicated by
+ *        (beacon_silence_rssi_[abc] & 0x0FF00), units in dB, across the
+ *        3 receivers.  For any given beacon, the "silence reference" is
+ *        the maximum of last 60 samples (20 beacons * 3 receivers).
+ *
+ *   2).  10-beacon history of strongest signal level, as indicated
+ *        by (beacon_energy_[abc] & 0x0FF00) >> 8, across the 3 receivers,
+ *        i.e. the strength of the signal through the best receiver at the
+ *        moment.  These measurements are "upside down", with lower values
+ *        for stronger signals, so max energy will be *minimum* value.
+ *
+ *        Then for any given beacon, the driver must determine the *weakest*
+ *        of the strongest signals; this is the minimum level that needs to be
+ *        successfully detected, when using the best receiver at the moment.
+ *        "Max cck energy" is the maximum (higher value means lower energy!)
+ *        of the last 10 minima.  Once this is determined, driver must add
+ *        a little margin by adding "6" to it.
+ *
+ *   3).  Number of consecutive beacon periods with too few false alarms.
+ *        Reset this to 0 at the first beacon period that falls within the
+ *        "good" range (5 to 50 false alarms per 204.8 milliseconds rx).
+ *
+ * Then, adjust the following CCK table entries in struct iwl_sensitivity_cmd
+ * (notice that the start points for CCK are at maximum sensitivity):
+ *
+ *                                             START  /  MIN  /  MAX
+ *   HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX         125   /  125  /  200
+ *   HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX     200   /  200  /  400
+ *   HD_MIN_ENERGY_CCK_DET_INDEX                100   /    0  /  100
+ *
+ *   If actual rate of CCK false alarms (+ plcp_errors) is too high
+ *   (greater than 50 for each 204.8 msecs listening), method for reducing
+ *   sensitivity is:
+ *
+ *   1)  *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
+ *       up to max 400.
+ *
+ *   2)  If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is < 160,
+ *       sensitivity has been reduced a significant amount; bring it up to
+ *       a moderate 161.  Otherwise, *add* 3, up to max 200.
+ *
+ *   3)  a)  If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is > 160,
+ *       sensitivity has been reduced only a moderate or small amount;
+ *       *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_INDEX,
+ *       down to min 0.  Otherwise (if gain has been significantly reduced),
+ *       don't change the HD_MIN_ENERGY_CCK_DET_INDEX value.
+ *
+ *       b)  Save a snapshot of the "silence reference".
+ *
+ *   If actual rate of CCK false alarms (+ plcp_errors) is too low
+ *   (less than 5 for each 204.8 msecs listening), method for increasing
+ *   sensitivity is used only if:
+ *
+ *   1a)  Previous beacon did not have too many false alarms
+ *   1b)  AND difference between previous "silence reference" and current
+ *        "silence reference" (prev - current) is 2 or more,
+ *   OR 2)  100 or more consecutive beacon periods have had rate of
+ *          less than 5 false alarms per 204.8 milliseconds rx time.
+ *
+ *   Method for increasing sensitivity:
+ *
+ *   1)  *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX,
+ *       down to min 125.
+ *
+ *   2)  *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
+ *       down to min 200.
+ *
+ *   3)  *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_INDEX, up to max 100.
+ *
+ *   If actual rate of CCK false alarms (+ plcp_errors) is within good range
+ *   (between 5 and 50 for each 204.8 msecs listening):
+ *
+ *   1)  Save a snapshot of the silence reference.
+ *
+ *   2)  If previous beacon had too many CCK false alarms (+ plcp_errors),
+ *       give some extra margin to energy threshold by *subtracting* 8
+ *       from value in HD_MIN_ENERGY_CCK_DET_INDEX.
+ *
+ *   For all cases (too few, too many, good range), make sure that the CCK
+ *   detection threshold (energy) is below the energy level for robust
+ *   detection over the past 10 beacon periods, the "Max cck energy".
+ *   Lower values mean higher energy; this means making sure that the value
+ *   in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy".
+ *
+ */
+
+/*
+ * Table entries in SENSITIVITY_CMD (struct iwl_sensitivity_cmd)
+ */
+#define HD_TABLE_SIZE  (11)	/* number of entries */
+#define HD_MIN_ENERGY_CCK_DET_INDEX                 (0)	/* table indexes */
+#define HD_MIN_ENERGY_OFDM_DET_INDEX                (1)
+#define HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX          (2)
+#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX      (3)
+#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX      (4)
+#define HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX          (5)
+#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX      (6)
+#define HD_BARKER_CORR_TH_ADD_MIN_INDEX             (7)
+#define HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX         (8)
+#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX          (9)
+#define HD_OFDM_ENERGY_TH_IN_INDEX                  (10)
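+
+/*
+ * Illustrative sketch (not part of the original header): the OFDM
+ * adjustment loop described above.  "table" is a host-endian working
+ * copy later sent via struct iwl_sensitivity_cmd; false_alarms is the
+ * per-beacon-period delta of false_alarm_cnt + plcp_err (the counters
+ * accumulate, so subtract the previous beacon's values -- u32 arithmetic
+ * handles wraparound).  A real driver normalizes the counts to actual
+ * Rx time; the 5/50 bounds below assume 204.8 msec of listening.
+ */
+static inline void iwl_example_adjust_ofdm(u16 *table, u32 false_alarms)
+{
+	static const u8 idx[4] = {
+		HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX,
+		HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX,
+		HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX,
+		HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX,
+	};
+	static const u16 min[4] = {  85, 170, 105, 220 };
+	static const u16 max[4] = { 120, 210, 140, 270 };
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		if (false_alarms > 50 && table[idx[i]] < max[i])
+			table[idx[i]]++;	/* too many: reduce sensitivity */
+		else if (false_alarms < 5 && table[idx[i]] > min[i])
+			table[idx[i]]--;	/* too few: increase sensitivity */
+	}
+}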
+
+/*
+ * Additional table entries in the enhanced SENSITIVITY_CMD
+ */
+#define HD_INA_NON_SQUARE_DET_OFDM_INDEX		(11)
+#define HD_INA_NON_SQUARE_DET_CCK_INDEX			(12)
+#define HD_CORR_11_INSTEAD_OF_CORR_9_EN_INDEX		(13)
+#define HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_INDEX		(14)
+#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_INDEX	(15)
+#define HD_OFDM_NON_SQUARE_DET_SLOPE_INDEX		(16)
+#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_INDEX		(17)
+#define HD_CCK_NON_SQUARE_DET_SLOPE_MRC_INDEX		(18)
+#define HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_INDEX	(19)
+#define HD_CCK_NON_SQUARE_DET_SLOPE_INDEX		(20)
+#define HD_CCK_NON_SQUARE_DET_INTERCEPT_INDEX		(21)
+#define HD_RESERVED					(22)
+
+/* number of entries for enhanced tbl */
+#define ENHANCE_HD_TABLE_SIZE  (23)
+
+/* number of additional entries for enhanced tbl */
+#define ENHANCE_HD_TABLE_ENTRIES  (ENHANCE_HD_TABLE_SIZE - HD_TABLE_SIZE)
+
+#define HD_INA_NON_SQUARE_DET_OFDM_DATA_V1		cpu_to_le16(0)
+#define HD_INA_NON_SQUARE_DET_CCK_DATA_V1		cpu_to_le16(0)
+#define HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA_V1		cpu_to_le16(0)
+#define HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA_V1	cpu_to_le16(668)
+#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V1	cpu_to_le16(4)
+#define HD_OFDM_NON_SQUARE_DET_SLOPE_DATA_V1		cpu_to_le16(486)
+#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA_V1	cpu_to_le16(37)
+#define HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA_V1		cpu_to_le16(853)
+#define HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V1	cpu_to_le16(4)
+#define HD_CCK_NON_SQUARE_DET_SLOPE_DATA_V1		cpu_to_le16(476)
+#define HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA_V1		cpu_to_le16(99)
+
+#define HD_INA_NON_SQUARE_DET_OFDM_DATA_V2		cpu_to_le16(1)
+#define HD_INA_NON_SQUARE_DET_CCK_DATA_V2		cpu_to_le16(1)
+#define HD_CORR_11_INSTEAD_OF_CORR_9_EN_DATA_V2		cpu_to_le16(1)
+#define HD_OFDM_NON_SQUARE_DET_SLOPE_MRC_DATA_V2	cpu_to_le16(600)
+#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V2	cpu_to_le16(40)
+#define HD_OFDM_NON_SQUARE_DET_SLOPE_DATA_V2		cpu_to_le16(486)
+#define HD_OFDM_NON_SQUARE_DET_INTERCEPT_DATA_V2	cpu_to_le16(45)
+#define HD_CCK_NON_SQUARE_DET_SLOPE_MRC_DATA_V2		cpu_to_le16(853)
+#define HD_CCK_NON_SQUARE_DET_INTERCEPT_MRC_DATA_V2	cpu_to_le16(60)
+#define HD_CCK_NON_SQUARE_DET_SLOPE_DATA_V2		cpu_to_le16(476)
+#define HD_CCK_NON_SQUARE_DET_INTERCEPT_DATA_V2		cpu_to_le16(99)
+
+
+/* Control field in struct iwl_sensitivity_cmd */
+#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE	cpu_to_le16(0)
+#define SENSITIVITY_CMD_CONTROL_WORK_TABLE	cpu_to_le16(1)
+
+/**
+ * struct iwl_sensitivity_cmd
+ * @control:  (1) updates working table, (0) updates default table
+ * @table:  energy threshold values, use HD_* as index into table
+ *
+ * Always use "1" in "control" to update uCode's working table and DSP.
+ */
+struct iwl_sensitivity_cmd {
+	__le16 control;			/* always use "1" */
+	__le16 table[HD_TABLE_SIZE];	/* use HD_* as index */
+} __packed;
+
+/*
+ * SENSITIVITY_CMD variant carrying the enhanced table
+ * (ENHANCE_HD_TABLE_SIZE entries; see additional HD_* indexes above).
+ */
+struct iwl_enhance_sensitivity_cmd {
+	__le16 control;			/* always use "1" */
+	__le16 enhance_table[ENHANCE_HD_TABLE_SIZE];	/* use HD_* as index */
+} __packed;
+
+
+/**
+ * REPLY_PHY_CALIBRATION_CMD = 0xb0 (command, has simple generic response)
+ *
+ * This command sets the relative gains of the agn device's 3 radio receiver chains.
+ *
+ * After the first association, driver should accumulate signal and noise
+ * statistics from the STATISTICS_NOTIFICATIONs that follow the first 20
+ * beacons from the associated network (don't collect statistics that come
+ * in from scanning, or any other non-network source).
+ *
+ * DISCONNECTED ANTENNA:
+ *
+ * Driver should determine which antennas are actually connected, by comparing
+ * average beacon signal levels for the 3 Rx chains.  Accumulate (add) the
+ * following values over 20 beacons, one accumulator for each of the chains
+ * a/b/c, from struct statistics_rx_non_phy:
+ *
+ * beacon_rssi_[abc] & 0x0FF (unsigned, units in dB)
+ *
+ * Find the strongest signal from among a/b/c.  Compare the other two to the
+ * strongest.  If any signal is more than 15 dB (times 20, unless you
+ * divide the accumulated values by 20) below the strongest, the driver
+ * considers that antenna to be disconnected, and should not try to use that
+ * antenna/chain for Rx or Tx.  If both A and B seem to be disconnected,
+ * driver should declare the stronger one as connected, and attempt to use it
+ * (A and B are the only 2 Tx chains!).
+ *
+ *
+ * RX BALANCE:
+ *
+ * Driver should balance the 3 receivers (but just the ones that are connected
+ * to antennas, see above) for gain, by comparing the average signal levels
+ * detected during the silence after each beacon (background noise).
+ * Accumulate (add) the following values over 20 beacons, one accumulator for
+ * each of the chains a/b/c, from struct statistics_rx_non_phy:
+ *
+ * beacon_silence_rssi_[abc] & 0x0FF (unsigned, units in dB)
+ *
+ * Find the weakest background noise level from among a/b/c.  This Rx chain
+ * will be the reference, with 0 gain adjustment.  Attenuate the other
+ * chains by finding the noise difference:
+ *
+ * (accum_noise[i] - accum_noise[reference]) / 30
+ *
+ * The "30" adjusts the dB in the 20 accumulated samples to units of 1.5 dB.
+ * For use in diff_gain_[abc] fields of struct iwl_calibration_cmd, the
+ * driver should limit the difference results to a range of 0-3 (0-4.5 dB),
+ * and set bit 2 to indicate "reduce gain".  The value for the reference
+ * (weakest) chain should be "0".
+ *
+ * diff_gain_[abc] bit fields:
+ *   2: (1) reduce gain, (0) increase gain
+ * 1-0: amount of gain, units of 1.5 dB
+ */
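+
+/*
+ * Illustrative sketch (not part of the original header): encoding one
+ * diff_gain_[abc] value from 20-beacon noise accumulators, per the RX
+ * BALANCE procedure above.  Bit 2 set means "reduce gain"; bits 1-0
+ * give the amount in 1.5 dB units, limited to 0-3 (0-4.5 dB).
+ */
+static inline u8 iwl_example_diff_gain(u32 accum_noise, u32 accum_ref)
+{
+	u32 delta;
+
+	if (accum_noise <= accum_ref)
+		return 0;	/* reference (weakest) chain: no adjustment */
+	delta = (accum_noise - accum_ref) / 30;
+	if (delta > 3)
+		delta = 3;
+	return BIT(2) | delta;	/* reduce gain by delta * 1.5 dB */
+}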
+
+/* PHY calibration command op-codes for agn series devices */
+enum {
+	IWL_PHY_CALIBRATE_DC_CMD		= 8,
+	IWL_PHY_CALIBRATE_LO_CMD		= 9,
+	IWL_PHY_CALIBRATE_TX_IQ_CMD		= 11,
+	IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD	= 15,
+	IWL_PHY_CALIBRATE_BASE_BAND_CMD		= 16,
+	IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD	= 17,
+	IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD	= 18,
+};
+
+/* This enum defines the bitmap of various calibrations to enable in both
+ * init ucode and runtime ucode through CALIBRATION_CFG_CMD.
+ */
+enum iwl_ucode_calib_cfg {
+	IWL_CALIB_CFG_RX_BB_IDX			= BIT(0),
+	IWL_CALIB_CFG_DC_IDX			= BIT(1),
+	IWL_CALIB_CFG_LO_IDX			= BIT(2),
+	IWL_CALIB_CFG_TX_IQ_IDX			= BIT(3),
+	IWL_CALIB_CFG_RX_IQ_IDX			= BIT(4),
+	IWL_CALIB_CFG_NOISE_IDX			= BIT(5),
+	IWL_CALIB_CFG_CRYSTAL_IDX		= BIT(6),
+	IWL_CALIB_CFG_TEMPERATURE_IDX		= BIT(7),
+	IWL_CALIB_CFG_PAPD_IDX			= BIT(8),
+	IWL_CALIB_CFG_SENSITIVITY_IDX		= BIT(9),
+	IWL_CALIB_CFG_TX_PWR_IDX		= BIT(10),
+};
+
+#define IWL_CALIB_INIT_CFG_ALL	cpu_to_le32(IWL_CALIB_CFG_RX_BB_IDX |	\
+					IWL_CALIB_CFG_DC_IDX |		\
+					IWL_CALIB_CFG_LO_IDX |		\
+					IWL_CALIB_CFG_TX_IQ_IDX |	\
+					IWL_CALIB_CFG_RX_IQ_IDX |	\
+					IWL_CALIB_CFG_CRYSTAL_IDX)
+
+#define IWL_CALIB_RT_CFG_ALL	cpu_to_le32(IWL_CALIB_CFG_RX_BB_IDX |	\
+					IWL_CALIB_CFG_DC_IDX |		\
+					IWL_CALIB_CFG_LO_IDX |		\
+					IWL_CALIB_CFG_TX_IQ_IDX |	\
+					IWL_CALIB_CFG_RX_IQ_IDX |	\
+					IWL_CALIB_CFG_TEMPERATURE_IDX |	\
+					IWL_CALIB_CFG_PAPD_IDX |	\
+					IWL_CALIB_CFG_TX_PWR_IDX |	\
+					IWL_CALIB_CFG_CRYSTAL_IDX)
+
+#define IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK	cpu_to_le32(BIT(0))
+
+struct iwl_calib_cfg_elmnt_s {
+	__le32 is_enable;
+	__le32 start;
+	__le32 send_res;
+	__le32 apply_res;
+	__le32 reserved;
+} __packed;
+
+struct iwl_calib_cfg_status_s {
+	struct iwl_calib_cfg_elmnt_s once;
+	struct iwl_calib_cfg_elmnt_s perd;
+	__le32 flags;
+} __packed;
+
+struct iwl_calib_cfg_cmd {
+	struct iwl_calib_cfg_status_s ucd_calib_cfg;
+	struct iwl_calib_cfg_status_s drv_calib_cfg;
+	__le32 reserved1;
+} __packed;
+
+struct iwl_calib_hdr {
+	u8 op_code;
+	u8 first_group;
+	u8 groups_num;
+	u8 data_valid;
+} __packed;
+
+struct iwl_calib_cmd {
+	struct iwl_calib_hdr hdr;
+	u8 data[0];
+} __packed;
+
+struct iwl_calib_xtal_freq_cmd {
+	struct iwl_calib_hdr hdr;
+	u8 cap_pin1;
+	u8 cap_pin2;
+	u8 pad[2];
+} __packed;
+
+#define DEFAULT_RADIO_SENSOR_OFFSET    cpu_to_le16(2700)
+struct iwl_calib_temperature_offset_cmd {
+	struct iwl_calib_hdr hdr;
+	__le16 radio_sensor_offset;
+	__le16 reserved;
+} __packed;
+
+struct iwl_calib_temperature_offset_v2_cmd {
+	struct iwl_calib_hdr hdr;
+	__le16 radio_sensor_offset_high;
+	__le16 radio_sensor_offset_low;
+	__le16 burntVoltageRef;
+	__le16 reserved;
+} __packed;
+
+/* IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD */
+struct iwl_calib_chain_noise_reset_cmd {
+	struct iwl_calib_hdr hdr;
+	u8 data[0];
+} __packed;
+
+/* IWL_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD */
+struct iwl_calib_chain_noise_gain_cmd {
+	struct iwl_calib_hdr hdr;
+	u8 delta_gain_1;
+	u8 delta_gain_2;
+	u8 pad[2];
+} __packed;
+
+/******************************************************************************
+ * (12)
+ * Miscellaneous Commands:
+ *
+ *****************************************************************************/
+
+/*
+ * LEDs Command & Response
+ * REPLY_LEDS_CMD = 0x48 (command, has simple generic response)
+ *
+ * For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field),
+ * this command turns it on or off, or sets up a periodic blinking cycle.
+ */
+struct iwl_led_cmd {
+	__le32 interval;	/* "interval" in uSec */
+	u8 id;			/* 1: Activity, 2: Link, 3: Tech */
+	u8 off;			/* # intervals off while blinking;
+				 * "0", with >0 "on" value, turns LED on */
+	u8 on;			/* # intervals on while blinking;
+				 * "0", regardless of "off", turns LED off */
+	u8 reserved;
+} __packed;
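+
+/*
+ * Illustrative sketch (not part of the original header): blinking the
+ * Activity LED with a 50% duty cycle.  All values are examples only.
+ */
+static inline void iwl_example_led_blink(struct iwl_led_cmd *cmd)
+{
+	cmd->interval = cpu_to_le32(1000);	/* example interval, usec */
+	cmd->id = 1;				/* 1 = Activity LED */
+	cmd->on = 5;				/* 5 intervals on ... */
+	cmd->off = 5;				/* ... then 5 intervals off */
+	cmd->reserved = 0;
+}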
+
+/*
+ * station priority table entries
+ * also used as potential "events" value for both
+ * COEX_MEDIUM_NOTIFICATION and COEX_EVENT_CMD
+ */
+
+/*
+ * COEX events entry flag masks
+ * RP - Requested Priority
+ * WP - Win Medium Priority: priority assigned when the contention has been won
+ */
+#define COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG        (0x1)
+#define COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG        (0x2)
+#define COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG  (0x4)
+
+#define COEX_CU_UNASSOC_IDLE_RP               4
+#define COEX_CU_UNASSOC_MANUAL_SCAN_RP        4
+#define COEX_CU_UNASSOC_AUTO_SCAN_RP          4
+#define COEX_CU_CALIBRATION_RP                4
+#define COEX_CU_PERIODIC_CALIBRATION_RP       4
+#define COEX_CU_CONNECTION_ESTAB_RP           4
+#define COEX_CU_ASSOCIATED_IDLE_RP            4
+#define COEX_CU_ASSOC_MANUAL_SCAN_RP          4
+#define COEX_CU_ASSOC_AUTO_SCAN_RP            4
+#define COEX_CU_ASSOC_ACTIVE_LEVEL_RP         4
+#define COEX_CU_RF_ON_RP                      6
+#define COEX_CU_RF_OFF_RP                     4
+#define COEX_CU_STAND_ALONE_DEBUG_RP          6
+#define COEX_CU_IPAN_ASSOC_LEVEL_RP           4
+#define COEX_CU_RSRVD1_RP                     4
+#define COEX_CU_RSRVD2_RP                     4
+
+#define COEX_CU_UNASSOC_IDLE_WP               3
+#define COEX_CU_UNASSOC_MANUAL_SCAN_WP        3
+#define COEX_CU_UNASSOC_AUTO_SCAN_WP          3
+#define COEX_CU_CALIBRATION_WP                3
+#define COEX_CU_PERIODIC_CALIBRATION_WP       3
+#define COEX_CU_CONNECTION_ESTAB_WP           3
+#define COEX_CU_ASSOCIATED_IDLE_WP            3
+#define COEX_CU_ASSOC_MANUAL_SCAN_WP          3
+#define COEX_CU_ASSOC_AUTO_SCAN_WP            3
+#define COEX_CU_ASSOC_ACTIVE_LEVEL_WP         3
+#define COEX_CU_RF_ON_WP                      3
+#define COEX_CU_RF_OFF_WP                     3
+#define COEX_CU_STAND_ALONE_DEBUG_WP          6
+#define COEX_CU_IPAN_ASSOC_LEVEL_WP           3
+#define COEX_CU_RSRVD1_WP                     3
+#define COEX_CU_RSRVD2_WP                     3
+
+#define COEX_UNASSOC_IDLE_FLAGS                     0
+#define COEX_UNASSOC_MANUAL_SCAN_FLAGS		\
+	(COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG |	\
+	COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
+#define COEX_UNASSOC_AUTO_SCAN_FLAGS		\
+	(COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG |	\
+	COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
+#define COEX_CALIBRATION_FLAGS			\
+	(COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG |	\
+	COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
+#define COEX_PERIODIC_CALIBRATION_FLAGS             0
+/*
+ * COEX_CONNECTION_ESTAB:
+ * we need DELAY_MEDIUM_FREE_NTFY to let WiMAX disconnect from network.
+ */
+#define COEX_CONNECTION_ESTAB_FLAGS		\
+	(COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG |	\
+	COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG |	\
+	COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG)
+#define COEX_ASSOCIATED_IDLE_FLAGS                  0
+#define COEX_ASSOC_MANUAL_SCAN_FLAGS		\
+	(COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG |	\
+	COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
+#define COEX_ASSOC_AUTO_SCAN_FLAGS		\
+	(COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG |	\
+	 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
+#define COEX_ASSOC_ACTIVE_LEVEL_FLAGS               0
+#define COEX_RF_ON_FLAGS                            0
+#define COEX_RF_OFF_FLAGS                           0
+#define COEX_STAND_ALONE_DEBUG_FLAGS		\
+	(COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG |	\
+	 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
+#define COEX_IPAN_ASSOC_LEVEL_FLAGS		\
+	(COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG |	\
+	 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG |	\
+	 COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG)
+#define COEX_RSRVD1_FLAGS                           0
+#define COEX_RSRVD2_FLAGS                           0
+/*
+ * COEX_CU_RF_ON is the event wrapping all radio ownership.
+ * We need DELAY_MEDIUM_FREE_NTFY to let WiMAX disconnect from network.
+ */
+#define COEX_CU_RF_ON_FLAGS			\
+	(COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG |	\
+	 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG |	\
+	 COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG)
+
+
+enum {
+	/* un-association part */
+	COEX_UNASSOC_IDLE		= 0,
+	COEX_UNASSOC_MANUAL_SCAN	= 1,
+	COEX_UNASSOC_AUTO_SCAN		= 2,
+	/* calibration */
+	COEX_CALIBRATION		= 3,
+	COEX_PERIODIC_CALIBRATION	= 4,
+	/* connection */
+	COEX_CONNECTION_ESTAB		= 5,
+	/* association part */
+	COEX_ASSOCIATED_IDLE		= 6,
+	COEX_ASSOC_MANUAL_SCAN		= 7,
+	COEX_ASSOC_AUTO_SCAN		= 8,
+	COEX_ASSOC_ACTIVE_LEVEL		= 9,
+	/* RF ON/OFF */
+	COEX_RF_ON			= 10,
+	COEX_RF_OFF			= 11,
+	COEX_STAND_ALONE_DEBUG		= 12,
+	/* IPAN */
+	COEX_IPAN_ASSOC_LEVEL		= 13,
+	/* reserved */
+	COEX_RSRVD1			= 14,
+	COEX_RSRVD2			= 15,
+	COEX_NUM_OF_EVENTS		= 16
+};
+
+/*
+ * Coexistence WiFi/WiMAX Command
+ * COEX_PRIORITY_TABLE_CMD = 0x5a
+ */
+struct iwl_wimax_coex_event_entry {
+	u8 request_prio;
+	u8 win_medium_prio;
+	u8 reserved;
+	u8 flags;
+} __packed;
+
+/* COEX flag masks */
+
+/* Station table is valid */
+#define COEX_FLAGS_STA_TABLE_VALID_MSK      (0x1)
+/* Unmask wake-up source during unassociated sleep */
+#define COEX_FLAGS_UNASSOC_WA_UNMASK_MSK    (0x4)
+/* Unmask wake-up source during associated sleep */
+#define COEX_FLAGS_ASSOC_WA_UNMASK_MSK      (0x8)
+/* Enable CoEx feature. */
+#define COEX_FLAGS_COEX_ENABLE_MSK          (0x80)
+
+struct iwl_wimax_coex_cmd {
+	u8 flags;
+	u8 reserved[3];
+	struct iwl_wimax_coex_event_entry sta_prio[COEX_NUM_OF_EVENTS];
+} __packed;
+
+/*
+ * Coexistence MEDIUM NOTIFICATION
+ * COEX_MEDIUM_NOTIFICATION = 0x5b
+ *
+ * Notification from uCode to host to indicate medium changes.
+ */
+/*
+ * status field
+ * bit 0 - 2: medium status
+ * bit 3: medium change indication
+ * bit 4 - 31: reserved
+ */
+/* status option values (bits 0 - 2) */
+#define COEX_MEDIUM_BUSY	(0x0) /* radio belongs to WiMAX */
+#define COEX_MEDIUM_ACTIVE	(0x1) /* radio belongs to WiFi */
+#define COEX_MEDIUM_PRE_RELEASE	(0x2) /* received radio release */
+#define COEX_MEDIUM_MSK		(0x7)
+
+/* medium change indication (bit 3) */
+#define COEX_MEDIUM_CHANGED	(0x8)
+#define COEX_MEDIUM_CHANGED_MSK	(0x8)
+#define COEX_MEDIUM_SHIFT	(3)
+
+struct iwl_coex_medium_notification {
+	__le32 status;
+	__le32 events;
+} __packed;
+
+/*
+ * Coexistence EVENT Command
+ * COEX_EVENT_CMD = 0x5c
+ *
+ * Sent from host to uCode to request a coex event.
+ */
+/* flags options */
+#define COEX_EVENT_REQUEST_MSK	(0x1)
+
+struct iwl_coex_event_cmd {
+	u8 flags;
+	u8 event;
+	__le16 reserved;
+} __packed;
+
+struct iwl_coex_event_resp {
+	__le32 status;
+} __packed;
+
+
+/******************************************************************************
+ * Bluetooth Coexistence commands
+ *
+ *****************************************************************************/
+
+/*
+ * BT Status notification
+ * REPLY_BT_COEX_PROFILE_NOTIF = 0xce
+ */
+enum iwl_bt_coex_profile_traffic_load {
+	IWL_BT_COEX_TRAFFIC_LOAD_NONE =		0,
+	IWL_BT_COEX_TRAFFIC_LOAD_LOW =		1,
+	IWL_BT_COEX_TRAFFIC_LOAD_HIGH =		2,
+	IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS =	3,
+/*
+ * There are no further values: although the field below is a u8, the
+ * indication from the BT device only has two bits.
+ */
+};
+
+#define BT_SESSION_ACTIVITY_1_UART_MSG		0x1
+#define BT_SESSION_ACTIVITY_2_UART_MSG		0x2
+
+/* BT UART message - shared part (BT -> WiFi) */
+#define BT_UART_MSG_FRAME1MSGTYPE_POS		(0)
+#define BT_UART_MSG_FRAME1MSGTYPE_MSK		\
+		(0x7 << BT_UART_MSG_FRAME1MSGTYPE_POS)
+#define BT_UART_MSG_FRAME1SSN_POS		(3)
+#define BT_UART_MSG_FRAME1SSN_MSK		\
+		(0x3 << BT_UART_MSG_FRAME1SSN_POS)
+#define BT_UART_MSG_FRAME1UPDATEREQ_POS		(5)
+#define BT_UART_MSG_FRAME1UPDATEREQ_MSK		\
+		(0x1 << BT_UART_MSG_FRAME1UPDATEREQ_POS)
+#define BT_UART_MSG_FRAME1RESERVED_POS		(6)
+#define BT_UART_MSG_FRAME1RESERVED_MSK		\
+		(0x3 << BT_UART_MSG_FRAME1RESERVED_POS)
+
+#define BT_UART_MSG_FRAME2OPENCONNECTIONS_POS	(0)
+#define BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK	\
+		(0x3 << BT_UART_MSG_FRAME2OPENCONNECTIONS_POS)
+#define BT_UART_MSG_FRAME2TRAFFICLOAD_POS	(2)
+#define BT_UART_MSG_FRAME2TRAFFICLOAD_MSK	\
+		(0x3 << BT_UART_MSG_FRAME2TRAFFICLOAD_POS)
+#define BT_UART_MSG_FRAME2CHLSEQN_POS		(4)
+#define BT_UART_MSG_FRAME2CHLSEQN_MSK		\
+		(0x1 << BT_UART_MSG_FRAME2CHLSEQN_POS)
+#define BT_UART_MSG_FRAME2INBAND_POS		(5)
+#define BT_UART_MSG_FRAME2INBAND_MSK		\
+		(0x1 << BT_UART_MSG_FRAME2INBAND_POS)
+#define BT_UART_MSG_FRAME2RESERVED_POS		(6)
+#define BT_UART_MSG_FRAME2RESERVED_MSK		\
+		(0x3 << BT_UART_MSG_FRAME2RESERVED_POS)
+
+#define BT_UART_MSG_FRAME3SCOESCO_POS		(0)
+#define BT_UART_MSG_FRAME3SCOESCO_MSK		\
+		(0x1 << BT_UART_MSG_FRAME3SCOESCO_POS)
+#define BT_UART_MSG_FRAME3SNIFF_POS		(1)
+#define BT_UART_MSG_FRAME3SNIFF_MSK		\
+		(0x1 << BT_UART_MSG_FRAME3SNIFF_POS)
+#define BT_UART_MSG_FRAME3A2DP_POS		(2)
+#define BT_UART_MSG_FRAME3A2DP_MSK		\
+		(0x1 << BT_UART_MSG_FRAME3A2DP_POS)
+#define BT_UART_MSG_FRAME3ACL_POS		(3)
+#define BT_UART_MSG_FRAME3ACL_MSK		\
+		(0x1 << BT_UART_MSG_FRAME3ACL_POS)
+#define BT_UART_MSG_FRAME3MASTER_POS		(4)
+#define BT_UART_MSG_FRAME3MASTER_MSK		\
+		(0x1 << BT_UART_MSG_FRAME3MASTER_POS)
+#define BT_UART_MSG_FRAME3OBEX_POS		(5)
+#define BT_UART_MSG_FRAME3OBEX_MSK		\
+		(0x1 << BT_UART_MSG_FRAME3OBEX_POS)
+#define BT_UART_MSG_FRAME3RESERVED_POS		(6)
+#define BT_UART_MSG_FRAME3RESERVED_MSK		\
+		(0x3 << BT_UART_MSG_FRAME3RESERVED_POS)
+
+#define BT_UART_MSG_FRAME4IDLEDURATION_POS	(0)
+#define BT_UART_MSG_FRAME4IDLEDURATION_MSK	\
+		(0x3F << BT_UART_MSG_FRAME4IDLEDURATION_POS)
+#define BT_UART_MSG_FRAME4RESERVED_POS		(6)
+#define BT_UART_MSG_FRAME4RESERVED_MSK		\
+		(0x3 << BT_UART_MSG_FRAME4RESERVED_POS)
+
+#define BT_UART_MSG_FRAME5TXACTIVITY_POS	(0)
+#define BT_UART_MSG_FRAME5TXACTIVITY_MSK	\
+		(0x3 << BT_UART_MSG_FRAME5TXACTIVITY_POS)
+#define BT_UART_MSG_FRAME5RXACTIVITY_POS	(2)
+#define BT_UART_MSG_FRAME5RXACTIVITY_MSK	\
+		(0x3 << BT_UART_MSG_FRAME5RXACTIVITY_POS)
+#define BT_UART_MSG_FRAME5ESCORETRANSMIT_POS	(4)
+#define BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK	\
+		(0x3 << BT_UART_MSG_FRAME5ESCORETRANSMIT_POS)
+#define BT_UART_MSG_FRAME5RESERVED_POS		(6)
+#define BT_UART_MSG_FRAME5RESERVED_MSK		\
+		(0x3 << BT_UART_MSG_FRAME5RESERVED_POS)
+
+#define BT_UART_MSG_FRAME6SNIFFINTERVAL_POS	(0)
+#define BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK	\
+		(0x1F << BT_UART_MSG_FRAME6SNIFFINTERVAL_POS)
+#define BT_UART_MSG_FRAME6DISCOVERABLE_POS	(5)
+#define BT_UART_MSG_FRAME6DISCOVERABLE_MSK	\
+		(0x1 << BT_UART_MSG_FRAME6DISCOVERABLE_POS)
+#define BT_UART_MSG_FRAME6RESERVED_POS		(6)
+#define BT_UART_MSG_FRAME6RESERVED_MSK		\
+		(0x3 << BT_UART_MSG_FRAME6RESERVED_POS)
+
+#define BT_UART_MSG_FRAME7SNIFFACTIVITY_POS	(0)
+#define BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK	\
+		(0x7 << BT_UART_MSG_FRAME7SNIFFACTIVITY_POS)
+#define BT_UART_MSG_FRAME7PAGE_POS		(3)
+#define BT_UART_MSG_FRAME7PAGE_MSK		\
+		(0x1 << BT_UART_MSG_FRAME7PAGE_POS)
+#define BT_UART_MSG_FRAME7INQUIRY_POS		(4)
+#define BT_UART_MSG_FRAME7INQUIRY_MSK		\
+		(0x1 << BT_UART_MSG_FRAME7INQUIRY_POS)
+#define BT_UART_MSG_FRAME7CONNECTABLE_POS	(5)
+#define BT_UART_MSG_FRAME7CONNECTABLE_MSK	\
+		(0x1 << BT_UART_MSG_FRAME7CONNECTABLE_POS)
+#define BT_UART_MSG_FRAME7RESERVED_POS		(6)
+#define BT_UART_MSG_FRAME7RESERVED_MSK		\
+		(0x3 << BT_UART_MSG_FRAME7RESERVED_POS)
+
+/* BT Session Activity 2 UART message (BT -> WiFi) */
+#define BT_UART_MSG_2_FRAME1RESERVED1_POS	(5)
+#define BT_UART_MSG_2_FRAME1RESERVED1_MSK	\
+		(0x1<<BT_UART_MSG_2_FRAME1RESERVED1_POS)
+#define BT_UART_MSG_2_FRAME1RESERVED2_POS	(6)
+#define BT_UART_MSG_2_FRAME1RESERVED2_MSK	\
+		(0x3<<BT_UART_MSG_2_FRAME1RESERVED2_POS)
+
+#define BT_UART_MSG_2_FRAME2AGGTRAFFICLOAD_POS	(0)
+#define BT_UART_MSG_2_FRAME2AGGTRAFFICLOAD_MSK	\
+		(0x3F<<BT_UART_MSG_2_FRAME2AGGTRAFFICLOAD_POS)
+#define BT_UART_MSG_2_FRAME2RESERVED_POS	(6)
+#define BT_UART_MSG_2_FRAME2RESERVED_MSK	\
+		(0x3<<BT_UART_MSG_2_FRAME2RESERVED_POS)
+
+#define BT_UART_MSG_2_FRAME3BRLASTTXPOWER_POS	(0)
+#define BT_UART_MSG_2_FRAME3BRLASTTXPOWER_MSK	\
+		(0xF<<BT_UART_MSG_2_FRAME3BRLASTTXPOWER_POS)
+#define BT_UART_MSG_2_FRAME3INQPAGESRMODE_POS	(4)
+#define BT_UART_MSG_2_FRAME3INQPAGESRMODE_MSK	\
+		(0x1<<BT_UART_MSG_2_FRAME3INQPAGESRMODE_POS)
+#define BT_UART_MSG_2_FRAME3LEMASTER_POS	(5)
+#define BT_UART_MSG_2_FRAME3LEMASTER_MSK	\
+		(0x1<<BT_UART_MSG_2_FRAME3LEMASTER_POS)
+#define BT_UART_MSG_2_FRAME3RESERVED_POS	(6)
+#define BT_UART_MSG_2_FRAME3RESERVED_MSK	\
+		(0x3<<BT_UART_MSG_2_FRAME3RESERVED_POS)
+
+#define BT_UART_MSG_2_FRAME4LELASTTXPOWER_POS	(0)
+#define BT_UART_MSG_2_FRAME4LELASTTXPOWER_MSK	\
+		(0xF<<BT_UART_MSG_2_FRAME4LELASTTXPOWER_POS)
+#define BT_UART_MSG_2_FRAME4NUMLECONN_POS	(4)
+#define BT_UART_MSG_2_FRAME4NUMLECONN_MSK	\
+		(0x3<<BT_UART_MSG_2_FRAME4NUMLECONN_POS)
+#define BT_UART_MSG_2_FRAME4RESERVED_POS	(6)
+#define BT_UART_MSG_2_FRAME4RESERVED_MSK	\
+		(0x3<<BT_UART_MSG_2_FRAME4RESERVED_POS)
+
+#define BT_UART_MSG_2_FRAME5BTMINRSSI_POS	(0)
+#define BT_UART_MSG_2_FRAME5BTMINRSSI_MSK	\
+		(0xF<<BT_UART_MSG_2_FRAME5BTMINRSSI_POS)
+#define BT_UART_MSG_2_FRAME5LESCANINITMODE_POS	(4)
+#define BT_UART_MSG_2_FRAME5LESCANINITMODE_MSK	\
+		(0x1<<BT_UART_MSG_2_FRAME5LESCANINITMODE_POS)
+#define BT_UART_MSG_2_FRAME5LEADVERMODE_POS	(5)
+#define BT_UART_MSG_2_FRAME5LEADVERMODE_MSK	\
+		(0x1<<BT_UART_MSG_2_FRAME5LEADVERMODE_POS)
+#define BT_UART_MSG_2_FRAME5RESERVED_POS	(6)
+#define BT_UART_MSG_2_FRAME5RESERVED_MSK	\
+		(0x3<<BT_UART_MSG_2_FRAME5RESERVED_POS)
+
+#define BT_UART_MSG_2_FRAME6LECONNINTERVAL_POS	(0)
+#define BT_UART_MSG_2_FRAME6LECONNINTERVAL_MSK	\
+		(0x1F<<BT_UART_MSG_2_FRAME6LECONNINTERVAL_POS)
+#define BT_UART_MSG_2_FRAME6RFU_POS		(5)
+#define BT_UART_MSG_2_FRAME6RFU_MSK		\
+		(0x1<<BT_UART_MSG_2_FRAME6RFU_POS)
+#define BT_UART_MSG_2_FRAME6RESERVED_POS	(6)
+#define BT_UART_MSG_2_FRAME6RESERVED_MSK	\
+		(0x3<<BT_UART_MSG_2_FRAME6RESERVED_POS)
+
+#define BT_UART_MSG_2_FRAME7LECONNSLAVELAT_POS	(0)
+#define BT_UART_MSG_2_FRAME7LECONNSLAVELAT_MSK	\
+		(0x7<<BT_UART_MSG_2_FRAME7LECONNSLAVELAT_POS)
+#define BT_UART_MSG_2_FRAME7LEPROFILE1_POS	(3)
+#define BT_UART_MSG_2_FRAME7LEPROFILE1_MSK	\
+		(0x1<<BT_UART_MSG_2_FRAME7LEPROFILE1_POS)
+#define BT_UART_MSG_2_FRAME7LEPROFILE2_POS	(4)
+#define BT_UART_MSG_2_FRAME7LEPROFILE2_MSK	\
+		(0x1<<BT_UART_MSG_2_FRAME7LEPROFILE2_POS)
+#define BT_UART_MSG_2_FRAME7LEPROFILEOTHER_POS	(5)
+#define BT_UART_MSG_2_FRAME7LEPROFILEOTHER_MSK	\
+		(0x1<<BT_UART_MSG_2_FRAME7LEPROFILEOTHER_POS)
+#define BT_UART_MSG_2_FRAME7RESERVED_POS	(6)
+#define BT_UART_MSG_2_FRAME7RESERVED_MSK	\
+		(0x3<<BT_UART_MSG_2_FRAME7RESERVED_POS)
+
+
+#define BT_ENABLE_REDUCED_TXPOWER_THRESHOLD	(-62)
+#define BT_DISABLE_REDUCED_TXPOWER_THRESHOLD	(-65)
+
+struct iwl_bt_uart_msg {
+	u8 header;
+	u8 frame1;
+	u8 frame2;
+	u8 frame3;
+	u8 frame4;
+	u8 frame5;
+	u8 frame6;
+	u8 frame7;
+} __packed;
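+
+/*
+ * Illustrative sketch (not part of the original header): extracting one
+ * field from a BT UART message byte with the POS/MSK pairs above, here
+ * the SCO/eSCO indication from frame 3.
+ */
+static inline u8 iwl_example_bt_sco_esco(const struct iwl_bt_uart_msg *msg)
+{
+	return (msg->frame3 & BT_UART_MSG_FRAME3SCOESCO_MSK) >>
+		BT_UART_MSG_FRAME3SCOESCO_POS;
+}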
+
+struct iwl_bt_coex_profile_notif {
+	struct iwl_bt_uart_msg last_bt_uart_msg;
+	u8 bt_status; /* 0 - off, 1 - on */
+	u8 bt_traffic_load; /* 0 .. 3? */
+	u8 bt_ci_compliance; /* 0 - not complied, 1 - complied */
+	u8 reserved;
+} __packed;
+
+#define IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS	0
+#define IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_MSK	0x1
+#define IWL_BT_COEX_PRIO_TBL_PRIO_POS		1
+#define IWL_BT_COEX_PRIO_TBL_PRIO_MASK		0x0e
+#define IWL_BT_COEX_PRIO_TBL_RESERVED_POS	4
+#define IWL_BT_COEX_PRIO_TBL_RESERVED_MASK	0xf0
+#define IWL_BT_COEX_PRIO_TBL_PRIO_SHIFT		1
+
+/*
+ * BT Coexistence Priority table
+ * REPLY_BT_COEX_PRIO_TABLE = 0xcc
+ */
+enum bt_coex_prio_table_events {
+	BT_COEX_PRIO_TBL_EVT_INIT_CALIB1 = 0,
+	BT_COEX_PRIO_TBL_EVT_INIT_CALIB2 = 1,
+	BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1 = 2,
+	BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2 = 3, /* DC calib */
+	BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1 = 4,
+	BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2 = 5,
+	BT_COEX_PRIO_TBL_EVT_DTIM = 6,
+	BT_COEX_PRIO_TBL_EVT_SCAN52 = 7,
+	BT_COEX_PRIO_TBL_EVT_SCAN24 = 8,
+	BT_COEX_PRIO_TBL_EVT_RESERVED0 = 9,
+	BT_COEX_PRIO_TBL_EVT_RESERVED1 = 10,
+	BT_COEX_PRIO_TBL_EVT_RESERVED2 = 11,
+	BT_COEX_PRIO_TBL_EVT_RESERVED3 = 12,
+	BT_COEX_PRIO_TBL_EVT_RESERVED4 = 13,
+	BT_COEX_PRIO_TBL_EVT_RESERVED5 = 14,
+	BT_COEX_PRIO_TBL_EVT_RESERVED6 = 15,
+	/* BT_COEX_PRIO_TBL_EVT_MAX should always be last */
+	BT_COEX_PRIO_TBL_EVT_MAX,
+};
+
+enum bt_coex_prio_table_priorities {
+	BT_COEX_PRIO_TBL_DISABLED = 0,
+	BT_COEX_PRIO_TBL_PRIO_LOW = 1,
+	BT_COEX_PRIO_TBL_PRIO_HIGH = 2,
+	BT_COEX_PRIO_TBL_PRIO_BYPASS = 3,
+	BT_COEX_PRIO_TBL_PRIO_COEX_OFF = 4,
+	BT_COEX_PRIO_TBL_PRIO_COEX_ON = 5,
+	BT_COEX_PRIO_TBL_PRIO_RSRVD1 = 6,
+	BT_COEX_PRIO_TBL_PRIO_RSRVD2 = 7,
+	BT_COEX_PRIO_TBL_MAX,
+};
+
+struct iwl_bt_coex_prio_table_cmd {
+	u8 prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX];
+} __packed;
+
+#define IWL_BT_COEX_ENV_CLOSE	0
+#define IWL_BT_COEX_ENV_OPEN	1
+/*
+ * BT Protection Envelope
+ * REPLY_BT_COEX_PROT_ENV = 0xcd
+ */
+struct iwl_bt_coex_prot_env_cmd {
+	u8 action; /* 0 = closed, 1 = open */
+	u8 type; /* 0 .. 15 */
+	u8 reserved[2];
+} __packed;
+
+/*
+ * REPLY_D3_CONFIG
+ */
+enum iwlagn_d3_wakeup_filters {
+	IWLAGN_D3_WAKEUP_RFKILL		= BIT(0),
+	IWLAGN_D3_WAKEUP_SYSASSERT	= BIT(1),
+};
+
+struct iwlagn_d3_config_cmd {
+	__le32 min_sleep_time;
+	__le32 wakeup_flags;
+} __packed;
+
+/*
+ * REPLY_WOWLAN_PATTERNS
+ */
+#define IWLAGN_WOWLAN_MIN_PATTERN_LEN	16
+#define IWLAGN_WOWLAN_MAX_PATTERN_LEN	128
+
+struct iwlagn_wowlan_pattern {
+	u8 mask[IWLAGN_WOWLAN_MAX_PATTERN_LEN / 8];
+	u8 pattern[IWLAGN_WOWLAN_MAX_PATTERN_LEN];
+	u8 mask_size;
+	u8 pattern_size;
+	__le16 reserved;
+} __packed;
+
+#define IWLAGN_WOWLAN_MAX_PATTERNS	20
+
+struct iwlagn_wowlan_patterns_cmd {
+	__le32 n_patterns;
+	struct iwlagn_wowlan_pattern patterns[];
+} __packed;
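+
+/*
+ * Illustrative sketch (not part of the original header): marking byte i
+ * of a wakeup pattern as significant.  Each bit of mask[] selects one
+ * pattern byte; clear bits act as wildcards.
+ */
+static inline void
+iwlagn_example_pattern_set_byte(struct iwlagn_wowlan_pattern *p,
+				unsigned int i, u8 value)
+{
+	p->pattern[i] = value;
+	p->mask[i / 8] |= BIT(i % 8);
+}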
+
+/*
+ * REPLY_WOWLAN_WAKEUP_FILTER
+ */
+enum iwlagn_wowlan_wakeup_filters {
+	IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET	= BIT(0),
+	IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH	= BIT(1),
+	IWLAGN_WOWLAN_WAKEUP_BEACON_MISS	= BIT(2),
+	IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE	= BIT(3),
+	IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL	= BIT(4),
+	IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ	= BIT(5),
+	IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE	= BIT(6),
+	IWLAGN_WOWLAN_WAKEUP_ALWAYS		= BIT(7),
+	IWLAGN_WOWLAN_WAKEUP_ENABLE_NET_DETECT	= BIT(8),
+};
+
+struct iwlagn_wowlan_wakeup_filter_cmd {
+	__le32 enabled;
+	__le16 non_qos_seq;
+	__le16 reserved;
+	__le16 qos_seq[8];
+};
+
+/*
+ * REPLY_WOWLAN_TSC_RSC_PARAMS
+ */
+#define IWLAGN_NUM_RSC	16
+
+struct tkip_sc {
+	__le16 iv16;
+	__le16 pad;
+	__le32 iv32;
+} __packed;
+
+struct iwlagn_tkip_rsc_tsc {
+	struct tkip_sc unicast_rsc[IWLAGN_NUM_RSC];
+	struct tkip_sc multicast_rsc[IWLAGN_NUM_RSC];
+	struct tkip_sc tsc;
+} __packed;
+
+struct aes_sc {
+	__le64 pn;
+} __packed;
+
+struct iwlagn_aes_rsc_tsc {
+	struct aes_sc unicast_rsc[IWLAGN_NUM_RSC];
+	struct aes_sc multicast_rsc[IWLAGN_NUM_RSC];
+	struct aes_sc tsc;
+} __packed;
+
+union iwlagn_all_tsc_rsc {
+	struct iwlagn_tkip_rsc_tsc tkip;
+	struct iwlagn_aes_rsc_tsc aes;
+};
+
+struct iwlagn_wowlan_rsc_tsc_params_cmd {
+	union iwlagn_all_tsc_rsc all_tsc_rsc;
+} __packed;
+
+/*
+ * REPLY_WOWLAN_TKIP_PARAMS
+ */
+#define IWLAGN_MIC_KEY_SIZE	8
+#define IWLAGN_P1K_SIZE		5
+struct iwlagn_mic_keys {
+	u8 tx[IWLAGN_MIC_KEY_SIZE];
+	u8 rx_unicast[IWLAGN_MIC_KEY_SIZE];
+	u8 rx_mcast[IWLAGN_MIC_KEY_SIZE];
+} __packed;
+
+struct iwlagn_p1k_cache {
+	__le16 p1k[IWLAGN_P1K_SIZE];
+} __packed;
+
+#define IWLAGN_NUM_RX_P1K_CACHE	2
+
+struct iwlagn_wowlan_tkip_params_cmd {
+	struct iwlagn_mic_keys mic_keys;
+	struct iwlagn_p1k_cache tx;
+	struct iwlagn_p1k_cache rx_uni[IWLAGN_NUM_RX_P1K_CACHE];
+	struct iwlagn_p1k_cache rx_multi[IWLAGN_NUM_RX_P1K_CACHE];
+} __packed;
+
+/*
+ * REPLY_WOWLAN_KEK_KCK_MATERIAL
+ */
+
+#define IWLAGN_KCK_MAX_SIZE	32
+#define IWLAGN_KEK_MAX_SIZE	32
+
+struct iwlagn_wowlan_kek_kck_material_cmd {
+	u8	kck[IWLAGN_KCK_MAX_SIZE];
+	u8	kek[IWLAGN_KEK_MAX_SIZE];
+	__le16	kck_len;
+	__le16	kek_len;
+	__le64	replay_ctr;
+} __packed;
+
+#define RF_KILL_INDICATOR_FOR_WOWLAN	0x87
+
+/*
+ * REPLY_WOWLAN_GET_STATUS = 0xe5
+ */
+struct iwlagn_wowlan_status {
+	__le64 replay_ctr;
+	__le32 rekey_status;
+	__le32 wakeup_reason;
+	u8 pattern_number;
+	u8 reserved1;
+	__le16 qos_seq_ctr[8];
+	__le16 non_qos_seq_ctr;
+	__le16 reserved2;
+	union iwlagn_all_tsc_rsc tsc_rsc;
+	__le16 reserved3;
+} __packed;
+
+/*
+ * REPLY_WIPAN_PARAMS = 0xb2 (Commands and Notification)
+ */
+
+/*
+ * Minimum slot time in TU
+ */
+#define IWL_MIN_SLOT_TIME	20
+
+/**
+ * struct iwl_wipan_slot
+ * @width: Time in TU
+ * @type:
+ *   0 - BSS
+ *   1 - PAN
+ */
+struct iwl_wipan_slot {
+	__le16 width;
+	u8 type;
+	u8 reserved;
+} __packed;
+
+#define IWL_WIPAN_PARAMS_FLG_LEAVE_CHANNEL_CTS		BIT(1)	/* reserved */
+#define IWL_WIPAN_PARAMS_FLG_LEAVE_CHANNEL_QUIET	BIT(2)	/* reserved */
+#define IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE		BIT(3)	/* reserved */
+#define IWL_WIPAN_PARAMS_FLG_FILTER_BEACON_NOTIF	BIT(4)
+#define IWL_WIPAN_PARAMS_FLG_FULL_SLOTTED_MODE		BIT(5)
+
+/**
+ * struct iwl_wipan_params_cmd
+ * @flags:
+ *   bit0: reserved
+ *   bit1: CP leave channel with CTS
+ *   bit2: CP leave channel with Quiet
+ *   bit3: slotted mode
+ *     1 - work in slotted mode
+ *     0 - work in non slotted mode
+ *   bit4: filter beacon notification
+ *   bit5: full tx slotted mode. if this flag is set,
+ *         uCode will perform leaving channel methods in context switch
+ *         also when working in same channel mode
+ * @num_slots: 1 - 10
+ */
+struct iwl_wipan_params_cmd {
+	__le16 flags;
+	u8 reserved;
+	u8 num_slots;
+	struct iwl_wipan_slot slots[10];
+} __packed;
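+
+/*
+ * Illustrative sketch (values assumed, not from the original source):
+ * a two-slot BSS/PAN time share might be encoded as
+ *
+ *	struct iwl_wipan_params_cmd cmd = {
+ *		.flags = cpu_to_le16(IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE),
+ *		.num_slots = 2,
+ *		.slots[0] = { .width = cpu_to_le16(30), .type = 0 }, /* BSS */
+ *		.slots[1] = { .width = cpu_to_le16(20), .type = 1 }, /* PAN */
+ *	};
+ *
+ * where each width is in TU and should respect IWL_MIN_SLOT_TIME.
+ */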
+
+/*
+ * REPLY_WIPAN_P2P_CHANNEL_SWITCH = 0xb9
+ *
+ * TODO: Figure out what this is used for,
+ *	 it can only switch between 2.4 GHz
+ *	 channels!!
+ */
+
+struct iwl_wipan_p2p_channel_switch_cmd {
+	__le16 channel;
+	__le16 reserved;
+};
+
+/*
+ * REPLY_WIPAN_NOA_NOTIFICATION = 0xbc
+ *
+ * This is used by the device to notify us of the
+ * NoA schedule it determined so we can forward it
+ * to userspace for inclusion in probe responses.
+ *
+ * In beacons, the NoA schedule is simply appended
+ * to the frame we give the device.
+ */
+
+struct iwl_wipan_noa_descriptor {
+	u8 count;
+	__le32 duration;
+	__le32 interval;
+	__le32 starttime;
+} __packed;
+
+struct iwl_wipan_noa_attribute {
+	u8 id;
+	__le16 length;
+	u8 index;
+	u8 ct_window;
+	struct iwl_wipan_noa_descriptor descr0, descr1;
+	u8 reserved;
+} __packed;
+
+struct iwl_wipan_noa_notification {
+	u32 noa_active;
+	struct iwl_wipan_noa_attribute noa_attribute;
+} __packed;
+
+#endif				/* __iwl_commands_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
new file mode 100644
index 0000000..096a07c
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
@@ -0,0 +1,2438 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/ieee80211.h>
+#include <net/mac80211.h>
+
+#include "iwl-debug.h"
+#include "iwl-trans.h"
+#include "iwl-io.h"
+#include "dev.h"
+#include "agn.h"
+
+/* create and remove of files */
+#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
+	if (!debugfs_create_file(#name, mode, parent, priv,		\
+				 &iwl_dbgfs_##name##_ops))		\
+		goto err;						\
+} while (0)
+
+#define DEBUGFS_ADD_BOOL(name, parent, ptr) do {			\
+	struct dentry *__tmp;						\
+	__tmp = debugfs_create_bool(#name, 0600, parent, ptr);		\
+	if (IS_ERR(__tmp) || !__tmp)					\
+		goto err;						\
+} while (0)
+
+#define DEBUGFS_ADD_X32(name, parent, ptr) do {				\
+	struct dentry *__tmp;						\
+	__tmp = debugfs_create_x32(#name, 0600, parent, ptr);		\
+	if (IS_ERR(__tmp) || !__tmp)					\
+		goto err;						\
+} while (0)
+
+#define DEBUGFS_ADD_U32(name, parent, ptr, mode) do {			\
+	struct dentry *__tmp;						\
+	__tmp = debugfs_create_u32(#name, mode,				\
+				   parent, ptr);			\
+	if (IS_ERR(__tmp) || !__tmp)					\
+		goto err;						\
+} while (0)
+
+/* file operation */
+#define DEBUGFS_READ_FILE_OPS(name)                                     \
+static const struct file_operations iwl_dbgfs_##name##_ops = {          \
+	.read = iwl_dbgfs_##name##_read,				\
+	.open = simple_open,						\
+	.llseek = generic_file_llseek,					\
+};
+
+#define DEBUGFS_WRITE_FILE_OPS(name)                                    \
+static const struct file_operations iwl_dbgfs_##name##_ops = {          \
+	.write = iwl_dbgfs_##name##_write,                              \
+	.open = simple_open,						\
+	.llseek = generic_file_llseek,					\
+};
+
+
+#define DEBUGFS_READ_WRITE_FILE_OPS(name)                               \
+static const struct file_operations iwl_dbgfs_##name##_ops = {          \
+	.write = iwl_dbgfs_##name##_write,                              \
+	.read = iwl_dbgfs_##name##_read,                                \
+	.open = simple_open,						\
+	.llseek = generic_file_llseek,					\
+};
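+
+/*
+ * Illustrative expansion (for reference only): for a file named
+ * "sram", DEBUGFS_READ_WRITE_FILE_OPS(sram) produces
+ *
+ *	static const struct file_operations iwl_dbgfs_sram_ops = {
+ *		.write = iwl_dbgfs_sram_write,
+ *		.read = iwl_dbgfs_sram_read,
+ *		.open = simple_open,
+ *		.llseek = generic_file_llseek,
+ *	};
+ *
+ * so each entry below only has to define the matching _read/_write
+ * handlers before instantiating one of these macros.
+ */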
+
+static ssize_t iwl_dbgfs_sram_read(struct file *file,
+					char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	u32 val = 0;
+	char *buf;
+	ssize_t ret;
+	int i = 0;
+	bool device_format = false;
+	int offset = 0;
+	int len = 0;
+	int pos = 0;
+	int sram;
+	struct iwl_priv *priv = file->private_data;
+	const struct fw_img *img;
+	size_t bufsz;
+
+	if (!iwl_is_ready_rf(priv))
+		return -EAGAIN;
+
+	/* default is to dump the entire data segment */
+	if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
+		priv->dbgfs_sram_offset = 0x800000;
+		if (!priv->ucode_loaded)
+			return -EINVAL;
+		img = &priv->fw->img[priv->cur_ucode];
+		priv->dbgfs_sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
+	}
+	len = priv->dbgfs_sram_len;
+
+	if (len == -4) {
+		device_format = true;
+		len = 4;
+	}
+
+	bufsz = 50 + len * 4;
+	buf = kmalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
+			 len);
+	pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
+			priv->dbgfs_sram_offset);
+
+	/* adjust sram address since reads are only on even u32 boundaries */
+	offset = priv->dbgfs_sram_offset & 0x3;
+	sram = priv->dbgfs_sram_offset & ~0x3;
+
+	/* read the first u32 from sram */
+	val = iwl_trans_read_mem32(priv->trans, sram);
+
+	for (; len; len--) {
+		/* put the address at the start of every line */
+		if (i == 0)
+			pos += scnprintf(buf + pos, bufsz - pos,
+				"%08X: ", sram + offset);
+
+		if (device_format)
+			pos += scnprintf(buf + pos, bufsz - pos,
+				"%02x", (val >> (8 * (3 - offset))) & 0xff);
+		else
+			pos += scnprintf(buf + pos, bufsz - pos,
+				"%02x ", (val >> (8 * offset)) & 0xff);
+
+		/* if all bytes processed, read the next u32 from sram */
+		if (++offset == 4) {
+			sram += 4;
+			offset = 0;
+			val = iwl_trans_read_mem32(priv->trans, sram);
+		}
+
+		/* put in extra spaces and split lines for human readability */
+		if (++i == 16) {
+			i = 0;
+			pos += scnprintf(buf + pos, bufsz - pos, "\n");
+		} else if (!(i & 7)) {
+			pos += scnprintf(buf + pos, bufsz - pos, "   ");
+		} else if (!(i & 3)) {
+			pos += scnprintf(buf + pos, bufsz - pos, " ");
+		}
+	}
+	if (i)
+		pos += scnprintf(buf + pos, bufsz - pos, "\n");
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t iwl_dbgfs_sram_write(struct file *file,
+					const char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	char buf[64];
+	int buf_size;
+	u32 offset, len;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) -  1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+
+	if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
+		priv->dbgfs_sram_offset = offset;
+		priv->dbgfs_sram_len = len;
+	} else if (sscanf(buf, "%x", &offset) == 1) {
+		priv->dbgfs_sram_offset = offset;
+		priv->dbgfs_sram_len = -4;
+	} else {
+		priv->dbgfs_sram_offset = 0;
+		priv->dbgfs_sram_len = 0;
+	}
+
+	return count;
+}
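+
+/*
+ * Illustrative usage (debugfs path abbreviated, assumed layout):
+ *
+ *	echo "0x800000,40" > .../iwlwifi/.../sram
+ *	cat .../iwlwifi/.../sram
+ *
+ * dumps 0x40 bytes starting at SRAM offset 0x800000.  Writing a single
+ * offset selects the 4-byte "device format" dump (len == -4 above);
+ * any other input resets offset/length to the data-segment default.
+ */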
+
+static ssize_t iwl_dbgfs_wowlan_sram_read(struct file *file,
+					  char __user *user_buf,
+					  size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	const struct fw_img *img = &priv->fw->img[IWL_UCODE_WOWLAN];
+
+	if (!priv->wowlan_sram)
+		return -ENODATA;
+
+	return simple_read_from_buffer(user_buf, count, ppos,
+				       priv->wowlan_sram,
+				       img->sec[IWL_UCODE_SECTION_DATA].len);
+}
+static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	struct iwl_station_entry *station;
+	struct iwl_tid_data *tid_data;
+	char *buf;
+	int i, j, pos = 0;
+	ssize_t ret;
+	/* Add 30 for initial string */
+	const size_t bufsz = 30 + sizeof(char) * 500 * (priv->num_stations);
+
+	buf = kmalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
+			priv->num_stations);
+
+	for (i = 0; i < IWLAGN_STATION_COUNT; i++) {
+		station = &priv->stations[i];
+		if (!station->used)
+			continue;
+		pos += scnprintf(buf + pos, bufsz - pos,
+				 "station %d - addr: %pM, flags: %#x\n",
+				 i, station->sta.sta.addr,
+				 station->sta.station_flags_msk);
+		pos += scnprintf(buf + pos, bufsz - pos,
+				"TID seqno  next_rclmd "
+				"rate_n_flags state txq\n");
+
+		for (j = 0; j < IWL_MAX_TID_COUNT; j++) {
+			tid_data = &priv->tid_data[i][j];
+			pos += scnprintf(buf + pos, bufsz - pos,
+				"%d:  0x%.4x 0x%.4x     0x%.8x   "
+				"%d     %.2d",
+				j, tid_data->seq_number,
+				tid_data->next_reclaimed,
+				tid_data->agg.rate_n_flags,
+				tid_data->agg.state,
+				tid_data->agg.txq_id);
+
+			if (tid_data->agg.wait_for_ba)
+				pos += scnprintf(buf + pos, bufsz - pos,
+						 " - waitforba");
+			pos += scnprintf(buf + pos, bufsz - pos, "\n");
+		}
+
+		pos += scnprintf(buf + pos, bufsz - pos, "\n");
+	}
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t iwl_dbgfs_nvm_read(struct file *file,
+				       char __user *user_buf,
+				       size_t count,
+				       loff_t *ppos)
+{
+	ssize_t ret;
+	struct iwl_priv *priv = file->private_data;
+	int pos = 0, ofs = 0, buf_size = 0;
+	const u8 *ptr;
+	char *buf;
+	u16 nvm_ver;
+	size_t eeprom_len = priv->eeprom_blob_size;
+	buf_size = 4 * eeprom_len + 256;
+
+	if (eeprom_len % 16)
+		return -ENODATA;
+
+	ptr = priv->eeprom_blob;
+	if (!ptr)
+		return -ENOMEM;
+
+	/* 4 characters for byte 0xYY */
+	buf = kzalloc(buf_size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	nvm_ver = priv->nvm_data->nvm_version;
+	pos += scnprintf(buf + pos, buf_size - pos,
+			 "NVM version: 0x%x\n", nvm_ver);
+	for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
+		pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x %16ph\n",
+				 ofs, ptr + ofs);
+	}
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
+				       size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	struct ieee80211_channel *channels = NULL;
+	const struct ieee80211_supported_band *supp_band = NULL;
+	int pos = 0, i, bufsz = PAGE_SIZE;
+	char *buf;
+	ssize_t ret;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	supp_band = iwl_get_hw_mode(priv, NL80211_BAND_2GHZ);
+	if (supp_band) {
+		channels = supp_band->channels;
+
+		pos += scnprintf(buf + pos, bufsz - pos,
+				"Displaying %d channels in 2.4GHz band 802.11bg):\n",
+				supp_band->n_channels);
+
+		for (i = 0; i < supp_band->n_channels; i++)
+			pos += scnprintf(buf + pos, bufsz - pos,
+					"%d: %ddBm: BSS%s%s, %s.\n",
+					channels[i].hw_value,
+					channels[i].max_power,
+					channels[i].flags & IEEE80211_CHAN_RADAR ?
+					" (IEEE 802.11h required)" : "",
+					((channels[i].flags & IEEE80211_CHAN_NO_IR)
+					|| (channels[i].flags &
+					IEEE80211_CHAN_RADAR)) ? "" :
+					", IBSS",
+					channels[i].flags &
+					IEEE80211_CHAN_NO_IR ?
+					"passive only" : "active/passive");
+	}
+	supp_band = iwl_get_hw_mode(priv, NL80211_BAND_5GHZ);
+	if (supp_band) {
+		channels = supp_band->channels;
+
+		pos += scnprintf(buf + pos, bufsz - pos,
+				"Displaying %d channels in 5.2GHz band (802.11a)\n",
+				supp_band->n_channels);
+
+		for (i = 0; i < supp_band->n_channels; i++)
+			pos += scnprintf(buf + pos, bufsz - pos,
+					"%d: %ddBm: BSS%s%s, %s.\n",
+					channels[i].hw_value,
+					channels[i].max_power,
+					channels[i].flags & IEEE80211_CHAN_RADAR ?
+					" (IEEE 802.11h required)" : "",
+					((channels[i].flags & IEEE80211_CHAN_NO_IR)
+					|| (channels[i].flags &
+					IEEE80211_CHAN_RADAR)) ? "" :
+					", IBSS",
+					channels[i].flags &
+					IEEE80211_CHAN_NO_IR ?
+					"passive only" : "active/passive");
+	}
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t iwl_dbgfs_status_read(struct file *file,
+						char __user *user_buf,
+						size_t count, loff_t *ppos) {
+
+	struct iwl_priv *priv = file->private_data;
+	char buf[512];
+	int pos = 0;
+	const size_t bufsz = sizeof(buf);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
+		test_bit(STATUS_RF_KILL_HW, &priv->status));
+	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n",
+		test_bit(STATUS_CT_KILL, &priv->status));
+	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n",
+		test_bit(STATUS_ALIVE, &priv->status));
+	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n",
+		test_bit(STATUS_READY, &priv->status));
+	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n",
+		test_bit(STATUS_EXIT_PENDING, &priv->status));
+	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n",
+		test_bit(STATUS_STATISTICS, &priv->status));
+	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n",
+		test_bit(STATUS_SCANNING, &priv->status));
+	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n",
+		test_bit(STATUS_SCAN_ABORTING, &priv->status));
+	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n",
+		test_bit(STATUS_SCAN_HW, &priv->status));
+	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n",
+		test_bit(STATUS_POWER_PMI, &priv->status));
+	pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
+		test_bit(STATUS_FW_ERROR, &priv->status));
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_rx_handlers_read(struct file *file,
+					char __user *user_buf,
+					size_t count, loff_t *ppos) {
+
+	struct iwl_priv *priv = file->private_data;
+
+	int pos = 0;
+	int cnt = 0;
+	char *buf;
+	int bufsz = 24 * 64; /* 24 items * 64 char per item */
+	ssize_t ret;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	for (cnt = 0; cnt < REPLY_MAX; cnt++) {
+		if (priv->rx_handlers_stats[cnt] > 0)
+			pos += scnprintf(buf + pos, bufsz - pos,
+				"\tRx handler[%36s]:\t\t %u\n",
+				iwl_get_cmd_string(priv->trans, (u32)cnt),
+				priv->rx_handlers_stats[cnt]);
+	}
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
+					 const char __user *user_buf,
+					 size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+
+	char buf[8];
+	int buf_size;
+	u32 reset_flag;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) -  1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	if (sscanf(buf, "%x", &reset_flag) != 1)
+		return -EFAULT;
+	if (reset_flag == 0)
+		memset(&priv->rx_handlers_stats[0], 0,
+			sizeof(priv->rx_handlers_stats));
+
+	return count;
+}
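+
+/*
+ * Illustrative usage (assumed): writing the hex value 0, e.g.
+ *
+ *	echo 0 > rx_handlers
+ *
+ * clears the per-command Rx handler counters; any other value leaves
+ * them untouched while the write still succeeds.
+ */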
+
+static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
+				       size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	struct iwl_rxon_context *ctx;
+	int pos = 0, i;
+	char buf[256 * NUM_IWL_RXON_CTX];
+	const size_t bufsz = sizeof(buf);
+
+	for_each_context(priv, ctx) {
+		pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
+				 ctx->ctxid);
+		for (i = 0; i < AC_NUM; i++) {
+			pos += scnprintf(buf + pos, bufsz - pos,
+				"\tcw_min\tcw_max\taifsn\ttxop\n");
+			pos += scnprintf(buf + pos, bufsz - pos,
+				"AC[%d]\t%u\t%u\t%u\t%u\n", i,
+				ctx->qos_data.def_qos_parm.ac[i].cw_min,
+				ctx->qos_data.def_qos_parm.ac[i].cw_max,
+				ctx->qos_data.def_qos_parm.ac[i].aifsn,
+				ctx->qos_data.def_qos_parm.ac[i].edca_txop);
+		}
+		pos += scnprintf(buf + pos, bufsz - pos, "\n");
+	}
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
+				char __user *user_buf,
+				size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
+	struct iwl_tt_restriction *restriction;
+	char buf[100];
+	int pos = 0;
+	const size_t bufsz = sizeof(buf);
+
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"Thermal Throttling Mode: %s\n",
+			tt->advanced_tt ? "Advance" : "Legacy");
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"Thermal Throttling State: %d\n",
+			tt->state);
+	if (tt->advanced_tt) {
+		restriction = tt->restriction + tt->state;
+		pos += scnprintf(buf + pos, bufsz - pos,
+				"Tx mode: %d\n",
+				restriction->tx_stream);
+		pos += scnprintf(buf + pos, bufsz - pos,
+				"Rx mode: %d\n",
+				restriction->rx_stream);
+		pos += scnprintf(buf + pos, bufsz - pos,
+				"HT mode: %d\n",
+				restriction->is_ht);
+	}
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
+					 const char __user *user_buf,
+					 size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	char buf[8];
+	int buf_size;
+	int ht40;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) -  1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	if (sscanf(buf, "%d", &ht40) != 1)
+		return -EFAULT;
+	if (!iwl_is_any_associated(priv))
+		priv->disable_ht40 = ht40 ? true : false;
+	else
+		return -EINVAL;
+
+	return count;
+}
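+
+/*
+ * Illustrative usage (assumed): HT40 can only be toggled while no
+ * context is associated, e.g.
+ *
+ *	echo 1 > disable_ht40	(disable 40 MHz channels)
+ *	echo 0 > disable_ht40	(re-enable them)
+ *
+ * while associated, the write fails with -EINVAL.
+ */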
+
+static ssize_t iwl_dbgfs_disable_ht40_read(struct file *file,
+					 char __user *user_buf,
+					 size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	char buf[100];
+	int pos = 0;
+	const size_t bufsz = sizeof(buf);
+
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"11n 40MHz Mode: %s\n",
+			priv->disable_ht40 ? "Disabled" : "Enabled");
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_temperature_read(struct file *file,
+					 char __user *user_buf,
+					 size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	char buf[8];
+	int pos = 0;
+	const size_t bufsz = sizeof(buf);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "%d\n", priv->temperature);
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+
+static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
+						    const char __user *user_buf,
+						    size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	char buf[8];
+	int buf_size;
+	int value;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) -  1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+
+	if (sscanf(buf, "%d", &value) != 1)
+		return -EINVAL;
+
+	/*
+	 * Our users expect 0 to be "CAM", but 0 isn't actually
+	 * valid here. However, let's not confuse them and present
+	 * IWL_POWER_INDEX_1 as "1", not "0".
+	 */
+	if (value == 0)
+		return -EINVAL;
+	else if (value > 0)
+		value -= 1;
+
+	if (value != -1 && (value < 0 || value >= IWL_POWER_NUM))
+		return -EINVAL;
+
+	if (!iwl_is_ready_rf(priv))
+		return -EAGAIN;
+
+	priv->power_data.debug_sleep_level_override = value;
+
+	mutex_lock(&priv->mutex);
+	iwl_power_update_mode(priv, true);
+	mutex_unlock(&priv->mutex);
+
+	return count;
+}
+
+static ssize_t iwl_dbgfs_sleep_level_override_read(struct file *file,
+						   char __user *user_buf,
+						   size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	char buf[10];
+	int pos, value;
+	const size_t bufsz = sizeof(buf);
+
+	/* see the write function */
+	value = priv->power_data.debug_sleep_level_override;
+	if (value >= 0)
+		value += 1;
+
+	pos = scnprintf(buf, bufsz, "%d\n", value);
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
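+
+/*
+ * Illustrative mapping (derived from the handlers above): the
+ * user-visible numbering is shifted by one against the internal power
+ * indices, so
+ *
+ *	write -1  ->  override disabled (automatic selection)
+ *	write  1  ->  IWL_POWER_INDEX_1 (internal value 0)
+ *	write  5  ->  IWL_POWER_INDEX_5 (internal value 4)
+ *
+ * and the read handler adds the 1 back before printing.
+ */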
+
+static ssize_t iwl_dbgfs_current_sleep_command_read(struct file *file,
+						    char __user *user_buf,
+						    size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	char buf[200];
+	int pos = 0, i;
+	const size_t bufsz = sizeof(buf);
+	struct iwl_powertable_cmd *cmd = &priv->power_data.sleep_cmd;
+
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "flags: %#.2x\n", le16_to_cpu(cmd->flags));
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "RX/TX timeout: %d/%d usec\n",
+			 le32_to_cpu(cmd->rx_data_timeout),
+			 le32_to_cpu(cmd->tx_data_timeout));
+	for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
+		pos += scnprintf(buf + pos, bufsz - pos,
+				 "sleep_interval[%d]: %d\n", i,
+				 le32_to_cpu(cmd->sleep_interval[i]));
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+DEBUGFS_READ_WRITE_FILE_OPS(sram);
+DEBUGFS_READ_FILE_OPS(wowlan_sram);
+DEBUGFS_READ_FILE_OPS(nvm);
+DEBUGFS_READ_FILE_OPS(stations);
+DEBUGFS_READ_FILE_OPS(channels);
+DEBUGFS_READ_FILE_OPS(status);
+DEBUGFS_READ_WRITE_FILE_OPS(rx_handlers);
+DEBUGFS_READ_FILE_OPS(qos);
+DEBUGFS_READ_FILE_OPS(thermal_throttling);
+DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
+DEBUGFS_READ_FILE_OPS(temperature);
+DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
+DEBUGFS_READ_FILE_OPS(current_sleep_command);
+
+#define fmt_value	"  %-30s %10u\n"
+#define fmt_hex		"  %-30s       0x%02X\n"
+#define fmt_table	"  %-30s %10u  %10u  %10u  %10u\n"
+#define fmt_header	"%-32s    current  cumulative       delta         max\n"
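+
+/*
+ * Illustrative rendering (sample values assumed): fmt_header prints
+ * the column banner and fmt_table one four-column row, roughly
+ *
+ *	Statistics_Rx - OFDM:            current  cumulative  delta  max
+ *	  ina_cnt:                            12         340      3   57
+ *
+ * i.e. the value from the last notification, the running sum, the
+ * last delta and the largest delta seen.
+ */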
+
+static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
+{
+	int p = 0;
+	u32 flag;
+
+	lockdep_assert_held(&priv->statistics.lock);
+
+	flag = le32_to_cpu(priv->statistics.flag);
+
+	p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
+	if (flag & UCODE_STATISTICS_CLEAR_MSK)
+		p += scnprintf(buf + p, bufsz - p,
+		"\tStatistics have been cleared\n");
+	p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
+		(flag & UCODE_STATISTICS_FREQUENCY_MSK)
+		? "2.4 GHz" : "5.2 GHz");
+	p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
+		(flag & UCODE_STATISTICS_NARROW_BAND_MSK)
+		 ? "enabled" : "disabled");
+
+	return p;
+}
+
+static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
+					char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	int pos = 0;
+	char *buf;
+	int bufsz = sizeof(struct statistics_rx_phy) * 40 +
+		    sizeof(struct statistics_rx_non_phy) * 40 +
+		    sizeof(struct statistics_rx_ht_phy) * 40 + 400;
+	ssize_t ret;
+	struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
+	struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
+	struct statistics_rx_non_phy *general, *accum_general;
+	struct statistics_rx_non_phy *delta_general, *max_general;
+	struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
+
+	if (!iwl_is_alive(priv))
+		return -EAGAIN;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/*
+	 * the statistics information displayed here is based on
+	 * the last statistics notification from uCode and
+	 * might not reflect the current uCode activity
+	 */
+	spin_lock_bh(&priv->statistics.lock);
+	ofdm = &priv->statistics.rx_ofdm;
+	cck = &priv->statistics.rx_cck;
+	general = &priv->statistics.rx_non_phy;
+	ht = &priv->statistics.rx_ofdm_ht;
+	accum_ofdm = &priv->accum_stats.rx_ofdm;
+	accum_cck = &priv->accum_stats.rx_cck;
+	accum_general = &priv->accum_stats.rx_non_phy;
+	accum_ht = &priv->accum_stats.rx_ofdm_ht;
+	delta_ofdm = &priv->delta_stats.rx_ofdm;
+	delta_cck = &priv->delta_stats.rx_cck;
+	delta_general = &priv->delta_stats.rx_non_phy;
+	delta_ht = &priv->delta_stats.rx_ofdm_ht;
+	max_ofdm = &priv->max_delta_stats.rx_ofdm;
+	max_cck = &priv->max_delta_stats.rx_cck;
+	max_general = &priv->max_delta_stats.rx_non_phy;
+	max_ht = &priv->max_delta_stats.rx_ofdm_ht;
+
+	pos += iwl_statistics_flag(priv, buf, bufsz);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_header, "Statistics_Rx - OFDM:");
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "ina_cnt:",
+			 le32_to_cpu(ofdm->ina_cnt),
+			 accum_ofdm->ina_cnt,
+			 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "fina_cnt:",
+			 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
+			 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "plcp_err:",
+			 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
+			 delta_ofdm->plcp_err, max_ofdm->plcp_err);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "crc32_err:",
+			 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
+			 delta_ofdm->crc32_err, max_ofdm->crc32_err);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "overrun_err:",
+			 le32_to_cpu(ofdm->overrun_err),
+			 accum_ofdm->overrun_err, delta_ofdm->overrun_err,
+			 max_ofdm->overrun_err);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "early_overrun_err:",
+			 le32_to_cpu(ofdm->early_overrun_err),
+			 accum_ofdm->early_overrun_err,
+			 delta_ofdm->early_overrun_err,
+			 max_ofdm->early_overrun_err);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "crc32_good:",
+			 le32_to_cpu(ofdm->crc32_good),
+			 accum_ofdm->crc32_good, delta_ofdm->crc32_good,
+			 max_ofdm->crc32_good);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "false_alarm_cnt:",
+			 le32_to_cpu(ofdm->false_alarm_cnt),
+			 accum_ofdm->false_alarm_cnt,
+			 delta_ofdm->false_alarm_cnt,
+			 max_ofdm->false_alarm_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "fina_sync_err_cnt:",
+			 le32_to_cpu(ofdm->fina_sync_err_cnt),
+			 accum_ofdm->fina_sync_err_cnt,
+			 delta_ofdm->fina_sync_err_cnt,
+			 max_ofdm->fina_sync_err_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "sfd_timeout:",
+			 le32_to_cpu(ofdm->sfd_timeout),
+			 accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout,
+			 max_ofdm->sfd_timeout);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "fina_timeout:",
+			 le32_to_cpu(ofdm->fina_timeout),
+			 accum_ofdm->fina_timeout, delta_ofdm->fina_timeout,
+			 max_ofdm->fina_timeout);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "unresponded_rts:",
+			 le32_to_cpu(ofdm->unresponded_rts),
+			 accum_ofdm->unresponded_rts,
+			 delta_ofdm->unresponded_rts,
+			 max_ofdm->unresponded_rts);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "rxe_frame_lmt_ovrun:",
+			 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
+			 accum_ofdm->rxe_frame_limit_overrun,
+			 delta_ofdm->rxe_frame_limit_overrun,
+			 max_ofdm->rxe_frame_limit_overrun);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "sent_ack_cnt:",
+			 le32_to_cpu(ofdm->sent_ack_cnt),
+			 accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt,
+			 max_ofdm->sent_ack_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "sent_cts_cnt:",
+			 le32_to_cpu(ofdm->sent_cts_cnt),
+			 accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt,
+			 max_ofdm->sent_cts_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "sent_ba_rsp_cnt:",
+			 le32_to_cpu(ofdm->sent_ba_rsp_cnt),
+			 accum_ofdm->sent_ba_rsp_cnt,
+			 delta_ofdm->sent_ba_rsp_cnt,
+			 max_ofdm->sent_ba_rsp_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "dsp_self_kill:",
+			 le32_to_cpu(ofdm->dsp_self_kill),
+			 accum_ofdm->dsp_self_kill,
+			 delta_ofdm->dsp_self_kill,
+			 max_ofdm->dsp_self_kill);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "mh_format_err:",
+			 le32_to_cpu(ofdm->mh_format_err),
+			 accum_ofdm->mh_format_err,
+			 delta_ofdm->mh_format_err,
+			 max_ofdm->mh_format_err);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "re_acq_main_rssi_sum:",
+			 le32_to_cpu(ofdm->re_acq_main_rssi_sum),
+			 accum_ofdm->re_acq_main_rssi_sum,
+			 delta_ofdm->re_acq_main_rssi_sum,
+			 max_ofdm->re_acq_main_rssi_sum);
+
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_header, "Statistics_Rx - CCK:");
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "ina_cnt:",
+			 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
+			 delta_cck->ina_cnt, max_cck->ina_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "fina_cnt:",
+			 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
+			 delta_cck->fina_cnt, max_cck->fina_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "plcp_err:",
+			 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
+			 delta_cck->plcp_err, max_cck->plcp_err);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "crc32_err:",
+			 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
+			 delta_cck->crc32_err, max_cck->crc32_err);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "overrun_err:",
+			 le32_to_cpu(cck->overrun_err),
+			 accum_cck->overrun_err, delta_cck->overrun_err,
+			 max_cck->overrun_err);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "early_overrun_err:",
+			 le32_to_cpu(cck->early_overrun_err),
+			 accum_cck->early_overrun_err,
+			 delta_cck->early_overrun_err,
+			 max_cck->early_overrun_err);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "crc32_good:",
+			 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
+			 delta_cck->crc32_good, max_cck->crc32_good);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "false_alarm_cnt:",
+			 le32_to_cpu(cck->false_alarm_cnt),
+			 accum_cck->false_alarm_cnt,
+			 delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "fina_sync_err_cnt:",
+			 le32_to_cpu(cck->fina_sync_err_cnt),
+			 accum_cck->fina_sync_err_cnt,
+			 delta_cck->fina_sync_err_cnt,
+			 max_cck->fina_sync_err_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "sfd_timeout:",
+			 le32_to_cpu(cck->sfd_timeout),
+			 accum_cck->sfd_timeout, delta_cck->sfd_timeout,
+			 max_cck->sfd_timeout);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "fina_timeout:",
+			 le32_to_cpu(cck->fina_timeout),
+			 accum_cck->fina_timeout, delta_cck->fina_timeout,
+			 max_cck->fina_timeout);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "unresponded_rts:",
+			 le32_to_cpu(cck->unresponded_rts),
+			 accum_cck->unresponded_rts, delta_cck->unresponded_rts,
+			 max_cck->unresponded_rts);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "rxe_frame_lmt_ovrun:",
+			 le32_to_cpu(cck->rxe_frame_limit_overrun),
+			 accum_cck->rxe_frame_limit_overrun,
+			 delta_cck->rxe_frame_limit_overrun,
+			 max_cck->rxe_frame_limit_overrun);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "sent_ack_cnt:",
+			 le32_to_cpu(cck->sent_ack_cnt),
+			 accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt,
+			 max_cck->sent_ack_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "sent_cts_cnt:",
+			 le32_to_cpu(cck->sent_cts_cnt),
+			 accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt,
+			 max_cck->sent_cts_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "sent_ba_rsp_cnt:",
+			 le32_to_cpu(cck->sent_ba_rsp_cnt),
+			 accum_cck->sent_ba_rsp_cnt,
+			 delta_cck->sent_ba_rsp_cnt,
+			 max_cck->sent_ba_rsp_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "dsp_self_kill:",
+			 le32_to_cpu(cck->dsp_self_kill),
+			 accum_cck->dsp_self_kill, delta_cck->dsp_self_kill,
+			 max_cck->dsp_self_kill);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "mh_format_err:",
+			 le32_to_cpu(cck->mh_format_err),
+			 accum_cck->mh_format_err, delta_cck->mh_format_err,
+			 max_cck->mh_format_err);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "re_acq_main_rssi_sum:",
+			 le32_to_cpu(cck->re_acq_main_rssi_sum),
+			 accum_cck->re_acq_main_rssi_sum,
+			 delta_cck->re_acq_main_rssi_sum,
+			 max_cck->re_acq_main_rssi_sum);
+
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_header, "Statistics_Rx - GENERAL:");
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "bogus_cts:",
+			 le32_to_cpu(general->bogus_cts),
+			 accum_general->bogus_cts, delta_general->bogus_cts,
+			 max_general->bogus_cts);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "bogus_ack:",
+			 le32_to_cpu(general->bogus_ack),
+			 accum_general->bogus_ack, delta_general->bogus_ack,
+			 max_general->bogus_ack);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "non_bssid_frames:",
+			 le32_to_cpu(general->non_bssid_frames),
+			 accum_general->non_bssid_frames,
+			 delta_general->non_bssid_frames,
+			 max_general->non_bssid_frames);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "filtered_frames:",
+			 le32_to_cpu(general->filtered_frames),
+			 accum_general->filtered_frames,
+			 delta_general->filtered_frames,
+			 max_general->filtered_frames);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "non_channel_beacons:",
+			 le32_to_cpu(general->non_channel_beacons),
+			 accum_general->non_channel_beacons,
+			 delta_general->non_channel_beacons,
+			 max_general->non_channel_beacons);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "channel_beacons:",
+			 le32_to_cpu(general->channel_beacons),
+			 accum_general->channel_beacons,
+			 delta_general->channel_beacons,
+			 max_general->channel_beacons);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "num_missed_bcon:",
+			 le32_to_cpu(general->num_missed_bcon),
+			 accum_general->num_missed_bcon,
+			 delta_general->num_missed_bcon,
+			 max_general->num_missed_bcon);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "adc_rx_saturation_time:",
+			 le32_to_cpu(general->adc_rx_saturation_time),
+			 accum_general->adc_rx_saturation_time,
+			 delta_general->adc_rx_saturation_time,
+			 max_general->adc_rx_saturation_time);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "ina_detect_search_tm:",
+			 le32_to_cpu(general->ina_detection_search_time),
+			 accum_general->ina_detection_search_time,
+			 delta_general->ina_detection_search_time,
+			 max_general->ina_detection_search_time);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "beacon_silence_rssi_a:",
+			 le32_to_cpu(general->beacon_silence_rssi_a),
+			 accum_general->beacon_silence_rssi_a,
+			 delta_general->beacon_silence_rssi_a,
+			 max_general->beacon_silence_rssi_a);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "beacon_silence_rssi_b:",
+			 le32_to_cpu(general->beacon_silence_rssi_b),
+			 accum_general->beacon_silence_rssi_b,
+			 delta_general->beacon_silence_rssi_b,
+			 max_general->beacon_silence_rssi_b);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "beacon_silence_rssi_c:",
+			 le32_to_cpu(general->beacon_silence_rssi_c),
+			 accum_general->beacon_silence_rssi_c,
+			 delta_general->beacon_silence_rssi_c,
+			 max_general->beacon_silence_rssi_c);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "interference_data_flag:",
+			 le32_to_cpu(general->interference_data_flag),
+			 accum_general->interference_data_flag,
+			 delta_general->interference_data_flag,
+			 max_general->interference_data_flag);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "channel_load:",
+			 le32_to_cpu(general->channel_load),
+			 accum_general->channel_load,
+			 delta_general->channel_load,
+			 max_general->channel_load);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "dsp_false_alarms:",
+			 le32_to_cpu(general->dsp_false_alarms),
+			 accum_general->dsp_false_alarms,
+			 delta_general->dsp_false_alarms,
+			 max_general->dsp_false_alarms);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "beacon_rssi_a:",
+			 le32_to_cpu(general->beacon_rssi_a),
+			 accum_general->beacon_rssi_a,
+			 delta_general->beacon_rssi_a,
+			 max_general->beacon_rssi_a);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "beacon_rssi_b:",
+			 le32_to_cpu(general->beacon_rssi_b),
+			 accum_general->beacon_rssi_b,
+			 delta_general->beacon_rssi_b,
+			 max_general->beacon_rssi_b);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "beacon_rssi_c:",
+			 le32_to_cpu(general->beacon_rssi_c),
+			 accum_general->beacon_rssi_c,
+			 delta_general->beacon_rssi_c,
+			 max_general->beacon_rssi_c);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "beacon_energy_a:",
+			 le32_to_cpu(general->beacon_energy_a),
+			 accum_general->beacon_energy_a,
+			 delta_general->beacon_energy_a,
+			 max_general->beacon_energy_a);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "beacon_energy_b:",
+			 le32_to_cpu(general->beacon_energy_b),
+			 accum_general->beacon_energy_b,
+			 delta_general->beacon_energy_b,
+			 max_general->beacon_energy_b);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "beacon_energy_c:",
+			 le32_to_cpu(general->beacon_energy_c),
+			 accum_general->beacon_energy_c,
+			 delta_general->beacon_energy_c,
+			 max_general->beacon_energy_c);
+
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_header, "Statistics_Rx - OFDM_HT:");
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "plcp_err:",
+			 le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
+			 delta_ht->plcp_err, max_ht->plcp_err);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "overrun_err:",
+			 le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
+			 delta_ht->overrun_err, max_ht->overrun_err);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "early_overrun_err:",
+			 le32_to_cpu(ht->early_overrun_err),
+			 accum_ht->early_overrun_err,
+			 delta_ht->early_overrun_err,
+			 max_ht->early_overrun_err);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "crc32_good:",
+			 le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
+			 delta_ht->crc32_good, max_ht->crc32_good);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "crc32_err:",
+			 le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
+			 delta_ht->crc32_err, max_ht->crc32_err);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "mh_format_err:",
+			 le32_to_cpu(ht->mh_format_err),
+			 accum_ht->mh_format_err,
+			 delta_ht->mh_format_err, max_ht->mh_format_err);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "agg_crc32_good:",
+			 le32_to_cpu(ht->agg_crc32_good),
+			 accum_ht->agg_crc32_good,
+			 delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "agg_mpdu_cnt:",
+			 le32_to_cpu(ht->agg_mpdu_cnt),
+			 accum_ht->agg_mpdu_cnt,
+			 delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "agg_cnt:",
+			 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
+			 delta_ht->agg_cnt, max_ht->agg_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "unsupport_mcs:",
+			 le32_to_cpu(ht->unsupport_mcs),
+			 accum_ht->unsupport_mcs,
+			 delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
+
+	spin_unlock_bh(&priv->statistics.lock);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
+					char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	int pos = 0;
+	char *buf;
+	int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
+	ssize_t ret;
+	struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
+
+	if (!iwl_is_alive(priv))
+		return -EAGAIN;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/* the statistics information displayed here is based on
+	 * the last statistics notification from uCode and
+	 * might not reflect the current uCode activity
+	 */
+	spin_lock_bh(&priv->statistics.lock);
+
+	tx = &priv->statistics.tx;
+	accum_tx = &priv->accum_stats.tx;
+	delta_tx = &priv->delta_stats.tx;
+	max_tx = &priv->max_delta_stats.tx;
+
+	pos += iwl_statistics_flag(priv, buf, bufsz);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_header, "Statistics_Tx:");
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "preamble:",
+			 le32_to_cpu(tx->preamble_cnt),
+			 accum_tx->preamble_cnt,
+			 delta_tx->preamble_cnt, max_tx->preamble_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "rx_detected_cnt:",
+			 le32_to_cpu(tx->rx_detected_cnt),
+			 accum_tx->rx_detected_cnt,
+			 delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "bt_prio_defer_cnt:",
+			 le32_to_cpu(tx->bt_prio_defer_cnt),
+			 accum_tx->bt_prio_defer_cnt,
+			 delta_tx->bt_prio_defer_cnt,
+			 max_tx->bt_prio_defer_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "bt_prio_kill_cnt:",
+			 le32_to_cpu(tx->bt_prio_kill_cnt),
+			 accum_tx->bt_prio_kill_cnt,
+			 delta_tx->bt_prio_kill_cnt,
+			 max_tx->bt_prio_kill_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "few_bytes_cnt:",
+			 le32_to_cpu(tx->few_bytes_cnt),
+			 accum_tx->few_bytes_cnt,
+			 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "cts_timeout:",
+			 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
+			 delta_tx->cts_timeout, max_tx->cts_timeout);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "ack_timeout:",
+			 le32_to_cpu(tx->ack_timeout),
+			 accum_tx->ack_timeout,
+			 delta_tx->ack_timeout, max_tx->ack_timeout);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "expected_ack_cnt:",
+			 le32_to_cpu(tx->expected_ack_cnt),
+			 accum_tx->expected_ack_cnt,
+			 delta_tx->expected_ack_cnt,
+			 max_tx->expected_ack_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "actual_ack_cnt:",
+			 le32_to_cpu(tx->actual_ack_cnt),
+			 accum_tx->actual_ack_cnt,
+			 delta_tx->actual_ack_cnt,
+			 max_tx->actual_ack_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "dump_msdu_cnt:",
+			 le32_to_cpu(tx->dump_msdu_cnt),
+			 accum_tx->dump_msdu_cnt,
+			 delta_tx->dump_msdu_cnt,
+			 max_tx->dump_msdu_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "abort_nxt_frame_mismatch:",
+			 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
+			 accum_tx->burst_abort_next_frame_mismatch_cnt,
+			 delta_tx->burst_abort_next_frame_mismatch_cnt,
+			 max_tx->burst_abort_next_frame_mismatch_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "abort_missing_nxt_frame:",
+			 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
+			 accum_tx->burst_abort_missing_next_frame_cnt,
+			 delta_tx->burst_abort_missing_next_frame_cnt,
+			 max_tx->burst_abort_missing_next_frame_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "cts_timeout_collision:",
+			 le32_to_cpu(tx->cts_timeout_collision),
+			 accum_tx->cts_timeout_collision,
+			 delta_tx->cts_timeout_collision,
+			 max_tx->cts_timeout_collision);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "ack_ba_timeout_collision:",
+			 le32_to_cpu(tx->ack_or_ba_timeout_collision),
+			 accum_tx->ack_or_ba_timeout_collision,
+			 delta_tx->ack_or_ba_timeout_collision,
+			 max_tx->ack_or_ba_timeout_collision);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "agg ba_timeout:",
+			 le32_to_cpu(tx->agg.ba_timeout),
+			 accum_tx->agg.ba_timeout,
+			 delta_tx->agg.ba_timeout,
+			 max_tx->agg.ba_timeout);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "agg ba_resched_frames:",
+			 le32_to_cpu(tx->agg.ba_reschedule_frames),
+			 accum_tx->agg.ba_reschedule_frames,
+			 delta_tx->agg.ba_reschedule_frames,
+			 max_tx->agg.ba_reschedule_frames);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "agg scd_query_agg_frame:",
+			 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
+			 accum_tx->agg.scd_query_agg_frame_cnt,
+			 delta_tx->agg.scd_query_agg_frame_cnt,
+			 max_tx->agg.scd_query_agg_frame_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "agg scd_query_no_agg:",
+			 le32_to_cpu(tx->agg.scd_query_no_agg),
+			 accum_tx->agg.scd_query_no_agg,
+			 delta_tx->agg.scd_query_no_agg,
+			 max_tx->agg.scd_query_no_agg);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "agg scd_query_agg:",
+			 le32_to_cpu(tx->agg.scd_query_agg),
+			 accum_tx->agg.scd_query_agg,
+			 delta_tx->agg.scd_query_agg,
+			 max_tx->agg.scd_query_agg);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "agg scd_query_mismatch:",
+			 le32_to_cpu(tx->agg.scd_query_mismatch),
+			 accum_tx->agg.scd_query_mismatch,
+			 delta_tx->agg.scd_query_mismatch,
+			 max_tx->agg.scd_query_mismatch);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "agg frame_not_ready:",
+			 le32_to_cpu(tx->agg.frame_not_ready),
+			 accum_tx->agg.frame_not_ready,
+			 delta_tx->agg.frame_not_ready,
+			 max_tx->agg.frame_not_ready);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "agg underrun:",
+			 le32_to_cpu(tx->agg.underrun),
+			 accum_tx->agg.underrun,
+			 delta_tx->agg.underrun, max_tx->agg.underrun);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "agg bt_prio_kill:",
+			 le32_to_cpu(tx->agg.bt_prio_kill),
+			 accum_tx->agg.bt_prio_kill,
+			 delta_tx->agg.bt_prio_kill,
+			 max_tx->agg.bt_prio_kill);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "agg rx_ba_rsp_cnt:",
+			 le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
+			 accum_tx->agg.rx_ba_rsp_cnt,
+			 delta_tx->agg.rx_ba_rsp_cnt,
+			 max_tx->agg.rx_ba_rsp_cnt);
+
+	if (tx->tx_power.ant_a || tx->tx_power.ant_b || tx->tx_power.ant_c) {
+		pos += scnprintf(buf + pos, bufsz - pos,
+			"tx power: (1/2 dB step)\n");
+		if ((priv->nvm_data->valid_tx_ant & ANT_A) &&
+		    tx->tx_power.ant_a)
+			pos += scnprintf(buf + pos, bufsz - pos,
+					fmt_hex, "antenna A:",
+					tx->tx_power.ant_a);
+		if ((priv->nvm_data->valid_tx_ant & ANT_B) &&
+		    tx->tx_power.ant_b)
+			pos += scnprintf(buf + pos, bufsz - pos,
+					fmt_hex, "antenna B:",
+					tx->tx_power.ant_b);
+		if ((priv->nvm_data->valid_tx_ant & ANT_C) &&
+		    tx->tx_power.ant_c)
+			pos += scnprintf(buf + pos, bufsz - pos,
+					fmt_hex, "antenna C:",
+					tx->tx_power.ant_c);
+	}
+
+	spin_unlock_bh(&priv->statistics.lock);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
+					char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	int pos = 0;
+	char *buf;
+	int bufsz = sizeof(struct statistics_general) * 10 + 300;
+	ssize_t ret;
+	struct statistics_general_common *general, *accum_general;
+	struct statistics_general_common *delta_general, *max_general;
+	struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
+	struct statistics_div *div, *accum_div, *delta_div, *max_div;
+
+	if (!iwl_is_alive(priv))
+		return -EAGAIN;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/* the statistics information displayed here is based on
+	 * the last statistics notification from uCode and
+	 * might not reflect the current uCode activity
+	 */
+
+	spin_lock_bh(&priv->statistics.lock);
+
+	general = &priv->statistics.common;
+	dbg = &priv->statistics.common.dbg;
+	div = &priv->statistics.common.div;
+	accum_general = &priv->accum_stats.common;
+	accum_dbg = &priv->accum_stats.common.dbg;
+	accum_div = &priv->accum_stats.common.div;
+	delta_general = &priv->delta_stats.common;
+	max_general = &priv->max_delta_stats.common;
+	delta_dbg = &priv->delta_stats.common.dbg;
+	max_dbg = &priv->max_delta_stats.common.dbg;
+	delta_div = &priv->delta_stats.common.div;
+	max_div = &priv->max_delta_stats.common.div;
+
+	pos += iwl_statistics_flag(priv, buf, bufsz);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_header, "Statistics_General:");
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_value, "temperature:",
+			 le32_to_cpu(general->temperature));
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_value, "temperature_m:",
+			 le32_to_cpu(general->temperature_m));
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_value, "ttl_timestamp:",
+			 le32_to_cpu(general->ttl_timestamp));
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "burst_check:",
+			 le32_to_cpu(dbg->burst_check),
+			 accum_dbg->burst_check,
+			 delta_dbg->burst_check, max_dbg->burst_check);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "burst_count:",
+			 le32_to_cpu(dbg->burst_count),
+			 accum_dbg->burst_count,
+			 delta_dbg->burst_count, max_dbg->burst_count);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "wait_for_silence_timeout_count:",
+			 le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
+			 accum_dbg->wait_for_silence_timeout_cnt,
+			 delta_dbg->wait_for_silence_timeout_cnt,
+			 max_dbg->wait_for_silence_timeout_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "sleep_time:",
+			 le32_to_cpu(general->sleep_time),
+			 accum_general->sleep_time,
+			 delta_general->sleep_time, max_general->sleep_time);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "slots_out:",
+			 le32_to_cpu(general->slots_out),
+			 accum_general->slots_out,
+			 delta_general->slots_out, max_general->slots_out);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "slots_idle:",
+			 le32_to_cpu(general->slots_idle),
+			 accum_general->slots_idle,
+			 delta_general->slots_idle, max_general->slots_idle);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "tx_on_a:",
+			 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
+			 delta_div->tx_on_a, max_div->tx_on_a);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "tx_on_b:",
+			 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
+			 delta_div->tx_on_b, max_div->tx_on_b);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "exec_time:",
+			 le32_to_cpu(div->exec_time), accum_div->exec_time,
+			 delta_div->exec_time, max_div->exec_time);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "probe_time:",
+			 le32_to_cpu(div->probe_time), accum_div->probe_time,
+			 delta_div->probe_time, max_div->probe_time);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "rx_enable_counter:",
+			 le32_to_cpu(general->rx_enable_counter),
+			 accum_general->rx_enable_counter,
+			 delta_general->rx_enable_counter,
+			 max_general->rx_enable_counter);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 fmt_table, "num_of_sos_states:",
+			 le32_to_cpu(general->num_of_sos_states),
+			 accum_general->num_of_sos_states,
+			 delta_general->num_of_sos_states,
+			 max_general->num_of_sos_states);
+
+	spin_unlock_bh(&priv->statistics.lock);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t iwl_dbgfs_ucode_bt_stats_read(struct file *file,
+					char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+	int pos = 0;
+	char *buf;
+	int bufsz = (sizeof(struct statistics_bt_activity) * 24) + 200;
+	ssize_t ret;
+	struct statistics_bt_activity *bt, *accum_bt;
+
+	if (!iwl_is_alive(priv))
+		return -EAGAIN;
+
+	if (!priv->bt_enable_flag)
+		return -EINVAL;
+
+	/* make request to uCode to retrieve statistics information */
+	mutex_lock(&priv->mutex);
+	ret = iwl_send_statistics_request(priv, 0, false);
+	mutex_unlock(&priv->mutex);
+
+	if (ret)
+		return -EAGAIN;
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/*
+	 * the statistics information displayed here is based on
+	 * the last statistics notification from uCode and
+	 * might not reflect the current uCode activity
+	 */
+
+	spin_lock_bh(&priv->statistics.lock);
+
+	bt = &priv->statistics.bt_activity;
+	accum_bt = &priv->accum_stats.bt_activity;
+
+	pos += iwl_statistics_flag(priv, buf, bufsz);
+	pos += scnprintf(buf + pos, bufsz - pos, "Statistics_BT:\n");
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"\t\t\tcurrent\t\t\taccumulative\n");
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "hi_priority_tx_req_cnt:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(bt->hi_priority_tx_req_cnt),
+			 accum_bt->hi_priority_tx_req_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "hi_priority_tx_denied_cnt:\t%u\t\t\t%u\n",
+			 le32_to_cpu(bt->hi_priority_tx_denied_cnt),
+			 accum_bt->hi_priority_tx_denied_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "lo_priority_tx_req_cnt:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(bt->lo_priority_tx_req_cnt),
+			 accum_bt->lo_priority_tx_req_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "lo_priority_tx_denied_cnt:\t%u\t\t\t%u\n",
+			 le32_to_cpu(bt->lo_priority_tx_denied_cnt),
+			 accum_bt->lo_priority_tx_denied_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "hi_priority_rx_req_cnt:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(bt->hi_priority_rx_req_cnt),
+			 accum_bt->hi_priority_rx_req_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "hi_priority_rx_denied_cnt:\t%u\t\t\t%u\n",
+			 le32_to_cpu(bt->hi_priority_rx_denied_cnt),
+			 accum_bt->hi_priority_rx_denied_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "lo_priority_rx_req_cnt:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(bt->lo_priority_rx_req_cnt),
+			 accum_bt->lo_priority_rx_req_cnt);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "lo_priority_rx_denied_cnt:\t%u\t\t\t%u\n",
+			 le32_to_cpu(bt->lo_priority_rx_denied_cnt),
+			 accum_bt->lo_priority_rx_denied_cnt);
+
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "(rx)num_bt_kills:\t\t%u\t\t\t%u\n",
+			 le32_to_cpu(priv->statistics.num_bt_kills),
+			 priv->statistics.accum_num_bt_kills);
+
+	spin_unlock_bh(&priv->statistics.lock);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t iwl_dbgfs_reply_tx_error_read(struct file *file,
+					char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+	int pos = 0;
+	char *buf;
+	int bufsz = (sizeof(struct reply_tx_error_statistics) * 24) +
+		(sizeof(struct reply_agg_tx_error_statistics) * 24) + 200;
+	ssize_t ret;
+
+	if (!iwl_is_alive(priv))
+		return -EAGAIN;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	pos += scnprintf(buf + pos, bufsz - pos, "Statistics_TX_Error:\n");
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_DELAY),
+			 priv->reply_tx_stats.pp_delay);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_FEW_BYTES),
+			 priv->reply_tx_stats.pp_few_bytes);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_BT_PRIO),
+			 priv->reply_tx_stats.pp_bt_prio);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_QUIET_PERIOD),
+			 priv->reply_tx_stats.pp_quiet_period);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_POSTPONE_CALC_TTAK),
+			 priv->reply_tx_stats.pp_calc_ttak);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
+			 iwl_get_tx_fail_reason(
+				TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY),
+			 priv->reply_tx_stats.int_crossed_retry);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_FAIL_SHORT_LIMIT),
+			 priv->reply_tx_stats.short_limit);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_FAIL_LONG_LIMIT),
+			 priv->reply_tx_stats.long_limit);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_FAIL_FIFO_UNDERRUN),
+			 priv->reply_tx_stats.fifo_underrun);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_FAIL_DRAIN_FLOW),
+			 priv->reply_tx_stats.drain_flow);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_FAIL_RFKILL_FLUSH),
+			 priv->reply_tx_stats.rfkill_flush);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_FAIL_LIFE_EXPIRE),
+			 priv->reply_tx_stats.life_expire);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_FAIL_DEST_PS),
+			 priv->reply_tx_stats.dest_ps);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_FAIL_HOST_ABORTED),
+			 priv->reply_tx_stats.host_abort);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_FAIL_BT_RETRY),
+			 priv->reply_tx_stats.bt_retry);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_FAIL_STA_INVALID),
+			 priv->reply_tx_stats.sta_invalid);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_FAIL_FRAG_DROPPED),
+			 priv->reply_tx_stats.frag_drop);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_FAIL_TID_DISABLE),
+			 priv->reply_tx_stats.tid_disable);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_FAIL_FIFO_FLUSHED),
+			 priv->reply_tx_stats.fifo_flush);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
+			 iwl_get_tx_fail_reason(
+				TX_STATUS_FAIL_INSUFFICIENT_CF_POLL),
+			 priv->reply_tx_stats.insuff_cf_poll);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_tx_fail_reason(TX_STATUS_FAIL_PASSIVE_NO_RX),
+			 priv->reply_tx_stats.fail_hw_drop);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
+			 iwl_get_tx_fail_reason(
+				TX_STATUS_FAIL_NO_BEACON_ON_RADAR),
+			 priv->reply_tx_stats.sta_color_mismatch);
+	pos += scnprintf(buf + pos, bufsz - pos, "UNKNOWN:\t\t\t%u\n",
+			 priv->reply_tx_stats.unknown);
+
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "\nStatistics_Agg_TX_Error:\n");
+
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_UNDERRUN_MSK),
+			 priv->reply_agg_tx_stats.underrun);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_BT_PRIO_MSK),
+			 priv->reply_agg_tx_stats.bt_prio);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_FEW_BYTES_MSK),
+			 priv->reply_agg_tx_stats.few_bytes);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_ABORT_MSK),
+			 priv->reply_agg_tx_stats.abort);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
+			 iwl_get_agg_tx_fail_reason(
+				AGG_TX_STATE_LAST_SENT_TTL_MSK),
+			 priv->reply_agg_tx_stats.last_sent_ttl);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
+			 iwl_get_agg_tx_fail_reason(
+				AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK),
+			 priv->reply_agg_tx_stats.last_sent_try);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
+			 iwl_get_agg_tx_fail_reason(
+				AGG_TX_STATE_LAST_SENT_BT_KILL_MSK),
+			 priv->reply_agg_tx_stats.last_sent_bt_kill);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_SCD_QUERY_MSK),
+			 priv->reply_agg_tx_stats.scd_query);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t%u\n",
+			 iwl_get_agg_tx_fail_reason(
+				AGG_TX_STATE_TEST_BAD_CRC32_MSK),
+			 priv->reply_agg_tx_stats.bad_crc32);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_RESPONSE_MSK),
+			 priv->reply_agg_tx_stats.response);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_DUMP_TX_MSK),
+			 priv->reply_agg_tx_stats.dump_tx);
+	pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t%u\n",
+			 iwl_get_agg_tx_fail_reason(AGG_TX_STATE_DELAY_TX_MSK),
+			 priv->reply_agg_tx_stats.delay_tx);
+	pos += scnprintf(buf + pos, bufsz - pos, "UNKNOWN:\t\t\t%u\n",
+			 priv->reply_agg_tx_stats.unknown);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t iwl_dbgfs_sensitivity_read(struct file *file,
+					char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	int pos = 0;
+	int cnt = 0;
+	char *buf;
+	int bufsz = sizeof(struct iwl_sensitivity_data) * 4 + 100;
+	ssize_t ret;
+	struct iwl_sensitivity_data *data;
+
+	data = &priv->sensitivity_data;
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n",
+			data->auto_corr_ofdm);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"auto_corr_ofdm_mrc:\t\t %u\n",
+			data->auto_corr_ofdm_mrc);
+	pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n",
+			data->auto_corr_ofdm_x1);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"auto_corr_ofdm_mrc_x1:\t\t %u\n",
+			data->auto_corr_ofdm_mrc_x1);
+	pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n",
+			data->auto_corr_cck);
+	pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n",
+			data->auto_corr_cck_mrc);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"last_bad_plcp_cnt_ofdm:\t\t %u\n",
+			data->last_bad_plcp_cnt_ofdm);
+	pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n",
+			data->last_fa_cnt_ofdm);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"last_bad_plcp_cnt_cck:\t\t %u\n",
+			data->last_bad_plcp_cnt_cck);
+	pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n",
+			data->last_fa_cnt_cck);
+	pos += scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n",
+			data->nrg_curr_state);
+	pos += scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n",
+			data->nrg_prev_state);
+	pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t");
+	for (cnt = 0; cnt < 10; cnt++) {
+		pos += scnprintf(buf + pos, bufsz - pos, " %u",
+				data->nrg_value[cnt]);
+	}
+	pos += scnprintf(buf + pos, bufsz - pos, "\n");
+	pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t");
+	for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) {
+		pos += scnprintf(buf + pos, bufsz - pos, " %u",
+				data->nrg_silence_rssi[cnt]);
+	}
+	pos += scnprintf(buf + pos, bufsz - pos, "\n");
+	pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n",
+			data->nrg_silence_ref);
+	pos += scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n",
+			data->nrg_energy_idx);
+	pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n",
+			data->nrg_silence_idx);
+	pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n",
+			data->nrg_th_cck);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"nrg_auto_corr_silence_diff:\t %u\n",
+			data->nrg_auto_corr_silence_diff);
+	pos += scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n",
+			data->num_in_cck_no_fa);
+	pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n",
+			data->nrg_th_ofdm);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+
+static ssize_t iwl_dbgfs_chain_noise_read(struct file *file,
+					char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	int pos = 0;
+	int cnt = 0;
+	char *buf;
+	int bufsz = sizeof(struct iwl_chain_noise_data) * 4 + 100;
+	ssize_t ret;
+	struct iwl_chain_noise_data *data;
+
+	data = &priv->chain_noise_data;
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n",
+			data->active_chains);
+	pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n",
+			data->chain_noise_a);
+	pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n",
+			data->chain_noise_b);
+	pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n",
+			data->chain_noise_c);
+	pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n",
+			data->chain_signal_a);
+	pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n",
+			data->chain_signal_b);
+	pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n",
+			data->chain_signal_c);
+	pos += scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n",
+			data->beacon_count);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t");
+	for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
+		pos += scnprintf(buf + pos, bufsz - pos, " %u",
+				data->disconn_array[cnt]);
+	}
+	pos += scnprintf(buf + pos, bufsz - pos, "\n");
+	pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t");
+	for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
+		pos += scnprintf(buf + pos, bufsz - pos, " %u",
+				data->delta_gain_code[cnt]);
+	}
+	pos += scnprintf(buf + pos, bufsz - pos, "\n");
+	pos += scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n",
+			data->radio_write);
+	pos += scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n",
+			data->state);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t iwl_dbgfs_power_save_status_read(struct file *file,
+						    char __user *user_buf,
+						    size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	char buf[60];
+	int pos = 0;
+	const size_t bufsz = sizeof(buf);
+	u32 pwrsave_status;
+
+	pwrsave_status = iwl_read32(priv->trans, CSR_GP_CNTRL) &
+			CSR_GP_REG_POWER_SAVE_STATUS_MSK;
+
+	pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
+	pos += scnprintf(buf + pos, bufsz - pos, "%s\n",
+		(pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
+		(pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
+		(pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
+		"error");
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
+					 const char __user *user_buf,
+					 size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	char buf[8];
+	int buf_size;
+	int clear;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	if (sscanf(buf, "%d", &clear) != 1)
+		return -EINVAL;
+
+	/* make request to uCode to retrieve statistics information */
+	mutex_lock(&priv->mutex);
+	iwl_send_statistics_request(priv, 0, true);
+	mutex_unlock(&priv->mutex);
+
+	return count;
+}
+
+static ssize_t iwl_dbgfs_ucode_tracing_read(struct file *file,
+					char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	int pos = 0;
+	char buf[128];
+	const size_t bufsz = sizeof(buf);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "ucode trace timer is %s\n",
+			priv->event_log.ucode_trace ? "On" : "Off");
+	pos += scnprintf(buf + pos, bufsz - pos, "non_wraps_count:\t\t %u\n",
+			priv->event_log.non_wraps_count);
+	pos += scnprintf(buf + pos, bufsz - pos, "wraps_once_count:\t\t %u\n",
+			priv->event_log.wraps_once_count);
+	pos += scnprintf(buf + pos, bufsz - pos, "wraps_more_count:\t\t %u\n",
+			priv->event_log.wraps_more_count);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
+					 const char __user *user_buf,
+					 size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	char buf[8];
+	int buf_size;
+	int trace;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	if (sscanf(buf, "%d", &trace) != 1)
+		return -EINVAL;
+
+	if (trace) {
+		priv->event_log.ucode_trace = true;
+		if (iwl_is_alive(priv)) {
+			/* start collecting data now */
+			mod_timer(&priv->ucode_trace, jiffies);
+		}
+	} else {
+		priv->event_log.ucode_trace = false;
+		del_timer_sync(&priv->ucode_trace);
+	}
+
+	return count;
+}
+
+static ssize_t iwl_dbgfs_rxon_flags_read(struct file *file,
+					 char __user *user_buf,
+					 size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	int len = 0;
+	char buf[20];
+
+	len = sprintf(buf, "0x%04X\n",
+		le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.flags));
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t iwl_dbgfs_rxon_filter_flags_read(struct file *file,
+						char __user *user_buf,
+						size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	int len = 0;
+	char buf[20];
+
+	len = sprintf(buf, "0x%04X\n",
+		le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags));
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t iwl_dbgfs_missed_beacon_read(struct file *file,
+					char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	int pos = 0;
+	char buf[12];
+	const size_t bufsz = sizeof(buf);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "%d\n",
+			priv->missed_beacon_threshold);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
+					 const char __user *user_buf,
+					 size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	char buf[8];
+	int buf_size;
+	int missed;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	if (sscanf(buf, "%d", &missed) != 1)
+		return -EINVAL;
+
+	if (missed < IWL_MISSED_BEACON_THRESHOLD_MIN ||
+	    missed > IWL_MISSED_BEACON_THRESHOLD_MAX)
+		priv->missed_beacon_threshold =
+			IWL_MISSED_BEACON_THRESHOLD_DEF;
+	else
+		priv->missed_beacon_threshold = missed;
+
+	return count;
+}
+
+static ssize_t iwl_dbgfs_plcp_delta_read(struct file *file,
+					char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	int pos = 0;
+	char buf[12];
+	const size_t bufsz = sizeof(buf);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "%u\n",
+			priv->plcp_delta_threshold);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
+					const char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	char buf[8];
+	int buf_size;
+	int plcp;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	if (sscanf(buf, "%d", &plcp) != 1)
+		return -EINVAL;
+	if ((plcp < IWL_MAX_PLCP_ERR_THRESHOLD_MIN) ||
+		(plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX))
+		priv->plcp_delta_threshold =
+			IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE;
+	else
+		priv->plcp_delta_threshold = plcp;
+	return count;
+}
+
+static ssize_t iwl_dbgfs_rf_reset_read(struct file *file,
+				       char __user *user_buf,
+				       size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	int pos = 0;
+	char buf[300];
+	const size_t bufsz = sizeof(buf);
+	struct iwl_rf_reset *rf_reset = &priv->rf_reset;
+
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"RF reset statistics\n");
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"\tnumber of reset request: %d\n",
+			rf_reset->reset_request_count);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"\tnumber of reset request success: %d\n",
+			rf_reset->reset_success_count);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"\tnumber of reset request reject: %d\n",
+			rf_reset->reset_reject_count);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_rf_reset_write(struct file *file,
+					const char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	int ret;
+
+	ret = iwl_force_rf_reset(priv, true);
+	return ret ? ret : count;
+}
+
+static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
+					const char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	char buf[8];
+	int buf_size;
+	int flush;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	if (sscanf(buf, "%d", &flush) != 1)
+		return -EINVAL;
+
+	if (iwl_is_rfkill(priv))
+		return -EFAULT;
+
+	iwlagn_dev_txfifo_flush(priv);
+
+	return count;
+}
+
+static ssize_t iwl_dbgfs_bt_traffic_read(struct file *file,
+					char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+	int pos = 0;
+	char buf[200];
+	const size_t bufsz = sizeof(buf);
+
+	if (!priv->bt_enable_flag) {
+		pos += scnprintf(buf + pos, bufsz - pos, "BT coex disabled\n");
+		return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	}
+	pos += scnprintf(buf + pos, bufsz - pos, "BT enable flag: 0x%x\n",
+		priv->bt_enable_flag);
+	pos += scnprintf(buf + pos, bufsz - pos, "BT in %s mode\n",
+		priv->bt_full_concurrent ? "full concurrency" : "3-wire");
+	pos += scnprintf(buf + pos, bufsz - pos, "BT status: %s, "
+			 "last traffic notif: %d\n",
+		priv->bt_status ? "On" : "Off", priv->last_bt_traffic_load);
+	pos += scnprintf(buf + pos, bufsz - pos, "ch_announcement: %d, "
+			 "kill_ack_mask: %x, kill_cts_mask: %x\n",
+		priv->bt_ch_announce, priv->kill_ack_mask,
+		priv->kill_cts_mask);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "bluetooth traffic load: ");
+	switch (priv->bt_traffic_load) {
+	case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
+		pos += scnprintf(buf + pos, bufsz - pos, "Continuous\n");
+		break;
+	case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
+		pos += scnprintf(buf + pos, bufsz - pos, "High\n");
+		break;
+	case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
+		pos += scnprintf(buf + pos, bufsz - pos, "Low\n");
+		break;
+	case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
+	default:
+		pos += scnprintf(buf + pos, bufsz - pos, "None\n");
+		break;
+	}
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_protection_mode_read(struct file *file,
+					char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+
+	int pos = 0;
+	char buf[40];
+	const size_t bufsz = sizeof(buf);
+
+	if (priv->cfg->ht_params)
+		pos += scnprintf(buf + pos, bufsz - pos,
+			 "use %s for aggregation\n",
+			 (priv->hw_params.use_rts_for_aggregation) ?
+				"rts/cts" : "cts-to-self");
+	else
+		pos += scnprintf(buf + pos, bufsz - pos, "N/A");
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
+					const char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	char buf[8];
+	int buf_size;
+	int rts;
+
+	if (!priv->cfg->ht_params)
+		return -EINVAL;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	if (sscanf(buf, "%d", &rts) != 1)
+		return -EINVAL;
+	if (rts)
+		priv->hw_params.use_rts_for_aggregation = true;
+	else
+		priv->hw_params.use_rts_for_aggregation = false;
+	return count;
+}
+
+static int iwl_cmd_echo_test(struct iwl_priv *priv)
+{
+	int ret;
+	struct iwl_host_cmd cmd = {
+		.id = REPLY_ECHO,
+		.len = { 0 },
+	};
+
+	ret = iwl_dvm_send_cmd(priv, &cmd);
+	if (ret)
+		IWL_ERR(priv, "echo testing failed: 0x%x\n", ret);
+	else
+		IWL_DEBUG_INFO(priv, "echo testing pass\n");
+	return ret;
+}
+
+static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
+					const char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	char buf[8];
+	int buf_size;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+
+	iwl_cmd_echo_test(priv);
+	return count;
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+static ssize_t iwl_dbgfs_log_event_read(struct file *file,
+					 char __user *user_buf,
+					 size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	char *buf = NULL;
+	ssize_t ret;
+
+	ret = iwl_dump_nic_event_log(priv, true, &buf);
+	if (ret > 0)
+		ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t iwl_dbgfs_log_event_write(struct file *file,
+					const char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	u32 event_log_flag;
+	char buf[8];
+	int buf_size;
+
+	/* check that the interface is up */
+	if (!iwl_is_ready(priv))
+		return -EAGAIN;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	if (sscanf(buf, "%u", &event_log_flag) != 1)
+		return -EINVAL;
+	if (event_log_flag == 1)
+		iwl_dump_nic_event_log(priv, true, NULL);
+
+	return count;
+}
+#endif
+
+static ssize_t iwl_dbgfs_calib_disabled_read(struct file *file,
+					 char __user *user_buf,
+					 size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	char buf[120];
+	int pos = 0;
+	const size_t bufsz = sizeof(buf);
+
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "Sensitivity calibrations %s\n",
+			 (priv->calib_disabled &
+					IWL_SENSITIVITY_CALIB_DISABLED) ?
+			 "DISABLED" : "ENABLED");
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "Chain noise calibrations %s\n",
+			 (priv->calib_disabled &
+					IWL_CHAIN_NOISE_CALIB_DISABLED) ?
+			 "DISABLED" : "ENABLED");
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "Tx power calibrations %s\n",
+			 (priv->calib_disabled &
+					IWL_TX_POWER_CALIB_DISABLED) ?
+			 "DISABLED" : "ENABLED");
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
+					      const char __user *user_buf,
+					      size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	char buf[8];
+	u32 calib_disabled;
+	int buf_size;
+
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+	if (sscanf(buf, "%x", &calib_disabled) != 1)
+		return -EINVAL;
+
+	priv->calib_disabled = calib_disabled;
+
+	return count;
+}
+
+static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
+					  const char __user *user_buf,
+					  size_t count, loff_t *ppos)
+{
+	struct iwl_priv *priv = file->private_data;
+	bool fw_restart = iwlwifi_mod_params.fw_restart;
+	int __maybe_unused ret;
+
+	iwlwifi_mod_params.fw_restart = true;
+
+	mutex_lock(&priv->mutex);
+
+	/* take the return value to make compiler happy - it will fail anyway */
+	ret = iwl_dvm_send_cmd_pdu(priv, REPLY_ERROR, 0, 0, NULL);
+
+	mutex_unlock(&priv->mutex);
+
+	iwlwifi_mod_params.fw_restart = fw_restart;
+
+	return count;
+}
+
+DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
+DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
+DEBUGFS_READ_FILE_OPS(ucode_general_stats);
+DEBUGFS_READ_FILE_OPS(sensitivity);
+DEBUGFS_READ_FILE_OPS(chain_noise);
+DEBUGFS_READ_FILE_OPS(power_save_status);
+DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
+DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing);
+DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
+DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta);
+DEBUGFS_READ_WRITE_FILE_OPS(rf_reset);
+DEBUGFS_READ_FILE_OPS(rxon_flags);
+DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
+DEBUGFS_WRITE_FILE_OPS(txfifo_flush);
+DEBUGFS_READ_FILE_OPS(ucode_bt_stats);
+DEBUGFS_READ_FILE_OPS(bt_traffic);
+DEBUGFS_READ_WRITE_FILE_OPS(protection_mode);
+DEBUGFS_READ_FILE_OPS(reply_tx_error);
+DEBUGFS_WRITE_FILE_OPS(echo_test);
+DEBUGFS_WRITE_FILE_OPS(fw_restart);
+#ifdef CONFIG_IWLWIFI_DEBUG
+DEBUGFS_READ_WRITE_FILE_OPS(log_event);
+#endif
+DEBUGFS_READ_WRITE_FILE_OPS(calib_disabled);
+
+/*
+ * Create the debugfs files and directories
+ */
+int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir)
+{
+	struct dentry *dir_data, *dir_rf, *dir_debug;
+
+	priv->debugfs_dir = dbgfs_dir;
+
+	dir_data = debugfs_create_dir("data", dbgfs_dir);
+	if (!dir_data)
+		goto err;
+	dir_rf = debugfs_create_dir("rf", dbgfs_dir);
+	if (!dir_rf)
+		goto err;
+	dir_debug = debugfs_create_dir("debug", dbgfs_dir);
+	if (!dir_debug)
+		goto err;
+
+	DEBUGFS_ADD_FILE(nvm, dir_data, 0400);
+	DEBUGFS_ADD_FILE(sram, dir_data, 0600);
+	DEBUGFS_ADD_FILE(wowlan_sram, dir_data, 0400);
+	DEBUGFS_ADD_FILE(stations, dir_data, 0400);
+	DEBUGFS_ADD_FILE(channels, dir_data, 0400);
+	DEBUGFS_ADD_FILE(status, dir_data, 0400);
+	DEBUGFS_ADD_FILE(rx_handlers, dir_data, 0600);
+	DEBUGFS_ADD_FILE(qos, dir_data, 0400);
+	DEBUGFS_ADD_FILE(sleep_level_override, dir_data, 0600);
+	DEBUGFS_ADD_FILE(current_sleep_command, dir_data, 0400);
+	DEBUGFS_ADD_FILE(thermal_throttling, dir_data, 0400);
+	DEBUGFS_ADD_FILE(disable_ht40, dir_data, 0600);
+	DEBUGFS_ADD_FILE(temperature, dir_data, 0400);
+
+	DEBUGFS_ADD_FILE(power_save_status, dir_debug, 0400);
+	DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, 0200);
+	DEBUGFS_ADD_FILE(missed_beacon, dir_debug, 0200);
+	DEBUGFS_ADD_FILE(plcp_delta, dir_debug, 0600);
+	DEBUGFS_ADD_FILE(rf_reset, dir_debug, 0600);
+	DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, 0400);
+	DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, 0400);
+	DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, 0400);
+	DEBUGFS_ADD_FILE(txfifo_flush, dir_debug, 0200);
+	DEBUGFS_ADD_FILE(protection_mode, dir_debug, 0600);
+	DEBUGFS_ADD_FILE(sensitivity, dir_debug, 0400);
+	DEBUGFS_ADD_FILE(chain_noise, dir_debug, 0400);
+	DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, 0600);
+	DEBUGFS_ADD_FILE(ucode_bt_stats, dir_debug, 0400);
+	DEBUGFS_ADD_FILE(reply_tx_error, dir_debug, 0400);
+	DEBUGFS_ADD_FILE(rxon_flags, dir_debug, 0200);
+	DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, 0200);
+	DEBUGFS_ADD_FILE(echo_test, dir_debug, 0200);
+	DEBUGFS_ADD_FILE(fw_restart, dir_debug, 0200);
+#ifdef CONFIG_IWLWIFI_DEBUG
+	DEBUGFS_ADD_FILE(log_event, dir_debug, 0600);
+#endif
+
+	if (iwl_advanced_bt_coexist(priv))
+		DEBUGFS_ADD_FILE(bt_traffic, dir_debug, 0400);
+
+	/* Calibrations disabled/enabled status */
+	DEBUGFS_ADD_FILE(calib_disabled, dir_rf, 0600);
+
+	/*
+	 * Create a symlink with mac80211. This is not very robust, as it does
+	 * not remove the symlink created. The implicit assumption is that
+	 * when the opmode exits, mac80211 will also exit, and will remove
+	 * this symlink as part of its cleanup.
+	 */
+	if (priv->mac80211_registered) {
+		char buf[100];
+		struct dentry *mac80211_dir, *dev_dir;
+
+		dev_dir = dbgfs_dir->d_parent;
+		mac80211_dir = priv->hw->wiphy->debugfsdir;
+
+		snprintf(buf, 100, "../../%pd2", dev_dir);
+
+		if (!debugfs_create_symlink("iwlwifi", mac80211_dir, buf))
+			goto err;
+	}
+
+	return 0;
+
+err:
+	IWL_ERR(priv, "failed to create the dvm debugfs entries\n");
+	return -ENOMEM;
+}
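+
+/*
+ * Usage sketch (illustrative, not part of the driver): with debugfs
+ * mounted at /sys/kernel/debug, the files created above live in the
+ * "data", "rf" and "debug" subdirectories of the per-device directory
+ * passed in as dbgfs_dir, e.g.
+ *
+ *	cat .../debug/power_save_status
+ *	echo 1 > .../debug/fw_restart
+ *
+ * Reads are served by the *_read handlers via simple_read_from_buffer();
+ * writes go through the *_write handlers, which copy the user buffer
+ * with copy_from_user() and parse a single integer with sscanf().
+ */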
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
new file mode 100644
index 0000000..cceb4cd
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
@@ -0,0 +1,949 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+/*
+ * Please use this file (dev.h) for driver implementation definitions.
+ * Please use commands.h for uCode API definitions.
+ */
+
+#ifndef __iwl_dev_h__
+#define __iwl_dev_h__
+
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/wait.h>
+#include <linux/leds.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+
+#include "fw/img.h"
+#include "iwl-eeprom-parse.h"
+#include "iwl-csr.h"
+#include "iwl-debug.h"
+#include "iwl-agn-hw.h"
+#include "iwl-op-mode.h"
+#include "fw/notif-wait.h"
+#include "iwl-trans.h"
+
+#include "led.h"
+#include "power.h"
+#include "rs.h"
+#include "tt.h"
+
+/* CT-KILL constants */
+#define CT_KILL_THRESHOLD_LEGACY   110 /* in Celsius */
+#define CT_KILL_THRESHOLD	   114 /* in Celsius */
+#define CT_KILL_EXIT_THRESHOLD     95  /* in Celsius */
+
+/* Default noise level to report when noise measurement is not available.
+ *   This may be because we're:
+ *   1)  Not associated (no beacon statistics being sent to driver)
+ *   2)  Scanning (noise measurement does not apply to associated channel)
+ * Use default noise value of -127 ... this is below the range of measurable
+ *   Rx dBm for all agn devices, so it can indicate "unmeasurable" to user.
+ *   Also, -127 works better than 0 when averaging frames with/without
+ *   noise info (e.g. averaging might be done in app); measured dBm values are
+ *   always negative ... using a negative value as the default keeps all
+ *   averages within an s8's (used in some apps) range of negative values. */
+#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
+
+/*
+ * RTS threshold here is total size [2347] minus 4 FCS bytes
+ * Per spec:
+ *   a value of 0 means RTS on all data/management packets
+ *   a value > max MSDU size means no RTS
+ * else RTS for data/management frames where MPDU is larger
+ *   than RTS value.
+ */
+#define DEFAULT_RTS_THRESHOLD     2347U
+#define MIN_RTS_THRESHOLD         0U
+#define MAX_RTS_THRESHOLD         2347U
+#define MAX_MSDU_SIZE		  2304U
+#define MAX_MPDU_SIZE		  2346U
+#define DEFAULT_BEACON_INTERVAL   200U
+#define	DEFAULT_SHORT_RETRY_LIMIT 7U
+#define	DEFAULT_LONG_RETRY_LIMIT  4U
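+
+/*
+ * Note (illustrative): DEFAULT_RTS_THRESHOLD (2347) exceeds
+ * MAX_MPDU_SIZE (2346), so with the default setting no frame is large
+ * enough to trigger RTS; RTS is effectively off until the threshold
+ * is lowered.
+ */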
+
+#define IWL_NUM_SCAN_RATES         (2)
+
+
+#define IEEE80211_DATA_LEN              2304
+#define IEEE80211_4ADDR_LEN             30
+#define IEEE80211_HLEN                  (IEEE80211_4ADDR_LEN)
+#define IEEE80211_FRAME_LEN             (IEEE80211_DATA_LEN + IEEE80211_HLEN)
+
+#define SUP_RATE_11A_MAX_NUM_CHANNELS  8
+#define SUP_RATE_11B_MAX_NUM_CHANNELS  4
+#define SUP_RATE_11G_MAX_NUM_CHANNELS  12
+
+#define IWL_SUPPORTED_RATES_IE_LEN         8
+
+#define IWL_INVALID_RATE     0xFF
+#define IWL_INVALID_VALUE    -1
+
+union iwl_ht_rate_supp {
+	u16 rates;
+	struct {
+		u8 siso_rate;
+		u8 mimo_rate;
+	};
+};
+
+struct iwl_ht_config {
+	bool single_chain_sufficient;
+	enum ieee80211_smps_mode smps; /* current smps mode */
+};
+
+/* QoS structures */
+struct iwl_qos_info {
+	int qos_active;
+	struct iwl_qosparam_cmd def_qos_parm;
+};
+
+/**
+ * enum iwl_agg_state
+ *
+ * The state machine of the BA agreement establishment / tear down.
+ * These states relate to a specific RA / TID.
+ *
+ * @IWL_AGG_OFF: aggregation is not used
+ * @IWL_AGG_STARTING: aggregation is starting (between start and oper)
+ * @IWL_AGG_ON: aggregation session is up
+ * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the
+ *	HW queue to be empty of packets for this RA / TID.
+ * @IWL_EMPTYING_HW_QUEUE_DELBA: tearing down a BA session - waiting for the
+ *	HW queue to be empty of packets for this RA / TID.
+ */
+enum iwl_agg_state {
+	IWL_AGG_OFF = 0,
+	IWL_AGG_STARTING,
+	IWL_AGG_ON,
+	IWL_EMPTYING_HW_QUEUE_ADDBA,
+	IWL_EMPTYING_HW_QUEUE_DELBA,
+};
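+
+/*
+ * Typical flow (a sketch inferred from the states above): a successful
+ * ADDBA takes IWL_AGG_OFF -> IWL_AGG_STARTING -> IWL_AGG_ON, possibly
+ * via IWL_EMPTYING_HW_QUEUE_ADDBA while the HW queue drains; tear down
+ * goes IWL_AGG_ON -> IWL_EMPTYING_HW_QUEUE_DELBA -> IWL_AGG_OFF.
+ */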
+
+/**
+ * struct iwl_ht_agg - aggregation state machine
+ *
+ * This struct holds the states for the BA agreement establishment and tear
+ * down. It also holds the state during the BA session itself. This struct is
+ * duplicated for each RA / TID.
+ *
+ * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
+ *	Tx response (REPLY_TX), and the block ack notification
+ *	(REPLY_COMPRESSED_BA).
+ * @state: state of the BA agreement establishment / tear down.
+ * @txq_id: Tx queue used by the BA session
+ * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
+ *	the first packet to be sent in legacy HW queue in Tx AGG stop flow.
+ *	Basically when next_reclaimed reaches ssn, we can tell mac80211 that
+ *	we are ready to finish the Tx AGG stop / start flow.
+ * @wait_for_ba: Expect block-ack before next Tx reply
+ */
+struct iwl_ht_agg {
+	u32 rate_n_flags;
+	enum iwl_agg_state state;
+	u16 txq_id;
+	u16 ssn;
+	bool wait_for_ba;
+};
+
+/**
+ * struct iwl_tid_data - one for each RA / TID
+ *
+ * This struct holds the states for each RA / TID.
+ *
+ * @seq_number: the next WiFi sequence number to use
+ * @next_reclaimed: the WiFi sequence number of the next packet to be acked.
+ *	This is basically (last acked packet++).
+ * @agg: aggregation state machine
+ */
+struct iwl_tid_data {
+	u16 seq_number;
+	u16 next_reclaimed;
+	struct iwl_ht_agg agg;
+};
+
+/*
+ * Structure should be accessed with sta_lock held. When station addition
+ * is in progress (IWL_STA_UCODE_INPROGRESS) it is possible to access only
+ * the commands (iwl_addsta_cmd and iwl_link_quality_cmd) without sta_lock
+ * held.
+ */
+struct iwl_station_entry {
+	struct iwl_addsta_cmd sta;
+	u8 used, ctxid;
+	struct iwl_link_quality_cmd *lq;
+};
+
+/*
+ * iwl_station_priv: Driver's private station information
+ *
+ * When mac80211 creates a station it reserves some space (hw->sta_data_size)
+ * in the structure for use by the driver. This structure is placed in
+ * that space.
+ */
+struct iwl_station_priv {
+	struct iwl_rxon_context *ctx;
+	struct iwl_lq_sta lq_sta;
+	atomic_t pending_frames;
+	bool client;
+	bool asleep;
+	u8 max_agg_bufsize;
+	u8 sta_id;
+};
+
+/**
+ * struct iwl_vif_priv - driver's private per-interface information
+ *
+ * When mac80211 allocates a virtual interface, it can allocate
+ * space for us to put data into.
+ */
+struct iwl_vif_priv {
+	struct iwl_rxon_context *ctx;
+	u8 ibss_bssid_sta_id;
+};
+
+struct iwl_sensitivity_ranges {
+	u16 min_nrg_cck;
+
+	u16 nrg_th_cck;
+	u16 nrg_th_ofdm;
+
+	u16 auto_corr_min_ofdm;
+	u16 auto_corr_min_ofdm_mrc;
+	u16 auto_corr_min_ofdm_x1;
+	u16 auto_corr_min_ofdm_mrc_x1;
+
+	u16 auto_corr_max_ofdm;
+	u16 auto_corr_max_ofdm_mrc;
+	u16 auto_corr_max_ofdm_x1;
+	u16 auto_corr_max_ofdm_mrc_x1;
+
+	u16 auto_corr_max_cck;
+	u16 auto_corr_max_cck_mrc;
+	u16 auto_corr_min_cck;
+	u16 auto_corr_min_cck_mrc;
+
+	u16 barker_corr_th_min;
+	u16 barker_corr_th_min_mrc;
+	u16 nrg_th_cca;
+};
+
+
+#define KELVIN_TO_CELSIUS(x) ((x)-273)
+#define CELSIUS_TO_KELVIN(x) ((x)+273)
+
+
+/******************************************************************************
+ *
+ * Functions implemented in core module which are forward declared here
+ * for use by iwl-[4-5].c
+ *
+ * NOTE:  The implementations of these functions are not hardware specific
+ * which is why they are in the core module files.
+ *
+ * Naming convention --
+ * iwl_         <-- Is part of iwlwifi
+ * iwlXXXX_     <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
+ *
+ ****************************************************************************/
+void iwl_update_chain_flags(struct iwl_priv *priv);
+extern const u8 iwl_bcast_addr[ETH_ALEN];
+
+#define IWL_OPERATION_MODE_AUTO     0
+#define IWL_OPERATION_MODE_HT_ONLY  1
+#define IWL_OPERATION_MODE_MIXED    2
+#define IWL_OPERATION_MODE_20MHZ    3
+
+#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000
+
+/* Sensitivity and chain noise calibration */
+#define INITIALIZATION_VALUE		0xFFFF
+#define IWL_CAL_NUM_BEACONS		16
+#define MAXIMUM_ALLOWED_PATHLOSS	15
+
+#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3
+
+#define MAX_FA_OFDM  50
+#define MIN_FA_OFDM  5
+#define MAX_FA_CCK   50
+#define MIN_FA_CCK   5
+
+#define AUTO_CORR_STEP_OFDM       1
+
+#define AUTO_CORR_STEP_CCK     3
+#define AUTO_CORR_MAX_TH_CCK   160
+
+#define NRG_DIFF               2
+#define NRG_STEP_CCK           2
+#define NRG_MARGIN             8
+#define MAX_NUMBER_CCK_NO_FA 100
+
+#define AUTO_CORR_CCK_MIN_VAL_DEF    (125)
+
+#define CHAIN_A             0
+#define CHAIN_B             1
+#define CHAIN_C             2
+#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4
+#define ALL_BAND_FILTER			0xFF00
+#define IN_BAND_FILTER			0xFF
+#define MIN_AVERAGE_NOISE_MAX_VALUE	0xFFFFFFFF
+
+#define NRG_NUM_PREV_STAT_L     20
+#define NUM_RX_CHAINS           3
+
+enum iwlagn_false_alarm_state {
+	IWL_FA_TOO_MANY = 0,
+	IWL_FA_TOO_FEW = 1,
+	IWL_FA_GOOD_RANGE = 2,
+};
+
+enum iwlagn_chain_noise_state {
+	IWL_CHAIN_NOISE_ALIVE = 0,  /* must be 0 */
+	IWL_CHAIN_NOISE_ACCUMULATE,
+	IWL_CHAIN_NOISE_CALIBRATED,
+	IWL_CHAIN_NOISE_DONE,
+};
+
+/* Sensitivity calib data */
+struct iwl_sensitivity_data {
+	u32 auto_corr_ofdm;
+	u32 auto_corr_ofdm_mrc;
+	u32 auto_corr_ofdm_x1;
+	u32 auto_corr_ofdm_mrc_x1;
+	u32 auto_corr_cck;
+	u32 auto_corr_cck_mrc;
+
+	u32 last_bad_plcp_cnt_ofdm;
+	u32 last_fa_cnt_ofdm;
+	u32 last_bad_plcp_cnt_cck;
+	u32 last_fa_cnt_cck;
+
+	u32 nrg_curr_state;
+	u32 nrg_prev_state;
+	u32 nrg_value[10];
+	u8  nrg_silence_rssi[NRG_NUM_PREV_STAT_L];
+	u32 nrg_silence_ref;
+	u32 nrg_energy_idx;
+	u32 nrg_silence_idx;
+	u32 nrg_th_cck;
+	s32 nrg_auto_corr_silence_diff;
+	u32 num_in_cck_no_fa;
+	u32 nrg_th_ofdm;
+
+	u16 barker_corr_th_min;
+	u16 barker_corr_th_min_mrc;
+	u16 nrg_th_cca;
+};
+
+/* Chain noise (differential Rx gain) calib data */
+struct iwl_chain_noise_data {
+	u32 active_chains;
+	u32 chain_noise_a;
+	u32 chain_noise_b;
+	u32 chain_noise_c;
+	u32 chain_signal_a;
+	u32 chain_signal_b;
+	u32 chain_signal_c;
+	u16 beacon_count;
+	u8 disconn_array[NUM_RX_CHAINS];
+	u8 delta_gain_code[NUM_RX_CHAINS];
+	u8 radio_write;
+	u8 state;
+};
+
+enum {
+	MEASUREMENT_READY = (1 << 0),
+	MEASUREMENT_ACTIVE = (1 << 1),
+};
+
+/* reply_tx_statistics (for _agn devices) */
+struct reply_tx_error_statistics {
+	u32 pp_delay;
+	u32 pp_few_bytes;
+	u32 pp_bt_prio;
+	u32 pp_quiet_period;
+	u32 pp_calc_ttak;
+	u32 int_crossed_retry;
+	u32 short_limit;
+	u32 long_limit;
+	u32 fifo_underrun;
+	u32 drain_flow;
+	u32 rfkill_flush;
+	u32 life_expire;
+	u32 dest_ps;
+	u32 host_abort;
+	u32 bt_retry;
+	u32 sta_invalid;
+	u32 frag_drop;
+	u32 tid_disable;
+	u32 fifo_flush;
+	u32 insuff_cf_poll;
+	u32 fail_hw_drop;
+	u32 sta_color_mismatch;
+	u32 unknown;
+};
+
+/* reply_agg_tx_statistics (for _agn devices) */
+struct reply_agg_tx_error_statistics {
+	u32 underrun;
+	u32 bt_prio;
+	u32 few_bytes;
+	u32 abort;
+	u32 last_sent_ttl;
+	u32 last_sent_try;
+	u32 last_sent_bt_kill;
+	u32 scd_query;
+	u32 bad_crc32;
+	u32 response;
+	u32 dump_tx;
+	u32 delay_tx;
+	u32 unknown;
+};
+
+/*
+ * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
+ * to perform continuous uCode event logging operation if enabled
+ */
+#define UCODE_TRACE_PERIOD (10)
+
+/*
+ * iwl_event_log: current uCode event log position
+ *
+ * @ucode_trace: enable/disable ucode continuous trace timer
+ * @num_wraps: how many times the event buffer wraps
+ * @next_entry:  the entry just before the next one that uCode would fill
+ * @non_wraps_count: counter for no wrap detected when dumping uCode events
+ * @wraps_once_count: counter for wrap once detected when dumping uCode events
+ * @wraps_more_count: counter for wrap more than once detected
+ *		      when dumping uCode events
+ */
+struct iwl_event_log {
+	bool ucode_trace;
+	u32 num_wraps;
+	u32 next_entry;
+	int non_wraps_count;
+	int wraps_once_count;
+	int wraps_more_count;
+};
+
+#define IWL_DELAY_NEXT_FORCE_RF_RESET  (HZ*3)
+
+/* BT Antenna Coupling Threshold (dB) */
+#define IWL_BT_ANTENNA_COUPLING_THRESHOLD	(35)
+
+/* Firmware reload counter and Timestamp */
+#define IWL_MIN_RELOAD_DURATION		1000 /* 1000 ms */
+#define IWL_MAX_CONTINUE_RELOAD_CNT	4
+
+
+struct iwl_rf_reset {
+	int reset_request_count;
+	int reset_success_count;
+	int reset_reject_count;
+	unsigned long last_reset_jiffies;
+};
+
+enum iwl_rxon_context_id {
+	IWL_RXON_CTX_BSS,
+	IWL_RXON_CTX_PAN,
+
+	NUM_IWL_RXON_CTX
+};
+
+/* extend beacon time format bit shifting  */
+/*
+ * for _agn devices
+ * bits 31:22 - extended
+ * bits 21:0  - interval
+ */
+#define IWLAGN_EXT_BEACON_TIME_POS	22
+
+struct iwl_rxon_context {
+	struct ieee80211_vif *vif;
+
+	u8 mcast_queue;
+	u8 ac_to_queue[IEEE80211_NUM_ACS];
+	u8 ac_to_fifo[IEEE80211_NUM_ACS];
+
+	/*
+	 * We could use the vif to indicate active, but we
+	 * also need it to be active during disabling when
+	 * we already removed the vif for type setting.
+	 */
+	bool always_active, is_active;
+
+	bool ht_need_multiple_chains;
+
+	enum iwl_rxon_context_id ctxid;
+
+	u32 interface_modes, exclusive_interface_modes;
+	u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype;
+
+	/*
+	 * We declare this const so it can only be
+	 * changed via explicit cast within the
+	 * routines that actually update the physical
+	 * hardware.
+	 */
+	const struct iwl_rxon_cmd active;
+	struct iwl_rxon_cmd staging;
+
+	struct iwl_rxon_time_cmd timing;
+
+	struct iwl_qos_info qos_data;
+
+	u8 bcast_sta_id, ap_sta_id;
+
+	u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd;
+	u8 qos_cmd;
+	u8 wep_key_cmd;
+
+	struct iwl_wep_key wep_keys[WEP_KEYS_MAX];
+	u8 key_mapping_keys;
+
+	__le32 station_flags;
+
+	int beacon_int;
+
+	struct {
+		bool non_gf_sta_present;
+		u8 protection;
+		bool enabled, is_40mhz;
+		u8 extension_chan_offset;
+	} ht;
+};
+
+enum iwl_scan_type {
+	IWL_SCAN_NORMAL,
+	IWL_SCAN_RADIO_RESET,
+};
+
+/**
+ * struct iwl_hw_params
+ *
+ * Holds the hardware parameters
+ *
+ * @tx_chains_num: Number of TX chains
+ * @rx_chains_num: Number of RX chains
+ * @ct_kill_threshold: temperature threshold - in hw dependent unit
+ * @ct_kill_exit_threshold: when to re-enable the device - in hw dependent
+ *	unit, relevant for 1000, 6000 and up
+ * @sens: range of sensitivity values
+ * @use_rts_for_aggregation: use rts/cts protection for HT traffic
+ */
+struct iwl_hw_params {
+	u8  tx_chains_num;
+	u8  rx_chains_num;
+	bool use_rts_for_aggregation;
+	u32 ct_kill_threshold;
+	u32 ct_kill_exit_threshold;
+
+	const struct iwl_sensitivity_ranges *sens;
+};
+
+/**
+ * struct iwl_dvm_bt_params - DVM specific BT (coex) parameters
+ * @advanced_bt_coexist: support advanced bt coexist
+ * @bt_init_traffic_load: specify initial bt traffic load
+ * @bt_prio_boost: default bt priority boost value
+ * @agg_time_limit: maximum number of uSec in aggregation
+ * @bt_sco_disable: uCode should not respond to BT in SCO/ESCO mode
+ */
+struct iwl_dvm_bt_params {
+	bool advanced_bt_coexist;
+	u8 bt_init_traffic_load;
+	u32 bt_prio_boost;
+	u16 agg_time_limit;
+	bool bt_sco_disable;
+	bool bt_session_2;
+};
+
+/**
+ * struct iwl_dvm_cfg - DVM firmware specific device configuration
+ * @set_hw_params: set hardware parameters
+ * @set_channel_switch: send channel switch command
+ * @nic_config: apply device specific configuration
+ * @temperature: read temperature
+ * @adv_thermal_throttle: support advanced thermal throttling
+ * @support_ct_kill_exit: support ct kill exit condition
+ * @plcp_delta_threshold: PLCP error rate threshold used to trigger
+ *	radio tuning when the received PLCP error rate is high
+ * @chain_noise_scale: default chain noise scale used for gain computation
+ * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
+ * @no_idle_support: do not support idle mode
+ * @bt_params: pointer to BT parameters
+ * @need_temp_offset_calib: need to perform temperature offset calibration
+ * @no_xtal_calib: some devices do not need crystal calibration data,
+ *	don't send it to those
+ * @temp_offset_v2: support v2 of temperature offset calibration
+ * @adv_pm: advanced power management
+ */
+struct iwl_dvm_cfg {
+	void (*set_hw_params)(struct iwl_priv *priv);
+	int (*set_channel_switch)(struct iwl_priv *priv,
+				  struct ieee80211_channel_switch *ch_switch);
+	void (*nic_config)(struct iwl_priv *priv);
+	void (*temperature)(struct iwl_priv *priv);
+
+	const struct iwl_dvm_bt_params *bt_params;
+	s32 chain_noise_scale;
+	u8 plcp_delta_threshold;
+	bool adv_thermal_throttle;
+	bool support_ct_kill_exit;
+	bool hd_v2;
+	bool no_idle_support;
+	bool need_temp_offset_calib;
+	bool no_xtal_calib;
+	bool temp_offset_v2;
+	bool adv_pm;
+};
+
+struct iwl_wipan_noa_data {
+	struct rcu_head rcu_head;
+	u32 length;
+	u8 data[];
+};
+
+/* Calibration disabling bit mask */
+enum {
+	IWL_CALIB_ENABLE_ALL			= 0,
+
+	IWL_SENSITIVITY_CALIB_DISABLED		= BIT(0),
+	IWL_CHAIN_NOISE_CALIB_DISABLED		= BIT(1),
+	IWL_TX_POWER_CALIB_DISABLED		= BIT(2),
+
+	IWL_CALIB_DISABLE_ALL			= 0xFFFFFFFF,
+};
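+
+/*
+ * Example (illustrative): writing "3" to the calib_disabled debugfs
+ * file (parsed as hex by iwl_dbgfs_calib_disabled_write()) sets
+ * IWL_SENSITIVITY_CALIB_DISABLED | IWL_CHAIN_NOISE_CALIB_DISABLED,
+ * leaving Tx power calibration enabled.
+ */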
+
+#define IWL_OP_MODE_GET_DVM(_iwl_op_mode) \
+	((struct iwl_priv *) ((_iwl_op_mode)->op_mode_specific))
+
+#define IWL_MAC80211_GET_DVM(_hw) \
+	((struct iwl_priv *) ((struct iwl_op_mode *) \
+	(_hw)->priv)->op_mode_specific)
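+
+/*
+ * Usage sketch (hypothetical caller): inside an ieee80211_ops callback
+ * that receives the mac80211 hw pointer, recover the DVM private data:
+ *
+ *	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+ */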
+
+struct iwl_priv {
+
+	struct iwl_trans *trans;
+	struct device *dev;		/* for debug prints only */
+	const struct iwl_cfg *cfg;
+	const struct iwl_fw *fw;
+	const struct iwl_dvm_cfg *lib;
+	unsigned long status;
+
+	spinlock_t sta_lock;
+	struct mutex mutex;
+
+	unsigned long transport_queue_stop;
+	bool passive_no_rx;
+#define IWL_INVALID_MAC80211_QUEUE	0xff
+	u8 queue_to_mac80211[IWL_MAX_HW_QUEUES];
+	atomic_t queue_stop_count[IWL_MAX_HW_QUEUES];
+
+	unsigned long agg_q_alloc[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
+
+	/* ieee device used by generic ieee processing code */
+	struct ieee80211_hw *hw;
+
+	struct napi_struct *napi;
+
+	struct list_head calib_results;
+
+	struct workqueue_struct *workqueue;
+
+	struct iwl_hw_params hw_params;
+
+	enum nl80211_band band;
+	u8 valid_contexts;
+
+	void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
+				       struct iwl_rx_cmd_buffer *rxb);
+
+	struct iwl_notif_wait_data notif_wait;
+
+	/* spectrum measurement report caching */
+	struct iwl_spectrum_notification measure_report;
+	u8 measurement_status;
+
+	/* ucode beacon time */
+	u32 ucode_beacon_time;
+	int missed_beacon_threshold;
+
+	/* track IBSS manager (last beacon) status */
+	u32 ibss_manager;
+
+	/* jiffies when last recovery from statistics was performed */
+	unsigned long rx_statistics_jiffies;
+
+	/* counters */
+	u32 rx_handlers_stats[REPLY_MAX];
+
+	/* rf reset */
+	struct iwl_rf_reset rf_reset;
+
+	/* firmware reload counter and timestamp */
+	unsigned long reload_jiffies;
+	int reload_count;
+	bool ucode_loaded;
+
+	u8 plcp_delta_threshold;
+
+	/* thermal calibration */
+	s32 temperature;	/* Celsius */
+	s32 last_temperature;
+
+	struct iwl_wipan_noa_data __rcu *noa_data;
+
+	/* Scan related variables */
+	unsigned long scan_start;
+	unsigned long scan_start_tsf;
+	void *scan_cmd;
+	enum nl80211_band scan_band;
+	struct cfg80211_scan_request *scan_request;
+	struct ieee80211_vif *scan_vif;
+	enum iwl_scan_type scan_type;
+	u8 scan_tx_ant[NUM_NL80211_BANDS];
+	u8 mgmt_tx_ant;
+
+	/* max number of station keys */
+	u8 sta_key_max_num;
+
+	bool new_scan_threshold_behaviour;
+
+	bool wowlan;
+
+	/* EEPROM MAC addresses */
+	struct mac_address addresses[2];
+
+	struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
+
+	__le16 switch_channel;
+
+	u8 start_calib;
+	struct iwl_sensitivity_data sensitivity_data;
+	struct iwl_chain_noise_data chain_noise_data;
+	__le16 sensitivity_tbl[HD_TABLE_SIZE];
+	__le16 enhance_sensitivity_tbl[ENHANCE_HD_TABLE_ENTRIES];
+
+	struct iwl_ht_config current_ht_config;
+
+	/* Rate scaling data */
+	u8 retry_rate;
+
+	int activity_timer_active;
+
+	struct iwl_power_mgr power_data;
+	struct iwl_tt_mgmt thermal_throttle;
+
+	/* station table variables */
+	int num_stations;
+	struct iwl_station_entry stations[IWLAGN_STATION_COUNT];
+	unsigned long ucode_key_table;
+	struct iwl_tid_data tid_data[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];
+	atomic_t num_aux_in_flight;
+
+	u8 mac80211_registered;
+
+	/* Indication if ieee80211_ops->open has been called */
+	u8 is_open;
+
+	enum nl80211_iftype iw_mode;
+
+	/* Last Rx'd beacon timestamp */
+	u64 timestamp;
+
+	struct {
+		__le32 flag;
+		struct statistics_general_common common;
+		struct statistics_rx_non_phy rx_non_phy;
+		struct statistics_rx_phy rx_ofdm;
+		struct statistics_rx_ht_phy rx_ofdm_ht;
+		struct statistics_rx_phy rx_cck;
+		struct statistics_tx tx;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+		struct statistics_bt_activity bt_activity;
+		__le32 num_bt_kills, accum_num_bt_kills;
+#endif
+		spinlock_t lock;
+	} statistics;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	struct {
+		struct statistics_general_common common;
+		struct statistics_rx_non_phy rx_non_phy;
+		struct statistics_rx_phy rx_ofdm;
+		struct statistics_rx_ht_phy rx_ofdm_ht;
+		struct statistics_rx_phy rx_cck;
+		struct statistics_tx tx;
+		struct statistics_bt_activity bt_activity;
+	} accum_stats, delta_stats, max_delta_stats;
+#endif
+
+	/*
+	 * number of TIDs that have aggregation on; 0 means
+	 * no aggregation at all
+	 */
+	u8 agg_tids_count;
+
+	struct iwl_rx_phy_res last_phy_res;
+	u32 ampdu_ref;
+	bool last_phy_res_valid;
+
+	/*
+	 * chain noise reset and gain commands are the
+	 * two extra calibration commands that follow the standard
+	 * phy calibration commands
+	 */
+	u8 phy_calib_chain_noise_reset_cmd;
+	u8 phy_calib_chain_noise_gain_cmd;
+
+	/* counts reply_tx error */
+	struct reply_tx_error_statistics reply_tx_stats;
+	struct reply_agg_tx_error_statistics reply_agg_tx_stats;
+
+	/* bt coex */
+	u8 bt_enable_flag;
+	u8 bt_status;
+	u8 bt_traffic_load, last_bt_traffic_load;
+	bool bt_ch_announce;
+	bool bt_full_concurrent;
+	bool bt_ant_couple_ok;
+	__le32 kill_ack_mask;
+	__le32 kill_cts_mask;
+	__le16 bt_valid;
+	bool reduced_txpower;
+	u16 bt_on_thresh;
+	u16 bt_duration;
+	u16 dynamic_frag_thresh;
+	u8 bt_ci_compliance;
+	struct work_struct bt_traffic_change_work;
+	bool bt_enable_pspoll;
+	struct iwl_rxon_context *cur_rssi_ctx;
+	bool bt_is_sco;
+
+	struct work_struct restart;
+	struct work_struct scan_completed;
+	struct work_struct abort_scan;
+
+	struct work_struct beacon_update;
+	struct iwl_rxon_context *beacon_ctx;
+	struct sk_buff *beacon_skb;
+	void *beacon_cmd;
+
+	struct work_struct tt_work;
+	struct work_struct ct_enter;
+	struct work_struct ct_exit;
+	struct work_struct start_internal_scan;
+	struct work_struct tx_flush;
+	struct work_struct bt_full_concurrency;
+	struct work_struct bt_runtime_config;
+
+	struct delayed_work scan_check;
+
+	/* TX Power settings */
+	s8 tx_power_user_lmt;
+	s8 tx_power_next;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	/* debugfs */
+	struct dentry *debugfs_dir;
+	u32 dbgfs_sram_offset, dbgfs_sram_len;
+	bool disable_ht40;
+	void *wowlan_sram;
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
+
+	struct iwl_nvm_data *nvm_data;
+	/* eeprom blob for debugfs */
+	u8 *eeprom_blob;
+	size_t eeprom_blob_size;
+
+	struct work_struct txpower_work;
+	u32 calib_disabled;
+	struct work_struct run_time_calib_work;
+	struct timer_list statistics_periodic;
+	struct timer_list ucode_trace;
+
+	struct iwl_event_log event_log;
+
+#ifdef CONFIG_IWLWIFI_LEDS
+	struct led_classdev led;
+	unsigned long blink_on, blink_off;
+	bool led_registered;
+#endif
+
+	/* WoWLAN GTK rekey data */
+	u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN];
+	__le64 replay_ctr;
+	__le16 last_seq_ctl;
+	bool have_rekey_data;
+#ifdef CONFIG_PM_SLEEP
+	struct wiphy_wowlan_support wowlan_support;
+#endif
+
+	/* device_pointers: pointers to ucode event tables */
+	struct {
+		u32 error_event_table;
+		u32 log_event_table;
+	} device_pointers;
+
+	/* indicator of loaded ucode image */
+	enum iwl_ucode_type cur_ucode;
+}; /* iwl_priv */
+
+static inline struct iwl_rxon_context *
+iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif)
+{
+	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+	return vif_priv->ctx;
+}
+
+#define for_each_context(priv, ctx)				\
+	for (ctx = &priv->contexts[IWL_RXON_CTX_BSS];		\
+	     ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++)	\
+		if (priv->valid_contexts & BIT(ctx->ctxid))
+
+static inline int iwl_is_associated_ctx(struct iwl_rxon_context *ctx)
+{
+	return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
+}
+
+static inline int iwl_is_associated(struct iwl_priv *priv,
+				    enum iwl_rxon_context_id ctxid)
+{
+	return iwl_is_associated_ctx(&priv->contexts[ctxid]);
+}
+
+static inline int iwl_is_any_associated(struct iwl_priv *priv)
+{
+	struct iwl_rxon_context *ctx;
+	for_each_context(priv, ctx)
+		if (iwl_is_associated_ctx(ctx))
+			return true;
+	return false;
+}
+
+#endif				/* __iwl_dev_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
new file mode 100644
index 0000000..f21732e
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
@@ -0,0 +1,690 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+/*
+ * DVM device-specific data & functions
+ */
+#include "iwl-io.h"
+#include "iwl-prph.h"
+#include "iwl-eeprom-parse.h"
+
+#include "agn.h"
+#include "dev.h"
+#include "commands.h"
+
+
+/*
+ * 1000 series
+ * ===========
+ */
+
+/*
+ * For the 1000 series, use the advanced thermal throttling critical
+ * temperature threshold, but keep the legacy thermal management
+ * implementation for now.  The 1000 uCode uses the advanced thermal
+ * throttling API but does not implement ct_kill_exit based on the
+ * ct_kill exit temperature, so thermal throttling still follows the
+ * legacy thermal throttling management.
+ * This code needs to be revisited once the 1000 uCode has the advanced
+ * thermal throttling algorithm in place.
+ */
+static void iwl1000_set_ct_threshold(struct iwl_priv *priv)
+{
+	/* want Celsius */
+	priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
+	priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
+}
+
+/* NIC configuration for 1000 series */
+static void iwl1000_nic_config(struct iwl_priv *priv)
+{
+	/* Setting digital SVR for 1000 card to 1.32V */
+	/* locking is acquired in iwl_set_bits_mask_prph() function */
+	iwl_set_bits_mask_prph(priv->trans, APMG_DIGITAL_SVR_REG,
+				APMG_SVR_DIGITAL_VOLTAGE_1_32,
+				~APMG_SVR_VOLTAGE_CONFIG_BIT_MSK);
+}
+
+/**
+ * iwl_beacon_time_mask_low - mask of lower 32 bits of beacon time
+ * @priv: pointer to iwl_priv data structure
+ * @tsf_bits: number of low TSF bits to keep (the usec part)
+ */
+static inline u32 iwl_beacon_time_mask_low(struct iwl_priv *priv,
+					   u16 tsf_bits)
+{
+	return (1 << tsf_bits) - 1;
+}
+
+/**
+ * iwl_beacon_time_mask_high - mask of higher 32 bits of beacon time
+ * @priv: pointer to iwl_priv data structure
+ * @tsf_bits: number of low TSF bits to mask out, leaving the
+ *	beacon-count part above them
+ */
+static inline u32 iwl_beacon_time_mask_high(struct iwl_priv *priv,
+					    u16 tsf_bits)
+{
+	return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
+}
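+/*
+ * Worked example (a sketch; IWLAGN_EXT_BEACON_TIME_POS is assumed to be
+ * 22 here): with tsf_bits = 22 the low mask is (1 << 22) - 1 = 0x003fffff
+ * (the usec-within-interval part) and the high mask is the complementary
+ * 0xffc00000 (the beacon-count part), so the two masks partition the
+ * 32-bit beacon time exactly.
+ */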
+
+/*
+ * Extended beacon time format:
+ * the time in usec is converted into a 32-bit value in extended:internal
+ * format, where the extended part is the beacon count and the internal
+ * part is the time in usec within one beacon interval.
+ */
+static u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec,
+				u32 beacon_interval)
+{
+	u32 quot;
+	u32 rem;
+	u32 interval = beacon_interval * TIME_UNIT;
+
+	if (!interval || !usec)
+		return 0;
+
+	quot = (usec / interval) &
+		(iwl_beacon_time_mask_high(priv, IWLAGN_EXT_BEACON_TIME_POS) >>
+		IWLAGN_EXT_BEACON_TIME_POS);
+	rem = (usec % interval) & iwl_beacon_time_mask_low(priv,
+				   IWLAGN_EXT_BEACON_TIME_POS);
+
+	return (quot << IWLAGN_EXT_BEACON_TIME_POS) + rem;
+}
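+/*
+ * Worked example for iwl_usecs_to_beacons (a sketch, assuming TIME_UNIT
+ * is 1024 usec per TU and IWLAGN_EXT_BEACON_TIME_POS is 22): with
+ * beacon_interval = 100 TU, interval = 102400 usec.  For usec = 250000,
+ * quot = 250000 / 102400 = 2 beacons and rem = 45200 usec, so the
+ * returned value is (2 << 22) + 45200.
+ */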
+
+/* base is usually what we get from uCode with each received frame,
+ * the same as the HW timer counter counting down
+ */
+static __le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
+			   u32 addon, u32 beacon_interval)
+{
+	u32 base_low = base & iwl_beacon_time_mask_low(priv,
+				IWLAGN_EXT_BEACON_TIME_POS);
+	u32 addon_low = addon & iwl_beacon_time_mask_low(priv,
+				IWLAGN_EXT_BEACON_TIME_POS);
+	u32 interval = beacon_interval * TIME_UNIT;
+	u32 res = (base & iwl_beacon_time_mask_high(priv,
+				IWLAGN_EXT_BEACON_TIME_POS)) +
+				(addon & iwl_beacon_time_mask_high(priv,
+				IWLAGN_EXT_BEACON_TIME_POS));
+
+	if (base_low > addon_low)
+		res += base_low - addon_low;
+	else if (base_low < addon_low) {
+		res += interval + base_low - addon_low;
+		res += (1 << IWLAGN_EXT_BEACON_TIME_POS);
+	} else
+		res += (1 << IWLAGN_EXT_BEACON_TIME_POS);
+
+	return cpu_to_le32(res);
+}
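+/*
+ * Borrow example for iwl_add_beacon_time (a sketch, same assumptions as
+ * above): with interval = 102400 usec, base_low = 50000 and
+ * addon_low = 60000, the usec part would go negative, so the sum borrows
+ * one interval: res += 102400 + 50000 - 60000 = 92400 usec, and the
+ * beacon count is incremented by adding 1 << IWLAGN_EXT_BEACON_TIME_POS.
+ */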
+
+static const struct iwl_sensitivity_ranges iwl1000_sensitivity = {
+	.min_nrg_cck = 95,
+	.auto_corr_min_ofdm = 90,
+	.auto_corr_min_ofdm_mrc = 170,
+	.auto_corr_min_ofdm_x1 = 120,
+	.auto_corr_min_ofdm_mrc_x1 = 240,
+
+	.auto_corr_max_ofdm = 120,
+	.auto_corr_max_ofdm_mrc = 210,
+	.auto_corr_max_ofdm_x1 = 155,
+	.auto_corr_max_ofdm_mrc_x1 = 290,
+
+	.auto_corr_min_cck = 125,
+	.auto_corr_max_cck = 200,
+	.auto_corr_min_cck_mrc = 170,
+	.auto_corr_max_cck_mrc = 400,
+	.nrg_th_cck = 95,
+	.nrg_th_ofdm = 95,
+
+	.barker_corr_th_min = 190,
+	.barker_corr_th_min_mrc = 390,
+	.nrg_th_cca = 62,
+};
+
+static void iwl1000_hw_set_hw_params(struct iwl_priv *priv)
+{
+	iwl1000_set_ct_threshold(priv);
+
+	/* Set initial sensitivity parameters */
+	priv->hw_params.sens = &iwl1000_sensitivity;
+}
+
+const struct iwl_dvm_cfg iwl_dvm_1000_cfg = {
+	.set_hw_params = iwl1000_hw_set_hw_params,
+	.nic_config = iwl1000_nic_config,
+	.temperature = iwlagn_temperature,
+	.support_ct_kill_exit = true,
+	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
+	.chain_noise_scale = 1000,
+};
+
+
+/*
+ * 2000 series
+ * ===========
+ */
+
+static void iwl2000_set_ct_threshold(struct iwl_priv *priv)
+{
+	/* want Celsius */
+	priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
+	priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
+}
+
+/* NIC configuration for 2000 series */
+static void iwl2000_nic_config(struct iwl_priv *priv)
+{
+	iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
+		    CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
+}
+
+static const struct iwl_sensitivity_ranges iwl2000_sensitivity = {
+	.min_nrg_cck = 97,
+	.auto_corr_min_ofdm = 80,
+	.auto_corr_min_ofdm_mrc = 128,
+	.auto_corr_min_ofdm_x1 = 105,
+	.auto_corr_min_ofdm_mrc_x1 = 192,
+
+	.auto_corr_max_ofdm = 145,
+	.auto_corr_max_ofdm_mrc = 232,
+	.auto_corr_max_ofdm_x1 = 110,
+	.auto_corr_max_ofdm_mrc_x1 = 232,
+
+	.auto_corr_min_cck = 125,
+	.auto_corr_max_cck = 175,
+	.auto_corr_min_cck_mrc = 160,
+	.auto_corr_max_cck_mrc = 310,
+	.nrg_th_cck = 97,
+	.nrg_th_ofdm = 100,
+
+	.barker_corr_th_min = 190,
+	.barker_corr_th_min_mrc = 390,
+	.nrg_th_cca = 62,
+};
+
+static void iwl2000_hw_set_hw_params(struct iwl_priv *priv)
+{
+	iwl2000_set_ct_threshold(priv);
+
+	/* Set initial sensitivity parameters */
+	priv->hw_params.sens = &iwl2000_sensitivity;
+}
+
+const struct iwl_dvm_cfg iwl_dvm_2000_cfg = {
+	.set_hw_params = iwl2000_hw_set_hw_params,
+	.nic_config = iwl2000_nic_config,
+	.temperature = iwlagn_temperature,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit = true,
+	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+	.chain_noise_scale = 1000,
+	.hd_v2 = true,
+	.need_temp_offset_calib = true,
+	.temp_offset_v2 = true,
+};
+
+const struct iwl_dvm_cfg iwl_dvm_105_cfg = {
+	.set_hw_params = iwl2000_hw_set_hw_params,
+	.nic_config = iwl2000_nic_config,
+	.temperature = iwlagn_temperature,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit = true,
+	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+	.chain_noise_scale = 1000,
+	.hd_v2 = true,
+	.need_temp_offset_calib = true,
+	.temp_offset_v2 = true,
+	.adv_pm = true,
+};
+
+static const struct iwl_dvm_bt_params iwl2030_bt_params = {
+	/* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
+	.advanced_bt_coexist = true,
+	.agg_time_limit = BT_AGG_THRESHOLD_DEF,
+	.bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
+	.bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT32,
+	.bt_sco_disable = true,
+	.bt_session_2 = true,
+};
+
+const struct iwl_dvm_cfg iwl_dvm_2030_cfg = {
+	.set_hw_params = iwl2000_hw_set_hw_params,
+	.nic_config = iwl2000_nic_config,
+	.temperature = iwlagn_temperature,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit = true,
+	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+	.chain_noise_scale = 1000,
+	.hd_v2 = true,
+	.bt_params = &iwl2030_bt_params,
+	.need_temp_offset_calib = true,
+	.temp_offset_v2 = true,
+	.adv_pm = true,
+};
+
+/*
+ * 5000 series
+ * ===========
+ */
+
+/* Sensitivity parameters for the 5000 series */
+static const struct iwl_sensitivity_ranges iwl5000_sensitivity = {
+	.min_nrg_cck = 100,
+	.auto_corr_min_ofdm = 90,
+	.auto_corr_min_ofdm_mrc = 170,
+	.auto_corr_min_ofdm_x1 = 105,
+	.auto_corr_min_ofdm_mrc_x1 = 220,
+
+	.auto_corr_max_ofdm = 120,
+	.auto_corr_max_ofdm_mrc = 210,
+	.auto_corr_max_ofdm_x1 = 120,
+	.auto_corr_max_ofdm_mrc_x1 = 240,
+
+	.auto_corr_min_cck = 125,
+	.auto_corr_max_cck = 200,
+	.auto_corr_min_cck_mrc = 200,
+	.auto_corr_max_cck_mrc = 400,
+	.nrg_th_cck = 100,
+	.nrg_th_ofdm = 100,
+
+	.barker_corr_th_min = 190,
+	.barker_corr_th_min_mrc = 390,
+	.nrg_th_cca = 62,
+};
+
+static const struct iwl_sensitivity_ranges iwl5150_sensitivity = {
+	.min_nrg_cck = 95,
+	.auto_corr_min_ofdm = 90,
+	.auto_corr_min_ofdm_mrc = 170,
+	.auto_corr_min_ofdm_x1 = 105,
+	.auto_corr_min_ofdm_mrc_x1 = 220,
+
+	.auto_corr_max_ofdm = 120,
+	.auto_corr_max_ofdm_mrc = 210,
+	/* max = min for performance bug in 5150 DSP */
+	.auto_corr_max_ofdm_x1 = 105,
+	.auto_corr_max_ofdm_mrc_x1 = 220,
+
+	.auto_corr_min_cck = 125,
+	.auto_corr_max_cck = 200,
+	.auto_corr_min_cck_mrc = 170,
+	.auto_corr_max_cck_mrc = 400,
+	.nrg_th_cck = 95,
+	.nrg_th_ofdm = 95,
+
+	.barker_corr_th_min = 190,
+	.barker_corr_th_min_mrc = 390,
+	.nrg_th_cca = 62,
+};
+
+#define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF	(-5)
+
+static s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
+{
+	u16 temperature, voltage;
+
+	temperature = le16_to_cpu(priv->nvm_data->kelvin_temperature);
+	voltage = le16_to_cpu(priv->nvm_data->kelvin_voltage);
+
+	/* offset = temp - volt / coeff */
+	return (s32)(temperature -
+			voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
+}
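+/*
+ * Example with hypothetical NVM values: for temperature = 290 and
+ * voltage = 500, the negative coefficient makes the division add to the
+ * offset: 290 - 500 / (-5) = 290 + 100 = 390.
+ */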
+
+static void iwl5150_set_ct_threshold(struct iwl_priv *priv)
+{
+	const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF;
+	s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY) -
+			iwl_temp_calib_to_offset(priv);
+
+	priv->hw_params.ct_kill_threshold = threshold * volt2temp_coef;
+}
+
+static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
+{
+	/* want Celsius */
+	priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
+}
+
+static void iwl5000_hw_set_hw_params(struct iwl_priv *priv)
+{
+	iwl5000_set_ct_threshold(priv);
+
+	/* Set initial sensitivity parameters */
+	priv->hw_params.sens = &iwl5000_sensitivity;
+}
+
+static void iwl5150_hw_set_hw_params(struct iwl_priv *priv)
+{
+	iwl5150_set_ct_threshold(priv);
+
+	/* Set initial sensitivity parameters */
+	priv->hw_params.sens = &iwl5150_sensitivity;
+}
+
+static void iwl5150_temperature(struct iwl_priv *priv)
+{
+	u32 vt = 0;
+	s32 offset =  iwl_temp_calib_to_offset(priv);
+
+	vt = le32_to_cpu(priv->statistics.common.temperature);
+	vt = vt / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF + offset;
+	/* now vt holds the temperature in Kelvin */
+	priv->temperature = KELVIN_TO_CELSIUS(vt);
+	iwl_tt_handler(priv);
+}
+
+static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
+				     struct ieee80211_channel_switch *ch_switch)
+{
+	/*
+	 * MULTI-FIXME
+	 * See iwlagn_mac_channel_switch.
+	 */
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+	struct iwl5000_channel_switch_cmd cmd;
+	u32 switch_time_in_usec, ucode_switch_time;
+	u16 ch;
+	u32 tsf_low;
+	u8 switch_count;
+	u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
+	struct ieee80211_vif *vif = ctx->vif;
+	struct iwl_host_cmd hcmd = {
+		.id = REPLY_CHANNEL_SWITCH,
+		.len = { sizeof(cmd), },
+		.data = { &cmd, },
+	};
+
+	cmd.band = priv->band == NL80211_BAND_2GHZ;
+	ch = ch_switch->chandef.chan->hw_value;
+	IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
+		      ctx->active.channel, ch);
+	cmd.channel = cpu_to_le16(ch);
+	cmd.rxon_flags = ctx->staging.flags;
+	cmd.rxon_filter_flags = ctx->staging.filter_flags;
+	switch_count = ch_switch->count;
+	tsf_low = ch_switch->timestamp & 0x0ffffffff;
+	/*
+	 * Calculate the uCode channel switch time, adding the TSF as one
+	 * of the factors for deciding when to switch.
+	 */
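+	/*
+	 * Illustration (hypothetical numbers): if the uCode beacon time is
+	 * already two full beacon intervals past tsf_low, i.e.
+	 * (ucode_beacon_time - tsf_low) / beacon_interval = 2 with
+	 * switch_count = 5, the count is reduced to 5 - 2 = 3; if the
+	 * elapsed intervals exceed the count, it is clamped to 0.
+	 */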
+	if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
+		if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
+		    beacon_interval)) {
+			switch_count -= (priv->ucode_beacon_time -
+				tsf_low) / beacon_interval;
+		} else
+			switch_count = 0;
+	}
+	if (switch_count <= 1)
+		cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
+	else {
+		switch_time_in_usec =
+			vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
+		ucode_switch_time = iwl_usecs_to_beacons(priv,
+							 switch_time_in_usec,
+							 beacon_interval);
+		cmd.switch_time = iwl_add_beacon_time(priv,
+						      priv->ucode_beacon_time,
+						      ucode_switch_time,
+						      beacon_interval);
+	}
+	IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
+		      cmd.switch_time);
+	cmd.expect_beacon =
+		ch_switch->chandef.chan->flags & IEEE80211_CHAN_RADAR;
+
+	return iwl_dvm_send_cmd(priv, &hcmd);
+}
+
+const struct iwl_dvm_cfg iwl_dvm_5000_cfg = {
+	.set_hw_params = iwl5000_hw_set_hw_params,
+	.set_channel_switch = iwl5000_hw_channel_switch,
+	.temperature = iwlagn_temperature,
+	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+	.chain_noise_scale = 1000,
+	.no_idle_support = true,
+};
+
+const struct iwl_dvm_cfg iwl_dvm_5150_cfg = {
+	.set_hw_params = iwl5150_hw_set_hw_params,
+	.set_channel_switch = iwl5000_hw_channel_switch,
+	.temperature = iwl5150_temperature,
+	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+	.chain_noise_scale = 1000,
+	.no_idle_support = true,
+	.no_xtal_calib = true,
+};
+
+
+
+/*
+ * 6000 series
+ * ===========
+ */
+
+static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
+{
+	/* want Celsius */
+	priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
+	priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
+}
+
+/* NIC configuration for 6000 series */
+static void iwl6000_nic_config(struct iwl_priv *priv)
+{
+	switch (priv->cfg->device_family) {
+	case IWL_DEVICE_FAMILY_6005:
+	case IWL_DEVICE_FAMILY_6030:
+	case IWL_DEVICE_FAMILY_6000:
+		break;
+	case IWL_DEVICE_FAMILY_6000i:
+		/* 2x2 IPA phy type */
+		iwl_write32(priv->trans, CSR_GP_DRIVER_REG,
+			     CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
+		break;
+	case IWL_DEVICE_FAMILY_6050:
+		/* Indicate calibration version to uCode. */
+		if (priv->nvm_data->calib_version >= 6)
+			iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
+					CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
+		break;
+	case IWL_DEVICE_FAMILY_6150:
+		/* Indicate calibration version to uCode. */
+		if (priv->nvm_data->calib_version >= 6)
+			iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
+					CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
+		iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
+			    CSR_GP_DRIVER_REG_BIT_6050_1x2);
+		break;
+	default:
+		WARN_ON(1);
+	}
+}
+
+static const struct iwl_sensitivity_ranges iwl6000_sensitivity = {
+	.min_nrg_cck = 110,
+	.auto_corr_min_ofdm = 80,
+	.auto_corr_min_ofdm_mrc = 128,
+	.auto_corr_min_ofdm_x1 = 105,
+	.auto_corr_min_ofdm_mrc_x1 = 192,
+
+	.auto_corr_max_ofdm = 145,
+	.auto_corr_max_ofdm_mrc = 232,
+	.auto_corr_max_ofdm_x1 = 110,
+	.auto_corr_max_ofdm_mrc_x1 = 232,
+
+	.auto_corr_min_cck = 125,
+	.auto_corr_max_cck = 175,
+	.auto_corr_min_cck_mrc = 160,
+	.auto_corr_max_cck_mrc = 310,
+	.nrg_th_cck = 110,
+	.nrg_th_ofdm = 110,
+
+	.barker_corr_th_min = 190,
+	.barker_corr_th_min_mrc = 336,
+	.nrg_th_cca = 62,
+};
+
+static void iwl6000_hw_set_hw_params(struct iwl_priv *priv)
+{
+	iwl6000_set_ct_threshold(priv);
+
+	/* Set initial sensitivity parameters */
+	priv->hw_params.sens = &iwl6000_sensitivity;
+
+}
+
+static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
+				     struct ieee80211_channel_switch *ch_switch)
+{
+	/*
+	 * MULTI-FIXME
+	 * See iwlagn_mac_channel_switch.
+	 */
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+	struct iwl6000_channel_switch_cmd *cmd;
+	u32 switch_time_in_usec, ucode_switch_time;
+	u16 ch;
+	u32 tsf_low;
+	u8 switch_count;
+	u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
+	struct ieee80211_vif *vif = ctx->vif;
+	struct iwl_host_cmd hcmd = {
+		.id = REPLY_CHANNEL_SWITCH,
+		.len = { sizeof(*cmd), },
+		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+	};
+	int err;
+
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	hcmd.data[0] = cmd;
+
+	cmd->band = priv->band == NL80211_BAND_2GHZ;
+	ch = ch_switch->chandef.chan->hw_value;
+	IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
+		      ctx->active.channel, ch);
+	cmd->channel = cpu_to_le16(ch);
+	cmd->rxon_flags = ctx->staging.flags;
+	cmd->rxon_filter_flags = ctx->staging.filter_flags;
+	switch_count = ch_switch->count;
+	tsf_low = ch_switch->timestamp & 0x0ffffffff;
+	/*
+	 * Calculate the uCode channel switch time, adding the TSF as one
+	 * of the factors for deciding when to switch.
+	 */
+	if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
+		if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
+		    beacon_interval)) {
+			switch_count -= (priv->ucode_beacon_time -
+				tsf_low) / beacon_interval;
+		} else
+			switch_count = 0;
+	}
+	if (switch_count <= 1)
+		cmd->switch_time = cpu_to_le32(priv->ucode_beacon_time);
+	else {
+		switch_time_in_usec =
+			vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
+		ucode_switch_time = iwl_usecs_to_beacons(priv,
+							 switch_time_in_usec,
+							 beacon_interval);
+		cmd->switch_time = iwl_add_beacon_time(priv,
+						       priv->ucode_beacon_time,
+						       ucode_switch_time,
+						       beacon_interval);
+	}
+	IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
+		      cmd->switch_time);
+	cmd->expect_beacon =
+		ch_switch->chandef.chan->flags & IEEE80211_CHAN_RADAR;
+
+	err = iwl_dvm_send_cmd(priv, &hcmd);
+	kfree(cmd);
+	return err;
+}
+
+const struct iwl_dvm_cfg iwl_dvm_6000_cfg = {
+	.set_hw_params = iwl6000_hw_set_hw_params,
+	.set_channel_switch = iwl6000_hw_channel_switch,
+	.nic_config = iwl6000_nic_config,
+	.temperature = iwlagn_temperature,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit = true,
+	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+	.chain_noise_scale = 1000,
+};
+
+const struct iwl_dvm_cfg iwl_dvm_6005_cfg = {
+	.set_hw_params = iwl6000_hw_set_hw_params,
+	.set_channel_switch = iwl6000_hw_channel_switch,
+	.nic_config = iwl6000_nic_config,
+	.temperature = iwlagn_temperature,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit = true,
+	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+	.chain_noise_scale = 1000,
+	.need_temp_offset_calib = true,
+};
+
+const struct iwl_dvm_cfg iwl_dvm_6050_cfg = {
+	.set_hw_params = iwl6000_hw_set_hw_params,
+	.set_channel_switch = iwl6000_hw_channel_switch,
+	.nic_config = iwl6000_nic_config,
+	.temperature = iwlagn_temperature,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit = true,
+	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+	.chain_noise_scale = 1500,
+};
+
+static const struct iwl_dvm_bt_params iwl6000_bt_params = {
+	/* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
+	.advanced_bt_coexist = true,
+	.agg_time_limit = BT_AGG_THRESHOLD_DEF,
+	.bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
+	.bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
+	.bt_sco_disable = true,
+};
+
+const struct iwl_dvm_cfg iwl_dvm_6030_cfg = {
+	.set_hw_params = iwl6000_hw_set_hw_params,
+	.set_channel_switch = iwl6000_hw_channel_switch,
+	.nic_config = iwl6000_nic_config,
+	.temperature = iwlagn_temperature,
+	.adv_thermal_throttle = true,
+	.support_ct_kill_exit = true,
+	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+	.chain_noise_scale = 1000,
+	.bt_params = &iwl6000_bt_params,
+	.need_temp_offset_calib = true,
+	.adv_pm = true,
+};
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/led.c b/drivers/net/wireless/intel/iwlwifi/dvm/led.c
new file mode 100644
index 0000000..1bbd17a
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/led.c
@@ -0,0 +1,224 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+#include "iwl-io.h"
+#include "iwl-trans.h"
+#include "iwl-modparams.h"
+#include "dev.h"
+#include "agn.h"
+
+/* Throughput		OFF time(ms)	ON time (ms)
+ *	>300			25		25
+ *	>200 to 300		40		40
+ *	>100 to 200		55		55
+ *	>70 to 100		65		65
+ *	>50 to 70		75		75
+ *	>20 to 50		85		85
+ *	>10 to 20		95		95
+ *	>5 to 10		110		110
+ *	>1 to 5			130		130
+ *	>0 to 1			167		167
+ *	<=0					SOLID ON
+ */
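+/*
+ * Note: each blink_time below is the sum of the ON and OFF times from the
+ * table above (e.g. 167 + 167 = 334 ms for the lowest-throughput entry),
+ * which is consistent with the trigger splitting it between the on and
+ * off phases.
+ */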
+static const struct ieee80211_tpt_blink iwl_blink[] = {
+	{ .throughput = 0, .blink_time = 334 },
+	{ .throughput = 1 * 1024 - 1, .blink_time = 260 },
+	{ .throughput = 5 * 1024 - 1, .blink_time = 220 },
+	{ .throughput = 10 * 1024 - 1, .blink_time = 190 },
+	{ .throughput = 20 * 1024 - 1, .blink_time = 170 },
+	{ .throughput = 50 * 1024 - 1, .blink_time = 150 },
+	{ .throughput = 70 * 1024 - 1, .blink_time = 130 },
+	{ .throughput = 100 * 1024 - 1, .blink_time = 110 },
+	{ .throughput = 200 * 1024 - 1, .blink_time = 80 },
+	{ .throughput = 300 * 1024 - 1, .blink_time = 50 },
+};
+
+/* Turn on the LED */
+void iwlagn_led_enable(struct iwl_priv *priv)
+{
+	iwl_write32(priv->trans, CSR_LED_REG, CSR_LED_REG_TURN_ON);
+}
+
+/*
+ * Adjust the LED blink rate to compensate for the MAC clock deviation of
+ * each HW.  LED blink rate analysis showed an average deviation of 20%
+ * on the 5000 series and up.
+ * The on/off time per HW must be compensated according to the deviation
+ * to achieve the desired LED frequency.
+ * The calculation is: (100 - averageDeviation) / 100 * blinkTime
+ * For code efficiency the calculation is done as:
+ *	compensation = (100 - averageDeviation) * 64 / 100
+ *	NewBlinkTime = (compensation * BlinkTime) / 64
+ */
+static inline u8 iwl_blink_compensation(struct iwl_priv *priv,
+				    u8 time, u16 compensation)
+{
+	if (!compensation) {
+		IWL_ERR(priv, "undefined blink compensation: "
+			"use pre-defined blinking time\n");
+		return time;
+	}
+
+	return (u8)((time * compensation) >> 6);
+}
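+/*
+ * Worked example: with the documented 20% average deviation,
+ * compensation = (100 - 20) * 64 / 100 = 51, so a 110 ms blink time
+ * becomes (110 * 51) >> 6 = 87 ms.
+ */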
+
+static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
+{
+	struct iwl_host_cmd cmd = {
+		.id = REPLY_LEDS_CMD,
+		.len = { sizeof(struct iwl_led_cmd), },
+		.data = { led_cmd, },
+		.flags = CMD_ASYNC,
+	};
+	u32 reg;
+
+	reg = iwl_read32(priv->trans, CSR_LED_REG);
+	if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
+		iwl_write32(priv->trans, CSR_LED_REG,
+			    reg & CSR_LED_BSM_CTRL_MSK);
+
+	return iwl_dvm_send_cmd(priv, &cmd);
+}
+
+/* Set led pattern command */
+static int iwl_led_cmd(struct iwl_priv *priv,
+		       unsigned long on,
+		       unsigned long off)
+{
+	struct iwl_led_cmd led_cmd = {
+		.id = IWL_LED_LINK,
+		.interval = IWL_DEF_LED_INTRVL
+	};
+	int ret;
+
+	if (!test_bit(STATUS_READY, &priv->status))
+		return -EBUSY;
+
+	if (priv->blink_on == on && priv->blink_off == off)
+		return 0;
+
+	if (off == 0) {
+		/* led is SOLID_ON */
+		on = IWL_LED_SOLID;
+	}
+
+	led_cmd.on = iwl_blink_compensation(priv, on,
+				priv->cfg->base_params->led_compensation);
+	led_cmd.off = iwl_blink_compensation(priv, off,
+				priv->cfg->base_params->led_compensation);
+
+	ret = iwl_send_led_cmd(priv, &led_cmd);
+	if (!ret) {
+		priv->blink_on = on;
+		priv->blink_off = off;
+	}
+	return ret;
+}
+
+static void iwl_led_brightness_set(struct led_classdev *led_cdev,
+				   enum led_brightness brightness)
+{
+	struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
+	unsigned long on = 0;
+	unsigned long off = 0;
+
+	if (brightness > 0)
+		on = IWL_LED_SOLID;
+	else
+		off = IWL_LED_SOLID;
+
+	iwl_led_cmd(priv, on, off);
+}
+
+static int iwl_led_blink_set(struct led_classdev *led_cdev,
+			     unsigned long *delay_on,
+			     unsigned long *delay_off)
+{
+	struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
+
+	return iwl_led_cmd(priv, *delay_on, *delay_off);
+}
+
+void iwl_leds_init(struct iwl_priv *priv)
+{
+	int mode = iwlwifi_mod_params.led_mode;
+	int ret;
+
+	if (mode == IWL_LED_DISABLE) {
+		IWL_INFO(priv, "Led disabled\n");
+		return;
+	}
+	if (mode == IWL_LED_DEFAULT)
+		mode = priv->cfg->led_mode;
+
+	priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
+				   wiphy_name(priv->hw->wiphy));
+	priv->led.brightness_set = iwl_led_brightness_set;
+	priv->led.blink_set = iwl_led_blink_set;
+	priv->led.max_brightness = 1;
+
+	switch (mode) {
+	case IWL_LED_DEFAULT:
+		WARN_ON(1);
+		break;
+	case IWL_LED_BLINK:
+		priv->led.default_trigger =
+			ieee80211_create_tpt_led_trigger(priv->hw,
+					IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
+					iwl_blink, ARRAY_SIZE(iwl_blink));
+		break;
+	case IWL_LED_RF_STATE:
+		priv->led.default_trigger =
+			ieee80211_get_radio_led_name(priv->hw);
+		break;
+	}
+
+	ret = led_classdev_register(priv->trans->dev, &priv->led);
+	if (ret) {
+		kfree(priv->led.name);
+		return;
+	}
+
+	priv->led_registered = true;
+}
+
+void iwl_leds_exit(struct iwl_priv *priv)
+{
+	if (!priv->led_registered)
+		return;
+
+	led_classdev_unregister(&priv->led);
+	kfree(priv->led.name);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/led.h b/drivers/net/wireless/intel/iwlwifi/dvm/led.h
new file mode 100644
index 0000000..75f74ed
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/led.h
@@ -0,0 +1,55 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_leds_h__
+#define __iwl_leds_h__
+
+
+struct iwl_priv;
+
+#define IWL_LED_SOLID 11
+#define IWL_DEF_LED_INTRVL cpu_to_le32(1000)
+
+#define IWL_LED_ACTIVITY       (0<<1)
+#define IWL_LED_LINK           (1<<1)
+
+#ifdef CONFIG_IWLWIFI_LEDS
+void iwlagn_led_enable(struct iwl_priv *priv);
+void iwl_leds_init(struct iwl_priv *priv);
+void iwl_leds_exit(struct iwl_priv *priv);
+#else
+static inline void iwlagn_led_enable(struct iwl_priv *priv)
+{
+}
+static inline void iwl_leds_init(struct iwl_priv *priv)
+{
+}
+static inline void iwl_leds_exit(struct iwl_priv *priv)
+{
+}
+#endif
+
+#endif /* __iwl_leds_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
new file mode 100644
index 0000000..2b6ffbc
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
@@ -0,0 +1,1282 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <net/mac80211.h>
+
+#include "iwl-io.h"
+#include "iwl-agn-hw.h"
+#include "iwl-trans.h"
+#include "iwl-modparams.h"
+
+#include "dev.h"
+#include "agn.h"
+
+int iwlagn_hw_valid_rtc_data_addr(u32 addr)
+{
+	return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) &&
+		(addr < IWLAGN_RTC_DATA_UPPER_BOUND);
+}
+
+int iwlagn_send_tx_power(struct iwl_priv *priv)
+{
+	struct iwlagn_tx_power_dbm_cmd tx_power_cmd;
+	u8 tx_ant_cfg_cmd;
+
+	if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
+		      "TX Power requested while scanning!\n"))
+		return -EAGAIN;
+
+	/* units are half dBm, so multiply by 2 */
+	tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);
+
+	if (tx_power_cmd.global_lmt > priv->nvm_data->max_tx_pwr_half_dbm) {
+		/*
+		 * For the newer devices which use the enhanced/extended tx
+		 * power table in the EEPROM, the format is in half dBm.  The
+		 * driver needs to convert to dBm format before reporting to
+		 * mac80211, which can lose 1/2 dBm of resolution.  The
+		 * driver rounds up before reporting, but that could push the
+		 * tx power 1/2 dBm over the regulatory limit.  Check here:
+		 * if "tx_power_user_lmt" is higher than the EEPROM value (in
+		 * half-dBm format), lower the tx power based on the EEPROM.
+		 */
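+		/*
+		 * e.g. (hypothetical values): a 16 dBm user limit becomes
+		 * global_lmt = 32 half-dBm; if the EEPROM maximum is 31
+		 * half-dBm, the limit is clamped down to 31.
+		 */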
+		tx_power_cmd.global_lmt =
+			priv->nvm_data->max_tx_pwr_half_dbm;
+	}
+	tx_power_cmd.flags = IWLAGN_TX_POWER_NO_CLOSED;
+	tx_power_cmd.srv_chan_lmt = IWLAGN_TX_POWER_AUTO;
+
+	if (IWL_UCODE_API(priv->fw->ucode_ver) == 1)
+		tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1;
+	else
+		tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
+
+	return iwl_dvm_send_cmd_pdu(priv, tx_ant_cfg_cmd, 0,
+			sizeof(tx_power_cmd), &tx_power_cmd);
+}
+
+void iwlagn_temperature(struct iwl_priv *priv)
+{
+	lockdep_assert_held(&priv->statistics.lock);
+
+	/* store temperature from correct statistics (in Celsius) */
+	priv->temperature = le32_to_cpu(priv->statistics.common.temperature);
+	iwl_tt_handler(priv);
+}
+
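+/*
+ * Example: an HT rate_n_flags whose low byte is 0x07 maps directly to
+ * MCS 7; a legacy 5 GHz rate is looked up in iwl_rates[] starting at
+ * IWL_FIRST_OFDM_RATE and returned relative to that offset.
+ */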
+int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band)
+{
+	int idx = 0;
+	int band_offset = 0;
+
+	/* HT rate format: mac80211 wants an MCS number, which is just LSB */
+	if (rate_n_flags & RATE_MCS_HT_MSK) {
+		idx = (rate_n_flags & 0xff);
+		return idx;
+	/* Legacy rate format, search for match in table */
+	} else {
+		if (band == NL80211_BAND_5GHZ)
+			band_offset = IWL_FIRST_OFDM_RATE;
+		for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
+			if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
+				return idx - band_offset;
+	}
+
+	return -1;
+}
+
+int iwlagn_manage_ibss_station(struct iwl_priv *priv,
+			       struct ieee80211_vif *vif, bool add)
+{
+	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+	if (add)
+		return iwlagn_add_bssid_station(priv, vif_priv->ctx,
+						vif->bss_conf.bssid,
+						&vif_priv->ibss_bssid_sta_id);
+	return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id,
+				  vif->bss_conf.bssid);
+}
+
+/**
+ * iwlagn_txfifo_flush - send REPLY_TXFIFO_FLUSH command to uCode
+ *
+ * Prerequisites:
+ *  1. acquire the mutex before calling
+ *  2. make sure the RF is on and not in an exit state
+ */
+int iwlagn_txfifo_flush(struct iwl_priv *priv, u32 scd_q_msk)
+{
+	struct iwl_txfifo_flush_cmd_v3 flush_cmd_v3 = {
+		.flush_control = cpu_to_le16(IWL_DROP_ALL),
+	};
+	struct iwl_txfifo_flush_cmd_v2 flush_cmd_v2 = {
+		.flush_control = cpu_to_le16(IWL_DROP_ALL),
+	};
+
+	u32 queue_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK |
+			    IWL_SCD_BE_MSK | IWL_SCD_BK_MSK | IWL_SCD_MGMT_MSK;
+
+	if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
+		queue_control |= IWL_PAN_SCD_VO_MSK | IWL_PAN_SCD_VI_MSK |
+				 IWL_PAN_SCD_BE_MSK | IWL_PAN_SCD_BK_MSK |
+				 IWL_PAN_SCD_MGMT_MSK |
+				 IWL_PAN_SCD_MULTICAST_MSK;
+
+	if (priv->nvm_data->sku_cap_11n_enable)
+		queue_control |= IWL_AGG_TX_QUEUE_MSK;
+
+	if (scd_q_msk)
+		queue_control = scd_q_msk;
+
+	IWL_DEBUG_INFO(priv, "queue control: 0x%x\n", queue_control);
+	flush_cmd_v3.queue_control = cpu_to_le32(queue_control);
+	flush_cmd_v2.queue_control = cpu_to_le16((u16)queue_control);
+
+	if (IWL_UCODE_API(priv->fw->ucode_ver) > 2)
+		return iwl_dvm_send_cmd_pdu(priv, REPLY_TXFIFO_FLUSH, 0,
+					    sizeof(flush_cmd_v3),
+					    &flush_cmd_v3);
+	return iwl_dvm_send_cmd_pdu(priv, REPLY_TXFIFO_FLUSH, 0,
+				    sizeof(flush_cmd_v2), &flush_cmd_v2);
+}
+
+void iwlagn_dev_txfifo_flush(struct iwl_priv *priv)
+{
+	mutex_lock(&priv->mutex);
+	ieee80211_stop_queues(priv->hw);
+	if (iwlagn_txfifo_flush(priv, 0)) {
+		IWL_ERR(priv, "flush request fail\n");
+		goto done;
+	}
+	IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n");
+	iwl_trans_wait_tx_queues_empty(priv->trans, 0xffffffff);
+done:
+	ieee80211_wake_queues(priv->hw);
+	mutex_unlock(&priv->mutex);
+}
+
+/*
+ * BT coex
+ */
+/* Normal TDM */
+static const __le32 iwlagn_def_3w_lookup[IWLAGN_BT_DECISION_LUT_SIZE] = {
+	cpu_to_le32(0xaaaaaaaa),
+	cpu_to_le32(0xaaaaaaaa),
+	cpu_to_le32(0xaeaaaaaa),
+	cpu_to_le32(0xaaaaaaaa),
+	cpu_to_le32(0xcc00ff28),
+	cpu_to_le32(0x0000aaaa),
+	cpu_to_le32(0xcc00aaaa),
+	cpu_to_le32(0x0000aaaa),
+	cpu_to_le32(0xc0004000),
+	cpu_to_le32(0x00004000),
+	cpu_to_le32(0xf0005000),
+	cpu_to_le32(0xf0005000),
+};
+
+/* Full concurrency */
+static const __le32 iwlagn_concurrent_lookup[IWLAGN_BT_DECISION_LUT_SIZE] = {
+	cpu_to_le32(0xaaaaaaaa),
+	cpu_to_le32(0xaaaaaaaa),
+	cpu_to_le32(0xaaaaaaaa),
+	cpu_to_le32(0xaaaaaaaa),
+	cpu_to_le32(0xaaaaaaaa),
+	cpu_to_le32(0xaaaaaaaa),
+	cpu_to_le32(0xaaaaaaaa),
+	cpu_to_le32(0xaaaaaaaa),
+	cpu_to_le32(0x00000000),
+	cpu_to_le32(0x00000000),
+	cpu_to_le32(0x00000000),
+	cpu_to_le32(0x00000000),
+};
+
+void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
+{
+	struct iwl_basic_bt_cmd basic = {
+		.max_kill = IWLAGN_BT_MAX_KILL_DEFAULT,
+		.bt3_timer_t7_value = IWLAGN_BT3_T7_DEFAULT,
+		.bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT,
+		.bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT,
+	};
+	struct iwl_bt_cmd_v1 bt_cmd_v1;
+	struct iwl_bt_cmd_v2 bt_cmd_v2;
+	int ret;
+
+	BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) !=
+			sizeof(basic.bt3_lookup_table));
+
+	if (priv->lib->bt_params) {
+		/*
+		 * Newer generations of devices (the 2000 series and newer)
+		 * use version 2 of the BT command; we need to make sure we
+		 * send the host command with the correct data structure to
+		 * avoid a uCode assert.
+		 */
+		if (priv->lib->bt_params->bt_session_2) {
+			bt_cmd_v2.prio_boost = cpu_to_le32(
+				priv->lib->bt_params->bt_prio_boost);
+			bt_cmd_v2.tx_prio_boost = 0;
+			bt_cmd_v2.rx_prio_boost = 0;
+		} else {
+			/* older version only has 8 bits */
+			WARN_ON(priv->lib->bt_params->bt_prio_boost & ~0xFF);
+			bt_cmd_v1.prio_boost =
+				priv->lib->bt_params->bt_prio_boost;
+			bt_cmd_v1.tx_prio_boost = 0;
+			bt_cmd_v1.rx_prio_boost = 0;
+		}
+	} else {
+		IWL_ERR(priv, "failed to construct BT Coex Config\n");
+		return;
+	}
+
+	/*
+	 * In situations where BT needs to take over receive at the same
+	 * time the STA needs to respond to the AP's frame(s), reduce the
+	 * tx power of the required response frames.  This allows
+	 * concurrent BT receive and WiFi transmit (BT on ANT A, WiFi on
+	 * ANT B) without interference between the two.
+	 *
+	 * Reduced tx power applies to control frames only (ACK/Back/CTS)
+	 * when indicated by the BT config command.
+	 */
+	basic.kill_ack_mask = priv->kill_ack_mask;
+	basic.kill_cts_mask = priv->kill_cts_mask;
+	if (priv->reduced_txpower)
+		basic.reduce_txpower = IWLAGN_BT_REDUCED_TX_PWR;
+	basic.valid = priv->bt_valid;
+
+	/*
+	 * Configure BT coex mode to "no coexistence" when the
+	 * user disabled BT coexistence, we have no interface
+	 * (might be in monitor mode), or the interface is in
+	 * IBSS mode (no proper uCode support for coex then).
+	 */
+	if (!iwlwifi_mod_params.bt_coex_active ||
+	    priv->iw_mode == NL80211_IFTYPE_ADHOC) {
+		basic.flags = IWLAGN_BT_FLAG_COEX_MODE_DISABLED;
+	} else {
+		basic.flags = IWLAGN_BT_FLAG_COEX_MODE_3W <<
+					IWLAGN_BT_FLAG_COEX_MODE_SHIFT;
+
+		if (!priv->bt_enable_pspoll)
+			basic.flags |= IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE;
+		else
+			basic.flags &= ~IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE;
+
+		if (priv->bt_ch_announce)
+			basic.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION;
+		IWL_DEBUG_COEX(priv, "BT coex flag: 0X%x\n", basic.flags);
+	}
+	priv->bt_enable_flag = basic.flags;
+	if (priv->bt_full_concurrent)
+		memcpy(basic.bt3_lookup_table, iwlagn_concurrent_lookup,
+			sizeof(iwlagn_concurrent_lookup));
+	else
+		memcpy(basic.bt3_lookup_table, iwlagn_def_3w_lookup,
+			sizeof(iwlagn_def_3w_lookup));
+
+	IWL_DEBUG_COEX(priv, "BT coex %s in %s mode\n",
+		       basic.flags ? "active" : "disabled",
+		       priv->bt_full_concurrent ?
+		       "full concurrency" : "3-wire");
+
+	if (priv->lib->bt_params->bt_session_2) {
+		memcpy(&bt_cmd_v2.basic, &basic,
+			sizeof(basic));
+		ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
+			0, sizeof(bt_cmd_v2), &bt_cmd_v2);
+	} else {
+		memcpy(&bt_cmd_v1.basic, &basic,
+			sizeof(basic));
+		ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
+			0, sizeof(bt_cmd_v1), &bt_cmd_v1);
+	}
+	if (ret)
+		IWL_ERR(priv, "failed to send BT Coex Config\n");
+
+}
+
+void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena)
+{
+	struct iwl_rxon_context *ctx, *found_ctx = NULL;
+	bool found_ap = false;
+
+	lockdep_assert_held(&priv->mutex);
+
+	/* Check whether AP or GO mode is active. */
+	if (rssi_ena) {
+		for_each_context(priv, ctx) {
+			if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_AP &&
+			    iwl_is_associated_ctx(ctx)) {
+				found_ap = true;
+				break;
+			}
+		}
+	}
+
+	/*
+	 * If disable was requested, or GO/AP mode is active, disable RSSI
+	 * measurements.
+	 */
+	if (!rssi_ena || found_ap) {
+		if (priv->cur_rssi_ctx) {
+			ctx = priv->cur_rssi_ctx;
+			ieee80211_disable_rssi_reports(ctx->vif);
+			priv->cur_rssi_ctx = NULL;
+		}
+		return;
+	}
+
+	/*
+	 * If rssi measurements need to be enabled, consider all cases now.
+	 * Figure out how many contexts are active.
+	 */
+	for_each_context(priv, ctx) {
+		if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION &&
+		    iwl_is_associated_ctx(ctx)) {
+			found_ctx = ctx;
+			break;
+		}
+	}
+
+	/*
+	 * The RSSI monitor is already enabled for the correct interface;
+	 * nothing to do.
+	 */
+	if (found_ctx == priv->cur_rssi_ctx)
+		return;
+
+	/*
+	 * Figure out if the RSSI monitor is currently enabled and needs to
+	 * be changed.  If it is already enabled, disable it first; then
+	 * enable RSSI measurements on the interface found above.
+	 */
+	if (priv->cur_rssi_ctx) {
+		ctx = priv->cur_rssi_ctx;
+		if (ctx->vif)
+			ieee80211_disable_rssi_reports(ctx->vif);
+	}
+
+	priv->cur_rssi_ctx = found_ctx;
+
+	if (!found_ctx)
+		return;
+
+	ieee80211_enable_rssi_reports(found_ctx->vif,
+			IWLAGN_BT_PSP_MIN_RSSI_THRESHOLD,
+			IWLAGN_BT_PSP_MAX_RSSI_THRESHOLD);
+}
+
+static bool iwlagn_bt_traffic_is_sco(struct iwl_bt_uart_msg *uart_msg)
+{
+	return (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >>
+		BT_UART_MSG_FRAME3SCOESCO_POS;
+}
+
+static void iwlagn_bt_traffic_change_work(struct work_struct *work)
+{
+	struct iwl_priv *priv =
+		container_of(work, struct iwl_priv, bt_traffic_change_work);
+	struct iwl_rxon_context *ctx;
+	int smps_request = -1;
+
+	if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
+		/* bt coex disabled */
+		return;
+	}
+
+	/*
+	 * Note: bt_traffic_load can be overridden by the scan complete and
+	 * coex profile notifications.  Ignore that, since the only bad
+	 * consequence is a debug print that doesn't match the actual state.
+	 */
+	IWL_DEBUG_COEX(priv, "BT traffic load changes: %d\n",
+		       priv->bt_traffic_load);
+
+	switch (priv->bt_traffic_load) {
+	case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
+		if (priv->bt_status)
+			smps_request = IEEE80211_SMPS_DYNAMIC;
+		else
+			smps_request = IEEE80211_SMPS_AUTOMATIC;
+		break;
+	case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
+		smps_request = IEEE80211_SMPS_DYNAMIC;
+		break;
+	case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
+	case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
+		smps_request = IEEE80211_SMPS_STATIC;
+		break;
+	default:
+		IWL_ERR(priv, "Invalid BT traffic load: %d\n",
+			priv->bt_traffic_load);
+		break;
+	}
+
+	mutex_lock(&priv->mutex);
+
+	/*
+	 * We cannot send a command to the firmware while scanning.  When
+	 * the scan completes we will schedule this work again.  We check
+	 * with the mutex locked to prevent a new scan request from
+	 * arriving.  We do not check STATUS_SCANNING, to avoid a race when
+	 * queue_work is called twice from different notifications but we
+	 * would quit without performing any work at all.
+	 */
+	if (test_bit(STATUS_SCAN_HW, &priv->status))
+		goto out;
+
+	iwl_update_chain_flags(priv);
+
+	if (smps_request != -1) {
+		priv->current_ht_config.smps = smps_request;
+		for_each_context(priv, ctx) {
+			if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION)
+				ieee80211_request_smps(ctx->vif, smps_request);
+		}
+	}
+
+	/*
+	 * Dynamic PS poll related functionality. Adjust RSSI measurements if
+	 * necessary.
+	 */
+	iwlagn_bt_coex_rssi_monitor(priv);
+out:
+	mutex_unlock(&priv->mutex);
+}
+
+/*
+ * If there is BT SCO traffic and the RSSI monitor is enabled, move the
+ * measurements to the correct interface, or disable the monitor if this
+ * is the last interface to be removed.
+ */
+void iwlagn_bt_coex_rssi_monitor(struct iwl_priv *priv)
+{
+	if (priv->bt_is_sco &&
+	    priv->bt_traffic_load == IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS)
+		iwlagn_bt_adjust_rssi_monitor(priv, true);
+	else
+		iwlagn_bt_adjust_rssi_monitor(priv, false);
+}
+
+static void iwlagn_print_uartmsg(struct iwl_priv *priv,
+				struct iwl_bt_uart_msg *uart_msg)
+{
+	IWL_DEBUG_COEX(priv, "Message Type = 0x%X, SSN = 0x%X, "
+			"Update Req = 0x%X\n",
+		(BT_UART_MSG_FRAME1MSGTYPE_MSK & uart_msg->frame1) >>
+			BT_UART_MSG_FRAME1MSGTYPE_POS,
+		(BT_UART_MSG_FRAME1SSN_MSK & uart_msg->frame1) >>
+			BT_UART_MSG_FRAME1SSN_POS,
+		(BT_UART_MSG_FRAME1UPDATEREQ_MSK & uart_msg->frame1) >>
+			BT_UART_MSG_FRAME1UPDATEREQ_POS);
+
+	IWL_DEBUG_COEX(priv, "Open connections = 0x%X, Traffic load = 0x%X, "
+			"Chl_SeqN = 0x%X, In band = 0x%X\n",
+		(BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK & uart_msg->frame2) >>
+			BT_UART_MSG_FRAME2OPENCONNECTIONS_POS,
+		(BT_UART_MSG_FRAME2TRAFFICLOAD_MSK & uart_msg->frame2) >>
+			BT_UART_MSG_FRAME2TRAFFICLOAD_POS,
+		(BT_UART_MSG_FRAME2CHLSEQN_MSK & uart_msg->frame2) >>
+			BT_UART_MSG_FRAME2CHLSEQN_POS,
+		(BT_UART_MSG_FRAME2INBAND_MSK & uart_msg->frame2) >>
+			BT_UART_MSG_FRAME2INBAND_POS);
+
+	IWL_DEBUG_COEX(priv, "SCO/eSCO = 0x%X, Sniff = 0x%X, A2DP = 0x%X, "
+			"ACL = 0x%X, Master = 0x%X, OBEX = 0x%X\n",
+		(BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >>
+			BT_UART_MSG_FRAME3SCOESCO_POS,
+		(BT_UART_MSG_FRAME3SNIFF_MSK & uart_msg->frame3) >>
+			BT_UART_MSG_FRAME3SNIFF_POS,
+		(BT_UART_MSG_FRAME3A2DP_MSK & uart_msg->frame3) >>
+			BT_UART_MSG_FRAME3A2DP_POS,
+		(BT_UART_MSG_FRAME3ACL_MSK & uart_msg->frame3) >>
+			BT_UART_MSG_FRAME3ACL_POS,
+		(BT_UART_MSG_FRAME3MASTER_MSK & uart_msg->frame3) >>
+			BT_UART_MSG_FRAME3MASTER_POS,
+		(BT_UART_MSG_FRAME3OBEX_MSK & uart_msg->frame3) >>
+			BT_UART_MSG_FRAME3OBEX_POS);
+
+	IWL_DEBUG_COEX(priv, "Idle duration = 0x%X\n",
+		(BT_UART_MSG_FRAME4IDLEDURATION_MSK & uart_msg->frame4) >>
+			BT_UART_MSG_FRAME4IDLEDURATION_POS);
+
+	IWL_DEBUG_COEX(priv, "Tx Activity = 0x%X, Rx Activity = 0x%X, "
+			"eSCO Retransmissions = 0x%X\n",
+		(BT_UART_MSG_FRAME5TXACTIVITY_MSK & uart_msg->frame5) >>
+			BT_UART_MSG_FRAME5TXACTIVITY_POS,
+		(BT_UART_MSG_FRAME5RXACTIVITY_MSK & uart_msg->frame5) >>
+			BT_UART_MSG_FRAME5RXACTIVITY_POS,
+		(BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK & uart_msg->frame5) >>
+			BT_UART_MSG_FRAME5ESCORETRANSMIT_POS);
+
+	IWL_DEBUG_COEX(priv, "Sniff Interval = 0x%X, Discoverable = 0x%X\n",
+		(BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK & uart_msg->frame6) >>
+			BT_UART_MSG_FRAME6SNIFFINTERVAL_POS,
+		(BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >>
+			BT_UART_MSG_FRAME6DISCOVERABLE_POS);
+
+	IWL_DEBUG_COEX(priv, "Sniff Activity = 0x%X, Page = "
+			"0x%X, Inquiry = 0x%X, Connectable = 0x%X\n",
+		(BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >>
+			BT_UART_MSG_FRAME7SNIFFACTIVITY_POS,
+		(BT_UART_MSG_FRAME7PAGE_MSK & uart_msg->frame7) >>
+			BT_UART_MSG_FRAME7PAGE_POS,
+		(BT_UART_MSG_FRAME7INQUIRY_MSK & uart_msg->frame7) >>
+			BT_UART_MSG_FRAME7INQUIRY_POS,
+		(BT_UART_MSG_FRAME7CONNECTABLE_MSK & uart_msg->frame7) >>
+			BT_UART_MSG_FRAME7CONNECTABLE_POS);
+}
+
+static bool iwlagn_set_kill_msk(struct iwl_priv *priv,
+				struct iwl_bt_uart_msg *uart_msg)
+{
+	bool need_update = false;
+	u8 kill_msk = IWL_BT_KILL_REDUCE;
+	static const __le32 bt_kill_ack_msg[3] = {
+		IWLAGN_BT_KILL_ACK_MASK_DEFAULT,
+		IWLAGN_BT_KILL_ACK_CTS_MASK_SCO,
+		IWLAGN_BT_KILL_ACK_CTS_MASK_REDUCE};
+	static const __le32 bt_kill_cts_msg[3] = {
+		IWLAGN_BT_KILL_CTS_MASK_DEFAULT,
+		IWLAGN_BT_KILL_ACK_CTS_MASK_SCO,
+		IWLAGN_BT_KILL_ACK_CTS_MASK_REDUCE};
+
+	if (!priv->reduced_txpower)
+		kill_msk = (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3)
+			? IWL_BT_KILL_OVERRIDE : IWL_BT_KILL_DEFAULT;
+	if (priv->kill_ack_mask != bt_kill_ack_msg[kill_msk] ||
+	    priv->kill_cts_mask != bt_kill_cts_msg[kill_msk]) {
+		priv->bt_valid |= IWLAGN_BT_VALID_KILL_ACK_MASK;
+		priv->kill_ack_mask = bt_kill_ack_msg[kill_msk];
+		priv->bt_valid |= IWLAGN_BT_VALID_KILL_CTS_MASK;
+		priv->kill_cts_mask = bt_kill_cts_msg[kill_msk];
+		need_update = true;
+	}
+	return need_update;
+}
+
+/*
+ * Upon RSSI changes, send a BT config command with the following changes:
+ *  1. enable/disable "reduced control frames tx power"
+ *  2. update the "kill_ack_mask" and "kill_cts_mask"
+ *
+ * If "reduced tx power" is enabled, the uCode shall:
+ *  1. reduce the ACK/Back/CTS rate to 6Mbps
+ *  2. not use duplicate 20/40MHz mode
+ */
+static bool iwlagn_fill_txpower_mode(struct iwl_priv *priv,
+				struct iwl_bt_uart_msg *uart_msg)
+{
+	bool need_update = false;
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+	int ave_rssi;
+
+	if (!ctx->vif || (ctx->vif->type != NL80211_IFTYPE_STATION)) {
+		IWL_DEBUG_INFO(priv, "BSS ctx not active or not in sta mode\n");
+		return false;
+	}
+
+	ave_rssi = ieee80211_ave_rssi(ctx->vif);
+	if (!ave_rssi) {
+		/* no rssi data, no changes to reduce tx power */
+		IWL_DEBUG_COEX(priv, "no rssi data available\n");
+		return need_update;
+	}
+	if (!priv->reduced_txpower &&
+	    !iwl_is_associated(priv, IWL_RXON_CTX_PAN) &&
+	    (ave_rssi > BT_ENABLE_REDUCED_TXPOWER_THRESHOLD) &&
+	    (uart_msg->frame3 & (BT_UART_MSG_FRAME3ACL_MSK |
+	    BT_UART_MSG_FRAME3OBEX_MSK)) &&
+	    !(uart_msg->frame3 & (BT_UART_MSG_FRAME3SCOESCO_MSK |
+	    BT_UART_MSG_FRAME3SNIFF_MSK | BT_UART_MSG_FRAME3A2DP_MSK))) {
+		/* enabling reduced tx power */
+		priv->reduced_txpower = true;
+		priv->bt_valid |= IWLAGN_BT_VALID_REDUCED_TX_PWR;
+		need_update = true;
+	} else if (priv->reduced_txpower &&
+		   (iwl_is_associated(priv, IWL_RXON_CTX_PAN) ||
+		   (ave_rssi < BT_DISABLE_REDUCED_TXPOWER_THRESHOLD) ||
+		   (uart_msg->frame3 & (BT_UART_MSG_FRAME3SCOESCO_MSK |
+		   BT_UART_MSG_FRAME3SNIFF_MSK | BT_UART_MSG_FRAME3A2DP_MSK)) ||
+		   !(uart_msg->frame3 & (BT_UART_MSG_FRAME3ACL_MSK |
+		   BT_UART_MSG_FRAME3OBEX_MSK)))) {
+		/* disable reduced tx power */
+		priv->reduced_txpower = false;
+		priv->bt_valid |= IWLAGN_BT_VALID_REDUCED_TX_PWR;
+		need_update = true;
+	}
+
+	return need_update;
+}
+
+static void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
+					 struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_bt_coex_profile_notif *coex = (void *)pkt->data;
+	struct iwl_bt_uart_msg *uart_msg = &coex->last_bt_uart_msg;
+
+	if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
+		/* bt coex disabled */
+		return;
+	}
+
+	IWL_DEBUG_COEX(priv, "BT Coex notification:\n");
+	IWL_DEBUG_COEX(priv, "    status: %d\n", coex->bt_status);
+	IWL_DEBUG_COEX(priv, "    traffic load: %d\n", coex->bt_traffic_load);
+	IWL_DEBUG_COEX(priv, "    CI compliance: %d\n",
+			coex->bt_ci_compliance);
+	iwlagn_print_uartmsg(priv, uart_msg);
+
+	priv->last_bt_traffic_load = priv->bt_traffic_load;
+	priv->bt_is_sco = iwlagn_bt_traffic_is_sco(uart_msg);
+
+	if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
+		if (priv->bt_status != coex->bt_status ||
+		    priv->last_bt_traffic_load != coex->bt_traffic_load) {
+			if (coex->bt_status) {
+				/* BT on */
+				if (!priv->bt_ch_announce)
+					priv->bt_traffic_load =
+						IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
+				else
+					priv->bt_traffic_load =
+						coex->bt_traffic_load;
+			} else {
+				/* BT off */
+				priv->bt_traffic_load =
+					IWL_BT_COEX_TRAFFIC_LOAD_NONE;
+			}
+			priv->bt_status = coex->bt_status;
+			queue_work(priv->workqueue,
+				   &priv->bt_traffic_change_work);
+		}
+	}
+
+	/* schedule sending the runtime bt_config;
+	 * check reduced power before changing the ack/cts kill mask
+	 */
+	if (iwlagn_fill_txpower_mode(priv, uart_msg) ||
+	    iwlagn_set_kill_msk(priv, uart_msg))
+		queue_work(priv->workqueue, &priv->bt_runtime_config);
+
+
+	/* FIXME: based on notification, adjust the prio_boost */
+
+	priv->bt_ci_compliance = coex->bt_ci_compliance;
+}
+
+void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv)
+{
+	priv->rx_handlers[REPLY_BT_COEX_PROFILE_NOTIF] =
+		iwlagn_bt_coex_profile_notif;
+}
+
+void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv)
+{
+	INIT_WORK(&priv->bt_traffic_change_work,
+		  iwlagn_bt_traffic_change_work);
+}
+
+void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv)
+{
+	cancel_work_sync(&priv->bt_traffic_change_work);
+}
+
+static bool is_single_rx_stream(struct iwl_priv *priv)
+{
+	return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
+	       priv->current_ht_config.single_chain_sufficient;
+}
+
+#define IWL_NUM_RX_CHAINS_MULTIPLE	3
+#define IWL_NUM_RX_CHAINS_SINGLE	2
+#define IWL_NUM_IDLE_CHAINS_DUAL	2
+#define IWL_NUM_IDLE_CHAINS_SINGLE	1
+
+/*
+ * Determine how many receiver/antenna chains to use.
+ *
+ * More chains provide better reception via diversity.  Fewer chains save
+ * power, at the expense of throughput, though only when the device is
+ * not in powersave to begin with.
+ *
+ * MIMO (dual stream) requires at least 2, but works better with 3.
+ * This does not determine *which* chains to use, just how many.
+ */
+static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
+{
+	if (priv->lib->bt_params &&
+	    priv->lib->bt_params->advanced_bt_coexist &&
+	    (priv->bt_full_concurrent ||
+	     priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
+		/*
+		 * only use chain 'A' in bt high traffic load or
+		 * full concurrency mode
+		 */
+		return IWL_NUM_RX_CHAINS_SINGLE;
+	}
+	/* # of Rx chains to use when expecting MIMO. */
+	if (is_single_rx_stream(priv))
+		return IWL_NUM_RX_CHAINS_SINGLE;
+	else
+		return IWL_NUM_RX_CHAINS_MULTIPLE;
+}
+
+/*
+ * When we are in power-saving mode, unless the device supports spatial
+ * multiplexing power save, use the active count for the rx chain count.
+ */
+static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
+{
+	/* # Rx chains when idling, depending on SMPS mode */
+	switch (priv->current_ht_config.smps) {
+	case IEEE80211_SMPS_STATIC:
+	case IEEE80211_SMPS_DYNAMIC:
+		return IWL_NUM_IDLE_CHAINS_SINGLE;
+	case IEEE80211_SMPS_AUTOMATIC:
+	case IEEE80211_SMPS_OFF:
+		return active_cnt;
+	default:
+		WARN(1, "invalid SMPS mode %d",
+		     priv->current_ht_config.smps);
+		return active_cnt;
+	}
+}
+
+/* up to 4 chains */
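+/* e.g. a bitmap of 0x5 (chains A and C) yields 2; this is equivalent to
+ * hweight8(chain_bitmap & 0xf), spelled out bit by bit
+ */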
+static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
+{
+	u8 res;
+	res = (chain_bitmap & BIT(0)) >> 0;
+	res += (chain_bitmap & BIT(1)) >> 1;
+	res += (chain_bitmap & BIT(2)) >> 2;
+	res += (chain_bitmap & BIT(3)) >> 3;
+	return res;
+}
+
+/**
+ * iwlagn_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
+ *
+ * Selects how many and which Rx receivers/antennas/chains to use.
+ * This should not be used for the scan command; it would put the data in
+ * the wrong place.
+ */
+void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+	bool is_single = is_single_rx_stream(priv);
+	bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
+	u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
+	u32 active_chains;
+	u16 rx_chain;
+
+	/* Tell uCode which antennas are actually connected.
+	 * Before first association, we assume all antennas are connected.
+	 * Just after first association, iwl_chain_noise_calibration()
+	 *    checks which antennas actually *are* connected. */
+	if (priv->chain_noise_data.active_chains)
+		active_chains = priv->chain_noise_data.active_chains;
+	else
+		active_chains = priv->nvm_data->valid_rx_ant;
+
+	if (priv->lib->bt_params &&
+	    priv->lib->bt_params->advanced_bt_coexist &&
+	    (priv->bt_full_concurrent ||
+	     priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
+		/*
+		 * only use chain 'A' in bt high traffic load or
+		 * full concurrency mode
+		 */
+		active_chains = first_antenna(active_chains);
+	}
+
+	rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
+
+	/* How many receivers should we use? */
+	active_rx_cnt = iwl_get_active_rx_chain_count(priv);
+	idle_rx_cnt = iwl_get_idle_rx_chain_count(priv, active_rx_cnt);
+
+
+	/* correct the rx chain count according to HW settings
+	 * and chain noise calibration
+	 */
+	valid_rx_cnt = iwl_count_chain_bitmap(active_chains);
+	if (valid_rx_cnt < active_rx_cnt)
+		active_rx_cnt = valid_rx_cnt;
+
+	if (valid_rx_cnt < idle_rx_cnt)
+		idle_rx_cnt = valid_rx_cnt;
+
+	rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
+	rx_chain |= idle_rx_cnt  << RXON_RX_CHAIN_CNT_POS;
+
+	ctx->staging.rx_chain = cpu_to_le16(rx_chain);
+
+	if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
+		ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
+	else
+		ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
+
+	IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
+			ctx->staging.rx_chain,
+			active_rx_cnt, idle_rx_cnt);
+
+	WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
+		active_rx_cnt < idle_rx_cnt);
+}
+
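+/*
+ * Example (a sketch): with ant = 0 and valid = BIT(1) | BIT(2), the loop
+ * advances to index 1 first, finds it valid, and returns 1; if no other
+ * antenna is valid, the original ant is returned unchanged.  On 2.4 GHz
+ * under high BT traffic load, antenna 0 is always used.
+ */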
+u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
+{
+	int i;
+	u8 ind = ant;
+
+	if (priv->band == NL80211_BAND_2GHZ &&
+	    priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
+		return 0;
+
+	for (i = 0; i < RATE_ANT_NUM - 1; i++) {
+		ind = (ind + 1) < RATE_ANT_NUM ?  ind + 1 : 0;
+		if (valid & BIT(ind))
+			return ind;
+	}
+	return ant;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void iwlagn_convert_p1k(u16 *p1k, __le16 *out)
+{
+	int i;
+
+	for (i = 0; i < IWLAGN_P1K_SIZE; i++)
+		out[i] = cpu_to_le16(p1k[i]);
+}
+
+struct wowlan_key_data {
+	struct iwl_rxon_context *ctx;
+	struct iwlagn_wowlan_rsc_tsc_params_cmd *rsc_tsc;
+	struct iwlagn_wowlan_tkip_params_cmd *tkip;
+	const u8 *bssid;
+	bool error, use_rsc_tsc, use_tkip;
+};
+
+static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
+			       struct ieee80211_vif *vif,
+			       struct ieee80211_sta *sta,
+			       struct ieee80211_key_conf *key,
+			       void *_data)
+{
+	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+	struct wowlan_key_data *data = _data;
+	struct iwl_rxon_context *ctx = data->ctx;
+	struct aes_sc *aes_sc, *aes_tx_sc = NULL;
+	struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
+	struct iwlagn_p1k_cache *rx_p1ks;
+	u8 *rx_mic_key;
+	struct ieee80211_key_seq seq;
+	u32 cur_rx_iv32 = 0;
+	u16 p1k[IWLAGN_P1K_SIZE];
+	int ret, i;
+
+	mutex_lock(&priv->mutex);
+
+	if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+	     key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
+	     !sta && !ctx->key_mapping_keys)
+		ret = iwl_set_default_wep_key(priv, ctx, key);
+	else
+		ret = iwl_set_dynamic_key(priv, ctx, key, sta);
+
+	if (ret) {
+		IWL_ERR(priv, "Error setting key during suspend!\n");
+		data->error = true;
+	}
+
+	switch (key->cipher) {
+	case WLAN_CIPHER_SUITE_TKIP:
+		if (sta) {
+			u64 pn64;
+
+			tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
+			tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;
+
+			rx_p1ks = data->tkip->rx_uni;
+
+			pn64 = atomic64_read(&key->tx_pn);
+			tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64));
+			tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64));
+
+			ieee80211_get_tkip_p1k_iv(key, TKIP_PN_TO_IV32(pn64),
+						  p1k);
+			iwlagn_convert_p1k(p1k, data->tkip->tx.p1k);
+
+			memcpy(data->tkip->mic_keys.tx,
+			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
+			       IWLAGN_MIC_KEY_SIZE);
+
+			rx_mic_key = data->tkip->mic_keys.rx_unicast;
+		} else {
+			tkip_sc =
+				data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
+			rx_p1ks = data->tkip->rx_multi;
+			rx_mic_key = data->tkip->mic_keys.rx_mcast;
+		}
+
+		/*
+		 * For non-QoS this relies on the fact that both the uCode and
+		 * mac80211 use TID 0 (as they must, to avoid replay attacks)
+		 * for checking the IV in the frames.
+		 */
+		for (i = 0; i < IWLAGN_NUM_RSC; i++) {
+			ieee80211_get_key_rx_seq(key, i, &seq);
+			tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
+			tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
+			/* wrapping isn't allowed, AP must rekey */
+			if (seq.tkip.iv32 > cur_rx_iv32)
+				cur_rx_iv32 = seq.tkip.iv32;
+		}
+
+		ieee80211_get_tkip_rx_p1k(key, data->bssid, cur_rx_iv32, p1k);
+		iwlagn_convert_p1k(p1k, rx_p1ks[0].p1k);
+		ieee80211_get_tkip_rx_p1k(key, data->bssid,
+					  cur_rx_iv32 + 1, p1k);
+		iwlagn_convert_p1k(p1k, rx_p1ks[1].p1k);
+
+		memcpy(rx_mic_key,
+		       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
+		       IWLAGN_MIC_KEY_SIZE);
+
+		data->use_tkip = true;
+		data->use_rsc_tsc = true;
+		break;
+	case WLAN_CIPHER_SUITE_CCMP:
+		if (sta) {
+			u64 pn64;
+
+			aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
+			aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
+
+			pn64 = atomic64_read(&key->tx_pn);
+			aes_tx_sc->pn = cpu_to_le64(pn64);
+		} else
+			aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
+
+		/*
+		 * For non-QoS this relies on the fact that both the uCode and
+		 * mac80211 use TID 0 for checking the IV in the frames.
+		 */
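+		/*
+		 * seq.ccmp.pn holds the 48-bit PN big-endian (pn[0] is the
+		 * most significant byte); assemble it into a little-endian
+		 * u64 for the uCode.
+		 */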
+		for (i = 0; i < IWLAGN_NUM_RSC; i++) {
+			u8 *pn = seq.ccmp.pn;
+
+			ieee80211_get_key_rx_seq(key, i, &seq);
+			aes_sc[i].pn = cpu_to_le64(
+					(u64)pn[5] |
+					((u64)pn[4] << 8) |
+					((u64)pn[3] << 16) |
+					((u64)pn[2] << 24) |
+					((u64)pn[1] << 32) |
+					((u64)pn[0] << 40));
+		}
+		data->use_rsc_tsc = true;
+		break;
+	}
+
+	mutex_unlock(&priv->mutex);
+}
+
+int iwlagn_send_patterns(struct iwl_priv *priv,
+			struct cfg80211_wowlan *wowlan)
+{
+	struct iwlagn_wowlan_patterns_cmd *pattern_cmd;
+	struct iwl_host_cmd cmd = {
+		.id = REPLY_WOWLAN_PATTERNS,
+		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+	};
+	int i, err;
+
+	if (!wowlan->n_patterns)
+		return 0;
+
+	cmd.len[0] = sizeof(*pattern_cmd) +
+		wowlan->n_patterns * sizeof(struct iwlagn_wowlan_pattern);
+
+	pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
+	if (!pattern_cmd)
+		return -ENOMEM;
+
+	pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
+
+	for (i = 0; i < wowlan->n_patterns; i++) {
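+		/* the match mask carries one bit per pattern byte */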
+		int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
+
+		memcpy(&pattern_cmd->patterns[i].mask,
+			wowlan->patterns[i].mask, mask_len);
+		memcpy(&pattern_cmd->patterns[i].pattern,
+			wowlan->patterns[i].pattern,
+			wowlan->patterns[i].pattern_len);
+		pattern_cmd->patterns[i].mask_size = mask_len;
+		pattern_cmd->patterns[i].pattern_size =
+			wowlan->patterns[i].pattern_len;
+	}
+
+	cmd.data[0] = pattern_cmd;
+	err = iwl_dvm_send_cmd(priv, &cmd);
+	kfree(pattern_cmd);
+	return err;
+}
+
+int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan)
+{
+	struct iwlagn_wowlan_wakeup_filter_cmd wakeup_filter_cmd;
+	struct iwl_rxon_cmd rxon;
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+	struct iwlagn_wowlan_kek_kck_material_cmd kek_kck_cmd;
+	struct iwlagn_wowlan_tkip_params_cmd tkip_cmd = {};
+	struct iwlagn_d3_config_cmd d3_cfg_cmd = {
+		/*
+		 * Program the minimum sleep time to 10 seconds, as many
+		 * platforms have issues processing a wakeup signal while
+		 * still being in the process of suspending.
+		 */
+		.min_sleep_time = cpu_to_le32(10 * 1000 * 1000),
+	};
+	struct wowlan_key_data key_data = {
+		.ctx = ctx,
+		.bssid = ctx->active.bssid_addr,
+		.use_rsc_tsc = false,
+		.tkip = &tkip_cmd,
+		.use_tkip = false,
+	};
+	int ret, i;
+	u16 seq;
+
+	key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
+	if (!key_data.rsc_tsc)
+		return -ENOMEM;
+
+	memset(&wakeup_filter_cmd, 0, sizeof(wakeup_filter_cmd));
+
+	/*
+	 * We know the last used seqno and the uCode expects to be told
+	 * that one; it will increment it before TX.
+	 */
+	seq = le16_to_cpu(priv->last_seq_ctl) & IEEE80211_SCTL_SEQ;
+	wakeup_filter_cmd.non_qos_seq = cpu_to_le16(seq);
+
+	/*
+	 * For QoS counters, we store the one to use next, so subtract 0x10
+	 * since the uCode will add 0x10 before using the value.
+	 */
+	for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+		seq = priv->tid_data[IWL_AP_ID][i].seq_number;
+		seq -= 0x10;
+		wakeup_filter_cmd.qos_seq[i] = cpu_to_le16(seq);
+	}
+
+	if (wowlan->disconnect)
+		wakeup_filter_cmd.enabled |=
+			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_BEACON_MISS |
+				    IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE);
+	if (wowlan->magic_pkt)
+		wakeup_filter_cmd.enabled |=
+			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET);
+	if (wowlan->gtk_rekey_failure)
+		wakeup_filter_cmd.enabled |=
+			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
+	if (wowlan->eap_identity_req)
+		wakeup_filter_cmd.enabled |=
+			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ);
+	if (wowlan->four_way_handshake)
+		wakeup_filter_cmd.enabled |=
+			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
+	if (wowlan->n_patterns)
+		wakeup_filter_cmd.enabled |=
+			cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH);
+
+	if (wowlan->rfkill_release)
+		d3_cfg_cmd.wakeup_flags |=
+			cpu_to_le32(IWLAGN_D3_WAKEUP_RFKILL);
+
+	iwl_scan_cancel_timeout(priv, 200);
+
+	memcpy(&rxon, &ctx->active, sizeof(rxon));
+
+	priv->ucode_loaded = false;
+	iwl_trans_stop_device(priv->trans);
+	ret = iwl_trans_start_hw(priv->trans);
+	if (ret)
+		goto out;
+
+	priv->wowlan = true;
+
+	ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_WOWLAN);
+	if (ret)
+		goto out;
+
+	/* now configure WoWLAN ucode */
+	ret = iwl_alive_start(priv);
+	if (ret)
+		goto out;
+
+	memcpy(&ctx->staging, &rxon, sizeof(rxon));
+	ret = iwlagn_commit_rxon(priv, ctx);
+	if (ret)
+		goto out;
+
+	ret = iwl_power_update_mode(priv, true);
+	if (ret)
+		goto out;
+
+	if (!iwlwifi_mod_params.swcrypto) {
+		/* mark all keys clear */
+		priv->ucode_key_table = 0;
+		ctx->key_mapping_keys = 0;
+
+		/*
+		 * This needs to be unlocked due to lock ordering
+		 * constraints. Since we're in the suspend path
+		 * that isn't really a problem though.
+		 */
+		mutex_unlock(&priv->mutex);
+		ieee80211_iter_keys(priv->hw, ctx->vif,
+				    iwlagn_wowlan_program_keys,
+				    &key_data);
+		mutex_lock(&priv->mutex);
+		if (key_data.error) {
+			ret = -EIO;
+			goto out;
+		}
+
+		if (key_data.use_rsc_tsc) {
+			struct iwl_host_cmd rsc_tsc_cmd = {
+				.id = REPLY_WOWLAN_TSC_RSC_PARAMS,
+				.data[0] = key_data.rsc_tsc,
+				.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+				.len[0] = sizeof(*key_data.rsc_tsc),
+			};
+
+			ret = iwl_dvm_send_cmd(priv, &rsc_tsc_cmd);
+			if (ret)
+				goto out;
+		}
+
+		if (key_data.use_tkip) {
+			ret = iwl_dvm_send_cmd_pdu(priv,
+						 REPLY_WOWLAN_TKIP_PARAMS,
+						 0, sizeof(tkip_cmd),
+						 &tkip_cmd);
+			if (ret)
+				goto out;
+		}
+
+		if (priv->have_rekey_data) {
+			memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
+			memcpy(kek_kck_cmd.kck, priv->kck, NL80211_KCK_LEN);
+			kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
+			memcpy(kek_kck_cmd.kek, priv->kek, NL80211_KEK_LEN);
+			kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
+			kek_kck_cmd.replay_ctr = priv->replay_ctr;
+
+			ret = iwl_dvm_send_cmd_pdu(priv,
+						 REPLY_WOWLAN_KEK_KCK_MATERIAL,
+						 0, sizeof(kek_kck_cmd),
+						 &kek_kck_cmd);
+			if (ret)
+				goto out;
+		}
+	}
+
+	ret = iwl_dvm_send_cmd_pdu(priv, REPLY_D3_CONFIG, 0,
+				     sizeof(d3_cfg_cmd), &d3_cfg_cmd);
+	if (ret)
+		goto out;
+
+	ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WOWLAN_WAKEUP_FILTER,
+				 0, sizeof(wakeup_filter_cmd),
+				 &wakeup_filter_cmd);
+	if (ret)
+		goto out;
+
+	ret = iwlagn_send_patterns(priv, wowlan);
+ out:
+	kfree(key_data.rsc_tsc);
+	return ret;
+}
+#endif
+
+int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
+{
+	if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
+		IWL_WARN(priv, "Not sending command - %s KILL\n",
+			 iwl_is_rfkill(priv) ? "RF" : "CT");
+		return -EIO;
+	}
+
+	if (test_bit(STATUS_FW_ERROR, &priv->status)) {
+		IWL_ERR(priv, "Command %s failed: FW Error\n",
+			iwl_get_cmd_string(priv->trans, cmd->id));
+		return -EIO;
+	}
+
+	/*
+	 * This can happen upon FW ASSERT: we clear the STATUS_FW_ERROR flag
+	 * in iwl_down but cancel the workers only later.
+	 */
+	if (!priv->ucode_loaded) {
+		IWL_ERR(priv, "Fw not loaded - dropping CMD: %x\n", cmd->id);
+		return -EIO;
+	}
+
+	/*
+	 * Synchronous commands from this op-mode must hold
+	 * the mutex, this ensures we don't try to send two
+	 * (or more) synchronous commands at a time.
+	 */
+	if (!(cmd->flags & CMD_ASYNC))
+		lockdep_assert_held(&priv->mutex);
+
+	return iwl_trans_send_cmd(priv->trans, cmd);
+}
+
+int iwl_dvm_send_cmd_pdu(struct iwl_priv *priv, u8 id,
+			 u32 flags, u16 len, const void *data)
+{
+	struct iwl_host_cmd cmd = {
+		.id = id,
+		.len = { len, },
+		.data = { data, },
+		.flags = flags,
+	};
+
+	return iwl_dvm_send_cmd(priv, &cmd);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
new file mode 100644
index 0000000..82caae0
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
@@ -0,0 +1,1657 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+
+#include <net/ieee80211_radiotap.h>
+#include <net/mac80211.h>
+
+#include <asm/div64.h>
+
+#include "iwl-io.h"
+#include "iwl-trans.h"
+#include "iwl-op-mode.h"
+#include "iwl-modparams.h"
+
+#include "dev.h"
+#include "calib.h"
+#include "agn.h"
+
+/*****************************************************************************
+ *
+ * mac80211 entry point functions
+ *
+ *****************************************************************************/
+
+static const struct ieee80211_iface_limit iwlagn_sta_ap_limits[] = {
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_STATION),
+	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_AP),
+	},
+};
+
+static const struct ieee80211_iface_limit iwlagn_2sta_limits[] = {
+	{
+		.max = 2,
+		.types = BIT(NL80211_IFTYPE_STATION),
+	},
+};
+
+static const struct ieee80211_iface_combination
+iwlagn_iface_combinations_dualmode[] = {
+	{ .num_different_channels = 1,
+	  .max_interfaces = 2,
+	  .beacon_int_infra_match = true,
+	  .limits = iwlagn_sta_ap_limits,
+	  .n_limits = ARRAY_SIZE(iwlagn_sta_ap_limits),
+	},
+	{ .num_different_channels = 1,
+	  .max_interfaces = 2,
+	  .limits = iwlagn_2sta_limits,
+	  .n_limits = ARRAY_SIZE(iwlagn_2sta_limits),
+	},
+};
+
+/*
+ * Not a mac80211 entry point function, but it fits in with all the
+ * other mac80211 functions grouped here.
+ */
+int iwlagn_mac_setup_register(struct iwl_priv *priv,
+			      const struct iwl_ucode_capabilities *capa)
+{
+	int ret;
+	struct ieee80211_hw *hw = priv->hw;
+	struct iwl_rxon_context *ctx;
+
+	hw->rate_control_algorithm = "iwl-agn-rs";
+
+	/* Tell mac80211 our characteristics */
+	ieee80211_hw_set(hw, SIGNAL_DBM);
+	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+	ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC);
+	ieee80211_hw_set(hw, SPECTRUM_MGMT);
+	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+	ieee80211_hw_set(hw, QUEUE_CONTROL);
+	ieee80211_hw_set(hw, SUPPORTS_PS);
+	ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
+	ieee80211_hw_set(hw, WANT_MONITOR_VIF);
+
+	if (priv->trans->max_skb_frags)
+		hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;
+
+	hw->offchannel_tx_hw_queue = IWL_AUX_QUEUE;
+	hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FMT;
+
+	/*
+	 * Including the following line will crash some APs.  This
+	 * workaround removes the stimulus which causes the crash until
+	 * the AP software can be fixed.
+	hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+	 */
+
+	if (priv->nvm_data->sku_cap_11n_enable)
+		hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS |
+				       NL80211_FEATURE_STATIC_SMPS;
+
+	/*
+	 * Enable 11w if advertised by firmware and software crypto
+	 * is not enabled (as the firmware will interpret some mgmt
+	 * packets, so enabling it with software crypto isn't safe)
+	 */
+	if (priv->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_MFP &&
+	    !iwlwifi_mod_params.swcrypto)
+		ieee80211_hw_set(hw, MFP_CAPABLE);
+
+	hw->sta_data_size = sizeof(struct iwl_station_priv);
+	hw->vif_data_size = sizeof(struct iwl_vif_priv);
+
+	for_each_context(priv, ctx) {
+		hw->wiphy->interface_modes |= ctx->interface_modes;
+		hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
+	}
+
+	BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
+
+	if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
+		hw->wiphy->iface_combinations =
+			iwlagn_iface_combinations_dualmode;
+		hw->wiphy->n_iface_combinations =
+			ARRAY_SIZE(iwlagn_iface_combinations_dualmode);
+	}
+
+	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+	hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
+				       REGULATORY_DISABLE_BEACON_HINTS;
+
+#ifdef CONFIG_PM_SLEEP
+	if (priv->fw->img[IWL_UCODE_WOWLAN].num_sec &&
+	    priv->trans->ops->d3_suspend &&
+	    priv->trans->ops->d3_resume &&
+	    device_can_wakeup(priv->trans->dev)) {
+		priv->wowlan_support.flags = WIPHY_WOWLAN_MAGIC_PKT |
+					     WIPHY_WOWLAN_DISCONNECT |
+					     WIPHY_WOWLAN_EAP_IDENTITY_REQ |
+					     WIPHY_WOWLAN_RFKILL_RELEASE;
+		if (!iwlwifi_mod_params.swcrypto)
+			priv->wowlan_support.flags |=
+				WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+				WIPHY_WOWLAN_GTK_REKEY_FAILURE;
+
+		priv->wowlan_support.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS;
+		priv->wowlan_support.pattern_min_len =
+					IWLAGN_WOWLAN_MIN_PATTERN_LEN;
+		priv->wowlan_support.pattern_max_len =
+					IWLAGN_WOWLAN_MAX_PATTERN_LEN;
+		hw->wiphy->wowlan = &priv->wowlan_support;
+	}
+#endif
+
+	if (iwlwifi_mod_params.power_save)
+		hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
+	else
+		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
+	/*
+	 * we create the 24-byte 802.11 header and a max-length
+	 * (2 + 32 bytes) SSID element
+	 */
+	hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 34;
+
+	/*
+	 * We don't use all queues: 4 and 9 are unused and any
+	 * aggregation queue gets mapped down to the AC queue.
+	 */
+	hw->queues = IWLAGN_FIRST_AMPDU_QUEUE;
+
+	hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
+
+	if (priv->nvm_data->bands[NL80211_BAND_2GHZ].n_channels)
+		priv->hw->wiphy->bands[NL80211_BAND_2GHZ] =
+			&priv->nvm_data->bands[NL80211_BAND_2GHZ];
+	if (priv->nvm_data->bands[NL80211_BAND_5GHZ].n_channels)
+		priv->hw->wiphy->bands[NL80211_BAND_5GHZ] =
+			&priv->nvm_data->bands[NL80211_BAND_5GHZ];
+
+	hw->wiphy->hw_version = priv->trans->hw_id;
+
+	iwl_leds_init(priv);
+
+	wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
+
+	ret = ieee80211_register_hw(priv->hw);
+	if (ret) {
+		IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
+		iwl_leds_exit(priv);
+		return ret;
+	}
+	priv->mac80211_registered = 1;
+
+	return 0;
+}
+
+void iwlagn_mac_unregister(struct iwl_priv *priv)
+{
+	if (!priv->mac80211_registered)
+		return;
+	iwl_leds_exit(priv);
+	ieee80211_unregister_hw(priv->hw);
+	priv->mac80211_registered = 0;
+}
+
+static int __iwl_up(struct iwl_priv *priv)
+{
+	struct iwl_rxon_context *ctx;
+	int ret;
+
+	lockdep_assert_held(&priv->mutex);
+
+	if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
+		IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
+		return -EIO;
+	}
+
+	for_each_context(priv, ctx) {
+		ret = iwlagn_alloc_bcast_station(priv, ctx);
+		if (ret) {
+			iwl_dealloc_bcast_stations(priv);
+			return ret;
+		}
+	}
+
+	ret = iwl_trans_start_hw(priv->trans);
+	if (ret) {
+		IWL_ERR(priv, "Failed to start HW: %d\n", ret);
+		goto error;
+	}
+
+	ret = iwl_run_init_ucode(priv);
+	if (ret) {
+		IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret);
+		goto error;
+	}
+
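+	/*
+	 * The INIT ucode run stops the device again when it completes,
+	 * so the HW must be restarted before loading the runtime image.
+	 */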
+	ret = iwl_trans_start_hw(priv->trans);
+	if (ret) {
+		IWL_ERR(priv, "Failed to start HW: %d\n", ret);
+		goto error;
+	}
+
+	ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_REGULAR);
+	if (ret) {
+		IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret);
+		goto error;
+	}
+
+	ret = iwl_alive_start(priv);
+	if (ret)
+		goto error;
+	return 0;
+
+ error:
+	set_bit(STATUS_EXIT_PENDING, &priv->status);
+	iwl_down(priv);
+	clear_bit(STATUS_EXIT_PENDING, &priv->status);
+
+	IWL_ERR(priv, "Unable to initialize device.\n");
+	return ret;
+}
+
+static int iwlagn_mac_start(struct ieee80211_hw *hw)
+{
+	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+	int ret;
+
+	IWL_DEBUG_MAC80211(priv, "enter\n");
+
+	/* we should be verifying the device is ready to be opened */
+	mutex_lock(&priv->mutex);
+	ret = __iwl_up(priv);
+	mutex_unlock(&priv->mutex);
+	if (ret)
+		return ret;
+
+	IWL_DEBUG_INFO(priv, "Start UP work done.\n");
+
+	/* Now we should be done, and the READY bit should be set. */
+	if (WARN_ON(!test_bit(STATUS_READY, &priv->status)))
+		ret = -EIO;
+
+	iwlagn_led_enable(priv);
+
+	priv->is_open = 1;
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+	return 0;
+}
+
+static void iwlagn_mac_stop(struct ieee80211_hw *hw)
+{
+	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+
+	IWL_DEBUG_MAC80211(priv, "enter\n");
+
+	if (!priv->is_open)
+		return;
+
+	priv->is_open = 0;
+
+	mutex_lock(&priv->mutex);
+	iwl_down(priv);
+	mutex_unlock(&priv->mutex);
+
+	iwl_cancel_deferred_work(priv);
+
+	flush_workqueue(priv->workqueue);
+
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
+				      struct ieee80211_vif *vif,
+				      struct cfg80211_gtk_rekey_data *data)
+{
+	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+
+	if (iwlwifi_mod_params.swcrypto)
+		return;
+
+	IWL_DEBUG_MAC80211(priv, "enter\n");
+	mutex_lock(&priv->mutex);
+
+	if (priv->contexts[IWL_RXON_CTX_BSS].vif != vif)
+		goto out;
+
+	memcpy(priv->kek, data->kek, NL80211_KEK_LEN);
+	memcpy(priv->kck, data->kck, NL80211_KCK_LEN);
+	priv->replay_ctr =
+		cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr));
+	priv->have_rekey_data = true;
+
+ out:
+	mutex_unlock(&priv->mutex);
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
+			      struct cfg80211_wowlan *wowlan)
+{
+	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+	int ret;
+
+	if (WARN_ON(!wowlan))
+		return -EINVAL;
+
+	IWL_DEBUG_MAC80211(priv, "enter\n");
+	mutex_lock(&priv->mutex);
+
+	/* Don't attempt WoWLAN when not associated, tear down instead. */
+	if (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION ||
+	    !iwl_is_associated_ctx(ctx)) {
+		ret = 1;
+		goto out;
+	}
+
+	ret = iwlagn_suspend(priv, wowlan);
+	if (ret)
+		goto error;
+
+	/* let the ucode operate on its own */
+	iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET,
+		    CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
+
+	iwl_trans_d3_suspend(priv->trans, false, true);
+
+	goto out;
+
+ error:
+	priv->wowlan = false;
+	iwlagn_prepare_restart(priv);
+	ieee80211_restart_hw(priv->hw);
+ out:
+	mutex_unlock(&priv->mutex);
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+
+	return ret;
+}
+
+struct iwl_resume_data {
+	struct iwl_priv *priv;
+	struct iwlagn_wowlan_status *cmd;
+	bool valid;
+};
+
+static bool iwl_resume_status_fn(struct iwl_notif_wait_data *notif_wait,
+				 struct iwl_rx_packet *pkt, void *data)
+{
+	struct iwl_resume_data *resume_data = data;
+	struct iwl_priv *priv = resume_data->priv;
+
+	if (iwl_rx_packet_payload_len(pkt) != sizeof(*resume_data->cmd)) {
+		IWL_ERR(priv, "rx wrong size data\n");
+		return true;
+	}
+	memcpy(resume_data->cmd, pkt->data, sizeof(*resume_data->cmd));
+	resume_data->valid = true;
+
+	return true;
+}
+
+static int iwlagn_mac_resume(struct ieee80211_hw *hw)
+{
+	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+	struct ieee80211_vif *vif;
+	u32 base;
+	int ret;
+	enum iwl_d3_status d3_status;
+	struct error_table_start {
+		/* cf. struct iwl_error_event_table */
+		u32 valid;
+		u32 error_id;
+	} err_info;
+	struct iwl_notification_wait status_wait;
+	static const u16 status_cmd[] = {
+		REPLY_WOWLAN_GET_STATUS,
+	};
+	struct iwlagn_wowlan_status status_data = {};
+	struct iwl_resume_data resume_data = {
+		.priv = priv,
+		.cmd = &status_data,
+		.valid = false,
+	};
+	struct cfg80211_wowlan_wakeup wakeup = {
+		.pattern_idx = -1,
+	};
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	const struct fw_img *img;
+#endif
+
+	IWL_DEBUG_MAC80211(priv, "enter\n");
+	mutex_lock(&priv->mutex);
+
+	/* we'll clear ctx->vif during iwlagn_prepare_restart() */
+	vif = ctx->vif;
+
+	ret = iwl_trans_d3_resume(priv->trans, &d3_status, false, true);
+	if (ret)
+		goto out_unlock;
+
+	if (d3_status != IWL_D3_STATUS_ALIVE) {
+		IWL_INFO(priv, "Device was reset during suspend\n");
+		goto out_unlock;
+	}
+
+	/* uCode is no longer operating by itself */
+	iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR,
+		    CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
+
+	base = priv->device_pointers.error_event_table;
+	if (!iwlagn_hw_valid_rtc_data_addr(base)) {
+		IWL_WARN(priv, "Invalid error table during resume!\n");
+		goto out_unlock;
+	}
+
+	iwl_trans_read_mem_bytes(priv->trans, base,
+				 &err_info, sizeof(err_info));
+
+	if (err_info.valid) {
+		IWL_INFO(priv, "error table is valid (%d, 0x%x)\n",
+			 err_info.valid, err_info.error_id);
+		if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
+			wakeup.rfkill_release = true;
+			ieee80211_report_wowlan_wakeup(vif, &wakeup,
+						       GFP_KERNEL);
+		}
+		goto out_unlock;
+	}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	img = &priv->fw->img[IWL_UCODE_WOWLAN];
+	if (!priv->wowlan_sram)
+		priv->wowlan_sram =
+			kzalloc(img->sec[IWL_UCODE_SECTION_DATA].len,
+				GFP_KERNEL);
+
+	if (priv->wowlan_sram)
+		iwl_trans_read_mem(priv->trans, 0x800000,
+				   priv->wowlan_sram,
+				   img->sec[IWL_UCODE_SECTION_DATA].len / 4);
+#endif
+
+	/*
+	 * This is very strange. The GET_STATUS command is sent, but the device
+	 * doesn't reply properly; it seems it doesn't close the RBD, so one is
+	 * always left open ... As a result, we need to send another command
+	 * and reset the driver afterwards. Since we have to switch to the
+	 * runtime firmware again anyway, that reset will happen then.
+	 */
+
+	iwl_init_notification_wait(&priv->notif_wait, &status_wait, status_cmd,
+				   ARRAY_SIZE(status_cmd), iwl_resume_status_fn,
+				   &resume_data);
+
+	iwl_dvm_send_cmd_pdu(priv, REPLY_WOWLAN_GET_STATUS, CMD_ASYNC, 0, NULL);
+	iwl_dvm_send_cmd_pdu(priv, REPLY_ECHO, CMD_ASYNC, 0, NULL);
+	/* an RBD is left open in the firmware now! */
+
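+	/* wait up to 200ms (HZ / 5) for the status notification */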
+	ret = iwl_wait_notification(&priv->notif_wait, &status_wait, HZ/5);
+	if (ret)
+		goto out_unlock;
+
+	if (resume_data.valid && priv->contexts[IWL_RXON_CTX_BSS].vif) {
+		u32 reasons = le32_to_cpu(status_data.wakeup_reason);
+		struct cfg80211_wowlan_wakeup *wakeup_report;
+
+		IWL_INFO(priv, "WoWLAN wakeup reason(s): 0x%.8x\n", reasons);
+
+		if (reasons) {
+			if (reasons & IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET)
+				wakeup.magic_pkt = true;
+			if (reasons & IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH)
+				wakeup.pattern_idx = status_data.pattern_number;
+			if (reasons & (IWLAGN_WOWLAN_WAKEUP_BEACON_MISS |
+				       IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE))
+				wakeup.disconnect = true;
+			if (reasons & IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL)
+				wakeup.gtk_rekey_failure = true;
+			if (reasons & IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ)
+				wakeup.eap_identity_req = true;
+			if (reasons & IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE)
+				wakeup.four_way_handshake = true;
+			wakeup_report = &wakeup;
+		} else {
+			wakeup_report = NULL;
+		}
+
+		ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
+	}
+
+	priv->wowlan = false;
+
+	iwlagn_prepare_restart(priv);
+
+	memset((void *)&ctx->active, 0, sizeof(ctx->active));
+	iwl_connection_init_rx_config(priv, ctx);
+	iwlagn_set_rxon_chain(priv, ctx);
+
+ out_unlock:
+	mutex_unlock(&priv->mutex);
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+
+	ieee80211_resume_disconnect(vif);
+
+	return 1;
+}
+
+static void iwlagn_mac_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+
+	device_set_wakeup_enable(priv->trans->dev, enabled);
+}
+#endif
+
+static void iwlagn_mac_tx(struct ieee80211_hw *hw,
+			  struct ieee80211_tx_control *control,
+			  struct sk_buff *skb)
+{
+	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+
+	if (iwlagn_tx_skb(priv, control->sta, skb))
+		ieee80211_free_txskb(hw, skb);
+}
+
+static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
+				       struct ieee80211_vif *vif,
+				       struct ieee80211_key_conf *keyconf,
+				       struct ieee80211_sta *sta,
+				       u32 iv32, u16 *phase1key)
+{
+	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+
+	iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key);
+}
+
+static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+			      struct ieee80211_vif *vif,
+			      struct ieee80211_sta *sta,
+			      struct ieee80211_key_conf *key)
+{
+	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+	struct iwl_rxon_context *ctx = vif_priv->ctx;
+	int ret;
+	bool is_default_wep_key = false;
+
+	IWL_DEBUG_MAC80211(priv, "enter\n");
+
+	if (iwlwifi_mod_params.swcrypto) {
+		IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
+		return -EOPNOTSUPP;
+	}
+
+	switch (key->cipher) {
+	case WLAN_CIPHER_SUITE_TKIP:
+		key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+		/* fall through */
+	case WLAN_CIPHER_SUITE_CCMP:
+		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+		break;
+	default:
+		break;
+	}
+
+	/*
+	 * We could program these keys into the hardware as well, but we
+	 * don't expect much multicast traffic in IBSS and having keys
+	 * for more stations is probably more useful.
+	 *
+	 * Mark key TX-only and return 0.
+	 */
+	if (vif->type == NL80211_IFTYPE_ADHOC &&
+	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+		key->hw_key_idx = WEP_INVALID_OFFSET;
+		return 0;
+	}
+
+	/* If the key was TX-only, accept deletion */
+	if (cmd == DISABLE_KEY && key->hw_key_idx == WEP_INVALID_OFFSET)
+		return 0;
+
+	mutex_lock(&priv->mutex);
+	iwl_scan_cancel_timeout(priv, 100);
+
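+	/* both markers are stored in hw_key_idx, so they must differ */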
+	BUILD_BUG_ON(WEP_INVALID_OFFSET == IWLAGN_HW_KEY_DEFAULT);
+
+	/*
+	 * If we are given a WEP group key and haven't received any
+	 * key-mapping keys so far, we are in legacy WEP mode (group key
+	 * only); otherwise we are in 802.1X mode.
+	 * In legacy WEP mode, we use a different host command to the uCode.
+	 */
+	if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+	     key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
+		if (cmd == SET_KEY)
+			is_default_wep_key = !ctx->key_mapping_keys;
+		else
+			is_default_wep_key =
+				key->hw_key_idx == IWLAGN_HW_KEY_DEFAULT;
+	}
+
+	switch (cmd) {
+	case SET_KEY:
+		if (is_default_wep_key) {
+			ret = iwl_set_default_wep_key(priv, vif_priv->ctx, key);
+			break;
+		}
+		ret = iwl_set_dynamic_key(priv, vif_priv->ctx, key, sta);
+		if (ret) {
+			/*
+			 * can't add key for RX, but we don't need it
+			 * in the device for TX so still return 0
+			 */
+			ret = 0;
+			key->hw_key_idx = WEP_INVALID_OFFSET;
+		}
+
+		IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
+		break;
+	case DISABLE_KEY:
+		if (is_default_wep_key)
+			ret = iwl_remove_default_wep_key(priv, ctx, key);
+		else
+			ret = iwl_remove_dynamic_key(priv, ctx, key, sta);
+
+		IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	mutex_unlock(&priv->mutex);
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+
+	return ret;
+}
+
+static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
+{
+	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
+		return false;
+	return true;
+}
+
+static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
+{
+	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
+		return false;
+	if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
+		return true;
+
+	/* disabled by default */
+	return false;
+}
+
+static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
+				   struct ieee80211_vif *vif,
+				   struct ieee80211_ampdu_params *params)
+{
+	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+	int ret = -EINVAL;
+	struct ieee80211_sta *sta = params->sta;
+	enum ieee80211_ampdu_mlme_action action = params->action;
+	u16 tid = params->tid;
+	u16 *ssn = &params->ssn;
+	u8 buf_size = params->buf_size;
+	struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
+
+	IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
+		     sta->addr, tid);
+
+	if (!(priv->nvm_data->sku_cap_11n_enable))
+		return -EACCES;
+
+	IWL_DEBUG_MAC80211(priv, "enter\n");
+	mutex_lock(&priv->mutex);
+
+	switch (action) {
+	case IEEE80211_AMPDU_RX_START:
+		if (!iwl_enable_rx_ampdu(priv->cfg))
+			break;
+		IWL_DEBUG_HT(priv, "start Rx\n");
+		ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
+		break;
+	case IEEE80211_AMPDU_RX_STOP:
+		IWL_DEBUG_HT(priv, "stop Rx\n");
+		ret = iwl_sta_rx_agg_stop(priv, sta, tid);
+		break;
+	case IEEE80211_AMPDU_TX_START:
+		if (!priv->trans->ops->txq_enable)
+			break;
+		if (!iwl_enable_tx_ampdu(priv->cfg))
+			break;
+		IWL_DEBUG_HT(priv, "start Tx\n");
+		ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
+		break;
+	case IEEE80211_AMPDU_TX_STOP_FLUSH:
+	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+		IWL_DEBUG_HT(priv, "Flush Tx\n");
+		ret = iwlagn_tx_agg_flush(priv, vif, sta, tid);
+		break;
+	case IEEE80211_AMPDU_TX_STOP_CONT:
+		IWL_DEBUG_HT(priv, "stop Tx\n");
+		ret = iwlagn_tx_agg_stop(priv, vif, sta, tid);
+		if ((ret == 0) && (priv->agg_tids_count > 0)) {
+			priv->agg_tids_count--;
+			IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
+				     priv->agg_tids_count);
+		}
+		if (!priv->agg_tids_count &&
+		    priv->hw_params.use_rts_for_aggregation) {
+			/*
+			 * switch off RTS/CTS if it was previously enabled
+			 */
+			sta_priv->lq_sta.lq.general_params.flags &=
+				~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
+			iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
+					&sta_priv->lq_sta.lq, CMD_ASYNC, false);
+		}
+		break;
+	case IEEE80211_AMPDU_TX_OPERATIONAL:
+		ret = iwlagn_tx_agg_oper(priv, vif, sta, tid, buf_size);
+		break;
+	}
+	mutex_unlock(&priv->mutex);
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+	return ret;
+}
+
+static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
+			      struct ieee80211_vif *vif,
+			      struct ieee80211_sta *sta)
+{
+	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+	bool is_ap = vif->type == NL80211_IFTYPE_STATION;
+	int ret;
+	u8 sta_id;
+
+	IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
+			sta->addr);
+	sta_priv->sta_id = IWL_INVALID_STATION;
+
+	atomic_set(&sta_priv->pending_frames, 0);
+	if (vif->type == NL80211_IFTYPE_AP)
+		sta_priv->client = true;
+
+	ret = iwl_add_station_common(priv, vif_priv->ctx, sta->addr,
+				     is_ap, sta, &sta_id);
+	if (ret) {
+		IWL_ERR(priv, "Unable to add station %pM (%d)\n",
+			sta->addr, ret);
+		/* Should we return success if return code is EEXIST ? */
+		return ret;
+	}
+
+	sta_priv->sta_id = sta_id;
+
+	return 0;
+}
+
+static int iwlagn_mac_sta_remove(struct ieee80211_hw *hw,
+				 struct ieee80211_vif *vif,
+				 struct ieee80211_sta *sta)
+{
+	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+	int ret;
+
+	IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n", sta->addr);
+
+	if (vif->type == NL80211_IFTYPE_STATION) {
+		/*
+		 * Station will be removed from device when the RXON
+		 * is set to unassociated -- just deactivate it here
+		 * to avoid re-programming it.
+		 */
+		ret = 0;
+		iwl_deactivate_station(priv, sta_priv->sta_id, sta->addr);
+	} else {
+		ret = iwl_remove_station(priv, sta_priv->sta_id, sta->addr);
+		if (ret)
+			IWL_DEBUG_QUIET_RFKILL(priv,
+				"Error removing station %pM\n", sta->addr);
+	}
+	return ret;
+}
+
+static int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
+				struct ieee80211_vif *vif,
+				struct ieee80211_sta *sta,
+				enum ieee80211_sta_state old_state,
+				enum ieee80211_sta_state new_state)
+{
+	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+	enum {
+		NONE, ADD, REMOVE, HT_RATE_INIT, ADD_RATE_INIT,
+	} op = NONE;
+	int ret;
+
+	IWL_DEBUG_MAC80211(priv, "station %pM state change %d->%d\n",
+			   sta->addr, old_state, new_state);
+
+	mutex_lock(&priv->mutex);
+	if (vif->type == NL80211_IFTYPE_STATION) {
+		if (old_state == IEEE80211_STA_NOTEXIST &&
+		    new_state == IEEE80211_STA_NONE)
+			op = ADD;
+		else if (old_state == IEEE80211_STA_NONE &&
+			 new_state == IEEE80211_STA_NOTEXIST)
+			op = REMOVE;
+		else if (old_state == IEEE80211_STA_AUTH &&
+			 new_state == IEEE80211_STA_ASSOC)
+			op = HT_RATE_INIT;
+	} else {
+		if (old_state == IEEE80211_STA_AUTH &&
+		    new_state == IEEE80211_STA_ASSOC)
+			op = ADD_RATE_INIT;
+		else if (old_state == IEEE80211_STA_ASSOC &&
+			 new_state == IEEE80211_STA_AUTH)
+			op = REMOVE;
+	}
+
+	switch (op) {
+	case ADD:
+		ret = iwlagn_mac_sta_add(hw, vif, sta);
+		if (ret)
+			break;
+		/*
+		 * Clear the in-progress flag, the AP station entry was added
+		 * but we'll initialize LQ only when we've associated (which
+		 * would also clear the in-progress flag). This is necessary
+		 * in case we never initialize LQ because association fails.
+		 */
+		spin_lock_bh(&priv->sta_lock);
+		priv->stations[iwl_sta_id(sta)].used &=
+			~IWL_STA_UCODE_INPROGRESS;
+		spin_unlock_bh(&priv->sta_lock);
+		break;
+	case REMOVE:
+		ret = iwlagn_mac_sta_remove(hw, vif, sta);
+		break;
+	case ADD_RATE_INIT:
+		ret = iwlagn_mac_sta_add(hw, vif, sta);
+		if (ret)
+			break;
+		/* Initialize rate scaling */
+		IWL_DEBUG_INFO(priv,
+			       "Initializing rate scaling for station %pM\n",
+			       sta->addr);
+		iwl_rs_rate_init(priv, sta, iwl_sta_id(sta));
+		ret = 0;
+		break;
+	case HT_RATE_INIT:
+		/* Initialize rate scaling */
+		ret = iwl_sta_update_ht(priv, vif_priv->ctx, sta);
+		if (ret)
+			break;
+		IWL_DEBUG_INFO(priv,
+			       "Initializing rate scaling for station %pM\n",
+			       sta->addr);
+		iwl_rs_rate_init(priv, sta, iwl_sta_id(sta));
+		ret = 0;
+		break;
+	default:
+		ret = 0;
+		break;
+	}
+
+	/*
+	 * mac80211 might WARN if we fail, but due to the way we
+	 * (badly) handle hard rfkill, we might fail here
+	 */
+	if (iwl_is_rfkill(priv))
+		ret = 0;
+
+	mutex_unlock(&priv->mutex);
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+
+	return ret;
+}
+
+static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
+				      struct ieee80211_vif *vif,
+				      struct ieee80211_channel_switch *ch_switch)
+{
+	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+	struct ieee80211_conf *conf = &hw->conf;
+	struct ieee80211_channel *channel = ch_switch->chandef.chan;
+	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
+	/*
+	 * MULTI-FIXME
+	 * When we add support for multiple interfaces, we need to
+	 * revisit this. The channel switch command in the device
+	 * only affects the BSS context, but what does that really
+	 * mean? And what if we get a CSA on the second interface?
+	 * This needs a lot of work.
+	 */
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+	u16 ch;
+
+	IWL_DEBUG_MAC80211(priv, "enter\n");
+
+	mutex_lock(&priv->mutex);
+
+	if (iwl_is_rfkill(priv))
+		goto out;
+
+	if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
+	    test_bit(STATUS_SCANNING, &priv->status) ||
+	    test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
+		goto out;
+
+	if (!iwl_is_associated_ctx(ctx))
+		goto out;
+
+	if (!priv->lib->set_channel_switch)
+		goto out;
+
+	ch = channel->hw_value;
+	if (le16_to_cpu(ctx->active.channel) == ch)
+		goto out;
+
+	priv->current_ht_config.smps = conf->smps_mode;
+
+	/* Configure HT40 channels */
+	switch (cfg80211_get_chandef_type(&ch_switch->chandef)) {
+	case NL80211_CHAN_NO_HT:
+	case NL80211_CHAN_HT20:
+		ctx->ht.is_40mhz = false;
+		ctx->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
+		break;
+	case NL80211_CHAN_HT40MINUS:
+		ctx->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+		ctx->ht.is_40mhz = true;
+		break;
+	case NL80211_CHAN_HT40PLUS:
+		ctx->ht.extension_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+		ctx->ht.is_40mhz = true;
+		break;
+	}
+
+	if ((le16_to_cpu(ctx->staging.channel) != ch))
+		ctx->staging.flags = 0;
+
+	iwl_set_rxon_channel(priv, channel, ctx);
+	iwl_set_rxon_ht(priv, ht_conf);
+	iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif);
+
+	/*
+	 * at this point, staging_rxon has the
+	 * configuration for channel switch
+	 */
+	set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
+	priv->switch_channel = cpu_to_le16(ch);
+	if (priv->lib->set_channel_switch(priv, ch_switch)) {
+		clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
+		priv->switch_channel = 0;
+		ieee80211_chswitch_done(ctx->vif, false);
+	}
+
+out:
+	mutex_unlock(&priv->mutex);
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
+{
+	/*
+	 * MULTI-FIXME
+	 * See iwlagn_mac_channel_switch.
+	 */
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+		return;
+
+	if (!test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
+		return;
+
+	if (ctx->vif)
+		ieee80211_chswitch_done(ctx->vif, is_success);
+}
+
+static void iwlagn_configure_filter(struct ieee80211_hw *hw,
+				    unsigned int changed_flags,
+				    unsigned int *total_flags,
+				    u64 multicast)
+{
+	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+	__le32 filter_or = 0, filter_nand = 0;
+	struct iwl_rxon_context *ctx;
+
+#define CHK(test, flag)	do { \
+	if (*total_flags & (test))		\
+		filter_or |= (flag);		\
+	else					\
+		filter_nand |= (flag);		\
+	} while (0)
+
+	IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
+			changed_flags, *total_flags);
+
+	CHK(FIF_OTHER_BSS, RXON_FILTER_PROMISC_MSK);
+	/* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
+	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
+	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
+
+#undef CHK
+
+	mutex_lock(&priv->mutex);
+
+	for_each_context(priv, ctx) {
+		ctx->staging.filter_flags &= ~filter_nand;
+		ctx->staging.filter_flags |= filter_or;
+
+		/*
+		 * Don't commit directly here since the hardware may be
+		 * performing a scan; the filter flags change will be
+		 * committed eventually anyway.
+		 */
+	}
+
+	mutex_unlock(&priv->mutex);
+
+	/*
+	 * Receiving all multicast frames is always enabled by the
+	 * default flags setup in iwl_connection_init_rx_config()
+	 * since we currently do not support programming multicast
+	 * filters into the device.
+	 */
+	*total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI |
+			FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
+}
+
+static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			     u32 queues, bool drop)
+{
+	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+	u32 scd_queues;
+
+	mutex_lock(&priv->mutex);
+	IWL_DEBUG_MAC80211(priv, "enter\n");
+
+	if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
+		IWL_DEBUG_TX(priv, "Aborting flush due to device shutdown\n");
+		goto done;
+	}
+	if (iwl_is_rfkill(priv)) {
+		IWL_DEBUG_TX(priv, "Aborting flush due to RF Kill\n");
+		goto done;
+	}
+
+	scd_queues = BIT(priv->cfg->base_params->num_of_queues) - 1;
+	scd_queues &= ~(BIT(IWL_IPAN_CMD_QUEUE_NUM) |
+			BIT(IWL_DEFAULT_CMD_QUEUE_NUM));
+
+	if (drop) {
+		IWL_DEBUG_TX_QUEUES(priv, "Flushing SCD queues: 0x%x\n",
+				    scd_queues);
+		if (iwlagn_txfifo_flush(priv, scd_queues)) {
+			IWL_ERR(priv, "flush request fail\n");
+			goto done;
+		}
+	}
+
+	IWL_DEBUG_TX_QUEUES(priv, "wait transmit/flush all frames\n");
+	iwl_trans_wait_tx_queues_empty(priv->trans, scd_queues);
+done:
+	mutex_unlock(&priv->mutex);
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+static void iwlagn_mac_event_callback(struct ieee80211_hw *hw,
+				      struct ieee80211_vif *vif,
+				      const struct ieee80211_event *event)
+{
+	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+
+	if (event->type != RSSI_EVENT)
+		return;
+
+	IWL_DEBUG_MAC80211(priv, "enter\n");
+
+	if (priv->lib->bt_params &&
+	    priv->lib->bt_params->advanced_bt_coexist) {
+		if (event->u.rssi.data == RSSI_EVENT_LOW)
+			priv->bt_enable_pspoll = true;
+		else if (event->u.rssi.data == RSSI_EVENT_HIGH)
+			priv->bt_enable_pspoll = false;
+
+		queue_work(priv->workqueue, &priv->bt_runtime_config);
+	} else {
+		IWL_DEBUG_MAC80211(priv, "Advanced BT coex disabled,"
+				"ignoring RSSI callback\n");
+	}
+
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+static int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
+			      struct ieee80211_sta *sta, bool set)
+{
+	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+
+	queue_work(priv->workqueue, &priv->beacon_update);
+
+	return 0;
+}
+
+static int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
+			      struct ieee80211_vif *vif, u16 queue,
+			      const struct ieee80211_tx_queue_params *params)
+{
+	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+	struct iwl_rxon_context *ctx = vif_priv->ctx;
+	int q;
+
+	if (WARN_ON(!ctx))
+		return -EINVAL;
+
+	IWL_DEBUG_MAC80211(priv, "enter\n");
+
+	if (!iwl_is_ready_rf(priv)) {
+		IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
+		return -EIO;
+	}
+
+	if (queue >= AC_NUM) {
+		IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
+		return 0;
+	}
+
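+	/*
+	 * mac80211 orders queues from highest priority (VO) to lowest (BK),
+	 * while the uCode QoS table uses the reverse order, so invert
+	 * the index.
+	 */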
+	q = AC_NUM - 1 - queue;
+
+	mutex_lock(&priv->mutex);
+
+	ctx->qos_data.def_qos_parm.ac[q].cw_min =
+		cpu_to_le16(params->cw_min);
+	ctx->qos_data.def_qos_parm.ac[q].cw_max =
+		cpu_to_le16(params->cw_max);
+	ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
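+	/* mac80211 reports the TXOP in units of 32 usec, the uCode wants usec */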
+	ctx->qos_data.def_qos_parm.ac[q].edca_txop =
+			cpu_to_le16((params->txop * 32));
+
+	ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
+
+	mutex_unlock(&priv->mutex);
+
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+	return 0;
+}
+
+static int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw)
+{
+	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+
+	return priv->ibss_manager == IWL_IBSS_MANAGER;
+}
+
+static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+	iwl_connection_init_rx_config(priv, ctx);
+
+	iwlagn_set_rxon_chain(priv, ctx);
+
+	return iwlagn_commit_rxon(priv, ctx);
+}
+
+static int iwl_setup_interface(struct iwl_priv *priv,
+			       struct iwl_rxon_context *ctx)
+{
+	struct ieee80211_vif *vif = ctx->vif;
+	int err, ac;
+
+	lockdep_assert_held(&priv->mutex);
+
+	/*
+	 * This variable will be correct only when there's just
+	 * a single context, but all code using it is for hardware
+	 * that supports only one context.
+	 */
+	priv->iw_mode = vif->type;
+
+	ctx->is_active = true;
+
+	err = iwl_set_mode(priv, ctx);
+	if (err) {
+		if (!ctx->always_active)
+			ctx->is_active = false;
+		return err;
+	}
+
+	if (priv->lib->bt_params && priv->lib->bt_params->advanced_bt_coexist &&
+	    vif->type == NL80211_IFTYPE_ADHOC) {
+		/*
+		 * pretend to have high BT traffic as long as we
+		 * are operating in IBSS mode, as this will cause
+		 * the rate scaling etc. to behave as intended.
+		 */
+		priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
+	}
+
+	/* set up queue mappings */
+	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+		vif->hw_queue[ac] = ctx->ac_to_queue[ac];
+
+	if (vif->type == NL80211_IFTYPE_AP)
+		vif->cab_queue = ctx->mcast_queue;
+	else
+		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
+
+	return 0;
+}
+
+static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
+				    struct ieee80211_vif *vif)
+{
+	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+	struct iwl_rxon_context *tmp, *ctx = NULL;
+	int err;
+	enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif);
+	bool reset = false;
+
+	IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
+			   viftype, vif->addr);
+
+	mutex_lock(&priv->mutex);
+
+	if (!iwl_is_ready_rf(priv)) {
+		IWL_WARN(priv, "Try to add interface when device not ready\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	for_each_context(priv, tmp) {
+		u32 possible_modes =
+			tmp->interface_modes | tmp->exclusive_interface_modes;
+
+		if (tmp->vif) {
+			/* On reset we need to add the same interface again */
+			if (tmp->vif == vif) {
+				reset = true;
+				ctx = tmp;
+				break;
+			}
+
+			/* check if this busy context is exclusive */
+			if (tmp->exclusive_interface_modes &
+						BIT(tmp->vif->type)) {
+				err = -EINVAL;
+				goto out;
+			}
+			continue;
+		}
+
+		if (!(possible_modes & BIT(viftype)))
+			continue;
+
+		/* found a possibly usable context without an interface */
+		ctx = tmp;
+		break;
+	}
+
+	if (!ctx) {
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+
+	vif_priv->ctx = ctx;
+	ctx->vif = vif;
+
+	/*
+	 * In SNIFFER device type, the firmware reports the FCS to
+	 * the host, rather than snipping it off. Unfortunately,
+	 * mac80211 doesn't (yet) provide a per-packet flag for
+	 * this, so that we have to set the hardware flag based
+	 * on the interfaces added. As the monitor interface can
+	 * only be present by itself, and will be removed before
+	 * other interfaces are added, this is safe.
+	 */
+	if (vif->type == NL80211_IFTYPE_MONITOR)
+		ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS);
+	else
+		__clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, priv->hw->flags);
+
+	err = iwl_setup_interface(priv, ctx);
+	if (!err || reset)
+		goto out;
+
+	ctx->vif = NULL;
+	priv->iw_mode = NL80211_IFTYPE_STATION;
+ out:
+	mutex_unlock(&priv->mutex);
+
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+	return err;
+}
+
+static void iwl_teardown_interface(struct iwl_priv *priv,
+				   struct ieee80211_vif *vif,
+				   bool mode_change)
+{
+	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+
+	lockdep_assert_held(&priv->mutex);
+
+	if (priv->scan_vif == vif) {
+		iwl_scan_cancel_timeout(priv, 200);
+		iwl_force_scan_end(priv);
+	}
+
+	if (!mode_change) {
+		iwl_set_mode(priv, ctx);
+		if (!ctx->always_active)
+			ctx->is_active = false;
+	}
+
+	/*
+	 * When removing the IBSS interface, overwrite the
+	 * BT traffic load with the stored one from the last
+	 * notification, if any. If this is a device that
+	 * doesn't implement this, this has no effect since
+	 * both values are the same and zero.
+	 */
+	if (vif->type == NL80211_IFTYPE_ADHOC)
+		priv->bt_traffic_load = priv->last_bt_traffic_load;
+}
+
+static void iwlagn_mac_remove_interface(struct ieee80211_hw *hw,
+			      struct ieee80211_vif *vif)
+{
+	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+
+	IWL_DEBUG_MAC80211(priv, "enter\n");
+
+	mutex_lock(&priv->mutex);
+
+	WARN_ON(ctx->vif != vif);
+	ctx->vif = NULL;
+
+	iwl_teardown_interface(priv, vif, false);
+
+	mutex_unlock(&priv->mutex);
+
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+static int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
+				       struct ieee80211_vif *vif,
+				       enum nl80211_iftype newtype, bool newp2p)
+{
+	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+	struct iwl_rxon_context *ctx, *tmp;
+	enum nl80211_iftype newviftype = newtype;
+	u32 interface_modes;
+	int err;
+
+	IWL_DEBUG_MAC80211(priv, "enter\n");
+
+	newtype = ieee80211_iftype_p2p(newtype, newp2p);
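+	/*
+	 * Validate the switch against the P2P-aware type; vif->type is
+	 * set back to the plain netdev type (newviftype) on success.
+	 */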
+
+	mutex_lock(&priv->mutex);
+
+	ctx = iwl_rxon_ctx_from_vif(vif);
+
+	/*
+	 * To simplify this code, only support changes on the
+	 * BSS context. The PAN context is usually reassigned
+	 * by creating/removing P2P interfaces anyway.
+	 */
+	if (ctx->ctxid != IWL_RXON_CTX_BSS) {
+		err = -EBUSY;
+		goto out;
+	}
+
+	if (!ctx->vif || !iwl_is_ready_rf(priv)) {
+		/*
+		 * Huh? But wait ... this can maybe happen when
+		 * we're in the middle of a firmware restart!
+		 */
+		err = -EBUSY;
+		goto out;
+	}
+
+	/* Check if the switch is supported in the same context */
+	interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
+	if (!(interface_modes & BIT(newtype))) {
+		err = -EBUSY;
+		goto out;
+	}
+
+	if (ctx->exclusive_interface_modes & BIT(newtype)) {
+		for_each_context(priv, tmp) {
+			if (ctx == tmp)
+				continue;
+
+			if (!tmp->is_active)
+				continue;
+
+			/*
+			 * The current mode switch would be exclusive, but
+			 * another context is active ... refuse the switch.
+			 */
+			err = -EBUSY;
+			goto out;
+		}
+	}
+
+	/* success */
+	iwl_teardown_interface(priv, vif, true);
+	vif->type = newviftype;
+	vif->p2p = newp2p;
+	err = iwl_setup_interface(priv, ctx);
+	WARN_ON(err);
+	/*
+	 * We've switched internally, but submitting to the
+	 * device may have failed for some reason. Mask this
+	 * error, because otherwise mac80211 will not switch
+	 * (and set the interface type back) and we'll be
+	 * out of sync with it.
+	 */
+	err = 0;
+
+ out:
+	mutex_unlock(&priv->mutex);
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+
+	return err;
+}
+
+static int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
+			      struct ieee80211_vif *vif,
+			      struct ieee80211_scan_request *hw_req)
+{
+	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+	struct cfg80211_scan_request *req = &hw_req->req;
+	int ret;
+
+	IWL_DEBUG_MAC80211(priv, "enter\n");
+
+	if (req->n_channels == 0)
+		return -EINVAL;
+
+	mutex_lock(&priv->mutex);
+
+	/*
+	 * If an internal scan is in progress, just set
+	 * up the scan_request as per above.
+	 */
+	if (priv->scan_type != IWL_SCAN_NORMAL) {
+		IWL_DEBUG_SCAN(priv,
+			       "SCAN request during internal scan - defer\n");
+		priv->scan_request = req;
+		priv->scan_vif = vif;
+		ret = 0;
+	} else {
+		priv->scan_request = req;
+		priv->scan_vif = vif;
+		/*
+		 * mac80211 will only ask for one band at a time
+		 * so using channels[0] here is ok
+		 */
+		ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL,
+					req->channels[0]->band);
+		if (ret) {
+			priv->scan_request = NULL;
+			priv->scan_vif = NULL;
+		}
+	}
+
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+
+	mutex_unlock(&priv->mutex);
+
+	return ret;
+}
+
+static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
+{
+	struct iwl_addsta_cmd cmd = {
+		.mode = STA_CONTROL_MODIFY_MSK,
+		.station_flags_msk = STA_FLG_PWR_SAVE_MSK,
+		.sta.sta_id = sta_id,
+	};
+
+	iwl_send_add_sta(priv, &cmd, CMD_ASYNC);
+}
+
+static void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
+				  struct ieee80211_vif *vif,
+				  enum sta_notify_cmd cmd,
+				  struct ieee80211_sta *sta)
+{
+	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+	int sta_id;
+
+	IWL_DEBUG_MAC80211(priv, "enter\n");
+
+	switch (cmd) {
+	case STA_NOTIFY_SLEEP:
+		WARN_ON(!sta_priv->client);
+		sta_priv->asleep = true;
+		if (atomic_read(&sta_priv->pending_frames) > 0)
+			ieee80211_sta_block_awake(hw, sta, true);
+		break;
+	case STA_NOTIFY_AWAKE:
+		WARN_ON(!sta_priv->client);
+		if (!sta_priv->asleep)
+			break;
+		sta_priv->asleep = false;
+		sta_id = iwl_sta_id(sta);
+		if (sta_id != IWL_INVALID_STATION)
+			iwl_sta_modify_ps_wake(priv, sta_id);
+		break;
+	default:
+		break;
+	}
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+const struct ieee80211_ops iwlagn_hw_ops = {
+	.tx = iwlagn_mac_tx,
+	.start = iwlagn_mac_start,
+	.stop = iwlagn_mac_stop,
+#ifdef CONFIG_PM_SLEEP
+	.suspend = iwlagn_mac_suspend,
+	.resume = iwlagn_mac_resume,
+	.set_wakeup = iwlagn_mac_set_wakeup,
+#endif
+	.add_interface = iwlagn_mac_add_interface,
+	.remove_interface = iwlagn_mac_remove_interface,
+	.change_interface = iwlagn_mac_change_interface,
+	.config = iwlagn_mac_config,
+	.configure_filter = iwlagn_configure_filter,
+	.set_key = iwlagn_mac_set_key,
+	.update_tkip_key = iwlagn_mac_update_tkip_key,
+	.set_rekey_data = iwlagn_mac_set_rekey_data,
+	.conf_tx = iwlagn_mac_conf_tx,
+	.bss_info_changed = iwlagn_bss_info_changed,
+	.ampdu_action = iwlagn_mac_ampdu_action,
+	.hw_scan = iwlagn_mac_hw_scan,
+	.sta_notify = iwlagn_mac_sta_notify,
+	.sta_state = iwlagn_mac_sta_state,
+	.channel_switch = iwlagn_mac_channel_switch,
+	.flush = iwlagn_mac_flush,
+	.tx_last_beacon = iwlagn_mac_tx_last_beacon,
+	.event_callback = iwlagn_mac_event_callback,
+	.set_tim = iwlagn_mac_set_tim,
+};
+
+/* This function both allocates and initializes hw and priv. */
+struct ieee80211_hw *iwl_alloc_all(void)
+{
+	struct iwl_priv *priv;
+	struct iwl_op_mode *op_mode;
+	/* mac80211 allocates memory for this device instance, including
+	 *   space for this driver's private structure */
+	struct ieee80211_hw *hw;
+
+	hw = ieee80211_alloc_hw(sizeof(struct iwl_priv) +
+				sizeof(struct iwl_op_mode), &iwlagn_hw_ops);
+	if (!hw)
+		goto out;
+
+	op_mode = hw->priv;
+	priv = IWL_OP_MODE_GET_DVM(op_mode);
+	priv->hw = hw;
+
+out:
+	return hw;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
new file mode 100644
index 0000000..030482b
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -0,0 +1,2179 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+
+#include <net/mac80211.h>
+
+#include <asm/div64.h>
+
+#include "iwl-eeprom-read.h"
+#include "iwl-eeprom-parse.h"
+#include "iwl-io.h"
+#include "iwl-trans.h"
+#include "iwl-op-mode.h"
+#include "iwl-drv.h"
+#include "iwl-modparams.h"
+#include "iwl-prph.h"
+
+#include "dev.h"
+#include "calib.h"
+#include "agn.h"
+
+
+/******************************************************************************
+ *
+ * module boiler plate
+ *
+ ******************************************************************************/
+
+#define DRV_DESCRIPTION	"Intel(R) Wireless WiFi Link AGN driver for Linux"
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search.
+ * A warning will be triggered on violation.
+ */
+static const struct iwl_hcmd_names iwl_dvm_cmd_names[] = {
+	HCMD_NAME(REPLY_ALIVE),
+	HCMD_NAME(REPLY_ERROR),
+	HCMD_NAME(REPLY_ECHO),
+	HCMD_NAME(REPLY_RXON),
+	HCMD_NAME(REPLY_RXON_ASSOC),
+	HCMD_NAME(REPLY_QOS_PARAM),
+	HCMD_NAME(REPLY_RXON_TIMING),
+	HCMD_NAME(REPLY_ADD_STA),
+	HCMD_NAME(REPLY_REMOVE_STA),
+	HCMD_NAME(REPLY_REMOVE_ALL_STA),
+	HCMD_NAME(REPLY_TX),
+	HCMD_NAME(REPLY_TXFIFO_FLUSH),
+	HCMD_NAME(REPLY_WEPKEY),
+	HCMD_NAME(REPLY_LEDS_CMD),
+	HCMD_NAME(REPLY_TX_LINK_QUALITY_CMD),
+	HCMD_NAME(COEX_PRIORITY_TABLE_CMD),
+	HCMD_NAME(COEX_MEDIUM_NOTIFICATION),
+	HCMD_NAME(COEX_EVENT_CMD),
+	HCMD_NAME(TEMPERATURE_NOTIFICATION),
+	HCMD_NAME(CALIBRATION_CFG_CMD),
+	HCMD_NAME(CALIBRATION_RES_NOTIFICATION),
+	HCMD_NAME(CALIBRATION_COMPLETE_NOTIFICATION),
+	HCMD_NAME(REPLY_QUIET_CMD),
+	HCMD_NAME(REPLY_CHANNEL_SWITCH),
+	HCMD_NAME(CHANNEL_SWITCH_NOTIFICATION),
+	HCMD_NAME(REPLY_SPECTRUM_MEASUREMENT_CMD),
+	HCMD_NAME(SPECTRUM_MEASURE_NOTIFICATION),
+	HCMD_NAME(POWER_TABLE_CMD),
+	HCMD_NAME(PM_SLEEP_NOTIFICATION),
+	HCMD_NAME(PM_DEBUG_STATISTIC_NOTIFIC),
+	HCMD_NAME(REPLY_SCAN_CMD),
+	HCMD_NAME(REPLY_SCAN_ABORT_CMD),
+	HCMD_NAME(SCAN_START_NOTIFICATION),
+	HCMD_NAME(SCAN_RESULTS_NOTIFICATION),
+	HCMD_NAME(SCAN_COMPLETE_NOTIFICATION),
+	HCMD_NAME(BEACON_NOTIFICATION),
+	HCMD_NAME(REPLY_TX_BEACON),
+	HCMD_NAME(WHO_IS_AWAKE_NOTIFICATION),
+	HCMD_NAME(REPLY_TX_POWER_DBM_CMD),
+	HCMD_NAME(QUIET_NOTIFICATION),
+	HCMD_NAME(REPLY_TX_PWR_TABLE_CMD),
+	HCMD_NAME(REPLY_TX_POWER_DBM_CMD_V1),
+	HCMD_NAME(TX_ANT_CONFIGURATION_CMD),
+	HCMD_NAME(MEASURE_ABORT_NOTIFICATION),
+	HCMD_NAME(REPLY_BT_CONFIG),
+	HCMD_NAME(REPLY_STATISTICS_CMD),
+	HCMD_NAME(STATISTICS_NOTIFICATION),
+	HCMD_NAME(REPLY_CARD_STATE_CMD),
+	HCMD_NAME(CARD_STATE_NOTIFICATION),
+	HCMD_NAME(MISSED_BEACONS_NOTIFICATION),
+	HCMD_NAME(REPLY_CT_KILL_CONFIG_CMD),
+	HCMD_NAME(SENSITIVITY_CMD),
+	HCMD_NAME(REPLY_PHY_CALIBRATION_CMD),
+	HCMD_NAME(REPLY_WIPAN_PARAMS),
+	HCMD_NAME(REPLY_WIPAN_RXON),
+	HCMD_NAME(REPLY_WIPAN_RXON_TIMING),
+	HCMD_NAME(REPLY_WIPAN_RXON_ASSOC),
+	HCMD_NAME(REPLY_WIPAN_QOS_PARAM),
+	HCMD_NAME(REPLY_WIPAN_WEPKEY),
+	HCMD_NAME(REPLY_WIPAN_P2P_CHANNEL_SWITCH),
+	HCMD_NAME(REPLY_WIPAN_NOA_NOTIFICATION),
+	HCMD_NAME(REPLY_WIPAN_DEACTIVATION_COMPLETE),
+	HCMD_NAME(REPLY_RX_PHY_CMD),
+	HCMD_NAME(REPLY_RX_MPDU_CMD),
+	HCMD_NAME(REPLY_RX),
+	HCMD_NAME(REPLY_COMPRESSED_BA),
+	HCMD_NAME(REPLY_BT_COEX_PRIO_TABLE),
+	HCMD_NAME(REPLY_BT_COEX_PROT_ENV),
+	HCMD_NAME(REPLY_BT_COEX_PROFILE_NOTIF),
+	HCMD_NAME(REPLY_D3_CONFIG),
+	HCMD_NAME(REPLY_WOWLAN_PATTERNS),
+	HCMD_NAME(REPLY_WOWLAN_WAKEUP_FILTER),
+	HCMD_NAME(REPLY_WOWLAN_TSC_RSC_PARAMS),
+	HCMD_NAME(REPLY_WOWLAN_TKIP_PARAMS),
+	HCMD_NAME(REPLY_WOWLAN_KEK_KCK_MATERIAL),
+	HCMD_NAME(REPLY_WOWLAN_GET_STATUS),
+};
+
+static const struct iwl_hcmd_arr iwl_dvm_groups[] = {
+	[0x0] = HCMD_ARR(iwl_dvm_cmd_names),
+};
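+/*
+ * Note: DVM firmware predates the notion of command groups, so every
+ * host command above lives in the single group 0; the array form is
+ * only there to satisfy the common command-lookup infrastructure.
+ */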
+
+static const struct iwl_op_mode_ops iwl_dvm_ops;
+
+void iwl_update_chain_flags(struct iwl_priv *priv)
+{
+	struct iwl_rxon_context *ctx;
+
+	for_each_context(priv, ctx) {
+		iwlagn_set_rxon_chain(priv, ctx);
+		if (ctx->active.rx_chain != ctx->staging.rx_chain)
+			iwlagn_commit_rxon(priv, ctx);
+	}
+}
+
+/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
+static void iwl_set_beacon_tim(struct iwl_priv *priv,
+			       struct iwl_tx_beacon_cmd *tx_beacon_cmd,
+			       u8 *beacon, u32 frame_size)
+{
+	u16 tim_idx;
+	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
+
+	/*
+	 * The index is relative to frame start but we start looking at the
+	 * variable-length part of the beacon.
+	 */
+	tim_idx = mgmt->u.beacon.variable - beacon;
+
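+	/*
+	 * Each information element is a TLV: a one-byte element ID, a
+	 * one-byte length, then the payload -- hence the "+ 2" stride in
+	 * the walk below. (Illustrative example: a 6-byte SSID element
+	 * advances tim_idx by 8.)
+	 */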
+	/* Parse variable-length elements of beacon to find WLAN_EID_TIM */
+	while ((tim_idx < (frame_size - 2)) &&
+			(beacon[tim_idx] != WLAN_EID_TIM))
+		tim_idx += beacon[tim_idx+1] + 2;
+
+	/* If TIM field was found, set variables */
+	if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
+		tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
+		tx_beacon_cmd->tim_size = beacon[tim_idx+1];
+	} else
+		IWL_WARN(priv, "Unable to find TIM Element in beacon\n");
+}
+
+int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
+{
+	struct iwl_tx_beacon_cmd *tx_beacon_cmd;
+	struct iwl_host_cmd cmd = {
+		.id = REPLY_TX_BEACON,
+	};
+	struct ieee80211_tx_info *info;
+	u32 frame_size;
+	u32 rate_flags;
+	u32 rate;
+
+	/*
+	 * We have to set up the TX command, the TX Beacon command, and the
+	 * beacon contents.
+	 */
+
+	lockdep_assert_held(&priv->mutex);
+
+	if (!priv->beacon_ctx) {
+		IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
+		return 0;
+	}
+
+	if (WARN_ON(!priv->beacon_skb))
+		return -EINVAL;
+
+	/* Allocate beacon command */
+	if (!priv->beacon_cmd)
+		priv->beacon_cmd = kzalloc(sizeof(*tx_beacon_cmd), GFP_KERNEL);
+	tx_beacon_cmd = priv->beacon_cmd;
+	if (!tx_beacon_cmd)
+		return -ENOMEM;
+
+	frame_size = priv->beacon_skb->len;
+
+	/* Set up TX command fields */
+	tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
+	tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id;
+	tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+	tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
+		TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;
+
+	/* Set up TX beacon command fields */
+	iwl_set_beacon_tim(priv, tx_beacon_cmd, priv->beacon_skb->data,
+			   frame_size);
+
+	/* Set up packet rate and flags */
+	info = IEEE80211_SKB_CB(priv->beacon_skb);
+
+	/*
+	 * Let's set up the rate at least somewhat correctly; it is
+	 * currently not actually used by the uCode, which uses the
+	 * broadcast station's rate instead.
+	 */
+	if (info->control.rates[0].idx < 0 ||
+	    info->control.rates[0].flags & IEEE80211_TX_RC_MCS)
+		rate = 0;
+	else
+		rate = info->control.rates[0].idx;
+
+	priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
+					      priv->nvm_data->valid_tx_ant);
+	rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
+
+	/* In mac80211, rates for 5 GHz start at 0 */
+	if (info->band == NL80211_BAND_5GHZ)
+		rate += IWL_FIRST_OFDM_RATE;
+	else if (rate >= IWL_FIRST_CCK_RATE && rate <= IWL_LAST_CCK_RATE)
+		rate_flags |= RATE_MCS_CCK_MSK;
+
+	tx_beacon_cmd->tx.rate_n_flags =
+			iwl_hw_set_rate_n_flags(rate, rate_flags);
+
+	/* Submit command */
+	cmd.len[0] = sizeof(*tx_beacon_cmd);
+	cmd.data[0] = tx_beacon_cmd;
+	cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
+	cmd.len[1] = frame_size;
+	cmd.data[1] = priv->beacon_skb->data;
+	cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
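+	/*
+	 * Both fragments are marked NOCOPY: the command header and the
+	 * beacon frame are handed to the transport by reference rather
+	 * than being copied into the command queue entry.
+	 */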
+
+	return iwl_dvm_send_cmd(priv, &cmd);
+}
+
+static void iwl_bg_beacon_update(struct work_struct *work)
+{
+	struct iwl_priv *priv =
+		container_of(work, struct iwl_priv, beacon_update);
+	struct sk_buff *beacon;
+
+	mutex_lock(&priv->mutex);
+	if (!priv->beacon_ctx) {
+		IWL_ERR(priv, "updating beacon w/o beacon context!\n");
+		goto out;
+	}
+
+	if (priv->beacon_ctx->vif->type != NL80211_IFTYPE_AP) {
+		/*
+		 * The ucode will send beacon notifications even in
+		 * IBSS mode, but we don't want to process them. But
+		 * we need to defer the type check to here due to
+		 * requiring locking around the beacon_ctx access.
+		 */
+		goto out;
+	}
+
+	/* Pull updated AP beacon from mac80211. will fail if not in AP mode */
+	beacon = ieee80211_beacon_get(priv->hw, priv->beacon_ctx->vif);
+	if (!beacon) {
+		IWL_ERR(priv, "update beacon failed -- keeping old\n");
+		goto out;
+	}
+
+	/* a new beacon skb is allocated every time; dispose of the previous one */
+	dev_kfree_skb(priv->beacon_skb);
+
+	priv->beacon_skb = beacon;
+
+	iwlagn_send_beacon_cmd(priv);
+ out:
+	mutex_unlock(&priv->mutex);
+}
+
+static void iwl_bg_bt_runtime_config(struct work_struct *work)
+{
+	struct iwl_priv *priv =
+		container_of(work, struct iwl_priv, bt_runtime_config);
+
+	mutex_lock(&priv->mutex);
+	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+		goto out;
+
+	/* don't send host commands if rf-kill is on */
+	if (!iwl_is_ready_rf(priv))
+		goto out;
+
+	iwlagn_send_advance_bt_config(priv);
+out:
+	mutex_unlock(&priv->mutex);
+}
+
+static void iwl_bg_bt_full_concurrency(struct work_struct *work)
+{
+	struct iwl_priv *priv =
+		container_of(work, struct iwl_priv, bt_full_concurrency);
+	struct iwl_rxon_context *ctx;
+
+	mutex_lock(&priv->mutex);
+
+	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+		goto out;
+
+	/* don't send host commands if rf-kill is on */
+	if (!iwl_is_ready_rf(priv))
+		goto out;
+
+	IWL_DEBUG_INFO(priv, "BT coex in %s mode\n",
+		       priv->bt_full_concurrent ?
+		       "full concurrency" : "3-wire");
+
+	/*
+	 * LQ & RXON updated cmds must be sent before BT Config cmd
+	 * to avoid 3-wire collisions
+	 */
+	for_each_context(priv, ctx) {
+		iwlagn_set_rxon_chain(priv, ctx);
+		iwlagn_commit_rxon(priv, ctx);
+	}
+
+	iwlagn_send_advance_bt_config(priv);
+out:
+	mutex_unlock(&priv->mutex);
+}
+
+int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
+{
+	struct iwl_statistics_cmd statistics_cmd = {
+		.configuration_flags =
+			clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
+	};
+
+	if (flags & CMD_ASYNC)
+		return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
+					CMD_ASYNC,
+					sizeof(struct iwl_statistics_cmd),
+					&statistics_cmd);
+	else
+		return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD, 0,
+					sizeof(struct iwl_statistics_cmd),
+					&statistics_cmd);
+}
+
+/**
+ * iwl_bg_statistics_periodic - Timer callback to queue statistics
+ *
+ * This callback is provided in order to send a statistics request.
+ *
+ * This timer function is continually reset to execute within
+ * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
+ * was received.  We need to ensure we receive the statistics in order
+ * to update the temperature used for calibrating the TXPOWER.
+ */
+static void iwl_bg_statistics_periodic(struct timer_list *t)
+{
+	struct iwl_priv *priv = from_timer(priv, t, statistics_periodic);
+
+	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+		return;
+
+	/* don't send host commands if rf-kill is on */
+	if (!iwl_is_ready_rf(priv))
+		return;
+
+	iwl_send_statistics_request(priv, CMD_ASYNC, false);
+}
+
+
+static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
+					u32 start_idx, u32 num_events,
+					u32 capacity, u32 mode)
+{
+	u32 i;
+	u32 ptr;        /* SRAM byte address of log data */
+	u32 ev, time, data; /* event log data */
+	unsigned long reg_flags;
+
+	if (mode == 0)
+		ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32));
+	else
+		ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
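+	/*
+	 * Log layout in SRAM (see the read struct in the caller): a
+	 * four-u32 header -- capacity, mode, wrap counter, write counter
+	 * -- followed by fixed-size entries of two u32s (event id, data)
+	 * without timestamps or three u32s (event id, timestamp, data)
+	 * with them, which is what the arithmetic above encodes.
+	 */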
+
+	/* Make sure device is powered up for SRAM reads */
+	if (!iwl_trans_grab_nic_access(priv->trans, &reg_flags))
+		return;
+
+	/* Set starting address; reads will auto-increment */
+	iwl_write32(priv->trans, HBUS_TARG_MEM_RADDR, ptr);
+
+	/*
+	 * Refuse to read more than would have fit into the log from
+	 * the current start_idx. This used to happen due to the race
+	 * described below, but now WARN because the code below should
+	 * prevent it from happening here.
+	 */
+	if (WARN_ON(num_events > capacity - start_idx))
+		num_events = capacity - start_idx;
+
+	/*
+	 * "time" is actually "data" for mode 0 (no timestamp).
+	 * place event id # at far right for easier visual parsing.
+	 */
+	for (i = 0; i < num_events; i++) {
+		ev = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
+		time = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
+		if (mode == 0) {
+			trace_iwlwifi_dev_ucode_cont_event(
+					priv->trans->dev, 0, time, ev);
+		} else {
+			data = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
+			trace_iwlwifi_dev_ucode_cont_event(
+					priv->trans->dev, time, data, ev);
+		}
+	}
+	/* Allow device to power down */
+	iwl_trans_release_nic_access(priv->trans, &reg_flags);
+}
+
+static void iwl_continuous_event_trace(struct iwl_priv *priv)
+{
+	u32 capacity;   /* event log capacity in # entries */
+	struct {
+		u32 capacity;
+		u32 mode;
+		u32 wrap_counter;
+		u32 write_counter;
+	} __packed read;
+	u32 base;       /* SRAM byte address of event log header */
+	u32 mode;       /* 0 - no timestamp, 1 - timestamp recorded */
+	u32 num_wraps;  /* # times uCode wrapped to top of log */
+	u32 next_entry; /* index of next entry to be written by uCode */
+
+	base = priv->device_pointers.log_event_table;
+	if (iwlagn_hw_valid_rtc_data_addr(base)) {
+		iwl_trans_read_mem_bytes(priv->trans, base,
+					 &read, sizeof(read));
+		capacity = read.capacity;
+		mode = read.mode;
+		num_wraps = read.wrap_counter;
+		next_entry = read.write_counter;
+	} else
+		return;
+
+	/*
+	 * Unfortunately, the uCode doesn't use temporary variables.
+	 * Therefore, it can happen that we read next_entry == capacity,
+	 * which really means next_entry == 0.
+	 */
+	if (unlikely(next_entry == capacity))
+		next_entry = 0;
+	/*
+	 * Additionally, the uCode increases the write pointer before
+	 * the wraps counter, so if the write pointer is smaller than
+	 * the old write pointer (wrap occurred) but we read that no
+	 * wrap occurred, we actually read between the next_entry and
+	 * num_wraps update (this does happen in practice!!) -- take
+	 * that into account by increasing num_wraps.
+	 */
+	if (unlikely(next_entry < priv->event_log.next_entry &&
+		     num_wraps == priv->event_log.num_wraps))
+		num_wraps++;
+
+	if (num_wraps == priv->event_log.num_wraps) {
+		iwl_print_cont_event_trace(
+			priv, base, priv->event_log.next_entry,
+			next_entry - priv->event_log.next_entry,
+			capacity, mode);
+
+		priv->event_log.non_wraps_count++;
+	} else {
+		if (num_wraps - priv->event_log.num_wraps > 1)
+			priv->event_log.wraps_more_count++;
+		else
+			priv->event_log.wraps_once_count++;
+
+		trace_iwlwifi_dev_ucode_wrap_event(priv->trans->dev,
+				num_wraps - priv->event_log.num_wraps,
+				next_entry, priv->event_log.next_entry);
+
+		if (next_entry < priv->event_log.next_entry) {
+			iwl_print_cont_event_trace(
+				priv, base, priv->event_log.next_entry,
+				capacity - priv->event_log.next_entry,
+				capacity, mode);
+
+			iwl_print_cont_event_trace(
+				priv, base, 0, next_entry, capacity, mode);
+		} else {
+			iwl_print_cont_event_trace(
+				priv, base, next_entry,
+				capacity - next_entry,
+				capacity, mode);
+
+			iwl_print_cont_event_trace(
+				priv, base, 0, next_entry, capacity, mode);
+		}
+	}
+
+	priv->event_log.num_wraps = num_wraps;
+	priv->event_log.next_entry = next_entry;
+}
+
+/**
+ * iwl_bg_ucode_trace - Timer callback to log ucode event
+ *
+ * The timer is continually reset to fire UCODE_TRACE_PERIOD
+ * milliseconds after it last expired; if enabled, this callback
+ * performs the continuous uCode event logging.
+ */
+static void iwl_bg_ucode_trace(struct timer_list *t)
+{
+	struct iwl_priv *priv = from_timer(priv, t, ucode_trace);
+
+	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+		return;
+
+	if (priv->event_log.ucode_trace) {
+		iwl_continuous_event_trace(priv);
+		/* Reschedule the timer to occur in UCODE_TRACE_PERIOD */
+		mod_timer(&priv->ucode_trace,
+			 jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
+	}
+}
+
+static void iwl_bg_tx_flush(struct work_struct *work)
+{
+	struct iwl_priv *priv =
+		container_of(work, struct iwl_priv, tx_flush);
+
+	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+		return;
+
+	/* do nothing if rf-kill is on */
+	if (!iwl_is_ready_rf(priv))
+		return;
+
+	IWL_DEBUG_INFO(priv, "device request: flush all tx frames\n");
+	iwlagn_dev_txfifo_flush(priv);
+}
+
+/*
+ * queue/FIFO/AC mapping definitions
+ */
+
+static const u8 iwlagn_bss_ac_to_fifo[] = {
+	IWL_TX_FIFO_VO,
+	IWL_TX_FIFO_VI,
+	IWL_TX_FIFO_BE,
+	IWL_TX_FIFO_BK,
+};
+
+static const u8 iwlagn_bss_ac_to_queue[] = {
+	0, 1, 2, 3,
+};
+
+static const u8 iwlagn_pan_ac_to_fifo[] = {
+	IWL_TX_FIFO_VO_IPAN,
+	IWL_TX_FIFO_VI_IPAN,
+	IWL_TX_FIFO_BE_IPAN,
+	IWL_TX_FIFO_BK_IPAN,
+};
+
+static const u8 iwlagn_pan_ac_to_queue[] = {
+	7, 6, 5, 4,
+};
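+/*
+ * Note: both _ac_to_ arrays above are indexed by mac80211 AC number
+ * (VO=0, VI=1, BE=2, BK=3). The PAN context takes HW queues 7..4 so
+ * that they stay disjoint from the BSS context's queues 0..3.
+ */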
+
+static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
+{
+	int i;
+
+	/*
+	 * The default context is always valid,
+	 * the PAN context depends on uCode.
+	 */
+	priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
+	if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN)
+		priv->valid_contexts |= BIT(IWL_RXON_CTX_PAN);
+
+	for (i = 0; i < NUM_IWL_RXON_CTX; i++)
+		priv->contexts[i].ctxid = i;
+
+	priv->contexts[IWL_RXON_CTX_BSS].always_active = true;
+	priv->contexts[IWL_RXON_CTX_BSS].is_active = true;
+	priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
+	priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
+	priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
+	priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
+	priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
+	priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
+	priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
+	priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
+		BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_MONITOR);
+	priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
+		BIT(NL80211_IFTYPE_STATION);
+	priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
+	priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
+	priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
+	priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
+	memcpy(priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue,
+	       iwlagn_bss_ac_to_queue, sizeof(iwlagn_bss_ac_to_queue));
+	memcpy(priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo,
+	       iwlagn_bss_ac_to_fifo, sizeof(iwlagn_bss_ac_to_fifo));
+
+	priv->contexts[IWL_RXON_CTX_PAN].rxon_cmd = REPLY_WIPAN_RXON;
+	priv->contexts[IWL_RXON_CTX_PAN].rxon_timing_cmd =
+		REPLY_WIPAN_RXON_TIMING;
+	priv->contexts[IWL_RXON_CTX_PAN].rxon_assoc_cmd =
+		REPLY_WIPAN_RXON_ASSOC;
+	priv->contexts[IWL_RXON_CTX_PAN].qos_cmd = REPLY_WIPAN_QOS_PARAM;
+	priv->contexts[IWL_RXON_CTX_PAN].ap_sta_id = IWL_AP_ID_PAN;
+	priv->contexts[IWL_RXON_CTX_PAN].wep_key_cmd = REPLY_WIPAN_WEPKEY;
+	priv->contexts[IWL_RXON_CTX_PAN].bcast_sta_id = IWLAGN_PAN_BCAST_ID;
+	priv->contexts[IWL_RXON_CTX_PAN].station_flags = STA_FLG_PAN_STATION;
+	priv->contexts[IWL_RXON_CTX_PAN].interface_modes =
+		BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP);
+
+	priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP;
+	priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA;
+	priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P;
+	memcpy(priv->contexts[IWL_RXON_CTX_PAN].ac_to_queue,
+	       iwlagn_pan_ac_to_queue, sizeof(iwlagn_pan_ac_to_queue));
+	memcpy(priv->contexts[IWL_RXON_CTX_PAN].ac_to_fifo,
+	       iwlagn_pan_ac_to_fifo, sizeof(iwlagn_pan_ac_to_fifo));
+	priv->contexts[IWL_RXON_CTX_PAN].mcast_queue = IWL_IPAN_MCAST_QUEUE;
+
+	BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
+}
+
+static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
+{
+	struct iwl_ct_kill_config cmd;
+	struct iwl_ct_kill_throttling_config adv_cmd;
+	int ret = 0;
+
+	iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR,
+		    CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
+
+	priv->thermal_throttle.ct_kill_toggle = false;
+
+	if (priv->lib->support_ct_kill_exit) {
+		adv_cmd.critical_temperature_enter =
+			cpu_to_le32(priv->hw_params.ct_kill_threshold);
+		adv_cmd.critical_temperature_exit =
+			cpu_to_le32(priv->hw_params.ct_kill_exit_threshold);
+
+		ret = iwl_dvm_send_cmd_pdu(priv,
+				       REPLY_CT_KILL_CONFIG_CMD,
+				       0, sizeof(adv_cmd), &adv_cmd);
+		if (ret)
+			IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
+		else
+			IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
+				"succeeded, critical temperature enter is %d, "
+				"exit is %d\n",
+				priv->hw_params.ct_kill_threshold,
+				priv->hw_params.ct_kill_exit_threshold);
+	} else {
+		cmd.critical_temperature_R =
+			cpu_to_le32(priv->hw_params.ct_kill_threshold);
+
+		ret = iwl_dvm_send_cmd_pdu(priv,
+				       REPLY_CT_KILL_CONFIG_CMD,
+				       0, sizeof(cmd), &cmd);
+		if (ret)
+			IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
+		else
+			IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
+				"succeeded, "
+				"critical temperature is %d\n",
+				priv->hw_params.ct_kill_threshold);
+	}
+}
+
+static int iwlagn_send_calib_cfg_rt(struct iwl_priv *priv, u32 cfg)
+{
+	struct iwl_calib_cfg_cmd calib_cfg_cmd;
+	struct iwl_host_cmd cmd = {
+		.id = CALIBRATION_CFG_CMD,
+		.len = { sizeof(struct iwl_calib_cfg_cmd), },
+		.data = { &calib_cfg_cmd, },
+	};
+
+	memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
+	calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_RT_CFG_ALL;
+	calib_cfg_cmd.ucd_calib_cfg.once.start = cpu_to_le32(cfg);
+
+	return iwl_dvm_send_cmd(priv, &cmd);
+}
+
+
+static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
+{
+	struct iwl_tx_ant_config_cmd tx_ant_cmd = {
+	  .valid = cpu_to_le32(valid_tx_ant),
+	};
+
+	if (IWL_UCODE_API(priv->fw->ucode_ver) > 1) {
+		IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant);
+		return iwl_dvm_send_cmd_pdu(priv, TX_ANT_CONFIGURATION_CMD, 0,
+					sizeof(struct iwl_tx_ant_config_cmd),
+					&tx_ant_cmd);
+	} else {
+		IWL_DEBUG_HC(priv, "TX_ANT_CONFIGURATION_CMD not supported\n");
+		return -EOPNOTSUPP;
+	}
+}
+
+static void iwl_send_bt_config(struct iwl_priv *priv)
+{
+	struct iwl_bt_cmd bt_cmd = {
+		.lead_time = BT_LEAD_TIME_DEF,
+		.max_kill = BT_MAX_KILL_DEF,
+		.kill_ack_mask = 0,
+		.kill_cts_mask = 0,
+	};
+
+	if (!iwlwifi_mod_params.bt_coex_active)
+		bt_cmd.flags = BT_COEX_DISABLE;
+	else
+		bt_cmd.flags = BT_COEX_ENABLE;
+
+	priv->bt_enable_flag = bt_cmd.flags;
+	IWL_DEBUG_INFO(priv, "BT coex %s\n",
+		(bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
+
+	if (iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
+			     0, sizeof(struct iwl_bt_cmd), &bt_cmd))
+		IWL_ERR(priv, "failed to send BT Coex Config\n");
+}
+
+/**
+ * iwl_alive_start - called after REPLY_ALIVE notification received
+ *                   from protocol/runtime uCode (initialization uCode's
+ *                   Alive gets handled by iwl_init_alive_start()).
+ */
+int iwl_alive_start(struct iwl_priv *priv)
+{
+	int ret = 0;
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+	IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
+
+	/* After the ALIVE response, we can send host commands to the uCode */
+	set_bit(STATUS_ALIVE, &priv->status);
+
+	if (iwl_is_rfkill(priv))
+		return -ERFKILL;
+
+	if (priv->event_log.ucode_trace) {
+		/* start collecting data now */
+		mod_timer(&priv->ucode_trace, jiffies);
+	}
+
+	/* download priority table before any calibration request */
+	if (priv->lib->bt_params &&
+	    priv->lib->bt_params->advanced_bt_coexist) {
+		/* Configure Bluetooth device coexistence support */
+		if (priv->lib->bt_params->bt_sco_disable)
+			priv->bt_enable_pspoll = false;
+		else
+			priv->bt_enable_pspoll = true;
+
+		priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
+		priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
+		priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
+		iwlagn_send_advance_bt_config(priv);
+		priv->bt_valid = IWLAGN_BT_VALID_ENABLE_FLAGS;
+		priv->cur_rssi_ctx = NULL;
+
+		iwl_send_prio_tbl(priv);
+
+		/* FIXME: w/a to force change uCode BT state machine */
+		ret = iwl_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
+					 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
+		if (ret)
+			return ret;
+		ret = iwl_send_bt_env(priv, IWL_BT_COEX_ENV_CLOSE,
+					 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
+		if (ret)
+			return ret;
+	} else if (priv->lib->bt_params) {
+		/*
+		 * default is 2-wire BT coexistence support
+		 */
+		iwl_send_bt_config(priv);
+	}
+
+	/*
+	 * Perform runtime calibrations, including DC calibration.
+	 */
+	iwlagn_send_calib_cfg_rt(priv, IWL_CALIB_CFG_DC_IDX);
+
+	ieee80211_wake_queues(priv->hw);
+
+	/* Configure Tx antenna selection based on H/W config */
+	iwlagn_send_tx_ant_config(priv, priv->nvm_data->valid_tx_ant);
+
+	if (iwl_is_associated_ctx(ctx) && !priv->wowlan) {
+		struct iwl_rxon_cmd *active_rxon =
+				(struct iwl_rxon_cmd *)&ctx->active;
+		/* apply any changes in staging */
+		ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+		active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+	} else {
+		struct iwl_rxon_context *tmp;
+		/* Initialize our rx_config data */
+		for_each_context(priv, tmp)
+			iwl_connection_init_rx_config(priv, tmp);
+
+		iwlagn_set_rxon_chain(priv, ctx);
+	}
+
+	if (!priv->wowlan) {
+		/* WoWLAN ucode will not reply in the same way, skip it */
+		iwl_reset_run_time_calib(priv);
+	}
+
+	set_bit(STATUS_READY, &priv->status);
+
+	/* Configure the adapter for unassociated operation */
+	ret = iwlagn_commit_rxon(priv, ctx);
+	if (ret)
+		return ret;
+
+	/* At this point, the NIC is initialized and operational */
+	iwl_rf_kill_ct_config(priv);
+
+	IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
+
+	return iwl_power_update_mode(priv, true);
+}
+
+/**
+ * iwl_clear_driver_stations - clear knowledge of all stations from driver
+ * @priv: iwl priv struct
+ *
+ * This is called during iwl_down() to make sure that in the case
+ * we're coming there from a hardware restart mac80211 will be
+ * able to reconfigure stations -- if we're getting there in the
+ * normal down flow then the stations will already be cleared.
+ */
+static void iwl_clear_driver_stations(struct iwl_priv *priv)
+{
+	struct iwl_rxon_context *ctx;
+
+	spin_lock_bh(&priv->sta_lock);
+	memset(priv->stations, 0, sizeof(priv->stations));
+	priv->num_stations = 0;
+
+	priv->ucode_key_table = 0;
+
+	for_each_context(priv, ctx) {
+		/*
+		 * Remove all key information that is not stored as part
+		 * of station information since mac80211 may not have had
+		 * a chance to remove all the keys. When device is
+		 * reconfigured by mac80211 after an error all keys will
+		 * be reconfigured.
+		 */
+		memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
+		ctx->key_mapping_keys = 0;
+	}
+
+	spin_unlock_bh(&priv->sta_lock);
+}
+
+void iwl_down(struct iwl_priv *priv)
+{
+	int exit_pending;
+
+	IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
+
+	lockdep_assert_held(&priv->mutex);
+
+	iwl_scan_cancel_timeout(priv, 200);
+
+	exit_pending =
+		test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
+
+	iwl_clear_ucode_stations(priv, NULL);
+	iwl_dealloc_bcast_stations(priv);
+	iwl_clear_driver_stations(priv);
+
+	/* reset BT coex data */
+	priv->bt_status = 0;
+	priv->cur_rssi_ctx = NULL;
+	priv->bt_is_sco = 0;
+	if (priv->lib->bt_params)
+		priv->bt_traffic_load =
+			 priv->lib->bt_params->bt_init_traffic_load;
+	else
+		priv->bt_traffic_load = 0;
+	priv->bt_full_concurrent = false;
+	priv->bt_ci_compliance = 0;
+
+	/* Wipe out the EXIT_PENDING status bit if we are not actually
+	 * exiting the module */
+	if (!exit_pending)
+		clear_bit(STATUS_EXIT_PENDING, &priv->status);
+
+	if (priv->mac80211_registered)
+		ieee80211_stop_queues(priv->hw);
+
+	priv->ucode_loaded = false;
+	iwl_trans_stop_device(priv->trans);
+
+	/* Resetting num_aux_in_flight must be done after the transport is stopped */
+	atomic_set(&priv->num_aux_in_flight, 0);
+
+	/* Clear out all status bits but a few that are stable across reset */
+	priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
+				STATUS_RF_KILL_HW |
+			test_bit(STATUS_FW_ERROR, &priv->status) <<
+				STATUS_FW_ERROR |
+			test_bit(STATUS_EXIT_PENDING, &priv->status) <<
+				STATUS_EXIT_PENDING;
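+	/*
+	 * The expression above rebuilds priv->status from scratch: each
+	 * test_bit() yields 0 or 1, which is shifted back to its own bit
+	 * position, so only RF_KILL_HW, FW_ERROR and EXIT_PENDING can
+	 * survive the reset.
+	 */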
+
+	dev_kfree_skb(priv->beacon_skb);
+	priv->beacon_skb = NULL;
+}
+
+/*****************************************************************************
+ *
+ * Workqueue callbacks
+ *
+ *****************************************************************************/
+
+static void iwl_bg_run_time_calib_work(struct work_struct *work)
+{
+	struct iwl_priv *priv = container_of(work, struct iwl_priv,
+			run_time_calib_work);
+
+	mutex_lock(&priv->mutex);
+
+	if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
+	    test_bit(STATUS_SCANNING, &priv->status)) {
+		mutex_unlock(&priv->mutex);
+		return;
+	}
+
+	if (priv->start_calib) {
+		iwl_chain_noise_calibration(priv);
+		iwl_sensitivity_calibration(priv);
+	}
+
+	mutex_unlock(&priv->mutex);
+}
+
+void iwlagn_prepare_restart(struct iwl_priv *priv)
+{
+	bool bt_full_concurrent;
+	u8 bt_ci_compliance;
+	u8 bt_load;
+	u8 bt_status;
+	bool bt_is_sco;
+	int i;
+
+	lockdep_assert_held(&priv->mutex);
+
+	priv->is_open = 0;
+
+	/*
+	 * __iwl_down() will clear the BT status variables,
+	 * which is correct, but when we restart we really
+	 * want to keep them so restore them afterwards.
+	 *
+	 * The restart process will later pick them up and
+	 * re-configure the hw when we reconfigure the BT
+	 * command.
+	 */
+	bt_full_concurrent = priv->bt_full_concurrent;
+	bt_ci_compliance = priv->bt_ci_compliance;
+	bt_load = priv->bt_traffic_load;
+	bt_status = priv->bt_status;
+	bt_is_sco = priv->bt_is_sco;
+
+	iwl_down(priv);
+
+	priv->bt_full_concurrent = bt_full_concurrent;
+	priv->bt_ci_compliance = bt_ci_compliance;
+	priv->bt_traffic_load = bt_load;
+	priv->bt_status = bt_status;
+	priv->bt_is_sco = bt_is_sco;
+
+	/* reset aggregation queues */
+	for (i = IWLAGN_FIRST_AMPDU_QUEUE; i < IWL_MAX_HW_QUEUES; i++)
+		priv->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE;
+	/* and stop counts */
+	for (i = 0; i < IWL_MAX_HW_QUEUES; i++)
+		atomic_set(&priv->queue_stop_count[i], 0);
+
+	memset(priv->agg_q_alloc, 0, sizeof(priv->agg_q_alloc));
+}
+
+static void iwl_bg_restart(struct work_struct *data)
+{
+	struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
+
+	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+		return;
+
+	if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
+		mutex_lock(&priv->mutex);
+		iwlagn_prepare_restart(priv);
+		mutex_unlock(&priv->mutex);
+		iwl_cancel_deferred_work(priv);
+		if (priv->mac80211_registered)
+			ieee80211_restart_hw(priv->hw);
+		else
+			IWL_ERR(priv,
+				"Cannot request restart before registrating with mac80211\n");
+	} else {
+		WARN_ON(1);
+	}
+}
+
+/*****************************************************************************
+ *
+ * driver setup and teardown
+ *
+ *****************************************************************************/
+
+static void iwl_setup_deferred_work(struct iwl_priv *priv)
+{
+	priv->workqueue = alloc_ordered_workqueue(DRV_NAME, 0);
+
+	INIT_WORK(&priv->restart, iwl_bg_restart);
+	INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
+	INIT_WORK(&priv->run_time_calib_work, iwl_bg_run_time_calib_work);
+	INIT_WORK(&priv->tx_flush, iwl_bg_tx_flush);
+	INIT_WORK(&priv->bt_full_concurrency, iwl_bg_bt_full_concurrency);
+	INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config);
+
+	iwl_setup_scan_deferred_work(priv);
+
+	if (priv->lib->bt_params)
+		iwlagn_bt_setup_deferred_work(priv);
+
+	timer_setup(&priv->statistics_periodic, iwl_bg_statistics_periodic, 0);
+
+	timer_setup(&priv->ucode_trace, iwl_bg_ucode_trace, 0);
+}
+
+void iwl_cancel_deferred_work(struct iwl_priv *priv)
+{
+	if (priv->lib->bt_params)
+		iwlagn_bt_cancel_deferred_work(priv);
+
+	cancel_work_sync(&priv->run_time_calib_work);
+	cancel_work_sync(&priv->beacon_update);
+
+	iwl_cancel_scan_deferred_work(priv);
+
+	cancel_work_sync(&priv->bt_full_concurrency);
+	cancel_work_sync(&priv->bt_runtime_config);
+
+	del_timer_sync(&priv->statistics_periodic);
+	del_timer_sync(&priv->ucode_trace);
+}
+
+static int iwl_init_drv(struct iwl_priv *priv)
+{
+	spin_lock_init(&priv->sta_lock);
+
+	mutex_init(&priv->mutex);
+
+	INIT_LIST_HEAD(&priv->calib_results);
+
+	priv->band = NL80211_BAND_2GHZ;
+
+	priv->plcp_delta_threshold = priv->lib->plcp_delta_threshold;
+
+	priv->iw_mode = NL80211_IFTYPE_STATION;
+	priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
+	priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
+	priv->agg_tids_count = 0;
+
+	priv->rx_statistics_jiffies = jiffies;
+
+	/* Choose which receivers/antennas to use */
+	iwlagn_set_rxon_chain(priv, &priv->contexts[IWL_RXON_CTX_BSS]);
+
+	iwl_init_scan_params(priv);
+
+	/* init bt coex */
+	if (priv->lib->bt_params &&
+	    priv->lib->bt_params->advanced_bt_coexist) {
+		priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
+		priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
+		priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
+		priv->bt_on_thresh = BT_ON_THRESHOLD_DEF;
+		priv->bt_duration = BT_DURATION_LIMIT_DEF;
+		priv->dynamic_frag_thresh = BT_FRAG_THRESHOLD_DEF;
+	}
+
+	return 0;
+}
+
+static void iwl_uninit_drv(struct iwl_priv *priv)
+{
+	kfree(priv->scan_cmd);
+	kfree(priv->beacon_cmd);
+	kfree(rcu_dereference_raw(priv->noa_data));
+	iwl_calib_free_results(priv);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	kfree(priv->wowlan_sram);
+#endif
+}
+
+static void iwl_set_hw_params(struct iwl_priv *priv)
+{
+	if (priv->cfg->ht_params)
+		priv->hw_params.use_rts_for_aggregation =
+			priv->cfg->ht_params->use_rts_for_aggregation;
+
+	/* Device-specific setup */
+	priv->lib->set_hw_params(priv);
+}
+
+
+
+/* show what optional capabilities we have */
+static void iwl_option_config(struct iwl_priv *priv)
+{
+#ifdef CONFIG_IWLWIFI_DEBUG
+	IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUG enabled\n");
+#else
+	IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUG disabled\n");
+#endif
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUGFS enabled\n");
+#else
+	IWL_INFO(priv, "CONFIG_IWLWIFI_DEBUGFS disabled\n");
+#endif
+
+#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
+	IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TRACING enabled\n");
+#else
+	IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TRACING disabled\n");
+#endif
+}
+
+static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
+{
+	struct iwl_nvm_data *data = priv->nvm_data;
+
+	if (data->sku_cap_11n_enable &&
+	    !priv->cfg->ht_params) {
+		IWL_ERR(priv, "Invalid 11n configuration\n");
+		return -EINVAL;
+	}
+
+	if (!data->sku_cap_11n_enable && !data->sku_cap_band_24ghz_enable &&
+	    !data->sku_cap_band_52ghz_enable) {
+		IWL_ERR(priv, "Invalid device sku\n");
+		return -EINVAL;
+	}
+
+	IWL_DEBUG_INFO(priv,
+		       "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n",
+		       data->sku_cap_band_24ghz_enable ? "" : "NOT", "enabled",
+		       data->sku_cap_band_52ghz_enable ? "" : "NOT", "enabled",
+		       data->sku_cap_11n_enable ? "" : "NOT", "enabled");
+
+	priv->hw_params.tx_chains_num =
+		num_of_ant(data->valid_tx_ant);
+	if (priv->cfg->rx_with_siso_diversity)
+		priv->hw_params.rx_chains_num = 1;
+	else
+		priv->hw_params.rx_chains_num =
+			num_of_ant(data->valid_rx_ant);
+
+	IWL_DEBUG_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n",
+		       data->valid_tx_ant,
+		       data->valid_rx_ant);
+
+	return 0;
+}
+
+static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
+						 const struct iwl_cfg *cfg,
+						 const struct iwl_fw *fw,
+						 struct dentry *dbgfs_dir)
+{
+	struct iwl_priv *priv;
+	struct ieee80211_hw *hw;
+	struct iwl_op_mode *op_mode;
+	u16 num_mac;
+	u32 ucode_flags;
+	struct iwl_trans_config trans_cfg = {};
+	static const u8 no_reclaim_cmds[] = {
+		REPLY_RX_PHY_CMD,
+		REPLY_RX_MPDU_CMD,
+		REPLY_COMPRESSED_BA,
+		STATISTICS_NOTIFICATION,
+		REPLY_TX,
+	};
+	int i;
+
+	/************************
+	 * 1. Allocating HW data
+	 ************************/
+	hw = iwl_alloc_all();
+	if (!hw) {
+		pr_err("%s: Cannot allocate network device\n", cfg->name);
+		goto out;
+	}
+
+	op_mode = hw->priv;
+	op_mode->ops = &iwl_dvm_ops;
+	priv = IWL_OP_MODE_GET_DVM(op_mode);
+	priv->trans = trans;
+	priv->dev = trans->dev;
+	priv->cfg = cfg;
+	priv->fw = fw;
+
+	switch (priv->cfg->device_family) {
+	case IWL_DEVICE_FAMILY_1000:
+	case IWL_DEVICE_FAMILY_100:
+		priv->lib = &iwl_dvm_1000_cfg;
+		break;
+	case IWL_DEVICE_FAMILY_2000:
+		priv->lib = &iwl_dvm_2000_cfg;
+		break;
+	case IWL_DEVICE_FAMILY_105:
+		priv->lib = &iwl_dvm_105_cfg;
+		break;
+	case IWL_DEVICE_FAMILY_2030:
+	case IWL_DEVICE_FAMILY_135:
+		priv->lib = &iwl_dvm_2030_cfg;
+		break;
+	case IWL_DEVICE_FAMILY_5000:
+		priv->lib = &iwl_dvm_5000_cfg;
+		break;
+	case IWL_DEVICE_FAMILY_5150:
+		priv->lib = &iwl_dvm_5150_cfg;
+		break;
+	case IWL_DEVICE_FAMILY_6000:
+	case IWL_DEVICE_FAMILY_6000i:
+		priv->lib = &iwl_dvm_6000_cfg;
+		break;
+	case IWL_DEVICE_FAMILY_6005:
+		priv->lib = &iwl_dvm_6005_cfg;
+		break;
+	case IWL_DEVICE_FAMILY_6050:
+	case IWL_DEVICE_FAMILY_6150:
+		priv->lib = &iwl_dvm_6050_cfg;
+		break;
+	case IWL_DEVICE_FAMILY_6030:
+		priv->lib = &iwl_dvm_6030_cfg;
+		break;
+	default:
+		break;
+	}
+
+	if (WARN_ON(!priv->lib))
+		goto out_free_hw;
+
+	/*
+	 * Populate the state variables that the transport layer needs
+	 * to know about.
+	 */
+	trans_cfg.op_mode = op_mode;
+	trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
+	trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
+
+	switch (iwlwifi_mod_params.amsdu_size) {
+	case IWL_AMSDU_DEF:
+	case IWL_AMSDU_4K:
+		trans_cfg.rx_buf_size = IWL_AMSDU_4K;
+		break;
+	case IWL_AMSDU_8K:
+		trans_cfg.rx_buf_size = IWL_AMSDU_8K;
+		break;
+	case IWL_AMSDU_12K:
+	default:
+		trans_cfg.rx_buf_size = IWL_AMSDU_4K;
+		pr_err("Unsupported amsdu_size: %d\n",
+		       iwlwifi_mod_params.amsdu_size);
+	}
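+	/*
+	 * Only 4K and 8K RX buffers are supported here, so anything else
+	 * (including 12K) falls back to 4K with the error above.
+	 */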
+
+	trans_cfg.cmd_q_wdg_timeout = IWL_WATCHDOG_DISABLED;
+
+	trans_cfg.command_groups = iwl_dvm_groups;
+	trans_cfg.command_groups_size = ARRAY_SIZE(iwl_dvm_groups);
+
+	trans_cfg.cmd_fifo = IWLAGN_CMD_FIFO_NUM;
+	trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info,
+					  driver_data[2]);
+
+	WARN_ON(sizeof(priv->transport_queue_stop) * BITS_PER_BYTE <
+		priv->cfg->base_params->num_of_queues);
+
+	ucode_flags = fw->ucode_capa.flags;
+
+	if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) {
+		priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
+		trans_cfg.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
+	} else {
+		priv->sta_key_max_num = STA_KEY_MAX_NUM;
+		trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
+	}
+
+	/* Configure transport layer */
+	iwl_trans_configure(priv->trans, &trans_cfg);
+
+	trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
+	trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_res_start);
+	trans->command_groups = trans_cfg.command_groups;
+	trans->command_groups_size = trans_cfg.command_groups_size;
+
+	/* At this point both hw and priv are allocated. */
+
+	SET_IEEE80211_DEV(priv->hw, priv->trans->dev);
+
+	iwl_option_config(priv);
+
+	IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
+
+	/* is antenna coupling more than 35dB ? */
+	priv->bt_ant_couple_ok =
+		iwlwifi_mod_params.antenna_coupling >
+			IWL_BT_ANTENNA_COUPLING_THRESHOLD;
+
+	/* BT channel inhibition enabled */
+	priv->bt_ch_announce = true;
+	IWL_DEBUG_INFO(priv, "BT channel inhibition is %s\n",
+		       (priv->bt_ch_announce) ? "On" : "Off");
+
+	/*
+	 * These spin locks will be used in apm_ops.init and EEPROM
+	 * access, so we should initialize them now.
+	 */
+	spin_lock_init(&priv->statistics.lock);
+
+	/***********************
+	 * 2. Read REV register
+	 ***********************/
+	IWL_INFO(priv, "Detected %s, REV=0x%X\n",
+		priv->cfg->name, priv->trans->hw_rev);
+
+	if (iwl_trans_start_hw(priv->trans))
+		goto out_free_hw;
+
+	/* Read the EEPROM */
+	if (iwl_read_eeprom(priv->trans, &priv->eeprom_blob,
+			    &priv->eeprom_blob_size)) {
+		IWL_ERR(priv, "Unable to init EEPROM\n");
+		goto out_free_hw;
+	}
+
+	/* Reset chip to save power until we load uCode during "up". */
+	iwl_trans_stop_device(priv->trans);
+
+	priv->nvm_data = iwl_parse_eeprom_data(priv->trans->dev, priv->cfg,
+						  priv->eeprom_blob,
+						  priv->eeprom_blob_size);
+	if (!priv->nvm_data)
+		goto out_free_eeprom_blob;
+
+	if (iwl_nvm_check_version(priv->nvm_data, priv->trans))
+		goto out_free_eeprom;
+
+	if (iwl_eeprom_init_hw_params(priv))
+		goto out_free_eeprom;
+
+	/* extract MAC Address */
+	memcpy(priv->addresses[0].addr, priv->nvm_data->hw_addr, ETH_ALEN);
+	IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
+	priv->hw->wiphy->addresses = priv->addresses;
+	priv->hw->wiphy->n_addresses = 1;
+	num_mac = priv->nvm_data->n_hw_addrs;
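+	/*
+	 * If the NVM provides more than one MAC address, derive the
+	 * second one from the first by simply incrementing its last
+	 * octet.
+	 */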
+	if (num_mac > 1) {
+		memcpy(priv->addresses[1].addr, priv->addresses[0].addr,
+		       ETH_ALEN);
+		priv->addresses[1].addr[5]++;
+		priv->hw->wiphy->n_addresses++;
+	}
+
+	/************************
+	 * 4. Setup HW constants
+	 ************************/
+	iwl_set_hw_params(priv);
+
+	if (!(priv->nvm_data->sku_cap_ipan_enable)) {
+		IWL_DEBUG_INFO(priv, "Your EEPROM disabled PAN\n");
+		ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
+		/*
+		 * if not PAN, then don't support P2P -- might be a uCode
+		 * packaging bug or due to the eeprom check above
+		 */
+		priv->sta_key_max_num = STA_KEY_MAX_NUM;
+		trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
+
+		/* Configure transport layer again*/
+		iwl_trans_configure(priv->trans, &trans_cfg);
+	}
+
+	/*******************
+	 * 5. Setup priv
+	 *******************/
+	for (i = 0; i < IWL_MAX_HW_QUEUES; i++) {
+		priv->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE;
+		if (i < IWLAGN_FIRST_AMPDU_QUEUE &&
+		    i != IWL_DEFAULT_CMD_QUEUE_NUM &&
+		    i != IWL_IPAN_CMD_QUEUE_NUM)
+			priv->queue_to_mac80211[i] = i;
+		atomic_set(&priv->queue_stop_count[i], 0);
+	}
+
+	if (iwl_init_drv(priv))
+		goto out_free_eeprom;
+
+	/* At this point both hw and priv are initialized. */
+
+	/********************
+	 * 6. Setup services
+	 ********************/
+	iwl_setup_deferred_work(priv);
+	iwl_setup_rx_handlers(priv);
+
+	iwl_power_initialize(priv);
+	iwl_tt_initialize(priv);
+
+	snprintf(priv->hw->wiphy->fw_version,
+		 sizeof(priv->hw->wiphy->fw_version),
+		 "%s", fw->fw_version);
+
+	priv->new_scan_threshold_behaviour =
+		!!(ucode_flags & IWL_UCODE_TLV_FLAGS_NEWSCAN);
+
+	priv->phy_calib_chain_noise_reset_cmd =
+		fw->ucode_capa.standard_phy_calibration_size;
+	priv->phy_calib_chain_noise_gain_cmd =
+		fw->ucode_capa.standard_phy_calibration_size + 1;
+
+	/* initialize all valid contexts */
+	iwl_init_context(priv, ucode_flags);
+
+	/**************************************************
+	 * This is still part of probe() in a sense...
+	 *
+	 * 7. Setup and register with mac80211 and debugfs
+	 **************************************************/
+	if (iwlagn_mac_setup_register(priv, &fw->ucode_capa))
+		goto out_destroy_workqueue;
+
+	if (iwl_dbgfs_register(priv, dbgfs_dir))
+		goto out_mac80211_unregister;
+
+	return op_mode;
+
+out_mac80211_unregister:
+	iwlagn_mac_unregister(priv);
+out_destroy_workqueue:
+	iwl_tt_exit(priv);
+	iwl_cancel_deferred_work(priv);
+	destroy_workqueue(priv->workqueue);
+	priv->workqueue = NULL;
+	iwl_uninit_drv(priv);
+out_free_eeprom_blob:
+	kfree(priv->eeprom_blob);
+out_free_eeprom:
+	kfree(priv->nvm_data);
+out_free_hw:
+	ieee80211_free_hw(priv->hw);
+out:
+	op_mode = NULL;
+	return op_mode;
+}
+
+static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
+{
+	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+
+	IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
+
+	iwlagn_mac_unregister(priv);
+
+	iwl_tt_exit(priv);
+
+	kfree(priv->eeprom_blob);
+	kfree(priv->nvm_data);
+
+	/*netif_stop_queue(dev); */
+	flush_workqueue(priv->workqueue);
+
+	/* ieee80211_unregister_hw calls iwlagn_mac_stop, which flushes
+	 * priv->workqueue... so we can't take down the workqueue
+	 * until now... */
+	destroy_workqueue(priv->workqueue);
+	priv->workqueue = NULL;
+
+	iwl_uninit_drv(priv);
+
+	dev_kfree_skb(priv->beacon_skb);
+
+	iwl_trans_op_mode_leave(priv->trans);
+	ieee80211_free_hw(priv->hw);
+}
+
+static const char * const desc_lookup_text[] = {
+	"OK",
+	"FAIL",
+	"BAD_PARAM",
+	"BAD_CHECKSUM",
+	"NMI_INTERRUPT_WDG",
+	"SYSASSERT",
+	"FATAL_ERROR",
+	"BAD_COMMAND",
+	"HW_ERROR_TUNE_LOCK",
+	"HW_ERROR_TEMPERATURE",
+	"ILLEGAL_CHAN_FREQ",
+	"VCC_NOT_STABLE",
+	"FH_ERROR",
+	"NMI_INTERRUPT_HOST",
+	"NMI_INTERRUPT_ACTION_PT",
+	"NMI_INTERRUPT_UNKNOWN",
+	"UCODE_VERSION_MISMATCH",
+	"HW_ERROR_ABS_LOCK",
+	"HW_ERROR_CAL_LOCK_FAIL",
+	"NMI_INTERRUPT_INST_ACTION_PT",
+	"NMI_INTERRUPT_DATA_ACTION_PT",
+	"NMI_TRM_HW_ER",
+	"NMI_INTERRUPT_TRM",
+	"NMI_INTERRUPT_BREAK_POINT",
+	"DEBUG_0",
+	"DEBUG_1",
+	"DEBUG_2",
+	"DEBUG_3",
+};
+
+static struct { char *name; u8 num; } advanced_lookup[] = {
+	{ "NMI_INTERRUPT_WDG", 0x34 },
+	{ "SYSASSERT", 0x35 },
+	{ "UCODE_VERSION_MISMATCH", 0x37 },
+	{ "BAD_COMMAND", 0x38 },
+	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
+	{ "FATAL_ERROR", 0x3D },
+	{ "NMI_TRM_HW_ERR", 0x46 },
+	{ "NMI_INTERRUPT_TRM", 0x4C },
+	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
+	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
+	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
+	{ "NMI_INTERRUPT_HOST", 0x66 },
+	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
+	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
+	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
+	{ "ADVANCED_SYSASSERT", 0 },
+};
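+/*
+ * desc_lookup() below relies on "ADVANCED_SYSASSERT" being the final
+ * entry above: its linear search stops one short of the array end, so
+ * any unknown error id falls through to that catch-all name.
+ */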
+
+static const char *desc_lookup(u32 num)
+{
+	int i;
+	int max = ARRAY_SIZE(desc_lookup_text);
+
+	if (num < max)
+		return desc_lookup_text[num];
+
+	max = ARRAY_SIZE(advanced_lookup) - 1;
+	for (i = 0; i < max; i++) {
+		if (advanced_lookup[i].num == num)
+			break;
+	}
+	return advanced_lookup[i].name;
+}
+
+#define ERROR_START_OFFSET  (1 * sizeof(u32))
+#define ERROR_ELEM_SIZE     (7 * sizeof(u32))
+
+static void iwl_dump_nic_error_log(struct iwl_priv *priv)
+{
+	struct iwl_trans *trans = priv->trans;
+	u32 base;
+	struct iwl_error_event_table table;
+
+	base = priv->device_pointers.error_event_table;
+	if (priv->cur_ucode == IWL_UCODE_INIT) {
+		if (!base)
+			base = priv->fw->init_errlog_ptr;
+	} else {
+		if (!base)
+			base = priv->fw->inst_errlog_ptr;
+	}
+
+	if (!iwlagn_hw_valid_rtc_data_addr(base)) {
+		IWL_ERR(priv,
+			"Not valid error log pointer 0x%08X for %s uCode\n",
+			base,
+			(priv->cur_ucode == IWL_UCODE_INIT)
+					? "Init" : "RT");
+		return;
+	}
+
+	/* TODO: Update dbgfs with ISR error stats obtained below */
+	iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
+
+	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
+		IWL_ERR(trans, "Start IWL Error Log Dump:\n");
+		IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
+			priv->status, table.valid);
+	}
+
+	trace_iwlwifi_dev_ucode_error(trans->dev, &table, 0, table.brd_ver);
+	IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id,
+		desc_lookup(table.error_id));
+	IWL_ERR(priv, "0x%08X | uPc\n", table.pc);
+	IWL_ERR(priv, "0x%08X | branchlink1\n", table.blink1);
+	IWL_ERR(priv, "0x%08X | branchlink2\n", table.blink2);
+	IWL_ERR(priv, "0x%08X | interruptlink1\n", table.ilink1);
+	IWL_ERR(priv, "0x%08X | interruptlink2\n", table.ilink2);
+	IWL_ERR(priv, "0x%08X | data1\n", table.data1);
+	IWL_ERR(priv, "0x%08X | data2\n", table.data2);
+	IWL_ERR(priv, "0x%08X | line\n", table.line);
+	IWL_ERR(priv, "0x%08X | beacon time\n", table.bcon_time);
+	IWL_ERR(priv, "0x%08X | tsf low\n", table.tsf_low);
+	IWL_ERR(priv, "0x%08X | tsf hi\n", table.tsf_hi);
+	IWL_ERR(priv, "0x%08X | time gp1\n", table.gp1);
+	IWL_ERR(priv, "0x%08X | time gp2\n", table.gp2);
+	IWL_ERR(priv, "0x%08X | time gp3\n", table.gp3);
+	IWL_ERR(priv, "0x%08X | uCode version\n", table.ucode_ver);
+	IWL_ERR(priv, "0x%08X | hw version\n", table.hw_ver);
+	IWL_ERR(priv, "0x%08X | board version\n", table.brd_ver);
+	IWL_ERR(priv, "0x%08X | hcmd\n", table.hcmd);
+	IWL_ERR(priv, "0x%08X | isr0\n", table.isr0);
+	IWL_ERR(priv, "0x%08X | isr1\n", table.isr1);
+	IWL_ERR(priv, "0x%08X | isr2\n", table.isr2);
+	IWL_ERR(priv, "0x%08X | isr3\n", table.isr3);
+	IWL_ERR(priv, "0x%08X | isr4\n", table.isr4);
+	IWL_ERR(priv, "0x%08X | isr_pref\n", table.isr_pref);
+	IWL_ERR(priv, "0x%08X | wait_event\n", table.wait_event);
+	IWL_ERR(priv, "0x%08X | l2p_control\n", table.l2p_control);
+	IWL_ERR(priv, "0x%08X | l2p_duration\n", table.l2p_duration);
+	IWL_ERR(priv, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
+	IWL_ERR(priv, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
+	IWL_ERR(priv, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
+	IWL_ERR(priv, "0x%08X | timestamp\n", table.u_timestamp);
+	IWL_ERR(priv, "0x%08X | flow_handler\n", table.flow_handler);
+}
+
+#define EVENT_START_OFFSET  (4 * sizeof(u32))
+
+/**
+ * iwl_print_event_log - Dump error event log to syslog
+ */
+static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
+			       u32 num_events, u32 mode,
+			       int pos, char **buf, size_t bufsz)
+{
+	u32 i;
+	u32 base;       /* SRAM byte address of event log header */
+	u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
+	u32 ptr;        /* SRAM byte address of log data */
+	u32 ev, time, data; /* event log data */
+	unsigned long reg_flags;
+
+	struct iwl_trans *trans = priv->trans;
+
+	if (num_events == 0)
+		return pos;
+
+	base = priv->device_pointers.log_event_table;
+	if (priv->cur_ucode == IWL_UCODE_INIT) {
+		if (!base)
+			base = priv->fw->init_evtlog_ptr;
+	} else {
+		if (!base)
+			base = priv->fw->inst_evtlog_ptr;
+	}
+
+	if (mode == 0)
+		event_size = 2 * sizeof(u32);
+	else
+		event_size = 3 * sizeof(u32);
+
+	ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
+
+	/* Make sure device is powered up for SRAM reads */
+	if (!iwl_trans_grab_nic_access(trans, &reg_flags))
+		return pos;
+
+	/* Set starting address; reads will auto-increment */
+	iwl_write32(trans, HBUS_TARG_MEM_RADDR, ptr);
+
+	/* "time" is actually "data" for mode 0 (no timestamp).
+	* place event id # at far right for easier visual parsing. */
+	for (i = 0; i < num_events; i++) {
+		ev = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
+		time = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
+		if (mode == 0) {
+			/* data, ev */
+			if (bufsz) {
+				pos += scnprintf(*buf + pos, bufsz - pos,
+						"EVT_LOG:0x%08x:%04u\n",
+						time, ev);
+			} else {
+				trace_iwlwifi_dev_ucode_event(trans->dev, 0,
+					time, ev);
+				IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n",
+					time, ev);
+			}
+		} else {
+			data = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
+			if (bufsz) {
+				pos += scnprintf(*buf + pos, bufsz - pos,
+						"EVT_LOGT:%010u:0x%08x:%04u\n",
+						 time, data, ev);
+			} else {
+				IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
+					time, data, ev);
+				trace_iwlwifi_dev_ucode_event(trans->dev, time,
+					data, ev);
+			}
+		}
+	}
+
+	/* Allow device to power down */
+	iwl_trans_release_nic_access(trans, &reg_flags);
+	return pos;
+}
+
+/**
+ * iwl_print_last_event_logs - Dump the newest event log entries to syslog
+ */
+static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
+				    u32 num_wraps, u32 next_entry,
+				    u32 size, u32 mode,
+				    int pos, char **buf, size_t bufsz)
+{
+	/*
+	 * Display the newest DEFAULT_DUMP_EVENT_LOG_ENTRIES entries,
+	 * i.e. the entries just before the next one that the uCode
+	 * would fill.
+	 */
+	if (num_wraps) {
+		if (next_entry < size) {
+			pos = iwl_print_event_log(priv,
+						capacity - (size - next_entry),
+						size - next_entry, mode,
+						pos, buf, bufsz);
+			pos = iwl_print_event_log(priv, 0,
+						  next_entry, mode,
+						  pos, buf, bufsz);
+		} else
+			pos = iwl_print_event_log(priv, next_entry - size,
+						  size, mode, pos, buf, bufsz);
+	} else {
+		if (next_entry < size) {
+			pos = iwl_print_event_log(priv, 0, next_entry,
+						  mode, pos, buf, bufsz);
+		} else {
+			pos = iwl_print_event_log(priv, next_entry - size,
+						  size, mode, pos, buf, bufsz);
+		}
+	}
+	return pos;
+}
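+/*
+ * Worked example for the wrapped case above: with capacity = 64,
+ * next_entry = 5 and size = 20, the 20 newest entries are the 15 at
+ * indices 49..63 (printed first) followed by the 5 at indices 0..4.
+ */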
+
+#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
+
+int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
+			    char **buf)
+{
+	u32 base;       /* SRAM byte address of event log header */
+	u32 capacity;   /* event log capacity in # entries */
+	u32 mode;       /* 0 - no timestamp, 1 - timestamp recorded */
+	u32 num_wraps;  /* # times uCode wrapped to top of log */
+	u32 next_entry; /* index of next entry to be written by uCode */
+	u32 size;       /* # entries that we'll print */
+	u32 logsize;
+	int pos = 0;
+	size_t bufsz = 0;
+	struct iwl_trans *trans = priv->trans;
+
+	base = priv->device_pointers.log_event_table;
+	if (priv->cur_ucode == IWL_UCODE_INIT) {
+		logsize = priv->fw->init_evtlog_size;
+		if (!base)
+			base = priv->fw->init_evtlog_ptr;
+	} else {
+		logsize = priv->fw->inst_evtlog_size;
+		if (!base)
+			base = priv->fw->inst_evtlog_ptr;
+	}
+
+	if (!iwlagn_hw_valid_rtc_data_addr(base)) {
+		IWL_ERR(priv,
+			"Invalid event log pointer 0x%08X for %s uCode\n",
+			base,
+			(priv->cur_ucode == IWL_UCODE_INIT)
+					? "Init" : "RT");
+		return -EINVAL;
+	}
+
+	/* event log header */
+	capacity = iwl_trans_read_mem32(trans, base);
+	mode = iwl_trans_read_mem32(trans, base + (1 * sizeof(u32)));
+	num_wraps = iwl_trans_read_mem32(trans, base + (2 * sizeof(u32)));
+	next_entry = iwl_trans_read_mem32(trans, base + (3 * sizeof(u32)));
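+	/*
+	 * The four header words just read sit at the start of the log in
+	 * SRAM, in this order: capacity, mode, wrap counter, write index
+	 * -- matching the read struct used by the continuous trace path.
+	 */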
+
+	if (capacity > logsize) {
+		IWL_ERR(priv, "Log capacity %d is bogus, limit to %d "
+			"entries\n", capacity, logsize);
+		capacity = logsize;
+	}
+
+	if (next_entry > logsize) {
+		IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
+			next_entry, logsize);
+		next_entry = logsize;
+	}
+
+	size = num_wraps ? capacity : next_entry;
+
+	/* bail out if nothing in log */
+	if (size == 0) {
+		IWL_ERR(trans, "Start IWL Event Log Dump: nothing in log\n");
+		return pos;
+	}
+
+	if (!(iwl_have_debug_level(IWL_DL_FW_ERRORS)) && !full_log)
+		size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
+			? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
+	IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n",
+		size);
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+	if (buf) {
+		if (full_log)
+			bufsz = capacity * 48;
+		else
+			bufsz = size * 48;
+		*buf = kmalloc(bufsz, GFP_KERNEL);
+		if (!*buf)
+			return -ENOMEM;
+	}
+	if (iwl_have_debug_level(IWL_DL_FW_ERRORS) || full_log) {
+		/*
+		 * if uCode has wrapped back to top of log,
+		 * start at the oldest entry,
+		 * i.e the next one that uCode would fill.
+		 */
+		if (num_wraps)
+			pos = iwl_print_event_log(priv, next_entry,
+						capacity - next_entry, mode,
+						pos, buf, bufsz);
+		/* (then/else) start at top of log */
+		pos = iwl_print_event_log(priv, 0,
+					  next_entry, mode, pos, buf, bufsz);
+	} else
+		pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
+						next_entry, size, mode,
+						pos, buf, bufsz);
+#else
+	pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
+					next_entry, size, mode,
+					pos, buf, bufsz);
+#endif
+	return pos;
+}
+
+static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
+{
+	unsigned int reload_msec;
+	unsigned long reload_jiffies;
+
+	if (iwl_have_debug_level(IWL_DL_FW_ERRORS))
+		iwl_print_rx_config_cmd(priv, IWL_RXON_CTX_BSS);
+
+	/* uCode is no longer loaded. */
+	priv->ucode_loaded = false;
+
+	/* Set the FW error flag -- cleared on iwl_down */
+	set_bit(STATUS_FW_ERROR, &priv->status);
+
+	iwl_abort_notification_waits(&priv->notif_wait);
+
+	/* Keep the restart process from trying to send host
+	 * commands by clearing the ready bit */
+	clear_bit(STATUS_READY, &priv->status);
+
+	if (!ondemand) {
+		/*
+		 * If the firmware keeps reloading, something is seriously
+		 * wrong and the firmware is failing to recover from it.
+		 * Instead of retrying forever, which would fill the syslog
+		 * and hang the system, just stop.
+		 */
+		reload_jiffies = jiffies;
+		reload_msec = jiffies_to_msecs((long) reload_jiffies -
+					(long) priv->reload_jiffies);
+		priv->reload_jiffies = reload_jiffies;
+		if (reload_msec <= IWL_MIN_RELOAD_DURATION) {
+			priv->reload_count++;
+			if (priv->reload_count >= IWL_MAX_CONTINUE_RELOAD_CNT) {
+				IWL_ERR(priv,
+					"Too many consecutive firmware reloads, stop restarting\n");
+				return;
+			}
+		} else
+			priv->reload_count = 0;
+	}
+
+	if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
+		if (iwlwifi_mod_params.fw_restart) {
+			IWL_DEBUG_FW_ERRORS(priv,
+				  "Restarting adapter due to uCode error.\n");
+			queue_work(priv->workqueue, &priv->restart);
+		} else
+			IWL_DEBUG_FW_ERRORS(priv,
+				  "Detected FW error, but not restarting\n");
+	}
+}
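
The !ondemand branch above is a restart rate limiter: each reload that comes
sooner than IWL_MIN_RELOAD_DURATION after the previous one bumps
reload_count, and once it reaches IWL_MAX_CONTINUE_RELOAD_CNT the driver
refuses to restart again. A stand-alone sketch of the same pattern, with
invented names and thresholds:

#include <stdbool.h>

#define MIN_RELOAD_MSEC	1000	/* "too soon" threshold (assumed) */
#define MAX_RELOADS	5	/* consecutive fast reloads allowed (assumed) */

struct reload_state {
	unsigned long last_msec;	/* time of the previous reload */
	unsigned int count;		/* consecutive fast reloads seen */
};

/* Return true if another restart is allowed at time 'now_msec'. */
static bool reload_allowed(struct reload_state *s, unsigned long now_msec)
{
	unsigned long delta = now_msec - s->last_msec;

	s->last_msec = now_msec;
	if (delta <= MIN_RELOAD_MSEC) {
		if (++s->count >= MAX_RELOADS)
			return false;	/* give up instead of looping */
	} else {
		s->count = 0;		/* a quiet period resets the limit */
	}
	return true;
}
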
+
+static void iwl_nic_error(struct iwl_op_mode *op_mode)
+{
+	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+
+	IWL_ERR(priv, "Loaded firmware version: %s\n",
+		priv->fw->fw_version);
+
+	iwl_dump_nic_error_log(priv);
+	iwl_dump_nic_event_log(priv, false, NULL);
+
+	iwlagn_fw_error(priv, false);
+}
+
+static void iwl_cmd_queue_full(struct iwl_op_mode *op_mode)
+{
+	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+
+	if (!iwl_check_for_ct_kill(priv)) {
+		IWL_ERR(priv, "Restarting adapter: command queue is full\n");
+		iwlagn_fw_error(priv, false);
+	}
+}
+
+#define EEPROM_RF_CONFIG_TYPE_MAX      0x3
+
+static void iwl_nic_config(struct iwl_op_mode *op_mode)
+{
+	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+
+	/* SKU Control */
+	iwl_trans_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
+				CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
+				CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP,
+				(CSR_HW_REV_STEP(priv->trans->hw_rev) <<
+					CSR_HW_IF_CONFIG_REG_POS_MAC_STEP) |
+				(CSR_HW_REV_DASH(priv->trans->hw_rev) <<
+					CSR_HW_IF_CONFIG_REG_POS_MAC_DASH));
+
+	/* write radio config values to register */
+	if (priv->nvm_data->radio_cfg_type <= EEPROM_RF_CONFIG_TYPE_MAX) {
+		u32 reg_val =
+			priv->nvm_data->radio_cfg_type <<
+				CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE |
+			priv->nvm_data->radio_cfg_step <<
+				CSR_HW_IF_CONFIG_REG_POS_PHY_STEP |
+			priv->nvm_data->radio_cfg_dash <<
+				CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
+
+		iwl_trans_set_bits_mask(priv->trans, CSR_HW_IF_CONFIG_REG,
+					CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
+					CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
+					CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH,
+					reg_val);
+
+		IWL_INFO(priv, "Radio type=0x%x-0x%x-0x%x\n",
+			 priv->nvm_data->radio_cfg_type,
+			 priv->nvm_data->radio_cfg_step,
+			 priv->nvm_data->radio_cfg_dash);
+	} else {
+		WARN_ON(1);
+	}
+
+	/* set CSR_HW_CONFIG_REG for uCode use */
+	iwl_set_bit(priv->trans, CSR_HW_IF_CONFIG_REG,
+		    CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
+		    CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
+
+	/* W/A: the NIC gets stuck in a reset state after an early PCIe power
+	 * off (PCIe power is lost before PERST# is asserted), causing the
+	 * ME FW to lose ownership and be unable to reclaim it.
+	 */
+	iwl_set_bits_mask_prph(priv->trans, APMG_PS_CTRL_REG,
+			       APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
+			       ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
+
+	if (priv->lib->nic_config)
+		priv->lib->nic_config(priv);
+}
+
+static void iwl_wimax_active(struct iwl_op_mode *op_mode)
+{
+	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+
+	clear_bit(STATUS_READY, &priv->status);
+	IWL_ERR(priv, "RF is used by WiMAX\n");
+}
+
+static void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
+{
+	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+	int mq = priv->queue_to_mac80211[queue];
+
+	if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
+		return;
+
+	if (atomic_inc_return(&priv->queue_stop_count[mq]) > 1) {
+		IWL_DEBUG_TX_QUEUES(priv,
+			"queue %d (mac80211 %d) already stopped\n",
+			queue, mq);
+		return;
+	}
+
+	set_bit(mq, &priv->transport_queue_stop);
+	ieee80211_stop_queue(priv->hw, mq);
+}
+
+static void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
+{
+	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+	int mq = priv->queue_to_mac80211[queue];
+
+	if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
+		return;
+
+	if (atomic_dec_return(&priv->queue_stop_count[mq]) > 0) {
+		IWL_DEBUG_TX_QUEUES(priv,
+			"queue %d (mac80211 %d) already awake\n",
+			queue, mq);
+		return;
+	}
+
+	clear_bit(mq, &priv->transport_queue_stop);
+
+	if (!priv->passive_no_rx)
+		ieee80211_wake_queue(priv->hw, mq);
+}
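
Stop and wake arrive per transport queue but act on shared mac80211 queues,
so the two callbacks above refcount the transitions: only the 0 -> 1 stop
actually stops the mac80211 queue, and only the 1 -> 0 wake actually wakes
it, which lets several transport queues map onto one mac80211 queue safely.
A plain-C sketch of the counting (the driver uses atomics; this does not):

/* Return 1 when the caller should really stop the underlying queue. */
static int queue_stop(int *stop_count)
{
	return ++(*stop_count) == 1;	/* first stopper does the work */
}

/* Return 1 when the caller should really wake the underlying queue. */
static int queue_wake(int *stop_count)
{
	return --(*stop_count) == 0;	/* last waker does the work */
}
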
+
+void iwlagn_lift_passive_no_rx(struct iwl_priv *priv)
+{
+	int mq;
+
+	if (!priv->passive_no_rx)
+		return;
+
+	for (mq = 0; mq < IWLAGN_FIRST_AMPDU_QUEUE; mq++) {
+		if (!test_bit(mq, &priv->transport_queue_stop)) {
+			IWL_DEBUG_TX_QUEUES(priv, "Wake queue %d\n", mq);
+			ieee80211_wake_queue(priv->hw, mq);
+		} else {
+			IWL_DEBUG_TX_QUEUES(priv, "Don't wake queue %d\n", mq);
+		}
+	}
+
+	priv->passive_no_rx = false;
+}
+
+static void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
+{
+	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+	struct ieee80211_tx_info *info;
+
+	info = IEEE80211_SKB_CB(skb);
+	iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);
+	ieee80211_free_txskb(priv->hw, skb);
+}
+
+static bool iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
+{
+	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+
+	if (state)
+		set_bit(STATUS_RF_KILL_HW, &priv->status);
+	else
+		clear_bit(STATUS_RF_KILL_HW, &priv->status);
+
+	wiphy_rfkill_set_hw_state(priv->hw->wiphy, state);
+
+	return false;
+}
+
+static const struct iwl_op_mode_ops iwl_dvm_ops = {
+	.start = iwl_op_mode_dvm_start,
+	.stop = iwl_op_mode_dvm_stop,
+	.rx = iwl_rx_dispatch,
+	.queue_full = iwl_stop_sw_queue,
+	.queue_not_full = iwl_wake_sw_queue,
+	.hw_rf_kill = iwl_set_hw_rfkill_state,
+	.free_skb = iwl_free_skb,
+	.nic_error = iwl_nic_error,
+	.cmd_queue_full = iwl_cmd_queue_full,
+	.nic_config = iwl_nic_config,
+	.wimax_active = iwl_wimax_active,
+};
+
+/*****************************************************************************
+ *
+ * driver and module entry point
+ *
+ *****************************************************************************/
+static int __init iwl_init(void)
+{
+	int ret;
+
+	ret = iwlagn_rate_control_register();
+	if (ret) {
+		pr_err("Unable to register rate control algorithm: %d\n", ret);
+		return ret;
+	}
+
+	ret = iwl_opmode_register("iwldvm", &iwl_dvm_ops);
+	if (ret) {
+		pr_err("Unable to register op_mode: %d\n", ret);
+		iwlagn_rate_control_unregister();
+	}
+
+	return ret;
+}
+module_init(iwl_init);
+
+static void __exit iwl_exit(void)
+{
+	iwl_opmode_deregister("iwldvm");
+	iwlagn_rate_control_unregister();
+}
+module_exit(iwl_exit);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/power.c b/drivers/net/wireless/intel/iwlwifi/dvm/power.c
new file mode 100644
index 0000000..0ad557c
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/power.c
@@ -0,0 +1,395 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+#include "iwl-io.h"
+#include "iwl-debug.h"
+#include "iwl-trans.h"
+#include "iwl-modparams.h"
+#include "dev.h"
+#include "agn.h"
+#include "commands.h"
+#include "power.h"
+
+static bool force_cam = true;
+module_param(force_cam, bool, 0644);
+MODULE_PARM_DESC(force_cam, "force continuously aware mode (no power saving at all)");
+
+/*
+ * Setting power level allows the card to go to sleep when not busy.
+ *
+ * We calculate a sleep command based on the required latency, which
+ * we get from mac80211. In order to handle thermal throttling, we can
+ * also use pre-defined power levels.
+ */
+
+/*
+ * This defines the old power levels. They are still used by default
+ * (level 1) and for thermal throttle (levels 3 through 5)
+ */
+
+struct iwl_power_vec_entry {
+	struct iwl_powertable_cmd cmd;
+	u8 no_dtim;	/* number of DTIM periods to skip */
+};
+
+#define IWL_DTIM_RANGE_0_MAX	2
+#define IWL_DTIM_RANGE_1_MAX	10
+
+#define NOSLP cpu_to_le16(0), 0, 0
+#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0
+#define ASLP (IWL_POWER_POWER_SAVE_ENA_MSK |	\
+		IWL_POWER_POWER_MANAGEMENT_ENA_MSK | \
+		IWL_POWER_ADVANCE_PM_ENA_MSK)
+#define ASLP_TOUT(T) cpu_to_le32(T)
+#define TU_TO_USEC 1024
+#define SLP_TOUT(T) cpu_to_le32((T) * TU_TO_USEC)
+#define SLP_VEC(X0, X1, X2, X3, X4) {cpu_to_le32(X0), \
+				     cpu_to_le32(X1), \
+				     cpu_to_le32(X2), \
+				     cpu_to_le32(X3), \
+				     cpu_to_le32(X4)}
+/* default power management (not Tx power) table values */
+/* for DTIM period 0 through IWL_DTIM_RANGE_0_MAX */
+/* DTIM 0 - 2 */
+static const struct iwl_power_vec_entry range_0[IWL_POWER_NUM] = {
+	{{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 1, 2, 2, 0xFF)}, 0},
+	{{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0},
+	{{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 2, 2, 2, 0xFF)}, 0},
+	{{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 2, 4, 4, 0xFF)}, 1},
+	{{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 2, 4, 6, 0xFF)}, 2}
+};
+
+
+/* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */
+/* DTIM 3 - 10 */
+static const struct iwl_power_vec_entry range_1[IWL_POWER_NUM] = {
+	{{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
+	{{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 3, 4, 7)}, 0},
+	{{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 4, 6, 7, 9)}, 0},
+	{{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 4, 6, 9, 10)}, 1},
+	{{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 4, 6, 10, 10)}, 2}
+};
+
+/* for DTIM period > IWL_DTIM_RANGE_1_MAX */
+/* DTIM 11 - */
+static const struct iwl_power_vec_entry range_2[IWL_POWER_NUM] = {
+	{{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
+	{{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
+	{{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
+	{{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
+	{{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
+};
+
+/* advance power management */
+/* DTIM 0 - 2 */
+static const struct iwl_power_vec_entry apm_range_0[IWL_POWER_NUM] = {
+	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
+		SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
+	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
+		SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
+	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
+		SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
+	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
+		SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
+	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
+		SLP_VEC(1, 2, 6, 8, 0xFF), ASLP_TOUT(2)}, 2}
+};
+
+
+/* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */
+/* DTIM 3 - 10 */
+static const struct iwl_power_vec_entry apm_range_1[IWL_POWER_NUM] = {
+	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
+		SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
+	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
+		SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
+	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
+		SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
+	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
+		SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
+	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
+		SLP_VEC(1, 2, 6, 8, 0xFF), 0}, 2}
+};
+
+/* for DTIM period > IWL_DTIM_RANGE_1_MAX */
+/* DTIM 11 - */
+static const struct iwl_power_vec_entry apm_range_2[IWL_POWER_NUM] = {
+	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
+		SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
+	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
+		SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
+	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
+		SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
+	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
+		SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
+	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
+		SLP_VEC(1, 2, 6, 8, 0xFF), ASLP_TOUT(2)}, 2}
+};
+
+static void iwl_static_sleep_cmd(struct iwl_priv *priv,
+				 struct iwl_powertable_cmd *cmd,
+				 enum iwl_power_level lvl, int period)
+{
+	const struct iwl_power_vec_entry *table;
+	int max_sleep[IWL_POWER_VEC_SIZE] = { 0 };
+	int i;
+	u8 skip;
+	u32 slp_itrvl;
+
+	if (priv->lib->adv_pm) {
+		table = apm_range_2;
+		if (period <= IWL_DTIM_RANGE_1_MAX)
+			table = apm_range_1;
+		if (period <= IWL_DTIM_RANGE_0_MAX)
+			table = apm_range_0;
+	} else {
+		table = range_2;
+		if (period <= IWL_DTIM_RANGE_1_MAX)
+			table = range_1;
+		if (period <= IWL_DTIM_RANGE_0_MAX)
+			table = range_0;
+	}
+
+	if (WARN_ON(lvl < 0 || lvl >= IWL_POWER_NUM))
+		memset(cmd, 0, sizeof(*cmd));
+	else
+		*cmd = table[lvl].cmd;
+
+	if (period == 0) {
+		skip = 0;
+		period = 1;
+		for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
+			max_sleep[i] = 1;
+	} else {
+		skip = table[lvl].no_dtim;
+		for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
+			max_sleep[i] = le32_to_cpu(cmd->sleep_interval[i]);
+		max_sleep[IWL_POWER_VEC_SIZE - 1] = skip + 1;
+	}
+
+	slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
+	/* figure out the listen interval based on dtim period and skip */
+	if (slp_itrvl == 0xFF)
+		cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
+			cpu_to_le32(period * (skip + 1));
+
+	slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
+	if (slp_itrvl > period)
+		cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
+			cpu_to_le32((slp_itrvl / period) * period);
+
+	if (skip)
+		cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
+	else
+		cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
+
+	if (priv->cfg->base_params->shadow_reg_enable)
+		cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
+	else
+		cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
+
+	if (iwl_advanced_bt_coexist(priv)) {
+		if (!priv->lib->bt_params->bt_sco_disable)
+			cmd->flags |= IWL_POWER_BT_SCO_ENA;
+		else
+			cmd->flags &= ~IWL_POWER_BT_SCO_ENA;
+	}
+
+
+	slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
+	if (slp_itrvl > IWL_CONN_MAX_LISTEN_INTERVAL)
+		cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
+			cpu_to_le32(IWL_CONN_MAX_LISTEN_INTERVAL);
+
+	/* enforce max sleep interval */
+	for (i = IWL_POWER_VEC_SIZE - 1; i >= 0 ; i--) {
+		if (le32_to_cpu(cmd->sleep_interval[i]) >
+		    (max_sleep[i] * period))
+			cmd->sleep_interval[i] =
+				cpu_to_le32(max_sleep[i] * period);
+		if (i != (IWL_POWER_VEC_SIZE - 1)) {
+			if (le32_to_cpu(cmd->sleep_interval[i]) >
+			    le32_to_cpu(cmd->sleep_interval[i+1]))
+				cmd->sleep_interval[i] =
+					cmd->sleep_interval[i+1];
+		}
+	}
+
+	if (priv->power_data.bus_pm)
+		cmd->flags |= IWL_POWER_PCI_PM_MSK;
+	else
+		cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
+
+	IWL_DEBUG_POWER(priv, "numSkipDtim = %u, dtimPeriod = %d\n",
+			skip, period);
+	/*
+	 * The power level here is 0-4 (used as an array index), but the
+	 * user expects to see 1-5 (per the spec).
+	 */
+	IWL_DEBUG_POWER(priv, "Sleep command for index %d\n", lvl + 1);
+}
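
The listen-interval slot (the last entry of the sleep vector) goes through
three adjustments above: the 0xFF placeholder is replaced by
period * (skip + 1), the result is rounded down to a multiple of the DTIM
period, and finally it is clamped to IWL_CONN_MAX_LISTEN_INTERVAL. A
condensed sketch of just that arithmetic (hypothetical helper, host byte
order for readability):

/* Compute the final listen-interval slot value, in beacon intervals. */
static unsigned int listen_interval(unsigned int period, unsigned int skip,
				    unsigned int slot, unsigned int max_li)
{
	if (slot == 0xFF)		/* placeholder: derive from DTIM */
		slot = period * (skip + 1);
	if (slot > period)		/* round down to a DTIM multiple */
		slot = (slot / period) * period;
	if (slot > max_li)		/* driver-wide upper bound */
		slot = max_li;
	return slot;
}

For example, DTIM period 3 with skip 1 turns the 0xFF placeholder into
3 * 2 = 6 beacon intervals; the per-slot max_sleep enforcement then runs
separately over the whole vector.
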
+
+static void iwl_power_sleep_cam_cmd(struct iwl_priv *priv,
+				    struct iwl_powertable_cmd *cmd)
+{
+	memset(cmd, 0, sizeof(*cmd));
+
+	if (priv->power_data.bus_pm)
+		cmd->flags |= IWL_POWER_PCI_PM_MSK;
+
+	IWL_DEBUG_POWER(priv, "Sleep command for CAM\n");
+}
+
+static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
+{
+	IWL_DEBUG_POWER(priv, "Sending power/sleep command\n");
+	IWL_DEBUG_POWER(priv, "Flags value = 0x%08X\n", cmd->flags);
+	IWL_DEBUG_POWER(priv, "Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
+	IWL_DEBUG_POWER(priv, "Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
+	IWL_DEBUG_POWER(priv, "Sleep interval vector = { %d, %d, %d, %d, %d }\n",
+			le32_to_cpu(cmd->sleep_interval[0]),
+			le32_to_cpu(cmd->sleep_interval[1]),
+			le32_to_cpu(cmd->sleep_interval[2]),
+			le32_to_cpu(cmd->sleep_interval[3]),
+			le32_to_cpu(cmd->sleep_interval[4]));
+
+	return iwl_dvm_send_cmd_pdu(priv, POWER_TABLE_CMD, 0,
+				sizeof(struct iwl_powertable_cmd), cmd);
+}
+
+static void iwl_power_build_cmd(struct iwl_priv *priv,
+				struct iwl_powertable_cmd *cmd)
+{
+	bool enabled = priv->hw->conf.flags & IEEE80211_CONF_PS;
+	int dtimper;
+
+	if (force_cam) {
+		iwl_power_sleep_cam_cmd(priv, cmd);
+		return;
+	}
+
+	dtimper = priv->hw->conf.ps_dtim_period ?: 1;
+
+	if (priv->wowlan)
+		iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper);
+	else if (!priv->lib->no_idle_support &&
+		 priv->hw->conf.flags & IEEE80211_CONF_IDLE)
+		iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20);
+	else if (iwl_tt_is_low_power_state(priv)) {
+		/* in thermal throttling low power state */
+		iwl_static_sleep_cmd(priv, cmd,
+		    iwl_tt_current_power_mode(priv), dtimper);
+	} else if (!enabled)
+		iwl_power_sleep_cam_cmd(priv, cmd);
+	else if (priv->power_data.debug_sleep_level_override >= 0)
+		iwl_static_sleep_cmd(priv, cmd,
+				     priv->power_data.debug_sleep_level_override,
+				     dtimper);
+	else {
+		/* Note that the user parameter is 1-5 (according to spec),
+		but we pass 0-4 because it acts as an array index. */
+		if (iwlwifi_mod_params.power_level > IWL_POWER_INDEX_1 &&
+		    iwlwifi_mod_params.power_level <= IWL_POWER_NUM)
+			iwl_static_sleep_cmd(priv, cmd,
+				iwlwifi_mod_params.power_level - 1, dtimper);
+		else
+			iwl_static_sleep_cmd(priv, cmd,
+				IWL_POWER_INDEX_1, dtimper);
+	}
+}
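
The final else branch hides an off-by-one worth noting: the module parameter
is the user-visible level 1-5, while the tables are indexed 0-4. A tiny
sketch of the mapping, assuming IWL_POWER_INDEX_1 == 0 and
IWL_POWER_NUM == 5 as the enum suggests:

/*
 * Map a user level 1..5 to an internal table index 0..4; anything out
 * of range falls back to the default level 1 (index 0).
 */
static int user_level_to_index(unsigned int level)
{
	return (level >= 1 && level <= 5) ? (int)(level - 1) : 0;
}
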
+
+int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
+		       bool force)
+{
+	int ret;
+	bool update_chains;
+
+	lockdep_assert_held(&priv->mutex);
+
+	/* Don't update the RX chain when chain noise calibration is running */
+	update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
+			priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
+
+	if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
+		return 0;
+
+	if (!iwl_is_ready_rf(priv))
+		return -EIO;
+
+	/* scan completion uses sleep_cmd_next, so it must always be updated */
+	memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
+	if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
+		IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n");
+		return 0;
+	}
+
+	if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
+		iwl_dvm_set_pmi(priv, true);
+
+	ret = iwl_set_power(priv, cmd);
+	if (!ret) {
+		if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
+			iwl_dvm_set_pmi(priv, false);
+
+		if (update_chains)
+			iwl_update_chain_flags(priv);
+		else
+			IWL_DEBUG_POWER(priv,
+					"Cannot update the power, chain noise "
+					"calibration running: %d\n",
+					priv->chain_noise_data.state);
+
+		memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd));
+	} else
+		IWL_ERR(priv, "set power failed, ret = %d\n", ret);
+
+	return ret;
+}
+
+int iwl_power_update_mode(struct iwl_priv *priv, bool force)
+{
+	struct iwl_powertable_cmd cmd;
+
+	iwl_power_build_cmd(priv, &cmd);
+	return iwl_power_set_mode(priv, &cmd, force);
+}
+
+/* initialize to default */
+void iwl_power_initialize(struct iwl_priv *priv)
+{
+	priv->power_data.bus_pm = priv->trans->pm_support;
+
+	priv->power_data.debug_sleep_level_override = -1;
+
+	memset(&priv->power_data.sleep_cmd, 0,
+		sizeof(priv->power_data.sleep_cmd));
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/power.h b/drivers/net/wireless/intel/iwlwifi/dvm/power.h
new file mode 100644
index 0000000..2fd9b43
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/power.h
@@ -0,0 +1,47 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+#ifndef __iwl_power_setting_h__
+#define __iwl_power_setting_h__
+
+#include "commands.h"
+
+struct iwl_power_mgr {
+	struct iwl_powertable_cmd sleep_cmd;
+	struct iwl_powertable_cmd sleep_cmd_next;
+	int debug_sleep_level_override;
+	bool bus_pm;
+};
+
+int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
+		       bool force);
+int iwl_power_update_mode(struct iwl_priv *priv, bool force);
+void iwl_power_initialize(struct iwl_priv *priv);
+
+extern bool no_sleep_autoadjust;
+
+#endif  /* __iwl_power_setting_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
new file mode 100644
index 0000000..98050d7
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
@@ -0,0 +1,3338 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+
+#include <linux/workqueue.h>
+
+#include "dev.h"
+#include "agn.h"
+
+#define RS_NAME "iwl-agn-rs"
+
+#define NUM_TRY_BEFORE_ANT_TOGGLE 1
+#define IWL_NUMBER_TRY      1
+#define IWL_HT_NUMBER_TRY   3
+
+#define IWL_RATE_MAX_WINDOW		62	/* # tx in history window */
+#define IWL_RATE_MIN_FAILURE_TH		6	/* min failures to calc tpt */
+#define IWL_RATE_MIN_SUCCESS_TH		8	/* min successes to calc tpt */
+
+/* max allowed rate miss before sync LQ cmd */
+#define IWL_MISSED_RATE_MAX		15
+/* max time to accumulate history: 3 seconds */
+#define IWL_RATE_SCALE_FLUSH_INTVL   (3*HZ)
+
+static u8 rs_ht_to_legacy[] = {
+	IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
+	IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
+	IWL_RATE_6M_INDEX,
+	IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX,
+	IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX,
+	IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX,
+	IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX
+};
+
+static const u8 ant_toggle_lookup[] = {
+	/*ANT_NONE -> */ ANT_NONE,
+	/*ANT_A    -> */ ANT_B,
+	/*ANT_B    -> */ ANT_C,
+	/*ANT_AB   -> */ ANT_BC,
+	/*ANT_C    -> */ ANT_A,
+	/*ANT_AC   -> */ ANT_AB,
+	/*ANT_BC   -> */ ANT_AC,
+	/*ANT_ABC  -> */ ANT_ABC,
+};
+
+#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np)    \
+	[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,      \
+				    IWL_RATE_SISO_##s##M_PLCP, \
+				    IWL_RATE_MIMO2_##s##M_PLCP,\
+				    IWL_RATE_MIMO3_##s##M_PLCP,\
+				    IWL_RATE_##r##M_IEEE,      \
+				    IWL_RATE_##ip##M_INDEX,    \
+				    IWL_RATE_##in##M_INDEX,    \
+				    IWL_RATE_##rp##M_INDEX,    \
+				    IWL_RATE_##rn##M_INDEX,    \
+				    IWL_RATE_##pp##M_INDEX,    \
+				    IWL_RATE_##np##M_INDEX }
+
+/*
+ * Parameter order:
+ *   rate, ht rate, prev index, next index, prev rate-scale index,
+ *   next rate-scale index, prev toggle index, next toggle index
+ *
+ * If there isn't a valid next or previous rate then INV is used, which
+ * maps to IWL_RATE_INVALID.
+ */
+const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = {
+	IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2),    /*  1mbps */
+	IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5),          /*  2mbps */
+	IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11),        /*5.5mbps */
+	IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18),      /* 11mbps */
+	IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11),        /*  6mbps */
+	IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11),       /*  9mbps */
+	IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18),   /* 12mbps */
+	IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24),   /* 18mbps */
+	IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36),   /* 24mbps */
+	IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48),   /* 36mbps */
+	IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54),   /* 48mbps */
+	IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
+	IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
+	/* FIXME:RS:          ^^    should be INV (legacy) */
+};
+
+static inline u8 rs_extract_rate(u32 rate_n_flags)
+{
+	return (u8)(rate_n_flags & RATE_MCS_RATE_MSK);
+}
+
+static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
+{
+	int idx = 0;
+
+	/* HT rate format */
+	if (rate_n_flags & RATE_MCS_HT_MSK) {
+		idx = rs_extract_rate(rate_n_flags);
+
+		if (idx >= IWL_RATE_MIMO3_6M_PLCP)
+			idx = idx - IWL_RATE_MIMO3_6M_PLCP;
+		else if (idx >= IWL_RATE_MIMO2_6M_PLCP)
+			idx = idx - IWL_RATE_MIMO2_6M_PLCP;
+
+		idx += IWL_FIRST_OFDM_RATE;
+		/* skip 9M, which is not supported in HT */
+		if (idx >= IWL_RATE_9M_INDEX)
+			idx += 1;
+		if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
+			return idx;
+
+	/* legacy rate format, search for match in table */
+	} else {
+		for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++)
+			if (iwl_rates[idx].plcp ==
+					rs_extract_rate(rate_n_flags))
+				return idx;
+	}
+
+	return -1;
+}
+
+static void rs_rate_scale_perform(struct iwl_priv *priv,
+				   struct sk_buff *skb,
+				   struct ieee80211_sta *sta,
+				   struct iwl_lq_sta *lq_sta);
+static void rs_fill_link_cmd(struct iwl_priv *priv,
+			     struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
+static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
+
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
+			     u32 *rate_n_flags, int index);
+#else
+static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
+			     u32 *rate_n_flags, int index)
+{}
+#endif
+
+/*
+ * The following tables contain the expected throughput metrics for all rates
+ *
+ *	1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
+ *
+ * where invalid entries are zeros.
+ *
+ * CCK rates are only valid in the legacy table and will only be used in
+ * the 2.4 GHz (G) band.
+ */
+
+static const u16 expected_tpt_legacy[IWL_RATE_COUNT] = {
+	7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
+};
+
+static const u16 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
+	{0, 0, 0, 0, 42, 0,  76, 102, 124, 159, 183, 193, 202}, /* Norm */
+	{0, 0, 0, 0, 46, 0,  82, 110, 132, 168, 192, 202, 210}, /* SGI */
+	{0, 0, 0, 0, 47, 0,  91, 133, 171, 242, 305, 334, 362}, /* AGG */
+	{0, 0, 0, 0, 52, 0, 101, 145, 187, 264, 330, 361, 390}, /* AGG+SGI */
+};
+
+static const u16 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
+	{0, 0, 0, 0,  77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
+	{0, 0, 0, 0,  83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
+	{0, 0, 0, 0,  94, 0, 177, 249, 313, 423, 512, 550, 586}, /* AGG */
+	{0, 0, 0, 0, 104, 0, 193, 270, 338, 454, 545, 584, 620}, /* AGG+SGI */
+};
+
+static const u16 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
+	{0, 0, 0, 0,  74, 0, 123, 155, 179, 214, 236, 244, 251}, /* Norm */
+	{0, 0, 0, 0,  81, 0, 131, 164, 188, 223, 243, 251, 257}, /* SGI */
+	{0, 0, 0, 0,  89, 0, 167, 235, 296, 402, 488, 526, 560}, /* AGG */
+	{0, 0, 0, 0,  97, 0, 182, 255, 320, 431, 520, 558, 593}, /* AGG+SGI*/
+};
+
+static const u16 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
+	{0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
+	{0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
+	{0, 0, 0, 0, 171, 0, 305, 410, 496, 634, 731, 771, 805}, /* AGG */
+	{0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */
+};
+
+static const u16 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
+	{0, 0, 0, 0,  99, 0, 153, 186, 208, 239, 256, 263, 268}, /* Norm */
+	{0, 0, 0, 0, 106, 0, 162, 194, 215, 246, 262, 268, 273}, /* SGI */
+	{0, 0, 0, 0, 134, 0, 249, 346, 431, 574, 685, 732, 775}, /* AGG */
+	{0, 0, 0, 0, 148, 0, 272, 376, 465, 614, 727, 775, 818}, /* AGG+SGI */
+};
+
+static const u16 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
+	{0, 0, 0, 0, 152, 0, 211, 239, 255, 279,  290,  294,  297}, /* Norm */
+	{0, 0, 0, 0, 160, 0, 219, 245, 261, 284,  294,  297,  300}, /* SGI */
+	{0, 0, 0, 0, 254, 0, 443, 584, 695, 868,  984, 1030, 1070}, /* AGG */
+	{0, 0, 0, 0, 277, 0, 478, 624, 737, 911, 1026, 1070, 1109}, /* AGG+SGI */
+};
+
+/* mbps, mcs */
+static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
+	{  "1", "BPSK DSSS"},
+	{  "2", "QPSK DSSS"},
+	{"5.5", "BPSK CCK"},
+	{ "11", "QPSK CCK"},
+	{  "6", "BPSK 1/2"},
+	{  "9", "BPSK 1/2"},
+	{ "12", "QPSK 1/2"},
+	{ "18", "QPSK 3/4"},
+	{ "24", "16QAM 1/2"},
+	{ "36", "16QAM 3/4"},
+	{ "48", "64QAM 2/3"},
+	{ "54", "64QAM 3/4"},
+	{ "60", "64QAM 5/6"},
+};
+
+#define MCS_INDEX_PER_STREAM	(8)
+
+static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
+{
+	window->data = 0;
+	window->success_counter = 0;
+	window->success_ratio = IWL_INVALID_VALUE;
+	window->counter = 0;
+	window->average_tpt = IWL_INVALID_VALUE;
+	window->stamp = 0;
+}
+
+static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
+{
+	return (ant_type & valid_antenna) == ant_type;
+}
+
+/*
+ * Remove old data from the statistics: all data older than
+ * TID_MAX_TIME_DIFF is deleted.
+ */
+static void rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time)
+{
+	/* The oldest age we want to keep */
+	u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
+
+	while (tl->queue_count &&
+	       (tl->time_stamp < oldest_time)) {
+		tl->total -= tl->packet_count[tl->head];
+		tl->packet_count[tl->head] = 0;
+		tl->time_stamp += TID_QUEUE_CELL_SPACING;
+		tl->queue_count--;
+		tl->head++;
+		if (tl->head >= TID_QUEUE_MAX_SIZE)
+			tl->head = 0;
+	}
+}
+
+/*
+ * Increment the traffic load value for this TID and also remove
+ * any old values that have aged past the tracking window.
+ */
+static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
+			   struct ieee80211_hdr *hdr)
+{
+	u32 curr_time = jiffies_to_msecs(jiffies);
+	u32 time_diff;
+	s32 index;
+	struct iwl_traffic_load *tl = NULL;
+	u8 tid;
+
+	if (ieee80211_is_data_qos(hdr->frame_control)) {
+		u8 *qc = ieee80211_get_qos_ctl(hdr);
+		tid = qc[0] & 0xf;
+	} else
+		return IWL_MAX_TID_COUNT;
+
+	if (unlikely(tid >= IWL_MAX_TID_COUNT))
+		return IWL_MAX_TID_COUNT;
+
+	tl = &lq_data->load[tid];
+
+	curr_time -= curr_time % TID_ROUND_VALUE;
+
+	/* Happens only for the first packet. Initialize the data */
+	if (!(tl->queue_count)) {
+		tl->total = 1;
+		tl->time_stamp = curr_time;
+		tl->queue_count = 1;
+		tl->head = 0;
+		tl->packet_count[0] = 1;
+		return IWL_MAX_TID_COUNT;
+	}
+
+	time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
+	index = time_diff / TID_QUEUE_CELL_SPACING;
+
+	/* The history is too long: remove data that is older than */
+	/* TID_MAX_TIME_DIFF */
+	if (index >= TID_QUEUE_MAX_SIZE)
+		rs_tl_rm_old_stats(tl, curr_time);
+
+	index = (tl->head + index) % TID_QUEUE_MAX_SIZE;
+	tl->packet_count[index] = tl->packet_count[index] + 1;
+	tl->total = tl->total + 1;
+
+	if ((index + 1) > tl->queue_count)
+		tl->queue_count = index + 1;
+
+	return tid;
+}
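
rs_tl_add_packet() above implements a time-bucketed sliding window: packets
land in a ring of buckets TID_QUEUE_CELL_SPACING ms wide, and buckets older
than the window are retired by advancing the head. A self-contained sketch
of the same bookkeeping, with assumed constants and a zero-initialized
struct:

#define CELLS		8	/* ring size (assumed) */
#define SPACING_MS	1000	/* bucket width in ms (assumed) */

struct load {
	unsigned int bucket[CELLS];	/* packets per time cell */
	unsigned int head;		/* index of the oldest bucket */
	unsigned long start_ms;		/* timestamp of the oldest bucket */
	unsigned int used;		/* buckets currently spanned */
	unsigned int total;		/* packets across the window */
};

static void add_packet(struct load *l, unsigned long now_ms)
{
	unsigned long off;

	/* retire buckets that have fallen out of the window */
	while (l->used && (now_ms - l->start_ms) / SPACING_MS >= CELLS) {
		l->total -= l->bucket[l->head];
		l->bucket[l->head] = 0;
		l->start_ms += SPACING_MS;
		l->head = (l->head + 1) % CELLS;
		l->used--;
	}

	if (!l->used) {		/* first packet, or everything expired */
		l->start_ms = now_ms;
		l->head = 0;
		l->used = 1;
		l->bucket[0] = 1;
		l->total = 1;
		return;
	}

	off = (now_ms - l->start_ms) / SPACING_MS;
	l->bucket[(l->head + off) % CELLS]++;
	l->total++;
	if (off + 1 > l->used)
		l->used = off + 1;
}
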
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+/*
+ * Program the device to use a fixed rate for frame transmission.
+ * This is for debugging/testing only; once the device starts using
+ * a fixed rate, the module must be reloaded to restore normal
+ * operation.
+ */
+static void rs_program_fix_rate(struct iwl_priv *priv,
+				struct iwl_lq_sta *lq_sta)
+{
+	struct iwl_station_priv *sta_priv =
+		container_of(lq_sta, struct iwl_station_priv, lq_sta);
+	struct iwl_rxon_context *ctx = sta_priv->ctx;
+
+	lq_sta->active_legacy_rate = 0x0FFF;	/* 1 - 54 MBits, includes CCK */
+	lq_sta->active_siso_rate   = 0x1FD0;	/* 6 - 60 MBits, no 9, no CCK */
+	lq_sta->active_mimo2_rate  = 0x1FD0;	/* 6 - 60 MBits, no 9, no CCK */
+	lq_sta->active_mimo3_rate  = 0x1FD0;	/* 6 - 60 MBits, no 9, no CCK */
+
+	IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n",
+		lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
+
+	if (lq_sta->dbg_fixed_rate) {
+		rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
+		iwl_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC,
+				false);
+	}
+}
+#endif
+
+/* refresh the traffic load statistics for this TID, pruning stale data */
+static void rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
+{
+	u32 curr_time = jiffies_to_msecs(jiffies);
+	u32 time_diff;
+	s32 index;
+	struct iwl_traffic_load *tl = NULL;
+
+	if (tid >= IWL_MAX_TID_COUNT)
+		return;
+
+	tl = &(lq_data->load[tid]);
+
+	curr_time -= curr_time % TID_ROUND_VALUE;
+
+	if (!(tl->queue_count))
+		return;
+
+	time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
+	index = time_diff / TID_QUEUE_CELL_SPACING;
+
+	/* The history is too long: remove data that is older than */
+	/* TID_MAX_TIME_DIFF */
+	if (index >= TID_QUEUE_MAX_SIZE)
+		rs_tl_rm_old_stats(tl, curr_time);
+}
+
+static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
+				      struct iwl_lq_sta *lq_data, u8 tid,
+				      struct ieee80211_sta *sta)
+{
+	int ret = -EAGAIN;
+
+	/*
+	 * Don't create TX aggregation sessions when in high
+	 * BT traffic, as they would just be disrupted by BT.
+	 */
+	if (priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) {
+		IWL_DEBUG_COEX(priv,
+			       "BT traffic (%d), no aggregation allowed\n",
+			       priv->bt_traffic_load);
+		return ret;
+	}
+
+	rs_tl_get_load(lq_data, tid);
+
+	IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
+			sta->addr, tid);
+	ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
+	if (ret == -EAGAIN) {
+		/*
+		 * The driver and mac80211 are out of sync; this can be
+		 * caused by a firmware reload. Stop the Tx BA session here.
+		 */
+		IWL_ERR(priv, "Failed to start Tx agg on tid %d\n", tid);
+		ieee80211_stop_tx_ba_session(sta, tid);
+	}
+	return ret;
+}
+
+static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
+			      struct iwl_lq_sta *lq_data,
+			      struct ieee80211_sta *sta)
+{
+	if (tid < IWL_MAX_TID_COUNT)
+		rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
+	else
+		IWL_ERR(priv, "tid exceeds max TID count: %d/%d\n",
+			tid, IWL_MAX_TID_COUNT);
+}
+
+static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
+{
+	return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
+	       !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
+	       !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
+}
+
+/*
+ * Get the expected throughput for a rate from an iwl_scale_tbl_info,
+ * with a NULL-pointer check on the table's throughput array.
+ */
+static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
+{
+	if (tbl->expected_tpt)
+		return tbl->expected_tpt[rs_index];
+	return 0;
+}
+
+/**
+ * rs_collect_tx_data - Update the success/failure sliding window
+ *
+ * We keep a sliding window of the last 62 packets transmitted
+ * at this rate.  window->data contains the bitmask of successful
+ * packets.
+ */
+static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
+			      int scale_index, int attempts, int successes)
+{
+	struct iwl_rate_scale_data *window = NULL;
+	static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
+	s32 fail_count, tpt;
+
+	if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
+		return -EINVAL;
+
+	/* Select window for current tx bit rate */
+	window = &(tbl->win[scale_index]);
+
+	/* Get expected throughput */
+	tpt = get_expected_tpt(tbl, scale_index);
+
+	/*
+	 * Keep track of only the latest 62 tx frame attempts in this rate's
+	 * history window; anything older isn't really relevant any more.
+	 * If we have filled up the sliding window, drop the oldest attempt;
+	 * if the oldest attempt (highest bit in bitmap) shows "success",
+	 * subtract "1" from the success counter (this is the main reason
+	 * we keep these bitmaps!).
+	 */
+	while (attempts > 0) {
+		if (window->counter >= IWL_RATE_MAX_WINDOW) {
+
+			/* remove earliest */
+			window->counter = IWL_RATE_MAX_WINDOW - 1;
+
+			if (window->data & mask) {
+				window->data &= ~mask;
+				window->success_counter--;
+			}
+		}
+
+		/* Increment frames-attempted counter */
+		window->counter++;
+
+		/* Shift bitmap by one frame to throw away oldest history */
+		window->data <<= 1;
+
+		/* Mark the most recent #successes attempts as successful */
+		if (successes > 0) {
+			window->success_counter++;
+			window->data |= 0x1;
+			successes--;
+		}
+
+		attempts--;
+	}
+
+	/* Calculate current success ratio, avoid divide-by-0! */
+	if (window->counter > 0)
+		window->success_ratio = 128 * (100 * window->success_counter)
+					/ window->counter;
+	else
+		window->success_ratio = IWL_INVALID_VALUE;
+
+	fail_count = window->counter - window->success_counter;
+
+	/* Calculate average throughput, if we have enough history. */
+	if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
+	    (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
+		window->average_tpt = (window->success_ratio * tpt + 64) / 128;
+	else
+		window->average_tpt = IWL_INVALID_VALUE;
+
+	/* Tag this window as having been updated */
+	window->stamp = jiffies;
+
+	return 0;
+}
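
The window above is a 62-bit success bitmap plus counters: each attempt
shifts the bitmap left, the newest bit records success, and when the window
is already full the oldest bit is retired first so success_counter stays in
step with the bitmap. The ratio is kept in units of 1/128 of a percent to
stay in integer arithmetic. A self-contained sketch of one recorded attempt
(illustrative struct, not the driver's):

#include <stdint.h>

#define WINDOW	62	/* matches IWL_RATE_MAX_WINDOW */

struct win {
	uint64_t data;		/* success bitmap, newest attempt = bit 0 */
	int counter;		/* attempts recorded, capped at WINDOW */
	int success_counter;	/* successes among those attempts */
};

static void record_attempt(struct win *w, int success)
{
	const uint64_t oldest = 1ULL << (WINDOW - 1);

	if (w->counter >= WINDOW) {	/* full: retire the oldest bit */
		w->counter = WINDOW - 1;
		if (w->data & oldest) {
			w->data &= ~oldest;
			w->success_counter--;
		}
	}
	w->counter++;
	w->data <<= 1;
	if (success) {
		w->data |= 1;
		w->success_counter++;
	}
}

/* Success ratio in units of 1/128 percent, the driver's fixed-point scale. */
static int success_ratio_128(const struct win *w)
{
	return w->counter ? 128 * (100 * w->success_counter) / w->counter
			  : -1;
}
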
+
+/*
+ * Fill uCode API rate_n_flags field, based on "search" or "active" table.
+ */
+/* FIXME:RS:remove this function and put the flags statically in the table */
+static u32 rate_n_flags_from_tbl(struct iwl_priv *priv,
+				 struct iwl_scale_tbl_info *tbl,
+				 int index, u8 use_green)
+{
+	u32 rate_n_flags = 0;
+
+	if (is_legacy(tbl->lq_type)) {
+		rate_n_flags = iwl_rates[index].plcp;
+		if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
+			rate_n_flags |= RATE_MCS_CCK_MSK;
+
+	} else if (is_Ht(tbl->lq_type)) {
+		if (index > IWL_LAST_OFDM_RATE) {
+			IWL_ERR(priv, "Invalid HT rate index %d\n", index);
+			index = IWL_LAST_OFDM_RATE;
+		}
+		rate_n_flags = RATE_MCS_HT_MSK;
+
+		if (is_siso(tbl->lq_type))
+			rate_n_flags |=	iwl_rates[index].plcp_siso;
+		else if (is_mimo2(tbl->lq_type))
+			rate_n_flags |=	iwl_rates[index].plcp_mimo2;
+		else
+			rate_n_flags |=	iwl_rates[index].plcp_mimo3;
+	} else {
+		IWL_ERR(priv, "Invalid tbl->lq_type %d\n", tbl->lq_type);
+	}
+
+	rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
+						     RATE_MCS_ANT_ABC_MSK);
+
+	if (is_Ht(tbl->lq_type)) {
+		if (tbl->is_ht40) {
+			if (tbl->is_dup)
+				rate_n_flags |= RATE_MCS_DUP_MSK;
+			else
+				rate_n_flags |= RATE_MCS_HT40_MSK;
+		}
+		if (tbl->is_SGI)
+			rate_n_flags |= RATE_MCS_SGI_MSK;
+
+		if (use_green) {
+			rate_n_flags |= RATE_MCS_GF_MSK;
+			if (is_siso(tbl->lq_type) && tbl->is_SGI) {
+				rate_n_flags &= ~RATE_MCS_SGI_MSK;
+				IWL_ERR(priv, "GF was set with SGI:SISO\n");
+			}
+		}
+	}
+	return rate_n_flags;
+}
+
+/*
+ * Interpret uCode API's rate_n_flags format,
+ * fill "search" or "active" tx mode table.
+ */
+static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
+				    enum nl80211_band band,
+				    struct iwl_scale_tbl_info *tbl,
+				    int *rate_idx)
+{
+	u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
+	u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags);
+	u8 mcs;
+
+	memset(tbl, 0, sizeof(struct iwl_scale_tbl_info));
+	*rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
+
+	if (*rate_idx  == IWL_RATE_INVALID) {
+		*rate_idx = -1;
+		return -EINVAL;
+	}
+	tbl->is_SGI = 0;	/* default legacy setup */
+	tbl->is_ht40 = 0;
+	tbl->is_dup = 0;
+	tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
+	tbl->lq_type = LQ_NONE;
+	tbl->max_search = IWL_MAX_SEARCH;
+
+	/* legacy rate format */
+	if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
+		if (num_of_ant == 1) {
+			if (band == NL80211_BAND_5GHZ)
+				tbl->lq_type = LQ_A;
+			else
+				tbl->lq_type = LQ_G;
+		}
+	/* HT rate format */
+	} else {
+		if (rate_n_flags & RATE_MCS_SGI_MSK)
+			tbl->is_SGI = 1;
+
+		if ((rate_n_flags & RATE_MCS_HT40_MSK) ||
+		    (rate_n_flags & RATE_MCS_DUP_MSK))
+			tbl->is_ht40 = 1;
+
+		if (rate_n_flags & RATE_MCS_DUP_MSK)
+			tbl->is_dup = 1;
+
+		mcs = rs_extract_rate(rate_n_flags);
+
+		/* SISO */
+		if (mcs <= IWL_RATE_SISO_60M_PLCP) {
+			if (num_of_ant == 1)
+				tbl->lq_type = LQ_SISO; /*else NONE*/
+		/* MIMO2 */
+		} else if (mcs <= IWL_RATE_MIMO2_60M_PLCP) {
+			if (num_of_ant == 2)
+				tbl->lq_type = LQ_MIMO2;
+		/* MIMO3 */
+		} else {
+			if (num_of_ant == 3) {
+				tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
+				tbl->lq_type = LQ_MIMO3;
+			}
+		}
+	}
+	return 0;
+}
+
+/* switch to another antenna/antennas and return 1 */
+/* if no other valid antenna found, return 0 */
+static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
+			     struct iwl_scale_tbl_info *tbl)
+{
+	u8 new_ant_type;
+
+	if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
+		return 0;
+
+	if (!rs_is_valid_ant(valid_ant, tbl->ant_type))
+		return 0;
+
+	new_ant_type = ant_toggle_lookup[tbl->ant_type];
+
+	while ((new_ant_type != tbl->ant_type) &&
+	       !rs_is_valid_ant(valid_ant, new_ant_type))
+		new_ant_type = ant_toggle_lookup[new_ant_type];
+
+	if (new_ant_type == tbl->ant_type)
+		return 0;
+
+	tbl->ant_type = new_ant_type;
+	*rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
+	*rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
+	return 1;
+}
+
+/**
+ * Green-field mode is valid if the station supports it and
+ * there are no non-GF stations present in the BSS.
+ */
+static bool rs_use_green(struct ieee80211_sta *sta)
+{
+	/*
+	 * There's a bug somewhere in this code that causes the
+	 * scaling to get stuck because GF+SGI can't be combined
+	 * in SISO rates. Until we find that bug, disable GF, it
+	 * has only limited benefit and we still interoperate with
+	 * GF APs since we can always receive GF transmissions.
+	 */
+	return false;
+}
+
+/**
+ * rs_get_supported_rates - get the available rates
+ *
+ * For management or broadcast frames, only the basic available rates
+ * are returned.
+ */
+static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
+				  struct ieee80211_hdr *hdr,
+				  enum iwl_table_type rate_type)
+{
+	if (is_legacy(rate_type)) {
+		return lq_sta->active_legacy_rate;
+	} else {
+		if (is_siso(rate_type))
+			return lq_sta->active_siso_rate;
+		else if (is_mimo2(rate_type))
+			return lq_sta->active_mimo2_rate;
+		else
+			return lq_sta->active_mimo3_rate;
+	}
+}
+
+static u16 rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask,
+				int rate_type)
+{
+	u8 high = IWL_RATE_INVALID;
+	u8 low = IWL_RATE_INVALID;
+
+	/* 802.11a or HT walks to the next literally adjacent rate in
+	 * the rate table */
+	if (is_a_band(rate_type) || !is_legacy(rate_type)) {
+		int i;
+		u32 mask;
+
+		/* Find the previous rate that is in the rate mask */
+		i = index - 1;
+		if (i >= 0)
+			mask = BIT(i);
+
+		for (; i >= 0; i--, mask >>= 1) {
+			if (rate_mask & mask) {
+				low = i;
+				break;
+			}
+		}
+
+		/* Find the next rate that is in the rate mask */
+		i = index + 1;
+		for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) {
+			if (rate_mask & mask) {
+				high = i;
+				break;
+			}
+		}
+
+		return (high << 8) | low;
+	}
+
+	low = index;
+	while (low != IWL_RATE_INVALID) {
+		low = iwl_rates[low].prev_rs;
+		if (low == IWL_RATE_INVALID)
+			break;
+		if (rate_mask & (1 << low))
+			break;
+		IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
+	}
+
+	high = index;
+	while (high != IWL_RATE_INVALID) {
+		high = iwl_rates[high].next_rs;
+		if (high == IWL_RATE_INVALID)
+			break;
+		if (rate_mask & (1 << high))
+			break;
+		IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
+	}
+
+	return (high << 8) | low;
+}
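
rs_get_adjacent_rate() packs both neighbours into a single u16: the
next-higher valid rate index goes in the top byte and the next-lower in the
bottom byte, with IWL_RATE_INVALID in either byte when no such neighbour
exists. Callers unpack it as in this sketch (rs_get_lower_rate below does
exactly this for the low byte):

	u16 high_low = rs_get_adjacent_rate(priv, index, rate_mask, type);
	u8 low  = high_low & 0xff;	/* next valid lower rate index */
	u8 high = high_low >> 8;	/* next valid higher rate index */
	/* either byte may be IWL_RATE_INVALID when no neighbour exists */
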
+
+static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
+			     struct iwl_scale_tbl_info *tbl,
+			     u8 scale_index, u8 ht_possible)
+{
+	s32 low;
+	u16 rate_mask;
+	u16 high_low;
+	u8 switch_to_legacy = 0;
+	u8 is_green = lq_sta->is_green;
+	struct iwl_priv *priv = lq_sta->drv;
+
+	/* Check if we need to switch from HT to legacy rates.
+	 * The assumption is that mandatory rates (1 Mbps or 6 Mbps)
+	 * are always supported (the spec demands it). */
+	if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
+		switch_to_legacy = 1;
+		scale_index = rs_ht_to_legacy[scale_index];
+		if (lq_sta->band == NL80211_BAND_5GHZ)
+			tbl->lq_type = LQ_A;
+		else
+			tbl->lq_type = LQ_G;
+
+		if (num_of_ant(tbl->ant_type) > 1)
+			tbl->ant_type =
+			    first_antenna(priv->nvm_data->valid_tx_ant);
+
+		tbl->is_ht40 = 0;
+		tbl->is_SGI = 0;
+		tbl->max_search = IWL_MAX_SEARCH;
+	}
+
+	rate_mask = rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
+
+	/* Mask with station rate restriction */
+	if (is_legacy(tbl->lq_type)) {
+		/* supp_rates has no CCK bits in A mode */
+		if (lq_sta->band == NL80211_BAND_5GHZ)
+			rate_mask  = (u16)(rate_mask &
+			   (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
+		else
+			rate_mask = (u16)(rate_mask & lq_sta->supp_rates);
+	}
+
+	/* If we switched from HT to legacy, check current rate */
+	if (switch_to_legacy && (rate_mask & (1 << scale_index))) {
+		low = scale_index;
+		goto out;
+	}
+
+	high_low = rs_get_adjacent_rate(lq_sta->drv, scale_index, rate_mask,
+					tbl->lq_type);
+	low = high_low & 0xff;
+
+	if (low == IWL_RATE_INVALID)
+		low = scale_index;
+
+out:
+	return rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
+}
+
+/*
+ * Simple function to compare two rate scale table types
+ */
+static bool table_type_matches(struct iwl_scale_tbl_info *a,
+			       struct iwl_scale_tbl_info *b)
+{
+	return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
+		(a->is_SGI == b->is_SGI);
+}
+
+static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+			    struct iwl_lq_sta *lq_sta)
+{
+	struct iwl_scale_tbl_info *tbl;
+	bool full_concurrent = priv->bt_full_concurrent;
+
+	if (priv->bt_ant_couple_ok) {
+		/*
+		 * Is there a need to switch between
+		 * full concurrency and 3-wire?
+		 */
+		if (priv->bt_ci_compliance && priv->bt_ant_couple_ok)
+			full_concurrent = true;
+		else
+			full_concurrent = false;
+	}
+	if ((priv->bt_traffic_load != priv->last_bt_traffic_load) ||
+	    (priv->bt_full_concurrent != full_concurrent)) {
+		priv->bt_full_concurrent = full_concurrent;
+		priv->last_bt_traffic_load = priv->bt_traffic_load;
+
+		/* Update uCode's rate table. */
+		tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+		rs_fill_link_cmd(priv, lq_sta, tbl->current_rate);
+		iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
+
+		queue_work(priv->workqueue, &priv->bt_full_concurrency);
+	}
+}
+
+/*
+ * mac80211 sends us Tx status
+ */
+static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
+			 struct ieee80211_sta *sta, void *priv_sta,
+			 struct sk_buff *skb)
+{
+	int legacy_success;
+	int retries;
+	int rs_index, mac_index, i;
+	struct iwl_lq_sta *lq_sta = priv_sta;
+	struct iwl_link_quality_cmd *table;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct iwl_op_mode *op_mode = (struct iwl_op_mode *)priv_r;
+	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	enum mac80211_rate_control_flags mac_flags;
+	u32 tx_rate;
+	struct iwl_scale_tbl_info tbl_type;
+	struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
+	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+	struct iwl_rxon_context *ctx = sta_priv->ctx;
+
+	IWL_DEBUG_RATE_LIMIT(priv,
+			     "get frame ack response, update rate scale window\n");
+
+	/* Treat uninitialized rate scaling data the same as non-existent. */
+	if (!lq_sta) {
+		IWL_DEBUG_RATE(priv, "Station rate scaling not created yet.\n");
+		return;
+	} else if (!lq_sta->drv) {
+		IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
+		return;
+	}
+
+	if (!ieee80211_is_data(hdr->frame_control) ||
+	    info->flags & IEEE80211_TX_CTL_NO_ACK)
+		return;
+
+	/* This packet was aggregated but doesn't carry status info */
+	if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
+	    !(info->flags & IEEE80211_TX_STAT_AMPDU))
+		return;
+
+	/*
+	 * Ignore this Tx frame response if its initial rate doesn't match
+	 * that of latest Link Quality command.  There may be stragglers
+	 * from a previous Link Quality command, but we're no longer interested
+	 * in those; they're either from the "active" mode while we're trying
+	 * to check "search" mode, or a prior "search" mode after we've moved
+	 * to a new "search" mode (which might become the new "active" mode).
+	 */
+	table = &lq_sta->lq;
+	tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
+	rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, &rs_index);
+	if (priv->band == NL80211_BAND_5GHZ)
+		rs_index -= IWL_FIRST_OFDM_RATE;
+	mac_flags = info->status.rates[0].flags;
+	mac_index = info->status.rates[0].idx;
+	/* For HT packets, map MCS to PLCP */
+	if (mac_flags & IEEE80211_TX_RC_MCS) {
+		mac_index &= RATE_MCS_CODE_MSK;	/* Remove # of streams */
+		if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
+			mac_index++;
+		/*
+		 * mac80211 HT index is always zero-indexed; we need to move
+		 * HT OFDM rates after CCK rates in 2.4 GHz band
+		 */
+		if (priv->band == NL80211_BAND_2GHZ)
+			mac_index += IWL_FIRST_OFDM_RATE;
+	}
+	/* Here we actually compare this rate to the latest LQ command */
+	if ((mac_index < 0) ||
+	    (tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
+	    (tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
+	    (tbl_type.is_dup != !!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) ||
+	    (tbl_type.ant_type != info->status.antenna) ||
+	    (!!(tx_rate & RATE_MCS_HT_MSK) != !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
+	    (!!(tx_rate & RATE_MCS_GF_MSK) != !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
+	    (rs_index != mac_index)) {
+		IWL_DEBUG_RATE(priv,
+			       "initial rate %d does not match %d (0x%x)\n",
+			       mac_index, rs_index, tx_rate);
+		/*
+		 * Since the rates mis-match, the last LQ command may have
+		 * failed. After IWL_MISSED_RATE_MAX mis-matches, resync the
+		 * uCode with the driver.
+		 */
+		lq_sta->missed_rate_counter++;
+		if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
+			lq_sta->missed_rate_counter = 0;
+			iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
+		}
+		/* Regardless, ignore this status info for outdated rate */
+		return;
+	} else
+		/* Rate did match, so reset the missed_rate_counter */
+		lq_sta->missed_rate_counter = 0;
+
+	/* Figure out if rate scale algorithm is in active or search table */
+	if (table_type_matches(&tbl_type,
+				&(lq_sta->lq_info[lq_sta->active_tbl]))) {
+		curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+		other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+	} else if (table_type_matches(&tbl_type,
+				&lq_sta->lq_info[1 - lq_sta->active_tbl])) {
+		curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+		other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+	} else {
+		IWL_DEBUG_RATE(priv, "Neither active nor search matches tx rate\n");
+		tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+		IWL_DEBUG_RATE(priv, "active- lq:%x, ant:%x, SGI:%d\n",
+			tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
+		tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+		IWL_DEBUG_RATE(priv, "search- lq:%x, ant:%x, SGI:%d\n",
+			tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
+		IWL_DEBUG_RATE(priv, "actual- lq:%x, ant:%x, SGI:%d\n",
+			tbl_type.lq_type, tbl_type.ant_type, tbl_type.is_SGI);
+		/*
+		 * No matching table found; bypass the data collection and
+		 * continue to perform rate scaling to find the rate table.
+		 */
+		rs_stay_in_table(lq_sta, true);
+		goto done;
+	}
+
+	/*
+	 * Updating the frame history depends on whether packets were
+	 * aggregated.
+	 *
+	 * For aggregation, all packets were transmitted at the same rate, the
+	 * first index into rate scale table.
+	 */
+	if (info->flags & IEEE80211_TX_STAT_AMPDU) {
+		tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
+		rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
+				&rs_index);
+		rs_collect_tx_data(curr_tbl, rs_index,
+				   info->status.ampdu_len,
+				   info->status.ampdu_ack_len);
+
+		/* Update success/fail counts if not searching for new mode */
+		if (lq_sta->stay_in_tbl) {
+			lq_sta->total_success += info->status.ampdu_ack_len;
+			lq_sta->total_failed += (info->status.ampdu_len -
+					info->status.ampdu_ack_len);
+		}
+	} else {
+		/*
+		 * For legacy, update the frame history for each Tx retry.
+		 */
+		retries = info->status.rates[0].count - 1;
+		/* HW doesn't send more than 15 retries */
+		retries = min(retries, 15);
+
+		/* The last transmission may have been successful */
+		legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
+		/* Collect data for each rate used during failed TX attempts */
+		for (i = 0; i <= retries; ++i) {
+			tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
+			rs_get_tbl_info_from_mcs(tx_rate, priv->band,
+					&tbl_type, &rs_index);
+			/*
+			 * Only collect stats if retried rate is in the same RS
+			 * table as active/search.
+			 */
+			if (table_type_matches(&tbl_type, curr_tbl))
+				tmp_tbl = curr_tbl;
+			else if (table_type_matches(&tbl_type, other_tbl))
+				tmp_tbl = other_tbl;
+			else
+				continue;
+			rs_collect_tx_data(tmp_tbl, rs_index, 1,
+					   i < retries ? 0 : legacy_success);
+		}
+
+		/* Update success/fail counts if not searching for new mode */
+		if (lq_sta->stay_in_tbl) {
+			lq_sta->total_success += legacy_success;
+			lq_sta->total_failed += retries + (1 - legacy_success);
+		}
+	}
+	/* The last TX rate is cached in lq_sta; it's set in if/else above */
+	lq_sta->last_rate_n_flags = tx_rate;
+done:
+	/* See if there's a better rate or modulation mode to try. */
+	if (sta && sta->supp_rates[sband->band])
+		rs_rate_scale_perform(priv, skb, sta, lq_sta);
+
+	if (priv->lib->bt_params && priv->lib->bt_params->advanced_bt_coexist)
+		rs_bt_update_lq(priv, ctx, lq_sta);
+}
+
+/*
+ * Begin a period of staying with a selected modulation mode.
+ * Set "stay_in_tbl" flag to prevent any mode switches.
+ * Set frame tx success limits according to legacy vs. high-throughput,
+ * and reset overall (spanning all rates) tx success history statistics.
+ * These control how long we stay using the same modulation mode before
+ * searching for a new mode.
+ */
+static void rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
+				 struct iwl_lq_sta *lq_sta)
+{
+	IWL_DEBUG_RATE(priv, "we are staying in the same table\n");
+	lq_sta->stay_in_tbl = 1;	/* only place this gets set */
+	if (is_legacy) {
+		lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT;
+		lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT;
+		lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT;
+	} else {
+		lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT;
+		lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT;
+		lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT;
+	}
+	lq_sta->table_count = 0;
+	lq_sta->total_failed = 0;
+	lq_sta->total_success = 0;
+	lq_sta->flush_timer = jiffies;
+	lq_sta->action_counter = 0;
+}
+
+/*
+ * Find correct throughput table for given mode of modulation
+ */
+static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
+				      struct iwl_scale_tbl_info *tbl)
+{
+	/* Used to choose among HT tables */
+	const u16 (*ht_tbl_pointer)[IWL_RATE_COUNT];
+
+	/* Check for invalid LQ type */
+	if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
+		tbl->expected_tpt = expected_tpt_legacy;
+		return;
+	}
+
+	/* Legacy rates have only one table */
+	if (is_legacy(tbl->lq_type)) {
+		tbl->expected_tpt = expected_tpt_legacy;
+		return;
+	}
+
+	/* Choose among many HT tables depending on number of streams
+	 * (SISO/MIMO2/MIMO3), channel width (20/40), SGI, and aggregation
+	 * status */
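+	/*
+	 * E.g. a SISO, HT40, short-guard-interval, non-aggregated setup
+	 * selects expected_tpt_siso40MHz, then column [1] (SGI, no AGG).
+	 */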
+	if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
+		ht_tbl_pointer = expected_tpt_siso20MHz;
+	else if (is_siso(tbl->lq_type))
+		ht_tbl_pointer = expected_tpt_siso40MHz;
+	else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
+		ht_tbl_pointer = expected_tpt_mimo2_20MHz;
+	else if (is_mimo2(tbl->lq_type))
+		ht_tbl_pointer = expected_tpt_mimo2_40MHz;
+	else if (is_mimo3(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
+		ht_tbl_pointer = expected_tpt_mimo3_20MHz;
+	else /* if (is_mimo3(tbl->lq_type)) <-- must be true */
+		ht_tbl_pointer = expected_tpt_mimo3_40MHz;
+
+	if (!tbl->is_SGI && !lq_sta->is_agg)		/* Normal */
+		tbl->expected_tpt = ht_tbl_pointer[0];
+	else if (tbl->is_SGI && !lq_sta->is_agg)	/* SGI */
+		tbl->expected_tpt = ht_tbl_pointer[1];
+	else if (!tbl->is_SGI && lq_sta->is_agg)	/* AGG */
+		tbl->expected_tpt = ht_tbl_pointer[2];
+	else						/* AGG+SGI */
+		tbl->expected_tpt = ht_tbl_pointer[3];
+}
+
+/*
+ * Find starting rate for new "search" high-throughput mode of modulation.
+ * Goal is to find the lowest rate whose expected throughput (under perfect
+ * conditions) is above the current measured throughput of the "active" mode,
+ * to give the new mode a fair chance to prove itself without too many
+ * challenges.
+ *
+ * This gets called when transitioning to more aggressive modulation
+ * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
+ * (i.e. MIMO to SISO).  When moving to MIMO, bit rate will typically need
+ * to decrease to match "active" throughput.  When moving from MIMO to SISO,
+ * bit rate will typically need to increase, but not if performance was bad.
+ */
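+/*
+ * Example (hypothetical numbers): with a measured "active" throughput
+ * (last_tpt) of 2700 and a "search" expected-throughput column of
+ * {..., 25, 38, 52, ...}, the walk below tends to settle on the entry
+ * whose expected value (x100) first clears 2700 (here the 38 entry),
+ * subject to the success-ratio conditions spelled out inside the loop.
+ */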
+static s32 rs_get_best_rate(struct iwl_priv *priv,
+			    struct iwl_lq_sta *lq_sta,
+			    struct iwl_scale_tbl_info *tbl,	/* "search" */
+			    u16 rate_mask, s8 index)
+{
+	/* "active" values */
+	struct iwl_scale_tbl_info *active_tbl =
+	    &(lq_sta->lq_info[lq_sta->active_tbl]);
+	s32 active_sr = active_tbl->win[index].success_ratio;
+	s32 active_tpt = active_tbl->expected_tpt[index];
+	/* expected "search" throughput */
+	const u16 *tpt_tbl = tbl->expected_tpt;
+
+	s32 new_rate, high, low, start_hi;
+	u16 high_low;
+	s8 rate = index;
+
+	new_rate = high = low = start_hi = IWL_RATE_INVALID;
+
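+	/*
+	 * rs_get_adjacent_rate() packs the next lower rate index into bits
+	 * 0-7 of its return value and the next higher one into bits 8-15;
+	 * IWL_RATE_INVALID in either byte means no such neighbour exists.
+	 */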
+	for (;;) {
+		high_low = rs_get_adjacent_rate(priv, rate, rate_mask,
+						tbl->lq_type);
+
+		low = high_low & 0xff;
+		high = (high_low >> 8) & 0xff;
+
+		/*
+		 * Lower the "search" bit rate, to give new "search" mode
+		 * approximately the same throughput as "active" if:
+		 *
+		 * 1) "Active" mode has been working modestly well (but not
+		 *    great), and expected "search" throughput (under perfect
+		 *    conditions) at candidate rate is above the actual
+		 *    measured "active" throughput (but less than expected
+		 *    "active" throughput under perfect conditions).
+		 * OR
+		 * 2) "Active" mode has been working perfectly or very well
+		 *    and expected "search" throughput (under perfect
+		 *    conditions) at candidate rate is above expected
+		 *    "active" throughput (under perfect conditions).
+		 */
+		if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) &&
+		     ((active_sr > IWL_RATE_DECREASE_TH) &&
+		      (active_sr <= IWL_RATE_HIGH_TH) &&
+		      (tpt_tbl[rate] <= active_tpt))) ||
+		    ((active_sr >= IWL_RATE_SCALE_SWITCH) &&
+		     (tpt_tbl[rate] > active_tpt))) {
+
+			/* (2nd or later pass)
+			 * If we've already tried to raise the rate, and are
+			 * now trying to lower it, use the higher rate. */
+			if (start_hi != IWL_RATE_INVALID) {
+				new_rate = start_hi;
+				break;
+			}
+
+			new_rate = rate;
+
+			/* Loop again with lower rate */
+			if (low != IWL_RATE_INVALID)
+				rate = low;
+
+			/* Lower rate not available, use the original */
+			else
+				break;
+
+		/* Else try to raise the "search" rate to match "active" */
+		} else {
+			/* (2nd or later pass)
+			 * If we've already tried to lower the rate, and are
+			 * now trying to raise it, use the lower rate. */
+			if (new_rate != IWL_RATE_INVALID)
+				break;
+
+			/* Loop again with higher rate */
+			else if (high != IWL_RATE_INVALID) {
+				start_hi = high;
+				rate = high;
+
+			/* Higher rate not available, use the original */
+			} else {
+				new_rate = rate;
+				break;
+			}
+		}
+	}
+
+	return new_rate;
+}
+
+/*
+ * Set up search table for MIMO2
+ */
+static int rs_switch_to_mimo2(struct iwl_priv *priv,
+			     struct iwl_lq_sta *lq_sta,
+			     struct ieee80211_conf *conf,
+			     struct ieee80211_sta *sta,
+			     struct iwl_scale_tbl_info *tbl, int index)
+{
+	u16 rate_mask;
+	s32 rate;
+	s8 is_green = lq_sta->is_green;
+	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+	struct iwl_rxon_context *ctx = sta_priv->ctx;
+
+	if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
+		return -1;
+
+	if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+		return -1;
+
+	/* Need both Tx chains/antennas to support MIMO */
+	if (priv->hw_params.tx_chains_num < 2)
+		return -1;
+
+	IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n");
+
+	tbl->lq_type = LQ_MIMO2;
+	tbl->is_dup = lq_sta->is_dup;
+	tbl->action = 0;
+	tbl->max_search = IWL_MAX_SEARCH;
+	rate_mask = lq_sta->active_mimo2_rate;
+
+	if (iwl_is_ht40_tx_allowed(priv, ctx, sta))
+		tbl->is_ht40 = 1;
+	else
+		tbl->is_ht40 = 0;
+
+	rs_set_expected_tpt_table(lq_sta, tbl);
+
+	rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
+
+	IWL_DEBUG_RATE(priv, "LQ: MIMO2 best rate %d mask %X\n", rate, rate_mask);
+	if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
+		IWL_DEBUG_RATE(priv, "Can't switch with index %d rate mask %x\n",
+						rate, rate_mask);
+		return -1;
+	}
+	tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, rate, is_green);
+
+	IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
+		     tbl->current_rate, is_green);
+	return 0;
+}
+
+/*
+ * Set up search table for MIMO3
+ */
+static int rs_switch_to_mimo3(struct iwl_priv *priv,
+			     struct iwl_lq_sta *lq_sta,
+			     struct ieee80211_conf *conf,
+			     struct ieee80211_sta *sta,
+			     struct iwl_scale_tbl_info *tbl, int index)
+{
+	u16 rate_mask;
+	s32 rate;
+	s8 is_green = lq_sta->is_green;
+	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+	struct iwl_rxon_context *ctx = sta_priv->ctx;
+
+	if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
+		return -1;
+
+	if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+		return -1;
+
+	/* Need three Tx chains/antennas to support MIMO3 */
+	if (priv->hw_params.tx_chains_num < 3)
+		return -1;
+
+	IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO3\n");
+
+	tbl->lq_type = LQ_MIMO3;
+	tbl->is_dup = lq_sta->is_dup;
+	tbl->action = 0;
+	tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
+	rate_mask = lq_sta->active_mimo3_rate;
+
+	if (iwl_is_ht40_tx_allowed(priv, ctx, sta))
+		tbl->is_ht40 = 1;
+	else
+		tbl->is_ht40 = 0;
+
+	rs_set_expected_tpt_table(lq_sta, tbl);
+
+	rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
+
+	IWL_DEBUG_RATE(priv, "LQ: MIMO3 best rate %d mask %X\n",
+		rate, rate_mask);
+	if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
+		IWL_DEBUG_RATE(priv, "Can't switch with index %d rate mask %x\n",
+						rate, rate_mask);
+		return -1;
+	}
+	tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, rate, is_green);
+
+	IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
+		     tbl->current_rate, is_green);
+	return 0;
+}
+
+/*
+ * Set up search table for SISO
+ */
+static int rs_switch_to_siso(struct iwl_priv *priv,
+			     struct iwl_lq_sta *lq_sta,
+			     struct ieee80211_conf *conf,
+			     struct ieee80211_sta *sta,
+			     struct iwl_scale_tbl_info *tbl, int index)
+{
+	u16 rate_mask;
+	u8 is_green = lq_sta->is_green;
+	s32 rate;
+	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+	struct iwl_rxon_context *ctx = sta_priv->ctx;
+
+	if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
+		return -1;
+
+	IWL_DEBUG_RATE(priv, "LQ: try to switch to SISO\n");
+
+	tbl->is_dup = lq_sta->is_dup;
+	tbl->lq_type = LQ_SISO;
+	tbl->action = 0;
+	tbl->max_search = IWL_MAX_SEARCH;
+	rate_mask = lq_sta->active_siso_rate;
+
+	if (iwl_is_ht40_tx_allowed(priv, ctx, sta))
+		tbl->is_ht40 = 1;
+	else
+		tbl->is_ht40 = 0;
+
+	if (is_green)
+		tbl->is_SGI = 0; /* 11n spec: no SGI in SISO+Greenfield */
+
+	rs_set_expected_tpt_table(lq_sta, tbl);
+	rate = rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
+
+	IWL_DEBUG_RATE(priv, "LQ: get best rate %d mask %X\n", rate, rate_mask);
+	if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
+		IWL_DEBUG_RATE(priv, "can not switch with index %d rate mask %x\n",
+			     rate, rate_mask);
+		return -1;
+	}
+	tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, rate, is_green);
+	IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
+		     tbl->current_rate, is_green);
+	return 0;
+}
+
+/*
+ * Try to switch to new modulation mode from legacy
+ */
+static void rs_move_legacy_other(struct iwl_priv *priv,
+				 struct iwl_lq_sta *lq_sta,
+				 struct ieee80211_conf *conf,
+				 struct ieee80211_sta *sta,
+				 int index)
+{
+	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+	struct iwl_scale_tbl_info *search_tbl =
+				&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+	struct iwl_rate_scale_data *window = &(tbl->win[index]);
+	u32 sz = (sizeof(struct iwl_scale_tbl_info) -
+		  (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
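+	/*
+	 * sz deliberately stops short of the trailing win[] array (the
+	 * final member of struct iwl_scale_tbl_info), so the memcpy()s
+	 * below seed the search table without inheriting the per-rate
+	 * history of the active table.
+	 */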
+	u8 start_action;
+	u8 valid_tx_ant = priv->nvm_data->valid_tx_ant;
+	u8 tx_chains_num = priv->hw_params.tx_chains_num;
+	int ret = 0;
+	u8 update_search_tbl_counter = 0;
+
+	switch (priv->bt_traffic_load) {
+	case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
+		/* nothing */
+		break;
+	case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
+		/* avoid antenna B unless MIMO */
+		if (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2)
+			tbl->action = IWL_LEGACY_SWITCH_SISO;
+		break;
+	case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
+	case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
+		/* avoid antenna B and MIMO */
+		valid_tx_ant =
+			first_antenna(priv->nvm_data->valid_tx_ant);
+		if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2 &&
+		    tbl->action != IWL_LEGACY_SWITCH_SISO)
+			tbl->action = IWL_LEGACY_SWITCH_SISO;
+		break;
+	default:
+		IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load);
+		break;
+	}
+
+	if (!iwl_ht_enabled(priv))
+		/* stay in Legacy */
+		tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
+	else if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE &&
+		   tbl->action > IWL_LEGACY_SWITCH_SISO)
+		tbl->action = IWL_LEGACY_SWITCH_SISO;
+
+	/* configure as 1x1 if bt full concurrency */
+	if (priv->bt_full_concurrent) {
+		if (!iwl_ht_enabled(priv))
+			tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
+		else if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
+			tbl->action = IWL_LEGACY_SWITCH_SISO;
+		valid_tx_ant =
+			first_antenna(priv->nvm_data->valid_tx_ant);
+	}
+
+	start_action = tbl->action;
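+	/*
+	 * Round-robin through the candidate actions (antenna toggle, SISO,
+	 * MIMO2 variants, MIMO3) until one yields a usable search table or
+	 * we arrive back at the action we started from.
+	 */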
+	for (;;) {
+		lq_sta->action_counter++;
+		switch (tbl->action) {
+		case IWL_LEGACY_SWITCH_ANTENNA1:
+		case IWL_LEGACY_SWITCH_ANTENNA2:
+			IWL_DEBUG_RATE(priv, "LQ: Legacy toggle Antenna\n");
+
+			if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 &&
+							tx_chains_num <= 1) ||
+			    (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
+							tx_chains_num <= 2))
+				break;
+
+			/* Don't change antenna if success has been great */
+			if (window->success_ratio >= IWL_RS_GOOD_RATIO &&
+			    !priv->bt_full_concurrent &&
+			    priv->bt_traffic_load ==
+					IWL_BT_COEX_TRAFFIC_LOAD_NONE)
+				break;
+
+			/* Set up search table to try other antenna */
+			memcpy(search_tbl, tbl, sz);
+
+			if (rs_toggle_antenna(valid_tx_ant,
+				&search_tbl->current_rate, search_tbl)) {
+				update_search_tbl_counter = 1;
+				rs_set_expected_tpt_table(lq_sta, search_tbl);
+				goto out;
+			}
+			break;
+		case IWL_LEGACY_SWITCH_SISO:
+			IWL_DEBUG_RATE(priv, "LQ: Legacy switch to SISO\n");
+
+			/* Set up search table to try SISO */
+			memcpy(search_tbl, tbl, sz);
+			search_tbl->is_SGI = 0;
+			ret = rs_switch_to_siso(priv, lq_sta, conf, sta,
+						 search_tbl, index);
+			if (!ret) {
+				lq_sta->action_counter = 0;
+				goto out;
+			}
+
+			break;
+		case IWL_LEGACY_SWITCH_MIMO2_AB:
+		case IWL_LEGACY_SWITCH_MIMO2_AC:
+		case IWL_LEGACY_SWITCH_MIMO2_BC:
+			IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO2\n");
+
+			/* Set up search table to try MIMO */
+			memcpy(search_tbl, tbl, sz);
+			search_tbl->is_SGI = 0;
+
+			if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB)
+				search_tbl->ant_type = ANT_AB;
+			else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC)
+				search_tbl->ant_type = ANT_AC;
+			else
+				search_tbl->ant_type = ANT_BC;
+
+			if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
+				break;
+
+			ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta,
+						 search_tbl, index);
+			if (!ret) {
+				lq_sta->action_counter = 0;
+				goto out;
+			}
+			break;
+
+		case IWL_LEGACY_SWITCH_MIMO3_ABC:
+			IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO3\n");
+
+			/* Set up search table to try MIMO3 */
+			memcpy(search_tbl, tbl, sz);
+			search_tbl->is_SGI = 0;
+
+			search_tbl->ant_type = ANT_ABC;
+
+			if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
+				break;
+
+			ret = rs_switch_to_mimo3(priv, lq_sta, conf, sta,
+						 search_tbl, index);
+			if (!ret) {
+				lq_sta->action_counter = 0;
+				goto out;
+			}
+			break;
+		}
+		tbl->action++;
+		if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
+			tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
+
+		if (tbl->action == start_action)
+			break;
+
+	}
+	search_tbl->lq_type = LQ_NONE;
+	return;
+
+out:
+	lq_sta->search_better_tbl = 1;
+	tbl->action++;
+	if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
+		tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
+	if (update_search_tbl_counter)
+		search_tbl->action = tbl->action;
+}
+
+/*
+ * Try to switch to new modulation mode from SISO
+ */
+static void rs_move_siso_to_other(struct iwl_priv *priv,
+				  struct iwl_lq_sta *lq_sta,
+				  struct ieee80211_conf *conf,
+				  struct ieee80211_sta *sta, int index)
+{
+	u8 is_green = lq_sta->is_green;
+	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+	struct iwl_scale_tbl_info *search_tbl =
+				&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+	struct iwl_rate_scale_data *window = &(tbl->win[index]);
+	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+	u32 sz = (sizeof(struct iwl_scale_tbl_info) -
+		  (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
+	u8 start_action;
+	u8 valid_tx_ant = priv->nvm_data->valid_tx_ant;
+	u8 tx_chains_num = priv->hw_params.tx_chains_num;
+	u8 update_search_tbl_counter = 0;
+	int ret;
+
+	switch (priv->bt_traffic_load) {
+	case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
+		/* nothing */
+		break;
+	case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
+		/* avoid antenna B unless MIMO */
+		if (tbl->action == IWL_SISO_SWITCH_ANTENNA2)
+			tbl->action = IWL_SISO_SWITCH_MIMO2_AB;
+		break;
+	case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
+	case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
+		/* avoid antenna B and MIMO */
+		valid_tx_ant =
+			first_antenna(priv->nvm_data->valid_tx_ant);
+		if (tbl->action != IWL_SISO_SWITCH_ANTENNA1)
+			tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+		break;
+	default:
+		IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load);
+		break;
+	}
+
+	if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE &&
+	    tbl->action > IWL_SISO_SWITCH_ANTENNA2) {
+		/* stay in SISO */
+		tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+	}
+
+	/* configure as 1x1 if bt full concurrency */
+	if (priv->bt_full_concurrent) {
+		valid_tx_ant =
+			first_antenna(priv->nvm_data->valid_tx_ant);
+		if (tbl->action >= IWL_SISO_SWITCH_ANTENNA2)
+			tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+	}
+
+	start_action = tbl->action;
+	for (;;) {
+		lq_sta->action_counter++;
+		switch (tbl->action) {
+		case IWL_SISO_SWITCH_ANTENNA1:
+		case IWL_SISO_SWITCH_ANTENNA2:
+			IWL_DEBUG_RATE(priv, "LQ: SISO toggle Antenna\n");
+			if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
+						tx_chains_num <= 1) ||
+			    (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
+						tx_chains_num <= 2))
+				break;
+
+			if (window->success_ratio >= IWL_RS_GOOD_RATIO &&
+			    !priv->bt_full_concurrent &&
+			    priv->bt_traffic_load ==
+					IWL_BT_COEX_TRAFFIC_LOAD_NONE)
+				break;
+
+			memcpy(search_tbl, tbl, sz);
+			if (rs_toggle_antenna(valid_tx_ant,
+				       &search_tbl->current_rate, search_tbl)) {
+				update_search_tbl_counter = 1;
+				goto out;
+			}
+			break;
+		case IWL_SISO_SWITCH_MIMO2_AB:
+		case IWL_SISO_SWITCH_MIMO2_AC:
+		case IWL_SISO_SWITCH_MIMO2_BC:
+			IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO2\n");
+			memcpy(search_tbl, tbl, sz);
+			search_tbl->is_SGI = 0;
+
+			if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB)
+				search_tbl->ant_type = ANT_AB;
+			else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC)
+				search_tbl->ant_type = ANT_AC;
+			else
+				search_tbl->ant_type = ANT_BC;
+
+			if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
+				break;
+
+			ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta,
+						 search_tbl, index);
+			if (!ret)
+				goto out;
+			break;
+		case IWL_SISO_SWITCH_GI:
+			if (!tbl->is_ht40 && !(ht_cap->cap &
+						IEEE80211_HT_CAP_SGI_20))
+				break;
+			if (tbl->is_ht40 && !(ht_cap->cap &
+						IEEE80211_HT_CAP_SGI_40))
+				break;
+
+			IWL_DEBUG_RATE(priv, "LQ: SISO toggle SGI/NGI\n");
+
+			memcpy(search_tbl, tbl, sz);
+			if (is_green) {
+				if (!tbl->is_SGI)
+					break;
+				else
+					IWL_ERR(priv,
+						"SGI was set in GF+SISO\n");
+			}
+			search_tbl->is_SGI = !tbl->is_SGI;
+			rs_set_expected_tpt_table(lq_sta, search_tbl);
+			if (tbl->is_SGI) {
+				s32 tpt = lq_sta->last_tpt / 100;
+				if (tpt >= search_tbl->expected_tpt[index])
+					break;
+			}
+			search_tbl->current_rate =
+				rate_n_flags_from_tbl(priv, search_tbl,
+						      index, is_green);
+			update_search_tbl_counter = 1;
+			goto out;
+		case IWL_SISO_SWITCH_MIMO3_ABC:
+			IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO3\n");
+			memcpy(search_tbl, tbl, sz);
+			search_tbl->is_SGI = 0;
+			search_tbl->ant_type = ANT_ABC;
+
+			if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
+				break;
+
+			ret = rs_switch_to_mimo3(priv, lq_sta, conf, sta,
+						 search_tbl, index);
+			if (!ret)
+				goto out;
+			break;
+		}
+		tbl->action++;
+		if (tbl->action > IWL_SISO_SWITCH_MIMO3_ABC)
+			tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+
+		if (tbl->action == start_action)
+			break;
+	}
+	search_tbl->lq_type = LQ_NONE;
+	return;
+
+ out:
+	lq_sta->search_better_tbl = 1;
+	tbl->action++;
+	if (tbl->action > IWL_SISO_SWITCH_MIMO3_ABC)
+		tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+	if (update_search_tbl_counter)
+		search_tbl->action = tbl->action;
+}
+
+/*
+ * Try to switch to new modulation mode from MIMO2
+ */
+static void rs_move_mimo2_to_other(struct iwl_priv *priv,
+				   struct iwl_lq_sta *lq_sta,
+				   struct ieee80211_conf *conf,
+				   struct ieee80211_sta *sta, int index)
+{
+	s8 is_green = lq_sta->is_green;
+	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+	struct iwl_scale_tbl_info *search_tbl =
+				&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+	struct iwl_rate_scale_data *window = &(tbl->win[index]);
+	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+	u32 sz = (sizeof(struct iwl_scale_tbl_info) -
+		  (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
+	u8 start_action;
+	u8 valid_tx_ant = priv->nvm_data->valid_tx_ant;
+	u8 tx_chains_num = priv->hw_params.tx_chains_num;
+	u8 update_search_tbl_counter = 0;
+	int ret;
+
+	switch (priv->bt_traffic_load) {
+	case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
+		/* nothing */
+		break;
+	case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
+	case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
+		/* avoid antenna B and MIMO */
+		if (tbl->action != IWL_MIMO2_SWITCH_SISO_A)
+			tbl->action = IWL_MIMO2_SWITCH_SISO_A;
+		break;
+	case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
+		/* avoid antenna B unless MIMO */
+		if (tbl->action == IWL_MIMO2_SWITCH_SISO_B ||
+		    tbl->action == IWL_MIMO2_SWITCH_SISO_C)
+			tbl->action = IWL_MIMO2_SWITCH_SISO_A;
+		break;
+	default:
+		IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load);
+		break;
+	}
+
+	if ((iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE) &&
+	    (tbl->action < IWL_MIMO2_SWITCH_SISO_A ||
+	     tbl->action > IWL_MIMO2_SWITCH_SISO_C)) {
+		/* switch in SISO */
+		tbl->action = IWL_MIMO2_SWITCH_SISO_A;
+	}
+
+	/* configure as 1x1 if bt full concurrency */
+	if (priv->bt_full_concurrent &&
+	    (tbl->action < IWL_MIMO2_SWITCH_SISO_A ||
+	     tbl->action > IWL_MIMO2_SWITCH_SISO_C))
+		tbl->action = IWL_MIMO2_SWITCH_SISO_A;
+
+	start_action = tbl->action;
+	for (;;) {
+		lq_sta->action_counter++;
+		switch (tbl->action) {
+		case IWL_MIMO2_SWITCH_ANTENNA1:
+		case IWL_MIMO2_SWITCH_ANTENNA2:
+			IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle Antennas\n");
+
+			if (tx_chains_num <= 2)
+				break;
+
+			if (window->success_ratio >= IWL_RS_GOOD_RATIO)
+				break;
+
+			memcpy(search_tbl, tbl, sz);
+			if (rs_toggle_antenna(valid_tx_ant,
+				       &search_tbl->current_rate, search_tbl)) {
+				update_search_tbl_counter = 1;
+				goto out;
+			}
+			break;
+		case IWL_MIMO2_SWITCH_SISO_A:
+		case IWL_MIMO2_SWITCH_SISO_B:
+		case IWL_MIMO2_SWITCH_SISO_C:
+			IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to SISO\n");
+
+			/* Set up new search table for SISO */
+			memcpy(search_tbl, tbl, sz);
+
+			if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
+				search_tbl->ant_type = ANT_A;
+			else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
+				search_tbl->ant_type = ANT_B;
+			else
+				search_tbl->ant_type = ANT_C;
+
+			if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
+				break;
+
+			ret = rs_switch_to_siso(priv, lq_sta, conf, sta,
+						 search_tbl, index);
+			if (!ret)
+				goto out;
+
+			break;
+
+		case IWL_MIMO2_SWITCH_GI:
+			if (!tbl->is_ht40 && !(ht_cap->cap &
+						IEEE80211_HT_CAP_SGI_20))
+				break;
+			if (tbl->is_ht40 && !(ht_cap->cap &
+						IEEE80211_HT_CAP_SGI_40))
+				break;
+
+			IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle SGI/NGI\n");
+
+			/* Set up new search table for MIMO2 */
+			memcpy(search_tbl, tbl, sz);
+			search_tbl->is_SGI = !tbl->is_SGI;
+			rs_set_expected_tpt_table(lq_sta, search_tbl);
+			/*
+			 * If active table already uses the fastest possible
+			 * modulation (dual stream with short guard interval),
+			 * and it's working well, there's no need to look
+			 * for a better type of modulation!
+			 */
+			if (tbl->is_SGI) {
+				s32 tpt = lq_sta->last_tpt / 100;
+				if (tpt >= search_tbl->expected_tpt[index])
+					break;
+			}
+			search_tbl->current_rate =
+				rate_n_flags_from_tbl(priv, search_tbl,
+						      index, is_green);
+			update_search_tbl_counter = 1;
+			goto out;
+
+		case IWL_MIMO2_SWITCH_MIMO3_ABC:
+			IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to MIMO3\n");
+			memcpy(search_tbl, tbl, sz);
+			search_tbl->is_SGI = 0;
+			search_tbl->ant_type = ANT_ABC;
+
+			if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
+				break;
+
+			ret = rs_switch_to_mimo3(priv, lq_sta, conf, sta,
+						 search_tbl, index);
+			if (!ret)
+				goto out;
+
+			break;
+		}
+		tbl->action++;
+		if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC)
+			tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
+
+		if (tbl->action == start_action)
+			break;
+	}
+	search_tbl->lq_type = LQ_NONE;
+	return;
+ out:
+	lq_sta->search_better_tbl = 1;
+	tbl->action++;
+	if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC)
+		tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
+	if (update_search_tbl_counter)
+		search_tbl->action = tbl->action;
+
+}
+
+/*
+ * Try to switch to new modulation mode from MIMO3
+ */
+static void rs_move_mimo3_to_other(struct iwl_priv *priv,
+				   struct iwl_lq_sta *lq_sta,
+				   struct ieee80211_conf *conf,
+				   struct ieee80211_sta *sta, int index)
+{
+	s8 is_green = lq_sta->is_green;
+	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+	struct iwl_scale_tbl_info *search_tbl =
+				&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+	struct iwl_rate_scale_data *window = &(tbl->win[index]);
+	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+	u32 sz = (sizeof(struct iwl_scale_tbl_info) -
+		  (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
+	u8 start_action;
+	u8 valid_tx_ant = priv->nvm_data->valid_tx_ant;
+	u8 tx_chains_num = priv->hw_params.tx_chains_num;
+	int ret;
+	u8 update_search_tbl_counter = 0;
+
+	switch (priv->bt_traffic_load) {
+	case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
+		/* nothing */
+		break;
+	case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
+	case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
+		/* avoid antenna B and MIMO */
+		if (tbl->action != IWL_MIMO3_SWITCH_SISO_A)
+			tbl->action = IWL_MIMO3_SWITCH_SISO_A;
+		break;
+	case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
+		/* avoid antenna B unless MIMO */
+		if (tbl->action == IWL_MIMO3_SWITCH_SISO_B ||
+		    tbl->action == IWL_MIMO3_SWITCH_SISO_C)
+			tbl->action = IWL_MIMO3_SWITCH_SISO_A;
+		break;
+	default:
+		IWL_ERR(priv, "Invalid BT load %d\n", priv->bt_traffic_load);
+		break;
+	}
+
+	if ((iwl_tx_ant_restriction(priv) == IWL_ANT_OK_SINGLE) &&
+	    (tbl->action < IWL_MIMO3_SWITCH_SISO_A ||
+	     tbl->action > IWL_MIMO3_SWITCH_SISO_C)) {
+		/* switch in SISO */
+		tbl->action = IWL_MIMO3_SWITCH_SISO_A;
+	}
+
+	/* configure as 1x1 if bt full concurrency */
+	if (priv->bt_full_concurrent &&
+	    (tbl->action < IWL_MIMO3_SWITCH_SISO_A ||
+	     tbl->action > IWL_MIMO3_SWITCH_SISO_C))
+		tbl->action = IWL_MIMO3_SWITCH_SISO_A;
+
+	start_action = tbl->action;
+	for (;;) {
+		lq_sta->action_counter++;
+		switch (tbl->action) {
+		case IWL_MIMO3_SWITCH_ANTENNA1:
+		case IWL_MIMO3_SWITCH_ANTENNA2:
+			IWL_DEBUG_RATE(priv, "LQ: MIMO3 toggle Antennas\n");
+
+			if (tx_chains_num <= 3)
+				break;
+
+			if (window->success_ratio >= IWL_RS_GOOD_RATIO)
+				break;
+
+			memcpy(search_tbl, tbl, sz);
+			if (rs_toggle_antenna(valid_tx_ant,
+				       &search_tbl->current_rate, search_tbl))
+				goto out;
+			break;
+		case IWL_MIMO3_SWITCH_SISO_A:
+		case IWL_MIMO3_SWITCH_SISO_B:
+		case IWL_MIMO3_SWITCH_SISO_C:
+			IWL_DEBUG_RATE(priv, "LQ: MIMO3 switch to SISO\n");
+
+			/* Set up new search table for SISO */
+			memcpy(search_tbl, tbl, sz);
+
+			if (tbl->action == IWL_MIMO3_SWITCH_SISO_A)
+				search_tbl->ant_type = ANT_A;
+			else if (tbl->action == IWL_MIMO3_SWITCH_SISO_B)
+				search_tbl->ant_type = ANT_B;
+			else
+				search_tbl->ant_type = ANT_C;
+
+			if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
+				break;
+
+			ret = rs_switch_to_siso(priv, lq_sta, conf, sta,
+						 search_tbl, index);
+			if (!ret)
+				goto out;
+
+			break;
+
+		case IWL_MIMO3_SWITCH_MIMO2_AB:
+		case IWL_MIMO3_SWITCH_MIMO2_AC:
+		case IWL_MIMO3_SWITCH_MIMO2_BC:
+			IWL_DEBUG_RATE(priv, "LQ: MIMO3 switch to MIMO2\n");
+
+			memcpy(search_tbl, tbl, sz);
+			search_tbl->is_SGI = 0;
+			if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AB)
+				search_tbl->ant_type = ANT_AB;
+			else if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AC)
+				search_tbl->ant_type = ANT_AC;
+			else
+				search_tbl->ant_type = ANT_BC;
+
+			if (!rs_is_valid_ant(valid_tx_ant, search_tbl->ant_type))
+				break;
+
+			ret = rs_switch_to_mimo2(priv, lq_sta, conf, sta,
+						 search_tbl, index);
+			if (!ret)
+				goto out;
+
+			break;
+
+		case IWL_MIMO3_SWITCH_GI:
+			if (!tbl->is_ht40 && !(ht_cap->cap &
+						IEEE80211_HT_CAP_SGI_20))
+				break;
+			if (tbl->is_ht40 && !(ht_cap->cap &
+						IEEE80211_HT_CAP_SGI_40))
+				break;
+
+			IWL_DEBUG_RATE(priv, "LQ: MIMO3 toggle SGI/NGI\n");
+
+			/* Set up new search table for MIMO */
+			memcpy(search_tbl, tbl, sz);
+			search_tbl->is_SGI = !tbl->is_SGI;
+			rs_set_expected_tpt_table(lq_sta, search_tbl);
+			/*
+			 * If active table already uses the fastest possible
+			 * modulation (triple stream with short guard interval),
+			 * and it's working well, there's no need to look
+			 * for a better type of modulation!
+			 */
+			if (tbl->is_SGI) {
+				s32 tpt = lq_sta->last_tpt / 100;
+				if (tpt >= search_tbl->expected_tpt[index])
+					break;
+			}
+			search_tbl->current_rate =
+				rate_n_flags_from_tbl(priv, search_tbl,
+						      index, is_green);
+			update_search_tbl_counter = 1;
+			goto out;
+		}
+		tbl->action++;
+		if (tbl->action > IWL_MIMO3_SWITCH_GI)
+			tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
+
+		if (tbl->action == start_action)
+			break;
+	}
+	search_tbl->lq_type = LQ_NONE;
+	return;
+ out:
+	lq_sta->search_better_tbl = 1;
+	tbl->action++;
+	if (tbl->action > IWL_MIMO3_SWITCH_GI)
+		tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
+	if (update_search_tbl_counter)
+		search_tbl->action = tbl->action;
+}
+
+/*
+ * Check whether we should continue using same modulation mode, or
+ * begin search for a new mode, based on:
+ * 1) # tx successes or failures while using this mode
+ * 2) # times calling this function
+ * 3) elapsed time in this mode (not used, for now)
+ */
+static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
+{
+	struct iwl_scale_tbl_info *tbl;
+	int i;
+	int active_tbl;
+	int flush_interval_passed = 0;
+	struct iwl_priv *priv;
+
+	priv = lq_sta->drv;
+	active_tbl = lq_sta->active_tbl;
+
+	tbl = &(lq_sta->lq_info[active_tbl]);
+
+	/* If we've been disallowing search, see if we should now allow it */
+	if (lq_sta->stay_in_tbl) {
+
+		/* Elapsed time using current modulation mode */
+		if (lq_sta->flush_timer)
+			flush_interval_passed =
+			time_after(jiffies,
+					(unsigned long)(lq_sta->flush_timer +
+					IWL_RATE_SCALE_FLUSH_INTVL));
+
+		/*
+		 * Check if we should allow search for new modulation mode.
+		 * If many frames have failed or succeeded, or we've used
+		 * this same modulation for a long time, allow search, and
+		 * reset history stats that keep track of whether we should
+		 * allow a new search.  Also (below) reset all bitmaps and
+		 * stats in active history.
+		 */
+		if (force_search ||
+		    (lq_sta->total_failed > lq_sta->max_failure_limit) ||
+		    (lq_sta->total_success > lq_sta->max_success_limit) ||
+		    ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer) &&
+		     (flush_interval_passed))) {
+			IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n",
+				     lq_sta->total_failed,
+				     lq_sta->total_success,
+				     flush_interval_passed);
+
+			/* Allow search for new mode */
+			lq_sta->stay_in_tbl = 0;	/* only place reset */
+			lq_sta->total_failed = 0;
+			lq_sta->total_success = 0;
+			lq_sta->flush_timer = 0;
+
+		/*
+		 * Else, if we've used this modulation mode for enough
+		 * repetitions (regardless of elapsed time or success/failure),
+		 * reset
+		 * history bitmaps and rate-specific stats for all rates in
+		 * active table.
+		 */
+		} else {
+			lq_sta->table_count++;
+			if (lq_sta->table_count >=
+			    lq_sta->table_count_limit) {
+				lq_sta->table_count = 0;
+
+				IWL_DEBUG_RATE(priv, "LQ: stay in table clear win\n");
+				for (i = 0; i < IWL_RATE_COUNT; i++)
+					rs_rate_scale_clear_window(
+						&(tbl->win[i]));
+			}
+		}
+
+		/* If transitioning to allow "search", reset all history
+		 * bitmaps and stats in active table (this will become the new
+		 * "search" table). */
+		if (!lq_sta->stay_in_tbl) {
+			for (i = 0; i < IWL_RATE_COUNT; i++)
+				rs_rate_scale_clear_window(&(tbl->win[i]));
+		}
+	}
+}
+
+/*
+ * setup rate table in uCode
+ */
+static void rs_update_rate_tbl(struct iwl_priv *priv,
+			       struct iwl_rxon_context *ctx,
+			       struct iwl_lq_sta *lq_sta,
+			       struct iwl_scale_tbl_info *tbl,
+			       int index, u8 is_green)
+{
+	u32 rate;
+
+	/* Update uCode's rate table. */
+	rate = rate_n_flags_from_tbl(priv, tbl, index, is_green);
+	rs_fill_link_cmd(priv, lq_sta, rate);
+	iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
+}
+
+/*
+ * Do rate scaling and search for new modulation mode.
+ */
+static void rs_rate_scale_perform(struct iwl_priv *priv,
+				  struct sk_buff *skb,
+				  struct ieee80211_sta *sta,
+				  struct iwl_lq_sta *lq_sta)
+{
+	struct ieee80211_hw *hw = priv->hw;
+	struct ieee80211_conf *conf = &hw->conf;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	int low = IWL_RATE_INVALID;
+	int high = IWL_RATE_INVALID;
+	int index;
+	int i;
+	struct iwl_rate_scale_data *window = NULL;
+	int current_tpt = IWL_INVALID_VALUE;
+	int low_tpt = IWL_INVALID_VALUE;
+	int high_tpt = IWL_INVALID_VALUE;
+	u32 fail_count;
+	s8 scale_action = 0;
+	u16 rate_mask;
+	u8 update_lq = 0;
+	struct iwl_scale_tbl_info *tbl, *tbl1;
+	u16 rate_scale_index_msk = 0;
+	u8 is_green = 0;
+	u8 active_tbl = 0;
+	u8 done_search = 0;
+	u16 high_low;
+	s32 sr;
+	u8 tid = IWL_MAX_TID_COUNT;
+	struct iwl_tid_data *tid_data;
+	struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+	struct iwl_rxon_context *ctx = sta_priv->ctx;
+
+	IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n");
+
+	/* Send management frames and NO_ACK data using lowest rate. */
+	/* TODO: this could probably be improved.. */
+	if (!ieee80211_is_data(hdr->frame_control) ||
+	    info->flags & IEEE80211_TX_CTL_NO_ACK)
+		return;
+
+	lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
+
+	tid = rs_tl_add_packet(lq_sta, hdr);
+	if ((tid != IWL_MAX_TID_COUNT) &&
+	    (lq_sta->tx_agg_tid_en & (1 << tid))) {
+		tid_data = &priv->tid_data[lq_sta->lq.sta_id][tid];
+		if (tid_data->agg.state == IWL_AGG_OFF)
+			lq_sta->is_agg = 0;
+		else
+			lq_sta->is_agg = 1;
+	} else {
+		lq_sta->is_agg = 0;
+	}
+
+	/*
+	 * Select rate-scale / modulation-mode table to work with in
+	 * the rest of this function:  "search" if searching for better
+	 * modulation mode, or "active" if doing rate scaling within a mode.
+	 */
+	if (!lq_sta->search_better_tbl)
+		active_tbl = lq_sta->active_tbl;
+	else
+		active_tbl = 1 - lq_sta->active_tbl;
+
+	tbl = &(lq_sta->lq_info[active_tbl]);
+	if (is_legacy(tbl->lq_type))
+		lq_sta->is_green = 0;
+	else
+		lq_sta->is_green = rs_use_green(sta);
+	is_green = lq_sta->is_green;
+
+	/* current tx rate */
+	index = lq_sta->last_txrate_idx;
+
+	IWL_DEBUG_RATE(priv, "Rate scale index %d for type %d\n", index,
+		       tbl->lq_type);
+
+	/* rates available for this association, and for modulation mode */
+	rate_mask = rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
+
+	IWL_DEBUG_RATE(priv, "mask 0x%04X\n", rate_mask);
+
+	/* mask with station rate restriction */
+	if (is_legacy(tbl->lq_type)) {
+		if (lq_sta->band == NL80211_BAND_5GHZ)
+			/* supp_rates has no CCK bits in A mode */
+			rate_scale_index_msk = (u16) (rate_mask &
+				(lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
+		else
+			rate_scale_index_msk = (u16) (rate_mask &
+						      lq_sta->supp_rates);
+
+	} else
+		rate_scale_index_msk = rate_mask;
+
+	if (!rate_scale_index_msk)
+		rate_scale_index_msk = rate_mask;
+
+	if (!((1 << index) & rate_scale_index_msk)) {
+		IWL_ERR(priv, "Current Rate is not valid\n");
+		if (lq_sta->search_better_tbl) {
+			/* revert to active table if search table is not valid*/
+			tbl->lq_type = LQ_NONE;
+			lq_sta->search_better_tbl = 0;
+			tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+			/* get "active" rate info */
+			index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
+			rs_update_rate_tbl(priv, ctx, lq_sta, tbl,
+					   index, is_green);
+		}
+		return;
+	}
+
+	/* Get expected throughput table and history window for current rate */
+	if (!tbl->expected_tpt) {
+		IWL_ERR(priv, "tbl->expected_tpt is NULL\n");
+		return;
+	}
+
+	/* force user max rate if set by user */
+	if ((lq_sta->max_rate_idx != -1) &&
+	    (lq_sta->max_rate_idx < index)) {
+		index = lq_sta->max_rate_idx;
+		update_lq = 1;
+		window = &(tbl->win[index]);
+		goto lq_update;
+	}
+
+	window = &(tbl->win[index]);
+
+	/*
+	 * If there is not enough history to calculate actual average
+	 * throughput, keep analyzing results of more tx frames, without
+	 * changing rate or mode (bypass most of the rest of this function).
+	 * Set up new rate table in uCode only if old rate is not supported
+	 * in current association (use new rate found above).
+	 */
+	fail_count = window->counter - window->success_counter;
+	if ((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
+			(window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) {
+		IWL_DEBUG_RATE(priv, "LQ: still below TH. succ=%d total=%d "
+			       "for index %d\n",
+			       window->success_counter, window->counter, index);
+
+		/* Can't calculate this yet; not enough history */
+		window->average_tpt = IWL_INVALID_VALUE;
+
+		/* Should we stay with this modulation mode,
+		 * or search for a new one? */
+		rs_stay_in_table(lq_sta, false);
+
+		goto out;
+	}
+	/* Else we have enough samples; calculate estimate of
+	 * actual average throughput */
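+	/*
+	 * success_ratio holds percent scaled by 128 (as maintained by the
+	 * history-window bookkeeping), so e.g. a 50% ratio (6400) with
+	 * expected_tpt 40 gives (6400 * 40 + 64) / 128 = 2000, i.e.
+	 * success-percent times expected throughput.
+	 */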
+	if (window->average_tpt != ((window->success_ratio *
+			tbl->expected_tpt[index] + 64) / 128)) {
+		IWL_ERR(priv, "expected_tpt should have been calculated by now\n");
+		window->average_tpt = ((window->success_ratio *
+					tbl->expected_tpt[index] + 64) / 128);
+	}
+
+	/* If we are searching for better modulation mode, check success. */
+	if (lq_sta->search_better_tbl &&
+	    (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_MULTI)) {
+		/* If good success, continue using the "search" mode;
+		 * no need to send new link quality command, since we're
+		 * continuing to use the setup that we've been trying. */
+		if (window->average_tpt > lq_sta->last_tpt) {
+
+			IWL_DEBUG_RATE(priv, "LQ: SWITCHING TO NEW TABLE "
+					"suc=%d cur-tpt=%d old-tpt=%d\n",
+					window->success_ratio,
+					window->average_tpt,
+					lq_sta->last_tpt);
+
+			if (!is_legacy(tbl->lq_type))
+				lq_sta->enable_counter = 1;
+
+			/* Swap tables; "search" becomes "active" */
+			lq_sta->active_tbl = active_tbl;
+			current_tpt = window->average_tpt;
+
+		/* Else poor success; go back to mode in "active" table */
+		} else {
+
+			IWL_DEBUG_RATE(priv, "LQ: GOING BACK TO THE OLD TABLE "
+					"suc=%d cur-tpt=%d old-tpt=%d\n",
+					window->success_ratio,
+					window->average_tpt,
+					lq_sta->last_tpt);
+
+			/* Nullify "search" table */
+			tbl->lq_type = LQ_NONE;
+
+			/* Revert to "active" table */
+			active_tbl = lq_sta->active_tbl;
+			tbl = &(lq_sta->lq_info[active_tbl]);
+
+			/* Revert to "active" rate and throughput info */
+			index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
+			current_tpt = lq_sta->last_tpt;
+
+			/* Need to set up a new rate table in uCode */
+			update_lq = 1;
+		}
+
+		/* Either way, we've made a decision; modulation mode
+		 * search is done, allow rate adjustment next time. */
+		lq_sta->search_better_tbl = 0;
+		done_search = 1;	/* Don't switch modes below! */
+		goto lq_update;
+	}
+
+	/* (Else) not in search of better modulation mode, try for better
+	 * starting rate, while staying in this mode. */
+	high_low = rs_get_adjacent_rate(priv, index, rate_scale_index_msk,
+					tbl->lq_type);
+	low = high_low & 0xff;
+	high = (high_low >> 8) & 0xff;
+
+	/* If user set a max rate, don't allow rates above that constraint */
+	if ((lq_sta->max_rate_idx != -1) &&
+	    (lq_sta->max_rate_idx < high))
+		high = IWL_RATE_INVALID;
+
+	sr = window->success_ratio;
+
+	/* Collect measured throughputs for current and adjacent rates */
+	current_tpt = window->average_tpt;
+	if (low != IWL_RATE_INVALID)
+		low_tpt = tbl->win[low].average_tpt;
+	if (high != IWL_RATE_INVALID)
+		high_tpt = tbl->win[high].average_tpt;
+
+	scale_action = 0;
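+	/*
+	 * Decision sketch for the branches below: poor success ratio means
+	 * step down; no neighbour throughput measured yet means probe
+	 * upward when success allows; both neighbours measured worse means
+	 * hold; otherwise follow whichever measured neighbour looks better.
+	 */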
+
+	/* Too many failures, decrease rate */
+	if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) {
+		IWL_DEBUG_RATE(priv, "decrease rate because of low success_ratio\n");
+		scale_action = -1;
+
+	/* No throughput measured yet for adjacent rates; try increase. */
+	} else if ((low_tpt == IWL_INVALID_VALUE) &&
+		   (high_tpt == IWL_INVALID_VALUE)) {
+
+		if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH)
+			scale_action = 1;
+		else if (low != IWL_RATE_INVALID)
+			scale_action = 0;
+	}
+
+	/* Both adjacent throughputs are measured, but neither one has better
+	 * throughput; we're using the best rate, don't change it! */
+	else if ((low_tpt != IWL_INVALID_VALUE) &&
+		 (high_tpt != IWL_INVALID_VALUE) &&
+		 (low_tpt < current_tpt) &&
+		 (high_tpt < current_tpt))
+		scale_action = 0;
+
+	/* At least one adjacent rate's throughput is measured,
+	 * and may have better performance. */
+	else {
+		/* Higher adjacent rate's throughput is measured */
+		if (high_tpt != IWL_INVALID_VALUE) {
+			/* Higher rate has better throughput */
+			if (high_tpt > current_tpt &&
+					sr >= IWL_RATE_INCREASE_TH) {
+				scale_action = 1;
+			} else {
+				scale_action = 0;
+			}
+
+		/* Lower adjacent rate's throughput is measured */
+		} else if (low_tpt != IWL_INVALID_VALUE) {
+			/* Lower rate has better throughput */
+			if (low_tpt > current_tpt) {
+				IWL_DEBUG_RATE(priv,
+				    "decrease rate because of low tpt\n");
+				scale_action = -1;
+			} else if (sr >= IWL_RATE_INCREASE_TH) {
+				scale_action = 1;
+			}
+		}
+	}
+
+	/* Sanity check; asked for decrease, but success rate or throughput
+	 * has been good at old rate.  Don't change it. */
+	if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
+		    ((sr > IWL_RATE_HIGH_TH) ||
+		     (current_tpt > (100 * tbl->expected_tpt[low]))))
+		scale_action = 0;
+	if (!iwl_ht_enabled(priv) && !is_legacy(tbl->lq_type))
+		scale_action = -1;
+	if (iwl_tx_ant_restriction(priv) != IWL_ANT_OK_MULTI &&
+		(is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type)))
+		scale_action = -1;
+
+	if ((priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) &&
+	     (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) {
+		if (lq_sta->last_bt_traffic > priv->bt_traffic_load) {
+			/*
+			 * don't set scale_action, don't want to scale up if
+			 * the rate scale doesn't otherwise think that is a
+			 * good idea.
+			 */
+		} else if (lq_sta->last_bt_traffic <= priv->bt_traffic_load) {
+			scale_action = -1;
+		}
+	}
+	lq_sta->last_bt_traffic = priv->bt_traffic_load;
+
+	if ((priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) &&
+	     (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) {
+		/* search for a new modulation */
+		rs_stay_in_table(lq_sta, true);
+		goto lq_update;
+	}
+
+	switch (scale_action) {
+	case -1:
+		/* Decrease starting rate, update uCode's rate table */
+		if (low != IWL_RATE_INVALID) {
+			update_lq = 1;
+			index = low;
+		}
+
+		break;
+	case 1:
+		/* Increase starting rate, update uCode's rate table */
+		if (high != IWL_RATE_INVALID) {
+			update_lq = 1;
+			index = high;
+		}
+
+		break;
+	case 0:
+		/* No change */
+	default:
+		break;
+	}
+
+	IWL_DEBUG_RATE(priv, "choose rate scale index %d action %d low %d "
+		    "high %d type %d\n",
+		     index, scale_action, low, high, tbl->lq_type);
+
+lq_update:
+	/* Replace uCode's rate table for the destination station. */
+	if (update_lq)
+		rs_update_rate_tbl(priv, ctx, lq_sta, tbl, index, is_green);
+
+	if (iwl_tx_ant_restriction(priv) == IWL_ANT_OK_MULTI) {
+		/* Should we stay with this modulation mode,
+		 * or search for a new one? */
+		rs_stay_in_table(lq_sta, false);
+	}
+	/*
+	 * Search for new modulation mode if we're:
+	 * 1)  Not changing rates right now
+	 * 2)  Not just finishing up a search
+	 * 3)  Allowing a new search
+	 */
+	if (!update_lq && !done_search &&
+	    !lq_sta->stay_in_tbl && window->counter) {
+		/* Save current throughput to compare with "search" throughput*/
+		lq_sta->last_tpt = current_tpt;
+
+		/* Select a new "search" modulation mode to try.
+		 * If one is found, set up the new "search" table. */
+		if (is_legacy(tbl->lq_type))
+			rs_move_legacy_other(priv, lq_sta, conf, sta, index);
+		else if (is_siso(tbl->lq_type))
+			rs_move_siso_to_other(priv, lq_sta, conf, sta, index);
+		else if (is_mimo2(tbl->lq_type))
+			rs_move_mimo2_to_other(priv, lq_sta, conf, sta, index);
+		else
+			rs_move_mimo3_to_other(priv, lq_sta, conf, sta, index);
+
+		/* If new "search" mode was selected, set up in uCode table */
+		if (lq_sta->search_better_tbl) {
+			/* Access the "search" table, clear its history. */
+			tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+			for (i = 0; i < IWL_RATE_COUNT; i++)
+				rs_rate_scale_clear_window(&(tbl->win[i]));
+
+			/* Use new "search" start rate */
+			index = iwl_hwrate_to_plcp_idx(tbl->current_rate);
+
+			IWL_DEBUG_RATE(priv, "Switch current  mcs: %X index: %d\n",
+				     tbl->current_rate, index);
+			rs_fill_link_cmd(priv, lq_sta, tbl->current_rate);
+			iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
+		} else
+			done_search = 1;
+	}
+
+	if (done_search && !lq_sta->stay_in_tbl) {
+		/* If the "active" (non-search) mode was legacy,
+		 * and we've tried switching antennas,
+		 * but we haven't been able to try HT modes (not available),
+		 * stay with best antenna legacy modulation for a while
+		 * before next round of mode comparisons. */
+		tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
+		if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) &&
+		    lq_sta->action_counter > tbl1->max_search) {
+			IWL_DEBUG_RATE(priv, "LQ: STAY in legacy table\n");
+			rs_set_stay_in_table(priv, 1, lq_sta);
+		}
+
+		/* If we're in an HT mode, and all 3 mode switch actions
+		 * have been tried and compared, stay in this best modulation
+		 * mode for a while before next round of mode comparisons. */
+		if (lq_sta->enable_counter &&
+		    (lq_sta->action_counter >= tbl1->max_search) &&
+		    iwl_ht_enabled(priv)) {
+			if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
+			    (lq_sta->tx_agg_tid_en & (1 << tid)) &&
+			    (tid != IWL_MAX_TID_COUNT)) {
+				u8 sta_id = lq_sta->lq.sta_id;
+				tid_data = &priv->tid_data[sta_id][tid];
+				if (tid_data->agg.state == IWL_AGG_OFF) {
+					IWL_DEBUG_RATE(priv,
+						       "try to aggregate tid %d\n",
+						       tid);
+					rs_tl_turn_on_agg(priv, tid,
+							  lq_sta, sta);
+				}
+			}
+			rs_set_stay_in_table(priv, 0, lq_sta);
+		}
+	}
+
+out:
+	tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, index, is_green);
+	lq_sta->last_txrate_idx = index;
+}
+
+/**
+ * rs_initialize_lq - Initialize a station's hardware rate table
+ *
+ * The uCode's station table contains a table of fallback rates
+ * for automatic fallback during transmission.
+ *
+ * NOTE: This sets up a default set of values.  These will be replaced later
+ *       if the driver's iwl-agn-rs rate scaling algorithm is used, instead of
+ *       rc80211_simple.
+ *
+ * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
+ *       calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
+ *       which requires station table entry to exist).
+ */
+static void rs_initialize_lq(struct iwl_priv *priv,
+			     struct ieee80211_sta *sta,
+			     struct iwl_lq_sta *lq_sta)
+{
+	struct iwl_scale_tbl_info *tbl;
+	int rate_idx;
+	int i;
+	u32 rate;
+	u8 use_green = rs_use_green(sta);
+	u8 active_tbl = 0;
+	u8 valid_tx_ant;
+	struct iwl_station_priv *sta_priv;
+	struct iwl_rxon_context *ctx;
+
+	if (!sta || !lq_sta)
+		return;
+
+	sta_priv = (void *)sta->drv_priv;
+	ctx = sta_priv->ctx;
+
+	i = lq_sta->last_txrate_idx;
+
+	valid_tx_ant = priv->nvm_data->valid_tx_ant;
+
+	if (!lq_sta->search_better_tbl)
+		active_tbl = lq_sta->active_tbl;
+	else
+		active_tbl = 1 - lq_sta->active_tbl;
+
+	tbl = &(lq_sta->lq_info[active_tbl]);
+
+	if ((i < 0) || (i >= IWL_RATE_COUNT))
+		i = 0;
+
+	rate = iwl_rates[i].plcp;
+	tbl->ant_type = first_antenna(valid_tx_ant);
+	rate |= tbl->ant_type << RATE_MCS_ANT_POS;
+
+	if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE)
+		rate |= RATE_MCS_CCK_MSK;
+
+	rs_get_tbl_info_from_mcs(rate, priv->band, tbl, &rate_idx);
+	if (!rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
+		rs_toggle_antenna(valid_tx_ant, &rate, tbl);
+
+	rate = rate_n_flags_from_tbl(priv, tbl, rate_idx, use_green);
+	tbl->current_rate = rate;
+	rs_set_expected_tpt_table(lq_sta, tbl);
+	rs_fill_link_cmd(NULL, lq_sta, rate);
+	priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
+	iwl_send_lq_cmd(priv, ctx, &lq_sta->lq, 0, true);
+}
+
+static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
+			struct ieee80211_tx_rate_control *txrc)
+{
+	struct sk_buff *skb = txrc->skb;
+	struct ieee80211_supported_band *sband = txrc->sband;
+	struct iwl_op_mode *op_mode __maybe_unused =
+			(struct iwl_op_mode *)priv_r;
+	struct iwl_priv *priv __maybe_unused = IWL_OP_MODE_GET_DVM(op_mode);
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct iwl_lq_sta *lq_sta = priv_sta;
+	int rate_idx;
+
+	IWL_DEBUG_RATE_LIMIT(priv, "rate scale calculate new rate for skb\n");
+
+	/* Get max rate if user set max rate */
+	if (lq_sta) {
+		lq_sta->max_rate_idx = fls(txrc->rate_idx_mask) - 1;
+		if ((sband->band == NL80211_BAND_5GHZ) &&
+		    (lq_sta->max_rate_idx != -1))
+			lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
+		if ((lq_sta->max_rate_idx < 0) ||
+		    (lq_sta->max_rate_idx >= IWL_RATE_COUNT))
+			lq_sta->max_rate_idx = -1;
+	}
+
+	/* Treat uninitialized rate scaling data the same as non-existent. */
+	if (lq_sta && !lq_sta->drv) {
+		IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
+		priv_sta = NULL;
+	}
+
+	/* Send management frames and NO_ACK data using lowest rate. */
+	if (rate_control_send_low(sta, priv_sta, txrc))
+		return;
+
+	rate_idx = lq_sta->last_txrate_idx;
+
+	if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
+		rate_idx -= IWL_FIRST_OFDM_RATE;
+		/* 6M and 9M share the same MCS index */
+		rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
+		if (rs_extract_rate(lq_sta->last_rate_n_flags) >=
+		    IWL_RATE_MIMO3_6M_PLCP)
+			rate_idx = rate_idx + (2 * MCS_INDEX_PER_STREAM);
+		else if (rs_extract_rate(lq_sta->last_rate_n_flags) >=
+			 IWL_RATE_MIMO2_6M_PLCP)
+			rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
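+		/*
+		 * Net effect: the driver's internal rate index becomes a
+		 * mac80211 MCS number: strip the legacy OFDM offset,
+		 * collapse the unused 9M slot, then add one
+		 * MCS_INDEX_PER_STREAM block per extra spatial stream.
+		 */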
+		info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
+		if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
+			info->control.rates[0].flags |= IEEE80211_TX_RC_SHORT_GI;
+		if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK)
+			info->control.rates[0].flags |= IEEE80211_TX_RC_DUP_DATA;
+		if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK)
+			info->control.rates[0].flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+		if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK)
+			info->control.rates[0].flags |= IEEE80211_TX_RC_GREEN_FIELD;
+	} else {
+		/* Check for invalid rates */
+		if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
+				((sband->band == NL80211_BAND_5GHZ) &&
+				 (rate_idx < IWL_FIRST_OFDM_RATE)))
+			rate_idx = rate_lowest_index(sband, sta);
+		/* On valid 5 GHz rate, adjust index */
+		else if (sband->band == NL80211_BAND_5GHZ)
+			rate_idx -= IWL_FIRST_OFDM_RATE;
+		info->control.rates[0].flags = 0;
+	}
+	info->control.rates[0].idx = rate_idx;
+	info->control.rates[0].count = 1;
+}
+
+static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
+			  gfp_t gfp)
+{
+	struct iwl_station_priv *sta_priv =
+		(struct iwl_station_priv *)sta->drv_priv;
+	struct iwl_op_mode *op_mode __maybe_unused =
+			(struct iwl_op_mode *)priv_rate;
+	struct iwl_priv *priv __maybe_unused = IWL_OP_MODE_GET_DVM(op_mode);
+
+	IWL_DEBUG_RATE(priv, "create station rate scale window\n");
+
+	return &sta_priv->lq_sta;
+}
+
+/*
+ * Called after adding a new station to initialize rate scaling
+ */
+void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_id)
+{
+	int i, j;
+	struct ieee80211_hw *hw = priv->hw;
+	struct ieee80211_conf *conf = &priv->hw->conf;
+	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+	struct iwl_station_priv *sta_priv;
+	struct iwl_lq_sta *lq_sta;
+	struct ieee80211_supported_band *sband;
+	unsigned long supp; /* must be unsigned long for for_each_set_bit */
+
+	sta_priv = (struct iwl_station_priv *) sta->drv_priv;
+	lq_sta = &sta_priv->lq_sta;
+	sband = hw->wiphy->bands[conf->chandef.chan->band];
+
+	lq_sta->lq.sta_id = sta_id;
+
+	for (j = 0; j < LQ_SIZE; j++)
+		for (i = 0; i < IWL_RATE_COUNT; i++)
+			rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
+
+	lq_sta->flush_timer = 0;
+	lq_sta->supp_rates = sta->supp_rates[sband->band];
+
+	IWL_DEBUG_RATE(priv, "LQ: *** rate scale station global init for station %d ***\n",
+		       sta_id);
+	/* TODO: what is a good starting rate for STA? About middle? Maybe not
+	 * the lowest or the highest rate.. Could consider using RSSI from
+	 * previous packets? Need to have IEEE 802.1X auth succeed immediately
+	 * after assoc.. */
+
+	lq_sta->is_dup = 0;
+	lq_sta->max_rate_idx = -1;
+	lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
+	lq_sta->is_green = rs_use_green(sta);
+	lq_sta->band = sband->band;
+	/*
+	 * active legacy rates as per supported rates bitmap
+	 */
+	supp = sta->supp_rates[sband->band];
+	lq_sta->active_legacy_rate = 0;
+	for_each_set_bit(i, &supp, BITS_PER_LONG)
+		lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value);
+
+	/*
+	 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
+	 * supp_rates[] does not; shift to convert format, force 9 MBits off.
+	 */
+	lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
+	lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
+	lq_sta->active_siso_rate &= ~((u16)0x2);
+	lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
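+	/*
+	 * E.g. rx_mask[0] == 0xff (MCS 0-7) becomes 0x1ff after the
+	 * shift-and-merge, 0x1fd once the 9M bit is cleared, and is then
+	 * shifted up past the CCK slots by IWL_FIRST_OFDM_RATE.
+	 */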
+
+	/* Same here */
+	lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
+	lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
+	lq_sta->active_mimo2_rate &= ~((u16)0x2);
+	lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
+
+	lq_sta->active_mimo3_rate = ht_cap->mcs.rx_mask[2] << 1;
+	lq_sta->active_mimo3_rate |= ht_cap->mcs.rx_mask[2] & 0x1;
+	lq_sta->active_mimo3_rate &= ~((u16)0x2);
+	lq_sta->active_mimo3_rate <<= IWL_FIRST_OFDM_RATE;
+
+	IWL_DEBUG_RATE(priv, "SISO-RATE=%X MIMO2-RATE=%X MIMO3-RATE=%X\n",
+		     lq_sta->active_siso_rate,
+		     lq_sta->active_mimo2_rate,
+		     lq_sta->active_mimo3_rate);
+
+	/* These values will be overridden later */
+	lq_sta->lq.general_params.single_stream_ant_msk =
+		first_antenna(priv->nvm_data->valid_tx_ant);
+	lq_sta->lq.general_params.dual_stream_ant_msk =
+		priv->nvm_data->valid_tx_ant &
+		~first_antenna(priv->nvm_data->valid_tx_ant);
+	if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
+		lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
+	} else if (num_of_ant(priv->nvm_data->valid_tx_ant) == 2) {
+		lq_sta->lq.general_params.dual_stream_ant_msk =
+			priv->nvm_data->valid_tx_ant;
+	}
+
+	/* By default, allow aggregation for all TIDs */
+	lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
+	lq_sta->drv = priv;
+
+	/* Set last_txrate_idx to lowest rate */
+	lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
+	if (sband->band == NL80211_BAND_5GHZ)
+		lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
+	lq_sta->is_agg = 0;
+#ifdef CONFIG_MAC80211_DEBUGFS
+	lq_sta->dbg_fixed_rate = 0;
+#endif
+
+	rs_initialize_lq(priv, sta, lq_sta);
+}
+
+static void rs_fill_link_cmd(struct iwl_priv *priv,
+			     struct iwl_lq_sta *lq_sta, u32 new_rate)
+{
+	struct iwl_scale_tbl_info tbl_type;
+	int index = 0;
+	int rate_idx;
+	int repeat_rate = 0;
+	u8 ant_toggle_cnt = 0;
+	u8 use_ht_possible = 1;
+	u8 valid_tx_ant = 0;
+	struct iwl_station_priv *sta_priv =
+		container_of(lq_sta, struct iwl_station_priv, lq_sta);
+	struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
+
+	/* Override starting rate (index 0) if needed for debug purposes */
+	rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+
+	/* Interpret new_rate (rate_n_flags) */
+	rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
+				  &tbl_type, &rate_idx);
+
+	if (priv && priv->bt_full_concurrent) {
+		/* 1x1 only */
+		tbl_type.ant_type =
+			first_antenna(priv->nvm_data->valid_tx_ant);
+	}
+
+	/* How many times should we repeat the initial rate? */
+	if (is_legacy(tbl_type.lq_type)) {
+		ant_toggle_cnt = 1;
+		repeat_rate = IWL_NUMBER_TRY;
+	} else {
+		repeat_rate = min(IWL_HT_NUMBER_TRY,
+				  LINK_QUAL_AGG_DISABLE_START_DEF - 1);
+	}
+
+	lq_cmd->general_params.mimo_delimiter =
+			is_mimo(tbl_type.lq_type) ? 1 : 0;
+
+	/* Fill 1st table entry (index 0) */
+	lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
+
+	if (num_of_ant(tbl_type.ant_type) == 1) {
+		lq_cmd->general_params.single_stream_ant_msk =
+						tbl_type.ant_type;
+	} else if (num_of_ant(tbl_type.ant_type) == 2) {
+		lq_cmd->general_params.dual_stream_ant_msk =
+						tbl_type.ant_type;
+	} /* otherwise we don't modify the existing value */
+
+	index++;
+	repeat_rate--;
+	if (priv) {
+		if (priv->bt_full_concurrent)
+			valid_tx_ant = ANT_A;
+		else
+			valid_tx_ant = priv->nvm_data->valid_tx_ant;
+	}
+
+	/* Fill rest of rate table */
+	while (index < LINK_QUAL_MAX_RETRY_NUM) {
+		/* Repeat initial/next rate.
+		 * For legacy IWL_NUMBER_TRY == 1, this loop will not execute.
+		 * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */
+		while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) {
+			if (is_legacy(tbl_type.lq_type)) {
+				if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
+					ant_toggle_cnt++;
+				else if (priv &&
+					 rs_toggle_antenna(valid_tx_ant,
+							&new_rate, &tbl_type))
+					ant_toggle_cnt = 1;
+			}
+
+			/* Override next rate if needed for debug purposes */
+			rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+
+			/* Fill next table entry */
+			lq_cmd->rs_table[index].rate_n_flags =
+					cpu_to_le32(new_rate);
+			repeat_rate--;
+			index++;
+		}
+
+		rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type,
+						&rate_idx);
+
+		if (priv && priv->bt_full_concurrent) {
+			/* 1x1 only */
+			tbl_type.ant_type =
+			    first_antenna(priv->nvm_data->valid_tx_ant);
+		}
+
+		/* Indicate to uCode which entries might be MIMO.
+		 * If initial rate was MIMO, this will finally end up
+		 * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
+		if (is_mimo(tbl_type.lq_type))
+			lq_cmd->general_params.mimo_delimiter = index;
+
+		/* Get next rate */
+		new_rate = rs_get_lower_rate(lq_sta, &tbl_type, rate_idx,
+					     use_ht_possible);
+
+		/* How many times should we repeat the next rate? */
+		if (is_legacy(tbl_type.lq_type)) {
+			if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
+				ant_toggle_cnt++;
+			else if (priv &&
+				 rs_toggle_antenna(valid_tx_ant,
+						   &new_rate, &tbl_type))
+				ant_toggle_cnt = 1;
+
+			repeat_rate = IWL_NUMBER_TRY;
+		} else {
+			repeat_rate = IWL_HT_NUMBER_TRY;
+		}
+
+		/* Don't allow HT rates after next pass.
+		 * rs_get_lower_rate() will change type to LQ_A or LQ_G. */
+		use_ht_possible = 0;
+
+		/* Override next rate if needed for debug purposes */
+		rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+
+		/* Fill next table entry */
+		lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
+
+		index++;
+		repeat_rate--;
+	}
+
+	lq_cmd->agg_params.agg_frame_cnt_limit =
+		sta_priv->max_agg_bufsize ?: LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+	lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+
+	lq_cmd->agg_params.agg_time_limit =
+		cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+	/*
+	 * overwrite if needed, pass aggregation time limit
+	 * to uCode in uSec
+	 */
+	if (priv && priv->lib->bt_params &&
+	    priv->lib->bt_params->agg_time_limit &&
+	    priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
+		lq_cmd->agg_params.agg_time_limit =
+			cpu_to_le16(priv->lib->bt_params->agg_time_limit);
+}
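+
+/*
+ * A sketch of the rs_table this typically produces for an HT start rate
+ * (exact contents depend on the inputs): with IWL_HT_NUMBER_TRY == 3,
+ * entries 0-2 repeat the initial HT rate, each following group repeats
+ * the next lower rate from rs_get_lower_rate(), and once the type drops
+ * to legacy the antenna is toggled every NUM_TRY_BEFORE_ANT_TOGGLE
+ * entries until all LINK_QUAL_MAX_RETRY_NUM slots are filled.
+ */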
+
+static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+{
+	return hw->priv;
+}
+
+/* the rate scaling framework requires a free function to be implemented */
+static void rs_free(void *priv_rate)
+{
+}
+
+static void rs_free_sta(void *priv_r, struct ieee80211_sta *sta,
+			void *priv_sta)
+{
+	struct iwl_op_mode *op_mode __maybe_unused = priv_r;
+	struct iwl_priv *priv __maybe_unused = IWL_OP_MODE_GET_DVM(op_mode);
+
+	IWL_DEBUG_RATE(priv, "enter\n");
+	IWL_DEBUG_RATE(priv, "leave\n");
+}
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
+			     u32 *rate_n_flags, int index)
+{
+	struct iwl_priv *priv;
+	u8 valid_tx_ant;
+	u8 ant_sel_tx;
+
+	priv = lq_sta->drv;
+	valid_tx_ant = priv->nvm_data->valid_tx_ant;
+	if (lq_sta->dbg_fixed_rate) {
+		ant_sel_tx =
+		  ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
+		  >> RATE_MCS_ANT_POS);
+		if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
+			*rate_n_flags = lq_sta->dbg_fixed_rate;
+			IWL_DEBUG_RATE(priv, "Fixed rate ON\n");
+		} else {
+			lq_sta->dbg_fixed_rate = 0;
+			IWL_ERR(priv,
+			    "Invalid antenna selection 0x%X, Valid is 0x%X\n",
+			    ant_sel_tx, valid_tx_ant);
+			IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
+		}
+	} else {
+		IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
+	}
+}
+
+static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
+			const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct iwl_lq_sta *lq_sta = file->private_data;
+	struct iwl_priv *priv;
+	char buf[64];
+	size_t buf_size;
+	u32 parsed_rate;
+
+	priv = lq_sta->drv;
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+
+	if (sscanf(buf, "%x", &parsed_rate) == 1)
+		lq_sta->dbg_fixed_rate = parsed_rate;
+	else
+		lq_sta->dbg_fixed_rate = 0;
+
+	rs_program_fix_rate(priv, lq_sta);
+
+	return count;
+}
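+
+/*
+ * Usage sketch (the rate value below is hypothetical): fix this station
+ * to a given rate_n_flags by writing it in hex to the per-station
+ * debugfs file created in rs_add_debugfs(), e.g.
+ *
+ *   echo 0x4103 > rate_scale_table
+ *
+ * Input that does not parse as hex resets dbg_fixed_rate to 0 and
+ * normal rate scaling resumes.
+ */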
+
+static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
+			char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char *buff;
+	int desc = 0;
+	int i = 0;
+	int index = 0;
+	ssize_t ret;
+
+	struct iwl_lq_sta *lq_sta = file->private_data;
+	struct iwl_priv *priv;
+	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+
+	priv = lq_sta->drv;
+	buff = kmalloc(1024, GFP_KERNEL);
+	if (!buff)
+		return -ENOMEM;
+
+	desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
+	desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n",
+			lq_sta->total_failed, lq_sta->total_success,
+			lq_sta->active_legacy_rate);
+	desc += sprintf(buff+desc, "fixed rate 0x%X\n",
+			lq_sta->dbg_fixed_rate);
+	desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
+	    (priv->nvm_data->valid_tx_ant & ANT_A) ? "ANT_A," : "",
+	    (priv->nvm_data->valid_tx_ant & ANT_B) ? "ANT_B," : "",
+	    (priv->nvm_data->valid_tx_ant & ANT_C) ? "ANT_C" : "");
+	desc += sprintf(buff+desc, "lq type %s\n",
+	   (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
+	if (is_Ht(tbl->lq_type)) {
+		desc += sprintf(buff + desc, " %s",
+		   (is_siso(tbl->lq_type)) ? "SISO" :
+		   ((is_mimo2(tbl->lq_type)) ? "MIMO2" : "MIMO3"));
+		desc += sprintf(buff + desc, " %s",
+		   (tbl->is_ht40) ? "40MHz" : "20MHz");
+		desc += sprintf(buff + desc, " %s %s %s\n",
+		   (tbl->is_SGI) ? "SGI" : "",
+		   (lq_sta->is_green) ? "GF enabled" : "",
+		   (lq_sta->is_agg) ? "AGG on" : "");
+	}
+	desc += sprintf(buff+desc, "last tx rate=0x%X\n",
+		lq_sta->last_rate_n_flags);
+	desc += sprintf(buff+desc, "general:"
+		"flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n",
+		lq_sta->lq.general_params.flags,
+		lq_sta->lq.general_params.mimo_delimiter,
+		lq_sta->lq.general_params.single_stream_ant_msk,
+		lq_sta->lq.general_params.dual_stream_ant_msk);
+
+	desc += sprintf(buff+desc, "agg:"
+			"time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
+			le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit),
+			lq_sta->lq.agg_params.agg_dis_start_th,
+			lq_sta->lq.agg_params.agg_frame_cnt_limit);
+
+	desc += sprintf(buff+desc,
+			"Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
+			lq_sta->lq.general_params.start_rate_index[0],
+			lq_sta->lq.general_params.start_rate_index[1],
+			lq_sta->lq.general_params.start_rate_index[2],
+			lq_sta->lq.general_params.start_rate_index[3]);
+
+	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+		index = iwl_hwrate_to_plcp_idx(
+			le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags));
+		if (is_legacy(tbl->lq_type)) {
+			desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n",
+				i, le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
+				iwl_rate_mcs[index].mbps);
+		} else {
+			desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps (%s)\n",
+				i, le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
+				iwl_rate_mcs[index].mbps, iwl_rate_mcs[index].mcs);
+		}
+	}
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+	kfree(buff);
+	return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
+	.write = rs_sta_dbgfs_scale_table_write,
+	.read = rs_sta_dbgfs_scale_table_read,
+	.open = simple_open,
+	.llseek = default_llseek,
+};
+static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
+			char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char *buff;
+	int desc = 0;
+	int i, j;
+	ssize_t ret;
+
+	struct iwl_lq_sta *lq_sta = file->private_data;
+
+	buff = kmalloc(1024, GFP_KERNEL);
+	if (!buff)
+		return -ENOMEM;
+
+	for (i = 0; i < LQ_SIZE; i++) {
+		desc += sprintf(buff+desc,
+				"%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n"
+				"rate=0x%X\n",
+				lq_sta->active_tbl == i ? "*" : "x",
+				lq_sta->lq_info[i].lq_type,
+				lq_sta->lq_info[i].is_SGI,
+				lq_sta->lq_info[i].is_ht40,
+				lq_sta->lq_info[i].is_dup,
+				lq_sta->is_green,
+				lq_sta->lq_info[i].current_rate);
+		for (j = 0; j < IWL_RATE_COUNT; j++) {
+			desc += sprintf(buff+desc,
+				"counter=%d success=%d %%=%d\n",
+				lq_sta->lq_info[i].win[j].counter,
+				lq_sta->lq_info[i].win[j].success_counter,
+				lq_sta->lq_info[i].win[j].success_ratio);
+		}
+	}
+	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+	kfree(buff);
+	return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
+	.read = rs_sta_dbgfs_stats_table_read,
+	.open = simple_open,
+	.llseek = default_llseek,
+};
+
+static ssize_t rs_sta_dbgfs_rate_scale_data_read(struct file *file,
+			char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct iwl_lq_sta *lq_sta = file->private_data;
+	struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
+	char buff[120];
+	int desc = 0;
+
+	if (is_Ht(tbl->lq_type))
+		desc += sprintf(buff+desc,
+				"Bit Rate= %d Mb/s\n",
+				tbl->expected_tpt[lq_sta->last_txrate_idx]);
+	else
+		desc += sprintf(buff+desc,
+				"Bit Rate= %d Mb/s\n",
+				iwl_rates[lq_sta->last_txrate_idx].ieee >> 1);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+}
+
+static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
+	.read = rs_sta_dbgfs_rate_scale_data_read,
+	.open = simple_open,
+	.llseek = default_llseek,
+};
+
+static void rs_add_debugfs(void *priv, void *priv_sta,
+					struct dentry *dir)
+{
+	struct iwl_lq_sta *lq_sta = priv_sta;
+	lq_sta->rs_sta_dbgfs_scale_table_file =
+		debugfs_create_file("rate_scale_table", 0600, dir,
+				    lq_sta, &rs_sta_dbgfs_scale_table_ops);
+	lq_sta->rs_sta_dbgfs_stats_table_file =
+		debugfs_create_file("rate_stats_table", 0400, dir,
+				    lq_sta, &rs_sta_dbgfs_stats_table_ops);
+	lq_sta->rs_sta_dbgfs_rate_scale_data_file =
+		debugfs_create_file("rate_scale_data", 0400, dir,
+				    lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
+	lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
+		debugfs_create_u8("tx_agg_tid_enable", 0600, dir,
+				  &lq_sta->tx_agg_tid_en);
+
+}
+
+static void rs_remove_debugfs(void *priv, void *priv_sta)
+{
+	struct iwl_lq_sta *lq_sta = priv_sta;
+	debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
+	debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
+	debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
+	debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
+}
+#endif
+
+/*
+ * Initialization of rate scaling information is done by the driver after
+ * the station is added. Since mac80211 calls this function before a
+ * station is added, we ignore it.
+ */
+static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
+			      struct cfg80211_chan_def *chandef,
+			      struct ieee80211_sta *sta, void *priv_sta)
+{
+}
+
+static const struct rate_control_ops rs_ops = {
+	.name = RS_NAME,
+	.tx_status = rs_tx_status,
+	.get_rate = rs_get_rate,
+	.rate_init = rs_rate_init_stub,
+	.alloc = rs_alloc,
+	.free = rs_free,
+	.alloc_sta = rs_alloc_sta,
+	.free_sta = rs_free_sta,
+#ifdef CONFIG_MAC80211_DEBUGFS
+	.add_sta_debugfs = rs_add_debugfs,
+	.remove_sta_debugfs = rs_remove_debugfs,
+#endif
+};
+
+int iwlagn_rate_control_register(void)
+{
+	return ieee80211_rate_control_register(&rs_ops);
+}
+
+void iwlagn_rate_control_unregister(void)
+{
+	ieee80211_rate_control_unregister(&rs_ops);
+}
+
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.h b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h
new file mode 100644
index 0000000..50c1e95
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h
@@ -0,0 +1,426 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_agn_rs_h__
+#define __iwl_agn_rs_h__
+
+#include <net/mac80211.h>
+
+#include "iwl-config.h"
+
+#include "commands.h"
+
+struct iwl_rate_info {
+	u8 plcp;	/* uCode API:  IWL_RATE_6M_PLCP, etc. */
+	u8 plcp_siso;	/* uCode API:  IWL_RATE_SISO_6M_PLCP, etc. */
+	u8 plcp_mimo2;	/* uCode API:  IWL_RATE_MIMO2_6M_PLCP, etc. */
+	u8 plcp_mimo3;  /* uCode API:  IWL_RATE_MIMO3_6M_PLCP, etc. */
+	u8 ieee;	/* MAC header:  IWL_RATE_6M_IEEE, etc. */
+	u8 prev_ieee;    /* previous rate in IEEE speeds */
+	u8 next_ieee;    /* next rate in IEEE speeds */
+	u8 prev_rs;      /* previous rate used in rs algo */
+	u8 next_rs;      /* next rate used in rs algo */
+	u8 prev_rs_tgg;  /* previous rate used in TGG rs algo */
+	u8 next_rs_tgg;  /* next rate used in TGG rs algo */
+};
+
+/*
+ * These serve as indexes into
+ * struct iwl_rate_info iwl_rates[IWL_RATE_COUNT];
+ */
+enum {
+	IWL_RATE_1M_INDEX = 0,
+	IWL_RATE_2M_INDEX,
+	IWL_RATE_5M_INDEX,
+	IWL_RATE_11M_INDEX,
+	IWL_RATE_6M_INDEX,
+	IWL_RATE_9M_INDEX,
+	IWL_RATE_12M_INDEX,
+	IWL_RATE_18M_INDEX,
+	IWL_RATE_24M_INDEX,
+	IWL_RATE_36M_INDEX,
+	IWL_RATE_48M_INDEX,
+	IWL_RATE_54M_INDEX,
+	IWL_RATE_60M_INDEX,
+	IWL_RATE_COUNT, /*FIXME:RS:change to IWL_RATE_INDEX_COUNT,*/
+	IWL_RATE_COUNT_LEGACY = IWL_RATE_COUNT - 1,	/* Excluding 60M */
+	IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
+	IWL_RATE_INVALID = IWL_RATE_COUNT,
+};
+
+enum {
+	IWL_RATE_6M_INDEX_TABLE = 0,
+	IWL_RATE_9M_INDEX_TABLE,
+	IWL_RATE_12M_INDEX_TABLE,
+	IWL_RATE_18M_INDEX_TABLE,
+	IWL_RATE_24M_INDEX_TABLE,
+	IWL_RATE_36M_INDEX_TABLE,
+	IWL_RATE_48M_INDEX_TABLE,
+	IWL_RATE_54M_INDEX_TABLE,
+	IWL_RATE_1M_INDEX_TABLE,
+	IWL_RATE_2M_INDEX_TABLE,
+	IWL_RATE_5M_INDEX_TABLE,
+	IWL_RATE_11M_INDEX_TABLE,
+	IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1,
+};
+
+enum {
+	IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
+	IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX,
+	IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
+	IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
+};
+
+/* #define vs. enum to keep from defaulting to 'large integer' */
+#define	IWL_RATE_6M_MASK   (1 << IWL_RATE_6M_INDEX)
+#define	IWL_RATE_9M_MASK   (1 << IWL_RATE_9M_INDEX)
+#define	IWL_RATE_12M_MASK  (1 << IWL_RATE_12M_INDEX)
+#define	IWL_RATE_18M_MASK  (1 << IWL_RATE_18M_INDEX)
+#define	IWL_RATE_24M_MASK  (1 << IWL_RATE_24M_INDEX)
+#define	IWL_RATE_36M_MASK  (1 << IWL_RATE_36M_INDEX)
+#define	IWL_RATE_48M_MASK  (1 << IWL_RATE_48M_INDEX)
+#define	IWL_RATE_54M_MASK  (1 << IWL_RATE_54M_INDEX)
+#define IWL_RATE_60M_MASK  (1 << IWL_RATE_60M_INDEX)
+#define	IWL_RATE_1M_MASK   (1 << IWL_RATE_1M_INDEX)
+#define	IWL_RATE_2M_MASK   (1 << IWL_RATE_2M_INDEX)
+#define	IWL_RATE_5M_MASK   (1 << IWL_RATE_5M_INDEX)
+#define	IWL_RATE_11M_MASK  (1 << IWL_RATE_11M_INDEX)
+
+/* uCode API values for legacy bit rates, both OFDM and CCK */
+enum {
+	IWL_RATE_6M_PLCP  = 13,
+	IWL_RATE_9M_PLCP  = 15,
+	IWL_RATE_12M_PLCP = 5,
+	IWL_RATE_18M_PLCP = 7,
+	IWL_RATE_24M_PLCP = 9,
+	IWL_RATE_36M_PLCP = 11,
+	IWL_RATE_48M_PLCP = 1,
+	IWL_RATE_54M_PLCP = 3,
+	IWL_RATE_60M_PLCP = 3,/*FIXME:RS:should be removed*/
+	IWL_RATE_1M_PLCP  = 10,
+	IWL_RATE_2M_PLCP  = 20,
+	IWL_RATE_5M_PLCP  = 55,
+	IWL_RATE_11M_PLCP = 110,
+	/*FIXME:RS:change to IWL_RATE_LEGACY_??M_PLCP */
+	/*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/
+};
+
+/* uCode API values for OFDM high-throughput (HT) bit rates */
+enum {
+	IWL_RATE_SISO_6M_PLCP = 0,
+	IWL_RATE_SISO_12M_PLCP = 1,
+	IWL_RATE_SISO_18M_PLCP = 2,
+	IWL_RATE_SISO_24M_PLCP = 3,
+	IWL_RATE_SISO_36M_PLCP = 4,
+	IWL_RATE_SISO_48M_PLCP = 5,
+	IWL_RATE_SISO_54M_PLCP = 6,
+	IWL_RATE_SISO_60M_PLCP = 7,
+	IWL_RATE_MIMO2_6M_PLCP  = 0x8,
+	IWL_RATE_MIMO2_12M_PLCP = 0x9,
+	IWL_RATE_MIMO2_18M_PLCP = 0xa,
+	IWL_RATE_MIMO2_24M_PLCP = 0xb,
+	IWL_RATE_MIMO2_36M_PLCP = 0xc,
+	IWL_RATE_MIMO2_48M_PLCP = 0xd,
+	IWL_RATE_MIMO2_54M_PLCP = 0xe,
+	IWL_RATE_MIMO2_60M_PLCP = 0xf,
+	IWL_RATE_MIMO3_6M_PLCP  = 0x10,
+	IWL_RATE_MIMO3_12M_PLCP = 0x11,
+	IWL_RATE_MIMO3_18M_PLCP = 0x12,
+	IWL_RATE_MIMO3_24M_PLCP = 0x13,
+	IWL_RATE_MIMO3_36M_PLCP = 0x14,
+	IWL_RATE_MIMO3_48M_PLCP = 0x15,
+	IWL_RATE_MIMO3_54M_PLCP = 0x16,
+	IWL_RATE_MIMO3_60M_PLCP = 0x17,
+	IWL_RATE_SISO_INVM_PLCP,
+	IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
+	IWL_RATE_MIMO3_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
+};
+
+/* MAC header values for bit rates */
+enum {
+	IWL_RATE_6M_IEEE  = 12,
+	IWL_RATE_9M_IEEE  = 18,
+	IWL_RATE_12M_IEEE = 24,
+	IWL_RATE_18M_IEEE = 36,
+	IWL_RATE_24M_IEEE = 48,
+	IWL_RATE_36M_IEEE = 72,
+	IWL_RATE_48M_IEEE = 96,
+	IWL_RATE_54M_IEEE = 108,
+	IWL_RATE_60M_IEEE = 120,
+	IWL_RATE_1M_IEEE  = 2,
+	IWL_RATE_2M_IEEE  = 4,
+	IWL_RATE_5M_IEEE  = 11,
+	IWL_RATE_11M_IEEE = 22,
+};
+
+#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
+
+#define IWL_INVALID_VALUE    -1
+
+#define IWL_MIN_RSSI_VAL                 -100
+#define IWL_MAX_RSSI_VAL                    0
+
+/* These values specify how many Tx frame attempts before
+ * searching for a new modulation mode */
+#define IWL_LEGACY_FAILURE_LIMIT	160
+#define IWL_LEGACY_SUCCESS_LIMIT	480
+#define IWL_LEGACY_TABLE_COUNT		160
+
+#define IWL_NONE_LEGACY_FAILURE_LIMIT	400
+#define IWL_NONE_LEGACY_SUCCESS_LIMIT	4500
+#define IWL_NONE_LEGACY_TABLE_COUNT	1500
+
+/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
+#define IWL_RS_GOOD_RATIO		12800	/* 100% */
+#define IWL_RATE_SCALE_SWITCH		10880	/*  85% */
+#define IWL_RATE_HIGH_TH		10880	/*  85% */
+#define IWL_RATE_INCREASE_TH		6400	/*  50% */
+#define IWL_RATE_DECREASE_TH		1920	/*  15% */
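+
+/*
+ * Worked example of the scale: a 50% success ratio is stored as
+ * 50 * 128 = 6400, exactly IWL_RATE_INCREASE_TH, and 85% as
+ * 85 * 128 = 10880, the IWL_RATE_HIGH_TH / IWL_RATE_SCALE_SWITCH value.
+ */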
+
+/* possible actions when in legacy mode */
+#define IWL_LEGACY_SWITCH_ANTENNA1      0
+#define IWL_LEGACY_SWITCH_ANTENNA2      1
+#define IWL_LEGACY_SWITCH_SISO          2
+#define IWL_LEGACY_SWITCH_MIMO2_AB      3
+#define IWL_LEGACY_SWITCH_MIMO2_AC      4
+#define IWL_LEGACY_SWITCH_MIMO2_BC      5
+#define IWL_LEGACY_SWITCH_MIMO3_ABC     6
+
+/* possible actions when in siso mode */
+#define IWL_SISO_SWITCH_ANTENNA1        0
+#define IWL_SISO_SWITCH_ANTENNA2        1
+#define IWL_SISO_SWITCH_MIMO2_AB        2
+#define IWL_SISO_SWITCH_MIMO2_AC        3
+#define IWL_SISO_SWITCH_MIMO2_BC        4
+#define IWL_SISO_SWITCH_GI              5
+#define IWL_SISO_SWITCH_MIMO3_ABC       6
+
+
+/* possible actions when in mimo mode */
+#define IWL_MIMO2_SWITCH_ANTENNA1       0
+#define IWL_MIMO2_SWITCH_ANTENNA2       1
+#define IWL_MIMO2_SWITCH_SISO_A         2
+#define IWL_MIMO2_SWITCH_SISO_B         3
+#define IWL_MIMO2_SWITCH_SISO_C         4
+#define IWL_MIMO2_SWITCH_GI             5
+#define IWL_MIMO2_SWITCH_MIMO3_ABC      6
+
+
+/* possible actions when in mimo3 mode */
+#define IWL_MIMO3_SWITCH_ANTENNA1       0
+#define IWL_MIMO3_SWITCH_ANTENNA2       1
+#define IWL_MIMO3_SWITCH_SISO_A         2
+#define IWL_MIMO3_SWITCH_SISO_B         3
+#define IWL_MIMO3_SWITCH_SISO_C         4
+#define IWL_MIMO3_SWITCH_MIMO2_AB       5
+#define IWL_MIMO3_SWITCH_MIMO2_AC       6
+#define IWL_MIMO3_SWITCH_MIMO2_BC       7
+#define IWL_MIMO3_SWITCH_GI             8
+
+
+#define IWL_MAX_11N_MIMO3_SEARCH IWL_MIMO3_SWITCH_GI
+#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_MIMO3_ABC
+
+/*FIXME:RS:add possible actions for MIMO3*/
+
+#define IWL_ACTION_LIMIT		3	/* # possible actions */
+
+#define LQ_SIZE		2	/* 2 mode tables:  "Active" and "Search" */
+
+/* load per tid defines for A-MPDU activation */
+#define IWL_AGG_TPT_THREHOLD	0
+#define IWL_AGG_LOAD_THRESHOLD	10
+#define IWL_AGG_ALL_TID		0xff
+#define TID_QUEUE_CELL_SPACING	50	/* ms */
+#define TID_QUEUE_MAX_SIZE	20
+#define TID_ROUND_VALUE		5	/* ms */
+
+#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
+#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
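+
+/*
+ * TIME_WRAP_AROUND assumes unsigned wrap-around arithmetic, e.g. for a
+ * hypothetical 8-bit counter, x = 250 and y = 5 give (0 - 250) + 5 = 11,
+ * the true elapsed time across the wrap.
+ */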
+
+extern const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT];
+
+enum iwl_table_type {
+	LQ_NONE,
+	LQ_G,		/* legacy types */
+	LQ_A,
+	LQ_SISO,	/* high-throughput types */
+	LQ_MIMO2,
+	LQ_MIMO3,
+	LQ_MAX,
+};
+
+#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
+#define is_siso(tbl) ((tbl) == LQ_SISO)
+#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
+#define is_mimo3(tbl) ((tbl) == LQ_MIMO3)
+#define is_mimo(tbl) (is_mimo2(tbl) || is_mimo3(tbl))
+#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
+#define is_a_band(tbl) ((tbl) == LQ_A)
+#define is_g_and(tbl) ((tbl) == LQ_G)
+
+#define IWL_MAX_MCS_DISPLAY_SIZE	12
+
+struct iwl_rate_mcs_info {
+	char	mbps[IWL_MAX_MCS_DISPLAY_SIZE];
+	char	mcs[IWL_MAX_MCS_DISPLAY_SIZE];
+};
+
+/**
+ * struct iwl_rate_scale_data -- tx success history for one rate
+ */
+struct iwl_rate_scale_data {
+	u64 data;		/* bitmap of successful frames */
+	s32 success_counter;	/* number of frames successful */
+	s32 success_ratio;	/* per-cent * 128  */
+	s32 counter;		/* number of frames attempted */
+	s32 average_tpt;	/* success ratio * expected throughput */
+	unsigned long stamp;
+};
+
+/**
+ * struct iwl_scale_tbl_info -- tx params and success history for all rates
+ *
+ * There are two of these in struct iwl_lq_sta,
+ * one for "active", and one for "search".
+ */
+struct iwl_scale_tbl_info {
+	enum iwl_table_type lq_type;
+	u8 ant_type;
+	u8 is_SGI;	/* 1 = short guard interval */
+	u8 is_ht40;	/* 1 = 40 MHz channel width */
+	u8 is_dup;	/* 1 = duplicated data streams */
+	u8 action;	/* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
+	u8 max_search;	/* maximum number of tables we can search */
+	const u16 *expected_tpt;	/* throughput metrics; expected_tpt_G, etc. */
+	u32 current_rate;  /* rate_n_flags, uCode API format */
+	struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
+};
+
+struct iwl_traffic_load {
+	unsigned long time_stamp;	/* age of the oldest statistics */
+	u32 packet_count[TID_QUEUE_MAX_SIZE];   /* packet count in this time
+						 * slice */
+	u32 total;			/* total num of packets during the
+					 * last TID_MAX_TIME_DIFF */
+	u8 queue_count;			/* number of queues that have
+					 * been used since the last cleanup */
+	u8 head;			/* start of the circular buffer */
+};
+
+/**
+ * struct iwl_lq_sta -- driver's rate scaling private structure
+ *
+ * Pointer to this gets passed back and forth between driver and mac80211.
+ */
+struct iwl_lq_sta {
+	u8 active_tbl;		/* index of active table, range 0-1 */
+	u8 enable_counter;	/* indicates HT mode */
+	u8 stay_in_tbl;		/* 1: disallow, 0: allow search for new mode */
+	u8 search_better_tbl;	/* 1: currently trying alternate mode */
+	s32 last_tpt;
+
+	/* The following determine when to search for a new mode */
+	u32 table_count_limit;
+	u32 max_failure_limit;	/* # failed frames before new search */
+	u32 max_success_limit;	/* # successful frames before new search */
+	u32 table_count;
+	u32 total_failed;	/* total failed frames, any/all rates */
+	u32 total_success;	/* total successful frames, any/all rates */
+	u64 flush_timer;	/* time staying in mode before new search */
+
+	u8 action_counter;	/* # mode-switch actions tried */
+	u8 is_green;
+	u8 is_dup;
+	enum nl80211_band band;
+
+	/* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
+	u32 supp_rates;
+	u16 active_legacy_rate;
+	u16 active_siso_rate;
+	u16 active_mimo2_rate;
+	u16 active_mimo3_rate;
+	s8 max_rate_idx;     /* Max rate set by user */
+	u8 missed_rate_counter;
+
+	struct iwl_link_quality_cmd lq;
+	struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
+	struct iwl_traffic_load load[IWL_MAX_TID_COUNT];
+	u8 tx_agg_tid_en;
+#ifdef CONFIG_MAC80211_DEBUGFS
+	struct dentry *rs_sta_dbgfs_scale_table_file;
+	struct dentry *rs_sta_dbgfs_stats_table_file;
+	struct dentry *rs_sta_dbgfs_rate_scale_data_file;
+	struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
+	u32 dbg_fixed_rate;
+#endif
+	struct iwl_priv *drv;
+
+	/* used to be in sta_info */
+	int last_txrate_idx;
+	/* last tx rate_n_flags */
+	u32 last_rate_n_flags;
+	/* packets destined for this STA are aggregated */
+	u8 is_agg;
+	/* BT traffic this sta was last updated in */
+	u8 last_bt_traffic;
+};
+
+static inline u8 first_antenna(u8 mask)
+{
+	if (mask & ANT_A)
+		return ANT_A;
+	if (mask & ANT_B)
+		return ANT_B;
+	return ANT_C;
+}
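+
+/*
+ * E.g. first_antenna(ANT_B | ANT_C) returns ANT_B; a mask containing
+ * neither ANT_A nor ANT_B falls through to ANT_C.
+ */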
+
+
+/* Initialize station's rate scaling information after adding station */
+void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta,
+		      u8 sta_id);
+
+/**
+ * iwl_rate_control_register - Register the rate control algorithm callbacks
+ *
+ * Since the rate control algorithm is hardware specific, there is no need
+ * or reason to place it as a stand alone module.  The driver can call
+ * iwl_rate_control_register in order to register the rate control callbacks
+ * with the mac80211 subsystem.  This should be performed prior to calling
+ * ieee80211_register_hw
+ *
+ */
+int iwlagn_rate_control_register(void);
+
+/**
+ * iwl_rate_control_unregister - Unregister the rate control callbacks
+ *
+ * This should be called after calling ieee80211_unregister_hw, but before
+ * the driver is unloaded.
+ */
+void iwlagn_rate_control_unregister(void);
+
+#endif /* __iwl_agn_rs_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
new file mode 100644
index 0000000..c942830
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
@@ -0,0 +1,1028 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <net/mac80211.h>
+#include <asm/unaligned.h>
+
+#include "iwl-trans.h"
+#include "iwl-io.h"
+#include "dev.h"
+#include "calib.h"
+#include "agn.h"
+
+/******************************************************************************
+ *
+ * Generic RX handler implementations
+ *
+ ******************************************************************************/
+
+static void iwlagn_rx_reply_error(struct iwl_priv *priv,
+				  struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_error_resp *err_resp = (void *)pkt->data;
+
+	IWL_ERR(priv, "Error Reply type 0x%08X cmd REPLY_ERROR (0x%02X) "
+		"seq 0x%04X ser 0x%08X\n",
+		le32_to_cpu(err_resp->error_type),
+		err_resp->cmd_id,
+		le16_to_cpu(err_resp->bad_cmd_seq_num),
+		le32_to_cpu(err_resp->error_info));
+}
+
+static void iwlagn_rx_csa(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_csa_notification *csa = (void *)pkt->data;
+	/*
+	 * MULTI-FIXME
+	 * See iwlagn_mac_channel_switch.
+	 */
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+	struct iwl_rxon_cmd *rxon = (void *)&ctx->active;
+
+	if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
+		return;
+
+	if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
+		rxon->channel = csa->channel;
+		ctx->staging.channel = csa->channel;
+		IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
+			      le16_to_cpu(csa->channel));
+		iwl_chswitch_done(priv, true);
+	} else {
+		IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
+			le16_to_cpu(csa->channel));
+		iwl_chswitch_done(priv, false);
+	}
+}
+
+
+static void iwlagn_rx_spectrum_measure_notif(struct iwl_priv *priv,
+					     struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_spectrum_notification *report = (void *)pkt->data;
+
+	if (!report->state) {
+		IWL_DEBUG_11H(priv,
+			"Spectrum Measure Notification: Start\n");
+		return;
+	}
+
+	memcpy(&priv->measure_report, report, sizeof(*report));
+	priv->measurement_status |= MEASUREMENT_READY;
+}
+
+static void iwlagn_rx_pm_sleep_notif(struct iwl_priv *priv,
+				     struct iwl_rx_cmd_buffer *rxb)
+{
+#ifdef CONFIG_IWLWIFI_DEBUG
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_sleep_notification *sleep = (void *)pkt->data;
+	IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
+		     sleep->pm_sleep_mode, sleep->pm_wakeup_src);
+#endif
+}
+
+static void iwlagn_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
+						struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	u32 __maybe_unused len = iwl_rx_packet_len(pkt);
+	IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
+			"notification for PM_DEBUG_STATISTIC_NOTIFIC:\n", len);
+	iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->data, len);
+}
+
+static void iwlagn_rx_beacon_notif(struct iwl_priv *priv,
+				   struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwlagn_beacon_notif *beacon = (void *)pkt->data;
+#ifdef CONFIG_IWLWIFI_DEBUG
+	u16 status = le16_to_cpu(beacon->beacon_notify_hdr.status.status);
+	u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
+
+	IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
+		"tsf:0x%.8x%.8x rate:%d\n",
+		status & TX_STATUS_MSK,
+		beacon->beacon_notify_hdr.failure_frame,
+		le32_to_cpu(beacon->ibss_mgr_status),
+		le32_to_cpu(beacon->high_tsf),
+		le32_to_cpu(beacon->low_tsf), rate);
+#endif
+
+	priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
+}
+
+/**
+ * iwlagn_good_plcp_health - checks the PLCP error rate
+ *
+ * When the PLCP error rate exceeds the threshold, reset the radio
+ * to improve the throughput.
+ */
+static bool iwlagn_good_plcp_health(struct iwl_priv *priv,
+				 struct statistics_rx_phy *cur_ofdm,
+				 struct statistics_rx_ht_phy *cur_ofdm_ht,
+				 unsigned int msecs)
+{
+	int delta;
+	int threshold = priv->plcp_delta_threshold;
+
+	if (threshold == IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
+		IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
+		return true;
+	}
+
+	delta = le32_to_cpu(cur_ofdm->plcp_err) -
+		le32_to_cpu(priv->statistics.rx_ofdm.plcp_err) +
+		le32_to_cpu(cur_ofdm_ht->plcp_err) -
+		le32_to_cpu(priv->statistics.rx_ofdm_ht.plcp_err);
+
+	/* Can be negative if firmware reset statistics */
+	if (delta <= 0)
+		return true;
+
+	if ((delta * 100 / msecs) > threshold) {
+		IWL_DEBUG_RADIO(priv,
+				"plcp health threshold %u delta %d msecs %u\n",
+				threshold, delta, msecs);
+		return false;
+	}
+
+	return true;
+}
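+
+/*
+ * Worked example (hypothetical numbers): with threshold == 50, 60 new
+ * PLCP errors over msecs == 100 give (60 * 100) / 100 = 60 > 50, so the
+ * check fails and the caller will request an RF reset.
+ */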
+
+int iwl_force_rf_reset(struct iwl_priv *priv, bool external)
+{
+	struct iwl_rf_reset *rf_reset;
+
+	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+		return -EAGAIN;
+
+	if (!iwl_is_any_associated(priv)) {
+		IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n");
+		return -ENOLINK;
+	}
+
+	rf_reset = &priv->rf_reset;
+	rf_reset->reset_request_count++;
+	if (!external && rf_reset->last_reset_jiffies &&
+	    time_after(rf_reset->last_reset_jiffies +
+		       IWL_DELAY_NEXT_FORCE_RF_RESET, jiffies)) {
+		IWL_DEBUG_INFO(priv, "RF reset rejected\n");
+		rf_reset->reset_reject_count++;
+		return -EAGAIN;
+	}
+	rf_reset->reset_success_count++;
+	rf_reset->last_reset_jiffies = jiffies;
+
+	/*
+	 * There is no easy and better way to force a radio reset;
+	 * the only known method is switching channels, which forces the
+	 * radio to reset and retune.
+	 * Use an internal short scan (single channel) operation to
+	 * achieve this objective.
+	 * The driver should reset the radio when a run of consecutive
+	 * missed beacons, or any other uCode error condition, is detected.
+	 */
+	IWL_DEBUG_INFO(priv, "perform radio reset.\n");
+	iwl_internal_short_hw_scan(priv);
+	return 0;
+}
+
+
+static void iwlagn_recover_from_statistics(struct iwl_priv *priv,
+				struct statistics_rx_phy *cur_ofdm,
+				struct statistics_rx_ht_phy *cur_ofdm_ht,
+				struct statistics_tx *tx,
+				unsigned long stamp)
+{
+	unsigned int msecs;
+
+	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+		return;
+
+	msecs = jiffies_to_msecs(stamp - priv->rx_statistics_jiffies);
+
+	/* Only run the recovery checks when we are associated */
+	if (!iwl_is_any_associated(priv))
+		return;
+
+	/* Do not check/recover when do not have enough statistics data */
+	if (msecs < 99)
+		return;
+
+	if (!iwlagn_good_plcp_health(priv, cur_ofdm, cur_ofdm_ht, msecs))
+		iwl_force_rf_reset(priv, false);
+}
+
+/* Calculate noise level, based on measurements during network silence just
+ *   before arriving beacon.  This measurement can be done only if we know
+ *   exactly when to expect beacons, therefore only when we're associated. */
+static void iwlagn_rx_calc_noise(struct iwl_priv *priv)
+{
+	struct statistics_rx_non_phy *rx_info;
+	int num_active_rx = 0;
+	int total_silence = 0;
+	int bcn_silence_a, bcn_silence_b, bcn_silence_c;
+	int last_rx_noise;
+
+	rx_info = &priv->statistics.rx_non_phy;
+
+	bcn_silence_a =
+		le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
+	bcn_silence_b =
+		le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
+	bcn_silence_c =
+		le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
+
+	if (bcn_silence_a) {
+		total_silence += bcn_silence_a;
+		num_active_rx++;
+	}
+	if (bcn_silence_b) {
+		total_silence += bcn_silence_b;
+		num_active_rx++;
+	}
+	if (bcn_silence_c) {
+		total_silence += bcn_silence_c;
+		num_active_rx++;
+	}
+
+	/* Average among active antennas */
+	if (num_active_rx)
+		last_rx_noise = (total_silence / num_active_rx) - 107;
+	else
+		last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
+
+	IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
+			bcn_silence_a, bcn_silence_b, bcn_silence_c,
+			last_rx_noise);
+}
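+
+/*
+ * Example (hypothetical readings): beacon silences of 45 and 50 on two
+ * active chains and 0 on the third give (45 + 50) / 2 - 107 = -60 dBm;
+ * chains reporting 0 are left out of the average.
+ */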
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+/*
+ *  based on the assumption of all statistics counter are in DWORD
+ *  FIXME: This function is for debugging, do not deal with
+ *  the case of counters roll-over.
+ */
+static void accum_stats(__le32 *prev, __le32 *cur, __le32 *delta,
+			__le32 *max_delta, __le32 *accum, int size)
+{
+	int i;
+
+	for (i = 0;
+	     i < size / sizeof(__le32);
+	     i++, prev++, cur++, delta++, max_delta++, accum++) {
+		if (le32_to_cpu(*cur) > le32_to_cpu(*prev)) {
+			*delta = cpu_to_le32(
+				le32_to_cpu(*cur) - le32_to_cpu(*prev));
+			le32_add_cpu(accum, le32_to_cpu(*delta));
+			if (le32_to_cpu(*delta) > le32_to_cpu(*max_delta))
+				*max_delta = *delta;
+		}
+	}
+}
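+
+/*
+ * Example (hypothetical counters): *prev == 100 and *cur == 150 yield
+ * *delta == 50, which is added to *accum and recorded in *max_delta if
+ * larger than the previous maximum; cur <= prev (e.g. a firmware
+ * counter reset) is silently skipped.
+ */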
+
+static void
+iwlagn_accumulative_statistics(struct iwl_priv *priv,
+			    struct statistics_general_common *common,
+			    struct statistics_rx_non_phy *rx_non_phy,
+			    struct statistics_rx_phy *rx_ofdm,
+			    struct statistics_rx_ht_phy *rx_ofdm_ht,
+			    struct statistics_rx_phy *rx_cck,
+			    struct statistics_tx *tx,
+			    struct statistics_bt_activity *bt_activity)
+{
+#define ACCUM(_name)	\
+	accum_stats((__le32 *)&priv->statistics._name,		\
+		    (__le32 *)_name,				\
+		    (__le32 *)&priv->delta_stats._name,		\
+		    (__le32 *)&priv->max_delta_stats._name,	\
+		    (__le32 *)&priv->accum_stats._name,		\
+		    sizeof(*_name));
+
+	ACCUM(common);
+	ACCUM(rx_non_phy);
+	ACCUM(rx_ofdm);
+	ACCUM(rx_ofdm_ht);
+	ACCUM(rx_cck);
+	ACCUM(tx);
+	if (bt_activity)
+		ACCUM(bt_activity);
+#undef ACCUM
+}
+#else
+static inline void
+iwlagn_accumulative_statistics(struct iwl_priv *priv,
+			    struct statistics_general_common *common,
+			    struct statistics_rx_non_phy *rx_non_phy,
+			    struct statistics_rx_phy *rx_ofdm,
+			    struct statistics_rx_ht_phy *rx_ofdm_ht,
+			    struct statistics_rx_phy *rx_cck,
+			    struct statistics_tx *tx,
+			    struct statistics_bt_activity *bt_activity)
+{
+}
+#endif
+
+static void iwlagn_rx_statistics(struct iwl_priv *priv,
+				 struct iwl_rx_cmd_buffer *rxb)
+{
+	unsigned long stamp = jiffies;
+	const int reg_recalib_period = 60;
+	int change;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	u32 len = iwl_rx_packet_payload_len(pkt);
+	__le32 *flag;
+	struct statistics_general_common *common;
+	struct statistics_rx_non_phy *rx_non_phy;
+	struct statistics_rx_phy *rx_ofdm;
+	struct statistics_rx_ht_phy *rx_ofdm_ht;
+	struct statistics_rx_phy *rx_cck;
+	struct statistics_tx *tx;
+	struct statistics_bt_activity *bt_activity;
+
+	IWL_DEBUG_RX(priv, "Statistics notification received (%d bytes).\n",
+		     len);
+
+	spin_lock(&priv->statistics.lock);
+
+	if (len == sizeof(struct iwl_bt_notif_statistics)) {
+		struct iwl_bt_notif_statistics *stats;
+		stats = (void *)&pkt->data;
+		flag = &stats->flag;
+		common = &stats->general.common;
+		rx_non_phy = &stats->rx.general.common;
+		rx_ofdm = &stats->rx.ofdm;
+		rx_ofdm_ht = &stats->rx.ofdm_ht;
+		rx_cck = &stats->rx.cck;
+		tx = &stats->tx;
+		bt_activity = &stats->general.activity;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+		/* handle this exception directly */
+		priv->statistics.num_bt_kills = stats->rx.general.num_bt_kills;
+		le32_add_cpu(&priv->statistics.accum_num_bt_kills,
+			     le32_to_cpu(stats->rx.general.num_bt_kills));
+#endif
+	} else if (len == sizeof(struct iwl_notif_statistics)) {
+		struct iwl_notif_statistics *stats;
+		stats = (void *)&pkt->data;
+		flag = &stats->flag;
+		common = &stats->general.common;
+		rx_non_phy = &stats->rx.general;
+		rx_ofdm = &stats->rx.ofdm;
+		rx_ofdm_ht = &stats->rx.ofdm_ht;
+		rx_cck = &stats->rx.cck;
+		tx = &stats->tx;
+		bt_activity = NULL;
+	} else {
+		WARN_ONCE(1, "len %d doesn't match BT (%zu) or normal (%zu)\n",
+			  len, sizeof(struct iwl_bt_notif_statistics),
+			  sizeof(struct iwl_notif_statistics));
+		spin_unlock(&priv->statistics.lock);
+		return;
+	}
+
+	change = common->temperature != priv->statistics.common.temperature ||
+		 (*flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
+		 (priv->statistics.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK);
+
+	iwlagn_accumulative_statistics(priv, common, rx_non_phy, rx_ofdm,
+				    rx_ofdm_ht, rx_cck, tx, bt_activity);
+
+	iwlagn_recover_from_statistics(priv, rx_ofdm, rx_ofdm_ht, tx, stamp);
+
+	priv->statistics.flag = *flag;
+	memcpy(&priv->statistics.common, common, sizeof(*common));
+	memcpy(&priv->statistics.rx_non_phy, rx_non_phy, sizeof(*rx_non_phy));
+	memcpy(&priv->statistics.rx_ofdm, rx_ofdm, sizeof(*rx_ofdm));
+	memcpy(&priv->statistics.rx_ofdm_ht, rx_ofdm_ht, sizeof(*rx_ofdm_ht));
+	memcpy(&priv->statistics.rx_cck, rx_cck, sizeof(*rx_cck));
+	memcpy(&priv->statistics.tx, tx, sizeof(*tx));
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	if (bt_activity)
+		memcpy(&priv->statistics.bt_activity, bt_activity,
+			sizeof(*bt_activity));
+#endif
+
+	priv->rx_statistics_jiffies = stamp;
+
+	set_bit(STATUS_STATISTICS, &priv->status);
+
+	/* Reschedule the statistics timer to occur in
+	 * reg_recalib_period seconds to ensure we get a
+	 * thermal update even if the uCode doesn't give
+	 * us one */
+	mod_timer(&priv->statistics_periodic, jiffies +
+		  msecs_to_jiffies(reg_recalib_period * 1000));
+
+	if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
+	    (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
+		iwlagn_rx_calc_noise(priv);
+		queue_work(priv->workqueue, &priv->run_time_calib_work);
+	}
+	if (priv->lib->temperature && change)
+		priv->lib->temperature(priv);
+
+	spin_unlock(&priv->statistics.lock);
+}
+
+static void iwlagn_rx_reply_statistics(struct iwl_priv *priv,
+				       struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_notif_statistics *stats = (void *)pkt->data;
+
+	if (le32_to_cpu(stats->flag) & UCODE_STATISTICS_CLEAR_MSK) {
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+		memset(&priv->accum_stats, 0,
+			sizeof(priv->accum_stats));
+		memset(&priv->delta_stats, 0,
+			sizeof(priv->delta_stats));
+		memset(&priv->max_delta_stats, 0,
+			sizeof(priv->max_delta_stats));
+#endif
+		IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
+	}
+
+	iwlagn_rx_statistics(priv, rxb);
+}
+
+/* Handle notification from uCode that card's power state is changing
+ * due to software, hardware, or critical temperature RFKILL */
+static void iwlagn_rx_card_state_notif(struct iwl_priv *priv,
+				       struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
+	u32 flags = le32_to_cpu(card_state_notif->flags);
+	unsigned long status = priv->status;
+
+	IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
+			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
+			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
+			  (flags & CT_CARD_DISABLED) ?
+			  "Reached" : "Not reached");
+
+	if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
+		     CT_CARD_DISABLED)) {
+
+		iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET,
+			    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+		iwl_write_direct32(priv->trans, HBUS_TARG_MBX_C,
+					HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
+
+		if (!(flags & RXON_CARD_DISABLED)) {
+			iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR,
+				    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+			iwl_write_direct32(priv->trans, HBUS_TARG_MBX_C,
+					HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
+		}
+		if (flags & CT_CARD_DISABLED)
+			iwl_tt_enter_ct_kill(priv);
+	}
+	if (!(flags & CT_CARD_DISABLED))
+		iwl_tt_exit_ct_kill(priv);
+
+	if (flags & HW_CARD_DISABLED)
+		set_bit(STATUS_RF_KILL_HW, &priv->status);
+	else
+		clear_bit(STATUS_RF_KILL_HW, &priv->status);
+
+
+	if (!(flags & RXON_CARD_DISABLED))
+		iwl_scan_cancel(priv);
+
+	if ((test_bit(STATUS_RF_KILL_HW, &status) !=
+	     test_bit(STATUS_RF_KILL_HW, &priv->status)))
+		wiphy_rfkill_set_hw_state(priv->hw->wiphy,
+			test_bit(STATUS_RF_KILL_HW, &priv->status));
+}
+
+static void iwlagn_rx_missed_beacon_notif(struct iwl_priv *priv,
+					  struct iwl_rx_cmd_buffer *rxb)
+
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_missed_beacon_notif *missed_beacon = (void *)pkt->data;
+
+	if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
+	    priv->missed_beacon_threshold) {
+		IWL_DEBUG_CALIB(priv,
+		    "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
+		    le32_to_cpu(missed_beacon->consecutive_missed_beacons),
+		    le32_to_cpu(missed_beacon->total_missed_becons),
+		    le32_to_cpu(missed_beacon->num_recvd_beacons),
+		    le32_to_cpu(missed_beacon->num_expected_beacons));
+		if (!test_bit(STATUS_SCANNING, &priv->status))
+			iwl_init_sensitivity(priv);
+	}
+}
+
+/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
+ * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
+static void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
+				   struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+	priv->last_phy_res_valid = true;
+	priv->ampdu_ref++;
+	memcpy(&priv->last_phy_res, pkt->data,
+	       sizeof(struct iwl_rx_phy_res));
+}
+
+/*
+ * returns non-zero if packet should be dropped
+ */
+static int iwlagn_set_decrypted_flag(struct iwl_priv *priv,
+				  struct ieee80211_hdr *hdr,
+				  u32 decrypt_res,
+				  struct ieee80211_rx_status *stats)
+{
+	u16 fc = le16_to_cpu(hdr->frame_control);
+
+	/*
+	 * All contexts have the same setting here due to it being
+	 * a module parameter, so OK to check any context.
+	 */
+	if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
+						RXON_FILTER_DIS_DECRYPT_MSK)
+		return 0;
+
+	if (!(fc & IEEE80211_FCTL_PROTECTED))
+		return 0;
+
+	IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
+	switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
+	case RX_RES_STATUS_SEC_TYPE_TKIP:
+		/* The uCode has got a bad phase 1 Key, pushes the packet.
+		 * Decryption will be done in SW. */
+		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+		    RX_RES_STATUS_BAD_KEY_TTAK)
+			break;
+
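+		/* fall through */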
+	case RX_RES_STATUS_SEC_TYPE_WEP:
+		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+		    RX_RES_STATUS_BAD_ICV_MIC) {
+			/* bad ICV, the packet is destroyed since the
+			 * decryption is inplace, drop it */
+			IWL_DEBUG_RX(priv, "Packet destroyed\n");
+			return -1;
+		}
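+		/* fall through */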
+	case RX_RES_STATUS_SEC_TYPE_CCMP:
+		if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+		    RX_RES_STATUS_DECRYPT_OK) {
+			IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n");
+			stats->flag |= RX_FLAG_DECRYPTED;
+		}
+		break;
+
+	default:
+		break;
+	}
+	return 0;
+}
+
+static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
+					struct ieee80211_hdr *hdr,
+					u16 len,
+					u32 ampdu_status,
+					struct iwl_rx_cmd_buffer *rxb,
+					struct ieee80211_rx_status *stats)
+{
+	struct sk_buff *skb;
+	__le16 fc = hdr->frame_control;
+	struct iwl_rxon_context *ctx;
+	unsigned int hdrlen, fraglen;
+
+	/* We only process data packets if the interface is open */
+	if (unlikely(!priv->is_open)) {
+		IWL_DEBUG_DROP_LIMIT(priv,
+		    "Dropping packet while interface is not open.\n");
+		return;
+	}
+
+	/* In case of HW accelerated crypto and bad decryption, drop */
+	if (!iwlwifi_mod_params.swcrypto &&
+	    iwlagn_set_decrypted_flag(priv, hdr, ampdu_status, stats))
+		return;
+
+	/* Don't use dev_alloc_skb(), we'll have enough headroom once
+	 * the ieee80211_hdr is pulled.
+	 */
+	skb = alloc_skb(128, GFP_ATOMIC);
+	if (!skb) {
+		IWL_ERR(priv, "alloc_skb failed\n");
+		return;
+	}
+	/* If frame is small enough to fit in skb->head, pull it completely.
+	 * If not, only pull ieee80211_hdr so that splice() or TCP coalesce
+	 * are more efficient.
+	 */
+	hdrlen = (len <= skb_tailroom(skb)) ? len : sizeof(*hdr);
+
+	skb_put_data(skb, hdr, hdrlen);
+	fraglen = len - hdrlen;
+
+	if (fraglen) {
+		int offset = (void *)hdr + hdrlen -
+			     rxb_addr(rxb) + rxb_offset(rxb);
+
+		skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
+				fraglen, rxb->truesize);
+	}
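+
+	/*
+	 * Example (hypothetical sizes): a 1500-byte frame copies only the
+	 * ieee80211_hdr into the 128-byte skb head and attaches the rest
+	 * as a page fragment, while a short management frame that fits in
+	 * the tailroom is pulled in completely.
+	 */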
+
+	/*
+	* Wake any queues that were stopped due to a passive channel tx
+	* failure. This can happen because the regulatory enforcement in
+	* the device waits for a beacon before allowing transmission,
+	* sometimes even after already having transmitted frames for the
+	* association because the new RXON may reset the information.
+	*/
+	if (unlikely(ieee80211_is_beacon(fc) && priv->passive_no_rx)) {
+		for_each_context(priv, ctx) {
+			if (!ether_addr_equal(hdr->addr3,
+					      ctx->active.bssid_addr))
+				continue;
+			iwlagn_lift_passive_no_rx(priv);
+		}
+	}
+
+	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
+
+	ieee80211_rx_napi(priv->hw, NULL, skb, priv->napi);
+}
+
+static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
+{
+	u32 decrypt_out = 0;
+
+	if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
+					RX_RES_STATUS_STATION_FOUND)
+		decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
+				RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
+
+	decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
+
+	/* packet was not encrypted */
+	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
+					RX_RES_STATUS_SEC_TYPE_NONE)
+		return decrypt_out;
+
+	/* packet was encrypted with unknown alg */
+	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
+					RX_RES_STATUS_SEC_TYPE_ERR)
+		return decrypt_out;
+
+	/* decryption was not done in HW */
+	if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
+					RX_MPDU_RES_STATUS_DEC_DONE_MSK)
+		return decrypt_out;
+
+	switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
+
+	case RX_RES_STATUS_SEC_TYPE_CCMP:
+		/* alg is CCM: check MIC only */
+		if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
+			/* Bad MIC */
+			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
+		else
+			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
+
+		break;
+
+	case RX_RES_STATUS_SEC_TYPE_TKIP:
+		if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
+			/* Bad TTAK */
+			decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
+			break;
+		}
+		/* fall through if TTAK OK */
+	default:
+		if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
+			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
+		else
+			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
+		break;
+	}
+
+	IWL_DEBUG_RX(priv, "decrypt_in:0x%x  decrypt_out = 0x%x\n",
+					decrypt_in, decrypt_out);
+
+	return decrypt_out;
+}
+
+/* Calc max signal level (dBm) among 3 possible receivers */
+static int iwlagn_calc_rssi(struct iwl_priv *priv,
+			     struct iwl_rx_phy_res *rx_resp)
+{
+	/* data from PHY/DSP regarding signal strength, etc.,
+	 *   contents are always there, not configurable by host
+	 */
+	struct iwlagn_non_cfg_phy *ncphy =
+		(struct iwlagn_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
+	u32 val, rssi_a, rssi_b, rssi_c, max_rssi;
+	u8 agc;
+
+	val  = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_AGC_IDX]);
+	agc = (val & IWLAGN_OFDM_AGC_MSK) >> IWLAGN_OFDM_AGC_BIT_POS;
+
+	/* Find max rssi among 3 possible receivers.
+	 * These values are measured by the digital signal processor (DSP).
+	 * They should stay fairly constant even as the signal strength varies,
+	 *   if the radio's automatic gain control (AGC) is working right.
+	 * AGC value (see below) will provide the "interesting" info.
+	 */
+	val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_RSSI_AB_IDX]);
+	rssi_a = (val & IWLAGN_OFDM_RSSI_INBAND_A_BITMSK) >>
+		IWLAGN_OFDM_RSSI_A_BIT_POS;
+	rssi_b = (val & IWLAGN_OFDM_RSSI_INBAND_B_BITMSK) >>
+		IWLAGN_OFDM_RSSI_B_BIT_POS;
+	val = le32_to_cpu(ncphy->non_cfg_phy[IWLAGN_RX_RES_RSSI_C_IDX]);
+	rssi_c = (val & IWLAGN_OFDM_RSSI_INBAND_C_BITMSK) >>
+		IWLAGN_OFDM_RSSI_C_BIT_POS;
+
+	max_rssi = max_t(u32, rssi_a, rssi_b);
+	max_rssi = max_t(u32, max_rssi, rssi_c);
+
+	IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
+		rssi_a, rssi_b, rssi_c, max_rssi, agc);
+
+	/* dBm = max_rssi dB - agc dB - constant.
+	 * Higher AGC (higher radio gain) means lower signal. */
+	return max_rssi - agc - IWLAGN_RSSI_OFFSET;
+}
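+
+/*
+ * Example (hypothetical readings, with IWLAGN_RSSI_OFFSET assumed to be
+ * the driver's fixed constant of 44): max_rssi == 50 dB and agc == 40 dB
+ * give 50 - 40 - 44 = -34 dBm.
+ */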
+
+/* Called for REPLY_RX_MPDU_CMD */
+static void iwlagn_rx_reply_rx(struct iwl_priv *priv,
+			       struct iwl_rx_cmd_buffer *rxb)
+{
+	struct ieee80211_hdr *header;
+	struct ieee80211_rx_status rx_status = {};
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_rx_phy_res *phy_res;
+	__le32 rx_pkt_status;
+	struct iwl_rx_mpdu_res_start *amsdu;
+	u32 len;
+	u32 ampdu_status;
+	u32 rate_n_flags;
+
+	if (!priv->last_phy_res_valid) {
+		IWL_ERR(priv, "MPDU frame without cached PHY data\n");
+		return;
+	}
+	phy_res = &priv->last_phy_res;
+	amsdu = (struct iwl_rx_mpdu_res_start *)pkt->data;
+	header = (struct ieee80211_hdr *)(pkt->data + sizeof(*amsdu));
+	len = le16_to_cpu(amsdu->byte_count);
+	rx_pkt_status = *(__le32 *)(pkt->data + sizeof(*amsdu) + len);
+	ampdu_status = iwlagn_translate_rx_status(priv,
+						  le32_to_cpu(rx_pkt_status));
+
+	if (unlikely(phy_res->cfg_phy_cnt > 20)) {
+		IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
+				phy_res->cfg_phy_cnt);
+		return;
+	}
+
+	if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
+	    !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
+		IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
+				le32_to_cpu(rx_pkt_status));
+		return;
+	}
+
+	/* This will be used in several places later */
+	rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
+
+	/* rx_status carries information about the packet to mac80211 */
+	rx_status.mactime = le64_to_cpu(phy_res->timestamp);
+	rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
+				NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+	rx_status.freq =
+		ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
+					       rx_status.band);
+	rx_status.rate_idx =
+		iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
+	rx_status.flag = 0;
+
+	/* TSF isn't reliable. In order to allow a smooth user experience,
+	 * this W/A doesn't propagate it to mac80211 */
+	/*rx_status.flag |= RX_FLAG_MACTIME_START;*/
+
+	priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
+
+	/* Find max signal strength (dBm) among 3 antenna/receiver chains */
+	rx_status.signal = iwlagn_calc_rssi(priv, phy_res);
+
+	IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
+		rx_status.signal, (unsigned long long)rx_status.mactime);
+
+	/*
+	 * "antenna number"
+	 *
+	 * It seems that the antenna field in the phy flags value
+	 * is actually a bit field. This is undefined by radiotap,
+	 * it wants an actual antenna number but I always get "7"
+	 * for most legacy frames I receive indicating that the
+	 * same frame was received on all three RX chains.
+	 *
+	 * I think this field should be removed in favor of a
+	 * new 802.11n radiotap field "RX chains" that is defined
+	 * as a bitmask.
+	 */
+	rx_status.antenna =
+		(le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
+		>> RX_RES_PHY_FLAGS_ANTENNA_POS;
+
+	/* set the preamble flag if appropriate */
+	if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
+		rx_status.enc_flags |= RX_ENC_FLAG_SHORTPRE;
+
+	if (phy_res->phy_flags & RX_RES_PHY_FLAGS_AGG_MSK) {
+		/*
+		 * We know which subframes of an A-MPDU belong
+		 * together since we get a single PHY response
+		 * from the firmware for all of them
+		 */
+		rx_status.flag |= RX_FLAG_AMPDU_DETAILS;
+		rx_status.ampdu_reference = priv->ampdu_ref;
+	}
+
+	/* Set up the HT phy flags */
+	if (rate_n_flags & RATE_MCS_HT_MSK)
+		rx_status.encoding = RX_ENC_HT;
+	if (rate_n_flags & RATE_MCS_HT40_MSK)
+		rx_status.bw = RATE_INFO_BW_40;
+	else
+		rx_status.bw = RATE_INFO_BW_20;
+	if (rate_n_flags & RATE_MCS_SGI_MSK)
+		rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI;
+	if (rate_n_flags & RATE_MCS_GF_MSK)
+		rx_status.enc_flags |= RX_ENC_FLAG_HT_GF;
+
+	iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status,
+				    rxb, &rx_status);
+}
+
+static void iwlagn_rx_noa_notification(struct iwl_priv *priv,
+				       struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_wipan_noa_data *new_data, *old_data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_wipan_noa_notification *noa_notif = (void *)pkt->data;
+
+	/* no condition -- we're in softirq */
+	old_data = rcu_dereference_protected(priv->noa_data, true);
+
+	if (noa_notif->noa_active) {
+		u32 len = le16_to_cpu(noa_notif->noa_attribute.length);
+		u32 copylen = len;
+
+		/* EID, len, OUI, subtype */
+		len += 1 + 1 + 3 + 1;
+		/* P2P id, P2P length */
+		len += 1 + 2;
+		copylen += 1 + 2;
+
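+		/*
+		 * The buffer built below is a vendor-specific IE:
+		 *   data[0]    EID
+		 *   data[1]    IE length (not counting EID and length)
+		 *   data[2..4] WFA OUI
+		 *   data[5]    WFA P2P OUI type
+		 *   data[6..]  P2P attribute (ID, length and NoA payload)
+		 */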
+		new_data = kmalloc(sizeof(*new_data) + len, GFP_ATOMIC);
+		if (new_data) {
+			new_data->length = len;
+			new_data->data[0] = WLAN_EID_VENDOR_SPECIFIC;
+			new_data->data[1] = len - 2; /* not counting EID, len */
+			new_data->data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
+			new_data->data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
+			new_data->data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
+			new_data->data[5] = WLAN_OUI_TYPE_WFA_P2P;
+			memcpy(&new_data->data[6], &noa_notif->noa_attribute,
+			       copylen);
+		}
+	} else
+		new_data = NULL;
+
+	rcu_assign_pointer(priv->noa_data, new_data);
+
+	if (old_data)
+		kfree_rcu(old_data, rcu_head);
+}
+
+/**
+ * iwl_setup_rx_handlers - Initialize Rx handler callbacks
+ *
+ * Setup the RX handlers for each of the reply types sent from the uCode
+ * to the host.
+ */
+void iwl_setup_rx_handlers(struct iwl_priv *priv)
+{
+	void (**handlers)(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb);
+
+	handlers = priv->rx_handlers;
+
+	handlers[REPLY_ERROR]			= iwlagn_rx_reply_error;
+	handlers[CHANNEL_SWITCH_NOTIFICATION]	= iwlagn_rx_csa;
+	handlers[SPECTRUM_MEASURE_NOTIFICATION]	=
+		iwlagn_rx_spectrum_measure_notif;
+	handlers[PM_SLEEP_NOTIFICATION]		= iwlagn_rx_pm_sleep_notif;
+	handlers[PM_DEBUG_STATISTIC_NOTIFIC]	=
+		iwlagn_rx_pm_debug_statistics_notif;
+	handlers[BEACON_NOTIFICATION]		= iwlagn_rx_beacon_notif;
+	handlers[REPLY_ADD_STA]			= iwl_add_sta_callback;
+
+	handlers[REPLY_WIPAN_NOA_NOTIFICATION]	= iwlagn_rx_noa_notification;
+
+	/*
+	 * The same handler is used for both the REPLY to a discrete
+	 * statistics request from the host as well as for the periodic
+	 * statistics notifications (after received beacons) from the uCode.
+	 */
+	handlers[REPLY_STATISTICS_CMD]		= iwlagn_rx_reply_statistics;
+	handlers[STATISTICS_NOTIFICATION]	= iwlagn_rx_statistics;
+
+	iwl_setup_rx_scan_handlers(priv);
+
+	handlers[CARD_STATE_NOTIFICATION]	= iwlagn_rx_card_state_notif;
+	handlers[MISSED_BEACONS_NOTIFICATION]	=
+		iwlagn_rx_missed_beacon_notif;
+
+	/* Rx handlers */
+	handlers[REPLY_RX_PHY_CMD]		= iwlagn_rx_reply_rx_phy;
+	handlers[REPLY_RX_MPDU_CMD]		= iwlagn_rx_reply_rx;
+
+	/* block ack */
+	handlers[REPLY_COMPRESSED_BA]		=
+		iwlagn_rx_reply_compressed_ba;
+
+	handlers[REPLY_TX]			= iwlagn_rx_reply_tx;
+
+	/* set up notification wait support */
+	iwl_notification_wait_init(&priv->notif_wait);
+
+	/* Set up BT Rx handlers */
+	if (priv->lib->bt_params)
+		iwlagn_bt_rx_handler_setup(priv);
+}
+
+void iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+		     struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
+
+	/*
+	 * Do the notification wait before RX handlers so
+	 * even if the RX handler consumes the RXB we have
+	 * access to it in the notification wait entry.
+	 */
+	iwl_notification_wait_notify(&priv->notif_wait, pkt);
+
+	/* Based on type of command response or notification,
+	 *   handle those that need handling via function in
+	 *   rx_handlers table.  See iwl_setup_rx_handlers() */
+	if (priv->rx_handlers[pkt->hdr.cmd]) {
+		priv->rx_handlers_stats[pkt->hdr.cmd]++;
+		priv->rx_handlers[pkt->hdr.cmd](priv, rxb);
+	} else {
+		/* No handling needed */
+		IWL_DEBUG_RX(priv, "No handler needed for %s, 0x%02x\n",
+			     iwl_get_cmd_string(priv->trans,
+						iwl_cmd_id(pkt->hdr.cmd,
+							   0, 0)),
+			     pkt->hdr.cmd);
+	}
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
new file mode 100644
index 0000000..8f3e558
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
@@ -0,0 +1,1567 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/etherdevice.h>
+#include "iwl-trans.h"
+#include "iwl-modparams.h"
+#include "dev.h"
+#include "agn.h"
+#include "calib.h"
+
+/*
+ * initialize rxon structure with default values from eeprom
+ */
+void iwl_connection_init_rx_config(struct iwl_priv *priv,
+				   struct iwl_rxon_context *ctx)
+{
+	memset(&ctx->staging, 0, sizeof(ctx->staging));
+
+	if (!ctx->vif) {
+		ctx->staging.dev_type = ctx->unused_devtype;
+	} else
+	switch (ctx->vif->type) {
+	case NL80211_IFTYPE_AP:
+		ctx->staging.dev_type = ctx->ap_devtype;
+		break;
+
+	case NL80211_IFTYPE_STATION:
+		ctx->staging.dev_type = ctx->station_devtype;
+		ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
+		break;
+
+	case NL80211_IFTYPE_ADHOC:
+		ctx->staging.dev_type = ctx->ibss_devtype;
+		ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
+		ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
+						  RXON_FILTER_ACCEPT_GRP_MSK;
+		break;
+
+	case NL80211_IFTYPE_MONITOR:
+		ctx->staging.dev_type = RXON_DEV_TYPE_SNIFFER;
+		break;
+
+	default:
+		IWL_ERR(priv, "Unsupported interface type %d\n",
+			ctx->vif->type);
+		break;
+	}
+
+#if 0
+	/* TODO:  Figure out when short_preamble would be set and cache from
+	 * that */
+	if (!hw_to_local(priv->hw)->short_preamble)
+		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+	else
+		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+#endif
+
+	ctx->staging.channel =
+		cpu_to_le16(priv->hw->conf.chandef.chan->hw_value);
+	priv->band = priv->hw->conf.chandef.chan->band;
+
+	iwl_set_flags_for_band(priv, ctx, priv->band, ctx->vif);
+
+	/* clear both MIX and PURE40 mode flag */
+	ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
+					RXON_FLG_CHANNEL_MODE_PURE_40);
+	if (ctx->vif)
+		memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);
+
+	ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
+	ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
+	ctx->staging.ofdm_ht_triple_stream_basic_rates = 0xff;
+}
+
+static int iwlagn_disable_bss(struct iwl_priv *priv,
+			      struct iwl_rxon_context *ctx,
+			      struct iwl_rxon_cmd *send)
+{
+	__le32 old_filter = send->filter_flags;
+	int ret;
+
+	send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+	ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd,
+				0, sizeof(*send), send);
+
+	send->filter_flags = old_filter;
+
+	if (ret)
+		IWL_DEBUG_QUIET_RFKILL(priv,
+			"Error clearing ASSOC_MSK on BSS (%d)\n", ret);
+
+	return ret;
+}
+
+static int iwlagn_disable_pan(struct iwl_priv *priv,
+			      struct iwl_rxon_context *ctx,
+			      struct iwl_rxon_cmd *send)
+{
+	struct iwl_notification_wait disable_wait;
+	__le32 old_filter = send->filter_flags;
+	u8 old_dev_type = send->dev_type;
+	int ret;
+	static const u16 deactivate_cmd[] = {
+		REPLY_WIPAN_DEACTIVATION_COMPLETE
+	};
+
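+	/*
+	 * PAN deactivation is asynchronous: register for the
+	 * deactivation-complete notification first, then send the
+	 * unassociated RXON and wait up to a second for the uCode
+	 * to confirm it.
+	 */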
+	iwl_init_notification_wait(&priv->notif_wait, &disable_wait,
+				   deactivate_cmd, ARRAY_SIZE(deactivate_cmd),
+				   NULL, NULL);
+
+	send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+	send->dev_type = RXON_DEV_TYPE_P2P;
+	ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd,
+				0, sizeof(*send), send);
+
+	send->filter_flags = old_filter;
+	send->dev_type = old_dev_type;
+
+	if (ret) {
+		IWL_ERR(priv, "Error disabling PAN (%d)\n", ret);
+		iwl_remove_notification(&priv->notif_wait, &disable_wait);
+	} else {
+		ret = iwl_wait_notification(&priv->notif_wait,
+					    &disable_wait, HZ);
+		if (ret)
+			IWL_ERR(priv, "Timed out waiting for PAN disable\n");
+	}
+
+	return ret;
+}
+
+static int iwlagn_disconn_pan(struct iwl_priv *priv,
+			      struct iwl_rxon_context *ctx,
+			      struct iwl_rxon_cmd *send)
+{
+	__le32 old_filter = send->filter_flags;
+	int ret;
+
+	send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+	ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, 0,
+				sizeof(*send), send);
+
+	send->filter_flags = old_filter;
+
+	return ret;
+}
+
+static void iwlagn_update_qos(struct iwl_priv *priv,
+			      struct iwl_rxon_context *ctx)
+{
+	int ret;
+
+	if (!ctx->is_active)
+		return;
+
+	ctx->qos_data.def_qos_parm.qos_flags = 0;
+
+	if (ctx->qos_data.qos_active)
+		ctx->qos_data.def_qos_parm.qos_flags |=
+			QOS_PARAM_FLG_UPDATE_EDCA_MSK;
+
+	if (ctx->ht.enabled)
+		ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
+
+	IWL_DEBUG_INFO(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
+		      ctx->qos_data.qos_active,
+		      ctx->qos_data.def_qos_parm.qos_flags);
+
+	ret = iwl_dvm_send_cmd_pdu(priv, ctx->qos_cmd, 0,
+			       sizeof(struct iwl_qosparam_cmd),
+			       &ctx->qos_data.def_qos_parm);
+	if (ret)
+		IWL_DEBUG_QUIET_RFKILL(priv, "Failed to update QoS\n");
+}
+
+static int iwlagn_update_beacon(struct iwl_priv *priv,
+				struct ieee80211_vif *vif)
+{
+	lockdep_assert_held(&priv->mutex);
+
+	dev_kfree_skb(priv->beacon_skb);
+	priv->beacon_skb = ieee80211_beacon_get(priv->hw, vif);
+	if (!priv->beacon_skb)
+		return -ENOMEM;
+	return iwlagn_send_beacon_cmd(priv);
+}
+
+static int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
+				  struct iwl_rxon_context *ctx)
+{
+	int ret = 0;
+	struct iwl_rxon_assoc_cmd rxon_assoc;
+	const struct iwl_rxon_cmd *rxon1 = &ctx->staging;
+	const struct iwl_rxon_cmd *rxon2 = &ctx->active;
+
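+	/*
+	 * Only the fields that RXON_ASSOC can update are compared here;
+	 * if none of them changed, there is nothing to resend.
+	 */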
+	if ((rxon1->flags == rxon2->flags) &&
+	    (rxon1->filter_flags == rxon2->filter_flags) &&
+	    (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
+	    (rxon1->ofdm_ht_single_stream_basic_rates ==
+	     rxon2->ofdm_ht_single_stream_basic_rates) &&
+	    (rxon1->ofdm_ht_dual_stream_basic_rates ==
+	     rxon2->ofdm_ht_dual_stream_basic_rates) &&
+	    (rxon1->ofdm_ht_triple_stream_basic_rates ==
+	     rxon2->ofdm_ht_triple_stream_basic_rates) &&
+	    (rxon1->acquisition_data == rxon2->acquisition_data) &&
+	    (rxon1->rx_chain == rxon2->rx_chain) &&
+	    (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
+		IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC.  Not resending.\n");
+		return 0;
+	}
+
+	rxon_assoc.flags = ctx->staging.flags;
+	rxon_assoc.filter_flags = ctx->staging.filter_flags;
+	rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
+	rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
+	rxon_assoc.reserved1 = 0;
+	rxon_assoc.reserved2 = 0;
+	rxon_assoc.reserved3 = 0;
+	rxon_assoc.ofdm_ht_single_stream_basic_rates =
+	    ctx->staging.ofdm_ht_single_stream_basic_rates;
+	rxon_assoc.ofdm_ht_dual_stream_basic_rates =
+	    ctx->staging.ofdm_ht_dual_stream_basic_rates;
+	rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
+	rxon_assoc.ofdm_ht_triple_stream_basic_rates =
+		 ctx->staging.ofdm_ht_triple_stream_basic_rates;
+	rxon_assoc.acquisition_data = ctx->staging.acquisition_data;
+
+	ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_assoc_cmd,
+				CMD_ASYNC, sizeof(rxon_assoc), &rxon_assoc);
+	return ret;
+}
+
+static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
+{
+	u16 new_val;
+	u16 beacon_factor;
+
+	/*
+	 * If mac80211 hasn't given us a beacon interval, program
+	 * the default into the device (not checking this here
+	 * would cause the adjustment below to return the maximum
+	 * value, which may break PAN.)
+	 */
+	if (!beacon_val)
+		return DEFAULT_BEACON_INTERVAL;
+
+	/*
+	 * If the beacon interval we obtained from the peer
+	 * is too large, we'll have to wake up more often
+	 * (and in IBSS case, we'll beacon too much)
+	 *
+	 * For example, if max_beacon_val is 4096, and the
+	 * requested beacon interval is 7000, we'll have to
+	 * use 3500 to be able to wake up on the beacons.
+	 *
+	 * This could badly influence beacon detection stats.
+	 */
+
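+	/*
+	 * Worked example: beacon_factor = (7000 + 4096) / 4096 = 2,
+	 * so new_val = 7000 / 2 = 3500, which fits below max_beacon_val.
+	 */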
+	beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
+	new_val = beacon_val / beacon_factor;
+
+	if (!new_val)
+		new_val = max_beacon_val;
+
+	return new_val;
+}
+
+static int iwl_send_rxon_timing(struct iwl_priv *priv,
+				struct iwl_rxon_context *ctx)
+{
+	u64 tsf;
+	s32 interval_tm, rem;
+	struct ieee80211_conf *conf = NULL;
+	u16 beacon_int;
+	struct ieee80211_vif *vif = ctx->vif;
+
+	conf = &priv->hw->conf;
+
+	lockdep_assert_held(&priv->mutex);
+
+	memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));
+
+	ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
+	ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);
+
+	beacon_int = vif ? vif->bss_conf.beacon_int : 0;
+
+	/*
+	 * TODO: For IBSS we need to get atim_window from mac80211,
+	 *	 for now just always use 0
+	 */
+	ctx->timing.atim_window = 0;
+
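+	/*
+	 * Both contexts must share a single beacon interval: a
+	 * beaconing PAN context inherits it from an associated BSS
+	 * context and vice versa; otherwise the requested interval
+	 * is adjusted to fit the uCode's maximum.
+	 */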
+	if (ctx->ctxid == IWL_RXON_CTX_PAN &&
+	    (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION) &&
+	    iwl_is_associated(priv, IWL_RXON_CTX_BSS) &&
+	    priv->contexts[IWL_RXON_CTX_BSS].vif &&
+	    priv->contexts[IWL_RXON_CTX_BSS].vif->bss_conf.beacon_int) {
+		ctx->timing.beacon_interval =
+			priv->contexts[IWL_RXON_CTX_BSS].timing.beacon_interval;
+		beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
+	} else if (ctx->ctxid == IWL_RXON_CTX_BSS &&
+		   iwl_is_associated(priv, IWL_RXON_CTX_PAN) &&
+		   priv->contexts[IWL_RXON_CTX_PAN].vif &&
+		   priv->contexts[IWL_RXON_CTX_PAN].vif->bss_conf.beacon_int &&
+		   (!iwl_is_associated_ctx(ctx) || !ctx->vif ||
+		    !ctx->vif->bss_conf.beacon_int)) {
+		ctx->timing.beacon_interval =
+			priv->contexts[IWL_RXON_CTX_PAN].timing.beacon_interval;
+		beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
+	} else {
+		beacon_int = iwl_adjust_beacon_interval(beacon_int,
+			IWL_MAX_UCODE_BEACON_INTERVAL * TIME_UNIT);
+		ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
+	}
+
+	ctx->beacon_int = beacon_int;
+
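+	/*
+	 * Program the beacon timer with the time remaining until the
+	 * next beacon: the interval minus (TSF modulo interval).
+	 */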
+	tsf = priv->timestamp; /* tsf is modified by do_div: copy it */
+	interval_tm = beacon_int * TIME_UNIT;
+	rem = do_div(tsf, interval_tm);
+	ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
+
+	ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;
+
+	IWL_DEBUG_ASSOC(priv,
+			"beacon interval %d beacon timer %d beacon tim %d\n",
+			le16_to_cpu(ctx->timing.beacon_interval),
+			le32_to_cpu(ctx->timing.beacon_init_val),
+			le16_to_cpu(ctx->timing.atim_window));
+
+	return iwl_dvm_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
+				0, sizeof(ctx->timing), &ctx->timing);
+}
+
+static int iwlagn_rxon_disconn(struct iwl_priv *priv,
+			       struct iwl_rxon_context *ctx)
+{
+	int ret;
+	struct iwl_rxon_cmd *active = (void *)&ctx->active;
+
+	if (ctx->ctxid == IWL_RXON_CTX_BSS) {
+		ret = iwlagn_disable_bss(priv, ctx, &ctx->staging);
+	} else {
+		ret = iwlagn_disable_pan(priv, ctx, &ctx->staging);
+		if (ret)
+			return ret;
+		if (ctx->vif) {
+			ret = iwl_send_rxon_timing(priv, ctx);
+			if (ret) {
+				IWL_ERR(priv, "Failed to send timing (%d)!\n", ret);
+				return ret;
+			}
+			ret = iwlagn_disconn_pan(priv, ctx, &ctx->staging);
+		}
+	}
+	if (ret)
+		return ret;
+
+	/*
+	 * Un-assoc RXON clears the station table and WEP
+	 * keys, so we have to restore those afterwards.
+	 */
+	iwl_clear_ucode_stations(priv, ctx);
+	/* update -- might need P2P now */
+	iwl_update_bcast_station(priv, ctx);
+	iwl_restore_stations(priv, ctx);
+	ret = iwl_restore_default_wep_keys(priv, ctx);
+	if (ret) {
+		IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
+		return ret;
+	}
+
+	memcpy(active, &ctx->staging, sizeof(*active));
+	return 0;
+}
+
+static int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
+{
+	int ret;
+	s8 prev_tx_power;
+	bool defer;
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+
+	if (priv->calib_disabled & IWL_TX_POWER_CALIB_DISABLED)
+		return 0;
+
+	lockdep_assert_held(&priv->mutex);
+
+	if (priv->tx_power_user_lmt == tx_power && !force)
+		return 0;
+
+	if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) {
+		IWL_WARN(priv,
+			 "Requested user TXPOWER %d below lower limit %d.\n",
+			 tx_power,
+			 IWLAGN_TX_POWER_TARGET_POWER_MIN);
+		return -EINVAL;
+	}
+
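+	/* max_tx_pwr_half_dbm is in half-dBm units, hence the rounding
+	 * division by two to compare in dBm */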
+	if (tx_power > DIV_ROUND_UP(priv->nvm_data->max_tx_pwr_half_dbm, 2)) {
+		IWL_WARN(priv,
+			"Requested user TXPOWER %d above upper limit %d.\n",
+			 tx_power, priv->nvm_data->max_tx_pwr_half_dbm);
+		return -EINVAL;
+	}
+
+	if (!iwl_is_ready_rf(priv))
+		return -EIO;
+
+	/* scan complete and commit_rxon use the tx_power_next value;
+	 * it always needs to be updated with the newest request */
+	priv->tx_power_next = tx_power;
+
+	/* do not set tx power when scanning or channel changing */
+	defer = test_bit(STATUS_SCANNING, &priv->status) ||
+		memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
+	if (defer && !force) {
+		IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
+		return 0;
+	}
+
+	prev_tx_power = priv->tx_power_user_lmt;
+	priv->tx_power_user_lmt = tx_power;
+
+	ret = iwlagn_send_tx_power(priv);
+
+	/* if setting tx_power failed, restore the original tx power */
+	if (ret) {
+		priv->tx_power_user_lmt = prev_tx_power;
+		priv->tx_power_next = prev_tx_power;
+	}
+	return ret;
+}
+
+static int iwlagn_rxon_connect(struct iwl_priv *priv,
+			       struct iwl_rxon_context *ctx)
+{
+	int ret;
+	struct iwl_rxon_cmd *active = (void *)&ctx->active;
+
+	/* RXON timing must be before associated RXON */
+	if (ctx->ctxid == IWL_RXON_CTX_BSS) {
+		ret = iwl_send_rxon_timing(priv, ctx);
+		if (ret) {
+			IWL_ERR(priv, "Failed to send timing (%d)!\n", ret);
+			return ret;
+		}
+	}
+	/* QoS info may be cleared by previous un-assoc RXON */
+	iwlagn_update_qos(priv, ctx);
+
+	/*
+	 * We'll run into this code path when beaconing is
+	 * enabled, but then we also need to send the beacon
+	 * to the device.
+	 */
+	if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_AP)) {
+		ret = iwlagn_update_beacon(priv, ctx->vif);
+		if (ret) {
+			IWL_ERR(priv,
+				"Error sending required beacon (%d)!\n",
+				ret);
+			return ret;
+		}
+	}
+
+	priv->start_calib = 0;
+	/*
+	 * Apply the new configuration.
+	 *
+	 * Associated RXON doesn't clear the station table in uCode,
+	 * so we don't need to restore stations etc. after this.
+	 */
+	ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, 0,
+		      sizeof(struct iwl_rxon_cmd), &ctx->staging);
+	if (ret) {
+		IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
+		return ret;
+	}
+	memcpy(active, &ctx->staging, sizeof(*active));
+
+	/* IBSS beacon needs to be sent after setting assoc */
+	if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_ADHOC))
+		if (iwlagn_update_beacon(priv, ctx->vif))
+			IWL_ERR(priv, "Error sending IBSS beacon\n");
+	iwl_init_sensitivity(priv);
+
+	/*
+	 * If we issue a new RXON command which requires a tune, then
+	 * we must send a new TXPOWER command or we won't be able to
+	 * Tx any frames.
+	 *
+	 * It's expected we set power here if channel is changing.
+	 */
+	ret = iwl_set_tx_power(priv, priv->tx_power_next, true);
+	if (ret) {
+		IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+int iwlagn_set_pan_params(struct iwl_priv *priv)
+{
+	struct iwl_wipan_params_cmd cmd;
+	struct iwl_rxon_context *ctx_bss, *ctx_pan;
+	int slot0 = 300, slot1 = 0;
+	int ret;
+
+	if (priv->valid_contexts == BIT(IWL_RXON_CTX_BSS))
+		return 0;
+
+	BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
+
+	lockdep_assert_held(&priv->mutex);
+
+	ctx_bss = &priv->contexts[IWL_RXON_CTX_BSS];
+	ctx_pan = &priv->contexts[IWL_RXON_CTX_PAN];
+
+	/*
+	 * If the PAN context is inactive, then we don't need
+	 * to update the PAN parameters: the last thing we'll
+	 * have done before it goes inactive is make the PAN
+	 * parameters WLAN-only.
+	 */
+	if (!ctx_pan->is_active)
+		return 0;
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	/* only 2 slots are currently allowed */
+	cmd.num_slots = 2;
+
+	cmd.slots[0].type = 0; /* BSS */
+	cmd.slots[1].type = 1; /* PAN */
+
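+	/*
+	 * Split the uCode's time between the two contexts. By default
+	 * each context gets half the beacon interval, but while
+	 * scanning, or while one context is still associating, the
+	 * split is skewed heavily towards the side that needs the
+	 * air time.
+	 */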
+	if (ctx_bss->vif && ctx_pan->vif) {
+		int bcnint = ctx_pan->beacon_int;
+		int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1;
+
+		/* should be set, but seems unused?? */
+		cmd.flags |= cpu_to_le16(IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE);
+
+		if (ctx_pan->vif->type == NL80211_IFTYPE_AP &&
+		    bcnint &&
+		    bcnint != ctx_bss->beacon_int) {
+			IWL_ERR(priv,
+				"beacon intervals don't match (%d, %d)\n",
+				ctx_bss->beacon_int, ctx_pan->beacon_int);
+		} else
+			bcnint = max_t(int, bcnint,
+				       ctx_bss->beacon_int);
+		if (!bcnint)
+			bcnint = DEFAULT_BEACON_INTERVAL;
+		slot0 = bcnint / 2;
+		slot1 = bcnint - slot0;
+
+		if (test_bit(STATUS_SCAN_HW, &priv->status) ||
+		    (!ctx_bss->vif->bss_conf.idle &&
+		     !ctx_bss->vif->bss_conf.assoc)) {
+			slot0 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
+			slot1 = IWL_MIN_SLOT_TIME;
+		} else if (!ctx_pan->vif->bss_conf.idle &&
+			   !ctx_pan->vif->bss_conf.assoc) {
+			slot1 = dtim * bcnint * 3 - IWL_MIN_SLOT_TIME;
+			slot0 = IWL_MIN_SLOT_TIME;
+		}
+	} else if (ctx_pan->vif) {
+		slot0 = 0;
+		slot1 = max_t(int, 1, ctx_pan->vif->bss_conf.dtim_period) *
+					ctx_pan->beacon_int;
+		slot1 = max_t(int, DEFAULT_BEACON_INTERVAL, slot1);
+
+		if (test_bit(STATUS_SCAN_HW, &priv->status)) {
+			slot0 = slot1 * 3 - IWL_MIN_SLOT_TIME;
+			slot1 = IWL_MIN_SLOT_TIME;
+		}
+	}
+
+	cmd.slots[0].width = cpu_to_le16(slot0);
+	cmd.slots[1].width = cpu_to_le16(slot1);
+
+	ret = iwl_dvm_send_cmd_pdu(priv, REPLY_WIPAN_PARAMS, 0,
+			sizeof(cmd), &cmd);
+	if (ret)
+		IWL_ERR(priv, "Error setting PAN parameters (%d)\n", ret);
+
+	return ret;
+}
+
+static void _iwl_set_rxon_ht(struct iwl_priv *priv,
+			     struct iwl_ht_config *ht_conf,
+			     struct iwl_rxon_context *ctx)
+{
+	struct iwl_rxon_cmd *rxon = &ctx->staging;
+
+	if (!ctx->ht.enabled) {
+		rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
+			RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
+			RXON_FLG_HT40_PROT_MSK |
+			RXON_FLG_HT_PROT_MSK);
+		return;
+	}
+
+	/* FIXME: if the definition of ht.protection changes, a translation
+	 * will be needed for rxon->flags
+	 */
+	rxon->flags |= cpu_to_le32(ctx->ht.protection <<
+				   RXON_FLG_HT_OPERATING_MODE_POS);
+
+	/* Set up channel bandwidth:
+	 * 20 MHz only, 20/40 mixed, or pure 40 if HT40 is allowed */
+	/* clear the HT channel mode before setting the mode */
+	rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
+			 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
+	if (iwl_is_ht40_tx_allowed(priv, ctx, NULL)) {
+		/* pure ht40 */
+		if (ctx->ht.protection ==
+		    IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
+			rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
+			/*
+			 * Note: control channel is opposite of extension
+			 * channel
+			 */
+			switch (ctx->ht.extension_chan_offset) {
+			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+				rxon->flags &=
+					~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+				break;
+			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+				rxon->flags |=
+					RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+				break;
+			}
+		} else {
+			/*
+			 * Note: control channel is opposite of extension
+			 * channel
+			 */
+			switch (ctx->ht.extension_chan_offset) {
+			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+				rxon->flags &=
+					~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
+				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
+				break;
+			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
+				break;
+			case IEEE80211_HT_PARAM_CHA_SEC_NONE:
+			default:
+				/*
+				 * channel location only valid if in Mixed
+				 * mode
+				 */
+				IWL_ERR(priv,
+					"invalid extension channel offset\n");
+				break;
+			}
+		}
+	} else {
+		rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
+	}
+
+	iwlagn_set_rxon_chain(priv, ctx);
+
+	IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode: 0x%X "
+			"extension channel offset 0x%x\n",
+			le32_to_cpu(rxon->flags), ctx->ht.protection,
+			ctx->ht.extension_chan_offset);
+}
+
+void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
+{
+	struct iwl_rxon_context *ctx;
+
+	for_each_context(priv, ctx)
+		_iwl_set_rxon_ht(priv, ht_conf, ctx);
+}
+
+/**
+ * iwl_set_rxon_channel - Set the band and channel values in staging RXON
+ * @ch: requested channel as a pointer to struct ieee80211_channel
+ *
+ * NOTE:  Does not commit to the hardware; it sets appropriate bit fields
+ * in the staging RXON flag structure based on the ch->band
+ */
+void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
+			 struct iwl_rxon_context *ctx)
+{
+	enum nl80211_band band = ch->band;
+	u16 channel = ch->hw_value;
+
+	if ((le16_to_cpu(ctx->staging.channel) == channel) &&
+	    (priv->band == band))
+		return;
+
+	ctx->staging.channel = cpu_to_le16(channel);
+	if (band == NL80211_BAND_5GHZ)
+		ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
+	else
+		ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
+
+	priv->band = band;
+
+	IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);
+}
+
+void iwl_set_flags_for_band(struct iwl_priv *priv,
+			    struct iwl_rxon_context *ctx,
+			    enum nl80211_band band,
+			    struct ieee80211_vif *vif)
+{
+	if (band == NL80211_BAND_5GHZ) {
+		ctx->staging.flags &=
+		    ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
+		      | RXON_FLG_CCK_MSK);
+		ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+	} else {
+		/* Copied from iwl_post_associate() */
+		if (vif && vif->bss_conf.use_short_slot)
+			ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+		else
+			ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+
+		ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
+		ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
+		ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
+	}
+}
+
+static void iwl_set_rxon_hwcrypto(struct iwl_priv *priv,
+				  struct iwl_rxon_context *ctx, int hw_decrypt)
+{
+	struct iwl_rxon_cmd *rxon = &ctx->staging;
+
+	if (hw_decrypt)
+		rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
+	else
+		rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
+}
+
+/* check that the RXON structure is valid */
+static int iwl_check_rxon_cmd(struct iwl_priv *priv,
+			      struct iwl_rxon_context *ctx)
+{
+	struct iwl_rxon_cmd *rxon = &ctx->staging;
+	u32 errors = 0;
+
+	if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
+		if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
+			IWL_WARN(priv, "check 2.4G: wrong narrow\n");
+			errors |= BIT(0);
+		}
+		if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
+			IWL_WARN(priv, "check 2.4G: wrong radar\n");
+			errors |= BIT(1);
+		}
+	} else {
+		if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
+			IWL_WARN(priv, "check 5.2G: not short slot!\n");
+			errors |= BIT(2);
+		}
+		if (rxon->flags & RXON_FLG_CCK_MSK) {
+			IWL_WARN(priv, "check 5.2G: CCK!\n");
+			errors |= BIT(3);
+		}
+	}
+	if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
+		IWL_WARN(priv, "mac/bssid mcast!\n");
+		errors |= BIT(4);
+	}
+
+	/* make sure basic rates 6Mbps and 1Mbps are supported */
+	if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
+	    (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
+		IWL_WARN(priv, "neither 1 nor 6 are basic\n");
+		errors |= BIT(5);
+	}
+
+	if (le16_to_cpu(rxon->assoc_id) > 2007) {
+		IWL_WARN(priv, "aid > 2007\n");
+		errors |= BIT(6);
+	}
+
+	if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
+			== (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
+		IWL_WARN(priv, "CCK and short slot\n");
+		errors |= BIT(7);
+	}
+
+	if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
+			== (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
+		IWL_WARN(priv, "CCK and auto detect\n");
+		errors |= BIT(8);
+	}
+
+	if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
+			    RXON_FLG_TGG_PROTECT_MSK)) ==
+			    RXON_FLG_TGG_PROTECT_MSK) {
+		IWL_WARN(priv, "TGg but no auto-detect\n");
+		errors |= BIT(9);
+	}
+
+	if (rxon->channel == 0) {
+		IWL_WARN(priv, "zero channel is invalid\n");
+		errors |= BIT(10);
+	}
+
+	WARN(errors, "Invalid RXON (%#x), channel %d",
+	     errors, le16_to_cpu(rxon->channel));
+
+	return errors ? -EINVAL : 0;
+}
+
+/**
+ * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
+ * @priv: staging_rxon is compared to active_rxon
+ *
+ * If the RXON structure is changing enough to require a new tune,
+ * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
+ * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
+ */
+static int iwl_full_rxon_required(struct iwl_priv *priv,
+				  struct iwl_rxon_context *ctx)
+{
+	const struct iwl_rxon_cmd *staging = &ctx->staging;
+	const struct iwl_rxon_cmd *active = &ctx->active;
+
+#define CHK(cond)							\
+	if ((cond)) {							\
+		IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n");	\
+		return 1;						\
+	}
+
+#define CHK_NEQ(c1, c2)						\
+	if ((c1) != (c2)) {					\
+		IWL_DEBUG_INFO(priv, "need full RXON - "	\
+			       #c1 " != " #c2 " - %d != %d\n",	\
+			       (c1), (c2));			\
+		return 1;					\
+	}
+
+	/* These items are only settable from the full RXON command */
+	CHK(!iwl_is_associated_ctx(ctx));
+	CHK(!ether_addr_equal(staging->bssid_addr, active->bssid_addr));
+	CHK(!ether_addr_equal(staging->node_addr, active->node_addr));
+	CHK(!ether_addr_equal(staging->wlap_bssid_addr,
+			      active->wlap_bssid_addr));
+	CHK_NEQ(staging->dev_type, active->dev_type);
+	CHK_NEQ(staging->channel, active->channel);
+	CHK_NEQ(staging->air_propagation, active->air_propagation);
+	CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
+		active->ofdm_ht_single_stream_basic_rates);
+	CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
+		active->ofdm_ht_dual_stream_basic_rates);
+	CHK_NEQ(staging->ofdm_ht_triple_stream_basic_rates,
+		active->ofdm_ht_triple_stream_basic_rates);
+	CHK_NEQ(staging->assoc_id, active->assoc_id);
+
+	/* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
+	 * be updated with the RXON_ASSOC command -- however only some
+	 * flag transitions are allowed using RXON_ASSOC */
+
+	/* A band switch requires a full RXON */
+	CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
+		active->flags & RXON_FLG_BAND_24G_MSK);
+
+	/* Check if we are switching association toggle */
+	CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
+		active->filter_flags & RXON_FILTER_ASSOC_MSK);
+
+#undef CHK
+#undef CHK_NEQ
+
+	return 0;
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+void iwl_print_rx_config_cmd(struct iwl_priv *priv,
+			     enum iwl_rxon_context_id ctxid)
+{
+	struct iwl_rxon_context *ctx = &priv->contexts[ctxid];
+	struct iwl_rxon_cmd *rxon = &ctx->staging;
+
+	IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
+	iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
+	IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n",
+			le16_to_cpu(rxon->channel));
+	IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n",
+			le32_to_cpu(rxon->flags));
+	IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
+			le32_to_cpu(rxon->filter_flags));
+	IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
+	IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
+			rxon->ofdm_basic_rates);
+	IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n",
+			rxon->cck_basic_rates);
+	IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
+	IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
+	IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n",
+			le16_to_cpu(rxon->assoc_id));
+}
+#endif
+
+static void iwl_calc_basic_rates(struct iwl_priv *priv,
+				 struct iwl_rxon_context *ctx)
+{
+	int lowest_present_ofdm = 100;
+	int lowest_present_cck = 100;
+	u8 cck = 0;
+	u8 ofdm = 0;
+
+	if (ctx->vif) {
+		struct ieee80211_supported_band *sband;
+		unsigned long basic = ctx->vif->bss_conf.basic_rates;
+		int i;
+
+		sband = priv->hw->wiphy->bands[priv->hw->conf.chandef.chan->band];
+
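+		/*
+		 * Translate mac80211's basic_rates bitmap (indices into
+		 * the band's bitrate table) into the device's separate
+		 * CCK and OFDM rate bitmaps.
+		 */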
+		for_each_set_bit(i, &basic, BITS_PER_LONG) {
+			int hw = sband->bitrates[i].hw_value;
+			if (hw >= IWL_FIRST_OFDM_RATE) {
+				ofdm |= BIT(hw - IWL_FIRST_OFDM_RATE);
+				if (lowest_present_ofdm > hw)
+					lowest_present_ofdm = hw;
+			} else {
+				BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
+
+				cck |= BIT(hw);
+				if (lowest_present_cck > hw)
+					lowest_present_cck = hw;
+			}
+		}
+	}
+
+	/*
+	 * Now we've got the basic rates as bitmaps in the ofdm and cck
+	 * variables. This isn't sufficient though, as there might not
+	 * be all the right rates in the bitmap. E.g. if the only basic
+	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
+	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
+	 *
+	 *    [...] a STA responding to a received frame shall transmit
+	 *    its Control Response frame [...] at the highest rate in the
+	 *    BSSBasicRateSet parameter that is less than or equal to the
+	 *    rate of the immediately previous frame in the frame exchange
+	 *    sequence ([...]) and that is of the same modulation class
+	 *    ([...]) as the received frame. If no rate contained in the
+	 *    BSSBasicRateSet parameter meets these conditions, then the
+	 *    control frame sent in response to a received frame shall be
+	 *    transmitted at the highest mandatory rate of the PHY that is
+	 *    less than or equal to the rate of the received frame, and
+	 *    that is of the same modulation class as the received frame.
+	 *
+	 * As a consequence, we need to add all mandatory rates that are
+	 * lower than all of the basic rates to these bitmaps.
+	 */
+
+	if (IWL_RATE_24M_INDEX < lowest_present_ofdm)
+		ofdm |= IWL_RATE_24M_MASK >> IWL_FIRST_OFDM_RATE;
+	if (IWL_RATE_12M_INDEX < lowest_present_ofdm)
+		ofdm |= IWL_RATE_12M_MASK >> IWL_FIRST_OFDM_RATE;
+	/* 6M already there or needed so always add */
+	ofdm |= IWL_RATE_6M_MASK >> IWL_FIRST_OFDM_RATE;
+
+	/*
+	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
+	 * Note, however:
+	 *  - if no CCK rates are basic, it must be ERP since there must
+	 *    be some basic rates at all, so they're OFDM => ERP PHY
+	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
+	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
+	 *  - if 5.5M is basic, 1M and 2M are mandatory
+	 *  - if 2M is basic, 1M is mandatory
+	 *  - if 1M is basic, that's the only valid ACK rate.
+	 * As a consequence, it's not as complicated as it sounds, just add
+	 * any lower rates to the ACK rate bitmap.
+	 */
+	if (IWL_RATE_11M_INDEX < lowest_present_cck)
+		cck |= IWL_RATE_11M_MASK >> IWL_FIRST_CCK_RATE;
+	if (IWL_RATE_5M_INDEX < lowest_present_cck)
+		cck |= IWL_RATE_5M_MASK >> IWL_FIRST_CCK_RATE;
+	if (IWL_RATE_2M_INDEX < lowest_present_cck)
+		cck |= IWL_RATE_2M_MASK >> IWL_FIRST_CCK_RATE;
+	/* 1M already there or needed so always add */
+	cck |= IWL_RATE_1M_MASK >> IWL_FIRST_CCK_RATE;
+
+	IWL_DEBUG_RATE(priv, "Set basic rates cck:0x%.2x ofdm:0x%.2x\n",
+		       cck, ofdm);
+
+	/* "basic_rates" is a misnomer here -- should be called ACK rates */
+	ctx->staging.cck_basic_rates = cck;
+	ctx->staging.ofdm_basic_rates = ofdm;
+}
+
+/**
+ * iwlagn_commit_rxon - commit staging_rxon to hardware
+ *
+ * The RXON command in staging_rxon is committed to the hardware and
+ * the active_rxon structure is updated with the new data.  This
+ * function correctly transitions out of the RXON_ASSOC_MSK state if
+ * a HW tune is required based on the RXON structure changes.
+ *
+ * The connect/disconnect flow should be as follows:
+ *
+ * 1. make sure to send the RXON command with the association bit unset
+ *	when not connected; this should include the channel and the band
+ *	of the candidate to be connected to
+ * 2. add the station before the RXON association with the AP
+ * 3. RXON_timing has to be sent before the RXON for the connection
+ * 4. full RXON command - associated bit set
+ * 5. use the RXON_ASSOC command to update any flag changes
+ */
+int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+	/* cast away the const for active_rxon in this function */
+	struct iwl_rxon_cmd *active = (void *)&ctx->active;
+	bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
+	int ret;
+
+	lockdep_assert_held(&priv->mutex);
+
+	if (!iwl_is_alive(priv))
+		return -EBUSY;
+
+	/* This function hardcodes a bunch of dual-mode assumptions */
+	BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
+
+	if (!ctx->is_active)
+		return 0;
+
+	/* always get timestamp with Rx frame */
+	ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
+
+	/* recalculate basic rates */
+	iwl_calc_basic_rates(priv, ctx);
+
+	/*
+	 * force CTS-to-self frame protection if RTS-CTS is not the
+	 * preferred aggregation protection method
+	 */
+	if (!priv->hw_params.use_rts_for_aggregation)
+		ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
+
+	if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
+	    !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
+		ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+	else
+		ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+
+	iwl_print_rx_config_cmd(priv, ctx->ctxid);
+	ret = iwl_check_rxon_cmd(priv, ctx);
+	if (ret) {
+		IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * on a commit_rxon request, abort any previous
+	 * channel switch if still in progress
+	 */
+	if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
+	    (priv->switch_channel != ctx->staging.channel)) {
+		IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
+			      le16_to_cpu(priv->switch_channel));
+		iwl_chswitch_done(priv, false);
+	}
+
+	/*
+	 * If we don't need to send a full RXON, we can use
+	 * iwl_rxon_assoc_cmd which is used to reconfigure filter
+	 * and other flags for the current radio configuration.
+	 */
+	if (!iwl_full_rxon_required(priv, ctx)) {
+		ret = iwlagn_send_rxon_assoc(priv, ctx);
+		if (ret) {
+			IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
+			return ret;
+		}
+
+		memcpy(active, &ctx->staging, sizeof(*active));
+		/*
+		 * We do not commit tx power settings while the channel is
+		 * changing; do it now if the settings have changed.
+		 */
+		iwl_set_tx_power(priv, priv->tx_power_next, false);
+
+		/* make sure we are in the right PS state */
+		iwl_power_update_mode(priv, true);
+
+		return 0;
+	}
+
+	iwl_set_rxon_hwcrypto(priv, ctx, !iwlwifi_mod_params.swcrypto);
+
+	IWL_DEBUG_INFO(priv,
+		       "Going to commit RXON\n"
+		       "  * with%s RXON_FILTER_ASSOC_MSK\n"
+		       "  * channel = %d\n"
+		       "  * bssid = %pM\n",
+		       (new_assoc ? "" : "out"),
+		       le16_to_cpu(ctx->staging.channel),
+		       ctx->staging.bssid_addr);
+
+	/*
+	 * Always clear associated first, but with the correct config.
+	 * This is required as for example station addition for the
+	 * AP station must be done after the BSSID is set to correctly
+	 * set up filters in the device.
+	 */
+	ret = iwlagn_rxon_disconn(priv, ctx);
+	if (ret)
+		return ret;
+
+	ret = iwlagn_set_pan_params(priv);
+	if (ret)
+		return ret;
+
+	if (new_assoc)
+		return iwlagn_rxon_connect(priv, ctx);
+
+	return 0;
+}
+
+void iwlagn_config_ht40(struct ieee80211_conf *conf,
+			struct iwl_rxon_context *ctx)
+{
+	if (conf_is_ht40_minus(conf)) {
+		ctx->ht.extension_chan_offset =
+			IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+		ctx->ht.is_40mhz = true;
+	} else if (conf_is_ht40_plus(conf)) {
+		ctx->ht.extension_chan_offset =
+			IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+		ctx->ht.is_40mhz = true;
+	} else {
+		ctx->ht.extension_chan_offset =
+			IEEE80211_HT_PARAM_CHA_SEC_NONE;
+		ctx->ht.is_40mhz = false;
+	}
+}
+
+int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
+{
+	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+	struct iwl_rxon_context *ctx;
+	struct ieee80211_conf *conf = &hw->conf;
+	struct ieee80211_channel *channel = conf->chandef.chan;
+	int ret = 0;
+
+	IWL_DEBUG_MAC80211(priv, "enter: changed %#x\n", changed);
+
+	mutex_lock(&priv->mutex);
+
+	if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
+		IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
+		goto out;
+	}
+
+	if (!iwl_is_ready(priv)) {
+		IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
+		goto out;
+	}
+
+	if (changed & (IEEE80211_CONF_CHANGE_SMPS |
+		       IEEE80211_CONF_CHANGE_CHANNEL)) {
+		/* mac80211 uses static for non-HT which is what we want */
+		priv->current_ht_config.smps = conf->smps_mode;
+
+		/*
+		 * Recalculate chain counts.
+		 *
+		 * If monitor mode is enabled then mac80211 will
+		 * set up the SM PS mode to OFF if an HT channel is
+		 * configured.
+		 */
+		for_each_context(priv, ctx)
+			iwlagn_set_rxon_chain(priv, ctx);
+	}
+
+	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+		for_each_context(priv, ctx) {
+			/* Configure HT40 channels */
+			if (ctx->ht.enabled != conf_is_ht(conf))
+				ctx->ht.enabled = conf_is_ht(conf);
+
+			if (ctx->ht.enabled) {
+				/* if HT40 is used, it should not change
+				 * after association except on channel switch */
+				if (!ctx->ht.is_40mhz ||
+						!iwl_is_associated_ctx(ctx))
+					iwlagn_config_ht40(conf, ctx);
+			} else
+				ctx->ht.is_40mhz = false;
+
+			/*
+			 * Default to no protection. Protection mode will
+			 * later be set from BSS config in iwl_ht_conf
+			 */
+			ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
+
+			/* if we are switching from HT to 2.4 GHz, clear
+			 * the flags of any HT-related info since 2.4 GHz
+			 * does not support HT */
+			if (le16_to_cpu(ctx->staging.channel) !=
+			    channel->hw_value)
+				ctx->staging.flags = 0;
+
+			iwl_set_rxon_channel(priv, channel, ctx);
+			iwl_set_rxon_ht(priv, &priv->current_ht_config);
+
+			iwl_set_flags_for_band(priv, ctx, channel->band,
+					       ctx->vif);
+		}
+
+		iwl_update_bcast_stations(priv);
+	}
+
+	if (changed & (IEEE80211_CONF_CHANGE_PS |
+			IEEE80211_CONF_CHANGE_IDLE)) {
+		ret = iwl_power_update_mode(priv, false);
+		if (ret)
+			IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
+	}
+
+	if (changed & IEEE80211_CONF_CHANGE_POWER) {
+		IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
+			priv->tx_power_user_lmt, conf->power_level);
+
+		iwl_set_tx_power(priv, conf->power_level, false);
+	}
+
+	for_each_context(priv, ctx) {
+		if (!memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
+			continue;
+		iwlagn_commit_rxon(priv, ctx);
+	}
+ out:
+	mutex_unlock(&priv->mutex);
+	IWL_DEBUG_MAC80211(priv, "leave\n");
+
+	return ret;
+}
+
+static void iwlagn_check_needed_chains(struct iwl_priv *priv,
+				       struct iwl_rxon_context *ctx,
+				       struct ieee80211_bss_conf *bss_conf)
+{
+	struct ieee80211_vif *vif = ctx->vif;
+	struct iwl_rxon_context *tmp;
+	struct ieee80211_sta *sta;
+	struct iwl_ht_config *ht_conf = &priv->current_ht_config;
+	struct ieee80211_sta_ht_cap *ht_cap;
+	bool need_multiple;
+
+	lockdep_assert_held(&priv->mutex);
+
+	switch (vif->type) {
+	case NL80211_IFTYPE_STATION:
+		rcu_read_lock();
+		sta = ieee80211_find_sta(vif, bss_conf->bssid);
+		if (!sta) {
+			/*
+			 * If at all, this can only happen through a race
+			 * when the AP disconnects us while we're still
+			 * setting up the connection; in that case mac80211
+			 * will soon tell us about it.
+			 */
+			need_multiple = false;
+			rcu_read_unlock();
+			break;
+		}
+
+		ht_cap = &sta->ht_cap;
+
+		need_multiple = true;
+
+		/*
+		 * If the peer advertises no support for receiving 2 and 3
+		 * stream MCS rates, it can't be transmitting them either.
+		 */
+		if (ht_cap->mcs.rx_mask[1] == 0 &&
+		    ht_cap->mcs.rx_mask[2] == 0) {
+			need_multiple = false;
+		} else if (!(ht_cap->mcs.tx_params &
+						IEEE80211_HT_MCS_TX_DEFINED)) {
+			/* If it can't TX MCS at all ... */
+			need_multiple = false;
+		} else if (ht_cap->mcs.tx_params &
+						IEEE80211_HT_MCS_TX_RX_DIFF) {
+			int maxstreams;
+
+			/*
+			 * But if it can receive them, it might still not
+			 * be able to transmit them, which is what we need
+			 * to check here -- so check the number of streams
+			 * it advertises for TX (if different from RX).
+			 */
+
+			maxstreams = (ht_cap->mcs.tx_params &
+				 IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK);
+			maxstreams >>=
+				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
+			maxstreams += 1;
+
+			if (maxstreams <= 1)
+				need_multiple = false;
+		}
+
+		rcu_read_unlock();
+		break;
+	case NL80211_IFTYPE_ADHOC:
+		/* for now, multiple chains are never needed in IBSS */
+		need_multiple = false;
+		break;
+	default:
+		/* only AP really */
+		need_multiple = true;
+		break;
+	}
+
+	ctx->ht_need_multiple_chains = need_multiple;
+
+	if (!need_multiple) {
+		/* check all contexts */
+		for_each_context(priv, tmp) {
+			if (!tmp->vif)
+				continue;
+			if (tmp->ht_need_multiple_chains) {
+				need_multiple = true;
+				break;
+			}
+		}
+	}
+
+	ht_conf->single_chain_sufficient = !need_multiple;
+}
+
+static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
+{
+	struct iwl_chain_noise_data *data = &priv->chain_noise_data;
+	int ret;
+
+	if (priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED)
+		return;
+
+	if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
+	    iwl_is_any_associated(priv)) {
+		struct iwl_calib_chain_noise_reset_cmd cmd;
+
+		/* clear data for chain noise calibration algorithm */
+		data->chain_noise_a = 0;
+		data->chain_noise_b = 0;
+		data->chain_noise_c = 0;
+		data->chain_signal_a = 0;
+		data->chain_signal_b = 0;
+		data->chain_signal_c = 0;
+		data->beacon_count = 0;
+
+		memset(&cmd, 0, sizeof(cmd));
+		iwl_set_calib_hdr(&cmd.hdr,
+			priv->phy_calib_chain_noise_reset_cmd);
+		ret = iwl_dvm_send_cmd_pdu(priv,
+					REPLY_PHY_CALIBRATION_CMD,
+					0, sizeof(cmd), &cmd);
+		if (ret)
+			IWL_ERR(priv,
+				"Could not send REPLY_PHY_CALIBRATION_CMD\n");
+		data->state = IWL_CHAIN_NOISE_ACCUMULATE;
+		IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
+	}
+}
+
+void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
+			     struct ieee80211_vif *vif,
+			     struct ieee80211_bss_conf *bss_conf,
+			     u32 changes)
+{
+	struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
+	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+	int ret;
+	bool force = false;
+
+	mutex_lock(&priv->mutex);
+
+	if (changes & BSS_CHANGED_IDLE && bss_conf->idle) {
+		/*
+		 * If we go idle, then clearly no "passive-no-rx"
+		 * workaround is needed any more, this is a reset.
+		 */
+		iwlagn_lift_passive_no_rx(priv);
+	}
+
+	if (unlikely(!iwl_is_ready(priv))) {
+		IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
+		mutex_unlock(&priv->mutex);
+		return;
+	}
+
+	if (unlikely(!ctx->vif)) {
+		IWL_DEBUG_MAC80211(priv, "leave - vif is NULL\n");
+		mutex_unlock(&priv->mutex);
+		return;
+	}
+
+	if (changes & BSS_CHANGED_BEACON_INT)
+		force = true;
+
+	if (changes & BSS_CHANGED_QOS) {
+		ctx->qos_data.qos_active = bss_conf->qos;
+		iwlagn_update_qos(priv, ctx);
+	}
+
+	ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
+	if (vif->bss_conf.use_short_preamble)
+		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+	else
+		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+	if (changes & BSS_CHANGED_ASSOC) {
+		if (bss_conf->assoc) {
+			priv->timestamp = bss_conf->sync_tsf;
+			ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+		} else {
+			ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+
+			if (ctx->ctxid == IWL_RXON_CTX_BSS)
+				priv->have_rekey_data = false;
+		}
+
+		iwlagn_bt_coex_rssi_monitor(priv);
+	}
+
+	if (ctx->ht.enabled) {
+		ctx->ht.protection = bss_conf->ht_operation_mode &
+					IEEE80211_HT_OP_MODE_PROTECTION;
+		ctx->ht.non_gf_sta_present = !!(bss_conf->ht_operation_mode &
+					IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+		iwlagn_check_needed_chains(priv, ctx, bss_conf);
+		iwl_set_rxon_ht(priv, &priv->current_ht_config);
+	}
+
+	iwlagn_set_rxon_chain(priv, ctx);
+
+	if (bss_conf->use_cts_prot && (priv->band != NL80211_BAND_5GHZ))
+		ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
+	else
+		ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
+
+	if (bss_conf->use_cts_prot)
+		ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
+	else
+		ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
+
+	memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);
+
+	if (vif->type == NL80211_IFTYPE_AP ||
+	    vif->type == NL80211_IFTYPE_ADHOC) {
+		if (vif->bss_conf.enable_beacon) {
+			ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+			priv->beacon_ctx = ctx;
+		} else {
+			ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+			priv->beacon_ctx = NULL;
+		}
+	}
+
+	/*
+	 * If the ucode decides to do beacon filtering before
+	 * association, it will lose beacons that are needed
+	 * before sending frames out on passive channels. This
+	 * causes association failures on those channels. Enable
+	 * receiving beacons in such cases.
+	 */
+
+	if (vif->type == NL80211_IFTYPE_STATION) {
+		if (!bss_conf->assoc)
+			ctx->staging.filter_flags |= RXON_FILTER_BCON_AWARE_MSK;
+		else
+			ctx->staging.filter_flags &=
+						    ~RXON_FILTER_BCON_AWARE_MSK;
+	}
+
+	if (force || memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
+		iwlagn_commit_rxon(priv, ctx);
+
+	if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) {
+		/*
+		 * The chain noise calibration will enable PM upon
+		 * completion. If calibration has already been run
+		 * then we need to enable power management here.
+		 */
+		if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
+			iwl_power_update_mode(priv, false);
+
+		/* Enable RX differential gain and sensitivity calibrations */
+		iwlagn_chain_noise_reset(priv);
+		priv->start_calib = 1;
+	}
+
+	if (changes & BSS_CHANGED_IBSS) {
+		ret = iwlagn_manage_ibss_station(priv, vif,
+						 bss_conf->ibss_joined);
+		if (ret)
+			IWL_ERR(priv, "failed to %s IBSS station %pM\n",
+				bss_conf->ibss_joined ? "add" : "remove",
+				bss_conf->bssid);
+	}
+
+	if (changes & BSS_CHANGED_BEACON && priv->beacon_ctx == ctx) {
+		if (iwlagn_update_beacon(priv, vif))
+			IWL_ERR(priv, "Error updating beacon\n");
+	}
+
+	mutex_unlock(&priv->mutex);
+}
+
+void iwlagn_post_scan(struct iwl_priv *priv)
+{
+	struct iwl_rxon_context *ctx;
+
+	/*
+	 * We do not commit power settings while a scan is pending;
+	 * do it now if the settings have changed.
+	 */
+	iwl_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false);
+	iwl_set_tx_power(priv, priv->tx_power_next, false);
+
+	/*
+	 * Since setting the RXON may have been deferred while
+	 * performing the scan, fire one off if needed
+	 */
+	for_each_context(priv, ctx)
+		if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
+			iwlagn_commit_rxon(priv, ctx);
+
+	iwlagn_set_pan_params(priv);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
new file mode 100644
index 0000000..17e6a32
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
@@ -0,0 +1,1079 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+
+#include "dev.h"
+#include "agn.h"
+
+/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
+ * sending probe req.  This should be set long enough to hear probe responses
+ * from more than one AP.  */
+#define IWL_ACTIVE_DWELL_TIME_24    (30)       /* all times in msec */
+#define IWL_ACTIVE_DWELL_TIME_52    (20)
+
+#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3)
+#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2)
+
+/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
+ * Must be set longer than active dwell time.
+ * For the most reliable scan, set > AP beacon interval (typically 100msec). */
+#define IWL_PASSIVE_DWELL_TIME_24   (20)       /* all times in msec */
+#define IWL_PASSIVE_DWELL_TIME_52   (10)
+#define IWL_PASSIVE_DWELL_BASE      (100)
+#define IWL_CHANNEL_TUNE_TIME       5
+#define MAX_SCAN_CHANNEL	    50
+
+/* For reset radio, need minimal dwell time only */
+#define IWL_RADIO_RESET_DWELL_TIME	5
+
+static int iwl_send_scan_abort(struct iwl_priv *priv)
+{
+	int ret;
+	struct iwl_host_cmd cmd = {
+		.id = REPLY_SCAN_ABORT_CMD,
+		.flags = CMD_WANT_SKB,
+	};
+	__le32 *status;
+
+	/* Exit immediately with an error when the device is not ready
+	 * to receive the scan abort command or is not currently
+	 * performing a hardware scan */
+	if (!test_bit(STATUS_READY, &priv->status) ||
+	    !test_bit(STATUS_SCAN_HW, &priv->status) ||
+	    test_bit(STATUS_FW_ERROR, &priv->status))
+		return -EIO;
+
+	ret = iwl_dvm_send_cmd(priv, &cmd);
+	if (ret)
+		return ret;
+
+	status = (void *)cmd.resp_pkt->data;
+	if (*status != CAN_ABORT_STATUS) {
+		/* The scan abort will return 1 for success or
+		 * 2 for "failure".  A failure condition can be
+		 * due to simply not being in an active scan, which
+		 * can occur if we send the scan abort before the
+		 * microcode has notified us that a scan is
+		 * completed. */
+		IWL_DEBUG_SCAN(priv, "SCAN_ABORT ret %d.\n",
+			       le32_to_cpu(*status));
+		ret = -EIO;
+	}
+
+	iwl_free_resp(&cmd);
+	return ret;
+}
+
+static void iwl_complete_scan(struct iwl_priv *priv, bool aborted)
+{
+	struct cfg80211_scan_info info = {
+		.aborted = aborted,
+	};
+
+	/* check if scan was requested from mac80211 */
+	if (priv->scan_request) {
+		IWL_DEBUG_SCAN(priv, "Complete scan in mac80211\n");
+		ieee80211_scan_completed(priv->hw, &info);
+	}
+
+	priv->scan_type = IWL_SCAN_NORMAL;
+	priv->scan_vif = NULL;
+	priv->scan_request = NULL;
+}
+
+static void iwl_process_scan_complete(struct iwl_priv *priv)
+{
+	bool aborted;
+
+	lockdep_assert_held(&priv->mutex);
+
+	if (!test_and_clear_bit(STATUS_SCAN_COMPLETE, &priv->status))
+		return;
+
+	IWL_DEBUG_SCAN(priv, "Completed scan.\n");
+
+	cancel_delayed_work(&priv->scan_check);
+
+	aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status);
+	if (aborted)
+		IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n");
+
+	if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) {
+		IWL_DEBUG_SCAN(priv, "Scan already completed.\n");
+		goto out_settings;
+	}
+
+	if (priv->scan_type != IWL_SCAN_NORMAL && !aborted) {
+		int err;
+
+		/* Check if mac80211 requested scan during our internal scan */
+		if (priv->scan_request == NULL)
+			goto out_complete;
+
+		/* If so, request a new scan */
+		err = iwl_scan_initiate(priv, priv->scan_vif, IWL_SCAN_NORMAL,
+					priv->scan_request->channels[0]->band);
+		if (err) {
+			IWL_DEBUG_SCAN(priv,
+				"failed to initiate pending scan: %d\n", err);
+			aborted = true;
+			goto out_complete;
+		}
+
+		return;
+	}
+
+out_complete:
+	iwl_complete_scan(priv, aborted);
+
+out_settings:
+	/* Can we still talk to the firmware? */
+	if (!iwl_is_ready_rf(priv))
+		return;
+
+	iwlagn_post_scan(priv);
+}
+
+void iwl_force_scan_end(struct iwl_priv *priv)
+{
+	lockdep_assert_held(&priv->mutex);
+
+	if (!test_bit(STATUS_SCANNING, &priv->status)) {
+		IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n");
+		return;
+	}
+
+	IWL_DEBUG_SCAN(priv, "Forcing scan end\n");
+	clear_bit(STATUS_SCANNING, &priv->status);
+	clear_bit(STATUS_SCAN_HW, &priv->status);
+	clear_bit(STATUS_SCAN_ABORTING, &priv->status);
+	clear_bit(STATUS_SCAN_COMPLETE, &priv->status);
+	iwl_complete_scan(priv, true);
+}
+
+static void iwl_do_scan_abort(struct iwl_priv *priv)
+{
+	int ret;
+
+	lockdep_assert_held(&priv->mutex);
+
+	if (!test_bit(STATUS_SCANNING, &priv->status)) {
+		IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n");
+		return;
+	}
+
+	if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) {
+		IWL_DEBUG_SCAN(priv, "Scan abort in progress\n");
+		return;
+	}
+
+	ret = iwl_send_scan_abort(priv);
+	if (ret) {
+		IWL_DEBUG_SCAN(priv, "Send scan abort failed %d\n", ret);
+		iwl_force_scan_end(priv);
+	} else
+		IWL_DEBUG_SCAN(priv, "Successfully sent scan abort\n");
+}
+
+/**
+ * iwl_scan_cancel - Cancel any currently executing HW scan
+ */
+int iwl_scan_cancel(struct iwl_priv *priv)
+{
+	IWL_DEBUG_SCAN(priv, "Queuing abort scan\n");
+	queue_work(priv->workqueue, &priv->abort_scan);
+	return 0;
+}
+
+/**
+ * iwl_scan_cancel_timeout - Cancel any currently executing HW scan
+ * @ms: amount of time to wait (in milliseconds) for scan to abort
+ *
+ */
+void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(ms);
+
+	lockdep_assert_held(&priv->mutex);
+
+	IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n");
+
+	iwl_do_scan_abort(priv);
+
+	while (time_before_eq(jiffies, timeout)) {
+		if (!test_bit(STATUS_SCAN_HW, &priv->status))
+			goto finished;
+		msleep(20);
+	}
+
+	return;
+
+ finished:
+	/*
+	 * Now STATUS_SCAN_HW is clear. This means that the
+	 * device finished, but the background work is going
+	 * to execute at best as soon as we release the mutex.
+	 * Since we need to be able to issue a new scan right
+	 * after this function returns, run the complete here.
+	 * The STATUS_SCAN_COMPLETE bit will then be cleared
+	 * and prevent the background work from "completing"
+	 * a possible new scan.
+	 */
+	iwl_process_scan_complete(priv);
+}
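+
+/* Usage sketch (illustrative only): callers hold priv->mutex and give
+ * the firmware a bounded window to abort, e.g.
+ *
+ *	mutex_lock(&priv->mutex);
+ *	iwl_scan_cancel_timeout(priv, 200);
+ *	mutex_unlock(&priv->mutex);
+ *
+ * which matches how iwl_bg_abort_scan() below uses it. */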
+
+/* Service response to REPLY_SCAN_CMD (0x80) */
+static void iwl_rx_reply_scan(struct iwl_priv *priv,
+			      struct iwl_rx_cmd_buffer *rxb)
+{
+#ifdef CONFIG_IWLWIFI_DEBUG
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_scanreq_notification *notif = (void *)pkt->data;
+
+	IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status);
+#endif
+}
+
+/* Service SCAN_START_NOTIFICATION (0x82) */
+static void iwl_rx_scan_start_notif(struct iwl_priv *priv,
+				    struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_scanstart_notification *notif = (void *)pkt->data;
+
+	priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
+	IWL_DEBUG_SCAN(priv, "Scan start: "
+		       "%d [802.11%s] "
+		       "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
+		       notif->channel,
+		       notif->band ? "bg" : "a",
+		       le32_to_cpu(notif->tsf_high),
+		       le32_to_cpu(notif->tsf_low),
+		       notif->status, notif->beacon_timer);
+}
+
+/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
+static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
+				      struct iwl_rx_cmd_buffer *rxb)
+{
+#ifdef CONFIG_IWLWIFI_DEBUG
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_scanresults_notification *notif = (void *)pkt->data;
+
+	IWL_DEBUG_SCAN(priv, "Scan ch.res: "
+		       "%d [802.11%s] "
+		       "probe status: %u:%u "
+		       "(TSF: 0x%08X:%08X) - %d "
+		       "elapsed=%lu usec\n",
+		       notif->channel,
+		       notif->band ? "bg" : "a",
+		       notif->probe_status, notif->num_probe_not_sent,
+		       le32_to_cpu(notif->tsf_high),
+		       le32_to_cpu(notif->tsf_low),
+		       le32_to_cpu(notif->statistics[0]),
+		       le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
+#endif
+}
+
+/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
+static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
+				       struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_scancomplete_notification *scan_notif = (void *)pkt->data;
+
+	IWL_DEBUG_SCAN(priv, "Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
+		       scan_notif->scanned_channels,
+		       scan_notif->tsf_low,
+		       scan_notif->tsf_high, scan_notif->status);
+
+	IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
+		       (priv->scan_band == NL80211_BAND_2GHZ) ? "2.4" : "5.2",
+		       jiffies_to_msecs(jiffies - priv->scan_start));
+
+	/*
+	 * When aborting, we run the scan completed background work inline
+	 * and the background work must then do nothing. The SCAN_COMPLETE
+	 * bit helps implement that logic and thus needs to be set before
+	 * queueing the work. Also, since the scan abort waits for SCAN_HW
+	 * to clear, we need to set SCAN_COMPLETE before clearing SCAN_HW
+	 * to avoid a race there.
+	 */
+	set_bit(STATUS_SCAN_COMPLETE, &priv->status);
+	clear_bit(STATUS_SCAN_HW, &priv->status);
+	queue_work(priv->workqueue, &priv->scan_completed);
+
+	if (priv->iw_mode != NL80211_IFTYPE_ADHOC &&
+	    iwl_advanced_bt_coexist(priv) &&
+	    priv->bt_status != scan_notif->bt_status) {
+		if (scan_notif->bt_status) {
+			/* BT on */
+			if (!priv->bt_ch_announce)
+				priv->bt_traffic_load =
+					IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
+			/*
+			 * otherwise, no traffic load information provided
+			 * no changes made
+			 */
+		} else {
+			/* BT off */
+			priv->bt_traffic_load =
+				IWL_BT_COEX_TRAFFIC_LOAD_NONE;
+		}
+		priv->bt_status = scan_notif->bt_status;
+		queue_work(priv->workqueue,
+			   &priv->bt_traffic_change_work);
+	}
+}
+
+void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)
+{
+	/* scan handlers */
+	priv->rx_handlers[REPLY_SCAN_CMD] = iwl_rx_reply_scan;
+	priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl_rx_scan_start_notif;
+	priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
+					iwl_rx_scan_results_notif;
+	priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
+					iwl_rx_scan_complete_notif;
+}
+
+static u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
+				     enum nl80211_band band, u8 n_probes)
+{
+	if (band == NL80211_BAND_5GHZ)
+		return IWL_ACTIVE_DWELL_TIME_52 +
+			IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
+	else
+		return IWL_ACTIVE_DWELL_TIME_24 +
+			IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
+}
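+
+/* Worked example (illustrative only): on 5.2 GHz with two direct-probe
+ * SSIDs (n_probes == 2), the active dwell is IWL_ACTIVE_DWELL_TIME_52 +
+ * IWL_ACTIVE_DWELL_FACTOR_52GHZ * (2 + 1) = 20 + 2 * 3 = 26 msec. */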
+
+static u16 iwl_limit_dwell(struct iwl_priv *priv, u16 dwell_time)
+{
+	struct iwl_rxon_context *ctx;
+	int limits[NUM_IWL_RXON_CTX] = {};
+	int n_active = 0;
+	u16 limit;
+
+	BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
+
+	/*
+	 * If we're associated, we clamp the dwell time to 98%
+	 * of the beacon interval (minus 2 * channel tune time).
+	 * If both contexts are active, we have to restrict to
+	 * 1/2 of the minimum of them, because they might be in
+	 * lock-step with the time in between them only half of
+	 * what we'd have in each of them.
+	 */
+	for_each_context(priv, ctx) {
+		switch (ctx->staging.dev_type) {
+		case RXON_DEV_TYPE_P2P:
+			/* no timing constraints */
+			continue;
+		case RXON_DEV_TYPE_ESS:
+		default:
+			/* timing constraints if associated */
+			if (!iwl_is_associated_ctx(ctx))
+				continue;
+			break;
+		case RXON_DEV_TYPE_CP:
+		case RXON_DEV_TYPE_2STA:
+			/*
+			 * These seem to always have timers for TBTT
+			 * active in uCode even when not associated yet.
+			 */
+			break;
+		}
+
+		limits[n_active++] = ctx->beacon_int ?: IWL_PASSIVE_DWELL_BASE;
+	}
+
+	switch (n_active) {
+	case 0:
+		return dwell_time;
+	case 2:
+		limit = (limits[1] * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
+		limit /= 2;
+		dwell_time = min(limit, dwell_time);
+		/* fall through to limit further */
+	case 1:
+		limit = (limits[0] * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
+		limit /= n_active;
+		return min(limit, dwell_time);
+	default:
+		WARN_ON_ONCE(1);
+		return dwell_time;
+	}
+}
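+
+/* Worked example (illustrative only): with a single associated context
+ * using a 100 TU beacon interval, the limit is (100 * 98) / 100 -
+ * IWL_CHANNEL_TUNE_TIME * 2 = 88. With two such contexts, the case 2
+ * path halves that first, so the dwell time is clamped to 44. */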
+
+static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
+				      enum nl80211_band band)
+{
+	u16 passive = (band == NL80211_BAND_2GHZ) ?
+	    IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
+	    IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
+
+	return iwl_limit_dwell(priv, passive);
+}
+
+/* Return a valid, unused channel for a passive scan to reset the RF */
+static u8 iwl_get_single_channel_number(struct iwl_priv *priv,
+					enum nl80211_band band)
+{
+	struct ieee80211_supported_band *sband = priv->hw->wiphy->bands[band];
+	struct iwl_rxon_context *ctx;
+	int i;
+
+	for (i = 0; i < sband->n_channels; i++) {
+		bool busy = false;
+
+		for_each_context(priv, ctx) {
+			busy = sband->channels[i].hw_value ==
+				le16_to_cpu(ctx->staging.channel);
+			if (busy)
+				break;
+		}
+
+		if (busy)
+			continue;
+
+		if (!(sband->channels[i].flags & IEEE80211_CHAN_DISABLED))
+			return sband->channels[i].hw_value;
+	}
+
+	return 0;
+}
+
+static int iwl_get_channel_for_reset_scan(struct iwl_priv *priv,
+					  struct ieee80211_vif *vif,
+					  enum nl80211_band band,
+					  struct iwl_scan_channel *scan_ch)
+{
+	const struct ieee80211_supported_band *sband;
+	u16 channel;
+
+	sband = iwl_get_hw_mode(priv, band);
+	if (!sband) {
+		IWL_ERR(priv, "invalid band\n");
+		return 0;
+	}
+
+	channel = iwl_get_single_channel_number(priv, band);
+	if (channel) {
+		scan_ch->channel = cpu_to_le16(channel);
+		scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
+		scan_ch->active_dwell =
+			cpu_to_le16(IWL_RADIO_RESET_DWELL_TIME);
+		scan_ch->passive_dwell =
+			cpu_to_le16(IWL_RADIO_RESET_DWELL_TIME);
+		/* Set txpower levels to defaults */
+		scan_ch->dsp_atten = 110;
+		if (band == NL80211_BAND_5GHZ)
+			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
+		else
+			scan_ch->tx_gain = ((1 << 5) | (5 << 3));
+		return 1;
+	}
+
+	IWL_ERR(priv, "no valid channel found\n");
+	return 0;
+}
+
+static int iwl_get_channels_for_scan(struct iwl_priv *priv,
+				     struct ieee80211_vif *vif,
+				     enum nl80211_band band,
+				     u8 is_active, u8 n_probes,
+				     struct iwl_scan_channel *scan_ch)
+{
+	struct ieee80211_channel *chan;
+	const struct ieee80211_supported_band *sband;
+	u16 passive_dwell = 0;
+	u16 active_dwell = 0;
+	int added, i;
+	u16 channel;
+
+	sband = iwl_get_hw_mode(priv, band);
+	if (!sband)
+		return 0;
+
+	active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
+	passive_dwell = iwl_get_passive_dwell_time(priv, band);
+
+	if (passive_dwell <= active_dwell)
+		passive_dwell = active_dwell + 1;
+
+	for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
+		chan = priv->scan_request->channels[i];
+
+		if (chan->band != band)
+			continue;
+
+		channel = chan->hw_value;
+		scan_ch->channel = cpu_to_le16(channel);
+
+		if (!is_active || (chan->flags & IEEE80211_CHAN_NO_IR))
+			scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
+		else
+			scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
+
+		if (n_probes)
+			scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
+
+		scan_ch->active_dwell = cpu_to_le16(active_dwell);
+		scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+
+		/* Set txpower levels to defaults */
+		scan_ch->dsp_atten = 110;
+
+		/* NOTE: if we were doing 6Mb OFDM for scans we'd use
+		 * power level:
+		 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
+		 */
+		if (band == NL80211_BAND_5GHZ)
+			scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
+		else
+			scan_ch->tx_gain = ((1 << 5) | (5 << 3));
+
+		IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
+			       channel, le32_to_cpu(scan_ch->type),
+			       (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
+				"ACTIVE" : "PASSIVE",
+			       (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
+			       active_dwell : passive_dwell);
+
+		scan_ch++;
+		added++;
+	}
+
+	IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
+	return added;
+}
+
+/**
+ * iwl_fill_probe_req - fill in all required fields and IEs for a probe request
+ */
+static u16 iwl_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta,
+			      const u8 *ies, int ie_len, const u8 *ssid,
+			      u8 ssid_len, int left)
+{
+	int len = 0;
+	u8 *pos = NULL;
+
+	/* Make sure there is enough space for the probe request,
+	 * two mandatory IEs and the data */
+	left -= 24;
+	if (left < 0)
+		return 0;
+
+	frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
+	eth_broadcast_addr(frame->da);
+	memcpy(frame->sa, ta, ETH_ALEN);
+	eth_broadcast_addr(frame->bssid);
+	frame->seq_ctrl = 0;
+
+	len += 24;
+
+	/* ...next IE... */
+	pos = &frame->u.probe_req.variable[0];
+
+	/* fill in our SSID IE */
+	left -= ssid_len + 2;
+	if (left < 0)
+		return 0;
+	*pos++ = WLAN_EID_SSID;
+	*pos++ = ssid_len;
+	if (ssid && ssid_len) {
+		memcpy(pos, ssid, ssid_len);
+		pos += ssid_len;
+	}
+
+	len += ssid_len + 2;
+
+	if (WARN_ON(left < ie_len))
+		return len;
+
+	if (ies && ie_len) {
+		memcpy(pos, ies, ie_len);
+		len += ie_len;
+	}
+
+	return (u16)len;
+}
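+
+/* Size example (illustrative only): for a 7-byte SSID and 30 bytes of
+ * extra IEs, the frame built above takes 24 (management header) +
+ * 2 + 7 (SSID IE) + 30 = 63 bytes; 0 is returned when "left" cannot
+ * hold the header and the SSID IE. */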
+
+static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
+{
+	struct iwl_host_cmd cmd = {
+		.id = REPLY_SCAN_CMD,
+		.len = { sizeof(struct iwl_scan_cmd), },
+	};
+	struct iwl_scan_cmd *scan;
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+	u32 rate_flags = 0;
+	u16 cmd_len = 0;
+	u16 rx_chain = 0;
+	enum nl80211_band band;
+	u8 n_probes = 0;
+	u8 rx_ant = priv->nvm_data->valid_rx_ant;
+	u8 rate;
+	bool is_active = false;
+	int  chan_mod;
+	u8 active_chains;
+	u8 scan_tx_antennas = priv->nvm_data->valid_tx_ant;
+	int ret;
+	int scan_cmd_size = sizeof(struct iwl_scan_cmd) +
+			    MAX_SCAN_CHANNEL * sizeof(struct iwl_scan_channel) +
+			    priv->fw->ucode_capa.max_probe_length;
+	const u8 *ssid = NULL;
+	u8 ssid_len = 0;
+
+	if (WARN_ON(priv->scan_type == IWL_SCAN_NORMAL &&
+		    (!priv->scan_request ||
+		     priv->scan_request->n_channels > MAX_SCAN_CHANNEL)))
+		return -EINVAL;
+
+	lockdep_assert_held(&priv->mutex);
+
+	if (vif)
+		ctx = iwl_rxon_ctx_from_vif(vif);
+
+	if (!priv->scan_cmd) {
+		priv->scan_cmd = kmalloc(scan_cmd_size, GFP_KERNEL);
+		if (!priv->scan_cmd) {
+			IWL_DEBUG_SCAN(priv,
+				       "fail to allocate memory for scan\n");
+			return -ENOMEM;
+		}
+	}
+	scan = priv->scan_cmd;
+	memset(scan, 0, scan_cmd_size);
+
+	scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
+	scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
+
+	if (iwl_is_any_associated(priv)) {
+		u16 interval = 0;
+		u32 extra;
+		u32 suspend_time = 100;
+		u32 scan_suspend_time = 100;
+
+		IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
+		switch (priv->scan_type) {
+		case IWL_SCAN_RADIO_RESET:
+			interval = 0;
+			break;
+		case IWL_SCAN_NORMAL:
+			interval = vif->bss_conf.beacon_int;
+			break;
+		}
+
+		scan->suspend_time = 0;
+		scan->max_out_time = cpu_to_le32(200 * 1024);
+		if (!interval)
+			interval = suspend_time;
+
+		extra = (suspend_time / interval) << 22;
+		scan_suspend_time = (extra |
+		    ((suspend_time % interval) * 1024));
+		scan->suspend_time = cpu_to_le32(scan_suspend_time);
+		IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
+			       scan_suspend_time, interval);
+	}
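+
+	/*
+	 * Encoding example (illustrative only): with a 100 TU beacon
+	 * interval and suspend_time 100, extra = (100 / 100) << 22 =
+	 * 1 << 22 and the remainder is 0, so scan_suspend_time is
+	 * 0x00400000: the quotient sits in bits 22 and up, the
+	 * remainder is scaled by 1024 (usec per TU).
+	 */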
+
+	switch (priv->scan_type) {
+	case IWL_SCAN_RADIO_RESET:
+		IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
+		/*
+		 * Override quiet time as firmware checks that active
+		 * dwell is >= quiet; since we use passive scan it'll
+		 * not actually be used.
+		 */
+		scan->quiet_time = cpu_to_le16(IWL_RADIO_RESET_DWELL_TIME);
+		break;
+	case IWL_SCAN_NORMAL:
+		if (priv->scan_request->n_ssids) {
+			int i, p = 0;
+			IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
+			/*
+			 * The highest priority SSID is inserted into
+			 * the probe request template.
+			 */
+			ssid_len = priv->scan_request->ssids[0].ssid_len;
+			ssid = priv->scan_request->ssids[0].ssid;
+
+			/*
+			 * Invert the order of the SSIDs; the firmware
+			 * will invert it back.
+			 */
+			for (i = priv->scan_request->n_ssids - 1; i >= 1; i--) {
+				scan->direct_scan[p].id = WLAN_EID_SSID;
+				scan->direct_scan[p].len =
+					priv->scan_request->ssids[i].ssid_len;
+				memcpy(scan->direct_scan[p].ssid,
+				       priv->scan_request->ssids[i].ssid,
+				       priv->scan_request->ssids[i].ssid_len);
+				n_probes++;
+				p++;
+			}
+			is_active = true;
+		} else
+			IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
+		break;
+	}
+
+	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
+	scan->tx_cmd.sta_id = ctx->bcast_sta_id;
+	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+
+	switch (priv->scan_band) {
+	case NL80211_BAND_2GHZ:
+		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
+		chan_mod = le32_to_cpu(
+			priv->contexts[IWL_RXON_CTX_BSS].active.flags &
+						RXON_FLG_CHANNEL_MODE_MSK)
+				       >> RXON_FLG_CHANNEL_MODE_POS;
+		if ((priv->scan_request && priv->scan_request->no_cck) ||
+		    chan_mod == CHANNEL_MODE_PURE_40) {
+			rate = IWL_RATE_6M_PLCP;
+		} else {
+			rate = IWL_RATE_1M_PLCP;
+			rate_flags = RATE_MCS_CCK_MSK;
+		}
+		/*
+		 * Internal scans are passive, so we can indiscriminately set
+		 * the BT ignore flag on 2.4 GHz since it applies to TX only.
+		 */
+		if (priv->lib->bt_params &&
+		    priv->lib->bt_params->advanced_bt_coexist)
+			scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT;
+		break;
+	case NL80211_BAND_5GHZ:
+		rate = IWL_RATE_6M_PLCP;
+		break;
+	default:
+		IWL_WARN(priv, "Invalid scan band\n");
+		return -EIO;
+	}
+
+	/*
+	 * If active scanning is requested but a certain channel is
+	 * marked passive, we can do active scanning if we detect
+	 * transmissions.
+	 *
+	 * There is an issue with some firmware versions that triggers
+	 * a sysassert on a "good CRC threshold" of zero (== disabled),
+	 * on a radar channel even though this means that we should NOT
+	 * send probes.
+	 *
+	 * The "good CRC threshold" is the number of frames that we
+	 * need to receive during our dwell time on a channel before
+	 * sending out probes -- setting this to a huge value will
+	 * mean we never reach it, but at the same time work around
+	 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
+	 * here instead of IWL_GOOD_CRC_TH_DISABLED.
+	 *
+	 * This was fixed in later versions along with some other
+	 * scan changes, and the threshold behaves as a flag in those
+	 * versions.
+	 */
+	if (priv->new_scan_threshold_behaviour)
+		scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
+						IWL_GOOD_CRC_TH_DISABLED;
+	else
+		scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
+						IWL_GOOD_CRC_TH_NEVER;
+
+	band = priv->scan_band;
+
+	if (band == NL80211_BAND_2GHZ &&
+	    priv->lib->bt_params &&
+	    priv->lib->bt_params->advanced_bt_coexist) {
+		/* transmit 2.4 GHz probes only on first antenna */
+		scan_tx_antennas = first_antenna(scan_tx_antennas);
+	}
+
+	priv->scan_tx_ant[band] = iwl_toggle_tx_ant(priv,
+						    priv->scan_tx_ant[band],
+						    scan_tx_antennas);
+	rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
+	scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);
+
+	/*
+	 * In power save mode while associated use one chain,
+	 * otherwise use all chains
+	 */
+	if (test_bit(STATUS_POWER_PMI, &priv->status) &&
+	    !(priv->hw->conf.flags & IEEE80211_CONF_IDLE)) {
+		/* rx_ant has been set to all valid chains previously */
+		active_chains = rx_ant &
+				((u8)(priv->chain_noise_data.active_chains));
+		if (!active_chains)
+			active_chains = rx_ant;
+
+		IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
+				priv->chain_noise_data.active_chains);
+
+		rx_ant = first_antenna(active_chains);
+	}
+	if (priv->lib->bt_params &&
+	    priv->lib->bt_params->advanced_bt_coexist &&
+	    priv->bt_full_concurrent) {
+		/* operated as 1x1 in full concurrency mode */
+		rx_ant = first_antenna(rx_ant);
+	}
+
+	/* MIMO is not used here, but value is required */
+	rx_chain |=
+		priv->nvm_data->valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
+	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
+	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
+	rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
+	scan->rx_chain = cpu_to_le16(rx_chain);
+	switch (priv->scan_type) {
+	case IWL_SCAN_NORMAL:
+		cmd_len = iwl_fill_probe_req(
+					(struct ieee80211_mgmt *)scan->data,
+					vif->addr,
+					priv->scan_request->ie,
+					priv->scan_request->ie_len,
+					ssid, ssid_len,
+					scan_cmd_size - sizeof(*scan));
+		break;
+	case IWL_SCAN_RADIO_RESET:
+		/* use bcast addr, will not be transmitted but must be valid */
+		cmd_len = iwl_fill_probe_req(
+					(struct ieee80211_mgmt *)scan->data,
+					iwl_bcast_addr, NULL, 0,
+					NULL, 0,
+					scan_cmd_size - sizeof(*scan));
+		break;
+	default:
+		BUG();
+	}
+	scan->tx_cmd.len = cpu_to_le16(cmd_len);
+
+	scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
+			       RXON_FILTER_BCON_AWARE_MSK);
+
+	switch (priv->scan_type) {
+	case IWL_SCAN_RADIO_RESET:
+		scan->channel_count =
+			iwl_get_channel_for_reset_scan(priv, vif, band,
+				(void *)&scan->data[cmd_len]);
+		break;
+	case IWL_SCAN_NORMAL:
+		scan->channel_count =
+			iwl_get_channels_for_scan(priv, vif, band,
+				is_active, n_probes,
+				(void *)&scan->data[cmd_len]);
+		break;
+	}
+
+	if (scan->channel_count == 0) {
+		IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
+		return -EIO;
+	}
+
+	cmd.len[0] += le16_to_cpu(scan->tx_cmd.len) +
+	    scan->channel_count * sizeof(struct iwl_scan_channel);
+	cmd.data[0] = scan;
+	cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
+	scan->len = cpu_to_le16(cmd.len[0]);
+
+	/* set scan bit here for PAN params */
+	set_bit(STATUS_SCAN_HW, &priv->status);
+
+	ret = iwlagn_set_pan_params(priv);
+	if (ret) {
+		clear_bit(STATUS_SCAN_HW, &priv->status);
+		return ret;
+	}
+
+	ret = iwl_dvm_send_cmd(priv, &cmd);
+	if (ret) {
+		clear_bit(STATUS_SCAN_HW, &priv->status);
+		iwlagn_set_pan_params(priv);
+	}
+
+	return ret;
+}
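+
+/* Assembly note (illustrative only): the final command length is the
+ * fixed struct iwl_scan_cmd header plus the probe request template
+ * (tx_cmd.len) plus one struct iwl_scan_channel per channel, all within
+ * the buffer allocated above for MAX_SCAN_CHANNEL channels and the
+ * maximum probe length advertised by the uCode. */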
+
+void iwl_init_scan_params(struct iwl_priv *priv)
+{
+	u8 ant_idx = fls(priv->nvm_data->valid_tx_ant) - 1;
+	if (!priv->scan_tx_ant[NL80211_BAND_5GHZ])
+		priv->scan_tx_ant[NL80211_BAND_5GHZ] = ant_idx;
+	if (!priv->scan_tx_ant[NL80211_BAND_2GHZ])
+		priv->scan_tx_ant[NL80211_BAND_2GHZ] = ant_idx;
+}
+
+int __must_check iwl_scan_initiate(struct iwl_priv *priv,
+				   struct ieee80211_vif *vif,
+				   enum iwl_scan_type scan_type,
+				   enum nl80211_band band)
+{
+	int ret;
+
+	lockdep_assert_held(&priv->mutex);
+
+	cancel_delayed_work(&priv->scan_check);
+
+	if (!iwl_is_ready_rf(priv)) {
+		IWL_WARN(priv, "Request scan called when driver not ready.\n");
+		return -EIO;
+	}
+
+	if (test_bit(STATUS_SCAN_HW, &priv->status)) {
+		IWL_DEBUG_SCAN(priv,
+			"Multiple concurrent scan requests.\n");
+		return -EBUSY;
+	}
+
+	if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
+		IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n");
+		return -EBUSY;
+	}
+
+	IWL_DEBUG_SCAN(priv, "Starting %sscan...\n",
+			scan_type == IWL_SCAN_NORMAL ? "" :
+			"internal short ");
+
+	set_bit(STATUS_SCANNING, &priv->status);
+	priv->scan_type = scan_type;
+	priv->scan_start = jiffies;
+	priv->scan_band = band;
+
+	ret = iwlagn_request_scan(priv, vif);
+	if (ret) {
+		clear_bit(STATUS_SCANNING, &priv->status);
+		priv->scan_type = IWL_SCAN_NORMAL;
+		return ret;
+	}
+
+	queue_delayed_work(priv->workqueue, &priv->scan_check,
+			   IWL_SCAN_CHECK_WATCHDOG);
+
+	return 0;
+}
+
+/*
+ * Internal short scan; this function should only be called while associated.
+ * It will reset and tune the radio to prevent possible RF related problems.
+ */
+void iwl_internal_short_hw_scan(struct iwl_priv *priv)
+{
+	queue_work(priv->workqueue, &priv->start_internal_scan);
+}
+
+static void iwl_bg_start_internal_scan(struct work_struct *work)
+{
+	struct iwl_priv *priv =
+		container_of(work, struct iwl_priv, start_internal_scan);
+
+	IWL_DEBUG_SCAN(priv, "Start internal scan\n");
+
+	mutex_lock(&priv->mutex);
+
+	if (priv->scan_type == IWL_SCAN_RADIO_RESET) {
+		IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n");
+		goto unlock;
+	}
+
+	if (test_bit(STATUS_SCANNING, &priv->status)) {
+		IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
+		goto unlock;
+	}
+
+	if (iwl_scan_initiate(priv, NULL, IWL_SCAN_RADIO_RESET, priv->band))
+		IWL_DEBUG_SCAN(priv, "failed to start internal short scan\n");
+ unlock:
+	mutex_unlock(&priv->mutex);
+}
+
+static void iwl_bg_scan_check(struct work_struct *data)
+{
+	struct iwl_priv *priv =
+	    container_of(data, struct iwl_priv, scan_check.work);
+
+	IWL_DEBUG_SCAN(priv, "Scan check work\n");
+
+	/* Since we got here, the firmware did not finish the scan and
+	 * is most likely in bad shape, so we don't bother to send an
+	 * abort command; just force scan complete towards mac80211 */
+	mutex_lock(&priv->mutex);
+	iwl_force_scan_end(priv);
+	mutex_unlock(&priv->mutex);
+}
+
+static void iwl_bg_abort_scan(struct work_struct *work)
+{
+	struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);
+
+	IWL_DEBUG_SCAN(priv, "Abort scan work\n");
+
+	/* We keep the scan_check work queued in case the firmware does not
+	 * report back with a scan completed notification */
+	mutex_lock(&priv->mutex);
+	iwl_scan_cancel_timeout(priv, 200);
+	mutex_unlock(&priv->mutex);
+}
+
+static void iwl_bg_scan_completed(struct work_struct *work)
+{
+	struct iwl_priv *priv =
+		container_of(work, struct iwl_priv, scan_completed);
+
+	mutex_lock(&priv->mutex);
+	iwl_process_scan_complete(priv);
+	mutex_unlock(&priv->mutex);
+}
+
+void iwl_setup_scan_deferred_work(struct iwl_priv *priv)
+{
+	INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed);
+	INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan);
+	INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan);
+	INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check);
+}
+
+void iwl_cancel_scan_deferred_work(struct iwl_priv *priv)
+{
+	cancel_work_sync(&priv->start_internal_scan);
+	cancel_work_sync(&priv->abort_scan);
+	cancel_work_sync(&priv->scan_completed);
+
+	if (cancel_delayed_work_sync(&priv->scan_check)) {
+		mutex_lock(&priv->mutex);
+		iwl_force_scan_end(priv);
+		mutex_unlock(&priv->mutex);
+	}
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
new file mode 100644
index 0000000..de6ec9b
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
@@ -0,0 +1,1442 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+#include "iwl-trans.h"
+#include "dev.h"
+#include "agn.h"
+
+const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+
+static int iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
+{
+	lockdep_assert_held(&priv->sta_lock);
+
+	if (sta_id >= IWLAGN_STATION_COUNT) {
+		IWL_ERR(priv, "invalid sta_id %u\n", sta_id);
+		return -EINVAL;
+	}
+	if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
+		IWL_ERR(priv, "ACTIVATE a non DRIVER active station id %u "
+			"addr %pM\n",
+			sta_id, priv->stations[sta_id].sta.sta.addr);
+
+	if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) {
+		IWL_DEBUG_ASSOC(priv,
+				"STA id %u addr %pM already present in uCode "
+				"(according to driver)\n",
+				sta_id, priv->stations[sta_id].sta.sta.addr);
+	} else {
+		priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
+		IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n",
+				sta_id, priv->stations[sta_id].sta.sta.addr);
+	}
+	return 0;
+}
+
+static void iwl_process_add_sta_resp(struct iwl_priv *priv,
+				     struct iwl_rx_packet *pkt)
+{
+	struct iwl_add_sta_resp *add_sta_resp = (void *)pkt->data;
+
+	IWL_DEBUG_INFO(priv, "Processing response for adding station\n");
+
+	spin_lock_bh(&priv->sta_lock);
+
+	switch (add_sta_resp->status) {
+	case ADD_STA_SUCCESS_MSK:
+		IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
+		break;
+	case ADD_STA_NO_ROOM_IN_TABLE:
+		IWL_ERR(priv, "Adding station failed, no room in table.\n");
+		break;
+	case ADD_STA_NO_BLOCK_ACK_RESOURCE:
+		IWL_ERR(priv,
+			"Adding station failed, no block ack resource.\n");
+		break;
+	case ADD_STA_MODIFY_NON_EXIST_STA:
+		IWL_ERR(priv, "Attempting to modify non-existing station\n");
+		break;
+	default:
+		IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
+				add_sta_resp->status);
+		break;
+	}
+
+	spin_unlock_bh(&priv->sta_lock);
+}
+
+void iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+	iwl_process_add_sta_resp(priv, pkt);
+}
+
+int iwl_send_add_sta(struct iwl_priv *priv,
+		     struct iwl_addsta_cmd *sta, u8 flags)
+{
+	int ret = 0;
+	struct iwl_host_cmd cmd = {
+		.id = REPLY_ADD_STA,
+		.flags = flags,
+		.data = { sta, },
+		.len = { sizeof(*sta), },
+	};
+	u8 sta_id __maybe_unused = sta->sta.sta_id;
+	struct iwl_rx_packet *pkt;
+	struct iwl_add_sta_resp *add_sta_resp;
+
+	IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
+		       sta_id, sta->sta.addr, flags & CMD_ASYNC ?  "a" : "");
+
+	if (!(flags & CMD_ASYNC)) {
+		cmd.flags |= CMD_WANT_SKB;
+		might_sleep();
+	}
+
+	ret = iwl_dvm_send_cmd(priv, &cmd);
+
+	if (ret || (flags & CMD_ASYNC))
+		return ret;
+
+	pkt = cmd.resp_pkt;
+	add_sta_resp = (void *)pkt->data;
+
+	/* debug messages are printed in the handler */
+	if (add_sta_resp->status == ADD_STA_SUCCESS_MSK) {
+		spin_lock_bh(&priv->sta_lock);
+		ret = iwl_sta_ucode_activate(priv, sta_id);
+		spin_unlock_bh(&priv->sta_lock);
+	} else {
+		ret = -EIO;
+	}
+
+	iwl_free_resp(&cmd);
+
+	return ret;
+}
+
+bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
+			    struct iwl_rxon_context *ctx,
+			    struct ieee80211_sta *sta)
+{
+	if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
+		return false;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	if (priv->disable_ht40)
+		return false;
+#endif
+
+	/* special case for RXON */
+	if (!sta)
+		return true;
+
+	return sta->bandwidth >= IEEE80211_STA_RX_BW_40;
+}
+
+static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
+				  struct ieee80211_sta *sta,
+				  struct iwl_rxon_context *ctx,
+				  __le32 *flags, __le32 *mask)
+{
+	struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
+
+	*mask = STA_FLG_RTS_MIMO_PROT_MSK |
+		STA_FLG_MIMO_DIS_MSK |
+		STA_FLG_HT40_EN_MSK |
+		STA_FLG_MAX_AGG_SIZE_MSK |
+		STA_FLG_AGG_MPDU_DENSITY_MSK;
+	*flags = 0;
+
+	if (!sta || !sta_ht_inf->ht_supported)
+		return;
+
+	IWL_DEBUG_INFO(priv, "STA %pM SM PS mode: %s\n",
+			sta->addr,
+			(sta->smps_mode == IEEE80211_SMPS_STATIC) ?
+			"static" :
+			(sta->smps_mode == IEEE80211_SMPS_DYNAMIC) ?
+			"dynamic" : "disabled");
+
+	switch (sta->smps_mode) {
+	case IEEE80211_SMPS_STATIC:
+		*flags |= STA_FLG_MIMO_DIS_MSK;
+		break;
+	case IEEE80211_SMPS_DYNAMIC:
+		*flags |= STA_FLG_RTS_MIMO_PROT_MSK;
+		break;
+	case IEEE80211_SMPS_OFF:
+		break;
+	default:
+		IWL_WARN(priv, "Invalid MIMO PS mode %d\n", sta->smps_mode);
+		break;
+	}
+
+	*flags |= cpu_to_le32(
+		(u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
+
+	*flags |= cpu_to_le32(
+		(u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
+
+	if (iwl_is_ht40_tx_allowed(priv, ctx, sta))
+		*flags |= STA_FLG_HT40_EN_MSK;
+}
+
+int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+		      struct ieee80211_sta *sta)
+{
+	u8 sta_id = iwl_sta_id(sta);
+	__le32 flags, mask;
+	struct iwl_addsta_cmd cmd;
+
+	if (WARN_ON_ONCE(sta_id == IWL_INVALID_STATION))
+		return -EINVAL;
+
+	iwl_sta_calc_ht_flags(priv, sta, ctx, &flags, &mask);
+
+	spin_lock_bh(&priv->sta_lock);
+	priv->stations[sta_id].sta.station_flags &= ~mask;
+	priv->stations[sta_id].sta.station_flags |= flags;
+	spin_unlock_bh(&priv->sta_lock);
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.mode = STA_CONTROL_MODIFY_MSK;
+	cmd.station_flags_msk = mask;
+	cmd.station_flags = flags;
+	cmd.sta.sta_id = sta_id;
+
+	return iwl_send_add_sta(priv, &cmd, 0);
+}
+
+static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
+				   struct ieee80211_sta *sta,
+				   struct iwl_rxon_context *ctx)
+{
+	__le32 flags, mask;
+
+	iwl_sta_calc_ht_flags(priv, sta, ctx, &flags, &mask);
+
+	lockdep_assert_held(&priv->sta_lock);
+	priv->stations[index].sta.station_flags &= ~mask;
+	priv->stations[index].sta.station_flags |= flags;
+}
+
+/**
+ * iwl_prep_station - Prepare station information for addition
+ *
+ * Should be called with sta_lock held.
+ */
+u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+		    const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
+{
+	struct iwl_station_entry *station;
+	int i;
+	u8 sta_id = IWL_INVALID_STATION;
+
+	if (is_ap)
+		sta_id = ctx->ap_sta_id;
+	else if (is_broadcast_ether_addr(addr))
+		sta_id = ctx->bcast_sta_id;
+	else
+		for (i = IWL_STA_ID; i < IWLAGN_STATION_COUNT; i++) {
+			if (ether_addr_equal(priv->stations[i].sta.sta.addr,
+					     addr)) {
+				sta_id = i;
+				break;
+			}
+
+			if (!priv->stations[i].used &&
+			    sta_id == IWL_INVALID_STATION)
+				sta_id = i;
+		}
+
+	/*
+	 * These two conditions have the same outcome, but keep them
+	 * separate
+	 */
+	if (unlikely(sta_id == IWL_INVALID_STATION))
+		return sta_id;
+
+	/*
+	 * uCode is not able to deal with multiple requests to add a
+	 * station. Keep track if one is in progress so that we do not send
+	 * another.
+	 */
+	if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
+		IWL_DEBUG_INFO(priv, "STA %d already in process of being "
+			       "added.\n", sta_id);
+		return sta_id;
+	}
+
+	if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
+	    (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) &&
+	    ether_addr_equal(priv->stations[sta_id].sta.sta.addr, addr)) {
+		IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not "
+				"adding again.\n", sta_id, addr);
+		return sta_id;
+	}
+
+	station = &priv->stations[sta_id];
+	station->used = IWL_STA_DRIVER_ACTIVE;
+	IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n",
+			sta_id, addr);
+	priv->num_stations++;
+
+	/* Set up the REPLY_ADD_STA command to send to device */
+	memset(&station->sta, 0, sizeof(struct iwl_addsta_cmd));
+	memcpy(station->sta.sta.addr, addr, ETH_ALEN);
+	station->sta.mode = 0;
+	station->sta.sta.sta_id = sta_id;
+	station->sta.station_flags = ctx->station_flags;
+	station->ctxid = ctx->ctxid;
+
+	if (sta) {
+		struct iwl_station_priv *sta_priv;
+
+		sta_priv = (void *)sta->drv_priv;
+		sta_priv->ctx = ctx;
+	}
+
+	/*
+	 * OK to call unconditionally, since local stations (IBSS BSSID
+	 * STA and broadcast STA) pass in a NULL sta, and mac80211
+	 * doesn't allow HT IBSS.
+	 */
+	iwl_set_ht_add_station(priv, sta_id, sta, ctx);
+
+	return sta_id;
+}
+
+#define STA_WAIT_TIMEOUT (HZ/2)
+
+/**
+ * iwl_add_station_common - add a station to the driver and device tables
+ */
+int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+			   const u8 *addr, bool is_ap,
+			   struct ieee80211_sta *sta, u8 *sta_id_r)
+{
+	int ret = 0;
+	u8 sta_id;
+	struct iwl_addsta_cmd sta_cmd;
+
+	*sta_id_r = 0;
+	spin_lock_bh(&priv->sta_lock);
+	sta_id = iwl_prep_station(priv, ctx, addr, is_ap, sta);
+	if (sta_id == IWL_INVALID_STATION) {
+		IWL_ERR(priv, "Unable to prepare station %pM for addition\n",
+			addr);
+		spin_unlock_bh(&priv->sta_lock);
+		return -EINVAL;
+	}
+
+	/*
+	 * uCode is not able to deal with multiple requests to add a
+	 * station. Keep track if one is in progress so that we do not send
+	 * another.
+	 */
+	if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
+		IWL_DEBUG_INFO(priv, "STA %d already in process of being "
+			       "added.\n", sta_id);
+		spin_unlock_bh(&priv->sta_lock);
+		return -EEXIST;
+	}
+
+	if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
+	    (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
+		IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not "
+				"adding again.\n", sta_id, addr);
+		spin_unlock_bh(&priv->sta_lock);
+		return -EEXIST;
+	}
+
+	priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS;
+	memcpy(&sta_cmd, &priv->stations[sta_id].sta,
+	       sizeof(struct iwl_addsta_cmd));
+	spin_unlock_bh(&priv->sta_lock);
+
+	/* Add station to device's station table */
+	ret = iwl_send_add_sta(priv, &sta_cmd, 0);
+	if (ret) {
+		spin_lock_bh(&priv->sta_lock);
+		IWL_ERR(priv, "Adding station %pM failed.\n",
+			priv->stations[sta_id].sta.sta.addr);
+		priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
+		priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
+		spin_unlock_bh(&priv->sta_lock);
+	}
+	*sta_id_r = sta_id;
+	return ret;
+}
+
+/**
+ * iwl_sta_ucode_deactivate - deactivate ucode status for a station
+ */
+static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
+{
+	lockdep_assert_held(&priv->sta_lock);
+
+	/* uCode must be active and the driver must not be active */
+	if ((priv->stations[sta_id].used &
+	     (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) !=
+	      IWL_STA_UCODE_ACTIVE)
+		IWL_ERR(priv, "removed non active STA %u\n", sta_id);
+
+	priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE;
+
+	memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry));
+	IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id);
+}
+
+static int iwl_send_remove_station(struct iwl_priv *priv,
+				   const u8 *addr, int sta_id,
+				   bool temporary)
+{
+	struct iwl_rx_packet *pkt;
+	int ret;
+	struct iwl_rem_sta_cmd rm_sta_cmd;
+	struct iwl_rem_sta_resp *rem_sta_resp;
+
+	struct iwl_host_cmd cmd = {
+		.id = REPLY_REMOVE_STA,
+		.len = { sizeof(struct iwl_rem_sta_cmd), },
+		.data = { &rm_sta_cmd, },
+	};
+
+	memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
+	rm_sta_cmd.num_sta = 1;
+	memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);
+
+	cmd.flags |= CMD_WANT_SKB;
+
+	ret = iwl_dvm_send_cmd(priv, &cmd);
+
+	if (ret)
+		return ret;
+
+	pkt = cmd.resp_pkt;
+	rem_sta_resp = (void *)pkt->data;
+
+	switch (rem_sta_resp->status) {
+	case REM_STA_SUCCESS_MSK:
+		if (!temporary) {
+			spin_lock_bh(&priv->sta_lock);
+			iwl_sta_ucode_deactivate(priv, sta_id);
+			spin_unlock_bh(&priv->sta_lock);
+		}
+		IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
+		break;
+	default:
+		ret = -EIO;
+		IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
+		break;
+	}
+
+	iwl_free_resp(&cmd);
+
+	return ret;
+}
+
+/**
+ * iwl_remove_station - Remove driver's knowledge of station.
+ */
+int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
+		       const u8 *addr)
+{
+	u8 tid;
+
+	if (!iwl_is_ready(priv)) {
+		IWL_DEBUG_INFO(priv,
+			"Unable to remove station %pM, device not ready.\n",
+			addr);
+		/*
+		 * It is typical for stations to be removed when we are
+		 * going down. Return success since device will be down
+		 * soon anyway
+		 */
+		return 0;
+	}
+
+	IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d  %pM\n",
+			sta_id, addr);
+
+	if (WARN_ON(sta_id == IWL_INVALID_STATION))
+		return -EINVAL;
+
+	spin_lock_bh(&priv->sta_lock);
+
+	if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
+		IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n",
+				addr);
+		goto out_err;
+	}
+
+	if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
+		IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n",
+				addr);
+		goto out_err;
+	}
+
+	if (priv->stations[sta_id].used & IWL_STA_LOCAL) {
+		kfree(priv->stations[sta_id].lq);
+		priv->stations[sta_id].lq = NULL;
+	}
+
+	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
+		memset(&priv->tid_data[sta_id][tid], 0,
+			sizeof(priv->tid_data[sta_id][tid]));
+
+	priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
+
+	priv->num_stations--;
+
+	if (WARN_ON(priv->num_stations < 0))
+		priv->num_stations = 0;
+
+	spin_unlock_bh(&priv->sta_lock);
+
+	return iwl_send_remove_station(priv, addr, sta_id, false);
+out_err:
+	spin_unlock_bh(&priv->sta_lock);
+	return -EINVAL;
+}
+
+void iwl_deactivate_station(struct iwl_priv *priv, const u8 sta_id,
+			    const u8 *addr)
+{
+	u8 tid;
+
+	if (!iwl_is_ready(priv)) {
+		IWL_DEBUG_INFO(priv,
+			"Unable to remove station %pM, device not ready.\n",
+			addr);
+		return;
+	}
+
+	IWL_DEBUG_ASSOC(priv, "Deactivating STA: %pM (%d)\n", addr, sta_id);
+
+	if (WARN_ON_ONCE(sta_id == IWL_INVALID_STATION))
+		return;
+
+	spin_lock_bh(&priv->sta_lock);
+
+	WARN_ON_ONCE(!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE));
+
+	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
+		memset(&priv->tid_data[sta_id][tid], 0,
+			sizeof(priv->tid_data[sta_id][tid]));
+
+	priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
+	priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
+
+	priv->num_stations--;
+
+	if (WARN_ON_ONCE(priv->num_stations < 0))
+		priv->num_stations = 0;
+
+	spin_unlock_bh(&priv->sta_lock);
+}
+
+static void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+			    u8 sta_id, struct iwl_link_quality_cmd *link_cmd)
+{
+	int i, r;
+	u32 rate_flags = 0;
+	__le32 rate_n_flags;
+
+	lockdep_assert_held(&priv->mutex);
+
+	memset(link_cmd, 0, sizeof(*link_cmd));
+
+	/* Set up the initial rate table: pick 6M on 5 GHz or for P2P,
+	 * else 1M, and use that rate for every retry entry */
+	if (priv->band == NL80211_BAND_5GHZ)
+		r = IWL_RATE_6M_INDEX;
+	else if (ctx && ctx->vif && ctx->vif->p2p)
+		r = IWL_RATE_6M_INDEX;
+	else
+		r = IWL_RATE_1M_INDEX;
+
+	if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
+		rate_flags |= RATE_MCS_CCK_MSK;
+
+	rate_flags |= first_antenna(priv->nvm_data->valid_tx_ant) <<
+				RATE_MCS_ANT_POS;
+	rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
+	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
+		link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
+
+	link_cmd->general_params.single_stream_ant_msk =
+			first_antenna(priv->nvm_data->valid_tx_ant);
+
+	link_cmd->general_params.dual_stream_ant_msk =
+		priv->nvm_data->valid_tx_ant &
+		~first_antenna(priv->nvm_data->valid_tx_ant);
+	if (!link_cmd->general_params.dual_stream_ant_msk) {
+		link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
+	} else if (num_of_ant(priv->nvm_data->valid_tx_ant) == 2) {
+		link_cmd->general_params.dual_stream_ant_msk =
+			priv->nvm_data->valid_tx_ant;
+	}
+
+	link_cmd->agg_params.agg_dis_start_th =
+		LINK_QUAL_AGG_DISABLE_START_DEF;
+	link_cmd->agg_params.agg_time_limit =
+		cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+
+	link_cmd->sta_id = sta_id;
+}
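+
+/* Note (illustrative only): every rs_table entry above is filled with
+ * the same rate_n_flags, so e.g. a 2.4 GHz non-P2P station starts at
+ * 1M CCK on the first antenna for every retry until rate scaling
+ * installs a real table. */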
+
+/**
+ * iwl_clear_ucode_stations - clear ucode station table bits
+ *
+ * This function clears all the bits in the driver indicating
+ * which stations are active in the ucode. Call when something
+ * other than explicit station management would cause this in
+ * the ucode, e.g. unassociated RXON.
+ */
+void iwl_clear_ucode_stations(struct iwl_priv *priv,
+			      struct iwl_rxon_context *ctx)
+{
+	int i;
+	bool cleared = false;
+
+	IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n");
+
+	spin_lock_bh(&priv->sta_lock);
+	for (i = 0; i < IWLAGN_STATION_COUNT; i++) {
+		if (ctx && ctx->ctxid != priv->stations[i].ctxid)
+			continue;
+
+		if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) {
+			IWL_DEBUG_INFO(priv,
+				"Clearing ucode active for station %d\n", i);
+			priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
+			cleared = true;
+		}
+	}
+	spin_unlock_bh(&priv->sta_lock);
+
+	if (!cleared)
+		IWL_DEBUG_INFO(priv,
+			       "No active stations found to be cleared\n");
+}
+
+/**
+ * iwl_restore_stations() - Restore driver known stations to device
+ *
+ * All stations considered active by the driver, but not present in
+ * uCode, are restored.
+ *
+ * Function sleeps.
+ */
+void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+	struct iwl_addsta_cmd sta_cmd;
+	static const struct iwl_link_quality_cmd zero_lq = {};
+	struct iwl_link_quality_cmd lq;
+	int i;
+	bool found = false;
+	int ret;
+	bool send_lq;
+
+	if (!iwl_is_ready(priv)) {
+		IWL_DEBUG_INFO(priv,
+			       "Not ready yet, not restoring any stations.\n");
+		return;
+	}
+
+	IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n");
+	spin_lock_bh(&priv->sta_lock);
+	for (i = 0; i < IWLAGN_STATION_COUNT; i++) {
+		if (ctx->ctxid != priv->stations[i].ctxid)
+			continue;
+		if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) &&
+			    !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) {
+			IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n",
+					priv->stations[i].sta.sta.addr);
+			priv->stations[i].sta.mode = 0;
+			priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS;
+			found = true;
+		}
+	}
+
+	for (i = 0; i < IWLAGN_STATION_COUNT; i++) {
+		if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) {
+			memcpy(&sta_cmd, &priv->stations[i].sta,
+			       sizeof(struct iwl_addsta_cmd));
+			send_lq = false;
+			if (priv->stations[i].lq) {
+				if (priv->wowlan)
+					iwl_sta_fill_lq(priv, ctx, i, &lq);
+				else
+					memcpy(&lq, priv->stations[i].lq,
+					       sizeof(struct iwl_link_quality_cmd));
+
+				if (memcmp(&lq, &zero_lq, sizeof(lq)))
+					send_lq = true;
+			}
+			spin_unlock_bh(&priv->sta_lock);
+			ret = iwl_send_add_sta(priv, &sta_cmd, 0);
+			if (ret) {
+				spin_lock_bh(&priv->sta_lock);
+				IWL_ERR(priv, "Adding station %pM failed.\n",
+					priv->stations[i].sta.sta.addr);
+				priv->stations[i].used &=
+						~IWL_STA_DRIVER_ACTIVE;
+				priv->stations[i].used &=
+						~IWL_STA_UCODE_INPROGRESS;
+				continue;
+			}
+			/*
+			 * Rate scaling has already been initialized, send
+			 * current LQ command
+			 */
+			if (send_lq)
+				iwl_send_lq_cmd(priv, ctx, &lq, 0, true);
+			spin_lock_bh(&priv->sta_lock);
+			priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
+		}
+	}
+
+	spin_unlock_bh(&priv->sta_lock);
+	if (!found)
+		IWL_DEBUG_INFO(priv, "Restoring all known stations .... "
+			"no stations to be restored.\n");
+	else
+		IWL_DEBUG_INFO(priv, "Restoring all known stations .... "
+			"complete.\n");
+}
+
+int iwl_get_free_ucode_key_offset(struct iwl_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < priv->sta_key_max_num; i++)
+		if (!test_and_set_bit(i, &priv->ucode_key_table))
+			return i;
+
+	return WEP_INVALID_OFFSET;
+}
+
+void iwl_dealloc_bcast_stations(struct iwl_priv *priv)
+{
+	int i;
+
+	spin_lock_bh(&priv->sta_lock);
+	for (i = 0; i < IWLAGN_STATION_COUNT; i++) {
+		if (!(priv->stations[i].used & IWL_STA_BCAST))
+			continue;
+
+		priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
+		priv->num_stations--;
+		if (WARN_ON(priv->num_stations < 0))
+			priv->num_stations = 0;
+		kfree(priv->stations[i].lq);
+		priv->stations[i].lq = NULL;
+	}
+	spin_unlock_bh(&priv->sta_lock);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+static void iwl_dump_lq_cmd(struct iwl_priv *priv,
+			   struct iwl_link_quality_cmd *lq)
+{
+	int i;
+	IWL_DEBUG_RATE(priv, "lq station id 0x%x\n", lq->sta_id);
+	IWL_DEBUG_RATE(priv, "lq ant 0x%X 0x%X\n",
+		       lq->general_params.single_stream_ant_msk,
+		       lq->general_params.dual_stream_ant_msk);
+
+	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
+		IWL_DEBUG_RATE(priv, "lq index %d 0x%X\n",
+			       i, lq->rs_table[i].rate_n_flags);
+}
+#else
+static inline void iwl_dump_lq_cmd(struct iwl_priv *priv,
+				   struct iwl_link_quality_cmd *lq)
+{
+}
+#endif
+
+/**
+ * is_lq_table_valid() - Test one aspect of LQ cmd for validity
+ *
+ * It sometimes happens that, when an HT rate has been in use and we
+ * lose connectivity with the AP, mac80211 will first tell us that the
+ * current channel is not HT anymore before removing the station. In such a
+ * scenario the RXON flags will be updated to indicate we are not
+ * communicating HT anymore, but the LQ command may still contain HT rates.
+ * Test for this to prevent the driver from sending the LQ command between
+ * the time the RXON flags are updated and the time the LQ command is updated.
+ */
+static bool is_lq_table_valid(struct iwl_priv *priv,
+			      struct iwl_rxon_context *ctx,
+			      struct iwl_link_quality_cmd *lq)
+{
+	int i;
+
+	if (ctx->ht.enabled)
+		return true;
+
+	IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n",
+		       ctx->active.channel);
+	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+		if (le32_to_cpu(lq->rs_table[i].rate_n_flags) &
+		    RATE_MCS_HT_MSK) {
+			IWL_DEBUG_INFO(priv,
+				       "index %d of LQ expects HT channel\n",
+				       i);
+			return false;
+		}
+	}
+	return true;
+}
+
+/**
+ * iwl_send_lq_cmd() - Send link quality command
+ * @init: This command is sent as part of station initialization right
+ *        after station has been added.
+ *
+ * The link quality command is sent as the last step of station creation.
+ * This is the special case in which init is set: once the command
+ * completes, we clear the state indicating that station creation is
+ * in progress.
+ */
+int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+		    struct iwl_link_quality_cmd *lq, u8 flags, bool init)
+{
+	int ret = 0;
+	struct iwl_host_cmd cmd = {
+		.id = REPLY_TX_LINK_QUALITY_CMD,
+		.len = { sizeof(struct iwl_link_quality_cmd), },
+		.flags = flags,
+		.data = { lq, },
+	};
+
+	if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
+		return -EINVAL;
+
+	spin_lock_bh(&priv->sta_lock);
+	if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
+		spin_unlock_bh(&priv->sta_lock);
+		return -EINVAL;
+	}
+	spin_unlock_bh(&priv->sta_lock);
+
+	iwl_dump_lq_cmd(priv, lq);
+	if (WARN_ON(init && (cmd.flags & CMD_ASYNC)))
+		return -EINVAL;
+
+	if (is_lq_table_valid(priv, ctx, lq))
+		ret = iwl_dvm_send_cmd(priv, &cmd);
+	else
+		ret = -EINVAL;
+
+	if (cmd.flags & CMD_ASYNC)
+		return ret;
+
+	if (init) {
+		IWL_DEBUG_INFO(priv, "init LQ command complete, "
+			       "clearing sta addition status for sta %d\n",
+			       lq->sta_id);
+		spin_lock_bh(&priv->sta_lock);
+		priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
+		spin_unlock_bh(&priv->sta_lock);
+	}
+	return ret;
+}
+
+static struct iwl_link_quality_cmd *
+iwl_sta_alloc_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
+		 u8 sta_id)
+{
+	struct iwl_link_quality_cmd *link_cmd;
+
+	link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL);
+	if (!link_cmd) {
+		IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n");
+		return NULL;
+	}
+
+	iwl_sta_fill_lq(priv, ctx, sta_id, link_cmd);
+
+	return link_cmd;
+}
+
+/*
+ * iwlagn_add_bssid_station - Add the special IBSS BSSID station
+ *
+ * Function sleeps.
+ */
+int iwlagn_add_bssid_station(struct iwl_priv *priv,
+			     struct iwl_rxon_context *ctx,
+			     const u8 *addr, u8 *sta_id_r)
+{
+	int ret;
+	u8 sta_id;
+	struct iwl_link_quality_cmd *link_cmd;
+
+	if (sta_id_r)
+		*sta_id_r = IWL_INVALID_STATION;
+
+	ret = iwl_add_station_common(priv, ctx, addr, false, NULL, &sta_id);
+	if (ret) {
+		IWL_ERR(priv, "Unable to add station %pM\n", addr);
+		return ret;
+	}
+
+	if (sta_id_r)
+		*sta_id_r = sta_id;
+
+	spin_lock_bh(&priv->sta_lock);
+	priv->stations[sta_id].used |= IWL_STA_LOCAL;
+	spin_unlock_bh(&priv->sta_lock);
+
+	/* Set up default rate scaling table in device's station table */
+	link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id);
+	if (!link_cmd) {
+		IWL_ERR(priv,
+			"Unable to initialize rate scaling for station %pM.\n",
+			addr);
+		return -ENOMEM;
+	}
+
+	ret = iwl_send_lq_cmd(priv, ctx, link_cmd, 0, true);
+	if (ret)
+		IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
+
+	spin_lock_bh(&priv->sta_lock);
+	priv->stations[sta_id].lq = link_cmd;
+	spin_unlock_bh(&priv->sta_lock);
+
+	return 0;
+}
+
+/*
+ * static WEP keys
+ *
+ * For each context, the device has a table of 4 static WEP keys
+ * (one for each key index) that is updated with the following
+ * commands.
+ */
+
+static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv,
+				      struct iwl_rxon_context *ctx,
+				      bool send_if_empty)
+{
+	int i, not_empty = 0;
+	u8 buff[sizeof(struct iwl_wep_cmd) +
+		sizeof(struct iwl_wep_key) * WEP_KEYS_MAX];
+	struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff;
+	size_t cmd_size  = sizeof(struct iwl_wep_cmd);
+	struct iwl_host_cmd cmd = {
+		.id = ctx->wep_key_cmd,
+		.data = { wep_cmd, },
+	};
+
+	might_sleep();
+
+	memset(wep_cmd, 0, cmd_size +
+			(sizeof(struct iwl_wep_key) * WEP_KEYS_MAX));
+
+	for (i = 0; i < WEP_KEYS_MAX ; i++) {
+		wep_cmd->key[i].key_index = i;
+		if (ctx->wep_keys[i].key_size) {
+			wep_cmd->key[i].key_offset = i;
+			not_empty = 1;
+		} else {
+			wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
+		}
+
+		wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
+		memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
+				ctx->wep_keys[i].key_size);
+	}
+
+	wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
+	wep_cmd->num_keys = WEP_KEYS_MAX;
+
+	cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX;
+
+	cmd.len[0] = cmd_size;
+
+	if (not_empty || send_if_empty)
+		return iwl_dvm_send_cmd(priv, &cmd);
+	else
+		return 0;
+}
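+
+/* Size example (illustrative only): the command always carries all
+ * WEP_KEYS_MAX key slots, so cmd.len[0] is sizeof(struct iwl_wep_cmd) +
+ * WEP_KEYS_MAX * sizeof(struct iwl_wep_key) no matter how many of the
+ * four slots hold a key; empty slots carry WEP_INVALID_OFFSET. */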
+
+int iwl_restore_default_wep_keys(struct iwl_priv *priv,
+				 struct iwl_rxon_context *ctx)
+{
+	lockdep_assert_held(&priv->mutex);
+
+	return iwl_send_static_wepkey_cmd(priv, ctx, false);
+}
+
+int iwl_remove_default_wep_key(struct iwl_priv *priv,
+			       struct iwl_rxon_context *ctx,
+			       struct ieee80211_key_conf *keyconf)
+{
+	int ret;
+
+	lockdep_assert_held(&priv->mutex);
+
+	IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
+		      keyconf->keyidx);
+
+	memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
+	if (iwl_is_rfkill(priv)) {
+		IWL_DEBUG_WEP(priv,
+			"Not sending REPLY_WEPKEY command due to RFKILL.\n");
+		/* but keys in device are clear anyway so return success */
+		return 0;
+	}
+	ret = iwl_send_static_wepkey_cmd(priv, ctx, true);
+	IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
+		      keyconf->keyidx, ret);
+
+	return ret;
+}
+
+int iwl_set_default_wep_key(struct iwl_priv *priv,
+			    struct iwl_rxon_context *ctx,
+			    struct ieee80211_key_conf *keyconf)
+{
+	int ret;
+
+	lockdep_assert_held(&priv->mutex);
+
+	if (keyconf->keylen != WEP_KEY_LEN_128 &&
+	    keyconf->keylen != WEP_KEY_LEN_64) {
+		IWL_DEBUG_WEP(priv,
+			      "Bad WEP key length %d\n", keyconf->keylen);
+		return -EINVAL;
+	}
+
+	keyconf->hw_key_idx = IWLAGN_HW_KEY_DEFAULT;
+
+	ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
+	memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
+							keyconf->keylen);
+
+	ret = iwl_send_static_wepkey_cmd(priv, ctx, false);
+	IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n",
+		keyconf->keylen, keyconf->keyidx, ret);
+
+	return ret;
+}
+
+/*
+ * dynamic (per-station) keys
+ *
+ * The dynamic keys are a little more complicated. The device has
+ * a key cache of up to STA_KEY_MAX_NUM/STA_KEY_MAX_NUM_PAN keys.
+ * These are linked to stations by a table that contains an index
+ * into the key table for each station/key index/{mcast,unicast},
+ * i.e. it's basically an array of pointers like this:
+ *	key_offset_t key_mapping[NUM_STATIONS][4][2];
+ * (it really works differently, but you can think of it as such)
+ *
+ * The key uploading and linking happens in the same command, the
+ * add station command with STA_MODIFY_KEY_MASK.
+ */
+
+static u8 iwlagn_key_sta_id(struct iwl_priv *priv,
+			    struct ieee80211_vif *vif,
+			    struct ieee80211_sta *sta)
+{
+	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+	if (sta)
+		return iwl_sta_id(sta);
+
+	/*
+	 * The device expects GTKs for station interfaces to be
+	 * installed as GTKs for the AP station. If we have no
+	 * station ID, then use the ap_sta_id in that case.
+	 */
+	if (vif->type == NL80211_IFTYPE_STATION && vif_priv->ctx)
+		return vif_priv->ctx->ap_sta_id;
+
+	return IWL_INVALID_STATION;
+}
+
+static int iwlagn_send_sta_key(struct iwl_priv *priv,
+			       struct ieee80211_key_conf *keyconf,
+			       u8 sta_id, u32 tkip_iv32, u16 *tkip_p1k,
+			       u32 cmd_flags)
+{
+	__le16 key_flags;
+	struct iwl_addsta_cmd sta_cmd;
+	int i;
+
+	spin_lock_bh(&priv->sta_lock);
+	memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
+	spin_unlock_bh(&priv->sta_lock);
+
+	key_flags = cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+	key_flags |= STA_KEY_FLG_MAP_KEY_MSK;
+
+	switch (keyconf->cipher) {
+	case WLAN_CIPHER_SUITE_CCMP:
+		key_flags |= STA_KEY_FLG_CCMP;
+		memcpy(sta_cmd.key.key, keyconf->key, keyconf->keylen);
+		break;
+	case WLAN_CIPHER_SUITE_TKIP:
+		key_flags |= STA_KEY_FLG_TKIP;
+		sta_cmd.key.tkip_rx_tsc_byte2 = tkip_iv32;
+		for (i = 0; i < 5; i++)
+			sta_cmd.key.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
+		memcpy(sta_cmd.key.key, keyconf->key, keyconf->keylen);
+		break;
+	case WLAN_CIPHER_SUITE_WEP104:
+		key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
+		/* fall through */
+	case WLAN_CIPHER_SUITE_WEP40:
+		key_flags |= STA_KEY_FLG_WEP;
+		memcpy(&sta_cmd.key.key[3], keyconf->key, keyconf->keylen);
+		break;
+	default:
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+		key_flags |= STA_KEY_MULTICAST_MSK;
+
+	/* key pointer (offset) */
+	sta_cmd.key.key_offset = keyconf->hw_key_idx;
+
+	sta_cmd.key.key_flags = key_flags;
+	sta_cmd.mode = STA_CONTROL_MODIFY_MSK;
+	sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK;
+
+	return iwl_send_add_sta(priv, &sta_cmd, cmd_flags);
+}
+
+void iwl_update_tkip_key(struct iwl_priv *priv,
+			 struct ieee80211_vif *vif,
+			 struct ieee80211_key_conf *keyconf,
+			 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
+{
+	u8 sta_id = iwlagn_key_sta_id(priv, vif, sta);
+
+	if (sta_id == IWL_INVALID_STATION)
+		return;
+
+	if (iwl_scan_cancel(priv)) {
+		/* cancelling the scan failed; just live with the bad key
+		 * and rely briefly on SW decryption */
+		return;
+	}
+
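+	/* fire and forget: CMD_ASYNC avoids waiting for the command here */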
+	iwlagn_send_sta_key(priv, keyconf, sta_id,
+			    iv32, phase1key, CMD_ASYNC);
+}
+
+int iwl_remove_dynamic_key(struct iwl_priv *priv,
+			   struct iwl_rxon_context *ctx,
+			   struct ieee80211_key_conf *keyconf,
+			   struct ieee80211_sta *sta)
+{
+	struct iwl_addsta_cmd sta_cmd;
+	u8 sta_id = iwlagn_key_sta_id(priv, ctx->vif, sta);
+	__le16 key_flags;
+
+	/* if station isn't there, neither is the key */
+	if (sta_id == IWL_INVALID_STATION)
+		return -ENOENT;
+
+	spin_lock_bh(&priv->sta_lock);
+	memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
+	if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE))
+		sta_id = IWL_INVALID_STATION;
+	spin_unlock_bh(&priv->sta_lock);
+
+	if (sta_id == IWL_INVALID_STATION)
+		return 0;
+
+	lockdep_assert_held(&priv->mutex);
+
+	ctx->key_mapping_keys--;
+
+	IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n",
+		      keyconf->keyidx, sta_id);
+
+	if (!test_and_clear_bit(keyconf->hw_key_idx, &priv->ucode_key_table))
+		IWL_ERR(priv, "offset %d not used in uCode key table.\n",
+			keyconf->hw_key_idx);
+
+	key_flags = cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+	key_flags |= STA_KEY_FLG_MAP_KEY_MSK | STA_KEY_FLG_NO_ENC |
+		     STA_KEY_FLG_INVALID;
+
+	if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+		key_flags |= STA_KEY_MULTICAST_MSK;
+
+	sta_cmd.key.key_flags = key_flags;
+	sta_cmd.key.key_offset = keyconf->hw_key_idx;
+	sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK;
+	sta_cmd.mode = STA_CONTROL_MODIFY_MSK;
+
+	return iwl_send_add_sta(priv, &sta_cmd, 0);
+}
+
+int iwl_set_dynamic_key(struct iwl_priv *priv,
+			struct iwl_rxon_context *ctx,
+			struct ieee80211_key_conf *keyconf,
+			struct ieee80211_sta *sta)
+{
+	struct ieee80211_key_seq seq;
+	u16 p1k[5];
+	int ret;
+	u8 sta_id = iwlagn_key_sta_id(priv, ctx->vif, sta);
+	const u8 *addr;
+
+	if (sta_id == IWL_INVALID_STATION)
+		return -EINVAL;
+
+	lockdep_assert_held(&priv->mutex);
+
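+	/* claim a free slot in the device key cache; the offset is kept
+	 * in hw_key_idx so the slot can be released on key removal */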
+	keyconf->hw_key_idx = iwl_get_free_ucode_key_offset(priv);
+	if (keyconf->hw_key_idx == WEP_INVALID_OFFSET)
+		return -ENOSPC;
+
+	ctx->key_mapping_keys++;
+
+	switch (keyconf->cipher) {
+	case WLAN_CIPHER_SUITE_TKIP:
+		if (sta)
+			addr = sta->addr;
+		else /* station mode case only */
+			addr = ctx->active.bssid_addr;
+
+		/* pre-fill phase 1 key into device cache */
+		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
+		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
+		ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
+					  seq.tkip.iv32, p1k, 0);
+		break;
+	case WLAN_CIPHER_SUITE_CCMP:
+	case WLAN_CIPHER_SUITE_WEP40:
+	case WLAN_CIPHER_SUITE_WEP104:
+		ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
+					  0, NULL, 0);
+		break;
+	default:
+		IWL_ERR(priv, "Unknown cipher %x\n", keyconf->cipher);
+		ret = -EINVAL;
+	}
+
+	if (ret) {
+		ctx->key_mapping_keys--;
+		clear_bit(keyconf->hw_key_idx, &priv->ucode_key_table);
+	}
+
+	IWL_DEBUG_WEP(priv, "Set dynamic key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
+		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
+		      sta ? sta->addr : NULL, ret);
+
+	return ret;
+}
+
+/**
+ * iwlagn_alloc_bcast_station - add broadcast station into driver's station table.
+ *
+ * This adds the broadcast station into the driver's station table
+ * and marks it driver active, so that it will be restored to the
+ * device at the next best time.
+ */
+int iwlagn_alloc_bcast_station(struct iwl_priv *priv,
+			       struct iwl_rxon_context *ctx)
+{
+	struct iwl_link_quality_cmd *link_cmd;
+	u8 sta_id;
+
+	spin_lock_bh(&priv->sta_lock);
+	sta_id = iwl_prep_station(priv, ctx, iwl_bcast_addr, false, NULL);
+	if (sta_id == IWL_INVALID_STATION) {
+		IWL_ERR(priv, "Unable to prepare broadcast station\n");
+		spin_unlock_bh(&priv->sta_lock);
+
+		return -EINVAL;
+	}
+
+	priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
+	priv->stations[sta_id].used |= IWL_STA_BCAST;
+	spin_unlock_bh(&priv->sta_lock);
+
+	link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id);
+	if (!link_cmd) {
+		IWL_ERR(priv,
+			"Unable to initialize rate scaling for bcast station.\n");
+		return -ENOMEM;
+	}
+
+	spin_lock_bh(&priv->sta_lock);
+	priv->stations[sta_id].lq = link_cmd;
+	spin_unlock_bh(&priv->sta_lock);
+
+	return 0;
+}
+
+/**
+ * iwl_update_bcast_station - update broadcast station's LQ command
+ *
+ * Only used by iwlagn. Placed here to have all bcast station management
+ * code together.
+ */
+int iwl_update_bcast_station(struct iwl_priv *priv,
+			     struct iwl_rxon_context *ctx)
+{
+	struct iwl_link_quality_cmd *link_cmd;
+	u8 sta_id = ctx->bcast_sta_id;
+
+	link_cmd = iwl_sta_alloc_lq(priv, ctx, sta_id);
+	if (!link_cmd) {
+		IWL_ERR(priv, "Unable to initialize rate scaling for bcast station.\n");
+		return -ENOMEM;
+	}
+
+	spin_lock_bh(&priv->sta_lock);
+	if (priv->stations[sta_id].lq)
+		kfree(priv->stations[sta_id].lq);
+	else
+		IWL_DEBUG_INFO(priv, "Bcast station rate scaling has not been initialized yet.\n");
+	priv->stations[sta_id].lq = link_cmd;
+	spin_unlock_bh(&priv->sta_lock);
+
+	return 0;
+}
+
+int iwl_update_bcast_stations(struct iwl_priv *priv)
+{
+	struct iwl_rxon_context *ctx;
+	int ret = 0;
+
+	for_each_context(priv, ctx) {
+		ret = iwl_update_bcast_station(priv, ctx);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+
+/**
+ * iwl_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
+ */
+int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
+{
+	struct iwl_addsta_cmd sta_cmd;
+
+	lockdep_assert_held(&priv->mutex);
+
+	/* Remove "disable" flag, to enable Tx for this TID */
+	spin_lock_bh(&priv->sta_lock);
+	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
+	priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
+	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+	memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
+	spin_unlock_bh(&priv->sta_lock);
+
+	return iwl_send_add_sta(priv, &sta_cmd, 0);
+}
+
+int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
+			 int tid, u16 ssn)
+{
+	int sta_id;
+	struct iwl_addsta_cmd sta_cmd;
+
+	lockdep_assert_held(&priv->mutex);
+
+	sta_id = iwl_sta_id(sta);
+	if (sta_id == IWL_INVALID_STATION)
+		return -ENXIO;
+
+	spin_lock_bh(&priv->sta_lock);
+	priv->stations[sta_id].sta.station_flags_msk = 0;
+	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
+	priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
+	priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
+	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+	memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
+	spin_unlock_bh(&priv->sta_lock);
+
+	return iwl_send_add_sta(priv, &sta_cmd, 0);
+}
+
+int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
+			int tid)
+{
+	int sta_id;
+	struct iwl_addsta_cmd sta_cmd;
+
+	lockdep_assert_held(&priv->mutex);
+
+	sta_id = iwl_sta_id(sta);
+	if (sta_id == IWL_INVALID_STATION) {
+		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
+		return -ENXIO;
+	}
+
+	spin_lock_bh(&priv->sta_lock);
+	priv->stations[sta_id].sta.station_flags_msk = 0;
+	priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
+	priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
+	priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+	memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd));
+	spin_unlock_bh(&priv->sta_lock);
+
+	return iwl_send_add_sta(priv, &sta_cmd, 0);
+}
+
+void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
+{
+	struct iwl_addsta_cmd cmd = {
+		.mode = STA_CONTROL_MODIFY_MSK,
+		.station_flags = STA_FLG_PWR_SAVE_MSK,
+		.station_flags_msk = STA_FLG_PWR_SAVE_MSK,
+		.sta.sta_id = sta_id,
+		.sta.modify_mask = STA_MODIFY_SLEEP_TX_COUNT_MSK,
+		.sleep_tx_count = cpu_to_le16(cnt),
+	};
+
+	iwl_send_add_sta(priv, &cmd, CMD_ASYNC);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tt.c b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
new file mode 100644
index 0000000..6524533
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
@@ -0,0 +1,687 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+#include "iwl-io.h"
+#include "iwl-modparams.h"
+#include "iwl-debug.h"
+#include "agn.h"
+#include "dev.h"
+#include "commands.h"
+#include "tt.h"
+
+/* default Thermal Throttling transaction table
+ * Current state   |         Throttling Down               |  Throttling Up
+ *=============================================================================
+ *                 Condition Nxt State  Condition Nxt State Condition Nxt State
+ *-----------------------------------------------------------------------------
+ *     IWL_TI_0     T >= 114   CT_KILL  114>T>=105   TI_1      N/A      N/A
+ *     IWL_TI_1     T >= 114   CT_KILL  114>T>=110   TI_2     T<=95     TI_0
+ *     IWL_TI_2     T >= 114   CT_KILL                        T<=100    TI_1
+ *    IWL_CT_KILL      N/A       N/A       N/A        N/A     T<=95     TI_0
+ *=============================================================================
+ */
+static const struct iwl_tt_trans tt_range_0[IWL_TI_STATE_MAX - 1] = {
+	{IWL_TI_0, IWL_ABSOLUTE_ZERO, 104},
+	{IWL_TI_1, 105, CT_KILL_THRESHOLD - 1},
+	{IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
+};
+static const struct iwl_tt_trans tt_range_1[IWL_TI_STATE_MAX - 1] = {
+	{IWL_TI_0, IWL_ABSOLUTE_ZERO, 95},
+	{IWL_TI_2, 110, CT_KILL_THRESHOLD - 1},
+	{IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
+};
+static const struct iwl_tt_trans tt_range_2[IWL_TI_STATE_MAX - 1] = {
+	{IWL_TI_1, IWL_ABSOLUTE_ZERO, 100},
+	{IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX},
+	{IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
+};
+static const struct iwl_tt_trans tt_range_3[IWL_TI_STATE_MAX - 1] = {
+	{IWL_TI_0, IWL_ABSOLUTE_ZERO, CT_KILL_EXIT_THRESHOLD},
+	{IWL_TI_CT_KILL, CT_KILL_EXIT_THRESHOLD + 1, IWL_ABSOLUTE_MAX},
+	{IWL_TI_CT_KILL, CT_KILL_EXIT_THRESHOLD + 1, IWL_ABSOLUTE_MAX}
+};
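+
+/*
+ * iwl_tt_initialize() copies the four ranges above into one flat array;
+ * the (IWL_TI_STATE_MAX - 1) entries for state S start at
+ * tt->transaction + S * (IWL_TI_STATE_MAX - 1).
+ */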
+
+/* Advanced Thermal Throttling default restriction table */
+static const struct iwl_tt_restriction restriction_range[IWL_TI_STATE_MAX] = {
+	{IWL_ANT_OK_MULTI, IWL_ANT_OK_MULTI, true },
+	{IWL_ANT_OK_SINGLE, IWL_ANT_OK_MULTI, true },
+	{IWL_ANT_OK_SINGLE, IWL_ANT_OK_SINGLE, false },
+	{IWL_ANT_OK_NONE, IWL_ANT_OK_NONE, false }
+};
+
+bool iwl_tt_is_low_power_state(struct iwl_priv *priv)
+{
+	struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
+
+	return tt->state >= IWL_TI_1;
+}
+
+u8 iwl_tt_current_power_mode(struct iwl_priv *priv)
+{
+	struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
+
+	return tt->tt_power_mode;
+}
+
+bool iwl_ht_enabled(struct iwl_priv *priv)
+{
+	struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
+	struct iwl_tt_restriction *restriction;
+
+	if (!priv->thermal_throttle.advanced_tt)
+		return true;
+	restriction = tt->restriction + tt->state;
+	return restriction->is_ht;
+}
+
+static bool iwl_within_ct_kill_margin(struct iwl_priv *priv)
+{
+	s32 temp = priv->temperature; /* in degrees Celsius unless noted otherwise */
+	bool within_margin = false;
+
+	if (!priv->thermal_throttle.advanced_tt)
+		within_margin = (temp + IWL_TT_CT_KILL_MARGIN) >=
+				CT_KILL_THRESHOLD_LEGACY;
+	else
+		within_margin = (temp + IWL_TT_CT_KILL_MARGIN) >=
+				CT_KILL_THRESHOLD;
+	return within_margin;
+}
+
+bool iwl_check_for_ct_kill(struct iwl_priv *priv)
+{
+	bool is_ct_kill = false;
+
+	if (iwl_within_ct_kill_margin(priv)) {
+		iwl_tt_enter_ct_kill(priv);
+		is_ct_kill = true;
+	}
+	return is_ct_kill;
+}
+
+enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv)
+{
+	struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
+	struct iwl_tt_restriction *restriction;
+
+	if (!priv->thermal_throttle.advanced_tt)
+		return IWL_ANT_OK_MULTI;
+	restriction = tt->restriction + tt->state;
+	return restriction->tx_stream;
+}
+
+enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv)
+{
+	struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
+	struct iwl_tt_restriction *restriction;
+
+	if (!priv->thermal_throttle.advanced_tt)
+		return IWL_ANT_OK_MULTI;
+	restriction = tt->restriction + tt->state;
+	return restriction->rx_stream;
+}
+
+#define CT_KILL_EXIT_DURATION (5)	/* 5 seconds duration */
+#define CT_KILL_WAITING_DURATION (300)	/* 300ms duration */
+
+/*
+ * Toggle the bit to wake up uCode and check the temperature.
+ * If the temperature is below CT, uCode will stay awake and send a card
+ * state notification with the CT_KILL bit clear to tell Thermal Throttling
+ * Management to change state. Otherwise, uCode goes back to sleep without
+ * doing anything, and the driver should restart the 5 second timer to wake
+ * up uCode for another temperature check until the temperature drops
+ * below CT.
+ */
+static void iwl_tt_check_exit_ct_kill(struct timer_list *t)
+{
+	struct iwl_priv *priv = from_timer(priv, t,
+					   thermal_throttle.ct_kill_exit_tm);
+	struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
+	unsigned long flags;
+
+	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+		return;
+
+	if (tt->state == IWL_TI_CT_KILL) {
+		if (priv->thermal_throttle.ct_kill_toggle) {
+			iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR,
+				    CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
+			priv->thermal_throttle.ct_kill_toggle = false;
+		} else {
+			iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET,
+				    CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
+			priv->thermal_throttle.ct_kill_toggle = true;
+		}
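+		/* read back so the CSR write is pushed out to the device */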
+		iwl_read32(priv->trans, CSR_UCODE_DRV_GP1);
+		if (iwl_trans_grab_nic_access(priv->trans, &flags))
+			iwl_trans_release_nic_access(priv->trans, &flags);
+
+		/* Reschedule the ct_kill timer to occur in
+		 * CT_KILL_EXIT_DURATION seconds to ensure we get a
+		 * thermal update */
+		IWL_DEBUG_TEMP(priv, "schedule ct_kill exit timer\n");
+		mod_timer(&priv->thermal_throttle.ct_kill_exit_tm,
+			  jiffies + CT_KILL_EXIT_DURATION * HZ);
+	}
+}
+
+static void iwl_perform_ct_kill_task(struct iwl_priv *priv,
+			   bool stop)
+{
+	if (stop) {
+		IWL_DEBUG_TEMP(priv, "Stop all queues\n");
+		if (priv->mac80211_registered)
+			ieee80211_stop_queues(priv->hw);
+		IWL_DEBUG_TEMP(priv,
+				"Schedule 5 seconds CT_KILL Timer\n");
+		mod_timer(&priv->thermal_throttle.ct_kill_exit_tm,
+			  jiffies + CT_KILL_EXIT_DURATION * HZ);
+	} else {
+		IWL_DEBUG_TEMP(priv, "Wake all queues\n");
+		if (priv->mac80211_registered)
+			ieee80211_wake_queues(priv->hw);
+	}
+}
+
+static void iwl_tt_ready_for_ct_kill(struct timer_list *t)
+{
+	struct iwl_priv *priv = from_timer(priv, t,
+					   thermal_throttle.ct_kill_waiting_tm);
+	struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
+
+	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+		return;
+
+	/* temperature timer expired, ready to go into CT_KILL state */
+	if (tt->state != IWL_TI_CT_KILL) {
+		IWL_DEBUG_TEMP(priv, "entering CT_KILL state when "
+				"temperature timer expired\n");
+		tt->state = IWL_TI_CT_KILL;
+		set_bit(STATUS_CT_KILL, &priv->status);
+		iwl_perform_ct_kill_task(priv, true);
+	}
+}
+
+static void iwl_prepare_ct_kill_task(struct iwl_priv *priv)
+{
+	IWL_DEBUG_TEMP(priv, "Prepare to enter IWL_TI_CT_KILL\n");
+	/* make request to retrieve statistics information */
+	iwl_send_statistics_request(priv, 0, false);
+	/* Reschedule the ct_kill wait timer */
+	mod_timer(&priv->thermal_throttle.ct_kill_waiting_tm,
+		 jiffies + msecs_to_jiffies(CT_KILL_WAITING_DURATION));
+}
+
+#define IWL_MINIMAL_POWER_THRESHOLD		(CT_KILL_THRESHOLD_LEGACY)
+#define IWL_REDUCED_PERFORMANCE_THRESHOLD_2	(100)
+#define IWL_REDUCED_PERFORMANCE_THRESHOLD_1	(90)
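+
+/* With the thresholds above, e.g. a reading of 95 degrees Celsius lands
+ * in IWL_TI_1, and one at or above THRESHOLD_2 but below the minimal
+ * power threshold lands in IWL_TI_2; see iwl_legacy_tt_handler(). */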
+
+/*
+ * Legacy thermal throttling
+ * 1) Avoid NIC destruction due to high temperatures
+ *	Chip will identify dangerously high temperatures that can
+ *	harm the device and will power down
+ * 2) Avoid the NIC power down due to high temperature
+ *	Throttle early enough to lower the power consumption before
+ *	drastic steps are needed
+ */
+static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
+{
+	struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
+	enum iwl_tt_state old_state;
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+	if ((tt->tt_previous_temp) &&
+	    (temp > tt->tt_previous_temp) &&
+	    ((temp - tt->tt_previous_temp) >
+	    IWL_TT_INCREASE_MARGIN)) {
+		IWL_DEBUG_TEMP(priv,
+			"Temperature increase %d degree Celsius\n",
+			(temp - tt->tt_previous_temp));
+	}
+#endif
+	old_state = tt->state;
+	/* in Celsius */
+	if (temp >= IWL_MINIMAL_POWER_THRESHOLD)
+		tt->state = IWL_TI_CT_KILL;
+	else if (temp >= IWL_REDUCED_PERFORMANCE_THRESHOLD_2)
+		tt->state = IWL_TI_2;
+	else if (temp >= IWL_REDUCED_PERFORMANCE_THRESHOLD_1)
+		tt->state = IWL_TI_1;
+	else
+		tt->state = IWL_TI_0;
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+	tt->tt_previous_temp = temp;
+#endif
+	/* stop ct_kill_waiting_tm timer */
+	del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
+	if (tt->state != old_state) {
+		switch (tt->state) {
+		case IWL_TI_0:
+			/*
+			 * When the system is ready to go back to IWL_TI_0
+			 * we only have to call iwl_power_update_mode() to
+			 * do so.
+			 */
+			break;
+		case IWL_TI_1:
+			tt->tt_power_mode = IWL_POWER_INDEX_3;
+			break;
+		case IWL_TI_2:
+			tt->tt_power_mode = IWL_POWER_INDEX_4;
+			break;
+		default:
+			tt->tt_power_mode = IWL_POWER_INDEX_5;
+			break;
+		}
+		mutex_lock(&priv->mutex);
+		if (old_state == IWL_TI_CT_KILL)
+			clear_bit(STATUS_CT_KILL, &priv->status);
+		if (tt->state != IWL_TI_CT_KILL &&
+		    iwl_power_update_mode(priv, true)) {
+			/* TT state not updated
+			 * try again during next temperature read
+			 */
+			if (old_state == IWL_TI_CT_KILL)
+				set_bit(STATUS_CT_KILL, &priv->status);
+			tt->state = old_state;
+			IWL_ERR(priv, "Cannot update power mode, "
+					"TT state not updated\n");
+		} else {
+			if (tt->state == IWL_TI_CT_KILL) {
+				if (force) {
+					set_bit(STATUS_CT_KILL, &priv->status);
+					iwl_perform_ct_kill_task(priv, true);
+				} else {
+					iwl_prepare_ct_kill_task(priv);
+					tt->state = old_state;
+				}
+			} else if (old_state == IWL_TI_CT_KILL &&
+				 tt->state != IWL_TI_CT_KILL)
+				iwl_perform_ct_kill_task(priv, false);
+			IWL_DEBUG_TEMP(priv, "Temperature state changed %u\n",
+					tt->state);
+			IWL_DEBUG_TEMP(priv, "Power Index change to %u\n",
+					tt->tt_power_mode);
+		}
+		mutex_unlock(&priv->mutex);
+	}
+}
+
+/*
+ * Advanced thermal throttling
+ * 1) Avoid NIC destruction due to high temperatures
+ *	Chip will identify dangerously high temperatures that can
+ *	harm the device and will power down
+ * 2) Avoid the NIC power down due to high temperature
+ *	Throttle early enough to lower the power consumption before
+ *	drastic steps are needed
+ *	Actions include relaxing the power down sleep thresholds and
+ *	decreasing the number of TX streams
+ * 3) Avoid throughput performance impact as much as possible
+ *
+ *=============================================================================
+ *                 Condition Nxt State  Condition Nxt State Condition Nxt State
+ *-----------------------------------------------------------------------------
+ *     IWL_TI_0     T >= 114   CT_KILL  114>T>=105   TI_1      N/A      N/A
+ *     IWL_TI_1     T >= 114   CT_KILL  114>T>=110   TI_2     T<=95     TI_0
+ *     IWL_TI_2     T >= 114   CT_KILL                        T<=100    TI_1
+ *    IWL_CT_KILL      N/A       N/A       N/A        N/A     T<=95     TI_0
+ *=============================================================================
+ */
+static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
+{
+	struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
+	int i;
+	bool changed = false;
+	enum iwl_tt_state old_state;
+	struct iwl_tt_trans *transaction;
+
+	old_state = tt->state;
+	for (i = 0; i < IWL_TI_STATE_MAX - 1; i++) {
+		/* Based on the current TT state, find the corresponding
+		 * transition table; each table has (IWL_TI_STATE_MAX - 1)
+		 * entries, so tt->transaction +
+		 * (old_state * (IWL_TI_STATE_MAX - 1)) points at the correct
+		 * table. Then, based on the current temperature, walk all
+		 * (IWL_TI_STATE_MAX - 1) entries in that table to find the
+		 * next state to transition to, if any.
+		 */
+		transaction = tt->transaction +
+			((old_state * (IWL_TI_STATE_MAX - 1)) + i);
+		if (temp >= transaction->tt_low &&
+		    temp <= transaction->tt_high) {
+#ifdef CONFIG_IWLWIFI_DEBUG
+			if ((tt->tt_previous_temp) &&
+			    (temp > tt->tt_previous_temp) &&
+			    ((temp - tt->tt_previous_temp) >
+			    IWL_TT_INCREASE_MARGIN)) {
+				IWL_DEBUG_TEMP(priv,
+					"Temperature increase %d "
+					"degree Celsius\n",
+					(temp - tt->tt_previous_temp));
+			}
+			tt->tt_previous_temp = temp;
+#endif
+			if (old_state !=
+			    transaction->next_state) {
+				changed = true;
+				tt->state =
+					transaction->next_state;
+			}
+			break;
+		}
+	}
+	/* stop ct_kill_waiting_tm timer */
+	del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
+	if (changed) {
+		if (tt->state >= IWL_TI_1) {
+			/* force PI = IWL_POWER_INDEX_5 in the case of TI > 0 */
+			tt->tt_power_mode = IWL_POWER_INDEX_5;
+
+			if (!iwl_ht_enabled(priv)) {
+				struct iwl_rxon_context *ctx;
+
+				for_each_context(priv, ctx) {
+					struct iwl_rxon_cmd *rxon;
+
+					rxon = &ctx->staging;
+
+					/* disable HT */
+					rxon->flags &= ~(
+						RXON_FLG_CHANNEL_MODE_MSK |
+						RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
+						RXON_FLG_HT40_PROT_MSK |
+						RXON_FLG_HT_PROT_MSK);
+				}
+			} else {
+				/* check HT capability and configure it
+				 * according to the system HT capability,
+				 * in case it was disabled before */
+				iwl_set_rxon_ht(priv, &priv->current_ht_config);
+			}
+
+		} else {
+			/*
+			 * restore system power setting -- it will be
+			 * recalculated automatically.
+			 */
+
+			/* check HT capability and configure it
+			 * according to the system HT capability,
+			 * in case it was disabled before */
+			iwl_set_rxon_ht(priv, &priv->current_ht_config);
+		}
+		mutex_lock(&priv->mutex);
+		if (old_state == IWL_TI_CT_KILL)
+			clear_bit(STATUS_CT_KILL, &priv->status);
+		if (tt->state != IWL_TI_CT_KILL &&
+		    iwl_power_update_mode(priv, true)) {
+			/* TT state not updated
+			 * try again during next temperature read
+			 */
+			IWL_ERR(priv, "Cannot update power mode, "
+					"TT state not updated\n");
+			if (old_state == IWL_TI_CT_KILL)
+				set_bit(STATUS_CT_KILL, &priv->status);
+			tt->state = old_state;
+		} else {
+			IWL_DEBUG_TEMP(priv,
+					"Thermal Throttling to new state: %u\n",
+					tt->state);
+			if (old_state != IWL_TI_CT_KILL &&
+			    tt->state == IWL_TI_CT_KILL) {
+				if (force) {
+					IWL_DEBUG_TEMP(priv,
+						"Enter IWL_TI_CT_KILL\n");
+					set_bit(STATUS_CT_KILL, &priv->status);
+					iwl_perform_ct_kill_task(priv, true);
+				} else {
+					tt->state = old_state;
+					iwl_prepare_ct_kill_task(priv);
+				}
+			} else if (old_state == IWL_TI_CT_KILL &&
+				  tt->state != IWL_TI_CT_KILL) {
+				IWL_DEBUG_TEMP(priv, "Exit IWL_TI_CT_KILL\n");
+				iwl_perform_ct_kill_task(priv, false);
+			}
+		}
+		mutex_unlock(&priv->mutex);
+	}
+}
+
+/* Card State Notification indicated that a critical temperature was reached.
+ * If PSP is not enabled, no Thermal Throttling is performed and we just set
+ * the GP1 bit to acknowledge the event; otherwise, go into the
+ * IWL_TI_CT_KILL state. Since the Card State Notification does not provide
+ * any temperature reading, for Legacy mode pass the CT_KILL temperature to
+ * iwl_legacy_tt_handler(), and for advanced mode pass CT_KILL_THRESHOLD+1
+ * to make sure we move into the IWL_TI_CT_KILL state.
+ */
+static void iwl_bg_ct_enter(struct work_struct *work)
+{
+	struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_enter);
+	struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
+
+	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+		return;
+
+	if (!iwl_is_ready(priv))
+		return;
+
+	if (tt->state != IWL_TI_CT_KILL) {
+		IWL_ERR(priv, "Device reached critical temperature "
+			      "- ucode going to sleep!\n");
+		if (!priv->thermal_throttle.advanced_tt)
+			iwl_legacy_tt_handler(priv,
+					      IWL_MINIMAL_POWER_THRESHOLD,
+					      true);
+		else
+			iwl_advance_tt_handler(priv,
+					       CT_KILL_THRESHOLD + 1, true);
+	}
+}
+
+/* Card State Notification indicated that the device is out of the critical
+ * temperature range. Since the Card State Notification does not provide any
+ * temperature reading, pass the IWL_REDUCED_PERFORMANCE_THRESHOLD_2
+ * temperature to iwl_legacy_tt_handler() to get out of the IWL_CT_KILL state.
+ */
+static void iwl_bg_ct_exit(struct work_struct *work)
+{
+	struct iwl_priv *priv = container_of(work, struct iwl_priv, ct_exit);
+	struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
+
+	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+		return;
+
+	if (!iwl_is_ready(priv))
+		return;
+
+	/* stop ct_kill_exit_tm timer */
+	del_timer_sync(&priv->thermal_throttle.ct_kill_exit_tm);
+
+	if (tt->state == IWL_TI_CT_KILL) {
+		IWL_ERR(priv,
+			"Device temperature below critical - ucode awake!\n");
+		/*
+		 * exit from CT_KILL state
+		 * reset the current temperature reading
+		 */
+		priv->temperature = 0;
+		if (!priv->thermal_throttle.advanced_tt)
+			iwl_legacy_tt_handler(priv,
+				      IWL_REDUCED_PERFORMANCE_THRESHOLD_2,
+				      true);
+		else
+			iwl_advance_tt_handler(priv, CT_KILL_EXIT_THRESHOLD,
+					       true);
+	}
+}
+
+void iwl_tt_enter_ct_kill(struct iwl_priv *priv)
+{
+	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+		return;
+
+	IWL_DEBUG_TEMP(priv, "Queueing critical temperature enter.\n");
+	queue_work(priv->workqueue, &priv->ct_enter);
+}
+
+void iwl_tt_exit_ct_kill(struct iwl_priv *priv)
+{
+	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+		return;
+
+	IWL_DEBUG_TEMP(priv, "Queueing critical temperature exit.\n");
+	queue_work(priv->workqueue, &priv->ct_exit);
+}
+
+static void iwl_bg_tt_work(struct work_struct *work)
+{
+	struct iwl_priv *priv = container_of(work, struct iwl_priv, tt_work);
+	s32 temp = priv->temperature; /* in degrees Celsius unless noted otherwise */
+
+	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+		return;
+
+	if (!priv->thermal_throttle.advanced_tt)
+		iwl_legacy_tt_handler(priv, temp, false);
+	else
+		iwl_advance_tt_handler(priv, temp, false);
+}
+
+void iwl_tt_handler(struct iwl_priv *priv)
+{
+	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+		return;
+
+	IWL_DEBUG_TEMP(priv, "Queueing thermal throttling work.\n");
+	queue_work(priv->workqueue, &priv->tt_work);
+}
+
+/* Thermal throttling initialization
+ * For advanced thermal throttling:
+ *     Initialize Thermal Index and temperature threshold table
+ *     Initialize thermal throttling restriction table
+ */
+void iwl_tt_initialize(struct iwl_priv *priv)
+{
+	struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
+	int size = sizeof(struct iwl_tt_trans) * (IWL_TI_STATE_MAX - 1);
+	struct iwl_tt_trans *transaction;
+
+	IWL_DEBUG_TEMP(priv, "Initialize Thermal Throttling\n");
+
+	memset(tt, 0, sizeof(struct iwl_tt_mgmt));
+
+	tt->state = IWL_TI_0;
+	timer_setup(&priv->thermal_throttle.ct_kill_exit_tm,
+		    iwl_tt_check_exit_ct_kill, 0);
+	timer_setup(&priv->thermal_throttle.ct_kill_waiting_tm,
+		    iwl_tt_ready_for_ct_kill, 0);
+	/* setup deferred ct kill work */
+	INIT_WORK(&priv->tt_work, iwl_bg_tt_work);
+	INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
+	INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit);
+
+	if (priv->lib->adv_thermal_throttle) {
+		IWL_DEBUG_TEMP(priv, "Advanced Thermal Throttling\n");
+		tt->restriction = kcalloc(IWL_TI_STATE_MAX,
+					  sizeof(struct iwl_tt_restriction),
+					  GFP_KERNEL);
+		tt->transaction = kcalloc(IWL_TI_STATE_MAX *
+					  (IWL_TI_STATE_MAX - 1),
+					  sizeof(struct iwl_tt_trans),
+					  GFP_KERNEL);
+		if (!tt->restriction || !tt->transaction) {
+			IWL_ERR(priv, "Fallback to Legacy Throttling\n");
+			priv->thermal_throttle.advanced_tt = false;
+			kfree(tt->restriction);
+			tt->restriction = NULL;
+			kfree(tt->transaction);
+			tt->transaction = NULL;
+		} else {
+			transaction = tt->transaction +
+				(IWL_TI_0 * (IWL_TI_STATE_MAX - 1));
+			memcpy(transaction, &tt_range_0[0], size);
+			transaction = tt->transaction +
+				(IWL_TI_1 * (IWL_TI_STATE_MAX - 1));
+			memcpy(transaction, &tt_range_1[0], size);
+			transaction = tt->transaction +
+				(IWL_TI_2 * (IWL_TI_STATE_MAX - 1));
+			memcpy(transaction, &tt_range_2[0], size);
+			transaction = tt->transaction +
+				(IWL_TI_CT_KILL * (IWL_TI_STATE_MAX - 1));
+			memcpy(transaction, &tt_range_3[0], size);
+			size = sizeof(struct iwl_tt_restriction) *
+				IWL_TI_STATE_MAX;
+			memcpy(tt->restriction,
+				&restriction_range[0], size);
+			priv->thermal_throttle.advanced_tt = true;
+		}
+	} else {
+		IWL_DEBUG_TEMP(priv, "Legacy Thermal Throttling\n");
+		priv->thermal_throttle.advanced_tt = false;
+	}
+}
+
+/* clean up thermal throttling management related memory and timers */
+void iwl_tt_exit(struct iwl_priv *priv)
+{
+	struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
+
+	/* stop ct_kill_exit_tm timer if activated */
+	del_timer_sync(&priv->thermal_throttle.ct_kill_exit_tm);
+	/* stop ct_kill_waiting_tm timer if activated */
+	del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
+	cancel_work_sync(&priv->tt_work);
+	cancel_work_sync(&priv->ct_enter);
+	cancel_work_sync(&priv->ct_exit);
+
+	if (priv->thermal_throttle.advanced_tt) {
+		/* free advanced thermal throttling memory */
+		kfree(tt->restriction);
+		tt->restriction = NULL;
+		kfree(tt->transaction);
+		tt->transaction = NULL;
+	}
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tt.h b/drivers/net/wireless/intel/iwlwifi/dvm/tt.h
new file mode 100644
index 0000000..d324e9b
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.h
@@ -0,0 +1,128 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+#ifndef __iwl_tt_setting_h__
+#define __iwl_tt_setting_h__
+
+#include "commands.h"
+
+#define IWL_ABSOLUTE_ZERO		0
+#define IWL_ABSOLUTE_MAX		0xFFFFFFFF
+#define IWL_TT_INCREASE_MARGIN	5
+#define IWL_TT_CT_KILL_MARGIN	3
+
+enum iwl_antenna_ok {
+	IWL_ANT_OK_NONE,
+	IWL_ANT_OK_SINGLE,
+	IWL_ANT_OK_MULTI,
+};
+
+/* Thermal Throttling State Machine states */
+enum  iwl_tt_state {
+	IWL_TI_0,	/* normal temperature, system power state */
+	IWL_TI_1,	/* high temperature detect, low power state */
+	IWL_TI_2,	/* higher temperature detected, lower power state */
+	IWL_TI_CT_KILL, /* critical temperature detected, lowest power state */
+	IWL_TI_STATE_MAX
+};
+
+/**
+ * struct iwl_tt_restriction - Thermal Throttling restriction table
+ * @tx_stream: number of tx streams allowed
+ * @rx_stream: number of rx streams allowed
+ * @is_ht: ht enable/disable
+ *
+ * This table is used by advanced thermal throttling management
+ * based on the current thermal throttling state, and determines
+ * the number of tx/rx streams and the status of HT operation.
+ */
+struct iwl_tt_restriction {
+	enum iwl_antenna_ok tx_stream;
+	enum iwl_antenna_ok rx_stream;
+	bool is_ht;
+};
+
+/**
+ * struct iwl_tt_trans - Thermal Throttling transaction table
+ * @next_state:  next thermal throttling mode
+ * @tt_low: low temperature threshold to change state
+ * @tt_high: high temperature threshold to change state
+ *
+ * This is used by the advanced thermal throttling algorithm
+ * to determine the next thermal state to move to, based on the
+ * current temperature.
+ */
+struct iwl_tt_trans {
+	enum iwl_tt_state next_state;
+	u32 tt_low;
+	u32 tt_high;
+};
+
+/**
+ * struct iwl_tt_mgmt - Thermal Throttling Management structure
+ * @state:          current Thermal Throttling state
+ * @advanced_tt:    advanced thermal throttle required
+ * @tt_power_mode:  Thermal Throttling power mode index
+ *		    used to set the power level while the thermal
+ *		    throttling state != IWL_TI_0; it should be set to a
+ *		    different power mode based on the current tt state
+ * @ct_kill_toggle: used to toggle the CSR bit when checking uCode temperature
+ * @tt_previous_temp: last measured temperature
+ * @restriction: ptr to the restriction table, used by advanced thermal
+ *		    throttling to determine how many tx/rx streams should be
+ *		    used in the current tt state and whether HT is allowed
+ * @transaction: ptr to the advanced transition table, used by advanced
+ *		    thermal throttling for state transitions
+ * @ct_kill_exit_tm: timer to exit thermal kill
+ * @ct_kill_waiting_tm: timer used while preparing to enter thermal kill
+ */
+struct iwl_tt_mgmt {
+	enum iwl_tt_state state;
+	bool advanced_tt;
+	u8 tt_power_mode;
+	bool ct_kill_toggle;
+#ifdef CONFIG_IWLWIFI_DEBUG
+	s32 tt_previous_temp;
+#endif
+	struct iwl_tt_restriction *restriction;
+	struct iwl_tt_trans *transaction;
+	struct timer_list ct_kill_exit_tm;
+	struct timer_list ct_kill_waiting_tm;
+};
+
+u8 iwl_tt_current_power_mode(struct iwl_priv *priv);
+bool iwl_tt_is_low_power_state(struct iwl_priv *priv);
+bool iwl_ht_enabled(struct iwl_priv *priv);
+enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv);
+enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv);
+void iwl_tt_enter_ct_kill(struct iwl_priv *priv);
+void iwl_tt_exit_ct_kill(struct iwl_priv *priv);
+void iwl_tt_handler(struct iwl_priv *priv);
+void iwl_tt_initialize(struct iwl_priv *priv);
+void iwl_tt_exit(struct iwl_priv *priv);
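+
+/*
+ * Typical lifecycle: iwl_tt_initialize() at setup, iwl_tt_handler()
+ * whenever a new temperature reading arrives, iwl_tt_enter_ct_kill() /
+ * iwl_tt_exit_ct_kill() from the card state notification, and
+ * iwl_tt_exit() at teardown.
+ */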
+
+#endif  /* __iwl_tt_setting_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
new file mode 100644
index 0000000..fb40ddf
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
@@ -0,0 +1,1412 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/ieee80211.h>
+#include "iwl-io.h"
+#include "iwl-trans.h"
+#include "iwl-agn-hw.h"
+#include "dev.h"
+#include "agn.h"
+
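+/* IEEE 802.11 user priority (TID 0-7) to access category mapping */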
+static const u8 tid_to_ac[] = {
+	IEEE80211_AC_BE,
+	IEEE80211_AC_BK,
+	IEEE80211_AC_BK,
+	IEEE80211_AC_BE,
+	IEEE80211_AC_VI,
+	IEEE80211_AC_VI,
+	IEEE80211_AC_VO,
+	IEEE80211_AC_VO,
+};
+
+static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
+				     struct ieee80211_tx_info *info,
+				     __le16 fc, __le32 *tx_flags)
+{
+	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS ||
+	    info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT ||
+	    info->flags & IEEE80211_TX_CTL_AMPDU)
+		*tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
+}
+
+/*
+ * Build the basic fields of the REPLY_TX command.
+ */
+static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
+				      struct sk_buff *skb,
+				      struct iwl_tx_cmd *tx_cmd,
+				      struct ieee80211_tx_info *info,
+				      struct ieee80211_hdr *hdr, u8 sta_id)
+{
+	__le16 fc = hdr->frame_control;
+	__le32 tx_flags = tx_cmd->tx_flags;
+
+	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+
+	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+		tx_flags |= TX_CMD_FLG_ACK_MSK;
+	else
+		tx_flags &= ~TX_CMD_FLG_ACK_MSK;
+
+	if (ieee80211_is_probe_resp(fc))
+		tx_flags |= TX_CMD_FLG_TSF_MSK;
+	else if (ieee80211_is_back_req(fc))
+		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
+	else if (info->band == NL80211_BAND_2GHZ &&
+		 priv->lib->bt_params &&
+		 priv->lib->bt_params->advanced_bt_coexist &&
+		 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
+		 ieee80211_is_reassoc_req(fc) ||
+		 info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
+		tx_flags |= TX_CMD_FLG_IGNORE_BT;
+
+	tx_cmd->sta_id = sta_id;
+	if (ieee80211_has_morefrags(fc))
+		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
+
+	if (ieee80211_is_data_qos(fc)) {
+		u8 *qc = ieee80211_get_qos_ctl(hdr);
+		tx_cmd->tid_tspec = qc[0] & 0xf;
+		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
+	} else {
+		tx_cmd->tid_tspec = IWL_TID_NON_QOS;
+		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+		else
+			tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
+	}
+
+	iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags);
+
+	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
+	if (ieee80211_is_mgmt(fc)) {
+		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
+			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
+		else
+			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
+	} else {
+		tx_cmd->timeout.pm_frame_timeout = 0;
+	}
+
+	tx_cmd->driver_txop = 0;
+	tx_cmd->tx_flags = tx_flags;
+	tx_cmd->next_frame_len = 0;
+}
+
+static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
+				     struct iwl_tx_cmd *tx_cmd,
+				     struct ieee80211_tx_info *info,
+				     struct ieee80211_sta *sta,
+				     __le16 fc)
+{
+	u32 rate_flags;
+	int rate_idx;
+	u8 rts_retry_limit;
+	u8 data_retry_limit;
+	u8 rate_plcp;
+
+	if (priv->wowlan) {
+		rts_retry_limit = IWLAGN_LOW_RETRY_LIMIT;
+		data_retry_limit = IWLAGN_LOW_RETRY_LIMIT;
+	} else {
+		/* Set retry limit on RTS packets */
+		rts_retry_limit = IWLAGN_RTS_DFAULT_RETRY_LIMIT;
+
+		/* Set retry limit on DATA packets and Probe Responses */
+		if (ieee80211_is_probe_resp(fc)) {
+			data_retry_limit = IWLAGN_MGMT_DFAULT_RETRY_LIMIT;
+			rts_retry_limit =
+				min(data_retry_limit, rts_retry_limit);
+		} else if (ieee80211_is_back_req(fc))
+			data_retry_limit = IWLAGN_BAR_DFAULT_RETRY_LIMIT;
+		else
+			data_retry_limit = IWLAGN_DEFAULT_TX_RETRY;
+	}
+
+	tx_cmd->data_retry_limit = data_retry_limit;
+	tx_cmd->rts_retry_limit = rts_retry_limit;
+
+	/* DATA packets will use the uCode station table for rate/antenna
+	 * selection */
+	if (ieee80211_is_data(fc)) {
+		tx_cmd->initial_rate_index = 0;
+		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
+		return;
+	} else if (ieee80211_is_back_req(fc))
+		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
+
+	/*
+	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
+	 * not really a TX rate.  Thus, we use the lowest supported rate for
+	 * this band.  Also use the lowest supported rate if the stored rate
+	 * index is invalid.
+	 */
+	rate_idx = info->control.rates[0].idx;
+	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
+			(rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
+		rate_idx = rate_lowest_index(
+				&priv->nvm_data->bands[info->band], sta);
+	/* For the 5 GHz band, remap mac80211 rate indices into driver indices */
+	if (info->band == NL80211_BAND_5GHZ)
+		rate_idx += IWL_FIRST_OFDM_RATE;
+	/* Get PLCP rate for tx_cmd->rate_n_flags */
+	rate_plcp = iwl_rates[rate_idx].plcp;
+	/* Zero out flags for this packet */
+	rate_flags = 0;
+
+	/* Set CCK flag as needed */
+	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
+		rate_flags |= RATE_MCS_CCK_MSK;
+
+	/* Set up antennas */
+	if (priv->lib->bt_params &&
+	    priv->lib->bt_params->advanced_bt_coexist &&
+	    priv->bt_full_concurrent) {
+		/* operated as 1x1 in full concurrency mode */
+		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
+				first_antenna(priv->nvm_data->valid_tx_ant));
+	} else
+		priv->mgmt_tx_ant = iwl_toggle_tx_ant(
+					priv, priv->mgmt_tx_ant,
+					priv->nvm_data->valid_tx_ant);
+	rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
+
+	/* Set the rate in the TX cmd */
+	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
+}
+
+static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
+					 struct ieee80211_tx_info *info,
+					 struct iwl_tx_cmd *tx_cmd,
+					 struct sk_buff *skb_frag)
+{
+	struct ieee80211_key_conf *keyconf = info->control.hw_key;
+
+	switch (keyconf->cipher) {
+	case WLAN_CIPHER_SUITE_CCMP:
+		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
+		if (info->flags & IEEE80211_TX_CTL_AMPDU)
+			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
+		break;
+
+	case WLAN_CIPHER_SUITE_TKIP:
+		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
+		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
+		break;
+
+	case WLAN_CIPHER_SUITE_WEP104:
+		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
+		/* fall through */
+	case WLAN_CIPHER_SUITE_WEP40:
+		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
+			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
+
+		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
+
+		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
+			     "with key %d\n", keyconf->keyidx);
+		break;
+
+	default:
+		IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
+		break;
+	}
+}
+
+/**
+ * iwl_sta_id_or_broadcast - return sta_id or broadcast sta
+ * @context: the current context
+ * @sta: mac80211 station
+ *
+ * In certain circumstances mac80211 passes a station pointer
+ * that may be %NULL, for example during TX or key setup. In
+ * that case, we need to use the broadcast station, so this
+ * helper wraps that pattern.
+ */
+static int iwl_sta_id_or_broadcast(struct iwl_rxon_context *context,
+				   struct ieee80211_sta *sta)
+{
+	int sta_id;
+
+	if (!sta)
+		return context->bcast_sta_id;
+
+	sta_id = iwl_sta_id(sta);
+
+	/*
+	 * mac80211 should not be passing a partially
+	 * initialised station!
+	 */
+	WARN_ON(sta_id == IWL_INVALID_STATION);
+
+	return sta_id;
+}
+
+/*
+ * start REPLY_TX command process
+ */
+int iwlagn_tx_skb(struct iwl_priv *priv,
+		  struct ieee80211_sta *sta,
+		  struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct iwl_station_priv *sta_priv = NULL;
+	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+	struct iwl_device_cmd *dev_cmd;
+	struct iwl_tx_cmd *tx_cmd;
+	__le16 fc;
+	u8 hdr_len;
+	u16 len, seq_number = 0;
+	u8 sta_id, tid = IWL_MAX_TID_COUNT;
+	bool is_agg = false, is_data_qos = false;
+	int txq_id;
+
+	if (info->control.vif)
+		ctx = iwl_rxon_ctx_from_vif(info->control.vif);
+
+	if (iwl_is_rfkill(priv)) {
+		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
+		goto drop_unlock_priv;
+	}
+
+	fc = hdr->frame_control;
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+	if (ieee80211_is_auth(fc))
+		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
+	else if (ieee80211_is_assoc_req(fc))
+		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
+	else if (ieee80211_is_reassoc_req(fc))
+		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
+#endif
+
+	if (unlikely(ieee80211_is_probe_resp(fc))) {
+		struct iwl_wipan_noa_data *noa_data =
+			rcu_dereference(priv->noa_data);
+
+		if (noa_data &&
+		    pskb_expand_head(skb, 0, noa_data->length,
+				     GFP_ATOMIC) == 0) {
+			skb_put_data(skb, noa_data->data, noa_data->length);
+			hdr = (struct ieee80211_hdr *)skb->data;
+		}
+	}
+
+	hdr_len = ieee80211_hdrlen(fc);
+
+	/* For management frames use the broadcast id so as not to break aggregation */
+	if (!ieee80211_is_data(fc))
+		sta_id = ctx->bcast_sta_id;
+	else {
+		/* Find index into station table for destination station */
+		sta_id = iwl_sta_id_or_broadcast(ctx, sta);
+		if (sta_id == IWL_INVALID_STATION) {
+			IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
+				       hdr->addr1);
+			goto drop_unlock_priv;
+		}
+	}
+
+	if (sta)
+		sta_priv = (void *)sta->drv_priv;
+
+	if (sta_priv && sta_priv->asleep &&
+	    (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) {
+		/*
+		 * This sends an asynchronous command to the device,
+		 * but we can rely on it being processed before the
+		 * next frame is processed -- and the next frame to
+		 * this station is the one that will consume this
+		 * counter.
+		 * For now set the counter to just 1 since we do not
+		 * support uAPSD yet.
+		 *
+		 * FIXME: If we get two non-bufferable frames one
+		 * after the other, we might only send out one of
+		 * them because this is racy.
+		 */
+		iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
+	}
+
+	dev_cmd = iwl_trans_alloc_tx_cmd(priv->trans);
+
+	if (unlikely(!dev_cmd))
+		goto drop_unlock_priv;
+
+	memset(dev_cmd, 0, sizeof(*dev_cmd));
+	dev_cmd->hdr.cmd = REPLY_TX;
+	tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
+
+	/* Total # bytes to be transmitted */
+	len = (u16)skb->len;
+	tx_cmd->len = cpu_to_le16(len);
+
+	if (info->control.hw_key)
+		iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb);
+
+	/* TODO need this for burst mode later on */
+	iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
+
+	iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, sta, fc);
+
+	memset(&info->status, 0, sizeof(info->status));
+	memset(info->driver_data, 0, sizeof(info->driver_data));
+
+	info->driver_data[0] = ctx;
+	info->driver_data[1] = dev_cmd;
+	/* From now on, we cannot access info->control */
+
+	spin_lock(&priv->sta_lock);
+
+	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
+		u8 *qc = NULL;
+		struct iwl_tid_data *tid_data;
+		qc = ieee80211_get_qos_ctl(hdr);
+		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+		if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
+			goto drop_unlock_sta;
+		tid_data = &priv->tid_data[sta_id][tid];
+
+		/* aggregation is on for this <sta,tid> */
+		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+		    tid_data->agg.state != IWL_AGG_ON) {
+			IWL_ERR(priv,
+				"TX_CTL_AMPDU while not in AGG: Tx flags = 0x%08x, agg.state = %d\n",
+				info->flags, tid_data->agg.state);
+			IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d\n",
+				sta_id, tid,
+				IEEE80211_SEQ_TO_SN(tid_data->seq_number));
+			goto drop_unlock_sta;
+		}
+
+		/* We can receive packets from the stack in IWL_AGG_{ON,OFF}
+		 * only. Check this here.
+		 */
+		if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON &&
+			      tid_data->agg.state != IWL_AGG_OFF,
+			      "Tx while agg.state = %d\n", tid_data->agg.state))
+			goto drop_unlock_sta;
+
+		seq_number = tid_data->seq_number;
+		seq_number &= IEEE80211_SCTL_SEQ;
+		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+		hdr->seq_ctrl |= cpu_to_le16(seq_number);
+		seq_number += 0x10;
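+		/* the sequence number lives in the top 12 bits of seq_ctrl,
+		 * so the counter advances in steps of 0x10 */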
+
+		if (info->flags & IEEE80211_TX_CTL_AMPDU)
+			is_agg = true;
+		is_data_qos = true;
+	}
+
+	/* Copy MAC header from skb into command buffer */
+	memcpy(tx_cmd->hdr, hdr, hdr_len);
+
+	txq_id = info->hw_queue;
+
+	if (is_agg)
+		txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
+	else if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
+		/*
+		 * The microcode will clear the more data
+		 * bit in the last frame it transmits.
+		 */
+		hdr->frame_control |=
+			cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+	}
+
+	WARN_ON_ONCE(is_agg &&
+		     priv->queue_to_mac80211[txq_id] != info->hw_queue);
+
+	IWL_DEBUG_TX(priv, "TX to [%d|%d] Q:%d - seq: 0x%x\n", sta_id, tid,
+		     txq_id, seq_number);
+
+	if (iwl_trans_tx(priv->trans, skb, dev_cmd, txq_id))
+		goto drop_unlock_sta;
+
+	if (is_data_qos && !ieee80211_has_morefrags(fc))
+		priv->tid_data[sta_id][tid].seq_number = seq_number;
+
+	spin_unlock(&priv->sta_lock);
+
+	/*
+	 * Avoid atomic ops if it isn't an associated client.
+	 * Also, if this is a packet for aggregation, don't
+	 * increase the counter because the ucode will stop
+	 * aggregation queues when their respective station
+	 * goes to sleep.
+	 */
+	if (sta_priv && sta_priv->client && !is_agg)
+		atomic_inc(&sta_priv->pending_frames);
+
+	return 0;
+
+drop_unlock_sta:
+	if (dev_cmd)
+		iwl_trans_free_tx_cmd(priv->trans, dev_cmd);
+	spin_unlock(&priv->sta_lock);
+drop_unlock_priv:
+	return -1;
+}
+
+static int iwlagn_alloc_agg_txq(struct iwl_priv *priv, int mq)
+{
+	int q;
+
+	for (q = IWLAGN_FIRST_AMPDU_QUEUE;
+	     q < priv->cfg->base_params->num_of_queues; q++) {
+		if (!test_and_set_bit(q, priv->agg_q_alloc)) {
+			priv->queue_to_mac80211[q] = mq;
+			return q;
+		}
+	}
+
+	return -ENOSPC;
+}
+
+static void iwlagn_dealloc_agg_txq(struct iwl_priv *priv, int q)
+{
+	clear_bit(q, priv->agg_q_alloc);
+	priv->queue_to_mac80211[q] = IWL_INVALID_MAC80211_QUEUE;
+}
+
+int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
+			struct ieee80211_sta *sta, u16 tid)
+{
+	struct iwl_tid_data *tid_data;
+	int sta_id, txq_id;
+	enum iwl_agg_state agg_state;
+
+	sta_id = iwl_sta_id(sta);
+
+	if (sta_id == IWL_INVALID_STATION) {
+		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
+		return -ENXIO;
+	}
+
+	spin_lock_bh(&priv->sta_lock);
+
+	tid_data = &priv->tid_data[sta_id][tid];
+	txq_id = tid_data->agg.txq_id;
+
+	switch (tid_data->agg.state) {
+	case IWL_EMPTYING_HW_QUEUE_ADDBA:
+		/*
+		 * This can happen if the peer stops aggregation
+		 * again before we've had a chance to drain the
+		 * queue we selected previously, i.e. before the
+		 * session was really started completely.
+		 */
+		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
+		goto turn_off;
+	case IWL_AGG_STARTING:
+		/*
+		 * This can happen when the session is stopped before
+		 * we receive ADDBA response
+		 */
+		IWL_DEBUG_HT(priv, "AGG stop before AGG became operational\n");
+		goto turn_off;
+	case IWL_AGG_ON:
+		break;
+	default:
+		IWL_WARN(priv,
+			 "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
+			 sta_id, tid, tid_data->agg.state);
+		spin_unlock_bh(&priv->sta_lock);
+		return 0;
+	}
+
+	tid_data->agg.ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
+
+	/* There are still packets for this RA / TID in the HW */
+	if (!test_bit(txq_id, priv->agg_q_alloc)) {
+		IWL_DEBUG_TX_QUEUES(priv,
+			"stopping AGG on STA/TID %d/%d but hwq %d not used\n",
+			sta_id, tid, txq_id);
+	} else if (tid_data->agg.ssn != tid_data->next_reclaimed) {
+		IWL_DEBUG_TX_QUEUES(priv,
+				    "Can't proceed: ssn %d, next_recl = %d\n",
+				    tid_data->agg.ssn,
+				    tid_data->next_reclaimed);
+		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_DELBA;
+		spin_unlock_bh(&priv->sta_lock);
+		return 0;
+	}
+
+	IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
+			    tid_data->agg.ssn);
+turn_off:
+	agg_state = tid_data->agg.state;
+	tid_data->agg.state = IWL_AGG_OFF;
+
+	spin_unlock_bh(&priv->sta_lock);
+
+	if (test_bit(txq_id, priv->agg_q_alloc)) {
+		/*
+		 * If the transport didn't know that we wanted to start
+		 * aggregation, don't tell it that we want to stop it.
+		 * This can happen when we don't get the addBA response in
+		 * time, or didn't have time to drain the AC queues.
+		 */
+		if (agg_state == IWL_AGG_ON)
+			iwl_trans_txq_disable(priv->trans, txq_id, true);
+		else
+			IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n",
+					    agg_state);
+		iwlagn_dealloc_agg_txq(priv, txq_id);
+	}
+
+	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+
+	return 0;
+}
+
+int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
+			struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+{
+	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+	struct iwl_tid_data *tid_data;
+	int sta_id, txq_id, ret;
+
+	IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n",
+		     sta->addr, tid);
+
+	sta_id = iwl_sta_id(sta);
+	if (sta_id == IWL_INVALID_STATION) {
+		IWL_ERR(priv, "Start AGG on invalid station\n");
+		return -ENXIO;
+	}
+	if (unlikely(tid >= IWL_MAX_TID_COUNT))
+		return -EINVAL;
+
+	if (priv->tid_data[sta_id][tid].agg.state != IWL_AGG_OFF) {
+		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
+		return -ENXIO;
+	}
+
+	txq_id = iwlagn_alloc_agg_txq(priv, ctx->ac_to_queue[tid_to_ac[tid]]);
+	if (txq_id < 0) {
+		IWL_DEBUG_TX_QUEUES(priv,
+			"No free aggregation queue for %pM/%d\n",
+			sta->addr, tid);
+		return txq_id;
+	}
+
+	ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
+	if (ret)
+		return ret;
+
+	spin_lock_bh(&priv->sta_lock);
+	tid_data = &priv->tid_data[sta_id][tid];
+	tid_data->agg.ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
+	tid_data->agg.txq_id = txq_id;
+
+	*ssn = tid_data->agg.ssn;
+
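+	/*
+	 * The ADDBA flow may only proceed once every frame sent before the
+	 * session's starting SSN has been reclaimed; otherwise mark the
+	 * queue as emptying and defer the mac80211 callback until it drains.
+	 */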
+	if (*ssn == tid_data->next_reclaimed) {
+		IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
+				    tid_data->agg.ssn);
+		tid_data->agg.state = IWL_AGG_STARTING;
+		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+	} else {
+		IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
+				    "next_reclaimed = %d\n",
+				    tid_data->agg.ssn,
+				    tid_data->next_reclaimed);
+		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
+	}
+	spin_unlock_bh(&priv->sta_lock);
+
+	return ret;
+}
+
+int iwlagn_tx_agg_flush(struct iwl_priv *priv, struct ieee80211_vif *vif,
+			struct ieee80211_sta *sta, u16 tid)
+{
+	struct iwl_tid_data *tid_data;
+	enum iwl_agg_state agg_state;
+	int sta_id, txq_id;
+
+	sta_id = iwl_sta_id(sta);
+
+	/*
+	 * First set the agg state to OFF to avoid calling
+	 * ieee80211_stop_tx_ba_cb in iwlagn_check_ratid_empty.
+	 */
+	spin_lock_bh(&priv->sta_lock);
+
+	tid_data = &priv->tid_data[sta_id][tid];
+	txq_id = tid_data->agg.txq_id;
+	agg_state = tid_data->agg.state;
+	IWL_DEBUG_TX_QUEUES(priv, "Flush AGG: sta %d tid %d q %d state %d\n",
+			    sta_id, tid, txq_id, tid_data->agg.state);
+
+	tid_data->agg.state = IWL_AGG_OFF;
+
+	spin_unlock_bh(&priv->sta_lock);
+
+	if (iwlagn_txfifo_flush(priv, BIT(txq_id)))
+		IWL_ERR(priv, "Couldn't flush the AGG queue\n");
+
+	if (test_bit(txq_id, priv->agg_q_alloc)) {
+		/*
+		 * If the transport didn't know that we wanted to start
+		 * agreggation, don't tell it that we want to stop them.
+		 * This can happen when we don't get the addBA response on
+		 * time, or we hadn't time to drain the AC queues.
+		 */
+		if (agg_state == IWL_AGG_ON)
+			iwl_trans_txq_disable(priv->trans, txq_id, true);
+		else
+			IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n",
+					    agg_state);
+		iwlagn_dealloc_agg_txq(priv, txq_id);
+	}
+
+	return 0;
+}
+
+int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
+			struct ieee80211_sta *sta, u16 tid, u8 buf_size)
+{
+	struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
+	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+	int q, fifo;
+	u16 ssn;
+
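+	/* the uCode cannot aggregate more frames than our default limit */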
+	buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
+
+	spin_lock_bh(&priv->sta_lock);
+	ssn = priv->tid_data[sta_priv->sta_id][tid].agg.ssn;
+	q = priv->tid_data[sta_priv->sta_id][tid].agg.txq_id;
+	priv->tid_data[sta_priv->sta_id][tid].agg.state = IWL_AGG_ON;
+	spin_unlock_bh(&priv->sta_lock);
+
+	fifo = ctx->ac_to_fifo[tid_to_ac[tid]];
+
+	iwl_trans_txq_enable(priv->trans, q, fifo, sta_priv->sta_id, tid,
+			     buf_size, ssn, 0);
+
+	/*
+	 * If the limit is 0, then it wasn't initialised yet,
+	 * use the default. We can do that since we take the
+	 * minimum below, and we don't want to go above our
+	 * default due to hardware restrictions.
+	 */
+	if (sta_priv->max_agg_bufsize == 0)
+		sta_priv->max_agg_bufsize =
+			LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+
+	/*
+	 * Even though in theory the peer could have different
+	 * aggregation reorder buffer sizes for different sessions,
+	 * our ucode doesn't allow for that and has a global limit
+	 * for each station. Therefore, use the minimum of all the
+	 * aggregation sessions and our default value.
+	 */
+	sta_priv->max_agg_bufsize =
+		min(sta_priv->max_agg_bufsize, buf_size);
+
+	if (priv->hw_params.use_rts_for_aggregation) {
+		/*
+		 * switch to RTS/CTS if it is the preferred protection
+		 * method for HT traffic
+		 */
+
+		sta_priv->lq_sta.lq.general_params.flags |=
+			LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
+	}
+	priv->agg_tids_count++;
+	IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
+		     priv->agg_tids_count);
+
+	sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
+		sta_priv->max_agg_bufsize;
+
+	IWL_DEBUG_HT(priv, "Tx aggregation enabled on ra = %pM tid = %d\n",
+		 sta->addr, tid);
+
+	return iwl_send_lq_cmd(priv, ctx,
+			&sta_priv->lq_sta.lq, CMD_ASYNC, false);
+}
+
+static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
+{
+	struct iwl_tid_data *tid_data = &priv->tid_data[sta_id][tid];
+	enum iwl_rxon_context_id ctx;
+	struct ieee80211_vif *vif;
+	u8 *addr;
+
+	lockdep_assert_held(&priv->sta_lock);
+
+	addr = priv->stations[sta_id].sta.sta.addr;
+	ctx = priv->stations[sta_id].ctxid;
+	vif = priv->contexts[ctx].vif;
+
+	switch (priv->tid_data[sta_id][tid].agg.state) {
+	case IWL_EMPTYING_HW_QUEUE_DELBA:
+		/* There are no packets for this RA / TID in the HW any more */
+		if (tid_data->agg.ssn == tid_data->next_reclaimed) {
+			IWL_DEBUG_TX_QUEUES(priv,
+				"Can continue DELBA flow ssn = next_recl = %d\n",
+				tid_data->next_reclaimed);
+			iwl_trans_txq_disable(priv->trans,
+					      tid_data->agg.txq_id, true);
+			iwlagn_dealloc_agg_txq(priv, tid_data->agg.txq_id);
+			tid_data->agg.state = IWL_AGG_OFF;
+			ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
+		}
+		break;
+	case IWL_EMPTYING_HW_QUEUE_ADDBA:
+		/* There are no packets for this RA / TID in the HW any more */
+		if (tid_data->agg.ssn == tid_data->next_reclaimed) {
+			IWL_DEBUG_TX_QUEUES(priv,
+				"Can continue ADDBA flow ssn = next_recl = %d\n",
+				tid_data->next_reclaimed);
+			tid_data->agg.state = IWL_AGG_STARTING;
+			ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
+		}
+		break;
+	default:
+		break;
+	}
+}
+
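+/*
+ * For non-aggregated frames, drop the per-station count of pending
+ * frames and let a power-saving client doze again once nothing is
+ * left in flight for it.
+ */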
+static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
+				     struct iwl_rxon_context *ctx,
+				     const u8 *addr1)
+{
+	struct ieee80211_sta *sta;
+	struct iwl_station_priv *sta_priv;
+
+	rcu_read_lock();
+	sta = ieee80211_find_sta(ctx->vif, addr1);
+	if (sta) {
+		sta_priv = (void *)sta->drv_priv;
+		/* avoid atomic ops if this isn't a client */
+		if (sta_priv->client &&
+		    atomic_dec_return(&sta_priv->pending_frames) == 0)
+			ieee80211_sta_block_awake(priv->hw, sta, false);
+	}
+	rcu_read_unlock();
+}
+
+/**
+ * iwlagn_hwrate_to_tx_control - translate ucode response to mac80211
+ *	tx status control values
+ */
+static void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
+				  struct ieee80211_tx_info *info)
+{
+	struct ieee80211_tx_rate *r = &info->status.rates[0];
+
+	info->status.antenna =
+		((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
+	if (rate_n_flags & RATE_MCS_HT_MSK)
+		r->flags |= IEEE80211_TX_RC_MCS;
+	if (rate_n_flags & RATE_MCS_GF_MSK)
+		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+	if (rate_n_flags & RATE_MCS_HT40_MSK)
+		r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+	if (rate_n_flags & RATE_MCS_DUP_MSK)
+		r->flags |= IEEE80211_TX_RC_DUP_DATA;
+	if (rate_n_flags & RATE_MCS_SGI_MSK)
+		r->flags |= IEEE80211_TX_RC_SHORT_GI;
+	r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+const char *iwl_get_tx_fail_reason(u32 status)
+{
+#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
+#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
+
+	switch (status & TX_STATUS_MSK) {
+	case TX_STATUS_SUCCESS:
+		return "SUCCESS";
+	TX_STATUS_POSTPONE(DELAY);
+	TX_STATUS_POSTPONE(FEW_BYTES);
+	TX_STATUS_POSTPONE(BT_PRIO);
+	TX_STATUS_POSTPONE(QUIET_PERIOD);
+	TX_STATUS_POSTPONE(CALC_TTAK);
+	TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
+	TX_STATUS_FAIL(SHORT_LIMIT);
+	TX_STATUS_FAIL(LONG_LIMIT);
+	TX_STATUS_FAIL(FIFO_UNDERRUN);
+	TX_STATUS_FAIL(DRAIN_FLOW);
+	TX_STATUS_FAIL(RFKILL_FLUSH);
+	TX_STATUS_FAIL(LIFE_EXPIRE);
+	TX_STATUS_FAIL(DEST_PS);
+	TX_STATUS_FAIL(HOST_ABORTED);
+	TX_STATUS_FAIL(BT_RETRY);
+	TX_STATUS_FAIL(STA_INVALID);
+	TX_STATUS_FAIL(FRAG_DROPPED);
+	TX_STATUS_FAIL(TID_DISABLE);
+	TX_STATUS_FAIL(FIFO_FLUSHED);
+	TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
+	TX_STATUS_FAIL(PASSIVE_NO_RX);
+	TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
+	}
+
+	return "UNKNOWN";
+
+#undef TX_STATUS_FAIL
+#undef TX_STATUS_POSTPONE
+}
+#endif /* CONFIG_IWLWIFI_DEBUG */
+
+static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
+{
+	status &= AGG_TX_STATUS_MSK;
+
+	switch (status) {
+	case AGG_TX_STATE_UNDERRUN_MSK:
+		priv->reply_agg_tx_stats.underrun++;
+		break;
+	case AGG_TX_STATE_BT_PRIO_MSK:
+		priv->reply_agg_tx_stats.bt_prio++;
+		break;
+	case AGG_TX_STATE_FEW_BYTES_MSK:
+		priv->reply_agg_tx_stats.few_bytes++;
+		break;
+	case AGG_TX_STATE_ABORT_MSK:
+		priv->reply_agg_tx_stats.abort++;
+		break;
+	case AGG_TX_STATE_LAST_SENT_TTL_MSK:
+		priv->reply_agg_tx_stats.last_sent_ttl++;
+		break;
+	case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
+		priv->reply_agg_tx_stats.last_sent_try++;
+		break;
+	case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
+		priv->reply_agg_tx_stats.last_sent_bt_kill++;
+		break;
+	case AGG_TX_STATE_SCD_QUERY_MSK:
+		priv->reply_agg_tx_stats.scd_query++;
+		break;
+	case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
+		priv->reply_agg_tx_stats.bad_crc32++;
+		break;
+	case AGG_TX_STATE_RESPONSE_MSK:
+		priv->reply_agg_tx_stats.response++;
+		break;
+	case AGG_TX_STATE_DUMP_TX_MSK:
+		priv->reply_agg_tx_stats.dump_tx++;
+		break;
+	case AGG_TX_STATE_DELAY_TX_MSK:
+		priv->reply_agg_tx_stats.delay_tx++;
+		break;
+	default:
+		priv->reply_agg_tx_stats.unknown++;
+		break;
+	}
+}
+
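+/*
+ * The scheduler SSN is appended by the uCode right after the variable
+ * length per-frame status array in the Tx response, hence the pointer
+ * arithmetic below.
+ */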
+static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
+{
+	return le32_to_cpup((__le32 *)&tx_resp->status +
+			    tx_resp->frame_count) & IEEE80211_MAX_SN;
+}
+
+static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
+				struct iwlagn_tx_resp *tx_resp)
+{
+	struct agg_tx_status *frame_status = &tx_resp->status;
+	int tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
+		IWLAGN_TX_RES_TID_POS;
+	int sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
+		IWLAGN_TX_RES_RA_POS;
+	struct iwl_ht_agg *agg = &priv->tid_data[sta_id][tid].agg;
+	u32 status = le16_to_cpu(tx_resp->status.status);
+	int i;
+
+	WARN_ON(tid == IWL_TID_NON_QOS);
+
+	if (agg->wait_for_ba)
+		IWL_DEBUG_TX_REPLY(priv,
+			"got tx response w/o block-ack\n");
+
+	agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
+	agg->wait_for_ba = (tx_resp->frame_count > 1);
+
+	/*
+	 * If the BT kill count is non-zero, we'll get this
+	 * notification again.
+	 */
+	if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
+	    priv->lib->bt_params &&
+	    priv->lib->bt_params->advanced_bt_coexist) {
+		IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n");
+	}
+
+	if (tx_resp->frame_count == 1)
+		return;
+
+	IWL_DEBUG_TX_REPLY(priv, "TXQ %d initial_rate 0x%x ssn %d frm_cnt %d\n",
+			   agg->txq_id,
+			   le32_to_cpu(tx_resp->rate_n_flags),
+			   iwlagn_get_scd_ssn(tx_resp), tx_resp->frame_count);
+
+	/* Walk the per-frame status entries within the Tx window */
+	for (i = 0; i < tx_resp->frame_count; i++) {
+		u16 fstatus = le16_to_cpu(frame_status[i].status);
+		u8 retry_cnt = (fstatus & AGG_TX_TRY_MSK) >> AGG_TX_TRY_POS;
+
+		if (fstatus & AGG_TX_STATUS_MSK)
+			iwlagn_count_agg_tx_err_status(priv, fstatus);
+
+		if (fstatus & (AGG_TX_STATE_FEW_BYTES_MSK |
+			       AGG_TX_STATE_ABORT_MSK))
+			continue;
+
+		if (fstatus & AGG_TX_STATUS_MSK || retry_cnt > 1)
+			IWL_DEBUG_TX_REPLY(priv,
+					   "%d: status %s (0x%04x), try-count (0x%01x)\n",
+					   i,
+					   iwl_get_agg_tx_fail_reason(fstatus),
+					   fstatus & AGG_TX_STATUS_MSK,
+					   retry_cnt);
+	}
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+#define AGG_TX_STATE_FAIL(x) case AGG_TX_STATE_ ## x: return #x
+
+const char *iwl_get_agg_tx_fail_reason(u16 status)
+{
+	status &= AGG_TX_STATUS_MSK;
+	switch (status) {
+	case AGG_TX_STATE_TRANSMITTED:
+		return "SUCCESS";
+		AGG_TX_STATE_FAIL(UNDERRUN_MSK);
+		AGG_TX_STATE_FAIL(BT_PRIO_MSK);
+		AGG_TX_STATE_FAIL(FEW_BYTES_MSK);
+		AGG_TX_STATE_FAIL(ABORT_MSK);
+		AGG_TX_STATE_FAIL(LAST_SENT_TTL_MSK);
+		AGG_TX_STATE_FAIL(LAST_SENT_TRY_CNT_MSK);
+		AGG_TX_STATE_FAIL(LAST_SENT_BT_KILL_MSK);
+		AGG_TX_STATE_FAIL(SCD_QUERY_MSK);
+		AGG_TX_STATE_FAIL(TEST_BAD_CRC32_MSK);
+		AGG_TX_STATE_FAIL(RESPONSE_MSK);
+		AGG_TX_STATE_FAIL(DUMP_TX_MSK);
+		AGG_TX_STATE_FAIL(DELAY_TX_MSK);
+	}
+
+	return "UNKNOWN";
+}
+#endif /* CONFIG_IWLWIFI_DEBUG */
+
+static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
+{
+	status &= TX_STATUS_MSK;
+
+	switch (status) {
+	case TX_STATUS_POSTPONE_DELAY:
+		priv->reply_tx_stats.pp_delay++;
+		break;
+	case TX_STATUS_POSTPONE_FEW_BYTES:
+		priv->reply_tx_stats.pp_few_bytes++;
+		break;
+	case TX_STATUS_POSTPONE_BT_PRIO:
+		priv->reply_tx_stats.pp_bt_prio++;
+		break;
+	case TX_STATUS_POSTPONE_QUIET_PERIOD:
+		priv->reply_tx_stats.pp_quiet_period++;
+		break;
+	case TX_STATUS_POSTPONE_CALC_TTAK:
+		priv->reply_tx_stats.pp_calc_ttak++;
+		break;
+	case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
+		priv->reply_tx_stats.int_crossed_retry++;
+		break;
+	case TX_STATUS_FAIL_SHORT_LIMIT:
+		priv->reply_tx_stats.short_limit++;
+		break;
+	case TX_STATUS_FAIL_LONG_LIMIT:
+		priv->reply_tx_stats.long_limit++;
+		break;
+	case TX_STATUS_FAIL_FIFO_UNDERRUN:
+		priv->reply_tx_stats.fifo_underrun++;
+		break;
+	case TX_STATUS_FAIL_DRAIN_FLOW:
+		priv->reply_tx_stats.drain_flow++;
+		break;
+	case TX_STATUS_FAIL_RFKILL_FLUSH:
+		priv->reply_tx_stats.rfkill_flush++;
+		break;
+	case TX_STATUS_FAIL_LIFE_EXPIRE:
+		priv->reply_tx_stats.life_expire++;
+		break;
+	case TX_STATUS_FAIL_DEST_PS:
+		priv->reply_tx_stats.dest_ps++;
+		break;
+	case TX_STATUS_FAIL_HOST_ABORTED:
+		priv->reply_tx_stats.host_abort++;
+		break;
+	case TX_STATUS_FAIL_BT_RETRY:
+		priv->reply_tx_stats.bt_retry++;
+		break;
+	case TX_STATUS_FAIL_STA_INVALID:
+		priv->reply_tx_stats.sta_invalid++;
+		break;
+	case TX_STATUS_FAIL_FRAG_DROPPED:
+		priv->reply_tx_stats.frag_drop++;
+		break;
+	case TX_STATUS_FAIL_TID_DISABLE:
+		priv->reply_tx_stats.tid_disable++;
+		break;
+	case TX_STATUS_FAIL_FIFO_FLUSHED:
+		priv->reply_tx_stats.fifo_flush++;
+		break;
+	case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
+		priv->reply_tx_stats.insuff_cf_poll++;
+		break;
+	case TX_STATUS_FAIL_PASSIVE_NO_RX:
+		priv->reply_tx_stats.fail_hw_drop++;
+		break;
+	case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
+		priv->reply_tx_stats.sta_color_mismatch++;
+		break;
+	default:
+		priv->reply_tx_stats.unknown++;
+		break;
+	}
+}
+
+static void iwlagn_set_tx_status(struct iwl_priv *priv,
+				 struct ieee80211_tx_info *info,
+				 struct iwlagn_tx_resp *tx_resp)
+{
+	u16 status = le16_to_cpu(tx_resp->status.status);
+
+	info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+
+	info->status.rates[0].count = tx_resp->failure_frame + 1;
+	info->flags |= iwl_tx_status_to_mac80211(status);
+	iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
+				    info);
+	if (!iwl_is_tx_success(status))
+		iwlagn_count_tx_err_status(priv, status);
+}
+
+static void iwl_check_abort_status(struct iwl_priv *priv,
+			    u8 frame_count, u32 status)
+{
+	if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
+		IWL_ERR(priv, "Tx flush command to flush out all frames\n");
+		if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
+			queue_work(priv->workqueue, &priv->tx_flush);
+	}
+}
+
+void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+	int txq_id = SEQ_TO_QUEUE(sequence);
+	int cmd_index __maybe_unused = SEQ_TO_INDEX(sequence);
+	struct iwlagn_tx_resp *tx_resp = (void *)pkt->data;
+	struct ieee80211_hdr *hdr;
+	u32 status = le16_to_cpu(tx_resp->status.status);
+	u16 ssn = iwlagn_get_scd_ssn(tx_resp);
+	int tid;
+	int sta_id;
+	int freed;
+	struct ieee80211_tx_info *info;
+	struct sk_buff_head skbs;
+	struct sk_buff *skb;
+	struct iwl_rxon_context *ctx;
+	bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
+
+	tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
+		IWLAGN_TX_RES_TID_POS;
+	sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
+		IWLAGN_TX_RES_RA_POS;
+
+	spin_lock_bh(&priv->sta_lock);
+
+	if (is_agg) {
+		WARN_ON_ONCE(sta_id >= IWLAGN_STATION_COUNT ||
+			     tid >= IWL_MAX_TID_COUNT);
+		if (txq_id != priv->tid_data[sta_id][tid].agg.txq_id)
+			IWL_ERR(priv, "txq_id mismatch: %d %d\n", txq_id,
+				priv->tid_data[sta_id][tid].agg.txq_id);
+		iwl_rx_reply_tx_agg(priv, tx_resp);
+	}
+
+	__skb_queue_head_init(&skbs);
+
+	if (tx_resp->frame_count == 1) {
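+		/*
+		 * seq_ctl holds the sequence control of the frame this
+		 * response refers to; the SN occupies its top 12 bits, so
+		 * adding 0x10 advances the sequence number by exactly one.
+		 */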
+		u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl);
+		next_reclaimed = IEEE80211_SEQ_TO_SN(next_reclaimed + 0x10);
+
+		if (is_agg) {
+			/* If this is an aggregation queue, we can rely on the
+			 * ssn since the wifi sequence number corresponds to
+			 * the index in the TFD ring (%256).
+			 * The seq_ctl is the sequence control of the packet
+			 * to which this Tx response relates. But if there is a
+			 * hole in the bitmap of the BA we received, this Tx
+			 * response may allow us to reclaim the hole and all
+			 * the subsequent packets that were already acked.
+			 * In that case, seq_ctl != ssn, and the next packet
+			 * to be reclaimed will be ssn and not seq_ctl.
+			 */
+			next_reclaimed = ssn;
+		}
+
+		if (tid != IWL_TID_NON_QOS) {
+			priv->tid_data[sta_id][tid].next_reclaimed =
+				next_reclaimed;
+			IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
+						  next_reclaimed);
+			iwlagn_check_ratid_empty(priv, sta_id, tid);
+		}
+
+		iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
+
+		freed = 0;
+
+		/* process frames */
+		skb_queue_walk(&skbs, skb) {
+			hdr = (struct ieee80211_hdr *)skb->data;
+
+			if (!ieee80211_is_data_qos(hdr->frame_control))
+				priv->last_seq_ctl = tx_resp->seq_ctl;
+
+			info = IEEE80211_SKB_CB(skb);
+			ctx = info->driver_data[0];
+			iwl_trans_free_tx_cmd(priv->trans,
+					      info->driver_data[1]);
+
+			memset(&info->status, 0, sizeof(info->status));
+
+			if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
+			    ctx->vif &&
+			    ctx->vif->type == NL80211_IFTYPE_STATION) {
+				/* block and stop all queues */
+				priv->passive_no_rx = true;
+				IWL_DEBUG_TX_QUEUES(priv,
+					"stop all queues: passive channel\n");
+				ieee80211_stop_queues(priv->hw);
+
+				IWL_DEBUG_TX_REPLY(priv,
+					   "TXQ %d status %s (0x%08x) "
+					   "rate_n_flags 0x%x retries %d\n",
+					   txq_id,
+					   iwl_get_tx_fail_reason(status),
+					   status,
+					   le32_to_cpu(tx_resp->rate_n_flags),
+					   tx_resp->failure_frame);
+
+				IWL_DEBUG_TX_REPLY(priv,
+					   "FrameCnt = %d, idx=%d\n",
+					   tx_resp->frame_count, cmd_index);
+			}
+
+			/* check if BAR is needed */
+			if (is_agg && !iwl_is_tx_success(status))
+				info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
+			iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(skb),
+				     tx_resp);
+			if (!is_agg)
+				iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);
+
+			freed++;
+		}
+
+		if (tid != IWL_TID_NON_QOS) {
+			priv->tid_data[sta_id][tid].next_reclaimed =
+				next_reclaimed;
+			IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
+					   next_reclaimed);
+		}
+
+		if (!is_agg && freed != 1)
+			IWL_ERR(priv, "Q: %d, freed %d\n", txq_id, freed);
+
+		IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x)\n", txq_id,
+				   iwl_get_tx_fail_reason(status), status);
+
+		IWL_DEBUG_TX_REPLY(priv,
+				   "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d seq_ctl=0x%x\n",
+				   le32_to_cpu(tx_resp->rate_n_flags),
+				   tx_resp->failure_frame,
+				   SEQ_TO_INDEX(sequence), ssn,
+				   le16_to_cpu(tx_resp->seq_ctl));
+	}
+
+	iwl_check_abort_status(priv, tx_resp->frame_count, status);
+	spin_unlock_bh(&priv->sta_lock);
+
+	while (!skb_queue_empty(&skbs)) {
+		skb = __skb_dequeue(&skbs);
+		ieee80211_tx_status(priv->hw, skb);
+	}
+}
+
+/**
+ * iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
+ * @priv: driver private data
+ * @rxb: Rx buffer containing the block-ack notification
+ *
+ * Handles the block-acknowledge notification from the device, which reports
+ * success of frames sent via aggregation.
+ */
+void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
+				   struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data;
+	struct iwl_ht_agg *agg;
+	struct sk_buff_head reclaimed_skbs;
+	struct sk_buff *skb;
+	int sta_id;
+	int tid;
+	int freed;
+
+	/* "flow" corresponds to Tx queue */
+	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
+
+	/* "ssn" is start of block-ack Tx window, corresponds to index
+	 * (in Tx queue's circular buffer) of first TFD/frame in window */
+	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
+
+	if (scd_flow >= priv->cfg->base_params->num_of_queues) {
+		IWL_ERR(priv,
+			"BUG_ON scd_flow is bigger than number of queues\n");
+		return;
+	}
+
+	sta_id = ba_resp->sta_id;
+	tid = ba_resp->tid;
+	agg = &priv->tid_data[sta_id][tid].agg;
+
+	spin_lock_bh(&priv->sta_lock);
+
+	if (unlikely(!agg->wait_for_ba)) {
+		if (unlikely(ba_resp->bitmap))
+			IWL_ERR(priv, "Received BA when not expected\n");
+		spin_unlock_bh(&priv->sta_lock);
+		return;
+	}
+
+	if (unlikely(scd_flow != agg->txq_id)) {
+		/*
+		 * FIXME: this is a uCode bug which needs to be addressed;
+		 * log the information and return for now.
+		 * Since it can possibly happen very often, and in order
+		 * not to fill the syslog, don't use IWL_ERR or IWL_WARN.
+		 */
+		IWL_DEBUG_TX_QUEUES(priv,
+				    "Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
+				    scd_flow, sta_id, tid, agg->txq_id);
+		spin_unlock_bh(&priv->sta_lock);
+		return;
+	}
+
+	__skb_queue_head_init(&reclaimed_skbs);
+
+	/* Release all TFDs before the SSN, i.e. all TFDs in front of
+	 * block-ack window (we assume that they've been successfully
+	 * transmitted ... if not, it's too late anyway). */
+	iwl_trans_reclaim(priv->trans, scd_flow, ba_resp_scd_ssn,
+			  &reclaimed_skbs);
+
+	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
+			   "sta_id = %d\n",
+			   agg->wait_for_ba,
+			   (u8 *) &ba_resp->sta_addr_lo32,
+			   ba_resp->sta_id);
+	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, "
+			   "scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
+			   ba_resp->tid, le16_to_cpu(ba_resp->seq_ctl),
+			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
+			   scd_flow, ba_resp_scd_ssn, ba_resp->txed,
+			   ba_resp->txed_2_done);
+
+	/* Mark that the expected block-ack response arrived */
+	agg->wait_for_ba = false;
+
+	/* Sanity check values reported by uCode */
+	if (ba_resp->txed_2_done > ba_resp->txed) {
+		IWL_DEBUG_TX_REPLY(priv,
+			"bogus sent(%d) and ack(%d) count\n",
+			ba_resp->txed, ba_resp->txed_2_done);
+		/*
+		 * set txed = txed_2_done,
+		 * so the bogus count won't impact rate scaling
+		 */
+		ba_resp->txed = ba_resp->txed_2_done;
+	}
+
+	priv->tid_data[sta_id][tid].next_reclaimed = ba_resp_scd_ssn;
+
+	iwlagn_check_ratid_empty(priv, sta_id, tid);
+	freed = 0;
+
+	skb_queue_walk(&reclaimed_skbs, skb) {
+		struct ieee80211_hdr *hdr = (void *)skb->data;
+		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+		if (ieee80211_is_data_qos(hdr->frame_control))
+			freed++;
+		else
+			WARN_ON_ONCE(1);
+
+		iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);
+
+		memset(&info->status, 0, sizeof(info->status));
+		/* Packet was transmitted successfully, failures come as single
+		 * frames because before failing a frame the firmware transmits
+		 * it without aggregation at least once.
+		 */
+		info->flags |= IEEE80211_TX_STAT_ACK;
+
+		if (freed == 1) {
+			/* this is the first skb we deliver in this batch */
+			/* put the rate scaling data there */
+			info = IEEE80211_SKB_CB(skb);
+			memset(&info->status, 0, sizeof(info->status));
+			info->flags |= IEEE80211_TX_STAT_AMPDU;
+			info->status.ampdu_ack_len = ba_resp->txed_2_done;
+			info->status.ampdu_len = ba_resp->txed;
+			iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags,
+						    info);
+		}
+	}
+
+	spin_unlock_bh(&priv->sta_lock);
+
+	while (!skb_queue_empty(&reclaimed_skbs)) {
+		skb = __skb_dequeue(&reclaimed_skbs);
+		ieee80211_tx_status(priv->hw, skb);
+	}
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
new file mode 100644
index 0000000..d6013bf
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
@@ -0,0 +1,443 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+
+#include "iwl-io.h"
+#include "iwl-agn-hw.h"
+#include "iwl-trans.h"
+#include "iwl-fh.h"
+#include "iwl-op-mode.h"
+
+#include "dev.h"
+#include "agn.h"
+#include "calib.h"
+
+/******************************************************************************
+ *
+ * uCode download functions
+ *
+ ******************************************************************************/
+
+/*
+ *  Calibration
+ */
+static int iwl_set_Xtal_calib(struct iwl_priv *priv)
+{
+	struct iwl_calib_xtal_freq_cmd cmd;
+	__le16 *xtal_calib = priv->nvm_data->xtal_calib;
+
+	iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD);
+	cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
+	cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
+	return iwl_calib_set(priv, (void *)&cmd, sizeof(cmd));
+}
+
+static int iwl_set_temperature_offset_calib(struct iwl_priv *priv)
+{
+	struct iwl_calib_temperature_offset_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
+	cmd.radio_sensor_offset = priv->nvm_data->raw_temperature;
+	if (!(cmd.radio_sensor_offset))
+		cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;
+
+	IWL_DEBUG_CALIB(priv, "Radio sensor offset: %d\n",
+			le16_to_cpu(cmd.radio_sensor_offset));
+	return iwl_calib_set(priv, (void *)&cmd, sizeof(cmd));
+}
+
+static int iwl_set_temperature_offset_calib_v2(struct iwl_priv *priv)
+{
+	struct iwl_calib_temperature_offset_v2_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
+	cmd.radio_sensor_offset_high = priv->nvm_data->kelvin_temperature;
+	cmd.radio_sensor_offset_low = priv->nvm_data->raw_temperature;
+	if (!cmd.radio_sensor_offset_low) {
+		IWL_DEBUG_CALIB(priv, "no info in EEPROM, use default\n");
+		cmd.radio_sensor_offset_low = DEFAULT_RADIO_SENSOR_OFFSET;
+		cmd.radio_sensor_offset_high = DEFAULT_RADIO_SENSOR_OFFSET;
+	}
+	cmd.burntVoltageRef = priv->nvm_data->calib_voltage;
+
+	IWL_DEBUG_CALIB(priv, "Radio sensor offset high: %d\n",
+			le16_to_cpu(cmd.radio_sensor_offset_high));
+	IWL_DEBUG_CALIB(priv, "Radio sensor offset low: %d\n",
+			le16_to_cpu(cmd.radio_sensor_offset_low));
+	IWL_DEBUG_CALIB(priv, "Voltage Ref: %d\n",
+			le16_to_cpu(cmd.burntVoltageRef));
+
+	return iwl_calib_set(priv, (void *)&cmd, sizeof(cmd));
+}
+
+static int iwl_send_calib_cfg(struct iwl_priv *priv)
+{
+	struct iwl_calib_cfg_cmd calib_cfg_cmd;
+	struct iwl_host_cmd cmd = {
+		.id = CALIBRATION_CFG_CMD,
+		.len = { sizeof(struct iwl_calib_cfg_cmd), },
+		.data = { &calib_cfg_cmd, },
+	};
+
+	memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
+	calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
+	calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL;
+	calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL;
+	calib_cfg_cmd.ucd_calib_cfg.flags =
+		IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK;
+
+	return iwl_dvm_send_cmd(priv, &cmd);
+}
+
+int iwl_init_alive_start(struct iwl_priv *priv)
+{
+	int ret;
+
+	if (priv->lib->bt_params &&
+	    priv->lib->bt_params->advanced_bt_coexist) {
+		/*
+		 * Tell uCode that we are ready to perform calibration.
+		 * This needs to happen before any calibration is run.
+		 * There is no need to close the envelope since we are
+		 * going to load the runtime uCode later.
+		 */
+		ret = iwl_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
+			BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
+		if (ret)
+			return ret;
+	}
+
+	ret = iwl_send_calib_cfg(priv);
+	if (ret)
+		return ret;
+
+	/*
+	 * temperature offset calibration is only needed for runtime ucode,
+	 * so prepare the value now.
+	 */
+	if (priv->lib->need_temp_offset_calib) {
+		if (priv->lib->temp_offset_v2)
+			return iwl_set_temperature_offset_calib_v2(priv);
+		else
+			return iwl_set_temperature_offset_calib(priv);
+	}
+
+	return 0;
+}
+
+static int iwl_send_wimax_coex(struct iwl_priv *priv)
+{
+	struct iwl_wimax_coex_cmd coex_cmd;
+
+	/* coexistence is disabled */
+	memset(&coex_cmd, 0, sizeof(coex_cmd));
+
+	return iwl_dvm_send_cmd_pdu(priv,
+				COEX_PRIORITY_TABLE_CMD, 0,
+				sizeof(coex_cmd), &coex_cmd);
+}
+
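+/*
+ * Each entry packs a BT coexistence priority together with a flag
+ * telling whether the event runs on the shared antenna, using the
+ * IWL_BT_COEX_PRIO_TBL_*_POS shifts.
+ */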
+static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
+	((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
+		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
+	((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
+		(1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
+	((BT_COEX_PRIO_TBL_PRIO_LOW << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
+		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
+	((BT_COEX_PRIO_TBL_PRIO_LOW << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
+		(1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
+	((BT_COEX_PRIO_TBL_PRIO_HIGH << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
+		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
+	((BT_COEX_PRIO_TBL_PRIO_HIGH << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
+		(1 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
+	((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
+		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
+	((BT_COEX_PRIO_TBL_PRIO_COEX_OFF << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
+		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
+	((BT_COEX_PRIO_TBL_PRIO_COEX_ON << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
+		(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
+	0, 0, 0, 0, 0, 0, 0
+};
+
+void iwl_send_prio_tbl(struct iwl_priv *priv)
+{
+	struct iwl_bt_coex_prio_table_cmd prio_tbl_cmd;
+
+	memcpy(prio_tbl_cmd.prio_tbl, iwl_bt_prio_tbl,
+		sizeof(iwl_bt_prio_tbl));
+	if (iwl_dvm_send_cmd_pdu(priv,
+				REPLY_BT_COEX_PRIO_TABLE, 0,
+				sizeof(prio_tbl_cmd), &prio_tbl_cmd))
+		IWL_ERR(priv, "failed to send BT prio tbl command\n");
+}
+
+int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
+{
+	struct iwl_bt_coex_prot_env_cmd env_cmd;
+	int ret;
+
+	env_cmd.action = action;
+	env_cmd.type = type;
+	ret = iwl_dvm_send_cmd_pdu(priv,
+			       REPLY_BT_COEX_PROT_ENV, 0,
+			       sizeof(env_cmd), &env_cmd);
+	if (ret)
+		IWL_ERR(priv, "failed to send BT env command\n");
+	return ret;
+}
+
+static const u8 iwlagn_default_queue_to_tx_fifo[] = {
+	IWL_TX_FIFO_VO,
+	IWL_TX_FIFO_VI,
+	IWL_TX_FIFO_BE,
+	IWL_TX_FIFO_BK,
+};
+
+static const u8 iwlagn_ipan_queue_to_tx_fifo[] = {
+	IWL_TX_FIFO_VO,
+	IWL_TX_FIFO_VI,
+	IWL_TX_FIFO_BE,
+	IWL_TX_FIFO_BK,
+	IWL_TX_FIFO_BK_IPAN,
+	IWL_TX_FIFO_BE_IPAN,
+	IWL_TX_FIFO_VI_IPAN,
+	IWL_TX_FIFO_VO_IPAN,
+	IWL_TX_FIFO_BE_IPAN,
+	IWL_TX_FIFO_UNUSED,
+	IWL_TX_FIFO_AUX,
+};
+
+static int iwl_alive_notify(struct iwl_priv *priv)
+{
+	const u8 *queue_to_txf;
+	u8 n_queues;
+	int ret;
+	int i;
+
+	iwl_trans_fw_alive(priv->trans, 0);
+
+	if (priv->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN &&
+	    priv->nvm_data->sku_cap_ipan_enable) {
+		n_queues = ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo);
+		queue_to_txf = iwlagn_ipan_queue_to_tx_fifo;
+	} else {
+		n_queues = ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo);
+		queue_to_txf = iwlagn_default_queue_to_tx_fifo;
+	}
+
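+	/* map every hardware Tx queue to its FIFO, skipping unused entries */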
+	for (i = 0; i < n_queues; i++)
+		if (queue_to_txf[i] != IWL_TX_FIFO_UNUSED)
+			iwl_trans_ac_txq_enable(priv->trans, i,
+						queue_to_txf[i], 0);
+
+	priv->passive_no_rx = false;
+		 * If the transport didn't know that we wanted to start
+		 * aggregation, don't tell it to stop it either. This can
+		 * happen when we don't get the addBA response in time, or
+		 * when we didn't have time to drain the AC queues.
+		return ret;
+
+	if (!priv->lib->no_xtal_calib) {
+		ret = iwl_set_Xtal_calib(priv);
+		if (ret)
+			return ret;
+	}
+
+	return iwl_send_calib_results(priv);
+}
+
+struct iwl_alive_data {
+	bool valid;
+	u8 subtype;
+};
+
+static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
+			 struct iwl_rx_packet *pkt, void *data)
+{
+	struct iwl_priv *priv =
+		container_of(notif_wait, struct iwl_priv, notif_wait);
+	struct iwl_alive_data *alive_data = data;
+	struct iwl_alive_resp *palive;
+
+	palive = (void *)pkt->data;
+
+	IWL_DEBUG_FW(priv, "Alive ucode status 0x%08X revision "
+		       "0x%01X 0x%01X\n",
+		       palive->is_valid, palive->ver_type,
+		       palive->ver_subtype);
+
+	priv->device_pointers.error_event_table =
+		le32_to_cpu(palive->error_event_table_ptr);
+	priv->device_pointers.log_event_table =
+		le32_to_cpu(palive->log_event_table_ptr);
+
+	alive_data->subtype = palive->ver_subtype;
+	alive_data->valid = palive->is_valid == UCODE_VALID_OK;
+
+	return true;
+}
+
+#define UCODE_ALIVE_TIMEOUT	HZ
+#define UCODE_CALIB_TIMEOUT	(2*HZ)
+
+int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
+				 enum iwl_ucode_type ucode_type)
+{
+	struct iwl_notification_wait alive_wait;
+	struct iwl_alive_data alive_data;
+	const struct fw_img *fw;
+	int ret;
+	enum iwl_ucode_type old_type;
+	static const u16 alive_cmd[] = { REPLY_ALIVE };
+
+	fw = iwl_get_ucode_image(priv->fw, ucode_type);
+	if (WARN_ON(!fw))
+		return -EINVAL;
+
+	old_type = priv->cur_ucode;
+	priv->cur_ucode = ucode_type;
+	priv->ucode_loaded = false;
+
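+	/*
+	 * Register for the ALIVE notification before starting the firmware
+	 * so that the notification cannot race with the wait below.
+	 */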
+	iwl_init_notification_wait(&priv->notif_wait, &alive_wait,
+				   alive_cmd, ARRAY_SIZE(alive_cmd),
+				   iwl_alive_fn, &alive_data);
+
+	ret = iwl_trans_start_fw(priv->trans, fw, false);
+	if (ret) {
+		priv->cur_ucode = old_type;
+		iwl_remove_notification(&priv->notif_wait, &alive_wait);
+		return ret;
+	}
+
+	/*
+	 * Some things may run in the background now, but we
+	 * just wait for the ALIVE notification here.
+	 */
+	ret = iwl_wait_notification(&priv->notif_wait, &alive_wait,
+					UCODE_ALIVE_TIMEOUT);
+	if (ret) {
+		priv->cur_ucode = old_type;
+		return ret;
+	}
+
+	if (!alive_data.valid) {
+		IWL_ERR(priv, "Loaded ucode is not valid!\n");
+		priv->cur_ucode = old_type;
+		return -EIO;
+	}
+
+	priv->ucode_loaded = true;
+
+	if (ucode_type != IWL_UCODE_WOWLAN) {
+		/* delay a bit to give rfkill time to run */
+		msleep(5);
+	}
+
+	ret = iwl_alive_notify(priv);
+	if (ret) {
+		IWL_WARN(priv,
+			"Could not complete ALIVE transition: %d\n", ret);
+		priv->cur_ucode = old_type;
+		return ret;
+	}
+
+	return 0;
+}
+
+static bool iwlagn_wait_calib(struct iwl_notif_wait_data *notif_wait,
+			      struct iwl_rx_packet *pkt, void *data)
+{
+	struct iwl_priv *priv = data;
+	struct iwl_calib_hdr *hdr;
+
+	if (pkt->hdr.cmd != CALIBRATION_RES_NOTIFICATION) {
+		WARN_ON(pkt->hdr.cmd != CALIBRATION_COMPLETE_NOTIFICATION);
+		return true;
+	}
+
+	hdr = (struct iwl_calib_hdr *)pkt->data;
+
+	if (iwl_calib_set(priv, hdr, iwl_rx_packet_payload_len(pkt)))
+		IWL_ERR(priv, "Failed to record calibration data %d\n",
+			hdr->op_code);
+
+	return false;
+}
+
+int iwl_run_init_ucode(struct iwl_priv *priv)
+{
+	struct iwl_notification_wait calib_wait;
+	static const u16 calib_complete[] = {
+		CALIBRATION_RES_NOTIFICATION,
+		CALIBRATION_COMPLETE_NOTIFICATION
+	};
+	int ret;
+
+	lockdep_assert_held(&priv->mutex);
+
+	/* No init ucode required? Curious, but maybe ok */
+	if (!priv->fw->img[IWL_UCODE_INIT].num_sec)
+		return 0;
+
+	iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
+				   calib_complete, ARRAY_SIZE(calib_complete),
+				   iwlagn_wait_calib, priv);
+
+	/* Will also start the device */
+	ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_INIT);
+	if (ret)
+		goto error;
+
+	ret = iwl_init_alive_start(priv);
+	if (ret)
+		goto error;
+
+	/*
+	 * Some things may run in the background now, but we
+	 * just wait for the calibration complete notification.
+	 */
+	ret = iwl_wait_notification(&priv->notif_wait, &calib_wait,
+					UCODE_CALIB_TIMEOUT);
+
+	goto out;
+
+ error:
+	iwl_remove_notification(&priv->notif_wait, &calib_wait);
+ out:
+	/* Whatever happened, stop the device */
+	iwl_trans_stop_device(priv->trans);
+	priv->ucode_loaded = false;
+
+	return ret;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
new file mode 100644
index 0000000..75cae54
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -0,0 +1,210 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017        Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program;
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017        Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include "iwl-drv.h"
+#include "iwl-debug.h"
+#include "acpi.h"
+
+void *iwl_acpi_get_object(struct device *dev, acpi_string method)
+{
+	acpi_handle root_handle;
+	acpi_handle handle;
+	struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL};
+	acpi_status status;
+
+	root_handle = ACPI_HANDLE(dev);
+	if (!root_handle) {
+		IWL_DEBUG_DEV_RADIO(dev,
+				    "Could not retrieve root port ACPI handle\n");
+		return ERR_PTR(-ENOENT);
+	}
+
+	/* Get the method's handle */
+	status = acpi_get_handle(root_handle, method, &handle);
+	if (ACPI_FAILURE(status)) {
+		IWL_DEBUG_DEV_RADIO(dev, "%s method not found\n", method);
+		return ERR_PTR(-ENOENT);
+	}
+
+	/* Call the method with no arguments */
+	status = acpi_evaluate_object(handle, NULL, NULL, &buf);
+	if (ACPI_FAILURE(status)) {
+		IWL_DEBUG_DEV_RADIO(dev, "%s invocation failed (0x%x)\n",
+				    method, status);
+		return ERR_PTR(-ENOENT);
+	}
+
+	return buf.pointer;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_object);
+
+union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
+					 union acpi_object *data,
+					 int data_size)
+{
+	int i;
+	union acpi_object *wifi_pkg;
+
+	/*
+	 * We need at least one entry in the wifi package that
+	 * describes the domain, and one more entry, otherwise there's
+	 * no point in reading it.
+	 */
+	if (WARN_ON_ONCE(data_size < 2))
+		return ERR_PTR(-EINVAL);
+
+	/*
+	 * We need at least two packages, one for the revision and one
+	 * for the data itself.  Also check that the revision is valid
+	 * (i.e. it is an integer set to 0).
+	 */
+	if (data->type != ACPI_TYPE_PACKAGE ||
+	    data->package.count < 2 ||
+	    data->package.elements[0].type != ACPI_TYPE_INTEGER ||
+	    data->package.elements[0].integer.value != 0) {
+		IWL_DEBUG_DEV_RADIO(dev, "Unsupported packages structure\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* loop through all the packages to find the one for WiFi */
+	for (i = 1; i < data->package.count; i++) {
+		union acpi_object *domain;
+
+		wifi_pkg = &data->package.elements[i];
+
+		/* skip entries that are not a package with the right size */
+		if (wifi_pkg->type != ACPI_TYPE_PACKAGE ||
+		    wifi_pkg->package.count != data_size)
+			continue;
+
+		domain = &wifi_pkg->package.elements[0];
+		if (domain->type == ACPI_TYPE_INTEGER &&
+		    domain->integer.value == ACPI_WIFI_DOMAIN)
+			goto found;
+	}
+
+	return ERR_PTR(-ENOENT);
+
+found:
+	return wifi_pkg;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_wifi_pkg);
+
+int iwl_acpi_get_mcc(struct device *dev, char *mcc)
+{
+	union acpi_object *wifi_pkg, *data;
+	u32 mcc_val;
+	int ret;
+
+	data = iwl_acpi_get_object(dev, ACPI_WRDD_METHOD);
+	if (IS_ERR(data))
+		return PTR_ERR(data);
+
+	wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_WRDD_WIFI_DATA_SIZE);
+	if (IS_ERR(wifi_pkg)) {
+		ret = PTR_ERR(wifi_pkg);
+		goto out_free;
+	}
+
+	if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+		ret = -EINVAL;
+		goto out_free;
+	}
+
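+	/* the MCC is stored as two ASCII characters packed into one integer */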
+	mcc_val = wifi_pkg->package.elements[1].integer.value;
+
+	mcc[0] = (mcc_val >> 8) & 0xff;
+	mcc[1] = mcc_val & 0xff;
+	mcc[2] = '\0';
+
+	ret = 0;
+out_free:
+	kfree(data);
+	return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_mcc);
+
+u64 iwl_acpi_get_pwr_limit(struct device *dev)
+{
+	union acpi_object *data, *wifi_pkg;
+	u64 dflt_pwr_limit;
+
+	data = iwl_acpi_get_object(dev, ACPI_SPLC_METHOD);
+	if (IS_ERR(data)) {
+		dflt_pwr_limit = 0;
+		goto out;
+	}
+
+	wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data,
+					 ACPI_SPLC_WIFI_DATA_SIZE);
+	if (IS_ERR(wifi_pkg) ||
+	    wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+		dflt_pwr_limit = 0;
+		goto out_free;
+	}
+
+	dflt_pwr_limit = wifi_pkg->package.elements[1].integer.value;
+out_free:
+	kfree(data);
+out:
+	return dflt_pwr_limit;
+}
+IWL_EXPORT_SYMBOL(iwl_acpi_get_pwr_limit);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
new file mode 100644
index 0000000..0b3b122
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -0,0 +1,140 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017        Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program;
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017        Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_fw_acpi__
+#define __iwl_fw_acpi__
+
+#include <linux/acpi.h>
+
+#define ACPI_WRDS_METHOD	"WRDS"
+#define ACPI_EWRD_METHOD	"EWRD"
+#define ACPI_WGDS_METHOD	"WGDS"
+#define ACPI_WRDD_METHOD	"WRDD"
+#define ACPI_SPLC_METHOD	"SPLC"
+
+#define ACPI_WIFI_DOMAIN	(0x07)
+
+#define ACPI_SAR_TABLE_SIZE		10
+#define ACPI_SAR_PROFILE_NUM		4
+
+#define ACPI_GEO_TABLE_SIZE		6
+#define ACPI_NUM_GEO_PROFILES		3
+#define ACPI_GEO_PER_CHAIN_SIZE		3
+
+#define ACPI_SAR_NUM_CHAIN_LIMITS	2
+#define ACPI_SAR_NUM_SUB_BANDS		5
+
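+/*
+ * A wifi package carries the domain ID and a few method-specific header
+ * entries in front of the table itself, hence the small constants added
+ * to the table sizes below.
+ */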
+#define ACPI_WRDS_WIFI_DATA_SIZE	(ACPI_SAR_TABLE_SIZE + 2)
+#define ACPI_EWRD_WIFI_DATA_SIZE	((ACPI_SAR_PROFILE_NUM - 1) * \
+					 ACPI_SAR_TABLE_SIZE + 3)
+#define ACPI_WGDS_WIFI_DATA_SIZE	19
+#define ACPI_WRDD_WIFI_DATA_SIZE	2
+#define ACPI_SPLC_WIFI_DATA_SIZE	2
+
+#define ACPI_WGDS_NUM_BANDS		2
+#define ACPI_WGDS_TABLE_SIZE		3
+
+#ifdef CONFIG_ACPI
+
+void *iwl_acpi_get_object(struct device *dev, acpi_string method);
+union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
+					 union acpi_object *data,
+					 int data_size);
+
+/**
+ * iwl_acpi_get_mcc - read MCC from ACPI, if available
+ *
+ * @dev: the struct device
+ * @mcc: output buffer (3 bytes) that will get the MCC
+ *
+ * This function tries to read the current MCC from ACPI if available.
+ */
+int iwl_acpi_get_mcc(struct device *dev, char *mcc);
+
+u64 iwl_acpi_get_pwr_limit(struct device *dev);
+
+#else /* CONFIG_ACPI */
+
+static inline void *iwl_acpi_get_object(struct device *dev, acpi_string method)
+{
+	return ERR_PTR(-ENOENT);
+}
+
+static inline union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
+						       union acpi_object *data,
+						       int data_size)
+{
+	return ERR_PTR(-ENOENT);
+}
+
+static inline int iwl_acpi_get_mcc(struct device *dev, char *mcc)
+{
+	return -ENOENT;
+}
+
+static inline u64 iwl_acpi_get_pwr_limit(struct device *dev)
+{
+	return 0;
+}
+
+#endif /* CONFIG_ACPI */
+#endif /* __iwl_fw_acpi__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
new file mode 100644
index 0000000..08d3d8a
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
@@ -0,0 +1,192 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_fw_api_alive_h__
+#define __iwl_fw_api_alive_h__
+
+/* alive response is_valid values */
+#define ALIVE_RESP_UCODE_OK	BIT(0)
+#define ALIVE_RESP_RFKILL	BIT(1)
+
+/* alive response ver_type values */
+enum {
+	FW_TYPE_HW = 0,
+	FW_TYPE_PROT = 1,
+	FW_TYPE_AP = 2,
+	FW_TYPE_WOWLAN = 3,
+	FW_TYPE_TIMING = 4,
+	FW_TYPE_WIPAN = 5
+};
+
+/* alive response ver_subtype values */
+enum {
+	FW_SUBTYPE_FULL_FEATURE = 0,
+	FW_SUBTYPE_BOOTSRAP = 1, /* Not valid */
+	FW_SUBTYPE_REDUCED = 2,
+	FW_SUBTYPE_ALIVE_ONLY = 3,
+	FW_SUBTYPE_WOWLAN = 4,
+	FW_SUBTYPE_AP_SUBTYPE = 5,
+	FW_SUBTYPE_WIPAN = 6,
+	FW_SUBTYPE_INITIALIZE = 9
+};
+
+#define IWL_ALIVE_STATUS_ERR 0xDEAD
+#define IWL_ALIVE_STATUS_OK 0xCAFE
+
+#define IWL_ALIVE_FLG_RFKILL	BIT(0)
+
+struct iwl_lmac_alive {
+	__le32 ucode_major;
+	__le32 ucode_minor;
+	u8 ver_subtype;
+	u8 ver_type;
+	u8 mac;
+	u8 opt;
+	__le32 timestamp;
+	__le32 error_event_table_ptr;	/* SRAM address for error log */
+	__le32 log_event_table_ptr;	/* SRAM address for LMAC event log */
+	__le32 cpu_register_ptr;
+	__le32 dbgm_config_ptr;
+	__le32 alive_counter_ptr;
+	__le32 scd_base_ptr;		/* SRAM address for SCD */
+	__le32 st_fwrd_addr;		/* pointer to Store and forward */
+	__le32 st_fwrd_size;
+} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_3 */
+
+struct iwl_umac_alive {
+	__le32 umac_major;		/* UMAC version: major */
+	__le32 umac_minor;		/* UMAC version: minor */
+	__le32 error_info_addr;		/* SRAM address for UMAC error log */
+	__le32 dbg_print_buff_addr;
+} __packed; /* UMAC_ALIVE_DATA_API_S_VER_2 */
+
+struct mvm_alive_resp_v3 {
+	__le16 status;
+	__le16 flags;
+	struct iwl_lmac_alive lmac_data;
+	struct iwl_umac_alive umac_data;
+} __packed; /* ALIVE_RES_API_S_VER_3 */
+
+struct mvm_alive_resp {
+	__le16 status;
+	__le16 flags;
+	struct iwl_lmac_alive lmac_data[2];
+	struct iwl_umac_alive umac_data;
+} __packed; /* ALIVE_RES_API_S_VER_4 */
+
+/**
+ * enum iwl_extended_cfg_flags - commands driver may send before
+ *	finishing init flow
+ * @IWL_INIT_DEBUG_CFG: driver is going to send debug config command
+ * @IWL_INIT_NVM: driver is going to send NVM_ACCESS commands
+ * @IWL_INIT_PHY: driver is going to send PHY_DB commands
+ */
+enum iwl_extended_cfg_flags {
+	IWL_INIT_DEBUG_CFG,
+	IWL_INIT_NVM,
+	IWL_INIT_PHY,
+};
+
+/**
+ * struct iwl_extended_cfg_cmd - mark what commands ucode should wait for
+ * before finishing init flows
+ * @init_flags: values from iwl_extended_cfg_flags
+ */
+struct iwl_init_extended_cfg_cmd {
+	__le32 init_flags;
+} __packed; /* INIT_EXTENDED_CFG_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_radio_version_notif - information on the radio version
+ * ( RADIO_VERSION_NOTIFICATION = 0x68 )
+ * @radio_flavor: radio flavor
+ * @radio_step: radio version step
+ * @radio_dash: radio version dash
+ */
+struct iwl_radio_version_notif {
+	__le32 radio_flavor;
+	__le32 radio_step;
+	__le32 radio_dash;
+} __packed; /* RADIO_VERSION_NOTIFICATION_S_VER_1 */
+
+enum iwl_card_state_flags {
+	CARD_ENABLED		= 0x00,
+	HW_CARD_DISABLED	= 0x01,
+	SW_CARD_DISABLED	= 0x02,
+	CT_KILL_CARD_DISABLED	= 0x04,
+	HALT_CARD_DISABLED	= 0x08,
+	CARD_DISABLED_MSK	= 0x0f,
+	CARD_IS_RX_ON		= 0x10,
+};
+
+/**
+ * struct iwl_card_state_notif - information on the card state
+ * ( CARD_STATE_NOTIFICATION = 0xa1 )
+ * @flags: &enum iwl_card_state_flags
+ */
+struct iwl_card_state_notif {
+	__le32 flags;
+} __packed; /* CARD_STATE_NTFY_API_S_VER_1 */
+
+#endif /* __iwl_fw_api_alive_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h b/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h
new file mode 100644
index 0000000..570f190
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h
@@ -0,0 +1,179 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_fw_api_binding_h__
+#define __iwl_fw_api_binding_h__
+
+#define MAX_MACS_IN_BINDING	(3)
+#define MAX_BINDINGS		(4)
+
+/**
+ * struct iwl_binding_cmd_v1 - configuring bindings
+ * ( BINDING_CONTEXT_CMD = 0x2b )
+ * @id_and_color: ID and color of the relevant Binding,
+ *	&enum iwl_ctxt_id_and_color
+ * @action: action to perform, one of FW_CTXT_ACTION_*
+ * @macs: array of MAC id and colors which belong to the binding,
+ *	&enum iwl_ctxt_id_and_color
+ * @phy: PHY id and color which belongs to the binding,
+ *	&enum iwl_ctxt_id_and_color
+ */
+struct iwl_binding_cmd_v1 {
+	/* COMMON_INDEX_HDR_API_S_VER_1 */
+	__le32 id_and_color;
+	__le32 action;
+	/* BINDING_DATA_API_S_VER_1 */
+	__le32 macs[MAX_MACS_IN_BINDING];
+	__le32 phy;
+} __packed; /* BINDING_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_binding_cmd - configuring bindings
+ * ( BINDING_CONTEXT_CMD = 0x2b )
+ * @id_and_color: ID and color of the relevant Binding,
+ *	&enum iwl_ctxt_id_and_color
+ * @action: action to perform, one of FW_CTXT_ACTION_*
+ * @macs: array of MAC id and colors which belong to the binding
+ *	&enum iwl_ctxt_id_and_color
+ * @phy: PHY id and color which belongs to the binding
+ *	&enum iwl_ctxt_id_and_color
+ * @lmac_id: the lmac id the binding belongs to
+ */
+struct iwl_binding_cmd {
+	/* COMMON_INDEX_HDR_API_S_VER_1 */
+	__le32 id_and_color;
+	__le32 action;
+	/* BINDING_DATA_API_S_VER_1 */
+	__le32 macs[MAX_MACS_IN_BINDING];
+	__le32 phy;
+	__le32 lmac_id;
+} __packed; /* BINDING_CMD_API_S_VER_2 */
+
+#define IWL_BINDING_CMD_SIZE_V1	sizeof(struct iwl_binding_cmd_v1)
+#define IWL_LMAC_24G_INDEX		0
+#define IWL_LMAC_5G_INDEX		1
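+/*
+ * Versioning sketch (editorial): since &struct iwl_binding_cmd only extends
+ * v1 with @lmac_id at the end, a driver can fill the v2 layout and simply
+ * trim the payload when talking to v1 firmware:
+ *
+ *	struct iwl_binding_cmd cmd = {
+ *		.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(id, color)),
+ *		.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+ *		.lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX),
+ *	};
+ *	u32 size = fw_has_cdb ? sizeof(cmd) : IWL_BINDING_CMD_SIZE_V1;
+ *
+ * where fw_has_cdb stands in for the actual firmware capability check.
+ */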
+
+/* The maximal number of fragments in the FW's schedule session */
+#define IWL_MVM_MAX_QUOTA 128
+
+/**
+ * struct iwl_time_quota_data_v1 - configuration of time quota per binding
+ * @id_and_color: ID and color of the relevant Binding,
+ *	&enum iwl_ctxt_id_and_color
+ * @quota: absolute time quota in TU. The scheduler will try to divide the
+ *	remaining quota (after Time Events) according to this quota.
+ * @max_duration: max uninterrupted context duration in TU
+ */
+struct iwl_time_quota_data_v1 {
+	__le32 id_and_color;
+	__le32 quota;
+	__le32 max_duration;
+} __packed; /* TIME_QUOTA_DATA_API_S_VER_1 */
+
+/**
+ * struct iwl_time_quota_cmd_v1 - configuration of time quota between bindings
+ * ( TIME_QUOTA_CMD = 0x2c )
+ * @quotas: allocations per binding
+ * Note: on non-CDB the fourth one is the auxiliary mac and is
+ *	essentially zero.
+ *	On CDB the fourth one is a regular binding.
+ */
+struct iwl_time_quota_cmd_v1 {
+	struct iwl_time_quota_data_v1 quotas[MAX_BINDINGS];
+} __packed; /* TIME_QUOTA_ALLOCATION_CMD_API_S_VER_1 */
+
+enum iwl_quota_low_latency {
+	IWL_QUOTA_LOW_LATENCY_NONE = 0,
+	IWL_QUOTA_LOW_LATENCY_TX = BIT(0),
+	IWL_QUOTA_LOW_LATENCY_RX = BIT(1),
+	IWL_QUOTA_LOW_LATENCY_TX_RX =
+		IWL_QUOTA_LOW_LATENCY_TX | IWL_QUOTA_LOW_LATENCY_RX,
+};
+
+/**
+ * struct iwl_time_quota_data - configuration of time quota per binding
+ * @id_and_color: ID and color of the relevant Binding.
+ * @quota: absolute time quota in TU. The scheduler will try to divide the
+ *	remaining quota (after Time Events) according to this quota.
+ * @max_duration: max uninterrupted context duration in TU
+ * @low_latency: low latency status, &enum iwl_quota_low_latency
+ */
+struct iwl_time_quota_data {
+	__le32 id_and_color;
+	__le32 quota;
+	__le32 max_duration;
+	__le32 low_latency;
+} __packed; /* TIME_QUOTA_DATA_API_S_VER_2 */
+
+/**
+ * struct iwl_time_quota_cmd - configuration of time quota between bindings
+ * ( TIME_QUOTA_CMD = 0x2c )
+ * Note: on non-CDB the fourth one is the auxiliary mac and is essentially
+ * zero.
+ * On CDB the fourth one is a regular binding.
+ *
+ * @quotas: allocations per binding
+ */
+struct iwl_time_quota_cmd {
+	struct iwl_time_quota_data quotas[MAX_BINDINGS];
+} __packed; /* TIME_QUOTA_ALLOCATION_CMD_API_S_VER_2 */
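+/*
+ * Fill sketch (editorial): unused entries in @quotas are conventionally
+ * marked invalid rather than left zeroed, e.g.:
+ *
+ *	struct iwl_time_quota_cmd cmd = {};
+ *	int i;
+ *
+ *	for (i = 0; i < MAX_BINDINGS; i++)
+ *		cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID);
+ *
+ * with active bindings then overwriting their slot with the binding's
+ * id/color, quota and max_duration.
+ */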
+
+#endif /* __iwl_fw_api_binding_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h b/drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h
new file mode 100644
index 0000000..ea4a3f0
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h
@@ -0,0 +1,211 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_fw_api_cmdhdr_h__
+#define __iwl_fw_api_cmdhdr_h__
+
+/**
+ * DOC: Host command section
+ *
+ * A host command is a command issued by the upper layer to the firmware.
+ * There are several firmware versions with several APIs, but the transport
+ * layer is completely agnostic to these differences.
+ * The transport does, however, provide helper functionality (e.g. the
+ * SYNC / ASYNC modes).
+ */
+#define SEQ_TO_QUEUE(s)	(((s) >> 8) & 0x1f)
+#define QUEUE_TO_SEQ(q)	(((q) & 0x1f) << 8)
+#define SEQ_TO_INDEX(s)	((s) & 0xff)
+#define INDEX_TO_SEQ(i)	((i) & 0xff)
+#define SEQ_RX_FRAME	cpu_to_le16(0x8000)
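+/*
+ * Packing sketch (editorial): the driver builds the 16-bit sequence from
+ * the TX queue and TFD index, and recovers them from a response with the
+ * same macros:
+ *
+ *	u16 seq = QUEUE_TO_SEQ(txq_id) | INDEX_TO_SEQ(idx);
+ *
+ *	u8 queue = SEQ_TO_QUEUE(le16_to_cpu(hdr->sequence));
+ *	u8 index = SEQ_TO_INDEX(le16_to_cpu(hdr->sequence));
+ *
+ * where hdr is a &struct iwl_cmd_header (defined below).
+ */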
+
+/*
+ * The following functions retrieve specific information from the id field
+ * in the iwl_host_cmd struct, which contains the command id, the group id
+ * and the version of the command; iwl_cmd_id() performs the inverse and
+ * builds such an id from its parts.
+ */
+static inline u8 iwl_cmd_opcode(u32 cmdid)
+{
+	return cmdid & 0xFF;
+}
+
+static inline u8 iwl_cmd_groupid(u32 cmdid)
+{
+	return ((cmdid & 0xFF00) >> 8);
+}
+
+static inline u8 iwl_cmd_version(u32 cmdid)
+{
+	return ((cmdid & 0xFF0000) >> 16);
+}
+
+static inline u32 iwl_cmd_id(u8 opcode, u8 groupid, u8 version)
+{
+	return opcode + (groupid << 8) + (version << 16);
+}
+
+/* make u16 wide id out of u8 group and opcode */
+#define WIDE_ID(grp, opcode) (((grp) << 8) | (opcode))
+#define DEF_ID(opcode) ((1 << 8) | (opcode))
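+/*
+ * Round-trip sketch (editorial): iwl_cmd_id() and the accessors above are
+ * inverses of each other, and WIDE_ID() yields the same value for
+ * version-0 commands:
+ *
+ *	u32 cmdid = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
+ *
+ *	WARN_ON(iwl_cmd_opcode(cmdid) != SHARED_MEM_CFG_CMD);
+ *	WARN_ON(iwl_cmd_groupid(cmdid) != SYSTEM_GROUP);
+ *	WARN_ON(cmdid != WIDE_ID(SYSTEM_GROUP, SHARED_MEM_CFG_CMD));
+ */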
+
+/* due to the conversion, this group is special; new groups
+ * should be defined in the appropriate fw-api header files
+ */
+#define IWL_ALWAYS_LONG_GROUP	1
+
+/**
+ * struct iwl_cmd_header - (short) command header format
+ *
+ * This header format appears in the beginning of each command sent from the
+ * driver, and each response/notification received from uCode.
+ */
+struct iwl_cmd_header {
+	/**
+	 * @cmd: Command ID: REPLY_RXON, etc.
+	 */
+	u8 cmd;
+	/**
+	 * @group_id: group ID, for commands with groups
+	 */
+	u8 group_id;
+	/**
+	 * @sequence:
+	 * Sequence number for the command.
+	 *
+	 * The driver sets up the sequence number to values of its choosing.
+	 * uCode does not use this value, but passes it back to the driver
+	 * when sending the response to each driver-originated command, so
+	 * the driver can match the response to the command.  Since the values
+	 * don't get used by uCode, the driver may set up an arbitrary format.
+	 *
+	 * There is one exception:  uCode sets bit 15 when it originates
+	 * the response/notification, i.e. when the response/notification
+	 * is not a direct response to a command sent by the driver.  For
+	 * example, uCode issues REPLY_RX when it sends a received frame
+	 * to the driver; it is not a direct response to any driver command.
+	 *
+	 * The Linux driver uses the following format:
+	 *
+	 *  0:7		tfd index - position within TX queue
+	 *  8:12	TX queue id
+	 *  13:14	reserved
+	 *  15		unsolicited RX or uCode-originated notification
+	 */
+	__le16 sequence;
+} __packed;
+
+/**
+ * struct iwl_cmd_header_wide - wide command header format
+ *
+ * This header format appears in the beginning of each command sent from the
+ * driver, and each response/notification received from uCode.
+ * This is the wide version, which carries more information about the
+ * command, such as its length, version and command type.
+ *
+ * @cmd: command ID, like in &struct iwl_cmd_header
+ * @group_id: group ID, like in &struct iwl_cmd_header
+ * @sequence: sequence, like in &struct iwl_cmd_header
+ * @length: length of the command
+ * @reserved: reserved
+ * @version: command version
+ */
+struct iwl_cmd_header_wide {
+	u8 cmd;
+	u8 group_id;
+	__le16 sequence;
+	__le16 length;
+	u8 reserved;
+	u8 version;
+} __packed;
+
+/**
+ * struct iwl_calib_res_notif_phy_db - Receive phy db chunk after calibrations
+ * @type: type of the result - mostly ignored
+ * @length: length of the data
+ * @data: data, length in @length
+ */
+struct iwl_calib_res_notif_phy_db {
+	__le16 type;
+	__le16 length;
+	u8 data[];
+} __packed;
+
+/**
+ * struct iwl_phy_db_cmd - configure operational ucode
+ * @type: type of the data
+ * @length: length of the data
+ * @data: data, length in @length
+ */
+struct iwl_phy_db_cmd {
+	__le16 type;
+	__le16 length;
+	u8 data[];
+} __packed;
+
+/**
+ * struct iwl_cmd_response - generic response struct for most commands
+ * @status: status of the command asked, changes for each one
+ */
+struct iwl_cmd_response {
+	__le32 status;
+};
+
+#endif /* __iwl_fw_api_cmdhdr_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
new file mode 100644
index 0000000..87c1dde
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
@@ -0,0 +1,242 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2017        Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2017        Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __iwl_fw_api_coex_h__
+#define __iwl_fw_api_coex_h__
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#define BITS(nb) (BIT(nb) - 1)
+
+enum iwl_bt_coex_lut_type {
+	BT_COEX_TIGHT_LUT = 0,
+	BT_COEX_LOOSE_LUT,
+	BT_COEX_TX_DIS_LUT,
+
+	BT_COEX_MAX_LUT,
+	BT_COEX_INVALID_LUT = 0xff,
+}; /* BT_COEX_DECISION_LUT_INDEX_API_E_VER_1 */
+
+#define BT_REDUCED_TX_POWER_BIT BIT(7)
+
+enum iwl_bt_coex_mode {
+	BT_COEX_DISABLE			= 0x0,
+	BT_COEX_NW			= 0x1,
+	BT_COEX_BT			= 0x2,
+	BT_COEX_WIFI			= 0x3,
+}; /* BT_COEX_MODES_E */
+
+enum iwl_bt_coex_enabled_modules {
+	BT_COEX_MPLUT_ENABLED		= BIT(0),
+	BT_COEX_MPLUT_BOOST_ENABLED	= BIT(1),
+	BT_COEX_SYNC2SCO_ENABLED	= BIT(2),
+	BT_COEX_CORUN_ENABLED		= BIT(3),
+	BT_COEX_HIGH_BAND_RET		= BIT(4),
+}; /* BT_COEX_MODULES_ENABLE_E_VER_1 */
+
+/**
+ * struct iwl_bt_coex_cmd - bt coex configuration command
+ * @mode: &enum iwl_bt_coex_mode
+ * @enabled_modules: &enum iwl_bt_coex_enabled_modules
+ *
+ * The structure is used for the BT_COEX command.
+ */
+struct iwl_bt_coex_cmd {
+	__le32 mode;
+	__le32 enabled_modules;
+} __packed; /* BT_COEX_CMD_API_S_VER_6 */
+
+/**
+ * struct iwl_bt_coex_reduced_txp_update_cmd - reduced BT TX power update
+ * @reduced_txp: bit BT_REDUCED_TX_POWER_BIT enables/disables the reduction;
+ *	the rest of the bits hold the sta_id value
+ */
+struct iwl_bt_coex_reduced_txp_update_cmd {
+	__le32 reduced_txp;
+} __packed; /* BT_COEX_UPDATE_REDUCED_TX_POWER_API_S_VER_1 */
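+/*
+ * Encoding sketch (editorial): @reduced_txp carries the station ID in the
+ * low bits, with BT_REDUCED_TX_POWER_BIT OR'ed in to enable the reduction:
+ *
+ *	u32 value = sta_id;
+ *
+ *	if (enable)
+ *		value |= BT_REDUCED_TX_POWER_BIT;
+ *	cmd.reduced_txp = cpu_to_le32(value);
+ */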
+
+/**
+ * struct iwl_bt_coex_ci_cmd - bt coex channel inhibition command
+ * @bt_primary_ci: primary channel inhibition bitmap
+ * @primary_ch_phy_id: primary channel PHY ID
+ * @bt_secondary_ci: secondary channel inhibition bitmap
+ * @secondary_ch_phy_id: secondary channel PHY ID
+ *
+ * Used for BT_COEX_CI command
+ */
+struct iwl_bt_coex_ci_cmd {
+	__le64 bt_primary_ci;
+	__le32 primary_ch_phy_id;
+
+	__le64 bt_secondary_ci;
+	__le32 secondary_ch_phy_id;
+} __packed; /* BT_CI_MSG_API_S_VER_2 */
+
+#define BT_MBOX(n_dw, _msg, _pos, _nbits)	\
+	BT_MBOX##n_dw##_##_msg##_POS = (_pos),	\
+	BT_MBOX##n_dw##_##_msg = BITS(_nbits) << BT_MBOX##n_dw##_##_msg##_POS
+
+enum iwl_bt_mxbox_dw0 {
+	BT_MBOX(0, LE_SLAVE_LAT, 0, 3),
+	BT_MBOX(0, LE_PROF1, 3, 1),
+	BT_MBOX(0, LE_PROF2, 4, 1),
+	BT_MBOX(0, LE_PROF_OTHER, 5, 1),
+	BT_MBOX(0, CHL_SEQ_N, 8, 4),
+	BT_MBOX(0, INBAND_S, 13, 1),
+	BT_MBOX(0, LE_MIN_RSSI, 16, 4),
+	BT_MBOX(0, LE_SCAN, 20, 1),
+	BT_MBOX(0, LE_ADV, 21, 1),
+	BT_MBOX(0, LE_MAX_TX_POWER, 24, 4),
+	BT_MBOX(0, OPEN_CON_1, 28, 2),
+};
+
+enum iwl_bt_mxbox_dw1 {
+	BT_MBOX(1, BR_MAX_TX_POWER, 0, 4),
+	BT_MBOX(1, IP_SR, 4, 1),
+	BT_MBOX(1, LE_MSTR, 5, 1),
+	BT_MBOX(1, AGGR_TRFC_LD, 8, 6),
+	BT_MBOX(1, MSG_TYPE, 16, 3),
+	BT_MBOX(1, SSN, 19, 2),
+};
+
+enum iwl_bt_mxbox_dw2 {
+	BT_MBOX(2, SNIFF_ACT, 0, 3),
+	BT_MBOX(2, PAG, 3, 1),
+	BT_MBOX(2, INQUIRY, 4, 1),
+	BT_MBOX(2, CONN, 5, 1),
+	BT_MBOX(2, SNIFF_INTERVAL, 8, 5),
+	BT_MBOX(2, DISC, 13, 1),
+	BT_MBOX(2, SCO_TX_ACT, 16, 2),
+	BT_MBOX(2, SCO_RX_ACT, 18, 2),
+	BT_MBOX(2, ESCO_RE_TX, 20, 2),
+	BT_MBOX(2, SCO_DURATION, 24, 6),
+};
+
+enum iwl_bt_mxbox_dw3 {
+	BT_MBOX(3, SCO_STATE, 0, 1),
+	BT_MBOX(3, SNIFF_STATE, 1, 1),
+	BT_MBOX(3, A2DP_STATE, 2, 1),
+	BT_MBOX(3, ACL_STATE, 3, 1),
+	BT_MBOX(3, MSTR_STATE, 4, 1),
+	BT_MBOX(3, OBX_STATE, 5, 1),
+	BT_MBOX(3, A2DP_SRC, 6, 1),
+	BT_MBOX(3, OPEN_CON_2, 8, 2),
+	BT_MBOX(3, TRAFFIC_LOAD, 10, 2),
+	BT_MBOX(3, CHL_SEQN_LSB, 12, 1),
+	BT_MBOX(3, INBAND_P, 13, 1),
+	BT_MBOX(3, MSG_TYPE_2, 16, 3),
+	BT_MBOX(3, SSN_2, 19, 2),
+	BT_MBOX(3, UPDATE_REQUEST, 21, 1),
+};
+
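+/*
+ * Extraction sketch (editorial): BT_MBOX_MSG() below selects the dword by
+ * _num, masks the field and shifts it down, e.g. reading the BT traffic
+ * load from a &struct iwl_bt_coex_profile_notif:
+ *
+ *	u8 load = BT_MBOX_MSG(notif, 3, TRAFFIC_LOAD);
+ */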
+#define BT_MBOX_MSG(_notif, _num, _field)				     \
+	((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\
+	>> BT_MBOX##_num##_##_field##_POS)
+
+#define BT_MBOX_PRINT(_num, _field, _end)				    \
+			pos += scnprintf(buf + pos, bufsz - pos,	    \
+					 "\t%s: %d%s",			    \
+					 #_field,			    \
+					 BT_MBOX_MSG(notif, _num, _field),  \
+					 true ? "\n" : ", ");
+enum iwl_bt_activity_grading {
+	BT_OFF			= 0,
+	BT_ON_NO_CONNECTION	= 1,
+	BT_LOW_TRAFFIC		= 2,
+	BT_HIGH_TRAFFIC		= 3,
+
+	BT_MAX_AG,
+}; /* BT_COEX_BT_ACTIVITY_GRADING_API_E_VER_1 */
+
+enum iwl_bt_ci_compliance {
+	BT_CI_COMPLIANCE_NONE		= 0,
+	BT_CI_COMPLIANCE_PRIMARY	= 1,
+	BT_CI_COMPLIANCE_SECONDARY	= 2,
+	BT_CI_COMPLIANCE_BOTH		= 3,
+}; /* BT_COEX_CI_COMPLIANCE_E_VER_1 */
+
+/**
+ * struct iwl_bt_coex_profile_notif - notification about BT coex
+ * @mbox_msg: message from BT to WiFi
+ * @msg_idx: the index of the message
+ * @bt_ci_compliance: &enum iwl_bt_ci_compliance
+ * @primary_ch_lut: LUT used for primary channel &enum iwl_bt_coex_lut_type
+ * @secondary_ch_lut: LUT used for secondary channel &enum iwl_bt_coex_lut_type
+ * @bt_activity_grading: the activity of BT &enum iwl_bt_activity_grading
+ * @ttc_status: is TTC enabled - one bit per PHY
+ * @rrc_status: is RRC enabled - one bit per PHY
+ * @reserved: reserved
+ */
+struct iwl_bt_coex_profile_notif {
+	__le32 mbox_msg[4];
+	__le32 msg_idx;
+	__le32 bt_ci_compliance;
+
+	__le32 primary_ch_lut;
+	__le32 secondary_ch_lut;
+	__le32 bt_activity_grading;
+	u8 ttc_status;
+	u8 rrc_status;
+	__le16 reserved;
+} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_4 */
+
+#endif /* __iwl_fw_api_coex_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
new file mode 100644
index 0000000..6dad748
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -0,0 +1,652 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_fw_api_commands_h__
+#define __iwl_fw_api_commands_h__
+
+/**
+ * enum iwl_mvm_command_groups - command groups for the firmware
+ * @LEGACY_GROUP: legacy group, uses command IDs from &enum iwl_legacy_cmds
+ * @LONG_GROUP: legacy group with long header, also uses command IDs
+ *	from &enum iwl_legacy_cmds
+ * @SYSTEM_GROUP: system group, uses command IDs from
+ *	&enum iwl_system_subcmd_ids
+ * @MAC_CONF_GROUP: MAC configuration group, uses command IDs from
+ *	&enum iwl_mac_conf_subcmd_ids
+ * @PHY_OPS_GROUP: PHY operations group, uses command IDs from
+ *	&enum iwl_phy_ops_subcmd_ids
+ * @DATA_PATH_GROUP: data path group, uses command IDs from
+ *	&enum iwl_data_path_subcmd_ids
+ * @NAN_GROUP: NAN group, uses command IDs from &enum iwl_nan_subcmd_ids
+ * @TOF_GROUP: TOF group, uses command IDs from &enum iwl_tof_subcmd_ids
+ * @PROT_OFFLOAD_GROUP: protocol offload group, uses command IDs from
+ *	&enum iwl_prot_offload_subcmd_ids
+ * @REGULATORY_AND_NVM_GROUP: regulatory/NVM group, uses command IDs from
+ *	&enum iwl_regulatory_and_nvm_subcmd_ids
+ * @DEBUG_GROUP: Debug group, uses command IDs from &enum iwl_debug_cmds
+ */
+enum iwl_mvm_command_groups {
+	LEGACY_GROUP = 0x0,
+	LONG_GROUP = 0x1,
+	SYSTEM_GROUP = 0x2,
+	MAC_CONF_GROUP = 0x3,
+	PHY_OPS_GROUP = 0x4,
+	DATA_PATH_GROUP = 0x5,
+	NAN_GROUP = 0x7,
+	TOF_GROUP = 0x8,
+	PROT_OFFLOAD_GROUP = 0xb,
+	REGULATORY_AND_NVM_GROUP = 0xc,
+	DEBUG_GROUP = 0xf,
+};
+
+/**
+ * enum iwl_legacy_cmds - legacy group command IDs
+ */
+enum iwl_legacy_cmds {
+	/**
+	 * @MVM_ALIVE:
+	 * Alive data from the firmware, as described in
+	 * &struct mvm_alive_resp_v3 or &struct mvm_alive_resp.
+	 */
+	MVM_ALIVE = 0x1,
+
+	/**
+	 * @REPLY_ERROR: Cause an error in the firmware, for testing purposes.
+	 */
+	REPLY_ERROR = 0x2,
+
+	/**
+	 * @ECHO_CMD: Send data to the device to have it returned immediately.
+	 */
+	ECHO_CMD = 0x3,
+
+	/**
+	 * @INIT_COMPLETE_NOTIF: Notification that initialization is complete.
+	 */
+	INIT_COMPLETE_NOTIF = 0x4,
+
+	/**
+	 * @PHY_CONTEXT_CMD:
+	 * Add/modify/remove a PHY context, using &struct iwl_phy_context_cmd.
+	 */
+	PHY_CONTEXT_CMD = 0x8,
+
+	/**
+	 * @DBG_CFG: Debug configuration command.
+	 */
+	DBG_CFG = 0x9,
+
+	/**
+	 * @SCAN_ITERATION_COMPLETE_UMAC:
+	 * Firmware indicates a scan iteration completed, using
+	 * &struct iwl_umac_scan_iter_complete_notif.
+	 */
+	SCAN_ITERATION_COMPLETE_UMAC = 0xb5,
+
+	/**
+	 * @SCAN_CFG_CMD:
+	 * uses &struct iwl_scan_config_v1 or &struct iwl_scan_config
+	 */
+	SCAN_CFG_CMD = 0xc,
+
+	/**
+	 * @SCAN_REQ_UMAC: uses &struct iwl_scan_req_umac
+	 */
+	SCAN_REQ_UMAC = 0xd,
+
+	/**
+	 * @SCAN_ABORT_UMAC: uses &struct iwl_umac_scan_abort
+	 */
+	SCAN_ABORT_UMAC = 0xe,
+
+	/**
+	 * @SCAN_COMPLETE_UMAC: uses &struct iwl_umac_scan_complete
+	 */
+	SCAN_COMPLETE_UMAC = 0xf,
+
+	/**
+	 * @BA_WINDOW_STATUS_NOTIFICATION_ID:
+	 * uses &struct iwl_ba_window_status_notif
+	 */
+	BA_WINDOW_STATUS_NOTIFICATION_ID = 0x13,
+
+	/**
+	 * @ADD_STA_KEY:
+	 * &struct iwl_mvm_add_sta_key_cmd_v1 or
+	 * &struct iwl_mvm_add_sta_key_cmd.
+	 */
+	ADD_STA_KEY = 0x17,
+
+	/**
+	 * @ADD_STA:
+	 * &struct iwl_mvm_add_sta_cmd or &struct iwl_mvm_add_sta_cmd_v7.
+	 */
+	ADD_STA = 0x18,
+
+	/**
+	 * @REMOVE_STA: &struct iwl_mvm_rm_sta_cmd
+	 */
+	REMOVE_STA = 0x19,
+
+	/**
+	 * @FW_GET_ITEM_CMD: uses &struct iwl_fw_get_item_cmd
+	 */
+	FW_GET_ITEM_CMD = 0x1a,
+
+	/**
+	 * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2 or
+	 *	&struct iwl_tx_cmd_gen3,
+	 *	response in &struct iwl_mvm_tx_resp or
+	 *	&struct iwl_mvm_tx_resp_v3
+	 */
+	TX_CMD = 0x1c,
+
+	/**
+	 * @TXPATH_FLUSH: &struct iwl_tx_path_flush_cmd
+	 */
+	TXPATH_FLUSH = 0x1e,
+
+	/**
+	 * @MGMT_MCAST_KEY:
+	 * &struct iwl_mvm_mgmt_mcast_key_cmd or
+	 * &struct iwl_mvm_mgmt_mcast_key_cmd_v1
+	 */
+	MGMT_MCAST_KEY = 0x1f,
+
+	/* scheduler config */
+	/**
+	 * @SCD_QUEUE_CFG: &struct iwl_scd_txq_cfg_cmd for older hardware,
+	 *	&struct iwl_tx_queue_cfg_cmd with &struct iwl_tx_queue_cfg_rsp
+	 *	for newer (22000) hardware.
+	 */
+	SCD_QUEUE_CFG = 0x1d,
+
+	/**
+	 * @WEP_KEY: uses &struct iwl_mvm_wep_key_cmd
+	 */
+	WEP_KEY = 0x20,
+
+	/**
+	 * @SHARED_MEM_CFG:
+	 * retrieve shared memory configuration - response in
+	 * &struct iwl_shared_mem_cfg
+	 */
+	SHARED_MEM_CFG = 0x25,
+
+	/**
+	 * @TDLS_CHANNEL_SWITCH_CMD: uses &struct iwl_tdls_channel_switch_cmd
+	 */
+	TDLS_CHANNEL_SWITCH_CMD = 0x27,
+
+	/**
+	 * @TDLS_CHANNEL_SWITCH_NOTIFICATION:
+	 * uses &struct iwl_tdls_channel_switch_notif
+	 */
+	TDLS_CHANNEL_SWITCH_NOTIFICATION = 0xaa,
+
+	/**
+	 * @TDLS_CONFIG_CMD:
+	 * &struct iwl_tdls_config_cmd, response in &struct iwl_tdls_config_res
+	 */
+	TDLS_CONFIG_CMD = 0xa7,
+
+	/**
+	 * @MAC_CONTEXT_CMD: &struct iwl_mac_ctx_cmd
+	 */
+	MAC_CONTEXT_CMD = 0x28,
+
+	/**
+	 * @TIME_EVENT_CMD:
+	 * &struct iwl_time_event_cmd, response in &struct iwl_time_event_resp
+	 */
+	TIME_EVENT_CMD = 0x29, /* both CMD and response */
+
+	/**
+	 * @TIME_EVENT_NOTIFICATION: &struct iwl_time_event_notif
+	 */
+	TIME_EVENT_NOTIFICATION = 0x2a,
+
+	/**
+	 * @BINDING_CONTEXT_CMD:
+	 * &struct iwl_binding_cmd or &struct iwl_binding_cmd_v1
+	 */
+	BINDING_CONTEXT_CMD = 0x2b,
+
+	/**
+	 * @TIME_QUOTA_CMD: &struct iwl_time_quota_cmd
+	 */
+	TIME_QUOTA_CMD = 0x2c,
+
+	/**
+	 * @NON_QOS_TX_COUNTER_CMD:
+	 * command is &struct iwl_nonqos_seq_query_cmd
+	 */
+	NON_QOS_TX_COUNTER_CMD = 0x2d,
+
+	/**
+	 * @LEDS_CMD: command is &struct iwl_led_cmd
+	 */
+	LEDS_CMD = 0x48,
+
+	/**
+	 * @LQ_CMD: using &struct iwl_lq_cmd
+	 */
+	LQ_CMD = 0x4e,
+
+	/**
+	 * @FW_PAGING_BLOCK_CMD:
+	 * &struct iwl_fw_paging_cmd
+	 */
+	FW_PAGING_BLOCK_CMD = 0x4f,
+
+	/**
+	 * @SCAN_OFFLOAD_REQUEST_CMD: uses &struct iwl_scan_req_lmac
+	 */
+	SCAN_OFFLOAD_REQUEST_CMD = 0x51,
+
+	/**
+	 * @SCAN_OFFLOAD_ABORT_CMD: abort the scan - no further contents
+	 */
+	SCAN_OFFLOAD_ABORT_CMD = 0x52,
+
+	/**
+	 * @HOT_SPOT_CMD: uses &struct iwl_hs20_roc_req
+	 */
+	HOT_SPOT_CMD = 0x53,
+
+	/**
+	 * @SCAN_OFFLOAD_COMPLETE:
+	 * notification, &struct iwl_periodic_scan_complete
+	 */
+	SCAN_OFFLOAD_COMPLETE = 0x6D,
+
+	/**
+	 * @SCAN_OFFLOAD_UPDATE_PROFILES_CMD:
+	 * update scan offload (scheduled scan) profiles/blacklist/etc.
+	 */
+	SCAN_OFFLOAD_UPDATE_PROFILES_CMD = 0x6E,
+
+	/**
+	 * @MATCH_FOUND_NOTIFICATION: scan match found
+	 */
+	MATCH_FOUND_NOTIFICATION = 0xd9,
+
+	/**
+	 * @SCAN_ITERATION_COMPLETE:
+	 * uses &struct iwl_lmac_scan_complete_notif
+	 */
+	SCAN_ITERATION_COMPLETE = 0xe7,
+
+	/* Phy */
+	/**
+	 * @PHY_CONFIGURATION_CMD: &struct iwl_phy_cfg_cmd
+	 */
+	PHY_CONFIGURATION_CMD = 0x6a,
+
+	/**
+	 * @CALIB_RES_NOTIF_PHY_DB: &struct iwl_calib_res_notif_phy_db
+	 */
+	CALIB_RES_NOTIF_PHY_DB = 0x6b,
+
+	/**
+	 * @PHY_DB_CMD: &struct iwl_phy_db_cmd
+	 */
+	PHY_DB_CMD = 0x6c,
+
+	/**
+	 * @TOF_CMD: &struct iwl_tof_config_cmd
+	 */
+	TOF_CMD = 0x10,
+
+	/**
+	 * @TOF_NOTIFICATION: &struct iwl_tof_gen_resp_cmd
+	 */
+	TOF_NOTIFICATION = 0x11,
+
+	/**
+	 * @POWER_TABLE_CMD: &struct iwl_device_power_cmd
+	 */
+	POWER_TABLE_CMD = 0x77,
+
+	/**
+	 * @PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION:
+	 * &struct iwl_uapsd_misbehaving_ap_notif
+	 */
+	PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78,
+
+	/**
+	 * @LTR_CONFIG: &struct iwl_ltr_config_cmd
+	 */
+	LTR_CONFIG = 0xee,
+
+	/**
+	 * @REPLY_THERMAL_MNG_BACKOFF:
+	 * Thermal throttling command
+	 */
+	REPLY_THERMAL_MNG_BACKOFF = 0x7e,
+
+	/**
+	 * @DC2DC_CONFIG_CMD:
+	 * Set/Get DC2DC frequency tune
+	 * Command is &struct iwl_dc2dc_config_cmd,
+	 * response is &struct iwl_dc2dc_config_resp
+	 */
+	DC2DC_CONFIG_CMD = 0x83,
+
+	/**
+	 * @NVM_ACCESS_CMD: using &struct iwl_nvm_access_cmd
+	 */
+	NVM_ACCESS_CMD = 0x88,
+
+	/**
+	 * @BEACON_NOTIFICATION: &struct iwl_extended_beacon_notif
+	 */
+	BEACON_NOTIFICATION = 0x90,
+
+	/**
+	 * @BEACON_TEMPLATE_CMD:
+	 *	Uses one of &struct iwl_mac_beacon_cmd_v6,
+	 *	&struct iwl_mac_beacon_cmd_v7 or &struct iwl_mac_beacon_cmd
+	 *	depending on the device version.
+	 */
+	BEACON_TEMPLATE_CMD = 0x91,
+	/**
+	 * @TX_ANT_CONFIGURATION_CMD: &struct iwl_tx_ant_cfg_cmd
+	 */
+	TX_ANT_CONFIGURATION_CMD = 0x98,
+
+	/**
+	 * @STATISTICS_CMD: &struct iwl_statistics_cmd
+	 */
+	STATISTICS_CMD = 0x9c,
+
+	/**
+	 * @STATISTICS_NOTIFICATION:
+	 * one of &struct iwl_notif_statistics_v10,
+	 * &struct iwl_notif_statistics_v11,
+	 * &struct iwl_notif_statistics_cdb
+	 */
+	STATISTICS_NOTIFICATION = 0x9d,
+
+	/**
+	 * @EOSP_NOTIFICATION:
+	 * Notify that a service period ended,
+	 * &struct iwl_mvm_eosp_notification
+	 */
+	EOSP_NOTIFICATION = 0x9e,
+
+	/**
+	 * @REDUCE_TX_POWER_CMD:
+	 * &struct iwl_dev_tx_power_cmd_v3 or &struct iwl_dev_tx_power_cmd
+	 */
+	REDUCE_TX_POWER_CMD = 0x9f,
+
+	/**
+	 * @CARD_STATE_NOTIFICATION:
+	 * Card state (RF/CT kill) notification,
+	 * uses &struct iwl_card_state_notif
+	 */
+	CARD_STATE_NOTIFICATION = 0xa1,
+
+	/**
+	 * @MISSED_BEACONS_NOTIFICATION: &struct iwl_missed_beacons_notif
+	 */
+	MISSED_BEACONS_NOTIFICATION = 0xa2,
+
+	/**
+	 * @MAC_PM_POWER_TABLE: using &struct iwl_mac_power_cmd
+	 */
+	MAC_PM_POWER_TABLE = 0xa9,
+
+	/**
+	 * @MFUART_LOAD_NOTIFICATION: &struct iwl_mfuart_load_notif
+	 */
+	MFUART_LOAD_NOTIFICATION = 0xb1,
+
+	/**
+	 * @RSS_CONFIG_CMD: &struct iwl_rss_config_cmd
+	 */
+	RSS_CONFIG_CMD = 0xb3,
+
+	/**
+	 * @REPLY_RX_PHY_CMD: &struct iwl_rx_phy_info
+	 */
+	REPLY_RX_PHY_CMD = 0xc0,
+
+	/**
+	 * @REPLY_RX_MPDU_CMD:
+	 * &struct iwl_rx_mpdu_res_start or &struct iwl_rx_mpdu_desc
+	 */
+	REPLY_RX_MPDU_CMD = 0xc1,
+
+	/**
+	 * @FRAME_RELEASE:
+	 * Frame release (reorder helper) notification, uses
+	 * &struct iwl_frame_release
+	 */
+	FRAME_RELEASE = 0xc3,
+
+	/**
+	 * @BA_NOTIF:
+	 * BlockAck notification, uses &struct iwl_mvm_compressed_ba_notif
+	 * or &struct iwl_mvm_ba_notif depending on the HW
+	 */
+	BA_NOTIF = 0xc5,
+
+	/* Location Aware Regulatory */
+	/**
+	 * @MCC_UPDATE_CMD: using &struct iwl_mcc_update_cmd
+	 */
+	MCC_UPDATE_CMD = 0xc8,
+
+	/**
+	 * @MCC_CHUB_UPDATE_CMD: using &struct iwl_mcc_chub_notif
+	 */
+	MCC_CHUB_UPDATE_CMD = 0xc9,
+
+	/**
+	 * @MARKER_CMD: trace marker command, uses &struct iwl_mvm_marker
+	 * with &struct iwl_mvm_marker_rsp
+	 */
+	MARKER_CMD = 0xcb,
+
+	/**
+	 * @BT_PROFILE_NOTIFICATION: &struct iwl_bt_coex_profile_notif
+	 */
+	BT_PROFILE_NOTIFICATION = 0xce,
+
+	/**
+	 * @BT_CONFIG: &struct iwl_bt_coex_cmd
+	 */
+	BT_CONFIG = 0x9b,
+
+	/**
+	 * @BT_COEX_UPDATE_REDUCED_TXP:
+	 * &struct iwl_bt_coex_reduced_txp_update_cmd
+	 */
+	BT_COEX_UPDATE_REDUCED_TXP = 0x5c,
+
+	/**
+	 * @BT_COEX_CI: &struct iwl_bt_coex_ci_cmd
+	 */
+	BT_COEX_CI = 0x5d,
+
+	/**
+	 * @REPLY_SF_CFG_CMD: &struct iwl_sf_cfg_cmd
+	 */
+	REPLY_SF_CFG_CMD = 0xd1,
+	/**
+	 * @REPLY_BEACON_FILTERING_CMD: &struct iwl_beacon_filter_cmd
+	 */
+	REPLY_BEACON_FILTERING_CMD = 0xd2,
+
+	/**
+	 * @DTS_MEASUREMENT_NOTIFICATION:
+	 * &struct iwl_dts_measurement_notif_v1 or
+	 * &struct iwl_dts_measurement_notif_v2
+	 */
+	DTS_MEASUREMENT_NOTIFICATION = 0xdd,
+
+	/**
+	 * @LDBG_CONFIG_CMD: configure continuous trace recording
+	 */
+	LDBG_CONFIG_CMD = 0xf6,
+
+	/**
+	 * @DEBUG_LOG_MSG: Debugging log data from firmware
+	 */
+	DEBUG_LOG_MSG = 0xf7,
+
+	/**
+	 * @BCAST_FILTER_CMD: &struct iwl_bcast_filter_cmd
+	 */
+	BCAST_FILTER_CMD = 0xcf,
+
+	/**
+	 * @MCAST_FILTER_CMD: &struct iwl_mcast_filter_cmd
+	 */
+	MCAST_FILTER_CMD = 0xd0,
+
+	/**
+	 * @D3_CONFIG_CMD: &struct iwl_d3_manager_config
+	 */
+	D3_CONFIG_CMD = 0xd3,
+
+	/**
+	 * @PROT_OFFLOAD_CONFIG_CMD: Depending on firmware, uses one of
+	 * &struct iwl_proto_offload_cmd_v1, &struct iwl_proto_offload_cmd_v2,
+	 * &struct iwl_proto_offload_cmd_v3_small,
+	 * &struct iwl_proto_offload_cmd_v3_large
+	 */
+	PROT_OFFLOAD_CONFIG_CMD = 0xd4,
+
+	/**
+	 * @OFFLOADS_QUERY_CMD:
+	 * No data in command, response in &struct iwl_wowlan_status
+	 */
+	OFFLOADS_QUERY_CMD = 0xd5,
+
+	/**
+	 * @REMOTE_WAKE_CONFIG_CMD: &struct iwl_wowlan_remote_wake_config
+	 */
+	REMOTE_WAKE_CONFIG_CMD = 0xd6,
+
+	/**
+	 * @D0I3_END_CMD: End D0i3/D3 state, no command data
+	 */
+	D0I3_END_CMD = 0xed,
+
+	/**
+	 * @WOWLAN_PATTERNS: &struct iwl_wowlan_patterns_cmd
+	 */
+	WOWLAN_PATTERNS = 0xe0,
+
+	/**
+	 * @WOWLAN_CONFIGURATION: &struct iwl_wowlan_config_cmd
+	 */
+	WOWLAN_CONFIGURATION = 0xe1,
+
+	/**
+	 * @WOWLAN_TSC_RSC_PARAM: &struct iwl_wowlan_rsc_tsc_params_cmd
+	 */
+	WOWLAN_TSC_RSC_PARAM = 0xe2,
+
+	/**
+	 * @WOWLAN_TKIP_PARAM: &struct iwl_wowlan_tkip_params_cmd
+	 */
+	WOWLAN_TKIP_PARAM = 0xe3,
+
+	/**
+	 * @WOWLAN_KEK_KCK_MATERIAL: &struct iwl_wowlan_kek_kck_material_cmd
+	 */
+	WOWLAN_KEK_KCK_MATERIAL = 0xe4,
+
+	/**
+	 * @WOWLAN_GET_STATUSES: response in &struct iwl_wowlan_status
+	 */
+	WOWLAN_GET_STATUSES = 0xe5,
+
+	/**
+	 * @SCAN_OFFLOAD_PROFILES_QUERY_CMD:
+	 * No command data, response is &struct iwl_scan_offload_profiles_query
+	 */
+	SCAN_OFFLOAD_PROFILES_QUERY_CMD = 0x56,
+};
+
+/**
+ * enum iwl_system_subcmd_ids - system group command IDs
+ */
+enum iwl_system_subcmd_ids {
+	/**
+	 * @SHARED_MEM_CFG_CMD:
+	 * response in &struct iwl_shared_mem_cfg or
+	 * &struct iwl_shared_mem_cfg_v2
+	 */
+	SHARED_MEM_CFG_CMD = 0x0,
+
+	/**
+	 * @INIT_EXTENDED_CFG_CMD: &struct iwl_init_extended_cfg_cmd
+	 */
+	INIT_EXTENDED_CFG_CMD = 0x03,
+};
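+/*
+ * Addressing sketch (editorial): subcommands in non-legacy groups are sent
+ * with a wide id built by WIDE_ID() from cmdhdr.h, e.g.:
+ *
+ *	WIDE_ID(SYSTEM_GROUP, INIT_EXTENDED_CFG_CMD)
+ *
+ * whereas legacy commands are addressed by their bare opcode.
+ */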
+
+#endif /* __iwl_fw_api_commands_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/config.h b/drivers/net/wireless/intel/iwlwifi/fw/api/config.h
new file mode 100644
index 0000000..7f645b6
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/config.h
@@ -0,0 +1,184 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_fw_api_config_h__
+#define __iwl_fw_api_config_h__
+
+/*
+ * struct iwl_dqa_enable_cmd
+ * @cmd_queue: the TXQ number of the command queue
+ */
+struct iwl_dqa_enable_cmd {
+	__le32 cmd_queue;
+} __packed; /* DQA_CONTROL_CMD_API_S_VER_1 */
+
+/*
+ * struct iwl_tx_ant_cfg_cmd
+ * @valid: valid antenna configuration
+ */
+struct iwl_tx_ant_cfg_cmd {
+	__le32 valid;
+} __packed;
+
+/**
+ * struct iwl_calib_ctrl - Calibration control struct.
+ * Sent as part of the phy configuration command.
+ * @flow_trigger: bitmap for which calibrations to perform according to
+ *		flow triggers, using &enum iwl_calib_cfg
+ * @event_trigger: bitmap for which calibrations to perform according to
+ *		event triggers, using &enum iwl_calib_cfg
+ */
+struct iwl_calib_ctrl {
+	__le32 flow_trigger;
+	__le32 event_trigger;
+} __packed;
+
+/* This enum defines the bitmap of various calibrations to enable in both
+ * init ucode and runtime ucode through CALIBRATION_CFG_CMD.
+ */
+enum iwl_calib_cfg {
+	IWL_CALIB_CFG_XTAL_IDX			= BIT(0),
+	IWL_CALIB_CFG_TEMPERATURE_IDX		= BIT(1),
+	IWL_CALIB_CFG_VOLTAGE_READ_IDX		= BIT(2),
+	IWL_CALIB_CFG_PAPD_IDX			= BIT(3),
+	IWL_CALIB_CFG_TX_PWR_IDX		= BIT(4),
+	IWL_CALIB_CFG_DC_IDX			= BIT(5),
+	IWL_CALIB_CFG_BB_FILTER_IDX		= BIT(6),
+	IWL_CALIB_CFG_LO_LEAKAGE_IDX		= BIT(7),
+	IWL_CALIB_CFG_TX_IQ_IDX			= BIT(8),
+	IWL_CALIB_CFG_TX_IQ_SKEW_IDX		= BIT(9),
+	IWL_CALIB_CFG_RX_IQ_IDX			= BIT(10),
+	IWL_CALIB_CFG_RX_IQ_SKEW_IDX		= BIT(11),
+	IWL_CALIB_CFG_SENSITIVITY_IDX		= BIT(12),
+	IWL_CALIB_CFG_CHAIN_NOISE_IDX		= BIT(13),
+	IWL_CALIB_CFG_DISCONNECTED_ANT_IDX	= BIT(14),
+	IWL_CALIB_CFG_ANT_COUPLING_IDX		= BIT(15),
+	IWL_CALIB_CFG_DAC_IDX			= BIT(16),
+	IWL_CALIB_CFG_ABS_IDX			= BIT(17),
+	IWL_CALIB_CFG_AGC_IDX			= BIT(18),
+};
+
+/**
+ * struct iwl_phy_cfg_cmd - Phy configuration command
+ * @phy_cfg: PHY configuration value, uses &enum iwl_fw_phy_cfg
+ * @calib_control: calibration control data
+ */
+struct iwl_phy_cfg_cmd {
+	__le32	phy_cfg;
+	struct iwl_calib_ctrl calib_control;
+} __packed;
+
+#define PHY_CFG_RADIO_TYPE	(BIT(0) | BIT(1))
+#define PHY_CFG_RADIO_STEP	(BIT(2) | BIT(3))
+#define PHY_CFG_RADIO_DASH	(BIT(4) | BIT(5))
+#define PHY_CFG_PRODUCT_NUMBER	(BIT(6) | BIT(7))
+#define PHY_CFG_TX_CHAIN_A	BIT(8)
+#define PHY_CFG_TX_CHAIN_B	BIT(9)
+#define PHY_CFG_TX_CHAIN_C	BIT(10)
+#define PHY_CFG_RX_CHAIN_A	BIT(12)
+#define PHY_CFG_RX_CHAIN_B	BIT(13)
+#define PHY_CFG_RX_CHAIN_C	BIT(14)
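+/*
+ * Fill sketch (editorial): the PHY configuration command combines the
+ * radio/chain bits above with the calibration bitmaps, e.g.:
+ *
+ *	struct iwl_phy_cfg_cmd cmd = {
+ *		.phy_cfg = cpu_to_le32(PHY_CFG_TX_CHAIN_A |
+ *				       PHY_CFG_RX_CHAIN_A),
+ *		.calib_control.flow_trigger =
+ *			cpu_to_le32(IWL_CALIB_CFG_XTAL_IDX),
+ *	};
+ *
+ * with the real values normally taken from the firmware image's default
+ * PHY configuration and the valid TX/RX chains.
+ */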
+
+/*
+ * enum iwl_dc2dc_config_id - flag ids
+ *
+ * Ids of dc2dc configuration flags
+ */
+enum iwl_dc2dc_config_id {
+	DCDC_LOW_POWER_MODE_MSK_SET  = 0x1, /* not used */
+	DCDC_FREQ_TUNE_SET = 0x2,
+}; /* MARKER_ID_API_E_VER_1 */
+
+/**
+ * struct iwl_dc2dc_config_cmd - configure dc2dc values
+ *
+ * (DC2DC_CONFIG_CMD = 0x83)
+ *
+ * Set/Get & configure dc2dc values.
+ * The command always returns the current dc2dc values.
+ *
+ * @flags: set/get dc2dc
+ * @enable_low_power_mode: not used.
+ * @dc2dc_freq_tune0: frequency divider - digital domain
+ * @dc2dc_freq_tune1: frequency divider - analog domain
+ */
+struct iwl_dc2dc_config_cmd {
+	__le32 flags;
+	__le32 enable_low_power_mode; /* not used */
+	__le32 dc2dc_freq_tune0;
+	__le32 dc2dc_freq_tune1;
+} __packed; /* DC2DC_CONFIG_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_dc2dc_config_resp - response for iwl_dc2dc_config_cmd
+ *
+ * Current dc2dc values returned by the FW.
+ *
+ * @dc2dc_freq_tune0: frequency divider - digital domain
+ * @dc2dc_freq_tune1: frequency divider - analog domain
+ */
+struct iwl_dc2dc_config_resp {
+	__le32 dc2dc_freq_tune0;
+	__le32 dc2dc_freq_tune1;
+} __packed; /* DC2DC_CONFIG_RESP_API_S_VER_1 */
+
+#endif /* __iwl_fw_api_config_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/context.h b/drivers/net/wireless/intel/iwlwifi/fw/api/context.h
new file mode 100644
index 0000000..2f0d7c4
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/context.h
@@ -0,0 +1,94 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_fw_api_context_h__
+#define __iwl_fw_api_context_h__
+
+/**
+ * enum iwl_ctxt_id_and_color - ID and color fields in context dword
+ * @FW_CTXT_ID_POS: position of the ID
+ * @FW_CTXT_ID_MSK: mask of the ID
+ * @FW_CTXT_COLOR_POS: position of the color
+ * @FW_CTXT_COLOR_MSK: mask of the color
+ * @FW_CTXT_INVALID: value used to indicate unused/invalid
+ */
+enum iwl_ctxt_id_and_color {
+	FW_CTXT_ID_POS		= 0,
+	FW_CTXT_ID_MSK		= 0xff << FW_CTXT_ID_POS,
+	FW_CTXT_COLOR_POS	= 8,
+	FW_CTXT_COLOR_MSK	= 0xff << FW_CTXT_COLOR_POS,
+	FW_CTXT_INVALID		= 0xffffffff,
+};
+
+#define FW_CMD_ID_AND_COLOR(_id, _color) (((_id) << FW_CTXT_ID_POS) |\
+					  ((_color) << FW_CTXT_COLOR_POS))
+
+/* Possible actions on PHYs, MACs and Bindings */
+enum iwl_ctxt_action {
+	FW_CTXT_ACTION_STUB = 0,
+	FW_CTXT_ACTION_ADD,
+	FW_CTXT_ACTION_MODIFY,
+	FW_CTXT_ACTION_REMOVE,
+	FW_CTXT_ACTION_NUM
+}; /* COMMON_CONTEXT_ACTION_API_E_VER_1 */
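+/*
+ * Pack/unpack sketch (editorial): FW_CMD_ID_AND_COLOR() builds the dword
+ * and the masks above recover its parts, e.g.:
+ *
+ *	__le32 val = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id, mac_color));
+ *	u8 id = (le32_to_cpu(val) & FW_CTXT_ID_MSK) >> FW_CTXT_ID_POS;
+ *
+ * where mac_id/mac_color stand in for the actual context values.
+ */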
+
+#endif /* __iwl_fw_api_context_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
new file mode 100644
index 0000000..57f4bc2
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
@@ -0,0 +1,466 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __iwl_fw_api_d3_h__
+#define __iwl_fw_api_d3_h__
+
+/**
+ * enum iwl_d3_wakeup_flags - D3 manager wakeup flags
+ * @IWL_WAKEUP_D3_CONFIG_FW_ERROR: wake up on firmware sysassert
+ */
+enum iwl_d3_wakeup_flags {
+	IWL_WAKEUP_D3_CONFIG_FW_ERROR = BIT(0),
+}; /* D3_MANAGER_WAKEUP_CONFIG_API_E_VER_3 */
+
+/**
+ * struct iwl_d3_manager_config - D3 manager configuration command
+ * @min_sleep_time: minimum sleep time (in usec)
+ * @wakeup_flags: wakeup flags, see &enum iwl_d3_wakeup_flags
+ * @wakeup_host_timer: force wakeup after this many seconds
+ *
+ * The structure is used for the D3_CONFIG_CMD command.
+ */
+struct iwl_d3_manager_config {
+	__le32 min_sleep_time;
+	__le32 wakeup_flags;
+	__le32 wakeup_host_timer;
+} __packed; /* D3_MANAGER_CONFIG_CMD_S_VER_4 */
+
+
+/* TODO: OFFLOADS_QUERY_API_S_VER_1 */
+
+/**
+ * enum iwl_proto_offloads - enabled protocol offloads
+ * @IWL_D3_PROTO_OFFLOAD_ARP: ARP data is enabled
+ * @IWL_D3_PROTO_OFFLOAD_NS: NS (Neighbor Solicitation) is enabled
+ * @IWL_D3_PROTO_IPV4_VALID: IPv4 data is valid
+ * @IWL_D3_PROTO_IPV6_VALID: IPv6 data is valid
+ */
+enum iwl_proto_offloads {
+	IWL_D3_PROTO_OFFLOAD_ARP = BIT(0),
+	IWL_D3_PROTO_OFFLOAD_NS = BIT(1),
+	IWL_D3_PROTO_IPV4_VALID = BIT(2),
+	IWL_D3_PROTO_IPV6_VALID = BIT(3),
+};
+
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1	2
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2	6
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L	12
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S	4
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX	12
+
+#define IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L	4
+#define IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S	2
+
+/**
+ * struct iwl_proto_offload_cmd_common - ARP/NS offload common part
+ * @enabled: enable flags
+ * @remote_ipv4_addr: remote address to answer to (or zero if all)
+ * @host_ipv4_addr: our IPv4 address to respond to queries for
+ * @arp_mac_addr: our MAC address for ARP responses
+ * @reserved: unused
+ */
+struct iwl_proto_offload_cmd_common {
+	__le32 enabled;
+	__be32 remote_ipv4_addr;
+	__be32 host_ipv4_addr;
+	u8 arp_mac_addr[ETH_ALEN];
+	__le16 reserved;
+} __packed;
+
+/**
+ * struct iwl_proto_offload_cmd_v1 - ARP/NS offload configuration
+ * @common: common/IPv4 configuration
+ * @remote_ipv6_addr: remote address to answer to (or zero if all)
+ * @solicited_node_ipv6_addr: broken -- solicited node address exists
+ *	for each target address
+ * @target_ipv6_addr: our target addresses
+ * @ndp_mac_addr: neighbor solicitation response MAC address
+ * @reserved2: reserved
+ */
+struct iwl_proto_offload_cmd_v1 {
+	struct iwl_proto_offload_cmd_common common;
+	u8 remote_ipv6_addr[16];
+	u8 solicited_node_ipv6_addr[16];
+	u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1][16];
+	u8 ndp_mac_addr[ETH_ALEN];
+	__le16 reserved2;
+} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_1 */
+
+/**
+ * struct iwl_proto_offload_cmd_v2 - ARP/NS offload configuration
+ * @common: common/IPv4 configuration
+ * @remote_ipv6_addr: remote address to answer to (or zero if all)
+ * @solicited_node_ipv6_addr: broken -- solicited node address exists
+ *	for each target address
+ * @target_ipv6_addr: our target addresses
+ * @ndp_mac_addr: neighbor solicitation response MAC address
+ * @num_valid_ipv6_addrs: number of valid IPv6 addresses
+ * @reserved2: reserved
+ */
+struct iwl_proto_offload_cmd_v2 {
+	struct iwl_proto_offload_cmd_common common;
+	u8 remote_ipv6_addr[16];
+	u8 solicited_node_ipv6_addr[16];
+	u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2][16];
+	u8 ndp_mac_addr[ETH_ALEN];
+	u8 num_valid_ipv6_addrs;
+	u8 reserved2[3];
+} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_2 */
+
+struct iwl_ns_config {
+	struct in6_addr source_ipv6_addr;
+	struct in6_addr dest_ipv6_addr;
+	u8 target_mac_addr[ETH_ALEN];
+	__le16 reserved;
+} __packed; /* NS_OFFLOAD_CONFIG */
+
+struct iwl_targ_addr {
+	struct in6_addr addr;
+	__le32 config_num;
+} __packed; /* TARGET_IPV6_ADDRESS */
+
+/**
+ * struct iwl_proto_offload_cmd_v3_small - ARP/NS offload configuration
+ * @common: common/IPv4 configuration
+ * @num_valid_ipv6_addrs: number of valid IPv6 addresses
+ * @targ_addrs: target IPv6 addresses
+ * @ns_config: NS offload configurations
+ */
+struct iwl_proto_offload_cmd_v3_small {
+	struct iwl_proto_offload_cmd_common common;
+	__le32 num_valid_ipv6_addrs;
+	struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S];
+	struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S];
+} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */
+
+/**
+ * struct iwl_proto_offload_cmd_v3_large - ARP/NS offload configuration
+ * @common: common/IPv4 configuration
+ * @num_valid_ipv6_addrs: number of valid IPv6 addresses
+ * @targ_addrs: target IPv6 addresses
+ * @ns_config: NS offload configurations
+ */
+struct iwl_proto_offload_cmd_v3_large {
+	struct iwl_proto_offload_cmd_common common;
+	__le32 num_valid_ipv6_addrs;
+	struct iwl_targ_addr targ_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L];
+	struct iwl_ns_config ns_config[IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L];
+} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_3 */
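+
+/*
+ * Illustrative sketch, not part of the firmware API: the small and large
+ * v3 layouts differ only in their array sizes, so a command buffer could
+ * be sized by address count as below. Real drivers typically pick the
+ * variant from firmware capability flags instead; n_addrs is a made-up
+ * variable.
+ *
+ *	size_t cmd_size =
+ *		n_addrs <= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S ?
+ *			sizeof(struct iwl_proto_offload_cmd_v3_small) :
+ *			sizeof(struct iwl_proto_offload_cmd_v3_large);
+ */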
+
+/*
+ * WOWLAN_PATTERNS
+ */
+#define IWL_WOWLAN_MIN_PATTERN_LEN	16
+#define IWL_WOWLAN_MAX_PATTERN_LEN	128
+
+struct iwl_wowlan_pattern {
+	u8 mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
+	u8 pattern[IWL_WOWLAN_MAX_PATTERN_LEN];
+	u8 mask_size;
+	u8 pattern_size;
+	__le16 reserved;
+} __packed; /* WOWLAN_PATTERN_API_S_VER_1 */
+
+#define IWL_WOWLAN_MAX_PATTERNS	20
+
+struct iwl_wowlan_patterns_cmd {
+	__le32 n_patterns;
+	struct iwl_wowlan_pattern patterns[];
+} __packed; /* WOWLAN_PATTERN_ARRAY_API_S_VER_1 */
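+
+/*
+ * Illustrative sketch, not part of the firmware API: sizing the variable
+ * length patterns command for n patterns; n and cmd are made-up names.
+ *
+ *	size_t len = sizeof(struct iwl_wowlan_patterns_cmd) +
+ *		     n * sizeof(struct iwl_wowlan_pattern);
+ *	struct iwl_wowlan_patterns_cmd *cmd = kzalloc(len, GFP_KERNEL);
+ *
+ *	if (cmd)
+ *		cmd->n_patterns = cpu_to_le32(n);
+ */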
+
+enum iwl_wowlan_wakeup_filters {
+	IWL_WOWLAN_WAKEUP_MAGIC_PACKET			= BIT(0),
+	IWL_WOWLAN_WAKEUP_PATTERN_MATCH			= BIT(1),
+	IWL_WOWLAN_WAKEUP_BEACON_MISS			= BIT(2),
+	IWL_WOWLAN_WAKEUP_LINK_CHANGE			= BIT(3),
+	IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL		= BIT(4),
+	IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ			= BIT(5),
+	IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE		= BIT(6),
+	IWL_WOWLAN_WAKEUP_ENABLE_NET_DETECT		= BIT(7),
+	IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT		= BIT(8),
+	IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS		= BIT(9),
+	IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE	= BIT(10),
+	IWL_WOWLAN_WAKEUP_REMOTE_TCP_EXTERNAL		= BIT(11),
+	IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET		= BIT(12),
+	IWL_WOWLAN_WAKEUP_IOAC_MAGIC_PACKET		= BIT(13),
+	IWL_WOWLAN_WAKEUP_HOST_TIMER			= BIT(14),
+	IWL_WOWLAN_WAKEUP_RX_FRAME			= BIT(15),
+	IWL_WOWLAN_WAKEUP_BCN_FILTERING			= BIT(16),
+}; /* WOWLAN_WAKEUP_FILTER_API_E_VER_4 */
+
+enum iwl_wowlan_flags {
+	IS_11W_ASSOC		= BIT(0),
+	ENABLE_L3_FILTERING	= BIT(1),
+	ENABLE_NBNS_FILTERING	= BIT(2),
+	ENABLE_DHCP_FILTERING	= BIT(3),
+	ENABLE_STORE_BEACON	= BIT(4),
+};
+
+/**
+ * struct iwl_wowlan_config_cmd - WoWLAN configuration
+ * @wakeup_filter: filter from &enum iwl_wowlan_wakeup_filters
+ * @non_qos_seq: non-QoS sequence counter to use next
+ * @qos_seq: QoS sequence counters to use next
+ * @wowlan_ba_teardown_tids: bitmap of BA sessions to tear down
+ * @is_11n_connection: indicates HT connection
+ * @offloading_tid: TID reserved for firmware use
+ * @flags: extra flags, see &enum iwl_wowlan_flags
+ * @reserved: reserved
+ */
+struct iwl_wowlan_config_cmd {
+	__le32 wakeup_filter;
+	__le16 non_qos_seq;
+	__le16 qos_seq[8];
+	u8 wowlan_ba_teardown_tids;
+	u8 is_11n_connection;
+	u8 offloading_tid;
+	u8 flags;
+	u8 reserved[2];
+} __packed; /* WOWLAN_CONFIG_API_S_VER_4 */
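+
+/*
+ * Illustrative sketch, not part of the firmware API: a minimal WoWLAN
+ * configuration that wakes on magic packets and pattern matches; the
+ * values are examples only.
+ *
+ *	struct iwl_wowlan_config_cmd cfg = {
+ *		.wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET |
+ *					     IWL_WOWLAN_WAKEUP_PATTERN_MATCH),
+ *		.is_11n_connection = 1,
+ *	};
+ */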
+
+/*
+ * WOWLAN_TSC_RSC_PARAMS
+ */
+#define IWL_NUM_RSC	16
+
+struct tkip_sc {
+	__le16 iv16;
+	__le16 pad;
+	__le32 iv32;
+} __packed; /* TKIP_SC_API_U_VER_1 */
+
+struct iwl_tkip_rsc_tsc {
+	struct tkip_sc unicast_rsc[IWL_NUM_RSC];
+	struct tkip_sc multicast_rsc[IWL_NUM_RSC];
+	struct tkip_sc tsc;
+} __packed; /* TKIP_TSC_RSC_API_S_VER_1 */
+
+struct aes_sc {
+	__le64 pn;
+} __packed; /* TKIP_AES_SC_API_U_VER_1 */
+
+struct iwl_aes_rsc_tsc {
+	struct aes_sc unicast_rsc[IWL_NUM_RSC];
+	struct aes_sc multicast_rsc[IWL_NUM_RSC];
+	struct aes_sc tsc;
+} __packed; /* AES_TSC_RSC_API_S_VER_1 */
+
+union iwl_all_tsc_rsc {
+	struct iwl_tkip_rsc_tsc tkip;
+	struct iwl_aes_rsc_tsc aes;
+}; /* ALL_TSC_RSC_API_S_VER_2 */
+
+struct iwl_wowlan_rsc_tsc_params_cmd {
+	union iwl_all_tsc_rsc all_tsc_rsc;
+} __packed; /* ALL_TSC_RSC_API_S_VER_2 */
+
+#define IWL_MIC_KEY_SIZE	8
+struct iwl_mic_keys {
+	u8 tx[IWL_MIC_KEY_SIZE];
+	u8 rx_unicast[IWL_MIC_KEY_SIZE];
+	u8 rx_mcast[IWL_MIC_KEY_SIZE];
+} __packed; /* MIC_KEYS_API_S_VER_1 */
+
+#define IWL_P1K_SIZE		5
+struct iwl_p1k_cache {
+	__le16 p1k[IWL_P1K_SIZE];
+} __packed;
+
+#define IWL_NUM_RX_P1K_CACHE	2
+
+struct iwl_wowlan_tkip_params_cmd {
+	struct iwl_mic_keys mic_keys;
+	struct iwl_p1k_cache tx;
+	struct iwl_p1k_cache rx_uni[IWL_NUM_RX_P1K_CACHE];
+	struct iwl_p1k_cache rx_multi[IWL_NUM_RX_P1K_CACHE];
+} __packed; /* WOWLAN_TKIP_SETTING_API_S_VER_1 */
+
+#define IWL_KCK_MAX_SIZE	32
+#define IWL_KEK_MAX_SIZE	32
+
+struct iwl_wowlan_kek_kck_material_cmd {
+	u8	kck[IWL_KCK_MAX_SIZE];
+	u8	kek[IWL_KEK_MAX_SIZE];
+	__le16	kck_len;
+	__le16	kek_len;
+	__le64	replay_ctr;
+} __packed; /* KEK_KCK_MATERIAL_API_S_VER_2 */
+
+#define RF_KILL_INDICATOR_FOR_WOWLAN	0x87
+
+enum iwl_wowlan_rekey_status {
+	IWL_WOWLAN_REKEY_POST_REKEY = 0,
+	IWL_WOWLAN_REKEY_WHILE_REKEY = 1,
+}; /* WOWLAN_REKEY_STATUS_API_E_VER_1 */
+
+enum iwl_wowlan_wakeup_reason {
+	IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS			= 0,
+	IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET			= BIT(0),
+	IWL_WOWLAN_WAKEUP_BY_PATTERN				= BIT(1),
+	IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON	= BIT(2),
+	IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH		= BIT(3),
+	IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE			= BIT(4),
+	IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED			= BIT(5),
+	IWL_WOWLAN_WAKEUP_BY_UCODE_ERROR			= BIT(6),
+	IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST			= BIT(7),
+	IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE			= BIT(8),
+	IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS			= BIT(9),
+	IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE		= BIT(10),
+	IWL_WOWLAN_WAKEUP_BY_REM_WAKE_TCP_EXTERNAL		= BIT(11),
+	IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET		= BIT(12),
+	IWL_WOWLAN_WAKEUP_BY_IOAC_MAGIC_PACKET			= BIT(13),
+	IWL_WOWLAN_WAKEUP_BY_D3_WAKEUP_HOST_TIMER		= BIT(14),
+	IWL_WOWLAN_WAKEUP_BY_RXFRAME_FILTERED_IN		= BIT(15),
+	IWL_WOWLAN_WAKEUP_BY_BEACON_FILTERED_IN			= BIT(16),
+}; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */
+
+struct iwl_wowlan_gtk_status {
+	u8 key_index;
+	u8 reserved[3];
+	u8 decrypt_key[16];
+	u8 tkip_mic_key[8];
+	struct iwl_wowlan_rsc_tsc_params_cmd rsc;
+} __packed; /* WOWLAN_GTK_MATERIAL_VER_1 */
+
+/**
+ * struct iwl_wowlan_status - WoWLAN status
+ * @gtk: GTK data
+ * @replay_ctr: GTK rekey replay counter
+ * @pattern_number: number of the matched pattern
+ * @non_qos_seq_ctr: non-QoS sequence counter to use next
+ * @qos_seq_ctr: QoS sequence counters to use next
+ * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
+ * @num_of_gtk_rekeys: number of GTK rekeys
+ * @transmitted_ndps: number of transmitted neighbor discovery packets
+ * @received_beacons: number of received beacons
+ * @wake_packet_length: wakeup packet length
+ * @wake_packet_bufsize: wakeup packet buffer size
+ * @wake_packet: wakeup packet
+ */
+struct iwl_wowlan_status {
+	struct iwl_wowlan_gtk_status gtk;
+	__le64 replay_ctr;
+	__le16 pattern_number;
+	__le16 non_qos_seq_ctr;
+	__le16 qos_seq_ctr[8];
+	__le32 wakeup_reasons;
+	__le32 num_of_gtk_rekeys;
+	__le32 transmitted_ndps;
+	__le32 received_beacons;
+	__le32 wake_packet_length;
+	__le32 wake_packet_bufsize;
+	u8 wake_packet[]; /* can be truncated from _length to _bufsize */
+} __packed; /* WOWLAN_STATUSES_API_S_VER_6 */
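+
+/*
+ * Illustrative sketch, not part of the firmware API: since @wake_packet
+ * may be truncated, only @wake_packet_bufsize bytes are present in the
+ * notification while @wake_packet_length reports the original size.
+ *
+ *	u32 present = le32_to_cpu(status->wake_packet_bufsize);
+ *	u32 original = le32_to_cpu(status->wake_packet_length);
+ */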
+
+#define IWL_WOWLAN_TCP_MAX_PACKET_LEN		64
+#define IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN	128
+#define IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS	2048
+
+struct iwl_tcp_packet_info {
+	__le16 tcp_pseudo_header_checksum;
+	__le16 tcp_payload_length;
+} __packed; /* TCP_PACKET_INFO_API_S_VER_2 */
+
+struct iwl_tcp_packet {
+	struct iwl_tcp_packet_info info;
+	u8 rx_mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
+	u8 data[IWL_WOWLAN_TCP_MAX_PACKET_LEN];
+} __packed; /* TCP_PROTOCOL_PACKET_API_S_VER_1 */
+
+struct iwl_remote_wake_packet {
+	struct iwl_tcp_packet_info info;
+	u8 rx_mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
+	u8 data[IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN];
+} __packed; /* TCP_PROTOCOL_PACKET_API_S_VER_1 */
+
+struct iwl_wowlan_remote_wake_config {
+	__le32 connection_max_time; /* unused */
+	/* TCP_PROTOCOL_CONFIG_API_S_VER_1 */
+	u8 max_syn_retries;
+	u8 max_data_retries;
+	u8 tcp_syn_ack_timeout;
+	u8 tcp_ack_timeout;
+
+	struct iwl_tcp_packet syn_tx;
+	struct iwl_tcp_packet synack_rx;
+	struct iwl_tcp_packet keepalive_ack_rx;
+	struct iwl_tcp_packet fin_tx;
+
+	struct iwl_remote_wake_packet keepalive_tx;
+	struct iwl_remote_wake_packet wake_rx;
+
+	/* REMOTE_WAKE_OFFSET_INFO_API_S_VER_1 */
+	u8 sequence_number_offset;
+	u8 sequence_number_length;
+	u8 token_offset;
+	u8 token_length;
+	/* REMOTE_WAKE_PROTOCOL_PARAMS_API_S_VER_1 */
+	__le32 initial_sequence_number;
+	__le16 keepalive_interval;
+	__le16 num_tokens;
+	u8 tokens[IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS];
+} __packed; /* REMOTE_WAKE_CONFIG_API_S_VER_2 */
+
+/* TODO: NetDetect API */
+
+#endif /* __iwl_fw_api_d3_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
new file mode 100644
index 0000000..59b3c6e
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -0,0 +1,149 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_fw_api_datapath_h__
+#define __iwl_fw_api_datapath_h__
+
+/**
+ * enum iwl_data_path_subcmd_ids - data path group commands
+ */
+enum iwl_data_path_subcmd_ids {
+	/**
+	 * @DQA_ENABLE_CMD: &struct iwl_dqa_enable_cmd
+	 */
+	DQA_ENABLE_CMD = 0x0,
+
+	/**
+	 * @UPDATE_MU_GROUPS_CMD: &struct iwl_mu_group_mgmt_cmd
+	 */
+	UPDATE_MU_GROUPS_CMD = 0x1,
+
+	/**
+	 * @TRIGGER_RX_QUEUES_NOTIF_CMD: &struct iwl_rxq_sync_cmd
+	 */
+	TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2,
+
+	/**
+	 * @STA_HE_CTXT_CMD: &struct iwl_he_sta_context_cmd
+	 */
+	STA_HE_CTXT_CMD = 0x7,
+
+	/**
+	 * @RFH_QUEUE_CONFIG_CMD: &struct iwl_rfh_queue_config
+	 */
+	RFH_QUEUE_CONFIG_CMD = 0xD,
+
+	/**
+	 * @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd
+	 */
+	TLC_MNG_CONFIG_CMD = 0xF,
+
+	/**
+	 * @TLC_MNG_UPDATE_NOTIF: &struct iwl_tlc_update_notif
+	 */
+	TLC_MNG_UPDATE_NOTIF = 0xF7,
+
+	/**
+	 * @STA_PM_NOTIF: &struct iwl_mvm_pm_state_notification
+	 */
+	STA_PM_NOTIF = 0xFD,
+
+	/**
+	 * @MU_GROUP_MGMT_NOTIF: &struct iwl_mu_group_mgmt_notif
+	 */
+	MU_GROUP_MGMT_NOTIF = 0xFE,
+
+	/**
+	 * @RX_QUEUES_NOTIFICATION: &struct iwl_rxq_sync_notification
+	 */
+	RX_QUEUES_NOTIFICATION = 0xFF,
+};
+
+/**
+ * struct iwl_mu_group_mgmt_cmd - VHT MU-MIMO group configuration
+ *
+ * @reserved: reserved
+ * @membership_status: a bitmap of MU groups
+ * @user_position: the position of the station in a group. If the station is
+ *	in the group, the two bits at (group * 2) encode its position
+ */
+struct iwl_mu_group_mgmt_cmd {
+	__le32 reserved;
+	__le32 membership_status[2];
+	__le32 user_position[4];
+} __packed; /* MU_GROUP_ID_MNG_TABLE_API_S_VER_1 */
+
+/**
+ * struct iwl_mu_group_mgmt_notif - VHT MU-MIMO group id notification
+ *
+ * @membership_status: a bitmap of MU groups
+ * @user_position: the position of the station in a group. If the station is
+ *	in the group, the two bits at (group * 2) encode its position
+ */
+struct iwl_mu_group_mgmt_notif {
+	__le32 membership_status[2];
+	__le32 user_position[4];
+} __packed; /* MU_GROUP_MNG_NTFY_API_S_VER_1 */
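+
+/*
+ * Illustrative sketch, not part of the firmware API: with 64 VHT MU-MIMO
+ * groups, membership is one bit per group and the user position two bits
+ * per group; g is a made-up group index.
+ *
+ *	bool member = le32_to_cpu(notif->membership_status[g / 32]) &
+ *		      BIT(g % 32);
+ *	u8 pos = (le32_to_cpu(notif->user_position[g / 16]) >>
+ *		  ((g % 16) * 2)) & 0x3;
+ */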
+
+#endif /* __iwl_fw_api_datapath_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
new file mode 100644
index 0000000..1067823
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
@@ -0,0 +1,356 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_fw_api_debug_h__
+#define __iwl_fw_api_debug_h__
+
+/**
+ * enum iwl_debug_cmds - debug commands
+ */
+enum iwl_debug_cmds {
+	/**
+	 * @LMAC_RD_WR:
+	 * LMAC memory read/write, using &struct iwl_dbg_mem_access_cmd and
+	 * &struct iwl_dbg_mem_access_rsp
+	 */
+	LMAC_RD_WR = 0x0,
+	/**
+	 * @UMAC_RD_WR:
+	 * UMAC memory read/write, using &struct iwl_dbg_mem_access_cmd and
+	 * &struct iwl_dbg_mem_access_rsp
+	 */
+	UMAC_RD_WR = 0x1,
+	/**
+	 * @MFU_ASSERT_DUMP_NTF:
+	 * &struct iwl_mfu_assert_dump_notif
+	 */
+	MFU_ASSERT_DUMP_NTF = 0xFE,
+};
+
+/* Error response/notification */
+enum {
+	FW_ERR_UNKNOWN_CMD = 0x0,
+	FW_ERR_INVALID_CMD_PARAM = 0x1,
+	FW_ERR_SERVICE = 0x2,
+	FW_ERR_ARC_MEMORY = 0x3,
+	FW_ERR_ARC_CODE = 0x4,
+	FW_ERR_WATCH_DOG = 0x5,
+	FW_ERR_WEP_GRP_KEY_INDX = 0x10,
+	FW_ERR_WEP_KEY_SIZE = 0x11,
+	FW_ERR_OBSOLETE_FUNC = 0x12,
+	FW_ERR_UNEXPECTED = 0xFE,
+	FW_ERR_FATAL = 0xFF
+};
+
+/**
+ * struct iwl_error_resp - FW error indication
+ * ( REPLY_ERROR = 0x2 )
+ * @error_type: one of FW_ERR_*
+ * @cmd_id: the command ID for which the error occurred
+ * @reserved1: reserved
+ * @bad_cmd_seq_num: sequence number of the erroneous command
+ * @error_service: which service created the error, applicable only if
+ *     error_type = 2, otherwise 0
+ * @timestamp: TSF in usecs.
+ */
+struct iwl_error_resp {
+	__le32 error_type;
+	u8 cmd_id;
+	u8 reserved1;
+	__le16 bad_cmd_seq_num;
+	__le32 error_service;
+	__le64 timestamp;
+} __packed;
+
+#define TX_FIFO_MAX_NUM_9000		8
+#define TX_FIFO_MAX_NUM			15
+#define RX_FIFO_MAX_NUM			2
+#define TX_FIFO_INTERNAL_MAX_NUM	6
+
+/**
+ * struct iwl_shared_mem_cfg_v2 - Shared memory configuration information
+ *
+ * @shared_mem_addr: shared memory addr (pre 8000 HW set to 0x0 as MARBH is not
+ *	accessible)
+ * @shared_mem_size: shared memory size
+ * @sample_buff_addr: internal sample (mon/adc) buff addr (pre 8000 HW set to
+ *	0x0 as accessible only via DBGM RDAT)
+ * @sample_buff_size: internal sample buff size
+ * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB), (pre
+ *	8000 HW set to 0x0 as not accessible)
+ * @txfifo_size: size of TXF0 ... TXF7
+ * @rxfifo_size: RXF1, RXF2 sizes. If there is no RXF2, it'll have a value of 0
+ * @page_buff_addr: used by UMAC and performance debug (page miss analysis);
+ *	when paging is not supported this should be 0
+ * @page_buff_size: size of %page_buff_addr
+ * @rxfifo_addr: Start address of rxFifo
+ * @internal_txfifo_addr: start address of internalFifo
+ * @internal_txfifo_size: internal fifos' size
+ *
+ * NOTE: on firmware that doesn't have IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG
+ *	 set, the last 3 members don't exist.
+ */
+struct iwl_shared_mem_cfg_v2 {
+	__le32 shared_mem_addr;
+	__le32 shared_mem_size;
+	__le32 sample_buff_addr;
+	__le32 sample_buff_size;
+	__le32 txfifo_addr;
+	__le32 txfifo_size[TX_FIFO_MAX_NUM_9000];
+	__le32 rxfifo_size[RX_FIFO_MAX_NUM];
+	__le32 page_buff_addr;
+	__le32 page_buff_size;
+	__le32 rxfifo_addr;
+	__le32 internal_txfifo_addr;
+	__le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
+} __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */
+
+/**
+ * struct iwl_shared_mem_lmac_cfg - LMAC shared memory configuration
+ *
+ * @txfifo_addr: start addr of TXF0 (excluding the context table 0.5KB)
+ * @txfifo_size: size of TX FIFOs
+ * @rxfifo1_addr: RXF1 addr
+ * @rxfifo1_size: RXF1 size
+ */
+struct iwl_shared_mem_lmac_cfg {
+	__le32 txfifo_addr;
+	__le32 txfifo_size[TX_FIFO_MAX_NUM];
+	__le32 rxfifo1_addr;
+	__le32 rxfifo1_size;
+} __packed; /* SHARED_MEM_ALLOC_LMAC_API_S_VER_1 */
+
+/**
+ * struct iwl_shared_mem_cfg - Shared memory configuration information
+ *
+ * @shared_mem_addr: shared memory address
+ * @shared_mem_size: shared memory size
+ * @sample_buff_addr: internal sample (mon/adc) buff addr
+ * @sample_buff_size: internal sample buff size
+ * @rxfifo2_addr: start addr of RXF2
+ * @rxfifo2_size: size of RXF2
+ * @page_buff_addr: used by UMAC and performance debug (page miss analysis);
+ *	when paging is not supported this should be 0
+ * @page_buff_size: size of %page_buff_addr
+ * @lmac_num: number of LMACs (1 or 2)
+ * @lmac_smem: per-LMAC smem data
+ */
+struct iwl_shared_mem_cfg {
+	__le32 shared_mem_addr;
+	__le32 shared_mem_size;
+	__le32 sample_buff_addr;
+	__le32 sample_buff_size;
+	__le32 rxfifo2_addr;
+	__le32 rxfifo2_size;
+	__le32 page_buff_addr;
+	__le32 page_buff_size;
+	__le32 lmac_num;
+	struct iwl_shared_mem_lmac_cfg lmac_smem[2];
+} __packed; /* SHARED_MEM_ALLOC_API_S_VER_3 */
+
+/**
+ * struct iwl_mfuart_load_notif - mfuart image version & status
+ * ( MFUART_LOAD_NOTIFICATION = 0xb1 )
+ * @installed_ver: installed image version
+ * @external_ver: external image version
+ * @status: MFUART loading status
+ * @duration: MFUART loading time
+ * @image_size: MFUART image size in bytes
+ */
+struct iwl_mfuart_load_notif {
+	__le32 installed_ver;
+	__le32 external_ver;
+	__le32 status;
+	__le32 duration;
+	/* image size valid only in v2 of the command */
+	__le32 image_size;
+} __packed; /* MFU_LOADER_NTFY_API_S_VER_2 */
+
+/**
+ * struct iwl_mfu_assert_dump_notif - mfuart dump logs
+ * ( MFU_ASSERT_DUMP_NTF = 0xfe )
+ * @assert_id: mfuart assert id that caused the notif
+ * @curr_reset_num: number of asserts since uptime
+ * @index_num: current chunk id
+ * @parts_num: total number of chunks
+ * @data_size: number of data bytes sent
+ * @data: data buffer
+ */
+struct iwl_mfu_assert_dump_notif {
+	__le32   assert_id;
+	__le32   curr_reset_num;
+	__le16   index_num;
+	__le16   parts_num;
+	__le32   data_size;
+	__le32   data[0];
+} __packed; /* MFU_DUMP_ASSERT_API_S_VER_1 */
+
+/**
+ * enum iwl_mvm_marker_id - marker ids
+ *
+ * The ids for the different types of markers to insert into the usniffer logs
+ *
+ * @MARKER_ID_TX_FRAME_LATENCY: TX latency marker
+ * @MARKER_ID_SYNC_CLOCK: sync FW time and systime
+ */
+enum iwl_mvm_marker_id {
+	MARKER_ID_TX_FRAME_LATENCY = 1,
+	MARKER_ID_SYNC_CLOCK = 2,
+}; /* MARKER_ID_API_E_VER_2 */
+
+/**
+ * struct iwl_mvm_marker - mark info into the usniffer logs
+ *
+ * (MARKER_CMD = 0xcb)
+ *
+ * Mark the UTC time stamp into the usniffer logs together with additional
+ * metadata, so the usniffer output can be parsed.
+ * In the command response the ucode will return the GP2 time.
+ *
+ * @dw_len: the total number of dwords in the command, including the dword
+ *	containing this byte
+ * @marker_id: a unique marker id, see &enum iwl_mvm_marker_id
+ * @reserved: reserved.
+ * @timestamp: in milliseconds since 1970-01-01 00:00:00 UTC
+ * @metadata: additional metadata that will be written to the usniffer log
+ */
+struct iwl_mvm_marker {
+	u8 dw_len;
+	u8 marker_id;
+	__le16 reserved;
+	__le64 timestamp;
+	__le32 metadata[0];
+} __packed; /* MARKER_API_S_VER_1 */
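+
+/*
+ * Illustrative sketch, not part of the firmware API: the fixed part of
+ * the marker is 3 dwords, so dw_len counts those plus any metadata
+ * dwords; n_meta is a made-up variable.
+ *
+ *	struct iwl_mvm_marker marker = {
+ *		.dw_len = sizeof(struct iwl_mvm_marker) / 4 + n_meta,
+ *		.marker_id = MARKER_ID_SYNC_CLOCK,
+ *	};
+ */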
+
+/**
+ * struct iwl_mvm_marker_rsp - Response to marker cmd
+ *
+ * @gp2: The gp2 clock value in the FW
+ */
+struct iwl_mvm_marker_rsp {
+	__le32 gp2;
+} __packed;
+
+/* Operation types for the debug mem access */
+enum {
+	DEBUG_MEM_OP_READ = 0,
+	DEBUG_MEM_OP_WRITE = 1,
+	DEBUG_MEM_OP_WRITE_BYTES = 2,
+};
+
+#define DEBUG_MEM_MAX_SIZE_DWORDS 32
+
+/**
+ * struct iwl_dbg_mem_access_cmd - Request the device to read/write memory
+ * @op: DEBUG_MEM_OP_*
+ * @addr: address to read/write from/to
+ * @len: in dwords, to read/write
+ * @data: for write operations, contains the source buffer
+ */
+struct iwl_dbg_mem_access_cmd {
+	__le32 op;
+	__le32 addr;
+	__le32 len;
+	__le32 data[];
+} __packed; /* DEBUG_(U|L)MAC_RD_WR_CMD_API_S_VER_1 */
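+
+/*
+ * Illustrative sketch, not part of the firmware API: a read request for
+ * 4 dwords; the address is a placeholder.
+ *
+ *	struct iwl_dbg_mem_access_cmd cmd = {
+ *		.op = cpu_to_le32(DEBUG_MEM_OP_READ),
+ *		.addr = cpu_to_le32(0x800000),
+ *		.len = cpu_to_le32(4),
+ *	};
+ */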
+
+/* Status responses for the debug mem access */
+enum {
+	DEBUG_MEM_STATUS_SUCCESS = 0x0,
+	DEBUG_MEM_STATUS_FAILED = 0x1,
+	DEBUG_MEM_STATUS_LOCKED = 0x2,
+	DEBUG_MEM_STATUS_HIDDEN = 0x3,
+	DEBUG_MEM_STATUS_LENGTH = 0x4,
+};
+
+/**
+ * struct iwl_dbg_mem_access_rsp - Response to debug mem commands
+ * @status: DEBUG_MEM_STATUS_*
+ * @len: read dwords (0 for write operations)
+ * @data: contains the read DWs
+ */
+struct iwl_dbg_mem_access_rsp {
+	__le32 status;
+	__le32 len;
+	__le32 data[];
+} __packed; /* DEBUG_(U|L)MAC_RD_WR_RSP_API_S_VER_1 */
+
+#define CONT_REC_COMMAND_SIZE	80
+#define ENABLE_CONT_RECORDING	0x15
+#define DISABLE_CONT_RECORDING	0x16
+
+/*
+ * struct iwl_continuous_record_mode - recording mode
+ */
+struct iwl_continuous_record_mode {
+	__le16 enable_recording;
+} __packed;
+
+/*
+ * struct iwl_continuous_record_cmd - enable/disable continuous recording
+ */
+struct iwl_continuous_record_cmd {
+	struct iwl_continuous_record_mode record_mode;
+	u8 pad[CONT_REC_COMMAND_SIZE -
+		sizeof(struct iwl_continuous_record_mode)];
+} __packed;
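+
+/*
+ * Illustrative sketch, not part of the firmware API: enabling continuous
+ * recording; the pad bytes stay zeroed by the designated initializer.
+ *
+ *	struct iwl_continuous_record_cmd cont_rec = {
+ *		.record_mode.enable_recording =
+ *			cpu_to_le16(ENABLE_CONT_RECORDING),
+ *	};
+ */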
+
+#endif /* __iwl_fw_api_debug_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h b/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h
new file mode 100644
index 0000000..befc3b1
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h
@@ -0,0 +1,183 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_fw_api_filter_h__
+#define __iwl_fw_api_filter_h__
+
+#include "fw/api/mac.h"
+
+#define MAX_PORT_ID_NUM	2
+#define MAX_MCAST_FILTERING_ADDRESSES 256
+
+/**
+ * struct iwl_mcast_filter_cmd - configure multicast filter.
+ * @filter_own: Set 1 to filter out multicast packets sent by the station
+ *		itself
+ * @port_id:	Multicast MAC addresses array specifier. This is an unusual
+ *		way of identifying the network interface, adopted in the
+ *		host-device interface. FW uses it as an index into an array
+ *		of addresses; this array has MAX_PORT_ID_NUM members.
+ * @count:	Number of MAC addresses in the array
+ * @pass_all:	Set 1 to pass all multicast packets.
+ * @bssid:	current association BSSID.
+ * @reserved:	reserved
+ * @addr_list:	Placeholder for the array of MAC addresses.
+ *		IMPORTANT: add padding if necessary to ensure DWORD alignment.
+ */
+struct iwl_mcast_filter_cmd {
+	u8 filter_own;
+	u8 port_id;
+	u8 count;
+	u8 pass_all;
+	u8 bssid[6];
+	u8 reserved[2];
+	u8 addr_list[0];
+} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */
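+
+/*
+ * Illustrative sketch, not part of the firmware API: sizing the command
+ * with its trailing address list padded to dword alignment, as the
+ * @addr_list documentation requires; n_addrs is a made-up variable.
+ *
+ *	size_t len = ALIGN(sizeof(struct iwl_mcast_filter_cmd) +
+ *			   n_addrs * ETH_ALEN, 4);
+ */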
+
+#define MAX_BCAST_FILTERS 8
+#define MAX_BCAST_FILTER_ATTRS 2
+
+/**
+ * enum iwl_mvm_bcast_filter_attr_offset - written by fw for each Rx packet
+ * @BCAST_FILTER_OFFSET_PAYLOAD_START: offset is from payload start.
+ * @BCAST_FILTER_OFFSET_IP_END: offset is from ip header end (i.e.
+ *	start of ip payload).
+ */
+enum iwl_mvm_bcast_filter_attr_offset {
+	BCAST_FILTER_OFFSET_PAYLOAD_START = 0,
+	BCAST_FILTER_OFFSET_IP_END = 1,
+};
+
+/**
+ * struct iwl_fw_bcast_filter_attr - broadcast filter attribute
+ * @offset_type:	&enum iwl_mvm_bcast_filter_attr_offset.
+ * @offset:	starting offset of this pattern.
+ * @reserved1:	reserved
+ * @val:	value to match - big endian (MSB is the first
+ *		byte to match from offset pos).
+ * @mask:	mask to match (big endian).
+ */
+struct iwl_fw_bcast_filter_attr {
+	u8 offset_type;
+	u8 offset;
+	__le16 reserved1;
+	__be32 val;
+	__be32 mask;
+} __packed; /* BCAST_FILTER_ATT_S_VER_1 */
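+
+/*
+ * Illustrative sketch, not part of the firmware API: an attribute that
+ * matches the first dword of the IP payload against a value/mask pair;
+ * the numbers are placeholders.
+ *
+ *	struct iwl_fw_bcast_filter_attr attr = {
+ *		.offset_type = BCAST_FILTER_OFFSET_IP_END,
+ *		.offset = 0,
+ *		.val = cpu_to_be32(0x12345678),
+ *		.mask = cpu_to_be32(0xffffffff),
+ *	};
+ */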
+
+/**
+ * enum iwl_mvm_bcast_filter_frame_type - filter frame type
+ * @BCAST_FILTER_FRAME_TYPE_ALL: consider all frames.
+ * @BCAST_FILTER_FRAME_TYPE_IPV4: consider only ipv4 frames
+ */
+enum iwl_mvm_bcast_filter_frame_type {
+	BCAST_FILTER_FRAME_TYPE_ALL = 0,
+	BCAST_FILTER_FRAME_TYPE_IPV4 = 1,
+};
+
+/**
+ * struct iwl_fw_bcast_filter - broadcast filter
+ * @discard: discard frame (1) or let it pass (0).
+ * @frame_type: &enum iwl_mvm_bcast_filter_frame_type.
+ * @reserved1: reserved
+ * @num_attrs: number of valid attributes in this filter.
+ * @attrs: attributes of this filter. a filter is considered matched
+ *	only when all its attributes are matched (i.e. AND relationship)
+ */
+struct iwl_fw_bcast_filter {
+	u8 discard;
+	u8 frame_type;
+	u8 num_attrs;
+	u8 reserved1;
+	struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS];
+} __packed; /* BCAST_FILTER_S_VER_1 */
+
+/**
+ * struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration.
+ * @default_discard: default action for this mac (discard (1) / pass (0)).
+ * @reserved1: reserved
+ * @attached_filters: bitmap of relevant filters for this mac.
+ */
+struct iwl_fw_bcast_mac {
+	u8 default_discard;
+	u8 reserved1;
+	__le16 attached_filters;
+} __packed; /* BCAST_MAC_CONTEXT_S_VER_1 */
+
+/**
+ * struct iwl_bcast_filter_cmd - broadcast filtering configuration
+ * @disable: enable (0) / disable (1)
+ * @max_bcast_filters: max number of filters (MAX_BCAST_FILTERS)
+ * @max_macs: max number of macs (NUM_MAC_INDEX_DRIVER)
+ * @reserved1: reserved
+ * @filters: broadcast filters
+ * @macs: broadcast filtering configuration per-mac
+ */
+struct iwl_bcast_filter_cmd {
+	u8 disable;
+	u8 max_bcast_filters;
+	u8 max_macs;
+	u8 reserved1;
+	struct iwl_fw_bcast_filter filters[MAX_BCAST_FILTERS];
+	struct iwl_fw_bcast_mac macs[NUM_MAC_INDEX_DRIVER];
+} __packed; /* BCAST_FILTERING_HCMD_API_S_VER_1 */
+
+#endif /* __iwl_fw_api_filter_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/led.h b/drivers/net/wireless/intel/iwlwifi/fw/api/led.h
new file mode 100644
index 0000000..b30c9d2
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/led.h
@@ -0,0 +1,71 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_fw_api_led_h__
+#define __iwl_fw_api_led_h__
+
+/**
+ * struct iwl_led_cmd - LED switching command
+ *
+ * @status: LED status (on/off)
+ */
+struct iwl_led_cmd {
+	__le32 status;
+} __packed; /* LEDS_CMD_API_S_VER_2 */
+
+#endif /* __iwl_fw_api_led_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
new file mode 100644
index 0000000..17c7ef1
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
@@ -0,0 +1,104 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_fw_api_mac_cfg_h__
+#define __iwl_fw_api_mac_cfg_h__
+
+/**
+ * enum iwl_mac_conf_subcmd_ids - mac configuration command IDs
+ */
+enum iwl_mac_conf_subcmd_ids {
+	/**
+	 * @LOW_LATENCY_CMD: &struct iwl_mac_low_latency_cmd
+	 */
+	LOW_LATENCY_CMD = 0x3,
+	/**
+	 * @CHANNEL_SWITCH_NOA_NOTIF: &struct iwl_channel_switch_noa_notif
+	 */
+	CHANNEL_SWITCH_NOA_NOTIF = 0xFF,
+};
+
+/**
+ * struct iwl_channel_switch_noa_notif - Channel switch NOA notification
+ *
+ * @id_and_color: ID and color of the MAC
+ */
+struct iwl_channel_switch_noa_notif {
+	__le32 id_and_color;
+} __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwl_mac_low_latency_cmd - set/clear mac to 'low-latency mode'
+ *
+ * @mac_id: MAC ID to whom to apply the low-latency configurations
+ * @low_latency_rx: 1/0 to set/clear Rx low latency direction
+ * @low_latency_tx: 1/0 to set/clear Tx low latency direction
+ * @reserved: reserved for alignment purposes
+ */
+struct iwl_mac_low_latency_cmd {
+	__le32 mac_id;
+	u8 low_latency_rx;
+	u8 low_latency_tx;
+	__le16 reserved;
+} __packed; /* MAC_LOW_LATENCY_API_S_VER_1 */
+
+#endif /* __iwl_fw_api_mac_cfg_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
new file mode 100644
index 0000000..55594c9
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
@@ -0,0 +1,581 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2017        Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2017        Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __iwl_fw_api_mac_h__
+#define __iwl_fw_api_mac_h__
+
+/*
+ * The first MAC indices (starting from 0) are available to the driver;
+ * AUX indices follow - 1 for non-CDB, 2 for CDB.
+ */
+#define MAC_INDEX_AUX		4
+#define MAC_INDEX_MIN_DRIVER	0
+#define NUM_MAC_INDEX_DRIVER	MAC_INDEX_AUX
+#define NUM_MAC_INDEX		(NUM_MAC_INDEX_DRIVER + 1)
+#define NUM_MAC_INDEX_CDB	(NUM_MAC_INDEX_DRIVER + 2)
+
+#define IWL_MVM_STATION_COUNT		16
+#define IWL_MVM_INVALID_STA		0xFF
+
+enum iwl_ac {
+	AC_BK,
+	AC_BE,
+	AC_VI,
+	AC_VO,
+	AC_NUM,
+};
+
+/**
+ * enum iwl_mac_protection_flags - MAC context flags
+ * @MAC_PROT_FLG_TGG_PROTECT: 11g protection when transmitting OFDM frames;
+ *	this will require CCK RTS/CTS2self.
+ *	RTS/CTS will protect full burst time.
+ * @MAC_PROT_FLG_HT_PROT: enable HT protection
+ * @MAC_PROT_FLG_FAT_PROT: protect 40 MHz transmissions
+ * @MAC_PROT_FLG_SELF_CTS_EN: allow CTS2self
+ */
+enum iwl_mac_protection_flags {
+	MAC_PROT_FLG_TGG_PROTECT	= BIT(3),
+	MAC_PROT_FLG_HT_PROT		= BIT(23),
+	MAC_PROT_FLG_FAT_PROT		= BIT(24),
+	MAC_PROT_FLG_SELF_CTS_EN	= BIT(30),
+};
+
+#define MAC_FLG_SHORT_SLOT		BIT(4)
+#define MAC_FLG_SHORT_PREAMBLE		BIT(5)
+
+/**
+ * enum iwl_mac_types - Supported MAC types
+ * @FW_MAC_TYPE_FIRST: lowest supported MAC type
+ * @FW_MAC_TYPE_AUX: Auxiliary MAC (internal)
+ * @FW_MAC_TYPE_LISTENER: monitor MAC type (?)
+ * @FW_MAC_TYPE_PIBSS: Pseudo-IBSS
+ * @FW_MAC_TYPE_IBSS: IBSS
+ * @FW_MAC_TYPE_BSS_STA: BSS (managed) station
+ * @FW_MAC_TYPE_P2P_DEVICE: P2P Device
+ * @FW_MAC_TYPE_P2P_STA: P2P client
+ * @FW_MAC_TYPE_GO: P2P GO
+ * @FW_MAC_TYPE_TEST: ?
+ * @FW_MAC_TYPE_MAX: highest supported MAC type
+ */
+enum iwl_mac_types {
+	FW_MAC_TYPE_FIRST = 1,
+	FW_MAC_TYPE_AUX = FW_MAC_TYPE_FIRST,
+	FW_MAC_TYPE_LISTENER,
+	FW_MAC_TYPE_PIBSS,
+	FW_MAC_TYPE_IBSS,
+	FW_MAC_TYPE_BSS_STA,
+	FW_MAC_TYPE_P2P_DEVICE,
+	FW_MAC_TYPE_P2P_STA,
+	FW_MAC_TYPE_GO,
+	FW_MAC_TYPE_TEST,
+	FW_MAC_TYPE_MAX = FW_MAC_TYPE_TEST
+}; /* MAC_CONTEXT_TYPE_API_E_VER_1 */
+
+/**
+ * enum iwl_tsf_id - TSF hw timer ID
+ * @TSF_ID_A: use TSF A
+ * @TSF_ID_B: use TSF B
+ * @TSF_ID_C: use TSF C
+ * @TSF_ID_D: use TSF D
+ * @NUM_TSF_IDS: number of TSF timers available
+ */
+enum iwl_tsf_id {
+	TSF_ID_A = 0,
+	TSF_ID_B = 1,
+	TSF_ID_C = 2,
+	TSF_ID_D = 3,
+	NUM_TSF_IDS = 4,
+}; /* TSF_ID_API_E_VER_1 */
+
+/**
+ * struct iwl_mac_data_ap - configuration data for AP MAC context
+ * @beacon_time: beacon transmit time in system time
+ * @beacon_tsf: beacon transmit time in TSF
+ * @bi: beacon interval in TU
+ * @bi_reciprocal: 2^32 / bi
+ * @dtim_interval: dtim transmit time in TU
+ * @dtim_reciprocal: 2^32 / dtim_interval
+ * @mcast_qid: queue ID for multicast traffic.
+ *	NOTE: obsolete from VER2 and on
+ * @beacon_template: beacon template ID
+ */
+struct iwl_mac_data_ap {
+	__le32 beacon_time;
+	__le64 beacon_tsf;
+	__le32 bi;
+	__le32 bi_reciprocal;
+	__le32 dtim_interval;
+	__le32 dtim_reciprocal;
+	__le32 mcast_qid;
+	__le32 beacon_template;
+} __packed; /* AP_MAC_DATA_API_S_VER_2 */
+
+/**
+ * struct iwl_mac_data_ibss - configuration data for IBSS MAC context
+ * @beacon_time: beacon transmit time in system time
+ * @beacon_tsf: beacon transmit time in TSF
+ * @bi: beacon interval in TU
+ * @bi_reciprocal: 2^32 / bi
+ * @beacon_template: beacon template ID
+ */
+struct iwl_mac_data_ibss {
+	__le32 beacon_time;
+	__le64 beacon_tsf;
+	__le32 bi;
+	__le32 bi_reciprocal;
+	__le32 beacon_template;
+} __packed; /* IBSS_MAC_DATA_API_S_VER_1 */
+
+/**
+ * struct iwl_mac_data_sta - configuration data for station MAC context
+ * @is_assoc: 1 for associated state, 0 otherwise
+ * @dtim_time: DTIM arrival time in system time
+ * @dtim_tsf: DTIM arrival time in TSF
+ * @bi: beacon interval in TU, applicable only when associated
+ * @bi_reciprocal: 2^32 / bi, applicable only when associated
+ * @dtim_interval: DTIM interval in TU, applicable only when associated
+ * @dtim_reciprocal: 2^32 / dtim_interval, applicable only when associated
+ * @listen_interval: in beacon intervals, applicable only when associated
+ * @assoc_id: unique ID assigned by the AP during association
+ * @assoc_beacon_arrive_time: TSF of first beacon after association
+ */
+struct iwl_mac_data_sta {
+	__le32 is_assoc;
+	__le32 dtim_time;
+	__le64 dtim_tsf;
+	__le32 bi;
+	__le32 bi_reciprocal;
+	__le32 dtim_interval;
+	__le32 dtim_reciprocal;
+	__le32 listen_interval;
+	__le32 assoc_id;
+	__le32 assoc_beacon_arrive_time;
+} __packed; /* STA_MAC_DATA_API_S_VER_1 */
+
+/**
+ * struct iwl_mac_data_go - configuration data for P2P GO MAC context
+ * @ap: iwl_mac_data_ap struct with most config data
+ * @ctwin: client traffic window in TU (period after TBTT when GO is present).
+ *	0 indicates that there is no CT window.
+ * @opp_ps_enabled: indicates that opportunistic PS is allowed
+ */
+struct iwl_mac_data_go {
+	struct iwl_mac_data_ap ap;
+	__le32 ctwin;
+	__le32 opp_ps_enabled;
+} __packed; /* GO_MAC_DATA_API_S_VER_1 */
+
+/**
+ * struct iwl_mac_data_p2p_sta - configuration data for P2P client MAC context
+ * @sta: iwl_mac_data_sta struct with most config data
+ * @ctwin: client traffic window in TU (period after TBTT when GO is present).
+ *	0 indicates that there is no CT window.
+ */
+struct iwl_mac_data_p2p_sta {
+	struct iwl_mac_data_sta sta;
+	__le32 ctwin;
+} __packed; /* P2P_STA_MAC_DATA_API_S_VER_1 */
+
+/**
+ * struct iwl_mac_data_pibss - Pseudo IBSS config data
+ * @stats_interval: interval in TU between statistics notifications to host.
+ */
+struct iwl_mac_data_pibss {
+	__le32 stats_interval;
+} __packed; /* PIBSS_MAC_DATA_API_S_VER_1 */
+
+/*
+ * struct iwl_mac_data_p2p_dev - configuration data for the P2P Device MAC
+ * context.
+ * @is_disc_extended: if set to true, P2P Device discoverability is enabled on
+ *	other channels as well. This should be set to true only when the
+ *	device is discoverable and there is an active GO. Note that setting
+ *	this field when not needed will increase the number of interrupts and
+ *	affect platform power, as this setting opens the Rx filters on all
+ *	MACs.
+ */
+struct iwl_mac_data_p2p_dev {
+	__le32 is_disc_extended;
+} __packed; /* _P2P_DEV_MAC_DATA_API_S_VER_1 */
+
+/**
+ * enum iwl_mac_filter_flags - MAC context filter flags
+ * @MAC_FILTER_IN_PROMISC: accept all data frames
+ * @MAC_FILTER_IN_CONTROL_AND_MGMT: pass all management and
+ *	control frames to the host
+ * @MAC_FILTER_ACCEPT_GRP: accept multicast frames
+ * @MAC_FILTER_DIS_DECRYPT: don't decrypt unicast frames
+ * @MAC_FILTER_DIS_GRP_DECRYPT: don't decrypt multicast frames
+ * @MAC_FILTER_IN_BEACON: transfer foreign BSS's beacons to host
+ *	(in station mode when associated)
+ * @MAC_FILTER_OUT_BCAST: filter out all broadcast frames
+ * @MAC_FILTER_IN_CRC32: extract FCS and append it to frames
+ * @MAC_FILTER_IN_PROBE_REQUEST: pass probe requests to host
+ */
+enum iwl_mac_filter_flags {
+	MAC_FILTER_IN_PROMISC		= BIT(0),
+	MAC_FILTER_IN_CONTROL_AND_MGMT	= BIT(1),
+	MAC_FILTER_ACCEPT_GRP		= BIT(2),
+	MAC_FILTER_DIS_DECRYPT		= BIT(3),
+	MAC_FILTER_DIS_GRP_DECRYPT	= BIT(4),
+	MAC_FILTER_IN_BEACON		= BIT(6),
+	MAC_FILTER_OUT_BCAST		= BIT(8),
+	MAC_FILTER_IN_CRC32		= BIT(11),
+	MAC_FILTER_IN_PROBE_REQUEST	= BIT(12),
+	/**
+	 * @MAC_FILTER_IN_11AX: mark BSS as supporting 802.11ax
+	 */
+	MAC_FILTER_IN_11AX		= BIT(14),
+};
+
+/**
+ * enum iwl_mac_qos_flags - QoS flags
+ * @MAC_QOS_FLG_UPDATE_EDCA: ?
+ * @MAC_QOS_FLG_TGN: HT is enabled
+ * @MAC_QOS_FLG_TXOP_TYPE: ?
+ *
+ */
+enum iwl_mac_qos_flags {
+	MAC_QOS_FLG_UPDATE_EDCA	= BIT(0),
+	MAC_QOS_FLG_TGN		= BIT(1),
+	MAC_QOS_FLG_TXOP_TYPE	= BIT(4),
+};
+
+/**
+ * struct iwl_ac_qos - QOS timing params for MAC_CONTEXT_CMD
+ * @cw_min: Contention window, start value in numbers of slots.
+ *	Should be a power-of-2, minus 1.  Device's default is 0x0f.
+ * @cw_max: Contention window, max value in numbers of slots.
+ *	Should be a power-of-2, minus 1.  Device's default is 0x3f.
+ * @aifsn:  Number of slots in Arbitration Interframe Space (before
+ *	performing random backoff timing prior to Tx).  Device default 1.
+ * @fifos_mask: FIFOs used by this MAC for this AC
+ * @edca_txop:  Length of Tx opportunity, in uSecs.  Device default is 0.
+ *
+ * One instance of this config struct for each of 4 EDCA access categories
+ * in struct iwl_qosparam_cmd.
+ *
+ * The device will automatically increase the contention window to
+ * (2*CW) + 1 for each transmission retry. The device uses cw_max as a bit
+ * mask, ANDed with the new CW value, to cap the CW value.
+ */
+struct iwl_ac_qos {
+	__le16 cw_min;
+	__le16 cw_max;
+	u8 aifsn;
+	u8 fifos_mask;
+	__le16 edca_txop;
+} __packed; /* AC_QOS_API_S_VER_2 */
+
+/**
+ * struct iwl_mac_ctx_cmd - command structure to configure MAC contexts
+ * ( MAC_CONTEXT_CMD = 0x28 )
+ * @id_and_color: ID and color of the MAC
+ * @action: action to perform, one of FW_CTXT_ACTION_*
+ * @mac_type: one of &enum iwl_mac_types
+ * @tsf_id: TSF HW timer, one of &enum iwl_tsf_id
+ * @node_addr: MAC address
+ * @reserved_for_node_addr: reserved
+ * @bssid_addr: BSSID
+ * @reserved_for_bssid_addr: reserved
+ * @cck_rates: basic rates available for CCK
+ * @ofdm_rates: basic rates available for OFDM
+ * @protection_flags: combination of &enum iwl_mac_protection_flags
+ * @cck_short_preamble: 0x20 for enabling short preamble, 0 otherwise
+ * @short_slot: 0x10 for enabling short slots, 0 otherwise
+ * @filter_flags: combination of &enum iwl_mac_filter_flags
+ * @qos_flags: from &enum iwl_mac_qos_flags
+ * @ac: one iwl_mac_qos configuration for each AC
+ */
+struct iwl_mac_ctx_cmd {
+	/* COMMON_INDEX_HDR_API_S_VER_1 */
+	__le32 id_and_color;
+	__le32 action;
+	/* MAC_CONTEXT_COMMON_DATA_API_S_VER_1 */
+	__le32 mac_type;
+	__le32 tsf_id;
+	u8 node_addr[6];
+	__le16 reserved_for_node_addr;
+	u8 bssid_addr[6];
+	__le16 reserved_for_bssid_addr;
+	__le32 cck_rates;
+	__le32 ofdm_rates;
+	__le32 protection_flags;
+	__le32 cck_short_preamble;
+	__le32 short_slot;
+	__le32 filter_flags;
+	/* MAC_QOS_PARAM_API_S_VER_1 */
+	__le32 qos_flags;
+	struct iwl_ac_qos ac[AC_NUM+1];
+	/* MAC_CONTEXT_COMMON_DATA_API_S */
+	union {
+		struct iwl_mac_data_ap ap;
+		struct iwl_mac_data_go go;
+		struct iwl_mac_data_sta sta;
+		struct iwl_mac_data_p2p_sta p2p_sta;
+		struct iwl_mac_data_p2p_dev p2p_dev;
+		struct iwl_mac_data_pibss pibss;
+		struct iwl_mac_data_ibss ibss;
+	};
+} __packed; /* MAC_CONTEXT_CMD_API_S_VER_1 */
+
+static inline u32 iwl_mvm_reciprocal(u32 v)
+{
+	if (!v)
+		return 0;
+	return 0xFFFFFFFF / v;
+}
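+
+/*
+ * Illustrative sketch, not part of the firmware API: the reciprocal
+ * fields are filled from their TU counterparts, e.g. for the AP data:
+ *
+ *	cmd->ap.bi_reciprocal =
+ *		cpu_to_le32(iwl_mvm_reciprocal(le32_to_cpu(cmd->ap.bi)));
+ */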
+
+#define IWL_NONQOS_SEQ_GET	0x1
+#define IWL_NONQOS_SEQ_SET	0x2
+struct iwl_nonqos_seq_query_cmd {
+	__le32 get_set_flag;
+	__le32 mac_id_n_color;
+	__le16 value;
+	__le16 reserved;
+} __packed; /* NON_QOS_TX_COUNTER_GET_SET_API_S_VER_1 */
+
+/**
+ * struct iwl_missed_beacons_notif - information on missed beacons
+ * ( MISSED_BEACONS_NOTIFICATION = 0xa2 )
+ * @mac_id: interface ID
+ * @consec_missed_beacons_since_last_rx: number of consecutive missed
+ *	beacons since last RX.
+ * @consec_missed_beacons: number of consecutive missed beacons
+ * @num_expected_beacons: number of expected beacons
+ * @num_recvd_beacons: number of received beacons
+ */
+struct iwl_missed_beacons_notif {
+	__le32 mac_id;
+	__le32 consec_missed_beacons_since_last_rx;
+	__le32 consec_missed_beacons;
+	__le32 num_expected_beacons;
+	__le32 num_recvd_beacons;
+} __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */
+
+/**
+ * struct iwl_he_backoff_conf - used for backoff configuration
+ * One instance per trigger-based AC (set by the MU EDCA Parameter Set
+ * info element), used for backoff configuration of the trigger-based
+ * TXF5..TXF8 FIFOs.
+ * The MU-TIMER is reloaded with MU_TIME each time a frame from the AC is
+ * sent via trigger-based TX.
+ * @cwmin: CW min
+ * @cwmax: CW max
+ * @aifsn: AIFSN
+ *	AIFSN=0 means that no backoff from the specified trigger-based AC is
+ *	allowed until the MU-TIMER is 0
+ * @mu_time: MU time in 8TU units
+ */
+struct iwl_he_backoff_conf {
+	__le16 cwmin;
+	__le16 cwmax;
+	__le16 aifsn;
+	__le16 mu_time;
+} __packed; /* AC_QOS_DOT11AX_API_S */
+
+#define MAX_HE_SUPP_NSS	2
+#define MAX_HE_CHANNEL_BW_INDX	4
+
+/**
+ * struct iwl_he_pkt_ext - QAM thresholds
+ * The required PPE is set via HE Capabilities IE, per Nss x BW x MCS
+ * The IE is organized in the following way:
+ * Support for Nss x BW (or RU) matrix:
+ *	(0=SISO, 1=MIMO2) x (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz)
+ * Each entry contains 2 QAM thresholds for 8us and 16us:
+ *	0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6/7=RES
+ * i.e. QAM_th1 < QAM_th2, such that if TX uses QAM_tx:
+ *	QAM_tx < QAM_th1            --> PPE=0us
+ *	QAM_th1 <= QAM_tx < QAM_th2 --> PPE=8us
+ *	QAM_th2 <= QAM_tx           --> PPE=16us
+ * @pkt_ext_qam_th: QAM thresholds
+ *	For each Nss/BW, define 2 QAM thresholds (0..5)
+ *	For rates below the low_th, no PPE is needed
+ *	For rates between low_th and high_th, an 8us PPE is needed
+ *	For rates equal to or higher than the high_th, a 16us PPE is needed
+ *	Nss (0-siso, 1-mimo2) x BW (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz) x
+ *		(0-low_th, 1-high_th)
+ */
+struct iwl_he_pkt_ext {
+	u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_HE_CHANNEL_BW_INDX][2];
+} __packed; /* PKT_EXT_DOT11AX_API_S */
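+
+/*
+ * Illustrative sketch (hypothetical helper, not part of the firmware API):
+ * map a QAM threshold pair from @pkt_ext_qam_th to the PPE duration for a
+ * given transmit constellation, following the rule documented above.
+ */
+static inline u8 iwl_he_ppe_duration_us(const struct iwl_he_pkt_ext *ext,
+					u8 nss, u8 bw, u8 qam_tx)
+{
+	u8 low_th = ext->pkt_ext_qam_th[nss][bw][0];
+	u8 high_th = ext->pkt_ext_qam_th[nss][bw][1];
+
+	if (qam_tx < low_th)
+		return 0;	/* QAM_tx < QAM_th1: no PPE needed */
+	if (qam_tx < high_th)
+		return 8;	/* QAM_th1 <= QAM_tx < QAM_th2: 8us PPE */
+	return 16;		/* QAM_th2 <= QAM_tx: 16us PPE */
+}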
+
+/**
+ * enum iwl_he_sta_ctxt_flags - HE STA context flags
+ * @STA_CTXT_HE_REF_BSSID_VALID: ref bssid addr valid (for receiving specific
+ *	control frames such as TRIG, NDPA, BACK)
+ * @STA_CTXT_HE_BSS_COLOR_DIS: BSS color disable, don't use the BSS
+ *	color for RX filter but use MAC header
+ * @STA_CTXT_HE_PARTIAL_BSS_COLOR: partial BSS color allocation
+ * @STA_CTXT_HE_32BIT_BA_BITMAP: indicates the receiver supports BA bitmap
+ *	of 32-bits
+ * @STA_CTXT_HE_PACKET_EXT: indicates that the packet-extension info is valid
+ *	and should be used
+ * @STA_CTXT_HE_TRIG_RND_ALLOC: indicates that trigger based random allocation
+ *	is enabled according to UORA element existence
+ * @STA_CTXT_HE_CONST_TRIG_RND_ALLOC: used for AV testing
+ * @STA_CTXT_HE_ACK_ENABLED: indicates that the AP supports receiving ACK-
+ *	enabled AGG, i.e. both BACK and non-BACK frames in a single AGG
+ * @STA_CTXT_HE_MU_EDCA_CW: indicates that there is an element of MU EDCA
+ *	parameter set, i.e. the backoff counters for trig-based ACs
+ */
+enum iwl_he_sta_ctxt_flags {
+	STA_CTXT_HE_REF_BSSID_VALID		= BIT(4),
+	STA_CTXT_HE_BSS_COLOR_DIS		= BIT(5),
+	STA_CTXT_HE_PARTIAL_BSS_COLOR		= BIT(6),
+	STA_CTXT_HE_32BIT_BA_BITMAP		= BIT(7),
+	STA_CTXT_HE_PACKET_EXT			= BIT(8),
+	STA_CTXT_HE_TRIG_RND_ALLOC		= BIT(9),
+	STA_CTXT_HE_CONST_TRIG_RND_ALLOC	= BIT(10),
+	STA_CTXT_HE_ACK_ENABLED			= BIT(11),
+	STA_CTXT_HE_MU_EDCA_CW			= BIT(12),
+};
+
+/**
+ * enum iwl_he_htc_flags - HE HTC support flags
+ * @IWL_HE_HTC_SUPPORT: HE-HTC support
+ * @IWL_HE_HTC_UL_MU_RESP_SCHED: HE UL MU response schedule
+ *	support via A-control field
+ * @IWL_HE_HTC_BSR_SUPP: BSR support in A-control field
+ * @IWL_HE_HTC_OMI_SUPP: A-OMI support in A-control field
+ * @IWL_HE_HTC_BQR_SUPP: A-BQR support in A-control field
+ */
+enum iwl_he_htc_flags {
+	IWL_HE_HTC_SUPPORT			= BIT(0),
+	IWL_HE_HTC_UL_MU_RESP_SCHED		= BIT(3),
+	IWL_HE_HTC_BSR_SUPP			= BIT(4),
+	IWL_HE_HTC_OMI_SUPP			= BIT(5),
+	IWL_HE_HTC_BQR_SUPP			= BIT(6),
+};
+
+/*
+ * @IWL_HE_HTC_LINK_ADAP_NO_FEEDBACK: the STA does not provide HE MFB
+ * @IWL_HE_HTC_LINK_ADAP_UNSOLICITED: the STA provides only unsolicited HE MFB
+ * @IWL_HE_HTC_LINK_ADAP_BOTH: the STA is capable of providing HE MFB in
+ *      response to HE MRQ and if the STA provides unsolicited HE MFB
+ */
+#define IWL_HE_HTC_LINK_ADAP_POS		(1)
+#define IWL_HE_HTC_LINK_ADAP_NO_FEEDBACK	(0)
+#define IWL_HE_HTC_LINK_ADAP_UNSOLICITED	(2 << IWL_HE_HTC_LINK_ADAP_POS)
+#define IWL_HE_HTC_LINK_ADAP_BOTH		(3 << IWL_HE_HTC_LINK_ADAP_POS)
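+
+/*
+ * Illustrative sketch (hypothetical helper): extract the 2-bit link
+ * adaptation subfield from the HTC flags using the position defined above.
+ */
+static inline u32 iwl_he_htc_link_adap(u32 htc_flags)
+{
+	return (htc_flags >> IWL_HE_HTC_LINK_ADAP_POS) & 0x3;
+}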
+
+/**
+ * struct iwl_he_sta_context_cmd - configure FW to work with HE AP
+ * @sta_id: STA id
+ * @tid_limit: max num of TIDs in TX HE-SU multi-TID agg
+ *	0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit
+ * @reserved1: reserved byte for future use
+ * @reserved2: reserved byte for future use
+ * @flags: see &enum iwl_he_sta_ctxt_flags
+ * @ref_bssid_addr: reference BSSID used by the AP
+ * @reserved0: reserved 2 bytes for aligning the ref_bssid_addr field to 8 bytes
+ * @htc_flags: which features are supported in HTC
+ * @frag_flags: frag support in A-MSDU
+ * @frag_level: frag support level
+ * @frag_max_num: max num of "open" MSDUs in the receiver (in power of 2)
+ * @frag_min_size: min frag size (except last frag)
+ * @pkt_ext: optional, exists according to PPE-present bit in the HE-PHY capa
+ * @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter-BSS frames
+ * @htc_trig_based_pkt_ext: default PE in 4us units
+ * @frame_time_rts_th: HE duration RTS threshold, in units of 32us
+ * @rand_alloc_ecwmin: random CWmin = 2**ECWmin-1
+ * @rand_alloc_ecwmax: random CWmax = 2**ECWmax-1
+ * @reserved3: reserved byte for future use
+ * @trig_based_txf: MU EDCA Parameter set for the trigger based traffic queues
+ */
+struct iwl_he_sta_context_cmd {
+	u8 sta_id;
+	u8 tid_limit;
+	u8 reserved1;
+	u8 reserved2;
+	__le32 flags;
+
+	/* The below fields are set via Multiple BSSID IE */
+	u8 ref_bssid_addr[6];
+	__le16 reserved0;
+
+	/* The below fields are set via HE-capabilities IE */
+	__le32 htc_flags;
+
+	u8 frag_flags;
+	u8 frag_level;
+	u8 frag_max_num;
+	u8 frag_min_size;
+
+	/* The below fields are set via PPE thresholds element */
+	struct iwl_he_pkt_ext pkt_ext;
+
+	/* The below fields are set via HE-Operation IE */
+	u8 bss_color;
+	u8 htc_trig_based_pkt_ext;
+	__le16 frame_time_rts_th;
+
+	/* Random access parameter set (i.e. RAPS) */
+	u8 rand_alloc_ecwmin;
+	u8 rand_alloc_ecwmax;
+	__le16 reserved3;
+
+	/* The below fields are set via MU EDCA parameter set element */
+	struct iwl_he_backoff_conf trig_based_txf[AC_NUM];
+} __packed; /* STA_CONTEXT_DOT11AX_API_S */
+
+#endif /* __iwl_fw_api_mac_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
new file mode 100644
index 0000000..6c53383
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -0,0 +1,418 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_fw_api_nvm_reg_h__
+#define __iwl_fw_api_nvm_reg_h__
+
+/**
+ * enum iwl_regulatory_and_nvm_subcmd_ids - regulatory/NVM commands
+ */
+enum iwl_regulatory_and_nvm_subcmd_ids {
+	/**
+	 * @NVM_ACCESS_COMPLETE: &struct iwl_nvm_access_complete_cmd
+	 */
+	NVM_ACCESS_COMPLETE = 0x0,
+
+	/**
+	 * @NVM_GET_INFO:
+	 * Command is &struct iwl_nvm_get_info,
+	 * response is &struct iwl_nvm_get_info_rsp
+	 */
+	NVM_GET_INFO = 0x2,
+};
+
+/**
+ * enum iwl_nvm_access_op - NVM access opcode
+ * @IWL_NVM_READ: read NVM
+ * @IWL_NVM_WRITE: write NVM
+ */
+enum iwl_nvm_access_op {
+	IWL_NVM_READ	= 0,
+	IWL_NVM_WRITE	= 1,
+};
+
+/**
+ * enum iwl_nvm_access_target - target of the NVM_ACCESS_CMD
+ * @NVM_ACCESS_TARGET_CACHE: access the cache
+ * @NVM_ACCESS_TARGET_OTP: access the OTP
+ * @NVM_ACCESS_TARGET_EEPROM: access the EEPROM
+ */
+enum iwl_nvm_access_target {
+	NVM_ACCESS_TARGET_CACHE = 0,
+	NVM_ACCESS_TARGET_OTP = 1,
+	NVM_ACCESS_TARGET_EEPROM = 2,
+};
+
+/**
+ * enum iwl_nvm_section_type - section types for NVM_ACCESS_CMD
+ * @NVM_SECTION_TYPE_SW: software section
+ * @NVM_SECTION_TYPE_REGULATORY: regulatory section
+ * @NVM_SECTION_TYPE_CALIBRATION: calibration section
+ * @NVM_SECTION_TYPE_PRODUCTION: production section
+ * @NVM_SECTION_TYPE_REGULATORY_SDP: regulatory section used by 3168 series
+ * @NVM_SECTION_TYPE_MAC_OVERRIDE: MAC override section
+ * @NVM_SECTION_TYPE_PHY_SKU: PHY SKU section
+ * @NVM_MAX_NUM_SECTIONS: number of sections
+ */
+enum iwl_nvm_section_type {
+	NVM_SECTION_TYPE_SW = 1,
+	NVM_SECTION_TYPE_REGULATORY = 3,
+	NVM_SECTION_TYPE_CALIBRATION = 4,
+	NVM_SECTION_TYPE_PRODUCTION = 5,
+	NVM_SECTION_TYPE_REGULATORY_SDP = 8,
+	NVM_SECTION_TYPE_MAC_OVERRIDE = 11,
+	NVM_SECTION_TYPE_PHY_SKU = 12,
+	NVM_MAX_NUM_SECTIONS = 13,
+};
+
+/**
+ * struct iwl_nvm_access_cmd - Request the device to send an NVM section
+ * @op_code: &enum iwl_nvm_access_op
+ * @target: &enum iwl_nvm_access_target
+ * @type: &enum iwl_nvm_section_type
+ * @offset: offset in bytes into the section
+ * @length: in bytes, to read/write
+ * @data: if write operation, the data to write; empty on read
+ */
+struct iwl_nvm_access_cmd {
+	u8 op_code;
+	u8 target;
+	__le16 type;
+	__le16 offset;
+	__le16 length;
+	u8 data[];
+} __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */
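+
+/*
+ * Illustrative sketch (hypothetical helper, not part of the API): fill an
+ * NVM read request. The command is variable-length; for a read the
+ * trailing @data stays empty, so only the fixed header is sent.
+ */
+static inline void iwl_fill_nvm_read_cmd(struct iwl_nvm_access_cmd *cmd,
+					 u16 section, u16 offset, u16 length)
+{
+	cmd->op_code = IWL_NVM_READ;
+	cmd->target = NVM_ACCESS_TARGET_CACHE;
+	cmd->type = cpu_to_le16(section);
+	cmd->offset = cpu_to_le16(offset);
+	cmd->length = cpu_to_le16(length);
+}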
+
+/**
+ * struct iwl_nvm_access_resp - response to NVM_ACCESS_CMD
+ * @offset: offset in bytes into the section
+ * @length: in bytes, either how much was written or read
+ * @type: NVM_SECTION_TYPE_*
+ * @status: 0 for success, fail otherwise
+ * @data: if read operation, the data returned. Empty on write.
+ */
+struct iwl_nvm_access_resp {
+	__le16 offset;
+	__le16 length;
+	__le16 type;
+	__le16 status;
+	u8 data[];
+} __packed; /* NVM_ACCESS_CMD_RESP_API_S_VER_2 */
+
+/*
+ * struct iwl_nvm_get_info - request to get NVM data
+ */
+struct iwl_nvm_get_info {
+	__le32 reserved;
+} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_S_VER_1 */
+
+/**
+ * enum iwl_nvm_info_general_flags - flags in NVM_GET_INFO resp
+ * @NVM_GENERAL_FLAGS_EMPTY_OTP: 1 if OTP is empty
+ */
+enum iwl_nvm_info_general_flags {
+	NVM_GENERAL_FLAGS_EMPTY_OTP	= BIT(0),
+};
+
+/**
+ * struct iwl_nvm_get_info_general - general NVM data
+ * @flags: bit 0: 1 - empty, 0 - non-empty
+ * @nvm_version: nvm version
+ * @board_type: board type
+ * @reserved: reserved
+ */
+struct iwl_nvm_get_info_general {
+	__le32 flags;
+	__le16 nvm_version;
+	u8 board_type;
+	u8 reserved;
+} __packed; /* GRP_REGULATORY_NVM_GET_INFO_GENERAL_S_VER_1 */
+
+/**
+ * enum iwl_nvm_mac_sku_flags - flags in &iwl_nvm_get_info_sku
+ * @NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED: true if 2.4 band enabled
+ * @NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED: true if 5.2 band enabled
+ * @NVM_MAC_SKU_FLAGS_802_11N_ENABLED: true if 11n enabled
+ * @NVM_MAC_SKU_FLAGS_802_11AC_ENABLED: true if 11ac enabled
+ * @NVM_MAC_SKU_FLAGS_MIMO_DISABLED: true if MIMO disabled
+ * @NVM_MAC_SKU_FLAGS_WAPI_ENABLED: true if WAPI enabled
+ * @NVM_MAC_SKU_FLAGS_REG_CHECK_ENABLED: true if regulatory checker enabled
+ * @NVM_MAC_SKU_FLAGS_API_LOCK_ENABLED: true if API lock enabled
+ */
+enum iwl_nvm_mac_sku_flags {
+	NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED	= BIT(0),
+	NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED	= BIT(1),
+	NVM_MAC_SKU_FLAGS_802_11N_ENABLED	= BIT(2),
+	NVM_MAC_SKU_FLAGS_802_11AC_ENABLED	= BIT(3),
+	/**
+	 * @NVM_MAC_SKU_FLAGS_802_11AX_ENABLED: true if 11ax enabled
+	 */
+	NVM_MAC_SKU_FLAGS_802_11AX_ENABLED	= BIT(4),
+	NVM_MAC_SKU_FLAGS_MIMO_DISABLED		= BIT(5),
+	NVM_MAC_SKU_FLAGS_WAPI_ENABLED		= BIT(8),
+	NVM_MAC_SKU_FLAGS_REG_CHECK_ENABLED	= BIT(14),
+	NVM_MAC_SKU_FLAGS_API_LOCK_ENABLED	= BIT(15),
+};
+
+/**
+ * struct iwl_nvm_get_info_sku - mac information
+ * @mac_sku_flags: flags for SKU, see &enum iwl_nvm_mac_sku_flags
+ */
+struct iwl_nvm_get_info_sku {
+	__le32 mac_sku_flags;
+} __packed; /* REGULATORY_NVM_GET_INFO_MAC_SKU_SECTION_S_VER_2 */
+
+/**
+ * struct iwl_nvm_get_info_phy - phy information
+ * @tx_chains: BIT 0 chain A, BIT 1 chain B
+ * @rx_chains: BIT 0 chain A, BIT 1 chain B
+ */
+struct iwl_nvm_get_info_phy {
+	__le32 tx_chains;
+	__le32 rx_chains;
+} __packed; /* GRP_REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */
+
+#define IWL_NUM_CHANNELS (51)
+
+/**
+ * struct iwl_nvm_get_info_regulatory - regulatory information
+ * @lar_enabled: is LAR enabled
+ * @channel_profile: regulatory data of this channel
+ * @reserved: reserved
+ */
+struct iwl_nvm_get_info_regulatory {
+	__le32 lar_enabled;
+	__le16 channel_profile[IWL_NUM_CHANNELS];
+	__le16 reserved;
+} __packed; /* GRP_REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_1 */
+
+/**
+ * struct iwl_nvm_get_info_rsp - response to get NVM data
+ * @general: general NVM data
+ * @mac_sku: data relating to MAC sku
+ * @phy_sku: data relating to PHY sku
+ * @regulatory: regulatory data
+ */
+struct iwl_nvm_get_info_rsp {
+	struct iwl_nvm_get_info_general general;
+	struct iwl_nvm_get_info_sku mac_sku;
+	struct iwl_nvm_get_info_phy phy_sku;
+	struct iwl_nvm_get_info_regulatory regulatory;
+} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_RSP_S_VER_2 */
+
+/**
+ * struct iwl_nvm_access_complete_cmd - NVM_ACCESS commands are completed
+ * @reserved: reserved
+ */
+struct iwl_nvm_access_complete_cmd {
+	__le32 reserved;
+} __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_mcc_update_cmd_v1 - Request the device to update geographic
+ * regulatory profile according to the given MCC (Mobile Country Code).
+ * The MCC is a two-letter code, ASCII uppercase [A-Z], or '00' for the
+ * world domain.
+ * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the
+ * MCC in the cmd response will be the relevant MCC in the NVM.
+ * @mcc: given mobile country code
+ * @source_id: the source from which we got the MCC, see iwl_mcc_source
+ * @reserved: reserved for alignment
+ */
+struct iwl_mcc_update_cmd_v1 {
+	__le16 mcc;
+	u8 source_id;
+	u8 reserved;
+} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_mcc_update_cmd - Request the device to update geographic
+ * regulatory profile according to the given MCC (Mobile Country Code).
+ * The MCC is a two-letter code, ASCII uppercase [A-Z], or '00' for the
+ * world domain.
+ * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the
+ * MCC in the cmd response will be the relevant MCC in the NVM.
+ * @mcc: given mobile country code
+ * @source_id: the source from which we got the MCC, see iwl_mcc_source
+ * @reserved: reserved for alignment
+ * @key: integrity key for MCC API OEM testing
+ * @reserved2: reserved
+ */
+struct iwl_mcc_update_cmd {
+	__le16 mcc;
+	u8 source_id;
+	u8 reserved;
+	__le32 key;
+	u8 reserved2[20];
+} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_2 */
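+
+/*
+ * Illustrative sketch (hypothetical helper): the 16-bit @mcc field packs
+ * the two ASCII letters of the country code with the first letter in the
+ * upper byte, e.g. "US" becomes ('U' << 8) | 'S'.
+ */
+static inline void iwl_fill_mcc_update_cmd(struct iwl_mcc_update_cmd *cmd,
+					   const char *alpha2, u8 source_id)
+{
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->mcc = cpu_to_le16(alpha2[0] << 8 | alpha2[1]);
+	cmd->source_id = source_id;
+}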
+
+/**
+ * struct iwl_mcc_update_resp_v1 - response to MCC_UPDATE_CMD.
+ * Contains the new channel control profile map, if changed, and the new MCC
+ * (mobile country code).
+ * The new MCC may be different than what was requested in MCC_UPDATE_CMD.
+ * @status: see &enum iwl_mcc_update_status
+ * @mcc: the new applied MCC
+ * @cap: capabilities for all channels which matches the MCC
+ * @source_id: the MCC source, see iwl_mcc_source
+ * @n_channels: number of channels in @channels (may be 14, 39, 50 or 51
+ *		channels, depending on platform)
+ * @channels: channel control data map, DWORD for each channel. Only the first
+ *	16bits are used.
+ */
+struct iwl_mcc_update_resp_v1  {
+	__le32 status;
+	__le16 mcc;
+	u8 cap;
+	u8 source_id;
+	__le32 n_channels;
+	__le32 channels[0];
+} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_1 */
+
+/**
+ * enum iwl_geo_information - geographic information.
+ * @GEO_NO_INFO: no special info for this geo profile.
+ * @GEO_WMM_ETSI_5GHZ_INFO: this geo profile limits the WMM params
+ *	for the 5 GHz band.
+ */
+enum iwl_geo_information {
+	GEO_NO_INFO =			0,
+	GEO_WMM_ETSI_5GHZ_INFO =	BIT(0),
+};
+
+/**
+ * struct iwl_mcc_update_resp - response to MCC_UPDATE_CMD.
+ * Contains the new channel control profile map, if changed, and the new MCC
+ * (mobile country code).
+ * The new MCC may be different than what was requested in MCC_UPDATE_CMD.
+ * @status: see &enum iwl_mcc_update_status
+ * @mcc: the new applied MCC
+ * @cap: capabilities for all channels which matches the MCC
+ * @source_id: the MCC source, see iwl_mcc_source
+ * @time: time elapsed from the MCC test start (in units of 30 seconds)
+ * @geo_info: geographic specific profile information
+ *	see &enum iwl_geo_information.
+ * @n_channels: number of channels in @channels (may be 14, 39, 50 or 51
+ *		channels, depending on platform)
+ * @channels: channel control data map, DWORD for each channel. Only the first
+ *	16bits are used.
+ */
+struct iwl_mcc_update_resp {
+	__le32 status;
+	__le16 mcc;
+	u8 cap;
+	u8 source_id;
+	__le16 time;
+	__le16 geo_info;
+	__le32 n_channels;
+	__le32 channels[0];
+} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_3 */
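+
+/*
+ * Illustrative sketch (hypothetical helper): per the documentation above,
+ * only the low 16 bits of each channel DWORD carry channel control flags.
+ */
+static inline u16 iwl_mcc_resp_channel_flags(const struct iwl_mcc_update_resp *resp,
+					     int idx)
+{
+	return le32_to_cpu(resp->channels[idx]) & 0xffff;
+}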
+
+/**
+ * struct iwl_mcc_chub_notif - chub notifies of mcc change
+ * (MCC_CHUB_UPDATE_CMD = 0xc9)
+ * The Chub (Communication Hub, CommsHUB) is a HW component that connects to
+ * the cellular and connectivity cores, gets updates of the MCC, and
+ * notifies the ucode directly of any MCC change.
+ * The ucode requests the driver to request the device to update the
+ * geographic regulatory profile according to the given MCC (Mobile Country
+ * Code). The MCC is a two-letter code, ASCII uppercase [A-Z], or '00' for
+ * the world domain.
+ * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the
+ * MCC in the cmd response will be the relevant MCC in the NVM.
+ * @mcc: given mobile country code
+ * @source_id: identity of the change originator, see iwl_mcc_source
+ * @reserved1: reserved for alignment
+ */
+struct iwl_mcc_chub_notif {
+	__le16 mcc;
+	u8 source_id;
+	u8 reserved1;
+} __packed; /* LAR_MCC_NOTIFY_S */
+
+enum iwl_mcc_update_status {
+	MCC_RESP_NEW_CHAN_PROFILE,
+	MCC_RESP_SAME_CHAN_PROFILE,
+	MCC_RESP_INVALID,
+	MCC_RESP_NVM_DISABLED,
+	MCC_RESP_ILLEGAL,
+	MCC_RESP_LOW_PRIORITY,
+	MCC_RESP_TEST_MODE_ACTIVE,
+	MCC_RESP_TEST_MODE_NOT_ACTIVE,
+	MCC_RESP_TEST_MODE_DENIAL_OF_SERVICE,
+};
+
+enum iwl_mcc_source {
+	MCC_SOURCE_OLD_FW = 0,
+	MCC_SOURCE_ME = 1,
+	MCC_SOURCE_BIOS = 2,
+	MCC_SOURCE_3G_LTE_HOST = 3,
+	MCC_SOURCE_3G_LTE_DEVICE = 4,
+	MCC_SOURCE_WIFI = 5,
+	MCC_SOURCE_RESERVED = 6,
+	MCC_SOURCE_DEFAULT = 7,
+	MCC_SOURCE_UNINITIALIZED = 8,
+	MCC_SOURCE_MCC_API = 9,
+	MCC_SOURCE_GET_CURRENT = 0x10,
+	MCC_SOURCE_GETTING_MCC_TEST_MODE = 0x11,
+};
+
+#endif /* __iwl_fw_api_nvm_reg_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
new file mode 100644
index 0000000..53cab99
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
@@ -0,0 +1,101 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_fw_api_offload_h__
+#define __iwl_fw_api_offload_h__
+
+/**
+ * enum iwl_prot_offload_subcmd_ids - protocol offload commands
+ */
+enum iwl_prot_offload_subcmd_ids {
+	/**
+	 * @STORED_BEACON_NTF: &struct iwl_stored_beacon_notif
+	 */
+	STORED_BEACON_NTF = 0xFF,
+};
+
+#define MAX_STORED_BEACON_SIZE 600
+
+/**
+ * struct iwl_stored_beacon_notif - Stored beacon notification
+ *
+ * @system_time: system time on air rise
+ * @tsf: TSF on air rise
+ * @beacon_timestamp: beacon on air rise
+ * @band: band, matches &RX_RES_PHY_FLAGS_BAND_24 definition
+ * @channel: channel this beacon was received on
+ * @rates: rate in ucode internal format
+ * @byte_count: frame's byte count
+ * @data: beacon data, length in @byte_count
+ */
+struct iwl_stored_beacon_notif {
+	__le32 system_time;
+	__le64 tsf;
+	__le32 beacon_timestamp;
+	__le16 band;
+	__le16 channel;
+	__le32 rates;
+	__le32 byte_count;
+	u8 data[MAX_STORED_BEACON_SIZE];
+} __packed; /* WOWLAN_STORED_BEACON_INFO_S_VER_2 */
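+
+/*
+ * Illustrative sketch (hypothetical helper): @byte_count bounds the valid
+ * part of @data, so a consumer should clamp it to the buffer size before
+ * copying the frame out.
+ */
+static inline u32 iwl_stored_beacon_len(const struct iwl_stored_beacon_notif *notif)
+{
+	return min_t(u32, le32_to_cpu(notif->byte_count),
+		     MAX_STORED_BEACON_SIZE);
+}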
+
+#endif /* __iwl_fw_api_offload_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/paging.h b/drivers/net/wireless/intel/iwlwifi/fw/api/paging.h
new file mode 100644
index 0000000..721b9fe
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/paging.h
@@ -0,0 +1,84 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_fw_api_paging_h__
+#define __iwl_fw_api_paging_h__
+
+#define NUM_OF_FW_PAGING_BLOCKS	33 /* 32 for data and 1 block for CSS */
+
+/**
+ * struct iwl_fw_paging_cmd - paging layout
+ *
+ * Send to FW the paging layout in the driver.
+ *
+ * @flags: various flags for the command
+ * @block_size: the block size in powers of 2
+ * @block_num: number of blocks specified in the command.
+ * @device_phy_addr: virtual addresses from device side
+ */
+struct iwl_fw_paging_cmd {
+	__le32 flags;
+	__le32 block_size;
+	__le32 block_num;
+	__le32 device_phy_addr[NUM_OF_FW_PAGING_BLOCKS];
+} __packed; /* FW_PAGING_BLOCK_CMD_API_S_VER_1 */
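+
+/*
+ * Illustrative sketch (hypothetical helper), reading "in powers of 2" above
+ * as the log2 exponent of the block size in bytes (e.g. 15 for 32 KiB
+ * blocks); ilog2() comes from <linux/log2.h>.
+ */
+static inline __le32 iwl_paging_block_size(u32 block_bytes)
+{
+	return cpu_to_le32(ilog2(block_bytes));
+}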
+
+#endif /* __iwl_fw_api_paging_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
new file mode 100644
index 0000000..45f61c6
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
@@ -0,0 +1,164 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_fw_api_phy_ctxt_h__
+#define __iwl_fw_api_phy_ctxt_h__
+
+/* Supported bands */
+#define PHY_BAND_5  (0)
+#define PHY_BAND_24 (1)
+
+/* Supported channel width, vary if there is VHT support */
+#define PHY_VHT_CHANNEL_MODE20	(0x0)
+#define PHY_VHT_CHANNEL_MODE40	(0x1)
+#define PHY_VHT_CHANNEL_MODE80	(0x2)
+#define PHY_VHT_CHANNEL_MODE160	(0x3)
+
+/*
+ * Control channel position:
+ * For legacy, a set bit means upper channel, otherwise lower.
+ * For VHT - bit-2 marks if the control is lower/upper relative to center-freq,
+ *   bits-1:0 mark the distance from the center freq. For 20MHz, the offset is 0.
+ *                                   center_freq
+ *                                        |
+ * 40Mhz                          |_______|_______|
+ * 80Mhz                  |_______|_______|_______|_______|
+ * 160Mhz |_______|_______|_______|_______|_______|_______|_______|_______|
+ * code      011     010     001     000  |  100     101     110    111
+ */
+#define PHY_VHT_CTRL_POS_1_BELOW  (0x0)
+#define PHY_VHT_CTRL_POS_2_BELOW  (0x1)
+#define PHY_VHT_CTRL_POS_3_BELOW  (0x2)
+#define PHY_VHT_CTRL_POS_4_BELOW  (0x3)
+#define PHY_VHT_CTRL_POS_1_ABOVE  (0x4)
+#define PHY_VHT_CTRL_POS_2_ABOVE  (0x5)
+#define PHY_VHT_CTRL_POS_3_ABOVE  (0x6)
+#define PHY_VHT_CTRL_POS_4_ABOVE  (0x7)
+
+/*
+ * @band: PHY_BAND_*
+ * @channel: channel number
+ * @width: PHY_[VHT|LEGACY]_CHANNEL_*
+ * @ctrl_pos: PHY_[VHT|LEGACY]_CTRL_*
+ */
+struct iwl_fw_channel_info {
+	u8 band;
+	u8 channel;
+	u8 width;
+	u8 ctrl_pos;
+} __packed;
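+
+/*
+ * Illustrative sketch (hypothetical helper): describe an 80 MHz channel on
+ * the 5 GHz band whose control channel is the first 20 MHz below the
+ * center frequency.
+ */
+static inline void iwl_fill_channel_info_80(struct iwl_fw_channel_info *ci,
+					    u8 channel)
+{
+	ci->band = PHY_BAND_5;
+	ci->channel = channel;
+	ci->width = PHY_VHT_CHANNEL_MODE80;
+	ci->ctrl_pos = PHY_VHT_CTRL_POS_1_BELOW;
+}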
+
+#define PHY_RX_CHAIN_DRIVER_FORCE_POS	(0)
+#define PHY_RX_CHAIN_DRIVER_FORCE_MSK \
+	(0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS)
+#define PHY_RX_CHAIN_VALID_POS		(1)
+#define PHY_RX_CHAIN_VALID_MSK \
+	(0x7 << PHY_RX_CHAIN_VALID_POS)
+#define PHY_RX_CHAIN_FORCE_SEL_POS	(4)
+#define PHY_RX_CHAIN_FORCE_SEL_MSK \
+	(0x7 << PHY_RX_CHAIN_FORCE_SEL_POS)
+#define PHY_RX_CHAIN_FORCE_MIMO_SEL_POS	(7)
+#define PHY_RX_CHAIN_FORCE_MIMO_SEL_MSK \
+	(0x7 << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS)
+#define PHY_RX_CHAIN_CNT_POS		(10)
+#define PHY_RX_CHAIN_CNT_MSK \
+	(0x3 << PHY_RX_CHAIN_CNT_POS)
+#define PHY_RX_CHAIN_MIMO_CNT_POS	(12)
+#define PHY_RX_CHAIN_MIMO_CNT_MSK \
+	(0x3 << PHY_RX_CHAIN_MIMO_CNT_POS)
+#define PHY_RX_CHAIN_MIMO_FORCE_POS	(14)
+#define PHY_RX_CHAIN_MIMO_FORCE_MSK \
+	(0x1 << PHY_RX_CHAIN_MIMO_FORCE_POS)
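+
+/*
+ * Illustrative sketch (hypothetical helper): compose an rxchain_info word
+ * from the fields above, e.g. valid_chains = 0x3 (A+B), cnt = 1 active
+ * chain and mimo_cnt = 0 for a SISO configuration.
+ */
+static inline __le32 iwl_compose_rxchain_info(u32 valid_chains, u32 cnt,
+					      u32 mimo_cnt)
+{
+	u32 val = (valid_chains << PHY_RX_CHAIN_VALID_POS) |
+		  (cnt << PHY_RX_CHAIN_CNT_POS) |
+		  (mimo_cnt << PHY_RX_CHAIN_MIMO_CNT_POS);
+
+	return cpu_to_le32(val);
+}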
+
+/* TODO: fix the value, make it depend on firmware at runtime? */
+#define NUM_PHY_CTX	3
+
+/* TODO: complete missing documentation */
+/**
+ * struct iwl_phy_context_cmd - config of the PHY context
+ * ( PHY_CONTEXT_CMD = 0x8 )
+ * @id_and_color: ID and color of the relevant Binding
+ * @action: action to perform, one of FW_CTXT_ACTION_*
+ * @apply_time: 0 means immediate apply and context switch.
+ *	any other value means apply the new params after that many usecs
+ * @tx_param_color: ???
+ * @ci: channel info
+ * @txchain_info: ???
+ * @rxchain_info: ???
+ * @acquisition_data: ???
+ * @dsp_cfg_flags: set to 0
+ */
+struct iwl_phy_context_cmd {
+	/* COMMON_INDEX_HDR_API_S_VER_1 */
+	__le32 id_and_color;
+	__le32 action;
+	/* PHY_CONTEXT_DATA_API_S_VER_1 */
+	__le32 apply_time;
+	__le32 tx_param_color;
+	struct iwl_fw_channel_info ci;
+	__le32 txchain_info;
+	__le32 rxchain_info;
+	__le32 acquisition_data;
+	__le32 dsp_cfg_flags;
+} __packed; /* PHY_CONTEXT_CMD_API_VER_1 */
+
+#endif /* __iwl_fw_api_phy_ctxt_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
new file mode 100644
index 0000000..9cc59e0
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
@@ -0,0 +1,258 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_fw_api_phy_h__
+#define __iwl_fw_api_phy_h__
+
+/**
+ * enum iwl_phy_ops_subcmd_ids - PHY group commands
+ */
+enum iwl_phy_ops_subcmd_ids {
+	/**
+	 * @CMD_DTS_MEASUREMENT_TRIGGER_WIDE:
+	 * Uses either &struct iwl_dts_measurement_cmd or
+	 * &struct iwl_ext_dts_measurement_cmd
+	 */
+	CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0,
+
+	/**
+	 * @CTDP_CONFIG_CMD: &struct iwl_mvm_ctdp_cmd
+	 */
+	CTDP_CONFIG_CMD = 0x03,
+
+	/**
+	 * @TEMP_REPORTING_THRESHOLDS_CMD: &struct temp_report_ths_cmd
+	 */
+	TEMP_REPORTING_THRESHOLDS_CMD = 0x04,
+
+	/**
+	 * @GEO_TX_POWER_LIMIT: &struct iwl_geo_tx_power_profiles_cmd
+	 */
+	GEO_TX_POWER_LIMIT = 0x05,
+
+	/**
+	 * @CT_KILL_NOTIFICATION: &struct ct_kill_notif
+	 */
+	CT_KILL_NOTIFICATION = 0xFE,
+
+	/**
+	 * @DTS_MEASUREMENT_NOTIF_WIDE:
+	 * &struct iwl_dts_measurement_notif_v1 or
+	 * &struct iwl_dts_measurement_notif_v2
+	 */
+	DTS_MEASUREMENT_NOTIF_WIDE = 0xFF,
+};
+
+/* DTS measurements */
+
+enum iwl_dts_measurement_flags {
+	DTS_TRIGGER_CMD_FLAGS_TEMP	= BIT(0),
+	DTS_TRIGGER_CMD_FLAGS_VOLT	= BIT(1),
+};
+
+/**
+ * struct iwl_dts_measurement_cmd - request DTS temp and/or voltage measurements
+ *
+ * @flags: indicates which measurements we want as specified in
+ *	&enum iwl_dts_measurement_flags
+ */
+struct iwl_dts_measurement_cmd {
+	__le32 flags;
+} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_CMD_S */
+
+/**
+* enum iwl_dts_control_measurement_mode - DTS measurement type
+* @DTS_AUTOMATIC: Automatic mode (full SW control). Provide temperature read
+*                 back (latest value; does not wait for a new value). Use
+*                 automatic SW DTS configuration.
+* @DTS_REQUEST_READ: Request DTS read. Configure DTS with manual settings,
+*                    trigger DTS reading and provide read back temperature read
+*                    when available.
+* @DTS_OVER_WRITE: over-write the DTS temperatures in the SW until next read
+* @DTS_DIRECT_WITHOUT_MEASURE: DTS returns its latest temperature result,
+*                              without measurement trigger.
+*/
+enum iwl_dts_control_measurement_mode {
+	DTS_AUTOMATIC			= 0,
+	DTS_REQUEST_READ		= 1,
+	DTS_OVER_WRITE			= 2,
+	DTS_DIRECT_WITHOUT_MEASURE	= 3,
+};
+
+/**
+* enum iwl_dts_used - DTS to use or used for measurement in the DTS request
+* @DTS_USE_TOP: Top
+* @DTS_USE_CHAIN_A: chain A
+* @DTS_USE_CHAIN_B: chain B
+* @DTS_USE_CHAIN_C: chain C
+* @XTAL_TEMPERATURE: read temperature from xtal
+*/
+enum iwl_dts_used {
+	DTS_USE_TOP		= 0,
+	DTS_USE_CHAIN_A		= 1,
+	DTS_USE_CHAIN_B		= 2,
+	DTS_USE_CHAIN_C		= 3,
+	XTAL_TEMPERATURE	= 4,
+};
+
+/**
+* enum iwl_dts_bit_mode - bit-mode to use in DTS request read mode
+* @DTS_BIT6_MODE: bit 6 mode
+* @DTS_BIT8_MODE: bit 8 mode
+*/
+enum iwl_dts_bit_mode {
+	DTS_BIT6_MODE	= 0,
+	DTS_BIT8_MODE	= 1,
+};
+
+/**
+ * struct iwl_ext_dts_measurement_cmd - request extended DTS temp measurements
+ * @control_mode: see &enum iwl_dts_control_measurement_mode
+ * @temperature: used when the over-write DTS mode is selected
+ * @sensor: set temperature sensor to use. See &enum iwl_dts_used
+ * @avg_factor: average factor to DTS in request DTS read mode
+ * @bit_mode: value defines the DTS bit mode to use. See &enum iwl_dts_bit_mode
+ * @step_duration: step duration for the DTS
+ */
+struct iwl_ext_dts_measurement_cmd {
+	__le32 control_mode;
+	__le32 temperature;
+	__le32 sensor;
+	__le32 avg_factor;
+	__le32 bit_mode;
+	__le32 step_duration;
+} __packed; /* XVT_FW_DTS_CONTROL_MEASUREMENT_REQUEST_API_S */
+
+/**
+ * struct iwl_dts_measurement_notif_v1 - measurements notification
+ *
+ * @temp: the measured temperature
+ * @voltage: the measured voltage
+ */
+struct iwl_dts_measurement_notif_v1 {
+	__le32 temp;
+	__le32 voltage;
+} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_1*/
+
+/**
+ * struct iwl_dts_measurement_notif_v2 - measurements notification
+ *
+ * @temp: the measured temperature
+ * @voltage: the measured voltage
+ * @threshold_idx: the trip index that was crossed
+ */
+struct iwl_dts_measurement_notif_v2 {
+	__le32 temp;
+	__le32 voltage;
+	__le32 threshold_idx;
+} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_2 */
+
+/**
+ * struct ct_kill_notif - CT-kill entry notification
+ *
+ * @temperature: the current temperature in Celsius
+ * @reserved: reserved
+ */
+struct ct_kill_notif {
+	__le16 temperature;
+	__le16 reserved;
+} __packed; /* GRP_PHY_CT_KILL_NTF */
+
+/**
+* enum iwl_mvm_ctdp_cmd_operation - CTDP command operations
+* @CTDP_CMD_OPERATION_START: update the current budget
+* @CTDP_CMD_OPERATION_STOP: stop ctdp
+* @CTDP_CMD_OPERATION_REPORT: get the average budget
+*/
+enum iwl_mvm_ctdp_cmd_operation {
+	CTDP_CMD_OPERATION_START	= 0x1,
+	CTDP_CMD_OPERATION_STOP		= 0x2,
+	CTDP_CMD_OPERATION_REPORT	= 0x4,
+}; /* CTDP_CMD_OPERATION_TYPE_E */
+
+/**
+ * struct iwl_mvm_ctdp_cmd - track and manage the FW power consumption budget
+ *
+ * @operation: see &enum iwl_mvm_ctdp_cmd_operation
+ * @budget: the budget in milliwatts
+ * @window_size: defined in API but not used
+ */
+struct iwl_mvm_ctdp_cmd {
+	__le32 operation;
+	__le32 budget;
+	__le32 window_size;
+} __packed;
+
+#define IWL_MAX_DTS_TRIPS	8
+
+/**
+ * struct temp_report_ths_cmd - set temperature thresholds
+ *
+ * @num_temps: number of temperature thresholds passed
+ * @thresholds: array with the thresholds to be configured
+ */
+struct temp_report_ths_cmd {
+	__le32 num_temps;
+	__le16 thresholds[IWL_MAX_DTS_TRIPS];
+} __packed; /* GRP_PHY_TEMP_REPORTING_THRESHOLDS_CMD */
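+
+/*
+ * Illustrative sketch (hypothetical helper): configure a single threshold;
+ * unused slots stay zero and @num_temps tells the firmware how many
+ * entries are valid.
+ */
+static inline void iwl_fill_single_temp_ths(struct temp_report_ths_cmd *cmd,
+					    u16 threshold)
+{
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->num_temps = cpu_to_le32(1);
+	cmd->thresholds[0] = cpu_to_le16(threshold);
+}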
+
+#endif /* __iwl_fw_api_phy_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
new file mode 100644
index 0000000..a3c77e0
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
@@ -0,0 +1,525 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_fw_api_power_h__
+#define __iwl_fw_api_power_h__
+
+/* Power Management Commands, Responses, Notifications */
+
+/**
+ * enum iwl_ltr_config_flags - masks for LTR config command flags
+ * @LTR_CFG_FLAG_FEATURE_ENABLE: Feature operational status
+ * @LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS: allow LTR change on shadow
+ *	memory access
+ * @LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH: allow LTR msg send on ANY LTR
+ *	reg change
+ * @LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3: allow LTR msg send on transition from
+ *	D0 to D3
+ * @LTR_CFG_FLAG_SW_SET_SHORT: fixed static short LTR register
+ * @LTR_CFG_FLAG_SW_SET_LONG: fixed static long LTR register
+ * @LTR_CFG_FLAG_DENIE_C10_ON_PD: allow going into C10 on PD
+ * @LTR_CFG_FLAG_UPDATE_VALUES: update config values and short
+ *	idle timeout
+ */
+enum iwl_ltr_config_flags {
+	LTR_CFG_FLAG_FEATURE_ENABLE = BIT(0),
+	LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS = BIT(1),
+	LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH = BIT(2),
+	LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3 = BIT(3),
+	LTR_CFG_FLAG_SW_SET_SHORT = BIT(4),
+	LTR_CFG_FLAG_SW_SET_LONG = BIT(5),
+	LTR_CFG_FLAG_DENIE_C10_ON_PD = BIT(6),
+	LTR_CFG_FLAG_UPDATE_VALUES = BIT(7),
+};
+
+/**
+ * struct iwl_ltr_config_cmd_v1 - configures the LTR
+ * @flags: See &enum iwl_ltr_config_flags
+ * @static_long: static LTR Long register value.
+ * @static_short: static LTR Short register value.
+ */
+struct iwl_ltr_config_cmd_v1 {
+	__le32 flags;
+	__le32 static_long;
+	__le32 static_short;
+} __packed; /* LTR_CAPABLE_API_S_VER_1 */
+
+#define LTR_VALID_STATES_NUM 4
+
+/**
+ * struct iwl_ltr_config_cmd - configures the LTR
+ * @flags: See &enum iwl_ltr_config_flags
+ * @static_long: static LTR Long register value.
+ * @static_short: static LTR Short register value.
+ * @ltr_cfg_values: LTR parameters table values (in usec) in the following order:
+ *	TX, RX, Short Idle, Long Idle. Used only if %LTR_CFG_FLAG_UPDATE_VALUES
+ *	is set.
+ * @ltr_short_idle_timeout: LTR Short Idle timeout (in usec). Used only if
+ *	%LTR_CFG_FLAG_UPDATE_VALUES is set.
+ */
+struct iwl_ltr_config_cmd {
+	__le32 flags;
+	__le32 static_long;
+	__le32 static_short;
+	__le32 ltr_cfg_values[LTR_VALID_STATES_NUM];
+	__le32 ltr_short_idle_timeout;
+} __packed; /* LTR_CAPABLE_API_S_VER_2 */
+
+/* Radio LP RX Energy Threshold measured in dBm */
+#define POWER_LPRX_RSSI_THRESHOLD	75
+#define POWER_LPRX_RSSI_THRESHOLD_MAX	94
+#define POWER_LPRX_RSSI_THRESHOLD_MIN	30
+
+/**
+ * enum iwl_power_flags - masks for power table command flags
+ * @POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' allows saving power by turning off the
+ *		receiver and transmitter; '0' does not allow it.
+ * @POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK: '0' Driver disables power management,
+ *		'1' Driver enables PM (use rest of parameters)
+ * @POWER_FLAGS_SKIP_OVER_DTIM_MSK: '0' PM has to wake up every DTIM,
+ *		'1' PM may sleep over DTIM until the listen interval.
+ * @POWER_FLAGS_SNOOZE_ENA_MSK: Enable snoozing only if uAPSD is enabled and all
+ *		access categories are both delivery and trigger enabled.
+ * @POWER_FLAGS_BT_SCO_ENA: Enable BT SCO coex only if uAPSD and
+ *		PBW Snoozing enabled
+ * @POWER_FLAGS_ADVANCE_PM_ENA_MSK: Advanced PM (uAPSD) enable mask
+ * @POWER_FLAGS_LPRX_ENA_MSK: Low Power RX enable.
+ * @POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK: AP/GO's uAPSD misbehaving
+ *		detection enablement
+*/
+enum iwl_power_flags {
+	POWER_FLAGS_POWER_SAVE_ENA_MSK		= BIT(0),
+	POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK	= BIT(1),
+	POWER_FLAGS_SKIP_OVER_DTIM_MSK		= BIT(2),
+	POWER_FLAGS_SNOOZE_ENA_MSK		= BIT(5),
+	POWER_FLAGS_BT_SCO_ENA			= BIT(8),
+	POWER_FLAGS_ADVANCE_PM_ENA_MSK		= BIT(9),
+	POWER_FLAGS_LPRX_ENA_MSK		= BIT(11),
+	POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK	= BIT(12),
+};
+
+#define IWL_POWER_VEC_SIZE 5
+
+/**
+ * struct iwl_powertable_cmd - legacy power command. Besides supporting the old
+ *	API, this is also used with the new power API for device-wide power
+ *	settings.
+ * POWER_TABLE_CMD = 0x77 (command, has simple generic response)
+ *
+ * @flags:		Power table command flags from POWER_FLAGS_*
+ * @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec.
+ *			Minimum allowed: 3 * DTIM. Keep alive period must be
+ *			set regardless of power scheme or current power state.
+ *			FW uses this value even when PM is disabled.
+ * @debug_flags:	debug flags
+ * @rx_data_timeout:    Minimum time (usec) from last Rx packet for AM to
+ *			PSM transition - legacy PM
+ * @tx_data_timeout:    Minimum time (usec) from last Tx packet for AM to
+ *			PSM transition - legacy PM
+ * @sleep_interval:	not in use
+ * @skip_dtim_periods:	Number of DTIM periods to skip if Skip over DTIM flag
+ *			is set. For example, if it is required to skip over
+ *			one DTIM, this value needs to be set to 2 (DTIM periods).
+ * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled.
+ *			Default: 80 dBm
+ */
+struct iwl_powertable_cmd {
+	/* PM_POWER_TABLE_CMD_API_S_VER_6 */
+	__le16 flags;
+	u8 keep_alive_seconds;
+	u8 debug_flags;
+	__le32 rx_data_timeout;
+	__le32 tx_data_timeout;
+	__le32 sleep_interval[IWL_POWER_VEC_SIZE];
+	__le32 skip_dtim_periods;
+	__le32 lprx_rssi_threshold;
+} __packed;
+
+/**
+ * enum iwl_device_power_flags - masks for device power command flags
+ * @DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK:
+ *	'1' allows saving power by turning off the receiver and transmitter;
+ *	'0' does not allow it.
+*/
+enum iwl_device_power_flags {
+	DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK	= BIT(0),
+};
+
+/**
+ * struct iwl_device_power_cmd - device wide power command.
+ * DEVICE_POWER_CMD = 0x77 (command, has simple generic response)
+ *
+ * @flags:	Power table command flags from &enum iwl_device_power_flags
+ * @reserved: reserved (padding)
+ */
+struct iwl_device_power_cmd {
+	/* PM_POWER_TABLE_CMD_API_S_VER_6 */
+	__le16 flags;
+	__le16 reserved;
+} __packed;
+
+/**
+ * struct iwl_mac_power_cmd - New power command containing uAPSD support
+ * MAC_PM_POWER_TABLE = 0xA9 (command, has simple generic response)
+ * @id_and_color:	MAC context identifier, &enum iwl_ctxt_id_and_color
+ * @flags:		Power table command flags from POWER_FLAGS_*
+ * @keep_alive_seconds:	Keep alive period in seconds. Default - 25 sec.
+ *			Minimum allowed: 3 * DTIM. Keep alive period must be
+ *			set regardless of power scheme or current power state.
+ *			FW uses this value even when PM is disabled.
+ * @rx_data_timeout:    Minimum time (usec) from last Rx packet for AM to
+ *			PSM transition - legacy PM
+ * @tx_data_timeout:    Minimum time (usec) from last Tx packet for AM to
+ *			PSM transition - legacy PM
+ * @skip_dtim_periods:	Number of DTIM periods to skip if Skip over DTIM flag
+ *			is set. For example, if it is required to skip over
+ *			one DTIM, this value needs to be set to 2 (DTIM periods).
+ * @rx_data_timeout_uapsd: Minimum time (usec) from last Rx packet for AM to
+ *			PSM transition - uAPSD
+ * @tx_data_timeout_uapsd: Minimum time (usec) from last Tx packet for AM to
+ *			PSM transition - uAPSD
+ * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled.
+ *			Default: 80 dBm
+ * @snooze_interval:	Maximum time between attempts to retrieve buffered data
+ *			from the AP [msec]
+ * @snooze_window:	A window of time in which PBW snoozing ensures that all
+ *			packets are received. It is also the minimum time from
+ *			the last received unicast RX packet before the client
+ *			stops snoozing for data. [msec]
+ * @snooze_step:	TBD
+ * @qndp_tid:		TID client shall use for uAPSD QNDP triggers
+ * @uapsd_ac_flags:	Set trigger-enabled and delivery-enabled indication for
+ *			each corresponding AC.
+ *			Use IEEE80211_WMM_IE_STA_QOSINFO_AC* for correct values.
+ * @uapsd_max_sp:	Use IEEE80211_WMM_IE_STA_QOSINFO_SP_* for correct
+ *			values.
+ * @heavy_tx_thld_packets:	TX threshold measured in number of packets
+ * @heavy_rx_thld_packets:	RX threshold measured in number of packets
+ * @heavy_tx_thld_percentage:	TX threshold measured in load's percentage
+ * @heavy_rx_thld_percentage:	RX threshold measured in load's percentage
+ * @limited_ps_threshold: (unused)
+ * @reserved: reserved (padding)
+ */
+struct iwl_mac_power_cmd {
+	/* CONTEXT_DESC_API_T_VER_1 */
+	__le32 id_and_color;
+
+	/* CLIENT_PM_POWER_TABLE_S_VER_1 */
+	__le16 flags;
+	__le16 keep_alive_seconds;
+	__le32 rx_data_timeout;
+	__le32 tx_data_timeout;
+	__le32 rx_data_timeout_uapsd;
+	__le32 tx_data_timeout_uapsd;
+	u8 lprx_rssi_threshold;
+	u8 skip_dtim_periods;
+	__le16 snooze_interval;
+	__le16 snooze_window;
+	u8 snooze_step;
+	u8 qndp_tid;
+	u8 uapsd_ac_flags;
+	u8 uapsd_max_sp;
+	u8 heavy_tx_thld_packets;
+	u8 heavy_rx_thld_packets;
+	u8 heavy_tx_thld_percentage;
+	u8 heavy_rx_thld_percentage;
+	u8 limited_ps_threshold;
+	u8 reserved;
+} __packed;
+
+/*
+ * struct iwl_uapsd_misbehaving_ap_notif - FW sends this notification when the
+ * associated AP is identified as improperly implementing the uAPSD protocol.
+ * PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78
+ * @sta_id: index of station in uCode's station table - associated AP ID in
+ *	    this context.
+ */
+struct iwl_uapsd_misbehaving_ap_notif {
+	__le32 sta_id;
+	u8 mac_id;
+	u8 reserved[3];
+} __packed;
+
+/**
+ * struct iwl_reduce_tx_power_cmd - TX power reduction command
+ * REDUCE_TX_POWER_CMD = 0x9f
+ * @flags: (reserved for future implementation)
+ * @mac_context_id: id of the mac ctx for which we are reducing TX power.
+ * @pwr_restriction: TX power restriction in dBm.
+ */
+struct iwl_reduce_tx_power_cmd {
+	u8 flags;
+	u8 mac_context_id;
+	__le16 pwr_restriction;
+} __packed; /* TX_REDUCED_POWER_API_S_VER_1 */
+
+enum iwl_dev_tx_power_cmd_mode {
+	IWL_TX_POWER_MODE_SET_MAC = 0,
+	IWL_TX_POWER_MODE_SET_DEVICE = 1,
+	IWL_TX_POWER_MODE_SET_CHAINS = 2,
+	IWL_TX_POWER_MODE_SET_ACK = 3,
+}; /* TX_POWER_REDUCED_FLAGS_TYPE_API_E_VER_4 */
+
+#define IWL_NUM_CHAIN_LIMITS	2
+#define IWL_NUM_SUB_BANDS	5
+
+/**
+ * struct iwl_dev_tx_power_cmd_v3 - TX power reduction command (version 3)
+ * @set_mode: see &enum iwl_dev_tx_power_cmd_mode
+ * @mac_context_id: id of the mac ctx for which we are reducing TX power.
+ * @pwr_restriction: TX power restriction in units of 1/8 dBm.
+ * @dev_24: device TX power restriction in units of 1/8 dBm
+ * @dev_52_low: device TX power restriction upper band - low
+ * @dev_52_high: device TX power restriction upper band - high
+ * @per_chain_restriction: per chain restrictions
+ */
+struct iwl_dev_tx_power_cmd_v3 {
+	__le32 set_mode;
+	__le32 mac_context_id;
+	__le16 pwr_restriction;
+	__le16 dev_24;
+	__le16 dev_52_low;
+	__le16 dev_52_high;
+	__le16 per_chain_restriction[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS];
+} __packed; /* TX_REDUCED_POWER_API_S_VER_3 */
+
+#define IWL_DEV_MAX_TX_POWER 0x7FFF
+
+/**
+ * struct iwl_dev_tx_power_cmd - TX power reduction command
+ * @v3: version 3 of the command, embedded here for easier software handling
+ * @enable_ack_reduction: enable or disable close range ack TX power
+ *	reduction.
+ * @reserved: reserved (padding)
+ */
+struct iwl_dev_tx_power_cmd {
+	/* v4 is just an extension of v3 - keep this here */
+	struct iwl_dev_tx_power_cmd_v3 v3;
+	u8 enable_ack_reduction;
+	u8 reserved[3];
+} __packed; /* TX_REDUCED_POWER_API_S_VER_4 */
+
+#define IWL_NUM_GEO_PROFILES   3
+
+/**
+ * enum iwl_geo_per_chain_offset_operation - type of operation
+ * @IWL_PER_CHAIN_OFFSET_SET_TABLES: send the tables from the host to the FW.
+ * @IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE: retrieve the last configured table.
+ */
+enum iwl_geo_per_chain_offset_operation {
+	IWL_PER_CHAIN_OFFSET_SET_TABLES,
+	IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE,
+};  /* GEO_TX_POWER_LIMIT FLAGS TYPE */
+
+/**
+ * struct iwl_per_chain_offset - embedded struct for GEO_TX_POWER_LIMIT.
+ * @max_tx_power: maximum allowed tx power.
+ * @chain_a: tx power offset for chain a.
+ * @chain_b: tx power offset for chain b.
+ */
+struct iwl_per_chain_offset {
+	__le16 max_tx_power;
+	u8 chain_a;
+	u8 chain_b;
+} __packed; /* PER_CHAIN_LIMIT_OFFSET_PER_CHAIN_S_VER_1 */
+
+struct iwl_per_chain_offset_group {
+	struct iwl_per_chain_offset lb;
+	struct iwl_per_chain_offset hb;
+} __packed; /* PER_CHAIN_LIMIT_OFFSET_GROUP_S_VER_1 */
+
+/**
+ * struct iwl_geo_tx_power_profiles_cmd - struct for GEO_TX_POWER_LIMIT cmd.
+ * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
+ * @table: offset profile per band.
+ */
+struct iwl_geo_tx_power_profiles_cmd {
+	__le32 ops;
+	struct iwl_per_chain_offset_group table[IWL_NUM_GEO_PROFILES];
+} __packed; /* GEO_TX_POWER_LIMIT */
+
+/**
+ * struct iwl_geo_tx_power_profiles_resp -  response to GEO_TX_POWER_LIMIT cmd
+ * @profile_idx: current geo profile in use
+ */
+struct iwl_geo_tx_power_profiles_resp {
+	__le32 profile_idx;
+} __packed; /* GEO_TX_POWER_LIMIT_RESP */
+
+/**
+ * struct iwl_beacon_filter_cmd - beacon filtering configuration
+ * REPLY_BEACON_FILTERING_CMD = 0xd2 (command)
+ * @bf_energy_delta: Used for RSSI filtering, if in 'normal' state. Send beacon
+ *      to driver if delta in Energy values calculated for this and last
+ *      passed beacon is greater than this threshold. Zero value means that
+ *      the Energy change is ignored for beacon filtering, and beacon will
+ *      not be forced to be sent to driver regardless of this delta. Typical
+ *      energy delta is 5 dB.
+ * @bf_roaming_energy_delta: Used for RSSI filtering, if in 'roaming' state.
+ *      Send beacon to driver if delta in Energy values calculated for this
+ *      and last passed beacon is greater than this threshold. Zero value
+ *      means that the Energy change is ignored for beacon filtering while in
+ *      Roaming state; typical energy delta is 1 dB.
+ * @bf_roaming_state: Used for RSSI filtering. If absolute Energy values
+ *      calculated for current beacon is less than the threshold, use
+ *      Roaming Energy Delta Threshold, otherwise use normal Energy Delta
+ *      Threshold. Typical energy threshold is -72dBm.
+ * @bf_temp_threshold: This threshold determines the type of temperature
+ *	filtering (Slow or Fast) that is selected (units are in Celsius):
+ *	If the current temperature is above this threshold - Fast filter
+ *	will be used, If the current temperature is below this threshold -
+ *	Slow filter will be used.
+ * @bf_temp_fast_filter: Send Beacon to driver if delta in temperature values
+ *      calculated for this and the last passed beacon is greater than this
+ *      threshold. Zero value means that the temperature change is ignored for
+ *      beacon filtering; beacons will not be forced to be sent to driver
+ *      regardless of whether the temperature has changed.
+ * @bf_temp_slow_filter: Send Beacon to driver if delta in temperature values
+ *      calculated for this and the last passed beacon is greater than this
+ *      threshold. Zero value means that the temperature change is ignored for
+ *      beacon filtering; beacons will not be forced to be sent to driver
+ *      regardless of whether the temperature has changed.
+ * @bf_enable_beacon_filter: 1, beacon filtering is enabled; 0, disabled.
+ * @bf_debug_flag: beacon filtering debug configuration
+ * @bf_escape_timer: Send beacons to the driver if no beacons were passed
+ *      for a specific period of time. Units: Beacons.
+ * @ba_escape_timer: Fully receive and parse beacon if no beacons were passed
+ *      for a longer period of time than this escape-timeout. Units: Beacons.
+ * @ba_enable_beacon_abort: 1, beacon abort is enabled; 0, disabled.
+ */
+struct iwl_beacon_filter_cmd {
+	__le32 bf_energy_delta;
+	__le32 bf_roaming_energy_delta;
+	__le32 bf_roaming_state;
+	__le32 bf_temp_threshold;
+	__le32 bf_temp_fast_filter;
+	__le32 bf_temp_slow_filter;
+	__le32 bf_enable_beacon_filter;
+	__le32 bf_debug_flag;
+	__le32 bf_escape_timer;
+	__le32 ba_escape_timer;
+	__le32 ba_enable_beacon_abort;
+} __packed;
+
+/* Beacon filtering and beacon abort */
+#define IWL_BF_ENERGY_DELTA_DEFAULT 5
+#define IWL_BF_ENERGY_DELTA_D0I3 20
+#define IWL_BF_ENERGY_DELTA_MAX 255
+#define IWL_BF_ENERGY_DELTA_MIN 0
+
+#define IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT 1
+#define IWL_BF_ROAMING_ENERGY_DELTA_D0I3 20
+#define IWL_BF_ROAMING_ENERGY_DELTA_MAX 255
+#define IWL_BF_ROAMING_ENERGY_DELTA_MIN 0
+
+#define IWL_BF_ROAMING_STATE_DEFAULT 72
+#define IWL_BF_ROAMING_STATE_D0I3 72
+#define IWL_BF_ROAMING_STATE_MAX 255
+#define IWL_BF_ROAMING_STATE_MIN 0
+
+#define IWL_BF_TEMP_THRESHOLD_DEFAULT 112
+#define IWL_BF_TEMP_THRESHOLD_D0I3 112
+#define IWL_BF_TEMP_THRESHOLD_MAX 255
+#define IWL_BF_TEMP_THRESHOLD_MIN 0
+
+#define IWL_BF_TEMP_FAST_FILTER_DEFAULT 1
+#define IWL_BF_TEMP_FAST_FILTER_D0I3 1
+#define IWL_BF_TEMP_FAST_FILTER_MAX 255
+#define IWL_BF_TEMP_FAST_FILTER_MIN 0
+
+#define IWL_BF_TEMP_SLOW_FILTER_DEFAULT 5
+#define IWL_BF_TEMP_SLOW_FILTER_D0I3 20
+#define IWL_BF_TEMP_SLOW_FILTER_MAX 255
+#define IWL_BF_TEMP_SLOW_FILTER_MIN 0
+
+#define IWL_BF_ENABLE_BEACON_FILTER_DEFAULT 1
+
+#define IWL_BF_DEBUG_FLAG_DEFAULT 0
+#define IWL_BF_DEBUG_FLAG_D0I3 0
+
+#define IWL_BF_ESCAPE_TIMER_DEFAULT 0
+#define IWL_BF_ESCAPE_TIMER_D0I3 0
+#define IWL_BF_ESCAPE_TIMER_MAX 1024
+#define IWL_BF_ESCAPE_TIMER_MIN 0
+
+#define IWL_BA_ESCAPE_TIMER_DEFAULT 6
+#define IWL_BA_ESCAPE_TIMER_D0I3 6
+#define IWL_BA_ESCAPE_TIMER_D3 9
+#define IWL_BA_ESCAPE_TIMER_MAX 1024
+#define IWL_BA_ESCAPE_TIMER_MIN 0
+
+#define IWL_BA_ENABLE_BEACON_ABORT_DEFAULT 1
+
+#define IWL_BF_CMD_CONFIG(mode)					     \
+	.bf_energy_delta = cpu_to_le32(IWL_BF_ENERGY_DELTA ## mode),	      \
+	.bf_roaming_energy_delta =					      \
+		cpu_to_le32(IWL_BF_ROAMING_ENERGY_DELTA ## mode),	      \
+	.bf_roaming_state = cpu_to_le32(IWL_BF_ROAMING_STATE ## mode),	      \
+	.bf_temp_threshold = cpu_to_le32(IWL_BF_TEMP_THRESHOLD ## mode),      \
+	.bf_temp_fast_filter = cpu_to_le32(IWL_BF_TEMP_FAST_FILTER ## mode),  \
+	.bf_temp_slow_filter = cpu_to_le32(IWL_BF_TEMP_SLOW_FILTER ## mode),  \
+	.bf_debug_flag = cpu_to_le32(IWL_BF_DEBUG_FLAG ## mode),	      \
+	.bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER ## mode),	      \
+	.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER ## mode)
+
+#define IWL_BF_CMD_CONFIG_DEFAULTS IWL_BF_CMD_CONFIG(_DEFAULT)
+#define IWL_BF_CMD_CONFIG_D0I3 IWL_BF_CMD_CONFIG(_D0I3)
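+
+/*
+ * Usage sketch (illustrative only, not part of the firmware API): the
+ * IWL_BF_CMD_CONFIG_* macros above expand to designated initializers,
+ * so a default beacon filter command can be built as:
+ *
+ *	struct iwl_beacon_filter_cmd cmd = {
+ *		IWL_BF_CMD_CONFIG_DEFAULTS,
+ *		.bf_enable_beacon_filter = cpu_to_le32(1),
+ *	};
+ */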
+#endif /* __iwl_fw_api_power_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
new file mode 100644
index 0000000..087fae9
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -0,0 +1,646 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __iwl_fw_api_rs_h__
+#define __iwl_fw_api_rs_h__
+
+#include "mac.h"
+
+/**
+ * enum iwl_tlc_mng_cfg_flags - options for TLC config flags
+ * @IWL_TLC_MNG_CFG_FLAGS_STBC_MSK: enable STBC
+ * @IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK: enable LDPC
+ */
+enum iwl_tlc_mng_cfg_flags {
+	IWL_TLC_MNG_CFG_FLAGS_STBC_MSK		= BIT(0),
+	IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK		= BIT(1),
+};
+
+/**
+ * enum iwl_tlc_mng_cfg_cw - channel width options
+ * @IWL_TLC_MNG_CH_WIDTH_20MHZ: 20 MHz channel
+ * @IWL_TLC_MNG_CH_WIDTH_40MHZ: 40 MHz channel
+ * @IWL_TLC_MNG_CH_WIDTH_80MHZ: 80 MHz channel
+ * @IWL_TLC_MNG_CH_WIDTH_160MHZ: 160 MHz channel
+ * @IWL_TLC_MNG_CH_WIDTH_LAST: maximum value
+ */
+enum iwl_tlc_mng_cfg_cw {
+	IWL_TLC_MNG_CH_WIDTH_20MHZ,
+	IWL_TLC_MNG_CH_WIDTH_40MHZ,
+	IWL_TLC_MNG_CH_WIDTH_80MHZ,
+	IWL_TLC_MNG_CH_WIDTH_160MHZ,
+	IWL_TLC_MNG_CH_WIDTH_LAST = IWL_TLC_MNG_CH_WIDTH_160MHZ,
+};
+
+/**
+ * enum iwl_tlc_mng_cfg_chains - possible chains
+ * @IWL_TLC_MNG_CHAIN_A_MSK: chain A
+ * @IWL_TLC_MNG_CHAIN_B_MSK: chain B
+ */
+enum iwl_tlc_mng_cfg_chains {
+	IWL_TLC_MNG_CHAIN_A_MSK = BIT(0),
+	IWL_TLC_MNG_CHAIN_B_MSK = BIT(1),
+};
+
+/**
+ * enum iwl_tlc_mng_cfg_mode - supported modes
+ * @IWL_TLC_MNG_MODE_CCK: enable CCK
+ * @IWL_TLC_MNG_MODE_OFDM_NON_HT: enable OFDM (non HT)
+ * @IWL_TLC_MNG_MODE_NON_HT: enable non HT
+ * @IWL_TLC_MNG_MODE_HT: enable HT
+ * @IWL_TLC_MNG_MODE_VHT: enable VHT
+ * @IWL_TLC_MNG_MODE_HE: enable HE
+ * @IWL_TLC_MNG_MODE_INVALID: invalid value
+ * @IWL_TLC_MNG_MODE_NUM: a count of possible modes
+ */
+enum iwl_tlc_mng_cfg_mode {
+	IWL_TLC_MNG_MODE_CCK = 0,
+	IWL_TLC_MNG_MODE_OFDM_NON_HT = IWL_TLC_MNG_MODE_CCK,
+	IWL_TLC_MNG_MODE_NON_HT = IWL_TLC_MNG_MODE_CCK,
+	IWL_TLC_MNG_MODE_HT,
+	IWL_TLC_MNG_MODE_VHT,
+	IWL_TLC_MNG_MODE_HE,
+	IWL_TLC_MNG_MODE_INVALID,
+	IWL_TLC_MNG_MODE_NUM = IWL_TLC_MNG_MODE_INVALID,
+};
+
+/**
+ * enum iwl_tlc_mng_ht_rates - HT/VHT/HE rates
+ * @IWL_TLC_MNG_HT_RATE_MCS0: index of MCS0
+ * @IWL_TLC_MNG_HT_RATE_MCS1: index of MCS1
+ * @IWL_TLC_MNG_HT_RATE_MCS2: index of MCS2
+ * @IWL_TLC_MNG_HT_RATE_MCS3: index of MCS3
+ * @IWL_TLC_MNG_HT_RATE_MCS4: index of MCS4
+ * @IWL_TLC_MNG_HT_RATE_MCS5: index of MCS5
+ * @IWL_TLC_MNG_HT_RATE_MCS6: index of MCS6
+ * @IWL_TLC_MNG_HT_RATE_MCS7: index of MCS7
+ * @IWL_TLC_MNG_HT_RATE_MCS8: index of MCS8
+ * @IWL_TLC_MNG_HT_RATE_MCS9: index of MCS9
+ * @IWL_TLC_MNG_HT_RATE_MCS10: index of MCS10
+ * @IWL_TLC_MNG_HT_RATE_MCS11: index of MCS11
+ * @IWL_TLC_MNG_HT_RATE_MAX: maximal rate for HT/VHT
+ */
+enum iwl_tlc_mng_ht_rates {
+	IWL_TLC_MNG_HT_RATE_MCS0 = 0,
+	IWL_TLC_MNG_HT_RATE_MCS1,
+	IWL_TLC_MNG_HT_RATE_MCS2,
+	IWL_TLC_MNG_HT_RATE_MCS3,
+	IWL_TLC_MNG_HT_RATE_MCS4,
+	IWL_TLC_MNG_HT_RATE_MCS5,
+	IWL_TLC_MNG_HT_RATE_MCS6,
+	IWL_TLC_MNG_HT_RATE_MCS7,
+	IWL_TLC_MNG_HT_RATE_MCS8,
+	IWL_TLC_MNG_HT_RATE_MCS9,
+	IWL_TLC_MNG_HT_RATE_MCS10,
+	IWL_TLC_MNG_HT_RATE_MCS11,
+	IWL_TLC_MNG_HT_RATE_MAX = IWL_TLC_MNG_HT_RATE_MCS11,
+};
+
+/* Maximum supported tx antennas number */
+#define MAX_NSS 2
+
+/**
+ * struct iwl_tlc_config_cmd - TLC configuration
+ * @sta_id: station id
+ * @reserved1: reserved
+ * @max_ch_width: max supported channel width from &enum iwl_tlc_mng_cfg_cw
+ * @mode: &enum iwl_tlc_mng_cfg_mode
+ * @chains: bitmask of &enum iwl_tlc_mng_cfg_chains
+ * @amsdu: TX amsdu is supported
+ * @flags: bitmask of &enum iwl_tlc_mng_cfg_flags
+ * @non_ht_rates: bitmap of supported legacy rates
+ * @ht_rates: bitmap of &enum iwl_tlc_mng_ht_rates, per <nss, channel-width>
+ *	      pair (0 - 80 MHz width and below, 1 - 160 MHz).
+ * @max_mpdu_len: max MPDU length, in bytes
+ * @sgi_ch_width_supp: bitmap of SGI support per channel width
+ *		       use BIT() of &enum iwl_tlc_mng_cfg_cw values
+ * @reserved2: reserved
+ */
+struct iwl_tlc_config_cmd {
+	u8 sta_id;
+	u8 reserved1[3];
+	u8 max_ch_width;
+	u8 mode;
+	u8 chains;
+	u8 amsdu;
+	__le16 flags;
+	__le16 non_ht_rates;
+	__le16 ht_rates[MAX_NSS][2];
+	__le16 max_mpdu_len;
+	u8 sgi_ch_width_supp;
+	u8 reserved2[1];
+} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_2 */
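+
+/*
+ * Illustrative fill of the ht_rates bitmap above (editorial sketch,
+ * hypothetical values): enable MCS 0-7 for the first spatial stream
+ * at 80 MHz width and below:
+ *
+ *	cmd.ht_rates[0][0] =
+ *		cpu_to_le16(BIT(IWL_TLC_MNG_HT_RATE_MCS7 + 1) - 1);
+ */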
+
+/**
+ * enum iwl_tlc_update_flags - updated fields
+ * @IWL_TLC_NOTIF_FLAG_RATE: last initial rate update
+ * @IWL_TLC_NOTIF_FLAG_AMSDU: amsdu parameters update
+ */
+enum iwl_tlc_update_flags {
+	IWL_TLC_NOTIF_FLAG_RATE  = BIT(0),
+	IWL_TLC_NOTIF_FLAG_AMSDU = BIT(1),
+};
+
+/**
+ * struct iwl_tlc_update_notif - TLC notification from FW
+ * @sta_id: station id
+ * @reserved: reserved
+ * @flags: bitmap of notifications reported
+ * @rate: current initial rate
+ * @amsdu_size: Max AMSDU size, in bytes
+ * @amsdu_enabled: bitmap for per-TID AMSDU enablement
+ */
+struct iwl_tlc_update_notif {
+	u8 sta_id;
+	u8 reserved[3];
+	__le32 flags;
+	__le32 rate;
+	__le32 amsdu_size;
+	__le32 amsdu_enabled;
+} __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_2 */
+
+/**
+ * enum iwl_tlc_debug_flags - debug options
+ * @IWL_TLC_DEBUG_FIXED_RATE: set fixed rate for rate scaling
+ * @IWL_TLC_DEBUG_STATS_TH: threshold for sending statistics to the driver, in
+ *	frames
+ * @IWL_TLC_DEBUG_STATS_TIME_TH: threshold for sending statistics to the
+ *	driver, in msec
+ * @IWL_TLC_DEBUG_AGG_TIME_LIM: time limit for a BA session
+ * @IWL_TLC_DEBUG_AGG_DIS_START_TH: frame with try-count greater than this
+ *	threshold should not start an aggregation session
+ * @IWL_TLC_DEBUG_AGG_FRAME_CNT_LIM: set max number of frames in an aggregation
+ * @IWL_TLC_DEBUG_RENEW_ADDBA_DELAY: delay between retries of ADD BA
+ * @IWL_TLC_DEBUG_START_AC_RATE_IDX: frames per second to start a BA session
+ * @IWL_TLC_DEBUG_NO_FAR_RANGE_TWEAK: disable BW scaling
+ */
+enum iwl_tlc_debug_flags {
+	IWL_TLC_DEBUG_FIXED_RATE,
+	IWL_TLC_DEBUG_STATS_TH,
+	IWL_TLC_DEBUG_STATS_TIME_TH,
+	IWL_TLC_DEBUG_AGG_TIME_LIM,
+	IWL_TLC_DEBUG_AGG_DIS_START_TH,
+	IWL_TLC_DEBUG_AGG_FRAME_CNT_LIM,
+	IWL_TLC_DEBUG_RENEW_ADDBA_DELAY,
+	IWL_TLC_DEBUG_START_AC_RATE_IDX,
+	IWL_TLC_DEBUG_NO_FAR_RANGE_TWEAK,
+}; /* TLC_MNG_DEBUG_FLAGS_API_E_VER_1 */
+
+/**
+ * struct iwl_dhc_tlc_cmd - fixed debug config
+ * @sta_id: bit 0 - enable/disable, bits 1 - 7 hold station id
+ * @reserved1: reserved
+ * @flags: bitmap of %IWL_TLC_DEBUG_\*
+ * @fixed_rate: rate value
+ * @stats_threshold: if number of tx-ed frames is greater, send statistics
+ * @time_threshold: statistics threshold in usec
+ * @agg_time_lim: max agg time
+ * @agg_dis_start_threshold: frames with try-count greater than this count will
+ *			     not be aggregated
+ * @agg_frame_count_lim: agg size
+ * @addba_retry_delay: delay between retries of ADD BA
+ * @start_ac_rate_idx: frames per second to start a BA session
+ * @no_far_range_tweak: disable BW scaling
+ * @reserved2: reserved
+ */
+struct iwl_dhc_tlc_cmd {
+	u8 sta_id;
+	u8 reserved1[3];
+	__le32 flags;
+	__le32 fixed_rate;
+	__le16 stats_threshold;
+	__le16 time_threshold;
+	__le16 agg_time_lim;
+	__le16 agg_dis_start_threshold;
+	__le16 agg_frame_count_lim;
+	__le16 addba_retry_delay;
+	u8 start_ac_rate_idx[IEEE80211_NUM_ACS];
+	u8 no_far_range_tweak;
+	u8 reserved2[3];
+} __packed;
+
+/*
+ * These serve as indexes into
+ * struct iwl_rate_info fw_rate_idx_to_plcp[IWL_RATE_COUNT];
+ * TODO: avoid overlap between legacy and HT rates
+ */
+enum {
+	IWL_RATE_1M_INDEX = 0,
+	IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
+	IWL_RATE_2M_INDEX,
+	IWL_RATE_5M_INDEX,
+	IWL_RATE_11M_INDEX,
+	IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
+	IWL_RATE_6M_INDEX,
+	IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
+	IWL_RATE_MCS_0_INDEX = IWL_RATE_6M_INDEX,
+	IWL_FIRST_HT_RATE = IWL_RATE_MCS_0_INDEX,
+	IWL_FIRST_VHT_RATE = IWL_RATE_MCS_0_INDEX,
+	IWL_RATE_9M_INDEX,
+	IWL_RATE_12M_INDEX,
+	IWL_RATE_MCS_1_INDEX = IWL_RATE_12M_INDEX,
+	IWL_RATE_18M_INDEX,
+	IWL_RATE_MCS_2_INDEX = IWL_RATE_18M_INDEX,
+	IWL_RATE_24M_INDEX,
+	IWL_RATE_MCS_3_INDEX = IWL_RATE_24M_INDEX,
+	IWL_RATE_36M_INDEX,
+	IWL_RATE_MCS_4_INDEX = IWL_RATE_36M_INDEX,
+	IWL_RATE_48M_INDEX,
+	IWL_RATE_MCS_5_INDEX = IWL_RATE_48M_INDEX,
+	IWL_RATE_54M_INDEX,
+	IWL_RATE_MCS_6_INDEX = IWL_RATE_54M_INDEX,
+	IWL_LAST_NON_HT_RATE = IWL_RATE_54M_INDEX,
+	IWL_RATE_60M_INDEX,
+	IWL_RATE_MCS_7_INDEX = IWL_RATE_60M_INDEX,
+	IWL_LAST_HT_RATE = IWL_RATE_MCS_7_INDEX,
+	IWL_RATE_MCS_8_INDEX,
+	IWL_RATE_MCS_9_INDEX,
+	IWL_LAST_VHT_RATE = IWL_RATE_MCS_9_INDEX,
+	IWL_RATE_MCS_10_INDEX,
+	IWL_RATE_MCS_11_INDEX,
+	IWL_LAST_HE_RATE = IWL_RATE_MCS_11_INDEX,
+	IWL_RATE_COUNT_LEGACY = IWL_LAST_NON_HT_RATE + 1,
+	IWL_RATE_COUNT = IWL_LAST_HE_RATE + 1,
+};
+
+#define IWL_RATE_BIT_MSK(r) BIT(IWL_RATE_##r##M_INDEX)
+
+/* fw API values for legacy bit rates, both OFDM and CCK */
+enum {
+	IWL_RATE_6M_PLCP  = 13,
+	IWL_RATE_9M_PLCP  = 15,
+	IWL_RATE_12M_PLCP = 5,
+	IWL_RATE_18M_PLCP = 7,
+	IWL_RATE_24M_PLCP = 9,
+	IWL_RATE_36M_PLCP = 11,
+	IWL_RATE_48M_PLCP = 1,
+	IWL_RATE_54M_PLCP = 3,
+	IWL_RATE_1M_PLCP  = 10,
+	IWL_RATE_2M_PLCP  = 20,
+	IWL_RATE_5M_PLCP  = 55,
+	IWL_RATE_11M_PLCP = 110,
+	IWL_RATE_INVM_PLCP = -1,
+};
+
+/*
+ * rate_n_flags bit fields
+ *
+ * The 32-bit value has different layouts in the low 8 bits depending on the
+ * format. There are three formats, HT, VHT and legacy (11abg, with subformats
+ * for CCK and OFDM).
+ *
+ * High-throughput (HT) rate format
+ *	bit 8 is 1, bit 26 is 0, bit 9 is 0 (OFDM)
+ * Very High-throughput (VHT) rate format
+ *	bit 8 is 0, bit 26 is 1, bit 9 is 0 (OFDM)
+ * Legacy OFDM rate format for bits 7:0
+ *	bit 8 is 0, bit 26 is 0, bit 9 is 0 (OFDM)
+ * Legacy CCK rate format for bits 7:0:
+ *	bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK)
+ */
+
+/* Bit 8: (1) HT format, (0) legacy or VHT format */
+#define RATE_MCS_HT_POS 8
+#define RATE_MCS_HT_MSK (1 << RATE_MCS_HT_POS)
+
+/* Bit 9: (1) CCK, (0) OFDM.  HT (bit 8) must be "0" for this bit to be valid */
+#define RATE_MCS_CCK_POS 9
+#define RATE_MCS_CCK_MSK (1 << RATE_MCS_CCK_POS)
+
+/* Bit 26: (1) VHT format, (0) legacy format in bits 8:0 */
+#define RATE_MCS_VHT_POS 26
+#define RATE_MCS_VHT_MSK (1 << RATE_MCS_VHT_POS)
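+
+/*
+ * Illustrative helper (editorial sketch, not part of the firmware API;
+ * the function name is hypothetical): a rate_n_flags word is in one of
+ * the legacy formats iff neither the HT nor the VHT bit is set.
+ */
+static inline bool iwl_example_rate_is_legacy(u32 rate_n_flags)
+{
+	return !(rate_n_flags & (RATE_MCS_HT_MSK | RATE_MCS_VHT_MSK));
+}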
+
+
+/*
+ * High-throughput (HT) rate format for bits 7:0
+ *
+ *  2-0:  MCS rate base
+ *        0)   6 Mbps
+ *        1)  12 Mbps
+ *        2)  18 Mbps
+ *        3)  24 Mbps
+ *        4)  36 Mbps
+ *        5)  48 Mbps
+ *        6)  54 Mbps
+ *        7)  60 Mbps
+ *  4-3:  0)  Single stream (SISO)
+ *        1)  Dual stream (MIMO)
+ *        2)  Triple stream (MIMO)
+ *    5:  Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data
+ *  (bits 7-6 are zero)
+ *
+ * Together the low 5 bits work out to the MCS index because we don't
+ * support MCSes above 15/23, and 0-7 have one stream, 8-15 have two
+ * streams and 16-23 have three streams. We could also support MCS 32
+ * which is the duplicate 20 MHz MCS (bit 5 set, all others zero.)
+ */
+#define RATE_HT_MCS_RATE_CODE_MSK	0x7
+#define RATE_HT_MCS_NSS_POS             3
+#define RATE_HT_MCS_NSS_MSK             (3 << RATE_HT_MCS_NSS_POS)
+
+/* Bit 10: (1) Use Green Field preamble */
+#define RATE_HT_MCS_GF_POS		10
+#define RATE_HT_MCS_GF_MSK		(1 << RATE_HT_MCS_GF_POS)
+
+#define RATE_HT_MCS_INDEX_MSK		0x3f
+
+/*
+ * Very High-throughput (VHT) rate format for bits 7:0
+ *
+ *  3-0:  VHT MCS (0-9)
+ *  5-4:  number of streams - 1:
+ *        0)  Single stream (SISO)
+ *        1)  Dual stream (MIMO)
+ *        2)  Triple stream (MIMO)
+ */
+
+/* Bit 4-5: (0) SISO, (1) MIMO2 (2) MIMO3 */
+#define RATE_VHT_MCS_RATE_CODE_MSK	0xf
+#define RATE_VHT_MCS_NSS_POS		4
+#define RATE_VHT_MCS_NSS_MSK		(3 << RATE_VHT_MCS_NSS_POS)
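+
+/*
+ * Illustrative helper (editorial sketch, hypothetical name): the
+ * number of spatial streams of a VHT rate is the NSS field plus one.
+ */
+static inline u8 iwl_example_vht_nss(u32 rate_n_flags)
+{
+	return ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
+		RATE_VHT_MCS_NSS_POS) + 1;
+}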
+
+/*
+ * Legacy OFDM rate format for bits 7:0
+ *
+ *  3-0:  0xD)   6 Mbps
+ *        0xF)   9 Mbps
+ *        0x5)  12 Mbps
+ *        0x7)  18 Mbps
+ *        0x9)  24 Mbps
+ *        0xB)  36 Mbps
+ *        0x1)  48 Mbps
+ *        0x3)  54 Mbps
+ * (bits 7-4 are 0)
+ *
+ * Legacy CCK rate format for bits 7:0:
+ * bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK):
+ *
+ *  6-0:   10)  1 Mbps
+ *         20)  2 Mbps
+ *         55)  5.5 Mbps
+ *        110)  11 Mbps
+ * (bit 7 is 0)
+ */
+#define RATE_LEGACY_RATE_MSK 0xff
+
+/* Bit 10 - OFDM HE */
+#define RATE_MCS_HE_POS		10
+#define RATE_MCS_HE_MSK		BIT(RATE_MCS_HE_POS)
+
+/*
+ * Bit 11-12: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz
+ * 0 and 1 are valid for HT and VHT, 2 and 3 only for VHT
+ */
+#define RATE_MCS_CHAN_WIDTH_POS		11
+#define RATE_MCS_CHAN_WIDTH_MSK		(3 << RATE_MCS_CHAN_WIDTH_POS)
+#define RATE_MCS_CHAN_WIDTH_20		(0 << RATE_MCS_CHAN_WIDTH_POS)
+#define RATE_MCS_CHAN_WIDTH_40		(1 << RATE_MCS_CHAN_WIDTH_POS)
+#define RATE_MCS_CHAN_WIDTH_80		(2 << RATE_MCS_CHAN_WIDTH_POS)
+#define RATE_MCS_CHAN_WIDTH_160		(3 << RATE_MCS_CHAN_WIDTH_POS)
+
+/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */
+#define RATE_MCS_SGI_POS		13
+#define RATE_MCS_SGI_MSK		(1 << RATE_MCS_SGI_POS)
+
+/* Bit 14-16: Antenna selection (1) Ant A, (2) Ant B, (4) Ant C */
+#define RATE_MCS_ANT_POS		14
+#define RATE_MCS_ANT_A_MSK		(1 << RATE_MCS_ANT_POS)
+#define RATE_MCS_ANT_B_MSK		(2 << RATE_MCS_ANT_POS)
+#define RATE_MCS_ANT_C_MSK		(4 << RATE_MCS_ANT_POS)
+#define RATE_MCS_ANT_AB_MSK		(RATE_MCS_ANT_A_MSK | \
+					 RATE_MCS_ANT_B_MSK)
+#define RATE_MCS_ANT_ABC_MSK		(RATE_MCS_ANT_AB_MSK | \
+					 RATE_MCS_ANT_C_MSK)
+#define RATE_MCS_ANT_MSK		RATE_MCS_ANT_ABC_MSK
+
+/* Bit 17: (0) SS, (1) SS*2 */
+#define RATE_MCS_STBC_POS		17
+#define RATE_MCS_STBC_MSK		BIT(RATE_MCS_STBC_POS)
+
+/* Bit 18: OFDM-HE dual carrier mode */
+#define RATE_HE_DUAL_CARRIER_MODE	18
+#define RATE_HE_DUAL_CARRIER_MODE_MSK	BIT(RATE_HE_DUAL_CARRIER_MODE)
+
+/* Bit 19: (0) Beamforming is off, (1) Beamforming is on */
+#define RATE_MCS_BF_POS			19
+#define RATE_MCS_BF_MSK			(1 << RATE_MCS_BF_POS)
+
+/*
+ * Bit 20-21: HE LTF type and guard interval
+ * HE (ext) SU:
+ *	0			1xLTF+0.8us
+ *	1			2xLTF+0.8us
+ *	2			2xLTF+1.6us
+ *	3 & SGI (bit 13) clear	4xLTF+3.2us
+ *	3 & SGI (bit 13) set	4xLTF+0.8us
+ * HE MU:
+ *	0			4xLTF+0.8us
+ *	1			2xLTF+0.8us
+ *	2			2xLTF+1.6us
+ *	3			4xLTF+3.2us
+ * HE TRIG:
+ *	0			1xLTF+1.6us
+ *	1			2xLTF+1.6us
+ *	2			4xLTF+3.2us
+ *	3			(does not occur)
+ */
+#define RATE_MCS_HE_GI_LTF_POS		20
+#define RATE_MCS_HE_GI_LTF_MSK		(3 << RATE_MCS_HE_GI_LTF_POS)
+
+/* Bit 22-23: HE type. (0) SU, (1) SU_EXT, (2) MU, (3) trigger based */
+#define RATE_MCS_HE_TYPE_POS		22
+#define RATE_MCS_HE_TYPE_SU		(0 << RATE_MCS_HE_TYPE_POS)
+#define RATE_MCS_HE_TYPE_EXT_SU		(1 << RATE_MCS_HE_TYPE_POS)
+#define RATE_MCS_HE_TYPE_MU		(2 << RATE_MCS_HE_TYPE_POS)
+#define RATE_MCS_HE_TYPE_TRIG		(3 << RATE_MCS_HE_TYPE_POS)
+#define RATE_MCS_HE_TYPE_MSK		(3 << RATE_MCS_HE_TYPE_POS)
+
+/* Bit 24-25: (0) 20MHz (no dup), (1) 2x20MHz, (2) 4x20MHz, (3) 8x20MHz */
+#define RATE_MCS_DUP_POS		24
+#define RATE_MCS_DUP_MSK		(3 << RATE_MCS_DUP_POS)
+
+/* Bit 27: (1) LDPC enabled, (0) LDPC disabled */
+#define RATE_MCS_LDPC_POS		27
+#define RATE_MCS_LDPC_MSK		(1 << RATE_MCS_LDPC_POS)
+
+/* Bit 28: (1) 106-tone RX (8 MHz RU), (0) normal bandwidth */
+#define RATE_MCS_HE_106T_POS		28
+#define RATE_MCS_HE_106T_MSK		(1 << RATE_MCS_HE_106T_POS)
+
+/* Link Quality definitions */
+
+/* # entries in rate scale table to support Tx retries */
+#define  LQ_MAX_RETRY_NUM 16
+
+/* Link quality command flags bit fields */
+
+/* Bit 0: (0) Don't use RTS (1) Use RTS */
+#define LQ_FLAG_USE_RTS_POS             0
+#define LQ_FLAG_USE_RTS_MSK	        (1 << LQ_FLAG_USE_RTS_POS)
+
+/* Bit 1-3: LQ command color. Used to match responses to LQ commands */
+#define LQ_FLAG_COLOR_POS               1
+#define LQ_FLAG_COLOR_MSK               (7 << LQ_FLAG_COLOR_POS)
+#define LQ_FLAG_COLOR_GET(_f)		(((_f) & LQ_FLAG_COLOR_MSK) >>\
+					 LQ_FLAG_COLOR_POS)
+#define LQ_FLAGS_COLOR_INC(_c)		((((_c) + 1) << LQ_FLAG_COLOR_POS) &\
+					 LQ_FLAG_COLOR_MSK)
+#define LQ_FLAG_COLOR_SET(_f, _c)	((_c) | ((_f) & ~LQ_FLAG_COLOR_MSK))
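+
+/*
+ * Illustrative use of the color helpers above (hypothetical driver
+ * snippet): bump the color when building a new LQ command so that the
+ * matching response can be identified later:
+ *
+ *	u8 color = LQ_FLAG_COLOR_GET(lq->flags);
+ *	lq->flags = LQ_FLAG_COLOR_SET(lq->flags,
+ *				      LQ_FLAGS_COLOR_INC(color));
+ */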
+
+/* Bit 4-5: Tx RTS BW Signalling
+ * (0) No RTS BW signalling
+ * (1) Static BW signalling
+ * (2) Dynamic BW signalling
+ */
+#define LQ_FLAG_RTS_BW_SIG_POS          4
+#define LQ_FLAG_RTS_BW_SIG_NONE         (0 << LQ_FLAG_RTS_BW_SIG_POS)
+#define LQ_FLAG_RTS_BW_SIG_STATIC       (1 << LQ_FLAG_RTS_BW_SIG_POS)
+#define LQ_FLAG_RTS_BW_SIG_DYNAMIC      (2 << LQ_FLAG_RTS_BW_SIG_POS)
+
+/* Bit 6: (0) No dynamic BW selection (1) Allow dynamic BW selection
+ * Dynamic BW selection allows Tx with narrower BW than requested in rates
+ */
+#define LQ_FLAG_DYNAMIC_BW_POS          6
+#define LQ_FLAG_DYNAMIC_BW_MSK          (1 << LQ_FLAG_DYNAMIC_BW_POS)
+
+/* Single Stream Tx Parameters (lq_cmd->ss_params)
+ * Flags to control a smart FW decision about whether BFER/STBC/SISO will be
+ * used for single stream Tx.
+ */
+
+/* Bit 0-1: Max STBC streams allowed. Can be 0-3.
+ * (0) - No STBC allowed
+ * (1) - 2x1 STBC allowed (HT/VHT)
+ * (2) - 4x2 STBC allowed (HT/VHT)
+ * (3) - 3x2 STBC allowed (HT only)
+ * All our chips have at most 2 antennas, so only (1) is valid for now.
+ */
+#define LQ_SS_STBC_ALLOWED_POS          0
+#define LQ_SS_STBC_ALLOWED_MSK		(3 << LQ_SS_STBC_ALLOWED_POS)
+
+/* 2x1 STBC is allowed */
+#define LQ_SS_STBC_1SS_ALLOWED		(1 << LQ_SS_STBC_ALLOWED_POS)
+
+/* Bit 2: Beamformer (VHT only) is allowed */
+#define LQ_SS_BFER_ALLOWED_POS		2
+#define LQ_SS_BFER_ALLOWED		(1 << LQ_SS_BFER_ALLOWED_POS)
+
+/* Bit 3: Force BFER or STBC for testing
+ * If this is set:
+ * If BFER is allowed then force the ucode to choose BFER else
+ * If STBC is allowed then force the ucode to choose STBC over SISO
+ */
+#define LQ_SS_FORCE_POS			3
+#define LQ_SS_FORCE			(1 << LQ_SS_FORCE_POS)
+
+/* Bit 31: ss_params field is valid. Used for FW backward compatibility
+ * with other drivers which don't support the ss_params API yet
+ */
+#define LQ_SS_PARAMS_VALID_POS		31
+#define LQ_SS_PARAMS_VALID		(1 << LQ_SS_PARAMS_VALID_POS)
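+
+/*
+ * Illustrative composition of ss_params (editorial sketch): allow
+ * single stream STBC and beamforming, and set the valid bit so the
+ * firmware knows this driver supports the ss_params API:
+ *
+ *	lq->ss_params = cpu_to_le32(LQ_SS_STBC_1SS_ALLOWED |
+ *				    LQ_SS_BFER_ALLOWED |
+ *				    LQ_SS_PARAMS_VALID);
+ */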
+
+/**
+ * struct iwl_lq_cmd - link quality command
+ * @sta_id: station to update
+ * @reduced_tpc: reduced transmit power control value
+ * @control: not used
+ * @flags: combination of LQ_FLAG_*
+ * @mimo_delim: the first SISO index in rs_table, which separates MIMO
+ *	and SISO rates
+ * @single_stream_ant_msk: best antenna for SISO (can be dual in CDD).
+ *	Should be ANT_[ABC]
+ * @dual_stream_ant_msk: best antennas for MIMO, combination of ANT_[ABC]
+ * @initial_rate_index: first index from rs_table per AC category
+ * @agg_time_limit: aggregation max time threshold in usec/100, meaning
+ *	value of 100 is one usec. Range is 100 to 8000
+ * @agg_disable_start_th: try-count threshold for starting aggregation.
+ *	If a frame has higher try-count, it should not be selected for
+ *	starting an aggregation sequence.
+ * @agg_frame_cnt_limit: max frame count in an aggregation.
+ *	0: no limit
+ *	1: no aggregation (one frame per aggregation)
+ *	2 - 0x3f: maximal number of frames (up to 3f == 63)
+ * @reserved2: reserved
+ * @rs_table: array of rates for each TX try, each is rate_n_flags,
+ *	meaning it is a combination of RATE_MCS_* and IWL_RATE_*_PLCP
+ * @ss_params: single stream features; declares whether STBC or BFER
+ *	are allowed.
+ */
+struct iwl_lq_cmd {
+	u8 sta_id;
+	u8 reduced_tpc;
+	__le16 control;
+	/* LINK_QUAL_GENERAL_PARAMS_API_S_VER_1 */
+	u8 flags;
+	u8 mimo_delim;
+	u8 single_stream_ant_msk;
+	u8 dual_stream_ant_msk;
+	u8 initial_rate_index[AC_NUM];
+	/* LINK_QUAL_AGG_PARAMS_API_S_VER_1 */
+	__le16 agg_time_limit;
+	u8 agg_disable_start_th;
+	u8 agg_frame_cnt_limit;
+	__le32 reserved2;
+	__le32 rs_table[LQ_MAX_RETRY_NUM];
+	__le32 ss_params;
+}; /* LINK_QUALITY_CMD_API_S_VER_1 */
+
+#endif /* __iwl_fw_api_rs_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
new file mode 100644
index 0000000..2f59935
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -0,0 +1,750 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_fw_api_rx_h__
+#define __iwl_fw_api_rx_h__
+
+/* API for pre-9000 hardware */
+
+#define IWL_RX_INFO_PHY_CNT 8
+#define IWL_RX_INFO_ENERGY_ANT_ABC_IDX 1
+#define IWL_RX_INFO_ENERGY_ANT_A_MSK 0x000000ff
+#define IWL_RX_INFO_ENERGY_ANT_B_MSK 0x0000ff00
+#define IWL_RX_INFO_ENERGY_ANT_C_MSK 0x00ff0000
+#define IWL_RX_INFO_ENERGY_ANT_A_POS 0
+#define IWL_RX_INFO_ENERGY_ANT_B_POS 8
+#define IWL_RX_INFO_ENERGY_ANT_C_POS 16
+
+enum iwl_mac_context_info {
+	MAC_CONTEXT_INFO_NONE,
+	MAC_CONTEXT_INFO_GSCAN,
+};
+
+/**
+ * struct iwl_rx_phy_info - phy info
+ * (REPLY_RX_PHY_CMD = 0xc0)
+ * @non_cfg_phy_cnt: non configurable DSP phy data byte count
+ * @cfg_phy_cnt: configurable DSP phy data byte count
+ * @stat_id: configurable DSP phy data set ID
+ * @reserved1: reserved
+ * @system_timestamp: GP2 at on air rise
+ * @timestamp: TSF at on air rise
+ * @beacon_time_stamp: beacon at on-air rise
+ * @phy_flags: general phy flags: band, modulation, ...
+ * @channel: channel number
+ * @non_cfg_phy: for various implementations of non_cfg_phy
+ * @rate_n_flags: RATE_MCS_*
+ * @byte_count: frame's byte-count
+ * @frame_time: frame's time on the air, based on byte count and frame rate
+ *	calculation
+ * @mac_active_msk: what MACs were active when the frame was received
+ * @mac_context_info: additional info on the context in which the frame was
+ *	received as defined in &enum iwl_mac_context_info
+ *
+ * Before each Rx, the device sends this data. It contains PHY information
+ * about the reception of the packet.
+ */
+struct iwl_rx_phy_info {
+	u8 non_cfg_phy_cnt;
+	u8 cfg_phy_cnt;
+	u8 stat_id;
+	u8 reserved1;
+	__le32 system_timestamp;
+	__le64 timestamp;
+	__le32 beacon_time_stamp;
+	__le16 phy_flags;
+	__le16 channel;
+	__le32 non_cfg_phy[IWL_RX_INFO_PHY_CNT];
+	__le32 rate_n_flags;
+	__le32 byte_count;
+	u8 mac_active_msk;
+	u8 mac_context_info;
+	__le16 frame_time;
+} __packed;
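+
+/*
+ * Illustrative helper (editorial sketch, hypothetical name): extract
+ * the chain A energy from @non_cfg_phy using the constants above.
+ */
+static inline u32 iwl_example_rx_energy_a(struct iwl_rx_phy_info *phy_info)
+{
+	u32 val = le32_to_cpu(
+			phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]);
+
+	return (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >>
+		IWL_RX_INFO_ENERGY_ANT_A_POS;
+}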
+
+/*
+ * TCP offload Rx assist info
+ *
+ * bits 0:3 - reserved
+ * bits 4:7 - MIC CRC length
+ * bits 8:12 - MAC header length
+ * bit 13 - Padding indication
+ * bit 14 - A-AMSDU indication
+ * bit 15 - Offload enabled
+ */
+enum iwl_csum_rx_assist_info {
+	CSUM_RXA_RESERVED_MASK	= 0x000f,
+	CSUM_RXA_MICSIZE_MASK	= 0x00f0,
+	CSUM_RXA_HEADERLEN_MASK	= 0x1f00,
+	CSUM_RXA_PADD		= BIT(13),
+	CSUM_RXA_AMSDU		= BIT(14),
+	CSUM_RXA_ENA		= BIT(15)
+};
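+
+/*
+ * Illustrative helper (editorial sketch, hypothetical name): pull the
+ * MAC header length field (bits 8:12 above) out of the assist word.
+ */
+static inline u16 iwl_example_csum_hdr_len(__le16 assist)
+{
+	return (le16_to_cpu(assist) & CSUM_RXA_HEADERLEN_MASK) >> 8;
+}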
+
+/**
+ * struct iwl_rx_mpdu_res_start - RX MPDU start descriptor
+ * @byte_count: byte count of the frame
+ * @assist: see &enum iwl_csum_rx_assist_info
+ */
+struct iwl_rx_mpdu_res_start {
+	__le16 byte_count;
+	__le16 assist;
+} __packed; /* _RX_MPDU_RES_START_API_S_VER_2 */
+
+/**
+ * enum iwl_rx_phy_flags - to parse &struct iwl_rx_phy_info phy_flags
+ * @RX_RES_PHY_FLAGS_BAND_24: true if the packet was received on 2.4 band
+ * @RX_RES_PHY_FLAGS_MOD_CCK: modulation is CCK
+ * @RX_RES_PHY_FLAGS_SHORT_PREAMBLE: true if packet's preamble was short
+ * @RX_RES_PHY_FLAGS_NARROW_BAND: narrow band (<20 MHz) receive
+ * @RX_RES_PHY_FLAGS_ANTENNA: antenna on which the packet was received
+ * @RX_RES_PHY_FLAGS_ANTENNA_POS: antenna bit position
+ * @RX_RES_PHY_FLAGS_AGG: set if the packet was part of an A-MPDU
+ * @RX_RES_PHY_FLAGS_OFDM_HT: The frame was an HT frame
+ * @RX_RES_PHY_FLAGS_OFDM_GF: The frame used GF preamble
+ * @RX_RES_PHY_FLAGS_OFDM_VHT: The frame was a VHT frame
+ */
+enum iwl_rx_phy_flags {
+	RX_RES_PHY_FLAGS_BAND_24	= BIT(0),
+	RX_RES_PHY_FLAGS_MOD_CCK	= BIT(1),
+	RX_RES_PHY_FLAGS_SHORT_PREAMBLE	= BIT(2),
+	RX_RES_PHY_FLAGS_NARROW_BAND	= BIT(3),
+	RX_RES_PHY_FLAGS_ANTENNA	= (0x7 << 4),
+	RX_RES_PHY_FLAGS_ANTENNA_POS	= 4,
+	RX_RES_PHY_FLAGS_AGG		= BIT(7),
+	RX_RES_PHY_FLAGS_OFDM_HT	= BIT(8),
+	RX_RES_PHY_FLAGS_OFDM_GF	= BIT(9),
+	RX_RES_PHY_FLAGS_OFDM_VHT	= BIT(10),
+};
+
+/**
+ * enum iwl_mvm_rx_status - written by fw for each Rx packet
+ * @RX_MPDU_RES_STATUS_CRC_OK: CRC is fine
+ * @RX_MPDU_RES_STATUS_OVERRUN_OK: there was no RXE overflow
+ * @RX_MPDU_RES_STATUS_SRC_STA_FOUND: station was found
+ * @RX_MPDU_RES_STATUS_KEY_VALID: key was valid
+ * @RX_MPDU_RES_STATUS_KEY_PARAM_OK: key parameters were usable
+ * @RX_MPDU_RES_STATUS_ICV_OK: ICV is fine, if not, the packet is destroyed
+ * @RX_MPDU_RES_STATUS_MIC_OK: used for CCM alg only. TKIP MIC is checked
+ *	in the driver.
+ * @RX_MPDU_RES_STATUS_TTAK_OK: TTAK is fine
+ * @RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR:  valid for alg = CCM_CMAC or
+ *	alg = CCM only. Checks replay attack for 11w frames. Relevant only if
+ *	%RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME is set.
+ * @RX_MPDU_RES_STATUS_SEC_NO_ENC: this frame is not encrypted
+ * @RX_MPDU_RES_STATUS_SEC_WEP_ENC: this frame is encrypted using WEP
+ * @RX_MPDU_RES_STATUS_SEC_CCM_ENC: this frame is encrypted using CCM
+ * @RX_MPDU_RES_STATUS_SEC_TKIP_ENC: this frame is encrypted using TKIP
+ * @RX_MPDU_RES_STATUS_SEC_EXT_ENC: this frame is encrypted using extension
+ *	algorithm
+ * @RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC: this frame is encrypted using CCM_CMAC
+ * @RX_MPDU_RES_STATUS_SEC_ENC_ERR: this frame couldn't be decrypted
+ * @RX_MPDU_RES_STATUS_SEC_ENC_MSK: bitmask of the encryption algorithm
+ * @RX_MPDU_RES_STATUS_DEC_DONE: this frame has been successfully decrypted
+ * @RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP: extended IV (set with TKIP)
+ * @RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT: key ID comparison done
+ * @RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME: this frame is an 11w management frame
+ * @RX_MPDU_RES_STATUS_CSUM_DONE: checksum was done by the hw
+ * @RX_MPDU_RES_STATUS_CSUM_OK: checksum found no errors
+ * @RX_MPDU_RES_STATUS_STA_ID_MSK: station ID mask
+ * @RX_MDPU_RES_STATUS_STA_ID_SHIFT: station ID bit shift
+ * @RX_MPDU_RES_STATUS_FILTERING_MSK: filter status
+ * @RX_MPDU_RES_STATUS2_FILTERING_MSK: filter status 2
+ */
+enum iwl_mvm_rx_status {
+	RX_MPDU_RES_STATUS_CRC_OK			= BIT(0),
+	RX_MPDU_RES_STATUS_OVERRUN_OK			= BIT(1),
+	RX_MPDU_RES_STATUS_SRC_STA_FOUND		= BIT(2),
+	RX_MPDU_RES_STATUS_KEY_VALID			= BIT(3),
+	RX_MPDU_RES_STATUS_KEY_PARAM_OK			= BIT(4),
+	RX_MPDU_RES_STATUS_ICV_OK			= BIT(5),
+	RX_MPDU_RES_STATUS_MIC_OK			= BIT(6),
+	RX_MPDU_RES_STATUS_TTAK_OK			= BIT(7),
+	RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR		= BIT(7),
+	RX_MPDU_RES_STATUS_SEC_NO_ENC			= (0 << 8),
+	RX_MPDU_RES_STATUS_SEC_WEP_ENC			= (1 << 8),
+	RX_MPDU_RES_STATUS_SEC_CCM_ENC			= (2 << 8),
+	RX_MPDU_RES_STATUS_SEC_TKIP_ENC			= (3 << 8),
+	RX_MPDU_RES_STATUS_SEC_EXT_ENC			= (4 << 8),
+	RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC		= (6 << 8),
+	RX_MPDU_RES_STATUS_SEC_ENC_ERR			= (7 << 8),
+	RX_MPDU_RES_STATUS_SEC_ENC_MSK			= (7 << 8),
+	RX_MPDU_RES_STATUS_DEC_DONE			= BIT(11),
+	RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP		= BIT(13),
+	RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT		= BIT(14),
+	RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME		= BIT(15),
+	RX_MPDU_RES_STATUS_CSUM_DONE			= BIT(16),
+	RX_MPDU_RES_STATUS_CSUM_OK			= BIT(17),
+	RX_MDPU_RES_STATUS_STA_ID_SHIFT			= 24,
+	RX_MPDU_RES_STATUS_STA_ID_MSK			= 0x1f << RX_MDPU_RES_STATUS_STA_ID_SHIFT,
+	RX_MPDU_RES_STATUS_FILTERING_MSK		= (0xc00000),
+	RX_MPDU_RES_STATUS2_FILTERING_MSK		= (0xc0000000),
+};
+
+/* 9000 series API */
+enum iwl_rx_mpdu_mac_flags1 {
+	IWL_RX_MDPU_MFLG1_ADDRTYPE_MASK		= 0x03,
+	IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_MASK	= 0xf0,
+	/* shift should be 4, but the length is measured in 2-byte
+	 * words, so shifting only by 3 gives a byte result
+	 */
+	IWL_RX_MPDU_MFLG1_MIC_CRC_LEN_SHIFT	= 3,
+};
+
+enum iwl_rx_mpdu_mac_flags2 {
+	/* in 2-byte words */
+	IWL_RX_MPDU_MFLG2_HDR_LEN_MASK		= 0x1f,
+	IWL_RX_MPDU_MFLG2_PAD			= 0x20,
+	IWL_RX_MPDU_MFLG2_AMSDU			= 0x40,
+};
+
+enum iwl_rx_mpdu_amsdu_info {
+	IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK	= 0x7f,
+	IWL_RX_MPDU_AMSDU_LAST_SUBFRAME		= 0x80,
+};
+
+enum iwl_rx_l3_proto_values {
+	IWL_RX_L3_TYPE_NONE,
+	IWL_RX_L3_TYPE_IPV4,
+	IWL_RX_L3_TYPE_IPV4_FRAG,
+	IWL_RX_L3_TYPE_IPV6_FRAG,
+	IWL_RX_L3_TYPE_IPV6,
+	IWL_RX_L3_TYPE_IPV6_IN_IPV4,
+	IWL_RX_L3_TYPE_ARP,
+	IWL_RX_L3_TYPE_EAPOL,
+};
+
+#define IWL_RX_L3_PROTO_POS 4
+
+enum iwl_rx_l3l4_flags {
+	IWL_RX_L3L4_IP_HDR_CSUM_OK		= BIT(0),
+	IWL_RX_L3L4_TCP_UDP_CSUM_OK		= BIT(1),
+	IWL_RX_L3L4_TCP_FIN_SYN_RST_PSH		= BIT(2),
+	IWL_RX_L3L4_TCP_ACK			= BIT(3),
+	IWL_RX_L3L4_L3_PROTO_MASK		= 0xf << IWL_RX_L3_PROTO_POS,
+	IWL_RX_L3L4_L4_PROTO_MASK		= 0xf << 8,
+	IWL_RX_L3L4_RSS_HASH_MASK		= 0xf << 12,
+};
+
+enum iwl_rx_mpdu_status {
+	IWL_RX_MPDU_STATUS_CRC_OK		= BIT(0),
+	IWL_RX_MPDU_STATUS_OVERRUN_OK		= BIT(1),
+	IWL_RX_MPDU_STATUS_SRC_STA_FOUND	= BIT(2),
+	IWL_RX_MPDU_STATUS_KEY_VALID		= BIT(3),
+	IWL_RX_MPDU_STATUS_KEY_PARAM_OK		= BIT(4),
+	IWL_RX_MPDU_STATUS_ICV_OK		= BIT(5),
+	IWL_RX_MPDU_STATUS_MIC_OK		= BIT(6),
+	IWL_RX_MPDU_RES_STATUS_TTAK_OK		= BIT(7),
+	IWL_RX_MPDU_STATUS_SEC_MASK		= 0x7 << 8,
+	IWL_RX_MPDU_STATUS_SEC_UNKNOWN		= IWL_RX_MPDU_STATUS_SEC_MASK,
+	IWL_RX_MPDU_STATUS_SEC_NONE		= 0x0 << 8,
+	IWL_RX_MPDU_STATUS_SEC_WEP		= 0x1 << 8,
+	IWL_RX_MPDU_STATUS_SEC_CCM		= 0x2 << 8,
+	IWL_RX_MPDU_STATUS_SEC_TKIP		= 0x3 << 8,
+	IWL_RX_MPDU_STATUS_SEC_EXT_ENC		= 0x4 << 8,
+	IWL_RX_MPDU_STATUS_SEC_GCM		= 0x5 << 8,
+	IWL_RX_MPDU_STATUS_DECRYPTED		= BIT(11),
+	IWL_RX_MPDU_STATUS_WEP_MATCH		= BIT(12),
+	IWL_RX_MPDU_STATUS_EXT_IV_MATCH		= BIT(13),
+	IWL_RX_MPDU_STATUS_KEY_ID_MATCH		= BIT(14),
+	IWL_RX_MPDU_STATUS_ROBUST_MNG_FRAME	= BIT(15),
+};
+
+enum iwl_rx_mpdu_hash_filter {
+	IWL_RX_MPDU_HF_A1_HASH_MASK		= 0x3f,
+	IWL_RX_MPDU_HF_FILTER_STATUS_MASK	= 0xc0,
+};
+
+enum iwl_rx_mpdu_sta_id_flags {
+	IWL_RX_MPDU_SIF_STA_ID_MASK		= 0x1f,
+	IWL_RX_MPDU_SIF_RRF_ABORT		= 0x20,
+	IWL_RX_MPDU_SIF_FILTER_STATUS_MASK	= 0xc0,
+};
+
+#define IWL_RX_REORDER_DATA_INVALID_BAID 0x7f
+
+enum iwl_rx_mpdu_reorder_data {
+	IWL_RX_MPDU_REORDER_NSSN_MASK		= 0x00000fff,
+	IWL_RX_MPDU_REORDER_SN_MASK		= 0x00fff000,
+	IWL_RX_MPDU_REORDER_SN_SHIFT		= 12,
+	IWL_RX_MPDU_REORDER_BAID_MASK		= 0x7f000000,
+	IWL_RX_MPDU_REORDER_BAID_SHIFT		= 24,
+	IWL_RX_MPDU_REORDER_BA_OLD_SN		= 0x80000000,
+};
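+
+/*
+ * Illustrative unpacking of the reorder data word (editorial sketch;
+ * "reorder" is a hypothetical local holding the le32_to_cpu()-converted
+ * reorder_data field of the RX MPDU descriptor):
+ *
+ *	u16 nssn = reorder & IWL_RX_MPDU_REORDER_NSSN_MASK;
+ *	u16 sn = (reorder & IWL_RX_MPDU_REORDER_SN_MASK) >>
+ *		 IWL_RX_MPDU_REORDER_SN_SHIFT;
+ *	u8 baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >>
+ *		  IWL_RX_MPDU_REORDER_BAID_SHIFT;
+ */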
+
+enum iwl_rx_mpdu_phy_info {
+	IWL_RX_MPDU_PHY_AMPDU		= BIT(5),
+	IWL_RX_MPDU_PHY_AMPDU_TOGGLE	= BIT(6),
+	IWL_RX_MPDU_PHY_SHORT_PREAMBLE	= BIT(7),
+	IWL_RX_MPDU_PHY_TSF_OVERLOAD	= BIT(8),
+};
+
+enum iwl_rx_mpdu_mac_info {
+	IWL_RX_MPDU_PHY_MAC_INDEX_MASK		= 0x0f,
+	IWL_RX_MPDU_PHY_PHY_INDEX_MASK		= 0xf0,
+};
+
+/*
+ * enum iwl_rx_he_phy - HE PHY data
+ */
+enum iwl_rx_he_phy {
+	IWL_RX_HE_PHY_BEAM_CHNG			= BIT(0),
+	IWL_RX_HE_PHY_UPLINK			= BIT(1),
+	IWL_RX_HE_PHY_BSS_COLOR_MASK		= 0xfc,
+	IWL_RX_HE_PHY_SPATIAL_REUSE_MASK	= 0xf00,
+	IWL_RX_HE_PHY_SU_EXT_BW10		= BIT(12),
+	IWL_RX_HE_PHY_TXOP_DUR_MASK		= 0xfe000,
+	IWL_RX_HE_PHY_LDPC_EXT_SYM		= BIT(20),
+	IWL_RX_HE_PHY_PRE_FEC_PAD_MASK		= 0x600000,
+	IWL_RX_HE_PHY_PE_DISAMBIG		= BIT(23),
+	IWL_RX_HE_PHY_DOPPLER			= BIT(24),
+	/* 6 bits reserved */
+	IWL_RX_HE_PHY_DELIM_EOF			= BIT(31),
+
+	/* second dword - MU data */
+	IWL_RX_HE_PHY_SIGB_COMPRESSION		= BIT_ULL(32 + 0),
+	IWL_RX_HE_PHY_SIBG_SYM_OR_USER_NUM_MASK	= 0x1e00000000ULL,
+	IWL_RX_HE_PHY_HE_LTF_NUM_MASK		= 0xe000000000ULL,
+	IWL_RX_HE_PHY_RU_ALLOC_SEC80		= BIT_ULL(32 + 8),
+	/* trigger encoded */
+	IWL_RX_HE_PHY_RU_ALLOC_MASK		= 0xfe0000000000ULL,
+	IWL_RX_HE_PHY_SIGB_MCS_MASK		= 0xf000000000000ULL,
+	/* 1 bit reserved */
+	IWL_RX_HE_PHY_SIGB_DCM			= BIT_ULL(32 + 21),
+	IWL_RX_HE_PHY_PREAMBLE_PUNC_TYPE_MASK	= 0xc0000000000000ULL,
+	/* 8 bits reserved */
+};
+
+/**
+ * struct iwl_rx_mpdu_desc_v1 - RX MPDU descriptor
+ */
+struct iwl_rx_mpdu_desc_v1 {
+	/* DW7 - carries rss_hash only when rpa_en == 1 */
+	/**
+	 * @rss_hash: RSS hash value
+	 */
+	__le32 rss_hash;
+	/* DW8 - carries filter_match only when rpa_en == 1 */
+	/**
+	 * @filter_match: filter match value
+	 */
+	__le32 filter_match;
+	/* DW9 */
+	/**
+	 * @rate_n_flags: RX rate/flags encoding
+	 */
+	__le32 rate_n_flags;
+	/* DW10 */
+	/**
+	 * @energy_a: energy chain A
+	 */
+	u8 energy_a;
+	/**
+	 * @energy_b: energy chain B
+	 */
+	u8 energy_b;
+	/**
+	 * @channel: channel number
+	 */
+	u8 channel;
+	/**
+	 * @mac_context: MAC context mask
+	 */
+	u8 mac_context;
+	/* DW11 */
+	/**
+	 * @gp2_on_air_rise: GP2 timer value on air rise (INA)
+	 */
+	__le32 gp2_on_air_rise;
+	/* DW12 & DW13 */
+	union {
+		/**
+		 * @tsf_on_air_rise:
+		 * TSF value on air rise (INA), only valid if
+		 * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set
+		 */
+		__le64 tsf_on_air_rise;
+		/**
+		 * @he_phy_data:
+		 * HE PHY data, see &enum iwl_rx_he_phy, valid
+		 * only if %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set
+		 */
+		__le64 he_phy_data;
+	};
+} __packed;
+
+/**
+ * struct iwl_rx_mpdu_desc_v3 - RX MPDU descriptor
+ */
+struct iwl_rx_mpdu_desc_v3 {
+	/* DW7 - carries filter_match only when rpa_en == 1 */
+	/**
+	 * @filter_match: filter match value
+	 */
+	__le32 filter_match;
+	/* DW8 - carries rss_hash only when rpa_en == 1 */
+	/**
+	 * @rss_hash: RSS hash value
+	 */
+	__le32 rss_hash;
+	/* DW9 */
+	/**
+	 * @partial_hash: 31:0 ip/tcp header hash
+	 *	w/o some fields (such as IP SRC addr)
+	 */
+	__le32 partial_hash;
+	/* DW10 */
+	/**
+	 * @raw_xsum: raw xsum value
+	 */
+	__le32 raw_xsum;
+	/* DW11 */
+	/**
+	 * @rate_n_flags: RX rate/flags encoding
+	 */
+	__le32 rate_n_flags;
+	/* DW12 */
+	/**
+	 * @energy_a: energy chain A
+	 */
+	u8 energy_a;
+	/**
+	 * @energy_b: energy chain B
+	 */
+	u8 energy_b;
+	/**
+	 * @channel: channel number
+	 */
+	u8 channel;
+	/**
+	 * @mac_context: MAC context mask
+	 */
+	u8 mac_context;
+	/* DW13 */
+	/**
+	 * @gp2_on_air_rise: GP2 timer value on air rise (INA)
+	 */
+	__le32 gp2_on_air_rise;
+	/* DW14 & DW15 */
+	union {
+		/**
+		 * @tsf_on_air_rise:
+		 * TSF value on air rise (INA), only valid if
+		 * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set
+		 */
+		__le64 tsf_on_air_rise;
+		/**
+		 * @he_phy_data:
+		 * HE PHY data, see &enum iwl_rx_he_phy, valid
+		 * only if %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set
+		 */
+		__le64 he_phy_data;
+	};
+	/* DW16 & DW17 */
+	/**
+	 * @reserved: reserved
+	 */
+	__le32 reserved[2];
+} __packed; /* RX_MPDU_RES_START_API_S_VER_3 */
+
+/**
+ * struct iwl_rx_mpdu_desc - RX MPDU descriptor
+ */
+struct iwl_rx_mpdu_desc {
+	/* DW2 */
+	/**
+	 * @mpdu_len: MPDU length
+	 */
+	__le16 mpdu_len;
+	/**
+	 * @mac_flags1: &enum iwl_rx_mpdu_mac_flags1
+	 */
+	u8 mac_flags1;
+	/**
+	 * @mac_flags2: &enum iwl_rx_mpdu_mac_flags2
+	 */
+	u8 mac_flags2;
+	/* DW3 */
+	/**
+	 * @amsdu_info: &enum iwl_rx_mpdu_amsdu_info
+	 */
+	u8 amsdu_info;
+	/**
+	 * @phy_info: &enum iwl_rx_mpdu_phy_info
+	 */
+	__le16 phy_info;
+	/**
+	 * @mac_phy_idx: MAC/PHY index
+	 */
+	u8 mac_phy_idx;
+	/* DW4 - carries csum data only when rpa_en == 1 */
+	/**
	 * @raw_csum: raw checksum (allegedly unreliable)
+	 */
+	__le16 raw_csum;
+	/**
+	 * @l3l4_flags: &enum iwl_rx_l3l4_flags
+	 */
+	__le16 l3l4_flags;
+	/* DW5 */
+	/**
+	 * @status: &enum iwl_rx_mpdu_status
+	 */
+	__le16 status;
+	/**
+	 * @hash_filter: hash filter value
+	 */
+	u8 hash_filter;
+	/**
+	 * @sta_id_flags: &enum iwl_rx_mpdu_sta_id_flags
+	 */
+	u8 sta_id_flags;
+	/* DW6 */
+	/**
+	 * @reorder_data: &enum iwl_rx_mpdu_reorder_data
+	 */
+	__le32 reorder_data;
+
+	union {
+		struct iwl_rx_mpdu_desc_v1 v1;
+		struct iwl_rx_mpdu_desc_v3 v3;
+	};
+} __packed; /* RX_MPDU_RES_START_API_S_VER_3 */
+
+#define IWL_RX_DESC_SIZE_V1 offsetofend(struct iwl_rx_mpdu_desc, v1)
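+
+/*
+ * Illustrative size selection (editorial note): on devices using the
+ * v1 layout only the bytes up to and including @v1 are valid, e.g.
+ *
+ *	desc_size = use_v3_layout ? sizeof(struct iwl_rx_mpdu_desc)
+ *				  : IWL_RX_DESC_SIZE_V1;
+ *
+ * where use_v3_layout is a hypothetical per-device capability flag.
+ */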
+
+/**
+ * struct iwl_frame_release - frame release notification
+ * @baid: BlockAck ID of the affected reorder buffer
+ * @reserved: reserved (padding)
+ * @nssn: the NSSN, up to which frames may be released
+ */
+struct iwl_frame_release {
+	u8 baid;
+	u8 reserved;
+	__le16 nssn;
+};
+
+enum iwl_rss_hash_func_en {
+	IWL_RSS_HASH_TYPE_IPV4_TCP,
+	IWL_RSS_HASH_TYPE_IPV4_UDP,
+	IWL_RSS_HASH_TYPE_IPV4_PAYLOAD,
+	IWL_RSS_HASH_TYPE_IPV6_TCP,
+	IWL_RSS_HASH_TYPE_IPV6_UDP,
+	IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
+};
+
+#define IWL_RSS_HASH_KEY_CNT 10
+#define IWL_RSS_INDIRECTION_TABLE_SIZE 128
+#define IWL_RSS_ENABLE 1
+
+/**
+ * struct iwl_rss_config_cmd - RSS (Receive Side Scaling) configuration
+ *
+ * @flags: 1 - enable, 0 - disable
+ * @hash_mask: Type of RSS to use. Values are from &enum iwl_rss_hash_func_en
+ * @reserved: reserved
+ * @secret_key: 320 bit input of random key configuration from driver
+ * @indirection_table: indirection table
+ */
+struct iwl_rss_config_cmd {
+	__le32 flags;
+	u8 hash_mask;
+	u8 reserved[3];
+	__le32 secret_key[IWL_RSS_HASH_KEY_CNT];
+	u8 indirection_table[IWL_RSS_INDIRECTION_TABLE_SIZE];
+} __packed; /* RSS_CONFIG_CMD_API_S_VER_1 */
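+
+/*
+ * Illustrative fill of the indirection table (hypothetical snippet,
+ * one possible scheme): spread RX traffic round-robin across
+ * num_queues RSS queues:
+ *
+ *	for (i = 0; i < IWL_RSS_INDIRECTION_TABLE_SIZE; i++)
+ *		cmd.indirection_table[i] = i % num_queues;
+ */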
+
+#define IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE 128
+#define IWL_MULTI_QUEUE_SYNC_SENDER_POS 0
+#define IWL_MULTI_QUEUE_SYNC_SENDER_MSK 0xf
+
+/**
+ * struct iwl_rxq_sync_cmd - RXQ notification trigger
+ *
+ * @flags: flags of the notification. Bits 0:3 are the sender queue
+ * @rxq_mask: rx queues to send the notification on
+ * @count: number of bytes in payload, should be DWORD aligned
+ * @payload: data to send to rx queues
+ */
+struct iwl_rxq_sync_cmd {
+	__le32 flags;
+	__le32 rxq_mask;
+	__le32 count;
+	u8 payload[];
+} __packed; /* MULTI_QUEUE_DRV_SYNC_HDR_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_rxq_sync_notification - Notification triggered by RXQ
+ * sync command
+ *
+ * @count: number of bytes in payload
+ * @payload: data to send to rx queues
+ */
+struct iwl_rxq_sync_notification {
+	__le32 count;
+	u8 payload[];
+} __packed; /* MULTI_QUEUE_DRV_SYNC_HDR_CMD_API_S_VER_1 */
+
+/**
+ * enum iwl_mvm_rxq_notif_type - Internal message identifier
+ *
+ * @IWL_MVM_RXQ_EMPTY: empty sync notification
+ * @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA
+ */
+enum iwl_mvm_rxq_notif_type {
+	IWL_MVM_RXQ_EMPTY,
+	IWL_MVM_RXQ_NOTIF_DEL_BA,
+};
+
+/**
+ * struct iwl_mvm_internal_rxq_notif - Internal representation of the data sent
+ * in &struct iwl_rxq_sync_cmd. Should be DWORD aligned.
+ * FW is agnostic to the payload, so there are no endianness requirements.
+ *
+ * @type: value from &enum iwl_mvm_rxq_notif_type
+ * @sync: ctrl path is waiting for all notifications to be received
+ * @cookie: internal cookie to identify old notifications
+ * @data: payload
+ */
+struct iwl_mvm_internal_rxq_notif {
+	u16 type;
+	u16 sync;
+	u32 cookie;
+	u8 data[];
+} __packed;
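+
+/*
+ * Usage sketch (illustrative only): the structure above travels as the
+ * opaque payload of &struct iwl_rxq_sync_cmd, e.g.
+ *
+ *	struct iwl_mvm_internal_rxq_notif notif = {
+ *		.type = IWL_MVM_RXQ_EMPTY,
+ *		.sync = 1,
+ *	};
+ *
+ *	cmd->count = sizeof(notif);
+ *	memcpy(cmd->payload, &notif, sizeof(notif));
+ */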
+
+/**
+ * enum iwl_mvm_pm_event - type of station PM event
+ * @IWL_MVM_PM_EVENT_AWAKE: station woke up
+ * @IWL_MVM_PM_EVENT_ASLEEP: station went to sleep
+ * @IWL_MVM_PM_EVENT_UAPSD: station sent uAPSD trigger
+ * @IWL_MVM_PM_EVENT_PS_POLL: station sent PS-Poll
+ */
+enum iwl_mvm_pm_event {
+	IWL_MVM_PM_EVENT_AWAKE,
+	IWL_MVM_PM_EVENT_ASLEEP,
+	IWL_MVM_PM_EVENT_UAPSD,
+	IWL_MVM_PM_EVENT_PS_POLL,
+}; /* PEER_PM_NTFY_API_E_VER_1 */
+
+/**
+ * struct iwl_mvm_pm_state_notification - station PM state notification
+ * @sta_id: station ID of the station changing state
+ * @type: the new powersave state, see &enum iwl_mvm_pm_event
+ */
+struct iwl_mvm_pm_state_notification {
+	u8 sta_id;
+	u8 type;
+	/* private: */
+	__le16 reserved;
+} __packed; /* PEER_PM_NTFY_API_S_VER_1 */
+
+#define BA_WINDOW_STREAMS_MAX		16
+#define BA_WINDOW_STATUS_TID_MSK	0x000F
+#define BA_WINDOW_STATUS_STA_ID_POS	4
+#define BA_WINDOW_STATUS_STA_ID_MSK	0x01F0
+#define BA_WINDOW_STATUS_VALID_MSK	BIT(9)
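+
+/*
+ * Illustrative unpacking of one ra_tid entry (editorial sketch;
+ * "ra_tid" is a hypothetical local holding the le16_to_cpu()-converted
+ * array element):
+ *
+ *	u8 tid = ra_tid & BA_WINDOW_STATUS_TID_MSK;
+ *	u8 sta_id = (ra_tid & BA_WINDOW_STATUS_STA_ID_MSK) >>
+ *		    BA_WINDOW_STATUS_STA_ID_POS;
+ *	bool valid = ra_tid & BA_WINDOW_STATUS_VALID_MSK;
+ */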
+
+/**
+ * struct iwl_ba_window_status_notif - reordering window's status notification
+ * @bitmap: bitmap of received frames [start_seq_num + 0]..[start_seq_num + 63]
+ * @ra_tid: bit 3:0 - TID, bit 8:4 - STA_ID, bit 9 - valid
+ * @start_seq_num: the start sequence number of the bitmap
+ * @mpdu_rx_count: the number of received MPDUs since entering D0i3
+ */
+struct iwl_ba_window_status_notif {
+	__le64 bitmap[BA_WINDOW_STREAMS_MAX];
+	__le16 ra_tid[BA_WINDOW_STREAMS_MAX];
+	__le32 start_seq_num[BA_WINDOW_STREAMS_MAX];
+	__le16 mpdu_rx_count[BA_WINDOW_STREAMS_MAX];
+} __packed; /* BA_WINDOW_STATUS_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwl_rfh_queue_data - per-queue RX queue configuration data
+ * @q_num: queue number
+ * @enable: enable queue
+ * @reserved: alignment
+ * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
+ * @fr_bd_cb: DMA address of free RB table
+ * @ur_bd_cb: DMA address of used RB table
+ * @fr_bd_wid: Initial index of the free table
+ */
+struct iwl_rfh_queue_data {
+	u8 q_num;
+	u8 enable;
+	__le16 reserved;
+	__le64 urbd_stts_wrptr;
+	__le64 fr_bd_cb;
+	__le64 ur_bd_cb;
+	__le32 fr_bd_wid;
+} __packed; /* RFH_QUEUE_CONFIG_S_VER_1 */
+
+/**
+ * struct iwl_rfh_queue_config - RX queue configuration
+ * @num_queues: number of queues configured
+ * @reserved: alignment
+ * @data: DMA addresses per-queue
+ */
+struct iwl_rfh_queue_config {
+	u8 num_queues;
+	u8 reserved[3];
+	struct iwl_rfh_queue_data data[];
+} __packed; /* RFH_QUEUE_CONFIG_API_S_VER_1 */
+
+#endif /* __iwl_fw_api_rx_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
new file mode 100644
index 0000000..a17c4a7
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -0,0 +1,854 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_fw_api_scan_h__
+#define __iwl_fw_api_scan_h__
+
+/* Scan Commands, Responses, Notifications */
+
+/* Max number of IEs for direct SSID scans in a command */
+#define PROBE_OPTION_MAX		20
+
+/**
+ * struct iwl_ssid_ie - directed scan network information element
+ *
+ * Up to 20 of these may appear in REPLY_SCAN_CMD,
+ * selected by "type" bit field in struct iwl_scan_channel;
+ * each channel may select different ssids from among the 20 entries.
+ * SSID IEs get transmitted in reverse order of entry.
+ *
+ * @id: element ID
+ * @len: element length
+ * @ssid: element (SSID) data
+ */
+struct iwl_ssid_ie {
+	u8 id;
+	u8 len;
+	u8 ssid[IEEE80211_MAX_SSID_LEN];
+} __packed; /* SCAN_DIRECT_SSID_IE_API_S_VER_1 */
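+
+/*
+ * Illustrative sketch, not part of the firmware API: filling one
+ * direct-scan SSID entry. WLAN_EID_SSID and IEEE80211_MAX_SSID_LEN
+ * come from linux/ieee80211.h; the helper name is hypothetical.
+ */
+static inline void iwl_fill_ssid_ie(struct iwl_ssid_ie *ie,
+				    const u8 *ssid, u8 ssid_len)
+{
+	ie->id = WLAN_EID_SSID;
+	ie->len = min_t(u8, ssid_len, IEEE80211_MAX_SSID_LEN);
+	memcpy(ie->ssid, ssid, ie->len);
+}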
+
+/* scan offload */
+#define IWL_SCAN_MAX_BLACKLIST_LEN	64
+#define IWL_SCAN_SHORT_BLACKLIST_LEN	16
+#define IWL_SCAN_MAX_PROFILES		11
+#define SCAN_OFFLOAD_PROBE_REQ_SIZE	512
+
+/* Default watchdog (in ms) for scheduled scan iteration */
+#define IWL_SCHED_SCAN_WATCHDOG cpu_to_le16(15000)
+
+#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
+#define CAN_ABORT_STATUS 1
+
+#define IWL_FULL_SCAN_MULTIPLIER 5
+#define IWL_FAST_SCHED_SCAN_ITERATIONS 3
+#define IWL_MAX_SCHED_SCAN_PLANS 2
+
+enum scan_framework_client {
+	SCAN_CLIENT_SCHED_SCAN		= BIT(0),
+	SCAN_CLIENT_NETDETECT		= BIT(1),
+	SCAN_CLIENT_ASSET_TRACKING	= BIT(2),
+};
+
+/**
+ * struct iwl_scan_offload_blacklist - SCAN_OFFLOAD_BLACKLIST_S
+ * @ssid:		MAC address (BSSID) to filter out
+ * @reported_rssi:	AP RSSI reported to the host
+ * @client_bitmap:	clients that ignore this entry - &enum scan_framework_client
+ */
+struct iwl_scan_offload_blacklist {
+	u8 ssid[ETH_ALEN];
+	u8 reported_rssi;
+	u8 client_bitmap;
+} __packed;
+
+enum iwl_scan_offload_network_type {
+	IWL_NETWORK_TYPE_BSS	= 1,
+	IWL_NETWORK_TYPE_IBSS	= 2,
+	IWL_NETWORK_TYPE_ANY	= 3,
+};
+
+enum iwl_scan_offload_band_selection {
+	IWL_SCAN_OFFLOAD_SELECT_2_4	= 0x4,
+	IWL_SCAN_OFFLOAD_SELECT_5_2	= 0x8,
+	IWL_SCAN_OFFLOAD_SELECT_ANY	= 0xc,
+};
+
+/**
+ * struct iwl_scan_offload_profile - SCAN_OFFLOAD_PROFILE_S
+ * @ssid_index:		index to ssid list in fixed part
+ * @unicast_cipher:	encryption algorithm to match - bitmap
+ * @auth_alg:		authentication algorithm to match - bitmap
+ * @network_type:	enum iwl_scan_offload_network_type
+ * @band_selection:	enum iwl_scan_offload_band_selection
+ * @client_bitmap:	clients waiting for match - enum scan_framework_client
+ * @reserved:		reserved
+ */
+struct iwl_scan_offload_profile {
+	u8 ssid_index;
+	u8 unicast_cipher;
+	u8 auth_alg;
+	u8 network_type;
+	u8 band_selection;
+	u8 client_bitmap;
+	u8 reserved[2];
+} __packed;
+
+/**
+ * struct iwl_scan_offload_profile_cfg - SCAN_OFFLOAD_PROFILES_CFG_API_S_VER_1
+ * @profiles:		profiles to search for match
+ * @blacklist_len:	length of blacklist
+ * @num_profiles:	num of profiles in the list
+ * @match_notify:	clients waiting for match found notification
+ * @pass_match:		clients waiting for the results
+ * @active_clients:	active clients bitmap - enum scan_framework_client
+ * @any_beacon_notify:	clients waiting for match notification without match
+ * @reserved:		reserved
+ */
+struct iwl_scan_offload_profile_cfg {
+	struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES];
+	u8 blacklist_len;
+	u8 num_profiles;
+	u8 match_notify;
+	u8 pass_match;
+	u8 active_clients;
+	u8 any_beacon_notify;
+	u8 reserved[2];
+} __packed;
+
+/**
+ * struct iwl_scan_schedule_lmac - schedule of scan offload
+ * @delay:		delay between iterations, in seconds.
+ * @iterations:		num of scan iterations
+ * @full_scan_mul:	number of partial scans before each full scan
+ */
+struct iwl_scan_schedule_lmac {
+	__le16 delay;
+	u8 iterations;
+	u8 full_scan_mul;
+} __packed; /* SCAN_SCHEDULE_API_S */
+
+enum iwl_scan_offload_complete_status {
+	IWL_SCAN_OFFLOAD_COMPLETED	= 1,
+	IWL_SCAN_OFFLOAD_ABORTED	= 2,
+};
+
+enum iwl_scan_ebs_status {
+	IWL_SCAN_EBS_SUCCESS,
+	IWL_SCAN_EBS_FAILED,
+	IWL_SCAN_EBS_CHAN_NOT_FOUND,
+	IWL_SCAN_EBS_INACTIVE,
+};
+
+/**
+ * struct iwl_scan_req_tx_cmd - SCAN_REQ_TX_CMD_API_S
+ * @tx_flags: combination of TX_CMD_FLG_*
+ * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
+ *	cleared. Combination of RATE_MCS_*
+ * @sta_id: index of destination station in FW station table
+ * @reserved: for alignment and future use
+ */
+struct iwl_scan_req_tx_cmd {
+	__le32 tx_flags;
+	__le32 rate_n_flags;
+	u8 sta_id;
+	u8 reserved[3];
+} __packed;
+
+enum iwl_scan_channel_flags_lmac {
+	IWL_UNIFIED_SCAN_CHANNEL_FULL		= BIT(27),
+	IWL_UNIFIED_SCAN_CHANNEL_PARTIAL	= BIT(28),
+};
+
+/**
+ * struct iwl_scan_channel_cfg_lmac - SCAN_CHANNEL_CFG_S_VER2
+ * @flags:		bits 1-20: directed scan to i'th ssid
+ *			other bits &enum iwl_scan_channel_flags_lmac
+ * @channel_num:	channel number 1-13 etc
+ * @iter_count:		scan iteration on this channel
+ * @iter_interval:	interval in seconds between iterations on one channel
+ */
+struct iwl_scan_channel_cfg_lmac {
+	__le32 flags;
+	__le16 channel_num;
+	__le16 iter_count;
+	__le32 iter_interval;
+} __packed;
+
+/*
+ * struct iwl_scan_probe_segment - PROBE_SEGMENT_API_S_VER_1
+ * @offset: offset in the data block
+ * @len: length of the segment
+ */
+struct iwl_scan_probe_segment {
+	__le16 offset;
+	__le16 len;
+} __packed;
+
+/* iwl_scan_probe_req - PROBE_REQUEST_FRAME_API_S_VER_2
+ * @mac_header: first (and common) part of the probe
+ * @band_data: band specific data
+ * @common_data: last (and common) part of the probe
+ * @buf: raw data block
+ */
+struct iwl_scan_probe_req {
+	struct iwl_scan_probe_segment mac_header;
+	struct iwl_scan_probe_segment band_data[2];
+	struct iwl_scan_probe_segment common_data;
+	u8 buf[SCAN_OFFLOAD_PROBE_REQ_SIZE];
+} __packed;
+
+enum iwl_scan_channel_flags {
+	IWL_SCAN_CHANNEL_FLAG_EBS		= BIT(0),
+	IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE	= BIT(1),
+	IWL_SCAN_CHANNEL_FLAG_CACHE_ADD		= BIT(2),
+};
+
+/* struct iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S
+ * @flags: enum iwl_scan_channel_flags
+ * @non_ebs_ratio: defines the ratio of number of scan iterations where EBS is
+ *	involved.
+ *	1 - EBS is disabled.
+ *	2 - every second scan will be a full scan (and so on).
+ */
+struct iwl_scan_channel_opt {
+	__le16 flags;
+	__le16 non_ebs_ratio;
+} __packed;
+
+/**
+ * enum iwl_mvm_lmac_scan_flags - LMAC scan flags
+ * @IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL: pass all beacons and probe responses
+ *	without filtering.
+ * @IWL_MVM_LMAC_SCAN_FLAG_PASSIVE: force passive scan on all channels
+ * @IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION: single channel scan
+ * @IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE: send iteration complete notification
+ * @IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS: multiple SSID matching
+ * @IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED: all passive scans will be fragmented
+ * @IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED: insert WFA vendor-specific TPC report
+ *	and DS parameter set IEs into probe requests.
+ * @IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL: use extended dwell time on channels
+ *	1, 6 and 11.
+ * @IWL_MVM_LMAC_SCAN_FLAG_MATCH: Send match found notification on matches
+ */
+enum iwl_mvm_lmac_scan_flags {
+	IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL		= BIT(0),
+	IWL_MVM_LMAC_SCAN_FLAG_PASSIVE		= BIT(1),
+	IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION	= BIT(2),
+	IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE	= BIT(3),
+	IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS	= BIT(4),
+	IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED	= BIT(5),
+	IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED	= BIT(6),
+	IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL	= BIT(7),
+	IWL_MVM_LMAC_SCAN_FLAG_MATCH		= BIT(9),
+};
+
+enum iwl_scan_priority {
+	IWL_SCAN_PRIORITY_LOW,
+	IWL_SCAN_PRIORITY_MEDIUM,
+	IWL_SCAN_PRIORITY_HIGH,
+};
+
+enum iwl_scan_priority_ext {
+	IWL_SCAN_PRIORITY_EXT_0_LOWEST,
+	IWL_SCAN_PRIORITY_EXT_1,
+	IWL_SCAN_PRIORITY_EXT_2,
+	IWL_SCAN_PRIORITY_EXT_3,
+	IWL_SCAN_PRIORITY_EXT_4,
+	IWL_SCAN_PRIORITY_EXT_5,
+	IWL_SCAN_PRIORITY_EXT_6,
+	IWL_SCAN_PRIORITY_EXT_7_HIGHEST,
+};
+
+/**
+ * struct iwl_scan_req_lmac - SCAN_REQUEST_CMD_API_S_VER_1
+ * @reserved1: for alignment and future use
+ * @n_channels: num of channels to scan
+ * @active_dwell: dwell time for active channels
+ * @passive_dwell: dwell time for passive channels
+ * @fragmented_dwell: dwell time for fragmented passive scan
+ * @extended_dwell: dwell time for channels 1, 6 and 11 (in certain cases)
+ * @reserved2: for alignment and future use
+ * @rx_chain_select: PHY_RX_CHAIN_* flags
+ * @scan_flags: &enum iwl_mvm_lmac_scan_flags
+ * @max_out_time: max time (in TU) to be out of associated channel
+ * @suspend_time: pause scan this long (TUs) when returning to service channel
+ * @flags: RXON flags
+ * @filter_flags: RXON filter
+ * @tx_cmd: tx command for active scan; for 2GHz and for 5GHz
+ * @direct_scan: list of SSIDs for directed active scan
+ * @scan_prio: enum iwl_scan_priority
+ * @iter_num: number of scan iterations
+ * @delay: delay in seconds before first iteration
+ * @schedule: two scheduling plans. The first one is finite, the second one can
+ *	be infinite.
+ * @channel_opt: channel optimization options, for full and partial scan
+ * @data: channel configuration and probe request packet.
+ */
+struct iwl_scan_req_lmac {
+	/* SCAN_REQUEST_FIXED_PART_API_S_VER_7 */
+	__le32 reserved1;
+	u8 n_channels;
+	u8 active_dwell;
+	u8 passive_dwell;
+	u8 fragmented_dwell;
+	u8 extended_dwell;
+	u8 reserved2;
+	__le16 rx_chain_select;
+	__le32 scan_flags;
+	__le32 max_out_time;
+	__le32 suspend_time;
+	/* RX_ON_FLAGS_API_S_VER_1 */
+	__le32 flags;
+	__le32 filter_flags;
+	struct iwl_scan_req_tx_cmd tx_cmd[2];
+	struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
+	__le32 scan_prio;
+	/* SCAN_REQ_PERIODIC_PARAMS_API_S */
+	__le32 iter_num;
+	__le32 delay;
+	struct iwl_scan_schedule_lmac schedule[IWL_MAX_SCHED_SCAN_PLANS];
+	struct iwl_scan_channel_opt channel_opt[2];
+	u8 data[];
+} __packed;
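+
+/*
+ * Illustrative sketch, not part of the firmware API: per the @data
+ * description above, the variable tail carries n_channels channel
+ * configs followed by the probe request template, so the full command
+ * length would be computed roughly as below (an assumption based on
+ * the layout notes; the helper name is hypothetical).
+ */
+static inline size_t iwl_scan_req_lmac_size(u8 n_channels)
+{
+	return sizeof(struct iwl_scan_req_lmac) +
+	       n_channels * sizeof(struct iwl_scan_channel_cfg_lmac) +
+	       sizeof(struct iwl_scan_probe_req);
+}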
+
+/**
+ * struct iwl_scan_results_notif - scan results for one channel -
+ *	SCAN_RESULT_NTF_API_S_VER_3
+ * @channel: which channel the results are from
+ * @band: 0 for 5.2 GHz, 1 for 2.4 GHz
+ * @probe_status: SCAN_PROBE_STATUS_*, indicates success of probe request
+ * @num_probe_not_sent: # of requests that weren't sent due to not enough time
+ * @duration: duration spent in channel, in usecs
+ */
+struct iwl_scan_results_notif {
+	u8 channel;
+	u8 band;
+	u8 probe_status;
+	u8 num_probe_not_sent;
+	__le32 duration;
+} __packed;
+
+/**
+ * struct iwl_lmac_scan_complete_notif - notifies end of scanning (all channels)
+ *	SCAN_COMPLETE_NTF_API_S_VER_3
+ * @scanned_channels: number of channels scanned (and number of valid results)
+ * @status: one of SCAN_COMP_STATUS_*
+ * @bt_status: BT on/off status
+ * @last_channel: last channel that was scanned
+ * @tsf_low: TSF timer (lower half) in usecs
+ * @tsf_high: TSF timer (higher half) in usecs
+ * @results: an array of scan results, only "scanned_channels" of them are valid
+ */
+struct iwl_lmac_scan_complete_notif {
+	u8 scanned_channels;
+	u8 status;
+	u8 bt_status;
+	u8 last_channel;
+	__le32 tsf_low;
+	__le32 tsf_high;
+	struct iwl_scan_results_notif results[];
+} __packed;
+
+/**
+ * struct iwl_periodic_scan_complete - PERIODIC_SCAN_COMPLETE_NTF_API_S_VER_2
+ * @last_schedule_line: last schedule line executed (fast or regular)
+ * @last_schedule_iteration: last scan iteration executed before scan abort
+ * @status: &enum iwl_scan_offload_complete_status
+ * @ebs_status: EBS success status &enum iwl_scan_ebs_status
+ * @time_after_last_iter: time in seconds elapsed after last iteration
+ * @reserved: reserved
+ */
+struct iwl_periodic_scan_complete {
+	u8 last_schedule_line;
+	u8 last_schedule_iteration;
+	u8 status;
+	u8 ebs_status;
+	__le32 time_after_last_iter;
+	__le32 reserved;
+} __packed;
+
+/* UMAC Scan API */
+
+/* The maximum of either of these cannot exceed 8, because we use an
+ * 8-bit mask (see IWL_MVM_SCAN_MASK in mvm.h).
+ */
+#define IWL_MVM_MAX_UMAC_SCANS 8
+#define IWL_MVM_MAX_LMAC_SCANS 1
+
+enum scan_config_flags {
+	SCAN_CONFIG_FLAG_ACTIVATE			= BIT(0),
+	SCAN_CONFIG_FLAG_DEACTIVATE			= BIT(1),
+	SCAN_CONFIG_FLAG_FORBID_CHUB_REQS		= BIT(2),
+	SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS		= BIT(3),
+	SCAN_CONFIG_FLAG_SET_TX_CHAINS			= BIT(8),
+	SCAN_CONFIG_FLAG_SET_RX_CHAINS			= BIT(9),
+	SCAN_CONFIG_FLAG_SET_AUX_STA_ID			= BIT(10),
+	SCAN_CONFIG_FLAG_SET_ALL_TIMES			= BIT(11),
+	SCAN_CONFIG_FLAG_SET_EFFECTIVE_TIMES		= BIT(12),
+	SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS		= BIT(13),
+	SCAN_CONFIG_FLAG_SET_LEGACY_RATES		= BIT(14),
+	SCAN_CONFIG_FLAG_SET_MAC_ADDR			= BIT(15),
+	SCAN_CONFIG_FLAG_SET_FRAGMENTED			= BIT(16),
+	SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED		= BIT(17),
+	SCAN_CONFIG_FLAG_SET_CAM_MODE			= BIT(18),
+	SCAN_CONFIG_FLAG_CLEAR_CAM_MODE			= BIT(19),
+	SCAN_CONFIG_FLAG_SET_PROMISC_MODE		= BIT(20),
+	SCAN_CONFIG_FLAG_CLEAR_PROMISC_MODE		= BIT(21),
+	SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED		= BIT(22),
+	SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED		= BIT(23),
+
+	/* Bits 26-31 are for num of channels in channel_array */
+#define SCAN_CONFIG_N_CHANNELS(n) ((n) << 26)
+};
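+
+/*
+ * Illustrative sketch, not part of the firmware API: bits 26-31 of the
+ * flags word carry the channel count, so a complete flags value can be
+ * assembled as below. The helper name is hypothetical.
+ */
+static inline __le32 iwl_scan_config_flags(u32 flags,
+					   unsigned int n_channels)
+{
+	return cpu_to_le32(flags | SCAN_CONFIG_N_CHANNELS(n_channels));
+}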
+
+enum scan_config_rates {
+	/* OFDM basic rates */
+	SCAN_CONFIG_RATE_6M	= BIT(0),
+	SCAN_CONFIG_RATE_9M	= BIT(1),
+	SCAN_CONFIG_RATE_12M	= BIT(2),
+	SCAN_CONFIG_RATE_18M	= BIT(3),
+	SCAN_CONFIG_RATE_24M	= BIT(4),
+	SCAN_CONFIG_RATE_36M	= BIT(5),
+	SCAN_CONFIG_RATE_48M	= BIT(6),
+	SCAN_CONFIG_RATE_54M	= BIT(7),
+	/* CCK basic rates */
+	SCAN_CONFIG_RATE_1M	= BIT(8),
+	SCAN_CONFIG_RATE_2M	= BIT(9),
+	SCAN_CONFIG_RATE_5M	= BIT(10),
+	SCAN_CONFIG_RATE_11M	= BIT(11),
+
+	/* Bits 16-27 are for supported rates */
+#define SCAN_CONFIG_SUPPORTED_RATE(rate)	((rate) << 16)
+};
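+
+/*
+ * Illustrative sketch, not part of the firmware API: the basic rates
+ * occupy bits 0-11 and the same rates, marked as supported, are
+ * shifted into bits 16-27 via SCAN_CONFIG_SUPPORTED_RATE(). The helper
+ * and the chosen rate set are hypothetical.
+ */
+static inline __le32 iwl_scan_config_rates_example(void)
+{
+	u32 basic = SCAN_CONFIG_RATE_6M | SCAN_CONFIG_RATE_12M |
+		    SCAN_CONFIG_RATE_24M;
+
+	return cpu_to_le32(basic | SCAN_CONFIG_SUPPORTED_RATE(basic));
+}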
+
+enum iwl_channel_flags {
+	IWL_CHANNEL_FLAG_EBS				= BIT(0),
+	IWL_CHANNEL_FLAG_ACCURATE_EBS			= BIT(1),
+	IWL_CHANNEL_FLAG_EBS_ADD			= BIT(2),
+	IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE	= BIT(3),
+};
+
+/**
+ * struct iwl_scan_dwell - scan dwell times
+ * @active:		default dwell time for active scan
+ * @passive:		default dwell time for passive scan
+ * @fragmented:		default dwell time for fragmented scan
+ * @extended:		default dwell time for channels 1, 6 and 11
+ */
+struct iwl_scan_dwell {
+	u8 active;
+	u8 passive;
+	u8 fragmented;
+	u8 extended;
+} __packed;
+
+/**
+ * struct iwl_scan_config_v1 - scan configuration
+ * @flags:			enum scan_config_flags
+ * @tx_chains:			valid_tx antenna - ANT_* definitions
+ * @rx_chains:			valid_rx antenna - ANT_* definitions
+ * @legacy_rates:		default legacy rates - enum scan_config_rates
+ * @out_of_channel_time:	default max out of serving channel time
+ * @suspend_time:		default max suspend time
+ * @dwell:			dwells for the scan
+ * @mac_addr:			default mac address to be used in probes
+ * @bcast_sta_id:		the index of the station in the fw
+ * @channel_flags:		default channel flags - &enum iwl_channel_flags
+ * @channel_array:		default supported channels
+ */
+struct iwl_scan_config_v1 {
+	__le32 flags;
+	__le32 tx_chains;
+	__le32 rx_chains;
+	__le32 legacy_rates;
+	__le32 out_of_channel_time;
+	__le32 suspend_time;
+	struct iwl_scan_dwell dwell;
+	u8 mac_addr[ETH_ALEN];
+	u8 bcast_sta_id;
+	u8 channel_flags;
+	u8 channel_array[];
+} __packed; /* SCAN_CONFIG_DB_CMD_API_S */
+
+#define SCAN_TWO_LMACS 2
+#define SCAN_LB_LMAC_IDX 0
+#define SCAN_HB_LMAC_IDX 1
+
+struct iwl_scan_config {
+	__le32 flags;
+	__le32 tx_chains;
+	__le32 rx_chains;
+	__le32 legacy_rates;
+	__le32 out_of_channel_time[SCAN_TWO_LMACS];
+	__le32 suspend_time[SCAN_TWO_LMACS];
+	struct iwl_scan_dwell dwell;
+	u8 mac_addr[ETH_ALEN];
+	u8 bcast_sta_id;
+	u8 channel_flags;
+	u8 channel_array[];
+} __packed; /* SCAN_CONFIG_DB_CMD_API_S_3 */
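+
+/*
+ * Illustrative sketch, not part of the firmware API: channel_array
+ * holds one byte per channel, so the command length follows directly
+ * from the channel count. The helper name is hypothetical.
+ */
+static inline size_t iwl_scan_config_size(unsigned int n_channels)
+{
+	return sizeof(struct iwl_scan_config) + n_channels;
+}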
+
+/**
+ * enum iwl_umac_scan_flags - UMAC scan flags
+ * @IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request
+ *	can be preempted by other scan requests with higher priority.
+ *	The low priority scan will be resumed when the higher priority scan is
+ *	completed.
+ * @IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver
+ *	when scan starts.
+ */
+enum iwl_umac_scan_flags {
+	IWL_UMAC_SCAN_FLAG_PREEMPTIVE		= BIT(0),
+	IWL_UMAC_SCAN_FLAG_START_NOTIF		= BIT(1),
+};
+
+enum iwl_umac_scan_uid_offsets {
+	IWL_UMAC_SCAN_UID_TYPE_OFFSET		= 0,
+	IWL_UMAC_SCAN_UID_SEQ_OFFSET		= 8,
+};
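+
+/*
+ * Illustrative sketch, not part of the firmware API: a scan UID packs
+ * a type at bit 0 and a sequence number at bit 8, per the offsets
+ * above. The helper name is hypothetical.
+ */
+static inline u32 iwl_umac_scan_uid(u32 type, u32 seq)
+{
+	return (type << IWL_UMAC_SCAN_UID_TYPE_OFFSET) |
+	       (seq << IWL_UMAC_SCAN_UID_SEQ_OFFSET);
+}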
+
+enum iwl_umac_scan_general_flags {
+	IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC		= BIT(0),
+	IWL_UMAC_SCAN_GEN_FLAGS_OVER_BT			= BIT(1),
+	IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL		= BIT(2),
+	IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE			= BIT(3),
+	IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT		= BIT(4),
+	IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE		= BIT(5),
+	IWL_UMAC_SCAN_GEN_FLAGS_MULTIPLE_SSID		= BIT(6),
+	IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED		= BIT(7),
+	IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED		= BIT(8),
+	IWL_UMAC_SCAN_GEN_FLAGS_MATCH			= BIT(9),
+	IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL		= BIT(10),
+	/* Extended dwell is obsolete when adaptive dwell is used, making this
+	 * bit reusable. Hence, probe request defer is used only when adaptive
+	 * dwell is supported. */
+	IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_DEFER_SUPP	= BIT(10),
+	IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED	= BIT(11),
+	IWL_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL		= BIT(13),
+	IWL_UMAC_SCAN_GEN_FLAGS_MAX_CHNL_TIME		= BIT(14),
+	IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_HIGH_TX_RATE	= BIT(15),
+};
+
+/**
+ * enum iwl_umac_scan_general_flags2 - UMAC scan general flags #2
+ * @IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL: Whether to send a complete
+ *	notification per channel or not.
+ */
+enum iwl_umac_scan_general_flags2 {
+	IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL	= BIT(0),
+};
+
+/**
+ * struct iwl_scan_channel_cfg_umac - UMAC channel configuration
+ * @flags:		bitmap - 0-19:	directed scan to i'th ssid.
+ * @channel_num:	channel number 1-13 etc.
+ * @iter_count:		repetition count for the channel.
+ * @iter_interval:	interval between two scan iterations on one channel.
+ */
+struct iwl_scan_channel_cfg_umac {
+	__le32 flags;
+	u8 channel_num;
+	u8 iter_count;
+	__le16 iter_interval;
+} __packed; /* SCAN_CHANNEL_CFG_S_VER2 */
+
+/**
+ * struct iwl_scan_umac_schedule - UMAC scan schedule plan
+ * @interval: interval in seconds between scan iterations
+ * @iter_count: num of scan iterations for schedule plan, 0xff for infinite loop
+ * @reserved: for alignment and future use
+ */
+struct iwl_scan_umac_schedule {
+	__le16 interval;
+	u8 iter_count;
+	u8 reserved;
+} __packed; /* SCAN_SCHED_PARAM_API_S_VER_1 */
+
+/**
+ * struct iwl_scan_req_umac_tail - the rest of the UMAC scan request command
+ *      parameters following channels configuration array.
+ * @schedule: two scheduling plans.
+ * @delay: delay in TUs before starting the first scan iteration
+ * @reserved: for future use and alignment
+ * @preq: probe request with IEs blocks
+ * @direct_scan: list of SSIDs for directed active scan
+ */
+struct iwl_scan_req_umac_tail {
+	/* SCAN_PERIODIC_PARAMS_API_S_VER_1 */
+	struct iwl_scan_umac_schedule schedule[IWL_MAX_SCHED_SCAN_PLANS];
+	__le16 delay;
+	__le16 reserved;
+	/* SCAN_PROBE_PARAMS_API_S_VER_1 */
+	struct iwl_scan_probe_req preq;
+	struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
+} __packed;
+
+/**
+ * struct iwl_scan_umac_chan_param - UMAC scan channel parameters
+ * @flags: channel flags &enum iwl_scan_channel_flags
+ * @count: num of channels in scan request
+ * @reserved: for future use and alignment
+ */
+struct iwl_scan_umac_chan_param {
+	u8 flags;
+	u8 count;
+	__le16 reserved;
+} __packed; /*SCAN_CHANNEL_PARAMS_API_S_VER_1 */
+
+/**
+ * struct iwl_scan_req_umac - UMAC scan request command
+ * @flags: &enum iwl_umac_scan_flags
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+ * @ooc_priority: out of channel priority - &enum iwl_scan_priority
+ * @general_flags: &enum iwl_umac_scan_general_flags
+ * @scan_start_mac_id: report the scan start TSF time according to this mac TSF
+ * @extended_dwell: dwell time for channels 1, 6 and 11
+ * @active_dwell: dwell time for active scan per LMAC
+ * @passive_dwell: dwell time for passive scan per LMAC
+ * @fragmented_dwell: dwell time for fragmented passive scan
+ * @adwell_default_n_aps: for adaptive dwell the default number of APs
+ *	per channel
+ * @adwell_default_n_aps_social: for adaptive dwell the default
+ *	number of APs per social (1,6,11) channel
+ * @general_flags2: &enum iwl_umac_scan_general_flags2
+ * @adwell_max_budget: for adaptive dwell the maximal budget of TU to be added
+ *	to total scan time
+ * @max_out_time: max out of serving channel time, per LMAC - for CDB there
+ *	are 2 LMACs
+ * @suspend_time: max suspend time, per LMAC - for CDB there are 2 LMACs
+ * @scan_priority: scan internal prioritization &enum iwl_scan_priority
+ * @num_of_fragments: Number of fragments needed for full coverage per band.
+ *	Relevant only for fragmented scan.
+ * @channel: &struct iwl_scan_umac_chan_param
+ * @reserved: for future use and alignment
+ * @reserved3: for future use and alignment
+ * @data: &struct iwl_scan_channel_cfg_umac and
+ *	&struct iwl_scan_req_umac_tail
+ */
+struct iwl_scan_req_umac {
+	__le32 flags;
+	__le32 uid;
+	__le32 ooc_priority;
+	__le16 general_flags;
+	u8 reserved;
+	u8 scan_start_mac_id;
+	union {
+		struct {
+			u8 extended_dwell;
+			u8 active_dwell;
+			u8 passive_dwell;
+			u8 fragmented_dwell;
+			__le32 max_out_time;
+			__le32 suspend_time;
+			__le32 scan_priority;
+			struct iwl_scan_umac_chan_param channel;
+			u8 data[];
+		} v1; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
+		struct {
+			u8 extended_dwell;
+			u8 active_dwell;
+			u8 passive_dwell;
+			u8 fragmented_dwell;
+			__le32 max_out_time[SCAN_TWO_LMACS];
+			__le32 suspend_time[SCAN_TWO_LMACS];
+			__le32 scan_priority;
+			struct iwl_scan_umac_chan_param channel;
+			u8 data[];
+		} v6; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_6 */
+		struct {
+			u8 active_dwell;
+			u8 passive_dwell;
+			u8 fragmented_dwell;
+			u8 adwell_default_n_aps;
+			u8 adwell_default_n_aps_social;
+			u8 reserved3;
+			__le16 adwell_max_budget;
+			__le32 max_out_time[SCAN_TWO_LMACS];
+			__le32 suspend_time[SCAN_TWO_LMACS];
+			__le32 scan_priority;
+			struct iwl_scan_umac_chan_param channel;
+			u8 data[];
+		} v7; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_7 */
+		struct {
+			u8 active_dwell[SCAN_TWO_LMACS];
+			u8 reserved2;
+			u8 adwell_default_n_aps;
+			u8 adwell_default_n_aps_social;
+			u8 general_flags2;
+			__le16 adwell_max_budget;
+			__le32 max_out_time[SCAN_TWO_LMACS];
+			__le32 suspend_time[SCAN_TWO_LMACS];
+			__le32 scan_priority;
+			u8 passive_dwell[SCAN_TWO_LMACS];
+			u8 num_of_fragments[SCAN_TWO_LMACS];
+			struct iwl_scan_umac_chan_param channel;
+			u8 data[];
+		} v8; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_8 */
+	};
+} __packed;
+
+#define IWL_SCAN_REQ_UMAC_SIZE_V8 sizeof(struct iwl_scan_req_umac)
+#define IWL_SCAN_REQ_UMAC_SIZE_V7 48
+#define IWL_SCAN_REQ_UMAC_SIZE_V6 44
+#define IWL_SCAN_REQ_UMAC_SIZE_V1 36
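+
+/*
+ * Illustrative sketch, not part of the firmware API: the fixed-part
+ * length of the UMAC scan request depends on the API version the
+ * firmware advertises, so a driver would select it roughly as below
+ * (helper and plain integer version argument are hypothetical).
+ */
+static inline size_t iwl_scan_req_umac_fixed_size(int api_ver)
+{
+	switch (api_ver) {
+	case 8:
+		return IWL_SCAN_REQ_UMAC_SIZE_V8;
+	case 7:
+		return IWL_SCAN_REQ_UMAC_SIZE_V7;
+	case 6:
+		return IWL_SCAN_REQ_UMAC_SIZE_V6;
+	default:
+		return IWL_SCAN_REQ_UMAC_SIZE_V1;
+	}
+}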
+
+/**
+ * struct iwl_umac_scan_abort - UMAC scan abort command
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+ * @flags: reserved
+ */
+struct iwl_umac_scan_abort {
+	__le32 uid;
+	__le32 flags;
+} __packed; /* SCAN_ABORT_CMD_UMAC_API_S_VER_1 */
+
+/**
+ * struct iwl_umac_scan_complete - UMAC scan complete notification
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+ * @last_schedule: last scheduling line
+ * @last_iter: last scan iteration number
+ * @status: &enum iwl_scan_offload_complete_status
+ * @ebs_status: &enum iwl_scan_ebs_status
+ * @time_from_last_iter: time elapsed from last iteration
+ * @reserved: for future use
+ */
+struct iwl_umac_scan_complete {
+	__le32 uid;
+	u8 last_schedule;
+	u8 last_iter;
+	u8 status;
+	u8 ebs_status;
+	__le32 time_from_last_iter;
+	__le32 reserved;
+} __packed; /* SCAN_COMPLETE_NTF_UMAC_API_S_VER_1 */
+
+#define SCAN_OFFLOAD_MATCHING_CHANNELS_LEN 5
+/**
+ * struct iwl_scan_offload_profile_match - match information
+ * @bssid: matched bssid
+ * @reserved: reserved
+ * @channel: channel where the match occurred
+ * @energy: energy
+ * @matching_feature: feature matches
+ * @matching_channels: bitmap of channels that matched, referencing
+ *	the channels passed in the scan offload request
+ */
+struct iwl_scan_offload_profile_match {
+	u8 bssid[ETH_ALEN];
+	__le16 reserved;
+	u8 channel;
+	u8 energy;
+	u8 matching_feature;
+	u8 matching_channels[SCAN_OFFLOAD_MATCHING_CHANNELS_LEN];
+} __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_1 */
+
+/**
+ * struct iwl_scan_offload_profiles_query - match results query response
+ * @matched_profiles: bitmap of matched profiles, referencing the
+ *	matches passed in the scan offload request
+ * @last_scan_age: age of the last offloaded scan
+ * @n_scans_done: number of offloaded scans done
+ * @gp2_d0u: GP2 when D0U occurred
+ * @gp2_invoked: GP2 when scan offload was invoked
+ * @resume_while_scanning: not used
+ * @self_recovery: obsolete
+ * @reserved: reserved
+ * @matches: array of match information, one for each match
+ */
+struct iwl_scan_offload_profiles_query {
+	__le32 matched_profiles;
+	__le32 last_scan_age;
+	__le32 n_scans_done;
+	__le32 gp2_d0u;
+	__le32 gp2_invoked;
+	u8 resume_while_scanning;
+	u8 self_recovery;
+	__le16 reserved;
+	struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES];
+} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */
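+
+/*
+ * Illustrative sketch, not part of the firmware API: walking the
+ * matched_profiles bitmap to visit each valid entry of @matches. The
+ * helper name is hypothetical.
+ */
+static inline void
+iwl_scan_offload_for_each_match(const struct iwl_scan_offload_profiles_query *q)
+{
+	u32 matched = le32_to_cpu(q->matched_profiles);
+	int i;
+
+	for (i = 0; i < IWL_SCAN_MAX_PROFILES; i++)
+		if (matched & BIT(i))
+			pr_debug("profile %d matched on channel %u\n",
+				 i, q->matches[i].channel);
+}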
+
+/**
+ * struct iwl_umac_scan_iter_complete_notif - notifies end of scanning iteration
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+ * @scanned_channels: number of channels scanned and number of valid elements in
+ *	results array
+ * @status: one of SCAN_COMP_STATUS_*
+ * @bt_status: BT on/off status
+ * @last_channel: last channel that was scanned
+ * @start_tsf: TSF timer in usecs of the scan start time for the mac specified
+ *	in &struct iwl_scan_req_umac.
+ * @results: array of scan results, length in @scanned_channels
+ */
+struct iwl_umac_scan_iter_complete_notif {
+	__le32 uid;
+	u8 scanned_channels;
+	u8 status;
+	u8 bt_status;
+	u8 last_channel;
+	__le64 start_tsf;
+	struct iwl_scan_results_notif results[];
+} __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_2 */
+
+#endif /* __iwl_fw_api_scan_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sf.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sf.h
new file mode 100644
index 0000000..e517b55
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sf.h
@@ -0,0 +1,138 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_fw_api_sf_h__
+#define __iwl_fw_api_sf_h__
+
+/* Smart Fifo state */
+enum iwl_sf_state {
+	SF_LONG_DELAY_ON = 0, /* should never be called by driver */
+	SF_FULL_ON,
+	SF_UNINIT,
+	SF_INIT_OFF,
+	SF_HW_NUM_STATES
+};
+
+/* Smart Fifo possible scenario */
+enum iwl_sf_scenario {
+	SF_SCENARIO_SINGLE_UNICAST,
+	SF_SCENARIO_AGG_UNICAST,
+	SF_SCENARIO_MULTICAST,
+	SF_SCENARIO_BA_RESP,
+	SF_SCENARIO_TX_RESP,
+	SF_NUM_SCENARIO
+};
+
+#define SF_TRANSIENT_STATES_NUMBER 2	/* SF_LONG_DELAY_ON and SF_FULL_ON */
+#define SF_NUM_TIMEOUT_TYPES 2		/* Aging timer and Idle timer */
+
+/* smart FIFO default values */
+#define SF_W_MARK_SISO 6144
+#define SF_W_MARK_MIMO2 8192
+#define SF_W_MARK_MIMO3 6144
+#define SF_W_MARK_LEGACY 4096
+#define SF_W_MARK_SCAN 4096
+
+/* SF Scenarios timers for default configuration (aligned to 32 uSec) */
+#define SF_SINGLE_UNICAST_IDLE_TIMER_DEF 160	/* 150 uSec  */
+#define SF_SINGLE_UNICAST_AGING_TIMER_DEF 400	/* 0.4 mSec */
+#define SF_AGG_UNICAST_IDLE_TIMER_DEF 160		/* 150 uSec */
+#define SF_AGG_UNICAST_AGING_TIMER_DEF 400		/* 0.4 mSec */
+#define SF_MCAST_IDLE_TIMER_DEF 160		/* 150 uSec */
+#define SF_MCAST_AGING_TIMER_DEF 400		/* 0.4 mSec */
+#define SF_BA_IDLE_TIMER_DEF 160			/* 150 uSec */
+#define SF_BA_AGING_TIMER_DEF 400			/* 0.4 mSec */
+#define SF_TX_RE_IDLE_TIMER_DEF 160			/* 150 uSec */
+#define SF_TX_RE_AGING_TIMER_DEF 400		/* 0.4 mSec */
+
+/* SF Scenarios timers for BSS MAC configuration (aligned to 32 uSec) */
+#define SF_SINGLE_UNICAST_IDLE_TIMER 320	/* 300 uSec  */
+#define SF_SINGLE_UNICAST_AGING_TIMER 2016	/* 2 mSec */
+#define SF_AGG_UNICAST_IDLE_TIMER 320		/* 300 uSec */
+#define SF_AGG_UNICAST_AGING_TIMER 2016		/* 2 mSec */
+#define SF_MCAST_IDLE_TIMER 2016		/* 2 mSec */
+#define SF_MCAST_AGING_TIMER 10016		/* 10 mSec */
+#define SF_BA_IDLE_TIMER 320			/* 300 uSec */
+#define SF_BA_AGING_TIMER 2016			/* 2 mSec */
+#define SF_TX_RE_IDLE_TIMER 320			/* 300 uSec */
+#define SF_TX_RE_AGING_TIMER 2016		/* 2 mSec */
+
+#define SF_LONG_DELAY_AGING_TIMER 1000000	/* 1 Sec */
+
+#define SF_CFG_DUMMY_NOTIF_OFF	BIT(16)
+
+/**
+ * struct iwl_sf_cfg_cmd - Smart Fifo configuration command.
+ * @state: smart fifo state, types listed in &enum iwl_sf_state.
+ * @watermark: Minimum allowed available free space in RXF for transient state.
+ * @long_delay_timeouts: aging and idle timer values for each scenario
+ * in long delay state.
+ * @full_on_timeouts: timer values for each scenario in full on state.
+ */
+struct iwl_sf_cfg_cmd {
+	__le32 state;
+	__le32 watermark[SF_TRANSIENT_STATES_NUMBER];
+	__le32 long_delay_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
+	__le32 full_on_timeouts[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES];
+} __packed; /* SF_CFG_API_S_VER_2 */
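+
+/*
+ * Illustrative sketch, not part of the firmware API: programming the
+ * long-delay timeouts with the 1 second aging value defined above for
+ * every scenario and timeout type. The helper name is hypothetical.
+ */
+static inline void iwl_sf_fill_long_delay(struct iwl_sf_cfg_cmd *cmd)
+{
+	int i, j;
+
+	for (i = 0; i < SF_NUM_SCENARIO; i++)
+		for (j = 0; j < SF_NUM_TIMEOUT_TYPES; j++)
+			cmd->long_delay_timeouts[i][j] =
+				cpu_to_le32(SF_LONG_DELAY_AGING_TIMER);
+}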
+
+#endif /* __iwl_fw_api_sf_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
new file mode 100644
index 0000000..dc40cbd
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
@@ -0,0 +1,569 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __iwl_fw_api_sta_h__
+#define __iwl_fw_api_sta_h__
+
+/**
+ * enum iwl_sta_flags - flags for the ADD_STA host command
+ * @STA_FLG_REDUCED_TX_PWR_CTRL: reduced TX power (control frames)
+ * @STA_FLG_REDUCED_TX_PWR_DATA: reduced TX power (data frames)
+ * @STA_FLG_DISABLE_TX: set if TX should be disabled
+ * @STA_FLG_PS: set if STA is in Power Save
+ * @STA_FLG_DRAIN_FLOW: drain flow
+ * @STA_FLG_PAN: STA is for PAN interface
+ * @STA_FLG_CLASS_AUTH: station is authenticated
+ * @STA_FLG_CLASS_ASSOC: station is associated
+ * @STA_FLG_RTS_MIMO_PROT: station requires RTS MIMO protection (dynamic SMPS)
+ * @STA_FLG_MAX_AGG_SIZE_MSK: maximal size for A-MPDU (mask)
+ * @STA_FLG_MAX_AGG_SIZE_SHIFT: maximal size for A-MPDU (bit shift)
+ * @STA_FLG_MAX_AGG_SIZE_8K: maximal size for A-MPDU (8k supported)
+ * @STA_FLG_MAX_AGG_SIZE_16K: maximal size for A-MPDU (16k supported)
+ * @STA_FLG_MAX_AGG_SIZE_32K: maximal size for A-MPDU (32k supported)
+ * @STA_FLG_MAX_AGG_SIZE_64K: maximal size for A-MPDU (64k supported)
+ * @STA_FLG_MAX_AGG_SIZE_128K: maximal size for A-MPDU (128k supported)
+ * @STA_FLG_MAX_AGG_SIZE_256K: maximal size for A-MPDU (256k supported)
+ * @STA_FLG_MAX_AGG_SIZE_512K: maximal size for A-MPDU (512k supported)
+ * @STA_FLG_MAX_AGG_SIZE_1024K: maximal size for A-MPDU (1024k supported)
+ * @STA_FLG_AGG_MPDU_DENS_MSK: maximal MPDU density for Tx aggregation
+ * @STA_FLG_FAT_EN_MSK: support for channel width (for Tx). This flag is
+ *	initialised by driver and can be updated by fw upon reception of
+ *	action frames that can change the channel width. When cleared the fw
+ *	will send all the frames in 20MHz even when FAT channel is requested.
+ * @STA_FLG_FAT_EN_20MHZ: no wide channels are supported, only 20 MHz
+ * @STA_FLG_FAT_EN_40MHZ: wide channels up to 40 MHz supported
+ * @STA_FLG_FAT_EN_80MHZ: wide channels up to 80 MHz supported
+ * @STA_FLG_FAT_EN_160MHZ: wide channels up to 160 MHz supported
+ * @STA_FLG_MIMO_EN_MSK: support for MIMO. This flag is initialised by the
+ *	driver and can be updated by fw upon reception of action frames.
+ * @STA_FLG_MIMO_EN_SISO: no support for MIMO
+ * @STA_FLG_MIMO_EN_MIMO2: 2 streams supported
+ * @STA_FLG_MIMO_EN_MIMO3: 3 streams supported
+ * @STA_FLG_AGG_MPDU_DENS_MSK: A-MPDU density (mask)
+ * @STA_FLG_AGG_MPDU_DENS_SHIFT: A-MPDU density (bit shift)
+ * @STA_FLG_AGG_MPDU_DENS_2US: A-MPDU density (2 usec gap)
+ * @STA_FLG_AGG_MPDU_DENS_4US: A-MPDU density (4 usec gap)
+ * @STA_FLG_AGG_MPDU_DENS_8US: A-MPDU density (8 usec gap)
+ * @STA_FLG_AGG_MPDU_DENS_16US: A-MPDU density (16 usec gap)
+ */
+enum iwl_sta_flags {
+	STA_FLG_REDUCED_TX_PWR_CTRL	= BIT(3),
+	STA_FLG_REDUCED_TX_PWR_DATA	= BIT(6),
+
+	STA_FLG_DISABLE_TX		= BIT(4),
+
+	STA_FLG_PS			= BIT(8),
+	STA_FLG_DRAIN_FLOW		= BIT(12),
+	STA_FLG_PAN			= BIT(13),
+	STA_FLG_CLASS_AUTH		= BIT(14),
+	STA_FLG_CLASS_ASSOC		= BIT(15),
+	STA_FLG_RTS_MIMO_PROT		= BIT(17),
+
+	STA_FLG_MAX_AGG_SIZE_SHIFT	= 19,
+	STA_FLG_MAX_AGG_SIZE_8K		= (0 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+	STA_FLG_MAX_AGG_SIZE_16K	= (1 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+	STA_FLG_MAX_AGG_SIZE_32K	= (2 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+	STA_FLG_MAX_AGG_SIZE_64K	= (3 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+	STA_FLG_MAX_AGG_SIZE_128K	= (4 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+	STA_FLG_MAX_AGG_SIZE_256K	= (5 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+	STA_FLG_MAX_AGG_SIZE_512K	= (6 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+	STA_FLG_MAX_AGG_SIZE_1024K	= (7 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+	STA_FLG_MAX_AGG_SIZE_MSK	= (7 << STA_FLG_MAX_AGG_SIZE_SHIFT),
+
+	STA_FLG_AGG_MPDU_DENS_SHIFT	= 23,
+	STA_FLG_AGG_MPDU_DENS_2US	= (4 << STA_FLG_AGG_MPDU_DENS_SHIFT),
+	STA_FLG_AGG_MPDU_DENS_4US	= (5 << STA_FLG_AGG_MPDU_DENS_SHIFT),
+	STA_FLG_AGG_MPDU_DENS_8US	= (6 << STA_FLG_AGG_MPDU_DENS_SHIFT),
+	STA_FLG_AGG_MPDU_DENS_16US	= (7 << STA_FLG_AGG_MPDU_DENS_SHIFT),
+	STA_FLG_AGG_MPDU_DENS_MSK	= (7 << STA_FLG_AGG_MPDU_DENS_SHIFT),
+
+	STA_FLG_FAT_EN_20MHZ		= (0 << 26),
+	STA_FLG_FAT_EN_40MHZ		= (1 << 26),
+	STA_FLG_FAT_EN_80MHZ		= (2 << 26),
+	STA_FLG_FAT_EN_160MHZ		= (3 << 26),
+	STA_FLG_FAT_EN_MSK		= (3 << 26),
+
+	STA_FLG_MIMO_EN_SISO		= (0 << 28),
+	STA_FLG_MIMO_EN_MIMO2		= (1 << 28),
+	STA_FLG_MIMO_EN_MIMO3		= (2 << 28),
+	STA_FLG_MIMO_EN_MSK		= (3 << 28),
+};
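+
+/*
+ * Illustrative sketch, not part of the firmware API: the _MSK/_SHIFT
+ * pairs above are meant to be used together - set the mask bits in
+ * station_flags_msk and the chosen value in station_flags. The helper
+ * and the 64k choice are hypothetical.
+ */
+static inline void iwl_sta_set_agg_size(__le32 *flags, __le32 *flags_msk)
+{
+	*flags_msk |= cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK);
+	*flags &= ~cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK);
+	*flags |= cpu_to_le32(STA_FLG_MAX_AGG_SIZE_64K);
+}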
+
+/**
+ * enum iwl_sta_key_flag - key flags for the ADD_STA host command
+ * @STA_KEY_FLG_NO_ENC: no encryption
+ * @STA_KEY_FLG_WEP: WEP encryption algorithm
+ * @STA_KEY_FLG_CCM: CCMP encryption algorithm
+ * @STA_KEY_FLG_TKIP: TKIP encryption algorithm
+ * @STA_KEY_FLG_EXT: extended cipher algorithm (depends on the FW support)
+ * @STA_KEY_FLG_GCMP: GCMP encryption algorithm
+ * @STA_KEY_FLG_CMAC: CMAC encryption algorithm
+ * @STA_KEY_FLG_ENC_UNKNOWN: unknown encryption algorithm
+ * @STA_KEY_FLG_EN_MSK: mask for encryption algorithm value
+ * @STA_KEY_FLG_WEP_KEY_MAP: wep is either a group key (0 - legacy WEP) or from
+ *	station info array (1 - 802.1X mode)
+ * @STA_KEY_FLG_KEYID_MSK: the index of the key
+ * @STA_KEY_FLG_KEYID_POS: key index bit position
+ * @STA_KEY_NOT_VALID: key is invalid
+ * @STA_KEY_FLG_WEP_13BYTES: set for 13 bytes WEP key
+ * @STA_KEY_FLG_KEY_32BYTES: for non-wep key set for 32 bytes key
+ * @STA_KEY_MULTICAST: set for multicast key
+ * @STA_KEY_MFP: key is used for Management Frame Protection
+ */
+enum iwl_sta_key_flag {
+	STA_KEY_FLG_NO_ENC		= (0 << 0),
+	STA_KEY_FLG_WEP			= (1 << 0),
+	STA_KEY_FLG_CCM			= (2 << 0),
+	STA_KEY_FLG_TKIP		= (3 << 0),
+	STA_KEY_FLG_EXT			= (4 << 0),
+	STA_KEY_FLG_GCMP		= (5 << 0),
+	STA_KEY_FLG_CMAC		= (6 << 0),
+	STA_KEY_FLG_ENC_UNKNOWN		= (7 << 0),
+	STA_KEY_FLG_EN_MSK		= (7 << 0),
+
+	STA_KEY_FLG_WEP_KEY_MAP		= BIT(3),
+	STA_KEY_FLG_KEYID_POS		= 8,
+	STA_KEY_FLG_KEYID_MSK		= (3 << STA_KEY_FLG_KEYID_POS),
+	STA_KEY_NOT_VALID		= BIT(11),
+	STA_KEY_FLG_WEP_13BYTES		= BIT(12),
+	STA_KEY_FLG_KEY_32BYTES		= BIT(12),
+	STA_KEY_MULTICAST		= BIT(14),
+	STA_KEY_MFP			= BIT(15),
+};
+
+/**
+ * enum iwl_sta_modify_flag - indicate to the fw what flag are being changed
+ * @STA_MODIFY_QUEUE_REMOVAL: this command removes a queue
+ * @STA_MODIFY_TID_DISABLE_TX: this command modifies %tid_disable_tx
+ * @STA_MODIFY_UAPSD_ACS: this command modifies %uapsd_acs
+ * @STA_MODIFY_ADD_BA_TID: this command modifies %add_immediate_ba_tid
+ * @STA_MODIFY_REMOVE_BA_TID: this command modifies %remove_immediate_ba_tid
+ * @STA_MODIFY_SLEEPING_STA_TX_COUNT: this command modifies %sleep_tx_count
+ * @STA_MODIFY_PROT_TH: modify RTS threshold
+ * @STA_MODIFY_QUEUES: modify the queues used by this station
+ */
+enum iwl_sta_modify_flag {
+	STA_MODIFY_QUEUE_REMOVAL		= BIT(0),
+	STA_MODIFY_TID_DISABLE_TX		= BIT(1),
+	STA_MODIFY_UAPSD_ACS			= BIT(2),
+	STA_MODIFY_ADD_BA_TID			= BIT(3),
+	STA_MODIFY_REMOVE_BA_TID		= BIT(4),
+	STA_MODIFY_SLEEPING_STA_TX_COUNT	= BIT(5),
+	STA_MODIFY_PROT_TH			= BIT(6),
+	STA_MODIFY_QUEUES			= BIT(7),
+};
+
+/**
+ * enum iwl_sta_mode - station command mode
+ * @STA_MODE_ADD: add new station
+ * @STA_MODE_MODIFY: modify the station
+ */
+enum iwl_sta_mode {
+	STA_MODE_ADD	= 0,
+	STA_MODE_MODIFY	= 1,
+};
+
+/**
+ * enum iwl_sta_sleep_flag - type of sleep of the station
+ * @STA_SLEEP_STATE_AWAKE: station is awake
+ * @STA_SLEEP_STATE_PS_POLL: station is PS-polling
+ * @STA_SLEEP_STATE_UAPSD: station uses U-APSD
+ * @STA_SLEEP_STATE_MOREDATA: set more-data bit on
+ *	(last) released frame
+ */
+enum iwl_sta_sleep_flag {
+	STA_SLEEP_STATE_AWAKE		= 0,
+	STA_SLEEP_STATE_PS_POLL		= BIT(0),
+	STA_SLEEP_STATE_UAPSD		= BIT(1),
+	STA_SLEEP_STATE_MOREDATA	= BIT(2),
+};
+
+#define STA_KEY_MAX_NUM (16)
+#define STA_KEY_IDX_INVALID (0xff)
+#define STA_KEY_MAX_DATA_KEY_NUM (4)
+#define IWL_MAX_GLOBAL_KEYS (4)
+#define STA_KEY_LEN_WEP40 (5)
+#define STA_KEY_LEN_WEP104 (13)
+
+/**
+ * struct iwl_mvm_keyinfo - key information
+ * @key_flags: type &enum iwl_sta_key_flag
+ * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
+ * @reserved1: reserved
+ * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
+ * @key_offset: key offset in the fw's key table
+ * @reserved2: reserved
+ * @key: 16-byte unicast decryption key
+ * @tx_secur_seq_cnt: initial RSC / PN needed for replay check
+ * @hw_tkip_mic_rx_key: byte: MIC Rx Key - used for TKIP only
+ * @hw_tkip_mic_tx_key: byte: MIC Tx Key - used for TKIP only
+ */
+struct iwl_mvm_keyinfo {
+	__le16 key_flags;
+	u8 tkip_rx_tsc_byte2;
+	u8 reserved1;
+	__le16 tkip_rx_ttak[5];
+	u8 key_offset;
+	u8 reserved2;
+	u8 key[16];
+	__le64 tx_secur_seq_cnt;
+	__le64 hw_tkip_mic_rx_key;
+	__le64 hw_tkip_mic_tx_key;
+} __packed;
+
+#define IWL_ADD_STA_STATUS_MASK		0xFF
+#define IWL_ADD_STA_BAID_VALID_MASK	0x8000
+#define IWL_ADD_STA_BAID_MASK		0x7F00
+#define IWL_ADD_STA_BAID_SHIFT		8
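+
+/*
+ * Illustrative sketch, not part of the firmware API: decoding the BAID
+ * from an ADD_STA response status word with the masks above. The
+ * helper name is hypothetical.
+ */
+static inline int iwl_add_sta_baid(u32 status)
+{
+	if (!(status & IWL_ADD_STA_BAID_VALID_MASK))
+		return -1;
+
+	return (status & IWL_ADD_STA_BAID_MASK) >> IWL_ADD_STA_BAID_SHIFT;
+}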
+
+/**
+ * struct iwl_mvm_add_sta_cmd_v7 - Add/modify a station in the fw's sta table.
+ * ( REPLY_ADD_STA = 0x18 )
+ * @add_modify: see &enum iwl_sta_mode
+ * @awake_acs: ACs to transmit data on while station is sleeping (for U-APSD)
+ * @tid_disable_tx: bitmap of TIDs for which Tx is disabled; clear BIT(x)
+ *	to enable AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change
+ *	this field.
+ * @mac_id_n_color: the Mac context this station belongs to,
+ *	see &enum iwl_ctxt_id_and_color
+ * @addr: station's MAC address
+ * @reserved2: reserved
+ * @sta_id: index of station in uCode's station table
+ * @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave
+ *	alone. 1 - modify, 0 - don't change.
+ * @reserved3: reserved
+ * @station_flags: look at &enum iwl_sta_flags
+ * @station_flags_msk: what of %station_flags have changed,
+ *	also &enum iwl_sta_flags
+ * @add_immediate_ba_tid: tid for which to add block-ack support (Rx)
+ *	Set %STA_MODIFY_ADD_BA_TID to use this field, and also set
+ *	add_immediate_ba_ssn.
+ * @remove_immediate_ba_tid: tid for which to remove block-ack support (Rx)
+ *	Set %STA_MODIFY_REMOVE_BA_TID to use this field
+ * @add_immediate_ba_ssn: ssn for the Rx block-ack session. Used together with
+ *	add_immediate_ba_tid.
+ * @sleep_tx_count: number of packets to transmit to station even though it is
+ *	asleep. Used to synchronise PS-poll and u-APSD responses while ucode
+ *	keeps track of STA sleep state.
+ * @sleep_state_flags: Look at &enum iwl_sta_sleep_flag.
+ * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP
+ *	mac-addr.
+ * @beamform_flags: beam forming controls
+ * @tfd_queue_msk: tfd queues used by this station
+ *
+ * The device contains an internal table of per-station information, with info
+ * on security keys, aggregation parameters, and Tx rates for initial Tx
+ * attempt and any retries (set by REPLY_TX_LINK_QUALITY_CMD).
+ *
+ * ADD_STA sets up the table entry for one station, either creating a new
+ * entry, or modifying a pre-existing one.
+ */
+struct iwl_mvm_add_sta_cmd_v7 {
+	u8 add_modify;
+	u8 awake_acs;
+	__le16 tid_disable_tx;
+	__le32 mac_id_n_color;
+	u8 addr[ETH_ALEN];	/* _STA_ID_MODIFY_INFO_API_S_VER_1 */
+	__le16 reserved2;
+	u8 sta_id;
+	u8 modify_mask;
+	__le16 reserved3;
+	__le32 station_flags;
+	__le32 station_flags_msk;
+	u8 add_immediate_ba_tid;
+	u8 remove_immediate_ba_tid;
+	__le16 add_immediate_ba_ssn;
+	__le16 sleep_tx_count;
+	__le16 sleep_state_flags;
+	__le16 assoc_id;
+	__le16 beamform_flags;
+	__le32 tfd_queue_msk;
+} __packed; /* ADD_STA_CMD_API_S_VER_7 */
+
+/**
+ * enum iwl_sta_type - FW station types
+ * ( REPLY_ADD_STA = 0x18 )
+ * @IWL_STA_LINK: Link station - normal RX and TX traffic.
+ * @IWL_STA_GENERAL_PURPOSE: General purpose. In AP mode used for beacons
+ *	and probe responses.
+ * @IWL_STA_MULTICAST: multicast traffic.
+ * @IWL_STA_TDLS_LINK: TDLS link station.
+ * @IWL_STA_AUX_ACTIVITY: auxiliary station (scan, ROC and so on).
+ */
+enum iwl_sta_type {
+	IWL_STA_LINK,
+	IWL_STA_GENERAL_PURPOSE,
+	IWL_STA_MULTICAST,
+	IWL_STA_TDLS_LINK,
+	IWL_STA_AUX_ACTIVITY,
+};
+
+/**
+ * struct iwl_mvm_add_sta_cmd - Add/modify a station in the fw's sta table.
+ * ( REPLY_ADD_STA = 0x18 )
+ * @add_modify: see &enum iwl_sta_mode
+ * @awake_acs: ACs to transmit data on while station is sleeping (for U-APSD)
+ * @tid_disable_tx: bitmap of TIDs for which Tx is disabled; clear BIT(x)
+ *	to enable AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change
+ *	this field.
+ * @mac_id_n_color: the Mac context this station belongs to,
+ *	see &enum iwl_ctxt_id_and_color
+ * @addr: station's MAC address
+ * @reserved2: reserved
+ * @sta_id: index of station in uCode's station table
+ * @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave
+ *	alone. 1 - modify, 0 - don't change.
+ * @reserved3: reserved
+ * @station_flags: look at &enum iwl_sta_flags
+ * @station_flags_msk: what of %station_flags have changed,
+ *	also &enum iwl_sta_flags
+ * @add_immediate_ba_tid: tid for which to add block-ack support (Rx)
+ *	Set %STA_MODIFY_ADD_BA_TID to use this field, and also set
+ *	add_immediate_ba_ssn.
+ * @remove_immediate_ba_tid: tid for which to remove block-ack support (Rx)
+ *	Set %STA_MODIFY_REMOVE_BA_TID to use this field
+ * @add_immediate_ba_ssn: ssn for the Rx block-ack session. Used together with
+ *	add_immediate_ba_tid.
+ * @sleep_tx_count: number of packets to transmit to station even though it is
+ *	asleep. Used to synchronise PS-poll and u-APSD responses while ucode
+ *	keeps track of STA sleep state.
+ * @station_type: type of this station. See &enum iwl_sta_type.
+ * @sleep_state_flags: Look at &enum iwl_sta_sleep_flag.
+ * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP
+ *	mac-addr.
+ * @beamform_flags: beam forming controls
+ * @tfd_queue_msk: tfd queues used by this station.
+ *	Obsolete for new TX API (9 and above).
+ * @rx_ba_window: aggregation window size
+ * @sp_length: the size of the SP as it appears in the WME IE
+ * @uapsd_acs:  4 LS bits are trigger enabled ACs, 4 MS bits are the deliver
+ *	enabled ACs.
+ *
+ * The device contains an internal table of per-station information, with info
+ * on security keys, aggregation parameters, and Tx rates for initial Tx
+ * attempt and any retries (set by REPLY_TX_LINK_QUALITY_CMD).
+ *
+ * ADD_STA sets up the table entry for one station, either creating a new
+ * entry, or modifying a pre-existing one.
+ */
+struct iwl_mvm_add_sta_cmd {
+	u8 add_modify;
+	u8 awake_acs;
+	__le16 tid_disable_tx;
+	__le32 mac_id_n_color;
+	u8 addr[ETH_ALEN];	/* _STA_ID_MODIFY_INFO_API_S_VER_1 */
+	__le16 reserved2;
+	u8 sta_id;
+	u8 modify_mask;
+	__le16 reserved3;
+	__le32 station_flags;
+	__le32 station_flags_msk;
+	u8 add_immediate_ba_tid;
+	u8 remove_immediate_ba_tid;
+	__le16 add_immediate_ba_ssn;
+	__le16 sleep_tx_count;
+	u8 sleep_state_flags;
+	u8 station_type;
+	__le16 assoc_id;
+	__le16 beamform_flags;
+	__le32 tfd_queue_msk;
+	__le16 rx_ba_window;
+	u8 sp_length;
+	u8 uapsd_acs;
+} __packed; /* ADD_STA_CMD_API_S_VER_10 */
+
+/**
+ * struct iwl_mvm_add_sta_key_common - add/modify sta key common part
+ * ( REPLY_ADD_STA_KEY = 0x17 )
+ * @sta_id: index of station in uCode's station table
+ * @key_offset: key offset in key storage
+ * @key_flags: type &enum iwl_sta_key_flag
+ * @key: key material data
+ * @rx_secur_seq_cnt: RX security sequence counter for the key
+ */
+struct iwl_mvm_add_sta_key_common {
+	u8 sta_id;
+	u8 key_offset;
+	__le16 key_flags;
+	u8 key[32];
+	u8 rx_secur_seq_cnt[16];
+} __packed;
+
+/**
+ * struct iwl_mvm_add_sta_key_cmd_v1 - add/modify sta key
+ * @common: see &struct iwl_mvm_add_sta_key_common
+ * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
+ * @reserved: reserved
+ * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
+ */
+struct iwl_mvm_add_sta_key_cmd_v1 {
+	struct iwl_mvm_add_sta_key_common common;
+	u8 tkip_rx_tsc_byte2;
+	u8 reserved;
+	__le16 tkip_rx_ttak[5];
+} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_1 */
+
+/**
+ * struct iwl_mvm_add_sta_key_cmd - add/modify sta key
+ * @common: see &struct iwl_mvm_add_sta_key_common
+ * @rx_mic_key: TKIP RX unicast or multicast key
+ * @tx_mic_key: TKIP TX key
+ * @transmit_seq_cnt: TSC, transmit packet number
+ */
+struct iwl_mvm_add_sta_key_cmd {
+	struct iwl_mvm_add_sta_key_common common;
+	__le64 rx_mic_key;
+	__le64 tx_mic_key;
+	__le64 transmit_seq_cnt;
+} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_2 */
+
+/**
+ * enum iwl_mvm_add_sta_rsp_status - status in the response to ADD_STA command
+ * @ADD_STA_SUCCESS: operation was executed successfully
+ * @ADD_STA_STATIONS_OVERLOAD: no room left in the fw's station table
+ * @ADD_STA_IMMEDIATE_BA_FAILURE: can't add Rx block ack session
+ * @ADD_STA_MODIFY_NON_EXISTING_STA: driver requested to modify a station that
+ *	doesn't exist.
+ */
+enum iwl_mvm_add_sta_rsp_status {
+	ADD_STA_SUCCESS			= 0x1,
+	ADD_STA_STATIONS_OVERLOAD	= 0x2,
+	ADD_STA_IMMEDIATE_BA_FAILURE	= 0x4,
+	ADD_STA_MODIFY_NON_EXISTING_STA	= 0x8,
+};
+
+/**
+ * struct iwl_mvm_rm_sta_cmd - Remove a station from the fw's station table
+ * ( REMOVE_STA = 0x19 )
+ * @sta_id: the station id of the station to be removed
+ * @reserved: reserved
+ */
+struct iwl_mvm_rm_sta_cmd {
+	u8 sta_id;
+	u8 reserved[3];
+} __packed; /* REMOVE_STA_CMD_API_S_VER_2 */
+
+/**
+ * struct iwl_mvm_mgmt_mcast_key_cmd_v1 - multicast (IGTK) key command
+ * ( MGMT_MCAST_KEY = 0x1f )
+ * @ctrl_flags: &enum iwl_sta_key_flag
+ * @igtk: IGTK key material
+ * @k1: unused
+ * @k2: unused
+ * @sta_id: station ID that supports IGTK
+ * @key_id: key ID
+ * @receive_seq_cnt: initial RSC/PN needed for replay check
+ */
+struct iwl_mvm_mgmt_mcast_key_cmd_v1 {
+	__le32 ctrl_flags;
+	u8 igtk[16];
+	u8 k1[16];
+	u8 k2[16];
+	__le32 key_id;
+	__le32 sta_id;
+	__le64 receive_seq_cnt;
+} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_mvm_mgmt_mcast_key_cmd - multicast (IGTK) key command
+ * ( MGMT_MCAST_KEY = 0x1f )
+ * @ctrl_flags: &enum iwl_sta_key_flag
+ * @igtk: IGTK master key
+ * @sta_id: station ID that supports IGTK
+ * @key_id: key ID
+ * @receive_seq_cnt: initial RSC/PN needed for replay check
+ */
+struct iwl_mvm_mgmt_mcast_key_cmd {
+	__le32 ctrl_flags;
+	u8 igtk[32];
+	__le32 key_id;
+	__le32 sta_id;
+	__le64 receive_seq_cnt;
+} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_2 */
+
+struct iwl_mvm_wep_key {
+	u8 key_index;
+	u8 key_offset;
+	__le16 reserved1;
+	u8 key_size;
+	u8 reserved2[3];
+	u8 key[16];
+} __packed;
+
+struct iwl_mvm_wep_key_cmd {
+	__le32 mac_id_n_color;
+	u8 num_keys;
+	u8 decryption_type;
+	u8 flags;
+	u8 reserved;
+	struct iwl_mvm_wep_key wep_key[0];
+} __packed; /* SEC_CURR_WEP_KEY_CMD_API_S_VER_2 */
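+
+/*
+ * Illustrative sketch, not part of the firmware API: the WEP key
+ * command ends in a flexible array, so its byte length follows from
+ * num_keys. The helper name is hypothetical.
+ */
+static inline size_t iwl_mvm_wep_key_cmd_size(u8 num_keys)
+{
+	return sizeof(struct iwl_mvm_wep_key_cmd) +
+	       num_keys * sizeof(struct iwl_mvm_wep_key);
+}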
+
+/**
+ * struct iwl_mvm_eosp_notification - EOSP notification from firmware
+ * @remain_frame_count: # of frames remaining, non-zero if SP was cut
+ *	short by GO absence
+ * @sta_id: station ID
+ */
+struct iwl_mvm_eosp_notification {
+	__le32 remain_frame_count;
+	__le32 sta_id;
+} __packed; /* UAPSD_EOSP_NTFY_API_S_VER_1 */
+
+#endif /* __iwl_fw_api_sta_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h
new file mode 100644
index 0000000..53cb622
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h
@@ -0,0 +1,474 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_fw_api_stats_h__
+#define __iwl_fw_api_stats_h__
+#include "mac.h"
+
+struct mvm_statistics_dbg {
+	__le32 burst_check;
+	__le32 burst_count;
+	__le32 wait_for_silence_timeout_cnt;
+	u8 reserved[12];
+} __packed; /* STATISTICS_DEBUG_API_S_VER_2 */
+
+struct mvm_statistics_div {
+	__le32 tx_on_a;
+	__le32 tx_on_b;
+	__le32 exec_time;
+	__le32 probe_time;
+	__le32 rssi_ant;
+	__le32 reserved2;
+} __packed; /* STATISTICS_SLOW_DIV_API_S_VER_2 */
+
+/**
+ * struct mvm_statistics_rx_non_phy
+ * @bogus_cts: CTS received when not expecting CTS
+ * @bogus_ack: ACK received when not expecting ACK
+ * @non_channel_beacons: beacons with our bss id but not on our serving channel
+ * @channel_beacons: beacons with our bss id and in our serving channel
+ * @num_missed_bcon: number of missed beacons
+ * @adc_rx_saturation_time: count in 0.8us units the time the ADC was in
+ *	saturation
+ * @ina_detection_search_time: total time (in 0.8us) searched for INA
+ * @beacon_silence_rssi_a: RSSI silence after beacon frame
+ * @beacon_silence_rssi_b: RSSI silence after beacon frame
+ * @beacon_silence_rssi_c: RSSI silence after beacon frame
+ * @interference_data_flag: flag for interference data availability. 1 when data
+ *	is available.
+ * @channel_load: counts RX Enable time in uSec
+ * @beacon_rssi_a: beacon RSSI on antenna A
+ * @beacon_rssi_b: beacon RSSI on antenna B
+ * @beacon_rssi_c: beacon RSSI on antenna C
+ * @beacon_energy_a: beacon energy on antenna A
+ * @beacon_energy_b: beacon energy on antenna B
+ * @beacon_energy_c: beacon energy on antenna C
+ * @num_bt_kills: number of BT "kills" (frame TX aborts)
+ * @mac_id: mac ID
+ */
+struct mvm_statistics_rx_non_phy {
+	__le32 bogus_cts;
+	__le32 bogus_ack;
+	__le32 non_channel_beacons;
+	__le32 channel_beacons;
+	__le32 num_missed_bcon;
+	__le32 adc_rx_saturation_time;
+	__le32 ina_detection_search_time;
+	__le32 beacon_silence_rssi_a;
+	__le32 beacon_silence_rssi_b;
+	__le32 beacon_silence_rssi_c;
+	__le32 interference_data_flag;
+	__le32 channel_load;
+	__le32 beacon_rssi_a;
+	__le32 beacon_rssi_b;
+	__le32 beacon_rssi_c;
+	__le32 beacon_energy_a;
+	__le32 beacon_energy_b;
+	__le32 beacon_energy_c;
+	__le32 num_bt_kills;
+	__le32 mac_id;
+} __packed; /* STATISTICS_RX_NON_PHY_API_S_VER_4 */
+
+struct mvm_statistics_rx_non_phy_v3 {
+	__le32 bogus_cts;	/* CTS received when not expecting CTS */
+	__le32 bogus_ack;	/* ACK received when not expecting ACK */
+	__le32 non_bssid_frames;	/* number of frames with BSSID that
+					 * doesn't belong to the STA BSSID */
+	__le32 filtered_frames;	/* count frames that were dumped in the
+				 * filtering process */
+	__le32 non_channel_beacons;	/* beacons with our bss id but not on
+					 * our serving channel */
+	__le32 channel_beacons;	/* beacons with our bss id and in our
+				 * serving channel */
+	__le32 num_missed_bcon;	/* number of missed beacons */
+	__le32 adc_rx_saturation_time;	/* count in 0.8us units the time the
+					 * ADC was in saturation */
+	__le32 ina_detection_search_time;/* total time (in 0.8us) searched
+					  * for INA */
+	__le32 beacon_silence_rssi_a;	/* RSSI silence after beacon frame */
+	__le32 beacon_silence_rssi_b;	/* RSSI silence after beacon frame */
+	__le32 beacon_silence_rssi_c;	/* RSSI silence after beacon frame */
+	__le32 interference_data_flag;	/* flag for interference data
+					 * availability. 1 when data is
+					 * available. */
+	__le32 channel_load;		/* counts RX Enable time in uSec */
+	__le32 dsp_false_alarms;	/* DSP false alarm (both OFDM
+					 * and CCK) counter */
+	__le32 beacon_rssi_a;
+	__le32 beacon_rssi_b;
+	__le32 beacon_rssi_c;
+	__le32 beacon_energy_a;
+	__le32 beacon_energy_b;
+	__le32 beacon_energy_c;
+	__le32 num_bt_kills;
+	__le32 mac_id;
+	__le32 directed_data_mpdu;
+} __packed; /* STATISTICS_RX_NON_PHY_API_S_VER_3 */
+
+struct mvm_statistics_rx_phy {
+	__le32 unresponded_rts;
+	__le32 rxe_frame_lmt_overrun;
+	__le32 sent_ba_rsp_cnt;
+	__le32 dsp_self_kill;
+	__le32 reserved;
+} __packed; /* STATISTICS_RX_PHY_API_S_VER_3 */
+
+struct mvm_statistics_rx_phy_v2 {
+	__le32 ina_cnt;
+	__le32 fina_cnt;
+	__le32 plcp_err;
+	__le32 crc32_err;
+	__le32 overrun_err;
+	__le32 early_overrun_err;
+	__le32 crc32_good;
+	__le32 false_alarm_cnt;
+	__le32 fina_sync_err_cnt;
+	__le32 sfd_timeout;
+	__le32 fina_timeout;
+	__le32 unresponded_rts;
+	__le32 rxe_frame_lmt_overrun;
+	__le32 sent_ack_cnt;
+	__le32 sent_cts_cnt;
+	__le32 sent_ba_rsp_cnt;
+	__le32 dsp_self_kill;
+	__le32 mh_format_err;
+	__le32 re_acq_main_rssi_sum;
+	__le32 reserved;
+} __packed; /* STATISTICS_RX_PHY_API_S_VER_2 */
+
+struct mvm_statistics_rx_ht_phy_v1 {
+	__le32 plcp_err;
+	__le32 overrun_err;
+	__le32 early_overrun_err;
+	__le32 crc32_good;
+	__le32 crc32_err;
+	__le32 mh_format_err;
+	__le32 agg_crc32_good;
+	__le32 agg_mpdu_cnt;
+	__le32 agg_cnt;
+	__le32 unsupport_mcs;
+} __packed;  /* STATISTICS_HT_RX_PHY_API_S_VER_1 */
+
+struct mvm_statistics_rx_ht_phy {
+	__le32 mh_format_err;
+	__le32 agg_mpdu_cnt;
+	__le32 agg_cnt;
+	__le32 unsupport_mcs;
+} __packed;  /* STATISTICS_HT_RX_PHY_API_S_VER_2 */
+
+struct mvm_statistics_tx_non_phy_v3 {
+	__le32 preamble_cnt;
+	__le32 rx_detected_cnt;
+	__le32 bt_prio_defer_cnt;
+	__le32 bt_prio_kill_cnt;
+	__le32 few_bytes_cnt;
+	__le32 cts_timeout;
+	__le32 ack_timeout;
+	__le32 expected_ack_cnt;
+	__le32 actual_ack_cnt;
+	__le32 dump_msdu_cnt;
+	__le32 burst_abort_next_frame_mismatch_cnt;
+	__le32 burst_abort_missing_next_frame_cnt;
+	__le32 cts_timeout_collision;
+	__le32 ack_or_ba_timeout_collision;
+} __packed; /* STATISTICS_TX_NON_PHY_API_S_VER_3 */
+
+struct mvm_statistics_tx_non_phy {
+	__le32 bt_prio_defer_cnt;
+	__le32 bt_prio_kill_cnt;
+	__le32 few_bytes_cnt;
+	__le32 cts_timeout;
+	__le32 ack_timeout;
+	__le32 dump_msdu_cnt;
+	__le32 burst_abort_next_frame_mismatch_cnt;
+	__le32 burst_abort_missing_next_frame_cnt;
+	__le32 cts_timeout_collision;
+	__le32 ack_or_ba_timeout_collision;
+} __packed; /* STATISTICS_TX_NON_PHY_API_S_VER_4 */
+
+#define MAX_CHAINS 3
+
+struct mvm_statistics_tx_non_phy_agg {
+	__le32 ba_timeout;
+	__le32 ba_reschedule_frames;
+	__le32 scd_query_agg_frame_cnt;
+	__le32 scd_query_no_agg;
+	__le32 scd_query_agg;
+	__le32 scd_query_mismatch;
+	__le32 frame_not_ready;
+	__le32 underrun;
+	__le32 bt_prio_kill;
+	__le32 rx_ba_rsp_cnt;
+	__s8 txpower[MAX_CHAINS];
+	__s8 reserved;
+	__le32 reserved2;
+} __packed; /* STATISTICS_TX_NON_PHY_AGG_API_S_VER_1 */
+
+struct mvm_statistics_tx_channel_width {
+	__le32 ext_cca_narrow_ch20[1];
+	__le32 ext_cca_narrow_ch40[2];
+	__le32 ext_cca_narrow_ch80[3];
+	__le32 ext_cca_narrow_ch160[4];
+	__le32 last_tx_ch_width_indx;
+	__le32 rx_detected_per_ch_width[4];
+	__le32 success_per_ch_width[4];
+	__le32 fail_per_ch_width[4];
+}; /* STATISTICS_TX_CHANNEL_WIDTH_API_S_VER_1 */
+
+struct mvm_statistics_tx_v4 {
+	struct mvm_statistics_tx_non_phy_v3 general;
+	struct mvm_statistics_tx_non_phy_agg agg;
+	struct mvm_statistics_tx_channel_width channel_width;
+} __packed; /* STATISTICS_TX_API_S_VER_4 */
+
+struct mvm_statistics_tx {
+	struct mvm_statistics_tx_non_phy general;
+	struct mvm_statistics_tx_non_phy_agg agg;
+	struct mvm_statistics_tx_channel_width channel_width;
+} __packed; /* STATISTICS_TX_API_S_VER_5 */
+
+
+struct mvm_statistics_bt_activity {
+	__le32 hi_priority_tx_req_cnt;
+	__le32 hi_priority_tx_denied_cnt;
+	__le32 lo_priority_tx_req_cnt;
+	__le32 lo_priority_tx_denied_cnt;
+	__le32 hi_priority_rx_req_cnt;
+	__le32 hi_priority_rx_denied_cnt;
+	__le32 lo_priority_rx_req_cnt;
+	__le32 lo_priority_rx_denied_cnt;
+} __packed;  /* STATISTICS_BT_ACTIVITY_API_S_VER_1 */
+
+struct mvm_statistics_general_common_v19 {
+	__le32 radio_temperature;
+	__le32 radio_voltage;
+	struct mvm_statistics_dbg dbg;
+	__le32 sleep_time;
+	__le32 slots_out;
+	__le32 slots_idle;
+	__le32 ttl_timestamp;
+	struct mvm_statistics_div slow_div;
+	__le32 rx_enable_counter;
+	/*
+	 * num_of_sos_states:
+	 *  count the number of times we have to re-tune
+	 *  in order to get out of bad PHY status
+	 */
+	__le32 num_of_sos_states;
+	__le32 beacon_filtered;
+	__le32 missed_beacons;
+	u8 beacon_filter_average_energy;
+	u8 beacon_filter_reason;
+	u8 beacon_filter_current_energy;
+	u8 beacon_filter_reserved;
+	__le32 beacon_filter_delta_time;
+	struct mvm_statistics_bt_activity bt_activity;
+	__le64 rx_time;
+	__le64 on_time_rf;
+	__le64 on_time_scan;
+	__le64 tx_time;
+} __packed;
+
+struct mvm_statistics_general_common {
+	__le32 radio_temperature;
+	struct mvm_statistics_dbg dbg;
+	__le32 sleep_time;
+	__le32 slots_out;
+	__le32 slots_idle;
+	__le32 ttl_timestamp;
+	struct mvm_statistics_div slow_div;
+	__le32 rx_enable_counter;
+	/*
+	 * num_of_sos_states:
+	 *  count the number of times we have to re-tune
+	 *  in order to get out of bad PHY status
+	 */
+	__le32 num_of_sos_states;
+	__le32 beacon_filtered;
+	__le32 missed_beacons;
+	u8 beacon_filter_average_energy;
+	u8 beacon_filter_reason;
+	u8 beacon_filter_current_energy;
+	u8 beacon_filter_reserved;
+	__le32 beacon_filter_delta_time;
+	struct mvm_statistics_bt_activity bt_activity;
+	__le64 rx_time;
+	__le64 on_time_rf;
+	__le64 on_time_scan;
+	__le64 tx_time;
+} __packed; /* STATISTICS_GENERAL_API_S_VER_10 */
+
+struct mvm_statistics_general_v8 {
+	struct mvm_statistics_general_common_v19 common;
+	__le32 beacon_counter[NUM_MAC_INDEX];
+	u8 beacon_average_energy[NUM_MAC_INDEX];
+	u8 reserved[4 - (NUM_MAC_INDEX % 4)];
+} __packed; /* STATISTICS_GENERAL_API_S_VER_8 */
+
+struct mvm_statistics_general_cdb_v9 {
+	struct mvm_statistics_general_common_v19 common;
+	__le32 beacon_counter[NUM_MAC_INDEX_CDB];
+	u8 beacon_average_energy[NUM_MAC_INDEX_CDB];
+	u8 reserved[4 - (NUM_MAC_INDEX_CDB % 4)];
+} __packed; /* STATISTICS_GENERAL_API_S_VER_9 */
+
+struct mvm_statistics_general_cdb {
+	struct mvm_statistics_general_common common;
+	__le32 beacon_counter[MAC_INDEX_AUX];
+	u8 beacon_average_energy[MAC_INDEX_AUX];
+	u8 reserved[8 - MAC_INDEX_AUX];
+} __packed; /* STATISTICS_GENERAL_API_S_VER_10 */
+
+/**
+ * struct mvm_statistics_load - RX statistics for multi-queue devices
+ * @air_time: accumulated air time, per mac
+ * @byte_count: accumulated byte count, per mac
+ * @pkt_count: accumulated packet count, per mac
+ * @avg_energy: average RSSI, per station
+ */
+struct mvm_statistics_load {
+	__le32 air_time[MAC_INDEX_AUX];
+	__le32 byte_count[MAC_INDEX_AUX];
+	__le32 pkt_count[MAC_INDEX_AUX];
+	u8 avg_energy[IWL_MVM_STATION_COUNT];
+} __packed; /* STATISTICS_RX_MAC_STATION_S_VER_3 */
+
+struct mvm_statistics_load_v1 {
+	__le32 air_time[NUM_MAC_INDEX];
+	__le32 byte_count[NUM_MAC_INDEX];
+	__le32 pkt_count[NUM_MAC_INDEX];
+	u8 avg_energy[IWL_MVM_STATION_COUNT];
+} __packed; /* STATISTICS_RX_MAC_STATION_S_VER_1 */
+
+struct mvm_statistics_rx {
+	struct mvm_statistics_rx_phy ofdm;
+	struct mvm_statistics_rx_phy cck;
+	struct mvm_statistics_rx_non_phy general;
+	struct mvm_statistics_rx_ht_phy ofdm_ht;
+} __packed; /* STATISTICS_RX_API_S_VER_4 */
+
+struct mvm_statistics_rx_v3 {
+	struct mvm_statistics_rx_phy_v2 ofdm;
+	struct mvm_statistics_rx_phy_v2 cck;
+	struct mvm_statistics_rx_non_phy_v3 general;
+	struct mvm_statistics_rx_ht_phy_v1 ofdm_ht;
+} __packed; /* STATISTICS_RX_API_S_VER_3 */
+
+/*
+ * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
+ *
+ * By default, uCode issues this notification after receiving a beacon
+ * while associated.  To disable this behavior, set DISABLE_NOTIF flag in the
+ * STATISTICS_CMD (0x9c), below.
+ */
+
+struct iwl_notif_statistics_v10 {
+	__le32 flag;
+	struct mvm_statistics_rx_v3 rx;
+	struct mvm_statistics_tx_v4 tx;
+	struct mvm_statistics_general_v8 general;
+} __packed; /* STATISTICS_NTFY_API_S_VER_10 */
+
+struct iwl_notif_statistics_v11 {
+	__le32 flag;
+	struct mvm_statistics_rx_v3 rx;
+	struct mvm_statistics_tx_v4 tx;
+	struct mvm_statistics_general_v8 general;
+	struct mvm_statistics_load_v1 load_stats;
+} __packed; /* STATISTICS_NTFY_API_S_VER_11 */
+
+struct iwl_notif_statistics_cdb {
+	__le32 flag;
+	struct mvm_statistics_rx rx;
+	struct mvm_statistics_tx tx;
+	struct mvm_statistics_general_cdb general;
+	struct mvm_statistics_load load_stats;
+} __packed; /* STATISTICS_NTFY_API_S_VER_13 */
+
+/**
+ * enum iwl_statistics_notif_flags - flags used in statistics notification
+ * @IWL_STATISTICS_REPLY_FLG_CLEAR: statistics were cleared after this report
+ */
+enum iwl_statistics_notif_flags {
+	IWL_STATISTICS_REPLY_FLG_CLEAR		= 0x1,
+};
+
+/**
+ * enum iwl_statistics_cmd_flags - flags used in statistics command
+ * @IWL_STATISTICS_FLG_CLEAR: request to clear statistics after the report
+ *	that's sent after this command
+ * @IWL_STATISTICS_FLG_DISABLE_NOTIF: disable unilateral statistics
+ *	notifications
+ */
+enum iwl_statistics_cmd_flags {
+	IWL_STATISTICS_FLG_CLEAR		= 0x1,
+	IWL_STATISTICS_FLG_DISABLE_NOTIF	= 0x2,
+};
+
+/**
+ * struct iwl_statistics_cmd - statistics config command
+ * @flags: flags from &enum iwl_statistics_cmd_flags
+ */
+struct iwl_statistics_cmd {
+	__le32 flags;
+} __packed; /* STATISTICS_CMD_API_S_VER_1 */
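+
+/*
+ * Illustrative sketch (editorial addition, not part of the firmware API):
+ * to clear the counters after the next report and stop the per-beacon
+ * STATISTICS_NOTIFICATION, a host would combine the bits from
+ * &enum iwl_statistics_cmd_flags, e.g.:
+ *
+ *	struct iwl_statistics_cmd cmd = {
+ *		.flags = cpu_to_le32(IWL_STATISTICS_FLG_CLEAR |
+ *				     IWL_STATISTICS_FLG_DISABLE_NOTIF),
+ *	};
+ */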
+
+#endif /* __iwl_fw_api_stats_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h
new file mode 100644
index 0000000..7c6c246
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h
@@ -0,0 +1,208 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_fw_api_tdls_h__
+#define __iwl_fw_api_tdls_h__
+
+#include "fw/api/tx.h"
+#include "fw/api/phy-ctxt.h"
+
+#define IWL_MVM_TDLS_STA_COUNT	4
+
+/* Type of TDLS request */
+enum iwl_tdls_channel_switch_type {
+	TDLS_SEND_CHAN_SW_REQ = 0,
+	TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH,
+	TDLS_MOVE_CH,
+}; /* TDLS_STA_CHANNEL_SWITCH_CMD_TYPE_API_E_VER_1 */
+
+/**
+ * struct iwl_tdls_channel_switch_timing - Switch timing in TDLS channel-switch
+ * @frame_timestamp: GP2 timestamp of channel-switch request/response packet
+ *	received from peer
+ * @max_offchan_duration: the amount of time, in microseconds, out of a DTIM
+ *	period that is given to the TDLS off-channel communication. For
+ *	instance, if the DTIM is 200TU and the TDLS peer is to be given 25%
+ *	of the time, the value given will be 50TU, or 50 * 1024 if translated
+ *	into microseconds.
+ * @switch_time: switch time the peer sent in its channel switch timing IE
+ * @switch_timeout: switch timeout the peer sent in its channel switch timing IE
+ */
+struct iwl_tdls_channel_switch_timing {
+	__le32 frame_timestamp; /* GP2 time of peer packet Rx */
+	__le32 max_offchan_duration; /* given in micro-seconds */
+	__le32 switch_time; /* given in micro-seconds */
+	__le32 switch_timeout; /* given in micro-seconds */
+} __packed; /* TDLS_STA_CHANNEL_SWITCH_TIMING_DATA_API_S_VER_1 */
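+
+/*
+ * Worked example (editorial addition, not part of the firmware API): for a
+ * DTIM period of 200TU with 25% of the time granted to the TDLS peer, the
+ * off-channel budget is 200TU * 25% = 50TU, and since one TU is 1024
+ * microseconds:
+ *
+ *	timing.max_offchan_duration = cpu_to_le32(50 * 1024);
+ */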
+
+#define IWL_TDLS_CH_SW_FRAME_MAX_SIZE 200
+
+/**
+ * struct iwl_tdls_channel_switch_frame - TDLS channel switch frame template
+ *
+ * A template representing a TDLS channel-switch request or response frame
+ *
+ * @switch_time_offset: offset to the channel switch timing IE in the template
+ * @tx_cmd: Tx parameters for the frame
+ * @data: frame data
+ */
+struct iwl_tdls_channel_switch_frame {
+	__le32 switch_time_offset;
+	struct iwl_tx_cmd tx_cmd;
+	u8 data[IWL_TDLS_CH_SW_FRAME_MAX_SIZE];
+} __packed; /* TDLS_STA_CHANNEL_SWITCH_FRAME_API_S_VER_1 */
+
+/**
+ * struct iwl_tdls_channel_switch_cmd - TDLS channel switch command
+ *
+ * The command is sent to initiate a channel switch and also in response to
+ * incoming TDLS channel-switch request/response packets from remote peers.
+ *
+ * @switch_type: see &enum iwl_tdls_channel_switch_type
+ * @peer_sta_id: station id of TDLS peer
+ * @ci: channel we switch to
+ * @timing: timing related data for command
+ * @frame: channel-switch request/response template, depending on switch_type
+ */
+struct iwl_tdls_channel_switch_cmd {
+	u8 switch_type;
+	__le32 peer_sta_id;
+	struct iwl_fw_channel_info ci;
+	struct iwl_tdls_channel_switch_timing timing;
+	struct iwl_tdls_channel_switch_frame frame;
+} __packed; /* TDLS_STA_CHANNEL_SWITCH_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_tdls_channel_switch_notif - TDLS channel switch start notification
+ *
+ * @status: non-zero on success
+ * @offchannel_duration: duration given in microseconds
+ * @sta_id: peer currently performing the channel-switch with
+ */
+struct iwl_tdls_channel_switch_notif {
+	__le32 status;
+	__le32 offchannel_duration;
+	__le32 sta_id;
+} __packed; /* TDLS_STA_CHANNEL_SWITCH_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwl_tdls_sta_info - TDLS station info
+ *
+ * @sta_id: station id of the TDLS peer
+ * @tx_to_peer_tid: TID reserved vs. the peer for FW based Tx
+ * @tx_to_peer_ssn: initial SSN the FW should use for Tx on its TID vs the peer
+ * @is_initiator: 1 if the peer is the TDLS link initiator, 0 otherwise
+ */
+struct iwl_tdls_sta_info {
+	u8 sta_id;
+	u8 tx_to_peer_tid;
+	__le16 tx_to_peer_ssn;
+	__le32 is_initiator;
+} __packed; /* TDLS_STA_INFO_VER_1 */
+
+/**
+ * struct iwl_tdls_config_cmd - TDLS basic config command
+ *
+ * @id_and_color: MAC id and color being configured
+ * @tdls_peer_count: number of currently connected TDLS peers
+ * @tx_to_ap_tid: TID reserved vs. the AP for FW based Tx
+ * @tx_to_ap_ssn: initial SSN the FW should use for Tx on its TID vs. the AP
+ * @sta_info: per-station info. Only the first tdls_peer_count entries are set
+ * @pti_req_data_offset: offset of network-level data for the PTI template
+ * @pti_req_tx_cmd: Tx parameters for PTI request template
+ * @pti_req_template: PTI request template data
+ */
+struct iwl_tdls_config_cmd {
+	__le32 id_and_color; /* mac id and color */
+	u8 tdls_peer_count;
+	u8 tx_to_ap_tid;
+	__le16 tx_to_ap_ssn;
+	struct iwl_tdls_sta_info sta_info[IWL_MVM_TDLS_STA_COUNT];
+
+	__le32 pti_req_data_offset;
+	struct iwl_tx_cmd pti_req_tx_cmd;
+	u8 pti_req_template[0];
+} __packed; /* TDLS_CONFIG_CMD_API_S_VER_1 */
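+
+/*
+ * Illustrative sketch (editorial addition; iwl_tdls_config_cmd_size() is a
+ * hypothetical helper, not part of the firmware API): the PTI request
+ * template is a variable-length trailer, so the total command length is
+ * the fixed part plus the template bytes.
+ */
+static inline size_t iwl_tdls_config_cmd_size(size_t pti_req_template_len)
+{
+	return sizeof(struct iwl_tdls_config_cmd) + pti_req_template_len;
+}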
+
+/**
+ * struct iwl_tdls_config_sta_info_res - TDLS per-station config information
+ *
+ * @sta_id: station id of the TDLS peer
+ * @tx_to_peer_last_seq: last sequence number used by FW during FW-based Tx to
+ *	the peer
+ */
+struct iwl_tdls_config_sta_info_res {
+	__le16 sta_id;
+	__le16 tx_to_peer_last_seq;
+} __packed; /* TDLS_STA_INFO_RSP_VER_1 */
+
+/**
+ * struct iwl_tdls_config_res - TDLS config information from FW
+ *
+ * @tx_to_ap_last_seq: last sequence number used by FW during FW-based Tx to AP
+ * @sta_info: per-station TDLS config information
+ */
+struct iwl_tdls_config_res {
+	__le32 tx_to_ap_last_seq;
+	struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT];
+} __packed; /* TDLS_CONFIG_RSP_API_S_VER_1 */
+
+#endif /* __iwl_fw_api_tdls_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
new file mode 100644
index 0000000..f824beb
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
@@ -0,0 +1,386 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_fw_api_time_event_h__
+#define __iwl_fw_api_time_event_h__
+
+#include "fw/api/phy-ctxt.h"
+
+/* Time Event types, according to MAC type */
+enum iwl_time_event_type {
+	/* BSS Station Events */
+	TE_BSS_STA_AGGRESSIVE_ASSOC,
+	TE_BSS_STA_ASSOC,
+	TE_BSS_EAP_DHCP_PROT,
+	TE_BSS_QUIET_PERIOD,
+
+	/* P2P Device Events */
+	TE_P2P_DEVICE_DISCOVERABLE,
+	TE_P2P_DEVICE_LISTEN,
+	TE_P2P_DEVICE_ACTION_SCAN,
+	TE_P2P_DEVICE_FULL_SCAN,
+
+	/* P2P Client Events */
+	TE_P2P_CLIENT_AGGRESSIVE_ASSOC,
+	TE_P2P_CLIENT_ASSOC,
+	TE_P2P_CLIENT_QUIET_PERIOD,
+
+	/* P2P GO Events */
+	TE_P2P_GO_ASSOC_PROT,
+	TE_P2P_GO_REPETITIVET_NOA,
+	TE_P2P_GO_CT_WINDOW,
+
+	/* WiDi Sync Events */
+	TE_WIDI_TX_SYNC,
+
+	/* Channel Switch NoA */
+	TE_CHANNEL_SWITCH_PERIOD,
+
+	TE_MAX
+}; /* MAC_EVENT_TYPE_API_E_VER_1 */
+
+/* Time event - defines for command API v1 */
+
+/*
+ * @TE_V1_FRAG_NONE: fragmentation of the time event is NOT allowed.
+ * @TE_V1_FRAG_SINGLE: fragmentation of the time event is allowed, but only
+ *	the first fragment is scheduled.
+ * @TE_V1_FRAG_DUAL: fragmentation of the time event is allowed, but only
+ *	the first 2 fragments are scheduled.
+ * @TE_V1_FRAG_ENDLESS: fragmentation of the time event is allowed, and any
+ *	number of fragments are valid.
+ *
+ * Other than the constants defined above, specifying a fragmentation value 'x'
+ * means that the event can be fragmented but only the first 'x' will be
+ * scheduled.
+ */
+enum {
+	TE_V1_FRAG_NONE = 0,
+	TE_V1_FRAG_SINGLE = 1,
+	TE_V1_FRAG_DUAL = 2,
+	TE_V1_FRAG_ENDLESS = 0xffffffff
+};
+
+/* If a Time Event can be fragmented, this is the max number of fragments */
+#define TE_V1_FRAG_MAX_MSK	0x0fffffff
+/* Repeat the time event endlessly (until removed) */
+#define TE_V1_REPEAT_ENDLESS	0xffffffff
+/* If a Time Event has bounded repetitions, this is the maximal value */
+#define TE_V1_REPEAT_MAX_MSK_V1	0x0fffffff
+
+/* Time Event dependencies: none, on another TE, or in a specific time */
+enum {
+	TE_V1_INDEPENDENT		= 0,
+	TE_V1_DEP_OTHER			= BIT(0),
+	TE_V1_DEP_TSF			= BIT(1),
+	TE_V1_EVENT_SOCIOPATHIC		= BIT(2),
+}; /* MAC_EVENT_DEPENDENCY_POLICY_API_E_VER_2 */
+
+/*
+ * @TE_V1_NOTIF_NONE: no notifications
+ * @TE_V1_NOTIF_HOST_EVENT_START: request/receive notification on event start
+ * @TE_V1_NOTIF_HOST_EVENT_END: request/receive notification on event end
+ * @TE_V1_NOTIF_INTERNAL_EVENT_START: internal FW use
+ * @TE_V1_NOTIF_INTERNAL_EVENT_END: internal FW use.
+ * @TE_V1_NOTIF_HOST_FRAG_START: request/receive notification on frag start
+ * @TE_V1_NOTIF_HOST_FRAG_END: request/receive notification on frag end
+ * @TE_V1_NOTIF_INTERNAL_FRAG_START: internal FW use.
+ * @TE_V1_NOTIF_INTERNAL_FRAG_END: internal FW use.
+ *
+ * Supported Time event notifications configuration.
+ * A notification (both event and fragment) includes a status indicating whether
+ * the FW was able to schedule the event or not. For fragment start/end
+ * notification the status is always success. There is no start/end fragment
+ * notification for monolithic events.
+ */
+enum {
+	TE_V1_NOTIF_NONE = 0,
+	TE_V1_NOTIF_HOST_EVENT_START = BIT(0),
+	TE_V1_NOTIF_HOST_EVENT_END = BIT(1),
+	TE_V1_NOTIF_INTERNAL_EVENT_START = BIT(2),
+	TE_V1_NOTIF_INTERNAL_EVENT_END = BIT(3),
+	TE_V1_NOTIF_HOST_FRAG_START = BIT(4),
+	TE_V1_NOTIF_HOST_FRAG_END = BIT(5),
+	TE_V1_NOTIF_INTERNAL_FRAG_START = BIT(6),
+	TE_V1_NOTIF_INTERNAL_FRAG_END = BIT(7),
+}; /* MAC_EVENT_ACTION_API_E_VER_2 */
+
+/* Time event - defines for command API */
+
+/*
+ * @TE_V2_FRAG_NONE: fragmentation of the time event is NOT allowed.
+ * @TE_V2_FRAG_SINGLE: fragmentation of the time event is allowed, but only
+ *  the first fragment is scheduled.
+ * @TE_V2_FRAG_DUAL: fragmentation of the time event is allowed, but only
+ *  the first 2 fragments are scheduled.
+ * @TE_V2_FRAG_ENDLESS: fragmentation of the time event is allowed, and any
+ *  number of fragments are valid.
+ *
+ * Other than the constants defined above, specifying a fragmentation value 'x'
+ * means that the event can be fragmented but only the first 'x' will be
+ * scheduled.
+ */
+enum {
+	TE_V2_FRAG_NONE = 0,
+	TE_V2_FRAG_SINGLE = 1,
+	TE_V2_FRAG_DUAL = 2,
+	TE_V2_FRAG_MAX = 0xfe,
+	TE_V2_FRAG_ENDLESS = 0xff
+};
+
+/* Repeat the time event endlessly (until removed) */
+#define TE_V2_REPEAT_ENDLESS	0xff
+/* If a Time Event has bounded repetitions, this is the maximal value */
+#define TE_V2_REPEAT_MAX	0xfe
+
+#define TE_V2_PLACEMENT_POS	12
+#define TE_V2_ABSENCE_POS	15
+
+/**
+ * enum iwl_time_event_policy - Time event policy values
+ * A notification (both event and fragment) includes a status indicating whether
+ * the FW was able to schedule the event or not. For fragment start/end
+ * notification the status is always success. There is no start/end fragment
+ * notification for monolithic events.
+ *
+ * @TE_V2_DEFAULT_POLICY: independent, social, present, unnoticeable
+ * @TE_V2_NOTIF_HOST_EVENT_START: request/receive notification on event start
+ * @TE_V2_NOTIF_HOST_EVENT_END: request/receive notification on event end
+ * @TE_V2_NOTIF_INTERNAL_EVENT_START: internal FW use
+ * @TE_V2_NOTIF_INTERNAL_EVENT_END: internal FW use.
+ * @TE_V2_NOTIF_HOST_FRAG_START: request/receive notification on frag start
+ * @TE_V2_NOTIF_HOST_FRAG_END: request/receive notification on frag end
+ * @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use.
+ * @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use.
+ * @TE_V2_START_IMMEDIATELY: start time event immediately
+ * @TE_V2_DEP_OTHER: depends on another time event
+ * @TE_V2_DEP_TSF: depends on a specific time
+ * @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of the same MAC
+ * @TE_V2_ABSENCE: are we present or absent during the Time Event.
+ */
+enum iwl_time_event_policy {
+	TE_V2_DEFAULT_POLICY = 0x0,
+
+	/* notifications (event start/stop, fragment start/stop) */
+	TE_V2_NOTIF_HOST_EVENT_START = BIT(0),
+	TE_V2_NOTIF_HOST_EVENT_END = BIT(1),
+	TE_V2_NOTIF_INTERNAL_EVENT_START = BIT(2),
+	TE_V2_NOTIF_INTERNAL_EVENT_END = BIT(3),
+
+	TE_V2_NOTIF_HOST_FRAG_START = BIT(4),
+	TE_V2_NOTIF_HOST_FRAG_END = BIT(5),
+	TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6),
+	TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7),
+	TE_V2_START_IMMEDIATELY = BIT(11),
+
+	/* placement characteristics */
+	TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS),
+	TE_V2_DEP_TSF = BIT(TE_V2_PLACEMENT_POS + 1),
+	TE_V2_EVENT_SOCIOPATHIC = BIT(TE_V2_PLACEMENT_POS + 2),
+
+	/* are we present or absent during the Time Event. */
+	TE_V2_ABSENCE = BIT(TE_V2_ABSENCE_POS),
+};
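+
+/*
+ * Illustrative sketch (editorial addition, not part of the firmware API):
+ * a policy word is a bitwise OR of notification, placement and absence
+ * bits; e.g. a host wanting start/end notifications for an event that must
+ * start immediately would use:
+ *
+ *	__le16 policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
+ *				    TE_V2_NOTIF_HOST_EVENT_END |
+ *				    TE_V2_START_IMMEDIATELY);
+ */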
+
+/**
+ * struct iwl_time_event_cmd - configuring Time Events
+ * (uses struct MAC_TIME_EVENT_DATA_API_S_VER_2; see also
+ * version 1, determined by IWL_UCODE_TLV_FLAGS)
+ * ( TIME_EVENT_CMD = 0x29 )
+ * @id_and_color: ID and color of the relevant MAC,
+ *	&enum iwl_ctxt_id_and_color
+ * @action: action to perform, one of &enum iwl_ctxt_action
+ * @id: this field has two meanings, depending on the action:
+ *	If the action is ADD, then it means the type of event to add.
+ *	For all other actions it is the unique event ID assigned when the
+ *	event was added by the FW.
+ * @apply_time: When to start the Time Event (in GP2)
+ * @max_delay: maximum delay to event's start (apply time), in TU
+ * @depends_on: the unique ID of the event we depend on (if any)
+ * @interval: interval between repetitions, in TU
+ * @duration: duration of event in TU
+ * @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS
+ * @max_frags: maximal number of fragments the Time Event can be divided into
+ * @policy: defines whether uCode shall notify the host or other uCode modules
+ *	on event and/or fragment start and/or end; built from the
+ *	TE_V2_NOTIF_* bits, the placement bits (TE_V2_DEP_OTHER, TE_V2_DEP_TSF,
+ *	TE_V2_EVENT_SOCIOPATHIC) and TE_V2_ABSENCE, see
+ *	&enum iwl_time_event_policy
+ */
+struct iwl_time_event_cmd {
+	/* COMMON_INDEX_HDR_API_S_VER_1 */
+	__le32 id_and_color;
+	__le32 action;
+	__le32 id;
+	/* MAC_TIME_EVENT_DATA_API_S_VER_2 */
+	__le32 apply_time;
+	__le32 max_delay;
+	__le32 depends_on;
+	__le32 interval;
+	__le32 duration;
+	u8 repeat;
+	u8 max_frags;
+	__le16 policy;
+} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_2 */
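+
+/*
+ * Illustrative sketch (editorial addition, not part of the firmware API;
+ * the numeric values are hypothetical): adding a single, non-fragmented
+ * 100TU time event that may be delayed by up to 500TU could look like:
+ *
+ *	struct iwl_time_event_cmd cmd = {
+ *		.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+ *		.id = cpu_to_le32(TE_BSS_STA_ASSOC),
+ *		.max_delay = cpu_to_le32(500),
+ *		.duration = cpu_to_le32(100),
+ *		.repeat = 1,
+ *		.max_frags = TE_V2_FRAG_NONE,
+ *		.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
+ *				      TE_V2_NOTIF_HOST_EVENT_END),
+ *	};
+ */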
+
+/**
+ * struct iwl_time_event_resp - response structure to iwl_time_event_cmd
+ * @status: bit 0 indicates success, all others specify errors
+ * @id: the Time Event type
+ * @unique_id: the unique ID assigned (in ADD) or given (others) to the TE
+ * @id_and_color: ID and color of the relevant MAC,
+ *	&enum iwl_ctxt_id_and_color
+ */
+struct iwl_time_event_resp {
+	__le32 status;
+	__le32 id;
+	__le32 unique_id;
+	__le32 id_and_color;
+} __packed; /* MAC_TIME_EVENT_RSP_API_S_VER_1 */
+
+/**
+ * struct iwl_time_event_notif - notifications of time event start/stop
+ * ( TIME_EVENT_NOTIFICATION = 0x2a )
+ * @timestamp: action timestamp in GP2
+ * @session_id: session's unique id
+ * @unique_id: unique id of the Time Event itself
+ * @id_and_color: ID and color of the relevant MAC
+ * @action: &enum iwl_time_event_policy
+ * @status: true if scheduled, false otherwise (not executed)
+ */
+struct iwl_time_event_notif {
+	__le32 timestamp;
+	__le32 session_id;
+	__le32 unique_id;
+	__le32 id_and_color;
+	__le32 action;
+	__le32 status;
+} __packed; /* MAC_TIME_EVENT_NTFY_API_S_VER_1 */
+
+/*
+ * Aux ROC command
+ *
+ * Command requests the firmware to create a time event for a certain duration
+ * and remain on the given channel. This is done by using the Aux framework in
+ * the FW.
+ * The command was first used for Hot Spot issues - but can be used regardless
+ * of Hot Spot.
+ *
+ * ( HOT_SPOT_CMD 0x53 )
+ *
+ * @id_and_color: ID and color of the MAC
+ * @action: action to perform, one of FW_CTXT_ACTION_*
+ * @event_unique_id: If the action is FW_CTXT_ACTION_REMOVE then the
+ *	event_unique_id should be the id of the time event assigned by ucode.
+ *	Otherwise ignore the event_unique_id.
+ * @sta_id_and_color: station id and color, resumed during "Remain On Channel"
+ *	activity.
+ * @channel_info: channel info
+ * @node_addr: Our MAC Address
+ * @reserved: reserved for alignment
+ * @apply_time: GP2 value to start (should always be the current GP2 value)
+ * @apply_time_max_delay: Maximum apply time delay value in TU. Defines max
+ *	time by which start of the event is allowed to be postponed.
+ * @duration: event duration in TU. To calculate event duration:
+ *	timeEventDuration = min(duration, remainingQuota)
+ */
+struct iwl_hs20_roc_req {
+	/* COMMON_INDEX_HDR_API_S_VER_1 hdr */
+	__le32 id_and_color;
+	__le32 action;
+	__le32 event_unique_id;
+	__le32 sta_id_and_color;
+	struct iwl_fw_channel_info channel_info;
+	u8 node_addr[ETH_ALEN];
+	__le16 reserved;
+	__le32 apply_time;
+	__le32 apply_time_max_delay;
+	__le32 duration;
+} __packed; /* HOT_SPOT_CMD_API_S_VER_1 */
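+
+/*
+ * Worked example (editorial addition, not part of the firmware API): per
+ * the @duration documentation, a request for 500TU with a remaining quota
+ * of 300TU yields an effective on-channel time of min(500, 300) = 300TU.
+ */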
+
+/*
+ * values for AUX ROC result values
+ */
+enum iwl_mvm_hot_spot {
+	HOT_SPOT_RSP_STATUS_OK,
+	HOT_SPOT_RSP_STATUS_TOO_MANY_EVENTS,
+	HOT_SPOT_MAX_NUM_OF_SESSIONS,
+};
+
+/*
+ * Aux ROC command response
+ *
+ * In response to iwl_hs20_roc_req the FW sends this response to notify the
+ * driver of the UID of the time event.
+ *
+ * ( HOT_SPOT_CMD 0x53 )
+ *
+ * @event_unique_id: Unique ID of time event assigned by ucode
+ * @status: Return status; 0 is success, all other values indicate specific errors
+ */
+struct iwl_hs20_roc_res {
+	__le32 event_unique_id;
+	__le32 status;
+} __packed; /* HOT_SPOT_RSP_API_S_VER_1 */
+
+#endif /* __iwl_fw_api_time_event_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tof.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tof.h
new file mode 100644
index 0000000..7328a16
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tof.h
@@ -0,0 +1,393 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_fw_api_tof_h__
+#define __iwl_fw_api_tof_h__
+
+/* ToF sub-group command IDs */
+enum iwl_mvm_tof_sub_grp_ids {
+	TOF_RANGE_REQ_CMD = 0x1,
+	TOF_CONFIG_CMD = 0x2,
+	TOF_RANGE_ABORT_CMD = 0x3,
+	TOF_RANGE_REQ_EXT_CMD = 0x4,
+	TOF_RESPONDER_CONFIG_CMD = 0x5,
+	TOF_NW_INITIATED_RES_SEND_CMD = 0x6,
+	TOF_NEIGHBOR_REPORT_REQ_CMD = 0x7,
+	TOF_NEIGHBOR_REPORT_RSP_NOTIF = 0xFC,
+	TOF_NW_INITIATED_REQ_RCVD_NOTIF = 0xFD,
+	TOF_RANGE_RESPONSE_NOTIF = 0xFE,
+	TOF_MCSI_DEBUG_NOTIF = 0xFB,
+};
+
+/**
+ * struct iwl_tof_config_cmd - ToF configuration
+ * @tof_disabled: 0 - enabled, 1 - disabled
+ * @one_sided_disabled: 0 - enabled, 1 - disabled
+ * @is_debug_mode: 1 - debug mode, 0 - otherwise
+ * @is_buf_required: 1 - channel estimation buffer required, 0 - otherwise
+ */
+struct iwl_tof_config_cmd {
+	__le32 sub_grp_cmd_id;
+	u8 tof_disabled;
+	u8 one_sided_disabled;
+	u8 is_debug_mode;
+	u8 is_buf_required;
+} __packed;
+
+/**
+ * struct iwl_tof_responder_config_cmd - ToF AP mode (for debug)
+ * @burst_period: future use: (currently hard coded in the LMAC)
+ *		  The interval between two sequential bursts.
+ * @min_delta_ftm: future use: (currently hard coded in the LMAC)
+ *		   The minimum delay between two sequential FTM Responses
+ *		   in the same burst.
+ * @burst_duration: future use: (currently hard coded in the LMAC)
+ *		   The total time for all FTMs handshake in the same burst.
+ *		   Affect the time events duration in the LMAC.
+ * @num_of_burst_exp: future use: (currently hard coded in the LMAC)
+ *		   The number of bursts for the current ToF request. Affect
+ *		   the number of events allocations in the current iteration.
+ * @get_ch_est: for xVT only, NA for driver
+ * @abort_responder: when set to '1' - Responder will terminate its activity
+ *		     (all other fields in the command are ignored)
+ * @recv_sta_req_params: 1 - Responder will ignore the other Responder's
+ *			 params and use the recommended Initiator params.
+ *			 0 - otherwise
+ * @channel_num: current AP Channel
+ * @bandwidth: current AP Bandwidth: 0  20MHz, 1  40MHz, 2  80MHz
+ * @rate: current AP rate
+ * @ctrl_ch_position: coding of the control channel position relative to
+ *	the center frequency:
+ *
+ *	40 MHz
+ *		0 below center, 1 above center
+ *
+ *	80 MHz
+ *		bits [0..1]
+ *		 * 0  the near 20MHz to the center,
+ *		 * 1  the far  20MHz to the center
+ *		bit[2]
+ *		 as above 40MHz
+ * @ftm_per_burst: FTMs per Burst
+ * @ftm_resp_ts_avail: '0' - we don't measure over the Initial FTM Response,
+ *		  '1' - we measure over the Initial FTM Response
+ * @asap_mode: ASAP / Non ASAP mode for the current WLS station
+ * @sta_id: index of the AP STA when in AP mode
+ * @tsf_timer_offset_msecs: The dictated time offset (mSec) from the AP's TSF
+ * @toa_offset: Artificial addition [0.1nsec] for the ToA - to be used for debug
+ *		purposes, simulating station movement by adding various values
+ *		to this field
+ * @bssid: Current AP BSSID
+ */
+struct iwl_tof_responder_config_cmd {
+	__le32 sub_grp_cmd_id;
+	__le16 burst_period;
+	u8 min_delta_ftm;
+	u8 burst_duration;
+	u8 num_of_burst_exp;
+	u8 get_ch_est;
+	u8 abort_responder;
+	u8 recv_sta_req_params;
+	u8 channel_num;
+	u8 bandwidth;
+	u8 rate;
+	u8 ctrl_ch_position;
+	u8 ftm_per_burst;
+	u8 ftm_resp_ts_avail;
+	u8 asap_mode;
+	u8 sta_id;
+	__le16 tsf_timer_offset_msecs;
+	__le16 toa_offset;
+	u8 bssid[ETH_ALEN];
+} __packed;
+
+/**
+ * struct iwl_tof_range_request_ext_cmd - extended range req for WLS
+ * @tsf_timer_offset_msec: the recommended time offset (mSec) from the AP's TSF
+ * @reserved: reserved
+ * @min_delta_ftm: Minimal time between two consecutive measurements,
+ *		   in units of 100us. 0 means no preference by station
+ * @ftm_format_and_bw20M: FTM Channel Spacing/Format for 20MHz: recommended
+ *			value to be sent to the AP
+ * @ftm_format_and_bw40M: FTM Channel Spacing/Format for 40MHz: recommended
+ *			value to be sent to the AP
+ * @ftm_format_and_bw80M: FTM Channel Spacing/Format for 80MHz: recommended
+ *			value to be sent to the AP
+ */
+struct iwl_tof_range_req_ext_cmd {
+	__le32 sub_grp_cmd_id;
+	__le16 tsf_timer_offset_msec;
+	__le16 reserved;
+	u8 min_delta_ftm;
+	u8 ftm_format_and_bw20M;
+	u8 ftm_format_and_bw40M;
+	u8 ftm_format_and_bw80M;
+} __packed;
+
+#define IWL_MVM_TOF_MAX_APS 21
+
+/**
+ * struct iwl_tof_range_req_ap_entry - AP configuration parameters
+ * @channel_num: Current AP Channel
+ * @bandwidth: Current AP Bandwidth: 0  20MHz, 1  40MHz, 2  80MHz
+ * @tsf_delta_direction: TSF relatively to the subject AP
+ * @ctrl_ch_position: Coding of the control channel position relative to the
+ *	     center frequency.
+ *	     40MHz  0 below center, 1 above center
+ *	     80MHz  bits [0..1]: 0  the near 20MHz to the center,
+ *				 1  the far  20MHz to the center
+ *		    bit[2]  as above 40MHz
+ * @bssid: AP's bss id
+ * @measure_type: Measurement type: 0 - two sided, 1 - One sided
+ * @num_of_bursts: Recommended value to be sent to the AP. Base-2 exponent
+ *		   of the number of measurement iterations (min 2^0 = 1, max 2^14)
+ * @burst_period: Recommended value to be sent to the AP. Measurement
+ *		  periodicity In units of 100ms. ignored if num_of_bursts = 0
+ * @samples_per_burst: 2-sided: the number of FTMs pairs in single Burst (1-31)
+ *		       1-sided: how many rts/cts pairs should be used per burst.
+ * @retries_per_sample: Max number of retries that the LMAC should send
+ *			in case of no replies by the AP.
+ * @tsf_delta: TSF Delta in units of microseconds.
+ *	       The difference between the AP TSF and the device local clock.
+ * @location_req: Location Request Bit[0] LCI should be sent in the FTMR
+ *			      Bit[1] Civic should be sent in the FTMR
+ * @asap_mode: 0 - non asap mode, 1 - asap mode (not relevant for one sided)
+ * @enable_dyn_ack: Enable Dynamic ACK BW.
+ *	    0  Initiator interact with regular AP
+ *	    1  Initiator interact with Responder machine: need to send the
+ *	    Initiator Acks with HT 40MHz / 80MHz, since the Responder should
+ *	    use it for its ch est measurement (this flag will be set when we
+ *	    configure the opposite machine to be Responder).
+ * @rssi: Last received value
+ *	  legal values: -128 to 0; values above 0 (such as 0x7f) indicate
+ *	  an invalid value.
+ */
+struct iwl_tof_range_req_ap_entry {
+	u8 channel_num;
+	u8 bandwidth;
+	u8 tsf_delta_direction;
+	u8 ctrl_ch_position;
+	u8 bssid[ETH_ALEN];
+	u8 measure_type;
+	u8 num_of_bursts;
+	__le16 burst_period;
+	u8 samples_per_burst;
+	u8 retries_per_sample;
+	__le32 tsf_delta;
+	u8 location_req;
+	u8 asap_mode;
+	u8 enable_dyn_ack;
+	s8 rssi;
+} __packed;
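+
+/*
+ * Worked example (editorial addition, not part of the firmware API):
+ * num_of_bursts is a base-2 exponent, so a value of 3 requests 2^3 = 8
+ * bursts; with burst_period = 2 (units of 100ms, i.e. 200ms) the session
+ * spans roughly 8 * 200ms = 1.6s.
+ */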
+
+/**
+ * enum iwl_tof_response_mode
+ * @IWL_MVM_TOF_RESPOSE_ASAP: report each AP measurement separately as soon as
+ *			      possible (not supported for this release)
+ * @IWL_MVM_TOF_RESPOSE_TIMEOUT: report all AP measurements as a batch upon
+ *				 timeout expiration
+ * @IWL_MVM_TOF_RESPOSE_COMPLETE: report all AP measurements as a batch at the
+ *				  earlier of: measurements completion / timeout
+ *				  expiration.
+ */
+enum iwl_tof_response_mode {
+	IWL_MVM_TOF_RESPOSE_ASAP = 1,
+	IWL_MVM_TOF_RESPOSE_TIMEOUT,
+	IWL_MVM_TOF_RESPOSE_COMPLETE,
+};
+
+/**
+ * struct iwl_tof_range_req_cmd - start measurement cmd
+ * @request_id: A Token incremented per request. The same Token will be
+ *		sent back in the range response
+ * @initiator: 0- NW initiated,  1 - Client Initiated
+ * @one_sided_los_disable: '0'- run ML-Algo for both ToF/OneSided,
+ *			   '1' - run ML-Algo for ToF only
+ * @req_timeout: Requested timeout of the response in units of 100ms.
+ *	     This is equivalent to the session time configured to the
+ *	     LMAC in Initiator Request
+ * @report_policy: partially supported for this release: for the current
+ *		   release, the range report will be uploaded as a batch when
+ *		   ready or when the session is done (successfully/partially);
+ *		   one of &enum iwl_tof_response_mode.
+ * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @macaddr_random: '0' Use default source MAC address (i.e. p2_p),
+ *	            '1' Use MAC Address randomization according to the below
+ * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
+ *		  Bits set to 1 shall be randomized by the UMAC
+ * @ap: per-AP request data
+ */
+struct iwl_tof_range_req_cmd {
+	__le32 sub_grp_cmd_id;
+	u8 request_id;
+	u8 initiator;
+	u8 one_sided_los_disable;
+	u8 req_timeout;
+	u8 report_policy;
+	u8 los_det_disable;
+	u8 num_of_ap;
+	u8 macaddr_random;
+	u8 macaddr_template[ETH_ALEN];
+	u8 macaddr_mask[ETH_ALEN];
+	struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS];
+} __packed;
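+
+/*
+ * Illustrative sketch (editorial addition, not part of the firmware API):
+ * per the @macaddr_mask documentation, the UMAC keeps template bits where
+ * the mask is 0 and randomizes bits where the mask is 1, i.e. per byte:
+ *
+ *	addr[i] = (macaddr_template[i] & ~macaddr_mask[i]) |
+ *		  (random[i] & macaddr_mask[i]);
+ */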
+
+/**
+ * struct iwl_tof_gen_resp_cmd - generic ToF response
+ * @sub_grp_cmd_id: ToF sub-group command ID
+ * @data: response data
+ */
+struct iwl_tof_gen_resp_cmd {
+	__le32 sub_grp_cmd_id;
+	u8 data[];
+} __packed;
+
+/**
+ * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response)
+ * @bssid: BSSID of the AP
+ * @measure_status: current APs measurement status, one of
+ *	&enum iwl_tof_entry_status.
+ * @measure_bw: Current AP Bandwidth: 0  20MHz, 1  40MHz, 2  80MHz
+ * @rtt: The Round Trip Time that took for the last measurement for
+ *	 current AP [nSec]
+ * @rtt_variance: The Variance of the RTT values measured for current AP
+ * @rtt_spread: The Difference between the maximum and the minimum RTT
+ *	       values measured for current AP in the current session [nsec]
+ * @rssi: RSSI as uploaded in the Channel Estimation notification
+ * @rssi_spread: The Difference between the maximum and the minimum RSSI values
+ *	        measured for current AP in the current session
+ * @reserved: reserved
+ * @range: Measured range [cm]
+ * @range_variance: Measured range variance [cm]
+ * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was
+ *	       uploaded by the LMAC
+ */
+struct iwl_tof_range_rsp_ap_entry_ntfy {
+	u8 bssid[ETH_ALEN];
+	u8 measure_status;
+	u8 measure_bw;
+	__le32 rtt;
+	__le32 rtt_variance;
+	__le32 rtt_spread;
+	s8 rssi;
+	u8 rssi_spread;
+	__le16 reserved;
+	__le32 range;
+	__le32 range_variance;
+	__le32 timestamp;
+} __packed;
+
+/**
+ * struct iwl_tof_range_rsp_ntfy - ranging response notification
+ * @request_id: A Token ID of the corresponding Range request
+ * @request_status: status of current measurement session
+ * @last_in_batch: report policy (when not all responses are uploaded at once)
+ * @num_of_aps: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @ap: per-AP data
+ */
+struct iwl_tof_range_rsp_ntfy {
+	u8 request_id;
+	u8 request_status;
+	u8 last_in_batch;
+	u8 num_of_aps;
+	struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS];
+} __packed;
+
+#define IWL_MVM_TOF_MCSI_BUF_SIZE  (245)
+/**
+ * struct iwl_tof_mcsi_notif - used for debug
+ * @token: token ID for the current session
+ * @role: '0' - initiator, '1' - responder
+ * @reserved: reserved
+ * @initiator_bssid: initiator machine
+ * @responder_bssid: responder machine
+ * @mcsi_buffer: debug data
+ */
+struct iwl_tof_mcsi_notif {
+	u8 token;
+	u8 role;
+	__le16 reserved;
+	u8 initiator_bssid[ETH_ALEN];
+	u8 responder_bssid[ETH_ALEN];
+	u8 mcsi_buffer[IWL_MVM_TOF_MCSI_BUF_SIZE * 4];
+} __packed;
+
+/**
+ * struct iwl_tof_neighbor_report - notification with a neighbor report
+ * @bssid: BSSID of the AP which sent the report
+ * @request_token: same token as the corresponding request
+ * @status: response status
+ * @report_ie_len: the length of the response frame starting from the Element ID
+ * @data: the IEs
+ */
+struct iwl_tof_neighbor_report {
+	u8 bssid[ETH_ALEN];
+	u8 request_token;
+	u8 status;
+	__le16 report_ie_len;
+	u8 data[];
+} __packed;
+
+/**
+ * struct iwl_tof_range_abort_cmd
+ * @request_id: corresponds to a range request
+ * @reserved: reserved
+ */
+struct iwl_tof_range_abort_cmd {
+	__le32 sub_grp_cmd_id;
+	u8 request_id;
+	u8 reserved[3];
+} __packed;
+
+#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
new file mode 100644
index 0000000..514b861
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -0,0 +1,944 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __iwl_fw_api_tx_h__
+#define __iwl_fw_api_tx_h__
+
+/**
+ * enum iwl_tx_flags - bitmasks for tx_flags in TX command
+ * @TX_CMD_FLG_PROT_REQUIRE: use RTS or CTS-to-self to protect the frame
+ * @TX_CMD_FLG_WRITE_TX_POWER: update current tx power value in the mgmt frame
+ * @TX_CMD_FLG_ACK: expect ACK from receiving station
+ * @TX_CMD_FLG_STA_RATE: use RS table with initial index from the TX command.
+ *	Otherwise, use rate_n_flags from the TX command
+ * @TX_CMD_FLG_BAR: this frame is a BA request, immediate BAR is expected
+ *	Must set TX_CMD_FLG_ACK with this flag.
+ * @TX_CMD_FLG_TXOP_PROT: TXOP protection requested
+ * @TX_CMD_FLG_VHT_NDPA: mark frame is NDPA for VHT beamformer sequence
+ * @TX_CMD_FLG_HT_NDPA: mark frame is NDPA for HT beamformer sequence
+ * @TX_CMD_FLG_CSI_FDBK2HOST: mark to send feedback to host (only if good CRC)
+ * @TX_CMD_FLG_BT_PRIO_POS: the position of the BT priority (bit 11 is ignored
+ *	on old firmwares).
+ * @TX_CMD_FLG_BT_DIS: disable BT priority for this frame
+ * @TX_CMD_FLG_SEQ_CTL: set if FW should override the sequence control.
+ *	Should be set for mgmt, non-QOS data, mcast, bcast and in scan command
+ * @TX_CMD_FLG_MORE_FRAG: this frame is non-last MPDU
+ * @TX_CMD_FLG_TSF: FW should calculate and insert TSF in the frame
+ *	Should be set for beacons and probe responses
+ * @TX_CMD_FLG_CALIB: activate PA TX power calibrations
+ * @TX_CMD_FLG_KEEP_SEQ_CTL: if seq_ctl is set, don't increase inner seq count
+ * @TX_CMD_FLG_MH_PAD: driver inserted 2 byte padding after MAC header.
+ *	Should be set for 26/30 length MAC headers
+ * @TX_CMD_FLG_RESP_TO_DRV: zero this if the response should go only to FW
+ * @TX_CMD_FLG_TKIP_MIC_DONE: FW already performed TKIP MIC calculation
+ * @TX_CMD_FLG_DUR: disable duration overwriting used in PS-Poll Assoc-id
+ * @TX_CMD_FLG_FW_DROP: FW should mark frame to be dropped
+ * @TX_CMD_FLG_EXEC_PAPD: execute PAPD
+ * @TX_CMD_FLG_PAPD_TYPE: 0 for reference power, 1 for nominal power
+ * @TX_CMD_FLG_HCCA_CHUNK: mark start of TSPEC chunk
+ */
+enum iwl_tx_flags {
+	TX_CMD_FLG_PROT_REQUIRE		= BIT(0),
+	TX_CMD_FLG_WRITE_TX_POWER	= BIT(1),
+	TX_CMD_FLG_ACK			= BIT(3),
+	TX_CMD_FLG_STA_RATE		= BIT(4),
+	TX_CMD_FLG_BAR			= BIT(6),
+	TX_CMD_FLG_TXOP_PROT		= BIT(7),
+	TX_CMD_FLG_VHT_NDPA		= BIT(8),
+	TX_CMD_FLG_HT_NDPA		= BIT(9),
+	TX_CMD_FLG_CSI_FDBK2HOST	= BIT(10),
+	TX_CMD_FLG_BT_PRIO_POS		= 11,
+	TX_CMD_FLG_BT_DIS		= BIT(12),
+	TX_CMD_FLG_SEQ_CTL		= BIT(13),
+	TX_CMD_FLG_MORE_FRAG		= BIT(14),
+	TX_CMD_FLG_TSF			= BIT(16),
+	TX_CMD_FLG_CALIB		= BIT(17),
+	TX_CMD_FLG_KEEP_SEQ_CTL		= BIT(18),
+	TX_CMD_FLG_MH_PAD		= BIT(20),
+	TX_CMD_FLG_RESP_TO_DRV		= BIT(21),
+	TX_CMD_FLG_TKIP_MIC_DONE	= BIT(23),
+	TX_CMD_FLG_DUR			= BIT(25),
+	TX_CMD_FLG_FW_DROP		= BIT(26),
+	TX_CMD_FLG_EXEC_PAPD		= BIT(27),
+	TX_CMD_FLG_PAPD_TYPE		= BIT(28),
+	TX_CMD_FLG_HCCA_CHUNK		= BIT(31)
+}; /* TX_FLAGS_BITS_API_S_VER_1 */
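+
+/*
+ * Editorial sketch, not part of the upstream file: an illustration of how
+ * the bits above compose.  The exact flag policy is driver-specific; this
+ * only shows a plausible combination for a unicast, non-QoS mgmt frame.
+ */
+static inline __le32 iwl_example_mgmt_tx_flags(void)
+{
+	/* expect an ACK; let the FW own the sequence control.
+	 * TX_CMD_FLG_TSF would be added for beacons/probe responses.
+	 */
+	return cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_SEQ_CTL);
+}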
+
+/**
+ * enum iwl_tx_cmd_flags - bitmasks for tx_flags in TX command for 22000
+ * @IWL_TX_FLAGS_CMD_RATE: use rate from the TX command
+ * @IWL_TX_FLAGS_ENCRYPT_DIS: frame should not be encrypted, even if it belongs
+ *	to a secured STA
+ * @IWL_TX_FLAGS_HIGH_PRI: high priority frame (like EAPOL) - can affect rate
+ *	selection, retry limits and BT kill
+ */
+enum iwl_tx_cmd_flags {
+	IWL_TX_FLAGS_CMD_RATE		= BIT(0),
+	IWL_TX_FLAGS_ENCRYPT_DIS	= BIT(1),
+	IWL_TX_FLAGS_HIGH_PRI		= BIT(2),
+}; /* TX_FLAGS_BITS_API_S_VER_3 */
+
+/**
+ * enum iwl_tx_pm_timeouts - pm timeout values in TX command
+ * @PM_FRAME_NONE: no need to suspend sleep mode

+ * @PM_FRAME_NONE: no need to suspend sleep mode
+ * @PM_FRAME_MGMT: FW suspends sleep mode for 100 TU
+ * @PM_FRAME_ASSOC: FW suspends sleep mode for 10 sec
+ */
+enum iwl_tx_pm_timeouts {
+	PM_FRAME_NONE		= 0,
+	PM_FRAME_MGMT		= 2,
+	PM_FRAME_ASSOC		= 3,
+};
+
+#define TX_CMD_SEC_MSK			0x07
+#define TX_CMD_SEC_WEP_KEY_IDX_POS	6
+#define TX_CMD_SEC_WEP_KEY_IDX_MSK	0xc0
+
+/**
+ * enum iwl_tx_cmd_sec_ctrl - bitmasks for security control in TX command
+ * @TX_CMD_SEC_WEP: WEP encryption algorithm.
+ * @TX_CMD_SEC_CCM: CCM encryption algorithm.
+ * @TX_CMD_SEC_TKIP: TKIP encryption algorithm.
+ * @TX_CMD_SEC_EXT: extended cipher algorithm.
+ * @TX_CMD_SEC_GCMP: GCMP encryption algorithm.
+ * @TX_CMD_SEC_KEY128: set for 104 bits WEP key.
+ * @TX_CMD_SEC_KEY_FROM_TABLE: for a non-WEP key, set if the key should be taken
+ *	from the table instead of from the TX command.
+ *	If the key is taken from the key table its index should be given by the
+ *	first byte of the TX command key field.
+ */
+enum iwl_tx_cmd_sec_ctrl {
+	TX_CMD_SEC_WEP			= 0x01,
+	TX_CMD_SEC_CCM			= 0x02,
+	TX_CMD_SEC_TKIP			= 0x03,
+	TX_CMD_SEC_EXT			= 0x04,
+	TX_CMD_SEC_GCMP			= 0x05,
+	TX_CMD_SEC_KEY128		= 0x08,
+	TX_CMD_SEC_KEY_FROM_TABLE	= 0x10,
+};
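+
+/*
+ * Editorial sketch, not part of the upstream file: composing sec_ctl for a
+ * 104-bit WEP key at index @idx using the masks defined above.
+ */
+static inline u8 iwl_example_wep_sec_ctl(u8 idx)
+{
+	/* TX_CMD_SEC_KEY128 marks the longer (104-bit) WEP key */
+	return TX_CMD_SEC_WEP | TX_CMD_SEC_KEY128 |
+	       ((idx << TX_CMD_SEC_WEP_KEY_IDX_POS) &
+		TX_CMD_SEC_WEP_KEY_IDX_MSK);
+}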
+
+/*
+ * TX command Frame life time in us - to be written in pm_frame_timeout
+ */
+#define TX_CMD_LIFE_TIME_INFINITE	0xFFFFFFFF
+#define TX_CMD_LIFE_TIME_DEFAULT	2000000 /* 2000 ms */
+#define TX_CMD_LIFE_TIME_PROBE_RESP	40000 /* 40 ms */
+#define TX_CMD_LIFE_TIME_EXPIRED_FRAME	0
+
+/*
+ * TID for non QoS frames - to be written in tid_tspec
+ */
+#define IWL_TID_NON_QOS	IWL_MAX_TID_COUNT
+
+/*
+ * Limits on the retransmissions - to be written in {data,rts}_retry_limit
+ */
+#define IWL_DEFAULT_TX_RETRY			15
+#define IWL_MGMT_DFAULT_RETRY_LIMIT		3
+#define IWL_RTS_DFAULT_RETRY_LIMIT		60
+#define IWL_BAR_DFAULT_RETRY_LIMIT		60
+#define IWL_LOW_RETRY_LIMIT			7
+
+/**
+ * enum iwl_tx_offload_assist_flags_pos - bit positions for %iwl_tx_cmd offload_assist values
+ * @TX_CMD_OFFLD_IP_HDR: offset to start of IP header (in words)
+ *	from the MAC header end. For the normal case it is 4 words for SNAP.
+ *	Note: the tx_cmd, MAC header and pad are not counted in the offset.
+ *	This is used to help the offload in case there is tunneling such as
+ *	IPv6 in IPv4; in that case the IP header offset should point to the
+ *	inner IP header, and the IPv4 checksum of the outer header should be
+ *	calculated by the driver.
+ * @TX_CMD_OFFLD_L4_EN: enable TCP/UDP checksum
+ * @TX_CMD_OFFLD_L3_EN: enable IP header checksum
+ * @TX_CMD_OFFLD_MH_SIZE: size of the mac header in words. Includes the IV
+ *	field. Doesn't include the pad.
+ * @TX_CMD_OFFLD_PAD: mark 2-byte pad was inserted after the mac header for
+ *	alignment
+ * @TX_CMD_OFFLD_AMSDU: mark TX command is A-MSDU
+ */
+enum iwl_tx_offload_assist_flags_pos {
+	TX_CMD_OFFLD_IP_HDR =		0,
+	TX_CMD_OFFLD_L4_EN =		6,
+	TX_CMD_OFFLD_L3_EN =		7,
+	TX_CMD_OFFLD_MH_SIZE =		8,
+	TX_CMD_OFFLD_PAD =		13,
+	TX_CMD_OFFLD_AMSDU =		14,
+};
+
+#define IWL_TX_CMD_OFFLD_MH_MASK	0x1f
+#define IWL_TX_CMD_OFFLD_IP_HDR_MASK	0x3f
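+
+/*
+ * Editorial sketch, not part of the upstream file: building offload_assist
+ * for a frame with L3 and L4 checksum offload.  @mh_len is the MAC header
+ * length in bytes; treating the MH_SIZE field as 16-bit words is an
+ * assumption, consistent with the 0x1f mask (up to 62 bytes of header).
+ * The caller would convert the result with cpu_to_le16().
+ */
+static inline u16 iwl_example_offload_assist(unsigned int mh_len)
+{
+	return (((mh_len / 2) & IWL_TX_CMD_OFFLD_MH_MASK) <<
+			TX_CMD_OFFLD_MH_SIZE) |
+	       BIT(TX_CMD_OFFLD_L3_EN) | BIT(TX_CMD_OFFLD_L4_EN);
+}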
+
+/* TODO: complete documentation for try_cnt and btkill_cnt */
+/**
+ * struct iwl_tx_cmd - TX command struct to FW
+ * ( TX_CMD = 0x1c )
+ * @len: in bytes of the payload, see below for details
+ * @offload_assist: TX offload configuration
+ * @tx_flags: combination of TX_CMD_FLG_*
+ * @scratch: scratch buffer used by the device
+ * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
+ *	cleared. Combination of RATE_MCS_*
+ * @sta_id: index of destination station in FW station table
+ * @sec_ctl: security control, TX_CMD_SEC_*
+ * @initial_rate_index: index into the rate table for the initial TX attempt.
+ *	Applied if TX_CMD_FLG_STA_RATE_MSK is set, normally 0 for data frames.
+ * @reserved2: reserved
+ * @key: security key
+ * @reserved3: reserved
+ * @life_time: frame life time (usecs)
+ * @dram_lsb_ptr: Physical address of scratch area in the command (try_cnt +
+ *	btkill_cnt + reserved), first 32 bits. "0" disables usage.
+ * @dram_msb_ptr: upper bits of the scratch physical address
+ * @rts_retry_limit: max attempts for RTS
+ * @data_retry_limit: max attempts to send the data packet
+ * @tid_tspec: TID/tspec
+ * @pm_frame_timeout: PM TX frame timeout
+ * @reserved4: reserved
+ * @payload: payload (same as @hdr)
+ * @hdr: 802.11 header (same as @payload)
+ *
+ * The byte count (both len and next_frame_len) includes MAC header
+ * (24/26/30/32 bytes)
+ * + 2 bytes pad if 26/30 header size
+ * + 8 byte IV for CCM or TKIP (not used for WEP)
+ * + Data payload
+ * + 8-byte MIC (not used for CCM/WEP)
+ * It does not include post-MAC padding, i.e.,
+ * MIC (CCM) 8 bytes, ICV (WEP/TKIP/CKIP) 4 bytes, CRC 4 bytes.
+ * Range of len: 14-2342 bytes.
+ *
+ * After the struct fields the MAC header is placed, plus any padding,
+ * and then the actual payload.
+ */
+struct iwl_tx_cmd {
+	__le16 len;
+	__le16 offload_assist;
+	__le32 tx_flags;
+	struct {
+		u8 try_cnt;
+		u8 btkill_cnt;
+		__le16 reserved;
+	} scratch; /* DRAM_SCRATCH_API_U_VER_1 */
+	__le32 rate_n_flags;
+	u8 sta_id;
+	u8 sec_ctl;
+	u8 initial_rate_index;
+	u8 reserved2;
+	u8 key[16];
+	__le32 reserved3;
+	__le32 life_time;
+	__le32 dram_lsb_ptr;
+	u8 dram_msb_ptr;
+	u8 rts_retry_limit;
+	u8 data_retry_limit;
+	u8 tid_tspec;
+	__le16 pm_frame_timeout;
+	__le16 reserved4;
+	u8 payload[0];
+	struct ieee80211_hdr hdr[0];
+} __packed; /* TX_CMD_API_S_VER_6 */
+
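+/**
+ * struct iwl_dram_sec_info - security data kept for the FW in DRAM
+ * @pn_low: low 32 bits of the packet number (editorial description -
+ *	the upstream snapshot leaves this struct undocumented)
+ * @pn_high: high 16 bits of the packet number
+ * @aux_info: auxiliary info
+ */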
+struct iwl_dram_sec_info {
+	__le32 pn_low;
+	__le16 pn_high;
+	__le16 aux_info;
+} __packed; /* DRAM_SEC_INFO_API_S_VER_1 */
+
+/**
+ * struct iwl_tx_cmd_gen2 - TX command struct to FW for 22000 devices
+ * ( TX_CMD = 0x1c )
+ * @len: in bytes of the payload, see below for details
+ * @offload_assist: TX offload configuration
+ * @flags: combination of &enum iwl_tx_cmd_flags
+ * @dram_info: FW internal DRAM storage
+ * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
+ *	cleared. Combination of RATE_MCS_*
+ * @hdr: 802.11 header
+ */
+struct iwl_tx_cmd_gen2 {
+	__le16 len;
+	__le16 offload_assist;
+	__le32 flags;
+	struct iwl_dram_sec_info dram_info;
+	__le32 rate_n_flags;
+	struct ieee80211_hdr hdr[0];
+} __packed; /* TX_CMD_API_S_VER_7 */
+
+/**
+ * struct iwl_tx_cmd_gen3 - TX command struct to FW for 22560 devices
+ * ( TX_CMD = 0x1c )
+ * @len: in bytes of the payload, see below for details
+ * @flags: combination of &enum iwl_tx_cmd_flags
+ * @offload_assist: TX offload configuration
+ * @dram_info: FW internal DRAM storage
+ * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
+ *	cleared. Combination of RATE_MCS_*
+ * @ttl: time to live - packet lifetime limit; the FW should drop the frame
+ *	if this time has passed.
+ * @hdr: 802.11 header
+ */
+struct iwl_tx_cmd_gen3 {
+	__le16 len;
+	__le16 flags;
+	__le32 offload_assist;
+	struct iwl_dram_sec_info dram_info;
+	__le32 rate_n_flags;
+	__le64 ttl;
+	struct ieee80211_hdr hdr[0];
+} __packed; /* TX_CMD_API_S_VER_8 */
+
+/*
+ * TX response related data
+ */
+
+/*
+ * enum iwl_tx_status - status that is returned by the fw after attempts to Tx
+ * @TX_STATUS_SUCCESS:
+ * @TX_STATUS_DIRECT_DONE:
+ * @TX_STATUS_POSTPONE_DELAY:
+ * @TX_STATUS_POSTPONE_FEW_BYTES:
+ * @TX_STATUS_POSTPONE_BT_PRIO:
+ * @TX_STATUS_POSTPONE_QUIET_PERIOD:
+ * @TX_STATUS_POSTPONE_CALC_TTAK:
+ * @TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
+ * @TX_STATUS_FAIL_SHORT_LIMIT:
+ * @TX_STATUS_FAIL_LONG_LIMIT:
+ * @TX_STATUS_FAIL_UNDERRUN:
+ * @TX_STATUS_FAIL_DRAIN_FLOW:
+ * @TX_STATUS_FAIL_RFKILL_FLUSH:
+ * @TX_STATUS_FAIL_LIFE_EXPIRE:
+ * @TX_STATUS_FAIL_DEST_PS:
+ * @TX_STATUS_FAIL_HOST_ABORTED:
+ * @TX_STATUS_FAIL_BT_RETRY:
+ * @TX_STATUS_FAIL_STA_INVALID:
+ * @TX_STATUS_FAIL_FRAG_DROPPED:
+ * @TX_STATUS_FAIL_TID_DISABLE:
+ * @TX_STATUS_FAIL_FIFO_FLUSHED:
+ * @TX_STATUS_FAIL_SMALL_CF_POLL:
+ * @TX_STATUS_FAIL_FW_DROP:
+ * @TX_STATUS_FAIL_STA_COLOR_MISMATCH: mismatch between color of Tx cmd and
+ *	STA table
+ * @TX_STATUS_INTERNAL_ABORT:
+ * @TX_MODE_MSK:
+ * @TX_MODE_NO_BURST:
+ * @TX_MODE_IN_BURST_SEQ:
+ * @TX_MODE_FIRST_IN_BURST:
+ * @TX_QUEUE_NUM_MSK:
+ *
+ * Valid only if frame_count == 1
+ * TODO: complete documentation
+ */
+enum iwl_tx_status {
+	TX_STATUS_MSK = 0x000000ff,
+	TX_STATUS_SUCCESS = 0x01,
+	TX_STATUS_DIRECT_DONE = 0x02,
+	/* postpone TX */
+	TX_STATUS_POSTPONE_DELAY = 0x40,
+	TX_STATUS_POSTPONE_FEW_BYTES = 0x41,
+	TX_STATUS_POSTPONE_BT_PRIO = 0x42,
+	TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43,
+	TX_STATUS_POSTPONE_CALC_TTAK = 0x44,
+	/* abort TX */
+	TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81,
+	TX_STATUS_FAIL_SHORT_LIMIT = 0x82,
+	TX_STATUS_FAIL_LONG_LIMIT = 0x83,
+	TX_STATUS_FAIL_UNDERRUN = 0x84,
+	TX_STATUS_FAIL_DRAIN_FLOW = 0x85,
+	TX_STATUS_FAIL_RFKILL_FLUSH = 0x86,
+	TX_STATUS_FAIL_LIFE_EXPIRE = 0x87,
+	TX_STATUS_FAIL_DEST_PS = 0x88,
+	TX_STATUS_FAIL_HOST_ABORTED = 0x89,
+	TX_STATUS_FAIL_BT_RETRY = 0x8a,
+	TX_STATUS_FAIL_STA_INVALID = 0x8b,
+	TX_STATUS_FAIL_FRAG_DROPPED = 0x8c,
+	TX_STATUS_FAIL_TID_DISABLE = 0x8d,
+	TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e,
+	TX_STATUS_FAIL_SMALL_CF_POLL = 0x8f,
+	TX_STATUS_FAIL_FW_DROP = 0x90,
+	TX_STATUS_FAIL_STA_COLOR_MISMATCH = 0x91,
+	TX_STATUS_INTERNAL_ABORT = 0x92,
+	TX_MODE_MSK = 0x00000f00,
+	TX_MODE_NO_BURST = 0x00000000,
+	TX_MODE_IN_BURST_SEQ = 0x00000100,
+	TX_MODE_FIRST_IN_BURST = 0x00000200,
+	TX_QUEUE_NUM_MSK = 0x0001f000,
+	TX_NARROW_BW_MSK = 0x00060000,
+	TX_NARROW_BW_1DIV2 = 0x00020000,
+	TX_NARROW_BW_1DIV4 = 0x00040000,
+	TX_NARROW_BW_1DIV8 = 0x00060000,
+};
+
+/*
+ * enum iwl_tx_agg_status - TX aggregation status
+ * @AGG_TX_STATE_STATUS_MSK:
+ * @AGG_TX_STATE_TRANSMITTED:
+ * @AGG_TX_STATE_UNDERRUN:
+ * @AGG_TX_STATE_BT_PRIO:
+ * @AGG_TX_STATE_FEW_BYTES:
+ * @AGG_TX_STATE_ABORT:
+ * @AGG_TX_STATE_TX_ON_AIR_DROP: TX_ON_AIR signal drop without underrun or
+ *	BT detection
+ * @AGG_TX_STATE_LAST_SENT_TRY_CNT:
+ * @AGG_TX_STATE_LAST_SENT_BT_KILL:
+ * @AGG_TX_STATE_SCD_QUERY:
+ * @AGG_TX_STATE_TEST_BAD_CRC32:
+ * @AGG_TX_STATE_RESPONSE:
+ * @AGG_TX_STATE_DUMP_TX:
+ * @AGG_TX_STATE_DELAY_TX:
+ * @AGG_TX_STATE_TRY_CNT_MSK: Retry count for 1st frame in aggregation (retries
+ *	occur if tx failed for this frame when it was a member of a previous
+ *	aggregation block). If rate scaling is used, retry count indicates the
+ *	rate table entry used for all frames in the new agg.
+ * @AGG_TX_STATE_SEQ_NUM_MSK: Command ID and sequence number of Tx command for
+ *	this frame
+ *
+ * TODO: complete documentation
+ */
+enum iwl_tx_agg_status {
+	AGG_TX_STATE_STATUS_MSK = 0x00fff,
+	AGG_TX_STATE_TRANSMITTED = 0x000,
+	AGG_TX_STATE_UNDERRUN = 0x001,
+	AGG_TX_STATE_BT_PRIO = 0x002,
+	AGG_TX_STATE_FEW_BYTES = 0x004,
+	AGG_TX_STATE_ABORT = 0x008,
+	AGG_TX_STATE_TX_ON_AIR_DROP = 0x010,
+	AGG_TX_STATE_LAST_SENT_TRY_CNT = 0x020,
+	AGG_TX_STATE_LAST_SENT_BT_KILL = 0x040,
+	AGG_TX_STATE_SCD_QUERY = 0x080,
+	AGG_TX_STATE_TEST_BAD_CRC32 = 0x0100,
+	AGG_TX_STATE_RESPONSE = 0x1ff,
+	AGG_TX_STATE_DUMP_TX = 0x200,
+	AGG_TX_STATE_DELAY_TX = 0x400,
+	AGG_TX_STATE_TRY_CNT_POS = 12,
+	AGG_TX_STATE_TRY_CNT_MSK = 0xf << AGG_TX_STATE_TRY_CNT_POS,
+};
+
+/*
+ * The mask below describes a status where we are absolutely sure that the MPDU
+ * wasn't sent. For BA/Underrun we cannot be that sure. All we know is that
+ * we've written the bytes to the TXE, but we know nothing about what the
+ * DSP did.
+ */
+#define AGG_TX_STAT_FRAME_NOT_SENT (AGG_TX_STATE_FEW_BYTES | \
+				    AGG_TX_STATE_ABORT | \
+				    AGG_TX_STATE_SCD_QUERY)
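+
+/*
+ * Editorial example, not part of the upstream file: a consumer of the agg
+ * frame status words would test against this mask, e.g.
+ *
+ *	if (le16_to_cpu(frame_status) & AGG_TX_STAT_FRAME_NOT_SENT)
+ *		...the MPDU definitely never left the device...
+ */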
+
+/*
+ * REPLY_TX = 0x1c (response)
+ *
+ * This response may be in one of two slightly different formats, indicated
+ * by the frame_count field:
+ *
+ * 1)	No aggregation (frame_count == 1).  This reports Tx results for a single
+ *	frame. Multiple attempts, at various bit rates, may have been made for
+ *	this frame.
+ *
+ * 2)	Aggregation (frame_count > 1).  This reports Tx results for two or more
+ *	frames that used block-acknowledge.  All frames were transmitted at
+ *	the same rate. Rate scaling may have been used if the first frame in
+ *	this new agg block failed in previous agg block(s).
+ *
+ *	Note that, for aggregation, ACK (block-ack) status is not delivered
+ *	here; block-ack has not been received by the time the device records
+ *	this status.
+ *	This status relates to reasons the tx might have been blocked or aborted
+ *	within the device, rather than whether it was received successfully by
+ *	the destination station.
+ */
+
+/**
+ * struct agg_tx_status - per packet TX aggregation status
+ * @status: See &enum iwl_tx_agg_status
+ * @sequence: Sequence # for this frame's Tx cmd (not SSN!)
+ */
+struct agg_tx_status {
+	__le16 status;
+	__le16 sequence;
+} __packed;
+
+/*
+ * definitions for initial rate index field
+ * bits [3:0] initial rate index
+ * bits [6:4] rate table color, used for the initial rate
+ * bit-7 invalid rate indication
+ */
+#define TX_RES_INIT_RATE_INDEX_MSK 0x0f
+#define TX_RES_RATE_TABLE_COLOR_POS 4
+#define TX_RES_RATE_TABLE_COLOR_MSK 0x70
+#define TX_RES_INV_RATE_INDEX_MSK 0x80
+#define TX_RES_RATE_TABLE_COL_GET(_f) (((_f) & TX_RES_RATE_TABLE_COLOR_MSK) >>\
+				       TX_RES_RATE_TABLE_COLOR_POS)
+
+#define IWL_MVM_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f)
+#define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4)
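+
+/*
+ * Editorial example, not part of the upstream file: decoding the packed
+ * response fields.  With tlc_info = 0x25 the initial rate index is 5 and
+ * the rate table color is 2; with ra_tid = 0x53 the TID is 3 and the
+ * RA is 5.
+ */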
+
+/**
+ * struct iwl_mvm_tx_resp_v3 - notifies that fw is TXing a packet
+ * ( REPLY_TX = 0x1c )
+ * @frame_count: 1 no aggregation, >1 aggregation
+ * @bt_kill_count: num of times blocked by bluetooth (unused for agg)
+ * @failure_rts: num of failures due to unsuccessful RTS
+ * @failure_frame: num failures due to no ACK (unused for agg)
+ * @initial_rate: for non-agg: rate of the successful Tx. For agg: rate of the
+ *	Tx of all the batch. RATE_MCS_*
+ * @wireless_media_time: for non-agg: RTS + CTS + frame tx attempts time + ACK.
+ *	for agg: RTS + CTS + aggregation tx time + block-ack time.
+ *	in usec.
+ * @pa_status: tx power info
+ * @pa_integ_res_a: tx power info
+ * @pa_integ_res_b: tx power info
+ * @pa_integ_res_c: tx power info
+ * @measurement_req_id: tx power info
+ * @reduced_tpc: transmit power reduction used
+ * @reserved: reserved
+ * @tfd_info: TFD information set by the FH
+ * @seq_ctl: sequence control from the Tx cmd
+ * @byte_cnt: byte count from the Tx cmd
+ * @tlc_info: TLC rate info
+ * @ra_tid: bits [3:0] = ra, bits [7:4] = tid
+ * @frame_ctrl: frame control
+ * @status: for non-agg:  frame status TX_STATUS_*
+ *	for agg: status of 1st frame, AGG_TX_STATE_*; other frame status fields
+ *	follow this one, up to frame_count. Length in @frame_count.
+ *
+ * After the array of statuses comes the SSN of the SCD. Look at
+ * %iwl_mvm_get_scd_ssn for more details.
+ */
+struct iwl_mvm_tx_resp_v3 {
+	u8 frame_count;
+	u8 bt_kill_count;
+	u8 failure_rts;
+	u8 failure_frame;
+	__le32 initial_rate;
+	__le16 wireless_media_time;
+
+	u8 pa_status;
+	u8 pa_integ_res_a[3];
+	u8 pa_integ_res_b[3];
+	u8 pa_integ_res_c[3];
+	__le16 measurement_req_id;
+	u8 reduced_tpc;
+	u8 reserved;
+
+	__le32 tfd_info;
+	__le16 seq_ctl;
+	__le16 byte_cnt;
+	u8 tlc_info;
+	u8 ra_tid;
+	__le16 frame_ctrl;
+	struct agg_tx_status status[];
+} __packed; /* TX_RSP_API_S_VER_3 */
+
+/**
+ * struct iwl_mvm_tx_resp - notifies that fw is TXing a packet
+ * ( REPLY_TX = 0x1c )
+ * @frame_count: 1 no aggregation, >1 aggregation
+ * @bt_kill_count: num of times blocked by bluetooth (unused for agg)
+ * @failure_rts: num of failures due to unsuccessful RTS
+ * @failure_frame: num failures due to no ACK (unused for agg)
+ * @initial_rate: for non-agg: rate of the successful Tx. For agg: rate of the
+ *	Tx of all the batch. RATE_MCS_*
+ * @wireless_media_time: for non-agg: RTS + CTS + frame tx attempts time + ACK.
+ *	for agg: RTS + CTS + aggregation tx time + block-ack time.
+ *	in usec.
+ * @pa_status: tx power info
+ * @pa_integ_res_a: tx power info
+ * @pa_integ_res_b: tx power info
+ * @pa_integ_res_c: tx power info
+ * @measurement_req_id: tx power info
+ * @reduced_tpc: transmit power reduction used
+ * @reserved: reserved
+ * @tfd_info: TFD information set by the FH
+ * @seq_ctl: sequence control from the Tx cmd
+ * @byte_cnt: byte count from the Tx cmd
+ * @tlc_info: TLC rate info
+ * @ra_tid: bits [3:0] = ra, bits [7:4] = tid
+ * @frame_ctrl: frame control
+ * @tx_queue: TX queue for this response
+ * @reserved2: reserved for padding/alignment
+ * @status: for non-agg:  frame status TX_STATUS_*
+ *	For version 6, no TX response is received for aggregation at all.
+ *
+ * After the array of statuses comes the SSN of the SCD. Look at
+ * %iwl_mvm_get_scd_ssn for more details.
+ */
+struct iwl_mvm_tx_resp {
+	u8 frame_count;
+	u8 bt_kill_count;
+	u8 failure_rts;
+	u8 failure_frame;
+	__le32 initial_rate;
+	__le16 wireless_media_time;
+
+	u8 pa_status;
+	u8 pa_integ_res_a[3];
+	u8 pa_integ_res_b[3];
+	u8 pa_integ_res_c[3];
+	__le16 measurement_req_id;
+	u8 reduced_tpc;
+	u8 reserved;
+
+	__le32 tfd_info;
+	__le16 seq_ctl;
+	__le16 byte_cnt;
+	u8 tlc_info;
+	u8 ra_tid;
+	__le16 frame_ctrl;
+	__le16 tx_queue;
+	__le16 reserved2;
+	struct agg_tx_status status;
+} __packed; /* TX_RSP_API_S_VER_6 */
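+
+/*
+ * Editorial example, not part of the upstream file: extracting the frame
+ * status from a non-aggregation response, assuming @tx_resp points at the
+ * notification payload:
+ *
+ *	u16 status = le16_to_cpu(tx_resp->status.status) & TX_STATUS_MSK;
+ */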
+
+/**
+ * struct iwl_mvm_ba_notif - notifies about reception of BA
+ * ( BA_NOTIF = 0xc5 )
+ * @sta_addr: MAC address
+ * @reserved: reserved
+ * @sta_id: Index of recipient (BA-sending) station in fw's station table
+ * @tid: tid of the session
+ * @seq_ctl: sequence control field
+ * @bitmap: the bitmap of the BA notification as seen in the air
+ * @scd_flow: the tx queue this BA relates to
+ * @scd_ssn: the index of the last contiguously sent packet
+ * @txed: number of Txed frames in this batch
+ * @txed_2_done: number of Acked frames in this batch
+ * @reduced_txp: power reduced according to TPC. This is the actual value and
+ *	not a copy from the LQ command. Thus, if a rate other than the first
+ *	was used for Tx-ing, the FW sets this value to 0.
+ * @reserved1: reserved
+ */
+struct iwl_mvm_ba_notif {
+	u8 sta_addr[ETH_ALEN];
+	__le16 reserved;
+
+	u8 sta_id;
+	u8 tid;
+	__le16 seq_ctl;
+	__le64 bitmap;
+	__le16 scd_flow;
+	__le16 scd_ssn;
+	u8 txed;
+	u8 txed_2_done;
+	u8 reduced_txp;
+	u8 reserved1;
+} __packed;
+
+/**
+ * struct iwl_mvm_compressed_ba_tfd - progress of a TFD queue
+ * @q_num: TFD queue number
+ * @tfd_index: Index of first un-acked frame in the TFD queue
+ * @scd_queue: For debug only - the physical queue the TFD queue is bound to
+ * @tid: TID of the queue (0-7)
+ * @reserved: reserved for alignment
+ */
+struct iwl_mvm_compressed_ba_tfd {
+	__le16 q_num;
+	__le16 tfd_index;
+	u8 scd_queue;
+	u8 tid;
+	u8 reserved[2];
+} __packed; /* COMPRESSED_BA_TFD_API_S_VER_1 */
+
+/**
+ * struct iwl_mvm_compressed_ba_ratid - progress of a RA TID queue
+ * @q_num: RA TID queue number
+ * @tid: TID of the queue
+ * @ssn: BA window current SSN
+ */
+struct iwl_mvm_compressed_ba_ratid {
+	u8 q_num;
+	u8 tid;
+	__le16 ssn;
+} __packed; /* COMPRESSED_BA_RATID_API_S_VER_1 */
+
+/*
+ * enum iwl_mvm_ba_resp_flags - TX aggregation status
+ * @IWL_MVM_BA_RESP_TX_AGG: generated due to BA
+ * @IWL_MVM_BA_RESP_TX_BAR: generated due to BA after BAR
+ * @IWL_MVM_BA_RESP_TX_AGG_FAIL: aggregation didn't receive BA
+ * @IWL_MVM_BA_RESP_TX_UNDERRUN: aggregation got underrun
+ * @IWL_MVM_BA_RESP_TX_BT_KILL: aggregation got BT-kill
+ * @IWL_MVM_BA_RESP_TX_DSP_TIMEOUT: aggregation didn't finish within the
+ *	expected time
+ */
+enum iwl_mvm_ba_resp_flags {
+	IWL_MVM_BA_RESP_TX_AGG,
+	IWL_MVM_BA_RESP_TX_BAR,
+	IWL_MVM_BA_RESP_TX_AGG_FAIL,
+	IWL_MVM_BA_RESP_TX_UNDERRUN,
+	IWL_MVM_BA_RESP_TX_BT_KILL,
+	IWL_MVM_BA_RESP_TX_DSP_TIMEOUT
+};
+
+/**
+ * struct iwl_mvm_compressed_ba_notif - notifies about reception of BA
+ * ( BA_NOTIF = 0xc5 )
+ * @flags: status flag, see the &iwl_mvm_ba_resp_flags
+ * @sta_id: Index of recipient (BA-sending) station in fw's station table
+ * @reduced_txp: power reduced according to TPC. This is the actual value and
+ *	not a copy from the LQ command. Thus, if a rate other than the first
+ *	was used for Tx-ing, the FW sets this value to 0.
+ * @tlc_rate_info: TLC rate info, initial rate index, TLC table color
+ * @retry_cnt: retry count
+ * @query_byte_cnt: SCD query byte count
+ * @query_frame_cnt: SCD query frame count
+ * @txed: number of frames sent in the aggregation (all-TIDs)
+ * @done: number of frames that were Acked by the BA (all-TIDs)
+ * @reserved: reserved (for alignment)
+ * @wireless_time: Wireless-media time
+ * @tx_rate: the rate the aggregation was sent at
+ * @tfd_cnt: number of TFD-Q elements
+ * @ra_tid_cnt: number of RATID-Q elements
+ * @tfd: array of TFD queue status updates. See &iwl_mvm_compressed_ba_tfd
+ *	for details.
+ * @ra_tid: array of RA-TID queue status updates. For debug purposes only. See
+ *	&iwl_mvm_compressed_ba_ratid for more details.
+ */
+struct iwl_mvm_compressed_ba_notif {
+	__le32 flags;
+	u8 sta_id;
+	u8 reduced_txp;
+	u8 tlc_rate_info;
+	u8 retry_cnt;
+	__le32 query_byte_cnt;
+	__le16 query_frame_cnt;
+	__le16 txed;
+	__le16 done;
+	__le16 reserved;
+	__le32 wireless_time;
+	__le32 tx_rate;
+	__le16 tfd_cnt;
+	__le16 ra_tid_cnt;
+	struct iwl_mvm_compressed_ba_tfd tfd[1];
+	struct iwl_mvm_compressed_ba_ratid ra_tid[0];
+} __packed; /* COMPRESSED_BA_RES_API_S_VER_4 */
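+
+/*
+ * Editorial example, not part of the upstream file: walking the
+ * variable-length TFD array, assuming @ba_res points at a received
+ * notification and handle_tfd_progress() is a hypothetical helper:
+ *
+ *	for (i = 0; i < le16_to_cpu(ba_res->tfd_cnt); i++)
+ *		handle_tfd_progress(&ba_res->tfd[i]);
+ */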
+
+/**
+ * struct iwl_mac_beacon_cmd_v6 - beacon template command
+ * @tx: the tx commands associated with the beacon frame
+ * @template_id: currently equal to the mac context id of the corresponding
+ *  mac.
+ * @tim_idx: the offset of the tim IE in the beacon
+ * @tim_size: the length of the tim IE
+ * @frame: the template of the beacon frame
+ */
+struct iwl_mac_beacon_cmd_v6 {
+	struct iwl_tx_cmd tx;
+	__le32 template_id;
+	__le32 tim_idx;
+	__le32 tim_size;
+	struct ieee80211_hdr frame[0];
+} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_6 */
+
+/**
+ * struct iwl_mac_beacon_cmd_v7 - beacon template command with offloaded CSA
+ * @tx: the tx commands associated with the beacon frame
+ * @template_id: currently equal to the mac context id of the corresponding
+ *  mac.
+ * @tim_idx: the offset of the tim IE in the beacon
+ * @tim_size: the length of the tim IE
+ * @ecsa_offset: offset to the ECSA IE if present
+ * @csa_offset: offset to the CSA IE if present
+ * @frame: the template of the beacon frame
+ */
+struct iwl_mac_beacon_cmd_v7 {
+	struct iwl_tx_cmd tx;
+	__le32 template_id;
+	__le32 tim_idx;
+	__le32 tim_size;
+	__le32 ecsa_offset;
+	__le32 csa_offset;
+	struct ieee80211_hdr frame[0];
+} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_7 */
+
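+/**
+ * enum iwl_mac_beacon_flags - flags for the high byte of the beacon
+ *	template command's @flags field (editorial doc - the upstream
+ *	snapshot leaves this enum undocumented)
+ * @IWL_MAC_BEACON_CCK: use CCK for this beacon
+ * @IWL_MAC_BEACON_ANT_A: transmit the beacon on antenna A
+ * @IWL_MAC_BEACON_ANT_B: transmit the beacon on antenna B
+ * @IWL_MAC_BEACON_ANT_C: transmit the beacon on antenna C
+ */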
+enum iwl_mac_beacon_flags {
+	IWL_MAC_BEACON_CCK	= BIT(8),
+	IWL_MAC_BEACON_ANT_A	= BIT(9),
+	IWL_MAC_BEACON_ANT_B	= BIT(10),
+	IWL_MAC_BEACON_ANT_C	= BIT(11),
+};
+
+/**
+ * struct iwl_mac_beacon_cmd - beacon template command with offloaded CSA
+ * @byte_cnt: byte count of the beacon frame.
+ * @flags: least significant byte for rate code. The most significant byte
+ *	is &enum iwl_mac_beacon_flags.
+ * @reserved: reserved
+ * @template_id: currently equal to the mac context id of the corresponding mac.
+ * @tim_idx: the offset of the tim IE in the beacon
+ * @tim_size: the length of the tim IE
+ * @ecsa_offset: offset to the ECSA IE if present
+ * @csa_offset: offset to the CSA IE if present
+ * @frame: the template of the beacon frame
+ */
+struct iwl_mac_beacon_cmd {
+	__le16 byte_cnt;
+	__le16 flags;
+	__le64 reserved;
+	__le32 template_id;
+	__le32 tim_idx;
+	__le32 tim_size;
+	__le32 ecsa_offset;
+	__le32 csa_offset;
+	struct ieee80211_hdr frame[0];
+} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_9 */
+
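+/**
+ * struct iwl_beacon_notif - notifies about beacon transmission (editorial
+ *	doc - the upstream snapshot leaves this struct undocumented; fields
+ *	mirror &struct iwl_extended_beacon_notif below, minus @gp2)
+ * @beacon_notify_hdr: tx response command associated with the beacon
+ * @tsf: last beacon tsf
+ * @ibss_mgr_status: whether IBSS is manager
+ */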
+struct iwl_beacon_notif {
+	struct iwl_mvm_tx_resp beacon_notify_hdr;
+	__le64 tsf;
+	__le32 ibss_mgr_status;
+} __packed;
+
+/**
+ * struct iwl_extended_beacon_notif - notifies about beacon transmission
+ * @beacon_notify_hdr: tx response command associated with the beacon
+ * @tsf: last beacon tsf
+ * @ibss_mgr_status: whether IBSS is manager
+ * @gp2: last beacon time in gp2
+ */
+struct iwl_extended_beacon_notif {
+	struct iwl_mvm_tx_resp beacon_notify_hdr;
+	__le64 tsf;
+	__le32 ibss_mgr_status;
+	__le32 gp2;
+} __packed; /* BEACON_NTFY_API_S_VER_5 */
+
+/**
+ * enum iwl_dump_control - dump (flush) control flags
+ * @DUMP_TX_FIFO_FLUSH: Dump MSDUs until the FIFO is empty
+ *	and the TFD queues are empty.
+ */
+enum iwl_dump_control {
+	DUMP_TX_FIFO_FLUSH	= BIT(1),
+};
+
+/**
+ * struct iwl_tx_path_flush_cmd_v1 - queue/FIFO flush command
+ * @queues_ctl: bitmap of queues to flush
+ * @flush_ctl: control flags
+ * @reserved: reserved
+ */
+struct iwl_tx_path_flush_cmd_v1 {
+	__le32 queues_ctl;
+	__le16 flush_ctl;
+	__le16 reserved;
+} __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_tx_path_flush_cmd - queue/FIFO flush command
+ * @sta_id: station ID to flush
+ * @tid_mask: TID mask to flush
+ * @reserved: reserved
+ */
+struct iwl_tx_path_flush_cmd {
+	__le32 sta_id;
+	__le16 tid_mask;
+	__le16 reserved;
+} __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_2 */
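+
+/*
+ * Editorial example, not part of the upstream file: flushing every TID of
+ * station 2 with the v2 command would use
+ *
+ *	struct iwl_tx_path_flush_cmd cmd = {
+ *		.sta_id = cpu_to_le32(2),
+ *		.tid_mask = cpu_to_le16(0xffff),
+ *	};
+ */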
+
+/* Available options for the SCD_QUEUE_CFG HCMD */
+enum iwl_scd_cfg_actions {
+	SCD_CFG_DISABLE_QUEUE		= 0x0,
+	SCD_CFG_ENABLE_QUEUE		= 0x1,
+	SCD_CFG_UPDATE_QUEUE_TID	= 0x2,
+};
+
+/**
+ * struct iwl_scd_txq_cfg_cmd - New txq hw scheduler config command
+ * @token: unused
+ * @sta_id: station id
+ * @tid: TID
+ * @scd_queue: scheduler queue to configure
+ * @action: 0 to disable the queue, 1 to enable it, 2 to change the txq's
+ *	TID owner; value is one of &enum iwl_scd_cfg_actions
+ * @aggregate: 1 aggregated queue, 0 otherwise
+ * @tx_fifo: &enum iwl_mvm_tx_fifo
+ * @window: BA window size
+ * @ssn: SSN for the BA agreement
+ * @reserved: reserved
+ */
+struct iwl_scd_txq_cfg_cmd {
+	u8 token;
+	u8 sta_id;
+	u8 tid;
+	u8 scd_queue;
+	u8 action;
+	u8 aggregate;
+	u8 tx_fifo;
+	u8 window;
+	__le16 ssn;
+	__le16 reserved;
+} __packed; /* SCD_QUEUE_CFG_CMD_API_S_VER_1 */
+
+/**
+ * struct iwl_scd_txq_cfg_rsp - response to the SCD queue config command
+ * @token: taken from the command
+ * @sta_id: station id from the command
+ * @tid: tid from the command
+ * @scd_queue: scd_queue from the command
+ */
+struct iwl_scd_txq_cfg_rsp {
+	u8 token;
+	u8 sta_id;
+	u8 tid;
+	u8 scd_queue;
+} __packed; /* SCD_QUEUE_CFG_RSP_API_S_VER_1 */
+
+#endif /* __iwl_fw_api_tx_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
new file mode 100644
index 0000000..6ac240b
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/txq.h
@@ -0,0 +1,169 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_fw_api_txq_h__
+#define __iwl_fw_api_txq_h__
+
+/*
+ * DQA queue numbers
+ *
+ * @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW
+ * @IWL_MVM_DQA_AUX_QUEUE: a queue reserved for aux frames
+ * @IWL_MVM_DQA_P2P_DEVICE_QUEUE: a queue reserved for P2P device frames
+ * @IWL_MVM_DQA_INJECT_MONITOR_QUEUE: a queue reserved for injection using
+ *	monitor mode. Note this queue is the same as the queue for P2P device
+ *	but we can't have active monitor mode along with P2P device anyway.
+ * @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames
+ * @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure
+ *	that we are never left without the possibility to connect to an AP.
+ * @IWL_MVM_DQA_MIN_MGMT_QUEUE: first TXQ in pool for MGMT and non-QOS frames.
+ *	Each MGMT queue is mapped to a single STA. MGMT frames are frames
+ *	that return true on ieee80211_is_mgmt()
+ * @IWL_MVM_DQA_MAX_MGMT_QUEUE: last TXQ in pool for MGMT frames
+ * @IWL_MVM_DQA_AP_PROBE_RESP_QUEUE: a queue reserved for P2P GO/SoftAP probe
+ *	responses
+ * @IWL_MVM_DQA_MIN_DATA_QUEUE: first TXQ in pool for DATA frames.
+ *	DATA frames are intended for !ieee80211_is_mgmt() frames, but if
+ *	the MGMT TXQ pool is exhausted, mgmt frames can be sent on DATA queues
+ *	as well
+ * @IWL_MVM_DQA_MAX_DATA_QUEUE: last TXQ in pool for DATA frames
+ */
+enum iwl_mvm_dqa_txq {
+	IWL_MVM_DQA_CMD_QUEUE = 0,
+	IWL_MVM_DQA_AUX_QUEUE = 1,
+	IWL_MVM_DQA_P2P_DEVICE_QUEUE = 2,
+	IWL_MVM_DQA_INJECT_MONITOR_QUEUE = 2,
+	IWL_MVM_DQA_GCAST_QUEUE = 3,
+	IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4,
+	IWL_MVM_DQA_MIN_MGMT_QUEUE = 5,
+	IWL_MVM_DQA_MAX_MGMT_QUEUE = 8,
+	IWL_MVM_DQA_AP_PROBE_RESP_QUEUE = 9,
+	IWL_MVM_DQA_MIN_DATA_QUEUE = 10,
+	IWL_MVM_DQA_MAX_DATA_QUEUE = 31,
+};
+
+enum iwl_mvm_tx_fifo {
+	IWL_MVM_TX_FIFO_BK = 0,
+	IWL_MVM_TX_FIFO_BE,
+	IWL_MVM_TX_FIFO_VI,
+	IWL_MVM_TX_FIFO_VO,
+	IWL_MVM_TX_FIFO_MCAST = 5,
+	IWL_MVM_TX_FIFO_CMD = 7,
+};
+
+enum iwl_gen2_tx_fifo {
+	IWL_GEN2_TX_FIFO_CMD = 0,
+	IWL_GEN2_EDCA_TX_FIFO_BK,
+	IWL_GEN2_EDCA_TX_FIFO_BE,
+	IWL_GEN2_EDCA_TX_FIFO_VI,
+	IWL_GEN2_EDCA_TX_FIFO_VO,
+	IWL_GEN2_TRIG_TX_FIFO_BK,
+	IWL_GEN2_TRIG_TX_FIFO_BE,
+	IWL_GEN2_TRIG_TX_FIFO_VI,
+	IWL_GEN2_TRIG_TX_FIFO_VO,
+};
+
+/**
+ * enum iwl_tx_queue_cfg_actions - TXQ config options
+ * @TX_QUEUE_CFG_ENABLE_QUEUE: enable a queue
+ * @TX_QUEUE_CFG_TFD_SHORT_FORMAT: use short TFD format
+ */
+enum iwl_tx_queue_cfg_actions {
+	TX_QUEUE_CFG_ENABLE_QUEUE		= BIT(0),
+	TX_QUEUE_CFG_TFD_SHORT_FORMAT		= BIT(1),
+};
+
+#define IWL_DEFAULT_QUEUE_SIZE 256
+#define IWL_MGMT_QUEUE_SIZE 16
+/**
+ * struct iwl_tx_queue_cfg_cmd - txq hw scheduler config command
+ * @sta_id: station id
+ * @tid: tid of the queue
+ * @flags: see &enum iwl_tx_queue_cfg_actions
+ * @cb_size: size of the TFD cyclic buffer, encoded as log2(number of
+ *	TFDs) - 3. Minimum value 0 (8 TFDs), maximum value 5 (256 TFDs)
+ * @byte_cnt_addr: address of byte count table
+ * @tfdq_addr: address of TFD circular buffer
+ */
+struct iwl_tx_queue_cfg_cmd {
+	u8 sta_id;
+	u8 tid;
+	__le16 flags;
+	__le32 cb_size;
+	__le64 byte_cnt_addr;
+	__le64 tfdq_addr;
+} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_2 */
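+
+/*
+ * Editorial sketch, not part of the upstream file: encoding cb_size as
+ * log2(number of TFDs) - 3, so IWL_DEFAULT_QUEUE_SIZE (256) encodes as 5
+ * and IWL_MGMT_QUEUE_SIZE (16) as 1.  Assumes linux/log2.h for ilog2().
+ */
+static inline __le32 iwl_example_cb_size(unsigned int n_tfds)
+{
+	return cpu_to_le32(ilog2(n_tfds) - 3);
+}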
+
+/**
+ * struct iwl_tx_queue_cfg_rsp - response to txq hw scheduler config
+ * @queue_number: queue number assigned to this RA/TID
+ * @flags: set on failure
+ * @write_pointer: initial value for write pointer
+ * @reserved: reserved
+ */
+struct iwl_tx_queue_cfg_rsp {
+	__le16 queue_number;
+	__le16 flags;
+	__le16 write_pointer;
+	__le16 reserved;
+} __packed; /* TX_QUEUE_CFG_RSP_API_S_VER_2 */
+
+#endif /* __iwl_fw_api_txq_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
new file mode 100644
index 0000000..a31a42e
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -0,0 +1,1217 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program;
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/devcoredump.h>
+#include "iwl-drv.h"
+#include "runtime.h"
+#include "dbg.h"
+#include "debugfs.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
+#include "iwl-csr.h"
+
+/**
+ * struct iwl_fw_dump_ptrs - set of pointers needed for the fw-error-dump
+ *
+ * @fwrt_ptr: pointer to the buffer coming from fwrt
+ * @trans_ptr: pointer to struct %iwl_trans_dump_data which contains the
+ *	transport's data, including its own length.
+ * @fwrt_len: length of the valid data in fwrt_ptr
+ */
+struct iwl_fw_dump_ptrs {
+	struct iwl_trans_dump_data *trans_ptr;
+	void *fwrt_ptr;
+	u32 fwrt_len;
+};
+
+#define RADIO_REG_MAX_READ 0x2ad
+static void iwl_read_radio_regs(struct iwl_fw_runtime *fwrt,
+				struct iwl_fw_error_dump_data **dump_data)
+{
+	u8 *pos = (void *)(*dump_data)->data;
+	unsigned long flags;
+	int i;
+
+	IWL_DEBUG_INFO(fwrt, "WRT radio registers dump\n");
+
+	if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
+		return;
+
+	(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RADIO_REG);
+	(*dump_data)->len = cpu_to_le32(RADIO_REG_MAX_READ);
+
+	for (i = 0; i < RADIO_REG_MAX_READ; i++) {
+		u32 rd_cmd = RADIO_RSP_RD_CMD;
+
+		rd_cmd |= i << RADIO_RSP_ADDR_POS;
+		iwl_write_prph_no_grab(fwrt->trans, RSP_RADIO_CMD, rd_cmd);
+		*pos = (u8)iwl_read_prph_no_grab(fwrt->trans, RSP_RADIO_RDDAT);
+
+		pos++;
+	}
+
+	*dump_data = iwl_fw_error_next_data(*dump_data);
+
+	iwl_trans_release_nic_access(fwrt->trans, &flags);
+}
+
+static void iwl_fwrt_dump_rxf(struct iwl_fw_runtime *fwrt,
+			      struct iwl_fw_error_dump_data **dump_data,
+			      int size, u32 offset, int fifo_num)
+{
+	struct iwl_fw_error_dump_fifo *fifo_hdr;
+	u32 *fifo_data;
+	u32 fifo_len;
+	int i;
+
+	fifo_hdr = (void *)(*dump_data)->data;
+	fifo_data = (void *)fifo_hdr->data;
+	fifo_len = size;
+
+	/* No need to try to read the data if the length is 0 */
+	if (fifo_len == 0)
+		return;
+
+	/* Add a TLV for the RXF */
+	(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RXF);
+	(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
+
+	fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
+	fifo_hdr->available_bytes =
+		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
+						RXF_RD_D_SPACE + offset));
+	fifo_hdr->wr_ptr =
+		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
+						RXF_RD_WR_PTR + offset));
+	fifo_hdr->rd_ptr =
+		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
+						RXF_RD_RD_PTR + offset));
+	fifo_hdr->fence_ptr =
+		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
+						RXF_RD_FENCE_PTR + offset));
+	fifo_hdr->fence_mode =
+		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
+						RXF_SET_FENCE_MODE + offset));
+
+	/* Lock fence */
+	iwl_trans_write_prph(fwrt->trans, RXF_SET_FENCE_MODE + offset, 0x1);
+	/* Set fence pointer to the same place as the WR pointer */
+	iwl_trans_write_prph(fwrt->trans, RXF_LD_WR2FENCE + offset, 0x1);
+	/* Set fence offset */
+	iwl_trans_write_prph(fwrt->trans,
+			     RXF_LD_FENCE_OFFSET_ADDR + offset, 0x0);
+
+	/* Read FIFO */
+	fifo_len /= sizeof(u32); /* Size in DWORDS */
+	for (i = 0; i < fifo_len; i++)
+		fifo_data[i] = iwl_trans_read_prph(fwrt->trans,
+						 RXF_FIFO_RD_FENCE_INC +
+						 offset);
+	*dump_data = iwl_fw_error_next_data(*dump_data);
+}
+
+static void iwl_fwrt_dump_txf(struct iwl_fw_runtime *fwrt,
+			      struct iwl_fw_error_dump_data **dump_data,
+			      int size, u32 offset, int fifo_num)
+{
+	struct iwl_fw_error_dump_fifo *fifo_hdr;
+	u32 *fifo_data;
+	u32 fifo_len;
+	int i;
+
+	fifo_hdr = (void *)(*dump_data)->data;
+	fifo_data = (void *)fifo_hdr->data;
+	fifo_len = size;
+
+	/* No need to try to read the data if the length is 0 */
+	if (fifo_len == 0)
+		return;
+
+	/* Add a TLV for the FIFO */
+	(*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXF);
+	(*dump_data)->len = cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
+
+	fifo_hdr->fifo_num = cpu_to_le32(fifo_num);
+	fifo_hdr->available_bytes =
+		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
+						TXF_FIFO_ITEM_CNT + offset));
+	fifo_hdr->wr_ptr =
+		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
+						TXF_WR_PTR + offset));
+	fifo_hdr->rd_ptr =
+		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
+						TXF_RD_PTR + offset));
+	fifo_hdr->fence_ptr =
+		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
+						TXF_FENCE_PTR + offset));
+	fifo_hdr->fence_mode =
+		cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
+						TXF_LOCK_FENCE + offset));
+
+	/* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
+	iwl_trans_write_prph(fwrt->trans, TXF_READ_MODIFY_ADDR + offset,
+			     TXF_WR_PTR + offset);
+
+	/* Dummy-read to advance the read pointer to the head */
+	iwl_trans_read_prph(fwrt->trans, TXF_READ_MODIFY_DATA + offset);
+
+	/* Read FIFO */
+	fifo_len /= sizeof(u32); /* Size in DWORDS */
+	for (i = 0; i < fifo_len; i++)
+		fifo_data[i] = iwl_trans_read_prph(fwrt->trans,
+						  TXF_READ_MODIFY_DATA +
+						  offset);
+	*dump_data = iwl_fw_error_next_data(*dump_data);
+}
+
+static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
+			      struct iwl_fw_error_dump_data **dump_data)
+{
+	struct iwl_fw_error_dump_fifo *fifo_hdr;
+	struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg;
+	u32 *fifo_data;
+	u32 fifo_len;
+	unsigned long flags;
+	int i, j;
+
+	IWL_DEBUG_INFO(fwrt, "WRT FIFO dump\n");
+
+	if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
+		return;
+
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) {
+		/* Pull RXF1 */
+		iwl_fwrt_dump_rxf(fwrt, dump_data,
+				  cfg->lmac[0].rxfifo1_size, 0, 0);
+		/* Pull RXF2 */
+		iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size,
+				  RXF_DIFF_FROM_PREV, 1);
+		/* Pull LMAC2 RXF1 */
+		if (fwrt->smem_cfg.num_lmacs > 1)
+			iwl_fwrt_dump_rxf(fwrt, dump_data,
+					  cfg->lmac[1].rxfifo1_size,
+					  LMAC2_PRPH_OFFSET, 2);
+	}
+
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) {
+		/* Pull TXF data from LMAC1 */
+		for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
+			/* Mark the number of TXF we're pulling now */
+			iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i);
+			iwl_fwrt_dump_txf(fwrt, dump_data,
+					  cfg->lmac[0].txfifo_size[i], 0, i);
+		}
+
+		/* Pull TXF data from LMAC2 */
+		if (fwrt->smem_cfg.num_lmacs > 1) {
+			for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries;
+			     i++) {
+				/* Mark the number of TXF we're pulling now */
+				iwl_trans_write_prph(fwrt->trans,
+						     TXF_LARC_NUM +
+						     LMAC2_PRPH_OFFSET, i);
+				iwl_fwrt_dump_txf(fwrt, dump_data,
+						  cfg->lmac[1].txfifo_size[i],
+						  LMAC2_PRPH_OFFSET,
+						  i + cfg->num_txfifo_entries);
+			}
+		}
+	}
+
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
+	    fw_has_capa(&fwrt->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
+		/* Pull UMAC internal TXF data from all TXFs */
+		for (i = 0;
+		     i < ARRAY_SIZE(fwrt->smem_cfg.internal_txfifo_size);
+		     i++) {
+			fifo_hdr = (void *)(*dump_data)->data;
+			fifo_data = (void *)fifo_hdr->data;
+			fifo_len = fwrt->smem_cfg.internal_txfifo_size[i];
+
+			/* No need to try to read the data if the length is 0 */
+			if (fifo_len == 0)
+				continue;
+
+			/* Add a TLV for the internal FIFOs */
+			(*dump_data)->type =
+				cpu_to_le32(IWL_FW_ERROR_DUMP_INTERNAL_TXF);
+			(*dump_data)->len =
+				cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
+
+			fifo_hdr->fifo_num = cpu_to_le32(i);
+
+			/* Mark the number of TXF we're pulling now */
+			iwl_trans_write_prph(fwrt->trans, TXF_CPU2_NUM, i +
+				fwrt->smem_cfg.num_txfifo_entries);
+
+			fifo_hdr->available_bytes =
+				cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
+								TXF_CPU2_FIFO_ITEM_CNT));
+			fifo_hdr->wr_ptr =
+				cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
+								TXF_CPU2_WR_PTR));
+			fifo_hdr->rd_ptr =
+				cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
+								TXF_CPU2_RD_PTR));
+			fifo_hdr->fence_ptr =
+				cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
+								TXF_CPU2_FENCE_PTR));
+			fifo_hdr->fence_mode =
+				cpu_to_le32(iwl_trans_read_prph(fwrt->trans,
+								TXF_CPU2_LOCK_FENCE));
+
+			/* Set TXF_CPU2_READ_MODIFY_ADDR to TXF_CPU2_WR_PTR */
+			iwl_trans_write_prph(fwrt->trans,
+					     TXF_CPU2_READ_MODIFY_ADDR,
+					     TXF_CPU2_WR_PTR);
+
+			/* Dummy-read to advance the read pointer to head */
+			iwl_trans_read_prph(fwrt->trans,
+					    TXF_CPU2_READ_MODIFY_DATA);
+
+			/* Read FIFO */
+			fifo_len /= sizeof(u32); /* Size in DWORDS */
+			for (j = 0; j < fifo_len; j++)
+				fifo_data[j] =
+					iwl_trans_read_prph(fwrt->trans,
+							    TXF_CPU2_READ_MODIFY_DATA);
+			*dump_data = iwl_fw_error_next_data(*dump_data);
+		}
+	}
+
+	iwl_trans_release_nic_access(fwrt->trans, &flags);
+}
+
+#define IWL8260_ICCM_OFFSET		0x44000 /* Only for B-step */
+#define IWL8260_ICCM_LEN		0xC000 /* Only for B-step */
+
+struct iwl_prph_range {
+	u32 start, end;
+};
+
+static const struct iwl_prph_range iwl_prph_dump_addr_comm[] = {
+	{ .start = 0x00a00000, .end = 0x00a00000 },
+	{ .start = 0x00a0000c, .end = 0x00a00024 },
+	{ .start = 0x00a0002c, .end = 0x00a0003c },
+	{ .start = 0x00a00410, .end = 0x00a00418 },
+	{ .start = 0x00a00420, .end = 0x00a00420 },
+	{ .start = 0x00a00428, .end = 0x00a00428 },
+	{ .start = 0x00a00430, .end = 0x00a0043c },
+	{ .start = 0x00a00444, .end = 0x00a00444 },
+	{ .start = 0x00a004c0, .end = 0x00a004cc },
+	{ .start = 0x00a004d8, .end = 0x00a004d8 },
+	{ .start = 0x00a004e0, .end = 0x00a004f0 },
+	{ .start = 0x00a00840, .end = 0x00a00840 },
+	{ .start = 0x00a00850, .end = 0x00a00858 },
+	{ .start = 0x00a01004, .end = 0x00a01008 },
+	{ .start = 0x00a01010, .end = 0x00a01010 },
+	{ .start = 0x00a01018, .end = 0x00a01018 },
+	{ .start = 0x00a01024, .end = 0x00a01024 },
+	{ .start = 0x00a0102c, .end = 0x00a01034 },
+	{ .start = 0x00a0103c, .end = 0x00a01040 },
+	{ .start = 0x00a01048, .end = 0x00a01094 },
+	{ .start = 0x00a01c00, .end = 0x00a01c20 },
+	{ .start = 0x00a01c58, .end = 0x00a01c58 },
+	{ .start = 0x00a01c7c, .end = 0x00a01c7c },
+	{ .start = 0x00a01c28, .end = 0x00a01c54 },
+	{ .start = 0x00a01c5c, .end = 0x00a01c5c },
+	{ .start = 0x00a01c60, .end = 0x00a01cdc },
+	{ .start = 0x00a01ce0, .end = 0x00a01d0c },
+	{ .start = 0x00a01d18, .end = 0x00a01d20 },
+	{ .start = 0x00a01d2c, .end = 0x00a01d30 },
+	{ .start = 0x00a01d40, .end = 0x00a01d5c },
+	{ .start = 0x00a01d80, .end = 0x00a01d80 },
+	{ .start = 0x00a01d98, .end = 0x00a01d9c },
+	{ .start = 0x00a01da8, .end = 0x00a01da8 },
+	{ .start = 0x00a01db8, .end = 0x00a01df4 },
+	{ .start = 0x00a01dc0, .end = 0x00a01dfc },
+	{ .start = 0x00a01e00, .end = 0x00a01e2c },
+	{ .start = 0x00a01e40, .end = 0x00a01e60 },
+	{ .start = 0x00a01e68, .end = 0x00a01e6c },
+	{ .start = 0x00a01e74, .end = 0x00a01e74 },
+	{ .start = 0x00a01e84, .end = 0x00a01e90 },
+	{ .start = 0x00a01e9c, .end = 0x00a01ec4 },
+	{ .start = 0x00a01ed0, .end = 0x00a01ee0 },
+	{ .start = 0x00a01f00, .end = 0x00a01f1c },
+	{ .start = 0x00a01f44, .end = 0x00a01ffc },
+	{ .start = 0x00a02000, .end = 0x00a02048 },
+	{ .start = 0x00a02068, .end = 0x00a020f0 },
+	{ .start = 0x00a02100, .end = 0x00a02118 },
+	{ .start = 0x00a02140, .end = 0x00a0214c },
+	{ .start = 0x00a02168, .end = 0x00a0218c },
+	{ .start = 0x00a021c0, .end = 0x00a021c0 },
+	{ .start = 0x00a02400, .end = 0x00a02410 },
+	{ .start = 0x00a02418, .end = 0x00a02420 },
+	{ .start = 0x00a02428, .end = 0x00a0242c },
+	{ .start = 0x00a02434, .end = 0x00a02434 },
+	{ .start = 0x00a02440, .end = 0x00a02460 },
+	{ .start = 0x00a02468, .end = 0x00a024b0 },
+	{ .start = 0x00a024c8, .end = 0x00a024cc },
+	{ .start = 0x00a02500, .end = 0x00a02504 },
+	{ .start = 0x00a0250c, .end = 0x00a02510 },
+	{ .start = 0x00a02540, .end = 0x00a02554 },
+	{ .start = 0x00a02580, .end = 0x00a025f4 },
+	{ .start = 0x00a02600, .end = 0x00a0260c },
+	{ .start = 0x00a02648, .end = 0x00a02650 },
+	{ .start = 0x00a02680, .end = 0x00a02680 },
+	{ .start = 0x00a026c0, .end = 0x00a026d0 },
+	{ .start = 0x00a02700, .end = 0x00a0270c },
+	{ .start = 0x00a02804, .end = 0x00a02804 },
+	{ .start = 0x00a02818, .end = 0x00a0281c },
+	{ .start = 0x00a02c00, .end = 0x00a02db4 },
+	{ .start = 0x00a02df4, .end = 0x00a02fb0 },
+	{ .start = 0x00a03000, .end = 0x00a03014 },
+	{ .start = 0x00a0301c, .end = 0x00a0302c },
+	{ .start = 0x00a03034, .end = 0x00a03038 },
+	{ .start = 0x00a03040, .end = 0x00a03048 },
+	{ .start = 0x00a03060, .end = 0x00a03068 },
+	{ .start = 0x00a03070, .end = 0x00a03074 },
+	{ .start = 0x00a0307c, .end = 0x00a0307c },
+	{ .start = 0x00a03080, .end = 0x00a03084 },
+	{ .start = 0x00a0308c, .end = 0x00a03090 },
+	{ .start = 0x00a03098, .end = 0x00a03098 },
+	{ .start = 0x00a030a0, .end = 0x00a030a0 },
+	{ .start = 0x00a030a8, .end = 0x00a030b4 },
+	{ .start = 0x00a030bc, .end = 0x00a030bc },
+	{ .start = 0x00a030c0, .end = 0x00a0312c },
+	{ .start = 0x00a03c00, .end = 0x00a03c5c },
+	{ .start = 0x00a04400, .end = 0x00a04454 },
+	{ .start = 0x00a04460, .end = 0x00a04474 },
+	{ .start = 0x00a044c0, .end = 0x00a044ec },
+	{ .start = 0x00a04500, .end = 0x00a04504 },
+	{ .start = 0x00a04510, .end = 0x00a04538 },
+	{ .start = 0x00a04540, .end = 0x00a04548 },
+	{ .start = 0x00a04560, .end = 0x00a0457c },
+	{ .start = 0x00a04590, .end = 0x00a04598 },
+	{ .start = 0x00a045c0, .end = 0x00a045f4 },
+};
+
+static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = {
+	{ .start = 0x00a05c00, .end = 0x00a05c18 },
+	{ .start = 0x00a05400, .end = 0x00a056e8 },
+	{ .start = 0x00a08000, .end = 0x00a098bc },
+	{ .start = 0x00a02400, .end = 0x00a02758 },
+};
+
+static void _iwl_read_prph_block(struct iwl_trans *trans, u32 start,
+				 u32 len_bytes, __le32 *data)
+{
+	u32 i;
+
+	for (i = 0; i < len_bytes; i += 4)
+		*data++ = cpu_to_le32(iwl_read_prph_no_grab(trans, start + i));
+}
+
+static bool iwl_read_prph_block(struct iwl_trans *trans, u32 start,
+				u32 len_bytes, __le32 *data)
+{
+	unsigned long flags;
+	bool success = false;
+
+	if (iwl_trans_grab_nic_access(trans, &flags)) {
+		success = true;
+		_iwl_read_prph_block(trans, start, len_bytes, data);
+		iwl_trans_release_nic_access(trans, &flags);
+	}
+
+	return success;
+}
+
+static void iwl_dump_prph(struct iwl_trans *trans,
+			  struct iwl_fw_error_dump_data **data,
+			  const struct iwl_prph_range *iwl_prph_dump_addr,
+			  u32 range_len)
+{
+	struct iwl_fw_error_dump_prph *prph;
+	unsigned long flags;
+	u32 i;
+
+	IWL_DEBUG_INFO(trans, "WRT PRPH dump\n");
+
+	if (!iwl_trans_grab_nic_access(trans, &flags))
+		return;
+
+	for (i = 0; i < range_len; i++) {
+		/* The range includes both boundaries */
+		int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
+			 iwl_prph_dump_addr[i].start + 4;
+
+		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH);
+		(*data)->len = cpu_to_le32(sizeof(*prph) +
+					num_bytes_in_chunk);
+		prph = (void *)(*data)->data;
+		prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start);
+
+		_iwl_read_prph_block(trans, iwl_prph_dump_addr[i].start,
+				     /* our range is inclusive, hence + 4 */
+				     iwl_prph_dump_addr[i].end -
+				     iwl_prph_dump_addr[i].start + 4,
+				     (void *)prph->data);
+
+		*data = iwl_fw_error_next_data(*data);
+	}
+
+	iwl_trans_release_nic_access(trans, &flags);
+}
+
+/*
+ * alloc_sgtable - allocate a scatterlist table of the given size,
+ * fill it with pages and return it
+ * @size: the size (in bytes) of the table
+ */
+static struct scatterlist *alloc_sgtable(int size)
+{
+	int alloc_size, nents, i;
+	struct page *new_page;
+	struct scatterlist *iter;
+	struct scatterlist *table;
+
+	nents = DIV_ROUND_UP(size, PAGE_SIZE);
+	table = kcalloc(nents, sizeof(*table), GFP_KERNEL);
+	if (!table)
+		return NULL;
+	sg_init_table(table, nents);
+	iter = table;
+	for_each_sg(table, iter, sg_nents(table), i) {
+		new_page = alloc_page(GFP_KERNEL);
+		if (!new_page) {
+			/* release all previously allocated pages in the table */
+			iter = table;
+			for_each_sg(table, iter, sg_nents(table), i) {
+				new_page = sg_page(iter);
+				if (new_page)
+					__free_page(new_page);
+			}
+			/* also free the table itself so it isn't leaked */
+			kfree(table);
+			return NULL;
+		}
+		alloc_size = min_t(int, size, PAGE_SIZE);
+		size -= PAGE_SIZE;
+		sg_set_page(iter, new_page, alloc_size, 0);
+	}
+	return table;
+}
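+
+/*
+ * Editorial example, not part of the upstream file: a typical caller pairs
+ * alloc_sgtable() with sg_pcopy_from_buffer() to fill the pages, e.g.
+ *
+ *	sg_dump_data = alloc_sgtable(file_len);
+ *	if (sg_dump_data)
+ *		sg_pcopy_from_buffer(sg_dump_data, sg_nents(sg_dump_data),
+ *				     dump_file, file_len, 0);
+ */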
+
+void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
+{
+	struct iwl_fw_error_dump_file *dump_file;
+	struct iwl_fw_error_dump_data *dump_data;
+	struct iwl_fw_error_dump_info *dump_info;
+	struct iwl_fw_error_dump_mem *dump_mem;
+	struct iwl_fw_error_dump_smem_cfg *dump_smem_cfg;
+	struct iwl_fw_error_dump_trigger_desc *dump_trig;
+	struct iwl_fw_dump_ptrs *fw_error_dump;
+	struct scatterlist *sg_dump_data;
+	u32 sram_len, sram_ofs;
+	const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = fwrt->fw->dbg_mem_tlv;
+	struct iwl_fwrt_shared_mem_cfg *mem_cfg = &fwrt->smem_cfg;
+	u32 file_len, fifo_data_len = 0, prph_len = 0, radio_len = 0;
+	u32 smem_len = fwrt->fw->n_dbg_mem_tlv ? 0 : fwrt->trans->cfg->smem_len;
+	u32 sram2_len = fwrt->fw->n_dbg_mem_tlv ?
+				0 : fwrt->trans->cfg->dccm2_len;
+	bool monitor_dump_only = false;
+	int i;
+
+	IWL_DEBUG_INFO(fwrt, "WRT dump start\n");
+
+	/* there's no point in fw dump if the bus is dead */
+	if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) {
+		IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n");
+		goto out;
+	}
+
+	if (fwrt->dump.trig &&
+	    fwrt->dump.trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)
+		monitor_dump_only = true;
+
+	fw_error_dump = kzalloc(sizeof(*fw_error_dump), GFP_KERNEL);
+	if (!fw_error_dump)
+		goto out;
+
+	/* SRAM - include stack CCM if driver knows the values for it */
+	if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) {
+		const struct fw_img *img;
+
+		img = &fwrt->fw->img[fwrt->cur_fw_img];
+		sram_ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
+		sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
+	} else {
+		sram_ofs = fwrt->trans->cfg->dccm_offset;
+		sram_len = fwrt->trans->cfg->dccm_len;
+	}
+
+	/* reading RXF/TXF sizes */
+	if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
+		fifo_data_len = 0;
+
+		if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) {
+
+			/* Count RXF2 size */
+			if (mem_cfg->rxfifo2_size) {
+				/* Add header info */
+				fifo_data_len +=
+					mem_cfg->rxfifo2_size +
+					sizeof(*dump_data) +
+					sizeof(struct iwl_fw_error_dump_fifo);
+			}
+
+			/* Count RXF1 sizes */
+			for (i = 0; i < mem_cfg->num_lmacs; i++) {
+				if (!mem_cfg->lmac[i].rxfifo1_size)
+					continue;
+
+				/* Add header info */
+				fifo_data_len +=
+					mem_cfg->lmac[i].rxfifo1_size +
+					sizeof(*dump_data) +
+					sizeof(struct iwl_fw_error_dump_fifo);
+			}
+		}
+
+		if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) {
+			size_t fifo_const_len = sizeof(*dump_data) +
+				sizeof(struct iwl_fw_error_dump_fifo);
+
+			/* Count TXF sizes */
+			for (i = 0; i < mem_cfg->num_lmacs; i++) {
+				int j;
+
+				for (j = 0; j < mem_cfg->num_txfifo_entries;
+				     j++) {
+					if (!mem_cfg->lmac[i].txfifo_size[j])
+						continue;
+
+					/* Add header info */
+					fifo_data_len +=
+						fifo_const_len +
+						mem_cfg->lmac[i].txfifo_size[j];
+				}
+			}
+		}
+
+		if ((fwrt->fw->dbg_dump_mask &
+		    BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF)) &&
+		    fw_has_capa(&fwrt->fw->ucode_capa,
+				IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
+			for (i = 0;
+			     i < ARRAY_SIZE(mem_cfg->internal_txfifo_size);
+			     i++) {
+				if (!mem_cfg->internal_txfifo_size[i])
+					continue;
+
+				/* Add header info */
+				fifo_data_len +=
+					mem_cfg->internal_txfifo_size[i] +
+					sizeof(*dump_data) +
+					sizeof(struct iwl_fw_error_dump_fifo);
+			}
+		}
+
+		/* Make room for PRPH registers */
+		if (!fwrt->trans->cfg->gen2 &&
+		    fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH)) {
+			for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm);
+			     i++) {
+				/* The range includes both boundaries */
+				int num_bytes_in_chunk =
+					iwl_prph_dump_addr_comm[i].end -
+					iwl_prph_dump_addr_comm[i].start + 4;
+
+				prph_len += sizeof(*dump_data) +
+					sizeof(struct iwl_fw_error_dump_prph) +
+					num_bytes_in_chunk;
+			}
+		}
+
+		if (!fwrt->trans->cfg->gen2 &&
+		    fwrt->trans->cfg->mq_rx_supported &&
+		    fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH)) {
+			for (i = 0; i <
+				ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) {
+				/* The range includes both boundaries */
+				int num_bytes_in_chunk =
+					iwl_prph_dump_addr_9000[i].end -
+					iwl_prph_dump_addr_9000[i].start + 4;
+
+				prph_len += sizeof(*dump_data) +
+					sizeof(struct iwl_fw_error_dump_prph) +
+					num_bytes_in_chunk;
+			}
+		}
+
+		if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 &&
+		    fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RADIO_REG))
+			radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ;
+	}
+
+	file_len = sizeof(*dump_file) +
+		   fifo_data_len +
+		   prph_len +
+		   radio_len;
+
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO))
+		file_len += sizeof(*dump_data) + sizeof(*dump_info);
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG))
+		file_len += sizeof(*dump_data) + sizeof(*dump_smem_cfg);
+
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
+		/* Make room for the SMEM, if it exists */
+		if (smem_len)
+			file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
+				smem_len;
+
+		/* Make room for the secondary SRAM, if it exists */
+		if (sram2_len)
+			file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
+				sram2_len;
+
+		/* Make room for MEM segments */
+		for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) {
+			file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
+				    le32_to_cpu(fw_dbg_mem[i].len);
+		}
+	}
+
+	/* Make room for fw's virtual image pages, if it exists */
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
+	    !fwrt->trans->cfg->gen2 &&
+	    fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
+	    fwrt->fw_paging_db[0].fw_paging_block)
+		file_len += fwrt->num_of_paging_blk *
+			(sizeof(*dump_data) +
+			 sizeof(struct iwl_fw_error_dump_paging) +
+			 PAGING_BLOCK_SIZE);
+
+	/* If we only want a monitor dump, reset the file length */
+	if (monitor_dump_only) {
+		file_len = sizeof(*dump_file) + sizeof(*dump_data) * 2 +
+			   sizeof(*dump_info) + sizeof(*dump_smem_cfg);
+	}
+
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
+	    fwrt->dump.desc)
+		file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
+			    fwrt->dump.desc->len;
+
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM) &&
+	    !fwrt->fw->n_dbg_mem_tlv)
+		file_len += sizeof(*dump_data) + sram_len + sizeof(*dump_mem);
+
+	dump_file = vzalloc(file_len);
+	if (!dump_file) {
+		kfree(fw_error_dump);
+		goto out;
+	}
+
+	fw_error_dump->fwrt_ptr = dump_file;
+
+	dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
+	dump_data = (void *)dump_file->data;
+
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO)) {
+		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
+		dump_data->len = cpu_to_le32(sizeof(*dump_info));
+		dump_info = (void *)dump_data->data;
+		dump_info->device_family =
+			fwrt->trans->cfg->device_family ==
+			IWL_DEVICE_FAMILY_7000 ?
+				cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
+				cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
+		dump_info->hw_step =
+			cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
+		memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
+		       sizeof(dump_info->fw_human_readable));
+		strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name,
+			sizeof(dump_info->dev_human_readable) - 1);
+		strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name,
+			sizeof(dump_info->bus_human_readable) - 1);
+
+		dump_data = iwl_fw_error_next_data(dump_data);
+	}
+
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG)) {
+		/* Dump shared memory configuration */
+		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG);
+		dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg));
+		dump_smem_cfg = (void *)dump_data->data;
+		dump_smem_cfg->num_lmacs = cpu_to_le32(mem_cfg->num_lmacs);
+		dump_smem_cfg->num_txfifo_entries =
+			cpu_to_le32(mem_cfg->num_txfifo_entries);
+		for (i = 0; i < MAX_NUM_LMAC; i++) {
+			int j;
+			u32 *txf_size = mem_cfg->lmac[i].txfifo_size;
+
+			for (j = 0; j < TX_FIFO_MAX_NUM; j++)
+				dump_smem_cfg->lmac[i].txfifo_size[j] =
+					cpu_to_le32(txf_size[j]);
+			dump_smem_cfg->lmac[i].rxfifo1_size =
+				cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size);
+		}
+		dump_smem_cfg->rxfifo2_size =
+			cpu_to_le32(mem_cfg->rxfifo2_size);
+		dump_smem_cfg->internal_txfifo_addr =
+			cpu_to_le32(mem_cfg->internal_txfifo_addr);
+		for (i = 0; i < TX_FIFO_INTERNAL_MAX_NUM; i++) {
+			dump_smem_cfg->internal_txfifo_size[i] =
+				cpu_to_le32(mem_cfg->internal_txfifo_size[i]);
+		}
+
+		dump_data = iwl_fw_error_next_data(dump_data);
+	}
+
+	/* We only dump the FIFOs if the FW is in error state */
+	if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
+		iwl_fw_dump_fifos(fwrt, &dump_data);
+		if (radio_len)
+			iwl_read_radio_regs(fwrt, &dump_data);
+	}
+
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
+	    fwrt->dump.desc) {
+		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
+		dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
+					     fwrt->dump.desc->len);
+		dump_trig = (void *)dump_data->data;
+		memcpy(dump_trig, &fwrt->dump.desc->trig_desc,
+		       sizeof(*dump_trig) + fwrt->dump.desc->len);
+
+		dump_data = iwl_fw_error_next_data(dump_data);
+	}
+
+	/* In case we only want the monitor dump, skip to dumping transport data */
+	if (monitor_dump_only)
+		goto dump_trans_data;
+
+	if (!fwrt->fw->n_dbg_mem_tlv &&
+	    fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
+		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
+		dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
+		dump_mem = (void *)dump_data->data;
+		dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
+		dump_mem->offset = cpu_to_le32(sram_ofs);
+		iwl_trans_read_mem_bytes(fwrt->trans, sram_ofs, dump_mem->data,
+					 sram_len);
+		dump_data = iwl_fw_error_next_data(dump_data);
+	}
+
+	for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) {
+		u32 len = le32_to_cpu(fw_dbg_mem[i].len);
+		u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs);
+		bool success;
+
+		if (!(fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)))
+			break;
+
+		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
+		dump_data->len = cpu_to_le32(len + sizeof(*dump_mem));
+		dump_mem = (void *)dump_data->data;
+		dump_mem->type = fw_dbg_mem[i].data_type;
+		dump_mem->offset = cpu_to_le32(ofs);
+
+		IWL_DEBUG_INFO(fwrt, "WRT memory dump. Type=%u\n",
+			       le32_to_cpu(dump_mem->type));
+
+		switch (dump_mem->type & cpu_to_le32(FW_DBG_MEM_TYPE_MASK)) {
+		case cpu_to_le32(FW_DBG_MEM_TYPE_REGULAR):
+			iwl_trans_read_mem_bytes(fwrt->trans, ofs,
+						 dump_mem->data,
+						 len);
+			success = true;
+			break;
+		case cpu_to_le32(FW_DBG_MEM_TYPE_PRPH):
+			success = iwl_read_prph_block(fwrt->trans, ofs, len,
+						      (void *)dump_mem->data);
+			break;
+		default:
+			/*
+			 * shouldn't get here, we ignored this kind
+			 * of TLV earlier during the TLV parsing?!
+			 */
+			WARN_ON(1);
+			success = false;
+		}
+
+		if (success)
+			dump_data = iwl_fw_error_next_data(dump_data);
+	}
+
+	if (smem_len && fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
+		IWL_DEBUG_INFO(fwrt, "WRT SMEM dump\n");
+		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
+		dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
+		dump_mem = (void *)dump_data->data;
+		dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SMEM);
+		dump_mem->offset = cpu_to_le32(fwrt->trans->cfg->smem_offset);
+		iwl_trans_read_mem_bytes(fwrt->trans,
+					 fwrt->trans->cfg->smem_offset,
+					 dump_mem->data, smem_len);
+		dump_data = iwl_fw_error_next_data(dump_data);
+	}
+
+	if (sram2_len && fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
+		IWL_DEBUG_INFO(fwrt, "WRT SRAM dump\n");
+		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
+		dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
+		dump_mem = (void *)dump_data->data;
+		dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
+		dump_mem->offset = cpu_to_le32(fwrt->trans->cfg->dccm2_offset);
+		iwl_trans_read_mem_bytes(fwrt->trans,
+					 fwrt->trans->cfg->dccm2_offset,
+					 dump_mem->data, sram2_len);
+		dump_data = iwl_fw_error_next_data(dump_data);
+	}
+
+	/* Dump fw's virtual image */
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
+	    !fwrt->trans->cfg->gen2 &&
+	    fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
+	    fwrt->fw_paging_db[0].fw_paging_block) {
+		IWL_DEBUG_INFO(fwrt, "WRT paging dump\n");
+		for (i = 1; i < fwrt->num_of_paging_blk + 1; i++) {
+			struct iwl_fw_error_dump_paging *paging;
+			struct page *pages =
+				fwrt->fw_paging_db[i].fw_paging_block;
+			dma_addr_t addr = fwrt->fw_paging_db[i].fw_paging_phys;
+
+			dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
+			dump_data->len = cpu_to_le32(sizeof(*paging) +
+						     PAGING_BLOCK_SIZE);
+			paging = (void *)dump_data->data;
+			paging->index = cpu_to_le32(i);
+			dma_sync_single_for_cpu(fwrt->trans->dev, addr,
+						PAGING_BLOCK_SIZE,
+						DMA_BIDIRECTIONAL);
+			memcpy(paging->data, page_address(pages),
+			       PAGING_BLOCK_SIZE);
+			dump_data = iwl_fw_error_next_data(dump_data);
+		}
+	}
+
+	if (prph_len) {
+		iwl_dump_prph(fwrt->trans, &dump_data,
+			      iwl_prph_dump_addr_comm,
+			      ARRAY_SIZE(iwl_prph_dump_addr_comm));
+
+		if (fwrt->trans->cfg->mq_rx_supported)
+			iwl_dump_prph(fwrt->trans, &dump_data,
+				      iwl_prph_dump_addr_9000,
+				      ARRAY_SIZE(iwl_prph_dump_addr_9000));
+	}
+
+dump_trans_data:
+	fw_error_dump->trans_ptr = iwl_trans_dump_data(fwrt->trans,
+						       fwrt->dump.trig);
+	fw_error_dump->fwrt_len = file_len;
+	if (fw_error_dump->trans_ptr)
+		file_len += fw_error_dump->trans_ptr->len;
+	dump_file->file_len = cpu_to_le32(file_len);
+
+	sg_dump_data = alloc_sgtable(file_len);
+	if (sg_dump_data) {
+		sg_pcopy_from_buffer(sg_dump_data,
+				     sg_nents(sg_dump_data),
+				     fw_error_dump->fwrt_ptr,
+				     fw_error_dump->fwrt_len, 0);
+		if (fw_error_dump->trans_ptr)
+			sg_pcopy_from_buffer(sg_dump_data,
+					     sg_nents(sg_dump_data),
+					     fw_error_dump->trans_ptr->data,
+					     fw_error_dump->trans_ptr->len,
+					     fw_error_dump->fwrt_len);
+		dev_coredumpsg(fwrt->trans->dev, sg_dump_data, file_len,
+			       GFP_KERNEL);
+	}
+	vfree(fw_error_dump->fwrt_ptr);
+	vfree(fw_error_dump->trans_ptr);
+	kfree(fw_error_dump);
+
+out:
+	iwl_fw_free_dump_desc(fwrt);
+	clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
+	IWL_DEBUG_INFO(fwrt, "WRT dump done\n");
+}
+IWL_EXPORT_SYMBOL(iwl_fw_error_dump);
+
+const struct iwl_fw_dump_desc iwl_dump_desc_assert = {
+	.trig_desc = {
+		.type = cpu_to_le32(FW_DBG_TRIGGER_FW_ASSERT),
+	},
+};
+IWL_EXPORT_SYMBOL(iwl_dump_desc_assert);
+
+int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
+			    const struct iwl_fw_dump_desc *desc,
+			    const struct iwl_fw_dbg_trigger_tlv *trigger)
+{
+	unsigned int delay = 0;
+
+	if (trigger)
+		delay = msecs_to_jiffies(le32_to_cpu(trigger->stop_delay));
+
+	/*
+	 * If the loading of the FW completed successfully, the next step is to
+	 * get the SMEM config data. Thus, if fwrt->smem_cfg.num_lmacs is non
+	 * zero, the FW was already loaded successfully. If the state is "NO_FW"
+	 * in such a case - WARN and exit, since FW may be dead. Otherwise, we
+	 * can try to collect the data, since FW might just not be fully
+	 * loaded (no "ALIVE" yet), and the debug data is accessible.
+	 *
+	 * Corner case: got the FW alive but crashed before getting the SMEM
+	 *	config. In such a case, due to HW access problems, we might
+	 *	collect garbage.
+	 */
+	if (WARN((fwrt->trans->state == IWL_TRANS_NO_FW) &&
+		 fwrt->smem_cfg.num_lmacs,
+		 "Can't collect dbg data when FW isn't alive\n"))
+		return -EIO;
+
+	if (test_and_set_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status))
+		return -EBUSY;
+
+	if (WARN_ON(fwrt->dump.desc))
+		iwl_fw_free_dump_desc(fwrt);
+
+	IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n",
+		 le32_to_cpu(desc->trig_desc.type));
+
+	fwrt->dump.desc = desc;
+	fwrt->dump.trig = trigger;
+
+	schedule_delayed_work(&fwrt->dump.wk, delay);
+
+	return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_desc);
+
+int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
+		       enum iwl_fw_dbg_trigger trig,
+		       const char *str, size_t len,
+		       const struct iwl_fw_dbg_trigger_tlv *trigger)
+{
+	struct iwl_fw_dump_desc *desc;
+
+	if (trigger && trigger->flags & IWL_FW_DBG_FORCE_RESTART) {
+		IWL_WARN(fwrt, "Force restart: trigger %d fired.\n", trig);
+		iwl_force_nmi(fwrt->trans);
+		return 0;
+	}
+
+	desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
+	if (!desc)
+		return -ENOMEM;
+
+	desc->len = len;
+	desc->trig_desc.type = cpu_to_le32(trig);
+	memcpy(desc->trig_desc.data, str, len);
+
+	return iwl_fw_dbg_collect_desc(fwrt, desc, trigger);
+}
+IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect);
+
+int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
+			    struct iwl_fw_dbg_trigger_tlv *trigger,
+			    const char *fmt, ...)
+{
+	u16 occurrences = le16_to_cpu(trigger->occurrences);
+	int ret, len = 0;
+	char buf[64];
+
+	if (!occurrences)
+		return 0;
+
+	if (fmt) {
+		va_list ap;
+
+		buf[sizeof(buf) - 1] = '\0';
+
+		va_start(ap, fmt);
+		vsnprintf(buf, sizeof(buf), fmt, ap);
+		va_end(ap);
+
+		/* check for truncation */
+		if (WARN_ON_ONCE(buf[sizeof(buf) - 1]))
+			buf[sizeof(buf) - 1] = '\0';
+
+		len = strlen(buf) + 1;
+	}
+
+	ret = iwl_fw_dbg_collect(fwrt, le32_to_cpu(trigger->id), buf, len,
+				 trigger);
+
+	if (ret)
+		return ret;
+
+	trigger->occurrences = cpu_to_le16(occurrences - 1);
+	return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_trig);
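+
+/*
+ * Usage sketch (illustrative only, not part of the driver): a caller that
+ * detected an interesting event can kick off a collection once it has looked
+ * up the trigger; "missed" below is a hypothetical local variable.
+ */
+#if 0	/* example only */
+	struct iwl_fw_dbg_trigger_tlv *trig =
+		iwl_fw_dbg_get_trigger(fwrt->fw, FW_DBG_TRIGGER_MISSED_BEACONS);
+
+	if (trig)
+		iwl_fw_dbg_collect_trig(fwrt, trig, "missed %d beacons", missed);
+#endif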
+
+int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id)
+{
+	u8 *ptr;
+	int ret;
+	int i;
+
+	if (WARN_ONCE(conf_id >= ARRAY_SIZE(fwrt->fw->dbg_conf_tlv),
+		      "Invalid configuration %d\n", conf_id))
+		return -EINVAL;
+
+	/* EARLY START - firmware's configuration is hard coded */
+	if ((!fwrt->fw->dbg_conf_tlv[conf_id] ||
+	     !fwrt->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) &&
+	    conf_id == FW_DBG_START_FROM_ALIVE)
+		return 0;
+
+	if (!fwrt->fw->dbg_conf_tlv[conf_id])
+		return -EINVAL;
+
+	if (fwrt->dump.conf != FW_DBG_INVALID)
+		IWL_WARN(fwrt, "FW already configured (%d) - re-configuring\n",
+			 fwrt->dump.conf);
+
+	/* start default config marker cmd for syncing logs */
+	iwl_fw_trigger_timestamp(fwrt, 1);
+
+	/* Send all HCMDs for configuring the FW debug */
+	ptr = (void *)&fwrt->fw->dbg_conf_tlv[conf_id]->hcmd;
+	for (i = 0; i < fwrt->fw->dbg_conf_tlv[conf_id]->num_of_hcmds; i++) {
+		struct iwl_fw_dbg_conf_hcmd *cmd = (void *)ptr;
+		struct iwl_host_cmd hcmd = {
+			.id = cmd->id,
+			.len = { le16_to_cpu(cmd->len), },
+			.data = { cmd->data, },
+		};
+
+		ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
+		if (ret)
+			return ret;
+
+		ptr += sizeof(*cmd);
+		ptr += le16_to_cpu(cmd->len);
+	}
+
+	fwrt->dump.conf = conf_id;
+
+	return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf);
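+
+/*
+ * Layout note (illustrative): the hcmd blob walked above is a packed sequence
+ * of variable-length records, each a struct iwl_fw_dbg_conf_hcmd header
+ * followed immediately by le16_to_cpu(cmd->len) bytes of payload:
+ *
+ *	| hcmd hdr | data (len bytes) | hcmd hdr | data | ...
+ *
+ * which is why the cursor advances by sizeof(*cmd) plus the payload length on
+ * every iteration.
+ */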
+
+void iwl_fw_error_dump_wk(struct work_struct *work)
+{
+	struct iwl_fw_runtime *fwrt =
+		container_of(work, struct iwl_fw_runtime, dump.wk.work);
+
+	if (fwrt->ops && fwrt->ops->dump_start &&
+	    fwrt->ops->dump_start(fwrt->ops_ctx))
+		return;
+
+	if (fwrt->ops && fwrt->ops->fw_running &&
+	    !fwrt->ops->fw_running(fwrt->ops_ctx)) {
+		IWL_ERR(fwrt, "Firmware not running - cannot dump error\n");
+		iwl_fw_free_dump_desc(fwrt);
+		clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
+		goto out;
+	}
+
+	if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+		/* stop recording */
+		iwl_fw_dbg_stop_recording(fwrt);
+
+		iwl_fw_error_dump(fwrt);
+
+		/* start recording again if the firmware is not crashed */
+		if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) &&
+		    fwrt->fw->dbg_dest_tlv) {
+			iwl_clear_bits_prph(fwrt->trans,
+					    MON_BUFF_SAMPLE_CTL, 0x100);
+			iwl_clear_bits_prph(fwrt->trans,
+					    MON_BUFF_SAMPLE_CTL, 0x1);
+			iwl_set_bits_prph(fwrt->trans,
+					  MON_BUFF_SAMPLE_CTL, 0x1);
+		}
+	} else {
+		u32 in_sample = iwl_read_prph(fwrt->trans, DBGC_IN_SAMPLE);
+		u32 out_ctrl = iwl_read_prph(fwrt->trans, DBGC_OUT_CTRL);
+
+		iwl_fw_dbg_stop_recording(fwrt);
+		/* wait for the DBGC to stop before collecting the data */
+		udelay(500);
+
+		iwl_fw_error_dump(fwrt);
+
+		/* start recording again if the firmware is not crashed */
+		if (!test_bit(STATUS_FW_ERROR, &fwrt->trans->status) &&
+		    fwrt->fw->dbg_dest_tlv) {
+			iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, in_sample);
+			iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, out_ctrl);
+		}
+	}
+out:
+	if (fwrt->ops && fwrt->ops->dump_end)
+		fwrt->ops->dump_end(fwrt->ops_ctx);
+}
+
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
new file mode 100644
index 0000000..507d9a4
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -0,0 +1,266 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program;
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_fw_dbg_h__
+#define __iwl_fw_dbg_h__
+#include <linux/workqueue.h>
+#include <net/cfg80211.h>
+#include "runtime.h"
+#include "iwl-prph.h"
+#include "iwl-io.h"
+#include "file.h"
+#include "error-dump.h"
+
+/**
+ * struct iwl_fw_dump_desc - describes the dump
+ * @len: length of trig_desc->data
+ * @trig_desc: the description of the dump
+ */
+struct iwl_fw_dump_desc {
+	size_t len;
+	/* must be last */
+	struct iwl_fw_error_dump_trigger_desc trig_desc;
+};
+
+extern const struct iwl_fw_dump_desc iwl_dump_desc_assert;
+
+static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt)
+{
+	if (fwrt->dump.desc != &iwl_dump_desc_assert)
+		kfree(fwrt->dump.desc);
+	fwrt->dump.desc = NULL;
+	fwrt->dump.trig = NULL;
+}
+
+void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt);
+int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
+			    const struct iwl_fw_dump_desc *desc,
+			    const struct iwl_fw_dbg_trigger_tlv *trigger);
+int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
+		       enum iwl_fw_dbg_trigger trig,
+		       const char *str, size_t len,
+		       const struct iwl_fw_dbg_trigger_tlv *trigger);
+int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
+			    struct iwl_fw_dbg_trigger_tlv *trigger,
+			    const char *fmt, ...) __printf(3, 4);
+int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 id);
+
+#define iwl_fw_dbg_trigger_enabled(fw, id) ({			\
+	void *__dbg_trigger = (fw)->dbg_trigger_tlv[(id)];	\
+	unlikely(__dbg_trigger);				\
+})
+
+static inline struct iwl_fw_dbg_trigger_tlv*
+_iwl_fw_dbg_get_trigger(const struct iwl_fw *fw, enum iwl_fw_dbg_trigger id)
+{
+	return fw->dbg_trigger_tlv[id];
+}
+
+#define iwl_fw_dbg_get_trigger(fw, id) ({			\
+	BUILD_BUG_ON(!__builtin_constant_p(id));		\
+	BUILD_BUG_ON((id) >= FW_DBG_TRIGGER_MAX);		\
+	_iwl_fw_dbg_get_trigger((fw), (id));			\
+})
+
+static inline bool
+iwl_fw_dbg_trigger_vif_match(struct iwl_fw_dbg_trigger_tlv *trig,
+			     struct wireless_dev *wdev)
+{
+	u32 trig_vif = le32_to_cpu(trig->vif_type);
+
+	return trig_vif == IWL_FW_DBG_CONF_VIF_ANY ||
+	       wdev->iftype == trig_vif;
+}
+
+static inline bool
+iwl_fw_dbg_trigger_stop_conf_match(struct iwl_fw_runtime *fwrt,
+				   struct iwl_fw_dbg_trigger_tlv *trig)
+{
+	return ((trig->mode & IWL_FW_DBG_TRIGGER_STOP) &&
+		(fwrt->dump.conf == FW_DBG_INVALID ||
+		(BIT(fwrt->dump.conf) & le32_to_cpu(trig->stop_conf_ids))));
+}
+
+static inline bool
+iwl_fw_dbg_no_trig_window(struct iwl_fw_runtime *fwrt,
+			  struct iwl_fw_dbg_trigger_tlv *trig)
+{
+	unsigned long wind_jiff =
+		msecs_to_jiffies(le16_to_cpu(trig->trig_dis_ms));
+	u32 id = le32_to_cpu(trig->id);
+
+	/* If this is the first event checked, jump to update start ts */
+	if (fwrt->dump.non_collect_ts_start[id] &&
+	    (time_after(fwrt->dump.non_collect_ts_start[id] + wind_jiff,
+			jiffies)))
+		return true;
+
+	fwrt->dump.non_collect_ts_start[id] = jiffies;
+	return false;
+}
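+
+/*
+ * Worked example (illustrative): with trig_dis_ms = 5000, an occurrence
+ * collected at jiffies T stores T in non_collect_ts_start[id]; any re-fire of
+ * the same trigger before T + msecs_to_jiffies(5000) makes this function
+ * return true and is therefore suppressed, while the first occurrence after
+ * the window returns false and restarts the window from its own timestamp.
+ */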
+
+static inline bool
+iwl_fw_dbg_trigger_check_stop(struct iwl_fw_runtime *fwrt,
+			      struct wireless_dev *wdev,
+			      struct iwl_fw_dbg_trigger_tlv *trig)
+{
+	if (wdev && !iwl_fw_dbg_trigger_vif_match(trig, wdev))
+		return false;
+
+	if (iwl_fw_dbg_no_trig_window(fwrt, trig)) {
+		IWL_WARN(fwrt, "Trigger %d occurred while in the no-collect window.\n",
+			 le32_to_cpu(trig->id));
+		return false;
+	}
+
+	return iwl_fw_dbg_trigger_stop_conf_match(fwrt, trig);
+}
+
+static inline void
+_iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt,
+				struct wireless_dev *wdev,
+				struct iwl_fw_dbg_trigger_tlv *trigger)
+{
+	if (!trigger)
+		return;
+
+	if (!iwl_fw_dbg_trigger_check_stop(fwrt, wdev, trigger))
+		return;
+
+	iwl_fw_dbg_collect_trig(fwrt, trigger, NULL);
+}
+
+#define iwl_fw_dbg_trigger_simple_stop(fwrt, wdev, trig)	\
+	_iwl_fw_dbg_trigger_simple_stop((fwrt), (wdev),		\
+					iwl_fw_dbg_get_trigger((fwrt)->fw,\
+							       (trig)))
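+
+/*
+ * Usage sketch (illustrative only): event paths typically fire a "simple
+ * stop" trigger and let the vif/window/config checks above decide whether to
+ * actually collect, e.g. on a channel switch:
+ *
+ *	iwl_fw_dbg_trigger_simple_stop(fwrt, ieee80211_vif_to_wdev(vif),
+ *				       FW_DBG_TRIGGER_CHANNEL_SWITCH);
+ */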
+
+static inline void iwl_fw_dbg_stop_recording(struct iwl_fw_runtime *fwrt)
+{
+	if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+		iwl_set_bits_prph(fwrt->trans, MON_BUFF_SAMPLE_CTL, 0x100);
+	} else {
+		iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, 0);
+		udelay(100);
+		iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, 0);
+	}
+}
+
+static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt)
+{
+	fwrt->dump.conf = FW_DBG_INVALID;
+}
+
+void iwl_fw_error_dump_wk(struct work_struct *work);
+
+static inline void iwl_fw_flush_dump(struct iwl_fw_runtime *fwrt)
+{
+	flush_delayed_work(&fwrt->dump.wk);
+}
+
+static inline void iwl_fw_cancel_dump(struct iwl_fw_runtime *fwrt)
+{
+	cancel_delayed_work_sync(&fwrt->dump.wk);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt)
+{
+	fwrt->timestamp.delay = 0;
+	cancel_delayed_work_sync(&fwrt->timestamp.wk);
+}
+
+void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt, u32 delay);
+
+static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt)
+{
+	cancel_delayed_work_sync(&fwrt->timestamp.wk);
+}
+
+static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt)
+{
+	if (!fwrt->timestamp.delay)
+		return;
+
+	schedule_delayed_work(&fwrt->timestamp.wk,
+			      round_jiffies_relative(fwrt->timestamp.delay));
+}
+
+#else
+
+static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) {}
+
+static inline void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt,
+					    u32 delay) {}
+
+static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt) {}
+
+static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {}
+
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
+
+#endif  /* __iwl_fw_dbg_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
new file mode 100644
index 0000000..8ba5a60
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
@@ -0,0 +1,202 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "api/commands.h"
+#include "debugfs.h"
+#include "dbg.h"
+
+#define FWRT_DEBUGFS_READ_FILE_OPS(name)				\
+static ssize_t iwl_dbgfs_##name##_read(struct iwl_fw_runtime *fwrt,	\
+				       char *buf, size_t count,		\
+				       loff_t *ppos);			\
+static const struct file_operations iwl_dbgfs_##name##_ops = {		\
+	.read = iwl_dbgfs_##name##_read,				\
+	.open = simple_open,						\
+	.llseek = generic_file_llseek,					\
+}
+
+#define FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen)			\
+static ssize_t iwl_dbgfs_##name##_write(struct iwl_fw_runtime *fwrt,	\
+					char *buf, size_t count,	\
+					loff_t *ppos);			\
+static ssize_t _iwl_dbgfs_##name##_write(struct file *file,		\
+					 const char __user *user_buf,	\
+					 size_t count, loff_t *ppos)	\
+{									\
+	struct iwl_fw_runtime *fwrt = file->private_data;		\
+	char buf[buflen] = {};						\
+	size_t buf_size = min(count, sizeof(buf) - 1);			\
+									\
+	if (copy_from_user(buf, user_buf, buf_size))			\
+		return -EFAULT;						\
+									\
+	return iwl_dbgfs_##name##_write(fwrt, buf, buf_size, ppos);	\
+}
+
+#define FWRT_DEBUGFS_READ_WRITE_FILE_OPS(name, buflen)			\
+FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen)				\
+static const struct file_operations iwl_dbgfs_##name##_ops = {		\
+	.write = _iwl_dbgfs_##name##_write,				\
+	.read = iwl_dbgfs_##name##_read,				\
+	.open = simple_open,						\
+	.llseek = generic_file_llseek,					\
+}
+
+#define FWRT_DEBUGFS_WRITE_FILE_OPS(name, buflen)			\
+FWRT_DEBUGFS_WRITE_WRAPPER(name, buflen)				\
+static const struct file_operations iwl_dbgfs_##name##_ops = {		\
+	.write = _iwl_dbgfs_##name##_write,				\
+	.open = simple_open,						\
+	.llseek = generic_file_llseek,					\
+}
+
+#define FWRT_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do {	\
+		if (!debugfs_create_file(alias, mode, parent, fwrt,	\
+					 &iwl_dbgfs_##name##_ops))	\
+			goto err;					\
+	} while (0)
+#define FWRT_DEBUGFS_ADD_FILE(name, parent, mode) \
+	FWRT_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
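+
+/*
+ * Illustrative sketch (not part of the driver): a new write-only knob would
+ * follow the same pattern as timestamp_marker below - a write handler, ops
+ * instantiated through the wrapper macros, then registration; "foo" is a
+ * hypothetical file name.
+ */
+#if 0	/* example only */
+static ssize_t iwl_dbgfs_foo_write(struct iwl_fw_runtime *fwrt,
+				   char *buf, size_t count, loff_t *ppos)
+{
+	return count;
+}
+
+FWRT_DEBUGFS_WRITE_FILE_OPS(foo, 16);
+
+/* and inside iwl_fwrt_dbgfs_register(): */
+FWRT_DEBUGFS_ADD_FILE(foo, dbgfs_dir, 0200);
+#endif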
+
+static int iwl_fw_send_timestamp_marker_cmd(struct iwl_fw_runtime *fwrt)
+{
+	struct iwl_mvm_marker marker = {
+		.dw_len = sizeof(struct iwl_mvm_marker) / 4,
+		.marker_id = MARKER_ID_SYNC_CLOCK,
+
+		/* the real timestamp is taken from the ftrace clock
+		 * this is for finding the match between fw and kernel logs
+		 */
+		.timestamp = cpu_to_le64(fwrt->timestamp.seq++),
+	};
+
+	struct iwl_host_cmd hcmd = {
+		.id = MARKER_CMD,
+		.flags = CMD_ASYNC,
+		.data[0] = &marker,
+		.len[0] = sizeof(marker),
+	};
+
+	return iwl_trans_send_cmd(fwrt->trans, &hcmd);
+}
+
+static void iwl_fw_timestamp_marker_wk(struct work_struct *work)
+{
+	int ret;
+	struct iwl_fw_runtime *fwrt =
+		container_of(work, struct iwl_fw_runtime, timestamp.wk.work);
+	unsigned long delay = fwrt->timestamp.delay;
+
+	ret = iwl_fw_send_timestamp_marker_cmd(fwrt);
+	if (!ret && delay)
+		schedule_delayed_work(&fwrt->timestamp.wk,
+				      round_jiffies_relative(delay));
+	else
+		IWL_INFO(fwrt,
+			 "stopping timestamp_marker, ret: %d, delay: %u\n",
+			 ret, jiffies_to_msecs(delay) / 1000);
+}
+
+void iwl_fw_trigger_timestamp(struct iwl_fw_runtime *fwrt, u32 delay)
+{
+	IWL_INFO(fwrt,
+		 "starting timestamp_marker trigger with delay: %us\n",
+		 delay);
+
+	iwl_fw_cancel_timestamp(fwrt);
+
+	fwrt->timestamp.delay = msecs_to_jiffies(delay * 1000);
+
+	schedule_delayed_work(&fwrt->timestamp.wk,
+			      round_jiffies_relative(fwrt->timestamp.delay));
+}
+
+static ssize_t iwl_dbgfs_timestamp_marker_write(struct iwl_fw_runtime *fwrt,
+						char *buf, size_t count,
+						loff_t *ppos)
+{
+	int ret;
+	u32 delay;
+
+	ret = kstrtou32(buf, 10, &delay);
+	if (ret < 0)
+		return ret;
+
+	iwl_fw_trigger_timestamp(fwrt, delay);
+
+	return count;
+}
+
+FWRT_DEBUGFS_WRITE_FILE_OPS(timestamp_marker, 10);
+
+int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
+			    struct dentry *dbgfs_dir)
+{
+	INIT_DELAYED_WORK(&fwrt->timestamp.wk, iwl_fw_timestamp_marker_wk);
+	FWRT_DEBUGFS_ADD_FILE(timestamp_marker, dbgfs_dir, 0200);
+	return 0;
+err:
+	IWL_ERR(fwrt, "Can't create the fwrt debugfs file\n");
+	return -ENOMEM;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h
new file mode 100644
index 0000000..cbbfa8e
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h
@@ -0,0 +1,79 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include "runtime.h"
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
+			    struct dentry *dbgfs_dir);
+
+#else
+static inline int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
+					  struct dentry *dbgfs_dir)
+{
+	return 0;
+}
+
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
new file mode 100644
index 0000000..ed7beca
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -0,0 +1,365 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __fw_error_dump_h__
+#define __fw_error_dump_h__
+
+#include <linux/types.h>
+
+#define IWL_FW_ERROR_DUMP_BARKER	0x14789632
+
+/**
+ * enum iwl_fw_error_dump_type - types of data in the dump file
+ * @IWL_FW_ERROR_DUMP_CSR: Control Status Registers - from offset 0
+ * @IWL_FW_ERROR_DUMP_RXF: RX FIFO contents, structured as
+ *	&struct iwl_fw_error_dump_fifo
+ * @IWL_FW_ERROR_DUMP_TXF: TX FIFO contents, structured as
+ *	&struct iwl_fw_error_dump_fifo
+ * @IWL_FW_ERROR_DUMP_INTERNAL_TXF: internal TX FIFO contents, structured as
+ *	&struct iwl_fw_error_dump_fifo
+ * @IWL_FW_ERROR_DUMP_TXCMD: last TX command data, structured as
+ *	&struct iwl_fw_error_dump_txcmd packets
+ * @IWL_FW_ERROR_DUMP_DEV_FW_INFO: &struct iwl_fw_error_dump_info with
+ *	info on the device / firmware.
+ * @IWL_FW_ERROR_DUMP_FW_MONITOR: firmware monitor
+ * @IWL_FW_ERROR_DUMP_PRPH: range of periphery registers - there can be several
+ *	sections like this in a single file.
+ * @IWL_FW_ERROR_DUMP_FH_REGS: range of FH registers
+ * @IWL_FW_ERROR_DUMP_MEM: chunk of memory
+ * @IWL_FW_ERROR_DUMP_ERROR_INFO: description of what triggered this dump.
+ *	Structured as &struct iwl_fw_error_dump_trigger_desc.
+ * @IWL_FW_ERROR_DUMP_RB: the content of an RB structured as
+ *	&struct iwl_fw_error_dump_rb
+ * @IWL_FW_ERROR_DUMP_PAGING: UMAC's image memory segments which were
+ *	paged to the DRAM.
+ * @IWL_FW_ERROR_DUMP_RADIO_REG: Dump the radio registers.
+ * @IWL_FW_ERROR_DUMP_EXTERNAL: used only by external code utilities, and
+ *	for that reason is not in use in any other place in the Linux Wi-Fi
+ *	stack.
+ * @IWL_FW_ERROR_DUMP_MEM_CFG: the addresses and sizes of fifos in the smem,
+ *	which we get from the fw after ALIVE. The content is structured as
+ *	&struct iwl_fw_error_dump_smem_cfg.
+ */
+enum iwl_fw_error_dump_type {
+	/* 0 is deprecated */
+	IWL_FW_ERROR_DUMP_CSR = 1,
+	IWL_FW_ERROR_DUMP_RXF = 2,
+	IWL_FW_ERROR_DUMP_TXCMD = 3,
+	IWL_FW_ERROR_DUMP_DEV_FW_INFO = 4,
+	IWL_FW_ERROR_DUMP_FW_MONITOR = 5,
+	IWL_FW_ERROR_DUMP_PRPH = 6,
+	IWL_FW_ERROR_DUMP_TXF = 7,
+	IWL_FW_ERROR_DUMP_FH_REGS = 8,
+	IWL_FW_ERROR_DUMP_MEM = 9,
+	IWL_FW_ERROR_DUMP_ERROR_INFO = 10,
+	IWL_FW_ERROR_DUMP_RB = 11,
+	IWL_FW_ERROR_DUMP_PAGING = 12,
+	IWL_FW_ERROR_DUMP_RADIO_REG = 13,
+	IWL_FW_ERROR_DUMP_INTERNAL_TXF = 14,
+	IWL_FW_ERROR_DUMP_EXTERNAL = 15, /* Do not move */
+	IWL_FW_ERROR_DUMP_MEM_CFG = 16,
+
+	IWL_FW_ERROR_DUMP_MAX,
+};
+
+/**
+ * struct iwl_fw_error_dump_data - data for one type
+ * @type: &enum iwl_fw_error_dump_type
+ * @len: the length starting from %data
+ * @data: the data itself
+ */
+struct iwl_fw_error_dump_data {
+	__le32 type;
+	__le32 len;
+	__u8 data[];
+} __packed;
+
+/**
+ * struct iwl_fw_error_dump_file - the layout of the header of the file
+ * @barker: must be %IWL_FW_ERROR_DUMP_BARKER
+ * @file_len: the length of all the file starting from %barker
+ * @data: array of &struct iwl_fw_error_dump_data
+ */
+struct iwl_fw_error_dump_file {
+	__le32 barker;
+	__le32 file_len;
+	u8 data[0];
+} __packed;
+
+/**
+ * struct iwl_fw_error_dump_txcmd - TX command data
+ * @cmdlen: original length of command
+ * @caplen: captured length of command (may be less)
+ * @data: captured command data, @caplen bytes
+ */
+struct iwl_fw_error_dump_txcmd {
+	__le32 cmdlen;
+	__le32 caplen;
+	u8 data[];
+} __packed;
+
+/**
+ * struct iwl_fw_error_dump_fifo - RX/TX FIFO data
+ * @fifo_num: number of FIFO (starting from 0)
+ * @available_bytes: num of bytes available in FIFO (may be less than FIFO size)
+ * @wr_ptr: position of write pointer
+ * @rd_ptr: position of read pointer
+ * @fence_ptr: position of fence pointer
+ * @fence_mode: the current mode of the fence (before locking) -
+ *	0=follow RD pointer ; 1 = freeze
+ * @data: all of the FIFO's data
+ */
+struct iwl_fw_error_dump_fifo {
+	__le32 fifo_num;
+	__le32 available_bytes;
+	__le32 wr_ptr;
+	__le32 rd_ptr;
+	__le32 fence_ptr;
+	__le32 fence_mode;
+	u8 data[];
+} __packed;
+
+enum iwl_fw_error_dump_family {
+	IWL_FW_ERROR_DUMP_FAMILY_7 = 7,
+	IWL_FW_ERROR_DUMP_FAMILY_8 = 8,
+};
+
+/**
+ * struct iwl_fw_error_dump_info - info on the device / firmware
+ * @device_family: the family of the device (7 / 8)
+ * @hw_step: the step of the device
+ * @fw_human_readable: human readable FW version
+ * @dev_human_readable: name of the device
+ * @bus_human_readable: name of the bus used
+ */
+struct iwl_fw_error_dump_info {
+	__le32 device_family;
+	__le32 hw_step;
+	u8 fw_human_readable[FW_VER_HUMAN_READABLE_SZ];
+	u8 dev_human_readable[64];
+	u8 bus_human_readable[8];
+} __packed;
+
+/**
+ * struct iwl_fw_error_dump_fw_mon - FW monitor data
+ * @fw_mon_wr_ptr: the position of the write pointer in the cyclic buffer
+ * @fw_mon_base_ptr: base pointer of the data
+ * @fw_mon_cycle_cnt: number of wraparounds
+ * @reserved: for future use
+ * @data: captured data
+ */
+struct iwl_fw_error_dump_fw_mon {
+	__le32 fw_mon_wr_ptr;
+	__le32 fw_mon_base_ptr;
+	__le32 fw_mon_cycle_cnt;
+	__le32 reserved[3];
+	u8 data[];
+} __packed;
+
+#define MAX_NUM_LMAC 2
+#define TX_FIFO_INTERNAL_MAX_NUM	6
+#define TX_FIFO_MAX_NUM			15
+/**
+ * struct iwl_fw_error_dump_smem_cfg - Dump SMEM configuration
+ *	This must follow &struct iwl_fwrt_shared_mem_cfg.
+ * @num_lmacs: number of lmacs
+ * @num_txfifo_entries: number of tx fifos
+ * @lmac: sizes of lmacs txfifos and rxfifo1
+ * @rxfifo2_size: size of rxfifo2
+ * @internal_txfifo_addr: address of internal tx fifo
+ * @internal_txfifo_size: size of internal tx fifo
+ */
+struct iwl_fw_error_dump_smem_cfg {
+	__le32 num_lmacs;
+	__le32 num_txfifo_entries;
+	struct {
+		__le32 txfifo_size[TX_FIFO_MAX_NUM];
+		__le32 rxfifo1_size;
+	} lmac[MAX_NUM_LMAC];
+	__le32 rxfifo2_size;
+	__le32 internal_txfifo_addr;
+	__le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
+} __packed;
+
+/**
+ * struct iwl_fw_error_dump_prph - periphery registers data
+ * @prph_start: address of the first register in this chunk
+ * @data: the content of the registers
+ */
+struct iwl_fw_error_dump_prph {
+	__le32 prph_start;
+	__le32 data[];
+};
+
+enum iwl_fw_error_dump_mem_type {
+	IWL_FW_ERROR_DUMP_MEM_SRAM,
+	IWL_FW_ERROR_DUMP_MEM_SMEM,
+};
+
+/**
+ * struct iwl_fw_error_dump_mem - chunk of memory
+ * @type: &enum iwl_fw_error_dump_mem_type
+ * @offset: the offset from which the memory was read
+ * @data: the content of the memory
+ */
+struct iwl_fw_error_dump_mem {
+	__le32 type;
+	__le32 offset;
+	u8 data[];
+};
+
+/**
+ * struct iwl_fw_error_dump_rb - content of a Receive Buffer
+ * @index: the index of the Receive Buffer in the Rx queue
+ * @rxq: the RB's Rx queue
+ * @reserved: for future use
+ * @data: the content of the Receive Buffer
+ */
+struct iwl_fw_error_dump_rb {
+	__le32 index;
+	__le32 rxq;
+	__le32 reserved;
+	u8 data[];
+};
+
+/**
+ * struct iwl_fw_error_dump_paging - content of the UMAC's image page
+ *	block on DRAM
+ * @index: the index of the page block
+ * @reserved: for future use
+ * @data: the content of the page block
+ */
+struct iwl_fw_error_dump_paging {
+	__le32 index;
+	__le32 reserved;
+	u8 data[];
+};
+
+/**
+ * iwl_fw_error_next_data - advance fw error dump data pointer
+ * @data: previous data block
+ * Returns: next data block
+ */
+static inline struct iwl_fw_error_dump_data *
+iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data)
+{
+	return (void *)(data->data + le32_to_cpu(data->len));
+}
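+
+/*
+ * Walker sketch (illustrative only): given a complete dump file, the blocks
+ * can be traversed until file_len bytes are consumed; "file" below is a
+ * hypothetical pointer to a struct iwl_fw_error_dump_file.
+ */
+#if 0	/* example only */
+	struct iwl_fw_error_dump_data *data = (void *)file->data;
+	u8 *end = (u8 *)file + le32_to_cpu(file->file_len);
+
+	while ((u8 *)data < end) {
+		/* le32_to_cpu(data->type) and data->data are valid here */
+		data = iwl_fw_error_next_data(data);
+	}
+#endif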
+
+/**
+ * enum iwl_fw_dbg_trigger - triggers available
+ *
+ * @FW_DBG_TRIGGER_USER: trigger log collection by user
+ *	This should not be defined as a trigger to the driver, but a value the
+ *	driver should set to indicate that the trigger was initiated by the
+ *	user.
+ * @FW_DBG_TRIGGER_FW_ASSERT: trigger log collection when the firmware asserts
+ * @FW_DBG_TRIGGER_MISSED_BEACONS: trigger log collection when beacons are
+ *	missed.
+ * @FW_DBG_TRIGGER_CHANNEL_SWITCH: trigger log collection upon channel switch.
+ * @FW_DBG_TRIGGER_FW_NOTIF: trigger log collection when the firmware sends a
+ *	command response or a notification.
+ * @FW_DBG_TRIGGER_MLME: trigger log collection upon MLME event.
+ * @FW_DBG_TRIGGER_STATS: trigger log collection upon statistics threshold.
+ * @FW_DBG_TRIGGER_RSSI: trigger log collection when the rssi of the beacon
+ *	goes below a threshold.
+ * @FW_DBG_TRIGGER_TXQ_TIMERS: configures the timers for the Tx queue hang
+ *	detection.
+ * @FW_DBG_TRIGGER_TIME_EVENT: trigger log collection upon time-event related
+ *	events.
+ * @FW_DBG_TRIGGER_BA: trigger log collection upon BlockAck related events.
+ * @FW_DBG_TRIGGER_TX_LATENCY: trigger log collection when the tx latency goes
+ *	above a threshold.
+ * @FW_DBG_TRIGGER_TDLS: trigger log collection upon TDLS related events.
+ * @FW_DBG_TRIGGER_TX_STATUS: trigger log collection upon tx status when
+ *	the firmware sends a tx reply.
+ */
+enum iwl_fw_dbg_trigger {
+	FW_DBG_TRIGGER_INVALID = 0,
+	FW_DBG_TRIGGER_USER,
+	FW_DBG_TRIGGER_FW_ASSERT,
+	FW_DBG_TRIGGER_MISSED_BEACONS,
+	FW_DBG_TRIGGER_CHANNEL_SWITCH,
+	FW_DBG_TRIGGER_FW_NOTIF,
+	FW_DBG_TRIGGER_MLME,
+	FW_DBG_TRIGGER_STATS,
+	FW_DBG_TRIGGER_RSSI,
+	FW_DBG_TRIGGER_TXQ_TIMERS,
+	FW_DBG_TRIGGER_TIME_EVENT,
+	FW_DBG_TRIGGER_BA,
+	FW_DBG_TRIGGER_TX_LATENCY,
+	FW_DBG_TRIGGER_TDLS,
+	FW_DBG_TRIGGER_TX_STATUS,
+
+	/* must be last */
+	FW_DBG_TRIGGER_MAX,
+};
+
+/**
+ * struct iwl_fw_error_dump_trigger_desc - describes the trigger condition
+ * @type: &enum iwl_fw_dbg_trigger
+ * @data: raw data about what happened
+ */
+struct iwl_fw_error_dump_trigger_desc {
+	__le32 type;
+	u8 data[];
+};
+
+#endif /* __fw_error_dump_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
new file mode 100644
index 0000000..bbf2b26
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -0,0 +1,895 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __iwl_fw_file_h__
+#define __iwl_fw_file_h__
+
+#include <linux/netdevice.h>
+#include <linux/nl80211.h>
+
+/* v1/v2 uCode file layout */
+struct iwl_ucode_header {
+	__le32 ver;	/* major/minor/API/serial */
+	union {
+		struct {
+			__le32 inst_size;	/* bytes of runtime code */
+			__le32 data_size;	/* bytes of runtime data */
+			__le32 init_size;	/* bytes of init code */
+			__le32 init_data_size;	/* bytes of init data */
+			__le32 boot_size;	/* bytes of bootstrap code */
+			u8 data[0];		/* in same order as sizes */
+		} v1;
+		struct {
+			__le32 build;		/* build number */
+			__le32 inst_size;	/* bytes of runtime code */
+			__le32 data_size;	/* bytes of runtime data */
+			__le32 init_size;	/* bytes of init code */
+			__le32 init_data_size;	/* bytes of init data */
+			__le32 boot_size;	/* bytes of bootstrap code */
+			u8 data[0];		/* in same order as sizes */
+		} v2;
+	} u;
+};
+
+/*
+ * new TLV uCode file layout
+ *
+ * The new TLV file format contains TLVs, that each specify
+ * some piece of data.
+ */
+
+enum iwl_ucode_tlv_type {
+	IWL_UCODE_TLV_INVALID		= 0, /* unused */
+	IWL_UCODE_TLV_INST		= 1,
+	IWL_UCODE_TLV_DATA		= 2,
+	IWL_UCODE_TLV_INIT		= 3,
+	IWL_UCODE_TLV_INIT_DATA		= 4,
+	IWL_UCODE_TLV_BOOT		= 5,
+	IWL_UCODE_TLV_PROBE_MAX_LEN	= 6, /* a u32 value */
+	IWL_UCODE_TLV_PAN		= 7,
+	IWL_UCODE_TLV_RUNT_EVTLOG_PTR	= 8,
+	IWL_UCODE_TLV_RUNT_EVTLOG_SIZE	= 9,
+	IWL_UCODE_TLV_RUNT_ERRLOG_PTR	= 10,
+	IWL_UCODE_TLV_INIT_EVTLOG_PTR	= 11,
+	IWL_UCODE_TLV_INIT_EVTLOG_SIZE	= 12,
+	IWL_UCODE_TLV_INIT_ERRLOG_PTR	= 13,
+	IWL_UCODE_TLV_ENHANCE_SENS_TBL	= 14,
+	IWL_UCODE_TLV_PHY_CALIBRATION_SIZE = 15,
+	IWL_UCODE_TLV_WOWLAN_INST	= 16,
+	IWL_UCODE_TLV_WOWLAN_DATA	= 17,
+	IWL_UCODE_TLV_FLAGS		= 18,
+	IWL_UCODE_TLV_SEC_RT		= 19,
+	IWL_UCODE_TLV_SEC_INIT		= 20,
+	IWL_UCODE_TLV_SEC_WOWLAN	= 21,
+	IWL_UCODE_TLV_DEF_CALIB		= 22,
+	IWL_UCODE_TLV_PHY_SKU		= 23,
+	IWL_UCODE_TLV_SECURE_SEC_RT	= 24,
+	IWL_UCODE_TLV_SECURE_SEC_INIT	= 25,
+	IWL_UCODE_TLV_SECURE_SEC_WOWLAN	= 26,
+	IWL_UCODE_TLV_NUM_OF_CPU	= 27,
+	IWL_UCODE_TLV_CSCHEME		= 28,
+	IWL_UCODE_TLV_API_CHANGES_SET	= 29,
+	IWL_UCODE_TLV_ENABLED_CAPABILITIES	= 30,
+	IWL_UCODE_TLV_N_SCAN_CHANNELS		= 31,
+	IWL_UCODE_TLV_PAGING		= 32,
+	IWL_UCODE_TLV_SEC_RT_USNIFFER	= 34,
+	/* 35 is unused */
+	IWL_UCODE_TLV_FW_VERSION	= 36,
+	IWL_UCODE_TLV_FW_DBG_DEST	= 38,
+	IWL_UCODE_TLV_FW_DBG_CONF	= 39,
+	IWL_UCODE_TLV_FW_DBG_TRIGGER	= 40,
+	IWL_UCODE_TLV_FW_GSCAN_CAPA	= 50,
+	IWL_UCODE_TLV_FW_MEM_SEG	= 51,
+	IWL_UCODE_TLV_IML		= 52,
+
+	/* TLVs 0x1000-0x2000 are for internal driver usage */
+	IWL_UCODE_TLV_FW_DBG_DUMP_LST	= 0x1000,
+};
+
+struct iwl_ucode_tlv {
+	__le32 type;		/* see above */
+	__le32 length;		/* not including type/length fields */
+	u8 data[0];
+};
+
+#define IWL_TLV_UCODE_MAGIC		0x0a4c5749
+#define FW_VER_HUMAN_READABLE_SZ	64
+
+struct iwl_tlv_ucode_header {
+	/*
+	 * The TLV style ucode header is distinguished from
+	 * the v1/v2 style header by the first four bytes being
+	 * zero, since that is an invalid combination of
+	 * major/minor/API/serial versions.
+	 */
+	__le32 zero;
+	__le32 magic;
+	u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
+	/* major/minor/API/serial or major in new format */
+	__le32 ver;
+	__le32 build;
+	__le64 ignore;
+	/*
+	 * The data contained herein has a TLV layout,
+	 * see above for the TLV header and types.
+	 * Note that each TLV is padded to a length
+	 * that is a multiple of 4 for alignment.
+	 */
+	u8 data[0];
+};
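+
+/*
+ * Illustrative sketch (not part of the original file): walking the TLV
+ * data that follows the header.  Each TLV advances the cursor by its
+ * type/length header plus the payload length rounded up to 4 bytes:
+ *
+ *	const u8 *pos = hdr->data, *end = pos + data_len;
+ *
+ *	while (pos + sizeof(struct iwl_ucode_tlv) <= end) {
+ *		const struct iwl_ucode_tlv *tlv = (const void *)pos;
+ *		u32 len = le32_to_cpu(tlv->length);
+ *
+ *		... dispatch on le32_to_cpu(tlv->type) here ...
+ *		pos += sizeof(*tlv) + ALIGN(len, 4);
+ *	}
+ */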
+
+/*
+ * ucode TLVs
+ *
+ * These structures carry the API flags and capabilities parsed from the ucode binary files.
+ */
+struct iwl_ucode_api {
+	__le32 api_index;
+	__le32 api_flags;
+} __packed;
+
+struct iwl_ucode_capa {
+	__le32 api_index;
+	__le32 api_capa;
+} __packed;
+
+/**
+ * enum iwl_ucode_tlv_flag - ucode API flags
+ * @IWL_UCODE_TLV_FLAGS_PAN: This is PAN capable microcode; this previously
+ *	was a separate TLV but moved here to save space.
+ * @IWL_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behavior on hidden SSID,
+ *	treats good CRC threshold as a boolean
+ * @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
+ * @IWL_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of black list instead of 64 in scan
+ *	offload profile config command.
+ * @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six
+ *	(rather than two) IPv6 addresses
+ * @IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID: not sending a probe with the SSID element
+ *	from the probe request template.
+ * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version)
+ * @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
+ * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD
+ * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
+ * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
+ * @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS.
+ */
+enum iwl_ucode_tlv_flag {
+	IWL_UCODE_TLV_FLAGS_PAN			= BIT(0),
+	IWL_UCODE_TLV_FLAGS_NEWSCAN		= BIT(1),
+	IWL_UCODE_TLV_FLAGS_MFP			= BIT(2),
+	IWL_UCODE_TLV_FLAGS_SHORT_BL		= BIT(7),
+	IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS	= BIT(10),
+	IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID	= BIT(12),
+	IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL	= BIT(15),
+	IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE	= BIT(16),
+	IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT	= BIT(24),
+	IWL_UCODE_TLV_FLAGS_EBS_SUPPORT		= BIT(25),
+	IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD	= BIT(26),
+	IWL_UCODE_TLV_FLAGS_BCAST_FILTERING	= BIT(29),
+};
+
+typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
+
+/**
+ * enum iwl_ucode_tlv_api - ucode api
+ * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
+ *	longer than the passive one, which is essential for fragmented scan.
+ * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source.
+ * @IWL_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params
+ * @IWL_UCODE_TLV_API_NEW_VERSION: new versioning format
+ * @IWL_UCODE_TLV_API_SCAN_TSF_REPORT: Scan start time reported in scan
+ *	iteration complete notification, and the timestamp reported for RX
+ *	received during scan, are reported in the TSF of the MAC specified in
+ *	the scan request.
+ * @IWL_UCODE_TLV_API_TKIP_MIC_KEYS: This ucode supports version 2 of
+ *	ADD_MODIFY_STA_KEY_API_S_VER_2.
+ * @IWL_UCODE_TLV_API_STA_TYPE: This ucode supports station type assignment.
+ * @IWL_UCODE_TLV_API_NAN2_VER2: This ucode supports NAN API version 2
+ * @IWL_UCODE_TLV_API_NEW_RX_STATS: the new RX STATISTICS API should be used
+ * @IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY: Quota command includes a field
+ *	indicating low latency direction.
+ * @IWL_UCODE_TLV_API_DEPRECATE_TTAK: RX status flag TTAK ok (bit 7) is
+ *	deprecated.
+ * @IWL_UCODE_TLV_API_ADAPTIVE_DWELL_V2: This ucode supports version 8
+ *	of scan request: SCAN_REQUEST_CMD_UMAC_API_S_VER_8
+ *
+ * @NUM_IWL_UCODE_TLV_API: number of bits used
+ */
+enum iwl_ucode_tlv_api {
+	/* API Set 0 */
+	IWL_UCODE_TLV_API_FRAGMENTED_SCAN	= (__force iwl_ucode_tlv_api_t)8,
+	IWL_UCODE_TLV_API_WIFI_MCC_UPDATE	= (__force iwl_ucode_tlv_api_t)9,
+	IWL_UCODE_TLV_API_LQ_SS_PARAMS		= (__force iwl_ucode_tlv_api_t)18,
+	IWL_UCODE_TLV_API_NEW_VERSION		= (__force iwl_ucode_tlv_api_t)20,
+	IWL_UCODE_TLV_API_SCAN_TSF_REPORT	= (__force iwl_ucode_tlv_api_t)28,
+	IWL_UCODE_TLV_API_TKIP_MIC_KEYS		= (__force iwl_ucode_tlv_api_t)29,
+	IWL_UCODE_TLV_API_STA_TYPE		= (__force iwl_ucode_tlv_api_t)30,
+	IWL_UCODE_TLV_API_NAN2_VER2		= (__force iwl_ucode_tlv_api_t)31,
+	/* API Set 1 */
+	IWL_UCODE_TLV_API_ADAPTIVE_DWELL	= (__force iwl_ucode_tlv_api_t)32,
+	IWL_UCODE_TLV_API_OCE			= (__force iwl_ucode_tlv_api_t)33,
+	IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE	= (__force iwl_ucode_tlv_api_t)34,
+	IWL_UCODE_TLV_API_NEW_RX_STATS		= (__force iwl_ucode_tlv_api_t)35,
+	IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY	= (__force iwl_ucode_tlv_api_t)38,
+	IWL_UCODE_TLV_API_DEPRECATE_TTAK	= (__force iwl_ucode_tlv_api_t)41,
+	IWL_UCODE_TLV_API_ADAPTIVE_DWELL_V2	= (__force iwl_ucode_tlv_api_t)42,
+
+	NUM_IWL_UCODE_TLV_API
+#ifdef __CHECKER__
+		/* sparse says it cannot increment the previous enum member */
+		= 128
+#endif
+};
+
+typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
+
+/**
+ * enum iwl_ucode_tlv_capa - ucode capabilities
+ * @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3
+ * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT: supports Location Aware Regulatory
+ * @IWL_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan.
+ * @IWL_UCODE_TLV_CAPA_BEAMFORMER: supports Beamformer
+ * @IWL_UCODE_TLV_CAPA_TOF_SUPPORT: supports Time of Flight (802.11mc FTM)
+ * @IWL_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality
+ * @IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current
+ *	tx power value into TPC Report action frame and Link Measurement Report
+ *	action frame
+ * @IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT: supports updating current
+ *	channel in DS parameter set element in probe requests.
+ * @IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT: supports adding TPC Report IE in
+ *	probe requests.
+ * @IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests
+ * @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA),
+ *	which also implies support for the scheduler configuration command
+ * @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
+ * @IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG: Consolidated D3-D0 image
+ * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
+ * @IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT: supports the DC2DC Config command
+ * @IWL_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload
+ * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
+ * @IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD: supports U-APSD on p2p interface when it
+ *	is standalone or with a BSS station interface in the same binding.
+ * @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running
+ * @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different
+ *	sources for the MCC. This TLV bit is a future replacement to
+ *	IWL_UCODE_TLV_API_WIFI_MCC_UPDATE. When either is set, multi-source LAR
+ *	is supported.
+ * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
+ * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan (no longer used)
+ * @IWL_UCODE_TLV_CAPA_STA_PM_NOTIF: firmware will send STA PM notification
+ * @IWL_UCODE_TLV_CAPA_TLC_OFFLOAD: firmware implements rate scaling algorithm
+ * @IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA: firmware implements dynamic quota allocation
+ * @IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement
+ * @IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts
+ * @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT
+ * @IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD: the firmware supports CSA
+ *	countdown offloading. Beacon notifications are not sent to the host.
+ *	The fw also offloads TBTT alignment.
+ * @IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION: firmware will decide on what
+ *	antenna the beacon should be transmitted
+ * @IWL_UCODE_TLV_CAPA_BEACON_STORING: firmware will store the latest beacon
+ *	from AP and will send it upon d0i3 exit.
+ * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2: support LAR API V2
+ * @IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW: firmware responsible for CT-kill
+ * @IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT: supports temperature
+ *	thresholds reporting
+ * @IWL_UCODE_TLV_CAPA_CTDP_SUPPORT: supports cTDP command
+ * @IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED: supports usniffer enabled in
+ *	regular image.
+ * @IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG: support getting more shared
+ *	memory addresses from the firmware.
+ * @IWL_UCODE_TLV_CAPA_LQM_SUPPORT: supports Link Quality Measurement
+ * @IWL_UCODE_TLV_CAPA_TX_POWER_ACK: reduced TX power API has larger
+ *	command size (command version 4) that supports toggling ACK TX
+ *	power reduction.
+ * @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload
+ *
+ * @NUM_IWL_UCODE_TLV_CAPA: number of bits used
+ */
+enum iwl_ucode_tlv_capa {
+	IWL_UCODE_TLV_CAPA_D0I3_SUPPORT			= (__force iwl_ucode_tlv_capa_t)0,
+	IWL_UCODE_TLV_CAPA_LAR_SUPPORT			= (__force iwl_ucode_tlv_capa_t)1,
+	IWL_UCODE_TLV_CAPA_UMAC_SCAN			= (__force iwl_ucode_tlv_capa_t)2,
+	IWL_UCODE_TLV_CAPA_BEAMFORMER			= (__force iwl_ucode_tlv_capa_t)3,
+	IWL_UCODE_TLV_CAPA_TOF_SUPPORT                  = (__force iwl_ucode_tlv_capa_t)5,
+	IWL_UCODE_TLV_CAPA_TDLS_SUPPORT			= (__force iwl_ucode_tlv_capa_t)6,
+	IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT	= (__force iwl_ucode_tlv_capa_t)8,
+	IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT	= (__force iwl_ucode_tlv_capa_t)9,
+	IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT	= (__force iwl_ucode_tlv_capa_t)10,
+	IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT		= (__force iwl_ucode_tlv_capa_t)11,
+	IWL_UCODE_TLV_CAPA_DQA_SUPPORT			= (__force iwl_ucode_tlv_capa_t)12,
+	IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH		= (__force iwl_ucode_tlv_capa_t)13,
+	IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG		= (__force iwl_ucode_tlv_capa_t)17,
+	IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT		= (__force iwl_ucode_tlv_capa_t)18,
+	IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT		= (__force iwl_ucode_tlv_capa_t)19,
+	IWL_UCODE_TLV_CAPA_CSUM_SUPPORT			= (__force iwl_ucode_tlv_capa_t)21,
+	IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS		= (__force iwl_ucode_tlv_capa_t)22,
+	IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD		= (__force iwl_ucode_tlv_capa_t)26,
+	IWL_UCODE_TLV_CAPA_BT_COEX_PLCR			= (__force iwl_ucode_tlv_capa_t)28,
+	IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC		= (__force iwl_ucode_tlv_capa_t)29,
+	IWL_UCODE_TLV_CAPA_BT_COEX_RRC			= (__force iwl_ucode_tlv_capa_t)30,
+	IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT		= (__force iwl_ucode_tlv_capa_t)31,
+	IWL_UCODE_TLV_CAPA_STA_PM_NOTIF			= (__force iwl_ucode_tlv_capa_t)38,
+	IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT		= (__force iwl_ucode_tlv_capa_t)39,
+	IWL_UCODE_TLV_CAPA_CDB_SUPPORT			= (__force iwl_ucode_tlv_capa_t)40,
+	IWL_UCODE_TLV_CAPA_D0I3_END_FIRST		= (__force iwl_ucode_tlv_capa_t)41,
+	IWL_UCODE_TLV_CAPA_TLC_OFFLOAD                  = (__force iwl_ucode_tlv_capa_t)43,
+	IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA                = (__force iwl_ucode_tlv_capa_t)44,
+	IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE		= (__force iwl_ucode_tlv_capa_t)64,
+	IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS		= (__force iwl_ucode_tlv_capa_t)65,
+	IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT		= (__force iwl_ucode_tlv_capa_t)67,
+	IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT	= (__force iwl_ucode_tlv_capa_t)68,
+	IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD		= (__force iwl_ucode_tlv_capa_t)70,
+	IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION		= (__force iwl_ucode_tlv_capa_t)71,
+	IWL_UCODE_TLV_CAPA_BEACON_STORING		= (__force iwl_ucode_tlv_capa_t)72,
+	IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2		= (__force iwl_ucode_tlv_capa_t)73,
+	IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW		= (__force iwl_ucode_tlv_capa_t)74,
+	IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT	= (__force iwl_ucode_tlv_capa_t)75,
+	IWL_UCODE_TLV_CAPA_CTDP_SUPPORT			= (__force iwl_ucode_tlv_capa_t)76,
+	IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED		= (__force iwl_ucode_tlv_capa_t)77,
+	IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG	= (__force iwl_ucode_tlv_capa_t)80,
+	IWL_UCODE_TLV_CAPA_LQM_SUPPORT			= (__force iwl_ucode_tlv_capa_t)81,
+	IWL_UCODE_TLV_CAPA_TX_POWER_ACK			= (__force iwl_ucode_tlv_capa_t)84,
+	IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT		= (__force iwl_ucode_tlv_capa_t)86,
+	IWL_UCODE_TLV_CAPA_MLME_OFFLOAD			= (__force iwl_ucode_tlv_capa_t)96,
+
+	NUM_IWL_UCODE_TLV_CAPA
+#ifdef __CHECKER__
+		/* sparse says it cannot increment the previous enum member */
+		= 128
+#endif
+};
+
+/* The default calibrate table size if not specified by firmware file */
+#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE	18
+#define IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE		19
+#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE			253
+
+/* The default max probe length if not specified by the firmware file */
+#define IWL_DEFAULT_MAX_PROBE_LENGTH	200
+
+/*
+ * For 16.0 uCode and above, there is no differentiation between sections,
+ * just an offset to the HW address.
+ */
+#define CPU1_CPU2_SEPARATOR_SECTION	0xFFFFCCCC
+#define PAGING_SEPARATOR_SECTION	0xAAAABBBB
+
+/* uCode version contains 4 values: Major/Minor/API/Serial */
+#define IWL_UCODE_MAJOR(ver)	(((ver) & 0xFF000000) >> 24)
+#define IWL_UCODE_MINOR(ver)	(((ver) & 0x00FF0000) >> 16)
+#define IWL_UCODE_API(ver)	(((ver) & 0x0000FF00) >> 8)
+#define IWL_UCODE_SERIAL(ver)	((ver) & 0x000000FF)
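+
+/*
+ * Illustrative sketch (not part of the original file): unpacking a raw
+ * version word with the macros above.  For example, ver = 0x24010a00
+ * decodes as major 36, minor 1, API 10, serial 0:
+ *
+ *	u32 ver = le32_to_cpu(ucode->ver);
+ *
+ *	pr_info("uCode %u.%u API %u serial %u\n",
+ *		IWL_UCODE_MAJOR(ver), IWL_UCODE_MINOR(ver),
+ *		IWL_UCODE_API(ver), IWL_UCODE_SERIAL(ver));
+ */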
+
+/**
+ * struct iwl_tlv_calib_ctrl - Calibration control struct.
+ * Sent as part of the phy configuration command.
+ * @flow_trigger: bitmap for which calibrations to perform according to
+ *		flow triggers.
+ * @event_trigger: bitmap for which calibrations to perform according to
+ *		event triggers.
+ */
+struct iwl_tlv_calib_ctrl {
+	__le32 flow_trigger;
+	__le32 event_trigger;
+} __packed;
+
+enum iwl_fw_phy_cfg {
+	FW_PHY_CFG_RADIO_TYPE_POS = 0,
+	FW_PHY_CFG_RADIO_TYPE = 0x3 << FW_PHY_CFG_RADIO_TYPE_POS,
+	FW_PHY_CFG_RADIO_STEP_POS = 2,
+	FW_PHY_CFG_RADIO_STEP = 0x3 << FW_PHY_CFG_RADIO_STEP_POS,
+	FW_PHY_CFG_RADIO_DASH_POS = 4,
+	FW_PHY_CFG_RADIO_DASH = 0x3 << FW_PHY_CFG_RADIO_DASH_POS,
+	FW_PHY_CFG_TX_CHAIN_POS = 16,
+	FW_PHY_CFG_TX_CHAIN = 0xf << FW_PHY_CFG_TX_CHAIN_POS,
+	FW_PHY_CFG_RX_CHAIN_POS = 20,
+	FW_PHY_CFG_RX_CHAIN = 0xf << FW_PHY_CFG_RX_CHAIN_POS,
+	FW_PHY_CFG_SHARED_CLK = BIT(31),
+};
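+
+/*
+ * Illustrative sketch (not part of the original file): extracting the
+ * bit-fields above from a phy_config word:
+ *
+ *	u32 radio_type = (phy_config & FW_PHY_CFG_RADIO_TYPE) >>
+ *			 FW_PHY_CFG_RADIO_TYPE_POS;
+ *	u32 tx_chains = (phy_config & FW_PHY_CFG_TX_CHAIN) >>
+ *			FW_PHY_CFG_TX_CHAIN_POS;
+ *	u32 rx_chains = (phy_config & FW_PHY_CFG_RX_CHAIN) >>
+ *			FW_PHY_CFG_RX_CHAIN_POS;
+ */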
+
+#define IWL_UCODE_MAX_CS		1
+
+/**
+ * struct iwl_fw_cipher_scheme - a cipher scheme supported by FW.
+ * @cipher: a cipher suite selector
+ * @flags: cipher scheme flags (currently reserved for future use)
+ * @hdr_len: size of the MPDU security header
+ * @pn_len: size of the PN
+ * @pn_off: offset of the PN from the beginning of the security header
+ * @key_idx_off: offset of the key index byte in the security header
+ * @key_idx_mask: bit mask of the key_idx bits
+ * @key_idx_shift: bit shift needed to get key_idx
+ * @mic_len: MIC length in bytes
+ * @hw_cipher: HW cipher index used in host commands
+ */
+struct iwl_fw_cipher_scheme {
+	__le32 cipher;
+	u8 flags;
+	u8 hdr_len;
+	u8 pn_len;
+	u8 pn_off;
+	u8 key_idx_off;
+	u8 key_idx_mask;
+	u8 key_idx_shift;
+	u8 mic_len;
+	u8 hw_cipher;
+} __packed;
+
+enum iwl_fw_dbg_reg_operator {
+	CSR_ASSIGN,
+	CSR_SETBIT,
+	CSR_CLEARBIT,
+
+	PRPH_ASSIGN,
+	PRPH_SETBIT,
+	PRPH_CLEARBIT,
+
+	INDIRECT_ASSIGN,
+	INDIRECT_SETBIT,
+	INDIRECT_CLEARBIT,
+
+	PRPH_BLOCKBIT,
+};
+
+/**
+ * struct iwl_fw_dbg_reg_op - an operation on a register
+ *
+ * @op: &enum iwl_fw_dbg_reg_operator
+ * @addr: offset of the register
+ * @val: value
+ */
+struct iwl_fw_dbg_reg_op {
+	u8 op;
+	u8 reserved[3];
+	__le32 addr;
+	__le32 val;
+} __packed;
+
+/**
+ * enum iwl_fw_dbg_monitor_mode - available monitor recording modes
+ *
+ * @SMEM_MODE: monitor stores the data in SMEM
+ * @EXTERNAL_MODE: monitor stores the data in allocated DRAM
+ * @MARBH_MODE: monitor stores the data in MARBH buffer
+ * @MIPI_MODE: monitor outputs the data through the MIPI interface
+ */
+enum iwl_fw_dbg_monitor_mode {
+	SMEM_MODE = 0,
+	EXTERNAL_MODE = 1,
+	MARBH_MODE = 2,
+	MIPI_MODE = 3,
+};
+
+/**
+ * enum iwl_fw_mem_seg_type - memory segment type
+ * @FW_DBG_MEM_TYPE_MASK: mask for the type indication
+ * @FW_DBG_MEM_TYPE_REGULAR: regular memory
+ * @FW_DBG_MEM_TYPE_PRPH: periphery memory (requires special reading)
+ */
+enum iwl_fw_mem_seg_type {
+	FW_DBG_MEM_TYPE_MASK	= 0xff000000,
+	FW_DBG_MEM_TYPE_REGULAR	= 0x00000000,
+	FW_DBG_MEM_TYPE_PRPH	= 0x01000000,
+};
+
+/**
+ * struct iwl_fw_dbg_mem_seg_tlv - configures the debug data memory segments
+ *
+ * @data_type: the memory segment type to record, see &enum iwl_fw_mem_seg_type
+ *	for what we care about
+ * @ofs: the memory segment offset
+ * @len: the memory segment length, in bytes
+ *
+ * This parses IWL_UCODE_TLV_FW_MEM_SEG
+ */
+struct iwl_fw_dbg_mem_seg_tlv {
+	__le32 data_type;
+	__le32 ofs;
+	__le32 len;
+} __packed;
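+
+/*
+ * Illustrative sketch (not part of the original file): classifying a
+ * parsed segment from its @data_type field using the mask above:
+ *
+ *	u32 type = le32_to_cpu(seg->data_type) & FW_DBG_MEM_TYPE_MASK;
+ *	bool is_prph = type == FW_DBG_MEM_TYPE_PRPH;
+ */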
+
+/**
+ * struct iwl_fw_dbg_dest_tlv_v1 - configures the destination of the debug data
+ *
+ * @version: version of the TLV - currently 0
+ * @monitor_mode: &enum iwl_fw_dbg_monitor_mode
+ * @size_power: buffer size will be 2^(size_power + 11)
+ * @base_reg: addr of the base addr register (PRPH)
+ * @end_reg:  addr of the end addr register (PRPH)
+ * @write_ptr_reg: the addr of the reg of the write pointer
+ * @wrap_count: the addr of the reg of the wrap_count
+ * @base_shift: shift right of the base addr reg
+ * @end_shift: shift right of the end addr reg
+ * @reg_ops: array of registers operations
+ *
+ * This parses IWL_UCODE_TLV_FW_DBG_DEST
+ */
+struct iwl_fw_dbg_dest_tlv_v1 {
+	u8 version;
+	u8 monitor_mode;
+	u8 size_power;
+	u8 reserved;
+	__le32 base_reg;
+	__le32 end_reg;
+	__le32 write_ptr_reg;
+	__le32 wrap_count;
+	u8 base_shift;
+	u8 end_shift;
+	struct iwl_fw_dbg_reg_op reg_ops[0];
+} __packed;
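+
+/*
+ * Worked example for @size_power above: the monitor buffer size is
+ * 2^(size_power + 11) bytes, so size_power = 0 gives the 2 KiB minimum
+ * and size_power = 6 gives 2^17 = 128 KiB.
+ */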
+
+/* Mask of the register for defining the LDBG MAC2SMEM buffer SMEM size */
+#define IWL_LDBG_M2S_BUF_SIZE_MSK	0x0fff0000
+/* Mask of the register for defining the LDBG MAC2SMEM SMEM base address */
+#define IWL_LDBG_M2S_BUF_BA_MSK		0x00000fff
+/* The smem buffer chunks are in units of 256 bits */
+#define IWL_M2S_UNIT_SIZE			0x100
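+
+/*
+ * Illustrative sketch (an assumption about intended use, not from the
+ * original file): the size field would presumably be scaled by the unit
+ * size to recover the SMEM buffer size, roughly
+ *
+ *	((reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >> 16) * IWL_M2S_UNIT_SIZE
+ */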
+
+struct iwl_fw_dbg_dest_tlv {
+	u8 version;
+	u8 monitor_mode;
+	u8 size_power;
+	u8 reserved;
+	__le32 cfg_reg;
+	__le32 write_ptr_reg;
+	__le32 wrap_count;
+	u8 base_shift;
+	u8 size_shift;
+	struct iwl_fw_dbg_reg_op reg_ops[0];
+} __packed;
+
+struct iwl_fw_dbg_conf_hcmd {
+	u8 id;
+	u8 reserved;
+	__le16 len;
+	u8 data[0];
+} __packed;
+
+/**
+ * enum iwl_fw_dbg_trigger_mode - triggers functionalities
+ *
+ * @IWL_FW_DBG_TRIGGER_START: when trigger occurs re-conf the dbg mechanism
+ * @IWL_FW_DBG_TRIGGER_STOP: when trigger occurs pull the dbg data
+ * @IWL_FW_DBG_TRIGGER_MONITOR_ONLY: when trigger occurs trigger is set to
+ *	collect only monitor data
+ */
+enum iwl_fw_dbg_trigger_mode {
+	IWL_FW_DBG_TRIGGER_START = BIT(0),
+	IWL_FW_DBG_TRIGGER_STOP = BIT(1),
+	IWL_FW_DBG_TRIGGER_MONITOR_ONLY = BIT(2),
+};
+
+/**
+ * enum iwl_fw_dbg_trigger_flags - the flags supported by wrt triggers
+ * @IWL_FW_DBG_FORCE_RESTART: force a firmware restart
+ */
+enum iwl_fw_dbg_trigger_flags {
+	IWL_FW_DBG_FORCE_RESTART = BIT(0),
+};
+
+/**
+ * enum iwl_fw_dbg_trigger_vif_type - define the VIF type for a trigger
+ * @IWL_FW_DBG_CONF_VIF_ANY: any vif type
+ * @IWL_FW_DBG_CONF_VIF_IBSS: IBSS mode
+ * @IWL_FW_DBG_CONF_VIF_STATION: BSS mode
+ * @IWL_FW_DBG_CONF_VIF_AP: AP mode
+ * @IWL_FW_DBG_CONF_VIF_P2P_CLIENT: P2P Client mode
+ * @IWL_FW_DBG_CONF_VIF_P2P_GO: P2P GO mode
+ * @IWL_FW_DBG_CONF_VIF_P2P_DEVICE: P2P device
+ */
+enum iwl_fw_dbg_trigger_vif_type {
+	IWL_FW_DBG_CONF_VIF_ANY = NL80211_IFTYPE_UNSPECIFIED,
+	IWL_FW_DBG_CONF_VIF_IBSS = NL80211_IFTYPE_ADHOC,
+	IWL_FW_DBG_CONF_VIF_STATION = NL80211_IFTYPE_STATION,
+	IWL_FW_DBG_CONF_VIF_AP = NL80211_IFTYPE_AP,
+	IWL_FW_DBG_CONF_VIF_P2P_CLIENT = NL80211_IFTYPE_P2P_CLIENT,
+	IWL_FW_DBG_CONF_VIF_P2P_GO = NL80211_IFTYPE_P2P_GO,
+	IWL_FW_DBG_CONF_VIF_P2P_DEVICE = NL80211_IFTYPE_P2P_DEVICE,
+};
+
+/**
+ * struct iwl_fw_dbg_trigger_tlv - a TLV that describes the trigger
+ * @id: &enum iwl_fw_dbg_trigger
+ * @vif_type: &enum iwl_fw_dbg_trigger_vif_type
+ * @stop_conf_ids: bitmap of configurations this trigger relates to.
+ *	if the mode is %IWL_FW_DBG_TRIGGER_STOP, then if the bit corresponding
+ *	to the currently running configuration is set, the data should be
+ *	collected.
+ * @stop_delay: how many milliseconds to wait before collecting the data
+ *	after the STOP trigger fires.
+ * @mode: &enum iwl_fw_dbg_trigger_mode - can be stop, start, or both
+ * @start_conf_id: if mode is %IWL_FW_DBG_TRIGGER_START, this defines what
+ *	configuration should be applied when the trigger kicks in.
+ * @occurrences: number of occurrences. 0 means the trigger will never fire.
+ * @trig_dis_ms: the time, in milliseconds, after an occurrence of this
+ *	trigger in which another occurrence should be ignored.
+ * @flags: &enum iwl_fw_dbg_trigger_flags
+ */
+struct iwl_fw_dbg_trigger_tlv {
+	__le32 id;
+	__le32 vif_type;
+	__le32 stop_conf_ids;
+	__le32 stop_delay;
+	u8 mode;
+	u8 start_conf_id;
+	__le16 occurrences;
+	__le16 trig_dis_ms;
+	u8 flags;
+	u8 reserved[5];
+
+	u8 data[0];
+} __packed;
+
+#define FW_DBG_START_FROM_ALIVE	0
+#define FW_DBG_CONF_MAX		32
+#define FW_DBG_INVALID		0xff
+
+/**
+ * struct iwl_fw_dbg_trigger_missed_bcon - configures trigger for missed beacons
+ * @stop_consec_missed_bcon: stop recording if threshold is crossed.
+ * @stop_consec_missed_bcon_since_rx: stop recording if threshold is crossed.
+ * @start_consec_missed_bcon: start recording if threshold is crossed.
+ * @start_consec_missed_bcon_since_rx: start recording if threshold is crossed.
+ * @reserved1: reserved
+ * @reserved2: reserved
+ */
+struct iwl_fw_dbg_trigger_missed_bcon {
+	__le32 stop_consec_missed_bcon;
+	__le32 stop_consec_missed_bcon_since_rx;
+	__le32 reserved2[2];
+	__le32 start_consec_missed_bcon;
+	__le32 start_consec_missed_bcon_since_rx;
+	__le32 reserved1[2];
+} __packed;
+
+/**
+ * struct iwl_fw_dbg_trigger_cmd - configures trigger for messages from FW.
+ * @cmds: the list of commands to trigger the collection on
+ */
+struct iwl_fw_dbg_trigger_cmd {
+	struct cmd {
+		u8 cmd_id;
+		u8 group_id;
+	} __packed cmds[16];
+} __packed;
+
+/**
+ * struct iwl_fw_dbg_trigger_stats - configures trigger for statistics
+ * @stop_offset: the offset of the value to be monitored
+ * @stop_threshold: the threshold above which to collect
+ * @start_offset: the offset of the value to be monitored
+ * @start_threshold: the threshold above which to start recording
+ */
+struct iwl_fw_dbg_trigger_stats {
+	__le32 stop_offset;
+	__le32 stop_threshold;
+	__le32 start_offset;
+	__le32 start_threshold;
+} __packed;
+
+/**
+ * struct iwl_fw_dbg_trigger_low_rssi - trigger for low beacon RSSI
+ * @rssi: RSSI value to trigger at
+ */
+struct iwl_fw_dbg_trigger_low_rssi {
+	__le32 rssi;
+} __packed;
+
+/**
+ * struct iwl_fw_dbg_trigger_mlme - configures trigger for mlme events
+ * @stop_auth_denied: number of denied authentication to collect
+ * @stop_auth_timeout: number of authentication timeout to collect
+ * @stop_rx_deauth: number of Rx deauth to collect
+ * @stop_tx_deauth: number of Tx deauth to collect
+ * @stop_assoc_denied: number of denied association to collect
+ * @stop_assoc_timeout: number of association timeout to collect
+ * @stop_connection_loss: number of connection loss to collect
+ * @start_auth_denied: number of denied authentication to start recording
+ * @start_auth_timeout: number of authentication timeout to start recording
+ * @start_rx_deauth: number of Rx deauth to start recording
+ * @start_tx_deauth: number of Tx deauth to start recording
+ * @start_assoc_denied: number of denied association to start recording
+ * @start_assoc_timeout: number of association timeout to start recording
+ * @start_connection_loss: number of connection loss to start recording
+ */
+struct iwl_fw_dbg_trigger_mlme {
+	u8 stop_auth_denied;
+	u8 stop_auth_timeout;
+	u8 stop_rx_deauth;
+	u8 stop_tx_deauth;
+
+	u8 stop_assoc_denied;
+	u8 stop_assoc_timeout;
+	u8 stop_connection_loss;
+	u8 reserved;
+
+	u8 start_auth_denied;
+	u8 start_auth_timeout;
+	u8 start_rx_deauth;
+	u8 start_tx_deauth;
+
+	u8 start_assoc_denied;
+	u8 start_assoc_timeout;
+	u8 start_connection_loss;
+	u8 reserved2;
+} __packed;
+
+/**
+ * struct iwl_fw_dbg_trigger_txq_timer - configures the Tx queue's timer
+ * @command_queue: timeout for the command queue in ms
+ * @bss: timeout for the queues of a BSS (except for TDLS queues) in ms
+ * @softap: timeout for the queues of a softAP in ms
+ * @p2p_go: timeout for the queues of a P2P GO in ms
+ * @p2p_client: timeout for the queues of a P2P client in ms
+ * @p2p_device: timeout for the queues of a P2P device in ms
+ * @ibss: timeout for the queues of an IBSS in ms
+ * @tdls: timeout for the queues of a TDLS station in ms
+ */
+struct iwl_fw_dbg_trigger_txq_timer {
+	__le32 command_queue;
+	__le32 bss;
+	__le32 softap;
+	__le32 p2p_go;
+	__le32 p2p_client;
+	__le32 p2p_device;
+	__le32 ibss;
+	__le32 tdls;
+	__le32 reserved[4];
+} __packed;
+
+/**
+ * struct iwl_fw_dbg_trigger_time_event - configures a time event trigger
+ * @time_events: a list of tuples <id, action_bitmap, status_bitmap>. The driver will issue a
+ *	trigger each time a time event notification that relates to time event
+ *	id with one of the actions in the bitmap is received and
+ *	BIT(notif->status) is set in status_bitmap.
+ *
+ */
+struct iwl_fw_dbg_trigger_time_event {
+	struct {
+		__le32 id;
+		__le32 action_bitmap;
+		__le32 status_bitmap;
+	} __packed time_events[16];
+} __packed;
+
+/**
+ * struct iwl_fw_dbg_trigger_ba - configures BlockAck related trigger
+ * @rx_ba_start: tid bitmap to configure on what tid the trigger should occur
+ *	when an Rx BlockAck session is started.
+ * @rx_ba_stop: tid bitmap to configure on what tid the trigger should occur
+ *	when an Rx BlockAck session is stopped.
+ * @tx_ba_start: tid bitmap to configure on what tid the trigger should occur
+ *	when a Tx BlockAck session is started.
+ * @tx_ba_stop: tid bitmap to configure on what tid the trigger should occur
+ *	when a Tx BlockAck session is stopped.
+ * @rx_bar: tid bitmap to configure on what tid the trigger should occur
+ *	when a BAR is received (for a Tx BlockAck session).
+ * @tx_bar: tid bitmap to configure on what tid the trigger should occur
+ *	when a BAR is sent (for an Rx BlockAck session).
+ * @frame_timeout: tid bitmap to configure on what tid the trigger should occur
+ *	when a frame times out in the reordering buffer.
+ */
+struct iwl_fw_dbg_trigger_ba {
+	__le16 rx_ba_start;
+	__le16 rx_ba_stop;
+	__le16 tx_ba_start;
+	__le16 tx_ba_stop;
+	__le16 rx_bar;
+	__le16 tx_bar;
+	__le16 frame_timeout;
+} __packed;
+
+/**
+ * struct iwl_fw_dbg_trigger_tdls - configures trigger for TDLS events.
+ * @action_bitmap: the TDLS action to trigger the collection upon
+ * @peer_mode: trigger on specific peer or all
+ * @peer: the TDLS peer to trigger the collection on
+ */
+struct iwl_fw_dbg_trigger_tdls {
+	u8 action_bitmap;
+	u8 peer_mode;
+	u8 peer[ETH_ALEN];
+	u8 reserved[4];
+} __packed;
+
+/**
+ * struct iwl_fw_dbg_trigger_tx_status - configures trigger for tx response
+ *  status.
+ * @statuses: the list of statuses to trigger the collection on
+ */
+struct iwl_fw_dbg_trigger_tx_status {
+	struct tx_status {
+		u8 status;
+		u8 reserved[3];
+	} __packed statuses[16];
+	__le32 reserved[2];
+} __packed;
+
+/**
+ * struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration.
+ * @id: conf id
+ * @usniffer: should the uSniffer image be used
+ * @num_of_hcmds: how many host commands (HCMDs) are present here
+ * @hcmd: a variable length host command to be sent to apply the configuration.
+ *	If there is more than one HCMD to send, they will appear one after the
+ *	other and be sent in the order that they appear in.
+ * This parses IWL_UCODE_TLV_FW_DBG_CONF. The user can add up to
+ * %FW_DBG_CONF_MAX configurations per run.
+ */
+struct iwl_fw_dbg_conf_tlv {
+	u8 id;
+	u8 usniffer;
+	u8 reserved;
+	u8 num_of_hcmds;
+	struct iwl_fw_dbg_conf_hcmd hcmd;
+} __packed;
+
+#endif  /* __iwl_fw_file_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
new file mode 100644
index 0000000..0861b97
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -0,0 +1,305 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016        Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016        Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __iwl_fw_img_h__
+#define __iwl_fw_img_h__
+#include <linux/types.h>
+
+#include "file.h"
+#include "error-dump.h"
+
+/**
+ * enum iwl_ucode_type
+ *
+ * The type of ucode.
+ *
+ * @IWL_UCODE_REGULAR: Normal runtime ucode
+ * @IWL_UCODE_INIT: Initial ucode
+ * @IWL_UCODE_WOWLAN: Wake on Wireless enabled ucode
+ * @IWL_UCODE_REGULAR_USNIFFER: Normal runtime ucode when using usniffer image
+ */
+enum iwl_ucode_type {
+	IWL_UCODE_REGULAR,
+	IWL_UCODE_INIT,
+	IWL_UCODE_WOWLAN,
+	IWL_UCODE_REGULAR_USNIFFER,
+	IWL_UCODE_TYPE_MAX,
+};
+
+/*
+ * Enumeration of ucode sections.
+ * This enumeration is used directly for older firmware (before 16.0).
+ * For new firmware, there can be up to 4 sections (see below) but the
+ * first one packaged into the firmware file is the DATA section and
+ * some debugging code accesses that.
+ */
+enum iwl_ucode_sec {
+	IWL_UCODE_SECTION_DATA,
+	IWL_UCODE_SECTION_INST,
+};
+
+struct iwl_ucode_capabilities {
+	u32 max_probe_length;
+	u32 n_scan_channels;
+	u32 standard_phy_calibration_size;
+	u32 flags;
+	unsigned long _api[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_API)];
+	unsigned long _capa[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_CAPA)];
+};
+
+static inline bool
+fw_has_api(const struct iwl_ucode_capabilities *capabilities,
+	   iwl_ucode_tlv_api_t api)
+{
+	return test_bit((__force long)api, capabilities->_api);
+}
+
+static inline bool
+fw_has_capa(const struct iwl_ucode_capabilities *capabilities,
+	    iwl_ucode_tlv_capa_t capa)
+{
+	return test_bit((__force long)capa, capabilities->_capa);
+}
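+
+/*
+ * Illustrative sketch (not part of the original file): gating a feature
+ * on the parsed capability bits with the helpers above:
+ *
+ *	if (fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT))
+ *		... enable Location Aware Regulatory handling ...
+ */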
+
+/* one for each uCode image (inst/data, init/runtime/wowlan) */
+struct fw_desc {
+	const void *data;	/* vmalloc'ed data */
+	u32 len;		/* size in bytes */
+	u32 offset;		/* offset in the device */
+};
+
+struct fw_img {
+	struct fw_desc *sec;
+	int num_sec;
+	bool is_dual_cpus;
+	u32 paging_mem_size;
+};
+
+/*
+ * Block paging calculations
+ */
+#define PAGE_2_EXP_SIZE 12 /* 4K == 2^12 */
+#define FW_PAGING_SIZE BIT(PAGE_2_EXP_SIZE) /* page size is 4KB */
+#define PAGE_PER_GROUP_2_EXP_SIZE 3
+/* 8 pages per group */
+#define NUM_OF_PAGE_PER_GROUP BIT(PAGE_PER_GROUP_2_EXP_SIZE)
+/* don't change, support only 32KB size */
+#define PAGING_BLOCK_SIZE (NUM_OF_PAGE_PER_GROUP * FW_PAGING_SIZE)
+/* 32K == 2^15 */
+#define BLOCK_2_EXP_SIZE (PAGE_2_EXP_SIZE + PAGE_PER_GROUP_2_EXP_SIZE)
+
+/*
+ * Image paging calculations
+ */
+#define BLOCK_PER_IMAGE_2_EXP_SIZE 5
+/* 2^5 == 32 blocks per image */
+#define NUM_OF_BLOCK_PER_IMAGE BIT(BLOCK_PER_IMAGE_2_EXP_SIZE)
+/* maximum image size 1024KB */
+#define MAX_PAGING_IMAGE_SIZE (NUM_OF_BLOCK_PER_IMAGE * PAGING_BLOCK_SIZE)
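+
+/*
+ * Putting the numbers above together: a page is 4 KiB (2^12), a block
+ * groups 8 pages (32 KiB, 2^15), and an image spans up to 32 blocks,
+ * so MAX_PAGING_IMAGE_SIZE = 32 * 32 KiB = 1024 KiB (2^20 bytes).
+ */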
+
+/* Virtual address signature */
+#define PAGING_ADDR_SIG 0xAA000000
+
+#define PAGING_CMD_IS_SECURED BIT(9)
+#define PAGING_CMD_IS_ENABLED BIT(8)
+#define PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS	0
+#define PAGING_TLV_SECURE_MASK 1
+
+/**
+ * struct iwl_fw_paging
+ * @fw_paging_phys: page phy pointer
+ * @fw_paging_block: pointer to the allocated block
+ * @fw_paging_size: page size
+ */
+struct iwl_fw_paging {
+	dma_addr_t fw_paging_phys;
+	struct page *fw_paging_block;
+	u32 fw_paging_size;
+};
+
+/**
+ * struct iwl_fw_cscheme_list - a cipher scheme list
+ * @size: a number of entries
+ * @cs: cipher scheme entries
+ */
+struct iwl_fw_cscheme_list {
+	u8 size;
+	struct iwl_fw_cipher_scheme cs[];
+} __packed;
+
+/**
+ * enum iwl_fw_type - iwlwifi firmware type
+ * @IWL_FW_DVM: DVM firmware
+ * @IWL_FW_MVM: MVM firmware
+ */
+enum iwl_fw_type {
+	IWL_FW_DVM,
+	IWL_FW_MVM,
+};
+
+/**
+ * struct iwl_fw - variables associated with the firmware
+ *
+ * @ucode_ver: ucode version from the ucode file
+ * @fw_version: firmware version string
+ * @img: ucode image like ucode_rt, ucode_init, ucode_wowlan.
+ * @iml_len: length of the image loader image
+ * @iml: image loader fw image
+ * @ucode_capa: capabilities parsed from the ucode file.
+ * @enhance_sensitivity_table: device can do enhanced sensitivity.
+ * @init_evtlog_ptr: event log offset for init ucode.
+ * @init_evtlog_size: event log size for init ucode.
+ * @init_errlog_ptr: error log offset for init ucode.
+ * @inst_evtlog_ptr: event log offset for runtime ucode.
+ * @inst_evtlog_size: event log size for runtime ucode.
+ * @inst_errlog_ptr: error log offset for runtime ucode.
+ * @type: firmware type (&enum iwl_fw_type)
+ * @cipher_scheme: optional external cipher scheme.
+ * @human_readable: human readable version string
+ * @dbg_dest_tlv: points to the destination TLV for debug
+ * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
+ * @dbg_conf_tlv_len: lengths of the @dbg_conf_tlv entries
+ * @dbg_trigger_tlv: array of pointers to triggers TLVs
+ * @dbg_trigger_tlv_len: lengths of the @dbg_trigger_tlv entries
+ * @dbg_dest_reg_num: num of reg_ops in @dbg_dest_tlv
+ */
+struct iwl_fw {
+	u32 ucode_ver;
+
+	char fw_version[ETHTOOL_FWVERS_LEN];
+
+	/* ucode images */
+	struct fw_img img[IWL_UCODE_TYPE_MAX];
+	size_t iml_len;
+	u8 *iml;
+
+	struct iwl_ucode_capabilities ucode_capa;
+	bool enhance_sensitivity_table;
+
+	u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
+	u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
+
+	struct iwl_tlv_calib_ctrl default_calib[IWL_UCODE_TYPE_MAX];
+	u32 phy_config;
+	u8 valid_tx_ant;
+	u8 valid_rx_ant;
+
+	enum iwl_fw_type type;
+
+	struct iwl_fw_cipher_scheme cs[IWL_UCODE_MAX_CS];
+	u8 human_readable[FW_VER_HUMAN_READABLE_SZ];
+
+	struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv;
+	struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
+	size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
+	struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
+	struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv;
+	size_t n_dbg_mem_tlv;
+	size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
+	u8 dbg_dest_reg_num;
+	u32 dbg_dump_mask;
+};
+
+static inline const char *get_fw_dbg_mode_string(int mode)
+{
+	switch (mode) {
+	case SMEM_MODE:
+		return "SMEM";
+	case EXTERNAL_MODE:
+		return "EXTERNAL_DRAM";
+	case MARBH_MODE:
+		return "MARBH";
+	case MIPI_MODE:
+		return "MIPI";
+	default:
+		return "UNKNOWN";
+	}
+}
+
+static inline bool
+iwl_fw_dbg_conf_usniffer(const struct iwl_fw *fw, u8 id)
+{
+	const struct iwl_fw_dbg_conf_tlv *conf_tlv = fw->dbg_conf_tlv[id];
+
+	if (!conf_tlv)
+		return false;
+
+	return conf_tlv->usniffer;
+}
+
+static inline const struct fw_img *
+iwl_get_ucode_image(const struct iwl_fw *fw, enum iwl_ucode_type ucode_type)
+{
+	if (ucode_type >= IWL_UCODE_TYPE_MAX)
+		return NULL;
+
+	return &fw->img[ucode_type];
+}
+
+#endif  /* __iwl_fw_img_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c
new file mode 100644
index 0000000..2efac30
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c
@@ -0,0 +1,90 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "iwl-drv.h"
+#include "runtime.h"
+#include "dbg.h"
+#include "debugfs.h"
+
+void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
+			const struct iwl_fw *fw,
+			const struct iwl_fw_runtime_ops *ops, void *ops_ctx,
+			struct dentry *dbgfs_dir)
+{
+	memset(fwrt, 0, sizeof(*fwrt));
+	fwrt->trans = trans;
+	fwrt->fw = fw;
+	fwrt->dev = trans->dev;
+	fwrt->dump.conf = FW_DBG_INVALID;
+	fwrt->ops = ops;
+	fwrt->ops_ctx = ops_ctx;
+	INIT_DELAYED_WORK(&fwrt->dump.wk, iwl_fw_error_dump_wk);
+	iwl_fwrt_dbgfs_register(fwrt, dbgfs_dir);
+}
+IWL_EXPORT_SYMBOL(iwl_fw_runtime_init);
+
+void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt)
+{
+	iwl_fw_suspend_timestamp(fwrt);
+}
+IWL_EXPORT_SYMBOL(iwl_fw_runtime_suspend);
+
+void iwl_fw_runtime_resume(struct iwl_fw_runtime *fwrt)
+{
+	iwl_fw_resume_timestamp(fwrt);
+}
+IWL_EXPORT_SYMBOL(iwl_fw_runtime_resume);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c b/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c
new file mode 100644
index 0000000..1096c94
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c
@@ -0,0 +1,194 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/sched.h>
+#include <linux/export.h>
+
+#include "iwl-drv.h"
+#include "notif-wait.h"
+
+
+void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_wait)
+{
+	spin_lock_init(&notif_wait->notif_wait_lock);
+	INIT_LIST_HEAD(&notif_wait->notif_waits);
+	init_waitqueue_head(&notif_wait->notif_waitq);
+}
+IWL_EXPORT_SYMBOL(iwl_notification_wait_init);
+
+bool iwl_notification_wait(struct iwl_notif_wait_data *notif_wait,
+			   struct iwl_rx_packet *pkt)
+{
+	bool triggered = false;
+
+	if (!list_empty(&notif_wait->notif_waits)) {
+		struct iwl_notification_wait *w;
+
+		spin_lock(&notif_wait->notif_wait_lock);
+		list_for_each_entry(w, &notif_wait->notif_waits, list) {
+			int i;
+			bool found = false;
+
+			/*
+			 * If it already finished (triggered) or has been
+			 * aborted, don't evaluate it again to avoid races;
+			 * otherwise the function could be called again even
+			 * though it returned true before.
+			 */
+			if (w->triggered || w->aborted)
+				continue;
+
+			for (i = 0; i < w->n_cmds; i++) {
+				u16 rec_id = WIDE_ID(pkt->hdr.group_id,
+						     pkt->hdr.cmd);
+
+				if (w->cmds[i] == rec_id ||
+				    (!iwl_cmd_groupid(w->cmds[i]) &&
+				     DEF_ID(w->cmds[i]) == rec_id)) {
+					found = true;
+					break;
+				}
+			}
+			if (!found)
+				continue;
+
+			if (!w->fn || w->fn(notif_wait, pkt, w->fn_data)) {
+				w->triggered = true;
+				triggered = true;
+			}
+		}
+		spin_unlock(&notif_wait->notif_wait_lock);
+	}
+
+	return triggered;
+}
+IWL_EXPORT_SYMBOL(iwl_notification_wait);
+
+void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait)
+{
+	struct iwl_notification_wait *wait_entry;
+
+	spin_lock(&notif_wait->notif_wait_lock);
+	list_for_each_entry(wait_entry, &notif_wait->notif_waits, list)
+		wait_entry->aborted = true;
+	spin_unlock(&notif_wait->notif_wait_lock);
+
+	wake_up_all(&notif_wait->notif_waitq);
+}
+IWL_EXPORT_SYMBOL(iwl_abort_notification_waits);
+
+void
+iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
+			   struct iwl_notification_wait *wait_entry,
+			   const u16 *cmds, int n_cmds,
+			   bool (*fn)(struct iwl_notif_wait_data *notif_wait,
+				      struct iwl_rx_packet *pkt, void *data),
+			   void *fn_data)
+{
+	if (WARN_ON(n_cmds > MAX_NOTIF_CMDS))
+		n_cmds = MAX_NOTIF_CMDS;
+
+	wait_entry->fn = fn;
+	wait_entry->fn_data = fn_data;
+	wait_entry->n_cmds = n_cmds;
+	memcpy(wait_entry->cmds, cmds, n_cmds * sizeof(u16));
+	wait_entry->triggered = false;
+	wait_entry->aborted = false;
+
+	spin_lock_bh(&notif_wait->notif_wait_lock);
+	list_add(&wait_entry->list, &notif_wait->notif_waits);
+	spin_unlock_bh(&notif_wait->notif_wait_lock);
+}
+IWL_EXPORT_SYMBOL(iwl_init_notification_wait);
+
+void iwl_remove_notification(struct iwl_notif_wait_data *notif_wait,
+			     struct iwl_notification_wait *wait_entry)
+{
+	spin_lock_bh(&notif_wait->notif_wait_lock);
+	list_del(&wait_entry->list);
+	spin_unlock_bh(&notif_wait->notif_wait_lock);
+}
+IWL_EXPORT_SYMBOL(iwl_remove_notification);
+
+int iwl_wait_notification(struct iwl_notif_wait_data *notif_wait,
+			  struct iwl_notification_wait *wait_entry,
+			  unsigned long timeout)
+{
+	int ret;
+
+	ret = wait_event_timeout(notif_wait->notif_waitq,
+				 wait_entry->triggered || wait_entry->aborted,
+				 timeout);
+
+	iwl_remove_notification(notif_wait, wait_entry);
+
+	if (wait_entry->aborted)
+		return -EIO;
+
+	/* return value is always >= 0 */
+	if (ret <= 0)
+		return -ETIMEDOUT;
+	return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_wait_notification);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h b/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h
new file mode 100644
index 0000000..368884b
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/notif-wait.h
@@ -0,0 +1,154 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_notif_wait_h__
+#define __iwl_notif_wait_h__
+
+#include <linux/wait.h>
+
+#include "iwl-trans.h"
+
+struct iwl_notif_wait_data {
+	struct list_head notif_waits;
+	spinlock_t notif_wait_lock;
+	wait_queue_head_t notif_waitq;
+};
+
+#define MAX_NOTIF_CMDS	5
+
+/**
+ * struct iwl_notification_wait - notification wait entry
+ * @list: list head for global list
+ * @fn: Function called with the notification. If the function
+ *	returns true, the wait is over, if it returns false then
+ *	the waiter stays blocked. If no function is given, any
+ *	of the listed commands will unblock the waiter.
+ * @cmds: command IDs
+ * @n_cmds: number of command IDs
+ * @triggered: waiter should be woken up
+ * @aborted: wait was aborted
+ *
+ * This structure is not used directly, to wait for a
+ * notification declare it on the stack, and call
+ * iwl_init_notification_wait() with appropriate
+ * parameters. Then do whatever will cause the ucode
+ * to notify the driver, and to wait for that then
+ * call iwl_wait_notification().
+ *
+ * Each notification is one-shot. If at some point we
+ * need to support multi-shot notifications (which
+ * can't be allocated on the stack) we need to modify
+ * the code for them.
+ */
+struct iwl_notification_wait {
+	struct list_head list;
+
+	bool (*fn)(struct iwl_notif_wait_data *notif_data,
+		   struct iwl_rx_packet *pkt, void *data);
+	void *fn_data;
+
+	u16 cmds[MAX_NOTIF_CMDS];
+	u8 n_cmds;
+	bool triggered, aborted;
+};
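+
+/*
+ * Illustrative sketch (not part of the original file) of the pattern
+ * described above; REPLY_FOO is a hypothetical notification ID:
+ *
+ *	struct iwl_notification_wait wait;
+ *	u16 cmds[] = { REPLY_FOO };
+ *	int ret;
+ *
+ *	iwl_init_notification_wait(notif_data, &wait, cmds,
+ *				   ARRAY_SIZE(cmds), NULL, NULL);
+ *	... send the command that makes the ucode send REPLY_FOO ...
+ *	ret = iwl_wait_notification(notif_data, &wait, HZ);
+ */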
+
+
+/* caller functions */
+void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_data);
+bool iwl_notification_wait(struct iwl_notif_wait_data *notif_data,
+			   struct iwl_rx_packet *pkt);
+void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_data);
+
+static inline void
+iwl_notification_notify(struct iwl_notif_wait_data *notif_data)
+{
+	wake_up_all(&notif_data->notif_waitq);
+}
+
+static inline void
+iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_data,
+			     struct iwl_rx_packet *pkt)
+{
+	if (iwl_notification_wait(notif_data, pkt))
+		iwl_notification_notify(notif_data);
+}
+
+/* user functions */
+void __acquires(wait_entry)
+iwl_init_notification_wait(struct iwl_notif_wait_data *notif_data,
+			   struct iwl_notification_wait *wait_entry,
+			   const u16 *cmds, int n_cmds,
+			   bool (*fn)(struct iwl_notif_wait_data *notif_data,
+				      struct iwl_rx_packet *pkt, void *data),
+			   void *fn_data);
+
+int __must_check __releases(wait_entry)
+iwl_wait_notification(struct iwl_notif_wait_data *notif_data,
+		      struct iwl_notification_wait *wait_entry,
+		      unsigned long timeout);
+
+void __releases(wait_entry)
+iwl_remove_notification(struct iwl_notif_wait_data *notif_data,
+			struct iwl_notification_wait *wait_entry);
+
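+/*
+ * Illustrative usage sketch. SOME_NOTIFICATION, some_filter_fn and
+ * filter_data are hypothetical placeholders, not part of this API:
+ *
+ *	static const u16 wait_cmds[] = { SOME_NOTIFICATION };
+ *	struct iwl_notification_wait wait_entry;
+ *	int ret;
+ *
+ *	iwl_init_notification_wait(notif_data, &wait_entry, wait_cmds,
+ *				   ARRAY_SIZE(wait_cmds), some_filter_fn,
+ *				   filter_data);
+ *	(trigger the ucode notification here, e.g. by sending a command)
+ *	ret = iwl_wait_notification(notif_data, &wait_entry, HZ);
+ *
+ * A non-zero return means the notification did not arrive (timeout or
+ * abort). In all cases the entry is removed by iwl_wait_notification(),
+ * so the stack variable can simply go out of scope afterwards. To bail
+ * out before waiting, use iwl_remove_notification() instead.
+ */
+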
+#endif /* __iwl_notif_wait_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/paging.c b/drivers/net/wireless/intel/iwlwifi/fw/paging.c
new file mode 100644
index 0000000..9b8dd7f
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/paging.c
@@ -0,0 +1,351 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "iwl-drv.h"
+#include "runtime.h"
+#include "fw/api/commands.h"
+
+void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt)
+{
+	int i;
+
+	if (!fwrt->fw_paging_db[0].fw_paging_block)
+		return;
+
+	for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
+		struct iwl_fw_paging *paging = &fwrt->fw_paging_db[i];
+
+		if (!paging->fw_paging_block) {
+			IWL_DEBUG_FW(fwrt,
+				     "Paging: block %d already freed, continuing to next block\n",
+				     i);
+
+			continue;
+		}
+		dma_unmap_page(fwrt->trans->dev, paging->fw_paging_phys,
+			       paging->fw_paging_size, DMA_BIDIRECTIONAL);
+
+		__free_pages(paging->fw_paging_block,
+			     get_order(paging->fw_paging_size));
+		paging->fw_paging_block = NULL;
+	}
+
+	memset(fwrt->fw_paging_db, 0, sizeof(fwrt->fw_paging_db));
+}
+IWL_EXPORT_SYMBOL(iwl_free_fw_paging);
+
+static int iwl_alloc_fw_paging_mem(struct iwl_fw_runtime *fwrt,
+				   const struct fw_img *image)
+{
+	struct page *block;
+	dma_addr_t phys = 0;
+	int blk_idx, order, num_of_pages, size;
+
+	if (fwrt->fw_paging_db[0].fw_paging_block)
+		return 0;
+
+	/* ensure PAGING_BLOCK_SIZE is exactly 2^BLOCK_2_EXP_SIZE */
+	BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);
+
+	num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
+	fwrt->num_of_paging_blk =
+		DIV_ROUND_UP(num_of_pages, NUM_OF_PAGE_PER_GROUP);
+	fwrt->num_of_pages_in_last_blk =
+		num_of_pages -
+		NUM_OF_PAGE_PER_GROUP * (fwrt->num_of_paging_blk - 1);
+
+	IWL_DEBUG_FW(fwrt,
+		     "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n",
+		     fwrt->num_of_paging_blk,
+		     fwrt->num_of_pages_in_last_blk);
+
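+	/*
+	 * Worked example (illustrative figures): a 348KB paging image is
+	 * 87 pages of FW_PAGING_SIZE (4KB); with 8 pages per group this
+	 * yields 11 blocks, the last of which holds 87 - 10 * 8 = 7 pages.
+	 */
+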
+	/*
+	 * Allocate CSS and paging blocks in dram.
+	 */
+	for (blk_idx = 0; blk_idx < fwrt->num_of_paging_blk + 1; blk_idx++) {
+		/* For CSS allocate 4KB, for others PAGING_BLOCK_SIZE (32K) */
+		size = blk_idx ? PAGING_BLOCK_SIZE : FW_PAGING_SIZE;
+		order = get_order(size);
+		block = alloc_pages(GFP_KERNEL, order);
+		if (!block) {
+			/* free all the previous pages since we failed */
+			iwl_free_fw_paging(fwrt);
+			return -ENOMEM;
+		}
+
+		fwrt->fw_paging_db[blk_idx].fw_paging_block = block;
+		fwrt->fw_paging_db[blk_idx].fw_paging_size = size;
+
+		phys = dma_map_page(fwrt->trans->dev, block, 0,
+				    PAGE_SIZE << order,
+				    DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(fwrt->trans->dev, phys)) {
+			/*
+			 * free the previous pages and the current one
+			 * since we failed to map_page.
+			 */
+			iwl_free_fw_paging(fwrt);
+			return -ENOMEM;
+		}
+		fwrt->fw_paging_db[blk_idx].fw_paging_phys = phys;
+
+		if (!blk_idx)
+			IWL_DEBUG_FW(fwrt,
+				     "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
+				     order);
+		else
+			IWL_DEBUG_FW(fwrt,
+				     "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
+				     order);
+	}
+
+	return 0;
+}
+
+static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
+			       const struct fw_img *image)
+{
+	int sec_idx, idx, ret;
+	u32 offset = 0;
+
+	/*
+	 * Find the paging image start point:
+	 * if CPU2 exists and is in paging format, then the image looks like:
+	 * CPU1 sections (2 or more)
+	 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
+	 * CPU2 sections (not paged)
+	 * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged
+	 * CPU2 sections from the CPU2 paging sections
+	 * CPU2 paging CSS
+	 * CPU2 paging image (including instructions and data)
+	 */
+	for (sec_idx = 0; sec_idx < image->num_sec; sec_idx++) {
+		if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
+			sec_idx++;
+			break;
+		}
+	}
+
+	/*
+	 * If paging is enabled there should be at least 2 more sections left
+	 * (one for CSS and one for Paging data)
+	 */
+	if (sec_idx >= image->num_sec - 1) {
+		IWL_ERR(fwrt, "Paging: Missing CSS and/or paging sections\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	/* copy the CSS block to the dram */
+	IWL_DEBUG_FW(fwrt, "Paging: load paging CSS to FW, sec = %d\n",
+		     sec_idx);
+
+	if (image->sec[sec_idx].len > fwrt->fw_paging_db[0].fw_paging_size) {
+		IWL_ERR(fwrt, "CSS block is larger than paging size\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	memcpy(page_address(fwrt->fw_paging_db[0].fw_paging_block),
+	       image->sec[sec_idx].data,
+	       image->sec[sec_idx].len);
+	dma_sync_single_for_device(fwrt->trans->dev,
+				   fwrt->fw_paging_db[0].fw_paging_phys,
+				   fwrt->fw_paging_db[0].fw_paging_size,
+				   DMA_BIDIRECTIONAL);
+
+	IWL_DEBUG_FW(fwrt,
+		     "Paging: copied %d CSS bytes to first block\n",
+		     fwrt->fw_paging_db[0].fw_paging_size);
+
+	sec_idx++;
+
+	/*
+	 * Copy the paging blocks to the dram.  The loop index starts
+	 * from 1 since the CSS block (index 0) was already copied to
+	 * dram.  We use num_of_paging_blk + 1 to account for that.
+	 */
+	for (idx = 1; idx < fwrt->num_of_paging_blk + 1; idx++) {
+		struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx];
+		int remaining = image->sec[sec_idx].len - offset;
+		int len = block->fw_paging_size;
+
+		/*
+		 * For the last block we copy all that is remaining;
+		 * for all other blocks we copy fw_paging_size at a
+		 * time.
+		 */
+		if (idx == fwrt->num_of_paging_blk) {
+			len = remaining;
+			if (remaining !=
+			    fwrt->num_of_pages_in_last_blk * FW_PAGING_SIZE) {
+				IWL_ERR(fwrt,
+					"Paging: last block contains more data than expected %d\n",
+					remaining);
+				ret = -EINVAL;
+				goto err;
+			}
+		} else if (block->fw_paging_size > remaining) {
+			IWL_ERR(fwrt,
+				"Paging: not enough data in block %d (%d)\n",
+				idx, remaining);
+			ret = -EINVAL;
+			goto err;
+		}
+
+		memcpy(page_address(block->fw_paging_block),
+		       image->sec[sec_idx].data + offset, len);
+		dma_sync_single_for_device(fwrt->trans->dev,
+					   block->fw_paging_phys,
+					   block->fw_paging_size,
+					   DMA_BIDIRECTIONAL);
+
+		IWL_DEBUG_FW(fwrt,
+			     "Paging: copied %d paging bytes to block %d\n",
+			     len, idx);
+
+		offset += block->fw_paging_size;
+	}
+
+	return 0;
+
+err:
+	iwl_free_fw_paging(fwrt);
+	return ret;
+}
+
+static int iwl_save_fw_paging(struct iwl_fw_runtime *fwrt,
+			      const struct fw_img *fw)
+{
+	int ret;
+
+	ret = iwl_alloc_fw_paging_mem(fwrt, fw);
+	if (ret)
+		return ret;
+
+	return iwl_fill_paging_mem(fwrt, fw);
+}
+
+/* send the paging cmd to the FW when CPU2 has a paging image */
+static int iwl_send_paging_cmd(struct iwl_fw_runtime *fwrt,
+			       const struct fw_img *fw)
+{
+	struct iwl_fw_paging_cmd paging_cmd = {
+		.flags = cpu_to_le32(PAGING_CMD_IS_SECURED |
+				     PAGING_CMD_IS_ENABLED |
+				     (fwrt->num_of_pages_in_last_blk <<
+				      PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
+		.block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
+		.block_num = cpu_to_le32(fwrt->num_of_paging_blk),
+	};
+	struct iwl_host_cmd hcmd = {
+		.id = iwl_cmd_id(FW_PAGING_BLOCK_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+		.len = { sizeof(paging_cmd), },
+		.data = { &paging_cmd, },
+	};
+	int blk_idx;
+
+	/* loop over all paging blocks + the CSS block */
+	for (blk_idx = 0; blk_idx < fwrt->num_of_paging_blk + 1; blk_idx++) {
+		dma_addr_t addr = fwrt->fw_paging_db[blk_idx].fw_paging_phys;
+		__le32 phy_addr;
+
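+		/*
+		 * Express the DMA address in units of 2^PAGE_2_EXP_SIZE
+		 * bytes (FW pages); this is the granularity the device
+		 * expects for paging block addresses.
+		 */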
+		addr = addr >> PAGE_2_EXP_SIZE;
+		phy_addr = cpu_to_le32(addr);
+		paging_cmd.device_phy_addr[blk_idx] = phy_addr;
+	}
+
+	return iwl_trans_send_cmd(fwrt->trans, &hcmd);
+}
+
+int iwl_init_paging(struct iwl_fw_runtime *fwrt, enum iwl_ucode_type type)
+{
+	const struct fw_img *fw = &fwrt->fw->img[type];
+	int ret;
+
+	if (fwrt->trans->cfg->gen2)
+		return 0;
+
+	/*
+	 * Configure and operate fw paging mechanism.
+	 * The driver configures the paging flow only once.
+	 * The CPU2 paging image is included in the IWL_UCODE_INIT image.
+	 */
+	if (!fw->paging_mem_size)
+		return 0;
+
+	ret = iwl_save_fw_paging(fwrt, fw);
+	if (ret) {
+		IWL_ERR(fwrt, "failed to save the FW paging image\n");
+		return ret;
+	}
+
+	ret = iwl_send_paging_cmd(fwrt, fw);
+	if (ret) {
+		IWL_ERR(fwrt, "failed to send the paging cmd\n");
+		iwl_free_fw_paging(fwrt);
+		return ret;
+	}
+
+	return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_init_paging);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
new file mode 100644
index 0000000..ed23367
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -0,0 +1,171 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_fw_runtime_h__
+#define __iwl_fw_runtime_h__
+
+#include "iwl-config.h"
+#include "iwl-trans.h"
+#include "img.h"
+#include "fw/api/debug.h"
+#include "fw/api/paging.h"
+#include "iwl-eeprom-parse.h"
+
+struct iwl_fw_runtime_ops {
+	int (*dump_start)(void *ctx);
+	void (*dump_end)(void *ctx);
+	bool (*fw_running)(void *ctx);
+};
+
+#define MAX_NUM_LMAC 2
+struct iwl_fwrt_shared_mem_cfg {
+	int num_lmacs;
+	int num_txfifo_entries;
+	struct {
+		u32 txfifo_size[TX_FIFO_MAX_NUM];
+		u32 rxfifo1_size;
+	} lmac[MAX_NUM_LMAC];
+	u32 rxfifo2_size;
+	u32 internal_txfifo_addr;
+	u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
+};
+
+enum iwl_fw_runtime_status {
+	IWL_FWRT_STATUS_DUMPING = 0,
+};
+
+/**
+ * struct iwl_fw_runtime - runtime data for firmware
+ * @trans: transport layer pointer
+ * @fw: firmware image
+ * @dev: device pointer
+ * @ops: user ops
+ * @ops_ctx: user ops context
+ * @status: status flags
+ * @fw_paging_db: paging database
+ * @num_of_paging_blk: number of paging blocks
+ * @num_of_pages_in_last_blk: number of pages in the last block
+ * @smem_cfg: saved firmware SMEM configuration
+ * @cur_fw_img: current firmware image, must be maintained by
+ *	the driver by calling &iwl_fw_set_current_image()
+ * @dump: debug dump data
+ */
+struct iwl_fw_runtime {
+	struct iwl_trans *trans;
+	const struct iwl_fw *fw;
+	struct device *dev;
+
+	const struct iwl_fw_runtime_ops *ops;
+	void *ops_ctx;
+
+	unsigned long status;
+
+	/* Paging */
+	struct iwl_fw_paging fw_paging_db[NUM_OF_FW_PAGING_BLOCKS];
+	u16 num_of_paging_blk;
+	u16 num_of_pages_in_last_blk;
+
+	enum iwl_ucode_type cur_fw_img;
+
+	/* memory configuration */
+	struct iwl_fwrt_shared_mem_cfg smem_cfg;
+
+	/* debug */
+	struct {
+		const struct iwl_fw_dump_desc *desc;
+		const struct iwl_fw_dbg_trigger_tlv *trig;
+		struct delayed_work wk;
+
+		u8 conf;
+
+		/* ts of the beginning of a non-collect fw dbg data period */
+		unsigned long non_collect_ts_start[FW_DBG_TRIGGER_MAX - 1];
+	} dump;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	struct {
+		struct delayed_work wk;
+		u32 delay;
+		u64 seq;
+	} timestamp;
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
+};
+
+void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
+			const struct iwl_fw *fw,
+			const struct iwl_fw_runtime_ops *ops, void *ops_ctx,
+			struct dentry *dbgfs_dir);
+
+void iwl_fw_runtime_exit(struct iwl_fw_runtime *fwrt);
+
+void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt);
+
+void iwl_fw_runtime_resume(struct iwl_fw_runtime *fwrt);
+
+static inline void iwl_fw_set_current_image(struct iwl_fw_runtime *fwrt,
+					    enum iwl_ucode_type cur_fw_img)
+{
+	fwrt->cur_fw_img = cur_fw_img;
+}
+
+int iwl_init_paging(struct iwl_fw_runtime *fwrt, enum iwl_ucode_type type);
+void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt);
+
+void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt);
+
+#endif /* __iwl_fw_runtime_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/smem.c b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
new file mode 100644
index 0000000..ff85d69
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/smem.c
@@ -0,0 +1,157 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "iwl-drv.h"
+#include "runtime.h"
+#include "fw/api/commands.h"
+
+static void iwl_parse_shared_mem_22000(struct iwl_fw_runtime *fwrt,
+				       struct iwl_rx_packet *pkt)
+{
+	struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data;
+	int i, lmac;
+	int lmac_num = le32_to_cpu(mem_cfg->lmac_num);
+
+	if (WARN_ON(lmac_num > ARRAY_SIZE(mem_cfg->lmac_smem)))
+		return;
+
+	fwrt->smem_cfg.num_lmacs = lmac_num;
+	fwrt->smem_cfg.num_txfifo_entries =
+		ARRAY_SIZE(mem_cfg->lmac_smem[0].txfifo_size);
+	fwrt->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo2_size);
+
+	for (lmac = 0; lmac < lmac_num; lmac++) {
+		struct iwl_shared_mem_lmac_cfg *lmac_cfg =
+			&mem_cfg->lmac_smem[lmac];
+
+		for (i = 0; i < ARRAY_SIZE(lmac_cfg->txfifo_size); i++)
+			fwrt->smem_cfg.lmac[lmac].txfifo_size[i] =
+				le32_to_cpu(lmac_cfg->txfifo_size[i]);
+		fwrt->smem_cfg.lmac[lmac].rxfifo1_size =
+			le32_to_cpu(lmac_cfg->rxfifo1_size);
+	}
+}
+
+static void iwl_parse_shared_mem(struct iwl_fw_runtime *fwrt,
+				 struct iwl_rx_packet *pkt)
+{
+	struct iwl_shared_mem_cfg_v2 *mem_cfg = (void *)pkt->data;
+	int i;
+
+	fwrt->smem_cfg.num_lmacs = 1;
+
+	fwrt->smem_cfg.num_txfifo_entries = ARRAY_SIZE(mem_cfg->txfifo_size);
+	for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
+		fwrt->smem_cfg.lmac[0].txfifo_size[i] =
+			le32_to_cpu(mem_cfg->txfifo_size[i]);
+
+	fwrt->smem_cfg.lmac[0].rxfifo1_size =
+		le32_to_cpu(mem_cfg->rxfifo_size[0]);
+	fwrt->smem_cfg.rxfifo2_size = le32_to_cpu(mem_cfg->rxfifo_size[1]);
+
+	/* new API has more data, from rxfifo_addr field and on */
+	if (fw_has_capa(&fwrt->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
+		BUILD_BUG_ON(sizeof(fwrt->smem_cfg.internal_txfifo_size) !=
+			     sizeof(mem_cfg->internal_txfifo_size));
+
+		fwrt->smem_cfg.internal_txfifo_addr =
+			le32_to_cpu(mem_cfg->internal_txfifo_addr);
+
+		for (i = 0;
+		     i < ARRAY_SIZE(fwrt->smem_cfg.internal_txfifo_size);
+		     i++)
+			fwrt->smem_cfg.internal_txfifo_size[i] =
+				le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
+	}
+}
+
+void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt)
+{
+	struct iwl_host_cmd cmd = {
+		.flags = CMD_WANT_SKB,
+		.data = { NULL, },
+		.len = { 0, },
+	};
+	struct iwl_rx_packet *pkt;
+
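+	/*
+	 * The extended shared-mem capability also implies the newer
+	 * group-based command ID; legacy firmware uses the flat
+	 * SHARED_MEM_CFG command.
+	 */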
+	if (fw_has_capa(&fwrt->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
+		cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
+	else
+		cmd.id = SHARED_MEM_CFG;
+
+	if (WARN_ON(iwl_trans_send_cmd(fwrt->trans, &cmd)))
+		return;
+
+	pkt = cmd.resp_pkt;
+	if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000)
+		iwl_parse_shared_mem_22000(fwrt, pkt);
+	else
+		iwl_parse_shared_mem(fwrt, pkt);
+
+	IWL_DEBUG_INFO(fwrt, "SHARED MEM CFG: got memory offsets/sizes\n");
+
+	iwl_free_resp(&cmd);
+}
+IWL_EXPORT_SYMBOL(iwl_get_shared_mem_conf);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/intel/iwlwifi/iwl-agn-hw.h
new file mode 100644
index 0000000..ee9347a
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-agn-hw.h
@@ -0,0 +1,117 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+/*
+ * Please use this file (iwl-agn-hw.h) only for hardware-related definitions.
+ */
+
+#ifndef __iwl_agn_hw_h__
+#define __iwl_agn_hw_h__
+
+#define IWLAGN_RTC_INST_LOWER_BOUND		(0x000000)
+#define IWLAGN_RTC_INST_UPPER_BOUND		(0x020000)
+
+#define IWLAGN_RTC_DATA_LOWER_BOUND		(0x800000)
+#define IWLAGN_RTC_DATA_UPPER_BOUND		(0x80C000)
+
+#define IWLAGN_RTC_INST_SIZE (IWLAGN_RTC_INST_UPPER_BOUND - \
+				IWLAGN_RTC_INST_LOWER_BOUND)
+#define IWLAGN_RTC_DATA_SIZE (IWLAGN_RTC_DATA_UPPER_BOUND - \
+				IWLAGN_RTC_DATA_LOWER_BOUND)
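+/* i.e. 128KB of instruction memory and 48KB of data memory */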
+
+#define IWL60_RTC_INST_LOWER_BOUND		(0x000000)
+#define IWL60_RTC_INST_UPPER_BOUND		(0x040000)
+#define IWL60_RTC_DATA_LOWER_BOUND		(0x800000)
+#define IWL60_RTC_DATA_UPPER_BOUND		(0x814000)
+#define IWL60_RTC_INST_SIZE \
+	(IWL60_RTC_INST_UPPER_BOUND - IWL60_RTC_INST_LOWER_BOUND)
+#define IWL60_RTC_DATA_SIZE \
+	(IWL60_RTC_DATA_UPPER_BOUND - IWL60_RTC_DATA_LOWER_BOUND)
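+/* i.e. 256KB of instruction memory and 80KB of data memory */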
+
+/* RSSI to dBm */
+#define IWLAGN_RSSI_OFFSET	44
+
+#define IWLAGN_DEFAULT_TX_RETRY			15
+#define IWLAGN_MGMT_DFAULT_RETRY_LIMIT		3
+#define IWLAGN_RTS_DFAULT_RETRY_LIMIT		60
+#define IWLAGN_BAR_DFAULT_RETRY_LIMIT		60
+#define IWLAGN_LOW_RETRY_LIMIT			7
+
+/* Limit range of txpower output target to be between these values */
+#define IWLAGN_TX_POWER_TARGET_POWER_MIN	(0)	/* 0 dBm: 1 milliwatt */
+#define IWLAGN_TX_POWER_TARGET_POWER_MAX	(16)	/* 16 dBm */
+
+/* EEPROM */
+#define IWLAGN_EEPROM_IMG_SIZE		2048
+
+/* high blocks contain PAPD data */
+#define OTP_HIGH_IMAGE_SIZE_6x00        (6 * 512 * sizeof(u16)) /* 6 KB */
+#define OTP_HIGH_IMAGE_SIZE_1000        (0x200 * sizeof(u16)) /* 1024 bytes */
+#define OTP_MAX_LL_ITEMS_1000		(3)	/* OTP blocks for 1000 */
+#define OTP_MAX_LL_ITEMS_6x00		(4)	/* OTP blocks for 6x00 */
+#define OTP_MAX_LL_ITEMS_6x50		(7)	/* OTP blocks for 6x50 */
+#define OTP_MAX_LL_ITEMS_2x00		(4)	/* OTP blocks for 2x00 */
+
+
+#define IWLAGN_NUM_QUEUES		20
+
+#endif /* __iwl_agn_hw_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
new file mode 100644
index 0000000..12fddcf
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -0,0 +1,584 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright (C) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright (C) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __IWL_CONFIG_H__
+#define __IWL_CONFIG_H__
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/ieee80211.h>
+#include <linux/nl80211.h>
+#include "iwl-csr.h"
+
+enum iwl_device_family {
+	IWL_DEVICE_FAMILY_UNDEFINED,
+	IWL_DEVICE_FAMILY_1000,
+	IWL_DEVICE_FAMILY_100,
+	IWL_DEVICE_FAMILY_2000,
+	IWL_DEVICE_FAMILY_2030,
+	IWL_DEVICE_FAMILY_105,
+	IWL_DEVICE_FAMILY_135,
+	IWL_DEVICE_FAMILY_5000,
+	IWL_DEVICE_FAMILY_5150,
+	IWL_DEVICE_FAMILY_6000,
+	IWL_DEVICE_FAMILY_6000i,
+	IWL_DEVICE_FAMILY_6005,
+	IWL_DEVICE_FAMILY_6030,
+	IWL_DEVICE_FAMILY_6050,
+	IWL_DEVICE_FAMILY_6150,
+	IWL_DEVICE_FAMILY_7000,
+	IWL_DEVICE_FAMILY_8000,
+	IWL_DEVICE_FAMILY_9000,
+	IWL_DEVICE_FAMILY_22000,
+	IWL_DEVICE_FAMILY_22560,
+};
+
+/*
+ * LED mode
+ *    IWL_LED_DEFAULT:  use device default
+ *    IWL_LED_RF_STATE: turn LED on/off based on RF state
+ *			LED ON  = RF ON
+ *			LED OFF = RF OFF
+ *    IWL_LED_BLINK:    adjust led blink rate based on blink table
+ *    IWL_LED_DISABLE:	led disabled
+ */
+enum iwl_led_mode {
+	IWL_LED_DEFAULT,
+	IWL_LED_RF_STATE,
+	IWL_LED_BLINK,
+	IWL_LED_DISABLE,
+};
+
+/**
+ * enum iwl_nvm_type - nvm formats
+ * @IWL_NVM: the regular format
+ * @IWL_NVM_EXT: extended NVM format
+ * @IWL_NVM_SDP: NVM format used by 3168 series
+ */
+enum iwl_nvm_type {
+	IWL_NVM,
+	IWL_NVM_EXT,
+	IWL_NVM_SDP,
+};
+
+/*
+ * These are the threshold values for the PLCP error rate per 100ms. They
+ * are used to set and check the validity of plcp_delta.
+ */
+#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN		1
+#define IWL_MAX_PLCP_ERR_THRESHOLD_DEF		50
+#define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF	100
+#define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF	200
+#define IWL_MAX_PLCP_ERR_THRESHOLD_MAX		255
+#define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE	0
+
+/* TX queue watchdog timeouts in mSecs */
+#define IWL_WATCHDOG_DISABLED	0
+#define IWL_DEF_WD_TIMEOUT	2500
+#define IWL_LONG_WD_TIMEOUT	10000
+#define IWL_MAX_WD_TIMEOUT	120000
+
+#define IWL_DEFAULT_MAX_TX_POWER 22
+#define IWL_TX_CSUM_NETIF_FLAGS (NETIF_F_IPV6_CSUM | NETIF_F_IP_CSUM |\
+				 NETIF_F_TSO | NETIF_F_TSO6)
+
+/* Antenna presence definitions */
+#define	ANT_NONE	0x0
+#define	ANT_INVALID	0xff
+#define	ANT_A		BIT(0)
+#define	ANT_B		BIT(1)
+#define ANT_C		BIT(2)
+#define	ANT_AB		(ANT_A | ANT_B)
+#define	ANT_AC		(ANT_A | ANT_C)
+#define ANT_BC		(ANT_B | ANT_C)
+#define ANT_ABC		(ANT_A | ANT_B | ANT_C)
+#define MAX_ANT_NUM 3
+
+
+static inline u8 num_of_ant(u8 mask)
+{
+	return  !!((mask) & ANT_A) +
+		!!((mask) & ANT_B) +
+		!!((mask) & ANT_C);
+}
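+
+/* e.g. num_of_ant(ANT_AB) == 2 and num_of_ant(ANT_ABC) == 3 */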
+
+/*
+ * @max_ll_items: max number of OTP blocks
+ * @shadow_ram_support: shadow support for OTP memory
+ * @led_compensation: compensate for the LED on/off time per HW according
+ *	to the deviation needed to achieve the desired LED frequency.
+ *	The detailed algorithm is described in iwl-led.c
+ * @wd_timeout: TX queues watchdog timeout
+ * @max_event_log_size: size of event log buffer size for ucode event logging
+ * @shadow_reg_enable: HW shadow register support
+ * @apmg_wake_up_wa: should the MAC access REQ be asserted when a command
+ *	is in flight. This is due to a HW bug in 7260, 3160 and 7265.
+ * @scd_chain_ext_wa: should the chain extension feature in SCD be disabled.
+ * @max_tfd_queue_size: max number of entries in tfd queue.
+ */
+struct iwl_base_params {
+	unsigned int wd_timeout;
+
+	u16 eeprom_size;
+	u16 max_event_log_size;
+
+	u8 pll_cfg:1, /* for iwl_pcie_apm_init() */
+	   shadow_ram_support:1,
+	   shadow_reg_enable:1,
+	   pcie_l1_allowed:1,
+	   apmg_wake_up_wa:1,
+	   scd_chain_ext_wa:1;
+
+	u16 num_of_queues;	/* def: HW dependent */
+	u32 max_tfd_queue_size;	/* def: HW dependent */
+
+	u8 max_ll_items;
+	u8 led_compensation;
+};
+
+/*
+ * @stbc: support Tx STBC and 1*SS Rx STBC
+ * @ldpc: support Tx/Rx with LDPC
+ * @use_rts_for_aggregation: use rts/cts protection for HT traffic
+ * @ht40_bands: bitmap of bands (using %NL80211_BAND_*) that support HT40
+ */
+struct iwl_ht_params {
+	u8 ht_greenfield_support:1,
+	   stbc:1,
+	   ldpc:1,
+	   use_rts_for_aggregation:1;
+	u8 ht40_bands;
+};
+
+/*
+ * Tx-backoff threshold
+ * @temperature: The threshold in Celsius
+ * @backoff: The tx-backoff in uSec
+ */
+struct iwl_tt_tx_backoff {
+	s32 temperature;
+	u32 backoff;
+};
+
+#define TT_TX_BACKOFF_SIZE 6
+
+/**
+ * struct iwl_tt_params - thermal throttling parameters
+ * @ct_kill_entry: CT Kill entry threshold
+ * @ct_kill_exit: CT Kill exit threshold
+ * @ct_kill_duration: The time interval (in uSec) at which the driver
+ *	needs to check whether to exit CT Kill.
+ * @dynamic_smps_entry: Dynamic SMPS entry threshold
+ * @dynamic_smps_exit: Dynamic SMPS exit threshold
+ * @tx_protection_entry: TX protection entry threshold
+ * @tx_protection_exit: TX protection exit threshold
+ * @tx_backoff: Array of thresholds for tx-backoff, in ascending order.
+ * @support_ct_kill: Support CT Kill?
+ * @support_dynamic_smps: Support dynamic SMPS?
+ * @support_tx_protection: Support tx protection?
+ * @support_tx_backoff: Support tx-backoff?
+ */
+struct iwl_tt_params {
+	u32 ct_kill_entry;
+	u32 ct_kill_exit;
+	u32 ct_kill_duration;
+	u32 dynamic_smps_entry;
+	u32 dynamic_smps_exit;
+	u32 tx_protection_entry;
+	u32 tx_protection_exit;
+	struct iwl_tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE];
+	u8 support_ct_kill:1,
+	   support_dynamic_smps:1,
+	   support_tx_protection:1,
+	   support_tx_backoff:1;
+};
+
+/*
+ * information on how to parse the EEPROM
+ */
+#define EEPROM_REG_BAND_1_CHANNELS		0x08
+#define EEPROM_REG_BAND_2_CHANNELS		0x26
+#define EEPROM_REG_BAND_3_CHANNELS		0x42
+#define EEPROM_REG_BAND_4_CHANNELS		0x5C
+#define EEPROM_REG_BAND_5_CHANNELS		0x74
+#define EEPROM_REG_BAND_24_HT40_CHANNELS	0x82
+#define EEPROM_REG_BAND_52_HT40_CHANNELS	0x92
+#define EEPROM_6000_REG_BAND_24_HT40_CHANNELS	0x80
+#define EEPROM_REGULATORY_BAND_NO_HT40		0
+
+/* lower blocks contain EEPROM image and calibration data */
+#define OTP_LOW_IMAGE_SIZE		(2 * 512 * sizeof(u16)) /* 2 KB */
+#define OTP_LOW_IMAGE_SIZE_FAMILY_7000	(16 * 512 * sizeof(u16)) /* 16 KB */
+#define OTP_LOW_IMAGE_SIZE_FAMILY_8000	(32 * 512 * sizeof(u16)) /* 32 KB */
+#define OTP_LOW_IMAGE_SIZE_FAMILY_9000	OTP_LOW_IMAGE_SIZE_FAMILY_8000
+#define OTP_LOW_IMAGE_SIZE_FAMILY_22000	OTP_LOW_IMAGE_SIZE_FAMILY_9000
+
+struct iwl_eeprom_params {
+	const u8 regulatory_bands[7];
+	bool enhanced_txpower;
+};
+
+/* Tx-backoff power threshold
+ * @pwr: The power limit in mw
+ * @backoff: The tx-backoff in uSec
+ */
+struct iwl_pwr_tx_backoff {
+	u32 pwr;
+	u32 backoff;
+};
+
+/**
+ * struct iwl_csr_params
+ *
+ * @flag_sw_reset: reset the device
+ * @flag_mac_clock_ready:
+ *	Indicates MAC (ucode processor, etc.) is powered up and can run.
+ *	Internal resources are accessible.
+ *	NOTE:  This does not indicate that the processor is actually running.
+ *	NOTE:  This does not indicate that device has completed
+ *	       init or post-power-down restore of internal SRAM memory.
+ *	       Use CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that
+ *	       SRAM is restored and uCode is in normal operation mode.
+ *	       This note is relevant only for pre 5xxx devices.
+ *	NOTE:  After device reset, this bit remains "0" until host sets
+ *	       INIT_DONE
+ * @flag_init_done: Host sets this to put device into fully operational
+ *	D0 power mode. Host resets this after SW_RESET to put device into
+ *	low power mode.
+ * @flag_mac_access_req: Host sets this to request and maintain MAC wakeup,
+ *	to allow host access to device-internal resources. Host must wait for
+ *	mac_clock_ready (and !GOING_TO_SLEEP) before accessing non-CSR device
+ *	registers.
+ * @flag_val_mac_access_en: mac access is enabled
+ * @flag_master_dis: disable master
+ * @flag_stop_master: stop master
+ * @addr_sw_reset: address for resetting the device
+ * @mac_addr0_otp: first part of MAC address from OTP
+ * @mac_addr1_otp: second part of MAC address from OTP
+ * @mac_addr0_strap: first part of MAC address from strap
+ * @mac_addr1_strap: second part of MAC address from strap
+ */
+struct iwl_csr_params {
+	u8 flag_sw_reset;
+	u8 flag_mac_clock_ready;
+	u8 flag_init_done;
+	u8 flag_mac_access_req;
+	u8 flag_val_mac_access_en;
+	u8 flag_master_dis;
+	u8 flag_stop_master;
+	u8 addr_sw_reset;
+	u32 mac_addr0_otp;
+	u32 mac_addr1_otp;
+	u32 mac_addr0_strap;
+	u32 mac_addr1_strap;
+};
+
+/**
+ * struct iwl_cfg
+ * @name: Official name of the device
+ * @fw_name_pre: Firmware filename prefix. The api version and extension
+ *	(.ucode) will be added to filename before loading from disk. The
+ *	filename is constructed as fw_name_pre<api>.ucode.
+ * @fw_name_pre_b_or_c_step: same as @fw_name_pre, only for b or c steps
+ *	(if supported)
+ * @fw_name_pre_rf_next_step: same as @fw_name_pre_b_or_c_step, only for rf
+ *	next step. Supported only in integrated solutions.
+ * @ucode_api_max: Highest version of uCode API supported by driver.
+ * @ucode_api_min: Lowest version of uCode API supported by driver.
+ * @max_inst_size: The maximal length of the fw inst section (only DVM)
+ * @max_data_size: The maximal length of the fw data section (only DVM)
+ * @valid_tx_ant: valid transmit antenna
+ * @valid_rx_ant: valid receive antenna
+ * @non_shared_ant: the antenna that is for WiFi only
+ * @nvm_ver: NVM version
+ * @nvm_calib_ver: NVM calibration version
+ * @base_params: pointer to basic parameters
+ * @ht_params: pointer to HT parameters
+ * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
+ * @rx_with_siso_diversity: 1x1 device with rx antenna diversity
+ * @internal_wimax_coex: internal wifi/wimax combo device
+ * @high_temp: whether this NIC is designated to operate at high temperatures
+ * @host_interrupt_operation_mode: device needs host interrupt operation
+ *	mode set
+ * @nvm_hw_section_num: the ID of the HW NVM section
+ * @mac_addr_from_csr: read HW address from CSR registers
+ * @features: hw features, any combination of feature_whitelist
+ * @pwr_tx_backoffs: translation table between power limits and backoffs
+ * @csr: csr flags and addresses that are different across devices
+ * @max_rx_agg_size: max RX aggregation size of the ADDBA request/response
+ * @max_tx_agg_size: max TX aggregation size of the ADDBA request/response
+ * @max_ht_ampdu_exponent: the exponent of the max length of A-MPDU that the
+ *	station can receive in HT
+ * @max_vht_ampdu_exponent: the exponent of the max length of A-MPDU that the
+ *	station can receive in VHT
+ * @dccm_offset: offset from which DCCM begins
+ * @dccm_len: length of DCCM (including runtime stack CCM)
+ * @dccm2_offset: offset from which the second DCCM begins
+ * @dccm2_len: length of the second DCCM
+ * @smem_offset: offset from which the SMEM begins
+ * @smem_len: the length of SMEM
+ * @mq_rx_supported: multi-queue rx support
+ * @vht_mu_mimo_supported: VHT MU-MIMO support
+ * @rf_id: need to read rf_id to determine the firmware image
+ * @integrated: discrete or integrated
+ * @gen2: 22000 and on transport operation
+ * @cdb: CDB support
+ * @nvm_type: see &enum iwl_nvm_type
+ *
+ * We enable the driver to be backward compatible with respect to hardware
+ * features.
+ * API differences in uCode shouldn't be handled here but through TLVs
+ * and/or the uCode API version instead.
+ */
+struct iwl_cfg {
+	/* params specific to an individual device within a device family */
+	const char *name;
+	const char *fw_name_pre;
+	const char *fw_name_pre_b_or_c_step;
+	const char *fw_name_pre_rf_next_step;
+	/* params not likely to change within a device family */
+	const struct iwl_base_params *base_params;
+	/* params likely to change within a device family */
+	const struct iwl_ht_params *ht_params;
+	const struct iwl_eeprom_params *eeprom_params;
+	const struct iwl_pwr_tx_backoff *pwr_tx_backoffs;
+	const char *default_nvm_file_C_step;
+	const struct iwl_tt_params *thermal_params;
+	const struct iwl_csr_params *csr;
+	enum iwl_device_family device_family;
+	enum iwl_led_mode led_mode;
+	enum iwl_nvm_type nvm_type;
+	u32 max_data_size;
+	u32 max_inst_size;
+	netdev_features_t features;
+	u32 dccm_offset;
+	u32 dccm_len;
+	u32 dccm2_offset;
+	u32 dccm2_len;
+	u32 smem_offset;
+	u32 smem_len;
+	u32 soc_latency;
+	u16 nvm_ver;
+	u16 nvm_calib_ver;
+	u32 rx_with_siso_diversity:1,
+	    bt_shared_single_ant:1,
+	    internal_wimax_coex:1,
+	    host_interrupt_operation_mode:1,
+	    high_temp:1,
+	    mac_addr_from_csr:1,
+	    lp_xtal_workaround:1,
+	    disable_dummy_notification:1,
+	    apmg_not_supported:1,
+	    mq_rx_supported:1,
+	    vht_mu_mimo_supported:1,
+	    rf_id:1,
+	    integrated:1,
+	    use_tfh:1,
+	    gen2:1,
+	    cdb:1,
+	    dbgc_supported:1;
+	u8 valid_tx_ant;
+	u8 valid_rx_ant;
+	u8 non_shared_ant;
+	u8 nvm_hw_section_num;
+	u8 max_rx_agg_size;
+	u8 max_tx_agg_size;
+	u8 max_ht_ampdu_exponent;
+	u8 max_vht_ampdu_exponent;
+	u8 ucode_api_max;
+	u8 ucode_api_min;
+	u32 min_umac_error_event_table;
+	u32 extra_phy_cfg_flags;
+};
+
+static const struct iwl_csr_params iwl_csr_v1 = {
+	.flag_mac_clock_ready = 0,
+	.flag_val_mac_access_en = 0,
+	.flag_init_done = 2,
+	.flag_mac_access_req = 3,
+	.flag_sw_reset = 7,
+	.flag_master_dis = 8,
+	.flag_stop_master = 9,
+	.addr_sw_reset = (CSR_BASE + 0x020),
+	.mac_addr0_otp = 0x380,
+	.mac_addr1_otp = 0x384,
+	.mac_addr0_strap = 0x388,
+	.mac_addr1_strap = 0x38C
+};
+
+static const struct iwl_csr_params iwl_csr_v2 = {
+	.flag_init_done = 6,
+	.flag_mac_clock_ready = 20,
+	.flag_val_mac_access_en = 20,
+	.flag_mac_access_req = 21,
+	.flag_master_dis = 28,
+	.flag_stop_master = 29,
+	.flag_sw_reset = 31,
+	.addr_sw_reset = (CSR_BASE + 0x024),
+	.mac_addr0_otp = 0x30,
+	.mac_addr1_otp = 0x34,
+	.mac_addr0_strap = 0x38,
+	.mac_addr1_strap = 0x3C
+};
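+
+/*
+ * Note that the flag_* members above are bit positions within the
+ * corresponding CSR registers (e.g. a software reset is requested by
+ * setting BIT(flag_sw_reset) in the register at addr_sw_reset); they
+ * are not register values themselves.
+ */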
+
+/*
+ * This list declares the config structures for all devices.
+ */
+#if IS_ENABLED(CONFIG_IWLDVM)
+extern const struct iwl_cfg iwl5300_agn_cfg;
+extern const struct iwl_cfg iwl5100_agn_cfg;
+extern const struct iwl_cfg iwl5350_agn_cfg;
+extern const struct iwl_cfg iwl5100_bgn_cfg;
+extern const struct iwl_cfg iwl5100_abg_cfg;
+extern const struct iwl_cfg iwl5150_agn_cfg;
+extern const struct iwl_cfg iwl5150_abg_cfg;
+extern const struct iwl_cfg iwl6005_2agn_cfg;
+extern const struct iwl_cfg iwl6005_2abg_cfg;
+extern const struct iwl_cfg iwl6005_2bg_cfg;
+extern const struct iwl_cfg iwl6005_2agn_sff_cfg;
+extern const struct iwl_cfg iwl6005_2agn_d_cfg;
+extern const struct iwl_cfg iwl6005_2agn_mow1_cfg;
+extern const struct iwl_cfg iwl6005_2agn_mow2_cfg;
+extern const struct iwl_cfg iwl1030_bgn_cfg;
+extern const struct iwl_cfg iwl1030_bg_cfg;
+extern const struct iwl_cfg iwl6030_2agn_cfg;
+extern const struct iwl_cfg iwl6030_2abg_cfg;
+extern const struct iwl_cfg iwl6030_2bgn_cfg;
+extern const struct iwl_cfg iwl6030_2bg_cfg;
+extern const struct iwl_cfg iwl6000i_2agn_cfg;
+extern const struct iwl_cfg iwl6000i_2abg_cfg;
+extern const struct iwl_cfg iwl6000i_2bg_cfg;
+extern const struct iwl_cfg iwl6000_3agn_cfg;
+extern const struct iwl_cfg iwl6050_2agn_cfg;
+extern const struct iwl_cfg iwl6050_2abg_cfg;
+extern const struct iwl_cfg iwl6150_bgn_cfg;
+extern const struct iwl_cfg iwl6150_bg_cfg;
+extern const struct iwl_cfg iwl1000_bgn_cfg;
+extern const struct iwl_cfg iwl1000_bg_cfg;
+extern const struct iwl_cfg iwl100_bgn_cfg;
+extern const struct iwl_cfg iwl100_bg_cfg;
+extern const struct iwl_cfg iwl130_bgn_cfg;
+extern const struct iwl_cfg iwl130_bg_cfg;
+extern const struct iwl_cfg iwl2000_2bgn_cfg;
+extern const struct iwl_cfg iwl2000_2bgn_d_cfg;
+extern const struct iwl_cfg iwl2030_2bgn_cfg;
+extern const struct iwl_cfg iwl6035_2agn_cfg;
+extern const struct iwl_cfg iwl6035_2agn_sff_cfg;
+extern const struct iwl_cfg iwl105_bgn_cfg;
+extern const struct iwl_cfg iwl105_bgn_d_cfg;
+extern const struct iwl_cfg iwl135_bgn_cfg;
+#endif /* CONFIG_IWLDVM */
+#if IS_ENABLED(CONFIG_IWLMVM)
+extern const struct iwl_cfg iwl7260_2ac_cfg;
+extern const struct iwl_cfg iwl7260_2ac_cfg_high_temp;
+extern const struct iwl_cfg iwl7260_2n_cfg;
+extern const struct iwl_cfg iwl7260_n_cfg;
+extern const struct iwl_cfg iwl3160_2ac_cfg;
+extern const struct iwl_cfg iwl3160_2n_cfg;
+extern const struct iwl_cfg iwl3160_n_cfg;
+extern const struct iwl_cfg iwl3165_2ac_cfg;
+extern const struct iwl_cfg iwl3168_2ac_cfg;
+extern const struct iwl_cfg iwl7265_2ac_cfg;
+extern const struct iwl_cfg iwl7265_2n_cfg;
+extern const struct iwl_cfg iwl7265_n_cfg;
+extern const struct iwl_cfg iwl7265d_2ac_cfg;
+extern const struct iwl_cfg iwl7265d_2n_cfg;
+extern const struct iwl_cfg iwl7265d_n_cfg;
+extern const struct iwl_cfg iwl8260_2n_cfg;
+extern const struct iwl_cfg iwl8260_2ac_cfg;
+extern const struct iwl_cfg iwl8265_2ac_cfg;
+extern const struct iwl_cfg iwl8275_2ac_cfg;
+extern const struct iwl_cfg iwl4165_2ac_cfg;
+extern const struct iwl_cfg iwl9160_2ac_cfg;
+extern const struct iwl_cfg iwl9260_2ac_cfg;
+extern const struct iwl_cfg iwl9260_killer_2ac_cfg;
+extern const struct iwl_cfg iwl9270_2ac_cfg;
+extern const struct iwl_cfg iwl9460_2ac_cfg;
+extern const struct iwl_cfg iwl9560_2ac_cfg;
+extern const struct iwl_cfg iwl9460_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9461_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9462_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9560_killer_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9460_2ac_cfg_shared_clk;
+extern const struct iwl_cfg iwl9461_2ac_cfg_shared_clk;
+extern const struct iwl_cfg iwl9462_2ac_cfg_shared_clk;
+extern const struct iwl_cfg iwl9560_2ac_cfg_shared_clk;
+extern const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk;
+extern const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk;
+extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
+extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
+extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
+extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
+extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0;
+extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0;
+extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0;
+extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0;
+extern const struct iwl_cfg iwl22560_2ax_cfg_su_cdb;
+#endif /* CONFIG_IWLMVM */
+
+#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
new file mode 100644
index 0000000..ebea991
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
@@ -0,0 +1,286 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_context_info_file_gen3_h__
+#define __iwl_context_info_file_gen3_h__
+
+#include "iwl-context-info.h"
+
+#define CSR_CTXT_INFO_BOOT_CTRL         0x0
+#define CSR_CTXT_INFO_ADDR              0x118
+#define CSR_IML_DATA_ADDR               0x120
+#define CSR_IML_SIZE_ADDR               0x128
+#define CSR_IML_RESP_ADDR               0x12c
+
+/* Set bit for enabling automatic function boot */
+#define CSR_AUTO_FUNC_BOOT_ENA          BIT(1)
+/* Set bit for initiating function boot */
+#define CSR_AUTO_FUNC_INIT              BIT(7)
+
+/**
+ * enum iwl_prph_scratch_mtr_format - tfd size configuration
+ * @IWL_PRPH_MTR_FORMAT_16B: 16 bit tfd
+ * @IWL_PRPH_MTR_FORMAT_32B: 32 bit tfd
+ * @IWL_PRPH_MTR_FORMAT_64B: 64 bit tfd
+ * @IWL_PRPH_MTR_FORMAT_256B: 256 bit tfd
+ */
+enum iwl_prph_scratch_mtr_format {
+	IWL_PRPH_MTR_FORMAT_16B = 0x0,
+	IWL_PRPH_MTR_FORMAT_32B = 0x40000,
+	IWL_PRPH_MTR_FORMAT_64B = 0x80000,
+	IWL_PRPH_MTR_FORMAT_256B = 0xC0000,
+};
+
+/**
+ * enum iwl_prph_scratch_flags - PRPH scratch control flags
+ * @IWL_PRPH_SCRATCH_EARLY_DEBUG_EN: enable early debug conf
+ * @IWL_PRPH_SCRATCH_EDBG_DEST_DRAM: use DRAM, with size allocated
+ *	in hwm config.
+ * @IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL: use buffer on SRAM
+ * @IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER: use st arbiter, mainly for
+ *	multicomm.
+ * @IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF: route debug data to SoC HW
+ * @IWL_PRPH_SCRATCH_RB_SIZE_4K: Use 4K RB size (the default is 2K)
+ * @IWL_PRPH_SCRATCH_MTR_MODE: format used for completion - 0: for
+ *	completion descriptor, 1: for responses (legacy)
+ * @IWL_PRPH_SCRATCH_MTR_FORMAT: a mask for the size of the tfd.
+ *	There are 4 optional values: 0: 16 bit, 1: 32 bit, 2: 64 bit,
+ *	3: 256 bit.
+ */
+enum iwl_prph_scratch_flags {
+	IWL_PRPH_SCRATCH_EARLY_DEBUG_EN		= BIT(4),
+	IWL_PRPH_SCRATCH_EDBG_DEST_DRAM		= BIT(8),
+	IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL	= BIT(9),
+	IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER	= BIT(10),
+	IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF	= BIT(11),
+	IWL_PRPH_SCRATCH_RB_SIZE_4K		= BIT(16),
+	IWL_PRPH_SCRATCH_MTR_MODE		= BIT(17),
+	IWL_PRPH_SCRATCH_MTR_FORMAT		= BIT(18) | BIT(19),
+};
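+
+/*
+ * For example (illustrative combination): a configuration using 4K RBs
+ * and 256-bit TFDs would set control_flags to
+ * IWL_PRPH_SCRATCH_RB_SIZE_4K | IWL_PRPH_MTR_FORMAT_256B, the latter
+ * falling entirely within the IWL_PRPH_SCRATCH_MTR_FORMAT mask
+ * (bits 18-19).
+ */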
+
+/*
+ * struct iwl_prph_scratch_version - version structure
+ * @mac_id: SKU and revision id
+ * @version: prph scratch information version id
+ * @size: the size of the context information in DWs
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch_version {
+	__le16 mac_id;
+	__le16 version;
+	__le16 size;
+	__le16 reserved;
+} __packed; /* PERIPH_SCRATCH_VERSION_S */
+
+/*
+ * struct iwl_prph_scratch_control - control structure
+ * @control_flags: context information flags see &enum iwl_prph_scratch_flags
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch_control {
+	__le32 control_flags;
+	__le32 reserved;
+} __packed; /* PERIPH_SCRATCH_CONTROL_S */
+
+/*
+ * struct iwl_prph_scratch_ror_cfg - ror config
+ * @ror_base_addr: ror start address
+ * @ror_size: ror size in DWs
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch_ror_cfg {
+	__le64 ror_base_addr;
+	__le32 ror_size;
+	__le32 reserved;
+} __packed; /* PERIPH_SCRATCH_ROR_CFG_S */
+
+/*
+ * struct iwl_prph_scratch_hwm_cfg - hwm config
+ * @hwm_base_addr: hwm start address
+ * @hwm_size: hwm size in DWs
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch_hwm_cfg {
+	__le64 hwm_base_addr;
+	__le32 hwm_size;
+	__le32 reserved;
+} __packed; /* PERIPH_SCRATCH_HWM_CFG_S */
+
+/*
+ * struct iwl_prph_scratch_rbd_cfg - RBDs configuration
+ * @free_rbd_addr: default queue free RB CB base address
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch_rbd_cfg {
+	__le64 free_rbd_addr;
+	__le32 reserved;
+} __packed; /* PERIPH_SCRATCH_RBD_CFG_S */
+
+/*
+ * struct iwl_prph_scratch_ctrl_cfg - prph scratch ctrl and config
+ * @version: version information of context info and HW
+ * @control: control flags of FH configurations
+ * @ror_cfg: ror configuration
+ * @hwm_cfg: hwm configuration
+ * @rbd_cfg: default RX queue configuration
+ */
+struct iwl_prph_scratch_ctrl_cfg {
+	struct iwl_prph_scratch_version version;
+	struct iwl_prph_scratch_control control;
+	struct iwl_prph_scratch_ror_cfg ror_cfg;
+	struct iwl_prph_scratch_hwm_cfg hwm_cfg;
+	struct iwl_prph_scratch_rbd_cfg rbd_cfg;
+} __packed; /* PERIPH_SCRATCH_CTRL_CFG_S */
+
+/*
+ * struct iwl_prph_scratch - peripheral scratch mapping
+ * @ctrl_cfg: control and configuration of prph scratch
+ * @dram: firmware images addresses in DRAM
+ * @reserved: reserved
+ */
+struct iwl_prph_scratch {
+	struct iwl_prph_scratch_ctrl_cfg ctrl_cfg;
+	__le32 reserved[16];
+	struct iwl_context_info_dram dram;
+} __packed; /* PERIPH_SCRATCH_S */
+
+/*
+ * struct iwl_prph_info - peripheral information
+ * @boot_stage_mirror: reflects the value in the Boot Stage CSR register
+ * @ipc_status_mirror: reflects the value in the IPC Status CSR register
+ * @sleep_notif: indicates the peripheral sleep status
+ * @reserved: reserved
+ */
+struct iwl_prph_info {
+	__le32 boot_stage_mirror;
+	__le32 ipc_status_mirror;
+	__le32 sleep_notif;
+	__le32 reserved;
+} __packed; /* PERIPH_INFO_S */
+
+/*
+ * struct iwl_context_info_gen3 - device INIT configuration
+ * @version: version of the context information
+ * @size: size of context information in DWs
+ * @config: context in which the peripheral would execute - a subset of
+ *	the capability CSR register published by the peripheral
+ * @prph_info_base_addr: the peripheral information structure start address
+ * @cr_head_idx_arr_base_addr: the completion ring head index array
+ *	start address
+ * @tr_tail_idx_arr_base_addr: the transfer ring tail index array
+ *	start address
+ * @cr_tail_idx_arr_base_addr: the completion ring tail index array
+ *	start address
+ * @tr_head_idx_arr_base_addr: the transfer ring head index array
+ *	start address
+ * @cr_idx_arr_size: number of entries in the completion ring index array
+ * @tr_idx_arr_size: number of entries in the transfer ring index array
+ * @mtr_base_addr: the message transfer ring start address
+ * @mcr_base_addr: the message completion ring start address
+ * @mtr_size: number of entries which the message transfer ring can hold
+ * @mcr_size: number of entries which the message completion ring can hold
+ * @mtr_doorbell_vec: the doorbell vector associated with the message
+ *	transfer ring
+ * @mcr_doorbell_vec: the doorbell vector associated with the message
+ *	completion ring
+ * @mtr_msi_vec: the MSI which shall be generated by the peripheral after
+ *	completing a transfer descriptor in the message transfer ring
+ * @mcr_msi_vec: the MSI which shall be generated by the peripheral after
+ *	completing a completion descriptor in the message completion ring
+ * @mtr_opt_header_size: the size of the optional header in the transfer
+ *	descriptor associated with the message transfer ring in DWs
+ * @mtr_opt_footer_size: the size of the optional footer in the transfer
+ *	descriptor associated with the message transfer ring in DWs
+ * @mcr_opt_header_size: the size of the optional header in the completion
+ *	descriptor associated with the message completion ring in DWs
+ * @mcr_opt_footer_size: the size of the optional footer in the completion
+ *	descriptor associated with the message completion ring in DWs
+ * @msg_rings_ctrl_flags: message rings control flags
+ * @prph_info_msi_vec: the MSI which shall be generated by the peripheral
+ *	after updating the Peripheral Information structure
+ * @prph_scratch_base_addr: the peripheral scratch structure start address
+ * @prph_scratch_size: the size of the peripheral scratch structure in DWs
+ * @reserved: reserved
+ */
+struct iwl_context_info_gen3 {
+	__le16 version;
+	__le16 size;
+	__le32 config;
+	__le64 prph_info_base_addr;
+	__le64 cr_head_idx_arr_base_addr;
+	__le64 tr_tail_idx_arr_base_addr;
+	__le64 cr_tail_idx_arr_base_addr;
+	__le64 tr_head_idx_arr_base_addr;
+	__le16 cr_idx_arr_size;
+	__le16 tr_idx_arr_size;
+	__le64 mtr_base_addr;
+	__le64 mcr_base_addr;
+	__le16 mtr_size;
+	__le16 mcr_size;
+	__le16 mtr_doorbell_vec;
+	__le16 mcr_doorbell_vec;
+	__le16 mtr_msi_vec;
+	__le16 mcr_msi_vec;
+	u8 mtr_opt_header_size;
+	u8 mtr_opt_footer_size;
+	u8 mcr_opt_header_size;
+	u8 mcr_opt_footer_size;
+	__le16 msg_rings_ctrl_flags;
+	__le16 prph_info_msi_vec;
+	__le64 prph_scratch_base_addr;
+	__le32 prph_scratch_size;
+	__le32 reserved;
+} __packed; /* IPC_CONTEXT_INFO_S */
+
+int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
+				 const struct fw_img *fw);
+void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans);
+
+#endif /* __iwl_context_info_file_gen3_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
new file mode 100644
index 0000000..4b6fdf3
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
@@ -0,0 +1,208 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_context_info_file_h__
+#define __iwl_context_info_file_h__
+
+/* maximum number of DRAM map entries supported by FW */
+#define IWL_MAX_DRAM_ENTRY	64
+#define CSR_CTXT_INFO_BA	0x40
+
+/**
+ * enum iwl_context_info_flags - Context information control flags
+ * @IWL_CTXT_INFO_AUTO_FUNC_INIT: If set, FW will not wait before interrupting
+ *	the init done for driver command that configures several system modes
+ * @IWL_CTXT_INFO_EARLY_DEBUG: enable early debug
+ * @IWL_CTXT_INFO_ENABLE_CDMP: enable core dump
+ * @IWL_CTXT_INFO_RB_SIZE_4K: Use 4K RB size (the default is 2K)
+ * @IWL_CTXT_INFO_RB_CB_SIZE_POS: position of the RBD Cyclic Buffer Size
+ *	exponent; the actual size is 2**value, valid sizes are 8-2048.
+ *	The field is four bits long; the maximum valid exponent is 12.
+ * @IWL_CTXT_INFO_TFD_FORMAT_LONG: use long TFD Format (the
+ *	default is short format - not supported by the driver)
+ */
+enum iwl_context_info_flags {
+	IWL_CTXT_INFO_AUTO_FUNC_INIT	= BIT(0),
+	IWL_CTXT_INFO_EARLY_DEBUG	= BIT(1),
+	IWL_CTXT_INFO_ENABLE_CDMP	= BIT(2),
+	IWL_CTXT_INFO_RB_SIZE_4K	= BIT(3),
+	IWL_CTXT_INFO_RB_CB_SIZE_POS	= 4,
+	IWL_CTXT_INFO_TFD_FORMAT_LONG	= BIT(8),
+};
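+
+/*
+ * Illustrative sketch (editor's example, not part of the driver): the RBD
+ * cyclic buffer size exponent is simply shifted into place; for a
+ * hypothetical 512-entry buffer the exponent is 9 (2**9 = 512):
+ *
+ *	u32 control_flags = IWL_CTXT_INFO_TFD_FORMAT_LONG |
+ *			    (9 << IWL_CTXT_INFO_RB_CB_SIZE_POS) |
+ *			    IWL_CTXT_INFO_RB_SIZE_4K;
+ */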
+
+/*
+ * struct iwl_context_info_version - version structure
+ * @mac_id: SKU and revision id
+ * @version: context information version id
+ * @size: the size of the context information in DWs
+ */
+struct iwl_context_info_version {
+	__le16 mac_id;
+	__le16 version;
+	__le16 size;
+	__le16 reserved;
+} __packed;
+
+/*
+ * struct iwl_context_info_control - control structure
+ * @control_flags: context information flags see &enum iwl_context_info_flags
+ */
+struct iwl_context_info_control {
+	__le32 control_flags;
+	__le32 reserved;
+} __packed;
+
+/*
+ * struct iwl_context_info_dram - images DRAM map
+ * each entry in the map represents a DRAM chunk of up to 32 KB
+ * @umac_img: UMAC image DRAM map
+ * @lmac_img: LMAC image DRAM map
+ * @virtual_img: paged image DRAM map
+ */
+struct iwl_context_info_dram {
+	__le64 umac_img[IWL_MAX_DRAM_ENTRY];
+	__le64 lmac_img[IWL_MAX_DRAM_ENTRY];
+	__le64 virtual_img[IWL_MAX_DRAM_ENTRY];
+} __packed;
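+
+/*
+ * Illustrative sketch (editor's example, not part of the driver): since
+ * each map entry covers at most a 32 KB chunk, the number of entries
+ * needed for a firmware section of sec_len bytes would be
+ *
+ *	int num_entries = DIV_ROUND_UP(sec_len, SZ_32K);
+ *
+ * which must not exceed IWL_MAX_DRAM_ENTRY.
+ */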
+
+/*
+ * struct iwl_context_info_rbd_cfg - RBDs configuration
+ * @free_rbd_addr: default queue free RB CB base address
+ * @used_rbd_addr: default queue used RB CB base address
+ * @status_wr_ptr: default queue used RB status write pointer
+ */
+struct iwl_context_info_rbd_cfg {
+	__le64 free_rbd_addr;
+	__le64 used_rbd_addr;
+	__le64 status_wr_ptr;
+} __packed;
+
+/*
+ * struct iwl_context_info_hcmd_cfg  - command queue configuration
+ * @cmd_queue_addr: address of command queue
+ * @cmd_queue_size: number of entries
+ */
+struct iwl_context_info_hcmd_cfg {
+	__le64 cmd_queue_addr;
+	u8 cmd_queue_size;
+	u8 reserved[7];
+} __packed;
+
+/*
+ * struct iwl_context_info_dump_cfg - Core Dump configuration
+ * @core_dump_addr: core dump (debug DRAM address) start address
+ * @core_dump_size: size, in DWs
+ */
+struct iwl_context_info_dump_cfg {
+	__le64 core_dump_addr;
+	__le32 core_dump_size;
+	__le32 reserved;
+} __packed;
+
+/*
+ * struct iwl_context_info_pnvm_cfg - platform NVM data configuration
+ * @platform_nvm_addr: Platform NVM data start address
+ * @platform_nvm_size: size in DWs
+ */
+struct iwl_context_info_pnvm_cfg {
+	__le64 platform_nvm_addr;
+	__le32 platform_nvm_size;
+	__le32 reserved;
+} __packed;
+
+/*
+ * struct iwl_context_info_early_dbg_cfg - early debug configuration for
+ *	dumping DRAM addresses
+ * @early_debug_addr: early debug start address
+ * @early_debug_size: size in DWs
+ */
+struct iwl_context_info_early_dbg_cfg {
+	__le64 early_debug_addr;
+	__le32 early_debug_size;
+	__le32 reserved;
+} __packed;
+
+/*
+ * struct iwl_context_info - device INIT configuration
+ * @version: version information of context info and HW
+ * @control: control flags of FH configurations
+ * @rbd_cfg: default RX queue configuration
+ * @hcmd_cfg: command queue configuration
+ * @dump_cfg: core dump data
+ * @edbg_cfg: early debug configuration
+ * @pnvm_cfg: platform nvm configuration
+ * @dram: firmware image addresses in DRAM
+ */
+struct iwl_context_info {
+	struct iwl_context_info_version version;
+	struct iwl_context_info_control control;
+	__le64 reserved0;
+	struct iwl_context_info_rbd_cfg rbd_cfg;
+	struct iwl_context_info_hcmd_cfg hcmd_cfg;
+	__le32 reserved1[4];
+	struct iwl_context_info_dump_cfg dump_cfg;
+	struct iwl_context_info_early_dbg_cfg edbg_cfg;
+	struct iwl_context_info_pnvm_cfg pnvm_cfg;
+	__le32 reserved2[16];
+	struct iwl_context_info_dram dram;
+	__le32 reserved3[16];
+} __packed;
+
+int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, const struct fw_img *fw);
+void iwl_pcie_ctxt_info_free(struct iwl_trans *trans);
+void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans);
+int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
+			 const struct fw_img *fw,
+			 struct iwl_context_info_dram *ctxt_dram);
+
+#endif /* __iwl_context_info_file_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
new file mode 100644
index 0000000..9019de9
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -0,0 +1,624 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016        Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_csr_h__
+#define __iwl_csr_h__
+/*
+ * CSR (control and status registers)
+ *
+ * CSR registers are mapped directly into PCI bus space, and are accessible
+ * whenever platform supplies power to device, even when device is in
+ * low power states due to driver-invoked device resets
+ * (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes.
+ *
+ * Use iwl_write32() and iwl_read32() family to access these registers;
+ * these provide simple PCI bus access, without waking up the MAC.
+ * Do not use iwl_write_direct32() family for these registers;
+ * no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ.
+ * The MAC (uCode processor, etc.) does not need to be powered up for accessing
+ * the CSR registers.
+ *
+ * NOTE:  Device does need to be awake in order to read this memory
+ *        via CSR_EEPROM and CSR_OTP registers
+ */
+#define CSR_BASE    (0x000)
+
+#define CSR_HW_IF_CONFIG_REG    (CSR_BASE+0x000) /* hardware interface config */
+#define CSR_INT_COALESCING      (CSR_BASE+0x004) /* accum ints, 32-usec units */
+#define CSR_INT                 (CSR_BASE+0x008) /* host interrupt status/ack */
+#define CSR_INT_MASK            (CSR_BASE+0x00c) /* host interrupt enable */
+#define CSR_FH_INT_STATUS       (CSR_BASE+0x010) /* busmaster int status/ack */
+#define CSR_GPIO_IN             (CSR_BASE+0x018) /* read external chip pins */
+#define CSR_RESET               (CSR_BASE+0x020) /* busmaster enable, NMI, etc */
+#define CSR_GP_CNTRL            (CSR_BASE+0x024)
+
+/* 2nd byte of CSR_INT_COALESCING, not accessible via iwl_write32()! */
+#define CSR_INT_PERIODIC_REG	(CSR_BASE+0x005)
+
+/*
+ * Hardware revision info
+ * Bit fields:
+ * 31-16:  Reserved
+ *  15-4:  Type of device:  see CSR_HW_REV_TYPE_xxx definitions
+ *  3-2:  Revision step:  0 = A, 1 = B, 2 = C, 3 = D
+ *  1-0:  "Dash" (-) value, as in A-1, etc.
+ */
+#define CSR_HW_REV              (CSR_BASE+0x028)
+
+/*
+ * RF ID revision info
+ * Bit fields:
+ * 31:24: Reserved (set to 0x0)
+ * 23:12: Type
+ * 11:8:  Step (A - 0x0, B - 0x1, etc)
+ * 7:4:   Dash
+ * 3:0:   Flavor
+ */
+#define CSR_HW_RF_ID		(CSR_BASE+0x09c)
+
+/*
+ * EEPROM and OTP (one-time-programmable) memory reads
+ *
+ * NOTE:  Device must be awake, initialized via apm_ops.init(),
+ *        in order to read.
+ */
+#define CSR_EEPROM_REG          (CSR_BASE+0x02c)
+#define CSR_EEPROM_GP           (CSR_BASE+0x030)
+#define CSR_OTP_GP_REG   	(CSR_BASE+0x034)
+
+#define CSR_GIO_REG		(CSR_BASE+0x03C)
+#define CSR_GP_UCODE_REG	(CSR_BASE+0x048)
+#define CSR_GP_DRIVER_REG	(CSR_BASE+0x050)
+
+/*
+ * UCODE-DRIVER GP (general purpose) mailbox registers.
+ * SET/CLR registers set/clear bit(s) if "1" is written.
+ */
+#define CSR_UCODE_DRV_GP1       (CSR_BASE+0x054)
+#define CSR_UCODE_DRV_GP1_SET   (CSR_BASE+0x058)
+#define CSR_UCODE_DRV_GP1_CLR   (CSR_BASE+0x05c)
+#define CSR_UCODE_DRV_GP2       (CSR_BASE+0x060)
+
+#define CSR_MBOX_SET_REG	(CSR_BASE + 0x88)
+
+#define CSR_LED_REG             (CSR_BASE+0x094)
+#define CSR_DRAM_INT_TBL_REG	(CSR_BASE+0x0A0)
+#define CSR_MAC_SHADOW_REG_CTRL		(CSR_BASE + 0x0A8) /* 6000 and up */
+#define CSR_MAC_SHADOW_REG_CTRL_RX_WAKE	BIT(20)
+#define CSR_MAC_SHADOW_REG_CTL2		(CSR_BASE + 0x0AC)
+#define CSR_MAC_SHADOW_REG_CTL2_RX_WAKE	0xFFFF
+
+/* GIO Chicken Bits (PCI Express bus link power management) */
+#define CSR_GIO_CHICKEN_BITS    (CSR_BASE+0x100)
+
+/* host chicken bits */
+#define CSR_HOST_CHICKEN	(CSR_BASE + 0x204)
+#define CSR_HOST_CHICKEN_PM_IDLE_SRC_DIS_SB_PME	BIT(19)
+
+/* Analog phase-lock-loop configuration  */
+#define CSR_ANA_PLL_CFG         (CSR_BASE+0x20c)
+
+/*
+ * CSR HW resources monitor registers
+ */
+#define CSR_MONITOR_CFG_REG		(CSR_BASE+0x214)
+#define CSR_MONITOR_STATUS_REG		(CSR_BASE+0x228)
+#define CSR_MONITOR_XTAL_RESOURCES	(0x00000010)
+
+/*
+ * CSR Hardware Revision Workaround Register.  Indicates hardware rev;
+ * "step" determines CCK backoff for txpower calculation.
+ * See also CSR_HW_REV register.
+ * Bit fields:
+ *  3-2:  0 = A, 1 = B, 2 = C, 3 = D step
+ *  1-0:  "Dash" (-) value, as in C-1, etc.
+ */
+#define CSR_HW_REV_WA_REG		(CSR_BASE+0x22C)
+
+#define CSR_DBG_HPET_MEM_REG		(CSR_BASE+0x240)
+#define CSR_DBG_LINK_PWR_MGMT_REG	(CSR_BASE+0x250)
+
+/* Bits for CSR_HW_IF_CONFIG_REG */
+#define CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH	(0x00000003)
+#define CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP	(0x0000000C)
+#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER	(0x000000C0)
+#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI		(0x00000100)
+#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI	(0x00000200)
+#define CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE	(0x00000C00)
+#define CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH	(0x00003000)
+#define CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP	(0x0000C000)
+
+#define CSR_HW_IF_CONFIG_REG_POS_MAC_DASH	(0)
+#define CSR_HW_IF_CONFIG_REG_POS_MAC_STEP	(2)
+#define CSR_HW_IF_CONFIG_REG_POS_BOARD_VER	(6)
+#define CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE	(10)
+#define CSR_HW_IF_CONFIG_REG_POS_PHY_DASH	(12)
+#define CSR_HW_IF_CONFIG_REG_POS_PHY_STEP	(14)
+
+#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A	(0x00080000)
+#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM	(0x00200000)
+#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY	(0x00400000) /* PCI_OWN_SEM */
+#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
+#define CSR_HW_IF_CONFIG_REG_PREPARE		  (0x08000000) /* WAKE_ME */
+#define CSR_HW_IF_CONFIG_REG_ENABLE_PME		  (0x10000000)
+#define CSR_HW_IF_CONFIG_REG_PERSIST_MODE	  (0x40000000) /* PERSISTENCE */
+
+#define CSR_MBOX_SET_REG_OS_ALIVE		BIT(5)
+
+#define CSR_INT_PERIODIC_DIS			(0x00) /* disable periodic int*/
+#define CSR_INT_PERIODIC_ENA			(0xFF) /* 255*32 usec ~ 8 msec */
+
+/* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
+ * acknowledged (reset) by host writing "1" to flagged bits. */
+#define CSR_INT_BIT_FH_RX        (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */
+#define CSR_INT_BIT_HW_ERR       (1 << 29) /* DMA hardware error FH_INT[31] */
+#define CSR_INT_BIT_RX_PERIODIC	 (1 << 28) /* Rx periodic */
+#define CSR_INT_BIT_FH_TX        (1 << 27) /* Tx DMA FH_INT[1:0] */
+#define CSR_INT_BIT_SCD          (1 << 26) /* TXQ pointer advanced */
+#define CSR_INT_BIT_SW_ERR       (1 << 25) /* uCode error */
+#define CSR_INT_BIT_RF_KILL      (1 << 7)  /* HW RFKILL switch GP_CNTRL[27] toggled */
+#define CSR_INT_BIT_CT_KILL      (1 << 6)  /* Critical temp (chip too hot) rfkill */
+#define CSR_INT_BIT_SW_RX        (1 << 3)  /* Rx, command responses */
+#define CSR_INT_BIT_WAKEUP       (1 << 1)  /* NIC controller waking up (pwr mgmt) */
+#define CSR_INT_BIT_ALIVE        (1 << 0)  /* uCode interrupts once it initializes */
+
+#define CSR_INI_SET_MASK	(CSR_INT_BIT_FH_RX   | \
+				 CSR_INT_BIT_HW_ERR  | \
+				 CSR_INT_BIT_FH_TX   | \
+				 CSR_INT_BIT_SW_ERR  | \
+				 CSR_INT_BIT_RF_KILL | \
+				 CSR_INT_BIT_SW_RX   | \
+				 CSR_INT_BIT_WAKEUP  | \
+				 CSR_INT_BIT_ALIVE   | \
+				 CSR_INT_BIT_RX_PERIODIC)
+
+/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
+#define CSR_FH_INT_BIT_ERR       (1 << 31) /* Error */
+#define CSR_FH_INT_BIT_HI_PRIOR  (1 << 30) /* High priority Rx, bypass coalescing */
+#define CSR_FH_INT_BIT_RX_CHNL1  (1 << 17) /* Rx channel 1 */
+#define CSR_FH_INT_BIT_RX_CHNL0  (1 << 16) /* Rx channel 0 */
+#define CSR_FH_INT_BIT_TX_CHNL1  (1 << 1)  /* Tx channel 1 */
+#define CSR_FH_INT_BIT_TX_CHNL0  (1 << 0)  /* Tx channel 0 */
+
+#define CSR_FH_INT_RX_MASK	(CSR_FH_INT_BIT_HI_PRIOR | \
+				CSR_FH_INT_BIT_RX_CHNL1 | \
+				CSR_FH_INT_BIT_RX_CHNL0)
+
+#define CSR_FH_INT_TX_MASK	(CSR_FH_INT_BIT_TX_CHNL1 | \
+				CSR_FH_INT_BIT_TX_CHNL0)
+
+/* GPIO */
+#define CSR_GPIO_IN_BIT_AUX_POWER                   (0x00000200)
+#define CSR_GPIO_IN_VAL_VAUX_PWR_SRC                (0x00000000)
+#define CSR_GPIO_IN_VAL_VMAIN_PWR_SRC               (0x00000200)
+
+/* RESET */
+#define CSR_RESET_REG_FLAG_NEVO_RESET                (0x00000001)
+#define CSR_RESET_REG_FLAG_FORCE_NMI                 (0x00000002)
+#define CSR_RESET_REG_FLAG_MASTER_DISABLED           (0x00000100)
+#define CSR_RESET_REG_FLAG_STOP_MASTER               (0x00000200)
+#define CSR_RESET_LINK_PWR_MGMT_DISABLED             (0x80000000)
+
+/*
+ * GP (general purpose) CONTROL REGISTER
+ * Bit fields:
+ *    27:  HW_RF_KILL_SW
+ *         Indicates state of (platform's) hardware RF-Kill switch
+ * 26-24:  POWER_SAVE_TYPE
+ *         Indicates current power-saving mode:
+ *         000 -- No power saving
+ *         001 -- MAC power-down
+ *         010 -- PHY (radio) power-down
+ *         011 -- Error
+ *    10:  XTAL ON request
+ *   9-6:  SYS_CONFIG
+ *         Indicates current system configuration, reflecting pins on chip
+ *         as forced high/low by device circuit board.
+ *     4:  GOING_TO_SLEEP
+ *         Indicates MAC is entering a power-saving sleep power-down.
+ *         Not a good time to access device-internal resources.
+ */
+#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP         (0x00000010)
+#define CSR_GP_CNTRL_REG_FLAG_XTAL_ON		     (0x00000400)
+
+#define CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE         (0x07000000)
+#define CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN     (0x04000000)
+#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW          (0x08000000)
+
+
+/* HW REV */
+#define CSR_HW_REV_DASH(_val)          (((_val) & 0x0000003) >> 0)
+#define CSR_HW_REV_STEP(_val)          (((_val) & 0x000000C) >> 2)
+
+/* HW RFID */
+#define CSR_HW_RFID_FLAVOR(_val)       (((_val) & 0x000000F) >> 0)
+#define CSR_HW_RFID_DASH(_val)         (((_val) & 0x00000F0) >> 4)
+#define CSR_HW_RFID_STEP(_val)         (((_val) & 0x0000F00) >> 8)
+#define CSR_HW_RFID_TYPE(_val)         (((_val) & 0x0FFF000) >> 12)
+
+/* hw_rev step values */
+enum {
+	SILICON_A_STEP = 0,
+	SILICON_B_STEP,
+	SILICON_C_STEP,
+};
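+
+/*
+ * Illustrative sketch (editor's example, not part of the driver): assuming
+ * the iwl_read32() helper declared in iwl-io.h, the step/dash fields are
+ * decoded with the extraction macros above:
+ *
+ *	u32 hw_rev = iwl_read32(trans, CSR_HW_REV);
+ *
+ *	if (CSR_HW_REV_STEP(hw_rev) == SILICON_B_STEP)
+ *		... B-step specific handling ...
+ */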
+
+
+#define CSR_HW_REV_TYPE_MSK		(0x000FFF0)
+#define CSR_HW_REV_TYPE_5300		(0x0000020)
+#define CSR_HW_REV_TYPE_5350		(0x0000030)
+#define CSR_HW_REV_TYPE_5100		(0x0000050)
+#define CSR_HW_REV_TYPE_5150		(0x0000040)
+#define CSR_HW_REV_TYPE_1000		(0x0000060)
+#define CSR_HW_REV_TYPE_6x00		(0x0000070)
+#define CSR_HW_REV_TYPE_6x50		(0x0000080)
+#define CSR_HW_REV_TYPE_6150		(0x0000084)
+#define CSR_HW_REV_TYPE_6x05		(0x00000B0)
+#define CSR_HW_REV_TYPE_6x30		CSR_HW_REV_TYPE_6x05
+#define CSR_HW_REV_TYPE_6x35		CSR_HW_REV_TYPE_6x05
+#define CSR_HW_REV_TYPE_2x30		(0x00000C0)
+#define CSR_HW_REV_TYPE_2x00		(0x0000100)
+#define CSR_HW_REV_TYPE_105		(0x0000110)
+#define CSR_HW_REV_TYPE_135		(0x0000120)
+#define CSR_HW_REV_TYPE_7265D		(0x0000210)
+#define CSR_HW_REV_TYPE_NONE		(0x00001F0)
+#define CSR_HW_REV_TYPE_QNJ		(0x0000360)
+#define CSR_HW_REV_TYPE_HR_CDB		(0x0000340)
+
+/* RF_ID value */
+#define CSR_HW_RF_ID_TYPE_JF		(0x00105100)
+#define CSR_HW_RF_ID_TYPE_HR		(0x0010A000)
+#define CSR_HW_RF_ID_TYPE_HRCDB		(0x00109F00)
+
+/* HW_RF CHIP ID  */
+#define CSR_HW_RF_ID_TYPE_CHIP_ID(_val) (((_val) >> 12) & 0xFFF)
+
+/* HW_RF CHIP STEP  */
+#define CSR_HW_RF_STEP(_val) (((_val) >> 8) & 0xF)
+
+/* EEPROM REG */
+#define CSR_EEPROM_REG_READ_VALID_MSK	(0x00000001)
+#define CSR_EEPROM_REG_BIT_CMD		(0x00000002)
+#define CSR_EEPROM_REG_MSK_ADDR		(0x0000FFFC)
+#define CSR_EEPROM_REG_MSK_DATA		(0xFFFF0000)
+
+/* EEPROM GP */
+#define CSR_EEPROM_GP_VALID_MSK		(0x00000007) /* signature */
+#define CSR_EEPROM_GP_IF_OWNER_MSK	(0x00000180)
+#define CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP	(0x00000000)
+#define CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP		(0x00000001)
+#define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K		(0x00000002)
+#define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K		(0x00000004)
+
+/* One-time-programmable memory general purpose reg */
+#define CSR_OTP_GP_REG_DEVICE_SELECT	(0x00010000) /* 0 - EEPROM, 1 - OTP */
+#define CSR_OTP_GP_REG_OTP_ACCESS_MODE	(0x00020000) /* 0 - absolute, 1 - relative */
+#define CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK          (0x00100000) /* bit 20 */
+#define CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK        (0x00200000) /* bit 21 */
+
+/* GP REG */
+#define CSR_GP_REG_POWER_SAVE_STATUS_MSK            (0x03000000) /* bit 24/25 */
+#define CSR_GP_REG_NO_POWER_SAVE            (0x00000000)
+#define CSR_GP_REG_MAC_POWER_SAVE           (0x01000000)
+#define CSR_GP_REG_PHY_POWER_SAVE           (0x02000000)
+#define CSR_GP_REG_POWER_SAVE_ERROR         (0x03000000)
+
+
+/* CSR GIO */
+#define CSR_GIO_REG_VAL_L0S_ENABLED	(0x00000002)
+
+/*
+ * UCODE-DRIVER GP (general purpose) mailbox register 1
+ * Host driver and uCode write and/or read this register to communicate with
+ * each other.
+ * Bit fields:
+ *     4:  UCODE_DISABLE
+ *         Host sets this to request permanent halt of uCode, same as
+ *         sending CARD_STATE command with "halt" bit set.
+ *     3:  CT_KILL_EXIT
+ *         Host sets this to request exit from CT_KILL state, i.e. host thinks
+ *         device temperature is low enough to continue normal operation.
+ *     2:  CMD_BLOCKED
+ *         Host sets this during RF KILL power-down sequence (HW, SW, CT KILL)
+ *         to release uCode to clear all Tx and command queues, enter
+ *         unassociated mode, and power down.
+ *         NOTE:  Some devices also use HBUS_TARG_MBX_C register for this bit.
+ *     1:  SW_BIT_RFKILL
+ *         Host sets this when issuing CARD_STATE command to request
+ *         device sleep.
+ *     0:  MAC_SLEEP
+ *         uCode sets this when preparing a power-saving power-down.
+ *         uCode resets this when power-up is complete and SRAM is sane.
+ *         NOTE:  device saves internal SRAM data to host when powering down,
+ *                and must restore this data after powering back up.
+ *                MAC_SLEEP is the best indication that restore is complete.
+ *                Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
+ *                do not need to save/restore it.
+ */
+#define CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP             (0x00000001)
+#define CSR_UCODE_SW_BIT_RFKILL                     (0x00000002)
+#define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED           (0x00000004)
+#define CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT      (0x00000008)
+#define CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE       (0x00000020)
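+
+/*
+ * Illustrative sketch (editor's example, not part of the driver): the
+ * SET/CLR shadow registers make bit updates race-free. Assuming
+ * iwl_write32() from iwl-io.h, blocking and later unblocking commands
+ * would look like:
+ *
+ *	iwl_write32(trans, CSR_UCODE_DRV_GP1_SET,
+ *		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+ *	...
+ *	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
+ *		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+ */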
+
+/* GP Driver */
+#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_MSK	    (0x00000003)
+#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_3x3_HYB	    (0x00000000)
+#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_HYB	    (0x00000001)
+#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA	    (0x00000002)
+#define CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6	    (0x00000004)
+#define CSR_GP_DRIVER_REG_BIT_6050_1x2		    (0x00000008)
+
+#define CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER	    (0x00000080)
+
+/* GIO Chicken Bits (PCI Express bus link power management) */
+#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX  (0x00800000)
+#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER  (0x20000000)
+
+/* LED */
+#define CSR_LED_BSM_CTRL_MSK (0xFFFFFFDF)
+#define CSR_LED_REG_TURN_ON (0x60)
+#define CSR_LED_REG_TURN_OFF (0x20)
+
+/* ANA_PLL */
+#define CSR50_ANA_PLL_CFG_VAL        (0x00880300)
+
+/* HPET MEM debug */
+#define CSR_DBG_HPET_MEM_REG_VAL	(0xFFFF0000)
+
+/* DRAM INT TABLE */
+#define CSR_DRAM_INT_TBL_ENABLE		(1 << 31)
+#define CSR_DRAM_INIT_TBL_WRITE_POINTER	(1 << 28)
+#define CSR_DRAM_INIT_TBL_WRAP_CHECK	(1 << 27)
+
+/*
+ * SHR target access (Shared block memory space)
+ *
+ * Shared internal registers can be accessed directly from PCI bus through SHR
+ * arbiter without need for the MAC HW to be powered up. This is possible due to
+ * indirect read/write via HEEP_CTRL_WRD_PCIEX_CTRL (0xEC) and
+ * HEEP_CTRL_WRD_PCIEX_DATA (0xF4) registers.
+ *
+ * Use iwl_write32()/iwl_read32() family to access these registers. The MAC HW
+ * need not be powered up so no "grab nic access" is required.
+ */
+
+/*
+ * Registers for accessing shared registers (e.g. SHR_APMG_GP1,
+ * SHR_APMG_XTAL_CFG). For example, to read from SHR_APMG_GP1 register (0x1DC),
+ * first, write to the control register:
+ * HEEP_CTRL_WRD_PCIEX_CTRL[15:0] = 0x1DC (offset of the SHR_APMG_GP1 register)
+ * HEEP_CTRL_WRD_PCIEX_CTRL[29:28] = 2 (read access)
+ * second, read from the data register HEEP_CTRL_WRD_PCIEX_DATA[31:0].
+ *
+ * To write the register, first, write to the data register
+ * HEEP_CTRL_WRD_PCIEX_DATA[31:0] and then:
+ * HEEP_CTRL_WRD_PCIEX_CTRL[15:0] = 0x1DC (offset of the SHR_APMG_GP1 register)
+ * HEEP_CTRL_WRD_PCIEX_CTRL[29:28] = 3 (write access)
+ */
+#define HEEP_CTRL_WRD_PCIEX_CTRL_REG	(CSR_BASE+0x0ec)
+#define HEEP_CTRL_WRD_PCIEX_DATA_REG	(CSR_BASE+0x0f4)
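+
+/*
+ * Illustrative sketch (editor's example, not part of the driver) of the
+ * read sequence described above, assuming iwl_read32()/iwl_write32()
+ * from iwl-io.h:
+ *
+ *	static u32 shr_read(struct iwl_trans *trans, u32 reg)
+ *	{
+ *		iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
+ *			    (reg & 0xffff) | (2 << 28));
+ *		return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
+ *	}
+ */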
+
+/*
+ * HBUS (Host-side Bus)
+ *
+ * HBUS registers are mapped directly into PCI bus space, but are used
+ * to indirectly access device's internal memory or registers that
+ * may be powered-down.
+ *
+ * Use iwl_write_direct32()/iwl_read_direct32() family for these registers;
+ * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ
+ * to make sure the MAC (uCode processor, etc.) is powered up for accessing
+ * internal resources.
+ *
+ * Do not use iwl_write32()/iwl_read32() family to access these registers;
+ * these provide only simple PCI bus access, without waking up the MAC.
+ */
+#define HBUS_BASE	(0x400)
+
+/*
+ * Registers for accessing device's internal SRAM memory (e.g. SCD SRAM
+ * structures, error log, event log, verifying uCode load).
+ * First write to address register, then read from or write to data register
+ * to complete the job.  Once the address register is set up, accesses to
+ * data registers auto-increment the address by one dword.
+ * Bit usage for address registers (read or write):
+ *  0-31:  memory address within device
+ */
+#define HBUS_TARG_MEM_RADDR     (HBUS_BASE+0x00c)
+#define HBUS_TARG_MEM_WADDR     (HBUS_BASE+0x010)
+#define HBUS_TARG_MEM_WDAT      (HBUS_BASE+0x018)
+#define HBUS_TARG_MEM_RDAT      (HBUS_BASE+0x01c)
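+
+/*
+ * Illustrative sketch (editor's example, not part of the driver): thanks to
+ * the auto-increment, a multi-dword SRAM read sets the address register only
+ * once (nic access must already be held; iwl_read32()/iwl_write32() from
+ * iwl-io.h assumed):
+ *
+ *	iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
+ *	for (i = 0; i < dwords; i++)
+ *		buf[i] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
+ */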
+
+/* Mailbox C, used as workaround alternative to CSR_UCODE_DRV_GP1 mailbox */
+#define HBUS_TARG_MBX_C         (HBUS_BASE+0x030)
+#define HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED         (0x00000004)
+
+/*
+ * Registers for accessing device's internal peripheral registers
+ * (e.g. SCD, BSM, etc.).  First write to address register,
+ * then read from or write to data register to complete the job.
+ * Bit usage for address registers (read or write):
+ *  0-15:  register address (offset) within device
+ * 24-25:  (# bytes - 1) to read or write (e.g. 3 for dword)
+ */
+#define HBUS_TARG_PRPH_WADDR    (HBUS_BASE+0x044)
+#define HBUS_TARG_PRPH_RADDR    (HBUS_BASE+0x048)
+#define HBUS_TARG_PRPH_WDAT     (HBUS_BASE+0x04c)
+#define HBUS_TARG_PRPH_RDAT     (HBUS_BASE+0x050)
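+
+/*
+ * Illustrative sketch (editor's example, not part of the driver): a dword
+ * peripheral register read encodes 3 (# bytes - 1) in bits 24-25 alongside
+ * the register offset, per the bit usage above:
+ *
+ *	iwl_write32(trans, HBUS_TARG_PRPH_RADDR,
+ *		    (reg & 0xffff) | (3 << 24));
+ *	val = iwl_read32(trans, HBUS_TARG_PRPH_RDAT);
+ */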
+
+/* Used to enable DBGM */
+#define HBUS_TARG_TEST_REG	(HBUS_BASE+0x05c)
+
+/*
+ * Per-Tx-queue write pointer (index, really!)
+ * Indicates index to next TFD that driver will fill (1 past latest filled).
+ * Bit usage:
+ *  0-7:  queue write index
+ * 11-8:  queue selector
+ */
+#define HBUS_TARG_WRPTR         (HBUS_BASE+0x060)
+
+/**********************************************************
+ * CSR values
+ **********************************************************/
+/*
+ * Host interrupt timeout value, used when setting the interrupt coalescing
+ * timer. CSR_INT_COALESCING is an 8-bit register in 32-usec units.
+ *
+ * The default interrupt coalescing timer is 64 x 32 = 2048 usecs.
+ */
+#define IWL_HOST_INT_TIMEOUT_MAX	(0xFF)
+#define IWL_HOST_INT_TIMEOUT_DEF	(0x40)
+#define IWL_HOST_INT_TIMEOUT_MIN	(0x0)
+#define IWL_HOST_INT_OPER_MODE		BIT(31)
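+
+/*
+ * Illustrative sketch (editor's example, not part of the driver): the
+ * default value above corresponds to 0x40 * 32 = 2048 usecs and could be
+ * programmed as (iwl_write8() from iwl-io.h assumed):
+ *
+ *	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+ */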
+
+/*****************************************************************************
+ *                        7000/3000 series SHR DTS addresses                 *
+ *****************************************************************************/
+
+/* Diode Results Register Structure: */
+enum dtd_diode_reg {
+	DTS_DIODE_REG_DIG_VAL			= 0x000000FF, /* bits [7:0] */
+	DTS_DIODE_REG_VREF_LOW			= 0x0000FF00, /* bits [15:8] */
+	DTS_DIODE_REG_VREF_HIGH			= 0x00FF0000, /* bits [23:16] */
+	DTS_DIODE_REG_VREF_ID			= 0x03000000, /* bits [25:24] */
+	DTS_DIODE_REG_PASS_ONCE			= 0x80000000, /* bits [31:31] */
+	DTS_DIODE_REG_FLAGS_MSK			= 0xFF000000, /* bits [31:24] */
+/* Those are the masks INSIDE the flags bit-field: */
+	DTS_DIODE_REG_FLAGS_VREFS_ID_POS	= 0,
+	DTS_DIODE_REG_FLAGS_VREFS_ID		= 0x00000003, /* bits [1:0] */
+	DTS_DIODE_REG_FLAGS_PASS_ONCE_POS	= 7,
+	DTS_DIODE_REG_FLAGS_PASS_ONCE		= 0x00000080, /* bits [7:7] */
+};
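+
+/*
+ * Illustrative sketch (editor's example, not part of the driver):
+ * extracting fields from a raw diode register value with the masks above:
+ *
+ *	u8 flags = (reg & DTS_DIODE_REG_FLAGS_MSK) >> 24;
+ *	u8 vrefs_id = (flags & DTS_DIODE_REG_FLAGS_VREFS_ID) >>
+ *		      DTS_DIODE_REG_FLAGS_VREFS_ID_POS;
+ */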
+
+/*****************************************************************************
+ *                        MSIX related registers                             *
+ *****************************************************************************/
+
+#define CSR_MSIX_BASE			(0x2000)
+#define CSR_MSIX_FH_INT_CAUSES_AD	(CSR_MSIX_BASE + 0x800)
+#define CSR_MSIX_FH_INT_MASK_AD		(CSR_MSIX_BASE + 0x804)
+#define CSR_MSIX_HW_INT_CAUSES_AD	(CSR_MSIX_BASE + 0x808)
+#define CSR_MSIX_HW_INT_MASK_AD		(CSR_MSIX_BASE + 0x80C)
+#define CSR_MSIX_AUTOMASK_ST_AD		(CSR_MSIX_BASE + 0x810)
+#define CSR_MSIX_RX_IVAR_AD_REG		(CSR_MSIX_BASE + 0x880)
+#define CSR_MSIX_IVAR_AD_REG		(CSR_MSIX_BASE + 0x890)
+#define CSR_MSIX_PENDING_PBA_AD		(CSR_MSIX_BASE + 0x1000)
+#define CSR_MSIX_RX_IVAR(cause)		(CSR_MSIX_RX_IVAR_AD_REG + (cause))
+#define CSR_MSIX_IVAR(cause)		(CSR_MSIX_IVAR_AD_REG + (cause))
+
+#define MSIX_FH_INT_CAUSES_Q(q)		(q)
+
+/*
+ * Causes for the FH register interrupts
+ */
+enum msix_fh_int_causes {
+	MSIX_FH_INT_CAUSES_Q0			= BIT(0),
+	MSIX_FH_INT_CAUSES_Q1			= BIT(1),
+	MSIX_FH_INT_CAUSES_D2S_CH0_NUM		= BIT(16),
+	MSIX_FH_INT_CAUSES_D2S_CH1_NUM		= BIT(17),
+	MSIX_FH_INT_CAUSES_S2D			= BIT(19),
+	MSIX_FH_INT_CAUSES_FH_ERR		= BIT(21),
+};
+
+/*
+ * Causes for the HW register interrupts
+ */
+enum msix_hw_int_causes {
+	MSIX_HW_INT_CAUSES_REG_ALIVE		= BIT(0),
+	MSIX_HW_INT_CAUSES_REG_WAKEUP		= BIT(1),
+	MSIX_HW_INT_CAUSES_REG_IPC		= BIT(1),
+	MSIX_HW_INT_CAUSES_REG_SW_ERR_V2	= BIT(5),
+	MSIX_HW_INT_CAUSES_REG_CT_KILL		= BIT(6),
+	MSIX_HW_INT_CAUSES_REG_RF_KILL		= BIT(7),
+	MSIX_HW_INT_CAUSES_REG_PERIODIC		= BIT(8),
+	MSIX_HW_INT_CAUSES_REG_SW_ERR		= BIT(25),
+	MSIX_HW_INT_CAUSES_REG_SCD		= BIT(26),
+	MSIX_HW_INT_CAUSES_REG_FH_TX		= BIT(27),
+	MSIX_HW_INT_CAUSES_REG_HW_ERR		= BIT(29),
+	MSIX_HW_INT_CAUSES_REG_HAP		= BIT(30),
+};
+
+#define MSIX_MIN_INTERRUPT_VECTORS		2
+#define MSIX_AUTO_CLEAR_CAUSE			0
+#define MSIX_NON_AUTO_CLEAR_CAUSE		BIT(7)
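+
+/*
+ * Illustrative sketch (editor's example, not part of the driver): mapping
+ * an interrupt cause to an MSI-X vector via its IVAR entry (iwl_write8()
+ * from iwl-io.h assumed; cause_offset and vector are hypothetical):
+ *
+ *	iwl_write8(trans, CSR_MSIX_IVAR(cause_offset),
+ *		   vector | MSIX_NON_AUTO_CLEAR_CAUSE);
+ */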
+
+/*****************************************************************************
+ *                     HW address related registers                          *
+ *****************************************************************************/
+
+#define CSR_ADDR_BASE			(0x380)
+#define CSR_MAC_ADDR0_OTP		(CSR_ADDR_BASE)
+#define CSR_MAC_ADDR1_OTP		(CSR_ADDR_BASE + 4)
+#define CSR_MAC_ADDR0_STRAP		(CSR_ADDR_BASE + 8)
+#define CSR_MAC_ADDR1_STRAP		(CSR_ADDR_BASE + 0xC)
+
+#endif /* !__iwl_csr_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-debug.c b/drivers/net/wireless/intel/iwlwifi/iwl-debug.c
new file mode 100644
index 0000000..b1c3b0d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.c
@@ -0,0 +1,136 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/export.h>
+#include "iwl-drv.h"
+#include "iwl-debug.h"
+#include "iwl-devtrace.h"
+
+#define __iwl_fn(fn)						\
+void __iwl_ ##fn(struct device *dev, const char *fmt, ...)	\
+{								\
+	struct va_format vaf = {				\
+		.fmt = fmt,					\
+	};							\
+	va_list args;						\
+								\
+	va_start(args, fmt);					\
+	vaf.va = &args;						\
+	dev_ ##fn(dev, "%pV", &vaf);				\
+	trace_iwlwifi_ ##fn(&vaf);				\
+	va_end(args);						\
+}
+
+__iwl_fn(warn)
+IWL_EXPORT_SYMBOL(__iwl_warn);
+__iwl_fn(info)
+IWL_EXPORT_SYMBOL(__iwl_info);
+__iwl_fn(crit)
+IWL_EXPORT_SYMBOL(__iwl_crit);
+
+void __iwl_err(struct device *dev, bool rfkill_prefix, bool trace_only,
+		const char *fmt, ...)
+{
+	struct va_format vaf = {
+		.fmt = fmt,
+	};
+	va_list args;
+
+	va_start(args, fmt);
+	vaf.va = &args;
+	if (!trace_only) {
+		if (rfkill_prefix)
+			dev_err(dev, "(RFKILL) %pV", &vaf);
+		else
+			dev_err(dev, "%pV", &vaf);
+	}
+	trace_iwlwifi_err(&vaf);
+	va_end(args);
+}
+IWL_EXPORT_SYMBOL(__iwl_err);
+
+#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING)
+void __iwl_dbg(struct device *dev,
+	       u32 level, bool limit, const char *function,
+	       const char *fmt, ...)
+{
+	struct va_format vaf = {
+		.fmt = fmt,
+	};
+	va_list args;
+
+	va_start(args, fmt);
+	vaf.va = &args;
+#ifdef CONFIG_IWLWIFI_DEBUG
+	if (iwl_have_debug_level(level) &&
+	    (!limit || net_ratelimit()))
+		dev_printk(KERN_DEBUG, dev, "%c %s %pV",
+			   in_interrupt() ? 'I' : 'U', function, &vaf);
+#endif
+	trace_iwlwifi_dbg(level, in_interrupt(), function, &vaf);
+	va_end(args);
+}
+IWL_EXPORT_SYMBOL(__iwl_dbg);
+#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
new file mode 100644
index 0000000..c023fcf
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
@@ -0,0 +1,225 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_debug_h__
+#define __iwl_debug_h__
+
+#include "iwl-modparams.h"
+
+
+static inline bool iwl_have_debug_level(u32 level)
+{
+#ifdef CONFIG_IWLWIFI_DEBUG
+	return iwlwifi_mod_params.debug_level & level;
+#else
+	return false;
+#endif
+}
+
+struct device;
+void __iwl_err(struct device *dev, bool rfkill_prefix, bool only_trace,
+		const char *fmt, ...) __printf(4, 5);
+void __iwl_warn(struct device *dev, const char *fmt, ...) __printf(2, 3);
+void __iwl_info(struct device *dev, const char *fmt, ...) __printf(2, 3);
+void __iwl_crit(struct device *dev, const char *fmt, ...) __printf(2, 3);
+
+/* not all compilers can evaluate strlen() at compile time, so use sizeof() */
+#define CHECK_FOR_NEWLINE(f) BUILD_BUG_ON(f[sizeof(f) - 2] != '\n')
+
+/* No matter what m is (priv, bus, trans), this will work */
+#define IWL_ERR_DEV(d, f, a...)						\
+	do {								\
+		CHECK_FOR_NEWLINE(f);					\
+		__iwl_err((d), false, false, f, ## a);			\
+	} while (0)
+#define IWL_ERR(m, f, a...)						\
+	IWL_ERR_DEV((m)->dev, f, ## a)
+#define IWL_WARN(m, f, a...)						\
+	do {								\
+		CHECK_FOR_NEWLINE(f);					\
+		__iwl_warn((m)->dev, f, ## a);				\
+	} while (0)
+#define IWL_INFO(m, f, a...)						\
+	do {								\
+		CHECK_FOR_NEWLINE(f);					\
+		__iwl_info((m)->dev, f, ## a);				\
+	} while (0)
+#define IWL_CRIT(m, f, a...)						\
+	do {								\
+		CHECK_FOR_NEWLINE(f);					\
+		__iwl_crit((m)->dev, f, ## a);				\
+	} while (0)
+
+#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING)
+void __iwl_dbg(struct device *dev,
+	       u32 level, bool limit, const char *function,
+	       const char *fmt, ...) __printf(5, 6);
+#else
+__printf(5, 6) static inline void
+__iwl_dbg(struct device *dev,
+	  u32 level, bool limit, const char *function,
+	  const char *fmt, ...)
+{}
+#endif
+
+#define iwl_print_hex_error(m, p, len)					\
+do {									\
+	print_hex_dump(KERN_ERR, "iwl data: ",				\
+		       DUMP_PREFIX_OFFSET, 16, 1, p, len, 1);		\
+} while (0)
+
+#define __IWL_DEBUG_DEV(dev, level, limit, fmt, args...)		\
+	do {								\
+		CHECK_FOR_NEWLINE(fmt);					\
+		__iwl_dbg(dev, level, limit, __func__, fmt, ##args);	\
+	} while (0)
+#define IWL_DEBUG(m, level, fmt, args...)				\
+	__IWL_DEBUG_DEV((m)->dev, level, false, fmt, ##args)
+#define IWL_DEBUG_DEV(dev, level, fmt, args...)				\
+	__IWL_DEBUG_DEV(dev, level, false, fmt, ##args)
+#define IWL_DEBUG_LIMIT(m, level, fmt, args...)				\
+	__IWL_DEBUG_DEV((m)->dev, level, true, fmt, ##args)
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+#define iwl_print_hex_dump(m, level, p, len)				\
+do {								\
+	if (iwl_have_debug_level(level))				\
+		print_hex_dump(KERN_DEBUG, "iwl data: ",		\
+			       DUMP_PREFIX_OFFSET, 16, 1, p, len, 1);	\
+} while (0)
+#else
+#define iwl_print_hex_dump(m, level, p, len)
+#endif				/* CONFIG_IWLWIFI_DEBUG */
+
+/*
+ * To use the debug system:
+ *
+ * If you are defining a new debug classification, simply add it to the #define
+ * list here in the form of
+ *
+ * #define IWL_DL_xxxx VALUE
+ *
+ * where xxxx should be the name of the classification (for example, WEP).
+ *
+ * You then need to either add a IWL_xxxx_DEBUG() macro definition for your
+ * classification, or use IWL_DEBUG(IWL_DL_xxxx, ...) whenever you want
+ * to send output to that classification.
+ *
+ * The active debug levels can be accessed via files
+ *
+ *	/sys/module/iwlwifi/parameters/debug
+ * when CONFIG_IWLWIFI_DEBUG=y.
+ *
+ *	/sys/kernel/debug/phy0/iwlwifi/debug/debug_level
+ * when CONFIG_IWLWIFI_DEBUGFS=y.
+ *
+ */
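+/*
+ * Illustrative example (editor's note): the IWL_DL_* values below are plain
+ * bitmasks, so several classifications can be enabled at once, e.g.
+ * debug=0x01800001 selects IWL_DL_RX | IWL_DL_TX | IWL_DL_INFO.
+ */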
+
+/* 0x0000000F - 0x00000001 */
+#define IWL_DL_INFO		0x00000001
+#define IWL_DL_MAC80211		0x00000002
+#define IWL_DL_HCMD		0x00000004
+#define IWL_DL_TDLS		0x00000008
+/* 0x000000F0 - 0x00000010 */
+#define IWL_DL_QUOTA		0x00000010
+#define IWL_DL_TE		0x00000020
+#define IWL_DL_EEPROM		0x00000040
+#define IWL_DL_RADIO		0x00000080
+/* 0x00000F00 - 0x00000100 */
+#define IWL_DL_POWER		0x00000100
+#define IWL_DL_TEMP		0x00000200
+#define IWL_DL_RPM		0x00000400
+#define IWL_DL_SCAN		0x00000800
+/* 0x0000F000 - 0x00001000 */
+#define IWL_DL_ASSOC		0x00001000
+#define IWL_DL_DROP		0x00002000
+#define IWL_DL_LAR		0x00004000
+#define IWL_DL_COEX		0x00008000
+/* 0x000F0000 - 0x00010000 */
+#define IWL_DL_FW		0x00010000
+#define IWL_DL_RF_KILL		0x00020000
+#define IWL_DL_FW_ERRORS	0x00040000
+/* 0x00F00000 - 0x00100000 */
+#define IWL_DL_RATE		0x00100000
+#define IWL_DL_CALIB		0x00200000
+#define IWL_DL_WEP		0x00400000
+#define IWL_DL_TX		0x00800000
+/* 0x0F000000 - 0x01000000 */
+#define IWL_DL_RX		0x01000000
+#define IWL_DL_ISR		0x02000000
+#define IWL_DL_HT		0x04000000
+#define IWL_DL_EXTERNAL		0x08000000
+/* 0xF0000000 - 0x10000000 */
+#define IWL_DL_11H		0x10000000
+#define IWL_DL_STATS		0x20000000
+#define IWL_DL_TX_REPLY		0x40000000
+#define IWL_DL_TX_QUEUES	0x80000000
+
+#define IWL_DEBUG_INFO(p, f, a...)	IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
+#define IWL_DEBUG_TDLS(p, f, a...)	IWL_DEBUG(p, IWL_DL_TDLS, f, ## a)
+#define IWL_DEBUG_MAC80211(p, f, a...)	IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
+#define IWL_DEBUG_EXTERNAL(p, f, a...)	IWL_DEBUG(p, IWL_DL_EXTERNAL, f, ## a)
+#define IWL_DEBUG_TEMP(p, f, a...)	IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
+#define IWL_DEBUG_SCAN(p, f, a...)	IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
+#define IWL_DEBUG_RX(p, f, a...)	IWL_DEBUG(p, IWL_DL_RX, f, ## a)
+#define IWL_DEBUG_TX(p, f, a...)	IWL_DEBUG(p, IWL_DL_TX, f, ## a)
+#define IWL_DEBUG_ISR(p, f, a...)	IWL_DEBUG(p, IWL_DL_ISR, f, ## a)
+#define IWL_DEBUG_WEP(p, f, a...)	IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
+#define IWL_DEBUG_HC(p, f, a...)	IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
+#define IWL_DEBUG_QUOTA(p, f, a...)	IWL_DEBUG(p, IWL_DL_QUOTA, f, ## a)
+#define IWL_DEBUG_TE(p, f, a...)	IWL_DEBUG(p, IWL_DL_TE, f, ## a)
+#define IWL_DEBUG_EEPROM(d, f, a...)	IWL_DEBUG_DEV(d, IWL_DL_EEPROM, f, ## a)
+#define IWL_DEBUG_CALIB(p, f, a...)	IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
+#define IWL_DEBUG_FW(p, f, a...)	IWL_DEBUG(p, IWL_DL_FW, f, ## a)
+#define IWL_DEBUG_RF_KILL(p, f, a...)	IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a)
+#define IWL_DEBUG_FW_ERRORS(p, f, a...)	IWL_DEBUG(p, IWL_DL_FW_ERRORS, f, ## a)
+#define IWL_DEBUG_DROP(p, f, a...)	IWL_DEBUG(p, IWL_DL_DROP, f, ## a)
+#define IWL_DEBUG_DROP_LIMIT(p, f, a...)	\
+		IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a)
+#define IWL_DEBUG_COEX(p, f, a...)	IWL_DEBUG(p, IWL_DL_COEX, f, ## a)
+#define IWL_DEBUG_RATE(p, f, a...)	IWL_DEBUG(p, IWL_DL_RATE, f, ## a)
+#define IWL_DEBUG_RATE_LIMIT(p, f, a...)	\
+		IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ## a)
+#define IWL_DEBUG_ASSOC(p, f, a...)	\
+		IWL_DEBUG(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
+#define IWL_DEBUG_ASSOC_LIMIT(p, f, a...)	\
+		IWL_DEBUG_LIMIT(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
+#define IWL_DEBUG_HT(p, f, a...)	IWL_DEBUG(p, IWL_DL_HT, f, ## a)
+#define IWL_DEBUG_STATS(p, f, a...)	IWL_DEBUG(p, IWL_DL_STATS, f, ## a)
+#define IWL_DEBUG_STATS_LIMIT(p, f, a...)	\
+		IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a)
+#define IWL_DEBUG_TX_REPLY(p, f, a...)	IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a)
+#define IWL_DEBUG_TX_QUEUES(p, f, a...)	IWL_DEBUG(p, IWL_DL_TX_QUEUES, f, ## a)
+#define IWL_DEBUG_RADIO(p, f, a...)	IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
+#define IWL_DEBUG_DEV_RADIO(p, f, a...)	IWL_DEBUG_DEV(p, IWL_DL_RADIO, f, ## a)
+#define IWL_DEBUG_POWER(p, f, a...)	IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
+#define IWL_DEBUG_11H(p, f, a...)	IWL_DEBUG(p, IWL_DL_11H, f, ## a)
+#define IWL_DEBUG_RPM(p, f, a...)	IWL_DEBUG(p, IWL_DL_RPM, f, ## a)
+#define IWL_DEBUG_LAR(p, f, a...)	IWL_DEBUG(p, IWL_DL_LAR, f, ## a)
+
+#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
new file mode 100644
index 0000000..a80e420
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
@@ -0,0 +1,98 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015        Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#if !defined(__IWLWIFI_DEVICE_TRACE_DATA) || defined(TRACE_HEADER_MULTI_READ)
+#define __IWLWIFI_DEVICE_TRACE_DATA
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwlwifi_data
+
+TRACE_EVENT(iwlwifi_dev_tx_data,
+	TP_PROTO(const struct device *dev,
+		 struct sk_buff *skb, u8 hdr_len),
+	TP_ARGS(dev, skb, hdr_len),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+
+		__dynamic_array(u8, data,
+				iwl_trace_data(skb) ? skb->len - hdr_len : 0)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		if (iwl_trace_data(skb))
+			skb_copy_bits(skb, hdr_len,
+				      __get_dynamic_array(data),
+				      skb->len - hdr_len);
+	),
+	TP_printk("[%s] TX frame data", __get_str(dev))
+);
+
+TRACE_EVENT(iwlwifi_dev_tx_tso_chunk,
+	TP_PROTO(const struct device *dev,
+		 u8 *data_src, size_t data_len),
+	TP_ARGS(dev, data_src, data_len),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+
+		__dynamic_array(u8, data, data_len)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		memcpy(__get_dynamic_array(data), data_src, data_len);
+	),
+	TP_printk("[%s] TX frame data", __get_str(dev))
+);
+
+TRACE_EVENT(iwlwifi_dev_rx_data,
+	TP_PROTO(const struct device *dev,
+		 const struct iwl_trans *trans,
+		 void *rxbuf, size_t len),
+	TP_ARGS(dev, trans, rxbuf, len),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+
+		__dynamic_array(u8, data,
+				len - iwl_rx_trace_len(trans, rxbuf, len))
+	),
+	TP_fast_assign(
+		size_t offs = iwl_rx_trace_len(trans, rxbuf, len);
+		DEV_ASSIGN;
+		if (offs < len)
+			memcpy(__get_dynamic_array(data),
+			       ((u8 *)rxbuf) + offs, len - offs);
+	),
+	TP_printk("[%s] RX frame data", __get_str(dev))
+);
+#endif /* __IWLWIFI_DEVICE_TRACE_DATA */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE iwl-devtrace-data
+#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h
new file mode 100644
index 0000000..4164dc1
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h
@@ -0,0 +1,214 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016-2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#if !defined(__IWLWIFI_DEVICE_TRACE_IO) || defined(TRACE_HEADER_MULTI_READ)
+#define __IWLWIFI_DEVICE_TRACE_IO
+
+#include <linux/tracepoint.h>
+#include <linux/pci.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwlwifi_io
+
+TRACE_EVENT(iwlwifi_dev_ioread32,
+	TP_PROTO(const struct device *dev, u32 offs, u32 val),
+	TP_ARGS(dev, offs, val),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(u32, offs)
+		__field(u32, val)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->offs = offs;
+		__entry->val = val;
+	),
+	TP_printk("[%s] read io[%#x] = %#x",
+		  __get_str(dev), __entry->offs, __entry->val)
+);
+
+TRACE_EVENT(iwlwifi_dev_iowrite8,
+	TP_PROTO(const struct device *dev, u32 offs, u8 val),
+	TP_ARGS(dev, offs, val),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(u32, offs)
+		__field(u8, val)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->offs = offs;
+		__entry->val = val;
+	),
+	TP_printk("[%s] write io[%#x] = %#x)",
+		  __get_str(dev), __entry->offs, __entry->val)
+);
+
+TRACE_EVENT(iwlwifi_dev_iowrite32,
+	TP_PROTO(const struct device *dev, u32 offs, u32 val),
+	TP_ARGS(dev, offs, val),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(u32, offs)
+		__field(u32, val)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->offs = offs;
+		__entry->val = val;
+	),
+	TP_printk("[%s] write io[%#x] = %#x)",
+		  __get_str(dev), __entry->offs, __entry->val)
+);
+
+TRACE_EVENT(iwlwifi_dev_iowrite64,
+	TP_PROTO(const struct device *dev, u64 offs, u64 val),
+	TP_ARGS(dev, offs, val),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(u64, offs)
+		__field(u64, val)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->offs = offs;
+		__entry->val = val;
+	),
+	TP_printk("[%s] write io[%llu] = %llu)",
+		  __get_str(dev), __entry->offs, __entry->val)
+);
+
+TRACE_EVENT(iwlwifi_dev_iowrite_prph32,
+	TP_PROTO(const struct device *dev, u32 offs, u32 val),
+	TP_ARGS(dev, offs, val),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(u32, offs)
+		__field(u32, val)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->offs = offs;
+		__entry->val = val;
+	),
+	TP_printk("[%s] write PRPH[%#x] = %#x)",
+		  __get_str(dev), __entry->offs, __entry->val)
+);
+
+TRACE_EVENT(iwlwifi_dev_iowrite_prph64,
+	TP_PROTO(const struct device *dev, u64 offs, u64 val),
+	TP_ARGS(dev, offs, val),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(u64, offs)
+		__field(u64, val)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->offs = offs;
+		__entry->val = val;
+	),
+	TP_printk("[%s] write PRPH[%llu] = %llu)",
+		  __get_str(dev), __entry->offs, __entry->val)
+);
+
+TRACE_EVENT(iwlwifi_dev_ioread_prph32,
+	TP_PROTO(const struct device *dev, u32 offs, u32 val),
+	TP_ARGS(dev, offs, val),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(u32, offs)
+		__field(u32, val)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->offs = offs;
+		__entry->val = val;
+	),
+	TP_printk("[%s] read PRPH[%#x] = %#x",
+		  __get_str(dev), __entry->offs, __entry->val)
+);
+
+TRACE_EVENT(iwlwifi_dev_irq,
+	TP_PROTO(const struct device *dev),
+	TP_ARGS(dev),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+	),
+	/* TP_printk("") doesn't compile */
+	TP_printk("%d", 0)
+);
+
+TRACE_EVENT(iwlwifi_dev_irq_msix,
+	TP_PROTO(const struct device *dev, struct msix_entry *msix_entry,
+		 bool defirq, u32 inta_fh, u32 inta_hw),
+	TP_ARGS(dev, msix_entry, defirq, inta_fh, inta_hw),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(u32, entry)
+		__field(u8, defirq)
+		__field(u32, inta_fh)
+		__field(u32, inta_hw)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->entry = msix_entry->entry;
+		__entry->defirq = defirq;
+		__entry->inta_fh = inta_fh;
+		__entry->inta_hw = inta_hw;
+	),
+	TP_printk("entry:%d defirq:%d fh:0x%x, hw:0x%x",
+		  __entry->entry, __entry->defirq,
+		  __entry->inta_fh, __entry->inta_hw)
+);
+
+TRACE_EVENT(iwlwifi_dev_ict_read,
+	TP_PROTO(const struct device *dev, u32 index, u32 value),
+	TP_ARGS(dev, index, value),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(u32, index)
+		__field(u32, value)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->index = index;
+		__entry->value = value;
+	),
+	TP_printk("[%s] read ict[%d] = %#.8x",
+		  __get_str(dev), __entry->index, __entry->value)
+);
+#endif /* __IWLWIFI_DEVICE_TRACE_IO */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE iwl-devtrace-io
+#include <trace/define_trace.h>
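
These I/O events are designed to sit immediately next to the hardware access they describe, inside the transport's register accessors. A hedged sketch of such a call site, assuming a PCIe-style mapped base (trans_pcie_get_base() is a hypothetical helper; only trans->dev and the tracepoint itself come from this file):

	/* sketch: emit the trace, then perform the MMIO write it describes */
	static inline void my_iwl_write32(struct iwl_trans *trans,
					  u32 ofs, u32 val)
	{
		trace_iwlwifi_dev_iowrite32(trans->dev, ofs, val);
		writel(val, trans_pcie_get_base(trans) + ofs);
	}
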
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
new file mode 100644
index 0000000..27e3e4e
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
@@ -0,0 +1,209 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#if !defined(__IWLWIFI_DEVICE_TRACE_IWLWIFI) || defined(TRACE_HEADER_MULTI_READ)
+#define __IWLWIFI_DEVICE_TRACE_IWLWIFI
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwlwifi
+
+TRACE_EVENT(iwlwifi_dev_hcmd,
+	TP_PROTO(const struct device *dev,
+		 struct iwl_host_cmd *cmd, u16 total_size,
+		 struct iwl_cmd_header_wide *hdr),
+	TP_ARGS(dev, cmd, total_size, hdr),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__dynamic_array(u8, hcmd, total_size)
+		__field(u32, flags)
+	),
+	TP_fast_assign(
+		int i, offset = sizeof(struct iwl_cmd_header);
+
+		if (hdr->group_id)
+			offset = sizeof(struct iwl_cmd_header_wide);
+
+		DEV_ASSIGN;
+		__entry->flags = cmd->flags;
+		memcpy(__get_dynamic_array(hcmd), hdr, offset);
+
+		for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
+			if (!cmd->len[i])
+				continue;
+			memcpy((u8 *)__get_dynamic_array(hcmd) + offset,
+			       cmd->data[i], cmd->len[i]);
+			offset += cmd->len[i];
+		}
+	),
+	TP_printk("[%s] hcmd %#.2x.%#.2x (%ssync)",
+		  __get_str(dev), ((u8 *)__get_dynamic_array(hcmd))[1],
+		  ((u8 *)__get_dynamic_array(hcmd))[0],
+		  __entry->flags & CMD_ASYNC ? "a" : "")
+);
+
+TRACE_EVENT(iwlwifi_dev_rx,
+	TP_PROTO(const struct device *dev, const struct iwl_trans *trans,
+		 struct iwl_rx_packet *pkt, size_t len),
+	TP_ARGS(dev, trans, pkt, len),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(u16, cmd)
+		__dynamic_array(u8, rxbuf, iwl_rx_trace_len(trans, pkt, len))
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
+		memcpy(__get_dynamic_array(rxbuf), pkt,
+		       iwl_rx_trace_len(trans, pkt, len));
+	),
+	TP_printk("[%s] RX cmd %#.2x",
+		  __get_str(dev), __entry->cmd)
+);
+
+TRACE_EVENT(iwlwifi_dev_tx,
+	TP_PROTO(const struct device *dev, struct sk_buff *skb,
+		 void *tfd, size_t tfdlen,
+		 void *buf0, size_t buf0_len,
+		 int hdr_len),
+	TP_ARGS(dev, skb, tfd, tfdlen, buf0, buf0_len, hdr_len),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(void *, skbaddr)
+		__field(size_t, framelen)
+		__dynamic_array(u8, tfd, tfdlen)
+
+		/*
+		 * Do not insert between or below these items,
+		 * we want to keep the frame together (except
+		 * for the possible padding).
+		 */
+		__dynamic_array(u8, buf0, buf0_len)
+		__dynamic_array(u8, buf1, hdr_len > 0 && iwl_trace_data(skb) ?
+						0 : skb->len - hdr_len)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->skbaddr = skb;
+		__entry->framelen = buf0_len;
+		if (hdr_len > 0)
+			__entry->framelen += skb->len - hdr_len;
+		memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
+		memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
+		if (hdr_len > 0 && !iwl_trace_data(skb))
+			skb_copy_bits(skb, hdr_len,
+				      __get_dynamic_array(buf1),
+				      skb->len - hdr_len);
+	),
+	TP_printk("[%s] TX %.2x (%zu bytes) skbaddr=%p",
+		  __get_str(dev), ((u8 *)__get_dynamic_array(buf0))[0],
+		  __entry->framelen, __entry->skbaddr)
+);
+
+struct iwl_error_event_table;
+TRACE_EVENT(iwlwifi_dev_ucode_error,
+	TP_PROTO(const struct device *dev, const struct iwl_error_event_table *table,
+		 u32 hw_ver, u32 brd_ver),
+	TP_ARGS(dev, table, hw_ver, brd_ver),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+		__field(u32, desc)
+		__field(u32, tsf_low)
+		__field(u32, data1)
+		__field(u32, data2)
+		__field(u32, line)
+		__field(u32, blink2)
+		__field(u32, ilink1)
+		__field(u32, ilink2)
+		__field(u32, bcon_time)
+		__field(u32, gp1)
+		__field(u32, gp2)
+		__field(u32, rev_type)
+		__field(u32, major)
+		__field(u32, minor)
+		__field(u32, hw_ver)
+		__field(u32, brd_ver)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->desc = table->error_id;
+		__entry->tsf_low = table->tsf_low;
+		__entry->data1 = table->data1;
+		__entry->data2 = table->data2;
+		__entry->line = table->line;
+		__entry->blink2 = table->blink2;
+		__entry->ilink1 = table->ilink1;
+		__entry->ilink2 = table->ilink2;
+		__entry->bcon_time = table->bcon_time;
+		__entry->gp1 = table->gp1;
+		__entry->gp2 = table->gp2;
+		__entry->rev_type = table->gp3;
+		__entry->major = table->ucode_ver;
+		__entry->minor = table->hw_ver;
+		__entry->hw_ver = hw_ver;
+		__entry->brd_ver = brd_ver;
+	),
+	TP_printk("[%s] #%02d %010u data 0x%08X 0x%08X line %u, "
+		  "blink2 0x%05X ilink 0x%05X 0x%05X "
+		  "bcon_tm %010u gp 0x%08X 0x%08X rev_type 0x%08X major 0x%08X "
+		  "minor 0x%08X hw 0x%08X brd 0x%08X",
+		  __get_str(dev), __entry->desc, __entry->tsf_low,
+		  __entry->data1, __entry->data2, __entry->line,
+		  __entry->blink2, __entry->ilink1, __entry->ilink2,
+		  __entry->bcon_time, __entry->gp1, __entry->gp2,
+		  __entry->rev_type, __entry->major, __entry->minor,
+		  __entry->hw_ver, __entry->brd_ver)
+);
+
+TRACE_EVENT(iwlwifi_dev_ucode_event,
+	TP_PROTO(const struct device *dev, u32 time, u32 data, u32 ev),
+	TP_ARGS(dev, time, data, ev),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+
+		__field(u32, time)
+		__field(u32, data)
+		__field(u32, ev)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->time = time;
+		__entry->data = data;
+		__entry->ev = ev;
+	),
+	TP_printk("[%s] EVT_LOGT:%010u:0x%08x:%04u",
+		  __get_str(dev), __entry->time, __entry->data, __entry->ev)
+);
+#endif /* __IWLWIFI_DEVICE_TRACE_IWLWIFI */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE iwl-devtrace-iwlwifi
+#include <trace/define_trace.h>
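
iwlwifi_dev_hcmd flattens a fragmented host command into a single dynamic array: the (narrow or wide) command header first, then every non-empty data fragment back to back. The same copy loop, lifted out as a self-contained userspace-style sketch (names hypothetical):

	#include <string.h>

	/* header first, then each non-empty fragment, contiguously */
	static size_t flatten_cmd(unsigned char *out,
				  const void *hdr, size_t hdr_len,
				  const void *const frags[],
				  const unsigned short lens[], int n)
	{
		size_t offset = hdr_len;
		int i;

		memcpy(out, hdr, hdr_len);
		for (i = 0; i < n; i++) {
			if (!lens[i])
				continue;
			memcpy(out + offset, frags[i], lens[i]);
			offset += lens[i];
		}
		return offset;	/* total bytes written */
	}
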
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h
new file mode 100644
index 0000000..5dfc929
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h
@@ -0,0 +1,97 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#if !defined(__IWLWIFI_DEVICE_TRACE_MSG) || defined(TRACE_HEADER_MULTI_READ)
+#define __IWLWIFI_DEVICE_TRACE_MSG
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwlwifi_msg
+
+#define MAX_MSG_LEN	110
+
+DECLARE_EVENT_CLASS(iwlwifi_msg_event,
+	TP_PROTO(struct va_format *vaf),
+	TP_ARGS(vaf),
+	TP_STRUCT__entry(
+		__dynamic_array(char, msg, MAX_MSG_LEN)
+	),
+	TP_fast_assign(
+		WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+				       MAX_MSG_LEN, vaf->fmt,
+				       *vaf->va) >= MAX_MSG_LEN);
+	),
+	TP_printk("%s", __get_str(msg))
+);
+
+DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_err,
+	TP_PROTO(struct va_format *vaf),
+	TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_warn,
+	TP_PROTO(struct va_format *vaf),
+	TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_info,
+	TP_PROTO(struct va_format *vaf),
+	TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_crit,
+	TP_PROTO(struct va_format *vaf),
+	TP_ARGS(vaf)
+);
+
+TRACE_EVENT(iwlwifi_dbg,
+	TP_PROTO(u32 level, bool in_interrupt, const char *function,
+		 struct va_format *vaf),
+	TP_ARGS(level, in_interrupt, function, vaf),
+	TP_STRUCT__entry(
+		__field(u32, level)
+		__field(u8, in_interrupt)
+		__string(function, function)
+		__dynamic_array(char, msg, MAX_MSG_LEN)
+	),
+	TP_fast_assign(
+		__entry->level = level;
+		__entry->in_interrupt = in_interrupt;
+		__assign_str(function, function);
+		WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
+				       MAX_MSG_LEN, vaf->fmt,
+				       *vaf->va) >= MAX_MSG_LEN);
+	),
+	TP_printk("%s", __get_str(msg))
+);
+#endif /* __IWLWIFI_DEVICE_TRACE_MSG */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE iwl-devtrace-msg
+#include <trace/define_trace.h>
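
Every event here takes a struct va_format instead of a pre-formatted string, so the bounded (MAX_MSG_LEN) vsnprintf() in TP_fast_assign only runs when the tracepoint is actually enabled. A sketch of a caller in the shape iwlwifi's logging wrappers take (the my_err name is illustrative):

	#include <stdarg.h>
	#include <linux/printk.h>	/* struct va_format */

	static void my_err(const char *fmt, ...)
	{
		struct va_format vaf = { .fmt = fmt };
		va_list args;

		va_start(args, fmt);
		vaf.va = &args;
		trace_iwlwifi_err(&vaf);	/* event class defined above */
		va_end(args);
	}
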
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-ucode.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-ucode.h
new file mode 100644
index 0000000..e9b8673
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-ucode.h
@@ -0,0 +1,81 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#if !defined(__IWLWIFI_DEVICE_TRACE_UCODE) || defined(TRACE_HEADER_MULTI_READ)
+#define __IWLWIFI_DEVICE_TRACE_UCODE
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwlwifi_ucode
+
+TRACE_EVENT(iwlwifi_dev_ucode_cont_event,
+	TP_PROTO(const struct device *dev, u32 time, u32 data, u32 ev),
+	TP_ARGS(dev, time, data, ev),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+
+		__field(u32, time)
+		__field(u32, data)
+		__field(u32, ev)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->time = time;
+		__entry->data = data;
+		__entry->ev = ev;
+	),
+	TP_printk("[%s] EVT_LOGT:%010u:0x%08x:%04u",
+		  __get_str(dev), __entry->time, __entry->data, __entry->ev)
+);
+
+TRACE_EVENT(iwlwifi_dev_ucode_wrap_event,
+	TP_PROTO(const struct device *dev, u32 wraps, u32 n_entry, u32 p_entry),
+	TP_ARGS(dev, wraps, n_entry, p_entry),
+	TP_STRUCT__entry(
+		DEV_ENTRY
+
+		__field(u32, wraps)
+		__field(u32, n_entry)
+		__field(u32, p_entry)
+	),
+	TP_fast_assign(
+		DEV_ASSIGN;
+		__entry->wraps = wraps;
+		__entry->n_entry = n_entry;
+		__entry->p_entry = p_entry;
+	),
+	TP_printk("[%s] wraps=#%02d n=0x%X p=0x%X",
+		  __get_str(dev), __entry->wraps, __entry->n_entry,
+		  __entry->p_entry)
+);
+#endif /* __IWLWIFI_DEVICE_TRACE_UCODE */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE iwl-devtrace-ucode
+#include <trace/define_trace.h>
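
cont_event reports one (time, data, event) record at a time, while wrap_event marks each wrap of the firmware log's write pointer. A hedged sketch of a consumer draining a snapshot of such a log (the three-word entry layout is an assumption for illustration):

	/* stream a captured ucode event log into the tracepoint */
	static void drain_evtlog(const struct device *dev,
				 const u32 (*evt)[3], u32 n_entries)
	{
		u32 i;

		for (i = 0; i < n_entries; i++)
			trace_iwlwifi_dev_ucode_cont_event(dev, evt[i][0],
							   evt[i][1],
							   evt[i][2]);
	}
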
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c
new file mode 100644
index 0000000..6aa7198
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c
@@ -0,0 +1,41 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+
+/* sparse doesn't like tracepoint macros */
+#ifndef __CHECKER__
+#include "iwl-trans.h"
+
+#include "dvm/commands.h"
+#define CREATE_TRACE_POINTS
+#include "iwl-devtrace.h"
+
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_wrap_event);
+#endif
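
This file is the standard tracepoint definition site: exactly one translation unit per module defines CREATE_TRACE_POINTS before including the trace header, which expands the TRACE_EVENT declarations into definitions; every other file includes the header plain. The pattern in general form (the my-trace names are placeholders):

	/* my-trace.c - the one and only definition site */
	#define CREATE_TRACE_POINTS
	#include "my-trace.h"

	/* export only what other modules must fire directly */
	EXPORT_TRACEPOINT_SYMBOL(my_event);

	/* every-other-file.c - declarations only */
	#include "my-trace.h"
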
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
new file mode 100644
index 0000000..f5c1127
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
@@ -0,0 +1,108 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(C) 2016 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __IWLWIFI_DEVICE_TRACE
+#include <linux/skbuff.h>
+#include <linux/ieee80211.h>
+#include <net/cfg80211.h>
+#include "iwl-trans.h"
+#if !defined(__IWLWIFI_DEVICE_TRACE)
+static inline bool iwl_trace_data(struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (void *)skb->data;
+	__le16 fc = hdr->frame_control;
+	int offs = 24; /* start with normal header length */
+
+	if (!ieee80211_is_data(fc))
+		return false;
+
+	/* Try to determine if the frame is EAPOL. This might have false
+	 * positives (if there's no RFC 1042 header and we compare to some
+	 * payload instead) but since we're only doing tracing that's not
+	 * a problem.
+	 */
+
+	if (ieee80211_has_a4(fc))
+		offs += 6;
+	if (ieee80211_is_data_qos(fc))
+		offs += 2;
+	/* don't account for crypto - these are unencrypted */
+
+	/* also account for the RFC 1042 header, of course */
+	offs += 6;
+
+	return skb->len <= offs + 2 ||
+		*(__be16 *)(skb->data + offs) != cpu_to_be16(ETH_P_PAE);
+}
+
+static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans,
+				      void *rxbuf, size_t len)
+{
+	struct iwl_cmd_header *cmd = (void *)((u8 *)rxbuf + sizeof(__le32));
+	struct ieee80211_hdr *hdr;
+
+	if (cmd->cmd != trans->rx_mpdu_cmd)
+		return len;
+
+	hdr = (void *)((u8 *)cmd + sizeof(struct iwl_cmd_header) +
+			trans->rx_mpdu_cmd_hdr_size);
+	if (!ieee80211_is_data(hdr->frame_control))
+		return len;
+	/* maybe try to identify EAPOL frames? */
+	return sizeof(__le32) + sizeof(*cmd) + trans->rx_mpdu_cmd_hdr_size +
+		ieee80211_hdrlen(hdr->frame_control);
+}
+#endif
+
+#define __IWLWIFI_DEVICE_TRACE
+
+#include <linux/tracepoint.h>
+#include <linux/device.h>
+#include "iwl-trans.h"
+
+#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__)
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#undef DECLARE_EVENT_CLASS
+#define DECLARE_EVENT_CLASS(...)
+#undef DEFINE_EVENT
+#define DEFINE_EVENT(evt_class, name, proto, ...) \
+static inline void trace_ ## name(proto) {}
+#endif
+
+#define DEV_ENTRY	__string(dev, dev_name(dev))
+#define DEV_ASSIGN	__assign_str(dev, dev_name(dev))
+
+#include "iwl-devtrace-io.h"
+#include "iwl-devtrace-ucode.h"
+#include "iwl-devtrace-msg.h"
+#include "iwl-devtrace-data.h"
+#include "iwl-devtrace-iwlwifi.h"
+
+#endif /* __IWLWIFI_DEVICE_TRACE */
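
The !CONFIG_IWLWIFI_DEVICE_TRACING branch above is the usual compile-out trick: TRACE_EVENT and DEFINE_EVENT are redefined into empty static inline stubs, so each trace_*() call site still type-checks against its TP_PROTO but compiles to nothing. Reduced to its essentials (MYDRV_TRACING is an assumed config symbol):

	#ifndef MYDRV_TRACING
	#undef TRACE_EVENT
	#define TRACE_EVENT(name, proto, ...) \
	static inline void trace_ ## name(proto) {}
	#endif

	/*
	 * trace_mydrv_irq(dev) now compiles to an empty inline, yet the
	 * compiler still checks 'dev' against the declared prototype.
	 */
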
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
new file mode 100644
index 0000000..c063125
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -0,0 +1,1824 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+
+#include "iwl-drv.h"
+#include "iwl-csr.h"
+#include "iwl-debug.h"
+#include "iwl-trans.h"
+#include "iwl-op-mode.h"
+#include "iwl-agn-hw.h"
+#include "fw/img.h"
+#include "iwl-config.h"
+#include "iwl-modparams.h"
+
+/******************************************************************************
+ *
+ * module boilerplate
+ *
+ ******************************************************************************/
+
+#define DRV_DESCRIPTION	"Intel(R) Wireless WiFi driver for Linux"
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+static struct dentry *iwl_dbgfs_root;
+#endif
+
+/**
+ * struct iwl_drv - drv common data
+ * @list: list of drv structures using this opmode
+ * @fw: the iwl_fw structure
+ * @op_mode: the running op_mode
+ * @trans: transport layer
+ * @dev: for debug prints only
+ * @fw_index: firmware revision to try loading
+ * @firmware_name: composite filename of ucode file to load
+ * @request_firmware_complete: the firmware has been obtained from user space
+ * @dbgfs_drv: debugfs directory for this device's driver data
+ * @dbgfs_trans: debugfs directory entry of the transport layer
+ * @dbgfs_op_mode: debugfs directory entry of the op_mode
+ */
+struct iwl_drv {
+	struct list_head list;
+	struct iwl_fw fw;
+
+	struct iwl_op_mode *op_mode;
+	struct iwl_trans *trans;
+	struct device *dev;
+
+	int fw_index;                   /* firmware we're trying to load */
+	char firmware_name[64];         /* name of firmware file to load */
+
+	struct completion request_firmware_complete;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	struct dentry *dbgfs_drv;
+	struct dentry *dbgfs_trans;
+	struct dentry *dbgfs_op_mode;
+#endif
+};
+
+enum {
+	DVM_OP_MODE,
+	MVM_OP_MODE,
+};
+
+/* Protects the table contents, i.e. the ops pointer & drv list */
+static struct mutex iwlwifi_opmode_table_mtx;
+static struct iwlwifi_opmode_table {
+	const char *name;			/* name: iwldvm, iwlmvm, etc */
+	const struct iwl_op_mode_ops *ops;	/* pointer to op_mode ops */
+	struct list_head drv;		/* list of devices using this op_mode */
+} iwlwifi_opmode_table[] = {		/* ops set when driver is initialized */
+	[DVM_OP_MODE] = { .name = "iwldvm", .ops = NULL },
+	[MVM_OP_MODE] = { .name = "iwlmvm", .ops = NULL },
+};
+
+#define IWL_DEFAULT_SCAN_CHANNELS 40
+
+/*
+ * struct fw_sec: used only during the image parsing process;
+ * the parsed firmware is stored using struct fw_desc.
+ */
+struct fw_sec {
+	const void *data;		/* the sec data */
+	size_t size;			/* section size */
+	u32 offset;			/* offset of writing in the device */
+};
+
+static void iwl_free_fw_desc(struct iwl_drv *drv, struct fw_desc *desc)
+{
+	vfree(desc->data);
+	desc->data = NULL;
+	desc->len = 0;
+}
+
+static void iwl_free_fw_img(struct iwl_drv *drv, struct fw_img *img)
+{
+	int i;
+	for (i = 0; i < img->num_sec; i++)
+		iwl_free_fw_desc(drv, &img->sec[i]);
+	kfree(img->sec);
+}
+
+static void iwl_dealloc_ucode(struct iwl_drv *drv)
+{
+	int i;
+
+	kfree(drv->fw.dbg_dest_tlv);
+	for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++)
+		kfree(drv->fw.dbg_conf_tlv[i]);
+	for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++)
+		kfree(drv->fw.dbg_trigger_tlv[i]);
+	kfree(drv->fw.dbg_mem_tlv);
+	kfree(drv->fw.iml);
+
+	for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
+		iwl_free_fw_img(drv, drv->fw.img + i);
+}
+
+static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc,
+			     struct fw_sec *sec)
+{
+	void *data;
+
+	desc->data = NULL;
+
+	if (!sec || !sec->size)
+		return -EINVAL;
+
+	data = vmalloc(sec->size);
+	if (!data)
+		return -ENOMEM;
+
+	desc->len = sec->size;
+	desc->offset = sec->offset;
+	memcpy(data, sec->data, desc->len);
+	desc->data = data;
+
+	return 0;
+}
+
+static void iwl_req_fw_callback(const struct firmware *ucode_raw,
+				void *context);
+
+static int iwl_request_firmware(struct iwl_drv *drv, bool first)
+{
+	const struct iwl_cfg *cfg = drv->trans->cfg;
+	char tag[8];
+	const char *fw_pre_name;
+
+	if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
+	    (CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_B_STEP ||
+	     CSR_HW_REV_STEP(drv->trans->hw_rev) == SILICON_C_STEP))
+		fw_pre_name = cfg->fw_name_pre_b_or_c_step;
+	else if (drv->trans->cfg->integrated &&
+		 CSR_HW_RFID_STEP(drv->trans->hw_rf_id) == SILICON_B_STEP &&
+		 cfg->fw_name_pre_rf_next_step)
+		fw_pre_name = cfg->fw_name_pre_rf_next_step;
+	else
+		fw_pre_name = cfg->fw_name_pre;
+
+	if (first) {
+		drv->fw_index = cfg->ucode_api_max;
+		sprintf(tag, "%d", drv->fw_index);
+	} else {
+		drv->fw_index--;
+		sprintf(tag, "%d", drv->fw_index);
+	}
+
+	if (drv->fw_index < cfg->ucode_api_min) {
+		IWL_ERR(drv, "no suitable firmware found!\n");
+
+		if (cfg->ucode_api_min == cfg->ucode_api_max) {
+			IWL_ERR(drv, "%s%d is required\n", fw_pre_name,
+				cfg->ucode_api_max);
+		} else {
+			IWL_ERR(drv, "minimum version required: %s%d\n",
+				fw_pre_name,
+				cfg->ucode_api_min);
+			IWL_ERR(drv, "maximum version supported: %s%d\n",
+				fw_pre_name,
+				cfg->ucode_api_max);
+		}
+
+		IWL_ERR(drv,
+			"check git://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git\n");
+		return -ENOENT;
+	}
+
+	snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
+		 fw_pre_name, tag);
+
+	IWL_DEBUG_INFO(drv, "attempting to load firmware '%s'\n",
+		       drv->firmware_name);
+
+	return request_firmware_nowait(THIS_MODULE, 1, drv->firmware_name,
+				       drv->trans->dev,
+				       GFP_KERNEL, drv, iwl_req_fw_callback);
+}
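+
+/*
+ * Firmware fallback, illustrated: fw_index counts down from
+ * cfg->ucode_api_max to cfg->ucode_api_min, one request per call, so
+ * for a hypothetical cfg with fw_name_pre = "iwlwifi-foo-" and an API
+ * range of 22..36 the requested files would be, in order:
+ *
+ *	iwlwifi-foo-36.ucode, iwlwifi-foo-35.ucode, ..., iwlwifi-foo-22.ucode
+ *
+ * (prefix illustrative; real prefixes come from the iwl_cfg tables).
+ * Each request is asynchronous: on failure, iwl_req_fw_callback()
+ * re-enters iwl_request_firmware() with first == false.
+ */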
+
+struct fw_img_parsing {
+	struct fw_sec *sec;
+	int sec_counter;
+};
+
+/*
+ * struct fw_sec_parsing: to extract a fw section and its offset from a TLV
+ */
+struct fw_sec_parsing {
+	__le32 offset;
+	const u8 data[];
+} __packed;
+
+/**
+ * struct iwl_tlv_calib_data - parse the default calib data from TLV
+ *
+ * @ucode_type: the uCode to which the following default calib relates.
+ * @calib: default calibrations.
+ */
+struct iwl_tlv_calib_data {
+	__le32 ucode_type;
+	struct iwl_tlv_calib_ctrl calib;
+} __packed;
+
+struct iwl_firmware_pieces {
+	struct fw_img_parsing img[IWL_UCODE_TYPE_MAX];
+
+	u32 init_evtlog_ptr, init_evtlog_size, init_errlog_ptr;
+	u32 inst_evtlog_ptr, inst_evtlog_size, inst_errlog_ptr;
+
+	/* FW debug data parsed for driver usage */
+	bool dbg_dest_tlv_init;
+	u8 *dbg_dest_ver;
+	union {
+		struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
+		struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv_v1;
+	};
+	struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
+	size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
+	struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
+	size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
+	struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv;
+	size_t n_dbg_mem_tlv;
+};
+
+/*
+ * These functions are just to extract uCode section data from the pieces
+ * structure.
+ */
+static struct fw_sec *get_sec(struct iwl_firmware_pieces *pieces,
+			      enum iwl_ucode_type type,
+			      int  sec)
+{
+	return &pieces->img[type].sec[sec];
+}
+
+static void alloc_sec_data(struct iwl_firmware_pieces *pieces,
+			   enum iwl_ucode_type type,
+			   int sec)
+{
+	struct fw_img_parsing *img = &pieces->img[type];
+	struct fw_sec *sec_memory;
+	int size = sec + 1;
+	size_t alloc_size = sizeof(*img->sec) * size;
+
+	if (img->sec && img->sec_counter >= size)
+		return;
+
+	sec_memory = krealloc(img->sec, alloc_size, GFP_KERNEL);
+	if (!sec_memory)
+		return;
+
+	img->sec = sec_memory;
+	img->sec_counter = size;
+}
+
+static void set_sec_data(struct iwl_firmware_pieces *pieces,
+			 enum iwl_ucode_type type,
+			 int sec,
+			 const void *data)
+{
+	alloc_sec_data(pieces, type, sec);
+
+	pieces->img[type].sec[sec].data = data;
+}
+
+static void set_sec_size(struct iwl_firmware_pieces *pieces,
+			 enum iwl_ucode_type type,
+			 int sec,
+			 size_t size)
+{
+	alloc_sec_data(pieces, type, sec);
+
+	pieces->img[type].sec[sec].size = size;
+}
+
+static size_t get_sec_size(struct iwl_firmware_pieces *pieces,
+			   enum iwl_ucode_type type,
+			   int sec)
+{
+	return pieces->img[type].sec[sec].size;
+}
+
+static void set_sec_offset(struct iwl_firmware_pieces *pieces,
+			   enum iwl_ucode_type type,
+			   int sec,
+			   u32 offset)
+{
+	alloc_sec_data(pieces, type, sec);
+
+	pieces->img[type].sec[sec].offset = offset;
+}
+
+static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len)
+{
+	int i, j;
+	struct iwl_fw_cscheme_list *l = (struct iwl_fw_cscheme_list *)data;
+	struct iwl_fw_cipher_scheme *fwcs;
+
+	if (len < sizeof(*l) ||
+	    len < sizeof(l->size) + l->size * sizeof(l->cs[0]))
+		return -EINVAL;
+
+	for (i = 0, j = 0; i < IWL_UCODE_MAX_CS && i < l->size; i++) {
+		fwcs = &l->cs[j];
+
+		/* we skip schemes with zero cipher suite selector */
+		if (!fwcs->cipher)
+			continue;
+
+		fw->cs[j++] = *fwcs;
+	}
+
+	return 0;
+}
+
+/*
+ * Gets uCode section from tlv.
+ */
+static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces,
+			       const void *data, enum iwl_ucode_type type,
+			       int size)
+{
+	struct fw_img_parsing *img;
+	struct fw_sec *sec;
+	struct fw_sec_parsing *sec_parse;
+	size_t alloc_size;
+
+	if (WARN_ON(!pieces || !data || type >= IWL_UCODE_TYPE_MAX))
+		return -1;
+
+	sec_parse = (struct fw_sec_parsing *)data;
+
+	img = &pieces->img[type];
+
+	alloc_size = sizeof(*img->sec) * (img->sec_counter + 1);
+	sec = krealloc(img->sec, alloc_size, GFP_KERNEL);
+	if (!sec)
+		return -ENOMEM;
+	img->sec = sec;
+
+	sec = &img->sec[img->sec_counter];
+
+	sec->offset = le32_to_cpu(sec_parse->offset);
+	sec->data = sec_parse->data;
+	sec->size = size - sizeof(sec_parse->offset);
+
+	++img->sec_counter;
+
+	return 0;
+}
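+
+/*
+ * Layout consumed above: a section TLV's payload is a little-endian
+ * destination offset followed by the raw section bytes (sketch of
+ * struct fw_sec_parsing):
+ *
+ *	+--------+----------------------------------+
+ *	| __le32 |         section data ...         |
+ *	| offset |   (tlv_len - 4 bytes in total)   |
+ *	+--------+----------------------------------+
+ *
+ * hence sec->size = size - sizeof(sec_parse->offset).
+ */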
+
+static int iwl_set_default_calib(struct iwl_drv *drv, const u8 *data)
+{
+	struct iwl_tlv_calib_data *def_calib =
+					(struct iwl_tlv_calib_data *)data;
+	u32 ucode_type = le32_to_cpu(def_calib->ucode_type);
+	if (ucode_type >= IWL_UCODE_TYPE_MAX) {
+		IWL_ERR(drv, "Wrong ucode_type %u for default calibration.\n",
+			ucode_type);
+		return -EINVAL;
+	}
+	drv->fw.default_calib[ucode_type].flow_trigger =
+		def_calib->calib.flow_trigger;
+	drv->fw.default_calib[ucode_type].event_trigger =
+		def_calib->calib.event_trigger;
+
+	return 0;
+}
+
+static void iwl_set_ucode_api_flags(struct iwl_drv *drv, const u8 *data,
+				    struct iwl_ucode_capabilities *capa)
+{
+	const struct iwl_ucode_api *ucode_api = (void *)data;
+	u32 api_index = le32_to_cpu(ucode_api->api_index);
+	u32 api_flags = le32_to_cpu(ucode_api->api_flags);
+	int i;
+
+	if (api_index >= DIV_ROUND_UP(NUM_IWL_UCODE_TLV_API, 32)) {
+		IWL_WARN(drv,
+			 "api flags index %d larger than supported by driver\n",
+			 api_index);
+		return;
+	}
+
+	for (i = 0; i < 32; i++) {
+		if (api_flags & BIT(i))
+			__set_bit(i + 32 * api_index, capa->_api);
+	}
+}
+
+static void iwl_set_ucode_capabilities(struct iwl_drv *drv, const u8 *data,
+				       struct iwl_ucode_capabilities *capa)
+{
+	const struct iwl_ucode_capa *ucode_capa = (void *)data;
+	u32 api_index = le32_to_cpu(ucode_capa->api_index);
+	u32 api_flags = le32_to_cpu(ucode_capa->api_capa);
+	int i;
+
+	if (api_index >= DIV_ROUND_UP(NUM_IWL_UCODE_TLV_CAPA, 32)) {
+		IWL_WARN(drv,
+			 "capa flags index %d larger than supported by driver\n",
+			 api_index);
+		return;
+	}
+
+	for (i = 0; i < 32; i++) {
+		if (api_flags & BIT(i))
+			__set_bit(i + 32 * api_index, capa->_capa);
+	}
+}
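+
+/*
+ * Both helpers above treat each TLV as one 32-bit window into a wider
+ * bitmap: bit i of the word at api_index becomes absolute bit
+ * (32 * api_index + i) in capa->_api / capa->_capa.  The index
+ * arithmetic as a sketch:
+ *
+ *	static inline unsigned int tlv_bit(u32 api_index, unsigned int i)
+ *	{
+ *		return 32 * api_index + i;
+ *	}
+ */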
+
+static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv,
+				    const struct firmware *ucode_raw,
+				    struct iwl_firmware_pieces *pieces)
+{
+	struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
+	u32 api_ver, hdr_size, build;
+	char buildstr[25];
+	const u8 *src;
+
+	drv->fw.ucode_ver = le32_to_cpu(ucode->ver);
+	api_ver = IWL_UCODE_API(drv->fw.ucode_ver);
+
+	switch (api_ver) {
+	default:
+		hdr_size = 28;
+		if (ucode_raw->size < hdr_size) {
+			IWL_ERR(drv, "File size too small!\n");
+			return -EINVAL;
+		}
+		build = le32_to_cpu(ucode->u.v2.build);
+		set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST,
+			     le32_to_cpu(ucode->u.v2.inst_size));
+		set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA,
+			     le32_to_cpu(ucode->u.v2.data_size));
+		set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST,
+			     le32_to_cpu(ucode->u.v2.init_size));
+		set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA,
+			     le32_to_cpu(ucode->u.v2.init_data_size));
+		src = ucode->u.v2.data;
+		break;
+	case 0:
+	case 1:
+	case 2:
+		hdr_size = 24;
+		if (ucode_raw->size < hdr_size) {
+			IWL_ERR(drv, "File size too small!\n");
+			return -EINVAL;
+		}
+		build = 0;
+		set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST,
+			     le32_to_cpu(ucode->u.v1.inst_size));
+		set_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA,
+			     le32_to_cpu(ucode->u.v1.data_size));
+		set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST,
+			     le32_to_cpu(ucode->u.v1.init_size));
+		set_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA,
+			     le32_to_cpu(ucode->u.v1.init_data_size));
+		src = ucode->u.v1.data;
+		break;
+	}
+
+	if (build)
+		sprintf(buildstr, " build %u", build);
+	else
+		buildstr[0] = '\0';
+
+	snprintf(drv->fw.fw_version,
+		 sizeof(drv->fw.fw_version),
+		 "%u.%u.%u.%u%s",
+		 IWL_UCODE_MAJOR(drv->fw.ucode_ver),
+		 IWL_UCODE_MINOR(drv->fw.ucode_ver),
+		 IWL_UCODE_API(drv->fw.ucode_ver),
+		 IWL_UCODE_SERIAL(drv->fw.ucode_ver),
+		 buildstr);
+
+	/* Verify size of file vs. image size info in file's header */
+
+	if (ucode_raw->size != hdr_size +
+	    get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) +
+	    get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) +
+	    get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) +
+	    get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA)) {
+
+		IWL_ERR(drv,
+			"uCode file size %d does not match expected size\n",
+			(int)ucode_raw->size);
+		return -EINVAL;
+	}
+
+	set_sec_data(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST, src);
+	src += get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST);
+	set_sec_offset(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST,
+		       IWLAGN_RTC_INST_LOWER_BOUND);
+	set_sec_data(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA, src);
+	src += get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA);
+	set_sec_offset(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA,
+		       IWLAGN_RTC_DATA_LOWER_BOUND);
+	set_sec_data(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST, src);
+	src += get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST);
+	set_sec_offset(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST,
+		       IWLAGN_RTC_INST_LOWER_BOUND);
+	set_sec_data(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA, src);
+	src += get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA);
+	set_sec_offset(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA,
+		       IWLAGN_RTC_DATA_LOWER_BOUND);
+	return 0;
+}
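+
+/*
+ * v1/v2 image layout, as walked above with the running 'src' pointer:
+ * four blobs packed directly after the header,
+ *
+ *	header | runtime inst | runtime data | init inst | init data
+ *
+ * which is why the file size must equal hdr_size plus the four
+ * section sizes exactly.
+ */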
+
+static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
+				const struct firmware *ucode_raw,
+				struct iwl_firmware_pieces *pieces,
+				struct iwl_ucode_capabilities *capa,
+				bool *usniffer_images)
+{
+	struct iwl_tlv_ucode_header *ucode = (void *)ucode_raw->data;
+	struct iwl_ucode_tlv *tlv;
+	size_t len = ucode_raw->size;
+	const u8 *data;
+	u32 tlv_len;
+	u32 usniffer_img;
+	enum iwl_ucode_tlv_type tlv_type;
+	const u8 *tlv_data;
+	char buildstr[25];
+	u32 build, paging_mem_size;
+	int num_of_cpus;
+	bool usniffer_req = false;
+
+	if (len < sizeof(*ucode)) {
+		IWL_ERR(drv, "uCode has invalid length: %zd\n", len);
+		return -EINVAL;
+	}
+
+	if (ucode->magic != cpu_to_le32(IWL_TLV_UCODE_MAGIC)) {
+		IWL_ERR(drv, "invalid uCode magic: 0X%x\n",
+			le32_to_cpu(ucode->magic));
+		return -EINVAL;
+	}
+
+	drv->fw.ucode_ver = le32_to_cpu(ucode->ver);
+	memcpy(drv->fw.human_readable, ucode->human_readable,
+	       sizeof(drv->fw.human_readable));
+	build = le32_to_cpu(ucode->build);
+
+	if (build)
+		sprintf(buildstr, " build %u", build);
+	else
+		buildstr[0] = '\0';
+
+	snprintf(drv->fw.fw_version,
+		 sizeof(drv->fw.fw_version),
+		 "%u.%u.%u.%u%s",
+		 IWL_UCODE_MAJOR(drv->fw.ucode_ver),
+		 IWL_UCODE_MINOR(drv->fw.ucode_ver),
+		 IWL_UCODE_API(drv->fw.ucode_ver),
+		 IWL_UCODE_SERIAL(drv->fw.ucode_ver),
+		 buildstr);
+
+	data = ucode->data;
+
+	len -= sizeof(*ucode);
+
+	while (len >= sizeof(*tlv)) {
+		len -= sizeof(*tlv);
+		tlv = (void *)data;
+
+		tlv_len = le32_to_cpu(tlv->length);
+		tlv_type = le32_to_cpu(tlv->type);
+		tlv_data = tlv->data;
+
+		if (len < tlv_len) {
+			IWL_ERR(drv, "invalid TLV len: %zd/%u\n",
+				len, tlv_len);
+			return -EINVAL;
+		}
+		len -= ALIGN(tlv_len, 4);
+		data += sizeof(*tlv) + ALIGN(tlv_len, 4);
+
+		switch (tlv_type) {
+		case IWL_UCODE_TLV_INST:
+			set_sec_data(pieces, IWL_UCODE_REGULAR,
+				     IWL_UCODE_SECTION_INST, tlv_data);
+			set_sec_size(pieces, IWL_UCODE_REGULAR,
+				     IWL_UCODE_SECTION_INST, tlv_len);
+			set_sec_offset(pieces, IWL_UCODE_REGULAR,
+				       IWL_UCODE_SECTION_INST,
+				       IWLAGN_RTC_INST_LOWER_BOUND);
+			break;
+		case IWL_UCODE_TLV_DATA:
+			set_sec_data(pieces, IWL_UCODE_REGULAR,
+				     IWL_UCODE_SECTION_DATA, tlv_data);
+			set_sec_size(pieces, IWL_UCODE_REGULAR,
+				     IWL_UCODE_SECTION_DATA, tlv_len);
+			set_sec_offset(pieces, IWL_UCODE_REGULAR,
+				       IWL_UCODE_SECTION_DATA,
+				       IWLAGN_RTC_DATA_LOWER_BOUND);
+			break;
+		case IWL_UCODE_TLV_INIT:
+			set_sec_data(pieces, IWL_UCODE_INIT,
+				     IWL_UCODE_SECTION_INST, tlv_data);
+			set_sec_size(pieces, IWL_UCODE_INIT,
+				     IWL_UCODE_SECTION_INST, tlv_len);
+			set_sec_offset(pieces, IWL_UCODE_INIT,
+				       IWL_UCODE_SECTION_INST,
+				       IWLAGN_RTC_INST_LOWER_BOUND);
+			break;
+		case IWL_UCODE_TLV_INIT_DATA:
+			set_sec_data(pieces, IWL_UCODE_INIT,
+				     IWL_UCODE_SECTION_DATA, tlv_data);
+			set_sec_size(pieces, IWL_UCODE_INIT,
+				     IWL_UCODE_SECTION_DATA, tlv_len);
+			set_sec_offset(pieces, IWL_UCODE_INIT,
+				       IWL_UCODE_SECTION_DATA,
+				       IWLAGN_RTC_DATA_LOWER_BOUND);
+			break;
+		case IWL_UCODE_TLV_BOOT:
+			IWL_ERR(drv, "Found unexpected BOOT ucode\n");
+			break;
+		case IWL_UCODE_TLV_PROBE_MAX_LEN:
+			if (tlv_len != sizeof(u32))
+				goto invalid_tlv_len;
+			capa->max_probe_length =
+					le32_to_cpup((__le32 *)tlv_data);
+			break;
+		case IWL_UCODE_TLV_PAN:
+			if (tlv_len)
+				goto invalid_tlv_len;
+			capa->flags |= IWL_UCODE_TLV_FLAGS_PAN;
+			break;
+		case IWL_UCODE_TLV_FLAGS:
+			/* must be at least one u32 */
+			if (tlv_len < sizeof(u32))
+				goto invalid_tlv_len;
+			/* and a proper number of u32s */
+			if (tlv_len % sizeof(u32))
+				goto invalid_tlv_len;
+			/*
+			 * This driver only reads the first u32 as
+			 * right now no more features are defined,
+			 * if that changes then either the driver
+			 * will not work with the new firmware, or
+			 * it'll not take advantage of new features.
+			 */
+			capa->flags = le32_to_cpup((__le32 *)tlv_data);
+			break;
+		case IWL_UCODE_TLV_API_CHANGES_SET:
+			if (tlv_len != sizeof(struct iwl_ucode_api))
+				goto invalid_tlv_len;
+			iwl_set_ucode_api_flags(drv, tlv_data, capa);
+			break;
+		case IWL_UCODE_TLV_ENABLED_CAPABILITIES:
+			if (tlv_len != sizeof(struct iwl_ucode_capa))
+				goto invalid_tlv_len;
+			iwl_set_ucode_capabilities(drv, tlv_data, capa);
+			break;
+		case IWL_UCODE_TLV_INIT_EVTLOG_PTR:
+			if (tlv_len != sizeof(u32))
+				goto invalid_tlv_len;
+			pieces->init_evtlog_ptr =
+					le32_to_cpup((__le32 *)tlv_data);
+			break;
+		case IWL_UCODE_TLV_INIT_EVTLOG_SIZE:
+			if (tlv_len != sizeof(u32))
+				goto invalid_tlv_len;
+			pieces->init_evtlog_size =
+					le32_to_cpup((__le32 *)tlv_data);
+			break;
+		case IWL_UCODE_TLV_INIT_ERRLOG_PTR:
+			if (tlv_len != sizeof(u32))
+				goto invalid_tlv_len;
+			pieces->init_errlog_ptr =
+					le32_to_cpup((__le32 *)tlv_data);
+			break;
+		case IWL_UCODE_TLV_RUNT_EVTLOG_PTR:
+			if (tlv_len != sizeof(u32))
+				goto invalid_tlv_len;
+			pieces->inst_evtlog_ptr =
+					le32_to_cpup((__le32 *)tlv_data);
+			break;
+		case IWL_UCODE_TLV_RUNT_EVTLOG_SIZE:
+			if (tlv_len != sizeof(u32))
+				goto invalid_tlv_len;
+			pieces->inst_evtlog_size =
+					le32_to_cpup((__le32 *)tlv_data);
+			break;
+		case IWL_UCODE_TLV_RUNT_ERRLOG_PTR:
+			if (tlv_len != sizeof(u32))
+				goto invalid_tlv_len;
+			pieces->inst_errlog_ptr =
+					le32_to_cpup((__le32 *)tlv_data);
+			break;
+		case IWL_UCODE_TLV_ENHANCE_SENS_TBL:
+			if (tlv_len)
+				goto invalid_tlv_len;
+			drv->fw.enhance_sensitivity_table = true;
+			break;
+		case IWL_UCODE_TLV_WOWLAN_INST:
+			set_sec_data(pieces, IWL_UCODE_WOWLAN,
+				     IWL_UCODE_SECTION_INST, tlv_data);
+			set_sec_size(pieces, IWL_UCODE_WOWLAN,
+				     IWL_UCODE_SECTION_INST, tlv_len);
+			set_sec_offset(pieces, IWL_UCODE_WOWLAN,
+				       IWL_UCODE_SECTION_INST,
+				       IWLAGN_RTC_INST_LOWER_BOUND);
+			break;
+		case IWL_UCODE_TLV_WOWLAN_DATA:
+			set_sec_data(pieces, IWL_UCODE_WOWLAN,
+				     IWL_UCODE_SECTION_DATA, tlv_data);
+			set_sec_size(pieces, IWL_UCODE_WOWLAN,
+				     IWL_UCODE_SECTION_DATA, tlv_len);
+			set_sec_offset(pieces, IWL_UCODE_WOWLAN,
+				       IWL_UCODE_SECTION_DATA,
+				       IWLAGN_RTC_DATA_LOWER_BOUND);
+			break;
+		case IWL_UCODE_TLV_PHY_CALIBRATION_SIZE:
+			if (tlv_len != sizeof(u32))
+				goto invalid_tlv_len;
+			capa->standard_phy_calibration_size =
+					le32_to_cpup((__le32 *)tlv_data);
+			break;
+		case IWL_UCODE_TLV_SEC_RT:
+			iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
+					    tlv_len);
+			drv->fw.type = IWL_FW_MVM;
+			break;
+		case IWL_UCODE_TLV_SEC_INIT:
+			iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT,
+					    tlv_len);
+			drv->fw.type = IWL_FW_MVM;
+			break;
+		case IWL_UCODE_TLV_SEC_WOWLAN:
+			iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN,
+					    tlv_len);
+			drv->fw.type = IWL_FW_MVM;
+			break;
+		case IWL_UCODE_TLV_DEF_CALIB:
+			if (tlv_len != sizeof(struct iwl_tlv_calib_data))
+				goto invalid_tlv_len;
+			if (iwl_set_default_calib(drv, tlv_data))
+				goto tlv_error;
+			break;
+		case IWL_UCODE_TLV_PHY_SKU:
+			if (tlv_len != sizeof(u32))
+				goto invalid_tlv_len;
+			drv->fw.phy_config = le32_to_cpup((__le32 *)tlv_data);
+			drv->fw.valid_tx_ant = (drv->fw.phy_config &
+						FW_PHY_CFG_TX_CHAIN) >>
+						FW_PHY_CFG_TX_CHAIN_POS;
+			drv->fw.valid_rx_ant = (drv->fw.phy_config &
+						FW_PHY_CFG_RX_CHAIN) >>
+						FW_PHY_CFG_RX_CHAIN_POS;
+			break;
+		case IWL_UCODE_TLV_SECURE_SEC_RT:
+			iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_REGULAR,
+					    tlv_len);
+			drv->fw.type = IWL_FW_MVM;
+			break;
+		case IWL_UCODE_TLV_SECURE_SEC_INIT:
+			iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_INIT,
+					    tlv_len);
+			drv->fw.type = IWL_FW_MVM;
+			break;
+		case IWL_UCODE_TLV_SECURE_SEC_WOWLAN:
+			iwl_store_ucode_sec(pieces, tlv_data, IWL_UCODE_WOWLAN,
+					    tlv_len);
+			drv->fw.type = IWL_FW_MVM;
+			break;
+		case IWL_UCODE_TLV_NUM_OF_CPU:
+			if (tlv_len != sizeof(u32))
+				goto invalid_tlv_len;
+			num_of_cpus =
+				le32_to_cpup((__le32 *)tlv_data);
+
+			if (num_of_cpus == 2) {
+				drv->fw.img[IWL_UCODE_REGULAR].is_dual_cpus =
+					true;
+				drv->fw.img[IWL_UCODE_INIT].is_dual_cpus =
+					true;
+				drv->fw.img[IWL_UCODE_WOWLAN].is_dual_cpus =
+					true;
+			} else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
+				IWL_ERR(drv, "Driver support upto 2 CPUs\n");
+				return -EINVAL;
+			}
+			break;
+		case IWL_UCODE_TLV_CSCHEME:
+			if (iwl_store_cscheme(&drv->fw, tlv_data, tlv_len))
+				goto invalid_tlv_len;
+			break;
+		case IWL_UCODE_TLV_N_SCAN_CHANNELS:
+			if (tlv_len != sizeof(u32))
+				goto invalid_tlv_len;
+			capa->n_scan_channels =
+				le32_to_cpup((__le32 *)tlv_data);
+			break;
+		case IWL_UCODE_TLV_FW_VERSION: {
+			__le32 *ptr = (void *)tlv_data;
+			u32 major, minor;
+			u8 local_comp;
+
+			if (tlv_len != sizeof(u32) * 3)
+				goto invalid_tlv_len;
+
+			major = le32_to_cpup(ptr++);
+			minor = le32_to_cpup(ptr++);
+			local_comp = le32_to_cpup(ptr);
+
+			if (major >= 35)
+				snprintf(drv->fw.fw_version,
+					 sizeof(drv->fw.fw_version),
+					"%u.%08x.%u", major, minor, local_comp);
+			else
+				snprintf(drv->fw.fw_version,
+					 sizeof(drv->fw.fw_version),
+					"%u.%u.%u", major, minor, local_comp);
+			break;
+			}
+		case IWL_UCODE_TLV_FW_DBG_DEST: {
+			struct iwl_fw_dbg_dest_tlv *dest = NULL;
+			struct iwl_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;
+			u8 mon_mode;
+
+			pieces->dbg_dest_ver = (u8 *)tlv_data;
+			if (*pieces->dbg_dest_ver == 1) {
+				dest = (void *)tlv_data;
+			} else if (*pieces->dbg_dest_ver == 0) {
+				dest_v1 = (void *)tlv_data;
+			} else {
+				IWL_ERR(drv,
+					"The version is %d, and it is invalid\n",
+					*pieces->dbg_dest_ver);
+				break;
+			}
+
+			if (pieces->dbg_dest_tlv_init) {
+				IWL_ERR(drv,
+					"dbg destination ignored, already exists\n");
+				break;
+			}
+
+			pieces->dbg_dest_tlv_init = true;
+
+			if (dest_v1) {
+				pieces->dbg_dest_tlv_v1 = dest_v1;
+				mon_mode = dest_v1->monitor_mode;
+			} else {
+				pieces->dbg_dest_tlv = dest;
+				mon_mode = dest->monitor_mode;
+			}
+
+			IWL_INFO(drv, "Found debug destination: %s\n",
+				 get_fw_dbg_mode_string(mon_mode));
+
+			drv->fw.dbg_dest_reg_num = (dest_v1) ?
+				tlv_len -
+				offsetof(struct iwl_fw_dbg_dest_tlv_v1,
+					 reg_ops) :
+				tlv_len -
+				offsetof(struct iwl_fw_dbg_dest_tlv,
+					 reg_ops);
+
+			drv->fw.dbg_dest_reg_num /=
+				sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]);
+
+			break;
+			}
+		case IWL_UCODE_TLV_FW_DBG_CONF: {
+			struct iwl_fw_dbg_conf_tlv *conf = (void *)tlv_data;
+
+			if (!pieces->dbg_dest_tlv_init) {
+				IWL_ERR(drv,
+					"Ignore dbg config %d - no destination configured\n",
+					conf->id);
+				break;
+			}
+
+			if (conf->id >= ARRAY_SIZE(drv->fw.dbg_conf_tlv)) {
+				IWL_ERR(drv,
+					"Skip unknown configuration: %d\n",
+					conf->id);
+				break;
+			}
+
+			if (pieces->dbg_conf_tlv[conf->id]) {
+				IWL_ERR(drv,
+					"Ignore duplicate dbg config %d\n",
+					conf->id);
+				break;
+			}
+
+			if (conf->usniffer)
+				usniffer_req = true;
+
+			IWL_INFO(drv, "Found debug configuration: %d\n",
+				 conf->id);
+
+			pieces->dbg_conf_tlv[conf->id] = conf;
+			pieces->dbg_conf_tlv_len[conf->id] = tlv_len;
+			break;
+			}
+		case IWL_UCODE_TLV_FW_DBG_TRIGGER: {
+			struct iwl_fw_dbg_trigger_tlv *trigger =
+				(void *)tlv_data;
+			u32 trigger_id = le32_to_cpu(trigger->id);
+
+			if (trigger_id >= ARRAY_SIZE(drv->fw.dbg_trigger_tlv)) {
+				IWL_ERR(drv,
+					"Skip unknown trigger: %u\n",
+					trigger_id);
+				break;
+			}
+
+			if (pieces->dbg_trigger_tlv[trigger_id]) {
+				IWL_ERR(drv,
+					"Ignore duplicate dbg trigger %u\n",
+					trigger_id);
+				break;
+			}
+
+			IWL_INFO(drv, "Found debug trigger: %u\n", trigger->id);
+
+			pieces->dbg_trigger_tlv[trigger_id] = trigger;
+			pieces->dbg_trigger_tlv_len[trigger_id] = tlv_len;
+			break;
+			}
+		case IWL_UCODE_TLV_FW_DBG_DUMP_LST: {
+			if (tlv_len != sizeof(u32)) {
+				IWL_ERR(drv,
+					"dbg lst mask size incorrect, skip\n");
+				break;
+			}
+
+			drv->fw.dbg_dump_mask =
+				le32_to_cpup((__le32 *)tlv_data);
+			break;
+			}
+		case IWL_UCODE_TLV_SEC_RT_USNIFFER:
+			*usniffer_images = true;
+			iwl_store_ucode_sec(pieces, tlv_data,
+					    IWL_UCODE_REGULAR_USNIFFER,
+					    tlv_len);
+			break;
+		case IWL_UCODE_TLV_PAGING:
+			if (tlv_len != sizeof(u32))
+				goto invalid_tlv_len;
+			paging_mem_size = le32_to_cpup((__le32 *)tlv_data);
+
+			IWL_DEBUG_FW(drv,
+				     "Paging: paging enabled (size = %u bytes)\n",
+				     paging_mem_size);
+
+			if (paging_mem_size > MAX_PAGING_IMAGE_SIZE) {
+				IWL_ERR(drv,
+					"Paging: driver supports up to %lu bytes for paging image\n",
+					MAX_PAGING_IMAGE_SIZE);
+				return -EINVAL;
+			}
+
+			if (paging_mem_size & (FW_PAGING_SIZE - 1)) {
+				IWL_ERR(drv,
+					"Paging: image isn't multiple %lu\n",
+					FW_PAGING_SIZE);
+				return -EINVAL;
+			}
+
+			drv->fw.img[IWL_UCODE_REGULAR].paging_mem_size =
+				paging_mem_size;
+			usniffer_img = IWL_UCODE_REGULAR_USNIFFER;
+			drv->fw.img[usniffer_img].paging_mem_size =
+				paging_mem_size;
+			break;
+		case IWL_UCODE_TLV_FW_GSCAN_CAPA:
+			/* ignored */
+			break;
+		case IWL_UCODE_TLV_FW_MEM_SEG: {
+			struct iwl_fw_dbg_mem_seg_tlv *dbg_mem =
+				(void *)tlv_data;
+			u32 type;
+			size_t size;
+			struct iwl_fw_dbg_mem_seg_tlv *n;
+
+			if (tlv_len != (sizeof(*dbg_mem)))
+				goto invalid_tlv_len;
+
+			type = le32_to_cpu(dbg_mem->data_type);
+
+			IWL_DEBUG_INFO(drv, "Found debug memory segment: %u\n",
+				       type);
+
+			switch (type & FW_DBG_MEM_TYPE_MASK) {
+			case FW_DBG_MEM_TYPE_REGULAR:
+			case FW_DBG_MEM_TYPE_PRPH:
+				/* we know how to handle these */
+				break;
+			default:
+				IWL_ERR(drv,
+					"Found debug memory segment with invalid type: 0x%x\n",
+					type);
+				return -EINVAL;
+			}
+
+			size = sizeof(*pieces->dbg_mem_tlv) *
+			       (pieces->n_dbg_mem_tlv + 1);
+			n = krealloc(pieces->dbg_mem_tlv, size, GFP_KERNEL);
+			if (!n)
+				return -ENOMEM;
+			pieces->dbg_mem_tlv = n;
+			pieces->dbg_mem_tlv[pieces->n_dbg_mem_tlv] = *dbg_mem;
+			pieces->n_dbg_mem_tlv++;
+			break;
+			}
+		case IWL_UCODE_TLV_IML: {
+			drv->fw.iml_len = tlv_len;
+			drv->fw.iml = kmemdup(tlv_data, tlv_len, GFP_KERNEL);
+			if (!drv->fw.iml)
+				return -ENOMEM;
+			break;
+			}
+		default:
+			IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
+			break;
+		}
+	}
+
+	if (!fw_has_capa(capa, IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED) &&
+	    usniffer_req && !*usniffer_images) {
+		IWL_ERR(drv,
+			"user selected to work with usniffer but usniffer image isn't available in ucode package\n");
+		return -EINVAL;
+	}
+
+	if (len) {
+		IWL_ERR(drv, "invalid TLV after parsing: %zd\n", len);
+		iwl_print_hex_dump(drv, IWL_DL_FW, (u8 *)data, len);
+		return -EINVAL;
+	}
+
+	return 0;
+
+ invalid_tlv_len:
+	IWL_ERR(drv, "TLV %d has invalid size: %u\n", tlv_type, tlv_len);
+ tlv_error:
+	iwl_print_hex_dump(drv, IWL_DL_FW, tlv_data, tlv_len);
+
+	return -EINVAL;
+}
+
+static int iwl_alloc_ucode(struct iwl_drv *drv,
+			   struct iwl_firmware_pieces *pieces,
+			   enum iwl_ucode_type type)
+{
+	int i;
+	struct fw_desc *sec;
+
+	sec = kcalloc(pieces->img[type].sec_counter, sizeof(*sec), GFP_KERNEL);
+	if (!sec)
+		return -ENOMEM;
+	drv->fw.img[type].sec = sec;
+	drv->fw.img[type].num_sec = pieces->img[type].sec_counter;
+
+	for (i = 0; i < pieces->img[type].sec_counter; i++)
+		if (iwl_alloc_fw_desc(drv, &sec[i], get_sec(pieces, type, i)))
+			return -ENOMEM;
+
+	return 0;
+}
+
+static int validate_sec_sizes(struct iwl_drv *drv,
+			      struct iwl_firmware_pieces *pieces,
+			      const struct iwl_cfg *cfg)
+{
+	IWL_DEBUG_INFO(drv, "f/w package hdr runtime inst size = %zd\n",
+		get_sec_size(pieces, IWL_UCODE_REGULAR,
+			     IWL_UCODE_SECTION_INST));
+	IWL_DEBUG_INFO(drv, "f/w package hdr runtime data size = %zd\n",
+		get_sec_size(pieces, IWL_UCODE_REGULAR,
+			     IWL_UCODE_SECTION_DATA));
+	IWL_DEBUG_INFO(drv, "f/w package hdr init inst size = %zd\n",
+		get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST));
+	IWL_DEBUG_INFO(drv, "f/w package hdr init data size = %zd\n",
+		get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA));
+
+	/* Verify that uCode images will fit in card's SRAM. */
+	if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) >
+	    cfg->max_inst_size) {
+		IWL_ERR(drv, "uCode instr len %zd too large to fit in SRAM\n",
+			get_sec_size(pieces, IWL_UCODE_REGULAR,
+				     IWL_UCODE_SECTION_INST));
+		return -1;
+	}
+
+	if (get_sec_size(pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) >
+	    cfg->max_data_size) {
+		IWL_ERR(drv, "uCode data len %zd too large to fit in SRAM\n",
+			get_sec_size(pieces, IWL_UCODE_REGULAR,
+				     IWL_UCODE_SECTION_DATA));
+		return -1;
+	}
+
+	if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST) >
+	     cfg->max_inst_size) {
+		IWL_ERR(drv, "uCode init instr len %zd too large to fit in SRAM\n",
+			get_sec_size(pieces, IWL_UCODE_INIT,
+				     IWL_UCODE_SECTION_INST));
+		return -1;
+	}
+
+	if (get_sec_size(pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA) >
+	    cfg->max_data_size) {
+		IWL_ERR(drv, "uCode init data len %zd too large to fit in SRAM\n",
+			get_sec_size(pieces, IWL_UCODE_INIT,
+				     IWL_UCODE_SECTION_DATA));
+		return -1;
+	}
+	return 0;
+}
+
+static struct iwl_op_mode *
+_iwl_op_mode_start(struct iwl_drv *drv, struct iwlwifi_opmode_table *op)
+{
+	const struct iwl_op_mode_ops *ops = op->ops;
+	struct dentry *dbgfs_dir = NULL;
+	struct iwl_op_mode *op_mode = NULL;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	drv->dbgfs_op_mode = debugfs_create_dir(op->name,
+						drv->dbgfs_drv);
+	if (!drv->dbgfs_op_mode) {
+		IWL_ERR(drv,
+			"failed to create opmode debugfs directory\n");
+		return op_mode;
+	}
+	dbgfs_dir = drv->dbgfs_op_mode;
+#endif
+
+	op_mode = ops->start(drv->trans, drv->trans->cfg, &drv->fw, dbgfs_dir);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	if (!op_mode) {
+		debugfs_remove_recursive(drv->dbgfs_op_mode);
+		drv->dbgfs_op_mode = NULL;
+	}
+#endif
+
+	return op_mode;
+}
+
+static void _iwl_op_mode_stop(struct iwl_drv *drv)
+{
+	/* op_mode can be NULL if its start failed */
+	if (drv->op_mode) {
+		iwl_op_mode_stop(drv->op_mode);
+		drv->op_mode = NULL;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+		debugfs_remove_recursive(drv->dbgfs_op_mode);
+		drv->dbgfs_op_mode = NULL;
+#endif
+	}
+}
+
+/**
+ * iwl_req_fw_callback - callback when firmware was loaded
+ * @ucode_raw: the firmware image that was loaded, or NULL on failure
+ * @context: the struct iwl_drv that requested the firmware
+ *
+ * If loaded successfully, copies the firmware into buffers
+ * for the card to fetch (via DMA).
+ */
+static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
+{
+	struct iwl_drv *drv = context;
+	struct iwl_fw *fw = &drv->fw;
+	struct iwl_ucode_header *ucode;
+	struct iwlwifi_opmode_table *op;
+	int err;
+	struct iwl_firmware_pieces *pieces;
+	const unsigned int api_max = drv->trans->cfg->ucode_api_max;
+	const unsigned int api_min = drv->trans->cfg->ucode_api_min;
+	size_t trigger_tlv_sz[FW_DBG_TRIGGER_MAX];
+	u32 api_ver;
+	int i;
+	bool load_module = false;
+	bool usniffer_images = false;
+
+	fw->ucode_capa.max_probe_length = IWL_DEFAULT_MAX_PROBE_LENGTH;
+	fw->ucode_capa.standard_phy_calibration_size =
+			IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
+	fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS;
+	/* dump all fw memory areas by default */
+	fw->dbg_dump_mask = 0xffffffff;
+
+	pieces = kzalloc(sizeof(*pieces), GFP_KERNEL);
+	if (!pieces)
+		goto out_free_fw;
+
+	if (!ucode_raw)
+		goto try_again;
+
+	IWL_DEBUG_INFO(drv, "Loaded firmware file '%s' (%zd bytes).\n",
+		       drv->firmware_name, ucode_raw->size);
+
+	/* Make sure that we got at least the API version number */
+	if (ucode_raw->size < 4) {
+		IWL_ERR(drv, "File size way too small!\n");
+		goto try_again;
+	}
+
+	/* Data from ucode file:  header followed by uCode images */
+	ucode = (struct iwl_ucode_header *)ucode_raw->data;
+
+	if (ucode->ver)
+		err = iwl_parse_v1_v2_firmware(drv, ucode_raw, pieces);
+	else
+		err = iwl_parse_tlv_firmware(drv, ucode_raw, pieces,
+					     &fw->ucode_capa, &usniffer_images);
+
+	if (err)
+		goto try_again;
+
+	if (fw_has_api(&drv->fw.ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION))
+		api_ver = drv->fw.ucode_ver;
+	else
+		api_ver = IWL_UCODE_API(drv->fw.ucode_ver);
+
+	/*
+	 * api_ver should match the api version forming part of the
+	 * firmware filename ... but we don't check for that and only rely
+	 * on the API version read from firmware header from here on forward
+	 */
+	if (api_ver < api_min || api_ver > api_max) {
+		IWL_ERR(drv,
+			"Driver unable to support your firmware API. "
+			"Driver supports v%u, firmware is v%u.\n",
+			api_max, api_ver);
+		goto try_again;
+	}
+
+	/*
+	 * In mvm uCode there is no difference between data and instructions
+	 * sections.
+	 */
+	if (fw->type == IWL_FW_DVM && validate_sec_sizes(drv, pieces,
+							 drv->trans->cfg))
+		goto try_again;
+
+	/* Allocate ucode buffers for card's bus-master loading ... */
+
+	/* Runtime instructions and 2 copies of data:
+	 * 1) unmodified from disk
+	 * 2) backup cache for save/restore during power-downs
+	 */
+	for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
+		if (iwl_alloc_ucode(drv, pieces, i))
+			goto out_free_fw;
+
+	if (pieces->dbg_dest_tlv_init) {
+		size_t dbg_dest_size = sizeof(*drv->fw.dbg_dest_tlv) +
+			sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]) *
+			drv->fw.dbg_dest_reg_num;
+
+		drv->fw.dbg_dest_tlv = kmalloc(dbg_dest_size, GFP_KERNEL);
+
+		if (!drv->fw.dbg_dest_tlv)
+			goto out_free_fw;
+
+		if (*pieces->dbg_dest_ver == 0) {
+			memcpy(drv->fw.dbg_dest_tlv, pieces->dbg_dest_tlv_v1,
+			       dbg_dest_size);
+		} else {
+			struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv =
+				drv->fw.dbg_dest_tlv;
+
+			dest_tlv->version = pieces->dbg_dest_tlv->version;
+			dest_tlv->monitor_mode =
+				pieces->dbg_dest_tlv->monitor_mode;
+			dest_tlv->size_power =
+				pieces->dbg_dest_tlv->size_power;
+			dest_tlv->wrap_count =
+				pieces->dbg_dest_tlv->wrap_count;
+			dest_tlv->write_ptr_reg =
+				pieces->dbg_dest_tlv->write_ptr_reg;
+			dest_tlv->base_shift =
+				pieces->dbg_dest_tlv->base_shift;
+			memcpy(dest_tlv->reg_ops,
+			       pieces->dbg_dest_tlv->reg_ops,
+			       sizeof(drv->fw.dbg_dest_tlv->reg_ops[0]) *
+			       drv->fw.dbg_dest_reg_num);
+
+			/* In version 1 of the destination TLV, which is
+			 * relevant for the internal buffer exclusively,
+			 * the base address is given together with the
+			 * length of the buffer, and a size shift is given
+			 * instead of an end shift. We now store these
+			 * values in base_reg and end_shift, and when
+			 * dumping the data we'll manipulate them to
+			 * extract both the length and the base address.
+			 */
+			dest_tlv->base_reg = pieces->dbg_dest_tlv->cfg_reg;
+			dest_tlv->end_shift =
+				pieces->dbg_dest_tlv->size_shift;
+		}
+	}
+
+	for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_conf_tlv); i++) {
+		if (pieces->dbg_conf_tlv[i]) {
+			drv->fw.dbg_conf_tlv_len[i] =
+				pieces->dbg_conf_tlv_len[i];
+			drv->fw.dbg_conf_tlv[i] =
+				kmemdup(pieces->dbg_conf_tlv[i],
+					drv->fw.dbg_conf_tlv_len[i],
+					GFP_KERNEL);
+			if (!drv->fw.dbg_conf_tlv[i])
+				goto out_free_fw;
+		}
+	}
+
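+	/*
+	 * Poison the size table first; the minimum payload sizes for the
+	 * known trigger types are filled in explicitly below.
+	 */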
+	memset(&trigger_tlv_sz, 0xff, sizeof(trigger_tlv_sz));
+
+	trigger_tlv_sz[FW_DBG_TRIGGER_MISSED_BEACONS] =
+		sizeof(struct iwl_fw_dbg_trigger_missed_bcon);
+	trigger_tlv_sz[FW_DBG_TRIGGER_CHANNEL_SWITCH] = 0;
+	trigger_tlv_sz[FW_DBG_TRIGGER_FW_NOTIF] =
+		sizeof(struct iwl_fw_dbg_trigger_cmd);
+	trigger_tlv_sz[FW_DBG_TRIGGER_MLME] =
+		sizeof(struct iwl_fw_dbg_trigger_mlme);
+	trigger_tlv_sz[FW_DBG_TRIGGER_STATS] =
+		sizeof(struct iwl_fw_dbg_trigger_stats);
+	trigger_tlv_sz[FW_DBG_TRIGGER_RSSI] =
+		sizeof(struct iwl_fw_dbg_trigger_low_rssi);
+	trigger_tlv_sz[FW_DBG_TRIGGER_TXQ_TIMERS] =
+		sizeof(struct iwl_fw_dbg_trigger_txq_timer);
+	trigger_tlv_sz[FW_DBG_TRIGGER_TIME_EVENT] =
+		sizeof(struct iwl_fw_dbg_trigger_time_event);
+	trigger_tlv_sz[FW_DBG_TRIGGER_BA] =
+		sizeof(struct iwl_fw_dbg_trigger_ba);
+	trigger_tlv_sz[FW_DBG_TRIGGER_TDLS] =
+		sizeof(struct iwl_fw_dbg_trigger_tdls);
+
+	for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++) {
+		if (pieces->dbg_trigger_tlv[i]) {
+			/*
+			 * If the trigger isn't long enough, WARN and exit.
+			 * Someone is trying to debug something and he won't
+			 * be able to catch the bug he is trying to chase.
+			 * We'd better be noisy to be sure he knows what's
+			 * going on.
+			 */
+			if (WARN_ON(pieces->dbg_trigger_tlv_len[i] <
+				    (trigger_tlv_sz[i] +
+				     sizeof(struct iwl_fw_dbg_trigger_tlv))))
+				goto out_free_fw;
+			drv->fw.dbg_trigger_tlv_len[i] =
+				pieces->dbg_trigger_tlv_len[i];
+			drv->fw.dbg_trigger_tlv[i] =
+				kmemdup(pieces->dbg_trigger_tlv[i],
+					drv->fw.dbg_trigger_tlv_len[i],
+					GFP_KERNEL);
+			if (!drv->fw.dbg_trigger_tlv[i])
+				goto out_free_fw;
+		}
+	}
+
+	/* Now that we can no longer fail, copy information */
+
+	drv->fw.dbg_mem_tlv = pieces->dbg_mem_tlv;
+	pieces->dbg_mem_tlv = NULL;
+	drv->fw.n_dbg_mem_tlv = pieces->n_dbg_mem_tlv;
+
+	/*
+	 * The (size - 16) / 12 formula is based on the information recorded
+	 * for each event, which is of mode 1 (including timestamp) for all
+	 * new microcodes that include this information.
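+	 * (16 bytes of log header, then 12 bytes per mode-1 event; for
+	 * example, a 1216 byte log holds (1216 - 16) / 12 = 100 events.)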
+	 */
+	fw->init_evtlog_ptr = pieces->init_evtlog_ptr;
+	if (pieces->init_evtlog_size)
+		fw->init_evtlog_size = (pieces->init_evtlog_size - 16)/12;
+	else
+		fw->init_evtlog_size =
+			drv->trans->cfg->base_params->max_event_log_size;
+	fw->init_errlog_ptr = pieces->init_errlog_ptr;
+	fw->inst_evtlog_ptr = pieces->inst_evtlog_ptr;
+	if (pieces->inst_evtlog_size)
+		fw->inst_evtlog_size = (pieces->inst_evtlog_size - 16)/12;
+	else
+		fw->inst_evtlog_size =
+			drv->trans->cfg->base_params->max_event_log_size;
+	fw->inst_errlog_ptr = pieces->inst_errlog_ptr;
+
+	/*
+	 * Figure out the offset of chain noise reset and gain commands
+	 * based on the size of the standard phy calibration commands
+	 * table.
+	 */
+	if (fw->ucode_capa.standard_phy_calibration_size >
+	    IWL_MAX_PHY_CALIBRATE_TBL_SIZE)
+		fw->ucode_capa.standard_phy_calibration_size =
+			IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE;
+
+	/* We have our copies now, allow the OS to release its copies */
+	release_firmware(ucode_raw);
+
+	mutex_lock(&iwlwifi_opmode_table_mtx);
+	switch (fw->type) {
+	case IWL_FW_DVM:
+		op = &iwlwifi_opmode_table[DVM_OP_MODE];
+		break;
+	default:
+		WARN(1, "Invalid fw type %d\n", fw->type);
+		/* fall through */
+	case IWL_FW_MVM:
+		op = &iwlwifi_opmode_table[MVM_OP_MODE];
+		break;
+	}
+
+	IWL_INFO(drv, "loaded firmware version %s op_mode %s\n",
+		 drv->fw.fw_version, op->name);
+
+	/* add this device to the list of devices using this op_mode */
+	list_add_tail(&drv->list, &op->drv);
+
+	if (op->ops) {
+		drv->op_mode = _iwl_op_mode_start(drv, op);
+
+		if (!drv->op_mode) {
+			mutex_unlock(&iwlwifi_opmode_table_mtx);
+			goto out_unbind;
+		}
+	} else {
+		load_module = true;
+	}
+	mutex_unlock(&iwlwifi_opmode_table_mtx);
+
+	/*
+	 * Complete the firmware request last so that
+	 * a driver unbind (stop) doesn't run while we
+	 * are doing the start() above.
+	 */
+	complete(&drv->request_firmware_complete);
+
+	/*
+	 * Load the module last so we don't block anything
+	 * else from proceeding if the module fails to load
+	 * or hangs loading.
+	 */
+	if (load_module) {
+		err = request_module("%s", op->name);
+#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR
+		if (err)
+			IWL_ERR(drv,
+				"failed to load module %s (error %d), is dynamic loading enabled?\n",
+				op->name, err);
+#endif
+	}
+	goto free;
+
+ try_again:
+	/* try next, if any */
+	release_firmware(ucode_raw);
+	if (iwl_request_firmware(drv, false))
+		goto out_unbind;
+	goto free;
+
+ out_free_fw:
+	iwl_dealloc_ucode(drv);
+	release_firmware(ucode_raw);
+ out_unbind:
+	complete(&drv->request_firmware_complete);
+	device_release_driver(drv->trans->dev);
+ free:
+	if (pieces) {
+		for (i = 0; i < ARRAY_SIZE(pieces->img); i++)
+			kfree(pieces->img[i].sec);
+		kfree(pieces->dbg_mem_tlv);
+		kfree(pieces);
+	}
+}
+
+struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
+{
+	struct iwl_drv *drv;
+	int ret;
+
+	drv = kzalloc(sizeof(*drv), GFP_KERNEL);
+	if (!drv) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	drv->trans = trans;
+	drv->dev = trans->dev;
+
+	init_completion(&drv->request_firmware_complete);
+	INIT_LIST_HEAD(&drv->list);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	/* Create the device debugfs entries. */
+	drv->dbgfs_drv = debugfs_create_dir(dev_name(trans->dev),
+					    iwl_dbgfs_root);
+
+	if (!drv->dbgfs_drv) {
+		IWL_ERR(drv, "failed to create debugfs directory\n");
+		ret = -ENOMEM;
+		goto err_free_drv;
+	}
+
+	/* Create transport layer debugfs dir */
+	drv->trans->dbgfs_dir = debugfs_create_dir("trans", drv->dbgfs_drv);
+
+	if (!drv->trans->dbgfs_dir) {
+		IWL_ERR(drv, "failed to create transport debugfs directory\n");
+		ret = -ENOMEM;
+		goto err_free_dbgfs;
+	}
+#endif
+
+	ret = iwl_request_firmware(drv, true);
+	if (ret) {
+		IWL_ERR(trans, "Couldn't request the fw\n");
+		goto err_fw;
+	}
+
+	return drv;
+
+err_fw:
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+err_free_dbgfs:
+	debugfs_remove_recursive(drv->dbgfs_drv);
+err_free_drv:
+#endif
+	kfree(drv);
+err:
+	return ERR_PTR(ret);
+}
+
+void iwl_drv_stop(struct iwl_drv *drv)
+{
+	wait_for_completion(&drv->request_firmware_complete);
+
+	_iwl_op_mode_stop(drv);
+
+	iwl_dealloc_ucode(drv);
+
+	mutex_lock(&iwlwifi_opmode_table_mtx);
+	/*
+	 * List is empty (this item wasn't added)
+	 * when firmware loading failed -- in that
+	 * case we can't remove it from any list.
+	 */
+	if (!list_empty(&drv->list))
+		list_del(&drv->list);
+	mutex_unlock(&iwlwifi_opmode_table_mtx);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	debugfs_remove_recursive(drv->dbgfs_drv);
+#endif
+
+	kfree(drv);
+}
+
+
+/* shared module parameters */
+struct iwl_mod_params iwlwifi_mod_params = {
+	.fw_restart = true,
+	.bt_coex_active = true,
+	.power_level = IWL_POWER_INDEX_1,
+	.d0i3_disable = true,
+	.d0i3_timeout = 1000,
+	.uapsd_disable = IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT,
+	/* the rest are 0 by default */
+};
+IWL_EXPORT_SYMBOL(iwlwifi_mod_params);
+
+int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops)
+{
+	int i;
+	struct iwl_drv *drv;
+	struct iwlwifi_opmode_table *op;
+
+	mutex_lock(&iwlwifi_opmode_table_mtx);
+	for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) {
+		op = &iwlwifi_opmode_table[i];
+		if (strcmp(op->name, name))
+			continue;
+		op->ops = ops;
+		/* TODO: need to handle exceptional case */
+		list_for_each_entry(drv, &op->drv, list)
+			drv->op_mode = _iwl_op_mode_start(drv, op);
+
+		mutex_unlock(&iwlwifi_opmode_table_mtx);
+		return 0;
+	}
+	mutex_unlock(&iwlwifi_opmode_table_mtx);
+	return -EIO;
+}
+IWL_EXPORT_SYMBOL(iwl_opmode_register);
+
+void iwl_opmode_deregister(const char *name)
+{
+	int i;
+	struct iwl_drv *drv;
+
+	mutex_lock(&iwlwifi_opmode_table_mtx);
+	for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) {
+		if (strcmp(iwlwifi_opmode_table[i].name, name))
+			continue;
+		iwlwifi_opmode_table[i].ops = NULL;
+
+		/* call the stop routine for all devices */
+		list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list)
+			_iwl_op_mode_stop(drv);
+
+		mutex_unlock(&iwlwifi_opmode_table_mtx);
+		return;
+	}
+	mutex_unlock(&iwlwifi_opmode_table_mtx);
+}
+IWL_EXPORT_SYMBOL(iwl_opmode_deregister);
+
+static int __init iwl_drv_init(void)
+{
+	int i;
+
+	mutex_init(&iwlwifi_opmode_table_mtx);
+
+	for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++)
+		INIT_LIST_HEAD(&iwlwifi_opmode_table[i].drv);
+
+	pr_info(DRV_DESCRIPTION "\n");
+	pr_info(DRV_COPYRIGHT "\n");
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	/* Create the root of iwlwifi debugfs subsystem. */
+	iwl_dbgfs_root = debugfs_create_dir(DRV_NAME, NULL);
+
+	if (!iwl_dbgfs_root)
+		return -EFAULT;
+#endif
+
+	return iwl_pci_register_driver();
+}
+module_init(iwl_drv_init);
+
+static void __exit iwl_drv_exit(void)
+{
+	iwl_pci_unregister_driver();
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	debugfs_remove_recursive(iwl_dbgfs_root);
+#endif
+}
+module_exit(iwl_drv_exit);
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+module_param_named(debug, iwlwifi_mod_params.debug_level, uint, 0644);
+MODULE_PARM_DESC(debug, "debug output mask");
+#endif
+
+module_param_named(swcrypto, iwlwifi_mod_params.swcrypto, int, 0444);
+MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
+module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, 0444);
+MODULE_PARM_DESC(11n_disable,
+	"disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX");
+module_param_named(amsdu_size, iwlwifi_mod_params.amsdu_size, int, 0444);
+MODULE_PARM_DESC(amsdu_size,
+		 "amsdu size: 0: 12K for multi Rx queue devices, 2K for 22560 devices, "
+		 "4K for other devices; 1: 4K, 2: 8K, 3: 12K, 4: 2K (default 0)");
+module_param_named(fw_restart, iwlwifi_mod_params.fw_restart, bool, 0444);
+MODULE_PARM_DESC(fw_restart, "restart firmware in case of error (default true)");
+
+module_param_named(antenna_coupling, iwlwifi_mod_params.antenna_coupling,
+		   int, 0444);
+MODULE_PARM_DESC(antenna_coupling,
+		 "specify antenna coupling in dB (default: 0 dB)");
+
+module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, 0444);
+MODULE_PARM_DESC(nvm_file, "NVM file name");
+
+module_param_named(d0i3_disable, iwlwifi_mod_params.d0i3_disable, bool, 0444);
+MODULE_PARM_DESC(d0i3_disable, "disable d0i3 functionality (default: Y)");
+
+module_param_named(lar_disable, iwlwifi_mod_params.lar_disable, bool, 0444);
+MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)");
+
+module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, 0644);
+MODULE_PARM_DESC(uapsd_disable,
+		 "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)");
+
+/*
+ * If bt_coex_active is true, the uCode will do kill/defer every time
+ * the priority line is asserted (BT is sending signals on the priority
+ * line in the PCIx).
+ * If bt_coex_active is false, the uCode will ignore the BT activity and
+ * perform normal operation.
+ *
+ * Users might experience transmit issues on some platforms due to
+ * WiFi/BT co-existence problems. The possible behaviors are:
+ *   Able to scan and find all the available APs
+ *   Not able to associate with any AP
+ * On those platforms, WiFi communication can be restored by setting the
+ * "bt_coex_active" module parameter to "false".
+ *
+ * default: bt_coex_active = true (BT_COEX_ENABLE)
+ */
+module_param_named(bt_coex_active, iwlwifi_mod_params.bt_coex_active,
+		   bool, 0444);
+MODULE_PARM_DESC(bt_coex_active, "enable wifi/bt co-exist (default: enable)");
+
+module_param_named(led_mode, iwlwifi_mod_params.led_mode, int, 0444);
+MODULE_PARM_DESC(led_mode, "0=system default, "
+		"1=On(RF On)/Off(RF Off), 2=blinking, 3=Off (default: 0)");
+
+module_param_named(power_save, iwlwifi_mod_params.power_save, bool, 0444);
+MODULE_PARM_DESC(power_save,
+		 "enable WiFi power management (default: disable)");
+
+module_param_named(power_level, iwlwifi_mod_params.power_level, int, 0444);
+MODULE_PARM_DESC(power_level,
+		 "default power save level (range from 1 - 5, default: 1)");
+
+module_param_named(fw_monitor, iwlwifi_mod_params.fw_monitor, bool, 0444);
+MODULE_PARM_DESC(fw_monitor,
+		 "firmware monitor - to debug FW (default: false - needs lots of memory)");
+
+module_param_named(d0i3_timeout, iwlwifi_mod_params.d0i3_timeout, uint, 0444);
+MODULE_PARM_DESC(d0i3_timeout, "Timeout to D0i3 entry when idle (ms)");
+
+module_param_named(disable_11ac, iwlwifi_mod_params.disable_11ac, bool, 0444);
+MODULE_PARM_DESC(disable_11ac, "Disable VHT capabilities (default: false)");
+
+module_param_named(remove_when_gone,
+		   iwlwifi_mod_params.remove_when_gone, bool,
+		   0444);
+MODULE_PARM_DESC(remove_when_gone,
+		 "Remove dev from PCIe bus if it is deemed inaccessible (default: false)");
+
+module_param_named(disable_11ax, iwlwifi_mod_params.disable_11ax, bool,
+		   S_IRUGO);
+MODULE_PARM_DESC(disable_11ax, "Disable HE capabilities (default: false)");
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
new file mode 100644
index 0000000..1f8a2ee
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
@@ -0,0 +1,153 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __iwl_drv_h__
+#define __iwl_drv_h__
+#include <linux/export.h>
+
+/* for all modules */
+#define DRV_NAME        "iwlwifi"
+#define DRV_COPYRIGHT	"Copyright(c) 2003 - 2015 Intel Corporation"
+#define DRV_AUTHOR     "<linuxwifi@intel.com>"
+
+/* radio config bits (actual values from NVM definition) */
+#define NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
+#define NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
+#define NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
+#define NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
+#define NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
+#define NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
+
+#define EXT_NVM_RF_CFG_FLAVOR_MSK(x)   ((x) & 0xF)
+#define EXT_NVM_RF_CFG_DASH_MSK(x)   (((x) >> 4) & 0xF)
+#define EXT_NVM_RF_CFG_STEP_MSK(x)   (((x) >> 8) & 0xF)
+#define EXT_NVM_RF_CFG_TYPE_MSK(x)   (((x) >> 12) & 0xFFF)
+#define EXT_NVM_RF_CFG_TX_ANT_MSK(x) (((x) >> 24) & 0xF)
+#define EXT_NVM_RF_CFG_RX_ANT_MSK(x) (((x) >> 28) & 0xF)
+
+/**
+ * DOC: Driver system flows - drv component
+ *
+ * This component implements the system flows such as bus enumeration, bus
+ * removal. Bus dependent parts of system flows (such as iwl_pci_probe) are in
+ * bus specific files (transport files). This is the code that is common among
+ * different buses.
+ *
+ * This component is also in charge of managing the several implementations
+ * of the wifi flows: it allows having several fw API implementations. These
+ * different implementations differ in the way they implement mac80211's
+ * handlers too.
+ *
+ * The init flow with respect to the drv component looks like this:
+ * 1) The bus specific component is called from module_init
+ * 2) The bus specific component registers the bus driver
+ * 3) The bus driver calls the probe function
+ * 4) The bus specific component configures the bus
+ * 5) The bus specific component calls the drv bus agnostic part
+ *    (iwl_drv_start)
+ * 6) iwl_drv_start fetches the fw asynchronously; iwl_req_fw_callback
+ *    is invoked when it arrives
+ * 7) iwl_req_fw_callback parses the fw file
+ * 8) iwl_req_fw_callback starts the wifi implementation that matches the fw
+ */
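+
+/*
+ * Illustrative sketch only (not part of the driver): a minimal bus
+ * specific probe built on the flow above might look like this, assuming
+ * a hypothetical my_bus_probe()/my_bus_alloc_trans() provided by the
+ * bus layer:
+ *
+ *	static int my_bus_probe(struct my_bus_device *bdev)
+ *	{
+ *		struct iwl_trans *trans = my_bus_alloc_trans(bdev);
+ *		struct iwl_drv *drv;
+ *
+ *		if (IS_ERR(trans))
+ *			return PTR_ERR(trans);
+ *
+ *		drv = iwl_drv_start(trans);
+ *		if (IS_ERR(drv))
+ *			return PTR_ERR(drv);
+ *
+ *		bdev->drv = drv;
+ *		return 0;
+ *	}
+ */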
+
+struct iwl_drv;
+struct iwl_trans;
+struct iwl_cfg;
+/**
+ * iwl_drv_start - start the drv
+ *
+ * @trans: the transport
+ *
+ * starts the driver: fetches the firmware. This should be called by bus
+ * specific system flows implementations. For example, the bus specific probe
+ * function should do bus related operations only, and then call this
+ * function. It returns the driver object or an ERR_PTR value if an error
+ * occurred.
+ */
+struct iwl_drv *iwl_drv_start(struct iwl_trans *trans);
+
+/**
+ * iwl_drv_stop - stop the drv
+ *
+ * @drv: the driver to stop
+ *
+ * Stop the driver. This should be called by bus specific system flows
+ * implementations. For example, the bus specific remove function should first
+ * call this function and then do the bus related operations only.
+ */
+void iwl_drv_stop(struct iwl_drv *drv);
+
+/*
+ * exported symbol management
+ *
+ * The driver can be split into multiple modules, in which case some symbols
+ * must be exported for the sub-modules. However, if it's not split and
+ * everything is built-in, then we can avoid that.
+ */
+#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR
+#define IWL_EXPORT_SYMBOL(sym)	EXPORT_SYMBOL_GPL(sym)
+#else
+#define IWL_EXPORT_SYMBOL(sym)
+#endif
+
+#endif /* __iwl_drv_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
new file mode 100644
index 0000000..a4c9621
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
@@ -0,0 +1,949 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program;
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include "iwl-drv.h"
+#include "iwl-modparams.h"
+#include "iwl-eeprom-parse.h"
+
+/* EEPROM offset definitions */
+
+/* indirect access definitions */
+#define ADDRESS_MSK                 0x0000FFFF
+#define INDIRECT_TYPE_MSK           0x000F0000
+#define INDIRECT_HOST               0x00010000
+#define INDIRECT_GENERAL            0x00020000
+#define INDIRECT_REGULATORY         0x00030000
+#define INDIRECT_CALIBRATION        0x00040000
+#define INDIRECT_PROCESS_ADJST      0x00050000
+#define INDIRECT_OTHERS             0x00060000
+#define INDIRECT_TXP_LIMIT          0x00070000
+#define INDIRECT_TXP_LIMIT_SIZE     0x00080000
+#define INDIRECT_ADDRESS            0x00100000
+
+/* corresponding link offsets in EEPROM */
+#define EEPROM_LINK_HOST             (2*0x64)
+#define EEPROM_LINK_GENERAL          (2*0x65)
+#define EEPROM_LINK_REGULATORY       (2*0x66)
+#define EEPROM_LINK_CALIBRATION      (2*0x67)
+#define EEPROM_LINK_PROCESS_ADJST    (2*0x68)
+#define EEPROM_LINK_OTHERS           (2*0x69)
+#define EEPROM_LINK_TXP_LIMIT        (2*0x6a)
+#define EEPROM_LINK_TXP_LIMIT_SIZE   (2*0x6b)
+
+/* General */
+#define EEPROM_DEVICE_ID                    (2*0x08)	/* 2 bytes */
+#define EEPROM_SUBSYSTEM_ID		    (2*0x0A)	/* 2 bytes */
+#define EEPROM_MAC_ADDRESS                  (2*0x15)	/* 6  bytes */
+#define EEPROM_BOARD_REVISION               (2*0x35)	/* 2  bytes */
+#define EEPROM_BOARD_PBA_NUMBER             (2*0x3B+1)	/* 9  bytes */
+#define EEPROM_VERSION                      (2*0x44)	/* 2  bytes */
+#define EEPROM_SKU_CAP                      (2*0x45)	/* 2  bytes */
+#define EEPROM_OEM_MODE                     (2*0x46)	/* 2  bytes */
+#define EEPROM_RADIO_CONFIG                 (2*0x48)	/* 2  bytes */
+#define EEPROM_NUM_MAC_ADDRESS              (2*0x4C)	/* 2  bytes */
+
+/* calibration */
+struct iwl_eeprom_calib_hdr {
+	u8 version;
+	u8 pa_type;
+	__le16 voltage;
+} __packed;
+
+#define EEPROM_CALIB_ALL	(INDIRECT_ADDRESS | INDIRECT_CALIBRATION)
+#define EEPROM_XTAL		((2*0x128) | EEPROM_CALIB_ALL)
+
+/* temperature */
+#define EEPROM_KELVIN_TEMPERATURE	((2*0x12A) | EEPROM_CALIB_ALL)
+#define EEPROM_RAW_TEMPERATURE		((2*0x12B) | EEPROM_CALIB_ALL)
+
+/* SKU Capabilities (actual values from EEPROM definition) */
+enum eeprom_sku_bits {
+	EEPROM_SKU_CAP_BAND_24GHZ	= BIT(4),
+	EEPROM_SKU_CAP_BAND_52GHZ	= BIT(5),
+	EEPROM_SKU_CAP_11N_ENABLE	= BIT(6),
+	EEPROM_SKU_CAP_AMT_ENABLE	= BIT(7),
+	EEPROM_SKU_CAP_IPAN_ENABLE	= BIT(8)
+};
+
+/* radio config bits (actual values from EEPROM definition) */
+#define EEPROM_RF_CFG_TYPE_MSK(x)   (x & 0x3)         /* bits 0-1   */
+#define EEPROM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
+#define EEPROM_RF_CFG_DASH_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
+#define EEPROM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
+#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
+#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
+
+
+/*
+ * EEPROM bands
+ * These are the channel numbers from each band in the order
+ * that they are stored in the EEPROM band information. Note
+ * that EEPROM bands aren't the same as mac80211 bands, and
+ * there are even special "ht40 bands" in the EEPROM.
+ */
+static const u8 iwl_eeprom_band_1[14] = { /* 2.4 GHz */
+	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+};
+
+static const u8 iwl_eeprom_band_2[] = {	/* 4915-5080MHz */
+	183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
+};
+
+static const u8 iwl_eeprom_band_3[] = {	/* 5170-5320MHz */
+	34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
+};
+
+static const u8 iwl_eeprom_band_4[] = {	/* 5500-5700MHz */
+	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
+};
+
+static const u8 iwl_eeprom_band_5[] = {	/* 5725-5825MHz */
+	145, 149, 153, 157, 161, 165
+};
+
+static const u8 iwl_eeprom_band_6[] = {	/* 2.4 ht40 channel */
+	1, 2, 3, 4, 5, 6, 7
+};
+
+static const u8 iwl_eeprom_band_7[] = {	/* 5.2 ht40 channel */
+	36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
+};
+
+#define IWL_NUM_CHANNELS	(ARRAY_SIZE(iwl_eeprom_band_1) + \
+				 ARRAY_SIZE(iwl_eeprom_band_2) + \
+				 ARRAY_SIZE(iwl_eeprom_band_3) + \
+				 ARRAY_SIZE(iwl_eeprom_band_4) + \
+				 ARRAY_SIZE(iwl_eeprom_band_5))
+
+/* rate data (static) */
+static struct ieee80211_rate iwl_cfg80211_rates[] = {
+	{ .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, },
+	{ .bitrate = 2 * 10, .hw_value = 1, .hw_value_short = 1,
+	  .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
+	{ .bitrate = 5.5 * 10, .hw_value = 2, .hw_value_short = 2,
+	  .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
+	{ .bitrate = 11 * 10, .hw_value = 3, .hw_value_short = 3,
+	  .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
+	{ .bitrate = 6 * 10, .hw_value = 4, .hw_value_short = 4, },
+	{ .bitrate = 9 * 10, .hw_value = 5, .hw_value_short = 5, },
+	{ .bitrate = 12 * 10, .hw_value = 6, .hw_value_short = 6, },
+	{ .bitrate = 18 * 10, .hw_value = 7, .hw_value_short = 7, },
+	{ .bitrate = 24 * 10, .hw_value = 8, .hw_value_short = 8, },
+	{ .bitrate = 36 * 10, .hw_value = 9, .hw_value_short = 9, },
+	{ .bitrate = 48 * 10, .hw_value = 10, .hw_value_short = 10, },
+	{ .bitrate = 54 * 10, .hw_value = 11, .hw_value_short = 11, },
+};
+#define RATES_24_OFFS	0
+#define N_RATES_24	ARRAY_SIZE(iwl_cfg80211_rates)
+#define RATES_52_OFFS	4
+#define N_RATES_52	(N_RATES_24 - RATES_52_OFFS)
+
+/* EEPROM reading functions */
+
+static u16 iwl_eeprom_query16(const u8 *eeprom, size_t eeprom_size, int offset)
+{
+	if (WARN_ON(offset + sizeof(u16) > eeprom_size))
+		return 0;
+	return le16_to_cpup((__le16 *)(eeprom + offset));
+}
+
+static u32 eeprom_indirect_address(const u8 *eeprom, size_t eeprom_size,
+				   u32 address)
+{
+	u16 offset = 0;
+
+	if ((address & INDIRECT_ADDRESS) == 0)
+		return address;
+
+	switch (address & INDIRECT_TYPE_MSK) {
+	case INDIRECT_HOST:
+		offset = iwl_eeprom_query16(eeprom, eeprom_size,
+					    EEPROM_LINK_HOST);
+		break;
+	case INDIRECT_GENERAL:
+		offset = iwl_eeprom_query16(eeprom, eeprom_size,
+					    EEPROM_LINK_GENERAL);
+		break;
+	case INDIRECT_REGULATORY:
+		offset = iwl_eeprom_query16(eeprom, eeprom_size,
+					    EEPROM_LINK_REGULATORY);
+		break;
+	case INDIRECT_TXP_LIMIT:
+		offset = iwl_eeprom_query16(eeprom, eeprom_size,
+					    EEPROM_LINK_TXP_LIMIT);
+		break;
+	case INDIRECT_TXP_LIMIT_SIZE:
+		offset = iwl_eeprom_query16(eeprom, eeprom_size,
+					    EEPROM_LINK_TXP_LIMIT_SIZE);
+		break;
+	case INDIRECT_CALIBRATION:
+		offset = iwl_eeprom_query16(eeprom, eeprom_size,
+					    EEPROM_LINK_CALIBRATION);
+		break;
+	case INDIRECT_PROCESS_ADJST:
+		offset = iwl_eeprom_query16(eeprom, eeprom_size,
+					    EEPROM_LINK_PROCESS_ADJST);
+		break;
+	case INDIRECT_OTHERS:
+		offset = iwl_eeprom_query16(eeprom, eeprom_size,
+					    EEPROM_LINK_OTHERS);
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+
+	/* translate the offset from words to bytes */
+	return (address & ADDRESS_MSK) + (offset << 1);
+}
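+
+/*
+ * Worked example (the link value is hypothetical): EEPROM_XTAL is
+ * defined below as (2*0x128) | INDIRECT_ADDRESS | INDIRECT_CALIBRATION.
+ * Resolving it reads the 16-bit word at EEPROM_LINK_CALIBRATION -- say
+ * 0x0200 words -- and returns (0x250 & ADDRESS_MSK) + (0x0200 << 1) =
+ * 0x650 as the byte offset of the crystal calibration data.
+ */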
+
+static const u8 *iwl_eeprom_query_addr(const u8 *eeprom, size_t eeprom_size,
+				       u32 offset)
+{
+	u32 address = eeprom_indirect_address(eeprom, eeprom_size, offset);
+
+	if (WARN_ON(address >= eeprom_size))
+		return NULL;
+
+	return &eeprom[address];
+}
+
+static int iwl_eeprom_read_calib(const u8 *eeprom, size_t eeprom_size,
+				 struct iwl_nvm_data *data)
+{
+	struct iwl_eeprom_calib_hdr *hdr;
+
+	hdr = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size,
+					    EEPROM_CALIB_ALL);
+	if (!hdr)
+		return -ENODATA;
+	data->calib_version = hdr->version;
+	data->calib_voltage = hdr->voltage;
+
+	return 0;
+}
+
+/**
+ * enum iwl_eeprom_channel_flags - channel flags in EEPROM
+ * @EEPROM_CHANNEL_VALID: channel is usable for this SKU/geo
+ * @EEPROM_CHANNEL_IBSS: usable as an IBSS channel
+ * @EEPROM_CHANNEL_ACTIVE: active scanning allowed
+ * @EEPROM_CHANNEL_RADAR: radar detection required
+ * @EEPROM_CHANNEL_WIDE: 20 MHz channel okay (?)
+ * @EEPROM_CHANNEL_DFS: dynamic freq selection candidate
+ */
+enum iwl_eeprom_channel_flags {
+	EEPROM_CHANNEL_VALID = BIT(0),
+	EEPROM_CHANNEL_IBSS = BIT(1),
+	EEPROM_CHANNEL_ACTIVE = BIT(3),
+	EEPROM_CHANNEL_RADAR = BIT(4),
+	EEPROM_CHANNEL_WIDE = BIT(5),
+	EEPROM_CHANNEL_DFS = BIT(7),
+};
+
+/**
+ * struct iwl_eeprom_channel - EEPROM channel data
+ * @flags: %EEPROM_CHANNEL_* flags
+ * @max_power_avg: max power (in dBm) on this channel, at most 31 dBm
+ */
+struct iwl_eeprom_channel {
+	u8 flags;
+	s8 max_power_avg;
+} __packed;
+
+
+enum iwl_eeprom_enhanced_txpwr_flags {
+	IWL_EEPROM_ENH_TXP_FL_VALID = BIT(0),
+	IWL_EEPROM_ENH_TXP_FL_BAND_52G = BIT(1),
+	IWL_EEPROM_ENH_TXP_FL_OFDM = BIT(2),
+	IWL_EEPROM_ENH_TXP_FL_40MHZ = BIT(3),
+	IWL_EEPROM_ENH_TXP_FL_HT_AP = BIT(4),
+	IWL_EEPROM_ENH_TXP_FL_RES1 = BIT(5),
+	IWL_EEPROM_ENH_TXP_FL_RES2 = BIT(6),
+	IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE = BIT(7),
+};
+
+/**
+ * struct iwl_eeprom_enhanced_txpwr - enhanced regulatory TX power limits
+ * @flags: entry flags
+ * @channel: channel number
+ * @chain_a_max: chain a max power in 1/2 dBm
+ * @chain_b_max: chain b max power in 1/2 dBm
+ * @chain_c_max: chain c max power in 1/2 dBm
+ * @delta_20_in_40: 20-in-40 deltas (hi/lo)
+ * @mimo2_max: mimo2 max power in 1/2 dBm
+ * @mimo3_max: mimo3 max power in 1/2 dBm
+ *
+ * This structure presents the enhanced regulatory tx power limit layout
+ * in an EEPROM image.
+ */
+struct iwl_eeprom_enhanced_txpwr {
+	u8 flags;
+	u8 channel;
+	s8 chain_a_max;
+	s8 chain_b_max;
+	s8 chain_c_max;
+	u8 delta_20_in_40;
+	s8 mimo2_max;
+	s8 mimo3_max;
+} __packed;
+
+static s8 iwl_get_max_txpwr_half_dbm(const struct iwl_nvm_data *data,
+				     struct iwl_eeprom_enhanced_txpwr *txp)
+{
+	s8 result = 0; /* in units of 1/2 dBm */
+
+	/* Take the highest tx power from any valid chains */
+	if (data->valid_tx_ant & ANT_A && txp->chain_a_max > result)
+		result = txp->chain_a_max;
+
+	if (data->valid_tx_ant & ANT_B && txp->chain_b_max > result)
+		result = txp->chain_b_max;
+
+	if (data->valid_tx_ant & ANT_C && txp->chain_c_max > result)
+		result = txp->chain_c_max;
+
+	if ((data->valid_tx_ant == ANT_AB ||
+	     data->valid_tx_ant == ANT_BC ||
+	     data->valid_tx_ant == ANT_AC) && txp->mimo2_max > result)
+		result = txp->mimo2_max;
+
+	if (data->valid_tx_ant == ANT_ABC && txp->mimo3_max > result)
+		result = txp->mimo3_max;
+
+	return result;
+}
+
+#define EEPROM_TXP_OFFS	(0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT)
+#define EEPROM_TXP_ENTRY_LEN sizeof(struct iwl_eeprom_enhanced_txpwr)
+#define EEPROM_TXP_SZ_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT_SIZE)
+
+#define TXP_CHECK_AND_PRINT(x) \
+	((txp->flags & IWL_EEPROM_ENH_TXP_FL_##x) ? # x " " : "")
+
+static void
+iwl_eeprom_enh_txp_read_element(struct iwl_nvm_data *data,
+				struct iwl_eeprom_enhanced_txpwr *txp,
+				int n_channels, s8 max_txpower_avg)
+{
+	int ch_idx;
+	enum nl80211_band band;
+
+	band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ?
+		NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
+
+	for (ch_idx = 0; ch_idx < n_channels; ch_idx++) {
+		struct ieee80211_channel *chan = &data->channels[ch_idx];
+
+		/* update only the matching channel, or all channels for a common entry */
+		if (txp->channel != 0 && chan->hw_value != txp->channel)
+			continue;
+
+		/* update matching band only */
+		if (band != chan->band)
+			continue;
+
+		if (chan->max_power < max_txpower_avg &&
+		    !(txp->flags & IWL_EEPROM_ENH_TXP_FL_40MHZ))
+			chan->max_power = max_txpower_avg;
+	}
+}
+
+static void iwl_eeprom_enhanced_txpower(struct device *dev,
+					struct iwl_nvm_data *data,
+					const u8 *eeprom, size_t eeprom_size,
+					int n_channels)
+{
+	struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
+	int idx, entries;
+	__le16 *txp_len;
+	s8 max_txp_avg_halfdbm;
+
+	BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);
+
+	/* the length is in 16-bit words, but we want entries */
+	txp_len = (__le16 *)iwl_eeprom_query_addr(eeprom, eeprom_size,
+						  EEPROM_TXP_SZ_OFFS);
+	entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;
+
+	txp_array = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size,
+						  EEPROM_TXP_OFFS);
+
+	for (idx = 0; idx < entries; idx++) {
+		txp = &txp_array[idx];
+		/* skip invalid entries */
+		if (!(txp->flags & IWL_EEPROM_ENH_TXP_FL_VALID))
+			continue;
+
+		IWL_DEBUG_EEPROM(dev, "%s %d:\t %s%s%s%s%s%s%s%s (0x%02x)\n",
+				 (txp->channel && (txp->flags &
+					IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE)) ?
+					"Common " : (txp->channel) ?
+					"Channel" : "Common",
+				 (txp->channel),
+				 TXP_CHECK_AND_PRINT(VALID),
+				 TXP_CHECK_AND_PRINT(BAND_52G),
+				 TXP_CHECK_AND_PRINT(OFDM),
+				 TXP_CHECK_AND_PRINT(40MHZ),
+				 TXP_CHECK_AND_PRINT(HT_AP),
+				 TXP_CHECK_AND_PRINT(RES1),
+				 TXP_CHECK_AND_PRINT(RES2),
+				 TXP_CHECK_AND_PRINT(COMMON_TYPE),
+				 txp->flags);
+		IWL_DEBUG_EEPROM(dev,
+				 "\t\t chain_A: %d chain_B: %d chain_C: %d\n",
+				 txp->chain_a_max, txp->chain_b_max,
+				 txp->chain_c_max);
+		IWL_DEBUG_EEPROM(dev,
+				 "\t\t MIMO2: %d MIMO3: %d High 20_on_40: 0x%02x Low 20_on_40: 0x%02x\n",
+				 txp->mimo2_max, txp->mimo3_max,
+				 ((txp->delta_20_in_40 & 0xf0) >> 4),
+				 (txp->delta_20_in_40 & 0x0f));
+
+		max_txp_avg_halfdbm = iwl_get_max_txpwr_half_dbm(data, txp);
+
+		iwl_eeprom_enh_txp_read_element(data, txp, n_channels,
+				DIV_ROUND_UP(max_txp_avg_halfdbm, 2));
+
+		if (max_txp_avg_halfdbm > data->max_tx_pwr_half_dbm)
+			data->max_tx_pwr_half_dbm = max_txp_avg_halfdbm;
+	}
+}
+
+static void iwl_init_band_reference(const struct iwl_cfg *cfg,
+				    const u8 *eeprom, size_t eeprom_size,
+				    int eeprom_band, int *eeprom_ch_count,
+				    const struct iwl_eeprom_channel **ch_info,
+				    const u8 **eeprom_ch_array)
+{
+	u32 offset = cfg->eeprom_params->regulatory_bands[eeprom_band - 1];
+
+	offset |= INDIRECT_ADDRESS | INDIRECT_REGULATORY;
+
+	*ch_info = (void *)iwl_eeprom_query_addr(eeprom, eeprom_size, offset);
+
+	switch (eeprom_band) {
+	case 1:		/* 2.4GHz band */
+		*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
+		*eeprom_ch_array = iwl_eeprom_band_1;
+		break;
+	case 2:		/* 4.9GHz band */
+		*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
+		*eeprom_ch_array = iwl_eeprom_band_2;
+		break;
+	case 3:		/* 5.2GHz band */
+		*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
+		*eeprom_ch_array = iwl_eeprom_band_3;
+		break;
+	case 4:		/* 5.5GHz band */
+		*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
+		*eeprom_ch_array = iwl_eeprom_band_4;
+		break;
+	case 5:		/* 5.7GHz band */
+		*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
+		*eeprom_ch_array = iwl_eeprom_band_5;
+		break;
+	case 6:		/* 2.4GHz ht40 channels */
+		*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6);
+		*eeprom_ch_array = iwl_eeprom_band_6;
+		break;
+	case 7:		/* 5 GHz ht40 channels */
+		*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7);
+		*eeprom_ch_array = iwl_eeprom_band_7;
+		break;
+	default:
+		*eeprom_ch_count = 0;
+		*eeprom_ch_array = NULL;
+		WARN_ON(1);
+	}
+}
+
+#define CHECK_AND_PRINT(x) \
+	((eeprom_ch->flags & EEPROM_CHANNEL_##x) ? # x " " : "")
+
+static void iwl_mod_ht40_chan_info(struct device *dev,
+				   struct iwl_nvm_data *data, int n_channels,
+				   enum nl80211_band band, u16 channel,
+				   const struct iwl_eeprom_channel *eeprom_ch,
+				   u8 clear_ht40_extension_channel)
+{
+	struct ieee80211_channel *chan = NULL;
+	int i;
+
+	for (i = 0; i < n_channels; i++) {
+		if (data->channels[i].band != band)
+			continue;
+		if (data->channels[i].hw_value != channel)
+			continue;
+		chan = &data->channels[i];
+		break;
+	}
+
+	if (!chan)
+		return;
+
+	IWL_DEBUG_EEPROM(dev,
+			 "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
+			 channel,
+			 band == NL80211_BAND_5GHZ ? "5.2" : "2.4",
+			 CHECK_AND_PRINT(IBSS),
+			 CHECK_AND_PRINT(ACTIVE),
+			 CHECK_AND_PRINT(RADAR),
+			 CHECK_AND_PRINT(WIDE),
+			 CHECK_AND_PRINT(DFS),
+			 eeprom_ch->flags,
+			 eeprom_ch->max_power_avg,
+			 ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) &&
+			  !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? ""
+								      : "not ");
+
+	if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
+		chan->flags &= ~clear_ht40_extension_channel;
+}
+
+#define CHECK_AND_PRINT_I(x)	\
+	((eeprom_ch_info[ch_idx].flags & EEPROM_CHANNEL_##x) ? # x " " : "")
+
+static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
+				struct iwl_nvm_data *data,
+				const u8 *eeprom, size_t eeprom_size)
+{
+	int band, ch_idx;
+	const struct iwl_eeprom_channel *eeprom_ch_info;
+	const u8 *eeprom_ch_array;
+	int eeprom_ch_count;
+	int n_channels = 0;
+
+	/*
+	 * Loop through the 5 EEPROM bands and add them to the parse list
+	 */
+	for (band = 1; band <= 5; band++) {
+		struct ieee80211_channel *channel;
+
+		iwl_init_band_reference(cfg, eeprom, eeprom_size, band,
+					&eeprom_ch_count, &eeprom_ch_info,
+					&eeprom_ch_array);
+
+		/* Loop through each band adding each of the channels */
+		for (ch_idx = 0; ch_idx < eeprom_ch_count; ch_idx++) {
+			const struct iwl_eeprom_channel *eeprom_ch;
+
+			eeprom_ch = &eeprom_ch_info[ch_idx];
+
+			if (!(eeprom_ch->flags & EEPROM_CHANNEL_VALID)) {
+				IWL_DEBUG_EEPROM(dev,
+						 "Ch. %d Flags %x [%sGHz] - No traffic\n",
+						 eeprom_ch_array[ch_idx],
+						 eeprom_ch_info[ch_idx].flags,
+						 (band != 1) ? "5.2" : "2.4");
+				continue;
+			}
+
+			channel = &data->channels[n_channels];
+			n_channels++;
+
+			channel->hw_value = eeprom_ch_array[ch_idx];
+			channel->band = (band == 1) ? NL80211_BAND_2GHZ
+						    : NL80211_BAND_5GHZ;
+			channel->center_freq =
+				ieee80211_channel_to_frequency(
+					channel->hw_value, channel->band);
+
+			/* set no-HT40, will enable as appropriate later */
+			channel->flags = IEEE80211_CHAN_NO_HT40;
+
+			if (!(eeprom_ch->flags & EEPROM_CHANNEL_IBSS))
+				channel->flags |= IEEE80211_CHAN_NO_IR;
+
+			if (!(eeprom_ch->flags & EEPROM_CHANNEL_ACTIVE))
+				channel->flags |= IEEE80211_CHAN_NO_IR;
+
+			if (eeprom_ch->flags & EEPROM_CHANNEL_RADAR)
+				channel->flags |= IEEE80211_CHAN_RADAR;
+
+			/* Initialize regulatory-based run-time data */
+			channel->max_power =
+				eeprom_ch_info[ch_idx].max_power_avg;
+			IWL_DEBUG_EEPROM(dev,
+					 "Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
+					 channel->hw_value,
+					 (band != 1) ? "5.2" : "2.4",
+					 CHECK_AND_PRINT_I(VALID),
+					 CHECK_AND_PRINT_I(IBSS),
+					 CHECK_AND_PRINT_I(ACTIVE),
+					 CHECK_AND_PRINT_I(RADAR),
+					 CHECK_AND_PRINT_I(WIDE),
+					 CHECK_AND_PRINT_I(DFS),
+					 eeprom_ch_info[ch_idx].flags,
+					 eeprom_ch_info[ch_idx].max_power_avg,
+					 ((eeprom_ch_info[ch_idx].flags &
+							EEPROM_CHANNEL_IBSS) &&
+					  !(eeprom_ch_info[ch_idx].flags &
+							EEPROM_CHANNEL_RADAR))
+						? "" : "not ");
+		}
+	}
+
+	if (cfg->eeprom_params->enhanced_txpower) {
+		/*
+		 * For newer devices (6000 series and up) the EEPROM
+		 * contains enhanced TX power information; the driver
+		 * needs to process this additional information to
+		 * determine the max channel TX power limits.
+		 */
+		iwl_eeprom_enhanced_txpower(dev, data, eeprom, eeprom_size,
+					    n_channels);
+	} else {
+		/* All others use data from channel map */
+		int i;
+
+		data->max_tx_pwr_half_dbm = -128;
+
+		for (i = 0; i < n_channels; i++)
+			data->max_tx_pwr_half_dbm =
+				max_t(s8, data->max_tx_pwr_half_dbm,
+				      data->channels[i].max_power * 2);
+	}
+
+	/* Check if we do have HT40 channels */
+	if (cfg->eeprom_params->regulatory_bands[5] ==
+				EEPROM_REGULATORY_BAND_NO_HT40 &&
+	    cfg->eeprom_params->regulatory_bands[6] ==
+				EEPROM_REGULATORY_BAND_NO_HT40)
+		return n_channels;
+
+	/* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
+	for (band = 6; band <= 7; band++) {
+		enum nl80211_band ieeeband;
+
+		iwl_init_band_reference(cfg, eeprom, eeprom_size, band,
+					&eeprom_ch_count, &eeprom_ch_info,
+					&eeprom_ch_array);
+
+		/* EEPROM band 6 is 2.4, band 7 is 5 GHz */
+		ieeeband = (band == 6) ? NL80211_BAND_2GHZ
+				       : NL80211_BAND_5GHZ;
+
+		/* Loop through each band adding each of the channels */
+		for (ch_idx = 0; ch_idx < eeprom_ch_count; ch_idx++) {
+			/* Set up driver's info for lower half */
+			iwl_mod_ht40_chan_info(dev, data, n_channels, ieeeband,
+					       eeprom_ch_array[ch_idx],
+					       &eeprom_ch_info[ch_idx],
+					       IEEE80211_CHAN_NO_HT40PLUS);
+
+			/* Set up driver's info for upper half */
+			iwl_mod_ht40_chan_info(dev, data, n_channels, ieeeband,
+					       eeprom_ch_array[ch_idx] + 4,
+					       &eeprom_ch_info[ch_idx],
+					       IEEE80211_CHAN_NO_HT40MINUS);
+		}
+	}
+
+	return n_channels;
+}
+
+int iwl_init_sband_channels(struct iwl_nvm_data *data,
+			    struct ieee80211_supported_band *sband,
+			    int n_channels, enum nl80211_band band)
+{
+	struct ieee80211_channel *chan = &data->channels[0];
+	int n = 0, idx = 0;
+
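+	/*
+	 * The channel array was filled in band order by
+	 * iwl_init_channel_map() (2.4 GHz first, then 5 GHz), so each
+	 * band is one contiguous slice: skip to its first channel, then
+	 * count entries until the band changes.
+	 */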
+	while (idx < n_channels && chan->band != band)
+		chan = &data->channels[++idx];
+
+	sband->channels = &data->channels[idx];
+
+	while (idx < n_channels && chan->band == band) {
+		chan = &data->channels[++idx];
+		n++;
+	}
+
+	sband->n_channels = n;
+
+	return n;
+}
+
+#define MAX_BIT_RATE_40_MHZ	150 /* Mbps */
+#define MAX_BIT_RATE_20_MHZ	72 /* Mbps */
+
+void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
+			  struct iwl_nvm_data *data,
+			  struct ieee80211_sta_ht_cap *ht_info,
+			  enum nl80211_band band,
+			  u8 tx_chains, u8 rx_chains)
+{
+	int max_bit_rate = 0;
+
+	tx_chains = hweight8(tx_chains);
+	if (cfg->rx_with_siso_diversity)
+		rx_chains = 1;
+	else
+		rx_chains = hweight8(rx_chains);
+
+	if (!(data->sku_cap_11n_enable) || !cfg->ht_params) {
+		ht_info->ht_supported = false;
+		return;
+	}
+
+	if (data->sku_cap_mimo_disabled)
+		rx_chains = 1;
+
+	ht_info->ht_supported = true;
+	ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40;
+
+	if (cfg->ht_params->stbc) {
+		ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+
+		if (tx_chains > 1)
+			ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
+	}
+
+	if (cfg->ht_params->ldpc)
+		ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
+	if ((cfg->mq_rx_supported &&
+	     iwlwifi_mod_params.amsdu_size == IWL_AMSDU_DEF) ||
+	     iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K)
+		ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+
+	ht_info->ampdu_factor = cfg->max_ht_ampdu_exponent;
+	ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
+
+	ht_info->mcs.rx_mask[0] = 0xFF;
+	if (rx_chains >= 2)
+		ht_info->mcs.rx_mask[1] = 0xFF;
+	if (rx_chains >= 3)
+		ht_info->mcs.rx_mask[2] = 0xFF;
+
+	if (cfg->ht_params->ht_greenfield_support)
+		ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
+	ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
+
+	max_bit_rate = MAX_BIT_RATE_20_MHZ;
+
+	if (cfg->ht_params->ht40_bands & BIT(band)) {
+		ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+		ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
+		max_bit_rate = MAX_BIT_RATE_40_MHZ;
+	}
+
+	/* Highest supported Rx data rate */
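+	/* e.g. two RX chains at 40 MHz: 150 Mbps per chain -> 300 Mbps */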
+	max_bit_rate *= rx_chains;
+	WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
+	ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
+
+	/* Tx MCS capabilities */
+	ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+	if (tx_chains != rx_chains) {
+		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
+		ht_info->mcs.tx_params |= ((tx_chains - 1) <<
+				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+	}
+}
+
+static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
+			    struct iwl_nvm_data *data,
+			    const u8 *eeprom, size_t eeprom_size)
+{
+	int n_channels = iwl_init_channel_map(dev, cfg, data,
+					      eeprom, eeprom_size);
+	int n_used = 0;
+	struct ieee80211_supported_band *sband;
+
+	sband = &data->bands[NL80211_BAND_2GHZ];
+	sband->band = NL80211_BAND_2GHZ;
+	sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
+	sband->n_bitrates = N_RATES_24;
+	n_used += iwl_init_sband_channels(data, sband, n_channels,
+					  NL80211_BAND_2GHZ);
+	iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ,
+			     data->valid_tx_ant, data->valid_rx_ant);
+
+	sband = &data->bands[NL80211_BAND_5GHZ];
+	sband->band = NL80211_BAND_5GHZ;
+	sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
+	sband->n_bitrates = N_RATES_52;
+	n_used += iwl_init_sband_channels(data, sband, n_channels,
+					  NL80211_BAND_5GHZ);
+	iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_5GHZ,
+			     data->valid_tx_ant, data->valid_rx_ant);
+
+	if (n_channels != n_used)
+		IWL_ERR_DEV(dev, "EEPROM: used only %d of %d channels\n",
+			    n_used, n_channels);
+}
+
+/* EEPROM data functions */
+
+struct iwl_nvm_data *
+iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
+		      const u8 *eeprom, size_t eeprom_size)
+{
+	struct iwl_nvm_data *data;
+	const void *tmp;
+	u16 radio_cfg, sku;
+
+	if (WARN_ON(!cfg || !cfg->eeprom_params))
+		return NULL;
+
+	data = kzalloc(sizeof(*data) +
+		       sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS,
+		       GFP_KERNEL);
+	if (!data)
+		return NULL;
+
+	/* get MAC address(es) */
+	tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_MAC_ADDRESS);
+	if (!tmp)
+		goto err_free;
+	memcpy(data->hw_addr, tmp, ETH_ALEN);
+	data->n_hw_addrs = iwl_eeprom_query16(eeprom, eeprom_size,
+					      EEPROM_NUM_MAC_ADDRESS);
+
+	if (iwl_eeprom_read_calib(eeprom, eeprom_size, data))
+		goto err_free;
+
+	tmp = iwl_eeprom_query_addr(eeprom, eeprom_size, EEPROM_XTAL);
+	if (!tmp)
+		goto err_free;
+	memcpy(data->xtal_calib, tmp, sizeof(data->xtal_calib));
+
+	tmp = iwl_eeprom_query_addr(eeprom, eeprom_size,
+				    EEPROM_RAW_TEMPERATURE);
+	if (!tmp)
+		goto err_free;
+	data->raw_temperature = *(__le16 *)tmp;
+
+	tmp = iwl_eeprom_query_addr(eeprom, eeprom_size,
+				    EEPROM_KELVIN_TEMPERATURE);
+	if (!tmp)
+		goto err_free;
+	data->kelvin_temperature = *(__le16 *)tmp;
+	data->kelvin_voltage = *((__le16 *)tmp + 1);
+
+	radio_cfg = iwl_eeprom_query16(eeprom, eeprom_size,
+				       EEPROM_RADIO_CONFIG);
+	data->radio_cfg_dash = EEPROM_RF_CFG_DASH_MSK(radio_cfg);
+	data->radio_cfg_pnum = EEPROM_RF_CFG_PNUM_MSK(radio_cfg);
+	data->radio_cfg_step = EEPROM_RF_CFG_STEP_MSK(radio_cfg);
+	data->radio_cfg_type = EEPROM_RF_CFG_TYPE_MSK(radio_cfg);
+	data->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
+	data->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
+
+	sku = iwl_eeprom_query16(eeprom, eeprom_size,
+				 EEPROM_SKU_CAP);
+	data->sku_cap_11n_enable = sku & EEPROM_SKU_CAP_11N_ENABLE;
+	data->sku_cap_amt_enable = sku & EEPROM_SKU_CAP_AMT_ENABLE;
+	data->sku_cap_band_24ghz_enable = sku & EEPROM_SKU_CAP_BAND_24GHZ;
+	data->sku_cap_band_52ghz_enable = sku & EEPROM_SKU_CAP_BAND_52GHZ;
+	data->sku_cap_ipan_enable = sku & EEPROM_SKU_CAP_IPAN_ENABLE;
+	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
+		data->sku_cap_11n_enable = false;
+
+	data->nvm_version = iwl_eeprom_query16(eeprom, eeprom_size,
+					       EEPROM_VERSION);
+
+	/* check overrides (some devices have wrong EEPROM) */
+	if (cfg->valid_tx_ant)
+		data->valid_tx_ant = cfg->valid_tx_ant;
+	if (cfg->valid_rx_ant)
+		data->valid_rx_ant = cfg->valid_rx_ant;
+
+	if (!data->valid_tx_ant || !data->valid_rx_ant) {
+		IWL_ERR_DEV(dev, "invalid antennas (0x%x, 0x%x)\n",
+			    data->valid_tx_ant, data->valid_rx_ant);
+		goto err_free;
+	}
+
+	iwl_init_sbands(dev, cfg, data, eeprom, eeprom_size);
+
+	return data;
+ err_free:
+	kfree(data);
+	return NULL;
+}
+IWL_EXPORT_SYMBOL(iwl_parse_eeprom_data);
+
+/* helper functions */
+int iwl_nvm_check_version(struct iwl_nvm_data *data,
+			  struct iwl_trans *trans)
+{
+	if (data->nvm_version >= trans->cfg->nvm_ver ||
+	    data->calib_version >= trans->cfg->nvm_calib_ver) {
+		IWL_DEBUG_INFO(trans, "device EEPROM VER=0x%x, CALIB=0x%x\n",
+			       data->nvm_version, data->calib_version);
+		return 0;
+	}
+
+	IWL_ERR(trans,
+		"Unsupported (too old) EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n",
+		data->nvm_version, trans->cfg->nvm_ver,
+		data->calib_version, trans->cfg->nvm_calib_ver);
+	return -EINVAL;
+}
+IWL_EXPORT_SYMBOL(iwl_nvm_check_version);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
new file mode 100644
index 0000000..8be50ed
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
@@ -0,0 +1,138 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+#ifndef __iwl_eeprom_parse_h__
+#define __iwl_eeprom_parse_h__
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <net/cfg80211.h>
+#include "iwl-trans.h"
+
+struct iwl_nvm_data {
+	int n_hw_addrs;
+	u8 hw_addr[ETH_ALEN];
+
+	u8 calib_version;
+	__le16 calib_voltage;
+
+	__le16 raw_temperature;
+	__le16 kelvin_temperature;
+	__le16 kelvin_voltage;
+	__le16 xtal_calib[2];
+
+	bool sku_cap_band_24ghz_enable;
+	bool sku_cap_band_52ghz_enable;
+	bool sku_cap_11n_enable;
+	bool sku_cap_11ac_enable;
+	bool sku_cap_11ax_enable;
+	bool sku_cap_amt_enable;
+	bool sku_cap_ipan_enable;
+	bool sku_cap_mimo_disabled;
+
+	u16 radio_cfg_type;
+	u8 radio_cfg_step;
+	u8 radio_cfg_dash;
+	u8 radio_cfg_pnum;
+	u8 valid_tx_ant, valid_rx_ant;
+
+	u32 nvm_version;
+	s8 max_tx_pwr_half_dbm;
+
+	bool lar_enabled;
+	bool vht160_supported;
+	struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
+	struct ieee80211_channel channels[];
+};
+
+/**
+ * iwl_parse_eeprom_data - parse EEPROM data and return values
+ *
+ * @dev: device pointer we're parsing for, for debug only
+ * @cfg: device configuration for parsing and overrides
+ * @eeprom: the EEPROM data
+ * @eeprom_size: length of the EEPROM data
+ *
+ * This function parses all EEPROM values we need and then
+ * returns a (newly allocated) struct containing all the
+ * relevant values for driver use. The struct must be freed
+ * later with iwl_free_nvm_data().
+ */
+struct iwl_nvm_data *
+iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
+		      const u8 *eeprom, size_t eeprom_size);
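+
+/*
+ * Typical usage (sketch): read the NVM image with iwl_read_eeprom()
+ * (declared in iwl-eeprom-read.h) and hand it to the parser; "trans" is
+ * assumed here, as in the actual callers:
+ *
+ *	u8 *eeprom;
+ *	size_t eeprom_size;
+ *	struct iwl_nvm_data *nvm_data;
+ *
+ *	if (iwl_read_eeprom(trans, &eeprom, &eeprom_size))
+ *		return;
+ *	nvm_data = iwl_parse_eeprom_data(trans->dev, trans->cfg,
+ *					 eeprom, eeprom_size);
+ *	kfree(eeprom);
+ */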
+
+int iwl_nvm_check_version(struct iwl_nvm_data *data,
+			  struct iwl_trans *trans);
+
+int iwl_init_sband_channels(struct iwl_nvm_data *data,
+			    struct ieee80211_supported_band *sband,
+			    int n_channels, enum nl80211_band band);
+
+void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
+			  struct iwl_nvm_data *data,
+			  struct ieee80211_sta_ht_cap *ht_info,
+			  enum nl80211_band band,
+			  u8 tx_chains, u8 rx_chains);
+
+#endif /* __iwl_eeprom_parse_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c
new file mode 100644
index 0000000..ac965c3
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c
@@ -0,0 +1,466 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
+#include "iwl-drv.h"
+#include "iwl-debug.h"
+#include "iwl-eeprom-read.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
+#include "iwl-csr.h"
+
+/*
+ * EEPROM access time values:
+ *
+ * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
+ * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
+ * When polling, wait 10 uSec between polling loops, up to a maximum of
+ * 5000 uSec.
+ * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
+ */
+#define IWL_EEPROM_ACCESS_TIMEOUT	5000 /* uSec */
+
+#define IWL_EEPROM_SEM_TIMEOUT		10   /* microseconds */
+#define IWL_EEPROM_SEM_RETRY_LIMIT	1000 /* number of attempts (not time) */
+
+
+/*
+ * The device's EEPROM semaphore prevents conflicts between driver and uCode
+ * when accessing the EEPROM; each access is a series of pulses to/from the
+ * EEPROM chip, not a single event, so even reads could conflict if they
+ * weren't arbitrated by the semaphore.
+ */
+
+#define	EEPROM_SEM_TIMEOUT 10		/* milliseconds */
+#define EEPROM_SEM_RETRY_LIMIT 1000	/* number of attempts (not time) */
+
+static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans)
+{
+	u16 count;
+	int ret;
+
+	for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
+		/* Request semaphore */
+		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+			    CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+
+		/* See if we got it */
+		ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
+				CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
+				CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
+				EEPROM_SEM_TIMEOUT);
+		if (ret >= 0) {
+			IWL_DEBUG_EEPROM(trans->dev,
+					 "Acquired semaphore after %d tries.\n",
+					 count + 1);
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
+static void iwl_eeprom_release_semaphore(struct iwl_trans *trans)
+{
+	iwl_clear_bit(trans, CSR_HW_IF_CONFIG_REG,
+		      CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+}
+
+static int iwl_eeprom_verify_signature(struct iwl_trans *trans, bool nvm_is_otp)
+{
+	u32 gp = iwl_read32(trans, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
+
+	IWL_DEBUG_EEPROM(trans->dev, "EEPROM signature=0x%08x\n", gp);
+
+	switch (gp) {
+	case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP:
+		if (!nvm_is_otp) {
+			IWL_ERR(trans, "EEPROM with bad signature: 0x%08x\n",
+				gp);
+			return -ENOENT;
+		}
+		return 0;
+	case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
+	case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
+		if (nvm_is_otp) {
+			IWL_ERR(trans, "OTP with bad signature: 0x%08x\n", gp);
+			return -ENOENT;
+		}
+		return 0;
+	case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP:
+	default:
+		IWL_ERR(trans,
+			"bad EEPROM/OTP signature, type=%s, EEPROM_GP=0x%08x\n",
+			nvm_is_otp ? "OTP" : "EEPROM", gp);
+		return -ENOENT;
+	}
+}
+
+/******************************************************************************
+ *
+ * OTP related functions
+ *
+******************************************************************************/
+
+static void iwl_set_otp_access_absolute(struct iwl_trans *trans)
+{
+	iwl_read32(trans, CSR_OTP_GP_REG);
+
+	iwl_clear_bit(trans, CSR_OTP_GP_REG,
+		      CSR_OTP_GP_REG_OTP_ACCESS_MODE);
+}
+
+static int iwl_nvm_is_otp(struct iwl_trans *trans)
+{
+	u32 otpgp;
+
+	/* OTP only valid for CP/PP and after */
+	switch (trans->hw_rev & CSR_HW_REV_TYPE_MSK) {
+	case CSR_HW_REV_TYPE_NONE:
+		IWL_ERR(trans, "Unknown hardware type\n");
+		return -EIO;
+	case CSR_HW_REV_TYPE_5300:
+	case CSR_HW_REV_TYPE_5350:
+	case CSR_HW_REV_TYPE_5100:
+	case CSR_HW_REV_TYPE_5150:
+		return 0;
+	default:
+		otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
+		if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT)
+			return 1;
+		return 0;
+	}
+}
+
+static int iwl_init_otp_access(struct iwl_trans *trans)
+{
+	int ret;
+
+	/* Enable 40MHz radio clock */
+	iwl_write32(trans, CSR_GP_CNTRL,
+		    iwl_read32(trans, CSR_GP_CNTRL) |
+		    BIT(trans->cfg->csr->flag_init_done));
+
+	/* wait for clock to be ready */
+	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+			   BIT(trans->cfg->csr->flag_mac_clock_ready),
+			   BIT(trans->cfg->csr->flag_mac_clock_ready),
+			   25000);
+	if (ret < 0) {
+		IWL_ERR(trans, "Timeout accessing OTP\n");
+	} else {
+		iwl_set_bits_prph(trans, APMG_PS_CTRL_REG,
+				  APMG_PS_CTRL_VAL_RESET_REQ);
+		udelay(5);
+		iwl_clear_bits_prph(trans, APMG_PS_CTRL_REG,
+				    APMG_PS_CTRL_VAL_RESET_REQ);
+
+		/*
+		 * CSR auto clock gate disable bit -
+		 * this is only applicable for HW with OTP shadow RAM
+		 */
+		if (trans->cfg->base_params->shadow_ram_support)
+			iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+				    CSR_RESET_LINK_PWR_MGMT_DISABLED);
+	}
+	return ret;
+}
+
+static int iwl_read_otp_word(struct iwl_trans *trans, u16 addr,
+			     __le16 *eeprom_data)
+{
+	int ret = 0;
+	u32 r;
+	u32 otpgp;
+
+	iwl_write32(trans, CSR_EEPROM_REG,
+		    CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
+	ret = iwl_poll_bit(trans, CSR_EEPROM_REG,
+				 CSR_EEPROM_REG_READ_VALID_MSK,
+				 CSR_EEPROM_REG_READ_VALID_MSK,
+				 IWL_EEPROM_ACCESS_TIMEOUT);
+	if (ret < 0) {
+		IWL_ERR(trans, "Timeout reading OTP[%d]\n", addr);
+		return ret;
+	}
+	r = iwl_read32(trans, CSR_EEPROM_REG);
+	/* check for ECC errors: */
+	otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
+	if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) {
+		/* stop in this case */
+		/* set the uncorrectable OTP ECC bit for acknowledgment */
+		iwl_set_bit(trans, CSR_OTP_GP_REG,
+			    CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
+		IWL_ERR(trans, "Uncorrectable OTP ECC error, abort OTP read\n");
+		return -EINVAL;
+	}
+	if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) {
+		/* continue in this case */
+		/* set the correctable OTP ECC bit for acknowledgment */
+		iwl_set_bit(trans, CSR_OTP_GP_REG,
+			    CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
+		IWL_ERR(trans, "Correctable OTP ECC error, continue read\n");
+	}
+	*eeprom_data = cpu_to_le16(r >> 16);
+	return 0;
+}
+
+/*
+ * iwl_is_otp_empty: check for empty OTP
+ */
+static bool iwl_is_otp_empty(struct iwl_trans *trans)
+{
+	u16 next_link_addr = 0;
+	__le16 link_value;
+	bool is_empty = false;
+
+	/* locate the beginning of OTP link list */
+	if (!iwl_read_otp_word(trans, next_link_addr, &link_value)) {
+		if (!link_value) {
+			IWL_ERR(trans, "OTP is empty\n");
+			is_empty = true;
+		}
+	} else {
+		IWL_ERR(trans, "Unable to read first block of OTP list.\n");
+		is_empty = true;
+	}
+
+	return is_empty;
+}
+
+
+/*
+ * iwl_find_otp_image: find the EEPROM image in OTP
+ *   Find the OTP block that contains the EEPROM image: the last valid
+ *   block on the link list (the block _before_ the last block) is the
+ *   block we should read and use to configure the device.
+ *   If all the available OTP blocks are full, the last block is the one
+ *   we should read and use to configure the device.
+ *   Only perform this operation if shadow RAM is disabled.
+ */
+static int iwl_find_otp_image(struct iwl_trans *trans,
+					u16 *validblockaddr)
+{
+	u16 next_link_addr = 0, valid_addr;
+	__le16 link_value = 0;
+	int usedblocks = 0;
+
+	/* set addressing mode to absolute to traverse the link list */
+	iwl_set_otp_access_absolute(trans);
+
+	/* checking for empty OTP or error */
+	if (iwl_is_otp_empty(trans))
+		return -EINVAL;
+
+	/*
+	 * Traverse the link list until we reach the maximum number of
+	 * OTP blocks; different devices have different numbers of OTP
+	 * blocks.
+	 */
+	do {
+		/* save the current valid block address and
+		 * check for more blocks on the link list
+		 */
+		valid_addr = next_link_addr;
+		next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
+		IWL_DEBUG_EEPROM(trans->dev, "OTP blocks %d addr 0x%x\n",
+				 usedblocks, next_link_addr);
+		if (iwl_read_otp_word(trans, next_link_addr, &link_value))
+			return -EINVAL;
+		if (!link_value) {
+			/*
+			 * We reached the end of the link list; return
+			 * success and set the address to point to the
+			 * start of the image.
+			 */
+			*validblockaddr = valid_addr;
+			/* skip first 2 bytes (link list pointer) */
+			*validblockaddr += 2;
+			return 0;
+		}
+		/* more in the link list, continue */
+		usedblocks++;
+	} while (usedblocks <= trans->cfg->base_params->max_ll_items);
+
+	/* OTP has no valid blocks */
+	IWL_DEBUG_EEPROM(trans->dev, "OTP has no valid blocks\n");
+	return -EINVAL;
+}
+
+/**
+ * iwl_read_eeprom - read EEPROM contents
+ *
+ * Load the EEPROM contents from the adapter and return them
+ * along with their size.
+ *
+ * NOTE:  This routine uses the non-debug IO access functions.
+ */
+int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size)
+{
+	__le16 *e;
+	u32 gp = iwl_read32(trans, CSR_EEPROM_GP);
+	int sz;
+	int ret;
+	u16 addr;
+	u16 validblockaddr = 0;
+	u16 cache_addr = 0;
+	int nvm_is_otp;
+
+	if (!eeprom || !eeprom_size)
+		return -EINVAL;
+
+	nvm_is_otp = iwl_nvm_is_otp(trans);
+	if (nvm_is_otp < 0)
+		return nvm_is_otp;
+
+	sz = trans->cfg->base_params->eeprom_size;
+	IWL_DEBUG_EEPROM(trans->dev, "NVM size = %d\n", sz);
+
+	e = kmalloc(sz, GFP_KERNEL);
+	if (!e)
+		return -ENOMEM;
+
+	ret = iwl_eeprom_verify_signature(trans, nvm_is_otp);
+	if (ret < 0) {
+		IWL_ERR(trans, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
+		goto err_free;
+	}
+
+	/* Make sure driver (instead of uCode) is allowed to read EEPROM */
+	ret = iwl_eeprom_acquire_semaphore(trans);
+	if (ret < 0) {
+		IWL_ERR(trans, "Failed to acquire EEPROM semaphore.\n");
+		goto err_free;
+	}
+
+	if (nvm_is_otp) {
+		ret = iwl_init_otp_access(trans);
+		if (ret) {
+			IWL_ERR(trans, "Failed to initialize OTP access.\n");
+			goto err_unlock;
+		}
+
+		iwl_write32(trans, CSR_EEPROM_GP,
+			    iwl_read32(trans, CSR_EEPROM_GP) &
+			    ~CSR_EEPROM_GP_IF_OWNER_MSK);
+
+		iwl_set_bit(trans, CSR_OTP_GP_REG,
+			    CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
+			    CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
+		/* traverse the linked list if shadow RAM is not supported */
+		if (!trans->cfg->base_params->shadow_ram_support) {
+			ret = iwl_find_otp_image(trans, &validblockaddr);
+			if (ret)
+				goto err_unlock;
+		}
+		for (addr = validblockaddr; addr < validblockaddr + sz;
+		     addr += sizeof(u16)) {
+			__le16 eeprom_data;
+
+			ret = iwl_read_otp_word(trans, addr, &eeprom_data);
+			if (ret)
+				goto err_unlock;
+			e[cache_addr / 2] = eeprom_data;
+			cache_addr += sizeof(u16);
+		}
+	} else {
+		/* the EEPROM is an array of 16-bit values */
+		for (addr = 0; addr < sz; addr += sizeof(u16)) {
+			u32 r;
+
+			iwl_write32(trans, CSR_EEPROM_REG,
+				    CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
+
+			ret = iwl_poll_bit(trans, CSR_EEPROM_REG,
+					   CSR_EEPROM_REG_READ_VALID_MSK,
+					   CSR_EEPROM_REG_READ_VALID_MSK,
+					   IWL_EEPROM_ACCESS_TIMEOUT);
+			if (ret < 0) {
+				IWL_ERR(trans,
+					"Timeout reading EEPROM[%d]\n", addr);
+				goto err_unlock;
+			}
+			r = iwl_read32(trans, CSR_EEPROM_REG);
+			e[addr / 2] = cpu_to_le16(r >> 16);
+		}
+	}
+
+	IWL_DEBUG_EEPROM(trans->dev, "NVM Type: %s\n",
+			 nvm_is_otp ? "OTP" : "EEPROM");
+
+	iwl_eeprom_release_semaphore(trans);
+
+	*eeprom_size = sz;
+	*eeprom = (u8 *)e;
+	return 0;
+
+ err_unlock:
+	iwl_eeprom_release_semaphore(trans);
+ err_free:
+	kfree(e);
+
+	return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_read_eeprom);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h
new file mode 100644
index 0000000..1ed78be
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h
@@ -0,0 +1,70 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __iwl_eeprom_h__
+#define __iwl_eeprom_h__
+
+#include "iwl-trans.h"
+
+int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size);
+
+#endif  /* __iwl_eeprom_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
new file mode 100644
index 0000000..df0e9ff
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -0,0 +1,781 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_fh_h__
+#define __iwl_fh_h__
+
+#include <linux/types.h>
+#include <linux/bitfield.h>
+
+/****************************/
+/* Flow Handler Definitions */
+/****************************/
+
+/**
+ * This I/O area is directly read/writable by the driver (e.g. Linux uses
+ * writel()).  Addresses are offsets from device's PCI hardware base address.
+ */
+#define FH_MEM_LOWER_BOUND                   (0x1000)
+#define FH_MEM_UPPER_BOUND                   (0x2000)
+#define FH_MEM_LOWER_BOUND_GEN2              (0xa06000)
+#define FH_MEM_UPPER_BOUND_GEN2              (0xa08000)
+
+/**
+ * Keep-Warm (KW) buffer base address.
+ *
+ * Driver must allocate a 4 KByte buffer that is used to keep the
+ * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
+ * DRAM access during Tx and Rx.  The dummy accesses prevent the host
+ * from going into a power-savings mode that would cause higher DRAM latency,
+ * and possible data over/under-runs, before all Tx/Rx is complete.
+ *
+ * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4)
+ * of the buffer, which must be 4K aligned.  Once this is set up, the device
+ * automatically invokes keep-warm accesses when normal accesses might not
+ * be sufficient to maintain fast DRAM response.
+ *
+ * Bit fields:
+ *  31-0:  Keep-warm buffer physical base address [35:4], must be 4K aligned
+ */
+#define FH_KW_MEM_ADDR_REG		     (FH_MEM_LOWER_BOUND + 0x97C)
+
+
+/**
+ * TFD Circular Buffers Base (CBBC) addresses
+ *
+ * Device has 16 base pointer registers, one for each of 16 host-DRAM-resident
+ * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
+ * (see struct iwl_tfd_frame).  These 16 pointer registers are offset by 0x04
+ * bytes from one another.  Each TFD circular buffer in DRAM must be 256-byte
+ * aligned (address bits 0-7 must be 0).
+ * Later devices have 20 (5000 series) or 30 (higher) queues, but the registers
+ * for them are in different places.
+ *
+ * Bit fields in each pointer register:
+ *  27-0: TFD CB physical base address [35:8], must be 256-byte aligned
+ */
+#define FH_MEM_CBBC_0_15_LOWER_BOUND		(FH_MEM_LOWER_BOUND + 0x9D0)
+#define FH_MEM_CBBC_0_15_UPPER_BOUND		(FH_MEM_LOWER_BOUND + 0xA10)
+#define FH_MEM_CBBC_16_19_LOWER_BOUND		(FH_MEM_LOWER_BOUND + 0xBF0)
+#define FH_MEM_CBBC_16_19_UPPER_BOUND		(FH_MEM_LOWER_BOUND + 0xC00)
+#define FH_MEM_CBBC_20_31_LOWER_BOUND		(FH_MEM_LOWER_BOUND + 0xB20)
+#define FH_MEM_CBBC_20_31_UPPER_BOUND		(FH_MEM_LOWER_BOUND + 0xB80)
+/* 22000 TFD table address, 64 bit */
+#define TFH_TFDQ_CBB_TABLE			(0x1C00)
+
+/* Find TFD CB base pointer for given queue */
+static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
+					     unsigned int chnl)
+{
+	if (trans->cfg->use_tfh) {
+		WARN_ON_ONCE(chnl >= 64);
+		return TFH_TFDQ_CBB_TABLE + 8 * chnl;
+	}
+	if (chnl < 16)
+		return FH_MEM_CBBC_0_15_LOWER_BOUND + 4 * chnl;
+	if (chnl < 20)
+		return FH_MEM_CBBC_16_19_LOWER_BOUND + 4 * (chnl - 16);
+	WARN_ON_ONCE(chnl >= 32);
+	return FH_MEM_CBBC_20_31_LOWER_BOUND + 4 * (chnl - 20);
+}
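+
+/*
+ * For example (illustrative): on a non-TFH device, Tx DMA channel 18 maps
+ * to FH_MEM_CBBC_16_19_LOWER_BOUND + 8 = 0x1bf8.
+ */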
+
+/* 22000 configuration registers */
+
+/*
+ * TFH Configuration register.
+ *
+ * BIT fields:
+ *
+ * Bits 3:0:
+ * Define the maximum number of pending read requests.
+ * Maximum configuration value allowed is 0xC
+ * Bits 9:8:
+ * Define the maximum transfer size. (64 / 128 / 256)
+ * Bit 10:
+ * When bit is set and transfer size is set to 128B, the TFH will enable
+ * reading chunks of more than 64B only if the read address is aligned to 128B.
+ * In case of DRAM read address which is not aligned to 128B, the TFH will
+ * enable transfer size which doesn't cross 64B DRAM address boundary.
+*/
+#define TFH_TRANSFER_MODE		(0x1F40)
+#define TFH_TRANSFER_MAX_PENDING_REQ	0xc
+#define TFH_CHUNK_SIZE_128			BIT(8)
+#define TFH_CHUNK_SPLIT_MODE		BIT(10)
+/*
+ * Defines the offset address, in dwords, relative to the beginning of the
+ * Tx CMD that will be updated in DRAM.
+ * Note that the TFH offset address for Tx CMD update is always referring to
+ * the start of the TFD first TB.
+ * In case of a DRAM Tx CMD update the TFH will update PN and Key ID
+ */
+#define TFH_TXCMD_UPDATE_CFG		(0x1F48)
+/*
+ * Controls TX DMA operation
+ *
+ * BIT fields:
+ *
+ * Bits 31:30: Enable the SRAM DMA channel.
+ * Turning on bit 31 will kick the SRAM2DRAM DMA.
+ * Note that SRAM2DRAM DMA may be enabled only after configuring the DRAM and
+ * SRAM addresses registers and the byte count register.
+ * Bits 25:24: Defines the interrupt target upon dram2sram transfer done. When
+ * set to 1 - interrupt is sent to the driver
+ * Bit 0: Indicates the snoop configuration
+*/
+#define TFH_SRV_DMA_CHNL0_CTRL	(0x1F60)
+#define TFH_SRV_DMA_SNOOP	BIT(0)
+#define TFH_SRV_DMA_TO_DRIVER	BIT(24)
+#define TFH_SRV_DMA_START	BIT(31)
+
+/* Defines the DMA SRAM write start address to transfer a data block */
+#define TFH_SRV_DMA_CHNL0_SRAM_ADDR	(0x1F64)
+
+/* Defines the 64bits DRAM start address to read the DMA data block from */
+#define TFH_SRV_DMA_CHNL0_DRAM_ADDR	(0x1F68)
+
+/*
+ * Defines the number of bytes to transfer from DRAM to SRAM.
+ * Note that this register may be configured with non-dword aligned size.
+ */
+#define TFH_SRV_DMA_CHNL0_BC	(0x1F70)
+
+/**
+ * Rx SRAM Control and Status Registers (RSCSR)
+ *
+ * These registers provide handshake between driver and device for the Rx queue
+ * (this queue handles *all* command responses, notifications, Rx data, etc.
+ * sent from uCode to host driver).  Unlike Tx, there is only one Rx
+ * queue, and only one Rx DMA/FIFO channel.  Also unlike Tx, which can
+ * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
+ * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
+ * mapping between RBDs and RBs.
+ *
+ * Driver must allocate host DRAM memory for the following, and set the
+ * physical address of each into device registers:
+ *
+ * 1)  Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
+ *     entries (although any power of 2, up to 4096, is selectable by driver).
+ *     Each entry (1 dword) points to a receive buffer (RB) of consistent size
+ *     (typically 4K, although 8K or 16K are also selectable by driver).
+ *     Driver sets up RB size and number of RBDs in the CB via Rx config
+ *     register FH_MEM_RCSR_CHNL0_CONFIG_REG.
+ *
+ *     Bit fields within one RBD:
+ *     27-0:  Receive Buffer physical address bits [35:8], 256-byte aligned
+ *
+ *     Driver sets physical address [35:8] of base of RBD circular buffer
+ *     into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
+ *
+ * 2)  Rx status buffer, 8 bytes, in which uCode indicates which Rx Buffers
+ *     (RBs) have been filled, via a "write pointer", actually the index of
+ *     the RB's corresponding RBD within the circular buffer.  Driver sets
+ *     physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
+ *
+ *     Bit fields in lower dword of Rx status buffer (upper dword not used
+ *     by the driver):
+ *     31-12:  Not used by driver
+ *     11- 0:  Index of last filled Rx buffer descriptor
+ *             (device writes, driver reads this value)
+ *
+ * As the driver prepares Receive Buffers (RBs) for device to fill, driver must
+ * enter pointers to these RBs into contiguous RBD circular buffer entries,
+ * and update the device's "write" index register,
+ * FH_RSCSR_CHNL0_RBDCB_WPTR_REG.
+ *
+ * This "write" index corresponds to the *next* RBD that the driver will make
+ * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
+ * the circular buffer.  This value should initially be 0 (before preparing any
+ * RBs), should be 8 after preparing the first 8 RBs (for example), and must
+ * wrap back to 0 at the end of the circular buffer (but don't wrap before
+ * "read" index has advanced past 1!  See below).
+ * NOTE:  DEVICE EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8.
+ *
+ * As the device fills RBs (referenced from contiguous RBDs within the circular
+ * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
+ * to tell the driver the index of the latest filled RBD.  The driver must
+ * read this "read" index from DRAM after receiving an Rx interrupt from the
+ * device.
+ *
+ * The driver must also internally keep track of a third index, which is the
+ * next RBD to process.  When receiving an Rx interrupt, driver should process
+ * all filled but unprocessed RBs up to, but not including, the RB
+ * corresponding to the "read" index.  For example, if "read" index becomes "1",
+ * driver may process the RB pointed to by RBD 0.  Depending on volume of
+ * traffic, there may be many RBs to process.
+ *
+ * If read index == write index, device thinks there is no room to put new data.
+ * Due to this, the maximum number of filled RBs is 255, instead of 256.  To
+ * be safe, make sure that there is a gap of at least 2 RBDs between "write"
+ * and "read" indexes; that is, make sure that there are no more than 254
+ * buffers waiting to be filled.
+ */
+#define FH_MEM_RSCSR_LOWER_BOUND	(FH_MEM_LOWER_BOUND + 0xBC0)
+#define FH_MEM_RSCSR_UPPER_BOUND	(FH_MEM_LOWER_BOUND + 0xC00)
+#define FH_MEM_RSCSR_CHNL0		(FH_MEM_RSCSR_LOWER_BOUND)
+
+/**
+ * Physical base address of 8-byte Rx Status buffer.
+ * Bit fields:
+ *  31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned.
+ */
+#define FH_RSCSR_CHNL0_STTS_WPTR_REG	(FH_MEM_RSCSR_CHNL0)
+
+/**
+ * Physical base address of Rx Buffer Descriptor Circular Buffer.
+ * Bit fields:
+ *  27-0:  RBD CB physical base address [35:8], must be 256-byte aligned.
+ */
+#define FH_RSCSR_CHNL0_RBDCB_BASE_REG	(FH_MEM_RSCSR_CHNL0 + 0x004)
+
+/**
+ * Rx write pointer (index, really!).
+ * Bit fields:
+ *  11-0:  Index of driver's most recent prepared-to-be-filled RBD, + 1.
+ *         NOTE:  For 256-entry circular buffer, use only bits [7:0].
+ */
+#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG	(FH_MEM_RSCSR_CHNL0 + 0x008)
+#define FH_RSCSR_CHNL0_WPTR        (FH_RSCSR_CHNL0_RBDCB_WPTR_REG)
+
+#define FW_RSCSR_CHNL0_RXDCB_RDPTR_REG	(FH_MEM_RSCSR_CHNL0 + 0x00c)
+#define FH_RSCSR_CHNL0_RDPTR		FW_RSCSR_CHNL0_RXDCB_RDPTR_REG
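+
+/*
+ * Illustrative sketch (not driver code) of the "full vs. empty" rule
+ * described above: since read == write means "no room" to the device, a
+ * 256-entry ring holds at most 255 filled RBDs:
+ *
+ *	static inline u32 rbd_room(u32 read, u32 write)
+ *	{
+ *		return (read - write - 1) & (RX_QUEUE_SIZE - 1);
+ *	}
+ *
+ * rbd_room(0, 0) == 255; keeping the recommended gap of 2 RBDs caps the
+ * number of buffers waiting to be filled at 254.
+ */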
+
+/**
+ * Rx Config/Status Registers (RCSR)
+ * Rx Config Reg for channel 0 (only channel used)
+ *
+ * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for
+ * normal operation (see bit fields).
+ *
+ * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
+ * Driver should poll FH_MEM_RSSR_RX_STATUS_REG	for
+ * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
+ *
+ * Bit fields:
+ * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
+ *        '10' operate normally
+ * 29-24: reserved
+ * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
+ *        min "5" for 32 RBDs, max "12" for 4096 RBDs.
+ * 19-18: reserved
+ * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
+ *        '10' 12K, '11' 16K.
+ * 15-14: reserved
+ * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
+ * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
+ *        typical value 0x10 (about 1/2 msec)
+ *  3- 0: reserved
+ */
+#define FH_MEM_RCSR_LOWER_BOUND      (FH_MEM_LOWER_BOUND + 0xC00)
+#define FH_MEM_RCSR_UPPER_BOUND      (FH_MEM_LOWER_BOUND + 0xCC0)
+#define FH_MEM_RCSR_CHNL0            (FH_MEM_RCSR_LOWER_BOUND)
+
+#define FH_MEM_RCSR_CHNL0_CONFIG_REG	(FH_MEM_RCSR_CHNL0)
+#define FH_MEM_RCSR_CHNL0_RBDCB_WPTR	(FH_MEM_RCSR_CHNL0 + 0x8)
+#define FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ	(FH_MEM_RCSR_CHNL0 + 0x10)
+
+#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
+#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK   (0x00001000) /* bits 12 */
+#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */
+#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK   (0x00030000) /* bits 16-17 */
+#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */
+#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31*/
+
+#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS	(20)
+#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS	(4)
+#define RX_RB_TIMEOUT	(0x11)
+
+#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL         (0x00000000)
+#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL     (0x40000000)
+#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL        (0x80000000)
+
+#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K    (0x00000000)
+#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K    (0x00010000)
+#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K   (0x00020000)
+#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K   (0x00030000)
+
+#define FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY              (0x00000004)
+#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL    (0x00000000)
+#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL  (0x00001000)
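+
+/*
+ * Minimal sketch of the initialization described above (illustrative; the
+ * real setup lives in the PCIe transport and "trans" is assumed):
+ *
+ *	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
+ *		    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+ *		    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+ *		    FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
+ *		    (8 << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS) |
+ *		    (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS));
+ *
+ * i.e. enable the channel, interrupt the host, use 4K RBs, 256 RBDs (2^8)
+ * and the typical RB timeout.
+ */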
+
+/**
+ * Rx Shared Status Registers (RSSR)
+ *
+ * After stopping Rx DMA channel (writing 0 to
+ * FH_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
+ * FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
+ *
+ * Bit fields:
+ *  24:  1 = Channel 0 is idle
+ *
+ * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
+ * contain default values that should not be altered by the driver.
+ */
+#define FH_MEM_RSSR_LOWER_BOUND           (FH_MEM_LOWER_BOUND + 0xC40)
+#define FH_MEM_RSSR_UPPER_BOUND           (FH_MEM_LOWER_BOUND + 0xD00)
+
+#define FH_MEM_RSSR_SHARED_CTRL_REG       (FH_MEM_RSSR_LOWER_BOUND)
+#define FH_MEM_RSSR_RX_STATUS_REG	(FH_MEM_RSSR_LOWER_BOUND + 0x004)
+#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
+					(FH_MEM_RSSR_LOWER_BOUND + 0x008)
+
+#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE	(0x01000000)
+
+#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT	28
+#define FH_MEM_TB_MAX_LENGTH			(0x00020000)
+
+/* 9000 rx series registers */
+
+#define RFH_Q0_FRBDCB_BA_LSB 0xA08000 /* 64 bit address */
+#define RFH_Q_FRBDCB_BA_LSB(q) (RFH_Q0_FRBDCB_BA_LSB + (q) * 8)
+/* Write index table */
+#define RFH_Q0_FRBDCB_WIDX 0xA08080
+#define RFH_Q_FRBDCB_WIDX(q) (RFH_Q0_FRBDCB_WIDX + (q) * 4)
+/* Write index table - shadow registers */
+#define RFH_Q0_FRBDCB_WIDX_TRG 0x1C80
+#define RFH_Q_FRBDCB_WIDX_TRG(q) (RFH_Q0_FRBDCB_WIDX_TRG + (q) * 4)
+/* Read index table */
+#define RFH_Q0_FRBDCB_RIDX 0xA080C0
+#define RFH_Q_FRBDCB_RIDX(q) (RFH_Q0_FRBDCB_RIDX + (q) * 4)
+/* Used list table */
+#define RFH_Q0_URBDCB_BA_LSB 0xA08100 /* 64 bit address */
+#define RFH_Q_URBDCB_BA_LSB(q) (RFH_Q0_URBDCB_BA_LSB + (q) * 8)
+/* Write index table */
+#define RFH_Q0_URBDCB_WIDX 0xA08180
+#define RFH_Q_URBDCB_WIDX(q) (RFH_Q0_URBDCB_WIDX + (q) * 4)
+#define RFH_Q0_URBDCB_VAID 0xA081C0
+#define RFH_Q_URBDCB_VAID(q) (RFH_Q0_URBDCB_VAID + (q) * 4)
+/* stts */
+#define RFH_Q0_URBD_STTS_WPTR_LSB 0xA08200 /* 64 bit address */
+#define RFH_Q_URBD_STTS_WPTR_LSB(q) (RFH_Q0_URBD_STTS_WPTR_LSB + (q) * 8)
+
+#define RFH_Q0_ORB_WPTR_LSB 0xA08280
+#define RFH_Q_ORB_WPTR_LSB(q) (RFH_Q0_ORB_WPTR_LSB + (q) * 8)
+#define RFH_RBDBUF_RBD0_LSB 0xA08300
+#define RFH_RBDBUF_RBD_LSB(q) (RFH_RBDBUF_RBD0_LSB + (q) * 8)
+
+/**
+ * RFH Status Register
+ *
+ * Bit fields:
+ *
+ * Bit 29: RBD_FETCH_IDLE
+ * This status flag is set by the RFH when there is no active RBD fetch from
+ * DRAM.
+ * Once the RFH RBD controller starts fetching (or when there is a pending
+ * RBD read response from DRAM), this flag is immediately turned off.
+ *
+ * Bit 30: SRAM_DMA_IDLE
+ * This status flag is set by the RFH when there is no active transaction from
+ * SRAM to DRAM.
+ * Once the SRAM to DRAM DMA is active, this flag is immediately turned off.
+ *
+ * Bit 31: RXF_DMA_IDLE
+ * This status flag is set by the RFH when there is no active transaction from
+ * RXF to DRAM.
+ * Once the RXF-to-DRAM DMA is active, this flag is immediately turned off.
+ */
+#define RFH_GEN_STATUS		0xA09808
+#define RFH_GEN_STATUS_GEN3	0xA07824
+#define RBD_FETCH_IDLE	BIT(29)
+#define SRAM_DMA_IDLE	BIT(30)
+#define RXF_DMA_IDLE	BIT(31)
+
+/* DMA configuration */
+#define RFH_RXF_DMA_CFG		0xA09820
+#define RFH_RXF_DMA_CFG_GEN3	0xA07880
+/* RB size */
+#define RFH_RXF_DMA_RB_SIZE_MASK (0x000F0000) /* bits 16-19 */
+#define RFH_RXF_DMA_RB_SIZE_POS 16
+#define RFH_RXF_DMA_RB_SIZE_1K	(0x1 << RFH_RXF_DMA_RB_SIZE_POS)
+#define RFH_RXF_DMA_RB_SIZE_2K	(0x2 << RFH_RXF_DMA_RB_SIZE_POS)
+#define RFH_RXF_DMA_RB_SIZE_4K	(0x4 << RFH_RXF_DMA_RB_SIZE_POS)
+#define RFH_RXF_DMA_RB_SIZE_8K	(0x8 << RFH_RXF_DMA_RB_SIZE_POS)
+#define RFH_RXF_DMA_RB_SIZE_12K	(0x9 << RFH_RXF_DMA_RB_SIZE_POS)
+#define RFH_RXF_DMA_RB_SIZE_16K	(0xA << RFH_RXF_DMA_RB_SIZE_POS)
+#define RFH_RXF_DMA_RB_SIZE_20K	(0xB << RFH_RXF_DMA_RB_SIZE_POS)
+#define RFH_RXF_DMA_RB_SIZE_24K	(0xC << RFH_RXF_DMA_RB_SIZE_POS)
+#define RFH_RXF_DMA_RB_SIZE_28K	(0xD << RFH_RXF_DMA_RB_SIZE_POS)
+#define RFH_RXF_DMA_RB_SIZE_32K	(0xE << RFH_RXF_DMA_RB_SIZE_POS)
+/* RB Circular Buffer size: defines the table sizes in RBD units */
+#define RFH_RXF_DMA_RBDCB_SIZE_MASK (0x00F00000) /* bits 20-23 */
+#define RFH_RXF_DMA_RBDCB_SIZE_POS 20
+#define RFH_RXF_DMA_RBDCB_SIZE_8	(0x3 << RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define RFH_RXF_DMA_RBDCB_SIZE_16	(0x4 << RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define RFH_RXF_DMA_RBDCB_SIZE_32	(0x5 << RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define RFH_RXF_DMA_RBDCB_SIZE_64	(0x6 << RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define RFH_RXF_DMA_RBDCB_SIZE_128	(0x7 << RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define RFH_RXF_DMA_RBDCB_SIZE_256	(0x8 << RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define RFH_RXF_DMA_RBDCB_SIZE_512	(0x9 << RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define RFH_RXF_DMA_RBDCB_SIZE_1024	(0xA << RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define RFH_RXF_DMA_RBDCB_SIZE_2048	(0xB << RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define RFH_RXF_DMA_MIN_RB_SIZE_MASK	(0x03000000) /* bit 24-25 */
+#define RFH_RXF_DMA_MIN_RB_SIZE_POS	24
+#define RFH_RXF_DMA_MIN_RB_4_8		(3 << RFH_RXF_DMA_MIN_RB_SIZE_POS)
+#define RFH_RXF_DMA_DROP_TOO_LARGE_MASK	(0x04000000) /* bit 26 */
+#define RFH_RXF_DMA_SINGLE_FRAME_MASK	(0x20000000) /* bit 29 */
+#define RFH_DMA_EN_MASK			(0xC0000000) /* bits 30-31*/
+#define RFH_DMA_EN_ENABLE_VAL		BIT(31)
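+
+/*
+ * Sketch of a typical multi-queue DMA configuration built from the fields
+ * above (illustrative; the real sequence in the PCIe transport also grabs
+ * NIC access and programs the per-queue registers):
+ *
+ *	iwl_write_prph(trans, RFH_RXF_DMA_CFG,
+ *		       RFH_DMA_EN_ENABLE_VAL |
+ *		       RFH_RXF_DMA_RB_SIZE_4K |
+ *		       RFH_RXF_DMA_MIN_RB_4_8 |
+ *		       RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
+ *		       RFH_RXF_DMA_RBDCB_SIZE_512);
+ */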
+
+#define RFH_RXF_RXQ_ACTIVE 0xA0980C
+
+#define RFH_GEN_CFG	0xA09800
+#define RFH_GEN_CFG_SERVICE_DMA_SNOOP	BIT(0)
+#define RFH_GEN_CFG_RFH_DMA_SNOOP	BIT(1)
+#define RFH_GEN_CFG_RB_CHUNK_SIZE	BIT(4)
+#define RFH_GEN_CFG_RB_CHUNK_SIZE_128	1
+#define RFH_GEN_CFG_RB_CHUNK_SIZE_64	0
+/* the driver assumes everywhere that the default RXQ is 0 */
+#define RFH_GEN_CFG_DEFAULT_RXQ_NUM	0xF00
+#define RFH_GEN_CFG_VAL(_n, _v)		FIELD_PREP(RFH_GEN_CFG_ ## _n, _v)
+
+/* end of 9000 rx series registers */
+
+/* TFDB  Area - TFDs buffer table */
+#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK      (0xFFFFFFFF)
+#define FH_TFDIB_LOWER_BOUND       (FH_MEM_LOWER_BOUND + 0x900)
+#define FH_TFDIB_UPPER_BOUND       (FH_MEM_LOWER_BOUND + 0x958)
+#define FH_TFDIB_CTRL0_REG(_chnl)  (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
+#define FH_TFDIB_CTRL1_REG(_chnl)  (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
+
+/**
+ * Transmit DMA Channel Control/Status Registers (TCSR)
+ *
+ * Device has one configuration register for each of 8 Tx DMA/FIFO channels
+ * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
+ * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
+ *
+ * To use a Tx DMA channel, driver must initialize its
+ * FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
+ *
+ * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE
+ *
+ * All other bits should be 0.
+ *
+ * Bit fields:
+ * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
+ *        '10' operate normally
+ * 29- 4: Reserved, set to "0"
+ *     3: Enable internal DMA requests (1, normal operation), disable (0)
+ *  2- 0: Reserved, set to "0"
+ */
+#define FH_TCSR_LOWER_BOUND  (FH_MEM_LOWER_BOUND + 0xD00)
+#define FH_TCSR_UPPER_BOUND  (FH_MEM_LOWER_BOUND + 0xE60)
+
+/* Find Control/Status reg for given Tx DMA/FIFO channel */
+#define FH_TCSR_CHNL_NUM                            (8)
+
+/* TCSR: tx_config register values */
+#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl)	\
+		(FH_TCSR_LOWER_BOUND + 0x20 * (_chnl))
+#define FH_TCSR_CHNL_TX_CREDIT_REG(_chnl)	\
+		(FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4)
+#define FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl)	\
+		(FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8)
+
+#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF		(0x00000000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV		(0x00000001)
+
+#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE	(0x00000000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE	(0x00000008)
+
+#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT	(0x00000000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD	(0x00100000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD	(0x00200000)
+
+#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT	(0x00000000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD	(0x00400000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD	(0x00800000)
+
+#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE	(0x00000000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF	(0x40000000)
+#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE	(0x80000000)
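+
+/*
+ * Sketch of the enable sequence described in the TCSR comment above
+ * (illustrative; "trans" and "chnl" are assumed):
+ *
+ *	iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
+ *			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ *			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
+ */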
+
+#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY	(0x00000000)
+#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT	(0x00002000)
+#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID	(0x00000003)
+
+#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM		(20)
+#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX		(12)
+
+/**
+ * Tx Shared Status Registers (TSSR)
+ *
+ * After stopping Tx DMA channel (writing 0 to
+ * FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
+ * FH_TSSR_TX_STATUS_REG until selected Tx channel is idle
+ * (channel's buffers empty | no pending requests).
+ *
+ * Bit fields:
+ * 31-24:  1 = Channel buffers empty (channel 7:0)
+ * 23-16:  1 = No pending requests (channel 7:0)
+ */
+#define FH_TSSR_LOWER_BOUND		(FH_MEM_LOWER_BOUND + 0xEA0)
+#define FH_TSSR_UPPER_BOUND		(FH_MEM_LOWER_BOUND + 0xEC0)
+
+#define FH_TSSR_TX_STATUS_REG		(FH_TSSR_LOWER_BOUND + 0x010)
+
+/**
+ * Bit fields for TSSR(Tx Shared Status & Control) error status register:
+ * 31:  Indicates an address error when accessed to internal memory
+ *	uCode/driver must write "1" in order to clear this flag
+ * 30:  Indicates that Host did not send the expected number of dwords to FH
+ *	uCode/driver must write "1" in order to clear this flag
+ * 16-9: Each status bit is for one channel. Indicates that an (Error) ActDMA
+ *	command was received from the scheduler while the TRB was already full
+ *	with a previous command
+ *	uCode/driver must write "1" in order to clear this flag
+ * 7-0: Each status bit indicates a channel's TxCredit error. When an error
+ *	bit is set, it indicates that the FH has received a full indication
+ *	from the RTC TxFIFO and the current value of the TxCredit counter was
+ *	not equal to zero. This means that the credit mechanism was not
+ *	synchronized to the TxFIFO status
+ *	uCode/driver must write "1" in order to clear this flag
+ */
+#define FH_TSSR_TX_ERROR_REG		(FH_TSSR_LOWER_BOUND + 0x018)
+#define FH_TSSR_TX_MSG_CONFIG_REG	(FH_TSSR_LOWER_BOUND + 0x008)
+
+#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
+
+/* Tx service channels */
+#define FH_SRVC_CHNL		(9)
+#define FH_SRVC_LOWER_BOUND	(FH_MEM_LOWER_BOUND + 0x9C8)
+#define FH_SRVC_UPPER_BOUND	(FH_MEM_LOWER_BOUND + 0x9D0)
+#define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
+		(FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
+
+#define FH_TX_CHICKEN_BITS_REG	(FH_MEM_LOWER_BOUND + 0xE98)
+#define FH_TX_TRB_REG(_chan)	(FH_MEM_LOWER_BOUND + 0x958 + (_chan) * 4)
+
+/* Instruct FH to increment the retry count of a packet when
+ * it is brought from the memory to TX-FIFO
+ */
+#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN	(0x00000002)
+
+#define MQ_RX_TABLE_SIZE	512
+#define MQ_RX_TABLE_MASK	(MQ_RX_TABLE_SIZE - 1)
+#define MQ_RX_NUM_RBDS		(MQ_RX_TABLE_SIZE - 1)
+#define RX_POOL_SIZE		(MQ_RX_NUM_RBDS +	\
+				 IWL_MAX_RX_HW_QUEUES *	\
+				 (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC))
+/* cb size is the exponent */
+#define RX_QUEUE_CB_SIZE(x)	ilog2(x)
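+/* e.g. RX_QUEUE_CB_SIZE(512) == 9, matching RFH_RXF_DMA_RBDCB_SIZE_512 */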
+
+#define RX_QUEUE_SIZE                         256
+#define RX_QUEUE_MASK                         255
+#define RX_QUEUE_SIZE_LOG                     8
+
+/**
+ * struct iwl_rb_status - receive buffer status
+ * 	host memory mapped FH registers
+ * @closed_rb_num [0:11] - Indicates the index of the RB which was closed
+ * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
+ * @finished_rb_num [0:11] - Indicates the index of the current RB
+ * 	in which the last frame was written to
+ * @finished_fr_num [0:11] - Indicates the index of the RX Frame
+ * 	which was transferred
+ */
+struct iwl_rb_status {
+	__le16 closed_rb_num;
+	__le16 closed_fr_num;
+	__le16 finished_rb_num;
+	__le16 finished_fr_num;
+	__le32 __unused;
+} __packed;
+
+
+#define TFD_QUEUE_SIZE_MAX      (256)
+#define TFD_QUEUE_SIZE_MAX_GEN3 (65536)
+/* cb size is the exponent - 3 */
+#define TFD_QUEUE_CB_SIZE(x)	(ilog2(x) - 3)
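+/* e.g. TFD_QUEUE_CB_SIZE(TFD_QUEUE_SIZE_MAX) == ilog2(256) - 3 == 5 */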
+#define TFD_QUEUE_SIZE_BC_DUP	(64)
+#define TFD_QUEUE_BC_SIZE	(TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
+#define TFD_QUEUE_BC_SIZE_GEN3	(TFD_QUEUE_SIZE_MAX_GEN3 + \
+				 TFD_QUEUE_SIZE_BC_DUP)
+#define IWL_TX_DMA_MASK        DMA_BIT_MASK(36)
+#define IWL_NUM_OF_TBS		20
+#define IWL_TFH_NUM_TBS		25
+
+static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr)
+{
+	return (sizeof(addr) > sizeof(u32) ? upper_32_bits(addr) : 0) & 0xF;
+}
+
+/**
+ * enum iwl_tfd_tb_hi_n_len - TB hi_n_len bits
+ * @TB_HI_N_LEN_ADDR_HI_MSK: high 4 bits (to make it 36) of DMA address
+ * @TB_HI_N_LEN_LEN_MSK: length of the TB
+ */
+enum iwl_tfd_tb_hi_n_len {
+	TB_HI_N_LEN_ADDR_HI_MSK	= 0xf,
+	TB_HI_N_LEN_LEN_MSK	= 0xfff0,
+};
+
+/**
+ * struct iwl_tfd_tb - transmit buffer descriptor within transmit frame descriptor
+ *
+ * This structure contains the dma address and length of one Tx buffer.
+ *
+ * @lo: low [31:0] portion of the dma address of the TX buffer;
+ * 	every even-numbered TB is unaligned on a 16-bit boundary
+ * @hi_n_len: &enum iwl_tfd_tb_hi_n_len
+ */
+struct iwl_tfd_tb {
+	__le32 lo;
+	__le16 hi_n_len;
+} __packed;
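+
+/*
+ * Illustrative sketch (not the driver's real helper, whose name differs)
+ * of how a 36-bit DMA address and a length are packed into the TB fields:
+ *
+ *	static inline void example_fill_tb(struct iwl_tfd_tb *tb,
+ *					   dma_addr_t addr, u16 len)
+ *	{
+ *		u16 hi_n_len = iwl_get_dma_hi_addr(addr);
+ *
+ *		hi_n_len |= len << 4;	// TB_HI_N_LEN_LEN_MSK portion
+ *		tb->lo = cpu_to_le32(addr);
+ *		tb->hi_n_len = cpu_to_le16(hi_n_len);
+ *	}
+ */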
+
+/**
+ * struct iwl_tfh_tb - transmit buffer descriptor within transmit frame descriptor
+ *
+ * This structure contains the dma address and length of one Tx buffer.
+ *
+ * @tb_len: length of the tx buffer
+ * @addr: 64-bit dma address
+ */
+struct iwl_tfh_tb {
+	__le16 tb_len;
+	__le64 addr;
+} __packed;
+
+/**
+ * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
+ * Both driver and device share these circular buffers, each of which must be
+ * contiguous 256 TFDs.
+ * For pre 22000 HW it is 256 x 128 bytes-per-TFD = 32 KBytes
+ * For 22000 HW and on it is 256 x 256 bytes-per-TFD = 65 KBytes
+ *
+ * Driver must indicate the physical address of the base of each
+ * circular buffer via the FH_MEM_CBBC_QUEUE registers.
+ *
+ * Each TFD contains pointer/size information for up to 20 / 25 data buffers
+ * in host DRAM.  These buffers collectively contain the (one) frame described
+ * by the TFD.  Each buffer must be a single contiguous block of memory within
+ * itself, but buffers may be scattered in host DRAM.  Each buffer has max size
+ * of (4K - 4).  The device concatenates all of a TFD's buffers into a single
+ * Tx frame, up to 8 KBytes in size.
+ *
+ * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
+ */
+
+/**
+ * struct iwl_tfd - Transmit Frame Descriptor (TFD)
+ * @__reserved1: reserved
+ * @num_tbs: bits 0-4 number of active tbs
+ *	     bit 5    reserved
+ *	     bits 6-7 padding (not used)
+ * @tbs: transmit frame buffer descriptors
+ * @__pad: padding
+ */
+struct iwl_tfd {
+	u8 __reserved1[3];
+	u8 num_tbs;
+	struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS];
+	__le32 __pad;
+} __packed;
+
+/**
+ * struct iwl_tfh_tfd - Transmit Frame Descriptor (TFD)
+ * @num_tbs: bits 0-4  number of active tbs
+ *	     bits 5-15 reserved
+ * @tbs: transmit frame buffer descriptors
+ * @__pad: padding
+ */
+struct iwl_tfh_tfd {
+	__le16 num_tbs;
+	struct iwl_tfh_tb tbs[IWL_TFH_NUM_TBS];
+	__le32 __pad;
+} __packed;
+
+/* Keep Warm Size */
+#define IWL_KW_SIZE 0x1000	/* 4k */
+
+/* Fixed (non-configurable) rx data from phy */
+
+/**
+ * struct iwlagn_scd_bc_tbl - scheduler byte count table
+ *	base physical address provided by SCD_DRAM_BASE_ADDR
+ * For devices up to 22000:
+ * @tfd_offset: 0-12 - tx command byte count
+ *		12-16 - station index
+ * For 22000:
+ * @tfd_offset: 0-12 - tx command byte count
+ *		12-13 - number of 64 byte chunks
+ *		14-16 - reserved
+ */
+struct iwlagn_scd_bc_tbl {
+	__le16 tfd_offset[TFD_QUEUE_BC_SIZE];
+} __packed;
+
+/**
+ * struct iwl_gen3_bc_tbl - scheduler byte count table gen3
+ * For 22560 and on:
+ * @tfd_offset: 0-12 - tx command byte count
+ *		12-13 - number of 64 byte chunks
+ *		14-16 - reserved
+ */
+struct iwl_gen3_bc_tbl {
+	__le16 tfd_offset[TFD_QUEUE_BC_SIZE_GEN3];
+} __packed;
+
+#endif /* !__iwl_fh_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
new file mode 100644
index 0000000..efb1998
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
@@ -0,0 +1,427 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ *
+ * Portions of this file are derived from the ipw3945 project.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/export.h>
+
+#include "iwl-drv.h"
+#include "iwl-io.h"
+#include "iwl-csr.h"
+#include "iwl-debug.h"
+#include "iwl-prph.h"
+#include "iwl-fh.h"
+
+void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val)
+{
+	trace_iwlwifi_dev_iowrite8(trans->dev, ofs, val);
+	iwl_trans_write8(trans, ofs, val);
+}
+IWL_EXPORT_SYMBOL(iwl_write8);
+
+void iwl_write32(struct iwl_trans *trans, u32 ofs, u32 val)
+{
+	trace_iwlwifi_dev_iowrite32(trans->dev, ofs, val);
+	iwl_trans_write32(trans, ofs, val);
+}
+IWL_EXPORT_SYMBOL(iwl_write32);
+
+void iwl_write64(struct iwl_trans *trans, u64 ofs, u64 val)
+{
+	trace_iwlwifi_dev_iowrite64(trans->dev, ofs, val);
+	iwl_trans_write32(trans, ofs, lower_32_bits(val));
+	iwl_trans_write32(trans, ofs + 4, upper_32_bits(val));
+}
+IWL_EXPORT_SYMBOL(iwl_write64);
+
+u32 iwl_read32(struct iwl_trans *trans, u32 ofs)
+{
+	u32 val = iwl_trans_read32(trans, ofs);
+
+	trace_iwlwifi_dev_ioread32(trans->dev, ofs, val);
+	return val;
+}
+IWL_EXPORT_SYMBOL(iwl_read32);
+
+#define IWL_POLL_INTERVAL 10	/* microseconds */
+
+int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
+		 u32 bits, u32 mask, int timeout)
+{
+	int t = 0;
+
+	do {
+		if ((iwl_read32(trans, addr) & mask) == (bits & mask))
+			return t;
+		udelay(IWL_POLL_INTERVAL);
+		t += IWL_POLL_INTERVAL;
+	} while (t < timeout);
+
+	return -ETIMEDOUT;
+}
+IWL_EXPORT_SYMBOL(iwl_poll_bit);
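+
+/*
+ * Typical use of iwl_poll_bit() (illustrative only; CSR_GP_CNTRL and
+ * its MAC_CLOCK_READY flag come from iwl-csr.h, and callers elsewhere
+ * in the driver poll them in a similar way):
+ *
+ *	if (iwl_poll_bit(trans, CSR_GP_CNTRL,
+ *			 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ *			 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ *			 25000) < 0)
+ *		IWL_ERR(trans, "MAC clock not ready\n");
+ *
+ * On success the return value is the time waited in microseconds;
+ * after @timeout microseconds it returns -ETIMEDOUT.
+ */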
+
+u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
+{
+	u32 value = 0x5a5a5a5a;
+	unsigned long flags;
+
+	if (iwl_trans_grab_nic_access(trans, &flags)) {
+		value = iwl_read32(trans, reg);
+		iwl_trans_release_nic_access(trans, &flags);
+	}
+
+	return value;
+}
+IWL_EXPORT_SYMBOL(iwl_read_direct32);
+
+void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value)
+{
+	unsigned long flags;
+
+	if (iwl_trans_grab_nic_access(trans, &flags)) {
+		iwl_write32(trans, reg, value);
+		iwl_trans_release_nic_access(trans, &flags);
+	}
+}
+IWL_EXPORT_SYMBOL(iwl_write_direct32);
+
+void iwl_write_direct64(struct iwl_trans *trans, u64 reg, u64 value)
+{
+	unsigned long flags;
+
+	if (iwl_trans_grab_nic_access(trans, &flags)) {
+		iwl_write64(trans, reg, value);
+		iwl_trans_release_nic_access(trans, &flags);
+	}
+}
+IWL_EXPORT_SYMBOL(iwl_write_direct64);
+
+int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
+			int timeout)
+{
+	int t = 0;
+
+	do {
+		if ((iwl_read_direct32(trans, addr) & mask) == mask)
+			return t;
+		udelay(IWL_POLL_INTERVAL);
+		t += IWL_POLL_INTERVAL;
+	} while (t < timeout);
+
+	return -ETIMEDOUT;
+}
+IWL_EXPORT_SYMBOL(iwl_poll_direct_bit);
+
+u32 iwl_read_prph_no_grab(struct iwl_trans *trans, u32 ofs)
+{
+	u32 val = iwl_trans_read_prph(trans, ofs);
+
+	trace_iwlwifi_dev_ioread_prph32(trans->dev, ofs, val);
+	return val;
+}
+IWL_EXPORT_SYMBOL(iwl_read_prph_no_grab);
+
+void iwl_write_prph_no_grab(struct iwl_trans *trans, u32 ofs, u32 val)
+{
+	trace_iwlwifi_dev_iowrite_prph32(trans->dev, ofs, val);
+	iwl_trans_write_prph(trans, ofs, val);
+}
+IWL_EXPORT_SYMBOL(iwl_write_prph_no_grab);
+
+void iwl_write_prph64_no_grab(struct iwl_trans *trans, u64 ofs, u64 val)
+{
+	trace_iwlwifi_dev_iowrite_prph64(trans->dev, ofs, val);
+	iwl_write_prph_no_grab(trans, ofs, val & 0xffffffff);
+	iwl_write_prph_no_grab(trans, ofs + 4, val >> 32);
+}
+IWL_EXPORT_SYMBOL(iwl_write_prph64_no_grab);
+
+u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs)
+{
+	unsigned long flags;
+	u32 val = 0x5a5a5a5a;
+
+	if (iwl_trans_grab_nic_access(trans, &flags)) {
+		val = iwl_read_prph_no_grab(trans, ofs);
+		iwl_trans_release_nic_access(trans, &flags);
+	}
+	return val;
+}
+IWL_EXPORT_SYMBOL(iwl_read_prph);
+
+void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
+{
+	unsigned long flags;
+
+	if (iwl_trans_grab_nic_access(trans, &flags)) {
+		iwl_write_prph_no_grab(trans, ofs, val);
+		iwl_trans_release_nic_access(trans, &flags);
+	}
+}
+IWL_EXPORT_SYMBOL(iwl_write_prph);
+
+int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
+		      u32 bits, u32 mask, int timeout)
+{
+	int t = 0;
+
+	do {
+		if ((iwl_read_prph(trans, addr) & mask) == (bits & mask))
+			return t;
+		udelay(IWL_POLL_INTERVAL);
+		t += IWL_POLL_INTERVAL;
+	} while (t < timeout);
+
+	return -ETIMEDOUT;
+}
+
+void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
+{
+	unsigned long flags;
+
+	if (iwl_trans_grab_nic_access(trans, &flags)) {
+		iwl_write_prph_no_grab(trans, ofs,
+				       iwl_read_prph_no_grab(trans, ofs) |
+				       mask);
+		iwl_trans_release_nic_access(trans, &flags);
+	}
+}
+IWL_EXPORT_SYMBOL(iwl_set_bits_prph);
+
+void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
+			    u32 bits, u32 mask)
+{
+	unsigned long flags;
+
+	if (iwl_trans_grab_nic_access(trans, &flags)) {
+		iwl_write_prph_no_grab(trans, ofs,
+				       (iwl_read_prph_no_grab(trans, ofs) &
+					mask) | bits);
+		iwl_trans_release_nic_access(trans, &flags);
+	}
+}
+IWL_EXPORT_SYMBOL(iwl_set_bits_mask_prph);
+
+void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
+{
+	unsigned long flags;
+	u32 val;
+
+	if (iwl_trans_grab_nic_access(trans, &flags)) {
+		val = iwl_read_prph_no_grab(trans, ofs);
+		iwl_write_prph_no_grab(trans, ofs, (val & ~mask));
+		iwl_trans_release_nic_access(trans, &flags);
+	}
+}
+IWL_EXPORT_SYMBOL(iwl_clear_bits_prph);
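+
+/*
+ * Note on the grab/release pattern above: the _no_grab variants assume
+ * the caller already holds NIC access, so a caller that touches several
+ * periphery registers can take the grant once rather than per access
+ * (illustrative sketch; REG_A/REG_B are hypothetical):
+ *
+ *	unsigned long flags;
+ *
+ *	if (iwl_trans_grab_nic_access(trans, &flags)) {
+ *		iwl_write_prph_no_grab(trans, REG_A, val_a);
+ *		iwl_write_prph_no_grab(trans, REG_B, val_b);
+ *		iwl_trans_release_nic_access(trans, &flags);
+ *	}
+ */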
+
+void iwl_force_nmi(struct iwl_trans *trans)
+{
+	if (trans->cfg->device_family < IWL_DEVICE_FAMILY_9000)
+		iwl_write_prph(trans, DEVICE_SET_NMI_REG,
+			       DEVICE_SET_NMI_VAL_DRV);
+	else
+		iwl_write_prph(trans, UREG_NIC_SET_NMI_DRIVER,
+			       UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER_MSK);
+}
+IWL_EXPORT_SYMBOL(iwl_force_nmi);
+
+static const char *get_rfh_string(int cmd)
+{
+#define IWL_CMD(x) case x: return #x
+#define IWL_CMD_MQ(arg, reg, q) { if (arg == reg(q)) return #reg; }
+
+	int i;
+
+	for (i = 0; i < IWL_MAX_RX_HW_QUEUES; i++) {
+		IWL_CMD_MQ(cmd, RFH_Q_FRBDCB_BA_LSB, i);
+		IWL_CMD_MQ(cmd, RFH_Q_FRBDCB_WIDX, i);
+		IWL_CMD_MQ(cmd, RFH_Q_FRBDCB_RIDX, i);
+		IWL_CMD_MQ(cmd, RFH_Q_URBD_STTS_WPTR_LSB, i);
+	}
+
+	switch (cmd) {
+	IWL_CMD(RFH_RXF_DMA_CFG);
+	IWL_CMD(RFH_GEN_CFG);
+	IWL_CMD(RFH_GEN_STATUS);
+	IWL_CMD(FH_TSSR_TX_STATUS_REG);
+	IWL_CMD(FH_TSSR_TX_ERROR_REG);
+	default:
+		return "UNKNOWN";
+	}
+#undef IWL_CMD_MQ
+}
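+
+/*
+ * Note: IWL_CMD is deliberately left defined at this point;
+ * get_fh_string() below reuses it and #undefs it there.
+ */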
+
+struct reg {
+	u32 addr;
+	bool is64;
+};
+
+static int iwl_dump_rfh(struct iwl_trans *trans, char **buf)
+{
+	int i, q;
+	int num_q = trans->num_rx_queues;
+	static const u32 rfh_tbl[] = {
+		RFH_RXF_DMA_CFG,
+		RFH_GEN_CFG,
+		RFH_GEN_STATUS,
+		FH_TSSR_TX_STATUS_REG,
+		FH_TSSR_TX_ERROR_REG,
+	};
+	static const struct reg rfh_mq_tbl[] = {
+		{ RFH_Q0_FRBDCB_BA_LSB, true },
+		{ RFH_Q0_FRBDCB_WIDX, false },
+		{ RFH_Q0_FRBDCB_RIDX, false },
+		{ RFH_Q0_URBD_STTS_WPTR_LSB, true },
+	};
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	if (buf) {
+		int pos = 0;
+		/*
+		 * Register (up to 34 for name + 8 blank/q for MQ): 40 chars
+		 * Colon + space: 2 characters
+		 * 0X%08x: 10 characters
+		 * New line: 1 character
+		 * Total of 53 characters
+		 */
+		size_t bufsz = ARRAY_SIZE(rfh_tbl) * 53 +
+			       ARRAY_SIZE(rfh_mq_tbl) * 53 * num_q + 40;
+
+		*buf = kmalloc(bufsz, GFP_KERNEL);
+		if (!*buf)
+			return -ENOMEM;
+
+		pos += scnprintf(*buf + pos, bufsz - pos,
+				"RFH register values:\n");
+
+		for (i = 0; i < ARRAY_SIZE(rfh_tbl); i++)
+			pos += scnprintf(*buf + pos, bufsz - pos,
+				"%40s: 0X%08x\n",
+				get_rfh_string(rfh_tbl[i]),
+				iwl_read_prph(trans, rfh_tbl[i]));
+
+		for (i = 0; i < ARRAY_SIZE(rfh_mq_tbl); i++)
+			for (q = 0; q < num_q; q++) {
+				u32 addr = rfh_mq_tbl[i].addr;
+
+				addr += q * (rfh_mq_tbl[i].is64 ? 8 : 4);
+				pos += scnprintf(*buf + pos, bufsz - pos,
+					"%34s(q %2d): 0X%08x\n",
+					get_rfh_string(addr), q,
+					iwl_read_prph(trans, addr));
+			}
+
+		return pos;
+	}
+#endif
+
+	IWL_ERR(trans, "RFH register values:\n");
+	for (i = 0; i < ARRAY_SIZE(rfh_tbl); i++)
+		IWL_ERR(trans, "  %34s: 0X%08x\n",
+			get_rfh_string(rfh_tbl[i]),
+			iwl_read_prph(trans, rfh_tbl[i]));
+
+	for (i = 0; i < ARRAY_SIZE(rfh_mq_tbl); i++)
+		for (q = 0; q < num_q; q++) {
+			u32 addr = rfh_mq_tbl[i].addr;
+
+			addr += q * (rfh_mq_tbl[i].is64 ? 8 : 4);
+			IWL_ERR(trans, "  %34s(q %d): 0X%08x\n",
+				get_rfh_string(addr), q,
+				iwl_read_prph(trans, addr));
+		}
+
+	return 0;
+}
+
+static const char *get_fh_string(int cmd)
+{
+	switch (cmd) {
+	IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
+	IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
+	IWL_CMD(FH_RSCSR_CHNL0_WPTR);
+	IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
+	IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
+	IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
+	IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
+	IWL_CMD(FH_TSSR_TX_STATUS_REG);
+	IWL_CMD(FH_TSSR_TX_ERROR_REG);
+	default:
+		return "UNKNOWN";
+	}
+#undef IWL_CMD
+}
+
+int iwl_dump_fh(struct iwl_trans *trans, char **buf)
+{
+	int i;
+	static const u32 fh_tbl[] = {
+		FH_RSCSR_CHNL0_STTS_WPTR_REG,
+		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+		FH_RSCSR_CHNL0_WPTR,
+		FH_MEM_RCSR_CHNL0_CONFIG_REG,
+		FH_MEM_RSSR_SHARED_CTRL_REG,
+		FH_MEM_RSSR_RX_STATUS_REG,
+		FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
+		FH_TSSR_TX_STATUS_REG,
+		FH_TSSR_TX_ERROR_REG
+	};
+
+	if (trans->cfg->mq_rx_supported)
+		return iwl_dump_rfh(trans, buf);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	if (buf) {
+		int pos = 0;
+		size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+
+		*buf = kmalloc(bufsz, GFP_KERNEL);
+		if (!*buf)
+			return -ENOMEM;
+
+		pos += scnprintf(*buf + pos, bufsz - pos,
+				"FH register values:\n");
+
+		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
+			pos += scnprintf(*buf + pos, bufsz - pos,
+				"  %34s: 0X%08x\n",
+				get_fh_string(fh_tbl[i]),
+				iwl_read_direct32(trans, fh_tbl[i]));
+
+		return pos;
+	}
+#endif
+
+	IWL_ERR(trans, "FH register values:\n");
+	for (i = 0; i <  ARRAY_SIZE(fh_tbl); i++)
+		IWL_ERR(trans, "  %34s: 0X%08x\n",
+			get_fh_string(fh_tbl[i]),
+			iwl_read_direct32(trans, fh_tbl[i]));
+
+	return 0;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-io.h
new file mode 100644
index 0000000..5c8c0e1
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.h
@@ -0,0 +1,76 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_io_h__
+#define __iwl_io_h__
+
+#include "iwl-devtrace.h"
+#include "iwl-trans.h"
+
+void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val);
+void iwl_write32(struct iwl_trans *trans, u32 ofs, u32 val);
+void iwl_write64(struct iwl_trans *trans, u64 ofs, u64 val);
+u32 iwl_read32(struct iwl_trans *trans, u32 ofs);
+
+static inline void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask)
+{
+	iwl_trans_set_bits_mask(trans, reg, mask, mask);
+}
+
+static inline void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask)
+{
+	iwl_trans_set_bits_mask(trans, reg, mask, 0);
+}
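+
+/*
+ * Example: these are read-modify-write helpers over
+ * iwl_trans_set_bits_mask(), so iwl_set_bit(trans, reg, BIT(3)) sets
+ * only bit 3 and leaves the other bits of the register untouched.
+ */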
+
+int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
+		 u32 bits, u32 mask, int timeout);
+int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
+			int timeout);
+
+u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg);
+void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value);
+void iwl_write_direct64(struct iwl_trans *trans, u64 reg, u64 value);
+
+u32 iwl_read_prph_no_grab(struct iwl_trans *trans, u32 ofs);
+u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs);
+void iwl_write_prph_no_grab(struct iwl_trans *trans, u32 ofs, u32 val);
+void iwl_write_prph64_no_grab(struct iwl_trans *trans, u64 ofs, u64 val);
+void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);
+int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
+		      u32 bits, u32 mask, int timeout);
+void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
+void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
+			    u32 bits, u32 mask);
+void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
+void iwl_force_nmi(struct iwl_trans *trans);
+
+/* Error handling */
+int iwl_dump_fh(struct iwl_trans *trans, char **buf);
+
+#endif /* __iwl_io_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
new file mode 100644
index 0000000..97072cf
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
@@ -0,0 +1,156 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program;
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_modparams_h__
+#define __iwl_modparams_h__
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/gfp.h>
+
+extern struct iwl_mod_params iwlwifi_mod_params;
+
+enum iwl_power_level {
+	IWL_POWER_INDEX_1,
+	IWL_POWER_INDEX_2,
+	IWL_POWER_INDEX_3,
+	IWL_POWER_INDEX_4,
+	IWL_POWER_INDEX_5,
+	IWL_POWER_NUM
+};
+
+enum iwl_disable_11n {
+	IWL_DISABLE_HT_ALL	 = BIT(0),
+	IWL_DISABLE_HT_TXAGG	 = BIT(1),
+	IWL_DISABLE_HT_RXAGG	 = BIT(2),
+	IWL_ENABLE_HT_TXAGG	 = BIT(3),
+};
+
+enum iwl_amsdu_size {
+	IWL_AMSDU_DEF = 0,
+	IWL_AMSDU_4K = 1,
+	IWL_AMSDU_8K = 2,
+	IWL_AMSDU_12K = 3,
+	/* Add 2K at the end to avoid breaking current API */
+	IWL_AMSDU_2K = 4,
+};
+
+enum iwl_uapsd_disable {
+	IWL_DISABLE_UAPSD_BSS		= BIT(0),
+	IWL_DISABLE_UAPSD_P2P_CLIENT	= BIT(1),
+};
+
+/**
+ * struct iwl_mod_params
+ *
+ * Holds the module parameters
+ *
+ * @swcrypto: use software crypto instead of hardware, default = 0 (hardware)
+ * @disable_11n: disable 11n capabilities, default = 0,
+ *	use IWL_[DIS,EN]ABLE_HT_* constants
+ * @amsdu_size: See &enum iwl_amsdu_size.
+ * @fw_restart: restart firmware, default = 1
+ * @bt_coex_active: enable bt coex, default = true
+ * @led_mode: system default, default = 0
+ * @power_save: enable power save, default = false
+ * @power_level: power level, default = 1
+ * @debug_level: levels are IWL_DL_*
+ * @antenna_coupling: antenna coupling in dB, default = 0
+ * @nvm_file: specifies an external NVM file
+ * @uapsd_disable: disable U-APSD, see &enum iwl_uapsd_disable, default =
+ *	IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT
+ * @d0i3_disable: disable d0i3, default = 1
+ * @d0i3_timeout: time to wait after no refs are taken before
+ *	entering D0i3 (in msecs)
+ * @lar_disable: disable LAR (regulatory), default = 0
+ * @fw_monitor: allow use of the firmware monitor
+ * @disable_11ac: disable VHT capabilities, default = false.
+ * @remove_when_gone: remove an inaccessible device from the PCIe bus.
+ */
+struct iwl_mod_params {
+	int swcrypto;
+	unsigned int disable_11n;
+	int amsdu_size;
+	bool fw_restart;
+	bool bt_coex_active;
+	int led_mode;
+	bool power_save;
+	int power_level;
+#ifdef CONFIG_IWLWIFI_DEBUG
+	u32 debug_level;
+#endif
+	int antenna_coupling;
+	char *nvm_file;
+	u32 uapsd_disable;
+	bool d0i3_disable;
+	unsigned int d0i3_timeout;
+	bool lar_disable;
+	bool fw_monitor;
+	bool disable_11ac;
+	/**
+	 * @disable_11ax: disable HE capabilities, default = false
+	 */
+	bool disable_11ax;
+	bool remove_when_gone;
+};
+
+#endif /* __iwl_modparams_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
new file mode 100644
index 0000000..73969db
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -0,0 +1,1397 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+#include <linux/firmware.h>
+
+#include "iwl-drv.h"
+#include "iwl-modparams.h"
+#include "iwl-nvm-parse.h"
+#include "iwl-prph.h"
+#include "iwl-io.h"
+#include "iwl-csr.h"
+#include "fw/acpi.h"
+#include "fw/api/nvm-reg.h"
+#include "fw/api/commands.h"
+#include "fw/api/cmdhdr.h"
+#include "fw/img.h"
+
+/* NVM offsets (in words) definitions */
+enum nvm_offsets {
+	/* NVM HW-Section offset (in words) definitions */
+	SUBSYSTEM_ID = 0x0A,
+	HW_ADDR = 0x15,
+
+	/* NVM SW-Section offset (in words) definitions */
+	NVM_SW_SECTION = 0x1C0,
+	NVM_VERSION = 0,
+	RADIO_CFG = 1,
+	SKU = 2,
+	N_HW_ADDRS = 3,
+	NVM_CHANNELS = 0x1E0 - NVM_SW_SECTION,
+
+	/* NVM calibration section offset (in words) definitions */
+	NVM_CALIB_SECTION = 0x2B8,
+	XTAL_CALIB = 0x316 - NVM_CALIB_SECTION,
+
+	/* NVM REGULATORY -Section offset (in words) definitions */
+	NVM_CHANNELS_SDP = 0,
+};
+
+enum ext_nvm_offsets {
+	/* NVM HW-Section offset (in words) definitions */
+	MAC_ADDRESS_OVERRIDE_EXT_NVM = 1,
+
+	/* NVM SW-Section offset (in words) definitions */
+	NVM_VERSION_EXT_NVM = 0,
+	RADIO_CFG_FAMILY_EXT_NVM = 0,
+	SKU_FAMILY_8000 = 2,
+	N_HW_ADDRS_FAMILY_8000 = 3,
+
+	/* NVM REGULATORY -Section offset (in words) definitions */
+	NVM_CHANNELS_EXTENDED = 0,
+	NVM_LAR_OFFSET_OLD = 0x4C7,
+	NVM_LAR_OFFSET = 0x507,
+	NVM_LAR_ENABLED = 0x7,
+};
+
+/* SKU Capabilities (actual values from NVM definition) */
+enum nvm_sku_bits {
+	NVM_SKU_CAP_BAND_24GHZ		= BIT(0),
+	NVM_SKU_CAP_BAND_52GHZ		= BIT(1),
+	NVM_SKU_CAP_11N_ENABLE		= BIT(2),
+	NVM_SKU_CAP_11AC_ENABLE		= BIT(3),
+	NVM_SKU_CAP_MIMO_DISABLE	= BIT(5),
+};
+
+/*
+ * These are the channel numbers in the order that they are stored in the NVM
+ */
+static const u8 iwl_nvm_channels[] = {
+	/* 2.4 GHz */
+	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+	/* 5 GHz */
+	36, 40, 44, 48, 52, 56, 60, 64,
+	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
+	149, 153, 157, 161, 165
+};
+
+static const u8 iwl_ext_nvm_channels[] = {
+	/* 2.4 GHz */
+	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+	/* 5 GHz */
+	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
+	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
+	149, 153, 157, 161, 165, 169, 173, 177, 181
+};
+
+#define IWL_NVM_NUM_CHANNELS		ARRAY_SIZE(iwl_nvm_channels)
+#define IWL_NVM_NUM_CHANNELS_EXT	ARRAY_SIZE(iwl_ext_nvm_channels)
+#define NUM_2GHZ_CHANNELS		14
+#define NUM_2GHZ_CHANNELS_EXT	14
+#define FIRST_2GHZ_HT_MINUS		5
+#define LAST_2GHZ_HT_PLUS		9
+#define LAST_5GHZ_HT			165
+#define LAST_5GHZ_HT_FAMILY_8000	181
+#define N_HW_ADDR_MASK			0xF
+
+/* rate data (static) */
+static struct ieee80211_rate iwl_cfg80211_rates[] = {
+	{ .bitrate = 1 * 10, .hw_value = 0, .hw_value_short = 0, },
+	{ .bitrate = 2 * 10, .hw_value = 1, .hw_value_short = 1,
+	  .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
+	{ .bitrate = 5.5 * 10, .hw_value = 2, .hw_value_short = 2,
+	  .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
+	{ .bitrate = 11 * 10, .hw_value = 3, .hw_value_short = 3,
+	  .flags = IEEE80211_RATE_SHORT_PREAMBLE, },
+	{ .bitrate = 6 * 10, .hw_value = 4, .hw_value_short = 4, },
+	{ .bitrate = 9 * 10, .hw_value = 5, .hw_value_short = 5, },
+	{ .bitrate = 12 * 10, .hw_value = 6, .hw_value_short = 6, },
+	{ .bitrate = 18 * 10, .hw_value = 7, .hw_value_short = 7, },
+	{ .bitrate = 24 * 10, .hw_value = 8, .hw_value_short = 8, },
+	{ .bitrate = 36 * 10, .hw_value = 9, .hw_value_short = 9, },
+	{ .bitrate = 48 * 10, .hw_value = 10, .hw_value_short = 10, },
+	{ .bitrate = 54 * 10, .hw_value = 11, .hw_value_short = 11, },
+};
+#define RATES_24_OFFS	0
+#define N_RATES_24	ARRAY_SIZE(iwl_cfg80211_rates)
+#define RATES_52_OFFS	4
+#define N_RATES_52	(N_RATES_24 - RATES_52_OFFS)
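+
+/*
+ * Worked example: N_RATES_24 covers all 12 legacy rates (1-54 Mbps);
+ * the 5 GHz band skips the four CCK/DSSS entries, so N_RATES_52 is
+ * 12 - 4 = 8 OFDM rates starting at 6 Mbps.
+ */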
+
+/**
+ * enum iwl_nvm_channel_flags - channel flags in NVM
+ * @NVM_CHANNEL_VALID: channel is usable for this SKU/geo
+ * @NVM_CHANNEL_IBSS: usable as an IBSS channel
+ * @NVM_CHANNEL_ACTIVE: active scanning allowed
+ * @NVM_CHANNEL_RADAR: radar detection required
+ * @NVM_CHANNEL_INDOOR_ONLY: only indoor use is allowed
+ * @NVM_CHANNEL_GO_CONCURRENT: GO operation is allowed when connected to BSS
+ *	on same channel on 2.4 or same UNII band on 5.2
+ * @NVM_CHANNEL_UNIFORM: uniform spreading required
+ * @NVM_CHANNEL_20MHZ: 20 MHz channel okay
+ * @NVM_CHANNEL_40MHZ: 40 MHz channel okay
+ * @NVM_CHANNEL_80MHZ: 80 MHz channel okay
+ * @NVM_CHANNEL_160MHZ: 160 MHz channel okay
+ * @NVM_CHANNEL_DC_HIGH: DC HIGH required/allowed (?)
+ */
+enum iwl_nvm_channel_flags {
+	NVM_CHANNEL_VALID		= BIT(0),
+	NVM_CHANNEL_IBSS		= BIT(1),
+	NVM_CHANNEL_ACTIVE		= BIT(3),
+	NVM_CHANNEL_RADAR		= BIT(4),
+	NVM_CHANNEL_INDOOR_ONLY		= BIT(5),
+	NVM_CHANNEL_GO_CONCURRENT	= BIT(6),
+	NVM_CHANNEL_UNIFORM		= BIT(7),
+	NVM_CHANNEL_20MHZ		= BIT(8),
+	NVM_CHANNEL_40MHZ		= BIT(9),
+	NVM_CHANNEL_80MHZ		= BIT(10),
+	NVM_CHANNEL_160MHZ		= BIT(11),
+	NVM_CHANNEL_DC_HIGH		= BIT(12),
+};
+
+static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
+					       int chan, u16 flags)
+{
+#define CHECK_AND_PRINT_I(x)	\
+	((flags & NVM_CHANNEL_##x) ? " " #x : "")
+
+	if (!(flags & NVM_CHANNEL_VALID)) {
+		IWL_DEBUG_DEV(dev, level, "Ch. %d: 0x%x: No traffic\n",
+			      chan, flags);
+		return;
+	}
+
+	/* Note: this already prints up to 101 characters; 110 is the limit! */
+	IWL_DEBUG_DEV(dev, level,
+		      "Ch. %d: 0x%x:%s%s%s%s%s%s%s%s%s%s%s%s\n",
+		      chan, flags,
+		      CHECK_AND_PRINT_I(VALID),
+		      CHECK_AND_PRINT_I(IBSS),
+		      CHECK_AND_PRINT_I(ACTIVE),
+		      CHECK_AND_PRINT_I(RADAR),
+		      CHECK_AND_PRINT_I(INDOOR_ONLY),
+		      CHECK_AND_PRINT_I(GO_CONCURRENT),
+		      CHECK_AND_PRINT_I(UNIFORM),
+		      CHECK_AND_PRINT_I(20MHZ),
+		      CHECK_AND_PRINT_I(40MHZ),
+		      CHECK_AND_PRINT_I(80MHZ),
+		      CHECK_AND_PRINT_I(160MHZ),
+		      CHECK_AND_PRINT_I(DC_HIGH));
+#undef CHECK_AND_PRINT_I
+}
+
+static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz,
+				 u16 nvm_flags, const struct iwl_cfg *cfg)
+{
+	u32 flags = IEEE80211_CHAN_NO_HT40;
+	u32 last_5ghz_ht = LAST_5GHZ_HT;
+
+	if (cfg->nvm_type == IWL_NVM_EXT)
+		last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
+
+	if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) {
+		if (ch_num <= LAST_2GHZ_HT_PLUS)
+			flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
+		if (ch_num >= FIRST_2GHZ_HT_MINUS)
+			flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
+	} else if (ch_num <= last_5ghz_ht && (nvm_flags & NVM_CHANNEL_40MHZ)) {
+		if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
+			flags &= ~IEEE80211_CHAN_NO_HT40PLUS;
+		else
+			flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
+	}
+	if (!(nvm_flags & NVM_CHANNEL_80MHZ))
+		flags |= IEEE80211_CHAN_NO_80MHZ;
+	if (!(nvm_flags & NVM_CHANNEL_160MHZ))
+		flags |= IEEE80211_CHAN_NO_160MHZ;
+
+	if (!(nvm_flags & NVM_CHANNEL_IBSS))
+		flags |= IEEE80211_CHAN_NO_IR;
+
+	if (!(nvm_flags & NVM_CHANNEL_ACTIVE))
+		flags |= IEEE80211_CHAN_NO_IR;
+
+	if (nvm_flags & NVM_CHANNEL_RADAR)
+		flags |= IEEE80211_CHAN_RADAR;
+
+	if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY)
+		flags |= IEEE80211_CHAN_INDOOR_ONLY;
+
+	/* Set the GO concurrent flag only when NO_IR is set,
+	 * otherwise it is meaningless.
+	 */
+	if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) &&
+	    (flags & IEEE80211_CHAN_NO_IR))
+		flags |= IEEE80211_CHAN_IR_CONCURRENT;
+
+	return flags;
+}
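+
+/*
+ * Worked example for the 2.4 GHz HT40 logic above: channel 1 with
+ * NVM_CHANNEL_40MHZ set gets HT40+ (1 <= LAST_2GHZ_HT_PLUS == 9) but
+ * keeps NO_HT40MINUS since 1 < FIRST_2GHZ_HT_MINUS == 5; channel 6
+ * satisfies both bounds and so allows both HT40+ and HT40-.
+ */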
+
+static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
+				struct iwl_nvm_data *data,
+				const __le16 * const nvm_ch_flags,
+				u32 sbands_flags)
+{
+	int ch_idx;
+	int n_channels = 0;
+	struct ieee80211_channel *channel;
+	u16 ch_flags;
+	int num_of_ch, num_2ghz_channels;
+	const u8 *nvm_chan;
+
+	if (cfg->nvm_type != IWL_NVM_EXT) {
+		num_of_ch = IWL_NVM_NUM_CHANNELS;
+		nvm_chan = &iwl_nvm_channels[0];
+		num_2ghz_channels = NUM_2GHZ_CHANNELS;
+	} else {
+		num_of_ch = IWL_NVM_NUM_CHANNELS_EXT;
+		nvm_chan = &iwl_ext_nvm_channels[0];
+		num_2ghz_channels = NUM_2GHZ_CHANNELS_EXT;
+	}
+
+	for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
+		bool is_5ghz = (ch_idx >= num_2ghz_channels);
+
+		ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
+
+		if (is_5ghz && !data->sku_cap_band_52ghz_enable)
+			continue;
+
+		/* workaround to disable wide channels in 5GHz */
+		if ((sbands_flags & IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ) &&
+		    is_5ghz) {
+			ch_flags &= ~(NVM_CHANNEL_40MHZ |
+				     NVM_CHANNEL_80MHZ |
+				     NVM_CHANNEL_160MHZ);
+		}
+
+		if (ch_flags & NVM_CHANNEL_160MHZ)
+			data->vht160_supported = true;
+
+		if (!(sbands_flags & IWL_NVM_SBANDS_FLAGS_LAR) &&
+		    !(ch_flags & NVM_CHANNEL_VALID)) {
+			/*
+			 * Channels might become valid later if lar is
+			 * supported, hence we still want to add them to
+			 * the list of supported channels to cfg80211.
+			 */
+			iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM,
+						    nvm_chan[ch_idx], ch_flags);
+			continue;
+		}
+
+		channel = &data->channels[n_channels];
+		n_channels++;
+
+		channel->hw_value = nvm_chan[ch_idx];
+		channel->band = is_5ghz ?
+				NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
+		channel->center_freq =
+			ieee80211_channel_to_frequency(
+				channel->hw_value, channel->band);
+
+		/* Initialize regulatory-based run-time data */
+
+		/*
+		 * Default value - highest tx power value.  max_power
+		 * is not used in mvm, and is used for backwards compatibility
+		 */
+		channel->max_power = IWL_DEFAULT_MAX_TX_POWER;
+
+		/* don't put limitations in case we're using LAR */
+		if (!(sbands_flags & IWL_NVM_SBANDS_FLAGS_LAR))
+			channel->flags = iwl_get_channel_flags(nvm_chan[ch_idx],
+							       ch_idx, is_5ghz,
+							       ch_flags, cfg);
+		else
+			channel->flags = 0;
+
+		iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM,
+					    channel->hw_value, ch_flags);
+		IWL_DEBUG_EEPROM(dev, "Ch. %d: %ddBm\n",
+				 channel->hw_value, channel->max_power);
+	}
+
+	return n_channels;
+}
+
+static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
+				  struct iwl_nvm_data *data,
+				  struct ieee80211_sta_vht_cap *vht_cap,
+				  u8 tx_chains, u8 rx_chains)
+{
+	int num_rx_ants = num_of_ant(rx_chains);
+	int num_tx_ants = num_of_ant(tx_chains);
+	unsigned int max_ampdu_exponent = (cfg->max_vht_ampdu_exponent ?:
+					   IEEE80211_VHT_MAX_AMPDU_1024K);
+
+	vht_cap->vht_supported = true;
+
+	vht_cap->cap = IEEE80211_VHT_CAP_SHORT_GI_80 |
+		       IEEE80211_VHT_CAP_RXSTBC_1 |
+		       IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+		       3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT |
+		       max_ampdu_exponent <<
+		       IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+
+	if (data->vht160_supported)
+		vht_cap->cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
+				IEEE80211_VHT_CAP_SHORT_GI_160;
+
+	if (cfg->vht_mu_mimo_supported)
+		vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
+
+	if (cfg->ht_params->ldpc)
+		vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC;
+
+	if (data->sku_cap_mimo_disabled) {
+		num_rx_ants = 1;
+		num_tx_ants = 1;
+	}
+
+	if (num_tx_ants > 1)
+		vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
+	else
+		vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;
+
+	switch (iwlwifi_mod_params.amsdu_size) {
+	case IWL_AMSDU_DEF:
+		if (cfg->mq_rx_supported)
+			vht_cap->cap |=
+				IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
+		else
+			vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
+		break;
+	case IWL_AMSDU_2K:
+		if (cfg->mq_rx_supported)
+			vht_cap->cap |=
+				IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
+		else
+			WARN(1, "RB size of 2K is not supported by this device\n");
+		break;
+	case IWL_AMSDU_4K:
+		vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
+		break;
+	case IWL_AMSDU_8K:
+		vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991;
+		break;
+	case IWL_AMSDU_12K:
+		vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
+		break;
+	default:
+		break;
+	}
+
+	vht_cap->vht_mcs.rx_mcs_map =
+		cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
+			    IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
+			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
+			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
+			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
+			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
+			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
+			    IEEE80211_VHT_MCS_NOT_SUPPORTED << 14);
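+	/*
+	 * With IEEE80211_VHT_MCS_SUPPORT_0_9 == 2 and
+	 * IEEE80211_VHT_MCS_NOT_SUPPORTED == 3, the map built above is
+	 * 0xfffa: MCS 0-9 on two spatial streams, nothing beyond.
+	 */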
+
+	if (num_rx_ants == 1 || cfg->rx_with_siso_diversity) {
+		vht_cap->cap |= IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;
+		/* this works because NOT_SUPPORTED == 3 */
+		vht_cap->vht_mcs.rx_mcs_map |=
+			cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << 2);
+	}
+
+	vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map;
+}
+
+static struct ieee80211_sband_iftype_data iwl_he_capa = {
+	.types_mask = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP),
+	.he_cap = {
+		.has_he = true,
+		.he_cap_elem = {
+			.mac_cap_info[0] =
+				IEEE80211_HE_MAC_CAP0_HTC_HE,
+			.mac_cap_info[1] =
+				IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
+				IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8,
+			.mac_cap_info[2] =
+				IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP |
+				IEEE80211_HE_MAC_CAP2_ACK_EN,
+			.mac_cap_info[3] =
+				IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU |
+				IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2,
+			.mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU,
+			.phy_cap_info[0] =
+				IEEE80211_HE_PHY_CAP0_DUAL_BAND |
+				IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
+				IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+				IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G,
+			.phy_cap_info[1] =
+				IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+				IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
+				IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS,
+			.phy_cap_info[2] =
+				IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
+				IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+				IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ,
+			.phy_cap_info[3] =
+				IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK |
+				IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 |
+				IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK |
+				IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1,
+			.phy_cap_info[4] =
+				IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
+				IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 |
+				IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8,
+			.phy_cap_info[5] =
+				IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
+				IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2,
+			.phy_cap_info[6] =
+				IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
+			.phy_cap_info[7] =
+				IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR |
+				IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
+				IEEE80211_HE_PHY_CAP7_MAX_NC_7,
+			.phy_cap_info[8] =
+				IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
+				IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
+				IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
+				IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU,
+		},
+		/*
+		 * Set default Tx/Rx HE MCS NSS Support field. Indicate support
+		 * for up to 2 spatial streams and all MCS, without any special
+		 * cases
+		 */
+		.he_mcs_nss_supp = {
+			.rx_mcs_80 = cpu_to_le16(0xfffa),
+			.tx_mcs_80 = cpu_to_le16(0xfffa),
+			.rx_mcs_160 = cpu_to_le16(0xfffa),
+			.tx_mcs_160 = cpu_to_le16(0xfffa),
+			.rx_mcs_80p80 = cpu_to_le16(0xffff),
+			.tx_mcs_80p80 = cpu_to_le16(0xffff),
+		},
+		/*
+		 * Set default PPE thresholds, with PPET16 set to 0, PPET8 set
+		 * to 7
+		 */
+		.ppe_thres = {0x61, 0x1c, 0xc7, 0x71},
+	},
+};
+
+static void iwl_init_he_hw_capab(struct ieee80211_supported_band *sband,
+				 u8 tx_chains, u8 rx_chains)
+{
+	if (sband->band == NL80211_BAND_2GHZ ||
+	    sband->band == NL80211_BAND_5GHZ)
+		sband->iftype_data = &iwl_he_capa;
+	else
+		return;
+
+	sband->n_iftype_data = 1;
+
+	/* If not 2x2, we need to indicate 1x1 in the Midamble RX Max NSTS */
+	if ((tx_chains & rx_chains) != ANT_AB) {
+		iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[1] &=
+			~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS;
+		iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[2] &=
+			~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_MAX_NSTS;
+	}
+}
+
+static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
+			    struct iwl_nvm_data *data,
+			    const __le16 *nvm_ch_flags, u8 tx_chains,
+			    u8 rx_chains, u32 sbands_flags)
+{
+	int n_channels;
+	int n_used = 0;
+	struct ieee80211_supported_band *sband;
+
+	n_channels = iwl_init_channel_map(dev, cfg, data, nvm_ch_flags,
+					  sbands_flags);
+	sband = &data->bands[NL80211_BAND_2GHZ];
+	sband->band = NL80211_BAND_2GHZ;
+	sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
+	sband->n_bitrates = N_RATES_24;
+	n_used += iwl_init_sband_channels(data, sband, n_channels,
+					  NL80211_BAND_2GHZ);
+	iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ,
+			     tx_chains, rx_chains);
+
+	if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
+		iwl_init_he_hw_capab(sband, tx_chains, rx_chains);
+
+	sband = &data->bands[NL80211_BAND_5GHZ];
+	sband->band = NL80211_BAND_5GHZ;
+	sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
+	sband->n_bitrates = N_RATES_52;
+	n_used += iwl_init_sband_channels(data, sband, n_channels,
+					  NL80211_BAND_5GHZ);
+	iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_5GHZ,
+			     tx_chains, rx_chains);
+	if (data->sku_cap_11ac_enable && !iwlwifi_mod_params.disable_11ac)
+		iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap,
+				      tx_chains, rx_chains);
+
+	if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
+		iwl_init_he_hw_capab(sband, tx_chains, rx_chains);
+
+	if (n_channels != n_used)
+		IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",
+			    n_used, n_channels);
+}
+
+static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
+		       const __le16 *phy_sku)
+{
+	if (cfg->nvm_type != IWL_NVM_EXT)
+		return le16_to_cpup(nvm_sw + SKU);
+
+	return le32_to_cpup((__le32 *)(phy_sku + SKU_FAMILY_8000));
+}
+
+static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
+{
+	if (cfg->nvm_type != IWL_NVM_EXT)
+		return le16_to_cpup(nvm_sw + NVM_VERSION);
+	else
+		return le32_to_cpup((__le32 *)(nvm_sw +
+					       NVM_VERSION_EXT_NVM));
+}
+
+static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
+			     const __le16 *phy_sku)
+{
+	if (cfg->nvm_type != IWL_NVM_EXT)
+		return le16_to_cpup(nvm_sw + RADIO_CFG);
+
+	return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_EXT_NVM));
+}
+
+static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, const __le16 *nvm_sw)
+{
+	int n_hw_addr;
+
+	if (cfg->nvm_type != IWL_NVM_EXT)
+		return le16_to_cpup(nvm_sw + N_HW_ADDRS);
+
+	n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000));
+
+	return n_hw_addr & N_HW_ADDR_MASK;
+}
+
+static void iwl_set_radio_cfg(const struct iwl_cfg *cfg,
+			      struct iwl_nvm_data *data,
+			      u32 radio_cfg)
+{
+	if (cfg->nvm_type != IWL_NVM_EXT) {
+		data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg);
+		data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg);
+		data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg);
+		data->radio_cfg_pnum = NVM_RF_CFG_PNUM_MSK(radio_cfg);
+		return;
+	}
+
+	/* set the radio configuration for family 8000 */
+	data->radio_cfg_type = EXT_NVM_RF_CFG_TYPE_MSK(radio_cfg);
+	data->radio_cfg_step = EXT_NVM_RF_CFG_STEP_MSK(radio_cfg);
+	data->radio_cfg_dash = EXT_NVM_RF_CFG_DASH_MSK(radio_cfg);
+	data->radio_cfg_pnum = EXT_NVM_RF_CFG_FLAVOR_MSK(radio_cfg);
+	data->valid_tx_ant = EXT_NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
+	data->valid_rx_ant = EXT_NVM_RF_CFG_RX_ANT_MSK(radio_cfg);
+}
+
+static void iwl_flip_hw_address(__le32 mac_addr0, __le32 mac_addr1, u8 *dest)
+{
+	const u8 *hw_addr;
+
+	hw_addr = (const u8 *)&mac_addr0;
+	dest[0] = hw_addr[3];
+	dest[1] = hw_addr[2];
+	dest[2] = hw_addr[1];
+	dest[3] = hw_addr[0];
+
+	hw_addr = (const u8 *)&mac_addr1;
+	dest[4] = hw_addr[1];
+	dest[5] = hw_addr[0];
+}
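+
+/*
+ * Worked example: with mac_addr0 == cpu_to_le32(0x00112233) and
+ * mac_addr1 == cpu_to_le32(0x00004455), the byte flipping above yields
+ * the address 00:11:22:33:44:55.
+ */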
+
+static void iwl_set_hw_address_from_csr(struct iwl_trans *trans,
+					struct iwl_nvm_data *data)
+{
+	__le32 mac_addr0 =
+		cpu_to_le32(iwl_read32(trans,
+				       trans->cfg->csr->mac_addr0_strap));
+	__le32 mac_addr1 =
+		cpu_to_le32(iwl_read32(trans,
+				       trans->cfg->csr->mac_addr1_strap));
+
+	iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
+	/*
+	 * If the OEM fused a valid address, use it instead of the one in the
+	 * OTP
+	 */
+	if (is_valid_ether_addr(data->hw_addr))
+		return;
+
+	mac_addr0 = cpu_to_le32(iwl_read32(trans,
+					   trans->cfg->csr->mac_addr0_otp));
+	mac_addr1 = cpu_to_le32(iwl_read32(trans,
+					   trans->cfg->csr->mac_addr1_otp));
+
+	iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
+}
+
+static void iwl_set_hw_address_family_8000(struct iwl_trans *trans,
+					   const struct iwl_cfg *cfg,
+					   struct iwl_nvm_data *data,
+					   const __le16 *mac_override,
+					   const __be16 *nvm_hw)
+{
+	const u8 *hw_addr;
+
+	if (mac_override) {
+		static const u8 reserved_mac[] = {
+			0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
+		};
+
+		hw_addr = (const u8 *)(mac_override +
+				 MAC_ADDRESS_OVERRIDE_EXT_NVM);
+
+		/*
+		 * Store the MAC address from MAO section.
+		 * No byte swapping is required in MAO section
+		 */
+		memcpy(data->hw_addr, hw_addr, ETH_ALEN);
+
+		/*
+		 * Force the use of the OTP MAC address in case of reserved MAC
+		 * address in the NVM, or if address is given but invalid.
+		 */
+		if (is_valid_ether_addr(data->hw_addr) &&
+		    memcmp(reserved_mac, hw_addr, ETH_ALEN) != 0)
+			return;
+
+		IWL_ERR(trans,
+			"mac address from nvm override section is not valid\n");
+	}
+
+	if (nvm_hw) {
+		/* read the mac address from WFMP registers */
+		__le32 mac_addr0 = cpu_to_le32(iwl_trans_read_prph(trans,
+						WFMP_MAC_ADDR_0));
+		__le32 mac_addr1 = cpu_to_le32(iwl_trans_read_prph(trans,
+						WFMP_MAC_ADDR_1));
+
+		iwl_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
+
+		return;
+	}
+
+	IWL_ERR(trans, "mac address is not found\n");
+}
+
+static int iwl_set_hw_address(struct iwl_trans *trans,
+			      const struct iwl_cfg *cfg,
+			      struct iwl_nvm_data *data, const __be16 *nvm_hw,
+			      const __le16 *mac_override)
+{
+	if (cfg->mac_addr_from_csr) {
+		iwl_set_hw_address_from_csr(trans, data);
+	} else if (cfg->nvm_type != IWL_NVM_EXT) {
+		const u8 *hw_addr = (const u8 *)(nvm_hw + HW_ADDR);
+
+		/* The byte order is little endian 16 bit, meaning 214365 */
+		data->hw_addr[0] = hw_addr[1];
+		data->hw_addr[1] = hw_addr[0];
+		data->hw_addr[2] = hw_addr[3];
+		data->hw_addr[3] = hw_addr[2];
+		data->hw_addr[4] = hw_addr[5];
+		data->hw_addr[5] = hw_addr[4];
+	} else {
+		iwl_set_hw_address_family_8000(trans, cfg, data,
+					       mac_override, nvm_hw);
+	}
+
+	if (!is_valid_ether_addr(data->hw_addr)) {
+		IWL_ERR(trans, "no valid mac address was found\n");
+		return -EINVAL;
+	}
+
+	IWL_INFO(trans, "base HW address: %pM\n", data->hw_addr);
+
+	return 0;
+}
+
+static bool
+iwl_nvm_no_wide_in_5ghz(struct device *dev, const struct iwl_cfg *cfg,
+			const __be16 *nvm_hw)
+{
+	/*
+	 * Workaround a bug in Indonesia SKUs where the regulatory in
+	 * some 7000-family OTPs erroneously allow wide channels in
+	 * 5GHz.  To check for Indonesia, we take the SKU value from
+	 * bits 1-4 in the subsystem ID and check if it is either 5 or
+	 * 9.  In those cases, we need to force-disable wide channels
+	 * in 5GHz otherwise the FW will throw a sysassert when we try
+	 * to use them.
+	 */
+	if (cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+		/*
+		 * Unlike the other sections in the NVM, the hw
+		 * section uses big-endian.
+		 */
+		u16 subsystem_id = be16_to_cpup(nvm_hw + SUBSYSTEM_ID);
+		u8 sku = (subsystem_id & 0x1e) >> 1;
+
+		if (sku == 5 || sku == 9) {
+			IWL_DEBUG_EEPROM(dev,
+					 "disabling wide channels in 5GHz (0x%0x %d)\n",
+					 subsystem_id, sku);
+			return true;
+		}
+	}
+
+	return false;
+}
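+
+/*
+ * Worked example: a subsystem ID of 0x0012 gives
+ * sku = (0x0012 & 0x1e) >> 1 == 9, one of the Indonesia SKUs, so wide
+ * 5 GHz channels are force-disabled for that OTP.
+ */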
+
+struct iwl_nvm_data *
+iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+		   const __be16 *nvm_hw, const __le16 *nvm_sw,
+		   const __le16 *nvm_calib, const __le16 *regulatory,
+		   const __le16 *mac_override, const __le16 *phy_sku,
+		   u8 tx_chains, u8 rx_chains, bool lar_fw_supported)
+{
+	struct device *dev = trans->dev;
+	struct iwl_nvm_data *data;
+	bool lar_enabled;
+	u32 sku, radio_cfg;
+	u32 sbands_flags = 0;
+	u16 lar_config;
+	const __le16 *ch_section;
+
+	if (cfg->nvm_type != IWL_NVM_EXT)
+		data = kzalloc(sizeof(*data) +
+			       sizeof(struct ieee80211_channel) *
+			       IWL_NVM_NUM_CHANNELS,
+			       GFP_KERNEL);
+	else
+		data = kzalloc(sizeof(*data) +
+			       sizeof(struct ieee80211_channel) *
+			       IWL_NVM_NUM_CHANNELS_EXT,
+			       GFP_KERNEL);
+	if (!data)
+		return NULL;
+
+	data->nvm_version = iwl_get_nvm_version(cfg, nvm_sw);
+
+	radio_cfg = iwl_get_radio_cfg(cfg, nvm_sw, phy_sku);
+	iwl_set_radio_cfg(cfg, data, radio_cfg);
+	if (data->valid_tx_ant)
+		tx_chains &= data->valid_tx_ant;
+	if (data->valid_rx_ant)
+		rx_chains &= data->valid_rx_ant;
+
+	sku = iwl_get_sku(cfg, nvm_sw, phy_sku);
+	data->sku_cap_band_24ghz_enable = sku & NVM_SKU_CAP_BAND_24GHZ;
+	data->sku_cap_band_52ghz_enable = sku & NVM_SKU_CAP_BAND_52GHZ;
+	data->sku_cap_11n_enable = sku & NVM_SKU_CAP_11N_ENABLE;
+	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
+		data->sku_cap_11n_enable = false;
+	data->sku_cap_11ac_enable = data->sku_cap_11n_enable &&
+				    (sku & NVM_SKU_CAP_11AC_ENABLE);
+	data->sku_cap_mimo_disabled = sku & NVM_SKU_CAP_MIMO_DISABLE;
+
+	data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw);
+
+	if (cfg->nvm_type != IWL_NVM_EXT) {
+		/* Checking for required sections */
+		if (!nvm_calib) {
+			IWL_ERR(trans,
+				"Can't parse empty Calib NVM sections\n");
+			kfree(data);
+			return NULL;
+		}
+
+		ch_section = cfg->nvm_type == IWL_NVM_SDP ?
+			     &regulatory[NVM_CHANNELS_SDP] :
+			     &nvm_sw[NVM_CHANNELS];
+
+		/* in family 8000 Xtal calibration values moved to OTP */
+		data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB);
+		data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1);
+		lar_enabled = true;
+	} else {
+		u16 lar_offset = data->nvm_version < 0xE39 ?
+				 NVM_LAR_OFFSET_OLD :
+				 NVM_LAR_OFFSET;
+
+		lar_config = le16_to_cpup(regulatory + lar_offset);
+		data->lar_enabled = !!(lar_config &
+				       NVM_LAR_ENABLED);
+		lar_enabled = data->lar_enabled;
+		ch_section = &regulatory[NVM_CHANNELS_EXTENDED];
+	}
+
+	/* If no valid mac address was found - bail out */
+	if (iwl_set_hw_address(trans, cfg, data, nvm_hw, mac_override)) {
+		kfree(data);
+		return NULL;
+	}
+
+	if (lar_fw_supported && lar_enabled)
+		sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
+
+	if (iwl_nvm_no_wide_in_5ghz(dev, cfg, nvm_hw))
+		sbands_flags |= IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ;
+
+	iwl_init_sbands(dev, cfg, data, ch_section, tx_chains, rx_chains,
+			sbands_flags);
+	data->calib_version = 255;
+
+	return data;
+}
+IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
+
+static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan,
+				       int ch_idx, u16 nvm_flags,
+				       const struct iwl_cfg *cfg)
+{
+	u32 flags = NL80211_RRF_NO_HT40;
+	u32 last_5ghz_ht = LAST_5GHZ_HT;
+
+	if (cfg->nvm_type == IWL_NVM_EXT)
+		last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000;
+
+	if (ch_idx < NUM_2GHZ_CHANNELS &&
+	    (nvm_flags & NVM_CHANNEL_40MHZ)) {
+		if (nvm_chan[ch_idx] <= LAST_2GHZ_HT_PLUS)
+			flags &= ~NL80211_RRF_NO_HT40PLUS;
+		if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS)
+			flags &= ~NL80211_RRF_NO_HT40MINUS;
+	} else if (nvm_chan[ch_idx] <= last_5ghz_ht &&
+		   (nvm_flags & NVM_CHANNEL_40MHZ)) {
+		if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
+			flags &= ~NL80211_RRF_NO_HT40PLUS;
+		else
+			flags &= ~NL80211_RRF_NO_HT40MINUS;
+	}
+
+	if (!(nvm_flags & NVM_CHANNEL_80MHZ))
+		flags |= NL80211_RRF_NO_80MHZ;
+	if (!(nvm_flags & NVM_CHANNEL_160MHZ))
+		flags |= NL80211_RRF_NO_160MHZ;
+
+	if (!(nvm_flags & NVM_CHANNEL_ACTIVE))
+		flags |= NL80211_RRF_NO_IR;
+
+	if (nvm_flags & NVM_CHANNEL_RADAR)
+		flags |= NL80211_RRF_DFS;
+
+	if (nvm_flags & NVM_CHANNEL_INDOOR_ONLY)
+		flags |= NL80211_RRF_NO_OUTDOOR;
+
+	/* Set the GO concurrent flag only when NO_IR is set,
+	 * otherwise it is meaningless.
+	 */
+	if ((nvm_flags & NVM_CHANNEL_GO_CONCURRENT) &&
+	    (flags & NL80211_RRF_NO_IR))
+		flags |= NL80211_RRF_GO_CONCURRENT;
+
+	return flags;
+}
+
+struct regdb_ptrs {
+	struct ieee80211_wmm_rule *rule;
+	u32 token;
+};
+
+struct ieee80211_regdomain *
+iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
+		       int num_of_ch, __le32 *channels, u16 fw_mcc,
+		       u16 geo_info)
+{
+	int ch_idx;
+	u16 ch_flags;
+	u32 reg_rule_flags, prev_reg_rule_flags = 0;
+	const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ?
+			     iwl_ext_nvm_channels : iwl_nvm_channels;
+	struct ieee80211_regdomain *regd, *copy_rd;
+	int size_of_regd, regd_to_copy;
+	struct ieee80211_reg_rule *rule;
+	struct regdb_ptrs *regdb_ptrs;
+	enum nl80211_band band;
+	int center_freq, prev_center_freq = 0;
+	int valid_rules = 0;
+	bool new_rule;
+	int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ?
+			 IWL_NVM_NUM_CHANNELS_EXT : IWL_NVM_NUM_CHANNELS;
+
+	if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES))
+		return ERR_PTR(-EINVAL);
+
+	if (WARN_ON(num_of_ch > max_num_ch))
+		num_of_ch = max_num_ch;
+
+	IWL_DEBUG_DEV(dev, IWL_DL_LAR, "building regdom for %d channels\n",
+		      num_of_ch);
+
+	/* build a regdomain rule for every valid channel */
+	size_of_regd =
+		sizeof(struct ieee80211_regdomain) +
+		num_of_ch * sizeof(struct ieee80211_reg_rule);
+
+	regd = kzalloc(size_of_regd, GFP_KERNEL);
+	if (!regd)
+		return ERR_PTR(-ENOMEM);
+
+	regdb_ptrs = kcalloc(num_of_ch, sizeof(*regdb_ptrs), GFP_KERNEL);
+	if (!regdb_ptrs) {
+		copy_rd = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+
+	/* set alpha2 from FW. */
+	regd->alpha2[0] = fw_mcc >> 8;
+	regd->alpha2[1] = fw_mcc & 0xff;
+
+	for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
+		ch_flags = (u16)__le32_to_cpup(channels + ch_idx);
+		band = (ch_idx < NUM_2GHZ_CHANNELS) ?
+		       NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+		center_freq = ieee80211_channel_to_frequency(nvm_chan[ch_idx],
+							     band);
+		new_rule = false;
+
+		if (!(ch_flags & NVM_CHANNEL_VALID)) {
+			iwl_nvm_print_channel_flags(dev, IWL_DL_LAR,
+						    nvm_chan[ch_idx], ch_flags);
+			continue;
+		}
+
+		reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
+							     ch_flags, cfg);
+
+		/* we can't continue the same rule */
+		if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags ||
+		    center_freq - prev_center_freq > 20) {
+			valid_rules++;
+			new_rule = true;
+		}
+
+		rule = &regd->reg_rules[valid_rules - 1];
+
+		if (new_rule)
+			rule->freq_range.start_freq_khz =
+						MHZ_TO_KHZ(center_freq - 10);
+
+		rule->freq_range.end_freq_khz = MHZ_TO_KHZ(center_freq + 10);
+
+		/* this doesn't matter - not used by FW */
+		rule->power_rule.max_antenna_gain = DBI_TO_MBI(6);
+		rule->power_rule.max_eirp =
+			DBM_TO_MBM(IWL_DEFAULT_MAX_TX_POWER);
+
+		rule->flags = reg_rule_flags;
+
+		/* rely on auto-calculation to merge BW of contiguous chans */
+		rule->flags |= NL80211_RRF_AUTO_BW;
+		rule->freq_range.max_bandwidth_khz = 0;
+
+		prev_center_freq = center_freq;
+		prev_reg_rule_flags = reg_rule_flags;
+
+		iwl_nvm_print_channel_flags(dev, IWL_DL_LAR,
+					    nvm_chan[ch_idx], ch_flags);
+
+		if (!(geo_info & GEO_WMM_ETSI_5GHZ_INFO) ||
+		    band == NL80211_BAND_2GHZ)
+			continue;
+
+		reg_query_regdb_wmm(regd->alpha2, center_freq, rule);
+	}
+
+	regd->n_reg_rules = valid_rules;
+
+	/*
+	 * Narrow the regdomain down to only the valid rules, to prevent a
+	 * hole between the reg rules and the wmm rules.
+	 */
+	regd_to_copy = sizeof(struct ieee80211_regdomain) +
+		valid_rules * sizeof(struct ieee80211_reg_rule);
+
+	copy_rd = kzalloc(regd_to_copy, GFP_KERNEL);
+	if (!copy_rd) {
+		copy_rd = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+
+	memcpy(copy_rd, regd, regd_to_copy);
+
+out:
+	kfree(regdb_ptrs);
+	kfree(regd);
+	return copy_rd;
+}
+IWL_EXPORT_SYMBOL(iwl_parse_nvm_mcc_info);
+
+#define IWL_MAX_NVM_SECTION_SIZE	0x1b58
+#define IWL_MAX_EXT_NVM_SECTION_SIZE	0x1ffc
+#define MAX_NVM_FILE_LEN	16384
+
+void iwl_nvm_fixups(u32 hw_id, unsigned int section, u8 *data,
+		    unsigned int len)
+{
+#define IWL_4165_DEVICE_ID	0x5501
+#define NVM_SKU_CAP_MIMO_DISABLE BIT(5)
+
+	if (section == NVM_SECTION_TYPE_PHY_SKU &&
+	    hw_id == IWL_4165_DEVICE_ID && data && len >= 5 &&
+	    (data[4] & NVM_SKU_CAP_MIMO_DISABLE))
+		/* OTP 0x52 bug workaround: it's a 1x1 device */
+		data[3] = ANT_B | (ANT_B << 4);
+}
+IWL_EXPORT_SYMBOL(iwl_nvm_fixups);
+
+/*
+ * Reads external NVM from a file into mvm->nvm_sections
+ *
+ * HOW TO CREATE THE NVM FILE:
+ * ---------------------------
+ * 1. create a hex file in the following format:
+ *      3800 -> header
+ *      0000 -> header
+ *      5a40 -> data
+ *
+ *   rev - 6 bit (word1)
+ *   len - 10 bit (word1)
+ *   id - 4 bit (word2)
+ *   rsv - 12 bit (word2)
+ *
+ * 2. byte-swap each 16-bit word (i.e. flip each pair of 8-bit values) to
+ *    get the right NVM file format
+ *
+ * 3. create binary file from the hex file
+ *
+ * 4. save as "iNVM_xxx.bin" under /lib/firmware
+ */
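+
+/*
+ * For illustration, a hedged decode of one hypothetical section header in
+ * the legacy (non-EXT) layout, using the NVM_WORD* macros defined inside
+ * iwl_read_external_nvm() below (the example values are made up):
+ *
+ *	word1 = 0x0038:	len = word1 & 0x03FF = 0x38
+ *			section_size = 2 * 8 * 0x38 = 896 bytes
+ *	word2 = 0x1000:	id = word2 >> 12 = 1
+ */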
+int iwl_read_external_nvm(struct iwl_trans *trans,
+			  const char *nvm_file_name,
+			  struct iwl_nvm_section *nvm_sections)
+{
+	int ret, section_size;
+	u16 section_id;
+	const struct firmware *fw_entry;
+	const struct {
+		__le16 word1;
+		__le16 word2;
+		u8 data[];
+	} *file_sec;
+	const u8 *eof;
+	u8 *temp;
+	int max_section_size;
+	const __le32 *dword_buff;
+
+#define NVM_WORD1_LEN(x) (8 * ((x) & 0x03FF))
+#define NVM_WORD2_ID(x) ((x) >> 12)
+#define EXT_NVM_WORD2_LEN(x) (2 * (((x) & 0xFF) << 8 | (x) >> 8))
+#define EXT_NVM_WORD1_ID(x) ((x) >> 4)
+#define NVM_HEADER_0	(0x2A504C54)
+#define NVM_HEADER_1	(0x4E564D2A)
+#define NVM_HEADER_SIZE	(4 * sizeof(u32))
+
+	IWL_DEBUG_EEPROM(trans->dev, "Read from external NVM\n");
+
+	/* Maximal size depends on NVM version */
+	if (trans->cfg->nvm_type != IWL_NVM_EXT)
+		max_section_size = IWL_MAX_NVM_SECTION_SIZE;
+	else
+		max_section_size = IWL_MAX_EXT_NVM_SECTION_SIZE;
+
+	/*
+	 * Obtain the NVM image via request_firmware(). Since we already used
+	 * request_firmware_nowait() for the firmware binary load, and only
+	 * get here after that, we assume the NVM request can be satisfied
+	 * synchronously.
+	 */
+	ret = request_firmware(&fw_entry, nvm_file_name, trans->dev);
+	if (ret) {
+		IWL_ERR(trans, "ERROR: %s isn't available %d\n",
+			nvm_file_name, ret);
+		return ret;
+	}
+
+	IWL_INFO(trans, "Loaded NVM file %s (%zu bytes)\n",
+		 nvm_file_name, fw_entry->size);
+
+	if (fw_entry->size > MAX_NVM_FILE_LEN) {
+		IWL_ERR(trans, "NVM file too large\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	eof = fw_entry->data + fw_entry->size;
+	dword_buff = (__le32 *)fw_entry->data;
+
+	/* Some NVM files contain a header.
+	 * The header is identified by the following two dwords:
+	 * dword[0] = 0x2A504C54
+	 * dword[1] = 0x4E564D2A
+	 *
+	 * This header must be skipped when providing the NVM data to the FW.
+	 */
+	if (fw_entry->size > NVM_HEADER_SIZE &&
+	    dword_buff[0] == cpu_to_le32(NVM_HEADER_0) &&
+	    dword_buff[1] == cpu_to_le32(NVM_HEADER_1)) {
+		file_sec = (void *)(fw_entry->data + NVM_HEADER_SIZE);
+		IWL_INFO(trans, "NVM Version %08X\n", le32_to_cpu(dword_buff[2]));
+		IWL_INFO(trans, "NVM Manufacturing date %08X\n",
+			 le32_to_cpu(dword_buff[3]));
+
+		/* nvm file validation, dword_buff[2] holds the file version */
+		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
+		    CSR_HW_REV_STEP(trans->hw_rev) == SILICON_C_STEP &&
+		    le32_to_cpu(dword_buff[2]) < 0xE4A) {
+			ret = -EFAULT;
+			goto out;
+		}
+	} else {
+		file_sec = (void *)fw_entry->data;
+	}
+
+	while (true) {
+		if (file_sec->data > eof) {
+			IWL_ERR(trans,
+				"ERROR - NVM file too short for section header\n");
+			ret = -EINVAL;
+			break;
+		}
+
+		/* check for EOF marker */
+		if (!file_sec->word1 && !file_sec->word2) {
+			ret = 0;
+			break;
+		}
+
+		if (trans->cfg->nvm_type != IWL_NVM_EXT) {
+			section_size =
+				2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1));
+			section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2));
+		} else {
+			section_size = 2 * EXT_NVM_WORD2_LEN(
+						le16_to_cpu(file_sec->word2));
+			section_id = EXT_NVM_WORD1_ID(
+						le16_to_cpu(file_sec->word1));
+		}
+
+		if (section_size > max_section_size) {
+			IWL_ERR(trans, "ERROR - section too large (%d)\n",
+				section_size);
+			ret = -EINVAL;
+			break;
+		}
+
+		if (!section_size) {
+			IWL_ERR(trans, "ERROR - section empty\n");
+			ret = -EINVAL;
+			break;
+		}
+
+		if (file_sec->data + section_size > eof) {
+			IWL_ERR(trans,
+				"ERROR - NVM file too short for section (%d bytes)\n",
+				section_size);
+			ret = -EINVAL;
+			break;
+		}
+
+		if (WARN(section_id >= NVM_MAX_NUM_SECTIONS,
+			 "Invalid NVM section ID %d\n", section_id)) {
+			ret = -EINVAL;
+			break;
+		}
+
+		temp = kmemdup(file_sec->data, section_size, GFP_KERNEL);
+		if (!temp) {
+			ret = -ENOMEM;
+			break;
+		}
+
+		iwl_nvm_fixups(trans->hw_id, section_id, temp, section_size);
+
+		kfree(nvm_sections[section_id].data);
+		nvm_sections[section_id].data = temp;
+		nvm_sections[section_id].length = section_size;
+
+		/* advance to the next section */
+		file_sec = (void *)(file_sec->data + section_size);
+	}
+out:
+	release_firmware(fw_entry);
+	return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_read_external_nvm);
+
+struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
+				 const struct iwl_fw *fw)
+{
+	struct iwl_nvm_get_info cmd = {};
+	struct iwl_nvm_get_info_rsp *rsp;
+	struct iwl_nvm_data *nvm;
+	struct iwl_host_cmd hcmd = {
+		.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
+		.data = { &cmd, },
+		.len = { sizeof(cmd) },
+		.id = WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_GET_INFO)
+	};
+	int ret;
+	bool lar_fw_supported = !iwlwifi_mod_params.lar_disable &&
+				fw_has_capa(&fw->ucode_capa,
+					    IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
+	u32 mac_flags;
+	u32 sbands_flags = 0;
+
+	ret = iwl_trans_send_cmd(trans, &hcmd);
+	if (ret)
+		return ERR_PTR(ret);
+
+	if (WARN(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp),
+		 "Invalid payload len in NVM response from FW %d",
+		 iwl_rx_packet_payload_len(hcmd.resp_pkt))) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	rsp = (void *)hcmd.resp_pkt->data;
+	if (le32_to_cpu(rsp->general.flags) & NVM_GENERAL_FLAGS_EMPTY_OTP)
+		IWL_INFO(trans, "OTP is empty\n");
+
+	nvm = kzalloc(sizeof(*nvm) +
+		      sizeof(struct ieee80211_channel) * IWL_NUM_CHANNELS,
+		      GFP_KERNEL);
+	if (!nvm) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	iwl_set_hw_address_from_csr(trans, nvm);
+	/* TODO: if platform NVM has MAC address - override it here */
+
+	if (!is_valid_ether_addr(nvm->hw_addr)) {
+		IWL_ERR(trans, "no valid mac address was found\n");
+		ret = -EINVAL;
+		goto err_free;
+	}
+
+	IWL_INFO(trans, "base HW address: %pM\n", nvm->hw_addr);
+
+	/* Initialize general data */
+	nvm->nvm_version = le16_to_cpu(rsp->general.nvm_version);
+
+	/* Initialize MAC sku data */
+	mac_flags = le32_to_cpu(rsp->mac_sku.mac_sku_flags);
+	nvm->sku_cap_11ac_enable =
+		!!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
+	nvm->sku_cap_11n_enable =
+		!!(mac_flags & NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
+	nvm->sku_cap_11ax_enable =
+		!!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
+	nvm->sku_cap_band_24ghz_enable =
+		!!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
+	nvm->sku_cap_band_52ghz_enable =
+		!!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
+	nvm->sku_cap_mimo_disabled =
+		!!(mac_flags & NVM_MAC_SKU_FLAGS_MIMO_DISABLED);
+
+	/* Initialize PHY sku data */
+	nvm->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains);
+	nvm->valid_rx_ant = (u8)le32_to_cpu(rsp->phy_sku.rx_chains);
+
+	if (le32_to_cpu(rsp->regulatory.lar_enabled) && lar_fw_supported) {
+		nvm->lar_enabled = true;
+		sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR;
+	}
+
+	iwl_init_sbands(trans->dev, trans->cfg, nvm,
+			rsp->regulatory.channel_profile,
+			nvm->valid_tx_ant & fw->valid_tx_ant,
+			nvm->valid_rx_ant & fw->valid_rx_ant,
+			sbands_flags);
+
+	iwl_free_resp(&hcmd);
+	return nvm;
+
+err_free:
+	kfree(nvm);
+out:
+	iwl_free_resp(&hcmd);
+	return ERR_PTR(ret);
+}
+IWL_EXPORT_SYMBOL(iwl_get_nvm);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
new file mode 100644
index 0000000..234d100
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
@@ -0,0 +1,142 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+#ifndef __iwl_nvm_parse_h__
+#define __iwl_nvm_parse_h__
+
+#include <net/cfg80211.h>
+#include "iwl-eeprom-parse.h"
+
+/**
+ * enum iwl_nvm_sbands_flags - modification flags for the channel profiles
+ *
+ * @IWL_NVM_SBANDS_FLAGS_LAR: LAR is enabled
+ * @IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ: disallow 40, 80 and 160MHz on 5GHz
+ */
+enum iwl_nvm_sbands_flags {
+	IWL_NVM_SBANDS_FLAGS_LAR		= BIT(0),
+	IWL_NVM_SBANDS_FLAGS_NO_WIDE_IN_5GHZ	= BIT(1),
+};
+
+/**
+ * iwl_parse_nvm_data - parse NVM data and return values
+ *
+ * This function parses all NVM values we need and then
+ * returns a (newly allocated) struct containing all the
+ * relevant values for driver use. The struct must be freed
+ * later with iwl_free_nvm_data().
+ */
+struct iwl_nvm_data *
+iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+		   const __be16 *nvm_hw, const __le16 *nvm_sw,
+		   const __le16 *nvm_calib, const __le16 *regulatory,
+		   const __le16 *mac_override, const __le16 *phy_sku,
+		   u8 tx_chains, u8 rx_chains, bool lar_fw_supported);
+
+/**
+ * iwl_parse_nvm_mcc_info - parse MCC (mobile country code) info coming from FW
+ *
+ * This function parses the regulatory channel data received as an
+ * MCC_UPDATE_CMD command. It returns a newly allocated regulatory domain,
+ * to be fed into the regulatory core. If geo_info is set, it is handled
+ * accordingly. An ERR_PTR is returned on error.
+ * If not given to the regulatory core, the caller is responsible for freeing
+ * the regdomain returned here with kfree().
+ */
+struct ieee80211_regdomain *
+iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
+		       int num_of_ch, __le32 *channels, u16 fw_mcc,
+		       u16 geo_info);
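+
+/*
+ * Illustrative usage sketch (not part of this API; "wiphy" and the error
+ * handling shown are hypothetical, and regulatory_set_wiphy_regd() is the
+ * cfg80211 helper that applies a regdomain to a wiphy):
+ *
+ *	struct ieee80211_regdomain *regd;
+ *
+ *	regd = iwl_parse_nvm_mcc_info(dev, cfg, num_of_ch, channels,
+ *				      fw_mcc, geo_info);
+ *	if (IS_ERR(regd))
+ *		return PTR_ERR(regd);
+ *	ret = regulatory_set_wiphy_regd(wiphy, regd);
+ *	kfree(regd);
+ */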
+
+/**
+ * struct iwl_nvm_section - describes an NVM section in memory.
+ * @length: length of the section data, in bytes.
+ * @data: the section data itself.
+ *
+ * This struct holds an NVM section read from the NIC using NVM_ACCESS_CMD,
+ * and saved for later use by the driver. Not all NVM sections are saved
+ * this way, only the needed ones.
+ */
+struct iwl_nvm_section {
+	u16 length;
+	const u8 *data;
+};
+
+/**
+ * iwl_read_external_nvm - Reads external NVM from a file into nvm_sections
+ */
+int iwl_read_external_nvm(struct iwl_trans *trans,
+			  const char *nvm_file_name,
+			  struct iwl_nvm_section *nvm_sections);
+void iwl_nvm_fixups(u32 hw_id, unsigned int section, u8 *data,
+		    unsigned int len);
+
+/**
+ * iwl_get_nvm - retrieve NVM data from firmware
+ *
+ * Allocates a new iwl_nvm_data structure, fills it with
+ * NVM data, and returns it to the caller.
+ */
+struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
+				 const struct iwl_fw *fw);
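+
+/*
+ * Illustrative usage sketch (the caller is hypothetical; per the
+ * implementation, the returned struct is kzalloc'ed and an ERR_PTR is
+ * returned on failure, so it may be freed with kfree() when done):
+ *
+ *	struct iwl_nvm_data *nvm = iwl_get_nvm(trans, fw);
+ *
+ *	if (IS_ERR(nvm))
+ *		return PTR_ERR(nvm);
+ */
+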
+#endif /* __iwl_nvm_parse_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
new file mode 100644
index 0000000..b49eda8
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
@@ -0,0 +1,282 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015        Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015        Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_op_mode_h__
+#define __iwl_op_mode_h__
+
+#include <linux/netdevice.h>
+#include <linux/debugfs.h>
+
+struct iwl_op_mode;
+struct iwl_trans;
+struct sk_buff;
+struct iwl_device_cmd;
+struct iwl_rx_cmd_buffer;
+struct iwl_fw;
+struct iwl_cfg;
+
+/**
+ * DOC: Operational mode - what is it?
+ *
+ * The operational mode (a.k.a. op_mode) is the layer that implements
+ * mac80211's handlers. It knows two APIs: mac80211's and the fw's. It uses
+ * the transport API to access the HW. The op_mode doesn't need to know how the
+ * underlying HW works, since the transport layer takes care of that.
+ *
+ * There can be several op_modes, e.g. different fw APIs will require
+ * different op_modes. This is why the op_mode is virtualized.
+ */
+
+/**
+ * DOC: Life cycle of the Operational mode
+ *
+ * The operational mode has a very simple life cycle.
+ *
+ *	1) The driver layer (iwl-drv.c) chooses the op_mode based on the
+ *	   capabilities advertised by the fw file (in TLV format).
+ *	2) The driver layer starts the op_mode (ops->start)
+ *	3) The op_mode registers mac80211
+ *	4) The op_mode is governed by mac80211
+ *	5) The driver layer stops the op_mode
+ */
+
+/**
+ * struct iwl_op_mode_ops - op_mode specific operations
+ *
+ * The op_mode exports its ops so that external components can start it and
+ * interact with it. The driver layer typically calls the start and stop
+ * handlers, the transport layer calls the others.
+ *
+ * All the handlers MUST be implemented, except @rx_rss which can be left
+ * out *iff* the opmode will never run on hardware with multi-queue capability.
+ *
+ * @start: start the op_mode. The transport layer is already allocated.
+ *	May sleep
+ * @stop: stop the op_mode. Must free all the memory allocated.
+ *	May sleep
+ * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
+ *	HCMD this Rx responds to. Can't sleep.
+ * @rx_rss: data queue RX notification to the op_mode, for (data) notifications
+ *	received on the RSS queue(s). The queue parameter indicates which of the
+ *	RSS queues received this frame; it will always be non-zero.
+ *	This method must not sleep.
+ * @async_cb: called when an ASYNC command with CMD_WANT_ASYNC_CALLBACK set
+ *	completes. Must be atomic.
+ * @queue_full: notifies that a HW queue is full.
+ *	Must be atomic and called with BH disabled.
+ * @queue_not_full: notifies that a HW queue is not full any more.
+ *	Must be atomic and called with BH disabled.
+ * @hw_rf_kill: notifies of a change in the HW rf kill switch. True means that
+ *	the radio is killed. Return %true if the device should be stopped by
+ *	the transport immediately after the call. May sleep.
+ * @free_skb: allows the transport layer to free skbs that haven't been
+ *	reclaimed by the op_mode. This can happen when the driver is freed and
+ *	there are Tx packets pending in the transport layer.
+ *	Must be atomic
+ * @nic_error: error notification. Must be atomic and must be called with BH
+ *	disabled.
+ * @cmd_queue_full: Called when the command queue gets full. Must be atomic and
+ *	called with BH disabled.
+ * @nic_config: configure NIC, called before firmware is started.
+ *	May sleep
+ * @wimax_active: invoked when WiMax becomes active. May sleep
+ * @enter_d0i3: configure the fw to enter d0i3. Return 1 to indicate that
+ *	d0i3 entry was aborted (e.g. due to a held reference). May sleep.
+ * @exit_d0i3: configure the fw to exit d0i3. May sleep.
+ */
+struct iwl_op_mode_ops {
+	struct iwl_op_mode *(*start)(struct iwl_trans *trans,
+				     const struct iwl_cfg *cfg,
+				     const struct iwl_fw *fw,
+				     struct dentry *dbgfs_dir);
+	void (*stop)(struct iwl_op_mode *op_mode);
+	void (*rx)(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+		   struct iwl_rx_cmd_buffer *rxb);
+	void (*rx_rss)(struct iwl_op_mode *op_mode, struct napi_struct *napi,
+		       struct iwl_rx_cmd_buffer *rxb, unsigned int queue);
+	void (*async_cb)(struct iwl_op_mode *op_mode,
+			 const struct iwl_device_cmd *cmd);
+	void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
+	void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
+	bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
+	void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
+	void (*nic_error)(struct iwl_op_mode *op_mode);
+	void (*cmd_queue_full)(struct iwl_op_mode *op_mode);
+	void (*nic_config)(struct iwl_op_mode *op_mode);
+	void (*wimax_active)(struct iwl_op_mode *op_mode);
+	int (*enter_d0i3)(struct iwl_op_mode *op_mode);
+	int (*exit_d0i3)(struct iwl_op_mode *op_mode);
+};
+
+int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops);
+void iwl_opmode_deregister(const char *name);
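+
+/*
+ * Example registration (an illustrative sketch only; the "iwlfoo" name and
+ * the handler functions are hypothetical):
+ *
+ *	static const struct iwl_op_mode_ops iwl_foo_ops = {
+ *		.start = iwl_foo_start,
+ *		.stop = iwl_foo_stop,
+ *		.rx = iwl_foo_rx,
+ *		...
+ *	};
+ *
+ *	ret = iwl_opmode_register("iwlfoo", &iwl_foo_ops);
+ */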
+
+/**
+ * struct iwl_op_mode - operational mode
+ * @ops: pointer to its own ops
+ *
+ * This holds an implementation of the mac80211 / fw API.
+ */
+struct iwl_op_mode {
+	const struct iwl_op_mode_ops *ops;
+
+	char op_mode_specific[0] __aligned(sizeof(void *));
+};
+
+static inline void iwl_op_mode_stop(struct iwl_op_mode *op_mode)
+{
+	might_sleep();
+	op_mode->ops->stop(op_mode);
+}
+
+static inline void iwl_op_mode_rx(struct iwl_op_mode *op_mode,
+				  struct napi_struct *napi,
+				  struct iwl_rx_cmd_buffer *rxb)
+{
+	op_mode->ops->rx(op_mode, napi, rxb);
+}
+
+static inline void iwl_op_mode_rx_rss(struct iwl_op_mode *op_mode,
+				      struct napi_struct *napi,
+				      struct iwl_rx_cmd_buffer *rxb,
+				      unsigned int queue)
+{
+	op_mode->ops->rx_rss(op_mode, napi, rxb, queue);
+}
+
+static inline void iwl_op_mode_async_cb(struct iwl_op_mode *op_mode,
+					const struct iwl_device_cmd *cmd)
+{
+	if (op_mode->ops->async_cb)
+		op_mode->ops->async_cb(op_mode, cmd);
+}
+
+static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode,
+					  int queue)
+{
+	op_mode->ops->queue_full(op_mode, queue);
+}
+
+static inline void iwl_op_mode_queue_not_full(struct iwl_op_mode *op_mode,
+					      int queue)
+{
+	op_mode->ops->queue_not_full(op_mode, queue);
+}
+
+static inline bool __must_check
+iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode, bool state)
+{
+	might_sleep();
+	return op_mode->ops->hw_rf_kill(op_mode, state);
+}
+
+static inline void iwl_op_mode_free_skb(struct iwl_op_mode *op_mode,
+					struct sk_buff *skb)
+{
+	op_mode->ops->free_skb(op_mode, skb);
+}
+
+static inline void iwl_op_mode_nic_error(struct iwl_op_mode *op_mode)
+{
+	op_mode->ops->nic_error(op_mode);
+}
+
+static inline void iwl_op_mode_cmd_queue_full(struct iwl_op_mode *op_mode)
+{
+	op_mode->ops->cmd_queue_full(op_mode);
+}
+
+static inline void iwl_op_mode_nic_config(struct iwl_op_mode *op_mode)
+{
+	might_sleep();
+	op_mode->ops->nic_config(op_mode);
+}
+
+static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)
+{
+	might_sleep();
+	op_mode->ops->wimax_active(op_mode);
+}
+
+static inline int iwl_op_mode_enter_d0i3(struct iwl_op_mode *op_mode)
+{
+	might_sleep();
+
+	if (!op_mode->ops->enter_d0i3)
+		return 0;
+	return op_mode->ops->enter_d0i3(op_mode);
+}
+
+static inline int iwl_op_mode_exit_d0i3(struct iwl_op_mode *op_mode)
+{
+	might_sleep();
+
+	if (!op_mode->ops->exit_d0i3)
+		return 0;
+	return op_mode->ops->exit_d0i3(op_mode);
+}
+
+#endif /* __iwl_op_mode_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c
new file mode 100644
index 0000000..b7cd813
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c
@@ -0,0 +1,489 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/export.h>
+
+#include "iwl-drv.h"
+#include "iwl-phy-db.h"
+#include "iwl-debug.h"
+#include "iwl-op-mode.h"
+#include "iwl-trans.h"
+
+#define CHANNEL_NUM_SIZE	4	/* num of channels in calib_ch size */
+
+struct iwl_phy_db_entry {
+	u16	size;
+	u8	*data;
+};
+
+/**
+ * struct iwl_phy_db - stores phy configuration and calibration data.
+ *
+ * @cfg: phy configuration.
+ * @calib_nch: non channel specific calibration data.
+ * @n_group_papd: number of entries in the papd channel group.
+ * @calib_ch_group_papd: calibration data related to the papd channel group.
+ * @n_group_txp: number of entries in the tx power channel group.
+ * @calib_ch_group_txp: calibration data related to the tx power channel
+ *	group.
+ * @trans: transport layer, used to send the calibration data to the fw.
+ */
+struct iwl_phy_db {
+	struct iwl_phy_db_entry	cfg;
+	struct iwl_phy_db_entry	calib_nch;
+	int n_group_papd;
+	struct iwl_phy_db_entry	*calib_ch_group_papd;
+	int n_group_txp;
+	struct iwl_phy_db_entry	*calib_ch_group_txp;
+
+	struct iwl_trans *trans;
+};
+
+enum iwl_phy_db_section_type {
+	IWL_PHY_DB_CFG = 1,
+	IWL_PHY_DB_CALIB_NCH,
+	IWL_PHY_DB_UNUSED,
+	IWL_PHY_DB_CALIB_CHG_PAPD,
+	IWL_PHY_DB_CALIB_CHG_TXP,
+	IWL_PHY_DB_MAX
+};
+
+#define PHY_DB_CMD 0x6c
+
+/* for parsing tx power channel group data that comes from the firmware */
+struct iwl_phy_db_chg_txp {
+	__le32 space;
+	__le16 max_channel_idx;
+} __packed;
+
+struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans)
+{
+	struct iwl_phy_db *phy_db = kzalloc(sizeof(struct iwl_phy_db),
+					    GFP_KERNEL);
+
+	if (!phy_db)
+		return NULL;
+
+	phy_db->trans = trans;
+
+	phy_db->n_group_txp = -1;
+	phy_db->n_group_papd = -1;
+
+	/* TODO: add default values of the phy db. */
+	return phy_db;
+}
+IWL_EXPORT_SYMBOL(iwl_phy_db_init);
+
+/*
+ * get phy db section: returns a pointer to a phy db section specified by
+ * type and channel group id.
+ */
+static struct iwl_phy_db_entry *
+iwl_phy_db_get_section(struct iwl_phy_db *phy_db,
+		       enum iwl_phy_db_section_type type,
+		       u16 chg_id)
+{
+	if (!phy_db || type >= IWL_PHY_DB_MAX)
+		return NULL;
+
+	switch (type) {
+	case IWL_PHY_DB_CFG:
+		return &phy_db->cfg;
+	case IWL_PHY_DB_CALIB_NCH:
+		return &phy_db->calib_nch;
+	case IWL_PHY_DB_CALIB_CHG_PAPD:
+		if (chg_id >= phy_db->n_group_papd)
+			return NULL;
+		return &phy_db->calib_ch_group_papd[chg_id];
+	case IWL_PHY_DB_CALIB_CHG_TXP:
+		if (chg_id >= phy_db->n_group_txp)
+			return NULL;
+		return &phy_db->calib_ch_group_txp[chg_id];
+	default:
+		return NULL;
+	}
+}
+
+static void iwl_phy_db_free_section(struct iwl_phy_db *phy_db,
+				    enum iwl_phy_db_section_type type,
+				    u16 chg_id)
+{
+	struct iwl_phy_db_entry *entry =
+				iwl_phy_db_get_section(phy_db, type, chg_id);
+	if (!entry)
+		return;
+
+	kfree(entry->data);
+	entry->data = NULL;
+	entry->size = 0;
+}
+
+void iwl_phy_db_free(struct iwl_phy_db *phy_db)
+{
+	int i;
+
+	if (!phy_db)
+		return;
+
+	iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0);
+	iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0);
+
+	for (i = 0; i < phy_db->n_group_papd; i++)
+		iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i);
+	kfree(phy_db->calib_ch_group_papd);
+
+	for (i = 0; i < phy_db->n_group_txp; i++)
+		iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_TXP, i);
+	kfree(phy_db->calib_ch_group_txp);
+
+	kfree(phy_db);
+}
+IWL_EXPORT_SYMBOL(iwl_phy_db_free);
+
+int iwl_phy_db_set_section(struct iwl_phy_db *phy_db,
+			   struct iwl_rx_packet *pkt)
+{
+	struct iwl_calib_res_notif_phy_db *phy_db_notif =
+			(struct iwl_calib_res_notif_phy_db *)pkt->data;
+	enum iwl_phy_db_section_type type = le16_to_cpu(phy_db_notif->type);
+	u16 size  = le16_to_cpu(phy_db_notif->length);
+	struct iwl_phy_db_entry *entry;
+	u16 chg_id = 0;
+
+	if (!phy_db)
+		return -EINVAL;
+
+	if (type == IWL_PHY_DB_CALIB_CHG_PAPD) {
+		chg_id = le16_to_cpup((__le16 *)phy_db_notif->data);
+		if (phy_db && !phy_db->calib_ch_group_papd) {
+			/*
+			 * Firmware sends the largest index first, so we can use
+			 * it to know how much we should allocate.
+			 */
+			phy_db->calib_ch_group_papd = kcalloc(chg_id + 1,
+							      sizeof(struct iwl_phy_db_entry),
+							      GFP_ATOMIC);
+			if (!phy_db->calib_ch_group_papd)
+				return -ENOMEM;
+			phy_db->n_group_papd = chg_id + 1;
+		}
+	} else if (type == IWL_PHY_DB_CALIB_CHG_TXP) {
+		chg_id = le16_to_cpup((__le16 *)phy_db_notif->data);
+		if (phy_db && !phy_db->calib_ch_group_txp) {
+			/*
+			 * Firmware sends the largest index first, so we can use
+			 * it to know how much we should allocate.
+			 */
+			phy_db->calib_ch_group_txp = kcalloc(chg_id + 1,
+							     sizeof(struct iwl_phy_db_entry),
+							     GFP_ATOMIC);
+			if (!phy_db->calib_ch_group_txp)
+				return -ENOMEM;
+			phy_db->n_group_txp = chg_id + 1;
+		}
+	}
+
+	entry = iwl_phy_db_get_section(phy_db, type, chg_id);
+	if (!entry)
+		return -EINVAL;
+
+	kfree(entry->data);
+	entry->data = kmemdup(phy_db_notif->data, size, GFP_ATOMIC);
+	if (!entry->data) {
+		entry->size = 0;
+		return -ENOMEM;
+	}
+
+	entry->size = size;
+
+	IWL_DEBUG_INFO(phy_db->trans,
+		       "%s(%d): [PHYDB] SET: Type %d, Size: %d\n",
+		       __func__, __LINE__, type, size);
+
+	return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_phy_db_set_section);
+
+static int is_valid_channel(u16 ch_id)
+{
+	if (ch_id <= 14 ||
+	    (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
+	    (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
+	    (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
+		return 1;
+	return 0;
+}
+
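+/*
+ * Map a channel number to a contiguous index. Per the ranges accepted by
+ * is_valid_channel(): channels 1..14 map to indices 0..13, 36..64 to
+ * 14..21, 100..140 to 22..32 and 145..165 to 33..38.
+ */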
+static u8 ch_id_to_ch_index(u16 ch_id)
+{
+	if (WARN_ON(!is_valid_channel(ch_id)))
+		return 0xff;
+
+	if (ch_id <= 14)
+		return ch_id - 1;
+	if (ch_id <= 64)
+		return (ch_id + 20) / 4;
+	if (ch_id <= 140)
+		return (ch_id - 12) / 4;
+	return (ch_id - 13) / 4;
+}
+
+static u16 channel_id_to_papd(u16 ch_id)
+{
+	if (WARN_ON(!is_valid_channel(ch_id)))
+		return 0xff;
+
+	if (1 <= ch_id && ch_id <= 14)
+		return 0;
+	if (36 <= ch_id && ch_id <= 64)
+		return 1;
+	if (100 <= ch_id && ch_id <= 140)
+		return 2;
+	return 3;
+}
+
+static u16 channel_id_to_txp(struct iwl_phy_db *phy_db, u16 ch_id)
+{
+	struct iwl_phy_db_chg_txp *txp_chg;
+	int i;
+	u8 ch_index = ch_id_to_ch_index(ch_id);
+
+	if (ch_index == 0xff)
+		return 0xff;
+
+	for (i = 0; i < phy_db->n_group_txp; i++) {
+		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
+		if (!txp_chg)
+			return 0xff;
+		/*
+		 * Look for the first channel group whose max channel is
+		 * higher than the wanted channel.
+		 */
+		if (le16_to_cpu(txp_chg->max_channel_idx) >= ch_index)
+			return i;
+	}
+	return 0xff;
+}
+
+static int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
+				       u32 type, u8 **data, u16 *size,
+				       u16 ch_id)
+{
+	struct iwl_phy_db_entry *entry;
+	u16 ch_group_id = 0;
+
+	if (!phy_db)
+		return -EINVAL;
+
+	/* find wanted channel group */
+	if (type == IWL_PHY_DB_CALIB_CHG_PAPD)
+		ch_group_id = channel_id_to_papd(ch_id);
+	else if (type == IWL_PHY_DB_CALIB_CHG_TXP)
+		ch_group_id = channel_id_to_txp(phy_db, ch_id);
+
+	entry = iwl_phy_db_get_section(phy_db, type, ch_group_id);
+	if (!entry)
+		return -EINVAL;
+
+	*data = entry->data;
+	*size = entry->size;
+
+	IWL_DEBUG_INFO(phy_db->trans,
+		       "%s(%d): [PHYDB] GET: Type %d, Size: %d\n",
+		       __func__, __LINE__, type, *size);
+
+	return 0;
+}
+
+static int iwl_send_phy_db_cmd(struct iwl_phy_db *phy_db, u16 type,
+			       u16 length, void *data)
+{
+	struct iwl_phy_db_cmd phy_db_cmd;
+	struct iwl_host_cmd cmd = {
+		.id = PHY_DB_CMD,
+	};
+
+	IWL_DEBUG_INFO(phy_db->trans,
+		       "Sending PHY-DB hcmd of type %d, of length %d\n",
+		       type, length);
+
+	/* Set phy db cmd variables */
+	phy_db_cmd.type = cpu_to_le16(type);
+	phy_db_cmd.length = cpu_to_le16(length);
+
+	/* Set hcmd variables */
+	cmd.data[0] = &phy_db_cmd;
+	cmd.len[0] = sizeof(struct iwl_phy_db_cmd);
+	cmd.data[1] = data;
+	cmd.len[1] = length;
+	cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
+
+	return iwl_trans_send_cmd(phy_db->trans, &cmd);
+}
+
+static int iwl_phy_db_send_all_channel_groups(
+					struct iwl_phy_db *phy_db,
+					enum iwl_phy_db_section_type type,
+					u8 max_ch_groups)
+{
+	u16 i;
+	int err;
+	struct iwl_phy_db_entry *entry;
+
+	/* Send all the channel specific groups to the operational fw */
+	for (i = 0; i < max_ch_groups; i++) {
+		entry = iwl_phy_db_get_section(phy_db,
+					       type,
+					       i);
+		if (!entry)
+			return -EINVAL;
+
+		if (!entry->size)
+			continue;
+
+		/* Send the requested PHY DB section */
+		err = iwl_send_phy_db_cmd(phy_db,
+					  type,
+					  entry->size,
+					  entry->data);
+		if (err) {
+			IWL_ERR(phy_db->trans,
+				"Cannot send phy_db section %d (%d), err %d\n",
+				type, i, err);
+			return err;
+		}
+
+		IWL_DEBUG_INFO(phy_db->trans,
+			       "Sent PHY_DB HCMD, type = %d num = %d\n",
+			       type, i);
+	}
+
+	return 0;
+}
+
+int iwl_send_phy_db_data(struct iwl_phy_db *phy_db)
+{
+	u8 *data = NULL;
+	u16 size = 0;
+	int err;
+
+	IWL_DEBUG_INFO(phy_db->trans,
+		       "Sending phy db data and configuration to runtime image\n");
+
+	/* Send PHY DB CFG section */
+	err = iwl_phy_db_get_section_data(phy_db, IWL_PHY_DB_CFG,
+					  &data, &size, 0);
+	if (err) {
+		IWL_ERR(phy_db->trans, "Cannot get Phy DB cfg section\n");
+		return err;
+	}
+
+	err = iwl_send_phy_db_cmd(phy_db, IWL_PHY_DB_CFG, size, data);
+	if (err) {
+		IWL_ERR(phy_db->trans,
+			"Cannot send HCMD of Phy DB cfg section\n");
+		return err;
+	}
+
+	err = iwl_phy_db_get_section_data(phy_db, IWL_PHY_DB_CALIB_NCH,
+					  &data, &size, 0);
+	if (err) {
+		IWL_ERR(phy_db->trans,
+			"Cannot get Phy DB non specific channel section\n");
+		return err;
+	}
+
+	err = iwl_send_phy_db_cmd(phy_db, IWL_PHY_DB_CALIB_NCH, size, data);
+	if (err) {
+		IWL_ERR(phy_db->trans,
+			"Cannot send HCMD of Phy DB non specific channel section\n");
+		return err;
+	}
+
+	/* Send all the channel specific PAPD data */
+	err = iwl_phy_db_send_all_channel_groups(phy_db,
+						 IWL_PHY_DB_CALIB_CHG_PAPD,
+						 phy_db->n_group_papd);
+	if (err) {
+		IWL_ERR(phy_db->trans,
+			"Cannot send channel specific PAPD groups\n");
+		return err;
+	}
+
+	/* Send all the TXP channel specific data */
+	err = iwl_phy_db_send_all_channel_groups(phy_db,
+						 IWL_PHY_DB_CALIB_CHG_TXP,
+						 phy_db->n_group_txp);
+	if (err) {
+		IWL_ERR(phy_db->trans,
+			"Cannot send channel specific TX power groups\n");
+		return err;
+	}
+
+	IWL_DEBUG_INFO(phy_db->trans,
+		       "Finished sending phy db data\n");
+	return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_send_phy_db_data);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h
new file mode 100644
index 0000000..d34de3f
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h
@@ -0,0 +1,82 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __IWL_PHYDB_H__
+#define __IWL_PHYDB_H__
+
+#include <linux/types.h>
+
+#include "iwl-op-mode.h"
+#include "iwl-trans.h"
+
+struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans);
+
+void iwl_phy_db_free(struct iwl_phy_db *phy_db);
+
+int iwl_phy_db_set_section(struct iwl_phy_db *phy_db,
+			   struct iwl_rx_packet *pkt);
+
+int iwl_send_phy_db_data(struct iwl_phy_db *phy_db);
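+
+/*
+ * Typical flow (an illustrative sketch; error handling is omitted and the
+ * calling context is hypothetical):
+ *
+ *	phy_db = iwl_phy_db_init(trans);
+ *
+ *	for each CALIB_RES_NOTIF_PHY_DB notification from the INIT image:
+ *		iwl_phy_db_set_section(phy_db, pkt);
+ *
+ *	once the runtime image is loaded:
+ *		iwl_send_phy_db_data(phy_db);
+ *
+ *	iwl_phy_db_free(phy_db);
+ */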
+
+#endif /* __IWL_PHYDB_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
new file mode 100644
index 0000000..421a869
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -0,0 +1,428 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016        Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016        Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef	__iwl_prph_h__
+#define __iwl_prph_h__
+#include <linux/bitfield.h>
+
+/*
+ * Registers in this file are internal, not PCI bus memory mapped.
+ * Driver accesses these via HBUS_TARG_PRPH_* registers.
+ */
+#define PRPH_BASE	(0x00000)
+#define PRPH_END	(0xFFFFF)
+
+/* APMG (power management) constants */
+#define APMG_BASE			(PRPH_BASE + 0x3000)
+#define APMG_CLK_CTRL_REG		(APMG_BASE + 0x0000)
+#define APMG_CLK_EN_REG			(APMG_BASE + 0x0004)
+#define APMG_CLK_DIS_REG		(APMG_BASE + 0x0008)
+#define APMG_PS_CTRL_REG		(APMG_BASE + 0x000c)
+#define APMG_PCIDEV_STT_REG		(APMG_BASE + 0x0010)
+#define APMG_RFKILL_REG			(APMG_BASE + 0x0014)
+#define APMG_RTC_INT_STT_REG		(APMG_BASE + 0x001c)
+#define APMG_RTC_INT_MSK_REG		(APMG_BASE + 0x0020)
+#define APMG_DIGITAL_SVR_REG		(APMG_BASE + 0x0058)
+#define APMG_ANALOG_SVR_REG		(APMG_BASE + 0x006C)
+
+#define APMS_CLK_VAL_MRB_FUNC_MODE	(0x00000001)
+#define APMG_CLK_VAL_DMA_CLK_RQT	(0x00000200)
+#define APMG_CLK_VAL_BSM_CLK_RQT	(0x00000800)
+
+#define APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS	(0x00400000)
+#define APMG_PS_CTRL_VAL_RESET_REQ		(0x04000000)
+#define APMG_PS_CTRL_MSK_PWR_SRC		(0x03000000)
+#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN		(0x00000000)
+#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX		(0x02000000)
+#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK	(0x000001E0) /* bit 8:5 */
+#define APMG_SVR_DIGITAL_VOLTAGE_1_32		(0x00000060)
+
+#define APMG_PCIDEV_STT_VAL_PERSIST_DIS	(0x00000200)
+#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS	(0x00000800)
+#define APMG_PCIDEV_STT_VAL_WAKE_ME	(0x00004000)
+
+#define APMG_RTC_INT_STT_RFKILL		(0x10000000)
+
+/* Device system time */
+#define DEVICE_SYSTEM_TIME_REG 0xA0206C
+
+/* Device NMI register and value for 8000 family and lower hw's */
+#define DEVICE_SET_NMI_REG 0x00a01c30
+#define DEVICE_SET_NMI_VAL_DRV BIT(7)
+/* Device NMI register and value for 9000 family and above hw's */
+#define UREG_NIC_SET_NMI_DRIVER 0x00a05c10
+#define UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER_MSK 0xff000000
+
+/* Shared registers (0x0..0x3ff, via target indirect or periphery) */
+#define SHR_BASE	0x00a10000
+
+/* Shared GP1 register */
+#define SHR_APMG_GP1_REG		0x01dc
+#define SHR_APMG_GP1_REG_PRPH		(SHR_BASE + SHR_APMG_GP1_REG)
+#define SHR_APMG_GP1_WF_XTAL_LP_EN	0x00000004
+#define SHR_APMG_GP1_CHICKEN_BIT_SELECT	0x80000000
+
+/* Shared DL_CFG register */
+#define SHR_APMG_DL_CFG_REG			0x01c4
+#define SHR_APMG_DL_CFG_REG_PRPH		(SHR_BASE + SHR_APMG_DL_CFG_REG)
+#define SHR_APMG_DL_CFG_RTCS_CLK_SELECTOR_MSK	0x000000c0
+#define SHR_APMG_DL_CFG_RTCS_CLK_INTERNAL_XTAL	0x00000080
+#define SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP	0x00000100
+
+/* Shared APMG_XTAL_CFG register */
+#define SHR_APMG_XTAL_CFG_REG		0x1c0
+#define SHR_APMG_XTAL_CFG_XTAL_ON_REQ	0x80000000
+
+/*
+ * Device reset for family 8000
+ * write to bit 24 in order to reset the CPU
+ */
+#define RELEASE_CPU_RESET		(0x300C)
+#define RELEASE_CPU_RESET_BIT		BIT(24)
+
+/*****************************************************************************
+ *                        7000/3000 series SHR DTS addresses                 *
+ *****************************************************************************/
+
+#define SHR_MISC_WFM_DTS_EN	(0x00a10024)
+#define DTSC_CFG_MODE		(0x00a10604)
+#define DTSC_VREF_AVG		(0x00a10648)
+#define DTSC_VREF5_AVG		(0x00a1064c)
+#define DTSC_CFG_MODE_PERIODIC	(0x2)
+#define DTSC_PTAT_AVG		(0x00a10650)
+
+
+/**
+ * Tx Scheduler
+ *
+ * The Tx Scheduler selects the next frame to be transmitted, choosing TFDs
+ * (Transmit Frame Descriptors) from up to 16 circular Tx queues resident in
+ * host DRAM.  It steers each frame's Tx command (which contains the frame
+ * data) into one of up to 7 prioritized Tx DMA FIFO channels within the
+ * device.  A queue maps to only one (selectable by driver) Tx DMA channel,
+ * but one DMA channel may take input from several queues.
+ *
+ * Tx DMA FIFOs have dedicated purposes.
+ *
+ * For 5000 series and up, they are used differently
+ * (cf. iwl5000_default_queue_to_tx_fifo in iwl-5000.c):
+ *
+ * 0 -- EDCA BK (background) frames, lowest priority
+ * 1 -- EDCA BE (best effort) frames, normal priority
+ * 2 -- EDCA VI (video) frames, higher priority
+ * 3 -- EDCA VO (voice) and management frames, highest priority
+ * 4 -- unused
+ * 5 -- unused
+ * 6 -- unused
+ * 7 -- Commands
+ *
+ * Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6.
+ * In addition, driver can map the remaining queues to Tx DMA/FIFO
+ * channels 0-3 to support 11n aggregation via EDCA DMA channels.
+ *
+ * The driver sets up each queue to work in one of two modes:
+ *
+ * 1)  Scheduler-Ack, in which the scheduler automatically supports a
+ *     block-ack (BA) window of up to 64 TFDs.  In this mode, each queue
+ *     contains TFDs for a unique combination of Recipient Address (RA)
+ *     and Traffic Identifier (TID), that is, traffic of a given
+ *     Quality-Of-Service (QOS) priority, destined for a single station.
+ *
+ *     In scheduler-ack mode, the scheduler keeps track of the Tx status of
+ *     each frame within the BA window, including whether it's been transmitted,
+ *     and whether it's been acknowledged by the receiving station.  The device
+ *     automatically processes block-acks received from the receiving STA,
+ *     and reschedules un-acked frames to be retransmitted (successful
+ *     Tx completion may end up being out-of-order).
+ *
+ *     The driver must maintain the queue's Byte Count table in host DRAM
+ *     for this mode.
+ *     This mode does not support fragmentation.
+ *
+ * 2)  FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order.
+ *     The device may automatically retry Tx, but will retry only one frame
+ *     at a time, until receiving ACK from receiving station, or reaching
+ *     retry limit and giving up.
+ *
+ *     The command queue (#4/#9) must use this mode!
+ *     This mode does not require use of the Byte Count table in host DRAM.
+ *
+ * Driver controls scheduler operation via 3 means:
+ * 1)  Scheduler registers
+ * 2)  Shared scheduler data base in internal SRAM
+ * 3)  Shared data in host DRAM
+ *
+ * Initialization:
+ *
+ * When loading, driver should allocate memory for:
+ * 1)  16 TFD circular buffers, each with space for (typically) 256 TFDs.
+ * 2)  16 Byte Count circular buffers in 16 KBytes contiguous memory
+ *     (1024 bytes for each queue).
+ *
+ * After receiving "Alive" response from uCode, driver must initialize
+ * the scheduler (especially for queue #4/#9, the command queue, otherwise
+ * the driver can't issue commands!):
+ */
+#define SCD_MEM_LOWER_BOUND		(0x0000)
+
+/**
+ * Max Tx window size is the max number of contiguous TFDs that the scheduler
+ * can keep track of at one time when creating block-ack chains of frames.
+ * Note that "64" matches the number of ack bits in a block-ack packet.
+ */
+#define SCD_WIN_SIZE				64
+#define SCD_FRAME_LIMIT				64
+
+#define SCD_TXFIFO_POS_TID			(0)
+#define SCD_TXFIFO_POS_RA			(4)
+#define SCD_QUEUE_RA_TID_MAP_RATID_MSK	(0x01FF)
+
+/* agn SCD */
+#define SCD_QUEUE_STTS_REG_POS_TXF	(0)
+#define SCD_QUEUE_STTS_REG_POS_ACTIVE	(3)
+#define SCD_QUEUE_STTS_REG_POS_WSL	(4)
+#define SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (19)
+#define SCD_QUEUE_STTS_REG_MSK		(0x017F0000)
+
+#define SCD_QUEUE_CTX_REG1_CREDIT		(0x00FFFF00)
+#define SCD_QUEUE_CTX_REG1_SUPER_CREDIT		(0xFF000000)
+#define SCD_QUEUE_CTX_REG1_VAL(_n, _v)		FIELD_PREP(SCD_QUEUE_CTX_REG1_ ## _n, _v)
+
+#define SCD_QUEUE_CTX_REG2_WIN_SIZE		(0x0000007F)
+#define SCD_QUEUE_CTX_REG2_FRAME_LIMIT		(0x007F0000)
+#define SCD_QUEUE_CTX_REG2_VAL(_n, _v)		FIELD_PREP(SCD_QUEUE_CTX_REG2_ ## _n, _v)
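+
+/*
+ * For example, programming a queue with the default window and frame limit
+ * of 64 (SCD_WIN_SIZE/SCD_FRAME_LIMIT above) would set:
+ *	SCD_QUEUE_CTX_REG2_VAL(WIN_SIZE, 64)    = 0x00000040
+ *	SCD_QUEUE_CTX_REG2_VAL(FRAME_LIMIT, 64) = 0x00400000
+ */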
+
+#define SCD_GP_CTRL_ENABLE_31_QUEUES		BIT(0)
+#define SCD_GP_CTRL_AUTO_ACTIVE_MODE		BIT(18)
+
+/* Context Data */
+#define SCD_CONTEXT_MEM_LOWER_BOUND	(SCD_MEM_LOWER_BOUND + 0x600)
+#define SCD_CONTEXT_MEM_UPPER_BOUND	(SCD_MEM_LOWER_BOUND + 0x6A0)
+
+/* Tx status */
+#define SCD_TX_STTS_MEM_LOWER_BOUND	(SCD_MEM_LOWER_BOUND + 0x6A0)
+#define SCD_TX_STTS_MEM_UPPER_BOUND	(SCD_MEM_LOWER_BOUND + 0x7E0)
+
+/* Translation Data */
+#define SCD_TRANS_TBL_MEM_LOWER_BOUND	(SCD_MEM_LOWER_BOUND + 0x7E0)
+#define SCD_TRANS_TBL_MEM_UPPER_BOUND	(SCD_MEM_LOWER_BOUND + 0x808)
+
+#define SCD_CONTEXT_QUEUE_OFFSET(x)\
+	(SCD_CONTEXT_MEM_LOWER_BOUND + ((x) * 8))
+
+#define SCD_TX_STTS_QUEUE_OFFSET(x)\
+	(SCD_TX_STTS_MEM_LOWER_BOUND + ((x) * 16))
+
+#define SCD_TRANS_TBL_OFFSET_QUEUE(x) \
+	((SCD_TRANS_TBL_MEM_LOWER_BOUND + ((x) * 2)) & 0xfffc)
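+
+/*
+ * Worked example (the values follow directly from the macros above):
+ * for queue 5,
+ *	SCD_CONTEXT_QUEUE_OFFSET(5)   = 0x600 + 5 * 8  = 0x628
+ *	SCD_TX_STTS_QUEUE_OFFSET(5)   = 0x6A0 + 5 * 16 = 0x6f0
+ *	SCD_TRANS_TBL_OFFSET_QUEUE(5) = (0x7E0 + 10) & 0xfffc = 0x7e8
+ */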
+
+#define SCD_BASE			(PRPH_BASE + 0xa02c00)
+
+#define SCD_SRAM_BASE_ADDR	(SCD_BASE + 0x0)
+#define SCD_DRAM_BASE_ADDR	(SCD_BASE + 0x8)
+#define SCD_AIT			(SCD_BASE + 0x0c)
+#define SCD_TXFACT		(SCD_BASE + 0x10)
+#define SCD_ACTIVE		(SCD_BASE + 0x14)
+#define SCD_QUEUECHAIN_SEL	(SCD_BASE + 0xe8)
+#define SCD_CHAINEXT_EN		(SCD_BASE + 0x244)
+#define SCD_AGGR_SEL		(SCD_BASE + 0x248)
+#define SCD_INTERRUPT_MASK	(SCD_BASE + 0x108)
+#define SCD_GP_CTRL		(SCD_BASE + 0x1a8)
+#define SCD_EN_CTRL		(SCD_BASE + 0x254)
+
+/*********************** END TX SCHEDULER *************************************/
+
+/* Oscillator clock */
+#define OSC_CLK				(0xa04068)
+#define OSC_CLK_FORCE_CONTROL		(0x8)
+
+#define FH_UCODE_LOAD_STATUS		(0x1AF0)
+
+/*
+ * Replacing FH_UCODE_LOAD_STATUS
+ * This register is written by the driver and read by the uCode during the boot flow.
+ * Note this address is cleared after MAC reset.
+ */
+#define UREG_UCODE_LOAD_STATUS		(0xa05c40)
+#define UREG_CPU_INIT_RUN		(0xa05c44)
+
+#define LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR	(0x1E78)
+#define LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR	(0x1E7C)
+
+#define LMPM_SECURE_CPU1_HDR_MEM_SPACE		(0x420000)
+#define LMPM_SECURE_CPU2_HDR_MEM_SPACE		(0x420400)
+
+#define LMAC2_PRPH_OFFSET		(0x100000)
+
+/* Rx FIFO */
+#define RXF_SIZE_ADDR			(0xa00c88)
+#define RXF_RD_D_SPACE			(0xa00c40)
+#define RXF_RD_WR_PTR			(0xa00c50)
+#define RXF_RD_RD_PTR			(0xa00c54)
+#define RXF_RD_FENCE_PTR		(0xa00c4c)
+#define RXF_SET_FENCE_MODE		(0xa00c14)
+#define RXF_LD_WR2FENCE		(0xa00c1c)
+#define RXF_FIFO_RD_FENCE_INC		(0xa00c68)
+#define RXF_SIZE_BYTE_CND_POS		(7)
+#define RXF_SIZE_BYTE_CNT_MSK		(0x3ff << RXF_SIZE_BYTE_CND_POS)
+#define RXF_DIFF_FROM_PREV		(0x200)
+
+#define RXF_LD_FENCE_OFFSET_ADDR	(0xa00c10)
+#define RXF_FIFO_RD_FENCE_ADDR		(0xa00c0c)
+
+/* Tx FIFO */
+#define TXF_FIFO_ITEM_CNT		(0xa00438)
+#define TXF_WR_PTR			(0xa00414)
+#define TXF_RD_PTR			(0xa00410)
+#define TXF_FENCE_PTR			(0xa00418)
+#define TXF_LOCK_FENCE			(0xa00424)
+#define TXF_LARC_NUM			(0xa0043c)
+#define TXF_READ_MODIFY_DATA		(0xa00448)
+#define TXF_READ_MODIFY_ADDR		(0xa0044c)
+
+/* UMAC Internal Tx Fifo */
+#define TXF_CPU2_FIFO_ITEM_CNT		(0xA00538)
+#define TXF_CPU2_WR_PTR		(0xA00514)
+#define TXF_CPU2_RD_PTR		(0xA00510)
+#define TXF_CPU2_FENCE_PTR		(0xA00518)
+#define TXF_CPU2_LOCK_FENCE		(0xA00524)
+#define TXF_CPU2_NUM			(0xA0053C)
+#define TXF_CPU2_READ_MODIFY_DATA	(0xA00548)
+#define TXF_CPU2_READ_MODIFY_ADDR	(0xA0054C)
+
+/* Radio registers access */
+#define RSP_RADIO_CMD			(0xa02804)
+#define RSP_RADIO_RDDAT			(0xa02814)
+#define RADIO_RSP_ADDR_POS		(6)
+#define RADIO_RSP_RD_CMD		(3)
+
+/* FW monitor */
+#define MON_BUFF_SAMPLE_CTL		(0xa03c00)
+#define MON_BUFF_BASE_ADDR		(0xa03c3c)
+#define MON_BUFF_END_ADDR		(0xa03c40)
+#define MON_BUFF_WRPTR			(0xa03c44)
+#define MON_BUFF_CYCLE_CNT		(0xa03c48)
+
+#define MON_DMARB_RD_CTL_ADDR		(0xa03c60)
+#define MON_DMARB_RD_DATA_ADDR		(0xa03c5c)
+
+#define DBGC_IN_SAMPLE			(0xa03c00)
+#define DBGC_OUT_CTRL			(0xa03c0c)
+
+/* enable the ID buf for read */
+#define WFPM_PS_CTL_CLR			0xA0300C
+#define WFMP_MAC_ADDR_0			0xA03080
+#define WFMP_MAC_ADDR_1			0xA03084
+#define LMPM_PMG_EN			0xA01CEC
+#define RADIO_REG_SYS_MANUAL_DFT_0	0xAD4078
+#define RFIC_REG_RD			0xAD0470
+#define WFPM_CTRL_REG			0xA03030
+#define WFPM_GP2			0xA030B4
+enum {
+	ENABLE_WFPM = BIT(31),
+	WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK	= 0x80000000,
+};
+
+#define AUX_MISC_REG			0xA200B0
+enum {
+	HW_STEP_LOCATION_BITS = 24,
+};
+
+#define AUX_MISC_MASTER1_EN		0xA20818
+enum aux_misc_master1_en {
+	AUX_MISC_MASTER1_EN_SBE_MSK	= 0x1,
+};
+
+#define AUX_MISC_MASTER1_SMPHR_STATUS	0xA20800
+#define RSA_ENABLE			0xA24B08
+#define PREG_AUX_BUS_WPROT_0		0xA04CC0
+#define SB_CPU_1_STATUS			0xA01E30
+#define SB_CPU_2_STATUS			0xA01E34
+#define UMAG_SB_CPU_1_STATUS		0xA038C0
+#define UMAG_SB_CPU_2_STATUS		0xA038C4
+#define UMAG_GEN_HW_STATUS		0xA038C8
+
+/* For UMAG_GEN_HW_STATUS reg check */
+enum {
+	UMAG_GEN_HW_IS_FPGA = BIT(1),
+};
+
+/* FW chicken bits */
+#define LMPM_CHICK			0xA01FF8
+enum {
+	LMPM_CHICK_EXTENDED_ADDR_SPACE = BIT(0),
+};
+
+/* FW paging notification */
+#define LMPM_PAGE_PASS_NOTIF			0xA03824
+enum {
+	LMPM_PAGE_PASS_NOTIF_POS = BIT(20),
+};
+
+#define UREG_CHICK		(0xA05C00)
+#define UREG_CHICK_MSI_ENABLE	BIT(24)
+#define UREG_CHICK_MSIX_ENABLE	BIT(25)
+#endif				/* __iwl_prph_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-scd.h b/drivers/net/wireless/intel/iwlwifi/iwl-scd.h
new file mode 100644
index 0000000..99b43da
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-scd.h
@@ -0,0 +1,143 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2014 Intel Mobile Communications GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014 Intel Mobile Communications GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __iwl_scd_h__
+#define __iwl_scd_h__
+
+#include "iwl-trans.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
+
+
+static inline void iwl_scd_txq_set_chain(struct iwl_trans *trans,
+					 u16 txq_id)
+{
+	iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));
+}
+
+static inline void iwl_scd_txq_enable_agg(struct iwl_trans *trans,
+					  u16 txq_id)
+{
+	iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
+}
+
+static inline void iwl_scd_txq_disable_agg(struct iwl_trans *trans,
+					   u16 txq_id)
+{
+	iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
+}
+
+static inline void iwl_scd_disable_agg(struct iwl_trans *trans)
+{
+	/* write 0 to clear the aggregation-select bit of every queue;
+	 * OR-ing 0 in via iwl_set_bits_prph() would be a no-op
+	 */
+	iwl_write_prph(trans, SCD_AGGR_SEL, 0);
+}
+
+static inline void iwl_scd_activate_fifos(struct iwl_trans *trans)
+{
+	iwl_write_prph(trans, SCD_TXFACT, IWL_MASK(0, 7));
+}
+
+static inline void iwl_scd_deactivate_fifos(struct iwl_trans *trans)
+{
+	iwl_write_prph(trans, SCD_TXFACT, 0);
+}
+
+static inline void iwl_scd_enable_set_active(struct iwl_trans *trans,
+					     u32 value)
+{
+	iwl_write_prph(trans, SCD_EN_CTRL, value);
+}
+
+static inline unsigned int SCD_QUEUE_WRPTR(unsigned int chnl)
+{
+	if (chnl < 20)
+		return SCD_BASE + 0x18 + chnl * 4;
+	WARN_ON_ONCE(chnl >= 32);
+	return SCD_BASE + 0x284 + (chnl - 20) * 4;
+}
+
+static inline unsigned int SCD_QUEUE_RDPTR(unsigned int chnl)
+{
+	if (chnl < 20)
+		return SCD_BASE + 0x68 + chnl * 4;
+	WARN_ON_ONCE(chnl >= 32);
+	return SCD_BASE + 0x2B4 + (chnl - 20) * 4;
+}
+
+static inline unsigned int SCD_QUEUE_STATUS_BITS(unsigned int chnl)
+{
+	if (chnl < 20)
+		return SCD_BASE + 0x10c + chnl * 4;
+	WARN_ON_ONCE(chnl >= 32);
+	return SCD_BASE + 0x334 + (chnl - 20) * 4;
+}
+
+static inline void iwl_scd_txq_set_inactive(struct iwl_trans *trans,
+					    u16 txq_id)
+{
+	iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
+		       (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
+		       (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
+}
+
+#endif
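Taken together, these helpers give callers a compact vocabulary for scheduler setup. A sketch of a typical aggregation-enable path (illustrative only; the real pcie code interleaves these calls with SRAM context writes):

/* illustrative only: put txq_id into scheduler-ACK (aggregation) mode */
static void example_scd_enable_agg_queue(struct iwl_trans *trans, u16 txq_id)
{
	iwl_scd_txq_set_chain(trans, txq_id);	/* chain queue to its FIFO */
	iwl_scd_txq_enable_agg(trans, txq_id);	/* select aggregation mode */
	iwl_scd_activate_fifos(trans);		/* Tx DMA channels 0..7 on */
}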
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
new file mode 100644
index 0000000..7e9c924
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -0,0 +1,223 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/bsearch.h>
+
+#include "iwl-trans.h"
+#include "iwl-drv.h"
+#include "iwl-fh.h"
+
+struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
+				  struct device *dev,
+				  const struct iwl_cfg *cfg,
+				  const struct iwl_trans_ops *ops)
+{
+	struct iwl_trans *trans;
+#ifdef CONFIG_LOCKDEP
+	static struct lock_class_key __key;
+#endif
+
+	trans = devm_kzalloc(dev, sizeof(*trans) + priv_size, GFP_KERNEL);
+	if (!trans)
+		return NULL;
+
+#ifdef CONFIG_LOCKDEP
+	lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
+			 &__key, 0);
+#endif
+
+	trans->dev = dev;
+	trans->cfg = cfg;
+	trans->ops = ops;
+	trans->num_rx_queues = 1;
+
+	snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
+		 "iwl_cmd_pool:%s", dev_name(trans->dev));
+	trans->dev_cmd_pool =
+		kmem_cache_create(trans->dev_cmd_pool_name,
+				  sizeof(struct iwl_device_cmd),
+				  sizeof(void *),
+				  SLAB_HWCACHE_ALIGN,
+				  NULL);
+	if (!trans->dev_cmd_pool)
+		return NULL;
+
+	WARN_ON(!ops->wait_txq_empty && !ops->wait_tx_queues_empty);
+
+	return trans;
+}
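A bus driver calls this with the size of its private state and later reaches that state through the trailing trans_specific area. A hedged sketch (struct example_bus_trans is made up; the PCIe implementation uses iwl_trans_pcie plus an accessor macro):

struct example_bus_trans {
	void __iomem *hw_base;	/* illustrative private state */
};

static struct iwl_trans *example_bus_probe(struct device *dev,
					   const struct iwl_cfg *cfg,
					   const struct iwl_trans_ops *ops)
{
	struct iwl_trans *trans;

	trans = iwl_trans_alloc(sizeof(struct example_bus_trans),
				dev, cfg, ops);
	if (!trans)
		return NULL;

	/* the private area lives right behind struct iwl_trans */
	((struct example_bus_trans *)trans->trans_specific)->hw_base = NULL;
	return trans;
}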
+
+void iwl_trans_free(struct iwl_trans *trans)
+{
+	kmem_cache_destroy(trans->dev_cmd_pool);
+}
+
+int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+{
+	int ret;
+
+	if (unlikely(!(cmd->flags & CMD_SEND_IN_RFKILL) &&
+		     test_bit(STATUS_RFKILL_OPMODE, &trans->status)))
+		return -ERFKILL;
+
+	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
+		return -EIO;
+
+	if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) {
+		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
+		return -EIO;
+	}
+
+	if (WARN_ON((cmd->flags & CMD_WANT_ASYNC_CALLBACK) &&
+		    !(cmd->flags & CMD_ASYNC)))
+		return -EINVAL;
+
+	if (!(cmd->flags & CMD_ASYNC))
+		lock_map_acquire_read(&trans->sync_cmd_lockdep_map);
+
+	if (trans->wide_cmd_header && !iwl_cmd_groupid(cmd->id))
+		cmd->id = DEF_ID(cmd->id);
+
+	ret = trans->ops->send_cmd(trans, cmd);
+
+	if (!(cmd->flags & CMD_ASYNC))
+		lock_map_release(&trans->sync_cmd_lockdep_map);
+
+	if (WARN_ON((cmd->flags & CMD_WANT_SKB) && !ret && !cmd->resp_pkt))
+		return -EIO;
+
+	return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_trans_send_cmd);
+
+/* Comparator for struct iwl_hcmd_names.
+ * Used in the binary search over a list of host commands.
+ *
+ * @key: command_id that we're looking for.
+ * @elt: struct iwl_hcmd_names candidate for match.
+ *
+ * @return 0 iff equal.
+ */
+static int iwl_hcmd_names_cmp(const void *key, const void *elt)
+{
+	const struct iwl_hcmd_names *name = elt;
+	u8 cmd1 = *(u8 *)key;
+	u8 cmd2 = name->cmd_id;
+
+	return (cmd1 - cmd2);
+}
+
+const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id)
+{
+	u8 grp, cmd;
+	struct iwl_hcmd_names *ret;
+	const struct iwl_hcmd_arr *arr;
+	size_t size = sizeof(struct iwl_hcmd_names);
+
+	grp = iwl_cmd_groupid(id);
+	cmd = iwl_cmd_opcode(id);
+
+	if (!trans->command_groups || grp >= trans->command_groups_size ||
+	    !trans->command_groups[grp].arr)
+		return "UNKNOWN";
+
+	arr = &trans->command_groups[grp];
+	ret = bsearch(&cmd, arr->arr, arr->size, size, iwl_hcmd_names_cmp);
+	if (!ret)
+		return "UNKNOWN";
+	return ret->cmd_name;
+}
+IWL_EXPORT_SYMBOL(iwl_get_cmd_string);
+
+int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans)
+{
+	int i, j;
+	const struct iwl_hcmd_arr *arr;
+
+	for (i = 0; i < trans->command_groups_size; i++) {
+		arr = &trans->command_groups[i];
+		if (!arr->arr)
+			continue;
+		for (j = 0; j < arr->size - 1; j++)
+			if (arr->arr[j].cmd_id > arr->arr[j + 1].cmd_id)
+				return -1;
+	}
+	return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_cmd_groups_verify_sorted);
+
+void iwl_trans_ref(struct iwl_trans *trans)
+{
+	if (trans->ops->ref)
+		trans->ops->ref(trans);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_ref);
+
+void iwl_trans_unref(struct iwl_trans *trans)
+{
+	if (trans->ops->unref)
+		trans->ops->unref(trans);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_unref);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
new file mode 100644
index 0000000..279dd7b
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -0,0 +1,1212 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_trans_h__
+#define __iwl_trans_h__
+
+#include <linux/ieee80211.h>
+#include <linux/mm.h> /* for page_address */
+#include <linux/lockdep.h>
+#include <linux/kernel.h>
+
+#include "iwl-debug.h"
+#include "iwl-config.h"
+#include "fw/img.h"
+#include "iwl-op-mode.h"
+#include "fw/api/cmdhdr.h"
+#include "fw/api/txq.h"
+
+/**
+ * DOC: Transport layer - what is it ?
+ *
+ * The transport layer is the layer that deals with the HW directly. It provides
+ * an abstraction of the underlying HW to the upper layer. The transport layer
+ * doesn't provide any policy, algorithm or anything of this kind, but only
+ * mechanisms to make the HW do something. It is not completely stateless but
+ * close to it.
+ * We will have an implementation for each different supported bus.
+ */
+
+/**
+ * DOC: Life cycle of the transport layer
+ *
+ * The transport layer has a very precise life cycle.
+ *
+ *	1) A helper function is called during the module initialization and
+ *	   registers the bus driver's ops with the transport's alloc function.
+ *	2) The bus's probe function calls the transport layer's allocation
+ *	   function; this function is bus specific.
+ *	3) The allocation function spawns the upper layer, which registers
+ *	   with mac80211.
+ *
+ *	4) At some point (i.e. mac80211's start call), the op_mode will call
+ *	   the following sequence:
+ *	   start_hw
+ *	   start_fw
+ *
+ *	5) Then when finished (or reset):
+ *	   stop_device
+ *
+ *	6) Eventually, the free function will be called.
+ */
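In code, the op_mode side of steps 4 and 5 reduces to a short sequence built from the wrappers defined later in this header (a sketch with error handling trimmed, not the actual mvm start path):

static int example_op_mode_start(struct iwl_trans *trans,
				 const struct fw_img *fw)
{
	int ret;

	ret = iwl_trans_start_hw(trans);	/* step 4: wake up the NIC */
	if (ret)
		return ret;

	ret = iwl_trans_start_fw(trans, fw, false); /* load and boot uCode */
	if (ret)
		iwl_trans_stop_device(trans);	/* step 5 on failure */
	return ret;
}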
+
+#define FH_RSCSR_FRAME_SIZE_MSK		0x00003FFF	/* bits 0-13 */
+#define FH_RSCSR_FRAME_INVALID		0x55550000
+#define FH_RSCSR_FRAME_ALIGN		0x40
+#define FH_RSCSR_RPA_EN			BIT(25)
+#define FH_RSCSR_RADA_EN		BIT(26)
+#define FH_RSCSR_RXQ_POS		16
+#define FH_RSCSR_RXQ_MASK		0x3F0000
+
+struct iwl_rx_packet {
+	/*
+	 * The first 4 bytes of the RX frame header contain both the RX frame
+	 * size and some flags.
+	 * Bit fields:
+	 * 31:    flag flush RB request
+	 * 30:    flag ignore TC (terminal counter) request
+	 * 29:    flag fast IRQ request
+	 * 28-27: Reserved
+	 * 26:    RADA enabled
+	 * 25:    Offload enabled
+	 * 24:    RPF enabled
+	 * 23:    RSS enabled
+	 * 22:    Checksum enabled
+	 * 21-16: RX queue
+	 * 15-14: Reserved
+	 * 13-00: RX frame size
+	 */
+	__le32 len_n_flags;
+	struct iwl_cmd_header hdr;
+	u8 data[];
+} __packed;
+
+static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
+{
+	return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+}
+
+static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
+{
+	return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
+}
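The len_n_flags word also carries the RX queue number; pulling it out follows the same mask/position pattern (a small illustration using the defines above):

static inline u32 example_rx_packet_rxq(const struct iwl_rx_packet *pkt)
{
	return (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
		FH_RSCSR_RXQ_POS;
}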
+
+/**
+ * enum CMD_MODE - how to send the host commands?
+ *
+ * @CMD_ASYNC: Return right away and don't wait for the response
+ * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
+ *	the response. The caller needs to call iwl_free_resp when done.
+ * @CMD_SEND_IN_RFKILL: Send the command even if RF-kill is asserted;
+ *	without this flag such a command is rejected with -ERFKILL.
+ * @CMD_HIGH_PRIO: The command is high priority - it goes to the front of the
+ *	command queue, but after other high priority commands. Valid only
+ *	with CMD_ASYNC.
+ * @CMD_SEND_IN_IDLE: The command should be sent even when the trans is idle.
+ * @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle.
+ * @CMD_WAKE_UP_TRANS: The command response should wake up the trans
+ *	(i.e. mark it as non-idle).
+ * @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
+ *	called after this command completes. Valid only with CMD_ASYNC.
+ */
+enum CMD_MODE {
+	CMD_ASYNC		= BIT(0),
+	CMD_WANT_SKB		= BIT(1),
+	CMD_SEND_IN_RFKILL	= BIT(2),
+	CMD_HIGH_PRIO		= BIT(3),
+	CMD_SEND_IN_IDLE	= BIT(4),
+	CMD_MAKE_TRANS_IDLE	= BIT(5),
+	CMD_WAKE_UP_TRANS	= BIT(6),
+	CMD_WANT_ASYNC_CALLBACK	= BIT(7),
+};
+
+#define DEF_CMD_PAYLOAD_SIZE 320
+
+/**
+ * struct iwl_device_cmd
+ *
+ * For allocation of the command and tx queues, this establishes the overall
+ * size of the largest command we send to uCode, except for commands that
+ * aren't fully copied and use other TFD space.
+ */
+struct iwl_device_cmd {
+	union {
+		struct {
+			struct iwl_cmd_header hdr;	/* uCode API */
+			u8 payload[DEF_CMD_PAYLOAD_SIZE];
+		};
+		struct {
+			struct iwl_cmd_header_wide hdr_wide;
+			u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
+					sizeof(struct iwl_cmd_header_wide) +
+					sizeof(struct iwl_cmd_header)];
+		};
+	};
+} __packed;
+
+#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
+
+/*
+ * number of transfer buffers (fragments) per transmit frame descriptor;
+ * this is just the driver's limit; the hardware supports 20
+ */
+#define IWL_MAX_CMD_TBS_PER_TFD	2
+
+/**
+ * enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
+ *
+ * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
+ *	ring. The transport layer doesn't map the command's buffer to DMA, but
+ *	rather copies it to a previously allocated DMA buffer. This flag tells
+ *	the transport layer not to copy the command, but to map the existing
+ *	buffer (that is passed in) instead. This saves the memcpy and allows
+ *	commands that are bigger than the fixed buffer to be submitted.
+ *	Note that a TFD entry after a NOCOPY one cannot be a normal copied one.
+ * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
+ *	chunk internally and free it again after the command completes. This
+ *	can (currently) be used only once per command.
+ *	Note that a TFD entry after a DUP one cannot be a normal copied one.
+ */
+enum iwl_hcmd_dataflag {
+	IWL_HCMD_DFL_NOCOPY	= BIT(0),
+	IWL_HCMD_DFL_DUP	= BIT(1),
+};
+
+/**
+ * struct iwl_host_cmd - Host command to the uCode
+ *
+ * @data: array of chunks that composes the data of the host command
+ * @resp_pkt: response packet, if %CMD_WANT_SKB was set
+ * @_rx_page_order: (internally used to free response packet)
+ * @_rx_page_addr: (internally used to free response packet)
+ * @flags: can be CMD_*
+ * @len: array of the lengths of the chunks in data
+ * @dataflags: IWL_HCMD_DFL_*
+ * @id: command id of the host command, for wide commands encoding the
+ *	version and group as well
+ */
+struct iwl_host_cmd {
+	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
+	struct iwl_rx_packet *resp_pkt;
+	unsigned long _rx_page_addr;
+	u32 _rx_page_order;
+
+	u32 flags;
+	u32 id;
+	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
+	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
+};
+
+static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
+{
+	free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
+}
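Putting struct iwl_host_cmd and iwl_trans_send_cmd together, a synchronous command that wants the response buffer back looks roughly like this (example_cmd_id stands in for a real command id; a sketch, not an actual caller):

static int example_send_sync_cmd(struct iwl_trans *trans, u32 example_cmd_id)
{
	struct iwl_host_cmd cmd = {
		.id = example_cmd_id,
		.flags = CMD_WANT_SKB,	/* synchronous, keep the response */
	};
	int ret;

	ret = iwl_trans_send_cmd(trans, &cmd);
	if (ret)
		return ret;

	/* ... inspect cmd.resp_pkt here ... */

	iwl_free_resp(&cmd);		/* mandatory when CMD_WANT_SKB set */
	return 0;
}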
+
+struct iwl_rx_cmd_buffer {
+	struct page *_page;
+	int _offset;
+	bool _page_stolen;
+	u32 _rx_page_order;
+	unsigned int truesize;
+};
+
+static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
+{
+	return (void *)((unsigned long)page_address(r->_page) + r->_offset);
+}
+
+static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
+{
+	return r->_offset;
+}
+
+static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
+{
+	r->_page_stolen = true;
+	get_page(r->_page);
+	return r->_page;
+}
+
+static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
+{
+	__free_pages(r->_page, r->_rx_page_order);
+}
+
+#define MAX_NO_RECLAIM_CMDS	6
+
+#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
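IWL_MASK(lo, hi) builds a contiguous mask covering bits lo through hi inclusive; the value used by iwl_scd_activate_fifos() earlier expands as follows:

/* IWL_MASK(0, 7) == (1 << 7) | ((1 << 7) - (1 << 0))
 *               == 0x80 | 0x7f
 *               == 0xff	bits 0..7, i.e. all eight Tx FIFO channels
 */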
+
+/*
+ * Maximum number of HW queues the transport layer
+ * currently supports
+ */
+#define IWL_MAX_HW_QUEUES		32
+#define IWL_MAX_TVQM_QUEUES		512
+
+#define IWL_MAX_TID_COUNT	8
+#define IWL_MGMT_TID		15
+#define IWL_FRAME_LIMIT	64
+#define IWL_MAX_RX_HW_QUEUES	16
+
+/**
+ * enum iwl_d3_status - WoWLAN image/device status
+ * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
+ * @IWL_D3_STATUS_RESET: device was reset while suspended
+ */
+enum iwl_d3_status {
+	IWL_D3_STATUS_ALIVE,
+	IWL_D3_STATUS_RESET,
+};
+
+/**
+ * enum iwl_trans_status: transport status flags
+ * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
+ * @STATUS_DEVICE_ENABLED: APM is enabled
+ * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
+ * @STATUS_INT_ENABLED: interrupts are enabled
+ * @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch
+ * @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode
+ * @STATUS_FW_ERROR: the fw is in error state
+ * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
+ *	are sent
+ * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
+ * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
+ */
+enum iwl_trans_status {
+	STATUS_SYNC_HCMD_ACTIVE,
+	STATUS_DEVICE_ENABLED,
+	STATUS_TPOWER_PMI,
+	STATUS_INT_ENABLED,
+	STATUS_RFKILL_HW,
+	STATUS_RFKILL_OPMODE,
+	STATUS_FW_ERROR,
+	STATUS_TRANS_GOING_IDLE,
+	STATUS_TRANS_IDLE,
+	STATUS_TRANS_DEAD,
+};
+
+static inline int
+iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
+{
+	switch (rb_size) {
+	case IWL_AMSDU_2K:
+		return get_order(2 * 1024);
+	case IWL_AMSDU_4K:
+		return get_order(4 * 1024);
+	case IWL_AMSDU_8K:
+		return get_order(8 * 1024);
+	case IWL_AMSDU_12K:
+		return get_order(12 * 1024);
+	default:
+		WARN_ON(1);
+		return -1;
+	}
+}
+
+struct iwl_hcmd_names {
+	u8 cmd_id;
+	const char *const cmd_name;
+};
+
+#define HCMD_NAME(x)	\
+	{ .cmd_id = x, .cmd_name = #x }
+
+struct iwl_hcmd_arr {
+	const struct iwl_hcmd_names *arr;
+	int size;
+};
+
+#define HCMD_ARR(x)	\
+	{ .arr = x, .size = ARRAY_SIZE(x) }
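These macros feed the bsearch in iwl_get_cmd_string(), so every array must stay sorted by ascending opcode; a sketch with made-up command ids:

enum { EXAMPLE_ALIVE = 0x1, EXAMPLE_ECHO = 0x2 };	/* illustrative ids */

/* must remain sorted by cmd_id, see iwl_cmd_groups_verify_sorted() */
static const struct iwl_hcmd_names example_names[] = {
	HCMD_NAME(EXAMPLE_ALIVE),
	HCMD_NAME(EXAMPLE_ECHO),
};

static const struct iwl_hcmd_arr example_groups[] = {
	[0] = HCMD_ARR(example_names),
};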
+
+/**
+ * struct iwl_trans_config - transport configuration
+ *
+ * @op_mode: pointer to the upper layer.
+ * @cmd_queue: the index of the command queue.
+ *	Must be set before start_fw.
+ * @cmd_fifo: the fifo for host commands
+ * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue.
+ * @no_reclaim_cmds: Some devices erroneously don't set the
+ *	SEQ_RX_FRAME bit on some notifications, this is the
+ *	list of such notifications to filter. Max length is
+ *	%MAX_NO_RECLAIM_CMDS.
+ * @n_no_reclaim_cmds: # of commands in list
+ * @rx_buf_size: RX buffer size needed for A-MSDUs
+ *	if unset, 4k will be the RX buffer size
+ * @bc_table_dword: set to true if the BC table expects the byte count to be
+ *	in DWORD (as opposed to bytes)
+ * @scd_set_active: should the transport configure the SCD for HCMD queue
+ * @sw_csum_tx: transport should compute the TCP checksum
+ * @command_groups: array of command groups, each member is an array of the
+ *	commands in the group; for debugging only
+ * @command_groups_size: number of command groups, to avoid illegal access
+ * @cb_data_offs: offset inside skb->cb to store transport data at, must have
+ *	space for at least two pointers
+ */
+struct iwl_trans_config {
+	struct iwl_op_mode *op_mode;
+
+	u8 cmd_queue;
+	u8 cmd_fifo;
+	unsigned int cmd_q_wdg_timeout;
+	const u8 *no_reclaim_cmds;
+	unsigned int n_no_reclaim_cmds;
+
+	enum iwl_amsdu_size rx_buf_size;
+	bool bc_table_dword;
+	bool scd_set_active;
+	bool sw_csum_tx;
+	const struct iwl_hcmd_arr *command_groups;
+	int command_groups_size;
+
+	u8 cb_data_offs;
+};
+
+struct iwl_trans_dump_data {
+	u32 len;
+	u8 data[];
+};
+
+struct iwl_trans;
+
+struct iwl_trans_txq_scd_cfg {
+	u8 fifo;
+	u8 sta_id;
+	u8 tid;
+	bool aggregate;
+	int frame_limit;
+};
+
+/**
+ * struct iwl_trans_rxq_dma_data - RX queue DMA data
+ * @fr_bd_cb: DMA address of free BD cyclic buffer
+ * @fr_bd_wid: Initial write index of the free BD cyclic buffer
+ * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
+ * @ur_bd_cb: DMA address of used BD cyclic buffer
+ */
+struct iwl_trans_rxq_dma_data {
+	u64 fr_bd_cb;
+	u32 fr_bd_wid;
+	u64 urbd_stts_wrptr;
+	u64 ur_bd_cb;
+};
+
+/**
+ * struct iwl_trans_ops - transport specific operations
+ *
+ * All the handlers MUST be implemented
+ *
+ * @start_hw: starts the HW. If low_power is true, the NIC needs to be taken
+ *	out of a low power state. From that point on, the HW can send
+ *	interrupts. May sleep.
+ * @op_mode_leave: Turn off the HW RF kill indication if on
+ *	May sleep
+ * @start_fw: allocates and inits all the resources for the transport
+ *	layer. Also kick a fw image.
+ *	May sleep
+ * @fw_alive: called when the fw sends alive notification. If the fw provides
+ *	the SCD base address in SRAM, then provide it here, or 0 otherwise.
+ *	May sleep
+ * @stop_device: stops the whole device (embedded CPU put to reset) and stops
+ *	the HW. If low_power is true, the NIC will be put in low power state.
+ *	From that point on, the HW will be stopped but will still issue an
+ *	interrupt if the HW RF kill switch is triggered.
+ *	This callback must do the right thing and not crash even if %start_hw()
+ *	was called but not %start_fw(). May sleep.
+ * @d3_suspend: put the device into the correct mode for WoWLAN during
+ *	suspend. This is optional, if not implemented WoWLAN will not be
+ *	supported. This callback may sleep.
+ * @d3_resume: resume the device after WoWLAN, enabling the opmode to
+ *	talk to the WoWLAN image to get its status. This is optional, if not
+ *	implemented WoWLAN will not be supported. This callback may sleep.
+ * @send_cmd: send a host command. Must return -ERFKILL if RFkill is asserted.
+ *	If RFkill is asserted in the middle of a SYNC host command, it must
+ *	return -ERFKILL straight away.
+ *	May sleep only if CMD_ASYNC is not set
+ * @tx: send an skb. The transport relies on the op_mode to zero the
+ *	ieee80211_tx_info->driver_data. If the MPDU is an A-MSDU, all
+ *	the CSUM will be taken care of (TCP CSUM and IP header in case of
+ *	IPv4). If the MPDU is a single MSDU, the op_mode must compute the IP
+ *	header if it is IPv4.
+ *	Must be atomic
+ * @reclaim: free packets up to ssn. Returns a list of freed packets.
+ *	Must be atomic
+ * @txq_enable: setup a queue. To setup an AC queue, use the
+ *	iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
+ *	this one. The op_mode must not configure the HCMD queue. The scheduler
+ *	configuration may be %NULL, in which case the hardware will not be
+ *	configured. If true is returned, the operation mode needs to increment
+ *	the sequence number of the packets routed to this queue because of a
+ *	hardware scheduler bug. May sleep.
+ * @txq_disable: de-configure a Tx queue to send AMPDUs
+ *	Must be atomic
+ * @txq_set_shared_mode: change Tx queue shared/unshared marking
+ * @wait_tx_queues_empty: wait until tx queues are empty. May sleep.
+ * @wait_txq_empty: wait until specific tx queue is empty. May sleep.
+ * @freeze_txq_timer: prevents the timer of the queue from firing until the
+ *	queue is set to awake. Must be atomic.
+ * @block_txq_ptrs: stop updating the write pointers of the Tx queues. Note
+ *	that the transport needs to refcount the calls since this function
+ *	will be called several times with block = true, and then the queues
+ *	need to be unblocked only after the same number of calls with
+ *	block = false.
+ * @write8: write a u8 to a register at offset ofs from the BAR
+ * @write32: write a u32 to a register at offset ofs from the BAR
+ * @read32: read a u32 register at offset ofs from the BAR
+ * @read_prph: read a DWORD from a periphery register
+ * @write_prph: write a DWORD to a periphery register
+ * @read_mem: read device's SRAM in DWORD
+ * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory
+ *	will be zeroed.
+ * @configure: configure parameters required by the transport layer from
+ *	the op_mode. May be called several times before start_fw, can't be
+ *	called after that.
+ * @set_pmi: set the power pmi state
+ * @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
+ *	Sleeping is not allowed between grab_nic_access and
+ *	release_nic_access.
+ * @release_nic_access: let the NIC go to sleep. The "flags" parameter
+ *	must be the same one that was sent before to the grab_nic_access.
+ * @set_bits_mask: set SRAM register according to value and mask.
+ * @ref: grab a reference to the transport/FW layers, disallowing
+ *	certain low power states
+ * @unref: release a reference previously taken with @ref. Note that
+ *	initially the reference count is 1, making an initial @unref
+ *	necessary to allow low power states.
+ * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
+ *	TX'ed commands and similar. The buffer will be vfree'd by the caller.
+ *	Note that the transport must fill in the proper file headers.
+ * @dump_regs: dump using IWL_ERR configuration space and memory mapped
+ *	registers of the device to diagnose failure, e.g., when HW becomes
+ *	inaccessible.
+ */
+struct iwl_trans_ops {
+
+	int (*start_hw)(struct iwl_trans *iwl_trans, bool low_power);
+	void (*op_mode_leave)(struct iwl_trans *iwl_trans);
+	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
+			bool run_in_rfkill);
+	void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
+	void (*stop_device)(struct iwl_trans *trans, bool low_power);
+
+	void (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
+	int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
+			 bool test, bool reset);
+
+	int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
+
+	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
+		  struct iwl_device_cmd *dev_cmd, int queue);
+	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
+			struct sk_buff_head *skbs);
+
+	bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
+			   const struct iwl_trans_txq_scd_cfg *cfg,
+			   unsigned int queue_wdg_timeout);
+	void (*txq_disable)(struct iwl_trans *trans, int queue,
+			    bool configure_scd);
+	/* 22000 functions */
+	int (*txq_alloc)(struct iwl_trans *trans,
+			 struct iwl_tx_queue_cfg_cmd *cmd,
+			 int cmd_id, int size,
+			 unsigned int queue_wdg_timeout);
+	void (*txq_free)(struct iwl_trans *trans, int queue);
+	int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
+			    struct iwl_trans_rxq_dma_data *data);
+
+	void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
+				    bool shared);
+
+	int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm);
+	int (*wait_txq_empty)(struct iwl_trans *trans, int queue);
+	void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
+				 bool freeze);
+	void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);
+
+	void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
+	void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
+	u32 (*read32)(struct iwl_trans *trans, u32 ofs);
+	u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
+	void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
+	int (*read_mem)(struct iwl_trans *trans, u32 addr,
+			void *buf, int dwords);
+	int (*write_mem)(struct iwl_trans *trans, u32 addr,
+			 const void *buf, int dwords);
+	void (*configure)(struct iwl_trans *trans,
+			  const struct iwl_trans_config *trans_cfg);
+	void (*set_pmi)(struct iwl_trans *trans, bool state);
+	void (*sw_reset)(struct iwl_trans *trans);
+	bool (*grab_nic_access)(struct iwl_trans *trans, unsigned long *flags);
+	void (*release_nic_access)(struct iwl_trans *trans,
+				   unsigned long *flags);
+	void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
+			      u32 value);
+	void (*ref)(struct iwl_trans *trans);
+	void (*unref)(struct iwl_trans *trans);
+	int  (*suspend)(struct iwl_trans *trans);
+	void (*resume)(struct iwl_trans *trans);
+
+	struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
+						 const struct iwl_fw_dbg_trigger_tlv
+						 *trigger);
+
+	void (*dump_regs)(struct iwl_trans *trans);
+};
+
+/**
+ * enum iwl_trans_state - state of the transport layer
+ *
+ * @IWL_TRANS_NO_FW: no fw has sent an alive response
+ * @IWL_TRANS_FW_ALIVE: a fw has sent an alive response
+ */
+enum iwl_trans_state {
+	IWL_TRANS_NO_FW = 0,
+	IWL_TRANS_FW_ALIVE	= 1,
+};
+
+/**
+ * DOC: Platform power management
+ *
+ * There are two types of platform power management: system-wide
+ * (WoWLAN) and runtime.
+ *
+ * In system-wide power management the entire platform goes into a low
+ * power state (e.g. idle or suspend to RAM) at the same time and the
+ * device is configured as a wakeup source for the entire platform.
+ * This is usually triggered by userspace activity (e.g. the user
+ * presses the suspend button or a power management daemon decides to
+ * put the platform in low power mode).  The device's behavior in this
+ * mode is dictated by the wake-on-WLAN configuration.
+ *
+ * In runtime power management, only the devices which are themselves
+ * idle enter a low power state.  This is done at runtime, which means
+ * that the entire system is still running normally.  This mode is
+ * usually triggered automatically by the device driver and requires
+ * the ability to enter and exit the low power modes in a very short
+ * time, so there is not much impact on usability.
+ *
+ * The terms used for the device's behavior are as follows:
+ *
+ *	- D0: the device is fully powered and the host is awake;
+ *	- D3: the device is in low power mode and only reacts to
+ *		specific events (e.g. magic-packet received or scan
+ *		results found);
+ *	- D0I3: the device is in low power mode and reacts to any
+ *		activity (e.g. RX);
+ *
+ * These terms reflect the power modes in the firmware and are not to
+ * be confused with the physical device power state.  The NIC can be
+ * in D0I3 mode even if, for instance, the PCI device is in D3 state.
+ */
+
+/**
+ * enum iwl_plat_pm_mode - platform power management mode
+ *
+ * This enumeration describes the device's platform power management
+ * behavior when in idle mode (i.e. runtime power management) or when
+ * in system-wide suspend (i.e. WoWLAN).
+ *
+ * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled for this
+ *	device.  At runtime, this means that nothing happens and the
+ *	device always remains active.  In system-wide suspend mode,
+ *	it means that all connections will be closed automatically
+ *	by mac80211 before the platform is suspended.
+ * @IWL_PLAT_PM_MODE_D3: the device goes into D3 mode (i.e. WoWLAN).
+ *	For runtime power management, this mode is not officially
+ *	supported.
+ * @IWL_PLAT_PM_MODE_D0I3: the device goes into D0I3 mode.
+ */
+enum iwl_plat_pm_mode {
+	IWL_PLAT_PM_MODE_DISABLED,
+	IWL_PLAT_PM_MODE_D3,
+	IWL_PLAT_PM_MODE_D0I3,
+};
+
+/* Max time to wait for trans to become idle/non-idle on d0i3
+ * enter/exit (in msecs).
+ */
+#define IWL_TRANS_IDLE_TIMEOUT 2000
+
+/**
+ * struct iwl_trans - transport common data
+ *
+ * @ops: pointer to iwl_trans_ops
+ * @op_mode: pointer to the op_mode
+ * @cfg: pointer to the configuration
+ * @drv: pointer to iwl_drv
+ * @status: a bit-mask of transport status flags
+ * @dev: pointer to struct device * that represents the device
+ * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
+ *	0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
+ * @hw_rev: a u32 with the HW revision of the device
+ * @hw_rf_id: a u32 with the device RF ID
+ * @hw_id: a u32 with the ID of the device / sub-device.
+ *	Set during transport allocation.
+ * @hw_id_str: a string with info about HW ID. Set during transport allocation.
+ * @pm_support: set to true in start_hw if link pm is supported
+ * @ltr_enabled: set to true if the LTR is enabled
+ * @wide_cmd_header: true when ucode supports wide command header format
+ * @num_rx_queues: number of RX queues allocated by the transport;
+ *	the transport must set this before calling iwl_drv_start()
+ * @iml_len: the length of the image loader
+ * @iml: a pointer to the image loader itself
+ * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
+ *	The user should use iwl_trans_{alloc,free}_tx_cmd.
+ * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
+ *	starting the firmware, used for tracing
+ * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
+ *	start of the 802.11 header in the @rx_mpdu_cmd
+ * @dflt_pwr_limit: default power limit fetched from the platform (ACPI)
+ * @dbg_dest_tlv: points to the destination TLV for debug
+ * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
+ * @dbg_trigger_tlv: array of pointers to triggers TLVs for debug
+ * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
+ * @system_pm_mode: the system-wide power management mode in use.
+ *	This mode is set dynamically, depending on the WoWLAN values
+ *	configured from the userspace at runtime.
+ * @runtime_pm_mode: the runtime power management mode in use.  This
+ *	mode is set during the initialization phase and is not
+ *	supposed to change during runtime.
+ */
+struct iwl_trans {
+	const struct iwl_trans_ops *ops;
+	struct iwl_op_mode *op_mode;
+	const struct iwl_cfg *cfg;
+	struct iwl_drv *drv;
+	enum iwl_trans_state state;
+	unsigned long status;
+
+	struct device *dev;
+	u32 max_skb_frags;
+	u32 hw_rev;
+	u32 hw_rf_id;
+	u32 hw_id;
+	char hw_id_str[52];
+
+	u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
+
+	bool pm_support;
+	bool ltr_enabled;
+
+	const struct iwl_hcmd_arr *command_groups;
+	int command_groups_size;
+	bool wide_cmd_header;
+
+	u8 num_rx_queues;
+
+	size_t iml_len;
+	u8 *iml;
+
+	/* The following fields are internal only */
+	struct kmem_cache *dev_cmd_pool;
+	char dev_cmd_pool_name[50];
+
+	struct dentry *dbgfs_dir;
+
+#ifdef CONFIG_LOCKDEP
+	struct lockdep_map sync_cmd_lockdep_map;
+#endif
+
+	const struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv;
+	const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
+	struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
+	u32 dbg_dump_mask;
+	u8 dbg_dest_reg_num;
+
+	enum iwl_plat_pm_mode system_pm_mode;
+	enum iwl_plat_pm_mode runtime_pm_mode;
+	bool suspending;
+
+	/* pointer to trans specific struct */
+	/* Ensure that this pointer is always aligned to sizeof(void *) */
+	char trans_specific[0] __aligned(sizeof(void *));
+};
+
+const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
+int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);
+
+static inline void iwl_trans_configure(struct iwl_trans *trans,
+				       const struct iwl_trans_config *trans_cfg)
+{
+	trans->op_mode = trans_cfg->op_mode;
+
+	trans->ops->configure(trans, trans_cfg);
+	WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
+}
+
+static inline int _iwl_trans_start_hw(struct iwl_trans *trans, bool low_power)
+{
+	might_sleep();
+
+	return trans->ops->start_hw(trans, low_power);
+}
+
+static inline int iwl_trans_start_hw(struct iwl_trans *trans)
+{
+	return _iwl_trans_start_hw(trans, true);
+}
+
+static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
+{
+	might_sleep();
+
+	if (trans->ops->op_mode_leave)
+		trans->ops->op_mode_leave(trans);
+
+	trans->op_mode = NULL;
+
+	trans->state = IWL_TRANS_NO_FW;
+}
+
+static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
+{
+	might_sleep();
+
+	trans->state = IWL_TRANS_FW_ALIVE;
+
+	trans->ops->fw_alive(trans, scd_addr);
+}
+
+static inline int iwl_trans_start_fw(struct iwl_trans *trans,
+				     const struct fw_img *fw,
+				     bool run_in_rfkill)
+{
+	might_sleep();
+
+	WARN_ON_ONCE(!trans->rx_mpdu_cmd);
+
+	clear_bit(STATUS_FW_ERROR, &trans->status);
+	return trans->ops->start_fw(trans, fw, run_in_rfkill);
+}
+
+static inline void _iwl_trans_stop_device(struct iwl_trans *trans,
+					  bool low_power)
+{
+	might_sleep();
+
+	trans->ops->stop_device(trans, low_power);
+
+	trans->state = IWL_TRANS_NO_FW;
+}
+
+static inline void iwl_trans_stop_device(struct iwl_trans *trans)
+{
+	_iwl_trans_stop_device(trans, true);
+}
+
+static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
+					bool reset)
+{
+	might_sleep();
+	if (trans->ops->d3_suspend)
+		trans->ops->d3_suspend(trans, test, reset);
+}
+
+static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
+				      enum iwl_d3_status *status,
+				      bool test, bool reset)
+{
+	might_sleep();
+	if (!trans->ops->d3_resume)
+		return 0;
+
+	return trans->ops->d3_resume(trans, status, test, reset);
+}
+
+static inline int iwl_trans_suspend(struct iwl_trans *trans)
+{
+	if (!trans->ops->suspend)
+		return 0;
+
+	return trans->ops->suspend(trans);
+}
+
+static inline void iwl_trans_resume(struct iwl_trans *trans)
+{
+	if (trans->ops->resume)
+		trans->ops->resume(trans);
+}
+
+static inline struct iwl_trans_dump_data *
+iwl_trans_dump_data(struct iwl_trans *trans,
+		    const struct iwl_fw_dbg_trigger_tlv *trigger)
+{
+	if (!trans->ops->dump_data)
+		return NULL;
+	return trans->ops->dump_data(trans, trigger);
+}
+
+static inline void iwl_trans_dump_regs(struct iwl_trans *trans)
+{
+	if (trans->ops->dump_regs)
+		trans->ops->dump_regs(trans);
+}
+
+static inline struct iwl_device_cmd *
+iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
+{
+	return kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
+}
+
+int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
+
+static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
+					 struct iwl_device_cmd *dev_cmd)
+{
+	kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
+}
+
+static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
+			       struct iwl_device_cmd *dev_cmd, int queue)
+{
+	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
+		return -EIO;
+
+	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
+		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
+		return -EIO;
+	}
+
+	return trans->ops->tx(trans, skb, dev_cmd, queue);
+}
+
+static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
+				     int ssn, struct sk_buff_head *skbs)
+{
+	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
+		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
+		return;
+	}
+
+	trans->ops->reclaim(trans, queue, ssn, skbs);
+}
+
+static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
+					 bool configure_scd)
+{
+	trans->ops->txq_disable(trans, queue, configure_scd);
+}
+
+static inline bool
+iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
+			 const struct iwl_trans_txq_scd_cfg *cfg,
+			 unsigned int queue_wdg_timeout)
+{
+	might_sleep();
+
+	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
+		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
+		return false;
+	}
+
+	return trans->ops->txq_enable(trans, queue, ssn,
+				      cfg, queue_wdg_timeout);
+}
+
+static inline int
+iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
+			   struct iwl_trans_rxq_dma_data *data)
+{
+	if (WARN_ON_ONCE(!trans->ops->rxq_dma_data))
+		return -ENOTSUPP;
+
+	return trans->ops->rxq_dma_data(trans, queue, data);
+}
+
+static inline void
+iwl_trans_txq_free(struct iwl_trans *trans, int queue)
+{
+	if (WARN_ON_ONCE(!trans->ops->txq_free))
+		return;
+
+	trans->ops->txq_free(trans, queue);
+}
+
+static inline int
+iwl_trans_txq_alloc(struct iwl_trans *trans,
+		    struct iwl_tx_queue_cfg_cmd *cmd,
+		    int cmd_id, int size,
+		    unsigned int wdg_timeout)
+{
+	might_sleep();
+
+	if (WARN_ON_ONCE(!trans->ops->txq_alloc))
+		return -ENOTSUPP;
+
+	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
+		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
+		return -EIO;
+	}
+
+	return trans->ops->txq_alloc(trans, cmd, cmd_id, size, wdg_timeout);
+}
+
+static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
+						 int queue, bool shared_mode)
+{
+	if (trans->ops->txq_set_shared_mode)
+		trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
+}
+
+static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
+					int fifo, int sta_id, int tid,
+					int frame_limit, u16 ssn,
+					unsigned int queue_wdg_timeout)
+{
+	struct iwl_trans_txq_scd_cfg cfg = {
+		.fifo = fifo,
+		.sta_id = sta_id,
+		.tid = tid,
+		.frame_limit = frame_limit,
+		.aggregate = sta_id >= 0,
+	};
+
+	iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
+}
+
+static inline
+void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
+			     unsigned int queue_wdg_timeout)
+{
+	struct iwl_trans_txq_scd_cfg cfg = {
+		.fifo = fifo,
+		.sta_id = -1,
+		.tid = IWL_MAX_TID_COUNT,
+		.frame_limit = IWL_FRAME_LIMIT,
+		.aggregate = false,
+	};
+
+	iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
+}
+
+static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
+					      unsigned long txqs,
+					      bool freeze)
+{
+	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
+		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
+		return;
+	}
+
+	if (trans->ops->freeze_txq_timer)
+		trans->ops->freeze_txq_timer(trans, txqs, freeze);
+}
+
+static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
+					    bool block)
+{
+	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
+		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
+		return;
+	}
+
+	if (trans->ops->block_txq_ptrs)
+		trans->ops->block_txq_ptrs(trans, block);
+}
+
+static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
+						 u32 txqs)
+{
+	if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty))
+		return -ENOTSUPP;
+
+	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
+		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
+		return -EIO;
+	}
+
+	return trans->ops->wait_tx_queues_empty(trans, txqs);
+}
+
+static inline int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
+{
+	if (WARN_ON_ONCE(!trans->ops->wait_txq_empty))
+		return -ENOTSUPP;
+
+	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
+		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
+		return -EIO;
+	}
+
+	return trans->ops->wait_txq_empty(trans, queue);
+}
+
+static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
+{
+	trans->ops->write8(trans, ofs, val);
+}
+
+static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
+{
+	trans->ops->write32(trans, ofs, val);
+}
+
+static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
+{
+	return trans->ops->read32(trans, ofs);
+}
+
+static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
+{
+	return trans->ops->read_prph(trans, ofs);
+}
+
+static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
+					u32 val)
+{
+	trans->ops->write_prph(trans, ofs, val);
+}
+
+static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
+				     void *buf, int dwords)
+{
+	return trans->ops->read_mem(trans, addr, buf, dwords);
+}
+
+#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)		      \
+	do {								      \
+		if (__builtin_constant_p(bufsize))			      \
+			BUILD_BUG_ON((bufsize) % sizeof(u32));		      \
+		iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
+	} while (0)
+
+static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
+{
+	u32 value;
+
+	if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1)))
+		return 0xa5a5a5a5;
+
+	return value;
+}
+
+static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
+				      const void *buf, int dwords)
+{
+	return trans->ops->write_mem(trans, addr, buf, dwords);
+}
+
+static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
+					u32 val)
+{
+	return iwl_trans_write_mem(trans, addr, &val, 1);
+}
+
+static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
+{
+	if (trans->ops->set_pmi)
+		trans->ops->set_pmi(trans, state);
+}
+
+static inline void iwl_trans_sw_reset(struct iwl_trans *trans)
+{
+	if (trans->ops->sw_reset)
+		trans->ops->sw_reset(trans);
+}
+
+static inline void
+iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
+{
+	trans->ops->set_bits_mask(trans, reg, mask, value);
+}
+
+#define iwl_trans_grab_nic_access(trans, flags)	\
+	__cond_lock(nic_access,				\
+		    likely((trans)->ops->grab_nic_access(trans, flags)))
+
+static inline void __releases(nic_access)
+iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
+{
+	trans->ops->release_nic_access(trans, flags);
+	__release(nic_access);
+}
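The grab/release pair brackets every burst of direct register access while the NIC might otherwise be asleep; the canonical calling pattern (mirroring the helpers in iwl-io.c) is:

static u32 example_read_prph_awake(struct iwl_trans *trans, u32 ofs)
{
	unsigned long flags;
	u32 val = 0;

	if (iwl_trans_grab_nic_access(trans, &flags)) {
		val = iwl_trans_read_prph(trans, ofs);
		iwl_trans_release_nic_access(trans, &flags);
	}
	return val;
}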
+
+static inline void iwl_trans_fw_error(struct iwl_trans *trans)
+{
+	if (WARN_ON_ONCE(!trans->op_mode))
+		return;
+
+	/* prevent double restarts due to the same erroneous FW */
+	if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status))
+		iwl_op_mode_nic_error(trans->op_mode);
+}
+
+/*****************************************************
+ * transport helper functions
+ *****************************************************/
+struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
+				  struct device *dev,
+				  const struct iwl_cfg *cfg,
+				  const struct iwl_trans_ops *ops);
+void iwl_trans_free(struct iwl_trans *trans);
+void iwl_trans_ref(struct iwl_trans *trans);
+void iwl_trans_unref(struct iwl_trans *trans);
+
+/*****************************************************
+ * driver (transport) register/unregister functions
+ *****************************************************/
+int __must_check iwl_pci_register_driver(void);
+void iwl_pci_unregister_driver(void);
+
+#endif /* __iwl_trans_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
new file mode 100644
index 0000000..9ffd219
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_IWLMVM)   += iwlmvm.o
+iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
+iwlmvm-y += utils.o rx.o rxmq.o tx.o binding.o quota.o sta.o sf.o
+iwlmvm-y += scan.o time-event.o rs.o rs-fw.o
+iwlmvm-y += power.o coex.o
+iwlmvm-y += tt.o offloading.o tdls.o
+iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
+iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
+iwlmvm-y += tof.o
+iwlmvm-$(CONFIG_PM) += d3.o
+
+ccflags-y += -I$(src)/../
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/binding.c b/drivers/net/wireless/intel/iwlwifi/mvm/binding.c
new file mode 100644
index 0000000..75d35f6
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/binding.c
@@ -0,0 +1,226 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <net/mac80211.h>
+#include "fw-api.h"
+#include "mvm.h"
+
+struct iwl_mvm_iface_iterator_data {
+	struct ieee80211_vif *ignore_vif;
+	int idx;
+
+	struct iwl_mvm_phy_ctxt *phyctxt;
+
+	u16 ids[MAX_MACS_IN_BINDING];
+	u16 colors[MAX_MACS_IN_BINDING];
+};
+
+static int iwl_mvm_binding_cmd(struct iwl_mvm *mvm, u32 action,
+			       struct iwl_mvm_iface_iterator_data *data)
+{
+	struct iwl_binding_cmd cmd;
+	struct iwl_mvm_phy_ctxt *phyctxt = data->phyctxt;
+	int i, ret;
+	u32 status;
+	int size;
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	if (fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)) {
+		size = sizeof(cmd);
+		if (phyctxt->channel->band == NL80211_BAND_2GHZ ||
+		    !iwl_mvm_is_cdb_supported(mvm))
+			cmd.lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX);
+		else
+			cmd.lmac_id = cpu_to_le32(IWL_LMAC_5G_INDEX);
+	} else {
+		size = IWL_BINDING_CMD_SIZE_V1;
+	}
+
+	cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(phyctxt->id,
+							   phyctxt->color));
+	cmd.action = cpu_to_le32(action);
+	cmd.phy = cpu_to_le32(FW_CMD_ID_AND_COLOR(phyctxt->id,
+						  phyctxt->color));
+
+	for (i = 0; i < MAX_MACS_IN_BINDING; i++)
+		cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID);
+	for (i = 0; i < data->idx; i++)
+		cmd.macs[i] = cpu_to_le32(FW_CMD_ID_AND_COLOR(data->ids[i],
+							      data->colors[i]));
+
+	status = 0;
+	ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
+					  size, &cmd, &status);
+	if (ret) {
+		IWL_ERR(mvm, "Failed to send binding (action:%d): %d\n",
+			action, ret);
+		return ret;
+	}
+
+	if (status) {
+		IWL_ERR(mvm, "Binding command failed: %u\n", status);
+		ret = -EIO;
+	}
+
+	return ret;
+}
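+
+/*
+ * Editorial note on the sizing above: the full struct iwl_binding_cmd is
+ * only sent when the firmware advertises CDB binding support; otherwise
+ * the shorter IWL_BINDING_CMD_SIZE_V1 layout is used, so older firmware
+ * never sees the trailing lmac_id field.
+ */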
+
+static void iwl_mvm_iface_iterator(void *_data, u8 *mac,
+				   struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_iface_iterator_data *data = _data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	if (vif == data->ignore_vif)
+		return;
+
+	if (mvmvif->phy_ctxt != data->phyctxt)
+		return;
+
+	if (WARN_ON_ONCE(data->idx >= MAX_MACS_IN_BINDING))
+		return;
+
+	data->ids[data->idx] = mvmvif->id;
+	data->colors[data->idx] = mvmvif->color;
+	data->idx++;
+}
+
+static int iwl_mvm_binding_update(struct iwl_mvm *mvm,
+				  struct ieee80211_vif *vif,
+				  struct iwl_mvm_phy_ctxt *phyctxt,
+				  bool add)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm_iface_iterator_data data = {
+		.ignore_vif = vif,
+		.phyctxt = phyctxt,
+	};
+	u32 action = FW_CTXT_ACTION_MODIFY;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+						   IEEE80211_IFACE_ITER_NORMAL,
+						   iwl_mvm_iface_iterator,
+						   &data);
+
+	/*
+	 * If there are no other interfaces yet, we need to create a new
+	 * binding (or remove the existing one when tearing down).
+	 */
+	if (data.idx == 0) {
+		if (add)
+			action = FW_CTXT_ACTION_ADD;
+		else
+			action = FW_CTXT_ACTION_REMOVE;
+	}
+
+	if (add) {
+		if (WARN_ON_ONCE(data.idx >= MAX_MACS_IN_BINDING))
+			return -EINVAL;
+
+		data.ids[data.idx] = mvmvif->id;
+		data.colors[data.idx] = mvmvif->color;
+		data.idx++;
+	}
+
+	return iwl_mvm_binding_cmd(mvm, action, &data);
+}
+
+int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
+		return -EINVAL;
+
+	/*
+	 * Update SF - disable if needed. If this fails, SF might still be on
+	 * while many MACs are bound, which is forbidden - so fail the binding.
+	 */
+	if (iwl_mvm_sf_update(mvm, vif, false))
+		return -EINVAL;
+
+	return iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, true);
+}
+
+int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	int ret;
+
+	if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
+		return -EINVAL;
+
+	ret = iwl_mvm_binding_update(mvm, vif, mvmvif->phy_ctxt, false);
+
+	if (!ret)
+		if (iwl_mvm_sf_update(mvm, vif, true))
+			IWL_ERR(mvm, "Failed to update SF state\n");
+
+	return ret;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
new file mode 100644
index 0000000..016e03a
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -0,0 +1,735 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/ieee80211.h>
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+
+#include "fw/api/coex.h"
+#include "iwl-modparams.h"
+#include "mvm.h"
+#include "iwl-debug.h"
+
+/* 20 MHz / 40 MHz below / 40 MHz above */
+static const __le64 iwl_ci_mask[][3] = {
+	/* dummy entry for channel 0 */
+	{cpu_to_le64(0), cpu_to_le64(0), cpu_to_le64(0)},
+	{
+		cpu_to_le64(0x0000001FFFULL),
+		cpu_to_le64(0x0ULL),
+		cpu_to_le64(0x00007FFFFFULL),
+	},
+	{
+		cpu_to_le64(0x000000FFFFULL),
+		cpu_to_le64(0x0ULL),
+		cpu_to_le64(0x0003FFFFFFULL),
+	},
+	{
+		cpu_to_le64(0x000003FFFCULL),
+		cpu_to_le64(0x0ULL),
+		cpu_to_le64(0x000FFFFFFCULL),
+	},
+	{
+		cpu_to_le64(0x00001FFFE0ULL),
+		cpu_to_le64(0x0ULL),
+		cpu_to_le64(0x007FFFFFE0ULL),
+	},
+	{
+		cpu_to_le64(0x00007FFF80ULL),
+		cpu_to_le64(0x00007FFFFFULL),
+		cpu_to_le64(0x01FFFFFF80ULL),
+	},
+	{
+		cpu_to_le64(0x0003FFFC00ULL),
+		cpu_to_le64(0x0003FFFFFFULL),
+		cpu_to_le64(0x0FFFFFFC00ULL),
+	},
+	{
+		cpu_to_le64(0x000FFFF000ULL),
+		cpu_to_le64(0x000FFFFFFCULL),
+		cpu_to_le64(0x3FFFFFF000ULL),
+	},
+	{
+		cpu_to_le64(0x007FFF8000ULL),
+		cpu_to_le64(0x007FFFFFE0ULL),
+		cpu_to_le64(0xFFFFFF8000ULL),
+	},
+	{
+		cpu_to_le64(0x01FFFE0000ULL),
+		cpu_to_le64(0x01FFFFFF80ULL),
+		cpu_to_le64(0xFFFFFE0000ULL),
+	},
+	{
+		cpu_to_le64(0x0FFFF00000ULL),
+		cpu_to_le64(0x0FFFFFFC00ULL),
+		cpu_to_le64(0x0ULL),
+	},
+	{
+		cpu_to_le64(0x3FFFC00000ULL),
+		cpu_to_le64(0x3FFFFFF000ULL),
+		cpu_to_le64(0x0)
+	},
+	{
+		cpu_to_le64(0xFFFE000000ULL),
+		cpu_to_le64(0xFFFFFF8000ULL),
+		cpu_to_le64(0x0)
+	},
+	{
+		cpu_to_le64(0xFFF8000000ULL),
+		cpu_to_le64(0xFFFFFE0000ULL),
+		cpu_to_le64(0x0)
+	},
+	{
+		cpu_to_le64(0xFE00000000ULL),
+		cpu_to_le64(0x0ULL),
+		cpu_to_le64(0x0ULL)
+	},
+};
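+
+/*
+ * Editorial note: iwl_ci_mask is indexed first by the 2.4 GHz channel
+ * number (hw_value) and then by bandwidth (0 = 20 MHz, 1 = 40 MHz with
+ * the secondary channel below, 2 = 40 MHz with the secondary channel
+ * above); each __le64 appears to be a bitmap of Bluetooth channels the
+ * firmware should treat as occupied by Wi-Fi.
+ * iwl_mvm_bt_coex_notif_handle() below derives the second index from
+ * chan->def.width and center_freq1.
+ */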
+
+static enum iwl_bt_coex_lut_type
+iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
+{
+	struct ieee80211_chanctx_conf *chanctx_conf;
+	enum iwl_bt_coex_lut_type ret;
+	u16 phy_ctx_id;
+	u32 primary_ch_phy_id, secondary_ch_phy_id;
+
+	/*
+	 * Checking that we hold mvm->mutex would be a good idea, but the
+	 * rate control can't acquire the mutex since it runs in the Tx
+	 * path. So this is racy in that case; in the worst case the AMPDU
+	 * size limit will be wrong for a short time, which is not a big
+	 * issue.
+	 */
+
+	rcu_read_lock();
+
+	chanctx_conf = rcu_dereference(vif->chanctx_conf);
+
+	if (!chanctx_conf ||
+	     chanctx_conf->def.chan->band != NL80211_BAND_2GHZ) {
+		rcu_read_unlock();
+		return BT_COEX_INVALID_LUT;
+	}
+
+	ret = BT_COEX_TX_DIS_LUT;
+
+	if (mvm->cfg->bt_shared_single_ant) {
+		rcu_read_unlock();
+		return ret;
+	}
+
+	phy_ctx_id = *((u16 *)chanctx_conf->drv_priv);
+	primary_ch_phy_id = le32_to_cpu(mvm->last_bt_ci_cmd.primary_ch_phy_id);
+	secondary_ch_phy_id =
+		le32_to_cpu(mvm->last_bt_ci_cmd.secondary_ch_phy_id);
+
+	if (primary_ch_phy_id == phy_ctx_id)
+		ret = le32_to_cpu(mvm->last_bt_notif.primary_ch_lut);
+	else if (secondary_ch_phy_id == phy_ctx_id)
+		ret = le32_to_cpu(mvm->last_bt_notif.secondary_ch_lut);
+	/* else - default = Tx/Tx disallowed (BT_COEX_TX_DIS_LUT) */
+
+	rcu_read_unlock();
+
+	return ret;
+}
+
+int iwl_mvm_send_bt_init_conf(struct iwl_mvm *mvm)
+{
+	struct iwl_bt_coex_cmd bt_cmd = {};
+	u32 mode;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
+		switch (mvm->bt_force_ant_mode) {
+		case BT_FORCE_ANT_BT:
+			mode = BT_COEX_BT;
+			break;
+		case BT_FORCE_ANT_WIFI:
+			mode = BT_COEX_WIFI;
+			break;
+		default:
+			WARN_ON(1);
+			mode = 0;
+		}
+
+		bt_cmd.mode = cpu_to_le32(mode);
+		goto send_cmd;
+	}
+
+	mode = iwlwifi_mod_params.bt_coex_active ? BT_COEX_NW : BT_COEX_DISABLE;
+	bt_cmd.mode = cpu_to_le32(mode);
+
+	if (IWL_MVM_BT_COEX_SYNC2SCO)
+		bt_cmd.enabled_modules |=
+			cpu_to_le32(BT_COEX_SYNC2SCO_ENABLED);
+
+	if (iwl_mvm_is_mplut_supported(mvm))
+		bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_MPLUT_ENABLED);
+
+	bt_cmd.enabled_modules |= cpu_to_le32(BT_COEX_HIGH_BAND_RET);
+
+send_cmd:
+	memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
+	memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
+
+	return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd);
+}
+
+static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
+				       bool enable)
+{
+	struct iwl_bt_coex_reduced_txp_update_cmd cmd = {};
+	struct iwl_mvm_sta *mvmsta;
+	u32 value;
+	int ret;
+
+	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
+	if (!mvmsta)
+		return 0;
+
+	/* nothing to do */
+	if (mvmsta->bt_reduced_txpower == enable)
+		return 0;
+
+	value = mvmsta->sta_id;
+
+	if (enable)
+		value |= BT_REDUCED_TX_POWER_BIT;
+
+	IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n",
+		       enable ? "en" : "dis", sta_id);
+
+	cmd.reduced_txp = cpu_to_le32(value);
+	mvmsta->bt_reduced_txpower = enable;
+
+	ret = iwl_mvm_send_cmd_pdu(mvm, BT_COEX_UPDATE_REDUCED_TXP, CMD_ASYNC,
+				   sizeof(cmd), &cmd);
+
+	return ret;
+}
+
+struct iwl_bt_iterator_data {
+	struct iwl_bt_coex_profile_notif *notif;
+	struct iwl_mvm *mvm;
+	struct ieee80211_chanctx_conf *primary;
+	struct ieee80211_chanctx_conf *secondary;
+	bool primary_ll;
+	u8 primary_load;
+	u8 secondary_load;
+};
+
+static inline
+void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm,
+				       struct ieee80211_vif *vif,
+				       bool enable, int rssi)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	mvmvif->bf_data.last_bt_coex_event = rssi;
+	mvmvif->bf_data.bt_coex_max_thold =
+		enable ? -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH : 0;
+	mvmvif->bf_data.bt_coex_min_thold =
+		enable ? -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH : 0;
+}
+
+#define MVM_COEX_TCM_PERIOD (HZ * 10)
+
+static void iwl_mvm_bt_coex_tcm_based_ci(struct iwl_mvm *mvm,
+					 struct iwl_bt_iterator_data *data)
+{
+	unsigned long now = jiffies;
+
+	if (!time_after(now, mvm->bt_coex_last_tcm_ts + MVM_COEX_TCM_PERIOD))
+		return;
+
+	mvm->bt_coex_last_tcm_ts = now;
+
+	/* We assume here that we don't have more than 2 vifs on 2.4GHz */
+
+	/* if the primary is low latency, it will stay primary */
+	if (data->primary_ll)
+		return;
+
+	if (data->primary_load >= data->secondary_load)
+		return;
+
+	swap(data->primary, data->secondary);
+}
+
+/* must be called under rcu_read_lock */
+static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
+				      struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_bt_iterator_data *data = _data;
+	struct iwl_mvm *mvm = data->mvm;
+	struct ieee80211_chanctx_conf *chanctx_conf;
+	/* default smps_mode is AUTOMATIC - only used for client modes */
+	enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
+	u32 bt_activity_grading;
+	int ave_rssi;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	switch (vif->type) {
+	case NL80211_IFTYPE_STATION:
+		break;
+	case NL80211_IFTYPE_AP:
+		if (!mvmvif->ap_ibss_active)
+			return;
+		break;
+	default:
+		return;
+	}
+
+	chanctx_conf = rcu_dereference(vif->chanctx_conf);
+
+	/* If the channel context is invalid or not on 2.4 GHz ... */
+	if (!chanctx_conf ||
+	    chanctx_conf->def.chan->band != NL80211_BAND_2GHZ) {
+		if (vif->type == NL80211_IFTYPE_STATION) {
+			/* ... relax constraints and disable rssi events */
+			iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
+					    smps_mode);
+			iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
+						    false);
+			iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
+		}
+		return;
+	}
+
+	bt_activity_grading = le32_to_cpu(data->notif->bt_activity_grading);
+	if (bt_activity_grading >= BT_HIGH_TRAFFIC)
+		smps_mode = IEEE80211_SMPS_STATIC;
+	else if (bt_activity_grading >= BT_LOW_TRAFFIC)
+		smps_mode = IEEE80211_SMPS_DYNAMIC;
+
+	/* relax SMPS constraints for next association */
+	if (!vif->bss_conf.assoc)
+		smps_mode = IEEE80211_SMPS_AUTOMATIC;
+
+	if (mvmvif->phy_ctxt &&
+	    (mvm->last_bt_notif.rrc_status & BIT(mvmvif->phy_ctxt->id)))
+		smps_mode = IEEE80211_SMPS_AUTOMATIC;
+
+	IWL_DEBUG_COEX(data->mvm,
+		       "mac %d: bt_activity_grading %d smps_req %d\n",
+		       mvmvif->id, bt_activity_grading, smps_mode);
+
+	if (vif->type == NL80211_IFTYPE_STATION)
+		iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
+				    smps_mode);
+
+	/* low latency is always primary */
+	if (iwl_mvm_vif_low_latency(mvmvif)) {
+		data->primary_ll = true;
+
+		data->secondary = data->primary;
+		data->primary = chanctx_conf;
+	}
+
+	if (vif->type == NL80211_IFTYPE_AP) {
+		if (!mvmvif->ap_ibss_active)
+			return;
+
+		if (chanctx_conf == data->primary)
+			return;
+
+		if (!data->primary_ll) {
+			/*
+			 * downgrade the current primary no matter what its
+			 * type is.
+			 */
+			data->secondary = data->primary;
+			data->primary = chanctx_conf;
+		} else {
+			/* there is a low-latency vif - we will be secondary */
+			data->secondary = chanctx_conf;
+		}
+
+		if (data->primary == chanctx_conf)
+			data->primary_load = mvm->tcm.result.load[mvmvif->id];
+		else if (data->secondary == chanctx_conf)
+			data->secondary_load = mvm->tcm.result.load[mvmvif->id];
+		return;
+	}
+
+	/*
+	 * STA / P2P client: try to be primary if this is the first vif. If we
+	 * are in low-latency mode, we are already primary and there is little
+	 * left to do.
+	 */
+	if (!data->primary || data->primary == chanctx_conf)
+		data->primary = chanctx_conf;
+	else if (!data->secondary)
+		/* if secondary is not NULL, it might be a GO */
+		data->secondary = chanctx_conf;
+
+	if (data->primary == chanctx_conf)
+		data->primary_load = mvm->tcm.result.load[mvmvif->id];
+	else if (data->secondary == chanctx_conf)
+		data->secondary_load = mvm->tcm.result.load[mvmvif->id];
+	/*
+	 * don't reduce the Tx power if any of these is true:
+	 *  - the coex LUT is LOOSE
+	 *  - this is a single shared-antenna product
+	 *  - BT is inactive
+	 *  - we are not associated
+	 */
+	if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
+	    mvm->cfg->bt_shared_single_ant || !vif->bss_conf.assoc ||
+	    le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF) {
+		iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false);
+		iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
+		return;
+	}
+
+	/* try to get the avg rssi from fw */
+	ave_rssi = mvmvif->bf_data.ave_beacon_signal;
+
+	/* if the RSSI isn't valid, pretend it is very low */
+	if (!ave_rssi)
+		ave_rssi = -100;
+	if (ave_rssi > -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH) {
+		if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true))
+			IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
+	} else if (ave_rssi < -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH) {
+		if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false))
+			IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
+	}
+
+	/* Begin to monitor the RSSI: it may influence the reduced Tx power */
+	iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi);
+}
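+
+/*
+ * Editorial summary of the iterator above: a low-latency vif always claims
+ * the primary slot; an AP vif demotes a non-low-latency primary; a client
+ * vif takes primary if that slot is free, otherwise secondary. The per-slot
+ * TCM load is recorded so iwl_mvm_bt_coex_tcm_based_ci() can swap the slots
+ * when the secondary carries more traffic.
+ */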
+
+static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
+{
+	struct iwl_bt_iterator_data data = {
+		.mvm = mvm,
+		.notif = &mvm->last_bt_notif,
+	};
+	struct iwl_bt_coex_ci_cmd cmd = {};
+	u8 ci_bw_idx;
+
+	/* Ignore updates if we are in force mode */
+	if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
+		return;
+
+	rcu_read_lock();
+	ieee80211_iterate_active_interfaces_atomic(
+					mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+					iwl_mvm_bt_notif_iterator, &data);
+
+	iwl_mvm_bt_coex_tcm_based_ci(mvm, &data);
+
+	if (data.primary) {
+		struct ieee80211_chanctx_conf *chan = data.primary;
+
+		if (WARN_ON(!chan->def.chan)) {
+			rcu_read_unlock();
+			return;
+		}
+
+		if (chan->def.width < NL80211_CHAN_WIDTH_40) {
+			ci_bw_idx = 0;
+		} else {
+			if (chan->def.center_freq1 >
+			    chan->def.chan->center_freq)
+				ci_bw_idx = 2;
+			else
+				ci_bw_idx = 1;
+		}
+
+		cmd.bt_primary_ci =
+			iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
+		cmd.primary_ch_phy_id =
+			cpu_to_le32(*((u16 *)data.primary->drv_priv));
+	}
+
+	if (data.secondary) {
+		struct ieee80211_chanctx_conf *chan = data.secondary;
+
+		if (WARN_ON(!chan->def.chan)) {
+			rcu_read_unlock();
+			return;
+		}
+
+		if (chan->def.width < NL80211_CHAN_WIDTH_40) {
+			ci_bw_idx = 0;
+		} else {
+			if (chan->def.center_freq1 >
+			    chan->def.chan->center_freq)
+				ci_bw_idx = 2;
+			else
+				ci_bw_idx = 1;
+		}
+
+		cmd.bt_secondary_ci =
+			iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
+		cmd.secondary_ch_phy_id =
+			cpu_to_le32(*((u16 *)data.secondary->drv_priv));
+	}
+
+	rcu_read_unlock();
+
+	/* Don't spam the fw with the same command over and over */
+	if (memcmp(&cmd, &mvm->last_bt_ci_cmd, sizeof(cmd))) {
+		if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, 0,
+					 sizeof(cmd), &cmd))
+			IWL_ERR(mvm, "Failed to send BT_CI cmd\n");
+		memcpy(&mvm->last_bt_ci_cmd, &cmd, sizeof(cmd));
+	}
+}
+
+void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
+			      struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
+
+	IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
+	IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
+	IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n",
+		       le32_to_cpu(notif->primary_ch_lut));
+	IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n",
+		       le32_to_cpu(notif->secondary_ch_lut));
+	IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n",
+		       le32_to_cpu(notif->bt_activity_grading));
+
+	/* remember this notification for future use: rssi fluctuations */
+	memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
+
+	iwl_mvm_bt_coex_notif_handle(mvm);
+}
+
+void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			   enum ieee80211_rssi_event_data rssi_event)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	int ret;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	/* Ignore updates if we are in force mode */
+	if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
+		return;
+
+	/*
+	 * RSSI update while not associated - can happen since the statistics
+	 * are handled asynchronously
+	 */
+	if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA)
+		return;
+
+	/* No BT - reports should be disabled */
+	if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF)
+		return;
+
+	IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid,
+		       rssi_event == RSSI_EVENT_HIGH ? "HIGH" : "LOW");
+
+	/*
+	 * Check if the RSSI is good enough for reduced Tx power, but not in
+	 * the loose scheme.
+	 */
+	if (rssi_event == RSSI_EVENT_LOW || mvm->cfg->bt_shared_single_ant ||
+	    iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT)
+		ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
+						  false);
+	else
+		ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true);
+
+	if (ret)
+		IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n");
+}
+
+#define LINK_QUAL_AGG_TIME_LIMIT_DEF	(4000)
+#define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT	(1200)
+
+u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
+				struct ieee80211_sta *sta)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+	struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
+	enum iwl_bt_coex_lut_type lut_type;
+
+	if (mvm->last_bt_notif.ttc_status & BIT(phy_ctxt->id))
+		return LINK_QUAL_AGG_TIME_LIMIT_DEF;
+
+	if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
+	    BT_HIGH_TRAFFIC)
+		return LINK_QUAL_AGG_TIME_LIMIT_DEF;
+
+	lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
+
+	if (lut_type == BT_COEX_LOOSE_LUT || lut_type == BT_COEX_INVALID_LUT)
+		return LINK_QUAL_AGG_TIME_LIMIT_DEF;
+
+	/* tight coex, high bt traffic, reduce AGG time limit */
+	return LINK_QUAL_AGG_TIME_LIMIT_BT_ACT;
+}
+
+bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
+				     struct ieee80211_sta *sta)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+	struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
+	enum iwl_bt_coex_lut_type lut_type;
+
+	if (mvm->last_bt_notif.ttc_status & BIT(phy_ctxt->id))
+		return true;
+
+	if (le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
+	    BT_HIGH_TRAFFIC)
+		return true;
+
+	/*
+	 * In Tight / TxTxDis, BT can't Rx while we Tx, so use both antennas
+	 * since BT is already killed.
+	 * In Loose, BT can Rx while we Tx, so forbid MIMO to let BT Rx while
+	 * we Tx.
+	 * When we are in 5GHz, we'll get BT_COEX_INVALID_LUT allowing MIMO.
+	 */
+	lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
+	return lut_type != BT_COEX_LOOSE_LUT;
+}
+
+bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant)
+{
+	/* there is no other antenna; the shared antenna is always available */
+	if (mvm->cfg->bt_shared_single_ant)
+		return true;
+
+	if (ant & mvm->cfg->non_shared_ant)
+		return true;
+
+	return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
+		BT_HIGH_TRAFFIC;
+}
+
+bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm)
+{
+	/* there is no other antenna; the shared antenna is always available */
+	if (mvm->cfg->bt_shared_single_ant)
+		return true;
+
+	return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
+		BT_HIGH_TRAFFIC;
+}
+
+bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
+				    enum nl80211_band band)
+{
+	u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
+
+	if (band != NL80211_BAND_2GHZ)
+		return false;
+
+	return bt_activity >= BT_LOW_TRAFFIC;
+}
+
+u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
+			   struct ieee80211_tx_info *info, u8 ac)
+{
+	__le16 fc = hdr->frame_control;
+	bool mplut_enabled = iwl_mvm_is_mplut_supported(mvm);
+
+	if (info->band != NL80211_BAND_2GHZ)
+		return 0;
+
+	if (unlikely(mvm->bt_tx_prio))
+		return mvm->bt_tx_prio - 1;
+
+	if (likely(ieee80211_is_data(fc))) {
+		if (likely(ieee80211_is_data_qos(fc))) {
+			switch (ac) {
+			case IEEE80211_AC_BE:
+				return mplut_enabled ? 1 : 0;
+			case IEEE80211_AC_VI:
+				return mplut_enabled ? 2 : 3;
+			case IEEE80211_AC_VO:
+				return 3;
+			default:
+				return 0;
+			}
+		} else if (is_multicast_ether_addr(hdr->addr1)) {
+			return 3;
+		} else {
+			return 0;
+		}
+	} else if (ieee80211_is_mgmt(fc)) {
+		return ieee80211_is_disassoc(fc) ? 0 : 3;
+	} else if (ieee80211_is_ctl(fc)) {
+		/* ignore cfend and cfendack frames as we never send those */
+		return 3;
+	}
+
+	return 0;
+}
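+
+/*
+ * Editorial summary of the mapping above: with MPLUT support, QoS data is
+ * prioritized BE=1, VI=2, VO=3; without it, BE=0, VI=3, VO=3. Multicast
+ * data, most management frames and control frames get 3, disassoc gets 0,
+ * and everything else (including all 5 GHz traffic) defaults to 0. A
+ * non-zero mvm->bt_tx_prio override takes precedence over all of this.
+ */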
+
+void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
+{
+	iwl_mvm_bt_coex_notif_handle(mvm);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
new file mode 100644
index 0000000..d61ff66
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
@@ -0,0 +1,154 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2015        Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2015        Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __MVM_CONSTANTS_H
+#define __MVM_CONSTANTS_H
+
+#include <linux/ieee80211.h>
+
+#define IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM		20
+
+#define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT	(100 * USEC_PER_MSEC)
+#define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT	(100 * USEC_PER_MSEC)
+#define IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT	(10 * USEC_PER_MSEC)
+#define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT	(10 * USEC_PER_MSEC)
+#define IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT	(2 * 1024) /* defined in TU */
+#define IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT	(40 * 1024) /* defined in TU */
+#define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE	0
+#define IWL_MVM_UAPSD_RX_DATA_TIMEOUT		(50 * USEC_PER_MSEC)
+#define IWL_MVM_UAPSD_TX_DATA_TIMEOUT		(50 * USEC_PER_MSEC)
+#define IWL_MVM_UAPSD_QUEUES		(IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\
+					 IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |\
+					 IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |\
+					 IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+#define IWL_MVM_PS_HEAVY_TX_THLD_PACKETS	20
+#define IWL_MVM_PS_HEAVY_RX_THLD_PACKETS	8
+#define IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS	30
+#define IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS	20
+#define IWL_MVM_PS_HEAVY_TX_THLD_PERCENT	50
+#define IWL_MVM_PS_HEAVY_RX_THLD_PERCENT	50
+#define IWL_MVM_PS_SNOOZE_INTERVAL		25
+#define IWL_MVM_PS_SNOOZE_WINDOW		50
+#define IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW		25
+#define IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT	64
+#define IWL_MVM_BT_COEX_EN_RED_TXP_THRESH	62
+#define IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH	65
+#define IWL_MVM_BT_COEX_SYNC2SCO		1
+#define IWL_MVM_BT_COEX_MPLUT			1
+#define IWL_MVM_BT_COEX_RRC			1
+#define IWL_MVM_BT_COEX_TTC			1
+#define IWL_MVM_BT_COEX_MPLUT_REG0		0x22002200
+#define IWL_MVM_BT_COEX_MPLUT_REG1		0x11118451
+#define IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS	30
+#define IWL_MVM_FW_MCAST_FILTER_PASS_ALL	0
+#define IWL_MVM_FW_BCAST_FILTER_PASS_ALL	0
+#define IWL_MVM_QUOTA_THRESHOLD			4
+#define IWL_MVM_RS_RSSI_BASED_INIT_RATE         0
+#define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK	1
+#define IWL_MVM_TOF_IS_RESPONDER		0
+#define IWL_MVM_SW_TX_CSUM_OFFLOAD		0
+#define IWL_MVM_HW_CSUM_DISABLE			0
+#define IWL_MVM_PARSE_NVM			0
+#define IWL_MVM_ADWELL_ENABLE			1
+#define IWL_MVM_ADWELL_MAX_BUDGET		0
+#define IWL_MVM_TCM_LOAD_MEDIUM_THRESH		10 /* percentage */
+#define IWL_MVM_TCM_LOAD_HIGH_THRESH		50 /* percentage */
+#define IWL_MVM_TCM_LOWLAT_ENABLE_THRESH	100 /* packets/10 seconds */
+#define IWL_MVM_UAPSD_NONAGG_PERIOD		5000 /* msecs */
+#define IWL_MVM_UAPSD_NOAGG_LIST_LEN		IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM
+#define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE    1
+#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE      2
+#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW   1
+#define IWL_MVM_RS_INITIAL_MIMO_NUM_RATES       3
+#define IWL_MVM_RS_INITIAL_SISO_NUM_RATES       3
+#define IWL_MVM_RS_INITIAL_LEGACY_NUM_RATES     2
+#define IWL_MVM_RS_INITIAL_LEGACY_RETRIES       2
+#define IWL_MVM_RS_SECONDARY_LEGACY_RETRIES	1
+#define IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES   16
+#define IWL_MVM_RS_SECONDARY_SISO_NUM_RATES     3
+#define IWL_MVM_RS_SECONDARY_SISO_RETRIES       1
+#define IWL_MVM_RS_RATE_MIN_FAILURE_TH		3
+#define IWL_MVM_RS_RATE_MIN_SUCCESS_TH		8
+#define IWL_MVM_RS_STAY_IN_COLUMN_TIMEOUT	5	/* Seconds */
+#define IWL_MVM_RS_IDLE_TIMEOUT			5	/* Seconds */
+#define IWL_MVM_RS_MISSED_RATE_MAX		15
+#define IWL_MVM_RS_LEGACY_FAILURE_LIMIT		160
+#define IWL_MVM_RS_LEGACY_SUCCESS_LIMIT		480
+#define IWL_MVM_RS_LEGACY_TABLE_COUNT		160
+#define IWL_MVM_RS_NON_LEGACY_FAILURE_LIMIT	400
+#define IWL_MVM_RS_NON_LEGACY_SUCCESS_LIMIT	4500
+#define IWL_MVM_RS_NON_LEGACY_TABLE_COUNT	1500
+#define IWL_MVM_RS_SR_FORCE_DECREASE		15	/* percent */
+#define IWL_MVM_RS_SR_NO_DECREASE		85	/* percent */
+#define IWL_MVM_RS_AGG_TIME_LIMIT	        4000    /* 4 msecs. valid 100-8000 */
+#define IWL_MVM_RS_AGG_DISABLE_START	        3
+#define IWL_MVM_RS_AGG_START_THRESHOLD	        10	/* num frames per second */
+#define IWL_MVM_RS_TPC_SR_FORCE_INCREASE	75	/* percent */
+#define IWL_MVM_RS_TPC_SR_NO_INCREASE		85	/* percent */
+#define IWL_MVM_RS_TPC_TX_POWER_STEP		3
+#define IWL_MVM_ENABLE_EBS			1
+
+#endif /* __MVM_CONSTANTS_H */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
new file mode 100644
index 0000000..79bdae9
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -0,0 +1,2162 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/fs.h>
+#include <net/cfg80211.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include <net/addrconf.h>
+#include "iwl-modparams.h"
+#include "fw-api.h"
+#include "mvm.h"
+
+void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
+			    struct ieee80211_vif *vif,
+			    struct cfg80211_gtk_rekey_data *data)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	if (iwlwifi_mod_params.swcrypto)
+		return;
+
+	mutex_lock(&mvm->mutex);
+
+	memcpy(mvmvif->rekey_data.kek, data->kek, NL80211_KEK_LEN);
+	memcpy(mvmvif->rekey_data.kck, data->kck, NL80211_KCK_LEN);
+	mvmvif->rekey_data.replay_ctr =
+		cpu_to_le64(be64_to_cpup((__be64 *)data->replay_ctr));
+	mvmvif->rekey_data.valid = true;
+
+	mutex_unlock(&mvm->mutex);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
+			      struct ieee80211_vif *vif,
+			      struct inet6_dev *idev)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct inet6_ifaddr *ifa;
+	int idx = 0;
+
+	memset(mvmvif->tentative_addrs, 0, sizeof(mvmvif->tentative_addrs));
+
+	read_lock_bh(&idev->lock);
+	list_for_each_entry(ifa, &idev->addr_list, if_list) {
+		mvmvif->target_ipv6_addrs[idx] = ifa->addr;
+		if (ifa->flags & IFA_F_TENTATIVE)
+			__set_bit(idx, mvmvif->tentative_addrs);
+		idx++;
+		if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)
+			break;
+	}
+	read_unlock_bh(&idev->lock);
+
+	mvmvif->num_target_ipv6_addrs = idx;
+}
+#endif
+
+void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
+				     struct ieee80211_vif *vif, int idx)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	mvmvif->tx_key_idx = idx;
+}
+
+static void iwl_mvm_convert_p1k(u16 *p1k, __le16 *out)
+{
+	int i;
+
+	for (i = 0; i < IWL_P1K_SIZE; i++)
+		out[i] = cpu_to_le16(p1k[i]);
+}
+
+static const u8 *iwl_mvm_find_max_pn(struct ieee80211_key_conf *key,
+				     struct iwl_mvm_key_pn *ptk_pn,
+				     struct ieee80211_key_seq *seq,
+				     int tid, int queues)
+{
+	const u8 *ret = seq->ccmp.pn;
+	int i;
+
+	/* get the PN from mac80211, used on the default queue */
+	ieee80211_get_key_rx_seq(key, tid, seq);
+
+	/* and use the internal data for the other queues */
+	for (i = 1; i < queues; i++) {
+		const u8 *tmp = ptk_pn->q[i].pn[tid];
+
+		if (memcmp(ret, tmp, IEEE80211_CCMP_PN_LEN) <= 0)
+			ret = tmp;
+	}
+
+	return ret;
+}
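+
+/*
+ * Editorial note: the CCMP PN is stored most-significant byte first, so
+ * memcmp() above amounts to a numeric comparison and the function returns
+ * the largest PN seen across the default queue and all per-queue copies.
+ */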
+
+struct wowlan_key_data {
+	struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc;
+	struct iwl_wowlan_tkip_params_cmd *tkip;
+	bool error, use_rsc_tsc, use_tkip, configure_keys;
+	int wep_key_idx;
+};
+
+static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
+					struct ieee80211_vif *vif,
+					struct ieee80211_sta *sta,
+					struct ieee80211_key_conf *key,
+					void *_data)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct wowlan_key_data *data = _data;
+	struct aes_sc *aes_sc, *aes_tx_sc = NULL;
+	struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
+	struct iwl_p1k_cache *rx_p1ks;
+	u8 *rx_mic_key;
+	struct ieee80211_key_seq seq;
+	u32 cur_rx_iv32 = 0;
+	u16 p1k[IWL_P1K_SIZE];
+	int ret, i;
+
+	switch (key->cipher) {
+	case WLAN_CIPHER_SUITE_WEP40:
+	case WLAN_CIPHER_SUITE_WEP104: { /* hack it for now */
+		struct {
+			struct iwl_mvm_wep_key_cmd wep_key_cmd;
+			struct iwl_mvm_wep_key wep_key;
+		} __packed wkc = {
+			.wep_key_cmd.mac_id_n_color =
+				cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+								mvmvif->color)),
+			.wep_key_cmd.num_keys = 1,
+			/* firmware sets STA_KEY_FLG_WEP_13BYTES */
+			.wep_key_cmd.decryption_type = STA_KEY_FLG_WEP,
+			.wep_key.key_index = key->keyidx,
+			.wep_key.key_size = key->keylen,
+		};
+
+		/*
+		 * This will fail -- the key functions don't support setting
+		 * pairwise WEP keys. However, that's better than silently
+		 * failing WoWLAN. Or maybe not?
+		 */
+		if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
+			break;
+
+		memcpy(&wkc.wep_key.key[3], key->key, key->keylen);
+		if (key->keyidx == mvmvif->tx_key_idx) {
+			/* TX key must be at offset 0 */
+			wkc.wep_key.key_offset = 0;
+		} else {
+			/* others start at 1 */
+			data->wep_key_idx++;
+			wkc.wep_key.key_offset = data->wep_key_idx;
+		}
+
+		if (data->configure_keys) {
+			mutex_lock(&mvm->mutex);
+			ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, 0,
+						   sizeof(wkc), &wkc);
+			data->error = ret != 0;
+
+			mvm->ptk_ivlen = key->iv_len;
+			mvm->ptk_icvlen = key->icv_len;
+			mvm->gtk_ivlen = key->iv_len;
+			mvm->gtk_icvlen = key->icv_len;
+			mutex_unlock(&mvm->mutex);
+		}
+
+		/* don't upload key again */
+		return;
+	}
+	default:
+		data->error = true;
+		return;
+	case WLAN_CIPHER_SUITE_AES_CMAC:
+		/*
+		 * Ignore CMAC keys -- the WoWLAN firmware doesn't support them
+		 * but we also shouldn't abort suspend due to that. It does have
+		 * support for the IGTK key renewal, but doesn't really use the
+		 * IGTK for anything. This means we could spuriously wake up or
+		 * be deauthenticated, but that was considered acceptable.
+		 */
+		return;
+	case WLAN_CIPHER_SUITE_TKIP:
+		if (sta) {
+			u64 pn64;
+
+			tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
+			tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;
+
+			rx_p1ks = data->tkip->rx_uni;
+
+			pn64 = atomic64_read(&key->tx_pn);
+			tkip_tx_sc->iv16 = cpu_to_le16(TKIP_PN_TO_IV16(pn64));
+			tkip_tx_sc->iv32 = cpu_to_le32(TKIP_PN_TO_IV32(pn64));
+
+			ieee80211_get_tkip_p1k_iv(key, TKIP_PN_TO_IV32(pn64),
+						  p1k);
+			iwl_mvm_convert_p1k(p1k, data->tkip->tx.p1k);
+
+			memcpy(data->tkip->mic_keys.tx,
+			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
+			       IWL_MIC_KEY_SIZE);
+
+			rx_mic_key = data->tkip->mic_keys.rx_unicast;
+		} else {
+			tkip_sc =
+				data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
+			rx_p1ks = data->tkip->rx_multi;
+			rx_mic_key = data->tkip->mic_keys.rx_mcast;
+		}
+
+		/*
+		 * For non-QoS this relies on the fact that both the uCode and
+		 * mac80211 use TID 0 (as they need to, to avoid replay attacks)
+		 * for checking the IV in the frames.
+		 */
+		for (i = 0; i < IWL_NUM_RSC; i++) {
+			ieee80211_get_key_rx_seq(key, i, &seq);
+			tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
+			tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
+			/* wrapping isn't allowed, AP must rekey */
+			if (seq.tkip.iv32 > cur_rx_iv32)
+				cur_rx_iv32 = seq.tkip.iv32;
+		}
+
+		ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
+					  cur_rx_iv32, p1k);
+		iwl_mvm_convert_p1k(p1k, rx_p1ks[0].p1k);
+		ieee80211_get_tkip_rx_p1k(key, vif->bss_conf.bssid,
+					  cur_rx_iv32 + 1, p1k);
+		iwl_mvm_convert_p1k(p1k, rx_p1ks[1].p1k);
+
+		memcpy(rx_mic_key,
+		       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
+		       IWL_MIC_KEY_SIZE);
+
+		data->use_tkip = true;
+		data->use_rsc_tsc = true;
+		break;
+	case WLAN_CIPHER_SUITE_CCMP:
+		if (sta) {
+			u64 pn64;
+
+			aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
+			aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
+
+			pn64 = atomic64_read(&key->tx_pn);
+			aes_tx_sc->pn = cpu_to_le64(pn64);
+		} else {
+			aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
+		}
+
+		/*
+		 * For non-QoS this relies on the fact that both the uCode and
+		 * mac80211/our RX code use TID 0 for checking the PN.
+		 */
+		if (sta && iwl_mvm_has_new_rx_api(mvm)) {
+			struct iwl_mvm_sta *mvmsta;
+			struct iwl_mvm_key_pn *ptk_pn;
+			const u8 *pn;
+
+			mvmsta = iwl_mvm_sta_from_mac80211(sta);
+			ptk_pn = rcu_dereference_protected(
+						mvmsta->ptk_pn[key->keyidx],
+						lockdep_is_held(&mvm->mutex));
+			if (WARN_ON(!ptk_pn))
+				break;
+
+			for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+				pn = iwl_mvm_find_max_pn(key, ptk_pn, &seq, i,
+						mvm->trans->num_rx_queues);
+				aes_sc[i].pn = cpu_to_le64((u64)pn[5] |
+							   ((u64)pn[4] << 8) |
+							   ((u64)pn[3] << 16) |
+							   ((u64)pn[2] << 24) |
+							   ((u64)pn[1] << 32) |
+							   ((u64)pn[0] << 40));
+			}
+		} else {
+			for (i = 0; i < IWL_NUM_RSC; i++) {
+				u8 *pn = seq.ccmp.pn;
+
+				ieee80211_get_key_rx_seq(key, i, &seq);
+				aes_sc[i].pn = cpu_to_le64((u64)pn[5] |
+							   ((u64)pn[4] << 8) |
+							   ((u64)pn[3] << 16) |
+							   ((u64)pn[2] << 24) |
+							   ((u64)pn[1] << 32) |
+							   ((u64)pn[0] << 40));
+			}
+		}
+		data->use_rsc_tsc = true;
+		break;
+	}
+
+	if (data->configure_keys) {
+		mutex_lock(&mvm->mutex);
+		/*
+		 * The D3 firmware hardcodes the key offset 0 as the key it
+		 * uses to transmit packets to the AP, i.e. the PTK.
+		 */
+		if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+			mvm->ptk_ivlen = key->iv_len;
+			mvm->ptk_icvlen = key->icv_len;
+			ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 0);
+		} else {
+			/*
+			 * firmware only supports TSC/RSC for a single key,
+			 * so if there are multiple keys, keep overwriting them
+			 * with new ones -- this relies on mac80211 doing
+			 * list_add_tail().
+			 */
+			mvm->gtk_ivlen = key->iv_len;
+			mvm->gtk_icvlen = key->icv_len;
+			ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, 1);
+		}
+		mutex_unlock(&mvm->mutex);
+		data->error = ret != 0;
+	}
+}
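+
+/*
+ * Editorial note: the D3 firmware hardcodes key offset 0 as the PTK, so the
+ * pairwise key is installed there and every GTK is (re)installed at offset
+ * 1, relying on mac80211 iterating keys oldest-first so the newest GTK
+ * wins.
+ */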
+
+static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
+				 struct cfg80211_wowlan *wowlan)
+{
+	struct iwl_wowlan_patterns_cmd *pattern_cmd;
+	struct iwl_host_cmd cmd = {
+		.id = WOWLAN_PATTERNS,
+		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+	};
+	int i, err;
+
+	if (!wowlan->n_patterns)
+		return 0;
+
+	cmd.len[0] = sizeof(*pattern_cmd) +
+		wowlan->n_patterns * sizeof(struct iwl_wowlan_pattern);
+
+	pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
+	if (!pattern_cmd)
+		return -ENOMEM;
+
+	pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
+
+	for (i = 0; i < wowlan->n_patterns; i++) {
+		int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
+
+		memcpy(&pattern_cmd->patterns[i].mask,
+		       wowlan->patterns[i].mask, mask_len);
+		memcpy(&pattern_cmd->patterns[i].pattern,
+		       wowlan->patterns[i].pattern,
+		       wowlan->patterns[i].pattern_len);
+		pattern_cmd->patterns[i].mask_size = mask_len;
+		pattern_cmd->patterns[i].pattern_size =
+			wowlan->patterns[i].pattern_len;
+	}
+
+	cmd.data[0] = pattern_cmd;
+	err = iwl_mvm_send_cmd(mvm, &cmd);
+	kfree(pattern_cmd);
+	return err;
+}
+
+static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+				struct ieee80211_sta *ap_sta)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct ieee80211_chanctx_conf *ctx;
+	u8 chains_static, chains_dynamic;
+	struct cfg80211_chan_def chandef;
+	int ret, i;
+	struct iwl_binding_cmd binding_cmd = {};
+	struct iwl_time_quota_cmd quota_cmd = {};
+	struct iwl_time_quota_data *quota;
+	u32 status;
+	int size;
+
+	if (fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)) {
+		size = sizeof(binding_cmd);
+		if (mvmvif->phy_ctxt->channel->band == NL80211_BAND_2GHZ ||
+		    !iwl_mvm_is_cdb_supported(mvm))
+			binding_cmd.lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX);
+		else
+			binding_cmd.lmac_id = cpu_to_le32(IWL_LMAC_5G_INDEX);
+	} else {
+		size = IWL_BINDING_CMD_SIZE_V1;
+	}
+
+	/* add back the PHY */
+	if (WARN_ON(!mvmvif->phy_ctxt))
+		return -EINVAL;
+
+	rcu_read_lock();
+	ctx = rcu_dereference(vif->chanctx_conf);
+	if (WARN_ON(!ctx)) {
+		rcu_read_unlock();
+		return -EINVAL;
+	}
+	chandef = ctx->def;
+	chains_static = ctx->rx_chains_static;
+	chains_dynamic = ctx->rx_chains_dynamic;
+	rcu_read_unlock();
+
+	ret = iwl_mvm_phy_ctxt_add(mvm, mvmvif->phy_ctxt, &chandef,
+				   chains_static, chains_dynamic);
+	if (ret)
+		return ret;
+
+	/* add back the MAC */
+	mvmvif->uploaded = false;
+
+	if (WARN_ON(!vif->bss_conf.assoc))
+		return -EINVAL;
+
+	ret = iwl_mvm_mac_ctxt_add(mvm, vif);
+	if (ret)
+		return ret;
+
+	/* add back binding - XXX refactor? */
+	binding_cmd.id_and_color =
+		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
+						mvmvif->phy_ctxt->color));
+	binding_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
+	binding_cmd.phy =
+		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
+						mvmvif->phy_ctxt->color));
+	binding_cmd.macs[0] = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+							      mvmvif->color));
+	for (i = 1; i < MAX_MACS_IN_BINDING; i++)
+		binding_cmd.macs[i] = cpu_to_le32(FW_CTXT_INVALID);
+
+	status = 0;
+	ret = iwl_mvm_send_cmd_pdu_status(mvm, BINDING_CONTEXT_CMD,
+					  size, &binding_cmd, &status);
+	if (ret) {
+		IWL_ERR(mvm, "Failed to add binding: %d\n", ret);
+		return ret;
+	}
+
+	if (status) {
+		IWL_ERR(mvm, "Binding command failed: %u\n", status);
+		return -EIO;
+	}
+
+	ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false, 0);
+	if (ret)
+		return ret;
+	rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta);
+
+	ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+	if (ret)
+		return ret;
+
+	/* and some quota */
+	quota = iwl_mvm_quota_cmd_get_quota(mvm, &quota_cmd, 0);
+	quota->id_and_color =
+		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
+						mvmvif->phy_ctxt->color));
+	quota->quota = cpu_to_le32(IWL_MVM_MAX_QUOTA);
+	quota->max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA);
+
+	for (i = 1; i < MAX_BINDINGS; i++) {
+		quota = iwl_mvm_quota_cmd_get_quota(mvm, &quota_cmd, i);
+		quota->id_and_color = cpu_to_le32(FW_CTXT_INVALID);
+	}
+
+	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0,
+				   iwl_mvm_quota_cmd_size(mvm), &quota_cmd);
+	if (ret)
+		IWL_ERR(mvm, "Failed to send quota: %d\n", ret);
+
+	if (iwl_mvm_is_lar_supported(mvm) && iwl_mvm_init_fw_regd(mvm))
+		IWL_ERR(mvm, "Failed to initialize D3 LAR information\n");
+
+	return 0;
+}
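+
+/*
+ * Editorial summary: the sequence above manually replays the context
+ * programming the runtime firmware normally gets -- PHY context, MAC
+ * context, binding, AP station and time quota, in that order -- because
+ * the freshly loaded firmware image starts from a clean slate.
+ */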
+
+static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
+				       struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_nonqos_seq_query_cmd query_cmd = {
+		.get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_GET),
+		.mac_id_n_color =
+			cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+							mvmvif->color)),
+	};
+	struct iwl_host_cmd cmd = {
+		.id = NON_QOS_TX_COUNTER_CMD,
+		.flags = CMD_WANT_SKB,
+	};
+	int err;
+	u32 size;
+
+	cmd.data[0] = &query_cmd;
+	cmd.len[0] = sizeof(query_cmd);
+
+	err = iwl_mvm_send_cmd(mvm, &cmd);
+	if (err)
+		return err;
+
+	size = iwl_rx_packet_payload_len(cmd.resp_pkt);
+	if (size < sizeof(__le16)) {
+		err = -EINVAL;
+	} else {
+		err = le16_to_cpup((__le16 *)cmd.resp_pkt->data);
+		/* firmware returns next, not last-used seqno */
+		err = (u16) (err - 0x10);
+	}
+
+	iwl_free_resp(&cmd);
+	return err;
+}
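+
+/*
+ * Editorial note: the 0x10 step above is one full sequence number -- the
+ * low four bits of the 802.11 sequence-control field carry the fragment
+ * number, so consecutive MPDU sequence numbers differ by 0x10.
+ */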
+
+void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_nonqos_seq_query_cmd query_cmd = {
+		.get_set_flag = cpu_to_le32(IWL_NONQOS_SEQ_SET),
+		.mac_id_n_color =
+			cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+							mvmvif->color)),
+		.value = cpu_to_le16(mvmvif->seqno),
+	};
+
+	/* return if called during restart, not resume from D3 */
+	if (!mvmvif->seqno_valid)
+		return;
+
+	mvmvif->seqno_valid = false;
+
+	if (iwl_mvm_send_cmd_pdu(mvm, NON_QOS_TX_COUNTER_CMD, 0,
+				 sizeof(query_cmd), &query_cmd))
+		IWL_ERR(mvm, "failed to set non-QoS seqno\n");
+}
+
+static int iwl_mvm_switch_to_d3(struct iwl_mvm *mvm)
+{
+	iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
+
+	iwl_mvm_stop_device(mvm);
+	/*
+	 * Set the HW restart bit -- this is mostly true as we're
+	 * going to load new firmware and reprogram that, though
+	 * the reprogramming is going to be manual to avoid adding
+	 * all the MACs that aren't supported.
+	 * We don't have to clear up everything though because the
+	 * reprogramming is manual. When we resume, we'll actually
+	 * go through a proper restart sequence again to switch
+	 * back to the runtime firmware image.
+	 */
+	set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+
+	/* the fw is reset, so all the keys are cleared */
+	memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
+
+	mvm->ptk_ivlen = 0;
+	mvm->ptk_icvlen = 0;
+	mvm->gtk_ivlen = 0;
+	mvm->gtk_icvlen = 0;
+
+	return iwl_mvm_load_d3_fw(mvm);
+}
+
+static int
+iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
+			  struct cfg80211_wowlan *wowlan,
+			  struct iwl_wowlan_config_cmd *wowlan_config_cmd,
+			  struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
+			  struct ieee80211_sta *ap_sta)
+{
+	int ret;
+	struct iwl_mvm_sta *mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
+
+	/* TODO: wowlan_config_cmd->wowlan_ba_teardown_tids */
+
+	wowlan_config_cmd->is_11n_connection =
+					ap_sta->ht_cap.ht_supported;
+	wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
+		ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;
+
+	/* Query the last used seqno and set it */
+	ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
+	if (ret < 0)
+		return ret;
+
+	wowlan_config_cmd->non_qos_seq = cpu_to_le16(ret);
+
+	iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, wowlan_config_cmd);
+
+	if (wowlan->disconnect)
+		wowlan_config_cmd->wakeup_filter |=
+			cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
+				    IWL_WOWLAN_WAKEUP_LINK_CHANGE);
+	if (wowlan->magic_pkt)
+		wowlan_config_cmd->wakeup_filter |=
+			cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET);
+	if (wowlan->gtk_rekey_failure)
+		wowlan_config_cmd->wakeup_filter |=
+			cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
+	if (wowlan->eap_identity_req)
+		wowlan_config_cmd->wakeup_filter |=
+			cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ);
+	if (wowlan->four_way_handshake)
+		wowlan_config_cmd->wakeup_filter |=
+			cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
+	if (wowlan->n_patterns)
+		wowlan_config_cmd->wakeup_filter |=
+			cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH);
+
+	if (wowlan->rfkill_release)
+		wowlan_config_cmd->wakeup_filter |=
+			cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
+
+	if (wowlan->tcp) {
+		/*
+		 * Set the "link change" (really "link lost") flag as well
+		 * since that implies losing the TCP connection.
+		 */
+		wowlan_config_cmd->wakeup_filter |=
+			cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS |
+				    IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE |
+				    IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET |
+				    IWL_WOWLAN_WAKEUP_LINK_CHANGE);
+	}
+
+	if (wowlan->any) {
+		wowlan_config_cmd->wakeup_filter |=
+			cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
+				    IWL_WOWLAN_WAKEUP_LINK_CHANGE |
+				    IWL_WOWLAN_WAKEUP_RX_FRAME |
+				    IWL_WOWLAN_WAKEUP_BCN_FILTERING);
+	}
+
+	return 0;
+}
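+
+/*
+ * A standalone sketch (not driver code) of the trigger-to-filter mapping
+ * above: each cfg80211 WoWLAN trigger the user enabled contributes one
+ * or more bits to a single 32-bit wakeup filter word.  The bit values
+ * below are made up -- the real IWL_WOWLAN_WAKEUP_* constants differ --
+ * only the mapping pattern mirrors the code above.
+ */
+#if 0
+#include <stdint.h>
+#include <stdio.h>
+
+#define WAKE_MAGIC_PACKET	(1u << 0)	/* made-up bit positions */
+#define WAKE_BEACON_MISS	(1u << 1)
+#define WAKE_LINK_CHANGE	(1u << 2)
+#define WAKE_PATTERN_MATCH	(1u << 3)
+
+struct triggers {
+	int magic_pkt;
+	int disconnect;
+	int n_patterns;
+};
+
+int main(void)
+{
+	struct triggers t = { .magic_pkt = 1, .n_patterns = 2 };
+	uint32_t filter = 0;
+
+	if (t.magic_pkt)
+		filter |= WAKE_MAGIC_PACKET;
+	if (t.disconnect)	/* one trigger can set several bits */
+		filter |= WAKE_BEACON_MISS | WAKE_LINK_CHANGE;
+	if (t.n_patterns)
+		filter |= WAKE_PATTERN_MATCH;
+
+	printf("wakeup_filter = 0x%08x\n", filter);	/* 0x00000009 */
+	return 0;
+}
+#endif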
+
+static void
+iwl_mvm_iter_d0i3_ap_keys(struct iwl_mvm *mvm,
+			  struct ieee80211_vif *vif,
+			  void (*iter)(struct ieee80211_hw *hw,
+				       struct ieee80211_vif *vif,
+				       struct ieee80211_sta *sta,
+				       struct ieee80211_key_conf *key,
+				       void *data),
+			  void *data)
+{
+	struct ieee80211_sta *ap_sta;
+
+	rcu_read_lock();
+
+	ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id]);
+	if (IS_ERR_OR_NULL(ap_sta))
+		goto out;
+
+	ieee80211_iter_keys_rcu(mvm->hw, vif, iter, data);
+out:
+	rcu_read_unlock();
+}
+
+int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
+				     struct ieee80211_vif *vif,
+				     bool d0i3,
+				     u32 cmd_flags)
+{
+	struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
+	struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
+	struct wowlan_key_data key_data = {
+		.configure_keys = !d0i3,
+		.use_rsc_tsc = false,
+		.tkip = &tkip_cmd,
+		.use_tkip = false,
+	};
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	int ret;
+
+	key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
+	if (!key_data.rsc_tsc)
+		return -ENOMEM;
+
+	/*
+	 * if we have to configure keys, call ieee80211_iter_keys(),
+	 * as we need non-atomic context in order to take the
+	 * required locks.
+	 * for the d0i3 we can't use ieee80211_iter_keys(), as
+	 * taking (almost) any mutex might result in deadlock.
+	 */
+	if (!d0i3) {
+		/*
+		 * Note that currently we don't propagate cmd_flags
+		 * to the iterator. In case of key_data.configure_keys,
+		 * all the configured commands are SYNC, and
+		 * iwl_mvm_wowlan_program_keys() will take care of
+		 * locking/unlocking mvm->mutex.
+		 */
+		ieee80211_iter_keys(mvm->hw, vif,
+				    iwl_mvm_wowlan_program_keys,
+				    &key_data);
+	} else {
+		iwl_mvm_iter_d0i3_ap_keys(mvm, vif,
+					  iwl_mvm_wowlan_program_keys,
+					  &key_data);
+	}
+
+	if (key_data.error) {
+		ret = -EIO;
+		goto out;
+	}
+
+	if (key_data.use_rsc_tsc) {
+		ret = iwl_mvm_send_cmd_pdu(mvm,
+					   WOWLAN_TSC_RSC_PARAM, cmd_flags,
+					   sizeof(*key_data.rsc_tsc),
+					   key_data.rsc_tsc);
+		if (ret)
+			goto out;
+	}
+
+	if (key_data.use_tkip &&
+	    !fw_has_api(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_API_TKIP_MIC_KEYS)) {
+		ret = iwl_mvm_send_cmd_pdu(mvm,
+					   WOWLAN_TKIP_PARAM,
+					   cmd_flags, sizeof(tkip_cmd),
+					   &tkip_cmd);
+		if (ret)
+			goto out;
+	}
+
+	/* configure rekey data only if offloaded rekey is supported (d3) */
+	if (mvmvif->rekey_data.valid && !d0i3) {
+		memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
+		memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck,
+		       NL80211_KCK_LEN);
+		kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
+		memcpy(kek_kck_cmd.kek, mvmvif->rekey_data.kek,
+		       NL80211_KEK_LEN);
+		kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
+		kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;
+
+		ret = iwl_mvm_send_cmd_pdu(mvm,
+					   WOWLAN_KEK_KCK_MATERIAL, cmd_flags,
+					   sizeof(kek_kck_cmd),
+					   &kek_kck_cmd);
+		if (ret)
+			goto out;
+	}
+	ret = 0;
+out:
+	kfree(key_data.rsc_tsc);
+	return ret;
+}
+
+static int
+iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
+		      struct cfg80211_wowlan *wowlan,
+		      struct iwl_wowlan_config_cmd *wowlan_config_cmd,
+		      struct ieee80211_vif *vif, struct iwl_mvm_vif *mvmvif,
+		      struct ieee80211_sta *ap_sta)
+{
+	int ret;
+	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+
+	if (!unified_image) {
+		ret = iwl_mvm_switch_to_d3(mvm);
+		if (ret)
+			return ret;
+
+		ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta);
+		if (ret)
+			return ret;
+	}
+
+	if (!iwlwifi_mod_params.swcrypto) {
+		/*
+		 * This needs to be unlocked due to lock ordering
+		 * constraints. Since we're in the suspend path
+		 * that isn't really a problem though.
+		 */
+		mutex_unlock(&mvm->mutex);
+		ret = iwl_mvm_wowlan_config_key_params(mvm, vif, false,
+						       CMD_ASYNC);
+		mutex_lock(&mvm->mutex);
+		if (ret)
+			return ret;
+	}
+
+	ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
+				   sizeof(*wowlan_config_cmd),
+				   wowlan_config_cmd);
+	if (ret)
+		return ret;
+
+	ret = iwl_mvm_send_patterns(mvm, wowlan);
+	if (ret)
+		return ret;
+
+	return iwl_mvm_send_proto_offload(mvm, vif, false, true, 0);
+}
+
+static int
+iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
+			 struct cfg80211_wowlan *wowlan,
+			 struct cfg80211_sched_scan_request *nd_config,
+			 struct ieee80211_vif *vif)
+{
+	struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
+	int ret;
+	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+
+	if (!unified_image) {
+		ret = iwl_mvm_switch_to_d3(mvm);
+		if (ret)
+			return ret;
+	} else {
+		/* In theory, we wouldn't have to stop a running sched
+		 * scan in order to start another one (for
+		 * net-detect).  But in practice this doesn't seem to
+		 * work properly, so stop any running sched_scan now.
+		 */
+		ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
+		if (ret)
+			return ret;
+	}
+
+	/* rfkill release can be either for wowlan or netdetect */
+	if (wowlan->rfkill_release)
+		wowlan_config_cmd.wakeup_filter |=
+			cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
+
+	ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, 0,
+				   sizeof(wowlan_config_cmd),
+				   &wowlan_config_cmd);
+	if (ret)
+		return ret;
+
+	ret = iwl_mvm_sched_scan_start(mvm, vif, nd_config, &mvm->nd_ies,
+				       IWL_MVM_SCAN_NETDETECT);
+	if (ret)
+		return ret;
+
+	if (WARN_ON(mvm->nd_match_sets || mvm->nd_channels))
+		return -EBUSY;
+
+	/* save the sched scan matchsets... */
+	if (nd_config->n_match_sets) {
+		mvm->nd_match_sets = kmemdup(nd_config->match_sets,
+					     sizeof(*nd_config->match_sets) *
+					     nd_config->n_match_sets,
+					     GFP_KERNEL);
+		if (mvm->nd_match_sets)
+			mvm->n_nd_match_sets = nd_config->n_match_sets;
+	}
+
+	/* ...and the sched scan channels for later reporting */
+	mvm->nd_channels = kmemdup(nd_config->channels,
+				   sizeof(*nd_config->channels) *
+				   nd_config->n_channels,
+				   GFP_KERNEL);
+	if (mvm->nd_channels)
+		mvm->n_nd_channels = nd_config->n_channels;
+
+	return 0;
+}
+
+static void iwl_mvm_free_nd(struct iwl_mvm *mvm)
+{
+	kfree(mvm->nd_match_sets);
+	mvm->nd_match_sets = NULL;
+	mvm->n_nd_match_sets = 0;
+	kfree(mvm->nd_channels);
+	mvm->nd_channels = NULL;
+	mvm->n_nd_channels = 0;
+}
+
+static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
+			     struct cfg80211_wowlan *wowlan,
+			     bool test)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	struct ieee80211_vif *vif = NULL;
+	struct iwl_mvm_vif *mvmvif = NULL;
+	struct ieee80211_sta *ap_sta = NULL;
+	struct iwl_d3_manager_config d3_cfg_cmd_data = {
+		/*
+		 * Program the minimum sleep time to 10 seconds, as many
+		 * platforms have issues processing a wakeup signal while
+		 * still being in the process of suspending.
+		 */
+		.min_sleep_time = cpu_to_le32(10 * 1000 * 1000),
+	};
+	struct iwl_host_cmd d3_cfg_cmd = {
+		.id = D3_CONFIG_CMD,
+		.flags = CMD_WANT_SKB,
+		.data[0] = &d3_cfg_cmd_data,
+		.len[0] = sizeof(d3_cfg_cmd_data),
+	};
+	int ret;
+	int len __maybe_unused;
+	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+
+	if (!wowlan) {
+		/*
+		 * mac80211 shouldn't get here, but for D3 test
+		 * it doesn't warrant a warning
+		 */
+		WARN_ON(!test);
+		return -EINVAL;
+	}
+
+	mutex_lock(&mvm->mutex);
+
+	vif = iwl_mvm_get_bss_vif(mvm);
+	if (IS_ERR_OR_NULL(vif)) {
+		ret = 1;
+		goto out_noreset;
+	}
+
+	mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	if (mvmvif->ap_sta_id == IWL_MVM_INVALID_STA) {
+		/* if we're not associated, this must be netdetect */
+		if (!wowlan->nd_config) {
+			ret = 1;
+			goto out_noreset;
+		}
+
+		ret = iwl_mvm_netdetect_config(
+			mvm, wowlan, wowlan->nd_config, vif);
+		if (ret)
+			goto out;
+
+		mvm->net_detect = true;
+	} else {
+		struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
+
+		ap_sta = rcu_dereference_protected(
+			mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
+			lockdep_is_held(&mvm->mutex));
+		if (IS_ERR_OR_NULL(ap_sta)) {
+			ret = -EINVAL;
+			goto out_noreset;
+		}
+
+		ret = iwl_mvm_get_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
+						vif, mvmvif, ap_sta);
+		if (ret)
+			goto out_noreset;
+		ret = iwl_mvm_wowlan_config(mvm, wowlan, &wowlan_config_cmd,
+					    vif, mvmvif, ap_sta);
+		if (ret)
+			goto out;
+
+		mvm->net_detect = false;
+	}
+
+	ret = iwl_mvm_power_update_device(mvm);
+	if (ret)
+		goto out;
+
+	ret = iwl_mvm_power_update_mac(mvm);
+	if (ret)
+		goto out;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	if (mvm->d3_wake_sysassert)
+		d3_cfg_cmd_data.wakeup_flags |=
+			cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR);
+#endif
+
+	/*
+	 * TODO: this is needed because the firmware is not stopping
+	 * the recording automatically before entering D3.  This can
+	 * be removed once the FW starts doing that.
+	 */
+	iwl_fw_dbg_stop_recording(&mvm->fwrt);
+
+	/* must be last -- this switches firmware state */
+	ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
+	if (ret)
+		goto out;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	len = iwl_rx_packet_payload_len(d3_cfg_cmd.resp_pkt);
+	if (len >= sizeof(u32)) {
+		mvm->d3_test_pme_ptr =
+			le32_to_cpup((__le32 *)d3_cfg_cmd.resp_pkt->data);
+	}
+#endif
+	iwl_free_resp(&d3_cfg_cmd);
+
+	clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+
+	iwl_trans_d3_suspend(mvm->trans, test, !unified_image);
+ out:
+	if (ret < 0) {
+		iwl_mvm_free_nd(mvm);
+
+		if (!unified_image) {
+			iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
+			if (mvm->fw_restart > 0) {
+				mvm->fw_restart--;
+				ieee80211_restart_hw(mvm->hw);
+			}
+		}
+	}
+ out_noreset:
+	mutex_unlock(&mvm->mutex);
+
+	return ret;
+}
+
+static int iwl_mvm_enter_d0i3_sync(struct iwl_mvm *mvm)
+{
+	struct iwl_notification_wait wait_d3;
+	static const u16 d3_notif[] = { D3_CONFIG_CMD };
+	int ret;
+
+	iwl_init_notification_wait(&mvm->notif_wait, &wait_d3,
+				   d3_notif, ARRAY_SIZE(d3_notif),
+				   NULL, NULL);
+
+	ret = iwl_mvm_enter_d0i3(mvm->hw->priv);
+	if (ret)
+		goto remove_notif;
+
+	ret = iwl_wait_notification(&mvm->notif_wait, &wait_d3, HZ);
+	WARN_ON_ONCE(ret);
+	return ret;
+
+remove_notif:
+	iwl_remove_notification(&mvm->notif_wait, &wait_d3);
+	return ret;
+}
+
+int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	struct iwl_trans *trans = mvm->trans;
+	int ret;
+
+	/* make sure the d0i3 exit work is not pending */
+	flush_work(&mvm->d0i3_exit_work);
+	iwl_mvm_pause_tcm(mvm, true);
+
+	iwl_fw_runtime_suspend(&mvm->fwrt);
+
+	ret = iwl_trans_suspend(trans);
+	if (ret)
+		return ret;
+
+	if (wowlan->any) {
+		trans->system_pm_mode = IWL_PLAT_PM_MODE_D0I3;
+
+		if (iwl_mvm_enter_d0i3_on_suspend(mvm)) {
+			ret = iwl_mvm_enter_d0i3_sync(mvm);
+
+			if (ret)
+				return ret;
+		}
+
+		mutex_lock(&mvm->d0i3_suspend_mutex);
+		__set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
+		mutex_unlock(&mvm->d0i3_suspend_mutex);
+
+		iwl_trans_d3_suspend(trans, false, false);
+
+		return 0;
+	}
+
+	trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
+
+	return __iwl_mvm_suspend(hw, wowlan, false);
+}
+
+/* converted data from the different status responses */
+struct iwl_wowlan_status_data {
+	u16 pattern_number;
+	u16 qos_seq_ctr[8];
+	u32 wakeup_reasons;
+	u32 wake_packet_length;
+	u32 wake_packet_bufsize;
+	const u8 *wake_packet;
+};
+
+static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
+					  struct ieee80211_vif *vif,
+					  struct iwl_wowlan_status_data *status)
+{
+	struct sk_buff *pkt = NULL;
+	struct cfg80211_wowlan_wakeup wakeup = {
+		.pattern_idx = -1,
+	};
+	struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
+	u32 reasons = status->wakeup_reasons;
+
+	if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
+		wakeup_report = NULL;
+		goto report;
+	}
+
+	pm_wakeup_event(mvm->dev, 0);
+
+	if (reasons & IWL_WOWLAN_WAKEUP_BY_MAGIC_PACKET)
+		wakeup.magic_pkt = true;
+
+	if (reasons & IWL_WOWLAN_WAKEUP_BY_PATTERN)
+		wakeup.pattern_idx =
+			status->pattern_number;
+
+	if (reasons & (IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
+		       IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH))
+		wakeup.disconnect = true;
+
+	if (reasons & IWL_WOWLAN_WAKEUP_BY_GTK_REKEY_FAILURE)
+		wakeup.gtk_rekey_failure = true;
+
+	if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
+		wakeup.rfkill_release = true;
+
+	if (reasons & IWL_WOWLAN_WAKEUP_BY_EAPOL_REQUEST)
+		wakeup.eap_identity_req = true;
+
+	if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE)
+		wakeup.four_way_handshake = true;
+
+	if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS)
+		wakeup.tcp_connlost = true;
+
+	if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE)
+		wakeup.tcp_nomoretokens = true;
+
+	if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET)
+		wakeup.tcp_match = true;
+
+	if (status->wake_packet_bufsize) {
+		int pktsize = status->wake_packet_bufsize;
+		int pktlen = status->wake_packet_length;
+		const u8 *pktdata = status->wake_packet;
+		struct ieee80211_hdr *hdr = (void *)pktdata;
+		int truncated = pktlen - pktsize;
+
+		/* this would be a firmware bug */
+		if (WARN_ON_ONCE(truncated < 0))
+			truncated = 0;
+
+		if (ieee80211_is_data(hdr->frame_control)) {
+			int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+			int ivlen = 0, icvlen = 4; /* also FCS */
+
+			pkt = alloc_skb(pktsize, GFP_KERNEL);
+			if (!pkt)
+				goto report;
+
+			skb_put_data(pkt, pktdata, hdrlen);
+			pktdata += hdrlen;
+			pktsize -= hdrlen;
+
+			if (ieee80211_has_protected(hdr->frame_control)) {
+				/*
+				 * This runs unlocked and uses gtk_i(c)vlen,
+				 * but since everything is still under RTNL
+				 * that's not really a problem - changing
+				 * it would be difficult.
+				 */
+				if (is_multicast_ether_addr(hdr->addr1)) {
+					ivlen = mvm->gtk_ivlen;
+					icvlen += mvm->gtk_icvlen;
+				} else {
+					ivlen = mvm->ptk_ivlen;
+					icvlen += mvm->ptk_icvlen;
+				}
+			}
+
+			/* if truncated, FCS/ICV is (partially) gone */
+			if (truncated >= icvlen) {
+				truncated -= icvlen;
+				icvlen = 0;
+			} else {
+				icvlen -= truncated;
+				truncated = 0;
+			}
+
+			pktsize -= ivlen + icvlen;
+			pktdata += ivlen;
+
+			skb_put_data(pkt, pktdata, pktsize);
+
+			if (ieee80211_data_to_8023(pkt, vif->addr, vif->type))
+				goto report;
+			wakeup.packet = pkt->data;
+			wakeup.packet_present_len = pkt->len;
+			wakeup.packet_len = pkt->len - truncated;
+			wakeup.packet_80211 = false;
+		} else {
+			int fcslen = 4;
+
+			if (truncated >= 4) {
+				truncated -= 4;
+				fcslen = 0;
+			} else {
+				fcslen -= truncated;
+				truncated = 0;
+			}
+			pktsize -= fcslen;
+			wakeup.packet = status->wake_packet;
+			wakeup.packet_present_len = pktsize;
+			wakeup.packet_len = pktlen - truncated;
+			wakeup.packet_80211 = true;
+		}
+	}
+
+ report:
+	ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
+	kfree_skb(pkt);
+}
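+
+/*
+ * A standalone sketch (not driver code) of the trailer-truncation
+ * bookkeeping above: "truncated" counts bytes the firmware chopped off
+ * the tail of the wake packet, and the trailer (ICV plus FCS) absorbs
+ * that truncation first.  The lengths here are made up.
+ */
+#if 0
+#include <stdio.h>
+
+static void account(int icvlen, int truncated)
+{
+	if (truncated >= icvlen) {
+		truncated -= icvlen;	/* the whole trailer is gone */
+		icvlen = 0;
+	} else {
+		icvlen -= truncated;	/* trailer partially present */
+		truncated = 0;
+	}
+	printf("icvlen=%d truncated=%d\n", icvlen, truncated);
+}
+
+int main(void)
+{
+	account(12, 5);		/* icvlen=7  truncated=0 */
+	account(12, 20);	/* icvlen=0  truncated=8 */
+	return 0;
+}
+#endif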
+
+static void iwl_mvm_aes_sc_to_seq(struct aes_sc *sc,
+				  struct ieee80211_key_seq *seq)
+{
+	u64 pn;
+
+	pn = le64_to_cpu(sc->pn);
+	seq->ccmp.pn[0] = pn >> 40;
+	seq->ccmp.pn[1] = pn >> 32;
+	seq->ccmp.pn[2] = pn >> 24;
+	seq->ccmp.pn[3] = pn >> 16;
+	seq->ccmp.pn[4] = pn >> 8;
+	seq->ccmp.pn[5] = pn;
+}
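+
+/*
+ * A standalone sketch (not driver code) of the PN unpacking above: the
+ * 48-bit CCMP packet number is stored most-significant-byte-first in
+ * seq->ccmp.pn[], exactly as the shift cascade in iwl_mvm_aes_sc_to_seq()
+ * produces.  The value below is made up.
+ */
+#if 0
+#include <stdint.h>
+#include <stdio.h>
+
+int main(void)
+{
+	uint64_t pn = 0x112233445566ULL;
+	uint8_t b[6];
+	int i;
+
+	for (i = 0; i < 6; i++)
+		b[i] = (uint8_t)(pn >> (40 - 8 * i));	/* b[0] = MSB */
+
+	for (i = 0; i < 6; i++)
+		printf("pn[%d] = 0x%02x\n", i, b[i]);	/* 11 22 ... 66 */
+	return 0;
+}
+#endif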
+
+static void iwl_mvm_tkip_sc_to_seq(struct tkip_sc *sc,
+				   struct ieee80211_key_seq *seq)
+{
+	seq->tkip.iv32 = le32_to_cpu(sc->iv32);
+	seq->tkip.iv16 = le16_to_cpu(sc->iv16);
+}
+
+static void iwl_mvm_set_aes_rx_seq(struct iwl_mvm *mvm, struct aes_sc *scs,
+				   struct ieee80211_sta *sta,
+				   struct ieee80211_key_conf *key)
+{
+	int tid;
+
+	BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);
+
+	if (sta && iwl_mvm_has_new_rx_api(mvm)) {
+		struct iwl_mvm_sta *mvmsta;
+		struct iwl_mvm_key_pn *ptk_pn;
+
+		mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+		ptk_pn = rcu_dereference_protected(mvmsta->ptk_pn[key->keyidx],
+						   lockdep_is_held(&mvm->mutex));
+		if (WARN_ON(!ptk_pn))
+			return;
+
+		for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+			struct ieee80211_key_seq seq = {};
+			int i;
+
+			iwl_mvm_aes_sc_to_seq(&scs[tid], &seq);
+			ieee80211_set_key_rx_seq(key, tid, &seq);
+			for (i = 1; i < mvm->trans->num_rx_queues; i++)
+				memcpy(ptk_pn->q[i].pn[tid],
+				       seq.ccmp.pn, IEEE80211_CCMP_PN_LEN);
+		}
+	} else {
+		for (tid = 0; tid < IWL_NUM_RSC; tid++) {
+			struct ieee80211_key_seq seq = {};
+
+			iwl_mvm_aes_sc_to_seq(&scs[tid], &seq);
+			ieee80211_set_key_rx_seq(key, tid, &seq);
+		}
+	}
+}
+
+static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc *scs,
+				    struct ieee80211_key_conf *key)
+{
+	int tid;
+
+	BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);
+
+	for (tid = 0; tid < IWL_NUM_RSC; tid++) {
+		struct ieee80211_key_seq seq = {};
+
+		iwl_mvm_tkip_sc_to_seq(&scs[tid], &seq);
+		ieee80211_set_key_rx_seq(key, tid, &seq);
+	}
+}
+
+static void iwl_mvm_set_key_rx_seq(struct iwl_mvm *mvm,
+				   struct ieee80211_key_conf *key,
+				   struct iwl_wowlan_status *status)
+{
+	union iwl_all_tsc_rsc *rsc = &status->gtk.rsc.all_tsc_rsc;
+
+	switch (key->cipher) {
+	case WLAN_CIPHER_SUITE_CCMP:
+		iwl_mvm_set_aes_rx_seq(mvm, rsc->aes.multicast_rsc, NULL, key);
+		break;
+	case WLAN_CIPHER_SUITE_TKIP:
+		iwl_mvm_set_tkip_rx_seq(rsc->tkip.multicast_rsc, key);
+		break;
+	default:
+		WARN_ON(1);
+	}
+}
+
+struct iwl_mvm_d3_gtk_iter_data {
+	struct iwl_mvm *mvm;
+	struct iwl_wowlan_status *status;
+	void *last_gtk;
+	u32 cipher;
+	bool find_phase, unhandled_cipher;
+	int num_keys;
+};
+
+static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
+				   struct ieee80211_vif *vif,
+				   struct ieee80211_sta *sta,
+				   struct ieee80211_key_conf *key,
+				   void *_data)
+{
+	struct iwl_mvm_d3_gtk_iter_data *data = _data;
+
+	if (data->unhandled_cipher)
+		return;
+
+	switch (key->cipher) {
+	case WLAN_CIPHER_SUITE_WEP40:
+	case WLAN_CIPHER_SUITE_WEP104:
+		/* ignore WEP completely, nothing to do */
+		return;
+	case WLAN_CIPHER_SUITE_CCMP:
+	case WLAN_CIPHER_SUITE_TKIP:
+		/* we support these */
+		break;
+	default:
+		/* everything else (even CMAC for MFP) - disconnect from AP */
+		data->unhandled_cipher = true;
+		return;
+	}
+
+	data->num_keys++;
+
+	/*
+	 * pairwise key - update sequence counters only;
+	 * note that this assumes no TDLS sessions are active
+	 */
+	if (sta) {
+		struct ieee80211_key_seq seq = {};
+		union iwl_all_tsc_rsc *sc = &data->status->gtk.rsc.all_tsc_rsc;
+
+		if (data->find_phase)
+			return;
+
+		switch (key->cipher) {
+		case WLAN_CIPHER_SUITE_CCMP:
+			iwl_mvm_set_aes_rx_seq(data->mvm, sc->aes.unicast_rsc,
+					       sta, key);
+			atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn));
+			break;
+		case WLAN_CIPHER_SUITE_TKIP:
+			iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq);
+			iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key);
+			atomic64_set(&key->tx_pn,
+				     (u64)seq.tkip.iv16 |
+				     ((u64)seq.tkip.iv32 << 16));
+			break;
+		}
+
+		/* that's it for this key */
+		return;
+	}
+
+	if (data->find_phase) {
+		data->last_gtk = key;
+		data->cipher = key->cipher;
+		return;
+	}
+
+	if (data->status->num_of_gtk_rekeys)
+		ieee80211_remove_key(key);
+	else if (data->last_gtk == key)
+		iwl_mvm_set_key_rx_seq(data->mvm, key, data->status);
+}
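+
+/*
+ * A standalone sketch (not driver code) of the TKIP TSC packing used in
+ * the TKIP branch above: the 48-bit transmit counter is carried as a
+ * 32-bit iv32 and a 16-bit iv16, and the flat value handed to
+ * atomic64_set() is rebuilt as iv16 | (iv32 << 16).  Values are made up.
+ */
+#if 0
+#include <stdint.h>
+#include <stdio.h>
+
+int main(void)
+{
+	uint32_t iv32 = 0x00010000;
+	uint16_t iv16 = 0x1234;
+	uint64_t tsc = (uint64_t)iv16 | ((uint64_t)iv32 << 16);
+
+	printf("tsc = 0x%012llx\n",	/* 0x000100001234 */
+	       (unsigned long long)tsc);
+	return 0;
+}
+#endif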
+
+static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
+					  struct ieee80211_vif *vif,
+					  struct iwl_wowlan_status *status)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm_d3_gtk_iter_data gtkdata = {
+		.mvm = mvm,
+		.status = status,
+	};
+	u32 disconnection_reasons =
+		IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
+		IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
+
+	if (!status || !vif->bss_conf.bssid)
+		return false;
+
+	if (le32_to_cpu(status->wakeup_reasons) & disconnection_reasons)
+		return false;
+
+	/* find last GTK that we used initially, if any */
+	gtkdata.find_phase = true;
+	ieee80211_iter_keys(mvm->hw, vif,
+			    iwl_mvm_d3_update_keys, &gtkdata);
+	/* not trying to keep connections with MFP/unhandled ciphers */
+	if (gtkdata.unhandled_cipher)
+		return false;
+	if (!gtkdata.num_keys)
+		goto out;
+	if (!gtkdata.last_gtk)
+		return false;
+
+	/*
+	 * invalidate all other GTKs that might still exist and update
+	 * the one that we used
+	 */
+	gtkdata.find_phase = false;
+	ieee80211_iter_keys(mvm->hw, vif,
+			    iwl_mvm_d3_update_keys, &gtkdata);
+
+	if (status->num_of_gtk_rekeys) {
+		struct ieee80211_key_conf *key;
+		struct {
+			struct ieee80211_key_conf conf;
+			u8 key[32];
+		} conf = {
+			.conf.cipher = gtkdata.cipher,
+			.conf.keyidx = status->gtk.key_index,
+		};
+
+		switch (gtkdata.cipher) {
+		case WLAN_CIPHER_SUITE_CCMP:
+			conf.conf.keylen = WLAN_KEY_LEN_CCMP;
+			memcpy(conf.conf.key, status->gtk.decrypt_key,
+			       WLAN_KEY_LEN_CCMP);
+			break;
+		case WLAN_CIPHER_SUITE_TKIP:
+			conf.conf.keylen = WLAN_KEY_LEN_TKIP;
+			memcpy(conf.conf.key, status->gtk.decrypt_key, 16);
+			/* leave TX MIC key zeroed, we don't use it anyway */
+			memcpy(conf.conf.key +
+			       NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
+			       status->gtk.tkip_mic_key, 8);
+			break;
+		}
+
+		key = ieee80211_gtk_rekey_add(vif, &conf.conf);
+		if (IS_ERR(key))
+			return false;
+		iwl_mvm_set_key_rx_seq(mvm, key, status);
+	}
+
+	if (status->num_of_gtk_rekeys) {
+		__be64 replay_ctr =
+			cpu_to_be64(le64_to_cpu(status->replay_ctr));
+		ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid,
+					   (void *)&replay_ctr, GFP_KERNEL);
+	}
+
+out:
+	mvmvif->seqno_valid = true;
+	/* +0x10 because the set API expects next-to-use, not last-used */
+	mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10;
+
+	return true;
+}
+
+static struct iwl_wowlan_status *
+iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+	u32 base = mvm->error_event_table[0];
+	struct error_table_start {
+		/* cf. struct iwl_error_event_table */
+		u32 valid;
+		u32 error_id;
+	} err_info;
+	struct iwl_host_cmd cmd = {
+		.id = WOWLAN_GET_STATUSES,
+		.flags = CMD_WANT_SKB,
+	};
+	struct iwl_wowlan_status *status, *fw_status;
+	int ret, len, status_size;
+
+	iwl_trans_read_mem_bytes(mvm->trans, base,
+				 &err_info, sizeof(err_info));
+
+	if (err_info.valid) {
+		IWL_INFO(mvm, "error table is valid (%d) with error (%d)\n",
+			 err_info.valid, err_info.error_id);
+		if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
+			struct cfg80211_wowlan_wakeup wakeup = {
+				.rfkill_release = true,
+			};
+			ieee80211_report_wowlan_wakeup(vif, &wakeup,
+						       GFP_KERNEL);
+		}
+		return ERR_PTR(-EIO);
+	}
+
+	/* only for tracing for now */
+	ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, 0, 0, NULL);
+	if (ret)
+		IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
+
+	ret = iwl_mvm_send_cmd(mvm, &cmd);
+	if (ret) {
+		IWL_ERR(mvm, "failed to query status (%d)\n", ret);
+		return ERR_PTR(ret);
+	}
+
+	status_size = sizeof(*fw_status);
+
+	len = iwl_rx_packet_payload_len(cmd.resp_pkt);
+	if (len < status_size) {
+		IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
+		fw_status = ERR_PTR(-EIO);
+		goto out_free_resp;
+	}
+
+	status = (void *)cmd.resp_pkt->data;
+	if (len != (status_size +
+		    ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4))) {
+		IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
+		fw_status = ERR_PTR(-EIO);
+		goto out_free_resp;
+	}
+
+	fw_status = kmemdup(status, len, GFP_KERNEL);
+
+out_free_resp:
+	iwl_free_resp(&cmd);
+	return fw_status;
+}
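+
+/*
+ * A standalone sketch (not driver code) of the length validation above:
+ * the firmware appends the wake packet to the fixed status structure
+ * and pads it to a 4-byte boundary, so the expected payload length is
+ * sizeof(status) + ALIGN(wake_packet_bufsize, 4).  The sizes below are
+ * made up.
+ */
+#if 0
+#include <stdio.h>
+
+#define ALIGN4(x) (((x) + 3u) & ~3u)
+
+int main(void)
+{
+	unsigned int status_size = 208;	/* made-up fixed struct size */
+	unsigned int bufsize = 61;	/* wake packet bytes from fw */
+
+	/* 208 + ALIGN4(61) = 208 + 64 = 272 */
+	printf("expected len = %u\n", status_size + ALIGN4(bufsize));
+	return 0;
+}
+#endif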
+
+/* releases the MVM mutex */
+static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
+					 struct ieee80211_vif *vif)
+{
+	struct iwl_wowlan_status_data status;
+	struct iwl_wowlan_status *fw_status;
+	int i;
+	bool keep;
+	struct iwl_mvm_sta *mvm_ap_sta;
+
+	fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
+	if (IS_ERR_OR_NULL(fw_status))
+		goto out_unlock;
+
+	status.pattern_number = le16_to_cpu(fw_status->pattern_number);
+	for (i = 0; i < 8; i++)
+		status.qos_seq_ctr[i] =
+			le16_to_cpu(fw_status->qos_seq_ctr[i]);
+	status.wakeup_reasons = le32_to_cpu(fw_status->wakeup_reasons);
+	status.wake_packet_length =
+		le32_to_cpu(fw_status->wake_packet_length);
+	status.wake_packet_bufsize =
+		le32_to_cpu(fw_status->wake_packet_bufsize);
+	status.wake_packet = fw_status->wake_packet;
+
+	/* still at hard-coded place 0 for D3 image */
+	mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, 0);
+	if (!mvm_ap_sta)
+		goto out_free;
+
+	for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+		u16 seq = status.qos_seq_ctr[i];
+		/* firmware stores last-used value, we store next value */
+		seq += 0x10;
+		mvm_ap_sta->tid_data[i].seq_number = seq;
+	}
+
+	/* now we have all the data we need, unlock to avoid mac80211 issues */
+	mutex_unlock(&mvm->mutex);
+
+	iwl_mvm_report_wakeup_reasons(mvm, vif, &status);
+
+	keep = iwl_mvm_setup_connection_keep(mvm, vif, fw_status);
+
+	kfree(fw_status);
+	return keep;
+
+out_free:
+	kfree(fw_status);
+out_unlock:
+	mutex_unlock(&mvm->mutex);
+	return false;
+}
+
+void iwl_mvm_d0i3_update_keys(struct iwl_mvm *mvm,
+			      struct ieee80211_vif *vif,
+			      struct iwl_wowlan_status *status)
+{
+	struct iwl_mvm_d3_gtk_iter_data gtkdata = {
+		.mvm = mvm,
+		.status = status,
+	};
+
+	/*
+	 * rekey handling requires taking locks that can't be taken now.
+	 * however, d0i3 doesn't offload rekey, so we're fine.
+	 */
+	if (WARN_ON_ONCE(status->num_of_gtk_rekeys))
+		return;
+
+	/* find last GTK that we used initially, if any */
+	gtkdata.find_phase = true;
+	iwl_mvm_iter_d0i3_ap_keys(mvm, vif, iwl_mvm_d3_update_keys, &gtkdata);
+
+	gtkdata.find_phase = false;
+	iwl_mvm_iter_d0i3_ap_keys(mvm, vif, iwl_mvm_d3_update_keys, &gtkdata);
+}
+
+struct iwl_mvm_nd_query_results {
+	u32 matched_profiles;
+	struct iwl_scan_offload_profile_match matches[IWL_SCAN_MAX_PROFILES];
+};
+
+static int
+iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
+				struct iwl_mvm_nd_query_results *results)
+{
+	struct iwl_scan_offload_profiles_query *query;
+	struct iwl_host_cmd cmd = {
+		.id = SCAN_OFFLOAD_PROFILES_QUERY_CMD,
+		.flags = CMD_WANT_SKB,
+	};
+	int ret, len;
+
+	ret = iwl_mvm_send_cmd(mvm, &cmd);
+	if (ret) {
+		IWL_ERR(mvm, "failed to query matched profiles (%d)\n", ret);
+		return ret;
+	}
+
+	len = iwl_rx_packet_payload_len(cmd.resp_pkt);
+	if (len < sizeof(*query)) {
+		IWL_ERR(mvm, "Invalid scan offload profiles query response!\n");
+		ret = -EIO;
+		goto out_free_resp;
+	}
+
+	query = (void *)cmd.resp_pkt->data;
+
+	results->matched_profiles = le32_to_cpu(query->matched_profiles);
+	memcpy(results->matches, query->matches, sizeof(results->matches));
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	mvm->last_netdetect_scans = le32_to_cpu(query->n_scans_done);
+#endif
+
+out_free_resp:
+	iwl_free_resp(&cmd);
+	return ret;
+}
+
+static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
+					    struct ieee80211_vif *vif)
+{
+	struct cfg80211_wowlan_nd_info *net_detect = NULL;
+	struct cfg80211_wowlan_wakeup wakeup = {
+		.pattern_idx = -1,
+	};
+	struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
+	struct iwl_mvm_nd_query_results query;
+	struct iwl_wowlan_status *fw_status;
+	unsigned long matched_profiles;
+	u32 reasons = 0;
+	int i, j, n_matches, ret;
+
+	fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
+	if (!IS_ERR_OR_NULL(fw_status)) {
+		reasons = le32_to_cpu(fw_status->wakeup_reasons);
+		kfree(fw_status);
+	}
+
+	if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
+		wakeup.rfkill_release = true;
+
+	if (reasons != IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS)
+		goto out;
+
+	ret = iwl_mvm_netdetect_query_results(mvm, &query);
+	if (ret || !query.matched_profiles) {
+		wakeup_report = NULL;
+		goto out;
+	}
+
+	matched_profiles = query.matched_profiles;
+	if (mvm->n_nd_match_sets) {
+		n_matches = hweight_long(matched_profiles);
+	} else {
+		IWL_ERR(mvm, "no net detect match information available\n");
+		n_matches = 0;
+	}
+
+	net_detect = kzalloc(sizeof(*net_detect) +
+			     (n_matches * sizeof(net_detect->matches[0])),
+			     GFP_KERNEL);
+	if (!net_detect || !n_matches)
+		goto out_report_nd;
+
+	for_each_set_bit(i, &matched_profiles, mvm->n_nd_match_sets) {
+		struct iwl_scan_offload_profile_match *fw_match;
+		struct cfg80211_wowlan_nd_match *match;
+		int idx, n_channels = 0;
+
+		fw_match = &query.matches[i];
+
+		for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; j++)
+			n_channels += hweight8(fw_match->matching_channels[j]);
+
+		match = kzalloc(sizeof(*match) +
+				(n_channels * sizeof(*match->channels)),
+				GFP_KERNEL);
+		if (!match)
+			goto out_report_nd;
+
+		net_detect->matches[net_detect->n_matches++] = match;
+
+		/* We inverted the order of the SSIDs in the scan
+		 * request, so invert the index here.
+		 */
+		idx = mvm->n_nd_match_sets - i - 1;
+		match->ssid.ssid_len = mvm->nd_match_sets[idx].ssid.ssid_len;
+		memcpy(match->ssid.ssid, mvm->nd_match_sets[idx].ssid.ssid,
+		       match->ssid.ssid_len);
+
+		if (mvm->n_nd_channels < n_channels)
+			continue;
+
+		for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN * 8; j++)
+			if (fw_match->matching_channels[j / 8] & (BIT(j % 8)))
+				match->channels[match->n_channels++] =
+					mvm->nd_channels[j]->center_freq;
+	}
+
+out_report_nd:
+	wakeup.net_detect = net_detect;
+out:
+	iwl_mvm_free_nd(mvm);
+
+	mutex_unlock(&mvm->mutex);
+	ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
+
+	if (net_detect) {
+		for (i = 0; i < net_detect->n_matches; i++)
+			kfree(net_detect->matches[i]);
+		kfree(net_detect);
+	}
+}
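+
+/*
+ * A standalone sketch (not driver code) of the matched-channels bitmap
+ * walk above: the firmware reports one bit per scan channel, packed
+ * LSB-first within each byte, so channel j lives at bit (j % 8) of byte
+ * (j / 8).  The bitmap contents are made up.
+ */
+#if 0
+#include <stdio.h>
+
+int main(void)
+{
+	unsigned char bitmap[2] = { 0x05, 0x80 };	/* bits 0, 2, 15 */
+	int j;
+
+	for (j = 0; j < 2 * 8; j++)
+		if (bitmap[j / 8] & (1u << (j % 8)))
+			printf("channel index %d matched\n", j);
+	return 0;
+}
+#endif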
+
+static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm)
+{
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	const struct fw_img *img = &mvm->fw->img[IWL_UCODE_WOWLAN];
+	u32 len = img->sec[IWL_UCODE_SECTION_DATA].len;
+	u32 offs = img->sec[IWL_UCODE_SECTION_DATA].offset;
+
+	if (!mvm->store_d3_resume_sram)
+		return;
+
+	if (!mvm->d3_resume_sram) {
+		mvm->d3_resume_sram = kzalloc(len, GFP_KERNEL);
+		if (!mvm->d3_resume_sram)
+			return;
+	}
+
+	iwl_trans_read_mem_bytes(mvm->trans, offs, mvm->d3_resume_sram, len);
+#endif
+}
+
+static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
+				       struct ieee80211_vif *vif)
+{
+	/* skip the one we keep connection on */
+	if (data == vif)
+		return;
+
+	if (vif->type == NL80211_IFTYPE_STATION)
+		ieee80211_resume_disconnect(vif);
+}
+
+static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
+{
+	struct ieee80211_vif *vif = NULL;
+	int ret = 1;
+	enum iwl_d3_status d3_status;
+	bool keep = false;
+	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+	bool d0i3_first = fw_has_capa(&mvm->fw->ucode_capa,
+				      IWL_UCODE_TLV_CAPA_D0I3_END_FIRST);
+
+	mutex_lock(&mvm->mutex);
+
+	/* get the BSS vif pointer again */
+	vif = iwl_mvm_get_bss_vif(mvm);
+	if (IS_ERR_OR_NULL(vif))
+		goto err;
+
+	ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image);
+	if (ret)
+		goto err;
+
+	if (d3_status != IWL_D3_STATUS_ALIVE) {
+		IWL_INFO(mvm, "Device was reset during suspend\n");
+		goto err;
+	}
+
+	/* query SRAM first in case we want event logging */
+	iwl_mvm_read_d3_sram(mvm);
+
+	if (d0i3_first) {
+		ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
+		if (ret < 0) {
+			IWL_ERR(mvm, "Failed to send D0I3_END_CMD first (%d)\n",
+				ret);
+			goto err;
+		}
+	}
+
+	/*
+	 * Query the current location and source from the D3 firmware so we
+	 * can play it back when we re-initialize the D0 firmware
+	 */
+	iwl_mvm_update_changed_regdom(mvm);
+
+	if (!unified_image)
+		/* re-configure default SAR profile */
+		iwl_mvm_sar_select_profile(mvm, 1, 1);
+
+	if (mvm->net_detect) {
+		/* If this is a non-unified image, we restart the FW,
+		 * so no need to stop the netdetect scan.  If that
+		 * fails, continue and try to get the wake-up reasons,
+		 * but trigger a HW restart by keeping a failure code
+		 * in ret.
+		 */
+		if (unified_image)
+			ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT,
+						false);
+
+		iwl_mvm_query_netdetect_reasons(mvm, vif);
+		/* has unlocked the mutex, so skip that */
+		goto out;
+	} else {
+		keep = iwl_mvm_query_wakeup_reasons(mvm, vif);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+		if (keep)
+			mvm->keep_vif = vif;
+#endif
+		/* has unlocked the mutex, so skip that */
+		goto out_iterate;
+	}
+
+err:
+	iwl_mvm_free_nd(mvm);
+	mutex_unlock(&mvm->mutex);
+
+out_iterate:
+	if (!test)
+		ieee80211_iterate_active_interfaces_rtnl(mvm->hw,
+			IEEE80211_IFACE_ITER_NORMAL,
+			iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);
+
+out:
+	/* no need to reset the device in unified images, if successful */
+	if (unified_image && !ret) {
+		/* nothing else to do if we already sent D0I3_END_CMD */
+		if (d0i3_first)
+			return 0;
+
+		ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, 0, 0, NULL);
+		if (!ret)
+			return 0;
+	}
+
+	/*
+	 * Reconfigure the device in one of the following cases:
+	 * 1. We are not using a unified image
+	 * 2. We are using a unified image but had an error while exiting D3
+	 */
+	set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
+	set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);
+	/*
+	 * When switching images we return 1, which causes mac80211
+	 * to do a reconfig with IEEE80211_RECONFIG_TYPE_RESTART.
+	 * This type of reconfig calls iwl_mvm_restart_complete(),
+	 * where we unref the IWL_MVM_REF_UCODE_DOWN, so we need
+	 * to take the reference here.
+	 */
+	iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
+
+	return 1;
+}
+
+static int iwl_mvm_resume_d3(struct iwl_mvm *mvm)
+{
+	iwl_trans_resume(mvm->trans);
+
+	return __iwl_mvm_resume(mvm, false);
+}
+
+static int iwl_mvm_resume_d0i3(struct iwl_mvm *mvm)
+{
+	bool exit_now;
+	enum iwl_d3_status d3_status;
+	struct iwl_trans *trans = mvm->trans;
+
+	iwl_trans_d3_resume(trans, &d3_status, false, false);
+
+	/*
+	 * make sure to clear D0I3_DEFER_WAKEUP before
+	 * calling iwl_trans_resume(), which might wait
+	 * for d0i3 exit completion.
+	 */
+	mutex_lock(&mvm->d0i3_suspend_mutex);
+	__clear_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
+	exit_now = __test_and_clear_bit(D0I3_PENDING_WAKEUP,
+					&mvm->d0i3_suspend_flags);
+	mutex_unlock(&mvm->d0i3_suspend_mutex);
+	if (exit_now) {
+		IWL_DEBUG_RPM(mvm, "Run deferred d0i3 exit\n");
+		_iwl_mvm_exit_d0i3(mvm);
+	}
+
+	iwl_trans_resume(trans);
+
+	if (iwl_mvm_enter_d0i3_on_suspend(mvm)) {
+		int ret = iwl_mvm_exit_d0i3(mvm->hw->priv);
+
+		if (ret)
+			return ret;
+		/*
+		 * d0i3 exit will be deferred until reconfig_complete.
+		 * make sure we are out of d0i3 there.
+		 */
+	}
+	return 0;
+}
+
+int iwl_mvm_resume(struct ieee80211_hw *hw)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	int ret;
+
+	if (mvm->trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3)
+		ret = iwl_mvm_resume_d0i3(mvm);
+	else
+		ret = iwl_mvm_resume_d3(mvm);
+
+	mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+
+	iwl_mvm_resume_tcm(mvm);
+
+	iwl_fw_runtime_resume(&mvm->fwrt);
+
+	return ret;
+}
+
+void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+	device_set_wakeup_enable(mvm->trans->dev, enabled);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
+{
+	struct iwl_mvm *mvm = inode->i_private;
+	int err;
+
+	if (mvm->d3_test_active)
+		return -EBUSY;
+
+	file->private_data = inode->i_private;
+
+	ieee80211_stop_queues(mvm->hw);
+	synchronize_net();
+
+	mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
+
+	iwl_mvm_pause_tcm(mvm, true);
+
+	iwl_fw_runtime_suspend(&mvm->fwrt);
+
+	/* start pseudo D3 */
+	rtnl_lock();
+	err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true);
+	rtnl_unlock();
+	if (err > 0)
+		err = -EINVAL;
+	if (err) {
+		ieee80211_wake_queues(mvm->hw);
+		return err;
+	}
+	mvm->d3_test_active = true;
+	mvm->keep_vif = NULL;
+	return 0;
+}
+
+static ssize_t iwl_mvm_d3_test_read(struct file *file, char __user *user_buf,
+				    size_t count, loff_t *ppos)
+{
+	struct iwl_mvm *mvm = file->private_data;
+	u32 pme_asserted;
+
+	while (true) {
+		/* read pme_ptr if available */
+		if (mvm->d3_test_pme_ptr) {
+			pme_asserted = iwl_trans_read_mem32(mvm->trans,
+						mvm->d3_test_pme_ptr);
+			if (pme_asserted)
+				break;
+		}
+
+		if (msleep_interruptible(100))
+			break;
+	}
+
+	return 0;
+}
+
+static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac,
+					      struct ieee80211_vif *vif)
+{
+	/* skip the one we keep connection on */
+	if (_data == vif)
+		return;
+
+	if (vif->type == NL80211_IFTYPE_STATION)
+		ieee80211_connection_loss(vif);
+}
+
+static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
+{
+	struct iwl_mvm *mvm = inode->i_private;
+	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+
+	mvm->d3_test_active = false;
+
+	rtnl_lock();
+	__iwl_mvm_resume(mvm, true);
+	rtnl_unlock();
+
+	iwl_mvm_resume_tcm(mvm);
+
+	iwl_fw_runtime_resume(&mvm->fwrt);
+
+	mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+
+	iwl_abort_notification_waits(&mvm->notif_wait);
+	if (!unified_image) {
+		int remaining_time = 10;
+
+		ieee80211_restart_hw(mvm->hw);
+
+		/* wait for restart and disconnect all interfaces */
+		while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+		       remaining_time > 0) {
+			remaining_time--;
+			msleep(1000);
+		}
+
+		if (remaining_time == 0)
+			IWL_ERR(mvm, "Timed out waiting for HW restart!\n");
+	}
+
+	ieee80211_iterate_active_interfaces_atomic(
+		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+		iwl_mvm_d3_test_disconn_work_iter, mvm->keep_vif);
+
+	ieee80211_wake_queues(mvm->hw);
+
+	return 0;
+}
+
+const struct file_operations iwl_dbgfs_d3_test_ops = {
+	.llseek = no_llseek,
+	.open = iwl_mvm_d3_test_open,
+	.read = iwl_mvm_d3_test_read,
+	.release = iwl_mvm_d3_test_release,
+};
+#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
new file mode 100644
index 0000000..798605c
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -0,0 +1,1570 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "mvm.h"
+#include "fw/api/tof.h"
+#include "debugfs.h"
+
+static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
+				 struct ieee80211_vif *vif,
+				 enum iwl_dbgfs_pm_mask param, int val)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_dbgfs_pm *dbgfs_pm = &mvmvif->dbgfs_pm;
+
+	dbgfs_pm->mask |= param;
+
+	switch (param) {
+	case MVM_DEBUGFS_PM_KEEP_ALIVE: {
+		int dtimper = vif->bss_conf.dtim_period ?: 1;
+		int dtimper_msec = dtimper * vif->bss_conf.beacon_int;
+
+		IWL_DEBUG_POWER(mvm, "debugfs: set keep_alive= %d sec\n", val);
+		if (val * MSEC_PER_SEC < 3 * dtimper_msec)
+			IWL_WARN(mvm,
+				 "debugfs: keep alive period (%ld msec) is less than minimum required (%d msec)\n",
+				 val * MSEC_PER_SEC, 3 * dtimper_msec);
+		dbgfs_pm->keep_alive_seconds = val;
+		break;
+	}
+	case MVM_DEBUGFS_PM_SKIP_OVER_DTIM:
+		IWL_DEBUG_POWER(mvm, "skip_over_dtim %s\n",
+				val ? "enabled" : "disabled");
+		dbgfs_pm->skip_over_dtim = val;
+		break;
+	case MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS:
+		IWL_DEBUG_POWER(mvm, "skip_dtim_periods=%d\n", val);
+		dbgfs_pm->skip_dtim_periods = val;
+		break;
+	case MVM_DEBUGFS_PM_RX_DATA_TIMEOUT:
+		IWL_DEBUG_POWER(mvm, "rx_data_timeout=%d\n", val);
+		dbgfs_pm->rx_data_timeout = val;
+		break;
+	case MVM_DEBUGFS_PM_TX_DATA_TIMEOUT:
+		IWL_DEBUG_POWER(mvm, "tx_data_timeout=%d\n", val);
+		dbgfs_pm->tx_data_timeout = val;
+		break;
+	case MVM_DEBUGFS_PM_LPRX_ENA:
+		IWL_DEBUG_POWER(mvm, "lprx %s\n", val ? "enabled" : "disabled");
+		dbgfs_pm->lprx_ena = val;
+		break;
+	case MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD:
+		IWL_DEBUG_POWER(mvm, "lprx_rssi_threshold=%d\n", val);
+		dbgfs_pm->lprx_rssi_threshold = val;
+		break;
+	case MVM_DEBUGFS_PM_SNOOZE_ENABLE:
+		IWL_DEBUG_POWER(mvm, "snooze_enable=%d\n", val);
+		dbgfs_pm->snooze_ena = val;
+		break;
+	case MVM_DEBUGFS_PM_UAPSD_MISBEHAVING:
+		IWL_DEBUG_POWER(mvm, "uapsd_misbehaving_enable=%d\n", val);
+		dbgfs_pm->uapsd_misbehaving = val;
+		break;
+	case MVM_DEBUGFS_PM_USE_PS_POLL:
+		IWL_DEBUG_POWER(mvm, "use_ps_poll=%d\n", val);
+		dbgfs_pm->use_ps_poll = val;
+		break;
+	}
+}
+
+static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf,
+					 size_t count, loff_t *ppos)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	enum iwl_dbgfs_pm_mask param;
+	int val, ret;
+
+	if (!strncmp("keep_alive=", buf, 11)) {
+		if (sscanf(buf + 11, "%d", &val) != 1)
+			return -EINVAL;
+		param = MVM_DEBUGFS_PM_KEEP_ALIVE;
+	} else if (!strncmp("skip_over_dtim=", buf, 15)) {
+		if (sscanf(buf + 15, "%d", &val) != 1)
+			return -EINVAL;
+		param = MVM_DEBUGFS_PM_SKIP_OVER_DTIM;
+	} else if (!strncmp("skip_dtim_periods=", buf, 18)) {
+		if (sscanf(buf + 18, "%d", &val) != 1)
+			return -EINVAL;
+		param = MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS;
+	} else if (!strncmp("rx_data_timeout=", buf, 16)) {
+		if (sscanf(buf + 16, "%d", &val) != 1)
+			return -EINVAL;
+		param = MVM_DEBUGFS_PM_RX_DATA_TIMEOUT;
+	} else if (!strncmp("tx_data_timeout=", buf, 16)) {
+		if (sscanf(buf + 16, "%d", &val) != 1)
+			return -EINVAL;
+		param = MVM_DEBUGFS_PM_TX_DATA_TIMEOUT;
+	} else if (!strncmp("lprx=", buf, 5)) {
+		if (sscanf(buf + 5, "%d", &val) != 1)
+			return -EINVAL;
+		param = MVM_DEBUGFS_PM_LPRX_ENA;
+	} else if (!strncmp("lprx_rssi_threshold=", buf, 20)) {
+		if (sscanf(buf + 20, "%d", &val) != 1)
+			return -EINVAL;
+		if (val > POWER_LPRX_RSSI_THRESHOLD_MAX || val <
+		    POWER_LPRX_RSSI_THRESHOLD_MIN)
+			return -EINVAL;
+		param = MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD;
+	} else if (!strncmp("snooze_enable=", buf, 14)) {
+		if (sscanf(buf + 14, "%d", &val) != 1)
+			return -EINVAL;
+		param = MVM_DEBUGFS_PM_SNOOZE_ENABLE;
+	} else if (!strncmp("uapsd_misbehaving=", buf, 18)) {
+		if (sscanf(buf + 18, "%d", &val) != 1)
+			return -EINVAL;
+		param = MVM_DEBUGFS_PM_UAPSD_MISBEHAVING;
+	} else if (!strncmp("use_ps_poll=", buf, 12)) {
+		if (sscanf(buf + 12, "%d", &val) != 1)
+			return -EINVAL;
+		param = MVM_DEBUGFS_PM_USE_PS_POLL;
+	} else {
+		return -EINVAL;
+	}
+
+	mutex_lock(&mvm->mutex);
+	iwl_dbgfs_update_pm(mvm, vif, param, val);
+	ret = iwl_mvm_power_update_mac(mvm);
+	mutex_unlock(&mvm->mutex);
+
+	return ret ?: count;
+}
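+
+/*
+ * The "name=value" parsing above is open-coded once per parameter.  A
+ * table-driven equivalent is sketched here as standalone userspace code
+ * (not driver code); the keyword strings match the debugfs names above,
+ * everything else is made up.
+ */
+#if 0
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+struct pm_param {
+	const char *name;	/* "keyword=" prefix */
+	int id;			/* stand-in for the enum value */
+};
+
+static const struct pm_param params[] = {
+	{ "keep_alive=",	1 },
+	{ "skip_over_dtim=",	2 },
+	{ "skip_dtim_periods=",	3 },
+};
+
+int main(void)
+{
+	const char *buf = "skip_dtim_periods=3";
+	size_t i;
+
+	for (i = 0; i < sizeof(params) / sizeof(params[0]); i++) {
+		size_t len = strlen(params[i].name);
+
+		if (!strncmp(params[i].name, buf, len)) {
+			printf("param %d, val %ld\n", params[i].id,
+			       strtol(buf + len, NULL, 10));
+			return 0;
+		}
+	}
+	return 1;	/* no keyword matched, cf. -EINVAL above */
+}
+#endif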
+
+static ssize_t iwl_dbgfs_tx_pwr_lmt_read(struct file *file,
+					 char __user *user_buf,
+					 size_t count, loff_t *ppos)
+{
+	struct ieee80211_vif *vif = file->private_data;
+	char buf[64];
+	int bufsz = sizeof(buf);
+	int pos;
+
+	pos = scnprintf(buf, bufsz, "bss limit = %d\n",
+			vif->bss_conf.txpower);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
+					char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct ieee80211_vif *vif = file->private_data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	char buf[512];
+	int bufsz = sizeof(buf);
+	int pos;
+
+	pos = iwl_mvm_power_mac_dbgfs_read(mvm, vif, buf, bufsz);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
+					 char __user *user_buf,
+					 size_t count, loff_t *ppos)
+{
+	struct ieee80211_vif *vif = file->private_data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	u8 ap_sta_id;
+	struct ieee80211_chanctx_conf *chanctx_conf;
+	char buf[512];
+	int bufsz = sizeof(buf);
+	int pos = 0;
+	int i;
+
+	mutex_lock(&mvm->mutex);
+
+	ap_sta_id = mvmvif->ap_sta_id;
+
+	switch (ieee80211_vif_type_p2p(vif)) {
+	case NL80211_IFTYPE_ADHOC:
+		pos += scnprintf(buf+pos, bufsz-pos, "type: ibss\n");
+		break;
+	case NL80211_IFTYPE_STATION:
+		pos += scnprintf(buf+pos, bufsz-pos, "type: bss\n");
+		break;
+	case NL80211_IFTYPE_AP:
+		pos += scnprintf(buf+pos, bufsz-pos, "type: ap\n");
+		break;
+	case NL80211_IFTYPE_P2P_CLIENT:
+		pos += scnprintf(buf+pos, bufsz-pos, "type: p2p client\n");
+		break;
+	case NL80211_IFTYPE_P2P_GO:
+		pos += scnprintf(buf+pos, bufsz-pos, "type: p2p go\n");
+		break;
+	case NL80211_IFTYPE_P2P_DEVICE:
+		pos += scnprintf(buf+pos, bufsz-pos, "type: p2p dev\n");
+		break;
+	default:
+		break;
+	}
+
+	pos += scnprintf(buf+pos, bufsz-pos, "mac id/color: %d / %d\n",
+			 mvmvif->id, mvmvif->color);
+	pos += scnprintf(buf+pos, bufsz-pos, "bssid: %pM\n",
+			 vif->bss_conf.bssid);
+	pos += scnprintf(buf+pos, bufsz-pos, "Load: %d\n",
+			 mvm->tcm.result.load[mvmvif->id]);
+	pos += scnprintf(buf+pos, bufsz-pos, "QoS:\n");
+	for (i = 0; i < ARRAY_SIZE(mvmvif->queue_params); i++)
+		pos += scnprintf(buf+pos, bufsz-pos,
+				 "\t%d: txop:%d - cw_min:%d - cw_max = %d - aifs = %d upasd = %d\n",
+				 i, mvmvif->queue_params[i].txop,
+				 mvmvif->queue_params[i].cw_min,
+				 mvmvif->queue_params[i].cw_max,
+				 mvmvif->queue_params[i].aifs,
+				 mvmvif->queue_params[i].uapsd);
+
+	if (vif->type == NL80211_IFTYPE_STATION &&
+	    ap_sta_id != IWL_MVM_INVALID_STA) {
+		struct iwl_mvm_sta *mvm_sta;
+
+		mvm_sta = iwl_mvm_sta_from_staid_protected(mvm, ap_sta_id);
+		if (mvm_sta) {
+			pos += scnprintf(buf+pos, bufsz-pos,
+					 "ap_sta_id %d - reduced Tx power %d\n",
+					 ap_sta_id,
+					 mvm_sta->bt_reduced_txpower);
+		}
+	}
+
+	rcu_read_lock();
+	chanctx_conf = rcu_dereference(vif->chanctx_conf);
+	if (chanctx_conf)
+		pos += scnprintf(buf+pos, bufsz-pos,
+				 "idle rx chains %d, active rx chains: %d\n",
+				 chanctx_conf->rx_chains_static,
+				 chanctx_conf->rx_chains_dynamic);
+	rcu_read_unlock();
+
+	mutex_unlock(&mvm->mutex);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
+				enum iwl_dbgfs_bf_mask param, int value)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf;
+
+	dbgfs_bf->mask |= param;
+
+	switch (param) {
+	case MVM_DEBUGFS_BF_ENERGY_DELTA:
+		dbgfs_bf->bf_energy_delta = value;
+		break;
+	case MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA:
+		dbgfs_bf->bf_roaming_energy_delta = value;
+		break;
+	case MVM_DEBUGFS_BF_ROAMING_STATE:
+		dbgfs_bf->bf_roaming_state = value;
+		break;
+	case MVM_DEBUGFS_BF_TEMP_THRESHOLD:
+		dbgfs_bf->bf_temp_threshold = value;
+		break;
+	case MVM_DEBUGFS_BF_TEMP_FAST_FILTER:
+		dbgfs_bf->bf_temp_fast_filter = value;
+		break;
+	case MVM_DEBUGFS_BF_TEMP_SLOW_FILTER:
+		dbgfs_bf->bf_temp_slow_filter = value;
+		break;
+	case MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER:
+		dbgfs_bf->bf_enable_beacon_filter = value;
+		break;
+	case MVM_DEBUGFS_BF_DEBUG_FLAG:
+		dbgfs_bf->bf_debug_flag = value;
+		break;
+	case MVM_DEBUGFS_BF_ESCAPE_TIMER:
+		dbgfs_bf->bf_escape_timer = value;
+		break;
+	case MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT:
+		dbgfs_bf->ba_enable_beacon_abort = value;
+		break;
+	case MVM_DEBUGFS_BA_ESCAPE_TIMER:
+		dbgfs_bf->ba_escape_timer = value;
+		break;
+	}
+}
+
+static ssize_t iwl_dbgfs_bf_params_write(struct ieee80211_vif *vif, char *buf,
+					 size_t count, loff_t *ppos)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	enum iwl_dbgfs_bf_mask param;
+	int value, ret = 0;
+
+	if (!strncmp("bf_energy_delta=", buf, 16)) {
+		if (sscanf(buf+16, "%d", &value) != 1)
+			return -EINVAL;
+		if (value < IWL_BF_ENERGY_DELTA_MIN ||
+		    value > IWL_BF_ENERGY_DELTA_MAX)
+			return -EINVAL;
+		param = MVM_DEBUGFS_BF_ENERGY_DELTA;
+	} else if (!strncmp("bf_roaming_energy_delta=", buf, 24)) {
+		if (sscanf(buf+24, "%d", &value) != 1)
+			return -EINVAL;
+		if (value < IWL_BF_ROAMING_ENERGY_DELTA_MIN ||
+		    value > IWL_BF_ROAMING_ENERGY_DELTA_MAX)
+			return -EINVAL;
+		param = MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA;
+	} else if (!strncmp("bf_roaming_state=", buf, 17)) {
+		if (sscanf(buf+17, "%d", &value) != 1)
+			return -EINVAL;
+		if (value < IWL_BF_ROAMING_STATE_MIN ||
+		    value > IWL_BF_ROAMING_STATE_MAX)
+			return -EINVAL;
+		param = MVM_DEBUGFS_BF_ROAMING_STATE;
+	} else if (!strncmp("bf_temp_threshold=", buf, 18)) {
+		if (sscanf(buf+18, "%d", &value) != 1)
+			return -EINVAL;
+		if (value < IWL_BF_TEMP_THRESHOLD_MIN ||
+		    value > IWL_BF_TEMP_THRESHOLD_MAX)
+			return -EINVAL;
+		param = MVM_DEBUGFS_BF_TEMP_THRESHOLD;
+	} else if (!strncmp("bf_temp_fast_filter=", buf, 20)) {
+		if (sscanf(buf+20, "%d", &value) != 1)
+			return -EINVAL;
+		if (value < IWL_BF_TEMP_FAST_FILTER_MIN ||
+		    value > IWL_BF_TEMP_FAST_FILTER_MAX)
+			return -EINVAL;
+		param = MVM_DEBUGFS_BF_TEMP_FAST_FILTER;
+	} else if (!strncmp("bf_temp_slow_filter=", buf, 20)) {
+		if (sscanf(buf+20, "%d", &value) != 1)
+			return -EINVAL;
+		if (value < IWL_BF_TEMP_SLOW_FILTER_MIN ||
+		    value > IWL_BF_TEMP_SLOW_FILTER_MAX)
+			return -EINVAL;
+		param = MVM_DEBUGFS_BF_TEMP_SLOW_FILTER;
+	} else if (!strncmp("bf_enable_beacon_filter=", buf, 24)) {
+		if (sscanf(buf+24, "%d", &value) != 1)
+			return -EINVAL;
+		if (value < 0 || value > 1)
+			return -EINVAL;
+		param = MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER;
+	} else if (!strncmp("bf_debug_flag=", buf, 14)) {
+		if (sscanf(buf+14, "%d", &value) != 1)
+			return -EINVAL;
+		if (value < 0 || value > 1)
+			return -EINVAL;
+		param = MVM_DEBUGFS_BF_DEBUG_FLAG;
+	} else if (!strncmp("bf_escape_timer=", buf, 16)) {
+		if (sscanf(buf+16, "%d", &value) != 1)
+			return -EINVAL;
+		if (value < IWL_BF_ESCAPE_TIMER_MIN ||
+		    value > IWL_BF_ESCAPE_TIMER_MAX)
+			return -EINVAL;
+		param = MVM_DEBUGFS_BF_ESCAPE_TIMER;
+	} else if (!strncmp("ba_escape_timer=", buf, 16)) {
+		if (sscanf(buf+16, "%d", &value) != 1)
+			return -EINVAL;
+		if (value < IWL_BA_ESCAPE_TIMER_MIN ||
+		    value > IWL_BA_ESCAPE_TIMER_MAX)
+			return -EINVAL;
+		param = MVM_DEBUGFS_BA_ESCAPE_TIMER;
+	} else if (!strncmp("ba_enable_beacon_abort=", buf, 23)) {
+		if (sscanf(buf+23, "%d", &value) != 1)
+			return -EINVAL;
+		if (value < 0 || value > 1)
+			return -EINVAL;
+		param = MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT;
+	} else {
+		return -EINVAL;
+	}
+
+	mutex_lock(&mvm->mutex);
+	iwl_dbgfs_update_bf(vif, param, value);
+	if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value)
+		ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+	else
+		ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
+	mutex_unlock(&mvm->mutex);
+
+	return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
+					char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct ieee80211_vif *vif = file->private_data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	char buf[256];
+	int pos = 0;
+	const size_t bufsz = sizeof(buf);
+	struct iwl_beacon_filter_cmd cmd = {
+		IWL_BF_CMD_CONFIG_DEFAULTS,
+		.bf_enable_beacon_filter =
+			cpu_to_le32(IWL_BF_ENABLE_BEACON_FILTER_DEFAULT),
+		.ba_enable_beacon_abort =
+			cpu_to_le32(IWL_BA_ENABLE_BEACON_ABORT_DEFAULT),
+	};
+
+	iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
+	if (mvmvif->bf_data.bf_enabled)
+		cmd.bf_enable_beacon_filter = cpu_to_le32(1);
+	else
+		cmd.bf_enable_beacon_filter = 0;
+
+	pos += scnprintf(buf+pos, bufsz-pos, "bf_energy_delta = %d\n",
+			 le32_to_cpu(cmd.bf_energy_delta));
+	pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_energy_delta = %d\n",
+			 le32_to_cpu(cmd.bf_roaming_energy_delta));
+	pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_state = %d\n",
+			 le32_to_cpu(cmd.bf_roaming_state));
+	pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_threshold = %d\n",
+			 le32_to_cpu(cmd.bf_temp_threshold));
+	pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_fast_filter = %d\n",
+			 le32_to_cpu(cmd.bf_temp_fast_filter));
+	pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_slow_filter = %d\n",
+			 le32_to_cpu(cmd.bf_temp_slow_filter));
+	pos += scnprintf(buf+pos, bufsz-pos, "bf_enable_beacon_filter = %d\n",
+			 le32_to_cpu(cmd.bf_enable_beacon_filter));
+	pos += scnprintf(buf+pos, bufsz-pos, "bf_debug_flag = %d\n",
+			 le32_to_cpu(cmd.bf_debug_flag));
+	pos += scnprintf(buf+pos, bufsz-pos, "bf_escape_timer = %d\n",
+			 le32_to_cpu(cmd.bf_escape_timer));
+	pos += scnprintf(buf+pos, bufsz-pos, "ba_escape_timer = %d\n",
+			 le32_to_cpu(cmd.ba_escape_timer));
+	pos += scnprintf(buf+pos, bufsz-pos, "ba_enable_beacon_abort = %d\n",
+			 le32_to_cpu(cmd.ba_enable_beacon_abort));
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static inline char *iwl_dbgfs_is_match(char *name, char *buf)
+{
+	int len = strlen(name);
+
+	return !strncmp(name, buf, len) ? buf + len : NULL;
+}
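+
+/*
+ * Illustrative example (not part of the original source): with
+ * buf = "burst_period=100\n", iwl_dbgfs_is_match("burst_period=", buf)
+ * returns a pointer to "100\n", which the kstrtou32() calls below parse
+ * directly (the kstrtox helpers tolerate a single trailing newline).
+ */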
+
+static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file,
+						 char __user *user_buf,
+						 size_t count, loff_t *ppos)
+{
+	struct ieee80211_vif *vif = file->private_data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	u32 curr_gp2;
+	u64 curr_os;
+	s64 diff;
+	char buf[64];
+	const size_t bufsz = sizeof(buf);
+	int pos = 0;
+
+	iwl_mvm_get_sync_time(mvm, &curr_gp2, &curr_os);
+	do_div(curr_os, NSEC_PER_USEC);
+	diff = curr_os - curr_gp2;
+	pos += scnprintf(buf + pos, bufsz - pos, "diff=%lld\n", diff);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_enable_write(struct ieee80211_vif *vif,
+					  char *buf,
+					  size_t count, loff_t *ppos)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	u32 value;
+	int ret = -EINVAL;
+	char *data;
+
+	mutex_lock(&mvm->mutex);
+
+	data = iwl_dbgfs_is_match("tof_disabled=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.tof_cfg.tof_disabled = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("one_sided_disabled=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.tof_cfg.one_sided_disabled = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("is_debug_mode=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.tof_cfg.is_debug_mode = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("is_buf=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.tof_cfg.is_buf_required = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("send_tof_cfg=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0 && value) {
+			ret = iwl_mvm_tof_config_cmd(mvm);
+			goto out;
+		}
+	}
+
+out:
+	mutex_unlock(&mvm->mutex);
+
+	return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_enable_read(struct file *file,
+					 char __user *user_buf,
+					 size_t count, loff_t *ppos)
+{
+	struct ieee80211_vif *vif = file->private_data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	char buf[256];
+	int pos = 0;
+	const size_t bufsz = sizeof(buf);
+	struct iwl_tof_config_cmd *cmd;
+
+	cmd = &mvm->tof_data.tof_cfg;
+
+	mutex_lock(&mvm->mutex);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "tof_disabled = %d\n",
+			 cmd->tof_disabled);
+	pos += scnprintf(buf + pos, bufsz - pos, "one_sided_disabled = %d\n",
+			 cmd->one_sided_disabled);
+	pos += scnprintf(buf + pos, bufsz - pos, "is_debug_mode = %d\n",
+			 cmd->is_debug_mode);
+	pos += scnprintf(buf + pos, bufsz - pos, "is_buf_required = %d\n",
+			 cmd->is_buf_required);
+
+	mutex_unlock(&mvm->mutex);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_responder_params_write(struct ieee80211_vif *vif,
+						    char *buf,
+						    size_t count, loff_t *ppos)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	u32 value;
+	int ret = 0;
+	char *data;
+
+	mutex_lock(&mvm->mutex);
+
+	data = iwl_dbgfs_is_match("burst_period=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (!ret)
+			mvm->tof_data.responder_cfg.burst_period =
+							cpu_to_le16(value);
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("min_delta_ftm=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.min_delta_ftm = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("burst_duration=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.burst_duration = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("num_of_burst_exp=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.num_of_burst_exp = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("abort_responder=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.abort_responder = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("get_ch_est=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.get_ch_est = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("recv_sta_req_params=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.recv_sta_req_params = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("channel_num=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.channel_num = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("bandwidth=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.bandwidth = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("rate=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.rate = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("bssid=", buf);
+	if (data) {
+		u8 *mac = mvm->tof_data.responder_cfg.bssid;
+
+		if (!mac_pton(data, mac)) {
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	data = iwl_dbgfs_is_match("tsf_timer_offset_msecs=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.tsf_timer_offset_msecs =
+							cpu_to_le16(value);
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("toa_offset=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.toa_offset =
+							cpu_to_le16(value);
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("center_freq=", buf);
+	if (data) {
+		struct iwl_tof_responder_config_cmd *cmd =
+			&mvm->tof_data.responder_cfg;
+
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0 && value) {
+			enum nl80211_band band = (cmd->channel_num <= 14) ?
+						   NL80211_BAND_2GHZ :
+						   NL80211_BAND_5GHZ;
+			struct ieee80211_channel chn = {
+				.band = band,
+				.center_freq = ieee80211_channel_to_frequency(
+					cmd->channel_num, band),
+				};
+			struct cfg80211_chan_def chandef = {
+				.chan =  &chn,
+				.center_freq1 =
+					ieee80211_channel_to_frequency(value,
+								       band),
+			};
+
+			cmd->ctrl_ch_position = iwl_mvm_get_ctrl_pos(&chandef);
+		}
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("ftm_per_burst=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.ftm_per_burst = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("ftm_resp_ts_avail=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.ftm_resp_ts_avail = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("asap_mode=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.responder_cfg.asap_mode = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("send_responder_cfg=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0 && value) {
+			ret = iwl_mvm_tof_responder_cmd(mvm, vif);
+			goto out;
+		}
+	}
+
+out:
+	mutex_unlock(&mvm->mutex);
+
+	return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_responder_params_read(struct file *file,
+						   char __user *user_buf,
+						   size_t count, loff_t *ppos)
+{
+	struct ieee80211_vif *vif = file->private_data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	char buf[256];
+	int pos = 0;
+	const size_t bufsz = sizeof(buf);
+	struct iwl_tof_responder_config_cmd *cmd;
+
+	cmd = &mvm->tof_data.responder_cfg;
+
+	mutex_lock(&mvm->mutex);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "burst_period = %d\n",
+			 le16_to_cpu(cmd->burst_period));
+	pos += scnprintf(buf + pos, bufsz - pos, "burst_duration = %d\n",
+			 cmd->burst_duration);
+	pos += scnprintf(buf + pos, bufsz - pos, "bandwidth = %d\n",
+			 cmd->bandwidth);
+	pos += scnprintf(buf + pos, bufsz - pos, "channel_num = %d\n",
+			 cmd->channel_num);
+	pos += scnprintf(buf + pos, bufsz - pos, "ctrl_ch_position = 0x%x\n",
+			 cmd->ctrl_ch_position);
+	pos += scnprintf(buf + pos, bufsz - pos, "bssid = %pM\n",
+			 cmd->bssid);
+	pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %d\n",
+			 cmd->min_delta_ftm);
+	pos += scnprintf(buf + pos, bufsz - pos, "num_of_burst_exp = %d\n",
+			 cmd->num_of_burst_exp);
+	pos += scnprintf(buf + pos, bufsz - pos, "rate = %d\n", cmd->rate);
+	pos += scnprintf(buf + pos, bufsz - pos, "abort_responder = %d\n",
+			 cmd->abort_responder);
+	pos += scnprintf(buf + pos, bufsz - pos, "get_ch_est = %d\n",
+			 cmd->get_ch_est);
+	pos += scnprintf(buf + pos, bufsz - pos, "recv_sta_req_params = %d\n",
+			 cmd->recv_sta_req_params);
+	pos += scnprintf(buf + pos, bufsz - pos, "ftm_per_burst = %d\n",
+			 cmd->ftm_per_burst);
+	pos += scnprintf(buf + pos, bufsz - pos, "ftm_resp_ts_avail = %d\n",
+			 cmd->ftm_resp_ts_avail);
+	pos += scnprintf(buf + pos, bufsz - pos, "asap_mode = %d\n",
+			 cmd->asap_mode);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "tsf_timer_offset_msecs = %d\n",
+			 le16_to_cpu(cmd->tsf_timer_offset_msecs));
+	pos += scnprintf(buf + pos, bufsz - pos, "toa_offset = %d\n",
+			 le16_to_cpu(cmd->toa_offset));
+
+	mutex_unlock(&mvm->mutex);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_range_request_write(struct ieee80211_vif *vif,
+						 char *buf, size_t count,
+						 loff_t *ppos)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	u32 value;
+	int ret = 0;
+	char *data;
+
+	mutex_lock(&mvm->mutex);
+
+	data = iwl_dbgfs_is_match("request_id=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.range_req.request_id = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("initiator=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.range_req.initiator = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("one_sided_los_disable=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.range_req.one_sided_los_disable = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("req_timeout=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.range_req.req_timeout = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("report_policy=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.range_req.report_policy = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("macaddr_random=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.range_req.macaddr_random = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("num_of_ap=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.range_req.num_of_ap = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("macaddr_template=", buf);
+	if (data) {
+		u8 mac[ETH_ALEN];
+
+		if (!mac_pton(data, mac)) {
+			ret = -EINVAL;
+			goto out;
+		}
+		memcpy(mvm->tof_data.range_req.macaddr_template, mac, ETH_ALEN);
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("macaddr_mask=", buf);
+	if (data) {
+		u8 mac[ETH_ALEN];
+
+		if (!mac_pton(data, mac)) {
+			ret = -EINVAL;
+			goto out;
+		}
+		memcpy(mvm->tof_data.range_req.macaddr_mask, mac, ETH_ALEN);
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("ap=", buf);
+	if (data) {
+		struct iwl_tof_range_req_ap_entry ap = {};
+		int size = sizeof(struct iwl_tof_range_req_ap_entry);
+		u16 burst_period;
+		u8 *mac = ap.bssid;
+		unsigned int i;
+
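+		/*
+		 * Expected input: 20 whitespace/colon-separated fields
+		 * (sketch inferred from the sscanf() below): AP index,
+		 * channel_num, bandwidth, ctrl_ch_position, bssid as six
+		 * hex octets, measure_type, num_of_bursts, burst_period,
+		 * samples_per_burst, retries_per_sample, tsf_delta,
+		 * location_req, asap_mode, enable_dyn_ack, rssi.
+		 */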
+		if (sscanf(data, "%u %hhd %hhd %hhd "
+			   "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx "
+			   "%hhd %hhd %hd "
+			   "%hhd %hhd %d "
+			   "%hhx %hhd %hhd %hhd",
+			   &i, &ap.channel_num, &ap.bandwidth,
+			   &ap.ctrl_ch_position,
+			   mac, mac + 1, mac + 2, mac + 3, mac + 4, mac + 5,
+			   &ap.measure_type, &ap.num_of_bursts,
+			   &burst_period,
+			   &ap.samples_per_burst, &ap.retries_per_sample,
+			   &ap.tsf_delta, &ap.location_req, &ap.asap_mode,
+			   &ap.enable_dyn_ack, &ap.rssi) != 20) {
+			ret = -EINVAL;
+			goto out;
+		}
+		if (i >= IWL_MVM_TOF_MAX_APS) {
+			IWL_ERR(mvm, "Invalid AP index %d\n", i);
+			ret = -EINVAL;
+			goto out;
+		}
+
+		ap.burst_period = cpu_to_le16(burst_period);
+
+		memcpy(&mvm->tof_data.range_req.ap[i], &ap, size);
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("send_range_request=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0 && value)
+			ret = iwl_mvm_tof_range_request_cmd(mvm, vif);
+		goto out;
+	}
+
+	ret = -EINVAL;
+out:
+	mutex_unlock(&mvm->mutex);
+	return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_range_request_read(struct file *file,
+						char __user *user_buf,
+						size_t count, loff_t *ppos)
+{
+	struct ieee80211_vif *vif = file->private_data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	char buf[512];
+	int pos = 0;
+	const size_t bufsz = sizeof(buf);
+	struct iwl_tof_range_req_cmd *cmd;
+	int i;
+
+	cmd = &mvm->tof_data.range_req;
+
+	mutex_lock(&mvm->mutex);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "request_id= %d\n",
+			 cmd->request_id);
+	pos += scnprintf(buf + pos, bufsz - pos, "initiator= %d\n",
+			 cmd->initiator);
+	pos += scnprintf(buf + pos, bufsz - pos, "one_sided_los_disable = %d\n",
+			 cmd->one_sided_los_disable);
+	pos += scnprintf(buf + pos, bufsz - pos, "req_timeout= %d\n",
+			 cmd->req_timeout);
+	pos += scnprintf(buf + pos, bufsz - pos, "report_policy= %d\n",
+			 cmd->report_policy);
+	pos += scnprintf(buf + pos, bufsz - pos, "macaddr_random= %d\n",
+			 cmd->macaddr_random);
+	pos += scnprintf(buf + pos, bufsz - pos, "macaddr_template= %pM\n",
+			 cmd->macaddr_template);
+	pos += scnprintf(buf + pos, bufsz - pos, "macaddr_mask= %pM\n",
+			 cmd->macaddr_mask);
+	pos += scnprintf(buf + pos, bufsz - pos, "num_of_ap= %d\n",
+			 cmd->num_of_ap);
+	for (i = 0; i < cmd->num_of_ap; i++) {
+		struct iwl_tof_range_req_ap_entry *ap = &cmd->ap[i];
+
+		pos += scnprintf(buf + pos, bufsz - pos,
+				"ap %.2d: channel_num=%hhd bw=%hhd"
+				" control=%hhd bssid=%pM type=%hhd"
+				" num_of_bursts=%hhd burst_period=%d ftm=%hhd"
+				" retries=%hhd tsf_delta=%d"
+				" tsf_delta_direction=%hhd location_req=0x%hhx"
+				" asap=%hhd enable=%hhd rssi=%hhd\n",
+				i, ap->channel_num, ap->bandwidth,
+				ap->ctrl_ch_position, ap->bssid,
+				ap->measure_type, ap->num_of_bursts,
+				le16_to_cpu(ap->burst_period),
+				ap->samples_per_burst,
+				ap->retries_per_sample, ap->tsf_delta,
+				ap->tsf_delta_direction,
+				ap->location_req, ap->asap_mode,
+				ap->enable_dyn_ack, ap->rssi);
+	}
+
+	mutex_unlock(&mvm->mutex);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_range_req_ext_write(struct ieee80211_vif *vif,
+						 char *buf,
+						 size_t count, loff_t *ppos)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	u32 value;
+	int ret = 0;
+	char *data;
+
+	mutex_lock(&mvm->mutex);
+
+	data = iwl_dbgfs_is_match("tsf_timer_offset_msec=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.range_req_ext.tsf_timer_offset_msec =
+							cpu_to_le16(value);
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("min_delta_ftm=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.range_req_ext.min_delta_ftm = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("ftm_format_and_bw20M=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.range_req_ext.ftm_format_and_bw20M =
+									value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("ftm_format_and_bw40M=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.range_req_ext.ftm_format_and_bw40M =
+									value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("ftm_format_and_bw80M=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.range_req_ext.ftm_format_and_bw80M =
+									value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("send_range_req_ext=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0 && value)
+			ret = iwl_mvm_tof_range_request_ext_cmd(mvm, vif);
+		goto out;
+	}
+
+	ret = -EINVAL;
+out:
+	mutex_unlock(&mvm->mutex);
+	return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_range_req_ext_read(struct file *file,
+						char __user *user_buf,
+						size_t count, loff_t *ppos)
+{
+	struct ieee80211_vif *vif = file->private_data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	char buf[256];
+	int pos = 0;
+	const size_t bufsz = sizeof(buf);
+	struct iwl_tof_range_req_ext_cmd *cmd;
+
+	cmd = &mvm->tof_data.range_req_ext;
+
+	mutex_lock(&mvm->mutex);
+
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "tsf_timer_offset_msec = %d\n",
+			 le16_to_cpu(cmd->tsf_timer_offset_msec));
+	pos += scnprintf(buf + pos, bufsz - pos, "min_delta_ftm = %hhd\n",
+			 cmd->min_delta_ftm);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "ftm_format_and_bw20M = %hhd\n",
+			 cmd->ftm_format_and_bw20M);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "ftm_format_and_bw40M = %hhd\n",
+			 cmd->ftm_format_and_bw40M);
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "ftm_format_and_bw80M = %hhd\n",
+			 cmd->ftm_format_and_bw80M);
+
+	mutex_unlock(&mvm->mutex);
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_range_abort_write(struct ieee80211_vif *vif,
+					       char *buf,
+					       size_t count, loff_t *ppos)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	u32 value;
+	int abort_id, ret = 0;
+	char *data;
+
+	mutex_lock(&mvm->mutex);
+
+	data = iwl_dbgfs_is_match("abort_id=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0)
+			mvm->tof_data.last_abort_id = value;
+		goto out;
+	}
+
+	data = iwl_dbgfs_is_match("send_range_abort=", buf);
+	if (data) {
+		ret = kstrtou32(data, 10, &value);
+		if (ret == 0 && value) {
+			abort_id = mvm->tof_data.last_abort_id;
+			ret = iwl_mvm_tof_range_abort_cmd(mvm, abort_id);
+			goto out;
+		}
+	}
+
+out:
+	mutex_unlock(&mvm->mutex);
+	return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_tof_range_abort_read(struct file *file,
+					      char __user *user_buf,
+					      size_t count, loff_t *ppos)
+{
+	struct ieee80211_vif *vif = file->private_data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	char buf[32];
+	int pos = 0;
+	const size_t bufsz = sizeof(buf);
+	int last_abort_id;
+
+	mutex_lock(&mvm->mutex);
+	last_abort_id = mvm->tof_data.last_abort_id;
+	mutex_unlock(&mvm->mutex);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "last_abort_id = %d\n",
+			 last_abort_id);
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_tof_range_response_read(struct file *file,
+						 char __user *user_buf,
+						 size_t count, loff_t *ppos)
+{
+	struct ieee80211_vif *vif = file->private_data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	char *buf;
+	int pos = 0;
+	const size_t bufsz = sizeof(struct iwl_tof_range_rsp_ntfy) + 256;
+	struct iwl_tof_range_rsp_ntfy *cmd;
+	int i, ret;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	mutex_lock(&mvm->mutex);
+	cmd = &mvm->tof_data.range_resp;
+
+	pos += scnprintf(buf + pos, bufsz - pos, "request_id = %d\n",
+			 cmd->request_id);
+	pos += scnprintf(buf + pos, bufsz - pos, "status = %d\n",
+			 cmd->request_status);
+	pos += scnprintf(buf + pos, bufsz - pos, "last_in_batch = %d\n",
+			 cmd->last_in_batch);
+	pos += scnprintf(buf + pos, bufsz - pos, "num_of_aps = %d\n",
+			 cmd->num_of_aps);
+	for (i = 0; i < cmd->num_of_aps; i++) {
+		struct iwl_tof_range_rsp_ap_entry_ntfy *ap = &cmd->ap[i];
+
+		pos += scnprintf(buf + pos, bufsz - pos,
+				"ap %.2d: bssid=%pM status=%hhd bw=%hhd"
+				" rtt=%d rtt_var=%d rtt_spread=%d"
+				" rssi=%hhd  rssi_spread=%hhd"
+				" range=%d range_var=%d"
+				" time_stamp=%d\n",
+				i, ap->bssid, ap->measure_status,
+				ap->measure_bw,
+				ap->rtt, ap->rtt_variance, ap->rtt_spread,
+				ap->rssi, ap->rssi_spread, ap->range,
+				ap->range_variance, ap->timestamp);
+	}
+	mutex_unlock(&mvm->mutex);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf,
+					   size_t count, loff_t *ppos)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	u8 value;
+	int ret;
+
+	ret = kstrtou8(buf, 0, &value);
+	if (ret)
+		return ret;
+	if (value > 1)
+		return -EINVAL;
+
+	mutex_lock(&mvm->mutex);
+	iwl_mvm_update_low_latency(mvm, vif, value, LOW_LATENCY_DEBUGFS);
+	mutex_unlock(&mvm->mutex);
+
+	return count;
+}
+
+static ssize_t iwl_dbgfs_low_latency_read(struct file *file,
+					  char __user *user_buf,
+					  size_t count, loff_t *ppos)
+{
+	struct ieee80211_vif *vif = file->private_data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	char buf[30] = {};
+	int len;
+
+	len = scnprintf(buf, sizeof(buf) - 1,
+			"traffic=%d\ndbgfs=%d\nvcmd=%d\n",
+			!!(mvmvif->low_latency & LOW_LATENCY_TRAFFIC),
+			!!(mvmvif->low_latency & LOW_LATENCY_DEBUGFS),
+			!!(mvmvif->low_latency & LOW_LATENCY_VCMD));
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t iwl_dbgfs_uapsd_misbehaving_read(struct file *file,
+						char __user *user_buf,
+						size_t count, loff_t *ppos)
+{
+	struct ieee80211_vif *vif = file->private_data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	char buf[20];
+	int len;
+
+	len = sprintf(buf, "%pM\n", mvmvif->uapsd_misbehaving_bssid);
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t iwl_dbgfs_uapsd_misbehaving_write(struct ieee80211_vif *vif,
+						 char *buf, size_t count,
+						 loff_t *ppos)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	bool ret;
+
+	mutex_lock(&mvm->mutex);
+	ret = mac_pton(buf, mvmvif->uapsd_misbehaving_bssid);
+	mutex_unlock(&mvm->mutex);
+
+	return ret ? count : -EINVAL;
+}
+
+static ssize_t iwl_dbgfs_rx_phyinfo_write(struct ieee80211_vif *vif, char *buf,
+					  size_t count, loff_t *ppos)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	struct ieee80211_chanctx_conf *chanctx_conf;
+	struct iwl_mvm_phy_ctxt *phy_ctxt;
+	struct cfg80211_chan_def min_def;
+	u8 chains_static, chains_dynamic;
+	u16 value;
+	int ret;
+
+	ret = kstrtou16(buf, 0, &value);
+	if (ret)
+		return ret;
+
+	mutex_lock(&mvm->mutex);
+	rcu_read_lock();
+
+	chanctx_conf = rcu_dereference(vif->chanctx_conf);
+	/* make sure the channel context is assigned */
+	if (!chanctx_conf) {
+		rcu_read_unlock();
+		mutex_unlock(&mvm->mutex);
+		return -EINVAL;
+	}
+
+	phy_ctxt = &mvm->phy_ctxts[*(u16 *)chanctx_conf->drv_priv];
+	rcu_read_unlock();
+
+	mvm->dbgfs_rx_phyinfo = value;
+
+	ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chanctx_conf->min_def,
+				       chanctx_conf->rx_chains_static,
+				       chanctx_conf->rx_chains_dynamic);
+	mutex_unlock(&mvm->mutex);
+
+	return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_rx_phyinfo_read(struct file *file,
+					 char __user *user_buf,
+					 size_t count, loff_t *ppos)
+{
+	struct ieee80211_vif *vif = file->private_data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	char buf[8];
+	int len;
+
+	len = scnprintf(buf, sizeof(buf), "0x%04x\n",
+			mvmvif->mvm->dbgfs_rx_phyinfo);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static void iwl_dbgfs_quota_check(void *data, u8 *mac,
+				  struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	int *ret = data;
+
+	if (mvmvif->dbgfs_quota_min)
+		*ret = -EINVAL;
+}
+
+static ssize_t iwl_dbgfs_quota_min_write(struct ieee80211_vif *vif, char *buf,
+					 size_t count, loff_t *ppos)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	u16 value;
+	int ret;
+
+	ret = kstrtou16(buf, 0, &value);
+	if (ret)
+		return ret;
+
+	if (value > 95)
+		return -EINVAL;
+
+	mutex_lock(&mvm->mutex);
+
+	mvmvif->dbgfs_quota_min = 0;
+	ieee80211_iterate_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+				     iwl_dbgfs_quota_check, &ret);
+	if (ret == 0) {
+		mvmvif->dbgfs_quota_min = value;
+		iwl_mvm_update_quotas(mvm, false, NULL);
+	}
+	mutex_unlock(&mvm->mutex);
+
+	return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_quota_min_read(struct file *file,
+					char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct ieee80211_vif *vif = file->private_data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	char buf[10];
+	int len;
+
+	len = scnprintf(buf, sizeof(buf), "%d\n", mvmvif->dbgfs_quota_min);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const char * const chanwidths[] = {
+	[NL80211_CHAN_WIDTH_20_NOHT] = "noht",
+	[NL80211_CHAN_WIDTH_20] = "ht20",
+	[NL80211_CHAN_WIDTH_40] = "ht40",
+	[NL80211_CHAN_WIDTH_80] = "vht80",
+	[NL80211_CHAN_WIDTH_80P80] = "vht80p80",
+	[NL80211_CHAN_WIDTH_160] = "vht160",
+};
+
+#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
+	_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
+#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
+	_MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
+#define MVM_DEBUGFS_ADD_FILE_VIF(name, parent, mode) do {		\
+		if (!debugfs_create_file(#name, mode, parent, vif,	\
+					 &iwl_dbgfs_##name##_ops))	\
+			goto err;					\
+	} while (0)
+
+MVM_DEBUGFS_READ_FILE_OPS(mac_params);
+MVM_DEBUGFS_READ_FILE_OPS(tx_pwr_lmt);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_enable, 32);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_request, 512);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_req_ext, 32);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32);
+MVM_DEBUGFS_READ_FILE_OPS(tof_range_response);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32);
+MVM_DEBUGFS_READ_FILE_OPS(os_device_timediff);
+
+
+void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+	struct dentry *dbgfs_dir = vif->debugfs_dir;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	char buf[100];
+
+	/*
+	 * Check if the debugfs directory already exists before creating it.
+	 * This may happen when, for example, resetting hw or suspend-resume.
+	 */
+	if (!dbgfs_dir || mvmvif->dbgfs_dir)
+		return;
+
+	mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir);
+
+	if (!mvmvif->dbgfs_dir) {
+		IWL_ERR(mvm, "Failed to create debugfs directory under %pd\n",
+			dbgfs_dir);
+		return;
+	}
+
+	if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
+	    vif->type == NL80211_IFTYPE_STATION)
+		MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, 0600);
+
+	MVM_DEBUGFS_ADD_FILE_VIF(tx_pwr_lmt, mvmvif->dbgfs_dir, 0400);
+	MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, 0400);
+	MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir, 0600);
+	MVM_DEBUGFS_ADD_FILE_VIF(uapsd_misbehaving, mvmvif->dbgfs_dir, 0600);
+	MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir, 0600);
+	MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir, 0600);
+	MVM_DEBUGFS_ADD_FILE_VIF(os_device_timediff, mvmvif->dbgfs_dir, 0400);
+
+	if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
+	    mvmvif == mvm->bf_allowed_vif)
+		MVM_DEBUGFS_ADD_FILE_VIF(bf_params, mvmvif->dbgfs_dir, 0600);
+
+	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT) &&
+	    !vif->p2p && (vif->type != NL80211_IFTYPE_P2P_DEVICE)) {
+		if (IWL_MVM_TOF_IS_RESPONDER && vif->type == NL80211_IFTYPE_AP)
+			MVM_DEBUGFS_ADD_FILE_VIF(tof_responder_params,
+						 mvmvif->dbgfs_dir, 0600);
+
+		MVM_DEBUGFS_ADD_FILE_VIF(tof_range_request, mvmvif->dbgfs_dir,
+					 0600);
+		MVM_DEBUGFS_ADD_FILE_VIF(tof_range_req_ext, mvmvif->dbgfs_dir,
+					 0600);
+		MVM_DEBUGFS_ADD_FILE_VIF(tof_enable, mvmvif->dbgfs_dir,
+					 0600);
+		MVM_DEBUGFS_ADD_FILE_VIF(tof_range_abort, mvmvif->dbgfs_dir,
+					 0600);
+		MVM_DEBUGFS_ADD_FILE_VIF(tof_range_response, mvmvif->dbgfs_dir,
+					 0400);
+	}
+
+	/*
+	 * Create symlink for convenience pointing to interface specific
+	 * debugfs entries for the driver. For example, under
+	 * /sys/kernel/debug/iwlwifi/0000\:02\:00.0/iwlmvm/
+	 * find
+	 * netdev:wlan0 -> ../../../ieee80211/phy0/netdev:wlan0/iwlmvm/
+	 */
+	snprintf(buf, sizeof(buf), "../../../%pd3/%pd",
+		 dbgfs_dir, mvmvif->dbgfs_dir);
+
+	mvmvif->dbgfs_slink = debugfs_create_symlink(dbgfs_dir->d_name.name,
+						     mvm->debugfs_dir, buf);
+	if (!mvmvif->dbgfs_slink)
+		IWL_ERR(mvm, "Can't create debugfs symbolic link under %pd\n",
+			dbgfs_dir);
+	return;
+err:
+	IWL_ERR(mvm, "Can't create debugfs entity\n");
+}
+
+void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	debugfs_remove(mvmvif->dbgfs_slink);
+	mvmvif->dbgfs_slink = NULL;
+
+	debugfs_remove_recursive(mvmvif->dbgfs_dir);
+	mvmvif->dbgfs_dir = NULL;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
new file mode 100644
index 0000000..05b7741
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -0,0 +1,2067 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/vmalloc.h>
+#include <linux/ieee80211.h>
+#include <linux/netdevice.h>
+
+#include "mvm.h"
+#include "sta.h"
+#include "iwl-io.h"
+#include "debugfs.h"
+#include "fw/error-dump.h"
+
+static ssize_t iwl_dbgfs_ctdp_budget_read(struct file *file,
+					  char __user *user_buf,
+					  size_t count, loff_t *ppos)
+{
+	struct iwl_mvm *mvm = file->private_data;
+	char buf[16];
+	int pos, budget;
+
+	if (!iwl_mvm_is_ctdp_supported(mvm))
+		return -EOPNOTSUPP;
+
+	if (!iwl_mvm_firmware_running(mvm) ||
+	    mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)
+		return -EIO;
+
+	mutex_lock(&mvm->mutex);
+	budget = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_REPORT, 0);
+	mutex_unlock(&mvm->mutex);
+
+	if (budget < 0)
+		return budget;
+
+	pos = scnprintf(buf, sizeof(buf), "%d\n", budget);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_stop_ctdp_write(struct iwl_mvm *mvm, char *buf,
+					 size_t count, loff_t *ppos)
+{
+	int ret;
+
+	if (!iwl_mvm_is_ctdp_supported(mvm))
+		return -EOPNOTSUPP;
+
+	if (!iwl_mvm_firmware_running(mvm) ||
+	    mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)
+		return -EIO;
+
+	mutex_lock(&mvm->mutex);
+	ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_STOP, 0);
+	mutex_unlock(&mvm->mutex);
+
+	return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_force_ctkill_write(struct iwl_mvm *mvm, char *buf,
+					    size_t count, loff_t *ppos)
+{
+	if (!iwl_mvm_firmware_running(mvm) ||
+	    mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)
+		return -EIO;
+
+	iwl_mvm_enter_ctkill(mvm);
+
+	return count;
+}
+
+static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
+					size_t count, loff_t *ppos)
+{
+	int ret;
+	u32 flush_arg;
+
+	if (!iwl_mvm_firmware_running(mvm) ||
+	    mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)
+		return -EIO;
+
+	if (kstrtou32(buf, 0, &flush_arg))
+		return -EINVAL;
+
+	if (iwl_mvm_has_new_tx_api(mvm)) {
+		IWL_DEBUG_TX_QUEUES(mvm,
+				    "FLUSHING all TID queues on sta_id = %d\n",
+				    flush_arg);
+		mutex_lock(&mvm->mutex);
+		ret = iwl_mvm_flush_sta_tids(mvm, flush_arg, 0xFF, 0) ?: count;
+		mutex_unlock(&mvm->mutex);
+		return ret;
+	}
+
+	IWL_DEBUG_TX_QUEUES(mvm, "FLUSHING queues mask to flush = 0x%x\n",
+			    flush_arg);
+
+	mutex_lock(&mvm->mutex);
+	ret = iwl_mvm_flush_tx_path(mvm, flush_arg, 0) ?: count;
+	mutex_unlock(&mvm->mutex);
+
+	return ret;
+}
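+
+/*
+ * Usage sketch (path and value are illustrative, not from the original
+ * source): the written number is interpreted as a station id on devices
+ * with the new TX API and as a TX queue bitmask otherwise, e.g.:
+ *   echo 0x5 > /sys/kernel/debug/iwlwifi/0000:02:00.0/iwlmvm/tx_flush
+ */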
+
+static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf,
+					 size_t count, loff_t *ppos)
+{
+	struct iwl_mvm_sta *mvmsta;
+	int sta_id, drain, ret;
+
+	if (!iwl_mvm_firmware_running(mvm) ||
+	    mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)
+		return -EIO;
+
+	if (sscanf(buf, "%d %d", &sta_id, &drain) != 2)
+		return -EINVAL;
+	if (sta_id < 0 || sta_id >= IWL_MVM_STATION_COUNT)
+		return -EINVAL;
+	if (drain < 0 || drain > 1)
+		return -EINVAL;
+
+	mutex_lock(&mvm->mutex);
+
+	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
+
+	if (!mvmsta)
+		ret = -ENOENT;
+	else
+		ret = iwl_mvm_drain_sta(mvm, mvmsta, drain) ?: count;
+
+	mutex_unlock(&mvm->mutex);
+
+	return ret;
+}
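+
+/*
+ * Usage sketch (illustrative): two integers, "<sta_id> <0|1>", so
+ * "echo '2 1' > sta_drain" presumably starts draining station 2 and
+ * "echo '2 0' > sta_drain" stops it.
+ */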
+
+static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf,
+				   size_t count, loff_t *ppos)
+{
+	struct iwl_mvm *mvm = file->private_data;
+	const struct fw_img *img;
+	unsigned int ofs, len;
+	size_t ret;
+	u8 *ptr;
+
+	if (!iwl_mvm_firmware_running(mvm))
+		return -EINVAL;
+
+	/* default is to dump the entire data segment */
+	img = &mvm->fw->img[mvm->fwrt.cur_fw_img];
+	ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
+	len = img->sec[IWL_UCODE_SECTION_DATA].len;
+
+	if (mvm->dbgfs_sram_len) {
+		ofs = mvm->dbgfs_sram_offset;
+		len = mvm->dbgfs_sram_len;
+	}
+
+	ptr = kzalloc(len, GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	iwl_trans_read_mem_bytes(mvm->trans, ofs, ptr, len);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, ptr, len);
+
+	kfree(ptr);
+
+	return ret;
+}
+
+static ssize_t iwl_dbgfs_sram_write(struct iwl_mvm *mvm, char *buf,
+				    size_t count, loff_t *ppos)
+{
+	const struct fw_img *img;
+	u32 offset, len;
+	u32 img_offset, img_len;
+
+	if (!iwl_mvm_firmware_running(mvm))
+		return -EINVAL;
+
+	img = &mvm->fw->img[mvm->fwrt.cur_fw_img];
+	img_offset = img->sec[IWL_UCODE_SECTION_DATA].offset;
+	img_len = img->sec[IWL_UCODE_SECTION_DATA].len;
+
+	if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
+		if ((offset & 0x3) || (len & 0x3))
+			return -EINVAL;
+
+		if (offset + len > img_offset + img_len)
+			return -EINVAL;
+
+		mvm->dbgfs_sram_offset = offset;
+		mvm->dbgfs_sram_len = len;
+	} else {
+		mvm->dbgfs_sram_offset = 0;
+		mvm->dbgfs_sram_len = 0;
+	}
+
+	return count;
+}
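+
+/*
+ * Usage sketch (illustrative): "<offset>,<len>" in hex selects a window
+ * for subsequent sram reads; both values must be 4-byte aligned and the
+ * window must not extend past the end of the data segment. Any other
+ * input resets the default (whole data segment), e.g.:
+ *   echo "800000,100" > /sys/kernel/debug/iwlwifi/0000:02:00.0/iwlmvm/sram
+ */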
+
+static ssize_t iwl_dbgfs_set_nic_temperature_read(struct file *file,
+						  char __user *user_buf,
+						  size_t count, loff_t *ppos)
+{
+	struct iwl_mvm *mvm = file->private_data;
+	char buf[16];
+	int pos;
+
+	if (!mvm->temperature_test)
+		pos = scnprintf(buf, sizeof(buf), "disabled\n");
+	else
+		pos = scnprintf(buf, sizeof(buf), "%d\n", mvm->temperature);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+/*
+ * Set NIC Temperature
+ * Causes the driver to ignore the actual NIC temperature reported by the FW.
+ * Enable: any value between IWL_MVM_DEBUG_SET_TEMPERATURE_MIN and
+ * IWL_MVM_DEBUG_SET_TEMPERATURE_MAX
+ * Disable: IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE
+ */
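+/*
+ * Usage sketch (illustrative; assumes 75 lies within the MIN/MAX range
+ * above):
+ *   echo 75 > /sys/kernel/debug/iwlwifi/0000:02:00.0/iwlmvm/set_nic_temperature
+ * Writing the IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE value restores the
+ * firmware-reported temperature.
+ */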
+static ssize_t iwl_dbgfs_set_nic_temperature_write(struct iwl_mvm *mvm,
+						   char *buf, size_t count,
+						   loff_t *ppos)
+{
+	int temperature;
+
+	if (!iwl_mvm_firmware_running(mvm) && !mvm->temperature_test)
+		return -EIO;
+
+	if (kstrtoint(buf, 10, &temperature))
+		return -EINVAL;
+	/* not a legal temperature */
+	if ((temperature > IWL_MVM_DEBUG_SET_TEMPERATURE_MAX &&
+	     temperature != IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE) ||
+	    temperature < IWL_MVM_DEBUG_SET_TEMPERATURE_MIN)
+		return -EINVAL;
+
+	mutex_lock(&mvm->mutex);
+	if (temperature == IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE) {
+		if (!mvm->temperature_test)
+			goto out;
+
+		mvm->temperature_test = false;
+		/* Since we can't read the temp while awake, just set
+		 * it to zero until we get the next RX stats from the
+		 * firmware.
+		 */
+		mvm->temperature = 0;
+	} else {
+		mvm->temperature_test = true;
+		mvm->temperature = temperature;
+	}
+	IWL_DEBUG_TEMP(mvm, "%sabling debug set temperature (temp = %d)\n",
+		       mvm->temperature_test ? "En" : "Dis",
+		       mvm->temperature);
+	/* handle the temperature change */
+	iwl_mvm_tt_handler(mvm);
+
+out:
+	mutex_unlock(&mvm->mutex);
+
+	return count;
+}
+
+static ssize_t iwl_dbgfs_nic_temp_read(struct file *file,
+				       char __user *user_buf,
+				       size_t count, loff_t *ppos)
+{
+	struct iwl_mvm *mvm = file->private_data;
+	char buf[16];
+	int pos, ret;
+	s32 temp;
+
+	if (!iwl_mvm_firmware_running(mvm))
+		return -EIO;
+
+	mutex_lock(&mvm->mutex);
+	ret = iwl_mvm_get_temp(mvm, &temp);
+	mutex_unlock(&mvm->mutex);
+
+	if (ret)
+		return -EIO;
+
+	pos = scnprintf(buf, sizeof(buf), "%d\n", temp);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+#ifdef CONFIG_ACPI
+static ssize_t iwl_dbgfs_sar_geo_profile_read(struct file *file,
+					      char __user *user_buf,
+					      size_t count, loff_t *ppos)
+{
+	struct iwl_mvm *mvm = file->private_data;
+	char buf[256];
+	int pos = 0;
+	int bufsz = sizeof(buf);
+	int tbl_idx;
+	u8 *value;
+
+	if (!iwl_mvm_firmware_running(mvm))
+		return -EIO;
+
+	mutex_lock(&mvm->mutex);
+	tbl_idx = iwl_mvm_get_sar_geo_profile(mvm);
+	if (tbl_idx < 0) {
+		mutex_unlock(&mvm->mutex);
+		return tbl_idx;
+	}
+
+	if (!tbl_idx) {
+		pos = scnprintf(buf, bufsz,
+				"SAR geographic profile disabled\n");
+	} else {
+		value = &mvm->geo_profiles[tbl_idx - 1].values[0];
+
+		pos += scnprintf(buf + pos, bufsz - pos,
+				 "Use geographic profile %d\n", tbl_idx);
+		pos += scnprintf(buf + pos, bufsz - pos,
+				 "2.4GHz:\n\tChain A offset: %hhd dBm\n\tChain B offset: %hhd dBm\n\tmax tx power: %hhd dBm\n",
+				 value[1], value[2], value[0]);
+		pos += scnprintf(buf + pos, bufsz - pos,
+				 "5.2GHz:\n\tChain A offset: %hhd dBm\n\tChain B offset: %hhd dBm\n\tmax tx power: %hhd dBm\n",
+				 value[4], value[5], value[3]);
+	}
+	mutex_unlock(&mvm->mutex);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+#endif
+
+static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
+				       size_t count, loff_t *ppos)
+{
+	struct iwl_mvm *mvm = file->private_data;
+	struct ieee80211_sta *sta;
+	char buf[400];
+	int i, pos = 0, bufsz = sizeof(buf);
+
+	mutex_lock(&mvm->mutex);
+
+	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+		pos += scnprintf(buf + pos, bufsz - pos, "%.2d: ", i);
+		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+						lockdep_is_held(&mvm->mutex));
+		if (!sta)
+			pos += scnprintf(buf + pos, bufsz - pos, "N/A\n");
+		else if (IS_ERR(sta))
+			pos += scnprintf(buf + pos, bufsz - pos, "%ld\n",
+					 PTR_ERR(sta));
+		else
+			pos += scnprintf(buf + pos, bufsz - pos, "%pM\n",
+					 sta->addr);
+	}
+
+	mutex_unlock(&mvm->mutex);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_rs_data_read(struct file *file, char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	struct ieee80211_sta *sta = file->private_data;
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
+	struct iwl_mvm *mvm = lq_sta->pers.drv;
+	static const size_t bufsz = 2048;
+	char *buff;
+	int desc = 0;
+	ssize_t ret;
+
+	buff = kmalloc(bufsz, GFP_KERNEL);
+	if (!buff)
+		return -ENOMEM;
+
+	mutex_lock(&mvm->mutex);
+
+	desc += scnprintf(buff + desc, bufsz - desc, "sta_id %d\n",
+			  lq_sta->pers.sta_id);
+	desc += scnprintf(buff + desc, bufsz - desc,
+			  "fixed rate 0x%X\n",
+			  lq_sta->pers.dbg_fixed_rate);
+	desc += scnprintf(buff + desc, bufsz - desc,
+			  "A-MPDU size limit %d\n",
+			  lq_sta->pers.dbg_agg_frame_count_lim);
+	desc += scnprintf(buff + desc, bufsz - desc,
+			  "valid_tx_ant %s%s%s\n",
+		(iwl_mvm_get_valid_tx_ant(mvm) & ANT_A) ? "ANT_A," : "",
+		(iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : "",
+		(iwl_mvm_get_valid_tx_ant(mvm) & ANT_C) ? "ANT_C" : "");
+	desc += scnprintf(buff + desc, bufsz - desc,
+			  "last tx rate=0x%X ",
+			  lq_sta->last_rate_n_flags);
+
+	desc += rs_pretty_print_rate(buff + desc, bufsz - desc,
+				     lq_sta->last_rate_n_flags);
+	mutex_unlock(&mvm->mutex);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+	kfree(buff);
+	return ret;
+}
+
+static ssize_t iwl_dbgfs_disable_power_off_read(struct file *file,
+						char __user *user_buf,
+						size_t count, loff_t *ppos)
+{
+	struct iwl_mvm *mvm = file->private_data;
+	char buf[64];
+	int bufsz = sizeof(buf);
+	int pos = 0;
+
+	pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off_d0=%d\n",
+			 mvm->disable_power_off);
+	pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off_d3=%d\n",
+			 mvm->disable_power_off_d3);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf,
+						 size_t count, loff_t *ppos)
+{
+	int ret, val;
+
+	if (!iwl_mvm_firmware_running(mvm))
+		return -EIO;
+
+	if (!strncmp("disable_power_off_d0=", buf, 21)) {
+		if (sscanf(buf + 21, "%d", &val) != 1)
+			return -EINVAL;
+		mvm->disable_power_off = val;
+	} else if (!strncmp("disable_power_off_d3=", buf, 21)) {
+		if (sscanf(buf + 21, "%d", &val) != 1)
+			return -EINVAL;
+		mvm->disable_power_off_d3 = val;
+	} else {
+		return -EINVAL;
+	}
+
+	mutex_lock(&mvm->mutex);
+	ret = iwl_mvm_power_update_device(mvm);
+	mutex_unlock(&mvm->mutex);
+
+	return ret ?: count;
+}
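+
+/*
+ * Usage sketch (illustrative; the debugfs file is presumably named
+ * "disable_power_off"):
+ *   echo disable_power_off_d0=1 > disable_power_off
+ * which updates the D0 setting and re-sends the device power command.
+ */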
+
+static
+int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf,
+			   int pos, int bufsz)
+{
+	pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n");
+
+	BT_MBOX_PRINT(0, LE_SLAVE_LAT, false);
+	BT_MBOX_PRINT(0, LE_PROF1, false);
+	BT_MBOX_PRINT(0, LE_PROF2, false);
+	BT_MBOX_PRINT(0, LE_PROF_OTHER, false);
+	BT_MBOX_PRINT(0, CHL_SEQ_N, false);
+	BT_MBOX_PRINT(0, INBAND_S, false);
+	BT_MBOX_PRINT(0, LE_MIN_RSSI, false);
+	BT_MBOX_PRINT(0, LE_SCAN, false);
+	BT_MBOX_PRINT(0, LE_ADV, false);
+	BT_MBOX_PRINT(0, LE_MAX_TX_POWER, false);
+	BT_MBOX_PRINT(0, OPEN_CON_1, true);
+
+	pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw1:\n");
+
+	BT_MBOX_PRINT(1, BR_MAX_TX_POWER, false);
+	BT_MBOX_PRINT(1, IP_SR, false);
+	BT_MBOX_PRINT(1, LE_MSTR, false);
+	BT_MBOX_PRINT(1, AGGR_TRFC_LD, false);
+	BT_MBOX_PRINT(1, MSG_TYPE, false);
+	BT_MBOX_PRINT(1, SSN, true);
+
+	pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw2:\n");
+
+	BT_MBOX_PRINT(2, SNIFF_ACT, false);
+	BT_MBOX_PRINT(2, PAG, false);
+	BT_MBOX_PRINT(2, INQUIRY, false);
+	BT_MBOX_PRINT(2, CONN, false);
+	BT_MBOX_PRINT(2, SNIFF_INTERVAL, false);
+	BT_MBOX_PRINT(2, DISC, false);
+	BT_MBOX_PRINT(2, SCO_TX_ACT, false);
+	BT_MBOX_PRINT(2, SCO_RX_ACT, false);
+	BT_MBOX_PRINT(2, ESCO_RE_TX, false);
+	BT_MBOX_PRINT(2, SCO_DURATION, true);
+
+	pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw3:\n");
+
+	BT_MBOX_PRINT(3, SCO_STATE, false);
+	BT_MBOX_PRINT(3, SNIFF_STATE, false);
+	BT_MBOX_PRINT(3, A2DP_STATE, false);
+	BT_MBOX_PRINT(3, A2DP_SRC, false);
+	BT_MBOX_PRINT(3, ACL_STATE, false);
+	BT_MBOX_PRINT(3, MSTR_STATE, false);
+	BT_MBOX_PRINT(3, OBX_STATE, false);
+	BT_MBOX_PRINT(3, OPEN_CON_2, false);
+	BT_MBOX_PRINT(3, TRAFFIC_LOAD, false);
+	BT_MBOX_PRINT(3, CHL_SEQN_LSB, false);
+	BT_MBOX_PRINT(3, INBAND_P, false);
+	BT_MBOX_PRINT(3, MSG_TYPE_2, false);
+	BT_MBOX_PRINT(3, SSN_2, false);
+	BT_MBOX_PRINT(3, UPDATE_REQUEST, true);
+
+	return pos;
+}
+
+static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
+				       size_t count, loff_t *ppos)
+{
+	struct iwl_mvm *mvm = file->private_data;
+	struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif;
+	char *buf;
+	int ret, pos = 0, bufsz = 1024;
+
+	buf = kmalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	mutex_lock(&mvm->mutex);
+
+	pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "bt_ci_compliance = %d\n",
+			 notif->bt_ci_compliance);
+	pos += scnprintf(buf + pos, bufsz - pos, "primary_ch_lut = %d\n",
+			 le32_to_cpu(notif->primary_ch_lut));
+	pos += scnprintf(buf + pos, bufsz - pos, "secondary_ch_lut = %d\n",
+			 le32_to_cpu(notif->secondary_ch_lut));
+	pos += scnprintf(buf + pos,
+			 bufsz - pos, "bt_activity_grading = %d\n",
+			 le32_to_cpu(notif->bt_activity_grading));
+	pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n",
+			 notif->rrc_status & 0xF);
+	pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n",
+			 notif->ttc_status & 0xF);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "sync_sco = %d\n",
+			 IWL_MVM_BT_COEX_SYNC2SCO);
+	pos += scnprintf(buf + pos, bufsz - pos, "mplut = %d\n",
+			 IWL_MVM_BT_COEX_MPLUT);
+
+	mutex_unlock(&mvm->mutex);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+
+	return ret;
+}
+#undef BT_MBOX_PRINT
+
+static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	struct iwl_mvm *mvm = file->private_data;
+	struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd;
+	char buf[256];
+	int bufsz = sizeof(buf);
+	int pos = 0;
+
+	mutex_lock(&mvm->mutex);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "Channel inhibition CMD\n");
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "\tPrimary Channel Bitmap 0x%016llx\n",
+			 le64_to_cpu(cmd->bt_primary_ci));
+	pos += scnprintf(buf + pos, bufsz - pos,
+			 "\tSecondary Channel Bitmap 0x%016llx\n",
+			 le64_to_cpu(cmd->bt_secondary_ci));
+
+	mutex_unlock(&mvm->mutex);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+iwl_dbgfs_bt_tx_prio_write(struct iwl_mvm *mvm, char *buf,
+			   size_t count, loff_t *ppos)
+{
+	u32 bt_tx_prio;
+
+	if (sscanf(buf, "%u", &bt_tx_prio) != 1)
+		return -EINVAL;
+	if (bt_tx_prio > 4)
+		return -EINVAL;
+
+	mvm->bt_tx_prio = bt_tx_prio;
+
+	return count;
+}
+
+static ssize_t
+iwl_dbgfs_bt_force_ant_write(struct iwl_mvm *mvm, char *buf,
+			     size_t count, loff_t *ppos)
+{
+	static const char * const modes_str[BT_FORCE_ANT_MAX] = {
+		[BT_FORCE_ANT_DIS] = "dis",
+		[BT_FORCE_ANT_AUTO] = "auto",
+		[BT_FORCE_ANT_BT] = "bt",
+		[BT_FORCE_ANT_WIFI] = "wifi",
+	};
+	int ret, bt_force_ant_mode;
+
+	for (bt_force_ant_mode = 0;
+	     bt_force_ant_mode < ARRAY_SIZE(modes_str);
+	     bt_force_ant_mode++) {
+		if (!strcmp(buf, modes_str[bt_force_ant_mode]))
+			break;
+	}
+
+	if (bt_force_ant_mode >= ARRAY_SIZE(modes_str))
+		return -EINVAL;
+
+	ret = 0;
+	mutex_lock(&mvm->mutex);
+	if (mvm->bt_force_ant_mode == bt_force_ant_mode)
+		goto out;
+
+	mvm->bt_force_ant_mode = bt_force_ant_mode;
+	IWL_DEBUG_COEX(mvm, "Force mode: %s\n",
+		       modes_str[mvm->bt_force_ant_mode]);
+
+	if (iwl_mvm_firmware_running(mvm))
+		ret = iwl_mvm_send_bt_init_conf(mvm);
+	else
+		ret = 0;
+
+out:
+	mutex_unlock(&mvm->mutex);
+	return ret ?: count;
+}
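+
+/*
+ * Usage sketch: one of "dis", "auto", "bt" or "wifi". The strcmp() above
+ * needs an exact match, so assuming the write wrapper does not strip a
+ * trailing newline, write without one, e.g. "echo -n bt > bt_force_ant".
+ */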
+
+static ssize_t iwl_dbgfs_fw_ver_read(struct file *file, char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	struct iwl_mvm *mvm = file->private_data;
+	char *buff, *pos, *endpos;
+	static const size_t bufsz = 1024;
+	int ret;
+
+	buff = kmalloc(bufsz, GFP_KERNEL);
+	if (!buff)
+		return -ENOMEM;
+
+	pos = buff;
+	endpos = pos + bufsz;
+
+	pos += scnprintf(pos, endpos - pos, "FW prefix: %s\n",
+			 mvm->trans->cfg->fw_name_pre);
+	pos += scnprintf(pos, endpos - pos, "FW: %s\n",
+			 mvm->fwrt.fw->human_readable);
+	pos += scnprintf(pos, endpos - pos, "Device: %s\n",
+			 mvm->fwrt.trans->cfg->name);
+	pos += scnprintf(pos, endpos - pos, "Bus: %s\n",
+			 mvm->fwrt.dev->bus->name);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
+	kfree(buff);
+
+	return ret;
+}
+
+#define PRINT_STATS_LE32(_struct, _memb)				\
+			 pos += scnprintf(buf + pos, bufsz - pos,	\
+					  fmt_table, #_memb,		\
+					  le32_to_cpu(_struct->_memb))
+
+static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
+					  char __user *user_buf, size_t count,
+					  loff_t *ppos)
+{
+	struct iwl_mvm *mvm = file->private_data;
+	static const char *fmt_table = "\t%-30s %10u\n";
+	static const char *fmt_header = "%-32s\n";
+	int pos = 0;
+	char *buf;
+	int ret;
+	size_t bufsz;
+
+	if (iwl_mvm_has_new_rx_stats_api(mvm))
+		bufsz = ((sizeof(struct mvm_statistics_rx) /
+			  sizeof(__le32)) * 43) + (4 * 33) + 1;
+	else
+		/* 43 = size of each data line; 33 = size of each header */
+		bufsz = ((sizeof(struct mvm_statistics_rx_v3) /
+			  sizeof(__le32)) * 43) + (4 * 33) + 1;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	mutex_lock(&mvm->mutex);
+
+	if (iwl_mvm_firmware_running(mvm))
+		iwl_mvm_request_statistics(mvm, false);
+
+	pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
+			 "Statistics_Rx - OFDM");
+	if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
+		struct mvm_statistics_rx_phy_v2 *ofdm = &mvm->rx_stats_v3.ofdm;
+
+		PRINT_STATS_LE32(ofdm, ina_cnt);
+		PRINT_STATS_LE32(ofdm, fina_cnt);
+		PRINT_STATS_LE32(ofdm, plcp_err);
+		PRINT_STATS_LE32(ofdm, crc32_err);
+		PRINT_STATS_LE32(ofdm, overrun_err);
+		PRINT_STATS_LE32(ofdm, early_overrun_err);
+		PRINT_STATS_LE32(ofdm, crc32_good);
+		PRINT_STATS_LE32(ofdm, false_alarm_cnt);
+		PRINT_STATS_LE32(ofdm, fina_sync_err_cnt);
+		PRINT_STATS_LE32(ofdm, sfd_timeout);
+		PRINT_STATS_LE32(ofdm, fina_timeout);
+		PRINT_STATS_LE32(ofdm, unresponded_rts);
+		PRINT_STATS_LE32(ofdm, rxe_frame_lmt_overrun);
+		PRINT_STATS_LE32(ofdm, sent_ack_cnt);
+		PRINT_STATS_LE32(ofdm, sent_cts_cnt);
+		PRINT_STATS_LE32(ofdm, sent_ba_rsp_cnt);
+		PRINT_STATS_LE32(ofdm, dsp_self_kill);
+		PRINT_STATS_LE32(ofdm, mh_format_err);
+		PRINT_STATS_LE32(ofdm, re_acq_main_rssi_sum);
+		PRINT_STATS_LE32(ofdm, reserved);
+	} else {
+		struct mvm_statistics_rx_phy *ofdm = &mvm->rx_stats.ofdm;
+
+		PRINT_STATS_LE32(ofdm, unresponded_rts);
+		PRINT_STATS_LE32(ofdm, rxe_frame_lmt_overrun);
+		PRINT_STATS_LE32(ofdm, sent_ba_rsp_cnt);
+		PRINT_STATS_LE32(ofdm, dsp_self_kill);
+		PRINT_STATS_LE32(ofdm, reserved);
+	}
+
+	pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
+			 "Statistics_Rx - CCK");
+	if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
+		struct mvm_statistics_rx_phy_v2 *cck = &mvm->rx_stats_v3.cck;
+
+		PRINT_STATS_LE32(cck, ina_cnt);
+		PRINT_STATS_LE32(cck, fina_cnt);
+		PRINT_STATS_LE32(cck, plcp_err);
+		PRINT_STATS_LE32(cck, crc32_err);
+		PRINT_STATS_LE32(cck, overrun_err);
+		PRINT_STATS_LE32(cck, early_overrun_err);
+		PRINT_STATS_LE32(cck, crc32_good);
+		PRINT_STATS_LE32(cck, false_alarm_cnt);
+		PRINT_STATS_LE32(cck, fina_sync_err_cnt);
+		PRINT_STATS_LE32(cck, sfd_timeout);
+		PRINT_STATS_LE32(cck, fina_timeout);
+		PRINT_STATS_LE32(cck, unresponded_rts);
+		PRINT_STATS_LE32(cck, rxe_frame_lmt_overrun);
+		PRINT_STATS_LE32(cck, sent_ack_cnt);
+		PRINT_STATS_LE32(cck, sent_cts_cnt);
+		PRINT_STATS_LE32(cck, sent_ba_rsp_cnt);
+		PRINT_STATS_LE32(cck, dsp_self_kill);
+		PRINT_STATS_LE32(cck, mh_format_err);
+		PRINT_STATS_LE32(cck, re_acq_main_rssi_sum);
+		PRINT_STATS_LE32(cck, reserved);
+	} else {
+		struct mvm_statistics_rx_phy *cck = &mvm->rx_stats.cck;
+
+		PRINT_STATS_LE32(cck, unresponded_rts);
+		PRINT_STATS_LE32(cck, rxe_frame_lmt_overrun);
+		PRINT_STATS_LE32(cck, sent_ba_rsp_cnt);
+		PRINT_STATS_LE32(cck, dsp_self_kill);
+		PRINT_STATS_LE32(cck, reserved);
+	}
+
+	pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
+			 "Statistics_Rx - GENERAL");
+	if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
+		struct mvm_statistics_rx_non_phy_v3 *general =
+			&mvm->rx_stats_v3.general;
+
+		PRINT_STATS_LE32(general, bogus_cts);
+		PRINT_STATS_LE32(general, bogus_ack);
+		PRINT_STATS_LE32(general, non_bssid_frames);
+		PRINT_STATS_LE32(general, filtered_frames);
+		PRINT_STATS_LE32(general, non_channel_beacons);
+		PRINT_STATS_LE32(general, channel_beacons);
+		PRINT_STATS_LE32(general, num_missed_bcon);
+		PRINT_STATS_LE32(general, adc_rx_saturation_time);
+		PRINT_STATS_LE32(general, ina_detection_search_time);
+		PRINT_STATS_LE32(general, beacon_silence_rssi_a);
+		PRINT_STATS_LE32(general, beacon_silence_rssi_b);
+		PRINT_STATS_LE32(general, beacon_silence_rssi_c);
+		PRINT_STATS_LE32(general, interference_data_flag);
+		PRINT_STATS_LE32(general, channel_load);
+		PRINT_STATS_LE32(general, dsp_false_alarms);
+		PRINT_STATS_LE32(general, beacon_rssi_a);
+		PRINT_STATS_LE32(general, beacon_rssi_b);
+		PRINT_STATS_LE32(general, beacon_rssi_c);
+		PRINT_STATS_LE32(general, beacon_energy_a);
+		PRINT_STATS_LE32(general, beacon_energy_b);
+		PRINT_STATS_LE32(general, beacon_energy_c);
+		PRINT_STATS_LE32(general, num_bt_kills);
+		PRINT_STATS_LE32(general, mac_id);
+		PRINT_STATS_LE32(general, directed_data_mpdu);
+	} else {
+		struct mvm_statistics_rx_non_phy *general =
+			&mvm->rx_stats.general;
+
+		PRINT_STATS_LE32(general, bogus_cts);
+		PRINT_STATS_LE32(general, bogus_ack);
+		PRINT_STATS_LE32(general, non_channel_beacons);
+		PRINT_STATS_LE32(general, channel_beacons);
+		PRINT_STATS_LE32(general, num_missed_bcon);
+		PRINT_STATS_LE32(general, adc_rx_saturation_time);
+		PRINT_STATS_LE32(general, ina_detection_search_time);
+		PRINT_STATS_LE32(general, beacon_silence_rssi_a);
+		PRINT_STATS_LE32(general, beacon_silence_rssi_b);
+		PRINT_STATS_LE32(general, beacon_silence_rssi_c);
+		PRINT_STATS_LE32(general, interference_data_flag);
+		PRINT_STATS_LE32(general, channel_load);
+		PRINT_STATS_LE32(general, beacon_rssi_a);
+		PRINT_STATS_LE32(general, beacon_rssi_b);
+		PRINT_STATS_LE32(general, beacon_rssi_c);
+		PRINT_STATS_LE32(general, beacon_energy_a);
+		PRINT_STATS_LE32(general, beacon_energy_b);
+		PRINT_STATS_LE32(general, beacon_energy_c);
+		PRINT_STATS_LE32(general, num_bt_kills);
+		PRINT_STATS_LE32(general, mac_id);
+	}
+
+	pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
+			 "Statistics_Rx - HT");
+	if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
+		struct mvm_statistics_rx_ht_phy_v1 *ht =
+			&mvm->rx_stats_v3.ofdm_ht;
+
+		PRINT_STATS_LE32(ht, plcp_err);
+		PRINT_STATS_LE32(ht, overrun_err);
+		PRINT_STATS_LE32(ht, early_overrun_err);
+		PRINT_STATS_LE32(ht, crc32_good);
+		PRINT_STATS_LE32(ht, crc32_err);
+		PRINT_STATS_LE32(ht, mh_format_err);
+		PRINT_STATS_LE32(ht, agg_crc32_good);
+		PRINT_STATS_LE32(ht, agg_mpdu_cnt);
+		PRINT_STATS_LE32(ht, agg_cnt);
+		PRINT_STATS_LE32(ht, unsupport_mcs);
+	} else {
+		struct mvm_statistics_rx_ht_phy *ht =
+			&mvm->rx_stats.ofdm_ht;
+
+		PRINT_STATS_LE32(ht, mh_format_err);
+		PRINT_STATS_LE32(ht, agg_mpdu_cnt);
+		PRINT_STATS_LE32(ht, agg_cnt);
+		PRINT_STATS_LE32(ht, unsupport_mcs);
+	}
+
+	mutex_unlock(&mvm->mutex);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+
+	return ret;
+}
+#undef PRINT_STATS_LE32
+
+static ssize_t iwl_dbgfs_frame_stats_read(struct iwl_mvm *mvm,
+					  char __user *user_buf, size_t count,
+					  loff_t *ppos,
+					  struct iwl_mvm_frame_stats *stats)
+{
+	char *buff, *pos, *endpos;
+	int idx, i;
+	int ret;
+	static const size_t bufsz = 1024;
+
+	buff = kmalloc(bufsz, GFP_KERNEL);
+	if (!buff)
+		return -ENOMEM;
+
+	spin_lock_bh(&mvm->drv_stats_lock);
+
+	pos = buff;
+	endpos = pos + bufsz;
+
+	pos += scnprintf(pos, endpos - pos,
+			 "Legacy/HT/VHT\t:\t%d/%d/%d\n",
+			 stats->legacy_frames,
+			 stats->ht_frames,
+			 stats->vht_frames);
+	pos += scnprintf(pos, endpos - pos, "20/40/80\t:\t%d/%d/%d\n",
+			 stats->bw_20_frames,
+			 stats->bw_40_frames,
+			 stats->bw_80_frames);
+	pos += scnprintf(pos, endpos - pos, "NGI/SGI\t\t:\t%d/%d\n",
+			 stats->ngi_frames,
+			 stats->sgi_frames);
+	pos += scnprintf(pos, endpos - pos, "SISO/MIMO2\t:\t%d/%d\n",
+			 stats->siso_frames,
+			 stats->mimo2_frames);
+	pos += scnprintf(pos, endpos - pos, "FAIL/SCSS\t:\t%d/%d\n",
+			 stats->fail_frames,
+			 stats->success_frames);
+	pos += scnprintf(pos, endpos - pos, "MPDUs agg\t:\t%d\n",
+			 stats->agg_frames);
+	pos += scnprintf(pos, endpos - pos, "A-MPDUs\t\t:\t%d\n",
+			 stats->ampdu_count);
+	pos += scnprintf(pos, endpos - pos, "Avg MPDUs/A-MPDU:\t%d\n",
+			 stats->ampdu_count > 0 ?
+			 (stats->agg_frames / stats->ampdu_count) : 0);
+
+	pos += scnprintf(pos, endpos - pos, "Last Rates\n");
+
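+	/*
+	 * last_rates is a ring buffer; last_frame_idx points at the slot
+	 * for the next frame, so starting there walks the entries from
+	 * oldest to newest, skipping slots that were never written.
+	 */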
+	idx = stats->last_frame_idx - 1;
+	for (i = 0; i < ARRAY_SIZE(stats->last_rates); i++) {
+		idx = (idx + 1) % ARRAY_SIZE(stats->last_rates);
+		if (stats->last_rates[idx] == 0)
+			continue;
+		pos += scnprintf(pos, endpos - pos, "Rate[%d]: ",
+				 (int)(ARRAY_SIZE(stats->last_rates) - i));
+		pos += rs_pretty_print_rate(pos, endpos - pos,
+					    stats->last_rates[idx]);
+	}
+	spin_unlock_bh(&mvm->drv_stats_lock);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
+	kfree(buff);
+
+	return ret;
+}
+
+static ssize_t iwl_dbgfs_drv_rx_stats_read(struct file *file,
+					   char __user *user_buf, size_t count,
+					   loff_t *ppos)
+{
+	struct iwl_mvm *mvm = file->private_data;
+
+	return iwl_dbgfs_frame_stats_read(mvm, user_buf, count, ppos,
+					  &mvm->drv_rx_stats);
+}
+
+static ssize_t iwl_dbgfs_fw_restart_write(struct iwl_mvm *mvm, char *buf,
+					  size_t count, loff_t *ppos)
+{
+	int __maybe_unused ret;
+
+	if (!iwl_mvm_firmware_running(mvm))
+		return -EIO;
+
+	mutex_lock(&mvm->mutex);
+
+	/* allow one more restart - the one we're provoking here */
+	if (mvm->fw_restart >= 0)
+		mvm->fw_restart++;
+
+	/* take the return value to make compiler happy - it will fail anyway */
+	ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_ERROR, 0, 0, NULL);
+
+	mutex_unlock(&mvm->mutex);
+
+	return count;
+}
+
+static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mvm *mvm, char *buf,
+				      size_t count, loff_t *ppos)
+{
+	int ret;
+
+	if (!iwl_mvm_firmware_running(mvm))
+		return -EIO;
+
+	ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_NMI);
+	if (ret)
+		return ret;
+
+	iwl_force_nmi(mvm->trans);
+
+	iwl_mvm_unref(mvm, IWL_MVM_REF_NMI);
+
+	return count;
+}
+
+static ssize_t
+iwl_dbgfs_scan_ant_rxchain_read(struct file *file,
+				char __user *user_buf,
+				size_t count, loff_t *ppos)
+{
+	struct iwl_mvm *mvm = file->private_data;
+	int pos = 0;
+	char buf[32];
+	const size_t bufsz = sizeof(buf);
+
+	/* print which antennas were set for the scan command by the user */
+	pos += scnprintf(buf + pos, bufsz - pos, "Antennas for scan: ");
+	if (mvm->scan_rx_ant & ANT_A)
+		pos += scnprintf(buf + pos, bufsz - pos, "A");
+	if (mvm->scan_rx_ant & ANT_B)
+		pos += scnprintf(buf + pos, bufsz - pos, "B");
+	if (mvm->scan_rx_ant & ANT_C)
+		pos += scnprintf(buf + pos, bufsz - pos, "C");
+	pos += scnprintf(buf + pos, bufsz - pos, " (%hhx)\n", mvm->scan_rx_ant);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
+				 size_t count, loff_t *ppos)
+{
+	u8 scan_rx_ant;
+
+	if (!iwl_mvm_firmware_running(mvm))
+		return -EIO;
+
+	if (sscanf(buf, "%hhx", &scan_rx_ant) != 1)
+		return -EINVAL;
+	if (scan_rx_ant > ANT_ABC)
+		return -EINVAL;
+	if (scan_rx_ant & ~(iwl_mvm_get_valid_rx_ant(mvm)))
+		return -EINVAL;
+
+	if (mvm->scan_rx_ant != scan_rx_ant) {
+		mvm->scan_rx_ant = scan_rx_ant;
+		if (fw_has_capa(&mvm->fw->ucode_capa,
+				IWL_UCODE_TLV_CAPA_UMAC_SCAN))
+			iwl_mvm_config_scan(mvm);
+	}
+
+	return count;
+}
+
+static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm,
+					       char *buf, size_t count,
+					       loff_t *ppos)
+{
+	struct iwl_rss_config_cmd cmd = {
+		.flags = cpu_to_le32(IWL_RSS_ENABLE),
+		.hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
+			     IWL_RSS_HASH_TYPE_IPV4_UDP |
+			     IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
+			     IWL_RSS_HASH_TYPE_IPV6_TCP |
+			     IWL_RSS_HASH_TYPE_IPV6_UDP |
+			     IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
+	};
+	int ret, i, num_repeats, nbytes = count / 2;
+
+	ret = hex2bin(cmd.indirection_table, buf, nbytes);
+	if (ret)
+		return ret;
+
+	/*
+	 * The input is the indirection table, partial or full.
+	 * Repeat the pattern if needed.
+	 * For example, input of 01020F will be repeated 42 times,
+	 * indirecting RSS hash results to queues 1, 2, 15 (skipping
+	 * queues 3 - 14).
+	 */
+	num_repeats = ARRAY_SIZE(cmd.indirection_table) / nbytes;
+	for (i = 1; i < num_repeats; i++)
+		memcpy(&cmd.indirection_table[i * nbytes],
+		       cmd.indirection_table, nbytes);
+	/* handle a pattern that is cut mid-way for the last entries */
+	memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table,
+	       ARRAY_SIZE(cmd.indirection_table) % nbytes);
+
+	netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
+
+	mutex_lock(&mvm->mutex);
+	if (iwl_mvm_firmware_running(mvm))
+		ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0,
+					   sizeof(cmd), &cmd);
+	else
+		ret = 0;
+	mutex_unlock(&mvm->mutex);
+
+	return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
+					     char *buf, size_t count,
+					     loff_t *ppos)
+{
+	struct iwl_rx_cmd_buffer rxb = {
+		._rx_page_order = 0,
+		.truesize = 0, /* not used */
+		._offset = 0,
+	};
+	struct iwl_rx_packet *pkt;
+	struct iwl_rx_mpdu_desc *desc;
+	int bin_len = count / 2;
+	int ret = -EINVAL;
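+	/* 22560+ devices use a larger RX MPDU descriptor than earlier MQ NICs */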
+	size_t mpdu_cmd_hdr_size =
+		(mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
+		sizeof(struct iwl_rx_mpdu_desc) :
+		IWL_RX_DESC_SIZE_V1;
+
+	if (!iwl_mvm_firmware_running(mvm))
+		return -EIO;
+
+	/* only the 9000-series RX descriptor is supported */
+	if (!mvm->trans->cfg->mq_rx_supported)
+		return -ENOTSUPP;
+
+	rxb._page = alloc_pages(GFP_ATOMIC, 0);
+	if (!rxb._page)
+		return -ENOMEM;
+	pkt = rxb_addr(&rxb);
+
+	ret = hex2bin(page_address(rxb._page), buf, bin_len);
+	if (ret)
+		goto out;
+
+	/* avoid invalid memory access */
+	if (bin_len < sizeof(*pkt) + mpdu_cmd_hdr_size)
+		goto out;
+
+	/* check that this is an RX packet */
+	if (WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd) !=
+	    WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD))
+		goto out;
+
+	/* check the length in metadata matches actual received length */
+	desc = (void *)pkt->data;
+	if (le16_to_cpu(desc->mpdu_len) !=
+	    (bin_len - mpdu_cmd_hdr_size - sizeof(*pkt)))
+		goto out;
+
+	local_bh_disable();
+	iwl_mvm_rx_mpdu_mq(mvm, NULL, &rxb, 0);
+	local_bh_enable();
+	ret = 0;
+
+out:
+	iwl_free_rxb(&rxb);
+
+	return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file,
+					  char __user *user_buf,
+					  size_t count, loff_t *ppos)
+{
+	struct iwl_mvm *mvm = file->private_data;
+	int conf;
+	char buf[8];
+	const size_t bufsz = sizeof(buf);
+	int pos = 0;
+
+	mutex_lock(&mvm->mutex);
+	conf = mvm->fwrt.dump.conf;
+	mutex_unlock(&mvm->mutex);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "%d\n", conf);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+/*
+ * Enable / disable continuous recording.
+ * Makes the FW start or stop continuous recording by sending the
+ * relevant host command.
+ * Enable: any input larger than 0 sends ENABLE_CONT_RECORDING.
+ * Disable: an input of 0 sends DISABLE_CONT_RECORDING.
+ */
+static ssize_t iwl_dbgfs_cont_recording_write(struct iwl_mvm *mvm,
+					      char *buf, size_t count,
+					      loff_t *ppos)
+{
+	struct iwl_trans *trans = mvm->trans;
+	const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg_dest_tlv;
+	struct iwl_continuous_record_cmd cont_rec = {};
+	int ret, rec_mode;
+
+	if (!iwl_mvm_firmware_running(mvm))
+		return -EIO;
+
+	if (!dest)
+		return -EOPNOTSUPP;
+
+	if (dest->monitor_mode != SMEM_MODE ||
+	    trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
+		return -EOPNOTSUPP;
+
+	ret = kstrtoint(buf, 0, &rec_mode);
+	if (ret)
+		return ret;
+
+	cont_rec.record_mode.enable_recording = rec_mode ?
+		cpu_to_le16(ENABLE_CONT_RECORDING) :
+		cpu_to_le16(DISABLE_CONT_RECORDING);
+
+	mutex_lock(&mvm->mutex);
+	ret = iwl_mvm_send_cmd_pdu(mvm, LDBG_CONFIG_CMD, 0,
+				   sizeof(cont_rec), &cont_rec);
+	mutex_unlock(&mvm->mutex);
+
+	return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_fw_dbg_conf_write(struct iwl_mvm *mvm,
+					   char *buf, size_t count,
+					   loff_t *ppos)
+{
+	unsigned int conf_id;
+	int ret;
+
+	if (!iwl_mvm_firmware_running(mvm))
+		return -EIO;
+
+	ret = kstrtouint(buf, 0, &conf_id);
+	if (ret)
+		return ret;
+
+	if (WARN_ON(conf_id >= FW_DBG_CONF_MAX))
+		return -EINVAL;
+
+	mutex_lock(&mvm->mutex);
+	ret = iwl_fw_start_dbg_conf(&mvm->fwrt, conf_id);
+	mutex_unlock(&mvm->mutex);
+
+	return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
+					      char *buf, size_t count,
+					      loff_t *ppos)
+{
+	int ret;
+
+	ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
+	if (ret)
+		return ret;
+	/* don't leak the reference on an empty write */
+	if (count == 0) {
+		iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
+		return 0;
+	}
+
+	iwl_fw_dbg_collect(&mvm->fwrt, FW_DBG_TRIGGER_USER, buf,
+			   (count - 1), NULL);
+
+	iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
+
+	return count;
+}
+
+static ssize_t iwl_dbgfs_max_amsdu_len_write(struct iwl_mvm *mvm,
+					     char *buf, size_t count,
+					     loff_t *ppos)
+{
+	unsigned int max_amsdu_len;
+	int ret;
+
+	ret = kstrtouint(buf, 0, &max_amsdu_len);
+	if (ret)
+		return ret;
+
+	if (max_amsdu_len > IEEE80211_MAX_MPDU_LEN_VHT_11454)
+		return -EINVAL;
+	mvm->max_amsdu_len = max_amsdu_len;
+
+	return count;
+}
+
+#define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__)
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file,
+					    char __user *user_buf,
+					    size_t count, loff_t *ppos)
+{
+	struct iwl_mvm *mvm = file->private_data;
+	struct iwl_bcast_filter_cmd cmd;
+	const struct iwl_fw_bcast_filter *filter;
+	char *buf;
+	int bufsz = 1024;
+	int i, j, pos = 0;
+	ssize_t ret;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	mutex_lock(&mvm->mutex);
+	if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
+		ADD_TEXT("None\n");
+		mutex_unlock(&mvm->mutex);
+		goto out;
+	}
+	mutex_unlock(&mvm->mutex);
+
+	for (i = 0; cmd.filters[i].attrs[0].mask; i++) {
+		filter = &cmd.filters[i];
+
+		ADD_TEXT("Filter [%d]:\n", i);
+		ADD_TEXT("\tDiscard=%d\n", filter->discard);
+		ADD_TEXT("\tFrame Type: %s\n",
+			 filter->frame_type ? "IPv4" : "Generic");
+
+		for (j = 0; j < ARRAY_SIZE(filter->attrs); j++) {
+			const struct iwl_fw_bcast_filter_attr *attr;
+
+			attr = &filter->attrs[j];
+			if (!attr->mask)
+				break;
+
+			ADD_TEXT("\tAttr [%d]: offset=%d (from %s), mask=0x%x, value=0x%x reserved=0x%x\n",
+				 j, attr->offset,
+				 attr->offset_type ? "IP End" :
+						     "Payload Start",
+				 be32_to_cpu(attr->mask),
+				 be32_to_cpu(attr->val),
+				 le16_to_cpu(attr->reserved1));
+		}
+	}
+out:
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t iwl_dbgfs_bcast_filters_write(struct iwl_mvm *mvm, char *buf,
+					     size_t count, loff_t *ppos)
+{
+	int pos, next_pos;
+	struct iwl_fw_bcast_filter filter = {};
+	struct iwl_bcast_filter_cmd cmd;
+	u32 filter_id, attr_id, mask, value;
+	int err = 0;
+
+	if (sscanf(buf, "%u %hhi %hhi %n", &filter_id, &filter.discard,
+		   &filter.frame_type, &pos) != 3)
+		return -EINVAL;
+
+	if (filter_id >= ARRAY_SIZE(mvm->dbgfs_bcast_filtering.cmd.filters) ||
+	    filter.frame_type > BCAST_FILTER_FRAME_TYPE_IPV4)
+		return -EINVAL;
+
+	for (attr_id = 0; attr_id < ARRAY_SIZE(filter.attrs);
+	     attr_id++) {
+		struct iwl_fw_bcast_filter_attr *attr =
+				&filter.attrs[attr_id];
+
+		if (pos >= count)
+			break;
+
+		if (sscanf(&buf[pos], "%hhi %hhi %i %i %n",
+			   &attr->offset, &attr->offset_type,
+			   &mask, &value, &next_pos) != 4)
+			return -EINVAL;
+
+		attr->mask = cpu_to_be32(mask);
+		attr->val = cpu_to_be32(value);
+		if (mask)
+			filter.num_attrs++;
+
+		pos += next_pos;
+	}
+
+	mutex_lock(&mvm->mutex);
+	memcpy(&mvm->dbgfs_bcast_filtering.cmd.filters[filter_id],
+	       &filter, sizeof(filter));
+
+	/* send updated bcast filtering configuration */
+	if (iwl_mvm_firmware_running(mvm) &&
+	    mvm->dbgfs_bcast_filtering.override &&
+	    iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
+		err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
+					   sizeof(cmd), &cmd);
+	mutex_unlock(&mvm->mutex);
+
+	return err ?: count;
+}
+
+static ssize_t iwl_dbgfs_bcast_filters_macs_read(struct file *file,
+						 char __user *user_buf,
+						 size_t count, loff_t *ppos)
+{
+	struct iwl_mvm *mvm = file->private_data;
+	struct iwl_bcast_filter_cmd cmd;
+	char *buf;
+	int bufsz = 1024;
+	int i, pos = 0;
+	ssize_t ret;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	mutex_lock(&mvm->mutex);
+	if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
+		ADD_TEXT("None\n");
+		mutex_unlock(&mvm->mutex);
+		goto out;
+	}
+	mutex_unlock(&mvm->mutex);
+
+	for (i = 0; i < ARRAY_SIZE(cmd.macs); i++) {
+		const struct iwl_fw_bcast_mac *mac = &cmd.macs[i];
+
+		ADD_TEXT("Mac [%d]: discard=%d attached_filters=0x%x\n",
+			 i, mac->default_discard, mac->attached_filters);
+	}
+out:
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t iwl_dbgfs_bcast_filters_macs_write(struct iwl_mvm *mvm,
+						  char *buf, size_t count,
+						  loff_t *ppos)
+{
+	struct iwl_bcast_filter_cmd cmd;
+	struct iwl_fw_bcast_mac mac = {};
+	u32 mac_id, attached_filters;
+	int err = 0;
+
+	if (!mvm->bcast_filters)
+		return -ENOENT;
+
+	if (sscanf(buf, "%u %hhi %i", &mac_id, &mac.default_discard,
+		   &attached_filters) != 3)
+		return -EINVAL;
+
+	if (mac_id >= ARRAY_SIZE(cmd.macs) ||
+	    mac.default_discard > 1 ||
+	    attached_filters >= BIT(ARRAY_SIZE(cmd.filters)))
+		return -EINVAL;
+
+	mac.attached_filters = cpu_to_le16(attached_filters);
+
+	mutex_lock(&mvm->mutex);
+	memcpy(&mvm->dbgfs_bcast_filtering.cmd.macs[mac_id],
+	       &mac, sizeof(mac));
+
+	/* send updated bcast filtering configuration */
+	if (iwl_mvm_firmware_running(mvm) &&
+	    mvm->dbgfs_bcast_filtering.override &&
+	    iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
+		err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
+					   sizeof(cmd), &cmd);
+	mutex_unlock(&mvm->mutex);
+
+	return err ?: count;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static ssize_t iwl_dbgfs_d3_sram_write(struct iwl_mvm *mvm, char *buf,
+				       size_t count, loff_t *ppos)
+{
+	int store;
+
+	if (sscanf(buf, "%d", &store) != 1)
+		return -EINVAL;
+
+	mvm->store_d3_resume_sram = store;
+
+	return count;
+}
+
+static ssize_t iwl_dbgfs_d3_sram_read(struct file *file, char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	struct iwl_mvm *mvm = file->private_data;
+	const struct fw_img *img;
+	int ofs, len, pos = 0;
+	size_t bufsz;
+	ssize_t ret;
+	char *buf;
+	u8 *ptr = mvm->d3_resume_sram;
+
+	img = &mvm->fw->img[IWL_UCODE_WOWLAN];
+	len = img->sec[IWL_UCODE_SECTION_DATA].len;
+
+	bufsz = len * 4 + 256;
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	pos += scnprintf(buf, bufsz, "D3 SRAM capture: %sabled\n",
+			 mvm->store_d3_resume_sram ? "en" : "dis");
+
+	if (ptr) {
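+		/* hex-dump the captured SRAM, 16 bytes per line via %16ph */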
+		for (ofs = 0; ofs < len; ofs += 16) {
+			pos += scnprintf(buf + pos, bufsz - pos,
+					 "0x%.4x %16ph\n", ofs, ptr + ofs);
+		}
+	} else {
+		pos += scnprintf(buf + pos, bufsz - pos,
+				 "(no data captured)\n");
+	}
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+
+	kfree(buf);
+
+	return ret;
+}
+#endif
+
+#define PRINT_MVM_REF(ref) do {						\
+	if (mvm->refs[ref])						\
+		pos += scnprintf(buf + pos, bufsz - pos,		\
+				 "\t(0x%lx): %d %s\n",			\
+				 BIT(ref), mvm->refs[ref], #ref);	\
+} while (0)
+
+static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
+					char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct iwl_mvm *mvm = file->private_data;
+	int i, pos = 0;
+	char buf[256];
+	const size_t bufsz = sizeof(buf);
+	u32 refs = 0;
+
+	for (i = 0; i < IWL_MVM_REF_COUNT; i++)
+		if (mvm->refs[i])
+			refs |= BIT(i);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "taken mvm refs: 0x%x\n",
+			 refs);
+
+	PRINT_MVM_REF(IWL_MVM_REF_UCODE_DOWN);
+	PRINT_MVM_REF(IWL_MVM_REF_SCAN);
+	PRINT_MVM_REF(IWL_MVM_REF_ROC);
+	PRINT_MVM_REF(IWL_MVM_REF_ROC_AUX);
+	PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT);
+	PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS);
+	PRINT_MVM_REF(IWL_MVM_REF_USER);
+	PRINT_MVM_REF(IWL_MVM_REF_TX);
+	PRINT_MVM_REF(IWL_MVM_REF_TX_AGG);
+	PRINT_MVM_REF(IWL_MVM_REF_ADD_IF);
+	PRINT_MVM_REF(IWL_MVM_REF_START_AP);
+	PRINT_MVM_REF(IWL_MVM_REF_BSS_CHANGED);
+	PRINT_MVM_REF(IWL_MVM_REF_PREPARE_TX);
+	PRINT_MVM_REF(IWL_MVM_REF_PROTECT_TDLS);
+	PRINT_MVM_REF(IWL_MVM_REF_CHECK_CTKILL);
+	PRINT_MVM_REF(IWL_MVM_REF_PRPH_READ);
+	PRINT_MVM_REF(IWL_MVM_REF_PRPH_WRITE);
+	PRINT_MVM_REF(IWL_MVM_REF_NMI);
+	PRINT_MVM_REF(IWL_MVM_REF_TM_CMD);
+	PRINT_MVM_REF(IWL_MVM_REF_EXIT_WORK);
+	PRINT_MVM_REF(IWL_MVM_REF_PROTECT_CSA);
+	PRINT_MVM_REF(IWL_MVM_REF_FW_DBG_COLLECT);
+	PRINT_MVM_REF(IWL_MVM_REF_INIT_UCODE);
+	PRINT_MVM_REF(IWL_MVM_REF_SENDING_CMD);
+	PRINT_MVM_REF(IWL_MVM_REF_RX);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_d0i3_refs_write(struct iwl_mvm *mvm, char *buf,
+					 size_t count, loff_t *ppos)
+{
+	unsigned long value;
+	int ret;
+	bool taken;
+
+	ret = kstrtoul(buf, 10, &value);
+	if (ret < 0)
+		return ret;
+
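+	/* writing 1 takes the debugfs (USER) reference, writing 0 drops it */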
+	mutex_lock(&mvm->mutex);
+
+	taken = mvm->refs[IWL_MVM_REF_USER];
+	if (value == 1 && !taken)
+		iwl_mvm_ref(mvm, IWL_MVM_REF_USER);
+	else if (value == 0 && taken)
+		iwl_mvm_unref(mvm, IWL_MVM_REF_USER);
+	else
+		ret = -EINVAL;
+
+	mutex_unlock(&mvm->mutex);
+
+	if (ret < 0)
+		return ret;
+	return count;
+}
+
+#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
+	_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
+#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
+	_MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
+#define MVM_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do {	\
+		if (!debugfs_create_file(alias, mode, parent, mvm,	\
+					 &iwl_dbgfs_##name##_ops))	\
+			goto err;					\
+	} while (0)
+#define MVM_DEBUGFS_ADD_FILE(name, parent, mode) \
+	MVM_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
+
+#define MVM_DEBUGFS_WRITE_STA_FILE_OPS(name, bufsz) \
+	_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_sta)
+#define MVM_DEBUGFS_READ_WRITE_STA_FILE_OPS(name, bufsz) \
+	_MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct ieee80211_sta)
+
+#define MVM_DEBUGFS_ADD_STA_FILE_ALIAS(alias, name, parent, mode) do {	\
+		if (!debugfs_create_file(alias, mode, parent, sta,	\
+					 &iwl_dbgfs_##name##_ops))	\
+			goto err;					\
+	} while (0)
+#define MVM_DEBUGFS_ADD_STA_FILE(name, parent, mode) \
+	MVM_DEBUGFS_ADD_STA_FILE_ALIAS(#name, name, parent, mode)
+
+static ssize_t
+iwl_dbgfs_prph_reg_read(struct file *file,
+			char __user *user_buf,
+			size_t count, loff_t *ppos)
+{
+	struct iwl_mvm *mvm = file->private_data;
+	int pos = 0;
+	char buf[32];
+	const size_t bufsz = sizeof(buf);
+	int ret;
+
+	if (!mvm->dbgfs_prph_reg_addr)
+		return -EINVAL;
+
+	ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_READ);
+	if (ret)
+		return ret;
+
+	pos += scnprintf(buf + pos, bufsz - pos, "Reg 0x%x: (0x%x)\n",
+		mvm->dbgfs_prph_reg_addr,
+		iwl_read_prph(mvm->trans, mvm->dbgfs_prph_reg_addr));
+
+	iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_READ);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+iwl_dbgfs_prph_reg_write(struct iwl_mvm *mvm, char *buf,
+			 size_t count, loff_t *ppos)
+{
+	u8 args;
+	u32 value;
+	int ret;
+
+	args = sscanf(buf, "%i %i", &mvm->dbgfs_prph_reg_addr, &value);
+	/* if we only want to set the reg address - nothing more to do */
+	if (args == 1)
+		goto out;
+
+	/* otherwise, make sure we have both address and value */
+	if (args != 2)
+		return -EINVAL;
+
+	ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE);
+	if (ret)
+		return ret;
+
+	iwl_write_prph(mvm->trans, mvm->dbgfs_prph_reg_addr, value);
+
+	iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE);
+out:
+	return count;
+}
+
+static ssize_t
+iwl_dbgfs_send_echo_cmd_write(struct iwl_mvm *mvm, char *buf,
+			      size_t count, loff_t *ppos)
+{
+	int ret;
+
+	if (!iwl_mvm_firmware_running(mvm))
+		return -EIO;
+
+	mutex_lock(&mvm->mutex);
+	ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL);
+	mutex_unlock(&mvm->mutex);
+
+	return ret ?: count;
+}
+
+static ssize_t
+iwl_dbgfs_uapsd_noagg_bssids_read(struct file *file, char __user *user_buf,
+				  size_t count, loff_t *ppos)
+{
+	struct iwl_mvm *mvm = file->private_data;
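+	/* each BSSID prints as "xx:xx:xx:xx:xx:xx\n": 3 chars per octet */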
+	u8 buf[IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM * ETH_ALEN * 3 + 1];
+	unsigned int pos = 0;
+	size_t bufsz = sizeof(buf);
+	int i;
+
+	mutex_lock(&mvm->mutex);
+
+	for (i = 0; i < IWL_MVM_UAPSD_NOAGG_LIST_LEN; i++)
+		pos += scnprintf(buf + pos, bufsz - pos, "%pM\n",
+				 mvm->uapsd_noagg_bssids[i].addr);
+
+	mutex_unlock(&mvm->mutex);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(prph_reg, 64);
+
+/* Device wide debugfs entries */
+MVM_DEBUGFS_READ_FILE_OPS(ctdp_budget);
+MVM_DEBUGFS_WRITE_FILE_OPS(stop_ctdp, 8);
+MVM_DEBUGFS_WRITE_FILE_OPS(force_ctkill, 8);
+MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16);
+MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain, 8);
+MVM_DEBUGFS_WRITE_FILE_OPS(send_echo_cmd, 8);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram, 64);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(set_nic_temperature, 64);
+MVM_DEBUGFS_READ_FILE_OPS(nic_temp);
+MVM_DEBUGFS_READ_FILE_OPS(stations);
+MVM_DEBUGFS_READ_FILE_OPS(rs_data);
+MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
+MVM_DEBUGFS_READ_FILE_OPS(bt_cmd);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64);
+MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
+MVM_DEBUGFS_READ_FILE_OPS(drv_rx_stats);
+MVM_DEBUGFS_READ_FILE_OPS(fw_ver);
+MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10);
+MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10);
+MVM_DEBUGFS_WRITE_FILE_OPS(bt_tx_prio, 10);
+MVM_DEBUGFS_WRITE_FILE_OPS(bt_force_ant, 10);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8);
+MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64);
+MVM_DEBUGFS_WRITE_FILE_OPS(cont_recording, 8);
+MVM_DEBUGFS_WRITE_FILE_OPS(max_amsdu_len, 8);
+MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl,
+			   (IWL_RSS_INDIRECTION_TABLE_SIZE * 2));
+MVM_DEBUGFS_WRITE_FILE_OPS(inject_packet, 512);
+
+MVM_DEBUGFS_READ_FILE_OPS(uapsd_noagg_bssids);
+
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256);
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8);
+#endif
+#ifdef CONFIG_ACPI
+MVM_DEBUGFS_READ_FILE_OPS(sar_geo_profile);
+#endif
+
+static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf,
+				  size_t count, loff_t *ppos)
+{
+	struct iwl_mvm *mvm = file->private_data;
+	struct iwl_dbg_mem_access_cmd cmd = {};
+	struct iwl_dbg_mem_access_rsp *rsp;
+	struct iwl_host_cmd hcmd = {
+		.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
+		.data = { &cmd, },
+		.len = { sizeof(cmd) },
+	};
+	size_t delta;
+	ssize_t ret, len;
+
+	if (!iwl_mvm_firmware_running(mvm))
+		return -EIO;
+
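+	/* file offsets at or above 1 << 24 are routed to UMAC memory */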
+	hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR,
+			     DEBUG_GROUP, 0);
+	cmd.op = cpu_to_le32(DEBUG_MEM_OP_READ);
+
+	/* Take care of alignment of both the position and the length */
+	delta = *ppos & 0x3;
+	cmd.addr = cpu_to_le32(*ppos - delta);
+	cmd.len = cpu_to_le32(min(ALIGN(count + delta, 4) / 4,
+				  (size_t)DEBUG_MEM_MAX_SIZE_DWORDS));
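+	/*
+	 * e.g. a 6-byte read at *ppos == 2 gives delta = 2, addr = 0 and
+	 * len = 2 dwords; the leading bytes are skipped via 'delta' below.
+	 */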
+
+	mutex_lock(&mvm->mutex);
+	ret = iwl_mvm_send_cmd(mvm, &hcmd);
+	mutex_unlock(&mvm->mutex);
+
+	if (ret < 0)
+		return ret;
+
+	rsp = (void *)hcmd.resp_pkt->data;
+	if (le32_to_cpu(rsp->status) != DEBUG_MEM_STATUS_SUCCESS) {
+		ret = -ENXIO;
+		goto out;
+	}
+
+	len = min((size_t)le32_to_cpu(rsp->len) << 2,
+		  iwl_rx_packet_payload_len(hcmd.resp_pkt) - sizeof(*rsp));
+	len = min(len - delta, count);
+	if (len < 0) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	ret = len - copy_to_user(user_buf, (void *)rsp->data + delta, len);
+	*ppos += ret;
+
+out:
+	iwl_free_resp(&hcmd);
+	return ret;
+}
+
+static ssize_t iwl_dbgfs_mem_write(struct file *file,
+				   const char __user *user_buf, size_t count,
+				   loff_t *ppos)
+{
+	struct iwl_mvm *mvm = file->private_data;
+	struct iwl_dbg_mem_access_cmd *cmd;
+	struct iwl_dbg_mem_access_rsp *rsp;
+	struct iwl_host_cmd hcmd = {};
+	size_t cmd_size;
+	size_t data_size;
+	u32 op, len;
+	ssize_t ret;
+
+	if (!iwl_mvm_firmware_running(mvm))
+		return -EIO;
+
+	hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR,
+			     DEBUG_GROUP, 0);
+
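+	/*
+	 * Unaligned or short writes are done byte-wise, only up to the next
+	 * dword boundary; userspace advances *ppos by the returned length
+	 * and retries for the remainder.
+	 */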
+	if (*ppos & 0x3 || count < 4) {
+		op = DEBUG_MEM_OP_WRITE_BYTES;
+		len = min(count, (size_t)(4 - (*ppos & 0x3)));
+		data_size = len;
+	} else {
+		op = DEBUG_MEM_OP_WRITE;
+		len = min(count >> 2, (size_t)DEBUG_MEM_MAX_SIZE_DWORDS);
+		data_size = len << 2;
+	}
+
+	cmd_size = sizeof(*cmd) + ALIGN(data_size, 4);
+	cmd = kzalloc(cmd_size, GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	cmd->op = cpu_to_le32(op);
+	cmd->len = cpu_to_le32(len);
+	cmd->addr = cpu_to_le32(*ppos);
+	if (copy_from_user((void *)cmd->data, user_buf, data_size)) {
+		kfree(cmd);
+		return -EFAULT;
+	}
+
+	hcmd.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL;
+	hcmd.data[0] = (void *)cmd;
+	hcmd.len[0] = cmd_size;
+
+	mutex_lock(&mvm->mutex);
+	ret = iwl_mvm_send_cmd(mvm, &hcmd);
+	mutex_unlock(&mvm->mutex);
+
+	kfree(cmd);
+
+	if (ret < 0)
+		return ret;
+
+	rsp = (void *)hcmd.resp_pkt->data;
+	if (le32_to_cpu(rsp->status) != DEBUG_MEM_STATUS_SUCCESS) {
+		ret = -ENXIO;
+		goto out;
+	}
+
+	ret = data_size;
+	*ppos += ret;
+
+out:
+	iwl_free_resp(&hcmd);
+	return ret;
+}
+
+static const struct file_operations iwl_dbgfs_mem_ops = {
+	.read = iwl_dbgfs_mem_read,
+	.write = iwl_dbgfs_mem_write,
+	.open = simple_open,
+	.llseek = default_llseek,
+};
+
+void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
+			     struct ieee80211_vif *vif,
+			     struct ieee80211_sta *sta,
+			     struct dentry *dir)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+	if (iwl_mvm_has_tlc_offload(mvm))
+		MVM_DEBUGFS_ADD_STA_FILE(rs_data, dir, 0400);
+
+	return;
+err:
+	IWL_ERR(mvm, "Can't create the mvm station debugfs entry\n");
+}
+
+int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
+{
+	struct dentry *bcast_dir __maybe_unused;
+	char buf[100];
+
+	spin_lock_init(&mvm->drv_stats_lock);
+
+	mvm->debugfs_dir = dbgfs_dir;
+
+	MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, 0200);
+	MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, 0200);
+	MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, 0600);
+	MVM_DEBUGFS_ADD_FILE(set_nic_temperature, mvm->debugfs_dir, 0600);
+	MVM_DEBUGFS_ADD_FILE(nic_temp, dbgfs_dir, 0400);
+	MVM_DEBUGFS_ADD_FILE(ctdp_budget, dbgfs_dir, 0400);
+	MVM_DEBUGFS_ADD_FILE(stop_ctdp, dbgfs_dir, 0200);
+	MVM_DEBUGFS_ADD_FILE(force_ctkill, dbgfs_dir, 0200);
+	MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, 0400);
+	MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, 0400);
+	MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, 0400);
+	MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir, 0600);
+	MVM_DEBUGFS_ADD_FILE(fw_ver, mvm->debugfs_dir, 0400);
+	MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, 0400);
+	MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, 0400);
+	MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, 0200);
+	MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, 0200);
+	MVM_DEBUGFS_ADD_FILE(bt_tx_prio, mvm->debugfs_dir, 0200);
+	MVM_DEBUGFS_ADD_FILE(bt_force_ant, mvm->debugfs_dir, 0200);
+	MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir, 0600);
+	MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, 0600);
+	MVM_DEBUGFS_ADD_FILE(d0i3_refs, mvm->debugfs_dir, 0600);
+	MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, 0600);
+	MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, 0200);
+	MVM_DEBUGFS_ADD_FILE(max_amsdu_len, mvm->debugfs_dir, 0200);
+	MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, 0200);
+	MVM_DEBUGFS_ADD_FILE(cont_recording, mvm->debugfs_dir, 0200);
+	MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, 0200);
+	MVM_DEBUGFS_ADD_FILE(inject_packet, mvm->debugfs_dir, 0200);
+#ifdef CONFIG_ACPI
+	MVM_DEBUGFS_ADD_FILE(sar_geo_profile, dbgfs_dir, 0400);
+#endif
+
+	if (!debugfs_create_bool("enable_scan_iteration_notif",
+				 0600,
+				 mvm->debugfs_dir,
+				 &mvm->scan_iter_notif_enabled))
+		goto err;
+	if (!debugfs_create_bool("drop_bcn_ap_mode", 0600,
+				 mvm->debugfs_dir, &mvm->drop_bcn_ap_mode))
+		goto err;
+
+	MVM_DEBUGFS_ADD_FILE(uapsd_noagg_bssids, mvm->debugfs_dir, 0400);
+
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) {
+		bcast_dir = debugfs_create_dir("bcast_filtering",
+					       mvm->debugfs_dir);
+		if (!bcast_dir)
+			goto err;
+
+		if (!debugfs_create_bool("override", 0600,
+					 bcast_dir,
+					 &mvm->dbgfs_bcast_filtering.override))
+			goto err;
+
+		MVM_DEBUGFS_ADD_FILE_ALIAS("filters", bcast_filters,
+					   bcast_dir, 0600);
+		MVM_DEBUGFS_ADD_FILE_ALIAS("macs", bcast_filters_macs,
+					   bcast_dir, 0600);
+	}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+	MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, 0600);
+	MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, 0400);
+	if (!debugfs_create_bool("d3_wake_sysassert", 0600,
+				 mvm->debugfs_dir, &mvm->d3_wake_sysassert))
+		goto err;
+	if (!debugfs_create_u32("last_netdetect_scans", 0400,
+				mvm->debugfs_dir, &mvm->last_netdetect_scans))
+		goto err;
+#endif
+
+	if (!debugfs_create_u8("ps_disabled", 0400,
+			       mvm->debugfs_dir, &mvm->ps_disabled))
+		goto err;
+	if (!debugfs_create_blob("nvm_hw", 0400,
+				 mvm->debugfs_dir, &mvm->nvm_hw_blob))
+		goto err;
+	if (!debugfs_create_blob("nvm_sw", 0400,
+				 mvm->debugfs_dir, &mvm->nvm_sw_blob))
+		goto err;
+	if (!debugfs_create_blob("nvm_calib", 0400,
+				 mvm->debugfs_dir, &mvm->nvm_calib_blob))
+		goto err;
+	if (!debugfs_create_blob("nvm_prod", 0400,
+				 mvm->debugfs_dir, &mvm->nvm_prod_blob))
+		goto err;
+	if (!debugfs_create_blob("nvm_phy_sku", 0400,
+				 mvm->debugfs_dir, &mvm->nvm_phy_sku_blob))
+		goto err;
+
+	debugfs_create_file("mem", 0600, dbgfs_dir, mvm, &iwl_dbgfs_mem_ops);
+
+	/*
+	 * Create a symlink with mac80211. It will be removed when mac80211
+	 * exits (before the opmode exits, which removes the target).
+	 */
+	snprintf(buf, 100, "../../%pd2", dbgfs_dir->d_parent);
+	if (!debugfs_create_symlink("iwlwifi", mvm->hw->wiphy->debugfsdir, buf))
+		goto err;
+
+	return 0;
+err:
+	IWL_ERR(mvm, "Can't create the mvm debugfs directory\n");
+	return -ENOMEM;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h
new file mode 100644
index 0000000..ede6ef8
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.h
@@ -0,0 +1,103 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#define MVM_DEBUGFS_READ_FILE_OPS(name)					\
+static const struct file_operations iwl_dbgfs_##name##_ops = {		\
+	.read = iwl_dbgfs_##name##_read,				\
+	.open = simple_open,						\
+	.llseek = generic_file_llseek,					\
+}
+
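+/*
+ * Bounce the user buffer through a zeroed stack copy so that the
+ * per-file write handlers always get a NUL-terminated string; one
+ * byte of 'buf' is reserved for the terminator, hence 'buflen - 1'.
+ */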
+#define MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype)		\
+static ssize_t _iwl_dbgfs_##name##_write(struct file *file,		\
+					 const char __user *user_buf,	\
+					 size_t count, loff_t *ppos)	\
+{									\
+	argtype *arg = file->private_data;				\
+	char buf[buflen] = {};						\
+	size_t buf_size = min(count, sizeof(buf) -  1);			\
+									\
+	if (copy_from_user(buf, user_buf, buf_size))			\
+		return -EFAULT;						\
+									\
+	return iwl_dbgfs_##name##_write(arg, buf, buf_size, ppos);	\
+}									\
+
+#define _MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, buflen, argtype)		\
+MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype)			\
+static const struct file_operations iwl_dbgfs_##name##_ops = {		\
+	.write = _iwl_dbgfs_##name##_write,				\
+	.read = iwl_dbgfs_##name##_read,				\
+	.open = simple_open,						\
+	.llseek = generic_file_llseek,					\
+};
+
+#define _MVM_DEBUGFS_WRITE_FILE_OPS(name, buflen, argtype)		\
+MVM_DEBUGFS_WRITE_WRAPPER(name, buflen, argtype)			\
+static const struct file_operations iwl_dbgfs_##name##_ops = {		\
+	.write = _iwl_dbgfs_##name##_write,				\
+	.open = simple_open,						\
+	.llseek = generic_file_llseek,					\
+};
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
new file mode 100644
index 0000000..e8e74dd
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
@@ -0,0 +1,100 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __fw_api_h__
+#define __fw_api_h__
+
+#include "fw/api/tdls.h"
+#include "fw/api/mac-cfg.h"
+#include "fw/api/offload.h"
+#include "fw/api/context.h"
+#include "fw/api/time-event.h"
+#include "fw/api/datapath.h"
+#include "fw/api/phy.h"
+#include "fw/api/config.h"
+#include "fw/api/alive.h"
+#include "fw/api/binding.h"
+#include "fw/api/cmdhdr.h"
+#include "fw/api/coex.h"
+#include "fw/api/commands.h"
+#include "fw/api/d3.h"
+#include "fw/api/filter.h"
+#include "fw/api/led.h"
+#include "fw/api/mac.h"
+#include "fw/api/nvm-reg.h"
+#include "fw/api/phy-ctxt.h"
+#include "fw/api/power.h"
+#include "fw/api/rs.h"
+#include "fw/api/rx.h"
+#include "fw/api/scan.h"
+#include "fw/api/sf.h"
+#include "fw/api/sta.h"
+#include "fw/api/stats.h"
+#include "fw/api/tof.h"
+#include "fw/api/tx.h"
+
+#endif /* __fw_api_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
new file mode 100644
index 0000000..16c6c7f
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -0,0 +1,1279 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <net/mac80211.h>
+#include <linux/netdevice.h>
+
+#include "iwl-trans.h"
+#include "iwl-op-mode.h"
+#include "fw/img.h"
+#include "iwl-debug.h"
+#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */
+#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
+#include "iwl-prph.h"
+#include "fw/acpi.h"
+
+#include "mvm.h"
+#include "fw/dbg.h"
+#include "iwl-phy-db.h"
+#include "iwl-modparams.h"
+#include "iwl-nvm-parse.h"
+
+#define MVM_UCODE_ALIVE_TIMEOUT	HZ
+#define MVM_UCODE_CALIB_TIMEOUT	(2*HZ)
+
+#define UCODE_VALID_OK	cpu_to_le32(0x1)
+
+struct iwl_mvm_alive_data {
+	bool valid;
+	u32 scd_base_addr;
+};
+
+static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
+{
+	struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
+		.valid = cpu_to_le32(valid_tx_ant),
+	};
+
+	IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
+	return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
+				    sizeof(tx_ant_cmd), &tx_ant_cmd);
+}
+
+static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
+{
+	int i;
+	struct iwl_rss_config_cmd cmd = {
+		.flags = cpu_to_le32(IWL_RSS_ENABLE),
+		.hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
+			     IWL_RSS_HASH_TYPE_IPV4_UDP |
+			     IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
+			     IWL_RSS_HASH_TYPE_IPV6_TCP |
+			     IWL_RSS_HASH_TYPE_IPV6_UDP |
+			     IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
+	};
+
+	if (mvm->trans->num_rx_queues == 1)
+		return 0;
+
+	/* Do not direct RSS traffic to Q 0 which is our fallback queue */
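+	/* e.g. with four RX queues the table cycles 1, 2, 3, 1, 2, 3, ... */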
+	for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
+		cmd.indirection_table[i] =
+			1 + (i % (mvm->trans->num_rx_queues - 1));
+	netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
+
+	return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
+}
+
+static int iwl_configure_rxq(struct iwl_mvm *mvm)
+{
+	int i, num_queues, size;
+	struct iwl_rfh_queue_config *cmd;
+
+	/* Do not configure default queue, it is configured via context info */
+	num_queues = mvm->trans->num_rx_queues - 1;
+
+	size = sizeof(*cmd) + num_queues * sizeof(struct iwl_rfh_queue_data);
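+	/* the command is variable-length: one queue-data entry per RX queue */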
+
+	cmd = kzalloc(size, GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	cmd->num_queues = num_queues;
+
+	for (i = 0; i < num_queues; i++) {
+		struct iwl_trans_rxq_dma_data data;
+
+		cmd->data[i].q_num = i + 1;
+		iwl_trans_get_rxq_dma_data(mvm->trans, i + 1, &data);
+
+		cmd->data[i].fr_bd_cb = cpu_to_le64(data.fr_bd_cb);
+		cmd->data[i].urbd_stts_wrptr =
+			cpu_to_le64(data.urbd_stts_wrptr);
+		cmd->data[i].ur_bd_cb = cpu_to_le64(data.ur_bd_cb);
+		cmd->data[i].fr_bd_wid = cpu_to_le32(data.fr_bd_wid);
+	}
+
+	return iwl_mvm_send_cmd_pdu(mvm,
+				    WIDE_ID(DATA_PATH_GROUP,
+					    RFH_QUEUE_CONFIG_CMD),
+				    0, size, cmd);
+}
+
+static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
+{
+	struct iwl_dqa_enable_cmd dqa_cmd = {
+		.cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE),
+	};
+	u32 cmd_id = iwl_cmd_id(DQA_ENABLE_CMD, DATA_PATH_GROUP, 0);
+	int ret;
+
+	ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
+	if (ret)
+		IWL_ERR(mvm, "Failed to send DQA enabling command: %d\n", ret);
+	else
+		IWL_DEBUG_FW(mvm, "Working in DQA mode\n");
+
+	return ret;
+}
+
+void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
+				   struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data;
+	__le32 *dump_data = mfu_dump_notif->data;
+	int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32);
+	int i;
+
+	if (mfu_dump_notif->index_num == 0)
+		IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n",
+			 le32_to_cpu(mfu_dump_notif->assert_id));
+
+	for (i = 0; i < n_words; i++)
+		IWL_DEBUG_INFO(mvm,
+			       "MFUART assert dump, dword %u: 0x%08x\n",
+			       le16_to_cpu(mfu_dump_notif->index_num) *
+			       n_words + i,
+			       le32_to_cpu(dump_data[i]));
+}
+
+static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
+			 struct iwl_rx_packet *pkt, void *data)
+{
+	struct iwl_mvm *mvm =
+		container_of(notif_wait, struct iwl_mvm, notif_wait);
+	struct iwl_mvm_alive_data *alive_data = data;
+	struct mvm_alive_resp_v3 *palive3;
+	struct mvm_alive_resp *palive;
+	struct iwl_umac_alive *umac;
+	struct iwl_lmac_alive *lmac1;
+	struct iwl_lmac_alive *lmac2 = NULL;
+	u16 status;
+	u32 umac_error_event_table;
+
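+	/*
+	 * Tell the newer two-LMAC (CDB) alive layout apart from the v3
+	 * layout by the notification payload length.
+	 */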
+	if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
+		palive = (void *)pkt->data;
+		umac = &palive->umac_data;
+		lmac1 = &palive->lmac_data[0];
+		lmac2 = &palive->lmac_data[1];
+		status = le16_to_cpu(palive->status);
+	} else {
+		palive3 = (void *)pkt->data;
+		umac = &palive3->umac_data;
+		lmac1 = &palive3->lmac_data;
+		status = le16_to_cpu(palive3->status);
+	}
+
+	mvm->error_event_table[0] = le32_to_cpu(lmac1->error_event_table_ptr);
+	if (lmac2)
+		mvm->error_event_table[1] =
+			le32_to_cpu(lmac2->error_event_table_ptr);
+	mvm->log_event_table = le32_to_cpu(lmac1->log_event_table_ptr);
+
+	umac_error_event_table = le32_to_cpu(umac->error_info_addr);
+
+	if (!umac_error_event_table) {
+		mvm->support_umac_log = false;
+	} else if (umac_error_event_table >=
+		   mvm->trans->cfg->min_umac_error_event_table) {
+		mvm->support_umac_log = true;
+		mvm->umac_error_event_table = umac_error_event_table;
+	} else {
+		IWL_ERR(mvm,
+			"Not valid error log pointer 0x%08X for %s uCode\n",
+			umac_error_event_table,
+			(mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) ?
+			"Init" : "RT");
+		mvm->support_umac_log = false;
+	}
+
+	alive_data->scd_base_addr = le32_to_cpu(lmac1->scd_base_ptr);
+	alive_data->valid = status == IWL_ALIVE_STATUS_OK;
+
+	IWL_DEBUG_FW(mvm,
+		     "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
+		     status, lmac1->ver_type, lmac1->ver_subtype);
+
+	if (lmac2)
+		IWL_DEBUG_FW(mvm, "Alive ucode CDB\n");
+
+	IWL_DEBUG_FW(mvm,
+		     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
+		     le32_to_cpu(umac->umac_major),
+		     le32_to_cpu(umac->umac_minor));
+
+	return true;
+}
+
+static bool iwl_wait_init_complete(struct iwl_notif_wait_data *notif_wait,
+				   struct iwl_rx_packet *pkt, void *data)
+{
+	WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
+
+	return true;
+}
+
+static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
+				  struct iwl_rx_packet *pkt, void *data)
+{
+	struct iwl_phy_db *phy_db = data;
+
+	if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
+		WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
+		return true;
+	}
+
+	WARN_ON(iwl_phy_db_set_section(phy_db, pkt));
+
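+	/* returning false keeps the wait going until INIT_COMPLETE_NOTIF */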
+	return false;
+}
+
+static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
+					 enum iwl_ucode_type ucode_type)
+{
+	struct iwl_notification_wait alive_wait;
+	struct iwl_mvm_alive_data alive_data;
+	const struct fw_img *fw;
+	int ret, i;
+	enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
+	static const u16 alive_cmd[] = { MVM_ALIVE };
+
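+	/*
+	 * If debug is configured to start from alive, the usniffer image
+	 * must be loaded instead, unless the regular image already has
+	 * unified usniffer support.
+	 */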
+	if (ucode_type == IWL_UCODE_REGULAR &&
+	    iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
+	    !(fw_has_capa(&mvm->fw->ucode_capa,
+			  IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
+		fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER);
+	else
+		fw = iwl_get_ucode_image(mvm->fw, ucode_type);
+	if (WARN_ON(!fw))
+		return -EINVAL;
+	iwl_fw_set_current_image(&mvm->fwrt, ucode_type);
+	clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
+
+	iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
+				   alive_cmd, ARRAY_SIZE(alive_cmd),
+				   iwl_alive_fn, &alive_data);
+
+	ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
+	if (ret) {
+		iwl_fw_set_current_image(&mvm->fwrt, old_type);
+		iwl_remove_notification(&mvm->notif_wait, &alive_wait);
+		return ret;
+	}
+
+	/*
+	 * Some things may run in the background now, but we
+	 * just wait for the ALIVE notification here.
+	 */
+	ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
+				    MVM_UCODE_ALIVE_TIMEOUT);
+	if (ret) {
+		struct iwl_trans *trans = mvm->trans;
+
+		if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000)
+			IWL_ERR(mvm,
+				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
+				iwl_read_prph(trans, UMAG_SB_CPU_1_STATUS),
+				iwl_read_prph(trans, UMAG_SB_CPU_2_STATUS));
+		else if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
+			IWL_ERR(mvm,
+				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
+				iwl_read_prph(trans, SB_CPU_1_STATUS),
+				iwl_read_prph(trans, SB_CPU_2_STATUS));
+		iwl_fw_set_current_image(&mvm->fwrt, old_type);
+		return ret;
+	}
+
+	if (!alive_data.valid) {
+		IWL_ERR(mvm, "Loaded ucode is not valid!\n");
+		iwl_fw_set_current_image(&mvm->fwrt, old_type);
+		return -EIO;
+	}
+
+	iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
+
+	/*
+	 * Note: all the queues are enabled as part of the interface
+	 * initialization, but in firmware restart scenarios they
+	 * could be stopped, so wake them up. In firmware restart,
+	 * mac80211 will have the queues stopped as well until the
+	 * reconfiguration completes. During normal startup, they
+	 * will be empty.
+	 */
+
+	memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
+	mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
+
+	for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
+		atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
+
+	set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
+
+	return 0;
+}
+
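+/*
+ * For unified firmware there is no separate INIT image: the regular image
+ * is started once and driven through the NVM handshake
+ * (INIT_EXTENDED_CFG_CMD, optional external NVM load, NVM_ACCESS_COMPLETE)
+ * until it posts INIT_COMPLETE_NOTIF.
+ */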
+static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
+{
+	struct iwl_notification_wait init_wait;
+	struct iwl_nvm_access_complete_cmd nvm_complete = {};
+	struct iwl_init_extended_cfg_cmd init_cfg = {
+		.init_flags = cpu_to_le32(BIT(IWL_INIT_NVM)),
+	};
+	static const u16 init_complete[] = {
+		INIT_COMPLETE_NOTIF,
+	};
+	int ret;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	iwl_init_notification_wait(&mvm->notif_wait,
+				   &init_wait,
+				   init_complete,
+				   ARRAY_SIZE(init_complete),
+				   iwl_wait_init_complete,
+				   NULL);
+
+	/* Will also start the device */
+	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
+	if (ret) {
+		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
+		goto error;
+	}
+
+	/* Send init config command to mark that we are sending NVM access
+	 * commands
+	 */
+	ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(SYSTEM_GROUP,
+						INIT_EXTENDED_CFG_CMD), 0,
+				   sizeof(init_cfg), &init_cfg);
+	if (ret) {
+		IWL_ERR(mvm, "Failed to run init config command: %d\n",
+			ret);
+		goto error;
+	}
+
+	/* Load NVM to NIC if needed */
+	if (mvm->nvm_file_name) {
+		iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name,
+				      mvm->nvm_sections);
+		iwl_mvm_load_nvm_to_nic(mvm);
+	}
+
+	if (IWL_MVM_PARSE_NVM && read_nvm) {
+		ret = iwl_nvm_init(mvm);
+		if (ret) {
+			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
+			goto error;
+		}
+	}
+
+	ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
+						NVM_ACCESS_COMPLETE), 0,
+				   sizeof(nvm_complete), &nvm_complete);
+	if (ret) {
+		IWL_ERR(mvm, "Failed to run complete NVM access: %d\n",
+			ret);
+		goto error;
+	}
+
+	/* We wait for the INIT complete notification */
+	ret = iwl_wait_notification(&mvm->notif_wait, &init_wait,
+				    MVM_UCODE_ALIVE_TIMEOUT);
+	if (ret)
+		return ret;
+
+	/* Read the NVM only at driver load time, no need to do this twice */
+	if (!IWL_MVM_PARSE_NVM && read_nvm) {
+		mvm->nvm_data = iwl_get_nvm(mvm->trans, mvm->fw);
+		if (IS_ERR(mvm->nvm_data)) {
+			ret = PTR_ERR(mvm->nvm_data);
+			mvm->nvm_data = NULL;
+			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
+			return ret;
+		}
+	}
+
+	return 0;
+
+error:
+	iwl_remove_notification(&mvm->notif_wait, &init_wait);
+	return ret;
+}
+
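+/*
+ * The PHY configuration sent here combines the firmware's PHY config word
+ * (see iwl_mvm_get_phy_config()) with any extra flags from the device cfg,
+ * plus the default calibration triggers the firmware file declares for the
+ * currently running image type.
+ */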
+static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
+{
+	struct iwl_phy_cfg_cmd phy_cfg_cmd;
+	enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img;
+
+	/* Set parameters */
+	phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
+
+	/* set extra PHY configuration flags from the device's cfg */
+	phy_cfg_cmd.phy_cfg |= cpu_to_le32(mvm->cfg->extra_phy_cfg_flags);
+
+	phy_cfg_cmd.calib_control.event_trigger =
+		mvm->fw->default_calib[ucode_type].event_trigger;
+	phy_cfg_cmd.calib_control.flow_trigger =
+		mvm->fw->default_calib[ucode_type].flow_trigger;
+
+	IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
+		       phy_cfg_cmd.phy_cfg);
+
+	return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
+				    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
+}
+
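+/*
+ * Run the INIT image on non-unified firmware: boot it, read the NVM, and
+ * (unless RF-kill is asserted) trigger calibrations, collecting the
+ * resulting PHY DB sections via iwl_wait_phy_db_entry() until the firmware
+ * posts INIT_COMPLETE_NOTIF.
+ */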
+int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
+{
+	struct iwl_notification_wait calib_wait;
+	static const u16 init_complete[] = {
+		INIT_COMPLETE_NOTIF,
+		CALIB_RES_NOTIF_PHY_DB
+	};
+	int ret;
+
+	if (iwl_mvm_has_unified_ucode(mvm))
+		return iwl_run_unified_mvm_ucode(mvm, true);
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (WARN_ON_ONCE(mvm->calibrating))
+		return 0;
+
+	iwl_init_notification_wait(&mvm->notif_wait,
+				   &calib_wait,
+				   init_complete,
+				   ARRAY_SIZE(init_complete),
+				   iwl_wait_phy_db_entry,
+				   mvm->phy_db);
+
+	/* Will also start the device */
+	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
+	if (ret) {
+		IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
+		goto remove_notif;
+	}
+
+	if (mvm->cfg->device_family < IWL_DEVICE_FAMILY_8000) {
+		ret = iwl_mvm_send_bt_init_conf(mvm);
+		if (ret)
+			goto remove_notif;
+	}
+
+	/* Read the NVM only at driver load time, no need to do this twice */
+	if (read_nvm) {
+		ret = iwl_nvm_init(mvm);
+		if (ret) {
+			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
+			goto remove_notif;
+		}
+	}
+
+	/* In case we read the NVM from external file, load it to the NIC */
+	if (mvm->nvm_file_name)
+		iwl_mvm_load_nvm_to_nic(mvm);
+
+	WARN_ON(iwl_nvm_check_version(mvm->nvm_data, mvm->trans));
+
+	/*
+	 * abort after reading the nvm in case RF Kill is on, we will complete
+	 * the init seq later when RF kill will switch to off
+	 */
+	if (iwl_mvm_is_radio_hw_killed(mvm)) {
+		IWL_DEBUG_RF_KILL(mvm,
+				  "jump over all phy activities due to RF kill\n");
+		goto remove_notif;
+	}
+
+	mvm->calibrating = true;
+
+	/* Send TX valid antennas before triggering calibrations */
+	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
+	if (ret)
+		goto remove_notif;
+
+	ret = iwl_send_phy_cfg_cmd(mvm);
+	if (ret) {
+		IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
+			ret);
+		goto remove_notif;
+	}
+
+	/*
+	 * Some things may run in the background now, but we
+	 * just wait for the calibration complete notification.
+	 */
+	ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
+				    MVM_UCODE_CALIB_TIMEOUT);
+	if (!ret)
+		goto out;
+
+	if (iwl_mvm_is_radio_hw_killed(mvm)) {
+		IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
+		ret = 0;
+	} else {
+		IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
+			ret);
+	}
+
+	goto out;
+
+remove_notif:
+	iwl_remove_notification(&mvm->notif_wait, &calib_wait);
+out:
+	mvm->calibrating = false;
+	if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
+		/* we want to debug INIT and we have no NVM - fake */
+		mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
+					sizeof(struct ieee80211_channel) +
+					sizeof(struct ieee80211_rate),
+					GFP_KERNEL);
+		if (!mvm->nvm_data)
+			return -ENOMEM;
+		mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
+		mvm->nvm_data->bands[0].n_channels = 1;
+		mvm->nvm_data->bands[0].n_bitrates = 1;
+		mvm->nvm_data->bands[0].bitrates =
+			(void *)mvm->nvm_data->channels + 1;
+		mvm->nvm_data->bands[0].bitrates->hw_value = 10;
+	}
+
+	return ret;
+}
+
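+/*
+ * LTR here refers to PCIe Latency Tolerance Reporting; the command simply
+ * enables the feature in the firmware when the transport reports that the
+ * platform supports it.
+ */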
+static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
+{
+	struct iwl_ltr_config_cmd cmd = {
+		.flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
+	};
+
+	if (!mvm->trans->ltr_enabled)
+		return 0;
+
+	return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
+				    sizeof(cmd), &cmd);
+}
+
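+/*
+ * SAR (Specific Absorption Rate) limits come from three ACPI methods:
+ * WRDS provides profile 1, EWRD provides profiles 2-4, and WGDS provides
+ * per-geography offsets.  BIOS profile N is stored in sar_profiles[N - 1],
+ * since there is no profile 0 (0 means "SAR disabled").
+ */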
+#ifdef CONFIG_ACPI
+static int iwl_mvm_sar_set_profile(struct iwl_mvm *mvm,
+				   union acpi_object *table,
+				   struct iwl_mvm_sar_profile *profile,
+				   bool enabled)
+{
+	int i;
+
+	profile->enabled = enabled;
+
+	for (i = 0; i < ACPI_SAR_TABLE_SIZE; i++) {
+		if ((table[i].type != ACPI_TYPE_INTEGER) ||
+		    (table[i].integer.value > U8_MAX))
+			return -EINVAL;
+
+		profile->table[i] = table[i].integer.value;
+	}
+
+	return 0;
+}
+
+static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
+{
+	union acpi_object *wifi_pkg, *table, *data;
+	bool enabled;
+	int ret;
+
+	data = iwl_acpi_get_object(mvm->dev, ACPI_WRDS_METHOD);
+	if (IS_ERR(data))
+		return PTR_ERR(data);
+
+	wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
+					 ACPI_WRDS_WIFI_DATA_SIZE);
+	if (IS_ERR(wifi_pkg)) {
+		ret = PTR_ERR(wifi_pkg);
+		goto out_free;
+	}
+
+	if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+		ret = -EINVAL;
+		goto out_free;
+	}
+
+	enabled = !!(wifi_pkg->package.elements[1].integer.value);
+
+	/* position of the actual table */
+	table = &wifi_pkg->package.elements[2];
+
+	/* The profile from WRDS is officially profile 1, but goes
+	 * into sar_profiles[0] (because we don't have a profile 0).
+	 */
+	ret = iwl_mvm_sar_set_profile(mvm, table, &mvm->sar_profiles[0],
+				      enabled);
+out_free:
+	kfree(data);
+	return ret;
+}
+
+static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
+{
+	union acpi_object *wifi_pkg, *data;
+	bool enabled;
+	int i, n_profiles, ret, pos;
+
+	data = iwl_acpi_get_object(mvm->dev, ACPI_EWRD_METHOD);
+	if (IS_ERR(data))
+		return PTR_ERR(data);
+
+	wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
+					 ACPI_EWRD_WIFI_DATA_SIZE);
+	if (IS_ERR(wifi_pkg)) {
+		ret = PTR_ERR(wifi_pkg);
+		goto out_free;
+	}
+
+	if ((wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) ||
+	    (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER)) {
+		ret = -EINVAL;
+		goto out_free;
+	}
+
+	enabled = !!(wifi_pkg->package.elements[1].integer.value);
+	n_profiles = wifi_pkg->package.elements[2].integer.value;
+
+	/*
+	 * Check the validity of n_profiles.  The EWRD profiles start
+	 * from index 1, so the maximum value allowed here is
+	 * ACPI_SAR_PROFILE_NUM - 1.
+	 */
+	if (n_profiles <= 0 || n_profiles >= ACPI_SAR_PROFILE_NUM) {
+		ret = -EINVAL;
+		goto out_free;
+	}
+
+	/* the tables start at element 3 */
+	pos = 3;
+
+	for (i = 0; i < n_profiles; i++) {
+
+		/* The EWRD profiles officially go from 2 to 4, but we
+		 * save them in sar_profiles[1-3] (because we don't
+		 * have profile 0).  So in the array we start from 1.
+		 */
+		ret = iwl_mvm_sar_set_profile(mvm,
+					      &wifi_pkg->package.elements[pos],
+					      &mvm->sar_profiles[i + 1],
+					      enabled);
+		if (ret < 0)
+			break;
+
+		/* go to the next table */
+		pos += ACPI_SAR_TABLE_SIZE;
+	}
+
+out_free:
+	kfree(data);
+	return ret;
+}
+
+static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
+{
+	union acpi_object *wifi_pkg, *data;
+	int i, j, ret;
+	int idx = 1;
+
+	data = iwl_acpi_get_object(mvm->dev, ACPI_WGDS_METHOD);
+	if (IS_ERR(data))
+		return PTR_ERR(data);
+
+	wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
+					 ACPI_WGDS_WIFI_DATA_SIZE);
+	if (IS_ERR(wifi_pkg)) {
+		ret = PTR_ERR(wifi_pkg);
+		goto out_free;
+	}
+
+	for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
+		for (j = 0; j < ACPI_GEO_TABLE_SIZE; j++) {
+			union acpi_object *entry;
+
+			entry = &wifi_pkg->package.elements[idx++];
+			if ((entry->type != ACPI_TYPE_INTEGER) ||
+			    (entry->integer.value > U8_MAX)) {
+				ret = -EINVAL;
+				goto out_free;
+			}
+
+			mvm->geo_profiles[i].values[j] = entry->integer.value;
+		}
+	}
+	ret = 0;
+out_free:
+	kfree(data);
+	return ret;
+}
+
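+/*
+ * Each profile table is a flat array of ACPI_SAR_NUM_CHAIN_LIMITS *
+ * ACPI_SAR_NUM_SUB_BANDS entries, indexed chain-major: e.g. with two
+ * chains and five sub-bands, chain 1 / sub-band 2 lives at index
+ * 1 * 5 + 2 = 7.  Values are in units of 1/8 dBm.
+ */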
+int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
+{
+	struct iwl_dev_tx_power_cmd cmd = {
+		.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
+	};
+	int i, j, idx;
+	int profs[ACPI_SAR_NUM_CHAIN_LIMITS] = { prof_a, prof_b };
+	int len = sizeof(cmd);
+
+	BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS < 2);
+	BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS * ACPI_SAR_NUM_SUB_BANDS !=
+		     ACPI_SAR_TABLE_SIZE);
+
+	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
+		len = sizeof(cmd.v3);
+
+	for (i = 0; i < ACPI_SAR_NUM_CHAIN_LIMITS; i++) {
+		struct iwl_mvm_sar_profile *prof;
+
+		/* don't allow SAR to be disabled (profile 0 means disable) */
+		if (profs[i] == 0)
+			return -EPERM;
+
+		/* we are off by one, so allow up to ACPI_SAR_PROFILE_NUM */
+		if (profs[i] > ACPI_SAR_PROFILE_NUM)
+			return -EINVAL;
+
+		/* profiles go from 1 to 4, so decrement to access the array */
+		prof = &mvm->sar_profiles[profs[i] - 1];
+
+		/* if the profile is disabled, do nothing */
+		if (!prof->enabled) {
+			IWL_DEBUG_RADIO(mvm, "SAR profile %d is disabled.\n",
+					profs[i]);
+			/* if one of the profiles is disabled, we fail all */
+			return -ENOENT;
+		}
+
+		IWL_DEBUG_RADIO(mvm, "  Chain[%d]:\n", i);
+		for (j = 0; j < ACPI_SAR_NUM_SUB_BANDS; j++) {
+			idx = (i * ACPI_SAR_NUM_SUB_BANDS) + j;
+			cmd.v3.per_chain_restriction[i][j] =
+				cpu_to_le16(prof->table[idx]);
+			IWL_DEBUG_RADIO(mvm, "    Band[%d] = %d * .125dBm\n",
+					j, prof->table[idx]);
+		}
+	}
+
+	IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
+
+	return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
+}
+
+int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
+{
+	struct iwl_geo_tx_power_profiles_resp *resp;
+	int ret;
+
+	struct iwl_geo_tx_power_profiles_cmd geo_cmd = {
+		.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE),
+	};
+	struct iwl_host_cmd cmd = {
+		.id =  WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT),
+		.len = { sizeof(geo_cmd), },
+		.flags = CMD_WANT_SKB,
+		.data = { &geo_cmd },
+	};
+
+	ret = iwl_mvm_send_cmd(mvm, &cmd);
+	if (ret) {
+		IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret);
+		return ret;
+	}
+
+	resp = (void *)cmd.resp_pkt->data;
+	ret = le32_to_cpu(resp->profile_idx);
+	if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES)) {
+		IWL_WARN(mvm, "Invalid geographic profile idx (%d)\n", ret);
+		ret = -EIO;
+	}
+
+	iwl_free_resp(&cmd);
+	return ret;
+}
+
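+/*
+ * Each WGDS geo profile carries, per band, a group of
+ * ACPI_GEO_PER_CHAIN_SIZE values laid out as
+ * { max_tx_power, chain_a offset, chain_b offset }, which is how the
+ * loop below unpacks them into the GEO_TX_POWER_LIMIT command.
+ */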
+static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
+{
+	struct iwl_geo_tx_power_profiles_cmd cmd = {
+		.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES),
+	};
+	int ret, i, j;
+	u16 cmd_wide_id =  WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
+
+	/*
+	 * This command is not supported on earlier firmware versions.
+	 * Unfortunately, we don't have a TLV API flag to rely on, so
+	 * rely on the major version which is in the first byte of
+	 * ucode_ver.
+	 */
+	if (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) < 41)
+		return 0;
+
+	ret = iwl_mvm_sar_get_wgds_table(mvm);
+	if (ret < 0) {
+		IWL_DEBUG_RADIO(mvm,
+				"Geo SAR BIOS table invalid or unavailable. (%d)\n",
+				ret);
+		/* we don't fail if the table is not available */
+		return 0;
+	}
+
+	IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n");
+
+	BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS *
+		     ACPI_WGDS_TABLE_SIZE + 1 !=  ACPI_WGDS_WIFI_DATA_SIZE);
+
+	BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES > IWL_NUM_GEO_PROFILES);
+
+	for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
+		struct iwl_per_chain_offset *chain =
+			(struct iwl_per_chain_offset *)&cmd.table[i];
+
+		for (j = 0; j < ACPI_WGDS_NUM_BANDS; j++) {
+			u8 *value;
+
+			value = &mvm->geo_profiles[i].values[j *
+				ACPI_GEO_PER_CHAIN_SIZE];
+			chain[j].max_tx_power = cpu_to_le16(value[0]);
+			chain[j].chain_a = value[1];
+			chain[j].chain_b = value[2];
+			IWL_DEBUG_RADIO(mvm,
+					"SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
+					i, j, value[1], value[2], value[0]);
+		}
+	}
+	return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0, sizeof(cmd), &cmd);
+}
+
+#else /* CONFIG_ACPI */
+static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
+{
+	return -ENOENT;
+}
+
+static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
+{
+	return -ENOENT;
+}
+
+static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
+{
+	return -ENOENT;
+}
+
+static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
+{
+	return 0;
+}
+
+int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a,
+			       int prof_b)
+{
+	return -ENOENT;
+}
+
+int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
+{
+	return -ENOENT;
+}
+#endif /* CONFIG_ACPI */
+
+static int iwl_mvm_sar_init(struct iwl_mvm *mvm)
+{
+	int ret;
+
+	ret = iwl_mvm_sar_get_wrds_table(mvm);
+	if (ret < 0) {
+		IWL_DEBUG_RADIO(mvm,
+				"WRDS SAR BIOS table invalid or unavailable. (%d)\n",
+				ret);
+		/*
+		 * If not available, don't fail and don't bother with EWRD.
+		 * Return 1 to tell that we can't use WGDS either.
+		 */
+		return 1;
+	}
+
+	ret = iwl_mvm_sar_get_ewrd_table(mvm);
+	/* if EWRD is not available, we can still use WRDS, so don't fail */
+	if (ret < 0)
+		IWL_DEBUG_RADIO(mvm,
+				"EWRD SAR BIOS table invalid or unavailable. (%d)\n",
+				ret);
+
+	/* choose profile 1 (WRDS) as default for both chains */
+	ret = iwl_mvm_sar_select_profile(mvm, 1, 1);
+
+	/*
+	 * If the BIOS doesn't provide an enabled WRDS profile (the one
+	 * stored in sar_profiles[0]), just skip it.  This means that SAR
+	 * Geo will not be enabled either, even if we have other valid
+	 * profiles.
+	 */
+	if (ret == -ENOENT)
+		return 1;
+
+	return ret;
+}
+
+static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
+{
+	int ret;
+
+	if (iwl_mvm_has_unified_ucode(mvm))
+		return iwl_run_unified_mvm_ucode(mvm, false);
+
+	ret = iwl_run_init_mvm_ucode(mvm, false);
+
+	if (ret) {
+		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
+
+		if (iwlmvm_mod_params.init_dbg)
+			return 0;
+		return ret;
+	}
+
+	/*
+	 * Stop and start the transport without entering low power
+	 * mode. This will save the state of other components on the
+	 * device that are triggered by the INIT firmware (MFUART).
+	 */
+	_iwl_trans_stop_device(mvm->trans, false);
+	ret = _iwl_trans_start_hw(mvm->trans, false);
+	if (ret)
+		return ret;
+
+	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
+	if (ret)
+		return ret;
+
+	return iwl_init_paging(&mvm->fwrt, mvm->fwrt.cur_fw_img);
+}
+
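+/*
+ * Bring the device fully up: start the hardware and runtime firmware,
+ * then push the initial configuration (antennas, PHY DB, BT coex, RX/RSS
+ * queues, station mappings, PHY contexts, thermal, LTR, power, MCC and
+ * scan) before handing control to mac80211.
+ */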
+int iwl_mvm_up(struct iwl_mvm *mvm)
+{
+	int ret, i;
+	struct ieee80211_channel *chan;
+	struct cfg80211_chan_def chandef;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	ret = iwl_trans_start_hw(mvm->trans);
+	if (ret)
+		return ret;
+
+	ret = iwl_mvm_load_rt_fw(mvm);
+	if (ret) {
+		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
+		goto error;
+	}
+
+	iwl_get_shared_mem_conf(&mvm->fwrt);
+
+	ret = iwl_mvm_sf_update(mvm, NULL, false);
+	if (ret)
+		IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");
+
+	mvm->fwrt.dump.conf = FW_DBG_INVALID;
+	/* if we have a destination, assume EARLY START */
+	if (mvm->fw->dbg_dest_tlv)
+		mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE;
+	iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE);
+
+	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
+	if (ret)
+		goto error;
+
+	if (!iwl_mvm_has_unified_ucode(mvm)) {
+		/* Send phy db control command and then phy db calibration */
+		ret = iwl_send_phy_db_data(mvm->phy_db);
+		if (ret)
+			goto error;
+
+		ret = iwl_send_phy_cfg_cmd(mvm);
+		if (ret)
+			goto error;
+	}
+
+	ret = iwl_mvm_send_bt_init_conf(mvm);
+	if (ret)
+		goto error;
+
+	/* Init RSS configuration */
+	if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
+		ret = iwl_configure_rxq(mvm);
+		if (ret) {
+			IWL_ERR(mvm, "Failed to configure RX queues: %d\n",
+				ret);
+			goto error;
+		}
+	}
+
+	if (iwl_mvm_has_new_rx_api(mvm)) {
+		ret = iwl_send_rss_cfg_cmd(mvm);
+		if (ret) {
+			IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
+				ret);
+			goto error;
+		}
+	}
+
+	/* init the fw <-> mac80211 STA mapping */
+	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++)
+		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
+
+	mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
+
+	/* reset quota debouncing buffer - 0xff will yield invalid data */
+	memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));
+
+	ret = iwl_mvm_send_dqa_cmd(mvm);
+	if (ret)
+		goto error;
+
+	/* Add auxiliary station for scanning */
+	ret = iwl_mvm_add_aux_sta(mvm);
+	if (ret)
+		goto error;
+
+	/* Add all the PHY contexts */
+	chan = &mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[0];
+	cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
+	for (i = 0; i < NUM_PHY_CTX; i++) {
+		/*
+		 * The channel used here isn't relevant as it's
+		 * going to be overwritten in the other flows.
+		 * For now use the first channel we have.
+		 */
+		ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i],
+					   &chandef, 1, 1);
+		if (ret)
+			goto error;
+	}
+
+#ifdef CONFIG_THERMAL
+	if (iwl_mvm_is_tt_in_fw(mvm)) {
+		/* In order to hand responsibility for CT-kill and TX
+		 * backoff to the firmware, we need to send an empty
+		 * temperature reporting command during init time.
+		 */
+		iwl_mvm_send_temp_report_ths_cmd(mvm);
+	} else {
+		/* Initialize tx backoffs to the minimal possible */
+		iwl_mvm_tt_tx_backoff(mvm, 0);
+	}
+
+	/* TODO: read the budget from BIOS / Platform NVM */
+
+	/*
+	 * In case there is no budget from BIOS / Platform NVM the default
+	 * budget should be 2000mW (cooling state 0).
+	 */
+	if (iwl_mvm_is_ctdp_supported(mvm)) {
+		ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
+					   mvm->cooling_dev.cur_state);
+		if (ret)
+			goto error;
+	}
+#else
+	/* Initialize tx backoffs to the minimal possible */
+	iwl_mvm_tt_tx_backoff(mvm, 0);
+#endif
+
+	WARN_ON(iwl_mvm_config_ltr(mvm));
+
+	ret = iwl_mvm_power_update_device(mvm);
+	if (ret)
+		goto error;
+
+	/*
+	 * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
+	 * anyway, so don't init MCC.
+	 */
+	if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
+		ret = iwl_mvm_init_mcc(mvm);
+		if (ret)
+			goto error;
+	}
+
+	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+		mvm->scan_type = IWL_SCAN_TYPE_NOT_SET;
+		mvm->hb_scan_type = IWL_SCAN_TYPE_NOT_SET;
+		ret = iwl_mvm_config_scan(mvm);
+		if (ret)
+			goto error;
+	}
+
+	/* allow FW/transport low power modes if not during restart */
+	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+		iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
+
+	ret = iwl_mvm_sar_init(mvm);
+	if (ret == 0) {
+		ret = iwl_mvm_sar_geo_init(mvm);
+	} else if (ret > 0 && !iwl_mvm_sar_get_wgds_table(mvm)) {
+		/*
+		 * If basic SAR is not available, we check for WGDS,
+		 * which should *not* be available either.  If it is
+		 * available, issue an error, because we can't use SAR
+		 * Geo without basic SAR.
+		 */
+		IWL_ERR(mvm, "BIOS contains WGDS but no WRDS\n");
+	}
+
+	if (ret < 0)
+		goto error;
+
+	iwl_mvm_leds_sync(mvm);
+
+	IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
+	return 0;
+ error:
+	if (!iwlmvm_mod_params.init_dbg || !ret)
+		iwl_mvm_stop_device(mvm);
+	return ret;
+}
+
+int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
+{
+	int ret, i;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	ret = iwl_trans_start_hw(mvm->trans);
+	if (ret)
+		return ret;
+
+	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN);
+	if (ret) {
+		IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret);
+		goto error;
+	}
+
+	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
+	if (ret)
+		goto error;
+
+	/* Send phy db control command and then phy db calibration*/
+	ret = iwl_send_phy_db_data(mvm->phy_db);
+	if (ret)
+		goto error;
+
+	ret = iwl_send_phy_cfg_cmd(mvm);
+	if (ret)
+		goto error;
+
+	/* init the fw <-> mac80211 STA mapping */
+	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++)
+		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
+
+	/* Add auxiliary station for scanning */
+	ret = iwl_mvm_add_aux_sta(mvm);
+	if (ret)
+		goto error;
+
+	return 0;
+ error:
+	iwl_mvm_stop_device(mvm);
+	return ret;
+}
+
+void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
+				 struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
+	u32 flags = le32_to_cpu(card_state_notif->flags);
+
+	IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n",
+			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
+			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
+			  (flags & CT_KILL_CARD_DISABLED) ?
+			  "Reached" : "Not reached");
+}
+
+void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
+			     struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;
+
+	IWL_DEBUG_INFO(mvm,
+		       "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
+		       le32_to_cpu(mfuart_notif->installed_ver),
+		       le32_to_cpu(mfuart_notif->external_ver),
+		       le32_to_cpu(mfuart_notif->status),
+		       le32_to_cpu(mfuart_notif->duration));
+
+	if (iwl_rx_packet_payload_len(pkt) == sizeof(*mfuart_notif))
+		IWL_DEBUG_INFO(mvm,
+			       "MFUART: image size: 0x%08x\n",
+			       le32_to_cpu(mfuart_notif->image_size));
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/led.c b/drivers/net/wireless/intel/iwlwifi/mvm/led.c
new file mode 100644
index 0000000..b272695
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/led.c
@@ -0,0 +1,175 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2017        Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2017        Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/leds.h>
+#include "iwl-io.h"
+#include "iwl-csr.h"
+#include "mvm.h"
+
+static void iwl_mvm_send_led_fw_cmd(struct iwl_mvm *mvm, bool on)
+{
+	struct iwl_led_cmd led_cmd = {
+		.status = cpu_to_le32(on),
+	};
+	struct iwl_host_cmd cmd = {
+		.id = WIDE_ID(LONG_GROUP, LEDS_CMD),
+		.len = { sizeof(led_cmd), },
+		.data = { &led_cmd, },
+		.flags = CMD_ASYNC,
+	};
+	int err;
+
+	if (!iwl_mvm_firmware_running(mvm))
+		return;
+
+	err = iwl_mvm_send_cmd(mvm, &cmd);
+
+	if (err)
+		IWL_WARN(mvm, "LED command failed: %d\n", err);
+}
+
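+/*
+ * Newer firmware advertises IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT and takes
+ * an LED command; otherwise the LED is driven directly through the
+ * CSR_LED_REG register.
+ */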
+static void iwl_mvm_led_set(struct iwl_mvm *mvm, bool on)
+{
+	if (fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_LED_CMD_SUPPORT)) {
+		iwl_mvm_send_led_fw_cmd(mvm, on);
+		return;
+	}
+
+	iwl_write32(mvm->trans, CSR_LED_REG,
+		    on ? CSR_LED_REG_TURN_ON : CSR_LED_REG_TURN_OFF);
+}
+
+static void iwl_led_brightness_set(struct led_classdev *led_cdev,
+				   enum led_brightness brightness)
+{
+	struct iwl_mvm *mvm = container_of(led_cdev, struct iwl_mvm, led);
+
+	iwl_mvm_led_set(mvm, brightness > 0);
+}
+
+int iwl_mvm_leds_init(struct iwl_mvm *mvm)
+{
+	int mode = iwlwifi_mod_params.led_mode;
+	int ret;
+
+	switch (mode) {
+	case IWL_LED_BLINK:
+		IWL_ERR(mvm, "Blink led mode not supported, using default\n");
+		/* fall through */
+	case IWL_LED_DEFAULT:
+	case IWL_LED_RF_STATE:
+		mode = IWL_LED_RF_STATE;
+		break;
+	case IWL_LED_DISABLE:
+		IWL_INFO(mvm, "Led disabled\n");
+		return 0;
+	default:
+		return -EINVAL;
+	}
+
+	mvm->led.name = kasprintf(GFP_KERNEL, "%s-led",
+				   wiphy_name(mvm->hw->wiphy));
+	mvm->led.brightness_set = iwl_led_brightness_set;
+	mvm->led.max_brightness = 1;
+
+	if (mode == IWL_LED_RF_STATE)
+		mvm->led.default_trigger =
+			ieee80211_get_radio_led_name(mvm->hw);
+
+	ret = led_classdev_register(mvm->trans->dev, &mvm->led);
+	if (ret) {
+		kfree(mvm->led.name);
+		IWL_INFO(mvm, "Failed to enable led\n");
+		return ret;
+	}
+
+	mvm->init_status |= IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE;
+	return 0;
+}
+
+void iwl_mvm_leds_sync(struct iwl_mvm *mvm)
+{
+	if (!(mvm->init_status & IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE))
+		return;
+
+	/*
+	 * if we control through the register, we're doing it
+	 * even when the firmware isn't up, so no need to sync
+	 */
+	if (mvm->cfg->device_family < IWL_DEVICE_FAMILY_8000)
+		return;
+
+	iwl_mvm_led_set(mvm, mvm->led.brightness > 0);
+}
+
+void iwl_mvm_leds_exit(struct iwl_mvm *mvm)
+{
+	if (!(mvm->init_status & IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE))
+		return;
+
+	led_classdev_unregister(&mvm->led);
+	kfree(mvm->led.name);
+	mvm->init_status &= ~IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
new file mode 100644
index 0000000..b3fd205
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -0,0 +1,1614 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+#include "iwl-io.h"
+#include "iwl-prph.h"
+#include "fw-api.h"
+#include "mvm.h"
+#include "time-event.h"
+
+const u8 iwl_mvm_ac_to_tx_fifo[] = {
+	IWL_MVM_TX_FIFO_VO,
+	IWL_MVM_TX_FIFO_VI,
+	IWL_MVM_TX_FIFO_BE,
+	IWL_MVM_TX_FIFO_BK,
+};
+
+const u8 iwl_mvm_ac_to_gen2_tx_fifo[] = {
+	IWL_GEN2_EDCA_TX_FIFO_VO,
+	IWL_GEN2_EDCA_TX_FIFO_VI,
+	IWL_GEN2_EDCA_TX_FIFO_BE,
+	IWL_GEN2_EDCA_TX_FIFO_BK,
+};
+
+struct iwl_mvm_mac_iface_iterator_data {
+	struct iwl_mvm *mvm;
+	struct ieee80211_vif *vif;
+	unsigned long available_mac_ids[BITS_TO_LONGS(NUM_MAC_INDEX_DRIVER)];
+	unsigned long available_tsf_ids[BITS_TO_LONGS(NUM_TSF_IDS)];
+	enum iwl_tsf_id preferred_tsf;
+	bool found_vif;
+};
+
+struct iwl_mvm_hw_queues_iface_iterator_data {
+	struct ieee80211_vif *exclude_vif;
+	unsigned long used_hw_queues;
+};
+
+static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
+				    struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_mac_iface_iterator_data *data = _data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	u16 min_bi;
+
+	/* Skip the interface for which we are trying to assign a tsf_id */
+	if (vif == data->vif)
+		return;
+
+	/*
+	 * The TSF is a hardware/firmware resource, there are 4 and
+	 * the driver should assign and free them as needed. However,
+	 * there are cases where 2 MACs should share the same TSF ID
+	 * for the purpose of clock sync, an optimization to avoid
+	 * clock drift causing overlapping TBTTs/DTIMs for a GO and
+	 * client in the system.
+	 *
+	 * The firmware will decide according to the MAC type which
+	 * will be the master and slave. Clients that need to sync
+	 * with a remote station will be the master, and an AP or GO
+	 * will be the slave.
+	 *
+	 * Depending on the new interface type it can be slaved to
+	 * or become the master of an existing interface.
+	 */
+	switch (data->vif->type) {
+	case NL80211_IFTYPE_STATION:
+		/*
+		 * The new interface is a client, so if the one we're iterating
+		 * is an AP, and the beacon interval of the AP is a multiple or
+		 * divisor of the beacon interval of the client, the same TSF
+		 * should be used to avoid drift between the new client and
+		 * existing AP. The existing AP will get drift updates from the
+		 * new client context in this case.
+		 */
+		if (vif->type != NL80211_IFTYPE_AP ||
+		    data->preferred_tsf != NUM_TSF_IDS ||
+		    !test_bit(mvmvif->tsf_id, data->available_tsf_ids))
+			break;
+
+		min_bi = min(data->vif->bss_conf.beacon_int,
+			     vif->bss_conf.beacon_int);
+
+		if (!min_bi)
+			break;
+
+		if ((data->vif->bss_conf.beacon_int -
+		     vif->bss_conf.beacon_int) % min_bi == 0) {
+			data->preferred_tsf = mvmvif->tsf_id;
+			return;
+		}
+		break;
+
+	case NL80211_IFTYPE_AP:
+		/*
+		 * The new interface is AP/GO, so if its beacon interval is a
+		 * multiple or a divisor of the beacon interval of an existing
+		 * interface, it should get drift updates from an existing
+		 * client or use the same TSF as an existing GO. There's no
+		 * drift between TSFs internally but if they used different
+		 * TSFs then a new client MAC could update one of them and
+		 * cause drift that way.
+		 */
+		if ((vif->type != NL80211_IFTYPE_AP &&
+		     vif->type != NL80211_IFTYPE_STATION) ||
+		    data->preferred_tsf != NUM_TSF_IDS ||
+		    !test_bit(mvmvif->tsf_id, data->available_tsf_ids))
+			break;
+
+		min_bi = min(data->vif->bss_conf.beacon_int,
+			     vif->bss_conf.beacon_int);
+
+		if (!min_bi)
+			break;
+
+		if ((data->vif->bss_conf.beacon_int -
+		     vif->bss_conf.beacon_int) % min_bi == 0) {
+			data->preferred_tsf = mvmvif->tsf_id;
+			return;
+		}
+		break;
+	default:
+		/*
+		 * For all other interface types there's no need to
+		 * take drift into account. Either they're exclusive
+		 * like IBSS and monitor, or we don't care much about
+		 * their TSF (like P2P Device), but we won't be able
+		 * to share the TSF resource.
+		 */
+		break;
+	}
+
+	/*
+	 * Unless we exited above, we can't share the TSF resource
+	 * that the virtual interface we're iterating over is using
+	 * with the new one, so clear the available bit and if this
+	 * was the preferred one, reset that as well.
+	 */
+	__clear_bit(mvmvif->tsf_id, data->available_tsf_ids);
+
+	if (data->preferred_tsf == mvmvif->tsf_id)
+		data->preferred_tsf = NUM_TSF_IDS;
+}
+
+/*
+ * Get the mask of the queues used by the vif
+ */
+u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif)
+{
+	u32 qmask = 0, ac;
+
+	if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+		return BIT(IWL_MVM_OFFCHANNEL_QUEUE);
+
+	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+		if (vif->hw_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
+			qmask |= BIT(vif->hw_queue[ac]);
+	}
+
+	if (vif->type == NL80211_IFTYPE_AP ||
+	    vif->type == NL80211_IFTYPE_ADHOC)
+		qmask |= BIT(vif->cab_queue);
+
+	return qmask;
+}
+
+static void iwl_mvm_iface_hw_queues_iter(void *_data, u8 *mac,
+					 struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_hw_queues_iface_iterator_data *data = _data;
+
+	/* exclude the given vif */
+	if (vif == data->exclude_vif)
+		return;
+
+	data->used_hw_queues |= iwl_mvm_mac_get_queues_mask(vif);
+}
+
+unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
+					 struct ieee80211_vif *exclude_vif)
+{
+	struct iwl_mvm_hw_queues_iface_iterator_data data = {
+		.exclude_vif = exclude_vif,
+		.used_hw_queues =
+			BIT(IWL_MVM_OFFCHANNEL_QUEUE) |
+			BIT(mvm->aux_queue) |
+			BIT(IWL_MVM_DQA_GCAST_QUEUE),
+	};
+
+	lockdep_assert_held(&mvm->mutex);
+
+	/* mark all VIF used hw queues */
+	ieee80211_iterate_active_interfaces_atomic(
+		mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+		iwl_mvm_iface_hw_queues_iter, &data);
+
+	return data.used_hw_queues;
+}
+
+static void iwl_mvm_mac_iface_iterator(void *_data, u8 *mac,
+				       struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_mac_iface_iterator_data *data = _data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	/* Iterator may already find the interface being added -- skip it */
+	if (vif == data->vif) {
+		data->found_vif = true;
+		return;
+	}
+
+	/* Mark MAC IDs as used by clearing the available bit, and
+	 * (below) mark TSFs as used if their existing use is not
+	 * compatible with the new interface type.
+	 * No locking or atomic bit operations are needed since the
+	 * data is on the stack of the caller function.
+	 */
+	__clear_bit(mvmvif->id, data->available_mac_ids);
+
+	/* find a suitable tsf_id */
+	iwl_mvm_mac_tsf_id_iter(_data, mac, vif);
+}
+
+void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
+				    struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm_mac_iface_iterator_data data = {
+		.mvm = mvm,
+		.vif = vif,
+		.available_tsf_ids = { (1 << NUM_TSF_IDS) - 1 },
+		/* no preference yet */
+		.preferred_tsf = NUM_TSF_IDS,
+	};
+
+	ieee80211_iterate_active_interfaces_atomic(
+		mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+		iwl_mvm_mac_tsf_id_iter, &data);
+
+	if (data.preferred_tsf != NUM_TSF_IDS)
+		mvmvif->tsf_id = data.preferred_tsf;
+	else if (!test_bit(mvmvif->tsf_id, data.available_tsf_ids))
+		mvmvif->tsf_id = find_first_bit(data.available_tsf_ids,
+						NUM_TSF_IDS);
+}
+
+int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm_mac_iface_iterator_data data = {
+		.mvm = mvm,
+		.vif = vif,
+		.available_mac_ids = { (1 << NUM_MAC_INDEX_DRIVER) - 1 },
+		.available_tsf_ids = { (1 << NUM_TSF_IDS) - 1 },
+		/* no preference yet */
+		.preferred_tsf = NUM_TSF_IDS,
+		.found_vif = false,
+	};
+	u32 ac;
+	int ret, i, queue_limit;
+	unsigned long used_hw_queues;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	/*
+	 * Allocate a MAC ID and a TSF for this MAC, along with the queues
+	 * and other resources.
+	 */
+
+	/*
+	 * Before the iterator, we start with all MAC IDs and TSFs available.
+	 *
+	 * During iteration, all MAC IDs are cleared that are in use by other
+	 * virtual interfaces, and all TSF IDs are cleared that can't be used
+	 * by this new virtual interface because they're used by an interface
+	 * that can't share it with the new one.
+	 * At the same time, we check if there's a preferred TSF in the case
+	 * that we should share it with another interface.
+	 */
+
+	/* Currently, MAC ID 0 should be used only for the managed/IBSS vif */
+	switch (vif->type) {
+	case NL80211_IFTYPE_ADHOC:
+		break;
+	case NL80211_IFTYPE_STATION:
+		if (!vif->p2p)
+			break;
+		/* fall through */
+	default:
+		__clear_bit(0, data.available_mac_ids);
+	}
+
+	ieee80211_iterate_active_interfaces_atomic(
+		mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+		iwl_mvm_mac_iface_iterator, &data);
+
+	used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, vif);
+
+	/*
+	 * In the case we're getting here during resume, it's similar to
+	 * firmware restart, and with RESUME_ALL the iterator will find
+	 * the vif being added already.
+	 * We don't want to reassign any IDs in either case since doing
+	 * so would probably assign different IDs (as interfaces aren't
+	 * necessarily added in the same order), but the old IDs were
+	 * preserved anyway, so skip ID assignment for both resume and
+	 * recovery.
+	 */
+	if (data.found_vif)
+		return 0;
+
+	/* Therefore, in recovery, we can't get here */
+	if (WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
+		return -EBUSY;
+
+	mvmvif->id = find_first_bit(data.available_mac_ids,
+				    NUM_MAC_INDEX_DRIVER);
+	if (mvmvif->id == NUM_MAC_INDEX_DRIVER) {
+		IWL_ERR(mvm, "Failed to init MAC context - no free ID!\n");
+		ret = -EIO;
+		goto exit_fail;
+	}
+
+	if (data.preferred_tsf != NUM_TSF_IDS)
+		mvmvif->tsf_id = data.preferred_tsf;
+	else
+		mvmvif->tsf_id = find_first_bit(data.available_tsf_ids,
+						NUM_TSF_IDS);
+	if (mvmvif->tsf_id == NUM_TSF_IDS) {
+		IWL_ERR(mvm, "Failed to init MAC context - no free TSF!\n");
+		ret = -EIO;
+		goto exit_fail;
+	}
+
+	mvmvif->color = 0;
+
+	INIT_LIST_HEAD(&mvmvif->time_event_data.list);
+	mvmvif->time_event_data.id = TE_MAX;
+
+	/* No need to allocate data queues to P2P Device MAC. */
+	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+		for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+			vif->hw_queue[ac] = IEEE80211_INVAL_HW_QUEUE;
+
+		return 0;
+	}
+
+	/*
+	 * Queues in mac80211 are almost entirely independent of
+	 * the ones here - no real limit.
+	 */
+	queue_limit = IEEE80211_MAX_QUEUES;
+	BUILD_BUG_ON(IEEE80211_MAX_QUEUES >
+		     BITS_PER_BYTE *
+		     sizeof(mvm->hw_queue_to_mac80211[0]));
+
+	/*
+	 * Find available queues, and allocate them to the ACs. When in
+	 * DQA-mode they aren't really used, and this is done only so the
+	 * mac80211 ieee80211_check_queues() function won't fail
+	 */
+	for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+		u8 queue = find_first_zero_bit(&used_hw_queues, queue_limit);
+
+		if (queue >= queue_limit) {
+			IWL_ERR(mvm, "Failed to allocate queue\n");
+			ret = -EIO;
+			goto exit_fail;
+		}
+
+		__set_bit(queue, &used_hw_queues);
+		vif->hw_queue[ac] = queue;
+	}
+
+	/* Allocate the CAB queue for softAP and GO interfaces */
+	if (vif->type == NL80211_IFTYPE_AP ||
+	    vif->type == NL80211_IFTYPE_ADHOC) {
+		/*
+		 * For TVQM this will be overwritten later with the FW assigned
+		 * queue value (when queue is enabled).
+		 */
+		mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
+		vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
+	} else {
+		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
+	}
+
+	mvmvif->bcast_sta.sta_id = IWL_MVM_INVALID_STA;
+	mvmvif->mcast_sta.sta_id = IWL_MVM_INVALID_STA;
+	mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
+
+	for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++)
+		mvmvif->smps_requests[i] = IEEE80211_SMPS_AUTOMATIC;
+
+	return 0;
+
+exit_fail:
+	memset(mvmvif, 0, sizeof(struct iwl_mvm_vif));
+	memset(vif->hw_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(vif->hw_queue));
+	vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
+	return ret;
+}
+
+static void iwl_mvm_ack_rates(struct iwl_mvm *mvm,
+			      struct ieee80211_vif *vif,
+			      enum nl80211_band band,
+			      u8 *cck_rates, u8 *ofdm_rates)
+{
+	struct ieee80211_supported_band *sband;
+	unsigned long basic = vif->bss_conf.basic_rates;
+	int lowest_present_ofdm = 100;
+	int lowest_present_cck = 100;
+	u8 cck = 0;
+	u8 ofdm = 0;
+	int i;
+
+	sband = mvm->hw->wiphy->bands[band];
+
+	for_each_set_bit(i, &basic, BITS_PER_LONG) {
+		int hw = sband->bitrates[i].hw_value;
+		if (hw >= IWL_FIRST_OFDM_RATE) {
+			ofdm |= BIT(hw - IWL_FIRST_OFDM_RATE);
+			if (lowest_present_ofdm > hw)
+				lowest_present_ofdm = hw;
+		} else {
+			BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
+
+			cck |= BIT(hw);
+			if (lowest_present_cck > hw)
+				lowest_present_cck = hw;
+		}
+	}
+
+	/*
+	 * Now we've got the basic rates as bitmaps in the ofdm and cck
+	 * variables. This isn't sufficient though, as there might not
+	 * be all the right rates in the bitmap. E.g. if the only basic
+	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
+	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
+	 *
+	 *    [...] a STA responding to a received frame shall transmit
+	 *    its Control Response frame [...] at the highest rate in the
+	 *    BSSBasicRateSet parameter that is less than or equal to the
+	 *    rate of the immediately previous frame in the frame exchange
+	 *    sequence ([...]) and that is of the same modulation class
+	 *    ([...]) as the received frame. If no rate contained in the
+	 *    BSSBasicRateSet parameter meets these conditions, then the
+	 *    control frame sent in response to a received frame shall be
+	 *    transmitted at the highest mandatory rate of the PHY that is
+	 *    less than or equal to the rate of the received frame, and
+	 *    that is of the same modulation class as the received frame.
+	 *
+	 * As a consequence, we need to add all mandatory rates that are
+	 * lower than all of the basic rates to these bitmaps.
+	 */
+
+	if (IWL_RATE_24M_INDEX < lowest_present_ofdm)
+		ofdm |= IWL_RATE_BIT_MSK(24) >> IWL_FIRST_OFDM_RATE;
+	if (IWL_RATE_12M_INDEX < lowest_present_ofdm)
+		ofdm |= IWL_RATE_BIT_MSK(12) >> IWL_FIRST_OFDM_RATE;
+	/* 6M already there or needed so always add */
+	ofdm |= IWL_RATE_BIT_MSK(6) >> IWL_FIRST_OFDM_RATE;
+
+	/*
+	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
+	 * Note, however:
+	 *  - if no CCK rates are basic, it must be ERP since there must
+	 *    be some basic rates at all, so they're OFDM => ERP PHY
+	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
+	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
+	 *  - if 5.5M is basic, 1M and 2M are mandatory
+	 *  - if 2M is basic, 1M is mandatory
+	 *  - if 1M is basic, that's the only valid ACK rate.
+	 * As a consequence, it's not as complicated as it sounds, just add
+	 * any lower rates to the ACK rate bitmap.
+	 */
+	if (IWL_RATE_11M_INDEX < lowest_present_cck)
+		cck |= IWL_RATE_BIT_MSK(11) >> IWL_FIRST_CCK_RATE;
+	if (IWL_RATE_5M_INDEX < lowest_present_cck)
+		cck |= IWL_RATE_BIT_MSK(5) >> IWL_FIRST_CCK_RATE;
+	if (IWL_RATE_2M_INDEX < lowest_present_cck)
+		cck |= IWL_RATE_BIT_MSK(2) >> IWL_FIRST_CCK_RATE;
+	/* 1M already there or needed so always add */
+	cck |= IWL_RATE_BIT_MSK(1) >> IWL_FIRST_CCK_RATE;
+
+	*cck_rates = cck;
+	*ofdm_rates = ofdm;
+}
+
+static void iwl_mvm_mac_ctxt_set_ht_flags(struct iwl_mvm *mvm,
+					 struct ieee80211_vif *vif,
+					 struct iwl_mac_ctx_cmd *cmd)
+{
+	/* for both sta and ap, ht_operation_mode holds the protection_mode */
+	u8 protection_mode = vif->bss_conf.ht_operation_mode &
+				 IEEE80211_HT_OP_MODE_PROTECTION;
+	/* The fw does not distinguish between ht and fat */
+	u32 ht_flag = MAC_PROT_FLG_HT_PROT | MAC_PROT_FLG_FAT_PROT;
+
+	IWL_DEBUG_RATE(mvm, "protection mode set to %d\n", protection_mode);
+	/*
+	 * See section 9.23.3.1 of IEEE 80211-2012.
+	 * Nongreenfield HT STAs Present is not supported.
+	 */
+	switch (protection_mode) {
+	case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
+		break;
+	case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
+	case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
+		cmd->protection_flags |= cpu_to_le32(ht_flag);
+		break;
+	case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
+		/* Protect when channel wider than 20MHz */
+		if (vif->bss_conf.chandef.width > NL80211_CHAN_WIDTH_20)
+			cmd->protection_flags |= cpu_to_le32(ht_flag);
+		break;
+	default:
+		IWL_ERR(mvm, "Illegal protection mode %d\n",
+			protection_mode);
+		break;
+	}
+}
+
+static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
+					struct ieee80211_vif *vif,
+					struct iwl_mac_ctx_cmd *cmd,
+					const u8 *bssid_override,
+					u32 action)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct ieee80211_chanctx_conf *chanctx;
+	bool ht_enabled = !!(vif->bss_conf.ht_operation_mode &
+			     IEEE80211_HT_OP_MODE_PROTECTION);
+	u8 cck_ack_rates, ofdm_ack_rates;
+	const u8 *bssid = bssid_override ?: vif->bss_conf.bssid;
+	int i;
+
+	cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+							    mvmvif->color));
+	cmd->action = cpu_to_le32(action);
+
+	switch (vif->type) {
+	case NL80211_IFTYPE_STATION:
+		if (vif->p2p)
+			cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_P2P_STA);
+		else
+			cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_BSS_STA);
+		break;
+	case NL80211_IFTYPE_AP:
+		cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_GO);
+		break;
+	case NL80211_IFTYPE_MONITOR:
+		cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_LISTENER);
+		break;
+	case NL80211_IFTYPE_P2P_DEVICE:
+		cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_P2P_DEVICE);
+		break;
+	case NL80211_IFTYPE_ADHOC:
+		cmd->mac_type = cpu_to_le32(FW_MAC_TYPE_IBSS);
+		break;
+	default:
+		WARN_ON_ONCE(1);
+	}
+
+	cmd->tsf_id = cpu_to_le32(mvmvif->tsf_id);
+
+	memcpy(cmd->node_addr, vif->addr, ETH_ALEN);
+
+	if (bssid)
+		memcpy(cmd->bssid_addr, bssid, ETH_ALEN);
+	else
+		eth_broadcast_addr(cmd->bssid_addr);
+
+	rcu_read_lock();
+	chanctx = rcu_dereference(vif->chanctx_conf);
+	iwl_mvm_ack_rates(mvm, vif, chanctx ? chanctx->def.chan->band
+					    : NL80211_BAND_2GHZ,
+			  &cck_ack_rates, &ofdm_ack_rates);
+	rcu_read_unlock();
+
+	cmd->cck_rates = cpu_to_le32((u32)cck_ack_rates);
+	cmd->ofdm_rates = cpu_to_le32((u32)ofdm_ack_rates);
+
+	cmd->cck_short_preamble =
+		cpu_to_le32(vif->bss_conf.use_short_preamble ?
+			    MAC_FLG_SHORT_PREAMBLE : 0);
+	cmd->short_slot =
+		cpu_to_le32(vif->bss_conf.use_short_slot ?
+			    MAC_FLG_SHORT_SLOT : 0);
+
+	cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP);
+
+	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+		u8 txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, i);
+
+		cmd->ac[txf].cw_min =
+			cpu_to_le16(mvmvif->queue_params[i].cw_min);
+		cmd->ac[txf].cw_max =
+			cpu_to_le16(mvmvif->queue_params[i].cw_max);
+		cmd->ac[txf].edca_txop =
+			cpu_to_le16(mvmvif->queue_params[i].txop * 32);
+		cmd->ac[txf].aifsn = mvmvif->queue_params[i].aifs;
+		cmd->ac[txf].fifos_mask = BIT(txf);
+	}
+
+	if (vif->bss_conf.qos)
+		cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
+
+	if (vif->bss_conf.use_cts_prot)
+		cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
+
+	IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n",
+		       vif->bss_conf.use_cts_prot,
+		       vif->bss_conf.ht_operation_mode);
+	if (vif->bss_conf.chandef.width != NL80211_CHAN_WIDTH_20_NOHT)
+		cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_TGN);
+	if (ht_enabled)
+		iwl_mvm_mac_ctxt_set_ht_flags(mvm, vif, cmd);
+}
+
+static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm,
+				     struct iwl_mac_ctx_cmd *cmd)
+{
+	int ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0,
+				       sizeof(*cmd), cmd);
+	if (ret)
+		IWL_ERR(mvm, "Failed to send MAC context (action:%d): %d\n",
+			le32_to_cpu(cmd->action), ret);
+	return ret;
+}
+
+static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
+				    struct ieee80211_vif *vif,
+				    u32 action, bool force_assoc_off,
+				    const u8 *bssid_override)
+{
+	struct iwl_mac_ctx_cmd cmd = {};
+	struct iwl_mac_data_sta *ctxt_sta;
+
+	WARN_ON(vif->type != NL80211_IFTYPE_STATION);
+
+	/* Fill the common data for all mac context types */
+	iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, bssid_override, action);
+
+	if (vif->p2p) {
+		struct ieee80211_p2p_noa_attr *noa =
+			&vif->bss_conf.p2p_noa_attr;
+
+		cmd.p2p_sta.ctwin = cpu_to_le32(noa->oppps_ctwindow &
+					IEEE80211_P2P_OPPPS_CTWINDOW_MASK);
+		ctxt_sta = &cmd.p2p_sta.sta;
+	} else {
+		ctxt_sta = &cmd.sta;
+	}
+
+	/* We need the dtim_period to set the MAC as associated */
+	if (vif->bss_conf.assoc && vif->bss_conf.dtim_period &&
+	    !force_assoc_off) {
+		u32 dtim_offs;
+
+		/*
+		 * The DTIM count counts down, so when it is N that means N
+		 * more beacon intervals happen until the DTIM TBTT. Therefore
+		 * add this to the current time. If that ends up being in the
+		 * future, the firmware will handle it.
+		 *
+		 * Also note that the system_timestamp (which we get here as
+		 * "sync_device_ts") and TSF timestamp aren't at exactly the
+		 * same offset in the frame -- the TSF is at the first symbol
+		 * of the TSF, the system timestamp is at signal acquisition
+		 * time. This means there's an offset between them of at most
+		 * a few hundred microseconds (24 * 8 bits + PLCP time gives
+		 * 384us in the longest case), this is currently not relevant
+		 * as the firmware wakes up around 2ms before the TBTT.
+		 */
+		dtim_offs = vif->bss_conf.sync_dtim_count *
+				vif->bss_conf.beacon_int;
+		/* convert TU to usecs */
+		dtim_offs *= 1024;
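+		/* e.g. sync_dtim_count 2 at beacon_int 100 TU -> 200 * 1024 = 204800 usecs */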
+
+		ctxt_sta->dtim_tsf =
+			cpu_to_le64(vif->bss_conf.sync_tsf + dtim_offs);
+		ctxt_sta->dtim_time =
+			cpu_to_le32(vif->bss_conf.sync_device_ts + dtim_offs);
+		ctxt_sta->assoc_beacon_arrive_time =
+			cpu_to_le32(vif->bss_conf.sync_device_ts);
+
+		IWL_DEBUG_INFO(mvm, "DTIM TBTT is 0x%llx/0x%x, offset %d\n",
+			       le64_to_cpu(ctxt_sta->dtim_tsf),
+			       le32_to_cpu(ctxt_sta->dtim_time),
+			       dtim_offs);
+
+		ctxt_sta->is_assoc = cpu_to_le32(1);
+	} else {
+		ctxt_sta->is_assoc = cpu_to_le32(0);
+
+		/* Allow beacons to pass through as long as we are not
+		 * associated, or we do not have dtim period information.
+		 */
+		cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
+	}
+
+	ctxt_sta->bi = cpu_to_le32(vif->bss_conf.beacon_int);
+	ctxt_sta->bi_reciprocal =
+		cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int));
+	ctxt_sta->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int *
+					      vif->bss_conf.dtim_period);
+	ctxt_sta->dtim_reciprocal =
+		cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int *
+					       vif->bss_conf.dtim_period));
+
+	ctxt_sta->listen_interval = cpu_to_le32(mvm->hw->conf.listen_interval);
+	ctxt_sta->assoc_id = cpu_to_le32(vif->bss_conf.aid);
+
+	if (vif->probe_req_reg && vif->bss_conf.assoc && vif->p2p)
+		cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
+
+	if (vif->bss_conf.assoc && vif->bss_conf.he_support &&
+	    !iwlwifi_mod_params.disable_11ax)
+		cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX);
+
+	return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+static int iwl_mvm_mac_ctxt_cmd_listener(struct iwl_mvm *mvm,
+					 struct ieee80211_vif *vif,
+					 u32 action)
+{
+	struct iwl_mac_ctx_cmd cmd = {};
+	u32 tfd_queue_msk = BIT(mvm->snif_queue);
+	int ret;
+
+	WARN_ON(vif->type != NL80211_IFTYPE_MONITOR);
+
+	iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
+
+	cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROMISC |
+				       MAC_FILTER_IN_CONTROL_AND_MGMT |
+				       MAC_FILTER_IN_BEACON |
+				       MAC_FILTER_IN_PROBE_REQUEST |
+				       MAC_FILTER_IN_CRC32);
+	ieee80211_hw_set(mvm->hw, RX_INCLUDES_FCS);
+
+	/* Allocate sniffer station */
+	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->snif_sta, tfd_queue_msk,
+				       vif->type, IWL_STA_GENERAL_PURPOSE);
+	if (ret)
+		return ret;
+
+	return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+static int iwl_mvm_mac_ctxt_cmd_ibss(struct iwl_mvm *mvm,
+				     struct ieee80211_vif *vif,
+				     u32 action)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mac_ctx_cmd cmd = {};
+
+	WARN_ON(vif->type != NL80211_IFTYPE_ADHOC);
+
+	iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
+
+	cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_BEACON |
+				       MAC_FILTER_IN_PROBE_REQUEST);
+
+	/* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are currently ignored */
+	cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int);
+	cmd.ibss.bi_reciprocal =
+		cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int));
+
+	/* TODO: Assumes that the beacon id == mac context id */
+	cmd.ibss.beacon_template = cpu_to_le32(mvmvif->id);
+
+	return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+struct iwl_mvm_go_iterator_data {
+	bool go_active;
+};
+
+static void iwl_mvm_go_iterator(void *_data, u8 *mac, struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_go_iterator_data *data = _data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
+	    mvmvif->ap_ibss_active)
+		data->go_active = true;
+}
+
+static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm,
+					   struct ieee80211_vif *vif,
+					   u32 action)
+{
+	struct iwl_mac_ctx_cmd cmd = {};
+	struct iwl_mvm_go_iterator_data data = {};
+
+	WARN_ON(vif->type != NL80211_IFTYPE_P2P_DEVICE);
+
+	iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
+
+	cmd.protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
+
+	/* Override the filter flags to accept only probe requests */
+	cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
+
+	/*
+	 * This flag should be set to true when the P2P Device is
+	 * discoverable and there is at least one other active P2P GO.
+	 * Setting this flag allows the P2P Device to be discoverable on
+	 * channels other than its listen channel.
+	 * Note that this flag should not be set in other cases, as it opens
+	 * the Rx filters on all MACs and increases the number of interrupts.
+	 */
+	ieee80211_iterate_active_interfaces_atomic(
+		mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+		iwl_mvm_go_iterator, &data);
+
+	cmd.p2p_dev.is_disc_extended = cpu_to_le32(data.go_active ? 1 : 0);
+	return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+static void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm,
+				     __le32 *tim_index, __le32 *tim_size,
+				     u8 *beacon, u32 frame_size)
+{
+	u32 tim_idx;
+	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
+
+	/* The index is relative to frame start but we start looking at the
+	 * variable-length part of the beacon. */
+	tim_idx = mgmt->u.beacon.variable - beacon;
+
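+	/* Each element is laid out as | id (1) | len (1) | data (len) |, so
+	 * the loop below advances by beacon[tim_idx + 1] + 2 per element.
+	 */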
+	/* Parse variable-length elements of beacon to find WLAN_EID_TIM */
+	while ((tim_idx < (frame_size - 2)) &&
+			(beacon[tim_idx] != WLAN_EID_TIM))
+		tim_idx += beacon[tim_idx+1] + 2;
+
+	/* If TIM field was found, set variables */
+	if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
+		*tim_index = cpu_to_le32(tim_idx);
+		*tim_size = cpu_to_le32((u32)beacon[tim_idx + 1]);
+	} else {
+		IWL_WARN(mvm, "Unable to find TIM Element in beacon\n");
+	}
+}
+
+static u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size)
+{
+	struct ieee80211_mgmt *mgmt = (void *)beacon;
+	const u8 *ie;
+
+	if (WARN_ON_ONCE(frame_size <= (mgmt->u.beacon.variable - beacon)))
+		return 0;
+
+	frame_size -= mgmt->u.beacon.variable - beacon;
+
+	ie = cfg80211_find_ie(eid, mgmt->u.beacon.variable, frame_size);
+	if (!ie)
+		return 0;
+
+	return ie - beacon;
+}
+
+static u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info,
+					   struct ieee80211_vif *vif)
+{
+	u8 rate;
+
+	if (info->band == NL80211_BAND_5GHZ || vif->p2p)
+		rate = IWL_FIRST_OFDM_RATE;
+	else
+		rate = IWL_FIRST_CCK_RATE;
+
+	return rate;
+}
+
+static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm,
+				    struct ieee80211_vif *vif,
+				    struct sk_buff *beacon,
+				    struct iwl_tx_cmd *tx)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct ieee80211_tx_info *info;
+	u8 rate;
+	u32 tx_flags;
+
+	info = IEEE80211_SKB_CB(beacon);
+
+	/* Set up TX command fields */
+	tx->len = cpu_to_le16((u16)beacon->len);
+	tx->sta_id = mvmvif->bcast_sta.sta_id;
+	tx->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
+	tx_flags = TX_CMD_FLG_SEQ_CTL | TX_CMD_FLG_TSF;
+	tx_flags |=
+		iwl_mvm_bt_coex_tx_prio(mvm, (void *)beacon->data, info, 0) <<
+						TX_CMD_FLG_BT_PRIO_POS;
+	tx->tx_flags = cpu_to_le32(tx_flags);
+
+	if (!fw_has_capa(&mvm->fw->ucode_capa,
+			 IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION)) {
+		mvm->mgmt_last_antenna_idx =
+			iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
+					     mvm->mgmt_last_antenna_idx);
+	}
+
+	tx->rate_n_flags =
+		cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) <<
+			    RATE_MCS_ANT_POS);
+
+	rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif);
+
+	tx->rate_n_flags |= cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(rate));
+	if (rate == IWL_FIRST_CCK_RATE)
+		tx->rate_n_flags |= cpu_to_le32(RATE_MCS_CCK_MSK);
+
+}
+
+static int iwl_mvm_mac_ctxt_send_beacon_cmd(struct iwl_mvm *mvm,
+					    struct sk_buff *beacon,
+					    void *data, int len)
+{
+	struct iwl_host_cmd cmd = {
+		.id = BEACON_TEMPLATE_CMD,
+		.flags = CMD_ASYNC,
+	};
+
+	cmd.len[0] = len;
+	cmd.data[0] = data;
+	cmd.dataflags[0] = 0;
+	cmd.len[1] = beacon->len;
+	cmd.data[1] = beacon->data;
+	cmd.dataflags[1] = IWL_HCMD_DFL_DUP;
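+	/*
+	 * The version-specific template command goes in fragment 0; the raw
+	 * beacon frame rides in fragment 1 and is duplicated
+	 * (IWL_HCMD_DFL_DUP) because the command is async and the caller
+	 * frees the skb right after sending.
+	 */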
+
+	return iwl_mvm_send_cmd(mvm, &cmd);
+}
+
+static int iwl_mvm_mac_ctxt_send_beacon_v6(struct iwl_mvm *mvm,
+					   struct ieee80211_vif *vif,
+					   struct sk_buff *beacon)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mac_beacon_cmd_v6 beacon_cmd = {};
+
+	iwl_mvm_mac_ctxt_set_tx(mvm, vif, beacon, &beacon_cmd.tx);
+
+	beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id);
+
+	if (vif->type == NL80211_IFTYPE_AP)
+		iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx,
+					 &beacon_cmd.tim_size,
+					 beacon->data, beacon->len);
+
+	return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd,
+						sizeof(beacon_cmd));
+}
+
+static int iwl_mvm_mac_ctxt_send_beacon_v7(struct iwl_mvm *mvm,
+					   struct ieee80211_vif *vif,
+					   struct sk_buff *beacon)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mac_beacon_cmd_v7 beacon_cmd = {};
+
+	iwl_mvm_mac_ctxt_set_tx(mvm, vif, beacon, &beacon_cmd.tx);
+
+	beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id);
+
+	if (vif->type == NL80211_IFTYPE_AP)
+		iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx,
+					 &beacon_cmd.tim_size,
+					 beacon->data, beacon->len);
+
+	beacon_cmd.csa_offset =
+		cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data,
+						   WLAN_EID_CHANNEL_SWITCH,
+						   beacon->len));
+	beacon_cmd.ecsa_offset =
+		cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data,
+						   WLAN_EID_EXT_CHANSWITCH_ANN,
+						   beacon->len));
+
+	return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd,
+						sizeof(beacon_cmd));
+}
+
+static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm,
+					   struct ieee80211_vif *vif,
+					   struct sk_buff *beacon)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(beacon);
+	struct iwl_mac_beacon_cmd beacon_cmd = {};
+	u8 rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif);
+	u16 flags;
+
+	flags = iwl_mvm_mac80211_idx_to_hwrate(rate);
+
+	if (rate == IWL_FIRST_CCK_RATE)
+		flags |= IWL_MAC_BEACON_CCK;
+
+	beacon_cmd.flags = cpu_to_le16(flags);
+	beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon->len);
+	beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id);
+
+	if (vif->type == NL80211_IFTYPE_AP)
+		iwl_mvm_mac_ctxt_set_tim(mvm, &beacon_cmd.tim_idx,
+					 &beacon_cmd.tim_size,
+					 beacon->data, beacon->len);
+
+	beacon_cmd.csa_offset =
+		cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data,
+						   WLAN_EID_CHANNEL_SWITCH,
+						   beacon->len));
+	beacon_cmd.ecsa_offset =
+		cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data,
+						   WLAN_EID_EXT_CHANSWITCH_ANN,
+						   beacon->len));
+
+	return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd,
+						sizeof(beacon_cmd));
+}
+
+static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
+					struct ieee80211_vif *vif,
+					struct sk_buff *beacon)
+{
+	if (WARN_ON(!beacon))
+		return -EINVAL;
+
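+	/*
+	 * Pick the template command version the firmware understands:
+	 * no CSA/TBTT-offload capability -> v6 (no CSA offsets),
+	 * new-beacon-template API -> v9 (rate flags in the command itself),
+	 * otherwise -> v7 (v6 plus CSA/eCSA offsets).
+	 */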
+	if (!fw_has_capa(&mvm->fw->ucode_capa,
+			 IWL_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD))
+		return iwl_mvm_mac_ctxt_send_beacon_v6(mvm, vif, beacon);
+
+	if (fw_has_api(&mvm->fw->ucode_capa,
+		       IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE))
+		return iwl_mvm_mac_ctxt_send_beacon_v9(mvm, vif, beacon);
+
+	return iwl_mvm_mac_ctxt_send_beacon_v7(mvm, vif, beacon);
+}
+
+/* The beacon template for the AP/GO/IBSS has changed and needs update */
+int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
+				    struct ieee80211_vif *vif)
+{
+	struct sk_buff *beacon;
+	int ret;
+
+	WARN_ON(vif->type != NL80211_IFTYPE_AP &&
+		vif->type != NL80211_IFTYPE_ADHOC);
+
+	beacon = ieee80211_beacon_get_template(mvm->hw, vif, NULL);
+	if (!beacon)
+		return -ENOMEM;
+
+	ret = iwl_mvm_mac_ctxt_send_beacon(mvm, vif, beacon);
+	dev_kfree_skb(beacon);
+	return ret;
+}
+
+struct iwl_mvm_mac_ap_iterator_data {
+	struct iwl_mvm *mvm;
+	struct ieee80211_vif *vif;
+	u32 beacon_device_ts;
+	u16 beacon_int;
+};
+
+/* Find the beacon_device_ts and beacon_int for a managed interface */
+static void iwl_mvm_mac_ap_iterator(void *_data, u8 *mac,
+				    struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_mac_ap_iterator_data *data = _data;
+
+	if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc)
+		return;
+
+	/* Station client has higher priority than P2P client */
+	if (vif->p2p && data->beacon_device_ts)
+		return;
+
+	data->beacon_device_ts = vif->bss_conf.sync_device_ts;
+	data->beacon_int = vif->bss_conf.beacon_int;
+}
+
+/*
+ * Fill the specific data for a MAC context of type AP or P2P GO
+ */
+static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
+					 struct ieee80211_vif *vif,
+					 struct iwl_mac_ctx_cmd *cmd,
+					 struct iwl_mac_data_ap *ctxt_ap,
+					 bool add)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm_mac_ap_iterator_data data = {
+		.mvm = mvm,
+		.vif = vif,
+		.beacon_device_ts = 0
+	};
+
+	/* in AP mode, the MCAST FIFO takes the EDCA params from VO */
+	cmd->ac[IWL_MVM_TX_FIFO_VO].fifos_mask |= BIT(IWL_MVM_TX_FIFO_MCAST);
+
+	/*
+	 * In AP mode, pass probe requests and beacons from other APs
+	 * (needed for HT protection); when there are no associated
+	 * stations, don't ask the FW to pass beacons, to prevent
+	 * unnecessary wake-ups.
+	 */
+	cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
+	if (mvmvif->ap_assoc_sta_count || !mvm->drop_bcn_ap_mode) {
+		cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
+		IWL_DEBUG_HC(mvm, "Asking FW to pass beacons\n");
+	} else {
+		IWL_DEBUG_HC(mvm, "No need to receive beacons\n");
+	}
+
+	ctxt_ap->bi = cpu_to_le32(vif->bss_conf.beacon_int);
+	ctxt_ap->bi_reciprocal =
+		cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int));
+	ctxt_ap->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int *
+					     vif->bss_conf.dtim_period);
+	ctxt_ap->dtim_reciprocal =
+		cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int *
+					       vif->bss_conf.dtim_period));
+
+	if (!fw_has_api(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_API_STA_TYPE))
+		ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue);
+
+	/*
+	 * Only set the beacon time when the MAC is being added; when we
+	 * just modify the MAC we should keep the time -- the firmware
+	 * can otherwise have a "jumping" TBTT.
+	 */
+	if (add) {
+		/*
+		 * If there is a station/P2P client interface which is
+		 * associated, set the AP's TBTT far enough from the station's
+		 * TBTT. Otherwise, set it to the current system time
+		 */
+		ieee80211_iterate_active_interfaces_atomic(
+			mvm->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
+			iwl_mvm_mac_ap_iterator, &data);
+
+		if (data.beacon_device_ts) {
+			u32 rand = (prandom_u32() % (64 - 36)) + 36;
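+			/* rand is in [36, 63], i.e. the AP's TBTT is placed
+			 * 36-63% of a beacon interval after the client's
+			 * TBTT */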
+			mvmvif->ap_beacon_time = data.beacon_device_ts +
+				ieee80211_tu_to_usec(data.beacon_int * rand /
+						     100);
+		} else {
+			mvmvif->ap_beacon_time =
+				iwl_read_prph(mvm->trans,
+					      DEVICE_SYSTEM_TIME_REG);
+		}
+	}
+
+	ctxt_ap->beacon_time = cpu_to_le32(mvmvif->ap_beacon_time);
+	ctxt_ap->beacon_tsf = 0; /* unused */
+
+	/* TODO: Assumes that the beacon id == mac context id */
+	ctxt_ap->beacon_template = cpu_to_le32(mvmvif->id);
+}
+
+static int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm,
+				   struct ieee80211_vif *vif,
+				   u32 action)
+{
+	struct iwl_mac_ctx_cmd cmd = {};
+
+	WARN_ON(vif->type != NL80211_IFTYPE_AP || vif->p2p);
+
+	/* Fill the common data for all mac context types */
+	iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
+
+	/* Fill the data specific for ap mode */
+	iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd, &cmd.ap,
+				     action == FW_CTXT_ACTION_ADD);
+
+	return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm,
+				   struct ieee80211_vif *vif,
+				   u32 action)
+{
+	struct iwl_mac_ctx_cmd cmd = {};
+	struct ieee80211_p2p_noa_attr *noa = &vif->bss_conf.p2p_noa_attr;
+
+	WARN_ON(vif->type != NL80211_IFTYPE_AP || !vif->p2p);
+
+	/* Fill the common data for all mac context types */
+	iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, NULL, action);
+
+	/* Fill the data specific for GO mode */
+	iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd, &cmd.go.ap,
+				     action == FW_CTXT_ACTION_ADD);
+
+	cmd.go.ctwin = cpu_to_le32(noa->oppps_ctwindow &
+					IEEE80211_P2P_OPPPS_CTWINDOW_MASK);
+	cmd.go.opp_ps_enabled =
+			cpu_to_le32(!!(noa->oppps_ctwindow &
+					IEEE80211_P2P_OPPPS_ENABLE_BIT));
+
+	return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
+}
+
+static int iwl_mvm_mac_ctx_send(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+				u32 action, bool force_assoc_off,
+				const u8 *bssid_override)
+{
+	switch (vif->type) {
+	case NL80211_IFTYPE_STATION:
+		return iwl_mvm_mac_ctxt_cmd_sta(mvm, vif, action,
+						force_assoc_off,
+						bssid_override);
+	case NL80211_IFTYPE_AP:
+		if (!vif->p2p)
+			return iwl_mvm_mac_ctxt_cmd_ap(mvm, vif, action);
+		else
+			return iwl_mvm_mac_ctxt_cmd_go(mvm, vif, action);
+	case NL80211_IFTYPE_MONITOR:
+		return iwl_mvm_mac_ctxt_cmd_listener(mvm, vif, action);
+	case NL80211_IFTYPE_P2P_DEVICE:
+		return iwl_mvm_mac_ctxt_cmd_p2p_device(mvm, vif, action);
+	case NL80211_IFTYPE_ADHOC:
+		return iwl_mvm_mac_ctxt_cmd_ibss(mvm, vif, action);
+	default:
+		break;
+	}
+
+	return -EOPNOTSUPP;
+}
+
+int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	int ret;
+
+	if (WARN_ONCE(mvmvif->uploaded, "Adding active MAC %pM/%d\n",
+		      vif->addr, ieee80211_vif_type_p2p(vif)))
+		return -EIO;
+
+	ret = iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_ADD,
+				   true, NULL);
+	if (ret)
+		return ret;
+
+	/* will only do anything at resume from D3 time */
+	iwl_mvm_set_last_nonqos_seq(mvm, vif);
+
+	mvmvif->uploaded = true;
+	return 0;
+}
+
+int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			     bool force_assoc_off, const u8 *bssid_override)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	if (WARN_ONCE(!mvmvif->uploaded, "Changing inactive MAC %pM/%d\n",
+		      vif->addr, ieee80211_vif_type_p2p(vif)))
+		return -EIO;
+
+	return iwl_mvm_mac_ctx_send(mvm, vif, FW_CTXT_ACTION_MODIFY,
+				    force_assoc_off, bssid_override);
+}
+
+int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mac_ctx_cmd cmd;
+	int ret;
+
+	if (WARN_ONCE(!mvmvif->uploaded, "Removing inactive MAC %pM/%d\n",
+		      vif->addr, ieee80211_vif_type_p2p(vif)))
+		return -EIO;
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+							   mvmvif->color));
+	cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
+
+	ret = iwl_mvm_send_cmd_pdu(mvm, MAC_CONTEXT_CMD, 0,
+				   sizeof(cmd), &cmd);
+	if (ret) {
+		IWL_ERR(mvm, "Failed to remove MAC context: %d\n", ret);
+		return ret;
+	}
+
+	mvmvif->uploaded = false;
+
+	if (vif->type == NL80211_IFTYPE_MONITOR) {
+		__clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, mvm->hw->flags);
+		iwl_mvm_dealloc_snif_sta(mvm);
+	}
+
+	return 0;
+}
+
+static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm,
+				   struct ieee80211_vif *csa_vif, u32 gp2,
+				   bool tx_success)
+{
+	struct iwl_mvm_vif *mvmvif =
+			iwl_mvm_vif_from_mac80211(csa_vif);
+
+	/* Don't start the countdown from a failed beacon */
+	if (!tx_success && !mvmvif->csa_countdown)
+		return;
+
+	mvmvif->csa_countdown = true;
+
+	if (!ieee80211_csa_is_complete(csa_vif)) {
+		int c = ieee80211_csa_update_counter(csa_vif);
+
+		iwl_mvm_mac_ctxt_beacon_changed(mvm, csa_vif);
+		if (csa_vif->p2p &&
+		    !iwl_mvm_te_scheduled(&mvmvif->time_event_data) && gp2 &&
+		    tx_success) {
+			u32 rel_time = (c + 1) *
+				       csa_vif->bss_conf.beacon_int -
+				       IWL_MVM_CHANNEL_SWITCH_TIME_GO;
+			u32 apply_time = gp2 + rel_time * 1024;
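+			/* rel_time is in TU: (c + 1) beacon intervals remain
+			 * until the switch, minus the GO switch lead time;
+			 * multiplying by 1024 converts it to usec on the GP2
+			 * clock */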
+
+			iwl_mvm_schedule_csa_period(mvm, csa_vif,
+					 IWL_MVM_CHANNEL_SWITCH_TIME_GO -
+					 IWL_MVM_CHANNEL_SWITCH_MARGIN,
+					 apply_time);
+		}
+	} else if (!iwl_mvm_te_scheduled(&mvmvif->time_event_data)) {
+		/* we don't have CSA NoA scheduled yet, switch now */
+		ieee80211_csa_finish(csa_vif);
+		RCU_INIT_POINTER(mvm->csa_vif, NULL);
+	}
+}
+
+void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
+			     struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_extended_beacon_notif *beacon = (void *)pkt->data;
+	struct iwl_mvm_tx_resp *beacon_notify_hdr;
+	struct ieee80211_vif *csa_vif;
+	struct ieee80211_vif *tx_blocked_vif;
+	struct agg_tx_status *agg_status;
+	u16 status;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	beacon_notify_hdr = &beacon->beacon_notify_hdr;
+	mvm->ap_last_beacon_gp2 = le32_to_cpu(beacon->gp2);
+	mvm->ibss_manager = beacon->ibss_mgr_status != 0;
+
+	agg_status = iwl_mvm_get_agg_status(mvm, beacon_notify_hdr);
+	status = le16_to_cpu(agg_status->status) & TX_STATUS_MSK;
+	IWL_DEBUG_RX(mvm,
+		     "beacon status %#x retries:%d tsf:0x%16llX gp2:0x%X rate:%d\n",
+		     status, beacon_notify_hdr->failure_frame,
+		     le64_to_cpu(beacon->tsf),
+		     mvm->ap_last_beacon_gp2,
+		     le32_to_cpu(beacon_notify_hdr->initial_rate));
+
+	csa_vif = rcu_dereference_protected(mvm->csa_vif,
+					    lockdep_is_held(&mvm->mutex));
+	if (unlikely(csa_vif && csa_vif->csa_active))
+		iwl_mvm_csa_count_down(mvm, csa_vif, mvm->ap_last_beacon_gp2,
+				       (status == TX_STATUS_SUCCESS));
+
+	tx_blocked_vif = rcu_dereference_protected(mvm->csa_tx_blocked_vif,
+						lockdep_is_held(&mvm->mutex));
+	if (unlikely(tx_blocked_vif)) {
+		struct iwl_mvm_vif *mvmvif =
+			iwl_mvm_vif_from_mac80211(tx_blocked_vif);
+
+		/*
+		 * The channel switch has started and we have blocked the
+		 * stations. If this is the first beacon (the timeout wasn't
+		 * set yet), set the unblock timeout; otherwise count down.
+		 */
+		if (!mvm->csa_tx_block_bcn_timeout)
+			mvm->csa_tx_block_bcn_timeout =
+				IWL_MVM_CS_UNBLOCK_TX_TIMEOUT;
+		else
+			mvm->csa_tx_block_bcn_timeout--;
+
+		/* Check if the timeout is expired, and unblock tx */
+		if (mvm->csa_tx_block_bcn_timeout == 0) {
+			iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
+			RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
+		}
+	}
+}
+
+static void iwl_mvm_beacon_loss_iterator(void *_data, u8 *mac,
+					 struct ieee80211_vif *vif)
+{
+	struct iwl_missed_beacons_notif *missed_beacons = _data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	struct iwl_fw_dbg_trigger_missed_bcon *bcon_trig;
+	struct iwl_fw_dbg_trigger_tlv *trigger;
+	u32 stop_trig_missed_bcon, stop_trig_missed_bcon_since_rx;
+	u32 rx_missed_bcon, rx_missed_bcon_since_rx;
+
+	if (mvmvif->id != (u16)le32_to_cpu(missed_beacons->mac_id))
+		return;
+
+	rx_missed_bcon = le32_to_cpu(missed_beacons->consec_missed_beacons);
+	rx_missed_bcon_since_rx =
+		le32_to_cpu(missed_beacons->consec_missed_beacons_since_last_rx);
+	/*
+	 * TODO: the threshold should be adjusted based on latency conditions,
+	 * and/or in case of a CS flow on one of the other AP vifs.
+	 */
+	if (rx_missed_bcon_since_rx > IWL_MVM_MISSED_BEACONS_THRESHOLD)
+		ieee80211_beacon_loss(vif);
+
+	if (!iwl_fw_dbg_trigger_enabled(mvm->fw,
+					FW_DBG_TRIGGER_MISSED_BEACONS))
+		return;
+
+	trigger = iwl_fw_dbg_get_trigger(mvm->fw,
+					 FW_DBG_TRIGGER_MISSED_BEACONS);
+	bcon_trig = (void *)trigger->data;
+	stop_trig_missed_bcon = le32_to_cpu(bcon_trig->stop_consec_missed_bcon);
+	stop_trig_missed_bcon_since_rx =
+		le32_to_cpu(bcon_trig->stop_consec_missed_bcon_since_rx);
+
+	/* TODO: implement start trigger */
+
+	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
+					   ieee80211_vif_to_wdev(vif),
+					   trigger))
+		return;
+
+	if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx ||
+	    rx_missed_bcon >= stop_trig_missed_bcon)
+		iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, NULL);
+}
+
+void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
+				     struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_missed_beacons_notif *mb = (void *)pkt->data;
+
+	IWL_DEBUG_INFO(mvm,
+		       "missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
+		       le32_to_cpu(mb->mac_id),
+		       le32_to_cpu(mb->consec_missed_beacons),
+		       le32_to_cpu(mb->consec_missed_beacons_since_last_rx),
+		       le32_to_cpu(mb->num_recvd_beacons),
+		       le32_to_cpu(mb->num_expected_beacons));
+
+	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+						   IEEE80211_IFACE_ITER_NORMAL,
+						   iwl_mvm_beacon_loss_iterator,
+						   mb);
+}
+
+void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
+				    struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_stored_beacon_notif *sb = (void *)pkt->data;
+	struct ieee80211_rx_status rx_status;
+	struct sk_buff *skb;
+	u32 size = le32_to_cpu(sb->byte_count);
+
+	if (size == 0)
+		return;
+
+	skb = alloc_skb(size, GFP_ATOMIC);
+	if (!skb) {
+		IWL_ERR(mvm, "alloc_skb failed\n");
+		return;
+	}
+
+	/* update rx_status according to the notification's metadata */
+	memset(&rx_status, 0, sizeof(rx_status));
+	rx_status.mactime = le64_to_cpu(sb->tsf);
+	/* TSF as indicated by the firmware is at INA time */
+	rx_status.flag |= RX_FLAG_MACTIME_PLCP_START;
+	rx_status.device_timestamp = le32_to_cpu(sb->system_time);
+	rx_status.band =
+		(sb->band & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
+				NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+	rx_status.freq =
+		ieee80211_channel_to_frequency(le16_to_cpu(sb->channel),
+					       rx_status.band);
+
+	/* copy the data */
+	skb_put_data(skb, sb->data, size);
+	memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
+
+	/* pass it as regular rx to mac80211 */
+	ieee80211_rx_napi(mvm->hw, NULL, skb, NULL);
+}
+
+void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
+				      struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_channel_switch_noa_notif *notif = (void *)pkt->data;
+	struct ieee80211_vif *csa_vif;
+	struct iwl_mvm_vif *mvmvif;
+	int len = iwl_rx_packet_payload_len(pkt);
+	u32 id_n_color;
+
+	if (WARN_ON_ONCE(len < sizeof(*notif)))
+		return;
+
+	rcu_read_lock();
+
+	csa_vif = rcu_dereference(mvm->csa_vif);
+	if (WARN_ON(!csa_vif || !csa_vif->csa_active))
+		goto out_unlock;
+
+	id_n_color = le32_to_cpu(notif->id_and_color);
+
+	mvmvif = iwl_mvm_vif_from_mac80211(csa_vif);
+	if (WARN(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color) != id_n_color,
+		 "channel switch noa notification on unexpected vif (csa_vif=%d, notif=%d)",
+		 FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color), id_n_color))
+		goto out_unlock;
+
+	IWL_DEBUG_INFO(mvm, "Channel Switch Started Notification\n");
+
+	schedule_delayed_work(&mvm->cs_tx_unblock_dwork,
+			      msecs_to_jiffies(IWL_MVM_CS_UNBLOCK_TX_TIMEOUT *
+					       csa_vif->bss_conf.beacon_int));
+
+	ieee80211_csa_finish(csa_vif);
+
+	rcu_read_unlock();
+
+	RCU_INIT_POINTER(mvm->csa_vif, NULL);
+
+	return;
+
+out_unlock:
+	rcu_read_unlock();
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
new file mode 100644
index 0000000..afed549
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -0,0 +1,4674 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/if_arp.h>
+#include <linux/time.h>
+#include <net/mac80211.h>
+#include <net/ieee80211_radiotap.h>
+#include <net/tcp.h>
+
+#include "iwl-op-mode.h"
+#include "iwl-io.h"
+#include "mvm.h"
+#include "sta.h"
+#include "time-event.h"
+#include "iwl-eeprom-parse.h"
+#include "iwl-phy-db.h"
+#include "testmode.h"
+#include "fw/error-dump.h"
+#include "iwl-prph.h"
+#include "iwl-nvm-parse.h"
+
+static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_STATION),
+	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_AP) |
+			BIT(NL80211_IFTYPE_P2P_CLIENT) |
+			BIT(NL80211_IFTYPE_P2P_GO),
+	},
+	{
+		.max = 1,
+		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
+	},
+};
+
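+/* One station, one AP/P2P-client/GO and one P2P Device may coexist,
+ * on at most two different channels. */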
+static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
+	{
+		.num_different_channels = 2,
+		.max_interfaces = 3,
+		.limits = iwl_mvm_limits,
+		.n_limits = ARRAY_SIZE(iwl_mvm_limits),
+	},
+};
+
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+/*
+ * Use the reserved field to indicate magic values.
+ * These values will only be used internally by the driver,
+ * and won't make it to the fw (reserved will be 0).
+ * BC_FILTER_MAGIC_IP - configure the val of this attribute to
+ *	be the vif's IP address. If there is not exactly one
+ *	IP address (zero, or more than one), this attribute
+ *	will be skipped.
+ * BC_FILTER_MAGIC_MAC - set the val of this attribute to
+ *	the LSB bytes of the vif's MAC address
+ */
+enum {
+	BC_FILTER_MAGIC_NONE = 0,
+	BC_FILTER_MAGIC_IP,
+	BC_FILTER_MAGIC_MAC,
+};
+
+static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
+	{
+		/* arp */
+		.discard = 0,
+		.frame_type = BCAST_FILTER_FRAME_TYPE_ALL,
+		.attrs = {
+			{
+				/* frame type - arp, hw type - ethernet */
+				.offset_type =
+					BCAST_FILTER_OFFSET_PAYLOAD_START,
+				.offset = sizeof(rfc1042_header),
+				.val = cpu_to_be32(0x08060001),
+				.mask = cpu_to_be32(0xffffffff),
+			},
+			{
+				/* arp dest ip */
+				.offset_type =
+					BCAST_FILTER_OFFSET_PAYLOAD_START,
+				.offset = sizeof(rfc1042_header) + 2 +
+					  sizeof(struct arphdr) +
+					  ETH_ALEN + sizeof(__be32) +
+					  ETH_ALEN,
+				.mask = cpu_to_be32(0xffffffff),
+				/* mark it as special field */
+				.reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP),
+			},
+		},
+	},
+	{
+		/* dhcp offer bcast */
+		.discard = 0,
+		.frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
+		.attrs = {
+			{
+				/* udp dest port - 68 (bootp client)*/
+				.offset_type = BCAST_FILTER_OFFSET_IP_END,
+				.offset = offsetof(struct udphdr, dest),
+				.val = cpu_to_be32(0x00440000),
+				.mask = cpu_to_be32(0xffff0000),
+			},
+			{
+				/* dhcp - lsb bytes of client hw address */
+				.offset_type = BCAST_FILTER_OFFSET_IP_END,
+				.offset = 38,
+				.mask = cpu_to_be32(0xffffffff),
+				/* mark it as special field */
+				.reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC),
+			},
+		},
+	},
+	/* last filter must be empty */
+	{},
+};
+#endif
+
+void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
+{
+	if (!iwl_mvm_is_d0i3_supported(mvm))
+		return;
+
+	IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type);
+	spin_lock_bh(&mvm->refs_lock);
+	mvm->refs[ref_type]++;
+	spin_unlock_bh(&mvm->refs_lock);
+	iwl_trans_ref(mvm->trans);
+}
+
+void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
+{
+	if (!iwl_mvm_is_d0i3_supported(mvm))
+		return;
+
+	IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
+	spin_lock_bh(&mvm->refs_lock);
+	if (WARN_ON(!mvm->refs[ref_type])) {
+		spin_unlock_bh(&mvm->refs_lock);
+		return;
+	}
+	mvm->refs[ref_type]--;
+	spin_unlock_bh(&mvm->refs_lock);
+	iwl_trans_unref(mvm->trans);
+}
+
+static void iwl_mvm_unref_all_except(struct iwl_mvm *mvm,
+				     enum iwl_mvm_ref_type except_ref)
+{
+	int i, j;
+
+	if (!iwl_mvm_is_d0i3_supported(mvm))
+		return;
+
+	spin_lock_bh(&mvm->refs_lock);
+	for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
+		if (except_ref == i || !mvm->refs[i])
+			continue;
+
+		IWL_DEBUG_RPM(mvm, "Cleanup: remove mvm ref type %d (%d)\n",
+			      i, mvm->refs[i]);
+		for (j = 0; j < mvm->refs[i]; j++)
+			iwl_trans_unref(mvm->trans);
+		mvm->refs[i] = 0;
+	}
+	spin_unlock_bh(&mvm->refs_lock);
+}
+
+bool iwl_mvm_ref_taken(struct iwl_mvm *mvm)
+{
+	int i;
+	bool taken = false;
+
+	if (!iwl_mvm_is_d0i3_supported(mvm))
+		return true;
+
+	spin_lock_bh(&mvm->refs_lock);
+	for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
+		if (mvm->refs[i]) {
+			taken = true;
+			break;
+		}
+	}
+	spin_unlock_bh(&mvm->refs_lock);
+
+	return taken;
+}
+
+int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
+{
+	iwl_mvm_ref(mvm, ref_type);
+
+	if (!wait_event_timeout(mvm->d0i3_exit_waitq,
+				!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status),
+				HZ)) {
+		WARN_ON_ONCE(1);
+		iwl_mvm_unref(mvm, ref_type);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
+{
+	int i;
+
+	memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts));
+	for (i = 0; i < NUM_PHY_CTX; i++) {
+		mvm->phy_ctxts[i].id = i;
+		mvm->phy_ctxts[i].ref = 0;
+	}
+}
+
+struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
+						  const char *alpha2,
+						  enum iwl_mcc_source src_id,
+						  bool *changed)
+{
+	struct ieee80211_regdomain *regd = NULL;
+	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	struct iwl_mcc_update_resp *resp;
+
+	IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2);
+
+	lockdep_assert_held(&mvm->mutex);
+
+	resp = iwl_mvm_update_mcc(mvm, alpha2, src_id);
+	if (IS_ERR_OR_NULL(resp)) {
+		IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
+			      PTR_ERR_OR_ZERO(resp));
+		goto out;
+	}
+
+	if (changed) {
+		u32 status = le32_to_cpu(resp->status);
+
+		*changed = (status == MCC_RESP_NEW_CHAN_PROFILE ||
+			    status == MCC_RESP_ILLEGAL);
+	}
+
+	regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
+				      __le32_to_cpu(resp->n_channels),
+				      resp->channels,
+				      __le16_to_cpu(resp->mcc),
+				      __le16_to_cpu(resp->geo_info));
+	/* Store the returned source id */
+	src_id = resp->source_id;
+	kfree(resp);
+	if (IS_ERR_OR_NULL(regd)) {
+		IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n",
+			      PTR_ERR_OR_ZERO(regd));
+		goto out;
+	}
+
+	IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n",
+		      regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id);
+	mvm->lar_regdom_set = true;
+	mvm->mcc_src = src_id;
+
+out:
+	return regd;
+}
+
+void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm)
+{
+	bool changed;
+	struct ieee80211_regdomain *regd;
+
+	if (!iwl_mvm_is_lar_supported(mvm))
+		return;
+
+	regd = iwl_mvm_get_current_regdomain(mvm, &changed);
+	if (!IS_ERR_OR_NULL(regd)) {
+		/* only update the regulatory core if changed */
+		if (changed)
+			regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
+
+		kfree(regd);
+	}
+}
+
+struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
+							  bool *changed)
+{
+	return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ",
+				     iwl_mvm_is_wifi_mcc_supported(mvm) ?
+				     MCC_SOURCE_GET_CURRENT :
+				     MCC_SOURCE_OLD_FW, changed);
+}
+
+int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
+{
+	enum iwl_mcc_source used_src;
+	struct ieee80211_regdomain *regd;
+	int ret;
+	bool changed;
+	const struct ieee80211_regdomain *r =
+			rtnl_dereference(mvm->hw->wiphy->regd);
+
+	if (!r)
+		return -ENOENT;
+
+	/* save the last source in case we overwrite it below */
+	used_src = mvm->mcc_src;
+	if (iwl_mvm_is_wifi_mcc_supported(mvm)) {
+		/* Notify the firmware we support wifi location updates */
+		regd = iwl_mvm_get_current_regdomain(mvm, NULL);
+		if (!IS_ERR_OR_NULL(regd))
+			kfree(regd);
+	}
+
+	/* Now set our last stored MCC and source */
+	regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src,
+				     &changed);
+	if (IS_ERR_OR_NULL(regd))
+		return -EIO;
+
+	/* update cfg80211 if the regdomain was changed */
+	if (changed)
+		ret = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
+	else
+		ret = 0;
+
+	kfree(regd);
+	return ret;
+}
+
+int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
+{
+	struct ieee80211_hw *hw = mvm->hw;
+	int num_mac, ret, i;
+	static const u32 mvm_ciphers[] = {
+		WLAN_CIPHER_SUITE_WEP40,
+		WLAN_CIPHER_SUITE_WEP104,
+		WLAN_CIPHER_SUITE_TKIP,
+		WLAN_CIPHER_SUITE_CCMP,
+	};
+
+	/* Tell mac80211 our characteristics */
+	ieee80211_hw_set(hw, SIGNAL_DBM);
+	ieee80211_hw_set(hw, SPECTRUM_MGMT);
+	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+	ieee80211_hw_set(hw, QUEUE_CONTROL);
+	ieee80211_hw_set(hw, WANT_MONITOR_VIF);
+	ieee80211_hw_set(hw, SUPPORTS_PS);
+	ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+	ieee80211_hw_set(hw, TIMING_BEACON_ONLY);
+	ieee80211_hw_set(hw, CONNECTION_MONITOR);
+	ieee80211_hw_set(hw, CHANCTX_STA_CSA);
+	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
+	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
+	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
+	ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
+	ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP);
+
+	if (iwl_mvm_has_tlc_offload(mvm)) {
+		ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
+		ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+	}
+
+	if (iwl_mvm_has_new_rx_api(mvm))
+		ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
+
+	if (fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_STA_PM_NOTIF)) {
+		ieee80211_hw_set(hw, AP_LINK_PS);
+	} else if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) {
+		/*
+		 * we absolutely need this for the new TX API since that comes
+		 * with many more queues than the current code can deal with
+		 * for station powersave
+		 */
+		return -EINVAL;
+	}
+
+	if (mvm->trans->num_rx_queues > 1)
+		ieee80211_hw_set(hw, USES_RSS);
+
+	if (mvm->trans->max_skb_frags)
+		hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;
+
+	hw->queues = IEEE80211_MAX_QUEUES;
+	hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
+	hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
+				    IEEE80211_RADIOTAP_MCS_HAVE_STBC;
+	hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
+		IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED;
+
+	hw->radiotap_timestamp.units_pos =
+		IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US |
+		IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ;
+	/* 22 is the accuracy for CCK frames; it's better (only 8) for OFDM */
+	hw->radiotap_timestamp.accuracy = 22;
+
+	if (!iwl_mvm_has_tlc_offload(mvm))
+		hw->rate_control_algorithm = RS_NAME;
+
+	hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
+	hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
+
+	BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6);
+	memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
+	hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
+	hw->wiphy->cipher_suites = mvm->ciphers;
+
+	if (iwl_mvm_has_new_rx_api(mvm)) {
+		mvm->ciphers[hw->wiphy->n_cipher_suites] =
+			WLAN_CIPHER_SUITE_GCMP;
+		hw->wiphy->n_cipher_suites++;
+		mvm->ciphers[hw->wiphy->n_cipher_suites] =
+			WLAN_CIPHER_SUITE_GCMP_256;
+		hw->wiphy->n_cipher_suites++;
+	}
+
+	/* Enable 11w if software crypto is not enabled (as the
+	 * firmware will interpret some mgmt packets, so enabling it
+	 * with software crypto isn't safe).
+	 */
+	if (!iwlwifi_mod_params.swcrypto) {
+		ieee80211_hw_set(hw, MFP_CAPABLE);
+		mvm->ciphers[hw->wiphy->n_cipher_suites] =
+			WLAN_CIPHER_SUITE_AES_CMAC;
+		hw->wiphy->n_cipher_suites++;
+		if (iwl_mvm_has_new_rx_api(mvm)) {
+			mvm->ciphers[hw->wiphy->n_cipher_suites] =
+				WLAN_CIPHER_SUITE_BIP_GMAC_128;
+			hw->wiphy->n_cipher_suites++;
+			mvm->ciphers[hw->wiphy->n_cipher_suites] =
+				WLAN_CIPHER_SUITE_BIP_GMAC_256;
+			hw->wiphy->n_cipher_suites++;
+		}
+	}
+
+	/* currently FW API supports only one optional cipher scheme */
+	if (mvm->fw->cs[0].cipher) {
+		const struct iwl_fw_cipher_scheme *fwcs = &mvm->fw->cs[0];
+		struct ieee80211_cipher_scheme *cs = &mvm->cs[0];
+
+		mvm->hw->n_cipher_schemes = 1;
+
+		cs->cipher = le32_to_cpu(fwcs->cipher);
+		cs->iftype = BIT(NL80211_IFTYPE_STATION);
+		cs->hdr_len = fwcs->hdr_len;
+		cs->pn_len = fwcs->pn_len;
+		cs->pn_off = fwcs->pn_off;
+		cs->key_idx_off = fwcs->key_idx_off;
+		cs->key_idx_mask = fwcs->key_idx_mask;
+		cs->key_idx_shift = fwcs->key_idx_shift;
+		cs->mic_len = fwcs->mic_len;
+
+		mvm->hw->cipher_schemes = mvm->cs;
+		mvm->ciphers[hw->wiphy->n_cipher_suites] = cs->cipher;
+		hw->wiphy->n_cipher_suites++;
+	}
+
+	ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
+	hw->wiphy->features |=
+		NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
+		NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
+		NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
+
+	hw->sta_data_size = sizeof(struct iwl_mvm_sta);
+	hw->vif_data_size = sizeof(struct iwl_mvm_vif);
+	hw->chanctx_data_size = sizeof(u16);
+
+	hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+		BIT(NL80211_IFTYPE_P2P_CLIENT) |
+		BIT(NL80211_IFTYPE_AP) |
+		BIT(NL80211_IFTYPE_P2P_GO) |
+		BIT(NL80211_IFTYPE_P2P_DEVICE) |
+		BIT(NL80211_IFTYPE_ADHOC);
+
+	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+	hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
+	if (iwl_mvm_is_lar_supported(mvm))
+		hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
+	else
+		hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
+					       REGULATORY_DISABLE_BEACON_HINTS;
+
+	hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+	hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+
+	hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
+	hw->wiphy->n_iface_combinations =
+		ARRAY_SIZE(iwl_mvm_iface_combinations);
+
+	hw->wiphy->max_remain_on_channel_duration = 10000;
+	hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
+	/* we can compensate for an offset of up to 3 channels = 15 MHz */
+	hw->wiphy->max_adj_channel_rssi_comp = 3 * 5;
+
+	/* Extract MAC address */
+	memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
+	hw->wiphy->addresses = mvm->addresses;
+	hw->wiphy->n_addresses = 1;
+
+	/* Extract additional MAC addresses if available */
+	num_mac = (mvm->nvm_data->n_hw_addrs > 1) ?
+		min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1;
+
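+	/* Each additional address is derived from the previous one by
+	 * incrementing the last octet. */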
+	for (i = 1; i < num_mac; i++) {
+		memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr,
+		       ETH_ALEN);
+		mvm->addresses[i].addr[5]++;
+		hw->wiphy->n_addresses++;
+	}
+
+	iwl_mvm_reset_phy_ctxts(mvm);
+
+	hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm);
+
+	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
+
+	BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK);
+	BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
+		     IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));
+
+	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
+		mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS;
+	else
+		mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;
+
+	if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels)
+		hw->wiphy->bands[NL80211_BAND_2GHZ] =
+			&mvm->nvm_data->bands[NL80211_BAND_2GHZ];
+	if (mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels) {
+		hw->wiphy->bands[NL80211_BAND_5GHZ] =
+			&mvm->nvm_data->bands[NL80211_BAND_5GHZ];
+
+		if (fw_has_capa(&mvm->fw->ucode_capa,
+				IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
+		    fw_has_api(&mvm->fw->ucode_capa,
+			       IWL_UCODE_TLV_API_LQ_SS_PARAMS))
+			hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.cap |=
+				IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
+	}
+
+	hw->wiphy->hw_version = mvm->trans->hw_id;
+
+	if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
+		hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
+	else
+		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+	hw->wiphy->max_sched_scan_reqs = 1;
+	hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
+	hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
+	/* we create the 802.11 header and zero length SSID IE. */
+	hw->wiphy->max_sched_scan_ie_len =
+		SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
+	hw->wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS;
+	hw->wiphy->max_sched_scan_plan_interval = U16_MAX;
+
+	/*
+	 * The firmware uses a u8 for the number of iterations, but 0xff is
+	 * reserved for "run forever", so the maximum number of iterations is
+	 * actually 254.
+	 */
+	hw->wiphy->max_sched_scan_plan_iterations = 254;
+
+	hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
+			       NL80211_FEATURE_LOW_PRIORITY_SCAN |
+			       NL80211_FEATURE_P2P_GO_OPPPS |
+			       NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
+			       NL80211_FEATURE_DYNAMIC_SMPS |
+			       NL80211_FEATURE_STATIC_SMPS |
+			       NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;
+
+	if (fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT))
+		hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
+	if (fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT))
+		hw->wiphy->features |= NL80211_FEATURE_QUIET;
+
+	if (fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
+		hw->wiphy->features |=
+			NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES;
+
+	if (fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
+		hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
+
+	if (fw_has_api(&mvm->fw->ucode_capa,
+		       IWL_UCODE_TLV_API_SCAN_TSF_REPORT)) {
+		wiphy_ext_feature_set(hw->wiphy,
+				      NL80211_EXT_FEATURE_SCAN_START_TIME);
+		wiphy_ext_feature_set(hw->wiphy,
+				      NL80211_EXT_FEATURE_BSS_PARENT_TSF);
+		wiphy_ext_feature_set(hw->wiphy,
+				      NL80211_EXT_FEATURE_SET_SCAN_DWELL);
+	}
+
+	if (iwl_mvm_is_oce_supported(mvm)) {
+		wiphy_ext_feature_set(hw->wiphy,
+			NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP);
+		wiphy_ext_feature_set(hw->wiphy,
+			NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME);
+		wiphy_ext_feature_set(hw->wiphy,
+			NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION);
+		wiphy_ext_feature_set(hw->wiphy,
+			NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE);
+	}
+
+	mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
+
+#ifdef CONFIG_PM_SLEEP
+	if (iwl_mvm_is_d0i3_supported(mvm) &&
+	    device_can_wakeup(mvm->trans->dev)) {
+		mvm->wowlan.flags = WIPHY_WOWLAN_ANY;
+		hw->wiphy->wowlan = &mvm->wowlan;
+	}
+
+	if (mvm->fw->img[IWL_UCODE_WOWLAN].num_sec &&
+	    mvm->trans->ops->d3_suspend &&
+	    mvm->trans->ops->d3_resume &&
+	    device_can_wakeup(mvm->trans->dev)) {
+		mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT |
+				     WIPHY_WOWLAN_DISCONNECT |
+				     WIPHY_WOWLAN_EAP_IDENTITY_REQ |
+				     WIPHY_WOWLAN_RFKILL_RELEASE |
+				     WIPHY_WOWLAN_NET_DETECT;
+		if (!iwlwifi_mod_params.swcrypto)
+			mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+					     WIPHY_WOWLAN_GTK_REKEY_FAILURE |
+					     WIPHY_WOWLAN_4WAY_HANDSHAKE;
+
+		mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
+		mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
+		mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
+		mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES;
+		hw->wiphy->wowlan = &mvm->wowlan;
+	}
+#endif
+
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+	/* assign default bcast filtering configuration */
+	mvm->bcast_filters = iwl_mvm_default_bcast_filters;
+#endif
+
+	ret = iwl_mvm_leds_init(mvm);
+	if (ret)
+		return ret;
+
+	if (fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
+		IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
+		hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+		ieee80211_hw_set(hw, TDLS_WIDER_BW);
+	}
+
+	if (fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) {
+		IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n");
+		hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
+	}
+
+	hw->netdev_features |= mvm->cfg->features;
+	if (!iwl_mvm_is_csum_supported(mvm)) {
+		hw->netdev_features &= ~(IWL_TX_CSUM_NETIF_FLAGS |
+					 NETIF_F_RXCSUM);
+		/* We may support SW TX CSUM */
+		if (IWL_MVM_SW_TX_CSUM_OFFLOAD)
+			hw->netdev_features |= IWL_TX_CSUM_NETIF_FLAGS;
+	}
+
+	ret = ieee80211_register_hw(mvm->hw);
+	if (ret)
+		iwl_mvm_leds_exit(mvm);
+	mvm->init_status |= IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE;
+
+	if (mvm->cfg->vht_mu_mimo_supported)
+		wiphy_ext_feature_set(hw->wiphy,
+				      NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER);
+
+	return ret;
+}
+
+static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
+			     struct ieee80211_sta *sta,
+			     struct sk_buff *skb)
+{
+	struct iwl_mvm_sta *mvmsta;
+	bool defer = false;
+
+	/*
+	 * double check the IN_D0I3 flag both before and after
+	 * taking the spinlock, in order to prevent taking
+	 * the spinlock when not needed.
+	 */
+	if (likely(!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)))
+		return false;
+
+	spin_lock(&mvm->d0i3_tx_lock);
+	/*
+	 * testing the flag again ensures the skb dequeue
+	 * loop (on d0i3 exit) hasn't run yet.
+	 */
+	if (!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
+		goto out;
+
+	mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	if (mvmsta->sta_id == IWL_MVM_INVALID_STA ||
+	    mvmsta->sta_id != mvm->d0i3_ap_sta_id)
+		goto out;
+
+	__skb_queue_tail(&mvm->d0i3_tx, skb);
+	ieee80211_stop_queues(mvm->hw);
+
+	/* trigger wakeup */
+	iwl_mvm_ref(mvm, IWL_MVM_REF_TX);
+	iwl_mvm_unref(mvm, IWL_MVM_REF_TX);
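+	/* Taking and immediately releasing a reference is (apparently) enough
+	 * to kick off D0i3 exit; the queued skb is sent once the exit
+	 * completes and the queues are woken. */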
+
+	defer = true;
+out:
+	spin_unlock(&mvm->d0i3_tx_lock);
+	return defer;
+}
+
+static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
+			   struct ieee80211_tx_control *control,
+			   struct sk_buff *skb)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	struct ieee80211_sta *sta = control->sta;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_hdr *hdr = (void *)skb->data;
+
+	if (iwl_mvm_is_radio_killed(mvm)) {
+		IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
+		goto drop;
+	}
+
+	if (info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
+	    !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
+	    !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
+		goto drop;
+
+	/* treat non-bufferable MMPDUs on AP interfaces as broadcast */
+	if ((info->control.vif->type == NL80211_IFTYPE_AP ||
+	     info->control.vif->type == NL80211_IFTYPE_ADHOC) &&
+	    ieee80211_is_mgmt(hdr->frame_control) &&
+	    !ieee80211_is_bufferable_mmpdu(hdr->frame_control))
+		sta = NULL;
+
+	if (sta) {
+		if (iwl_mvm_defer_tx(mvm, sta, skb))
+			return;
+		if (iwl_mvm_tx_skb(mvm, skb, sta))
+			goto drop;
+		return;
+	}
+
+	if (iwl_mvm_tx_skb_non_sta(mvm, skb))
+		goto drop;
+	return;
+ drop:
+	ieee80211_free_txskb(hw, skb);
+}
+
+static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
+{
+	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
+		return false;
+	return true;
+}
+
+static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
+{
+	if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
+		return false;
+	if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
+		return true;
+
+	/* enabled by default */
+	return true;
+}
+
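+/*
+ * Helper for the BA debug triggers below: collect the firmware debug data
+ * only when the trigger's TID bitmap (from the debug TLV) includes this TID.
+ */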
+#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...)		\
+	do {								\
+		if (!(le16_to_cpu(_tid_bm) & BIT(_tid)))		\
+			break;						\
+		iwl_fw_dbg_collect_trig(&(_mvm)->fwrt, _trig, _fmt);	\
+	} while (0)
+
+static void
+iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			    struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn,
+			    enum ieee80211_ampdu_mlme_action action)
+{
+	struct iwl_fw_dbg_trigger_tlv *trig;
+	struct iwl_fw_dbg_trigger_ba *ba_trig;
+
+	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+		return;
+
+	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
+	ba_trig = (void *)trig->data;
+
+	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
+					   ieee80211_vif_to_wdev(vif), trig))
+		return;
+
+	switch (action) {
+	case IEEE80211_AMPDU_TX_OPERATIONAL: {
+		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+		struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+
+		CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid,
+				 "TX AGG START: MAC %pM tid %d ssn %d\n",
+				 sta->addr, tid, tid_data->ssn);
+		break;
+		}
+	case IEEE80211_AMPDU_TX_STOP_CONT:
+		CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid,
+				 "TX AGG STOP: MAC %pM tid %d\n",
+				 sta->addr, tid);
+		break;
+	case IEEE80211_AMPDU_RX_START:
+		CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid,
+				 "RX AGG START: MAC %pM tid %d ssn %d\n",
+				 sta->addr, tid, rx_ba_ssn);
+		break;
+	case IEEE80211_AMPDU_RX_STOP:
+		CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid,
+				 "RX AGG STOP: MAC %pM tid %d\n",
+				 sta->addr, tid);
+		break;
+	default:
+		break;
+	}
+}
+
+static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
+				    struct ieee80211_vif *vif,
+				    struct ieee80211_ampdu_params *params)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	int ret;
+	bool tx_agg_ref = false;
+	struct ieee80211_sta *sta = params->sta;
+	enum ieee80211_ampdu_mlme_action action = params->action;
+	u16 tid = params->tid;
+	u16 *ssn = &params->ssn;
+	u16 buf_size = params->buf_size;
+	bool amsdu = params->amsdu;
+	u16 timeout = params->timeout;
+
+	IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
+		     sta->addr, tid, action);
+
+	if (!(mvm->nvm_data->sku_cap_11n_enable))
+		return -EACCES;
+
+	/* return from D0i3 before starting a new Tx aggregation */
+	switch (action) {
+	case IEEE80211_AMPDU_TX_START:
+	case IEEE80211_AMPDU_TX_STOP_CONT:
+	case IEEE80211_AMPDU_TX_STOP_FLUSH:
+	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+	case IEEE80211_AMPDU_TX_OPERATIONAL:
+		/*
+		 * for tx start, wait synchronously until D0i3 exit to
+		 * get the correct sequence number for the tid.
+		 * additionally, some other ampdu actions use direct
+		 * target access, which is not handled automatically
+		 * by the trans layer (unlike commands), so wait for
+		 * d0i3 exit in these cases as well.
+		 */
+		ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_TX_AGG);
+		if (ret)
+			return ret;
+
+		tx_agg_ref = true;
+		break;
+	default:
+		break;
+	}
+
+	mutex_lock(&mvm->mutex);
+
+	switch (action) {
+	case IEEE80211_AMPDU_RX_START:
+		if (iwl_mvm_vif_from_mac80211(vif)->ap_sta_id ==
+				iwl_mvm_sta_from_mac80211(sta)->sta_id) {
+			struct iwl_mvm_vif *mvmvif;
+			u16 macid = iwl_mvm_vif_from_mac80211(vif)->id;
+			struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[macid];
+
+			mdata->opened_rx_ba_sessions = true;
+			mvmvif = iwl_mvm_vif_from_mac80211(vif);
+			cancel_delayed_work(&mvmvif->uapsd_nonagg_detected_wk);
+		}
+		if (!iwl_enable_rx_ampdu(mvm->cfg)) {
+			ret = -EINVAL;
+			break;
+		}
+		ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size,
+					 timeout);
+		break;
+	case IEEE80211_AMPDU_RX_STOP:
+		ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size,
+					 timeout);
+		break;
+	case IEEE80211_AMPDU_TX_START:
+		if (!iwl_enable_tx_ampdu(mvm->cfg)) {
+			ret = -EINVAL;
+			break;
+		}
+		ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn);
+		break;
+	case IEEE80211_AMPDU_TX_STOP_CONT:
+		ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid);
+		break;
+	case IEEE80211_AMPDU_TX_STOP_FLUSH:
+	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+		ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid);
+		break;
+	case IEEE80211_AMPDU_TX_OPERATIONAL:
+		ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid,
+					      buf_size, amsdu);
+		break;
+	default:
+		WARN_ON_ONCE(1);
+		ret = -EINVAL;
+		break;
+	}
+
+	if (!ret) {
+		u16 rx_ba_ssn = 0;
+
+		if (action == IEEE80211_AMPDU_RX_START)
+			rx_ba_ssn = *ssn;
+
+		iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid,
+					    rx_ba_ssn, action);
+	}
+	mutex_unlock(&mvm->mutex);
+
+	/*
+	 * If the tid is marked as started, we won't use it for offloaded
+	 * traffic on the next D0i3 entry. It's safe to unref.
+	 */
+	if (tx_agg_ref)
+		iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
+
+	return ret;
+}
+
+static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
+				     struct ieee80211_vif *vif)
+{
+	struct iwl_mvm *mvm = data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	mvmvif->uploaded = false;
+	mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
+
+	spin_lock_bh(&mvm->time_event_lock);
+	iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
+	spin_unlock_bh(&mvm->time_event_lock);
+
+	mvmvif->phy_ctxt = NULL;
+	memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data));
+}
+
+static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
+{
+	/* clear the D3 reconfig flag; we only need it to avoid dumping a
+	 * firmware coredump on reconfiguration, and we shouldn't do that
+	 * on the D3->D0 transition
+	 */
+	if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) {
+		mvm->fwrt.dump.desc = &iwl_dump_desc_assert;
+		iwl_fw_error_dump(&mvm->fwrt);
+	}
+
+	/* cleanup all stale references (scan, roc), but keep the
+	 * ucode_down ref until reconfig is complete
+	 */
+	iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN);
+
+	iwl_mvm_stop_device(mvm);
+
+	mvm->scan_status = 0;
+	mvm->ps_disabled = false;
+	mvm->calibrating = false;
+
+	/* just in case one was running */
+	iwl_mvm_cleanup_roc_te(mvm);
+	ieee80211_remain_on_channel_expired(mvm->hw);
+
+	/*
+	 * cleanup all interfaces, even inactive ones, as some might have
+	 * gone down during the HW restart
+	 */
+	ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
+
+	mvm->p2p_device_vif = NULL;
+	mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
+
+	iwl_mvm_reset_phy_ctxts(mvm);
+	memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
+	memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames));
+	memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
+	memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));
+
+	ieee80211_wake_queues(mvm->hw);
+
+	/* clear any stale d0i3 state */
+	clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
+
+	mvm->vif_count = 0;
+	mvm->rx_ba_sessions = 0;
+	mvm->fwrt.dump.conf = FW_DBG_INVALID;
+	mvm->monitor_on = false;
+
+	/* keep statistics ticking */
+	iwl_mvm_accu_radio_stats(mvm);
+}
+
+int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
+{
+	int ret;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) {
+		/*
+		 * Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART
+		 * so later code will - from now on - see that we're doing it.
+		 */
+		set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+		clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
+		/* Clean up some internal and mac80211 state on restart */
+		iwl_mvm_restart_cleanup(mvm);
+	} else {
+		/* Hold the reference to prevent runtime suspend while
+		 * the start procedure runs.  It's a bit confusing
+		 * that the UCODE_DOWN reference is taken, but it just
+		 * means "UCODE is not UP yet". ( TODO: rename this
+		 * reference).
+		 */
+		iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
+	}
+	ret = iwl_mvm_up(mvm);
+
+	if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+		/* Something went wrong - we need to finish some cleanup
+		 * that normally iwl_mvm_mac_restart_complete() below
+		 * would do.
+		 */
+		clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+		iwl_mvm_d0i3_enable_tx(mvm, NULL);
+	}
+
+	return ret;
+}
+
+static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	int ret;
+
+	/* Some hw restart cleanups must not hold the mutex */
+	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+		/*
+		 * Make sure we are out of d0i3. This is needed
+		 * to make sure the reference accounting is correct
+		 * (and there is no stale d0i3_exit_work).
+		 */
+		wait_event_timeout(mvm->d0i3_exit_waitq,
+				   !test_bit(IWL_MVM_STATUS_IN_D0I3,
+					     &mvm->status),
+				   HZ);
+	}
+
+	mutex_lock(&mvm->mutex);
+	ret = __iwl_mvm_mac_start(mvm);
+	mutex_unlock(&mvm->mutex);
+
+	return ret;
+}
+
+static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
+{
+	int ret;
+
+	mutex_lock(&mvm->mutex);
+
+	clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+	iwl_mvm_d0i3_enable_tx(mvm, NULL);
+	ret = iwl_mvm_update_quotas(mvm, true, NULL);
+	if (ret)
+		IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
+			ret);
+
+	/* allow transport/FW low power modes */
+	iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
+
+	/*
+	 * If we have TDLS peers, remove them. We don't know the last seqno/PN
+	 * of packets the FW sent out, so we must reconnect.
+	 */
+	iwl_mvm_teardown_tdls_peers(mvm);
+
+	mutex_unlock(&mvm->mutex);
+}
+
+static void iwl_mvm_resume_complete(struct iwl_mvm *mvm)
+{
+	if (iwl_mvm_is_d0i3_supported(mvm) &&
+	    iwl_mvm_enter_d0i3_on_suspend(mvm))
+		WARN_ONCE(!wait_event_timeout(mvm->d0i3_exit_waitq,
+					      !test_bit(IWL_MVM_STATUS_IN_D0I3,
+							&mvm->status),
+					      HZ),
+			  "D0i3 exit on resume timed out\n");
+}
+
+static void
+iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
+			      enum ieee80211_reconfig_type reconfig_type)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+	switch (reconfig_type) {
+	case IEEE80211_RECONFIG_TYPE_RESTART:
+		iwl_mvm_restart_complete(mvm);
+		break;
+	case IEEE80211_RECONFIG_TYPE_SUSPEND:
+		iwl_mvm_resume_complete(mvm);
+		break;
+	}
+}
+
+void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
+{
+	lockdep_assert_held(&mvm->mutex);
+
+	/* firmware counters are obviously reset now, but we shouldn't
+	 * track them only partially, so also clear the accumulated
+	 * fw_reset_accu counters.
+	 */
+	memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats));
+
+	/* async_handlers_wk is now blocked */
+
+	/*
+	 * The work item could be running or queued if the
+	 * ROC time event stops just as we get here.
+	 */
+	flush_work(&mvm->roc_done_wk);
+
+	iwl_mvm_stop_device(mvm);
+
+	iwl_mvm_async_handlers_purge(mvm);
+	/* async_handlers_list is empty and will stay empty: HW is stopped */
+
+	/* the fw is stopped, the aux sta is dead: clean up driver state */
+	iwl_mvm_del_aux_sta(mvm);
+
+	/*
+	 * Clear the IN_HW_RESTART and HW_RESTART_REQUESTED flags when
+	 * stopping the hw, as restart_complete() won't be called in this
+	 * case and mac80211 won't execute the restart.
+	 * But make sure to clean up interfaces that went down before or
+	 * while the HW restart was requested.
+	 */
+	if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
+	    test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+			       &mvm->status))
+		ieee80211_iterate_interfaces(mvm->hw, 0,
+					     iwl_mvm_cleanup_iterator, mvm);
+
+	/* We shouldn't have any UIDs still set.  Loop over all the UIDs to
+	 * make sure there's nothing left there and warn if any is found.
+	 */
+	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+		int i;
+
+		for (i = 0; i < mvm->max_scans; i++) {
+			if (WARN_ONCE(mvm->scan_uid_status[i],
+				      "UMAC scan UID %d status was not cleaned\n",
+				      i))
+				mvm->scan_uid_status[i] = 0;
+		}
+	}
+}
+
+static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+	flush_work(&mvm->d0i3_exit_work);
+	flush_work(&mvm->async_handlers_wk);
+	flush_work(&mvm->add_stream_wk);
+
+	/*
+	 * Lock and clear the firmware running bit here already, so that
+	 * new commands coming in elsewhere, e.g. from debugfs, will not
+	 * be able to proceed. This is important here because one of those
+	 * debugfs files causes the firmware dump to be triggered, and if we
+	 * don't stop debugfs accesses before canceling that it could be
+	 * retriggered after we flush it but before we've cleared the bit.
+	 */
+	clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
+
+	iwl_fw_cancel_dump(&mvm->fwrt);
+	cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
+	cancel_delayed_work_sync(&mvm->scan_timeout_dwork);
+	iwl_fw_free_dump_desc(&mvm->fwrt);
+
+	mutex_lock(&mvm->mutex);
+	__iwl_mvm_mac_stop(mvm);
+	mutex_unlock(&mvm->mutex);
+
+	/*
+	 * The worker might have been waiting for the mutex, let it run and
+	 * discover that its list is now empty.
+	 */
+	cancel_work_sync(&mvm->async_handlers_wk);
+}
+
+static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
+{
+	u16 i;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	for (i = 0; i < NUM_PHY_CTX; i++)
+		if (!mvm->phy_ctxts[i].ref)
+			return &mvm->phy_ctxts[i];
+
+	IWL_ERR(mvm, "No available PHY context\n");
+	return NULL;
+}
+
+static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+				s16 tx_power)
+{
+	struct iwl_dev_tx_power_cmd cmd = {
+		.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
+		.v3.mac_context_id =
+			cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
+		.v3.pwr_restriction = cpu_to_le16(8 * tx_power),
+	};
+	int len = sizeof(cmd);
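+
+	/*
+	 * pwr_restriction is scaled by 8 above since the firmware expects
+	 * the limit in eighth-of-dBm steps; IWL_DEV_MAX_TX_POWER below
+	 * effectively means "no restriction".
+	 */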
+
+	if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
+		cmd.v3.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
+
+	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
+		len = sizeof(cmd.v3);
+
+	return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
+}
+
+static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
+				     struct ieee80211_vif *vif)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	int ret;
+
+	mvmvif->mvm = mvm;
+
+	/*
+	 * make sure D0i3 exit is completed, otherwise a target access
+	 * during tx queue configuration could be done when still in
+	 * D0i3 state.
+	 */
+	ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_ADD_IF);
+	if (ret)
+		return ret;
+
+	/*
+	 * Not much to do here. The stack will not allow interface
+	 * types or combinations that we didn't advertise, so we
+	 * don't really have to check the types.
+	 */
+
+	mutex_lock(&mvm->mutex);
+
+	/* make sure that beacon statistics don't go backwards with FW reset */
+	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+		mvmvif->beacon_stats.accu_num_beacons +=
+			mvmvif->beacon_stats.num_beacons;
+
+	/* Allocate resources for the MAC context, and add it to the fw  */
+	ret = iwl_mvm_mac_ctxt_init(mvm, vif);
+	if (ret)
+		goto out_unlock;
+
+	/* Counting number of interfaces is needed for legacy PM */
+	if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
+		mvm->vif_count++;
+
+	/*
+	 * The AP binding flow can be done only after the beacon
+	 * template is configured (which happens only in the mac80211
+	 * start_ap() flow), and adding the broadcast station can happen
+	 * only after the binding.
+	 * In addition, since modifying the MAC before adding a bcast
+	 * station is not allowed by the FW, delay adding the MAC context
+	 * until the point where we can also add the bcast station.
+	 * In short: there's not much we can do at this point, other than
+	 * allocating resources :)
+	 */
+	if (vif->type == NL80211_IFTYPE_AP ||
+	    vif->type == NL80211_IFTYPE_ADHOC) {
+		ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
+		if (ret) {
+			IWL_ERR(mvm, "Failed to allocate bcast sta\n");
+			goto out_release;
+		}
+
+		/*
+		 * The only queue for this station is the mcast queue,
+		 * which shouldn't be in the TFD mask anyway
+		 */
+		ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta,
+					       0, vif->type,
+					       IWL_STA_MULTICAST);
+		if (ret)
+			goto out_release;
+
+		iwl_mvm_vif_dbgfs_register(mvm, vif);
+		goto out_unlock;
+	}
+
+	mvmvif->features |= hw->netdev_features;
+
+	ret = iwl_mvm_mac_ctxt_add(mvm, vif);
+	if (ret)
+		goto out_release;
+
+	ret = iwl_mvm_power_update_mac(mvm);
+	if (ret)
+		goto out_remove_mac;
+
+	/* beacon filtering */
+	ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+	if (ret)
+		goto out_remove_mac;
+
+	if (!mvm->bf_allowed_vif &&
+	    vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
+		mvm->bf_allowed_vif = mvmvif;
+		vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
+				     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
+	}
+
+	/*
+	 * P2P_DEVICE interface does not have a channel context assigned to it,
+	 * so a dedicated PHY context is allocated to it and the corresponding
+	 * MAC context is bound to it at this stage.
+	 */
+	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+
+		mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
+		if (!mvmvif->phy_ctxt) {
+			ret = -ENOSPC;
+			goto out_free_bf;
+		}
+
+		iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
+		ret = iwl_mvm_binding_add_vif(mvm, vif);
+		if (ret)
+			goto out_unref_phy;
+
+		ret = iwl_mvm_add_p2p_bcast_sta(mvm, vif);
+		if (ret)
+			goto out_unbind;
+
+		/* Save a pointer to p2p device vif, so it can later be used to
+		 * update the p2p device MAC when a GO is started/stopped */
+		mvm->p2p_device_vif = vif;
+	}
+
+	iwl_mvm_tcm_add_vif(mvm, vif);
+
+	if (vif->type == NL80211_IFTYPE_MONITOR)
+		mvm->monitor_on = true;
+
+	iwl_mvm_vif_dbgfs_register(mvm, vif);
+	goto out_unlock;
+
+ out_unbind:
+	iwl_mvm_binding_remove_vif(mvm, vif);
+ out_unref_phy:
+	iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
+ out_free_bf:
+	if (mvm->bf_allowed_vif == mvmvif) {
+		mvm->bf_allowed_vif = NULL;
+		vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
+				       IEEE80211_VIF_SUPPORTS_CQM_RSSI);
+	}
+ out_remove_mac:
+	mvmvif->phy_ctxt = NULL;
+	iwl_mvm_mac_ctxt_remove(mvm, vif);
+ out_release:
+	if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
+		mvm->vif_count--;
+ out_unlock:
+	mutex_unlock(&mvm->mutex);
+
+	iwl_mvm_unref(mvm, IWL_MVM_REF_ADD_IF);
+
+	return ret;
+}
+
+static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
+					struct ieee80211_vif *vif)
+{
+	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+		/*
+		 * Flush the ROC worker which will flush the OFFCHANNEL queue.
+		 * We assume here that all the packets sent to the OFFCHANNEL
+		 * queue are sent in ROC session.
+		 */
+		flush_work(&mvm->roc_done_wk);
+	}
+}
+
+static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
+					 struct ieee80211_vif *vif)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	iwl_mvm_prepare_mac_removal(mvm, vif);
+
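+	/*
+	 * AP/IBSS interfaces bail out of add_interface before
+	 * iwl_mvm_tcm_add_vif(), so they were never registered with
+	 * traffic load (TCM) tracking and must not be removed from it.
+	 */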
+	if (!(vif->type == NL80211_IFTYPE_AP ||
+	      vif->type == NL80211_IFTYPE_ADHOC))
+		iwl_mvm_tcm_rm_vif(mvm, vif);
+
+	mutex_lock(&mvm->mutex);
+
+	if (mvm->bf_allowed_vif == mvmvif) {
+		mvm->bf_allowed_vif = NULL;
+		vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
+				       IEEE80211_VIF_SUPPORTS_CQM_RSSI);
+	}
+
+	iwl_mvm_vif_dbgfs_clean(mvm, vif);
+
+	/*
+	 * For AP/GO interfaces, tearing down the resources allocated to
+	 * the interface is handled as part of the stop_ap flow.
+	 */
+	if (vif->type == NL80211_IFTYPE_AP ||
+	    vif->type == NL80211_IFTYPE_ADHOC) {
+#ifdef CONFIG_NL80211_TESTMODE
+		if (vif == mvm->noa_vif) {
+			mvm->noa_vif = NULL;
+			mvm->noa_duration = 0;
+		}
+#endif
+		iwl_mvm_dealloc_int_sta(mvm, &mvmvif->mcast_sta);
+		iwl_mvm_dealloc_bcast_sta(mvm, vif);
+		goto out_release;
+	}
+
+	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+		mvm->p2p_device_vif = NULL;
+		iwl_mvm_rm_p2p_bcast_sta(mvm, vif);
+		iwl_mvm_binding_remove_vif(mvm, vif);
+		iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
+		mvmvif->phy_ctxt = NULL;
+	}
+
+	if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
+		mvm->vif_count--;
+
+	iwl_mvm_power_update_mac(mvm);
+	iwl_mvm_mac_ctxt_remove(mvm, vif);
+
+	if (vif->type == NL80211_IFTYPE_MONITOR)
+		mvm->monitor_on = false;
+
+out_release:
+	mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
+{
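+	/*
+	 * Nothing to do here: channel and TX power changes are handled
+	 * through channel contexts and bss_info_changed() instead.
+	 */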
+	return 0;
+}
+
+struct iwl_mvm_mc_iter_data {
+	struct iwl_mvm *mvm;
+	int port_id;
+};
+
+static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
+				      struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_mc_iter_data *data = _data;
+	struct iwl_mvm *mvm = data->mvm;
+	struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
+	struct iwl_host_cmd hcmd = {
+		.id = MCAST_FILTER_CMD,
+		.flags = CMD_ASYNC,
+		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+	};
+	int ret, len;
+
+	/* if we don't have free ports, mcast frames will be dropped */
+	if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM))
+		return;
+
+	if (vif->type != NL80211_IFTYPE_STATION ||
+	    !vif->bss_conf.assoc)
+		return;
+
+	cmd->port_id = data->port_id++;
+	memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
+	len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
+
+	hcmd.len[0] = len;
+	hcmd.data[0] = cmd;
+
+	ret = iwl_mvm_send_cmd(mvm, &hcmd);
+	if (ret)
+		IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
+}
+
+static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
+{
+	struct iwl_mvm_mc_iter_data iter_data = {
+		.mvm = mvm,
+	};
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
+		return;
+
+	ieee80211_iterate_active_interfaces_atomic(
+		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+		iwl_mvm_mc_iface_iterator, &iter_data);
+}
+
+static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw,
+				     struct netdev_hw_addr_list *mc_list)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	struct iwl_mcast_filter_cmd *cmd;
+	struct netdev_hw_addr *addr;
+	int addr_count;
+	bool pass_all;
+	int len;
+
+	addr_count = netdev_hw_addr_list_count(mc_list);
+	pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES ||
+		   IWL_MVM_FW_MCAST_FILTER_PASS_ALL;
+	if (pass_all)
+		addr_count = 0;
+
+	len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4);
+	cmd = kzalloc(len, GFP_ATOMIC);
+	if (!cmd)
+		return 0;
+
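+	/*
+	 * The command buffer is handed to mac80211 as an opaque u64
+	 * cookie; it comes back as the "multicast" argument of
+	 * iwl_mvm_configure_filter(), which takes ownership of it.
+	 */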
+	if (pass_all) {
+		cmd->pass_all = 1;
+		return (u64)(unsigned long)cmd;
+	}
+
+	netdev_hw_addr_list_for_each(addr, mc_list) {
+		IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n",
+				   cmd->count, addr->addr);
+		memcpy(&cmd->addr_list[cmd->count * ETH_ALEN],
+		       addr->addr, ETH_ALEN);
+		cmd->count++;
+	}
+
+	return (u64)(unsigned long)cmd;
+}
+
+static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
+				     unsigned int changed_flags,
+				     unsigned int *total_flags,
+				     u64 multicast)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast;
+
+	mutex_lock(&mvm->mutex);
+
+	/* replace previous configuration */
+	kfree(mvm->mcast_filter_cmd);
+	mvm->mcast_filter_cmd = cmd;
+
+	if (!cmd)
+		goto out;
+
+	if (changed_flags & FIF_ALLMULTI)
+		cmd->pass_all = !!(*total_flags & FIF_ALLMULTI);
+
+	if (cmd->pass_all)
+		cmd->count = 0;
+
+	iwl_mvm_recalc_multicast(mvm);
+out:
+	mutex_unlock(&mvm->mutex);
+	*total_flags = 0;
+}
+
+static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw,
+					struct ieee80211_vif *vif,
+					unsigned int filter_flags,
+					unsigned int changed_flags)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+	/* We only support filtering of probe requests */
+	if (!(changed_flags & FIF_PROBE_REQ))
+		return;
+
+	/* Supported only for p2p client interfaces */
+	if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
+	    !vif->p2p)
+		return;
+
+	mutex_lock(&mvm->mutex);
+	iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+	mutex_unlock(&mvm->mutex);
+}
+
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+struct iwl_bcast_iter_data {
+	struct iwl_mvm *mvm;
+	struct iwl_bcast_filter_cmd *cmd;
+	u8 current_filter;
+};
+
+static void
+iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif,
+			 const struct iwl_fw_bcast_filter *in_filter,
+			 struct iwl_fw_bcast_filter *out_filter)
+{
+	struct iwl_fw_bcast_filter_attr *attr;
+	int i;
+
+	memcpy(out_filter, in_filter, sizeof(*out_filter));
+
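+	/*
+	 * attr->reserved1 is used internally as a magic marker: attributes
+	 * tagged BC_FILTER_MAGIC_IP/MAC get their value patched with the
+	 * vif's ARP address or the tail of its MAC address, and the marker
+	 * is cleared before the command is sent to the firmware.
+	 */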
+	for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) {
+		attr = &out_filter->attrs[i];
+
+		if (!attr->mask)
+			break;
+
+		switch (attr->reserved1) {
+		case cpu_to_le16(BC_FILTER_MAGIC_IP):
+			if (vif->bss_conf.arp_addr_cnt != 1) {
+				attr->mask = 0;
+				continue;
+			}
+
+			attr->val = vif->bss_conf.arp_addr_list[0];
+			break;
+		case cpu_to_le16(BC_FILTER_MAGIC_MAC):
+			attr->val = *(__be32 *)&vif->addr[2];
+			break;
+		default:
+			break;
+		}
+		attr->reserved1 = 0;
+		out_filter->num_attrs++;
+	}
+}
+
+static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
+					  struct ieee80211_vif *vif)
+{
+	struct iwl_bcast_iter_data *data = _data;
+	struct iwl_mvm *mvm = data->mvm;
+	struct iwl_bcast_filter_cmd *cmd = data->cmd;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_fw_bcast_mac *bcast_mac;
+	int i;
+
+	if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs)))
+		return;
+
+	bcast_mac = &cmd->macs[mvmvif->id];
+
+	/*
+	 * Enable filtering only for associated stations, but not for P2P
+	 * clients.
+	 */
+	if (vif->type != NL80211_IFTYPE_STATION || vif->p2p ||
+	    !vif->bss_conf.assoc)
+		return;
+
+	bcast_mac->default_discard = 1;
+
+	/* copy all configured filters */
+	for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) {
+		/*
+		 * Make sure we don't exceed our filter limit.
+		 * If there is still a valid filter to be configured,
+		 * be on the safe side and just allow bcast for this mac.
+		 */
+		if (WARN_ON_ONCE(data->current_filter >=
+				 ARRAY_SIZE(cmd->filters))) {
+			bcast_mac->default_discard = 0;
+			bcast_mac->attached_filters = 0;
+			break;
+		}
+
+		iwl_mvm_set_bcast_filter(vif,
+					 &mvm->bcast_filters[i],
+					 &cmd->filters[data->current_filter]);
+
+		/* skip current filter if it contains no attributes */
+		if (!cmd->filters[data->current_filter].num_attrs)
+			continue;
+
+		/* attach the filter to current mac */
+		bcast_mac->attached_filters |=
+				cpu_to_le16(BIT(data->current_filter));
+
+		data->current_filter++;
+	}
+}
+
+bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
+				    struct iwl_bcast_filter_cmd *cmd)
+{
+	struct iwl_bcast_iter_data iter_data = {
+		.mvm = mvm,
+		.cmd = cmd,
+	};
+
+	if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL)
+		return false;
+
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
+	cmd->max_macs = ARRAY_SIZE(cmd->macs);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	/* use debugfs filters/macs if override is configured */
+	if (mvm->dbgfs_bcast_filtering.override) {
+		memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters,
+		       sizeof(cmd->filters));
+		memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs,
+		       sizeof(cmd->macs));
+		return true;
+	}
+#endif
+
+	/* if no filters are configured, do nothing */
+	if (!mvm->bcast_filters)
+		return false;
+
+	/* configure and attach these filters for each associated sta vif */
+	ieee80211_iterate_active_interfaces(
+		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+		iwl_mvm_bcast_filter_iterator, &iter_data);
+
+	return true;
+}
+
+static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
+{
+	struct iwl_bcast_filter_cmd cmd;
+
+	if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
+		return 0;
+
+	if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
+		return 0;
+
+	return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
+				    sizeof(cmd), &cmd);
+}
+#else
+static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
+{
+	return 0;
+}
+#endif
+
+static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm,
+				    struct ieee80211_vif *vif)
+{
+	struct iwl_mu_group_mgmt_cmd cmd = {};
+
+	memcpy(cmd.membership_status, vif->bss_conf.mu_group.membership,
+	       WLAN_MEMBERSHIP_LEN);
+	memcpy(cmd.user_position, vif->bss_conf.mu_group.position,
+	       WLAN_USER_POSITION_LEN);
+
+	return iwl_mvm_send_cmd_pdu(mvm,
+				    WIDE_ID(DATA_PATH_GROUP,
+					    UPDATE_MU_GROUPS_CMD),
+				    0, sizeof(cmd), &cmd);
+}
+
+static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac,
+					   struct ieee80211_vif *vif)
+{
+	if (vif->mu_mimo_owner) {
+		struct iwl_mu_group_mgmt_notif *notif = _data;
+
+		/*
+		 * MU-MIMO Group Id action frame is little endian. We treat
+		 * the data received from firmware as if it came from the
+		 * action frame, so no conversion is needed.
+		 */
+		ieee80211_update_mu_groups(vif,
+					   (u8 *)&notif->membership_status,
+					   (u8 *)&notif->user_position);
+	}
+}
+
+void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm,
+			       struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data;
+
+	ieee80211_iterate_active_interfaces_atomic(
+			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+			iwl_mvm_mu_mimo_iface_iterator, notif);
+}
+
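+/*
+ * Extract one PPE threshold field (IEEE80211_PPE_THRES_INFO_PPET_SIZE = 3
+ * bits wide) from the packed PPE Thresholds octet string, starting at bit
+ * position ppe_pos_bit.  For example, ppe_pos_bit = 14 means byte 1,
+ * bit 6: two residue bits come from byte 1 and the third bit from byte 2.
+ */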
+static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit)
+{
+	u8 byte_num = ppe_pos_bit / 8;
+	u8 bit_num = ppe_pos_bit % 8;
+	u8 residue_bits;
+	u8 res;
+
+	if (bit_num <= 5)
+		return (ppe[byte_num] >> bit_num) &
+		       (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE) - 1);
+
+	/*
+	 * If bit_num > 5, we have to combine bits with next byte.
+	 * Calculate how many bits we need to take from current byte (called
+	 * here "residue_bits"), and add them to bits from next byte.
+	 */
+
+	residue_bits = 8 - bit_num;
+
+	res = (ppe[byte_num + 1] &
+	       (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE - residue_bits) - 1)) <<
+	      residue_bits;
+	res += (ppe[byte_num] >> bit_num) & (BIT(residue_bits) - 1);
+
+	return res;
+}
+
+static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
+			       struct ieee80211_vif *vif, u8 sta_id)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_he_sta_context_cmd sta_ctxt_cmd = {
+		.sta_id = sta_id,
+		.tid_limit = IWL_MAX_TID_COUNT,
+		.bss_color = vif->bss_conf.bss_color,
+		.htc_trig_based_pkt_ext = vif->bss_conf.htc_trig_based_pkt_ext,
+		.frame_time_rts_th =
+			cpu_to_le16(vif->bss_conf.frame_time_rts_th),
+	};
+	struct ieee80211_sta *sta;
+	u32 flags;
+	int i;
+
+	rcu_read_lock();
+
+	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]);
+	if (IS_ERR(sta)) {
+		rcu_read_unlock();
+		WARN(1, "Can't find STA to configure HE\n");
+		return;
+	}
+
+	if (!sta->he_cap.has_he) {
+		rcu_read_unlock();
+		return;
+	}
+
+	flags = 0;
+
+	/* HTC flags */
+	if (sta->he_cap.he_cap_elem.mac_cap_info[0] &
+	    IEEE80211_HE_MAC_CAP0_HTC_HE)
+		sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_SUPPORT);
+	if ((sta->he_cap.he_cap_elem.mac_cap_info[1] &
+	      IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) ||
+	    (sta->he_cap.he_cap_elem.mac_cap_info[2] &
+	      IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) {
+		u8 link_adap =
+			((sta->he_cap.he_cap_elem.mac_cap_info[2] &
+			  IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) +
+			 (sta->he_cap.he_cap_elem.mac_cap_info[1] &
+			  IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION);
+
+		if (link_adap == 2)
+			sta_ctxt_cmd.htc_flags |=
+				cpu_to_le32(IWL_HE_HTC_LINK_ADAP_UNSOLICITED);
+		else if (link_adap == 3)
+			sta_ctxt_cmd.htc_flags |=
+				cpu_to_le32(IWL_HE_HTC_LINK_ADAP_BOTH);
+	}
+	if (sta->he_cap.he_cap_elem.mac_cap_info[2] &
+	    IEEE80211_HE_MAC_CAP2_UL_MU_RESP_SCHED)
+		sta_ctxt_cmd.htc_flags |=
+			cpu_to_le32(IWL_HE_HTC_UL_MU_RESP_SCHED);
+	if (sta->he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR)
+		sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BSR_SUPP);
+	if (sta->he_cap.he_cap_elem.mac_cap_info[3] &
+	    IEEE80211_HE_MAC_CAP3_OMI_CONTROL)
+		sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_OMI_SUPP);
+	if (sta->he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR)
+		sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP);
+
+	/* If PPE Thresholds exist, parse them into a FW-familiar format */
+	if (sta->he_cap.he_cap_elem.phy_cap_info[6] &
+	    IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
+		u8 nss = (sta->he_cap.ppe_thres[0] &
+			  IEEE80211_PPE_THRES_NSS_MASK) + 1;
+		u8 ru_index_bitmap =
+			(sta->he_cap.ppe_thres[0] &
+			 IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >>
+			IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS;
+		u8 *ppe = &sta->he_cap.ppe_thres[0];
+		u8 ppe_pos_bit = 7; /* Starting after PPE header */
+
+		/*
+		 * FW currently supports only nss == MAX_HE_SUPP_NSS
+		 *
+		 * If nss > MAX: we can ignore values we don't support
+		 * If nss < MAX: we can set zeros in other streams
+		 */
+		if (nss > MAX_HE_SUPP_NSS) {
+			IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss,
+				 MAX_HE_SUPP_NSS);
+			nss = MAX_HE_SUPP_NSS;
+		}
+
+		for (i = 0; i < nss; i++) {
+			u8 ru_index_tmp = ru_index_bitmap << 1;
+			u8 bw;
+
+			for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX; bw++) {
+				ru_index_tmp >>= 1;
+				if (!(ru_index_tmp & 1))
+					continue;
+
+				sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][1] =
+					iwl_mvm_he_get_ppe_val(ppe,
+							       ppe_pos_bit);
+				ppe_pos_bit +=
+					IEEE80211_PPE_THRES_INFO_PPET_SIZE;
+				sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][0] =
+					iwl_mvm_he_get_ppe_val(ppe,
+							       ppe_pos_bit);
+				ppe_pos_bit +=
+					IEEE80211_PPE_THRES_INFO_PPET_SIZE;
+			}
+		}
+
+		flags |= STA_CTXT_HE_PACKET_EXT;
+	}
+	rcu_read_unlock();
+
+	/* Mark MU EDCA as enabled, unless it is missing on some AC */
+	flags |= STA_CTXT_HE_MU_EDCA_CW;
+	for (i = 0; i < AC_NUM; i++) {
+		struct ieee80211_he_mu_edca_param_ac_rec *mu_edca =
+			&mvmvif->queue_params[i].mu_edca_param_rec;
+
+		if (!mvmvif->queue_params[i].mu_edca) {
+			flags &= ~STA_CTXT_HE_MU_EDCA_CW;
+			break;
+		}
+
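+		/*
+		 * ECWmin/ECWmax arrive packed as two 4-bit exponents in
+		 * ecw_min_max: low nibble = ECWmin, high nibble = ECWmax.
+		 */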
+		sta_ctxt_cmd.trig_based_txf[i].cwmin =
+			cpu_to_le16(mu_edca->ecw_min_max & 0xf);
+		sta_ctxt_cmd.trig_based_txf[i].cwmax =
+			cpu_to_le16((mu_edca->ecw_min_max & 0xf0) >> 4);
+		sta_ctxt_cmd.trig_based_txf[i].aifsn =
+			cpu_to_le16(mu_edca->aifsn);
+		sta_ctxt_cmd.trig_based_txf[i].mu_time =
+			cpu_to_le16(mu_edca->mu_edca_timer);
+	}
+
+	if (vif->bss_conf.multi_sta_back_32bit)
+		flags |= STA_CTXT_HE_32BIT_BA_BITMAP;
+
+	if (vif->bss_conf.ack_enabled)
+		flags |= STA_CTXT_HE_ACK_ENABLED;
+
+	if (vif->bss_conf.uora_exists) {
+		flags |= STA_CTXT_HE_TRIG_RND_ALLOC;
+
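+		/* the OCW range packs ECWmin in bits 0-2, ECWmax in bits 3-5 */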
+		sta_ctxt_cmd.rand_alloc_ecwmin =
+			vif->bss_conf.uora_ocw_range & 0x7;
+		sta_ctxt_cmd.rand_alloc_ecwmax =
+			(vif->bss_conf.uora_ocw_range >> 3) & 0x7;
+	}
+
+	/* TODO: support Multi BSSID IE */
+
+	sta_ctxt_cmd.flags = cpu_to_le32(flags);
+
+	if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(STA_HE_CTXT_CMD,
+						 DATA_PATH_GROUP, 0),
+				 0, sizeof(sta_ctxt_cmd), &sta_ctxt_cmd))
+		IWL_ERR(mvm, "Failed to config FW to work HE!\n");
+}
+
+static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
+					     struct ieee80211_vif *vif,
+					     struct ieee80211_bss_conf *bss_conf,
+					     u32 changes)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	int ret;
+
+	/*
+	 * Re-calculate the tsf id, as the master-slave relations depend on the
+	 * beacon interval, which was not known when the station interface was
+	 * added.
+	 */
+	if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) {
+		if (vif->bss_conf.he_support &&
+		    !iwlwifi_mod_params.disable_11ax)
+			iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id);
+
+		iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
+	}
+
+	/*
+	 * If we're not associated yet, take the (new) BSSID before associating
+	 * so the firmware knows. If we're already associated, then use the old
+	 * BSSID here, and we'll send a cleared one later in the CHANGED_ASSOC
+	 * branch for disassociation below.
+	 */
+	if (changes & BSS_CHANGED_BSSID && !mvmvif->associated)
+		memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
+
+	ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid);
+	if (ret)
+		IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
+
+	/* after sending it once, adopt mac80211 data */
+	memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN);
+	mvmvif->associated = bss_conf->assoc;
+
+	if (changes & BSS_CHANGED_ASSOC) {
+		if (bss_conf->assoc) {
+			/* clear statistics to get clean beacon counter */
+			iwl_mvm_request_statistics(mvm, true);
+			memset(&mvmvif->beacon_stats, 0,
+			       sizeof(mvmvif->beacon_stats));
+
+			/* add quota for this interface */
+			ret = iwl_mvm_update_quotas(mvm, true, NULL);
+			if (ret) {
+				IWL_ERR(mvm, "failed to update quotas\n");
+				return;
+			}
+
+			if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
+				     &mvm->status)) {
+				/*
+				 * If we're restarting then the firmware will
+				 * obviously have lost synchronisation with
+				 * the AP. It will attempt to synchronise by
+				 * itself, but we can make it more reliable by
+				 * scheduling a session protection time event.
+				 *
+				 * The firmware needs to receive a beacon to
+				 * catch up with synchronisation, use 110% of
+				 * the beacon interval.
+				 *
+				 * Set a large maximum delay to allow for more
+				 * than a single interface.
+				 */
+				u32 dur = (11 * vif->bss_conf.beacon_int) / 10;
+				iwl_mvm_protect_session(mvm, vif, dur, dur,
+							5 * dur, false);
+			}
+
+			iwl_mvm_sf_update(mvm, vif, false);
+			iwl_mvm_power_vif_assoc(mvm, vif);
+			if (vif->p2p) {
+				iwl_mvm_ref(mvm, IWL_MVM_REF_P2P_CLIENT);
+				iwl_mvm_update_smps(mvm, vif,
+						    IWL_MVM_SMPS_REQ_PROT,
+						    IEEE80211_SMPS_DYNAMIC);
+			}
+		} else if (mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
+			/*
+			 * If the update fails, SF might be running in
+			 * associated mode while disassociated, which is
+			 * forbidden.
+			 */
+			WARN_ONCE(iwl_mvm_sf_update(mvm, vif, false),
+				  "Failed to update SF upon disassociation\n");
+
+			/*
+			 * If we get an assert during the connection (after the
+			 * station has been added, but before the vif is set
+			 * to associated), mac80211 will re-add the station and
+			 * then configure the vif. Since the vif is not
+			 * associated, we would remove the station here and
+			 * this would fail the recovery.
+			 */
+			if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
+				      &mvm->status)) {
+				/*
+				 * Remove AP station now that
+				 * the MAC is unassoc
+				 */
+				ret = iwl_mvm_rm_sta_id(mvm, vif,
+							mvmvif->ap_sta_id);
+				if (ret)
+					IWL_ERR(mvm,
+						"failed to remove AP station\n");
+
+				if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
+					mvm->d0i3_ap_sta_id =
+						IWL_MVM_INVALID_STA;
+				mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
+			}
+
+			/* remove quota for this interface */
+			ret = iwl_mvm_update_quotas(mvm, false, NULL);
+			if (ret)
+				IWL_ERR(mvm, "failed to update quotas\n");
+
+			if (vif->p2p)
+				iwl_mvm_unref(mvm, IWL_MVM_REF_P2P_CLIENT);
+
+			/* this will take the cleared BSSID from bss_conf */
+			ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+			if (ret)
+				IWL_ERR(mvm,
+					"failed to update MAC %pM (clear after unassoc)\n",
+					vif->addr);
+		}
+
+		/*
+		 * The firmware tracks the MU-MIMO group on its own.
+		 * However, on HW restart we should restore this data.
+		 */
+		if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+		    (changes & BSS_CHANGED_MU_GROUPS) && vif->mu_mimo_owner) {
+			ret = iwl_mvm_update_mu_groups(mvm, vif);
+			if (ret)
+				IWL_ERR(mvm,
+					"failed to update VHT MU_MIMO groups\n");
+		}
+
+		iwl_mvm_recalc_multicast(mvm);
+		iwl_mvm_configure_bcast_filter(mvm);
+
+		/* reset rssi values */
+		mvmvif->bf_data.ave_beacon_signal = 0;
+
+		iwl_mvm_bt_coex_vif_change(mvm);
+		iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT,
+				    IEEE80211_SMPS_AUTOMATIC);
+		if (fw_has_capa(&mvm->fw->ucode_capa,
+				IWL_UCODE_TLV_CAPA_UMAC_SCAN))
+			iwl_mvm_config_scan(mvm);
+	}
+
+	if (changes & BSS_CHANGED_BEACON_INFO) {
+		/*
+		 * We received a beacon from the associated AP so
+		 * remove the session protection.
+		 */
+		iwl_mvm_stop_session_protection(mvm, vif);
+
+		iwl_mvm_sf_update(mvm, vif, false);
+		WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
+	}
+
+	if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS |
+		       /*
+			* Send power command on every beacon change,
+			* because we may have not enabled beacon abort yet.
+			*/
+		       BSS_CHANGED_BEACON_INFO)) {
+		ret = iwl_mvm_power_update_mac(mvm);
+		if (ret)
+			IWL_ERR(mvm, "failed to update power mode\n");
+	}
+
+	if (changes & BSS_CHANGED_TXPOWER) {
+		IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
+				bss_conf->txpower);
+		iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
+	}
+
+	if (changes & BSS_CHANGED_CQM) {
+		IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n");
+		/* reset cqm events tracking */
+		mvmvif->bf_data.last_cqm_event = 0;
+		if (mvmvif->bf_data.bf_enabled) {
+			ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
+			if (ret)
+				IWL_ERR(mvm,
+					"failed to update CQM thresholds\n");
+		}
+	}
+
+	if (changes & BSS_CHANGED_ARP_FILTER) {
+		IWL_DEBUG_MAC80211(mvm, "arp filter changed\n");
+		iwl_mvm_configure_bcast_filter(mvm);
+	}
+}
+
+static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
+				 struct ieee80211_vif *vif)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	int ret;
+
+	/*
+	 * iwl_mvm_mac_ctxt_add() might read directly from the device
+	 * (the system time), so make sure it is available.
+	 */
+	ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_START_AP);
+	if (ret)
+		return ret;
+
+	mutex_lock(&mvm->mutex);
+
+	/* Send the beacon template */
+	ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif);
+	if (ret)
+		goto out_unlock;
+
+	/*
+	 * Re-calculate the tsf id, as the master-slave relations depend on the
+	 * beacon interval, which was not known when the AP interface was added.
+	 */
+	if (vif->type == NL80211_IFTYPE_AP)
+		iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
+
+	mvmvif->ap_assoc_sta_count = 0;
+
+	/* Add the mac context */
+	ret = iwl_mvm_mac_ctxt_add(mvm, vif);
+	if (ret)
+		goto out_unlock;
+
+	/* Perform the binding */
+	ret = iwl_mvm_binding_add_vif(mvm, vif);
+	if (ret)
+		goto out_remove;
+
+	/*
+	 * This is not very nice, but the simplest:
+	 * For older FWs adding the mcast sta before the bcast station may
+	 * cause assert 0x2b00.
+	 * This is fixed in later FW, so make the order of addition depend
+	 * on the TLV.
+	 */
+	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
+		ret = iwl_mvm_add_mcast_sta(mvm, vif);
+		if (ret)
+			goto out_unbind;
+		/*
+		 * Send the bcast station. At this stage the TBTT and DTIM time
+		 * events are added and applied to the scheduler
+		 */
+		ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
+		if (ret) {
+			iwl_mvm_rm_mcast_sta(mvm, vif);
+			goto out_unbind;
+		}
+	} else {
+		/*
+		 * Send the bcast station. At this stage the TBTT and DTIM time
+		 * events are added and applied to the scheduler
+		 */
+		ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
+		if (ret)
+			goto out_unbind;
+		ret = iwl_mvm_add_mcast_sta(mvm, vif);
+		if (ret) {
+			iwl_mvm_send_rm_bcast_sta(mvm, vif);
+			goto out_unbind;
+		}
+	}
+
+	/* must be set before quota calculations */
+	mvmvif->ap_ibss_active = true;
+
+	/* the power update needs to be done before quotas */
+	iwl_mvm_power_update_mac(mvm);
+
+	ret = iwl_mvm_update_quotas(mvm, false, NULL);
+	if (ret)
+		goto out_quota_failed;
+
+	/* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
+	if (vif->p2p && mvm->p2p_device_vif)
+		iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
+
+	iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS);
+
+	iwl_mvm_bt_coex_vif_change(mvm);
+
+	/* we don't support TDLS during DCM */
+	if (iwl_mvm_phy_ctx_count(mvm) > 1)
+		iwl_mvm_teardown_tdls_peers(mvm);
+
+	goto out_unlock;
+
+out_quota_failed:
+	iwl_mvm_power_update_mac(mvm);
+	mvmvif->ap_ibss_active = false;
+	iwl_mvm_send_rm_bcast_sta(mvm, vif);
+	iwl_mvm_rm_mcast_sta(mvm, vif);
+out_unbind:
+	iwl_mvm_binding_remove_vif(mvm, vif);
+out_remove:
+	iwl_mvm_mac_ctxt_remove(mvm, vif);
+out_unlock:
+	mutex_unlock(&mvm->mutex);
+	iwl_mvm_unref(mvm, IWL_MVM_REF_START_AP);
+	return ret;
+}
+
+static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
+				 struct ieee80211_vif *vif)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	iwl_mvm_prepare_mac_removal(mvm, vif);
+
+	mutex_lock(&mvm->mutex);
+
+	/* Handle AP stop while in CSA */
+	if (rcu_access_pointer(mvm->csa_vif) == vif) {
+		iwl_mvm_remove_time_event(mvm, mvmvif,
+					  &mvmvif->time_event_data);
+		RCU_INIT_POINTER(mvm->csa_vif, NULL);
+		mvmvif->csa_countdown = false;
+	}
+
+	if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) {
+		RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
+		mvm->csa_tx_block_bcn_timeout = 0;
+	}
+
+	mvmvif->ap_ibss_active = false;
+	mvm->ap_last_beacon_gp2 = 0;
+
+	iwl_mvm_bt_coex_vif_change(mvm);
+
+	iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS);
+
+	/* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
+	if (vif->p2p && mvm->p2p_device_vif)
+		iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL);
+
+	iwl_mvm_update_quotas(mvm, false, NULL);
+
+	/*
+	 * This is not very nice, but the simplest:
+	 * For older FWs removing the bcast station before the mcast station
+	 * may cause assert 0x2b00.
+	 * This is fixed in later FW (which will stop beaconing when removing
+	 * the bcast station).
+	 * So make the order of removal depend on the TLV.
+	 */
+	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
+		iwl_mvm_rm_mcast_sta(mvm, vif);
+	iwl_mvm_send_rm_bcast_sta(mvm, vif);
+	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
+		iwl_mvm_rm_mcast_sta(mvm, vif);
+	iwl_mvm_binding_remove_vif(mvm, vif);
+
+	iwl_mvm_power_update_mac(mvm);
+
+	iwl_mvm_mac_ctxt_remove(mvm, vif);
+
+	mutex_unlock(&mvm->mutex);
+}
+
+static void
+iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
+				 struct ieee80211_vif *vif,
+				 struct ieee80211_bss_conf *bss_conf,
+				 u32 changes)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	/* Changes will be applied when the AP/IBSS is started */
+	if (!mvmvif->ap_ibss_active)
+		return;
+
+	if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT |
+		       BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS) &&
+	    iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL))
+		IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
+
+	/* Need to send a new beacon template to the FW */
+	if (changes & BSS_CHANGED_BEACON &&
+	    iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
+		IWL_WARN(mvm, "Failed updating beacon data\n");
+
+	if (changes & BSS_CHANGED_TXPOWER) {
+		IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
+				bss_conf->txpower);
+		iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
+	}
+}
+
+static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
+				     struct ieee80211_vif *vif,
+				     struct ieee80211_bss_conf *bss_conf,
+				     u32 changes)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+	/*
+	 * iwl_mvm_bss_info_changed_station() might call
+	 * iwl_mvm_protect_session(), which reads directly from
+	 * the device (the system time), so make sure it is available.
+	 */
+	if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_BSS_CHANGED))
+		return;
+
+	mutex_lock(&mvm->mutex);
+
+	if (changes & BSS_CHANGED_IDLE && !bss_conf->idle)
+		iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
+
+	switch (vif->type) {
+	case NL80211_IFTYPE_STATION:
+		iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes);
+		break;
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_ADHOC:
+		iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes);
+		break;
+	case NL80211_IFTYPE_MONITOR:
+		if (changes & BSS_CHANGED_MU_GROUPS)
+			iwl_mvm_update_mu_groups(mvm, vif);
+		break;
+	default:
+		/* shouldn't happen */
+		WARN_ON_ONCE(1);
+	}
+
+	mutex_unlock(&mvm->mutex);
+	iwl_mvm_unref(mvm, IWL_MVM_REF_BSS_CHANGED);
+}
+
+static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
+			       struct ieee80211_vif *vif,
+			       struct ieee80211_scan_request *hw_req)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	int ret;
+
+	if (hw_req->req.n_channels == 0 ||
+	    hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)
+		return -EINVAL;
+
+	mutex_lock(&mvm->mutex);
+	ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies);
+	mutex_unlock(&mvm->mutex);
+
+	return ret;
+}
+
+static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
+				       struct ieee80211_vif *vif)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+	mutex_lock(&mvm->mutex);
+
+	/* Due to a race condition, it's possible that mac80211 asks
+	 * us to stop a hw_scan when it's already stopped.  This can
+	 * happen, for instance, if we stopped the scan ourselves,
+	 * called ieee80211_scan_completed() and then userspace called
+	 * cancel scan before ieee80211_scan_work() could run.
+	 * To handle that, simply return if the scan is not running.
+	 */
+	if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
+		iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
+
+	mutex_unlock(&mvm->mutex);
+}
+
+static void
+iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw,
+				  struct ieee80211_sta *sta, u16 tids,
+				  int num_frames,
+				  enum ieee80211_frame_release_type reason,
+				  bool more_data)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+	/* Called when we need to transmit (a) frame(s) from mac80211 */
+
+	iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
+					  tids, more_data, false);
+}
+
+static void
+iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
+				    struct ieee80211_sta *sta, u16 tids,
+				    int num_frames,
+				    enum ieee80211_frame_release_type reason,
+				    bool more_data)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+	/* Called when we need to transmit (a) frame(s) from agg or dqa queue */
+
+	iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
+					  tids, more_data, true);
+}
+
+static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
+				     enum sta_notify_cmd cmd,
+				     struct ieee80211_sta *sta)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	unsigned long txqs = 0, tids = 0;
+	int tid;
+
+	/*
+	 * If we have TVQM then we get too high queue numbers - luckily
+	 * we really shouldn't get here with that because such hardware
+	 * should have firmware supporting buffer station offload.
+	 */
+	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+		return;
+
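+	/*
+	 * Collect two bitmaps: txqs holds every HW queue this station uses
+	 * (so their timers can be frozen below), and tids holds the TIDs
+	 * that still have frames queued (so mac80211 can mark them as
+	 * buffered when the station goes to sleep).
+	 */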
+	spin_lock_bh(&mvmsta->lock);
+	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+		struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+
+		if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE)
+			continue;
+
+		__set_bit(tid_data->txq_id, &txqs);
+
+		if (iwl_mvm_tid_queued(mvm, tid_data) == 0)
+			continue;
+
+		__set_bit(tid, &tids);
+	}
+
+	switch (cmd) {
+	case STA_NOTIFY_SLEEP:
+		for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT)
+			ieee80211_sta_set_buffered(sta, tid, true);
+
+		if (txqs)
+			iwl_trans_freeze_txq_timer(mvm->trans, txqs, true);
+		/*
+		 * The fw updates the STA to be asleep. Tx packets on the Tx
+		 * queues to this station will not be transmitted. The fw will
+		 * send a Tx response with TX_STATUS_FAIL_DEST_PS.
+		 */
+		break;
+	case STA_NOTIFY_AWAKE:
+		if (WARN_ON(mvmsta->sta_id == IWL_MVM_INVALID_STA))
+			break;
+
+		if (txqs)
+			iwl_trans_freeze_txq_timer(mvm->trans, txqs, false);
+		iwl_mvm_sta_modify_ps_wake(mvm, sta);
+		break;
+	default:
+		break;
+	}
+	spin_unlock_bh(&mvmsta->lock);
+}
+
+static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
+				   struct ieee80211_vif *vif,
+				   enum sta_notify_cmd cmd,
+				   struct ieee80211_sta *sta)
+{
+	__iwl_mvm_mac_sta_notify(hw, cmd, sta);
+}
+
+void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_mvm_pm_state_notification *notif = (void *)pkt->data;
+	struct ieee80211_sta *sta;
+	struct iwl_mvm_sta *mvmsta;
+	bool sleeping = (notif->type != IWL_MVM_PM_EVENT_AWAKE);
+
+	if (WARN_ON(notif->sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id)))
+		return;
+
+	rcu_read_lock();
+	sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]);
+	if (WARN_ON(IS_ERR_OR_NULL(sta))) {
+		rcu_read_unlock();
+		return;
+	}
+
+	mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+	if (!mvmsta->vif ||
+	    mvmsta->vif->type != NL80211_IFTYPE_AP) {
+		rcu_read_unlock();
+		return;
+	}
+
+	if (mvmsta->sleeping != sleeping) {
+		mvmsta->sleeping = sleeping;
+		__iwl_mvm_mac_sta_notify(mvm->hw,
+			sleeping ? STA_NOTIFY_SLEEP : STA_NOTIFY_AWAKE,
+			sta);
+		ieee80211_sta_ps_transition(sta, sleeping);
+	}
+
+	if (sleeping) {
+		switch (notif->type) {
+		case IWL_MVM_PM_EVENT_AWAKE:
+		case IWL_MVM_PM_EVENT_ASLEEP:
+			break;
+		case IWL_MVM_PM_EVENT_UAPSD:
+			ieee80211_sta_uapsd_trigger(sta, IEEE80211_NUM_TIDS);
+			break;
+		case IWL_MVM_PM_EVENT_PS_POLL:
+			ieee80211_sta_pspoll(sta);
+			break;
+		default:
+			break;
+		}
+	}
+
+	rcu_read_unlock();
+}
+
+static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
+				       struct ieee80211_vif *vif,
+				       struct ieee80211_sta *sta)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
+	/*
+	 * This is called before mac80211 does RCU synchronisation,
+	 * so here we already invalidate our internal RCU-protected
+	 * station pointer. The rest of the code will thus no longer
+	 * be able to find the station this way, and we don't rely
+	 * on further RCU synchronisation after the sta_state()
+	 * callback deleted the station.
+	 */
+	mutex_lock(&mvm->mutex);
+	if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id]))
+		rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
+				   ERR_PTR(-ENOENT));
+
+	mutex_unlock(&mvm->mutex);
+}
+
+static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+				const u8 *bssid)
+{
+	int i;
+
+	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+		struct iwl_mvm_tcm_mac *mdata;
+
+		mdata = &mvm->tcm.data[iwl_mvm_vif_from_mac80211(vif)->id];
+		ewma_rate_init(&mdata->uapsd_nonagg_detect.rate);
+		mdata->opened_rx_ba_sessions = false;
+	}
+
+	if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT))
+		return;
+
+	if (vif->p2p && !iwl_mvm_is_p2p_scm_uapsd_supported(mvm)) {
+		vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
+		return;
+	}
+
+	if (!vif->p2p &&
+	    (iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_BSS)) {
+		vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
+		return;
+	}
+
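+	/* leave U-APSD disabled for BSSIDs on the no-aggregation deny list */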
+	for (i = 0; i < IWL_MVM_UAPSD_NOAGG_LIST_LEN; i++) {
+		if (ether_addr_equal(mvm->uapsd_noagg_bssids[i].addr, bssid)) {
+			vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
+			return;
+		}
+	}
+
+	vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
+}
+
+static void
+iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm,
+			   struct ieee80211_vif *vif, u8 *peer_addr,
+			   enum nl80211_tdls_operation action)
+{
+	struct iwl_fw_dbg_trigger_tlv *trig;
+	struct iwl_fw_dbg_trigger_tdls *tdls_trig;
+
+	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TDLS))
+		return;
+
+	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TDLS);
+	tdls_trig = (void *)trig->data;
+	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
+					   ieee80211_vif_to_wdev(vif), trig))
+		return;
+
+	if (!(tdls_trig->action_bitmap & BIT(action)))
+		return;
+
+	if (tdls_trig->peer_mode &&
+	    memcmp(tdls_trig->peer, peer_addr, ETH_ALEN) != 0)
+		return;
+
+	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
+				"TDLS event occurred, peer %pM, action %d",
+				peer_addr, action);
+}
+
+static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
+					     struct iwl_mvm_sta *mvm_sta)
+{
+	struct iwl_mvm_tid_data *tid_data;
+	struct sk_buff *skb;
+	int i;
+
+	spin_lock_bh(&mvm_sta->lock);
+	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
+		tid_data = &mvm_sta->tid_data[i];
+
+		while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames))) {
+			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+			/*
+			 * The first deferred frame should've stopped the MAC
+			 * queues, so we should never get a second deferred
+			 * frame for the RA/TID.
+			 */
+			iwl_mvm_start_mac_queues(mvm, BIT(info->hw_queue));
+			ieee80211_free_txskb(mvm->hw, skb);
+		}
+	}
+	spin_unlock_bh(&mvm_sta->lock);
+}
+
+static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
+				 struct ieee80211_vif *vif,
+				 struct ieee80211_sta *sta,
+				 enum ieee80211_sta_state old_state,
+				 enum ieee80211_sta_state new_state)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+	int ret;
+
+	IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n",
+			   sta->addr, old_state, new_state);
+
+	/* this would be a mac80211 bug ... but don't crash */
+	if (WARN_ON_ONCE(!mvmvif->phy_ctxt))
+		return -EINVAL;
+
+	/*
+	 * If we are in a STA removal flow and in DQA mode:
+	 *
+	 * This is after the sync_rcu part, so the queues have already been
+	 * flushed. No more TXs on their way in mac80211's path, and no more in
+	 * the queues.
+	 * Also, we won't be getting any new TX frames for this station.
+	 * What we might have are deferred TX frames that need to be taken care
+	 * of.
+	 *
+	 * Drop any still-queued deferred-frame before removing the STA, and
+	 * make sure the worker is no longer handling frames for this STA.
+	 */
+	if (old_state == IEEE80211_STA_NONE &&
+	    new_state == IEEE80211_STA_NOTEXIST) {
+		iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta);
+		flush_work(&mvm->add_stream_wk);
+
+		/*
+		 * No need to make sure deferred TX indication is off since the
+		 * worker will already remove it if it was on
+		 */
+	}
+
+	mutex_lock(&mvm->mutex);
+	/* track whether or not the station is associated */
+	mvm_sta->sta_state = new_state;
+
+	if (old_state == IEEE80211_STA_NOTEXIST &&
+	    new_state == IEEE80211_STA_NONE) {
+		/*
+		 * Firmware bug - it'll crash if the beacon interval is less
+		 * than 16. We can't avoid connecting at all, so refuse the
+		 * station state change, this will cause mac80211 to abandon
+		 * attempts to connect to this AP, and eventually wpa_s will
+		 * blacklist the AP...
+		 */
+		if (vif->type == NL80211_IFTYPE_STATION &&
+		    vif->bss_conf.beacon_int < 16) {
+			IWL_ERR(mvm,
+				"AP %pM beacon interval is %d, refusing due to firmware bug!\n",
+				sta->addr, vif->bss_conf.beacon_int);
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+
+		if (sta->tdls &&
+		    (vif->p2p ||
+		     iwl_mvm_tdls_sta_count(mvm, NULL) ==
+						IWL_MVM_TDLS_STA_COUNT ||
+		     iwl_mvm_phy_ctx_count(mvm) > 1)) {
+			IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n");
+			ret = -EBUSY;
+			goto out_unlock;
+		}
+
+		ret = iwl_mvm_add_sta(mvm, vif, sta);
+		if (sta->tdls && ret == 0) {
+			iwl_mvm_recalc_tdls_state(mvm, vif, true);
+			iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
+						   NL80211_TDLS_SETUP);
+		}
+	} else if (old_state == IEEE80211_STA_NONE &&
+		   new_state == IEEE80211_STA_AUTH) {
+		/*
+		 * EBS may be disabled due to previous failures reported by FW.
+		 * Reset EBS status here assuming environment has been changed.
+		 */
+		mvm->last_ebs_successful = true;
+		iwl_mvm_check_uapsd(mvm, vif, sta->addr);
+		ret = 0;
+	} else if (old_state == IEEE80211_STA_AUTH &&
+		   new_state == IEEE80211_STA_ASSOC) {
+		if (vif->type == NL80211_IFTYPE_AP) {
+			mvmvif->ap_assoc_sta_count++;
+			iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+		}
+
+		iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band);
+		ret = iwl_mvm_update_sta(mvm, vif, sta);
+	} else if (old_state == IEEE80211_STA_ASSOC &&
+		   new_state == IEEE80211_STA_AUTHORIZED) {
+
+		/* we don't support TDLS during DCM */
+		if (iwl_mvm_phy_ctx_count(mvm) > 1)
+			iwl_mvm_teardown_tdls_peers(mvm);
+
+		if (sta->tdls)
+			iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
+						   NL80211_TDLS_ENABLE_LINK);
+
+		/* enable beacon filtering */
+		WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
+
+		iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band);
+
+		ret = 0;
+	} else if (old_state == IEEE80211_STA_AUTHORIZED &&
+		   new_state == IEEE80211_STA_ASSOC) {
+		/* disable beacon filtering */
+		WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, 0));
+		ret = 0;
+	} else if (old_state == IEEE80211_STA_ASSOC &&
+		   new_state == IEEE80211_STA_AUTH) {
+		if (vif->type == NL80211_IFTYPE_AP) {
+			mvmvif->ap_assoc_sta_count--;
+			iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+		}
+		ret = 0;
+	} else if (old_state == IEEE80211_STA_AUTH &&
+		   new_state == IEEE80211_STA_NONE) {
+		ret = 0;
+	} else if (old_state == IEEE80211_STA_NONE &&
+		   new_state == IEEE80211_STA_NOTEXIST) {
+		ret = iwl_mvm_rm_sta(mvm, vif, sta);
+		if (sta->tdls) {
+			iwl_mvm_recalc_tdls_state(mvm, vif, false);
+			iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
+						   NL80211_TDLS_DISABLE_LINK);
+		}
+	} else {
+		ret = -EIO;
+	}
+ out_unlock:
+	mutex_unlock(&mvm->mutex);
+
+	if (sta->tdls && ret == 0) {
+		if (old_state == IEEE80211_STA_NOTEXIST &&
+		    new_state == IEEE80211_STA_NONE)
+			ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID);
+		else if (old_state == IEEE80211_STA_NONE &&
+			 new_state == IEEE80211_STA_NOTEXIST)
+			ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID);
+	}
+
+	return ret;
+}
+
+static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+	mvm->rts_threshold = value;
+
+	return 0;
+}
+
+static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw,
+				  struct ieee80211_vif *vif,
+				  struct ieee80211_sta *sta, u32 changed)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+	if (vif->type == NL80211_IFTYPE_STATION &&
+	    changed & IEEE80211_RC_NSS_CHANGED)
+		iwl_mvm_sf_update(mvm, vif, false);
+}
+
+static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
+			       struct ieee80211_vif *vif, u16 ac,
+			       const struct ieee80211_tx_queue_params *params)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	mvmvif->queue_params[ac] = *params;
+
+	/*
+	 * No need to update right away, we'll get BSS_CHANGED_QOS
+	 * The exception is P2P_DEVICE interface which needs immediate update.
+	 */
+	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+		int ret;
+
+		mutex_lock(&mvm->mutex);
+		ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+		mutex_unlock(&mvm->mutex);
+		return ret;
+	}
+	return 0;
+}
+
+static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
+				       struct ieee80211_vif *vif,
+				       u16 req_duration)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
+	u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS;
+
+	/*
+	 * iwl_mvm_protect_session() reads directly from the device
+	 * (the system time), so make sure it is available.
+	 */
+	if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PREPARE_TX))
+		return;
+
+	if (req_duration > duration)
+		duration = req_duration;
+
+	mutex_lock(&mvm->mutex);
+	/* Try really hard to protect the session and hear a beacon */
+	iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false);
+	mutex_unlock(&mvm->mutex);
+
+	iwl_mvm_unref(mvm, IWL_MVM_REF_PREPARE_TX);
+}
+
+static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
+					struct ieee80211_vif *vif,
+					struct cfg80211_sched_scan_request *req,
+					struct ieee80211_scan_ies *ies)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	int ret;
+
+	mutex_lock(&mvm->mutex);
+
+	if (!vif->bss_conf.idle) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED);
+
+out:
+	mutex_unlock(&mvm->mutex);
+	return ret;
+}
+
+static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
+				       struct ieee80211_vif *vif)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	int ret;
+
+	mutex_lock(&mvm->mutex);
+
+	/* Due to a race condition, it's possible that mac80211 asks
+	 * us to stop a sched_scan when it's already stopped.  This
+	 * can happen, for instance, if we stopped the scan ourselves,
+	 * called ieee80211_sched_scan_stopped() and userspace called
+	 * stop sched scan before ieee80211_sched_scan_stopped_work()
+	 * could run.  To handle this, simply return if the scan is
+	 * not running.
+	 */
+	if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) {
+		mutex_unlock(&mvm->mutex);
+		return 0;
+	}
+
+	ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false);
+	mutex_unlock(&mvm->mutex);
+	iwl_mvm_wait_for_async_handlers(mvm);
+
+	return ret;
+}
+
+static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
+			       enum set_key_cmd cmd,
+			       struct ieee80211_vif *vif,
+			       struct ieee80211_sta *sta,
+			       struct ieee80211_key_conf *key)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	struct iwl_mvm_sta *mvmsta;
+	struct iwl_mvm_key_pn *ptk_pn;
+	int keyidx = key->keyidx;
+	int ret;
+	u8 key_offset;
+
+	if (iwlwifi_mod_params.swcrypto) {
+		IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n");
+		return -EOPNOTSUPP;
+	}
+
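+	/*
+	 * Note on the flags set below: PUT_IV_SPACE asks mac80211 to
+	 * reserve room for the IV/PN in TX frames without filling it in
+	 * (the device generates it); GENERATE_MMIC asks mac80211 to
+	 * compute the TKIP Michael MIC in software.
+	 */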
+	switch (key->cipher) {
+	case WLAN_CIPHER_SUITE_TKIP:
+		key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+		key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+		break;
+	case WLAN_CIPHER_SUITE_CCMP:
+	case WLAN_CIPHER_SUITE_GCMP:
+	case WLAN_CIPHER_SUITE_GCMP_256:
+		if (!iwl_mvm_has_new_tx_api(mvm))
+			key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+		break;
+	case WLAN_CIPHER_SUITE_AES_CMAC:
+	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+		WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE));
+		break;
+	case WLAN_CIPHER_SUITE_WEP40:
+	case WLAN_CIPHER_SUITE_WEP104:
+		/* For non-client mode, only use WEP keys for TX as we probably
+		 * don't have a station yet anyway and would then have to keep
+		 * track of the keys, linking them to each of the clients/peers
+		 * as they appear. For now, don't do that; WEP offload doesn't
+		 * really matter much for performance, but we do need it for
+		 * some other offload features in client mode.
+		 */
+		if (vif->type != NL80211_IFTYPE_STATION)
+			return 0;
+		break;
+	default:
+		/* currently FW supports only one optional cipher scheme */
+		if (hw->n_cipher_schemes &&
+		    hw->cipher_schemes->cipher == key->cipher)
+			key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+		else
+			return -EOPNOTSUPP;
+	}
+
+	mutex_lock(&mvm->mutex);
+
+	switch (cmd) {
+	case SET_KEY:
+		if ((vif->type == NL80211_IFTYPE_ADHOC ||
+		     vif->type == NL80211_IFTYPE_AP) && !sta) {
+			/*
+			 * GTK on AP interface is a TX-only key, return 0;
+			 * on IBSS they're per-station and because we're lazy
+			 * we don't support them for RX, so do the same.
+			 * CMAC/GMAC in AP/IBSS modes must be done in software.
+			 */
+			if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+			    key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+			    key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
+				ret = -EOPNOTSUPP;
+			else
+				ret = 0;
+
+			if (key->cipher != WLAN_CIPHER_SUITE_GCMP &&
+			    key->cipher != WLAN_CIPHER_SUITE_GCMP_256 &&
+			    !iwl_mvm_has_new_tx_api(mvm)) {
+				key->hw_key_idx = STA_KEY_IDX_INVALID;
+				break;
+			}
+		}
+
+		/* During FW restart, in order to restore the state as it was,
+		 * don't try to reprogram keys we previously failed for.
+		 */
+		if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+		    key->hw_key_idx == STA_KEY_IDX_INVALID) {
+			IWL_DEBUG_MAC80211(mvm,
+					   "skip invalid idx key programming during restart\n");
+			ret = 0;
+			break;
+		}
+
+		if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+		    sta && iwl_mvm_has_new_rx_api(mvm) &&
+		    key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
+		    (key->cipher == WLAN_CIPHER_SUITE_CCMP ||
+		     key->cipher == WLAN_CIPHER_SUITE_GCMP ||
+		     key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
+			struct ieee80211_key_seq seq;
+			int tid, q;
+
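+			/*
+			 * With multi-queue RX the device can't serialize
+			 * replay detection across queues, so keep a
+			 * per-queue, per-TID copy of the last RX PN here;
+			 * the RX path checks incoming PNs against it.
+			 */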
+			mvmsta = iwl_mvm_sta_from_mac80211(sta);
+			WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx]));
+			ptk_pn = kzalloc(struct_size(ptk_pn, q,
+						     mvm->trans->num_rx_queues),
+					 GFP_KERNEL);
+			if (!ptk_pn) {
+				ret = -ENOMEM;
+				break;
+			}
+
+			for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+				ieee80211_get_key_rx_seq(key, tid, &seq);
+				for (q = 0; q < mvm->trans->num_rx_queues; q++)
+					memcpy(ptk_pn->q[q].pn[tid],
+					       seq.ccmp.pn,
+					       IEEE80211_CCMP_PN_LEN);
+			}
+
+			rcu_assign_pointer(mvmsta->ptk_pn[keyidx], ptk_pn);
+		}
+
+		/* in HW restart reuse the index, otherwise request a new one */
+		if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+			key_offset = key->hw_key_idx;
+		else
+			key_offset = STA_KEY_IDX_INVALID;
+
+		IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n");
+		ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);
+		if (ret) {
+			IWL_WARN(mvm, "set key failed\n");
+			/*
+			 * can't add key for RX, but we don't need it
+			 * in the device for TX so still return 0
+			 */
+			key->hw_key_idx = STA_KEY_IDX_INVALID;
+			ret = 0;
+		}
+
+		break;
+	case DISABLE_KEY:
+		if (key->hw_key_idx == STA_KEY_IDX_INVALID) {
+			ret = 0;
+			break;
+		}
+
+		if (sta && iwl_mvm_has_new_rx_api(mvm) &&
+		    key->flags & IEEE80211_KEY_FLAG_PAIRWISE &&
+		    (key->cipher == WLAN_CIPHER_SUITE_CCMP ||
+		     key->cipher == WLAN_CIPHER_SUITE_GCMP ||
+		     key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) {
+			mvmsta = iwl_mvm_sta_from_mac80211(sta);
+			ptk_pn = rcu_dereference_protected(
+						mvmsta->ptk_pn[keyidx],
+						lockdep_is_held(&mvm->mutex));
+			RCU_INIT_POINTER(mvmsta->ptk_pn[keyidx], NULL);
+			if (ptk_pn)
+				kfree_rcu(ptk_pn, rcu_head);
+		}
+
+		IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n");
+		ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	mutex_unlock(&mvm->mutex);
+	return ret;
+}
+
+static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
+					struct ieee80211_vif *vif,
+					struct ieee80211_key_conf *keyconf,
+					struct ieee80211_sta *sta,
+					u32 iv32, u16 *phase1key)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+	if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID)
+		return;
+
+	iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key);
+}
+
+static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait,
+			       struct iwl_rx_packet *pkt, void *data)
+{
+	struct iwl_mvm *mvm =
+		container_of(notif_wait, struct iwl_mvm, notif_wait);
+	struct iwl_hs20_roc_res *resp;
+	int resp_len = iwl_rx_packet_payload_len(pkt);
+	struct iwl_mvm_time_event_data *te_data = data;
+
+	if (WARN_ON(pkt->hdr.cmd != HOT_SPOT_CMD))
+		return true;
+
+	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
+		IWL_ERR(mvm, "Invalid HOT_SPOT_CMD response\n");
+		return true;
+	}
+
+	resp = (void *)pkt->data;
+
+	IWL_DEBUG_TE(mvm,
+		     "Aux ROC: Recieved response from ucode: status=%d uid=%d\n",
+		     resp->status, resp->event_unique_id);
+
+	te_data->uid = le32_to_cpu(resp->event_unique_id);
+	IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
+		     te_data->uid);
+
+	spin_lock_bh(&mvm->time_event_lock);
+	list_add_tail(&te_data->list, &mvm->aux_roc_te_list);
+	spin_unlock_bh(&mvm->time_event_lock);
+
+	return true;
+}
+
+#define AUX_ROC_MIN_DURATION MSEC_TO_TU(100)
+#define AUX_ROC_MIN_DELAY MSEC_TO_TU(200)
+#define AUX_ROC_MAX_DELAY MSEC_TO_TU(600)
+#define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20)
+#define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10)
+static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
+				    struct ieee80211_channel *channel,
+				    struct ieee80211_vif *vif,
+				    int duration)
+{
+	int res, time_reg = DEVICE_SYSTEM_TIME_REG;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data;
+	static const u16 time_event_response[] = { HOT_SPOT_CMD };
+	struct iwl_notification_wait wait_time_event;
+	u32 dtim_interval = vif->bss_conf.dtim_period *
+		vif->bss_conf.beacon_int;
+	u32 req_dur, delay;
+	struct iwl_hs20_roc_req aux_roc_req = {
+		.action = cpu_to_le32(FW_CTXT_ACTION_ADD),
+		.id_and_color =
+			cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)),
+		.sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id),
+		/* Set the channel info data */
+		.channel_info.band = (channel->band == NL80211_BAND_2GHZ) ?
+			PHY_BAND_24 : PHY_BAND_5,
+		.channel_info.channel = channel->hw_value,
+		.channel_info.width = PHY_VHT_CHANNEL_MODE20,
+		/* Set the time and duration */
+		.apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg)),
+	};
+
+	delay = AUX_ROC_MIN_DELAY;
+	req_dur = MSEC_TO_TU(duration);
+
+	/*
+	 * If we are associated we want the delay time to be at least one
+	 * DTIM interval so that the FW can wait until after the DTIM and
+	 * then start the time event; this potentially allows us to remain
+	 * off-channel for the max duration.
+	 * Since we want to use almost a whole DTIM interval we would also
+	 * like the delay to span 2-3 DTIM intervals, in case there are
+	 * other time events with higher priority.
+	 */
+	if (vif->bss_conf.assoc) {
+		delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY);
+		/* We cannot remain off-channel longer than the DTIM interval */
+		if (dtim_interval <= req_dur) {
+			req_dur = dtim_interval - AUX_ROC_SAFETY_BUFFER;
+			if (req_dur <= AUX_ROC_MIN_DURATION)
+				req_dur = dtim_interval -
+					AUX_ROC_MIN_SAFETY_BUFFER;
+		}
+	}
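+	/*
+	 * Worked example (illustrative values, not from the spec): with
+	 * dtim_period = 3 and beacon_int = 100 TU, dtim_interval is 300 TU.
+	 * A 500 ms request becomes MSEC_TO_TU(500) = 488 TU, which exceeds
+	 * the DTIM interval, so req_dur is clamped to 300 - 19 = 281 TU and
+	 * delay = min(3 * 300, AUX_ROC_MAX_DELAY) = 585 TU.
+	 */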
+
+	aux_roc_req.duration = cpu_to_le32(req_dur);
+	aux_roc_req.apply_time_max_delay = cpu_to_le32(delay);
+
+	IWL_DEBUG_TE(mvm,
+		     "ROC: Requesting to remain on channel %u for %ums (requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
+		     channel->hw_value, req_dur, duration, delay,
+		     dtim_interval);
+	/* Set the node address */
+	memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN);
+
+	lockdep_assert_held(&mvm->mutex);
+
+	spin_lock_bh(&mvm->time_event_lock);
+
+	if (WARN_ON(te_data->id == HOT_SPOT_CMD)) {
+		spin_unlock_bh(&mvm->time_event_lock);
+		return -EIO;
+	}
+
+	te_data->vif = vif;
+	te_data->duration = duration;
+	te_data->id = HOT_SPOT_CMD;
+
+	spin_unlock_bh(&mvm->time_event_lock);
+
+	/*
+	 * Use a notification wait, which really just processes the
+	 * command response and doesn't wait for anything, in order
+	 * to be able to process the response and get the UID inside
+	 * the RX path. Using CMD_WANT_SKB doesn't work because it
+	 * stores the buffer and then wakes up this thread, by which
+	 * time another notification (that the time event started)
+	 * might already be processed unsuccessfully.
+	 */
+	iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
+				   time_event_response,
+				   ARRAY_SIZE(time_event_response),
+				   iwl_mvm_rx_aux_roc, te_data);
+
+	res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, sizeof(aux_roc_req),
+				   &aux_roc_req);
+
+	if (res) {
+		IWL_ERR(mvm, "Couldn't send HOT_SPOT_CMD: %d\n", res);
+		iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
+		goto out_clear_te;
+	}
+
+	/* No need to wait for anything, so just pass 1 (0 isn't valid) */
+	res = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
+	/* should never fail */
+	WARN_ON_ONCE(res);
+
+	if (res) {
+ out_clear_te:
+		spin_lock_bh(&mvm->time_event_lock);
+		iwl_mvm_te_clear_data(mvm, te_data);
+		spin_unlock_bh(&mvm->time_event_lock);
+	}
+
+	return res;
+}
+
+static int iwl_mvm_roc(struct ieee80211_hw *hw,
+		       struct ieee80211_vif *vif,
+		       struct ieee80211_channel *channel,
+		       int duration,
+		       enum ieee80211_roc_type type)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct cfg80211_chan_def chandef;
+	struct iwl_mvm_phy_ctxt *phy_ctxt;
+	int ret, i;
+
+	IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
+			   duration, type);
+
+	/*
+	 * Flush the done work, just in case it's still pending, so that
+	 * the work it does can complete and we can accept new frames.
+	 */
+	flush_work(&mvm->roc_done_wk);
+
+	mutex_lock(&mvm->mutex);
+
+	switch (vif->type) {
+	case NL80211_IFTYPE_STATION:
+		if (fw_has_capa(&mvm->fw->ucode_capa,
+				IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) {
+			/* Use aux roc framework (HS20) */
+			ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
+						       vif, duration);
+			goto out_unlock;
+		}
+		IWL_ERR(mvm, "hotspot not supported\n");
+		ret = -EINVAL;
+		goto out_unlock;
+	case NL80211_IFTYPE_P2P_DEVICE:
+		/* handle below */
+		break;
+	default:
+		IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type);
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	for (i = 0; i < NUM_PHY_CTX; i++) {
+		phy_ctxt = &mvm->phy_ctxts[i];
+		if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt)
+			continue;
+
+		if (phy_ctxt->ref && channel == phy_ctxt->channel) {
+			/*
+			 * Unbind the P2P_DEVICE from the current PHY context,
+			 * and if the PHY context is not used remove it.
+			 */
+			ret = iwl_mvm_binding_remove_vif(mvm, vif);
+			if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
+				goto out_unlock;
+
+			iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
+
+			/* Bind the P2P_DEVICE to the current PHY Context */
+			mvmvif->phy_ctxt = phy_ctxt;
+
+			ret = iwl_mvm_binding_add_vif(mvm, vif);
+			if (WARN(ret, "Failed binding P2P_DEVICE\n"))
+				goto out_unlock;
+
+			iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
+			goto schedule_time_event;
+		}
+	}
+
+	/* Need to update the PHY context only if the ROC channel changed */
+	if (channel == mvmvif->phy_ctxt->channel)
+		goto schedule_time_event;
+
+	cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
+
+	/*
+	 * Change the PHY context configuration as it is currently referenced
+	 * only by the P2P Device MAC
+	 */
+	if (mvmvif->phy_ctxt->ref == 1) {
+		ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt,
+					       &chandef, 1, 1);
+		if (ret)
+			goto out_unlock;
+	} else {
+		/*
+		 * The PHY context is shared with other MACs. Need to remove the
+		 * P2P Device from the binding, allocate a new PHY context and
+		 * create a new binding.
+		 */
+		phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
+		if (!phy_ctxt) {
+			ret = -ENOSPC;
+			goto out_unlock;
+		}
+
+		ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef,
+					       1, 1);
+		if (ret) {
+			IWL_ERR(mvm, "Failed to change PHY context\n");
+			goto out_unlock;
+		}
+
+		/* Unbind the P2P_DEVICE from the current PHY context */
+		ret = iwl_mvm_binding_remove_vif(mvm, vif);
+		if (WARN(ret, "Failed unbinding P2P_DEVICE\n"))
+			goto out_unlock;
+
+		iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
+
+		/* Bind the P2P_DEVICE to the new allocated PHY context */
+		mvmvif->phy_ctxt = phy_ctxt;
+
+		ret = iwl_mvm_binding_add_vif(mvm, vif);
+		if (WARN(ret, "Failed binding P2P_DEVICE\n"))
+			goto out_unlock;
+
+		iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
+	}
+
+schedule_time_event:
+	/* Schedule the time events */
+	ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type);
+
+out_unlock:
+	mutex_unlock(&mvm->mutex);
+	IWL_DEBUG_MAC80211(mvm, "leave\n");
+	return ret;
+}
+
+static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+	IWL_DEBUG_MAC80211(mvm, "enter\n");
+
+	mutex_lock(&mvm->mutex);
+	iwl_mvm_stop_roc(mvm);
+	mutex_unlock(&mvm->mutex);
+
+	IWL_DEBUG_MAC80211(mvm, "leave\n");
+	return 0;
+}
+
+static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm,
+				 struct ieee80211_chanctx_conf *ctx)
+{
+	u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
+	struct iwl_mvm_phy_ctxt *phy_ctxt;
+	int ret;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	IWL_DEBUG_MAC80211(mvm, "Add channel context\n");
+
+	phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
+	if (!phy_ctxt) {
+		ret = -ENOSPC;
+		goto out;
+	}
+
+	ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
+				       ctx->rx_chains_static,
+				       ctx->rx_chains_dynamic);
+	if (ret) {
+		IWL_ERR(mvm, "Failed to add PHY context\n");
+		goto out;
+	}
+
+	iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt);
+	*phy_ctxt_id = phy_ctxt->id;
+out:
+	return ret;
+}
+
+static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
+			       struct ieee80211_chanctx_conf *ctx)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	int ret;
+
+	mutex_lock(&mvm->mutex);
+	ret = __iwl_mvm_add_chanctx(mvm, ctx);
+	mutex_unlock(&mvm->mutex);
+
+	return ret;
+}
+
+static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm,
+				     struct ieee80211_chanctx_conf *ctx)
+{
+	u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
+	struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
+
+	lockdep_assert_held(&mvm->mutex);
+
+	iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt);
+}
+
+static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw,
+				   struct ieee80211_chanctx_conf *ctx)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+	mutex_lock(&mvm->mutex);
+	__iwl_mvm_remove_chanctx(mvm, ctx);
+	mutex_unlock(&mvm->mutex);
+}
+
+static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
+				   struct ieee80211_chanctx_conf *ctx,
+				   u32 changed)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
+	struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
+
+	if (WARN_ONCE((phy_ctxt->ref > 1) &&
+		      (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH |
+				   IEEE80211_CHANCTX_CHANGE_RX_CHAINS |
+				   IEEE80211_CHANCTX_CHANGE_RADAR |
+				   IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)),
+		      "Cannot change PHY. Ref=%d, changed=0x%X\n",
+		      phy_ctxt->ref, changed))
+		return;
+
+	mutex_lock(&mvm->mutex);
+
+	/* we are only changing the min_width, may be a noop */
+	if (changed == IEEE80211_CHANCTX_CHANGE_MIN_WIDTH) {
+		if (phy_ctxt->width == ctx->min_def.width)
+			goto out_unlock;
+
+		/* we are just toggling between 20_NOHT and 20 */
+		if (phy_ctxt->width <= NL80211_CHAN_WIDTH_20 &&
+		    ctx->min_def.width <= NL80211_CHAN_WIDTH_20)
+			goto out_unlock;
+	}
+
+	iwl_mvm_bt_coex_vif_change(mvm);
+	iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def,
+				 ctx->rx_chains_static,
+				 ctx->rx_chains_dynamic);
+
+out_unlock:
+	mutex_unlock(&mvm->mutex);
+}
+
+static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
+					struct ieee80211_vif *vif,
+					struct ieee80211_chanctx_conf *ctx,
+					bool switching_chanctx)
+{
+	u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
+	struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	int ret;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	mvmvif->phy_ctxt = phy_ctxt;
+
+	switch (vif->type) {
+	case NL80211_IFTYPE_AP:
+		/* only needed if we're switching chanctx (i.e. during CSA) */
+		if (switching_chanctx) {
+			mvmvif->ap_ibss_active = true;
+			break;
+		}
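+		/* fall through */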
+	case NL80211_IFTYPE_ADHOC:
+		/*
+		 * The AP binding flow is handled as part of the start_ap flow
+		 * (in bss_info_changed), similarly for IBSS.
+		 */
+		ret = 0;
+		goto out;
+	case NL80211_IFTYPE_STATION:
+		mvmvif->csa_bcn_pending = false;
+		break;
+	case NL80211_IFTYPE_MONITOR:
+		/* always disable PS when a monitor interface is active */
+		mvmvif->ps_disabled = true;
+		break;
+	default:
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = iwl_mvm_binding_add_vif(mvm, vif);
+	if (ret)
+		goto out;
+
+	/*
+	 * Power state must be updated before quotas,
+	 * otherwise fw will complain.
+	 */
+	iwl_mvm_power_update_mac(mvm);
+
+	/* Setting the quota at this stage is only required for monitor
+	 * interfaces. For the other types, the bss_info changed flow
+	 * will handle quota settings.
+	 */
+	if (vif->type == NL80211_IFTYPE_MONITOR) {
+		mvmvif->monitor_active = true;
+		ret = iwl_mvm_update_quotas(mvm, false, NULL);
+		if (ret)
+			goto out_remove_binding;
+
+		ret = iwl_mvm_add_snif_sta(mvm, vif);
+		if (ret)
+			goto out_remove_binding;
+
+	}
+
+	/* Handle binding during CSA */
+	if (vif->type == NL80211_IFTYPE_AP) {
+		iwl_mvm_update_quotas(mvm, false, NULL);
+		iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+	}
+
+	if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) {
+		u32 duration = 3 * vif->bss_conf.beacon_int;
+
+		/* iwl_mvm_protect_session() reads directly from the
+		 * device (the system time), so make sure it is
+		 * available.
+		 */
+		ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_CSA);
+		if (ret)
+			goto out_remove_binding;
+
+		/* Protect the session to make sure we hear the first
+		 * beacon on the new channel.
+		 */
+		mvmvif->csa_bcn_pending = true;
+		iwl_mvm_protect_session(mvm, vif, duration, duration,
+					vif->bss_conf.beacon_int / 2,
+					true);
+
+		iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA);
+
+		iwl_mvm_update_quotas(mvm, false, NULL);
+	}
+
+	goto out;
+
+out_remove_binding:
+	iwl_mvm_binding_remove_vif(mvm, vif);
+	iwl_mvm_power_update_mac(mvm);
+out:
+	if (ret)
+		mvmvif->phy_ctxt = NULL;
+	return ret;
+}
+
+static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
+				      struct ieee80211_vif *vif,
+				      struct ieee80211_chanctx_conf *ctx)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	int ret;
+
+	mutex_lock(&mvm->mutex);
+	ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, ctx, false);
+	mutex_unlock(&mvm->mutex);
+
+	return ret;
+}
+
+static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm,
+					   struct ieee80211_vif *vif,
+					   struct ieee80211_chanctx_conf *ctx,
+					   bool switching_chanctx)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct ieee80211_vif *disabled_vif = NULL;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data);
+
+	switch (vif->type) {
+	case NL80211_IFTYPE_ADHOC:
+		goto out;
+	case NL80211_IFTYPE_MONITOR:
+		mvmvif->monitor_active = false;
+		mvmvif->ps_disabled = false;
+		iwl_mvm_rm_snif_sta(mvm, vif);
+		break;
+	case NL80211_IFTYPE_AP:
+		/* This part is triggered only during CSA */
+		if (!switching_chanctx || !mvmvif->ap_ibss_active)
+			goto out;
+
+		mvmvif->csa_countdown = false;
+
+		/* Set CS bit on all the stations */
+		iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true);
+
+		/* Save blocked iface, the timeout is set on the next beacon */
+		rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif);
+
+		mvmvif->ap_ibss_active = false;
+		break;
+	case NL80211_IFTYPE_STATION:
+		if (!switching_chanctx)
+			break;
+
+		disabled_vif = vif;
+
+		iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL);
+		break;
+	default:
+		break;
+	}
+
+	iwl_mvm_update_quotas(mvm, false, disabled_vif);
+	iwl_mvm_binding_remove_vif(mvm, vif);
+
+out:
+	mvmvif->phy_ctxt = NULL;
+	iwl_mvm_power_update_mac(mvm);
+}
+
+static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
+					 struct ieee80211_vif *vif,
+					 struct ieee80211_chanctx_conf *ctx)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+	mutex_lock(&mvm->mutex);
+	__iwl_mvm_unassign_vif_chanctx(mvm, vif, ctx, false);
+	mutex_unlock(&mvm->mutex);
+}
+
+static int
+iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm,
+				struct ieee80211_vif_chanctx_switch *vifs)
+{
+	int ret;
+
+	mutex_lock(&mvm->mutex);
+	__iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
+	__iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx);
+
+	ret = __iwl_mvm_add_chanctx(mvm, vifs[0].new_ctx);
+	if (ret) {
+		IWL_ERR(mvm, "failed to add new_ctx during channel switch\n");
+		goto out_reassign;
+	}
+
+	ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
+					   true);
+	if (ret) {
+		IWL_ERR(mvm,
+			"failed to assign new_ctx during channel switch\n");
+		goto out_remove;
+	}
+
+	/* we don't support TDLS during DCM - can be caused by channel switch */
+	if (iwl_mvm_phy_ctx_count(mvm) > 1)
+		iwl_mvm_teardown_tdls_peers(mvm);
+
+	goto out;
+
+out_remove:
+	__iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx);
+
+out_reassign:
+	if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) {
+		IWL_ERR(mvm, "failed to add old_ctx back after failure.\n");
+		goto out_restart;
+	}
+
+	if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
+					 true)) {
+		IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
+		goto out_restart;
+	}
+
+	goto out;
+
+out_restart:
+	/* things keep failing, better restart the hw */
+	iwl_mvm_nic_restart(mvm, false);
+
+out:
+	mutex_unlock(&mvm->mutex);
+
+	return ret;
+}
+
+static int
+iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm,
+				    struct ieee80211_vif_chanctx_switch *vifs)
+{
+	int ret;
+
+	mutex_lock(&mvm->mutex);
+	__iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true);
+
+	ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx,
+					   true);
+	if (ret) {
+		IWL_ERR(mvm,
+			"failed to assign new_ctx during channel switch\n");
+		goto out_reassign;
+	}
+
+	goto out;
+
+out_reassign:
+	if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx,
+					 true)) {
+		IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n");
+		goto out_restart;
+	}
+
+	goto out;
+
+out_restart:
+	/* things keep failing, better restart the hw */
+	iwl_mvm_nic_restart(mvm, false);
+
+out:
+	mutex_unlock(&mvm->mutex);
+
+	return ret;
+}
+
+static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw,
+				      struct ieee80211_vif_chanctx_switch *vifs,
+				      int n_vifs,
+				      enum ieee80211_chanctx_switch_mode mode)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	int ret;
+
+	/* we only support a single vif right now */
+	if (n_vifs > 1)
+		return -EOPNOTSUPP;
+
+	switch (mode) {
+	case CHANCTX_SWMODE_SWAP_CONTEXTS:
+		ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs);
+		break;
+	case CHANCTX_SWMODE_REASSIGN_VIF:
+		ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs);
+		break;
+	default:
+		ret = -EOPNOTSUPP;
+		break;
+	}
+
+	return ret;
+}
+
+static int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+	return mvm->ibss_manager;
+}
+
+static int iwl_mvm_set_tim(struct ieee80211_hw *hw,
+			   struct ieee80211_sta *sta,
+			   bool set)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
+	if (!mvm_sta || !mvm_sta->vif) {
+		IWL_ERR(mvm, "Station is not associated to a vif\n");
+		return -EINVAL;
+	}
+
+	return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif);
+}
+
+#ifdef CONFIG_NL80211_TESTMODE
+static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = {
+	[IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 },
+	[IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 },
+	[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 },
+};
+
+static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
+				      struct ieee80211_vif *vif,
+				      void *data, int len)
+{
+	struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1];
+	int err;
+	u32 noa_duration;
+
+	err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy,
+			NULL);
+	if (err)
+		return err;
+
+	if (!tb[IWL_MVM_TM_ATTR_CMD])
+		return -EINVAL;
+
+	switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) {
+	case IWL_MVM_TM_CMD_SET_NOA:
+		if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p ||
+		    !vif->bss_conf.enable_beacon ||
+		    !tb[IWL_MVM_TM_ATTR_NOA_DURATION])
+			return -EINVAL;
+
+		noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]);
+		if (noa_duration >= vif->bss_conf.beacon_int)
+			return -EINVAL;
+
+		mvm->noa_duration = noa_duration;
+		mvm->noa_vif = vif;
+
+		return iwl_mvm_update_quotas(mvm, true, NULL);
+	case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
+		/* must be associated client vif - ignore authorized */
+		if (!vif || vif->type != NL80211_IFTYPE_STATION ||
+		    !vif->bss_conf.assoc || !vif->bss_conf.dtim_period ||
+		    !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])
+			return -EINVAL;
+
+		if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
+			return iwl_mvm_enable_beacon_filter(mvm, vif, 0);
+		return iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+	}
+
+	return -EOPNOTSUPP;
+}
+
+static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
+				    struct ieee80211_vif *vif,
+				    void *data, int len)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	int err;
+
+	mutex_lock(&mvm->mutex);
+	err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len);
+	mutex_unlock(&mvm->mutex);
+
+	return err;
+}
+#endif
+
+static void iwl_mvm_channel_switch(struct ieee80211_hw *hw,
+				   struct ieee80211_vif *vif,
+				   struct ieee80211_channel_switch *chsw)
+{
+	/* By implementing this operation, we prevent mac80211 from
+	 * starting its own channel switch timer, so that we can call
+	 * ieee80211_chswitch_done() ourselves at the right time
+	 * (which is when the absence time event starts).
+	 */
+
+	IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw),
+			   "dummy channel switch op\n");
+}
+
+static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
+				      struct ieee80211_vif *vif,
+				      struct ieee80211_channel_switch *chsw)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	struct ieee80211_vif *csa_vif;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	u32 apply_time;
+	int ret;
+
+	mutex_lock(&mvm->mutex);
+
+	mvmvif->csa_failed = false;
+
+	IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
+			   chsw->chandef.center_freq1);
+
+	iwl_fw_dbg_trigger_simple_stop(&mvm->fwrt,
+				       ieee80211_vif_to_wdev(vif),
+				       FW_DBG_TRIGGER_CHANNEL_SWITCH);
+
+	switch (vif->type) {
+	case NL80211_IFTYPE_AP:
+		csa_vif =
+			rcu_dereference_protected(mvm->csa_vif,
+						  lockdep_is_held(&mvm->mutex));
+		if (WARN_ONCE(csa_vif && csa_vif->csa_active,
+			      "Another CSA is already in progress")) {
+			ret = -EBUSY;
+			goto out_unlock;
+		}
+
+		/* we still haven't unblocked tx; prevent a new CSA meanwhile */
+		if (rcu_dereference_protected(mvm->csa_tx_blocked_vif,
+					      lockdep_is_held(&mvm->mutex))) {
+			ret = -EBUSY;
+			goto out_unlock;
+		}
+
+		rcu_assign_pointer(mvm->csa_vif, vif);
+
+		if (WARN_ONCE(mvmvif->csa_countdown,
+			      "Previous CSA countdown didn't complete")) {
+			ret = -EBUSY;
+			goto out_unlock;
+		}
+
+		mvmvif->csa_target_freq = chsw->chandef.chan->center_freq;
+
+		break;
+	case NL80211_IFTYPE_STATION:
+		/* Schedule the time event to a bit before beacon 1,
+		 * to make sure we're in the new channel when the
+		 * GO/AP arrives. If count <= 1, schedule the TE
+		 * immediately (this might result in some packet loss or
+		 * connection loss).
+		 */
+		if (chsw->count <= 1)
+			apply_time = 0;
+		else
+			apply_time = chsw->device_timestamp +
+				((vif->bss_conf.beacon_int * (chsw->count - 1) -
+				  IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024);
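+		/* beacon_int and the CSA margin are in TU; device_timestamp
+		 * is in usec, hence the multiplication by 1024 usec per TU.
+		 */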
+
+		if (chsw->block_tx)
+			iwl_mvm_csa_client_absent(mvm, vif);
+
+		iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int,
+					    apply_time);
+		if (mvmvif->bf_data.bf_enabled) {
+			ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+			if (ret)
+				goto out_unlock;
+		}
+
+		break;
+	default:
+		break;
+	}
+
+	mvmvif->ps_disabled = true;
+
+	ret = iwl_mvm_power_update_ps(mvm);
+	if (ret)
+		goto out_unlock;
+
+	/* we won't be on this channel any longer */
+	iwl_mvm_teardown_tdls_peers(mvm);
+
+out_unlock:
+	mutex_unlock(&mvm->mutex);
+
+	return ret;
+}
+
+static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
+				       struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	int ret;
+
+	mutex_lock(&mvm->mutex);
+
+	if (mvmvif->csa_failed) {
+		mvmvif->csa_failed = false;
+		ret = -EIO;
+		goto out_unlock;
+	}
+
+	if (vif->type == NL80211_IFTYPE_STATION) {
+		struct iwl_mvm_sta *mvmsta;
+
+		mvmvif->csa_bcn_pending = false;
+		mvmsta = iwl_mvm_sta_from_staid_protected(mvm,
+							  mvmvif->ap_sta_id);
+
+		if (WARN_ON(!mvmsta)) {
+			ret = -EIO;
+			goto out_unlock;
+		}
+
+		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
+
+		iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+
+		ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0);
+		if (ret)
+			goto out_unlock;
+
+		iwl_mvm_stop_session_protection(mvm, vif);
+	}
+
+	mvmvif->ps_disabled = false;
+
+	ret = iwl_mvm_power_update_ps(mvm);
+
+out_unlock:
+	mutex_unlock(&mvm->mutex);
+
+	return ret;
+}
+
+static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
+{
+	int i;
+
+	if (!iwl_mvm_has_new_tx_api(mvm)) {
+		if (drop) {
+			mutex_lock(&mvm->mutex);
+			iwl_mvm_flush_tx_path(mvm,
+				iwl_mvm_flushable_queues(mvm) & queues, 0);
+			mutex_unlock(&mvm->mutex);
+		} else {
+			iwl_trans_wait_tx_queues_empty(mvm->trans, queues);
+		}
+		return;
+	}
+
+	mutex_lock(&mvm->mutex);
+	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+		struct ieee80211_sta *sta;
+
+		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+						lockdep_is_held(&mvm->mutex));
+		if (IS_ERR_OR_NULL(sta))
+			continue;
+
+		if (drop)
+			iwl_mvm_flush_sta_tids(mvm, i, 0xFF, 0);
+		else
+			iwl_mvm_wait_sta_queues_empty(mvm,
+					iwl_mvm_sta_from_mac80211(sta));
+	}
+	mutex_unlock(&mvm->mutex);
+}
+
+static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
+			      struct ieee80211_vif *vif, u32 queues, bool drop)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	struct iwl_mvm_vif *mvmvif;
+	struct iwl_mvm_sta *mvmsta;
+	struct ieee80211_sta *sta;
+	int i;
+	u32 msk = 0;
+
+	if (!vif) {
+		iwl_mvm_flush_no_vif(mvm, queues, drop);
+		return;
+	}
+
+	if (vif->type != NL80211_IFTYPE_STATION)
+		return;
+
+	/* Make sure we're done with the deferred traffic before flushing */
+	flush_work(&mvm->add_stream_wk);
+
+	mutex_lock(&mvm->mutex);
+	mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	/* flush the AP-station and all TDLS peers */
+	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+						lockdep_is_held(&mvm->mutex));
+		if (IS_ERR_OR_NULL(sta))
+			continue;
+
+		mvmsta = iwl_mvm_sta_from_mac80211(sta);
+		if (mvmsta->vif != vif)
+			continue;
+
+		/* make sure only TDLS peers or the AP are flushed */
+		WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls);
+
+		if (drop) {
+			if (iwl_mvm_flush_sta(mvm, mvmsta, false, 0))
+				IWL_ERR(mvm, "flush request fail\n");
+		} else {
+			msk |= mvmsta->tfd_queue_msk;
+			if (iwl_mvm_has_new_tx_api(mvm))
+				iwl_mvm_wait_sta_queues_empty(mvm, mvmsta);
+		}
+	}
+
+	mutex_unlock(&mvm->mutex);
+
+	/* this can take a while, and we may need/want other operations
+	 * to succeed while doing this, so do it without the mutex held
+	 */
+	if (!drop && !iwl_mvm_has_new_tx_api(mvm))
+		iwl_trans_wait_tx_queues_empty(mvm->trans, msk);
+}
+
+static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
+				  struct survey_info *survey)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	int ret;
+
+	memset(survey, 0, sizeof(*survey));
+
+	/* only support global statistics right now */
+	if (idx != 0)
+		return -ENOENT;
+
+	if (!fw_has_capa(&mvm->fw->ucode_capa,
+			 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
+		return -ENOENT;
+
+	mutex_lock(&mvm->mutex);
+
+	if (iwl_mvm_firmware_running(mvm)) {
+		ret = iwl_mvm_request_statistics(mvm, false);
+		if (ret)
+			goto out;
+	}
+
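+	/* the firmware reports radio statistics in usec; cfg80211 expects
+	 * survey times in ms, hence the do_div() conversions below
+	 */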
+	survey->filled = SURVEY_INFO_TIME |
+			 SURVEY_INFO_TIME_RX |
+			 SURVEY_INFO_TIME_TX |
+			 SURVEY_INFO_TIME_SCAN;
+	survey->time = mvm->accu_radio_stats.on_time_rf +
+		       mvm->radio_stats.on_time_rf;
+	do_div(survey->time, USEC_PER_MSEC);
+
+	survey->time_rx = mvm->accu_radio_stats.rx_time +
+			  mvm->radio_stats.rx_time;
+	do_div(survey->time_rx, USEC_PER_MSEC);
+
+	survey->time_tx = mvm->accu_radio_stats.tx_time +
+			  mvm->radio_stats.tx_time;
+	do_div(survey->time_tx, USEC_PER_MSEC);
+
+	survey->time_scan = mvm->accu_radio_stats.on_time_scan +
+			    mvm->radio_stats.on_time_scan;
+	do_div(survey->time_scan, USEC_PER_MSEC);
+
+	ret = 0;
+ out:
+	mutex_unlock(&mvm->mutex);
+	return ret;
+}
+
+static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
+				       struct ieee80211_vif *vif,
+				       struct ieee80211_sta *sta,
+				       struct station_info *sinfo)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+	if (mvmsta->avg_energy) {
+		sinfo->signal_avg = mvmsta->avg_energy;
+		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
+	}
+
+	/* if beacon filtering isn't on, mac80211 does it anyway */
+	if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER))
+		return;
+
+	if (!vif->bss_conf.assoc)
+		return;
+
+	mutex_lock(&mvm->mutex);
+
+	if (mvmvif->ap_sta_id != mvmsta->sta_id)
+		goto unlock;
+
+	if (iwl_mvm_request_statistics(mvm, false))
+		goto unlock;
+
+	sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons +
+			   mvmvif->beacon_stats.accu_num_beacons;
+	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX);
+	if (mvmvif->beacon_stats.avg_signal) {
+		/* firmware only reports a value after RXing a few beacons */
+		sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal;
+		sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
+	}
+ unlock:
+	mutex_unlock(&mvm->mutex);
+}
+
+static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm,
+					struct ieee80211_vif *vif,
+					const struct ieee80211_event *event)
+{
+#define CHECK_MLME_TRIGGER(_cnt, _fmt...)				\
+	do {								\
+		if ((trig_mlme->_cnt) && --(trig_mlme->_cnt))		\
+			break;						\
+		iwl_fw_dbg_collect_trig(&(mvm)->fwrt, trig, _fmt);	\
+	} while (0)
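+	/* The counter check above means: a zero occurrence counter always
+	 * collects; a non-zero counter is decremented on each event and
+	 * only the occurrence that exhausts it triggers collection.
+	 */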
+
+	struct iwl_fw_dbg_trigger_tlv *trig;
+	struct iwl_fw_dbg_trigger_mlme *trig_mlme;
+
+	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
+		return;
+
+	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
+	trig_mlme = (void *)trig->data;
+	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
+					   ieee80211_vif_to_wdev(vif), trig))
+		return;
+
+	if (event->u.mlme.data == ASSOC_EVENT) {
+		if (event->u.mlme.status == MLME_DENIED)
+			CHECK_MLME_TRIGGER(stop_assoc_denied,
+					   "DENIED ASSOC: reason %d",
+					    event->u.mlme.reason);
+		else if (event->u.mlme.status == MLME_TIMEOUT)
+			CHECK_MLME_TRIGGER(stop_assoc_timeout,
+					   "ASSOC TIMEOUT");
+	} else if (event->u.mlme.data == AUTH_EVENT) {
+		if (event->u.mlme.status == MLME_DENIED)
+			CHECK_MLME_TRIGGER(stop_auth_denied,
+					   "DENIED AUTH: reason %d",
+					   event->u.mlme.reason);
+		else if (event->u.mlme.status == MLME_TIMEOUT)
+			CHECK_MLME_TRIGGER(stop_auth_timeout,
+					   "AUTH TIMEOUT");
+	} else if (event->u.mlme.data == DEAUTH_RX_EVENT) {
+		CHECK_MLME_TRIGGER(stop_rx_deauth,
+				   "DEAUTH RX %d", event->u.mlme.reason);
+	} else if (event->u.mlme.data == DEAUTH_TX_EVENT) {
+		CHECK_MLME_TRIGGER(stop_tx_deauth,
+				   "DEAUTH TX %d", event->u.mlme.reason);
+	}
+#undef CHECK_MLME_TRIGGER
+}
+
+static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm,
+					  struct ieee80211_vif *vif,
+					  const struct ieee80211_event *event)
+{
+	struct iwl_fw_dbg_trigger_tlv *trig;
+	struct iwl_fw_dbg_trigger_ba *ba_trig;
+
+	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+		return;
+
+	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
+	ba_trig = (void *)trig->data;
+	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
+					   ieee80211_vif_to_wdev(vif), trig))
+		return;
+
+	if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid)))
+		return;
+
+	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
+				"BAR received from %pM, tid %d, ssn %d",
+				event->u.ba.sta->addr, event->u.ba.tid,
+				event->u.ba.ssn);
+}
+
+static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
+				       struct ieee80211_vif *vif,
+				       const struct ieee80211_event *event)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+	switch (event->type) {
+	case MLME_EVENT:
+		iwl_mvm_event_mlme_callback(mvm, vif, event);
+		break;
+	case BAR_RX_EVENT:
+		iwl_mvm_event_bar_rx_callback(mvm, vif, event);
+		break;
+	case BA_FRAME_TIMEOUT:
+		iwl_mvm_event_frame_timeout_callback(mvm, vif, event->u.ba.sta,
+						     event->u.ba.tid);
+		break;
+	default:
+		break;
+	}
+}
+
+void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
+				     struct iwl_mvm_internal_rxq_notif *notif,
+				     u32 size)
+{
+	u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
+	int ret;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (!iwl_mvm_has_new_rx_api(mvm))
+		return;
+
+	notif->cookie = mvm->queue_sync_cookie;
+
+	if (notif->sync)
+		atomic_set(&mvm->queue_sync_counter,
+			   mvm->trans->num_rx_queues);
+
+	ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size);
+	if (ret) {
+		IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);
+		goto out;
+	}
+
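+	/*
+	 * In sync mode, each RX queue decrements queue_sync_counter when it
+	 * processes the notification, so the counter reaching zero means
+	 * every queue has drained whatever preceded the sync point.
+	 */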
+	if (notif->sync) {
+		ret = wait_event_timeout(mvm->rx_sync_waitq,
+					 atomic_read(&mvm->queue_sync_counter) == 0 ||
+					 iwl_mvm_is_radio_killed(mvm),
+					 HZ);
+		WARN_ON_ONCE(!ret && !iwl_mvm_is_radio_killed(mvm));
+	}
+
+out:
+	atomic_set(&mvm->queue_sync_counter, 0);
+	mvm->queue_sync_cookie++;
+}
+
+static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	struct iwl_mvm_internal_rxq_notif data = {
+		.type = IWL_MVM_RXQ_EMPTY,
+		.sync = 1,
+	};
+
+	mutex_lock(&mvm->mutex);
+	iwl_mvm_sync_rx_queues_internal(mvm, &data, sizeof(data));
+	mutex_unlock(&mvm->mutex);
+}
+
+const struct ieee80211_ops iwl_mvm_hw_ops = {
+	.tx = iwl_mvm_mac_tx,
+	.ampdu_action = iwl_mvm_mac_ampdu_action,
+	.start = iwl_mvm_mac_start,
+	.reconfig_complete = iwl_mvm_mac_reconfig_complete,
+	.stop = iwl_mvm_mac_stop,
+	.add_interface = iwl_mvm_mac_add_interface,
+	.remove_interface = iwl_mvm_mac_remove_interface,
+	.config = iwl_mvm_mac_config,
+	.prepare_multicast = iwl_mvm_prepare_multicast,
+	.configure_filter = iwl_mvm_configure_filter,
+	.config_iface_filter = iwl_mvm_config_iface_filter,
+	.bss_info_changed = iwl_mvm_bss_info_changed,
+	.hw_scan = iwl_mvm_mac_hw_scan,
+	.cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan,
+	.sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove,
+	.sta_state = iwl_mvm_mac_sta_state,
+	.sta_notify = iwl_mvm_mac_sta_notify,
+	.allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
+	.release_buffered_frames = iwl_mvm_mac_release_buffered_frames,
+	.set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
+	.sta_rc_update = iwl_mvm_sta_rc_update,
+	.conf_tx = iwl_mvm_mac_conf_tx,
+	.mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
+	.mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover,
+	.flush = iwl_mvm_mac_flush,
+	.sched_scan_start = iwl_mvm_mac_sched_scan_start,
+	.sched_scan_stop = iwl_mvm_mac_sched_scan_stop,
+	.set_key = iwl_mvm_mac_set_key,
+	.update_tkip_key = iwl_mvm_mac_update_tkip_key,
+	.remain_on_channel = iwl_mvm_roc,
+	.cancel_remain_on_channel = iwl_mvm_cancel_roc,
+	.add_chanctx = iwl_mvm_add_chanctx,
+	.remove_chanctx = iwl_mvm_remove_chanctx,
+	.change_chanctx = iwl_mvm_change_chanctx,
+	.assign_vif_chanctx = iwl_mvm_assign_vif_chanctx,
+	.unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx,
+	.switch_vif_chanctx = iwl_mvm_switch_vif_chanctx,
+
+	.start_ap = iwl_mvm_start_ap_ibss,
+	.stop_ap = iwl_mvm_stop_ap_ibss,
+	.join_ibss = iwl_mvm_start_ap_ibss,
+	.leave_ibss = iwl_mvm_stop_ap_ibss,
+
+	.tx_last_beacon = iwl_mvm_tx_last_beacon,
+
+	.set_tim = iwl_mvm_set_tim,
+
+	.channel_switch = iwl_mvm_channel_switch,
+	.pre_channel_switch = iwl_mvm_pre_channel_switch,
+	.post_channel_switch = iwl_mvm_post_channel_switch,
+
+	.tdls_channel_switch = iwl_mvm_tdls_channel_switch,
+	.tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch,
+	.tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch,
+
+	.event_callback = iwl_mvm_mac_event_callback,
+
+	.sync_rx_queues = iwl_mvm_sync_rx_queues,
+
+	CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
+
+#ifdef CONFIG_PM_SLEEP
+	/* look at d3.c */
+	.suspend = iwl_mvm_suspend,
+	.resume = iwl_mvm_resume,
+	.set_wakeup = iwl_mvm_set_wakeup,
+	.set_rekey_data = iwl_mvm_set_rekey_data,
+#if IS_ENABLED(CONFIG_IPV6)
+	.ipv6_addr_change = iwl_mvm_ipv6_addr_change,
+#endif
+	.set_default_unicast_key = iwl_mvm_set_default_unicast_key,
+#endif
+	.get_survey = iwl_mvm_mac_get_survey,
+	.sta_statistics = iwl_mvm_mac_sta_statistics,
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	.sta_add_debugfs = iwl_mvm_sta_add_debugfs,
+#endif
+};
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
new file mode 100644
index 0000000..b3987a0
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -0,0 +1,2002 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __IWL_MVM_H__
+#define __IWL_MVM_H__
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/leds.h>
+#include <linux/in6.h>
+
+#ifdef CONFIG_THERMAL
+#include <linux/thermal.h>
+#endif
+
+#include "iwl-op-mode.h"
+#include "iwl-trans.h"
+#include "fw/notif-wait.h"
+#include "iwl-eeprom-parse.h"
+#include "fw/file.h"
+#include "iwl-config.h"
+#include "sta.h"
+#include "fw-api.h"
+#include "constants.h"
+#include "tof.h"
+#include "fw/runtime.h"
+#include "fw/dbg.h"
+#include "fw/acpi.h"
+#include "iwl-nvm-parse.h"
+
+#include <linux/average.h>
+
+#define IWL_MVM_MAX_ADDRESSES		5
+/* RSSI offset for WkP */
+#define IWL_RSSI_OFFSET 50
+#define IWL_MVM_MISSED_BEACONS_THRESHOLD 8
+/* A TimeUnit is 1024 microseconds */
+#define MSEC_TO_TU(_msec)	((_msec) * 1000 / 1024)
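+/*
+ * Illustrative arithmetic: MSEC_TO_TU(200) = 200 * 1000 / 1024 = 195, so a
+ * 200 ms span maps to ~195 TU (the integer division rounds down).
+ */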
+
+/* For GO, this value represents the number of TUs before CSA "beacon
+ * 0" TBTT when the CSA time-event needs to be scheduled to start.  It
+ * must be big enough to ensure that we switch in time.
+ */
+#define IWL_MVM_CHANNEL_SWITCH_TIME_GO		40
+
+/* For client, this value represents the number of TUs before CSA
+ * "beacon 1" TBTT, instead.  This is because we don't know when the
+ * GO/AP will be in the new channel, so we switch early enough.
+ */
+#define IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT	10
+
+/*
+ * This value (in TUs) is used to fine tune the CSA NoA end time which should
+ * be just before "beacon 0" TBTT.
+ */
+#define IWL_MVM_CHANNEL_SWITCH_MARGIN 4
+
+/*
+ * Number of beacons to transmit on a new channel until we unblock tx to
+ * the stations, even if we didn't identify them on a new channel
+ */
+#define IWL_MVM_CS_UNBLOCK_TX_TIMEOUT 3
+
+/* offchannel queue towards mac80211 */
+#define IWL_MVM_OFFCHANNEL_QUEUE 0
+
+extern const struct ieee80211_ops iwl_mvm_hw_ops;
+
+/**
+ * struct iwl_mvm_mod_params - module parameters for iwlmvm
+ * @init_dbg: if true, the NIC won't be stopped if the INIT fw asserts.
+ *	We will still register with mac80211 so that testmode works. The NIC
+ *	must not be brought up after the INIT fw asserts. This is useful for
+ *	debugging the INIT fw with proprietary tools over testmode.
+ * @tfd_q_hang_detect: enables detection of hung transmit queues
+ * @power_scheme: one of enum iwl_power_scheme
+ */
+struct iwl_mvm_mod_params {
+	bool init_dbg;
+	bool tfd_q_hang_detect;
+	int power_scheme;
+};
+extern struct iwl_mvm_mod_params iwlmvm_mod_params;
+
+struct iwl_mvm_phy_ctxt {
+	u16 id;
+	u16 color;
+	u32 ref; /* number of MAC contexts currently bound; 0 = free */
+
+	enum nl80211_chan_width width;
+
+	/*
+	 * TODO: This should probably be removed. Currently here only for
+	 * the rate scaling algorithm.
+	 */
+	struct ieee80211_channel *channel;
+};
+
+struct iwl_mvm_time_event_data {
+	struct ieee80211_vif *vif;
+	struct list_head list;
+	unsigned long end_jiffies;
+	u32 duration;
+	bool running;
+	u32 uid;
+
+	/*
+	 * The 'id' field must only be accessed while mvm->time_event_lock
+	 * is held, as its value is used to indicate whether the TE is in
+	 * the time event list (it is not when id == TE_MAX).
+	 */
+	u32 id;
+};
+
+/* Power management */
+
+/**
+ * enum iwl_power_scheme
+ * @IWL_POWER_SCHEME_CAM - Continuously Active Mode
+ * @IWL_POWER_SCHEME_BPS - Balanced Power Save (default)
+ * @IWL_POWER_SCHEME_LP  - Low Power
+ */
+enum iwl_power_scheme {
+	IWL_POWER_SCHEME_CAM = 1,
+	IWL_POWER_SCHEME_BPS,
+	IWL_POWER_SCHEME_LP
+};
+
+#define IWL_CONN_MAX_LISTEN_INTERVAL	10
+#define IWL_UAPSD_MAX_SP		IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+enum iwl_dbgfs_pm_mask {
+	MVM_DEBUGFS_PM_KEEP_ALIVE = BIT(0),
+	MVM_DEBUGFS_PM_SKIP_OVER_DTIM = BIT(1),
+	MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS = BIT(2),
+	MVM_DEBUGFS_PM_RX_DATA_TIMEOUT = BIT(3),
+	MVM_DEBUGFS_PM_TX_DATA_TIMEOUT = BIT(4),
+	MVM_DEBUGFS_PM_LPRX_ENA = BIT(6),
+	MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7),
+	MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8),
+	MVM_DEBUGFS_PM_UAPSD_MISBEHAVING = BIT(9),
+	MVM_DEBUGFS_PM_USE_PS_POLL = BIT(10),
+};
+
+struct iwl_dbgfs_pm {
+	u16 keep_alive_seconds;
+	u32 rx_data_timeout;
+	u32 tx_data_timeout;
+	bool skip_over_dtim;
+	u8 skip_dtim_periods;
+	bool lprx_ena;
+	u32 lprx_rssi_threshold;
+	bool snooze_ena;
+	bool uapsd_misbehaving;
+	bool use_ps_poll;
+	int mask;
+};
+
+/* beacon filtering */
+
+enum iwl_dbgfs_bf_mask {
+	MVM_DEBUGFS_BF_ENERGY_DELTA = BIT(0),
+	MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA = BIT(1),
+	MVM_DEBUGFS_BF_ROAMING_STATE = BIT(2),
+	MVM_DEBUGFS_BF_TEMP_THRESHOLD = BIT(3),
+	MVM_DEBUGFS_BF_TEMP_FAST_FILTER = BIT(4),
+	MVM_DEBUGFS_BF_TEMP_SLOW_FILTER = BIT(5),
+	MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER = BIT(6),
+	MVM_DEBUGFS_BF_DEBUG_FLAG = BIT(7),
+	MVM_DEBUGFS_BF_ESCAPE_TIMER = BIT(8),
+	MVM_DEBUGFS_BA_ESCAPE_TIMER = BIT(9),
+	MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT = BIT(10),
+};
+
+struct iwl_dbgfs_bf {
+	u32 bf_energy_delta;
+	u32 bf_roaming_energy_delta;
+	u32 bf_roaming_state;
+	u32 bf_temp_threshold;
+	u32 bf_temp_fast_filter;
+	u32 bf_temp_slow_filter;
+	u32 bf_enable_beacon_filter;
+	u32 bf_debug_flag;
+	u32 bf_escape_timer;
+	u32 ba_escape_timer;
+	u32 ba_enable_beacon_abort;
+	int mask;
+};
+#endif
+
+enum iwl_mvm_smps_type_request {
+	IWL_MVM_SMPS_REQ_BT_COEX,
+	IWL_MVM_SMPS_REQ_TT,
+	IWL_MVM_SMPS_REQ_PROT,
+	NUM_IWL_MVM_SMPS_REQ,
+};
+
+enum iwl_mvm_ref_type {
+	IWL_MVM_REF_UCODE_DOWN,
+	IWL_MVM_REF_SCAN,
+	IWL_MVM_REF_ROC,
+	IWL_MVM_REF_ROC_AUX,
+	IWL_MVM_REF_P2P_CLIENT,
+	IWL_MVM_REF_AP_IBSS,
+	IWL_MVM_REF_USER,
+	IWL_MVM_REF_TX,
+	IWL_MVM_REF_TX_AGG,
+	IWL_MVM_REF_ADD_IF,
+	IWL_MVM_REF_START_AP,
+	IWL_MVM_REF_BSS_CHANGED,
+	IWL_MVM_REF_PREPARE_TX,
+	IWL_MVM_REF_PROTECT_TDLS,
+	IWL_MVM_REF_CHECK_CTKILL,
+	IWL_MVM_REF_PRPH_READ,
+	IWL_MVM_REF_PRPH_WRITE,
+	IWL_MVM_REF_NMI,
+	IWL_MVM_REF_TM_CMD,
+	IWL_MVM_REF_EXIT_WORK,
+	IWL_MVM_REF_PROTECT_CSA,
+	IWL_MVM_REF_FW_DBG_COLLECT,
+	IWL_MVM_REF_INIT_UCODE,
+	IWL_MVM_REF_SENDING_CMD,
+	IWL_MVM_REF_RX,
+
+	/* update debugfs.c when changing this */
+
+	IWL_MVM_REF_COUNT,
+};
+
+enum iwl_bt_force_ant_mode {
+	BT_FORCE_ANT_DIS = 0,
+	BT_FORCE_ANT_AUTO,
+	BT_FORCE_ANT_BT,
+	BT_FORCE_ANT_WIFI,
+
+	BT_FORCE_ANT_MAX,
+};
+
+/**
+* enum iwl_mvm_low_latency_cause - low latency set causes
+* @LOW_LATENCY_TRAFFIC: indicates low latency traffic was detected
+* @LOW_LATENCY_DEBUGFS: low latency mode set from debugfs
+* @LOW_LATENCY_VCMD: low latency mode set from vendor command
+*/
+enum iwl_mvm_low_latency_cause {
+	LOW_LATENCY_TRAFFIC = BIT(0),
+	LOW_LATENCY_DEBUGFS = BIT(1),
+	LOW_LATENCY_VCMD = BIT(2),
+};
+
+/**
+ * struct iwl_mvm_vif_bf_data - beacon filtering related data
+ * @bf_enabled: indicates if beacon filtering is enabled
+ * @ba_enabled: indicates if beacon abort is enabled
+ * @ave_beacon_signal: average beacon signal
+ * @last_cqm_event: rssi of the last cqm event
+ * @bt_coex_min_thold: minimum threshold for BT coex
+ * @bt_coex_max_thold: maximum threshold for BT coex
+ * @last_bt_coex_event: rssi of the last BT coex event
+ */
+struct iwl_mvm_vif_bf_data {
+	bool bf_enabled;
+	bool ba_enabled;
+	int ave_beacon_signal;
+	int last_cqm_event;
+	int bt_coex_min_thold;
+	int bt_coex_max_thold;
+	int last_bt_coex_event;
+};
+
+/**
+ * struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context
+ * @id: between 0 and 3
+ * @color: to solve races upon MAC addition and removal
+ * @ap_sta_id: the sta_id of the AP - valid only if VIF type is STA
+ * @bssid: BSSID for this (client) interface
+ * @associated: indicates that we're currently associated, used only for
+ *	managing the firmware state in iwl_mvm_bss_info_changed_station()
+ * @ap_assoc_sta_count: count of stations associated to us - valid only
+ *	if VIF type is AP
+ * @uploaded: indicates the MAC context has been added to the device
+ * @ap_ibss_active: indicates that AP/IBSS is configured and that the interface
+ *	should get quota etc.
+ * @pm_enabled: indicates if MAC power management is allowed
+ * @monitor_active: indicates that monitor context is configured, and that the
+ *	interface should get quota etc.
+ * @low_latency: indicates low latency is set, see
+ *	enum &iwl_mvm_low_latency_cause for causes.
+ * @ps_disabled: indicates that this interface requires PS to be disabled
+ * @queue_params: QoS params for this MAC
+ * @bcast_sta: station used for broadcast packets. Used by the following
+ *	vifs: P2P_DEVICE, GO and AP.
+ * @beacon_skb: the skb used to hold the AP/GO beacon template
+ * @smps_requests: the SMPS requests of different parts of the driver,
+ *	combined on update to yield the overall request to mac80211.
+ * @beacon_stats: beacon statistics, containing the # of received beacons,
+ *	# of received beacons accumulated over FW restart, and the current
+ *	average signal of beacons retrieved from the firmware
+ * @csa_failed: CSA failed to schedule time event, report an error later
+ * @features: hw features active for this vif
+ */
+struct iwl_mvm_vif {
+	struct iwl_mvm *mvm;
+	u16 id;
+	u16 color;
+	u8 ap_sta_id;
+
+	u8 bssid[ETH_ALEN];
+	bool associated;
+	u8 ap_assoc_sta_count;
+
+	u16 cab_queue;
+
+	bool uploaded;
+	bool ap_ibss_active;
+	bool pm_enabled;
+	bool monitor_active;
+	u8 low_latency;
+	bool ps_disabled;
+	struct iwl_mvm_vif_bf_data bf_data;
+
+	struct {
+		u32 num_beacons, accu_num_beacons;
+		u8 avg_signal;
+	} beacon_stats;
+
+	u32 ap_beacon_time;
+
+	enum iwl_tsf_id tsf_id;
+
+	/*
+	 * QoS data from mac80211; stored here because mac80211 delivers
+	 * it via a separate callback, but we need it when programming
+	 * the MAC context
+	 */
+	struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
+	struct iwl_mvm_time_event_data time_event_data;
+	struct iwl_mvm_time_event_data hs_time_event_data;
+
+	struct iwl_mvm_int_sta bcast_sta;
+	struct iwl_mvm_int_sta mcast_sta;
+
+	/*
+	 * Assigned while mac80211 has the interface in a channel context,
+	 * or, for P2P Device, while it exists.
+	 */
+	struct iwl_mvm_phy_ctxt *phy_ctxt;
+
+#ifdef CONFIG_PM
+	/* WoWLAN GTK rekey data */
+	struct {
+		u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN];
+		__le64 replay_ctr;
+		bool valid;
+	} rekey_data;
+
+	int tx_key_idx;
+
+	bool seqno_valid;
+	u16 seqno;
+#endif
+
+#if IS_ENABLED(CONFIG_IPV6)
+	/* IPv6 addresses for WoWLAN */
+	struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX];
+	unsigned long tentative_addrs[BITS_TO_LONGS(IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)];
+	int num_target_ipv6_addrs;
+#endif
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	struct dentry *dbgfs_dir;
+	struct dentry *dbgfs_slink;
+	struct iwl_dbgfs_pm dbgfs_pm;
+	struct iwl_dbgfs_bf dbgfs_bf;
+	struct iwl_mac_power_cmd mac_pwr_cmd;
+	int dbgfs_quota_min;
+#endif
+
+	enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ];
+
+	/* FW identified misbehaving AP */
+	u8 uapsd_misbehaving_bssid[ETH_ALEN];
+
+	struct delayed_work uapsd_nonagg_detected_wk;
+
+	/* Indicates that CSA countdown may be started */
+	bool csa_countdown;
+	bool csa_failed;
+	u16 csa_target_freq;
+
+	/* Indicates that we are waiting for a beacon on a new channel */
+	bool csa_bcn_pending;
+
+	/* TCP Checksum Offload */
+	netdev_features_t features;
+};
+
+static inline struct iwl_mvm_vif *
+iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
+{
+	if (!vif)
+		return NULL;
+	return (void *)vif->drv_priv;
+}
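+
+/*
+ * A hedged sketch of the inverse mapping (assuming mac80211's layout,
+ * where drv_priv is embedded in struct ieee80211_vif):
+ *
+ *	struct ieee80211_vif *vif =
+ *		container_of((void *)mvmvif, struct ieee80211_vif,
+ *			     drv_priv);
+ */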
+
+extern const u8 tid_to_mac80211_ac[];
+
+#define IWL_MVM_SCAN_STOPPING_SHIFT	8
+
+enum iwl_scan_status {
+	IWL_MVM_SCAN_REGULAR		= BIT(0),
+	IWL_MVM_SCAN_SCHED		= BIT(1),
+	IWL_MVM_SCAN_NETDETECT		= BIT(2),
+
+	IWL_MVM_SCAN_STOPPING_REGULAR	= BIT(8),
+	IWL_MVM_SCAN_STOPPING_SCHED	= BIT(9),
+	IWL_MVM_SCAN_STOPPING_NETDETECT	= BIT(10),
+
+	IWL_MVM_SCAN_REGULAR_MASK	= IWL_MVM_SCAN_REGULAR |
+					  IWL_MVM_SCAN_STOPPING_REGULAR,
+	IWL_MVM_SCAN_SCHED_MASK		= IWL_MVM_SCAN_SCHED |
+					  IWL_MVM_SCAN_STOPPING_SCHED,
+	IWL_MVM_SCAN_NETDETECT_MASK	= IWL_MVM_SCAN_NETDETECT |
+					  IWL_MVM_SCAN_STOPPING_NETDETECT,
+
+	IWL_MVM_SCAN_STOPPING_MASK	= 0xff << IWL_MVM_SCAN_STOPPING_SHIFT,
+	IWL_MVM_SCAN_MASK		= 0xff,
+};
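+
+/*
+ * An illustrative sketch (not part of the header) of how the stopping
+ * bits mirror the running bits through IWL_MVM_SCAN_STOPPING_SHIFT:
+ * when a scan of type BIT(n) is asked to stop, its status would move
+ * to BIT(n + IWL_MVM_SCAN_STOPPING_SHIFT), e.g. for a regular scan:
+ *
+ *	mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR;
+ *	mvm->scan_status |= IWL_MVM_SCAN_STOPPING_REGULAR;
+ */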
+
+enum iwl_mvm_scan_type {
+	IWL_SCAN_TYPE_NOT_SET,
+	IWL_SCAN_TYPE_UNASSOC,
+	IWL_SCAN_TYPE_WILD,
+	IWL_SCAN_TYPE_MILD,
+	IWL_SCAN_TYPE_FRAGMENTED,
+};
+
+enum iwl_mvm_sched_scan_pass_all_states {
+	SCHED_SCAN_PASS_ALL_DISABLED,
+	SCHED_SCAN_PASS_ALL_ENABLED,
+	SCHED_SCAN_PASS_ALL_FOUND,
+};
+
+/**
+ * struct iwl_mvm_tt_mgmt - Thermal Throttling Management structure
+ * @ct_kill_exit: worker to exit thermal kill
+ * @dynamic_smps: Is dynamic SMPS enabled due to thermal throttling?
+ * @tx_backoff: The current thermal throttling tx backoff in uSec.
+ * @min_backoff: The minimal tx backoff due to power restrictions
+ * @params: Parameters to configure the thermal throttling algorithm.
+ * @throttle: Is thermal throttling active?
+ */
+struct iwl_mvm_tt_mgmt {
+	struct delayed_work ct_kill_exit;
+	bool dynamic_smps;
+	u32 tx_backoff;
+	u32 min_backoff;
+	struct iwl_tt_params params;
+	bool throttle;
+};
+
+#ifdef CONFIG_THERMAL
+/**
+ * struct iwl_mvm_thermal_device - thermal zone related data
+ * @temp_trips: temperature thresholds for report
+ * @fw_trips_index: keep indexes to original array - temp_trips
+ * @tzone: thermal zone device data
+ */
+struct iwl_mvm_thermal_device {
+	s16 temp_trips[IWL_MAX_DTS_TRIPS];
+	u8 fw_trips_index[IWL_MAX_DTS_TRIPS];
+	struct thermal_zone_device *tzone;
+};
+
+/**
+ * struct iwl_mvm_cooling_device - cooling device data
+ * @cur_state: current state
+ * @cdev: struct thermal cooling device
+ */
+struct iwl_mvm_cooling_device {
+	u32 cur_state;
+	struct thermal_cooling_device *cdev;
+};
+#endif
+
+#define IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES 8
+
+struct iwl_mvm_frame_stats {
+	u32 legacy_frames;
+	u32 ht_frames;
+	u32 vht_frames;
+	u32 bw_20_frames;
+	u32 bw_40_frames;
+	u32 bw_80_frames;
+	u32 bw_160_frames;
+	u32 sgi_frames;
+	u32 ngi_frames;
+	u32 siso_frames;
+	u32 mimo2_frames;
+	u32 agg_frames;
+	u32 ampdu_count;
+	u32 success_frames;
+	u32 fail_frames;
+	u32 last_rates[IWL_MVM_NUM_LAST_FRAMES_UCODE_RATES];
+	int last_frame_idx;
+};
+
+enum {
+	D0I3_DEFER_WAKEUP,
+	D0I3_PENDING_WAKEUP,
+};
+
+#define IWL_MVM_DEBUG_SET_TEMPERATURE_DISABLE 0xff
+#define IWL_MVM_DEBUG_SET_TEMPERATURE_MIN -100
+#define IWL_MVM_DEBUG_SET_TEMPERATURE_MAX 200
+
+enum iwl_mvm_tdls_cs_state {
+	IWL_MVM_TDLS_SW_IDLE = 0,
+	IWL_MVM_TDLS_SW_REQ_SENT,
+	IWL_MVM_TDLS_SW_RESP_RCVD,
+	IWL_MVM_TDLS_SW_REQ_RCVD,
+	IWL_MVM_TDLS_SW_ACTIVE,
+};
+
+enum iwl_mvm_traffic_load {
+	IWL_MVM_TRAFFIC_LOW,
+	IWL_MVM_TRAFFIC_MEDIUM,
+	IWL_MVM_TRAFFIC_HIGH,
+};
+
+DECLARE_EWMA(rate, 16, 16)
+
+struct iwl_mvm_tcm_mac {
+	struct {
+		u32 pkts[IEEE80211_NUM_ACS];
+		u32 airtime;
+	} tx;
+	struct {
+		u32 pkts[IEEE80211_NUM_ACS];
+		u32 airtime;
+		u32 last_ampdu_ref;
+	} rx;
+	struct {
+		/* track AP's transfer in client mode */
+		u64 rx_bytes;
+		struct ewma_rate rate;
+		bool detected;
+	} uapsd_nonagg_detect;
+	bool opened_rx_ba_sessions;
+};
+
+struct iwl_mvm_tcm {
+	struct delayed_work work;
+	spinlock_t lock; /* used when time elapsed */
+	unsigned long ts; /* timestamp when period ends */
+	unsigned long ll_ts;
+	unsigned long uapsd_nonagg_ts;
+	bool paused;
+	struct iwl_mvm_tcm_mac data[NUM_MAC_INDEX_DRIVER];
+	struct {
+		u32 elapsed; /* milliseconds for this TCM period */
+		u32 airtime[NUM_MAC_INDEX_DRIVER];
+		enum iwl_mvm_traffic_load load[NUM_MAC_INDEX_DRIVER];
+		enum iwl_mvm_traffic_load band_load[NUM_NL80211_BANDS];
+		enum iwl_mvm_traffic_load global_load;
+		bool low_latency[NUM_MAC_INDEX_DRIVER];
+		bool change[NUM_MAC_INDEX_DRIVER];
+		bool global_change;
+	} result;
+};
+
+/**
+ * struct iwl_mvm_reorder_buffer - per ra/tid/queue reorder buffer
+ * @head_sn: reorder window head sn
+ * @num_stored: number of mpdus stored in the buffer
+ * @buf_size: the reorder buffer size as set by the last addba request
+ * @queue: queue of this reorder buffer
+ * @last_amsdu: track last AMSDU SN for duplication detection
+ * @last_sub_index: track AMSDU sub frame index for duplication detection
+ * @reorder_timer: timer for frames in the reorder buffer. For AMSDU
+ *	it is the time of last received sub-frame
+ * @removed: prevent timer re-arming
+ * @valid: reordering is valid for this queue
+ * @lock: protect reorder buffer internal state
+ * @mvm: mvm pointer, needed for frame timer context
+ */
+struct iwl_mvm_reorder_buffer {
+	u16 head_sn;
+	u16 num_stored;
+	u16 buf_size;
+	int queue;
+	u16 last_amsdu;
+	u8 last_sub_index;
+	struct timer_list reorder_timer;
+	bool removed;
+	bool valid;
+	spinlock_t lock;
+	struct iwl_mvm *mvm;
+} ____cacheline_aligned_in_smp;
+
+/**
+ * struct _iwl_mvm_reorder_buf_entry - reorder buffer entry per-queue/per-seqno
+ * @frames: list of skbs stored
+ * @reorder_time: time the packet was stored in the reorder buffer
+ */
+struct _iwl_mvm_reorder_buf_entry {
+	struct sk_buff_head frames;
+	unsigned long reorder_time;
+};
+
+/* make this indirection to get the aligned thing */
+struct iwl_mvm_reorder_buf_entry {
+	struct _iwl_mvm_reorder_buf_entry e;
+}
+#ifndef __CHECKER__
+/* sparse doesn't like this construct: "bad integer constant expression" */
+__aligned(roundup_pow_of_two(sizeof(struct _iwl_mvm_reorder_buf_entry)))
+#endif
+;
+
+/**
+ * struct iwl_mvm_baid_data - BA session data
+ * @sta_id: station id
+ * @tid: tid of the session
+ * @baid: baid of the session
+ * @timeout: the timeout set in the addba request
+ * @entries_per_queue: # of buffers per queue, this actually gets
+ *	aligned up to avoid cache line sharing between queues
+ * @last_rx: last rx jiffies, updated only if timeout passed from last update
+ * @session_timer: timer to check if BA session expired, runs at 2 * timeout
+ * @mvm: mvm pointer, needed for timer context
+ * @reorder_buf: reorder buffer, allocated per queue
+ * @entries: per-queue reorder buffer entry storage
+ */
+struct iwl_mvm_baid_data {
+	struct rcu_head rcu_head;
+	u8 sta_id;
+	u8 tid;
+	u8 baid;
+	u16 timeout;
+	u16 entries_per_queue;
+	unsigned long last_rx;
+	struct timer_list session_timer;
+	struct iwl_mvm_baid_data __rcu **rcu_ptr;
+	struct iwl_mvm *mvm;
+	struct iwl_mvm_reorder_buffer reorder_buf[IWL_MAX_RX_HW_QUEUES];
+	struct iwl_mvm_reorder_buf_entry entries[];
+};
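+
+/*
+ * A hedged usage sketch (not from the header): a frame with sequence
+ * number 'sn' received on RX queue 'q' lands in the per-queue slice of
+ * the flexible entries[] array, roughly:
+ *
+ *	int index = sn % baid_data->reorder_buf[q].buf_size;
+ *	struct iwl_mvm_reorder_buf_entry *entry =
+ *		&baid_data->entries[q * baid_data->entries_per_queue +
+ *				    index];
+ */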
+
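+/*
+ * Recover the containing iwl_mvm_baid_data from a reorder buffer
+ * pointer: 'buf' points at reorder_buf[buf->queue], so stepping back
+ * over the preceding queues and the reorder_buf offset yields the
+ * struct base.
+ */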
+static inline struct iwl_mvm_baid_data *
+iwl_mvm_baid_data_from_reorder_buf(struct iwl_mvm_reorder_buffer *buf)
+{
+	return (void *)((u8 *)buf -
+			offsetof(struct iwl_mvm_baid_data, reorder_buf) -
+			sizeof(*buf) * buf->queue);
+}
+
+/*
+ * enum iwl_mvm_queue_status - queue status
+ * @IWL_MVM_QUEUE_FREE: the queue is neither allocated nor reserved
+ *	Basically, this means that this queue can be used for any purpose
+ * @IWL_MVM_QUEUE_RESERVED: queue is reserved but not yet in use
+ *	This is the state of a queue that has been dedicated to some RA/TID
+ *	(agg'd or not), but that hasn't yet gone through the actual enablement
+ *	of iwl_mvm_enable_txq(), and therefore no traffic can go through it yet.
+ *	Note that in this state there is no requirement to already know what TID
+ *	should be used with this queue, it is just marked as a queue that will
+ *	be used, and shouldn't be allocated to anyone else.
+ * @IWL_MVM_QUEUE_READY: queue is ready to be used
+ *	This is the state of a queue that has been fully configured (including
+ *	SCD pointers, etc), has a specific RA/TID assigned to it, and can be
+ *	used to send traffic.
+ * @IWL_MVM_QUEUE_SHARED: queue is shared, or in a process of becoming shared
+ *	This is a state in which a single queue serves more than one TID, all of
+ *	which are not aggregated. Note that the queue is only associated to one
+ *	RA.
+ * @IWL_MVM_QUEUE_INACTIVE: queue is allocated but no traffic on it
+ *	This is a state of a queue that has had traffic on it, but during the
+ *	last %IWL_MVM_DQA_QUEUE_TIMEOUT time period there has been no traffic on
+ *	it. In this state, when a new queue is needed to be allocated but no
+ *	such free queue exists, an inactive queue might be freed and given to
+ *	the new RA/TID.
+ * @IWL_MVM_QUEUE_RECONFIGURING: queue is being reconfigured
+ *	This is the state of a queue that has had traffic pass through it, but
+ *	needs to be reconfigured for some reason, e.g. the queue needs to
+ *	become unshared and have aggregations re-enabled on it.
+ */
+enum iwl_mvm_queue_status {
+	IWL_MVM_QUEUE_FREE,
+	IWL_MVM_QUEUE_RESERVED,
+	IWL_MVM_QUEUE_READY,
+	IWL_MVM_QUEUE_SHARED,
+	IWL_MVM_QUEUE_INACTIVE,
+	IWL_MVM_QUEUE_RECONFIGURING,
+};
+
+#define IWL_MVM_DQA_QUEUE_TIMEOUT	(5 * HZ)
+#define IWL_MVM_INVALID_QUEUE		0xFFFF
+
+#define IWL_MVM_NUM_CIPHERS             10
+
+struct iwl_mvm_sar_profile {
+	bool enabled;
+	u8 table[ACPI_SAR_TABLE_SIZE];
+};
+
+struct iwl_mvm_geo_profile {
+	u8 values[ACPI_GEO_TABLE_SIZE];
+};
+
+struct iwl_mvm {
+	/* for logger access */
+	struct device *dev;
+
+	struct iwl_trans *trans;
+	const struct iwl_fw *fw;
+	const struct iwl_cfg *cfg;
+	struct iwl_phy_db *phy_db;
+	struct ieee80211_hw *hw;
+
+	/* for protecting access to iwl_mvm */
+	struct mutex mutex;
+	struct list_head async_handlers_list;
+	spinlock_t async_handlers_lock;
+	struct work_struct async_handlers_wk;
+
+	struct work_struct roc_done_wk;
+
+	unsigned long init_status;
+
+	unsigned long status;
+
+	u32 queue_sync_cookie;
+	atomic_t queue_sync_counter;
+	/*
+	 * for beacon filtering -
+	 * currently only one interface can be supported
+	 */
+	struct iwl_mvm_vif *bf_allowed_vif;
+
+	bool hw_registered;
+	bool calibrating;
+	u32 error_event_table[2];
+	u32 log_event_table;
+	u32 umac_error_event_table;
+	bool support_umac_log;
+
+	u32 ampdu_ref;
+	bool ampdu_toggle;
+
+	struct iwl_notif_wait_data notif_wait;
+
+	union {
+		struct mvm_statistics_rx_v3 rx_stats_v3;
+		struct mvm_statistics_rx rx_stats;
+	};
+
+	struct {
+		u64 rx_time;
+		u64 tx_time;
+		u64 on_time_rf;
+		u64 on_time_scan;
+	} radio_stats, accu_radio_stats;
+
+	u16 hw_queue_to_mac80211[IWL_MAX_TVQM_QUEUES];
+
+	struct {
+		u8 hw_queue_refcount;
+		u8 ra_sta_id; /* The RA this queue is mapped to, if any */
+		bool reserved; /* Is this TXQ reserved for a STA? */
+		u8 mac80211_ac; /* The mac80211 AC this queue is mapped to */
+		u8 txq_tid; /* The TID "owner" of this queue */
+		u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
+		/* Timestamp for inactivation per TID of this queue */
+		unsigned long last_frame_time[IWL_MAX_TID_COUNT + 1];
+		enum iwl_mvm_queue_status status;
+	} queue_info[IWL_MAX_HW_QUEUES];
+	spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
+	struct work_struct add_stream_wk; /* To add streams to queues */
+
+	atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];
+
+	const char *nvm_file_name;
+	struct iwl_nvm_data *nvm_data;
+	/* NVM sections */
+	struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS];
+
+	struct iwl_fw_runtime fwrt;
+
+	/* EEPROM MAC addresses */
+	struct mac_address addresses[IWL_MVM_MAX_ADDRESSES];
+
+	/* data related to data path */
+	struct iwl_rx_phy_info last_phy_info;
+	struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT];
+	unsigned long sta_deferred_frames[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
+	u8 rx_ba_sessions;
+
+	/* configured by mac80211 */
+	u32 rts_threshold;
+
+	/* Scan status, cmd (pre-allocated) and auxiliary station */
+	unsigned int scan_status;
+	void *scan_cmd;
+	struct iwl_mcast_filter_cmd *mcast_filter_cmd;
+	/* For CDB this is the low band scan type; for non-CDB it is the only type */
+	enum iwl_mvm_scan_type scan_type;
+	enum iwl_mvm_scan_type hb_scan_type;
+
+	enum iwl_mvm_sched_scan_pass_all_states sched_scan_pass_all;
+	struct delayed_work scan_timeout_dwork;
+
+	/* max number of simultaneous scans the FW supports */
+	unsigned int max_scans;
+
+	/* UMAC scan tracking */
+	u32 scan_uid_status[IWL_MVM_MAX_UMAC_SCANS];
+
+	/* start time of last scan in TSF of the mac that requested the scan */
+	u64 scan_start;
+
+	/* the vif that requested the current scan */
+	struct iwl_mvm_vif *scan_vif;
+
+	/* rx chain antennas set through debugfs for the scan command */
+	u8 scan_rx_ant;
+
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+	/* broadcast filters to configure for each associated station */
+	const struct iwl_fw_bcast_filter *bcast_filters;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	struct {
+		bool override;
+		struct iwl_bcast_filter_cmd cmd;
+	} dbgfs_bcast_filtering;
+#endif
+#endif
+
+	/* Internal station */
+	struct iwl_mvm_int_sta aux_sta;
+	struct iwl_mvm_int_sta snif_sta;
+
+	bool last_ebs_successful;
+
+	u8 scan_last_antenna_idx; /* to toggle TX between antennas */
+	u8 mgmt_last_antenna_idx;
+
+	/* last smart fifo state that was successfully sent to firmware */
+	enum iwl_sf_state sf_state;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	struct dentry *debugfs_dir;
+	u32 dbgfs_sram_offset, dbgfs_sram_len;
+	u32 dbgfs_prph_reg_addr;
+	bool disable_power_off;
+	bool disable_power_off_d3;
+
+	bool scan_iter_notif_enabled;
+
+	struct debugfs_blob_wrapper nvm_hw_blob;
+	struct debugfs_blob_wrapper nvm_sw_blob;
+	struct debugfs_blob_wrapper nvm_calib_blob;
+	struct debugfs_blob_wrapper nvm_prod_blob;
+	struct debugfs_blob_wrapper nvm_phy_sku_blob;
+
+	struct iwl_mvm_frame_stats drv_rx_stats;
+	spinlock_t drv_stats_lock;
+	u16 dbgfs_rx_phyinfo;
+#endif
+
+	struct iwl_mvm_phy_ctxt phy_ctxts[NUM_PHY_CTX];
+
+	struct list_head time_event_list;
+	spinlock_t time_event_lock;
+
+	/*
+	 * A bitmap marking which key indexes are in use. The firmware
+	 * can hold 16 keys at most. Reflect this fact.
+	 */
+	unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)];
+	u8 fw_key_deleted[STA_KEY_MAX_NUM];
+
+	/* references taken by the driver and spinlock protecting them */
+	spinlock_t refs_lock;
+	u8 refs[IWL_MVM_REF_COUNT];
+
+	u8 vif_count;
+
+	/* -1 for always, 0 for never, >0 for that many times */
+	s8 fw_restart;
+
+#ifdef CONFIG_IWLWIFI_LEDS
+	struct led_classdev led;
+#endif
+
+	struct ieee80211_vif *p2p_device_vif;
+
+#ifdef CONFIG_PM
+	struct wiphy_wowlan_support wowlan;
+	int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen;
+
+	/* sched scan settings for net detect */
+	struct ieee80211_scan_ies nd_ies;
+	struct cfg80211_match_set *nd_match_sets;
+	int n_nd_match_sets;
+	struct ieee80211_channel **nd_channels;
+	int n_nd_channels;
+	bool net_detect;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	bool d3_wake_sysassert;
+	bool d3_test_active;
+	bool store_d3_resume_sram;
+	void *d3_resume_sram;
+	u32 d3_test_pme_ptr;
+	struct ieee80211_vif *keep_vif;
+	u32 last_netdetect_scans; /* no. of scans in the last net-detect wake */
+#endif
+#endif
+
+	/* d0i3 */
+	u8 d0i3_ap_sta_id;
+	bool d0i3_offloading;
+	struct work_struct d0i3_exit_work;
+	struct sk_buff_head d0i3_tx;
+	/* protect d0i3_suspend_flags */
+	struct mutex d0i3_suspend_mutex;
+	unsigned long d0i3_suspend_flags;
+	/* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */
+	spinlock_t d0i3_tx_lock;
+	wait_queue_head_t d0i3_exit_waitq;
+	wait_queue_head_t rx_sync_waitq;
+
+	/* BT-Coex */
+	struct iwl_bt_coex_profile_notif last_bt_notif;
+	struct iwl_bt_coex_ci_cmd last_bt_ci_cmd;
+
+	u8 bt_tx_prio;
+	enum iwl_bt_force_ant_mode bt_force_ant_mode;
+
+	/* Aux ROC */
+	struct list_head aux_roc_te_list;
+
+	/* Thermal Throttling and CTkill */
+	struct iwl_mvm_tt_mgmt thermal_throttle;
+#ifdef CONFIG_THERMAL
+	struct iwl_mvm_thermal_device tz_device;
+	struct iwl_mvm_cooling_device cooling_dev;
+#endif
+
+	s32 temperature;	/* Celsius */
+	/*
+	 * Debug option to set the NIC temperature. This option makes the
+	 * driver think this is the actual NIC temperature, and ignore the
+	 * real temperature that is received from the fw
+	 */
+	bool temperature_test;  /* Debug test temperature is enabled */
+
+	unsigned long bt_coex_last_tcm_ts;
+	struct iwl_mvm_tcm tcm;
+
+	u8 uapsd_noagg_bssid_write_idx;
+	struct mac_address uapsd_noagg_bssids[IWL_MVM_UAPSD_NOAGG_BSSIDS_NUM]
+		__aligned(2);
+
+	struct iwl_time_quota_cmd last_quota_cmd;
+
+#ifdef CONFIG_NL80211_TESTMODE
+	u32 noa_duration;
+	struct ieee80211_vif *noa_vif;
+#endif
+
+	/* Tx queues */
+	u16 aux_queue;
+	u16 snif_queue;
+	u16 probe_queue;
+	u16 p2p_dev_queue;
+
+	/* Indicate if device power save is allowed */
+	u8 ps_disabled; /* u8 instead of bool to ease debugfs_create_* usage */
+	unsigned int max_amsdu_len; /* used for debugfs only */
+
+	struct ieee80211_vif __rcu *csa_vif;
+	struct ieee80211_vif __rcu *csa_tx_blocked_vif;
+	u8 csa_tx_block_bcn_timeout;
+
+	/* system time of last beacon (for AP/GO interface) */
+	u32 ap_last_beacon_gp2;
+
+	/* indicates that we transmitted the last beacon */
+	bool ibss_manager;
+
+	bool lar_regdom_set;
+	enum iwl_mcc_source mcc_src;
+
+	/* TDLS channel switch data */
+	struct {
+		struct delayed_work dwork;
+		enum iwl_mvm_tdls_cs_state state;
+
+		/*
+		 * Current cs sta - might be different from periodic cs peer
+		 * station. Value is meaningless when the cs-state is idle.
+		 */
+		u8 cur_sta_id;
+
+		/* TDLS periodic channel-switch peer */
+		struct {
+			u8 sta_id;
+			u8 op_class;
+			bool initiator; /* are we the link initiator */
+			struct cfg80211_chan_def chandef;
+			struct sk_buff *skb; /* ch sw template */
+			u32 ch_sw_tm_ie;
+
+			/* timestamp of last ch-sw request sent (GP2 time) */
+			u32 sent_timestamp;
+		} peer;
+	} tdls_cs;
+
+	u32 ciphers[IWL_MVM_NUM_CIPHERS];
+	struct ieee80211_cipher_scheme cs[IWL_UCODE_MAX_CS];
+	struct iwl_mvm_tof_data tof_data;
+
+	struct ieee80211_vif *nan_vif;
+#define IWL_MAX_BAID	32
+	struct iwl_mvm_baid_data __rcu *baid_map[IWL_MAX_BAID];
+
+	/*
+	 * Drop beacons from other APs in AP mode when there are no connected
+	 * clients.
+	 */
+	bool drop_bcn_ap_mode;
+
+	struct delayed_work cs_tx_unblock_dwork;
+
+	/* does a monitor vif exist (only one can exist hence bool) */
+	bool monitor_on;
+#ifdef CONFIG_ACPI
+	struct iwl_mvm_sar_profile sar_profiles[ACPI_SAR_PROFILE_NUM];
+	struct iwl_mvm_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES];
+#endif
+};
+
+/* Extract MVM priv from op_mode and _hw */
+#define IWL_OP_MODE_GET_MVM(_iwl_op_mode)		\
+	((struct iwl_mvm *)(_iwl_op_mode)->op_mode_specific)
+
+#define IWL_MAC80211_GET_MVM(_hw)			\
+	IWL_OP_MODE_GET_MVM((struct iwl_op_mode *)((_hw)->priv))
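+
+/*
+ * A minimal usage sketch (illustrative; 'foo_mac_callback' is a
+ * hypothetical mac80211 handler):
+ *
+ *	static void foo_mac_callback(struct ieee80211_hw *hw)
+ *	{
+ *		struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ *
+ *		mutex_lock(&mvm->mutex);
+ *		...
+ *		mutex_unlock(&mvm->mutex);
+ *	}
+ */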
+
+/**
+ * enum iwl_mvm_status - MVM status bits
+ * @IWL_MVM_STATUS_HW_RFKILL: HW RF-kill is asserted
+ * @IWL_MVM_STATUS_HW_CTKILL: CT-kill is active
+ * @IWL_MVM_STATUS_ROC_RUNNING: remain-on-channel is running
+ * @IWL_MVM_STATUS_HW_RESTART_REQUESTED: HW restart was requested
+ * @IWL_MVM_STATUS_IN_HW_RESTART: HW restart is active
+ * @IWL_MVM_STATUS_IN_D0I3: NIC is in D0i3
+ * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running
+ * @IWL_MVM_STATUS_D3_RECONFIG: D3 reconfiguration is being done
+ * @IWL_MVM_STATUS_FIRMWARE_RUNNING: firmware is running
+ * @IWL_MVM_STATUS_NEED_FLUSH_P2P: need to flush P2P bcast STA
+ */
+enum iwl_mvm_status {
+	IWL_MVM_STATUS_HW_RFKILL,
+	IWL_MVM_STATUS_HW_CTKILL,
+	IWL_MVM_STATUS_ROC_RUNNING,
+	IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+	IWL_MVM_STATUS_IN_HW_RESTART,
+	IWL_MVM_STATUS_IN_D0I3,
+	IWL_MVM_STATUS_ROC_AUX_RUNNING,
+	IWL_MVM_STATUS_D3_RECONFIG,
+	IWL_MVM_STATUS_FIRMWARE_RUNNING,
+	IWL_MVM_STATUS_NEED_FLUSH_P2P,
+};
+
+/* Keep track of completed init configuration */
+enum iwl_mvm_init_status {
+	IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE = BIT(0),
+	IWL_MVM_INIT_STATUS_LEDS_INIT_COMPLETE = BIT(1),
+	IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE = BIT(2),
+	IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE = BIT(3),
+};
+
+static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
+{
+	return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status) ||
+	       test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
+}
+
+static inline bool iwl_mvm_is_radio_hw_killed(struct iwl_mvm *mvm)
+{
+	return test_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
+}
+
+static inline bool iwl_mvm_firmware_running(struct iwl_mvm *mvm)
+{
+	return test_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
+}
+
+/* Must be called with rcu_read_lock() held; the lock may only be
+ * released when the returned mvmsta is no longer needed.
+ */
+static inline struct iwl_mvm_sta *
+iwl_mvm_sta_from_staid_rcu(struct iwl_mvm *mvm, u8 sta_id)
+{
+	struct ieee80211_sta *sta;
+
+	if (sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))
+		return NULL;
+
+	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+	/* This can happen if the station has been removed right now */
+	if (IS_ERR_OR_NULL(sta))
+		return NULL;
+
+	return iwl_mvm_sta_from_mac80211(sta);
+}
+
+static inline struct iwl_mvm_sta *
+iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id)
+{
+	struct ieee80211_sta *sta;
+
+	if (sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))
+		return NULL;
+
+	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+					lockdep_is_held(&mvm->mutex));
+
+	/* This can happen if the station has been removed right now */
+	if (IS_ERR_OR_NULL(sta))
+		return NULL;
+
+	return iwl_mvm_sta_from_mac80211(sta);
+}
+
+static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
+{
+	return !iwlwifi_mod_params.d0i3_disable &&
+		fw_has_capa(&mvm->fw->ucode_capa,
+			    IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
+}
+
+static inline bool iwl_mvm_is_adaptive_dwell_supported(struct iwl_mvm *mvm)
+{
+	return fw_has_api(&mvm->fw->ucode_capa,
+			  IWL_UCODE_TLV_API_ADAPTIVE_DWELL);
+}
+
+static inline bool iwl_mvm_is_adaptive_dwell_v2_supported(struct iwl_mvm *mvm)
+{
+	return fw_has_api(&mvm->fw->ucode_capa,
+			  IWL_UCODE_TLV_API_ADAPTIVE_DWELL_V2);
+}
+
+static inline bool iwl_mvm_is_oce_supported(struct iwl_mvm *mvm)
+{
+	/* OCE should never be enabled for LMAC scan FWs */
+	return fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_OCE);
+}
+
+static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm)
+{
+	/* For now we only use this mode to differentiate between
+	 * slave transports, which handle D0i3 entry in suspend by
+	 * themselves in conjunction with runtime PM D0i3.  So, this
+	 * function is used to check whether we need to do anything
+	 * when entering suspend or if the transport layer has already
+	 * done it.
+	 */
+	return (mvm->trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3) &&
+		(mvm->trans->runtime_pm_mode != IWL_PLAT_PM_MODE_D0I3);
+}
+
+static inline bool iwl_mvm_is_dqa_data_queue(struct iwl_mvm *mvm, u8 queue)
+{
+	return (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE) &&
+	       (queue <= IWL_MVM_DQA_MAX_DATA_QUEUE);
+}
+
+static inline bool iwl_mvm_is_dqa_mgmt_queue(struct iwl_mvm *mvm, u8 queue)
+{
+	return (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE) &&
+	       (queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE);
+}
+
+static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm)
+{
+	bool nvm_lar = mvm->nvm_data->lar_enabled;
+	bool tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
+				   IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
+
+	if (iwlwifi_mod_params.lar_disable)
+		return false;
+
+	/*
+	 * Enable LAR only if it is supported by the FW (TLV) &&
+	 * enabled in the NVM
+	 */
+	if (mvm->cfg->nvm_type == IWL_NVM_EXT)
+		return nvm_lar && tlv_lar;
+	else
+		return tlv_lar;
+}
+
+static inline bool iwl_mvm_is_wifi_mcc_supported(struct iwl_mvm *mvm)
+{
+	return fw_has_api(&mvm->fw->ucode_capa,
+			  IWL_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
+	       fw_has_capa(&mvm->fw->ucode_capa,
+			   IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC);
+}
+
+static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm)
+{
+	return fw_has_capa(&mvm->fw->ucode_capa,
+			   IWL_UCODE_TLV_CAPA_BT_COEX_RRC) &&
+		IWL_MVM_BT_COEX_RRC;
+}
+
+static inline bool iwl_mvm_is_csum_supported(struct iwl_mvm *mvm)
+{
+	return fw_has_capa(&mvm->fw->ucode_capa,
+			   IWL_UCODE_TLV_CAPA_CSUM_SUPPORT) &&
+	       !IWL_MVM_HW_CSUM_DISABLE;
+}
+
+static inline bool iwl_mvm_is_mplut_supported(struct iwl_mvm *mvm)
+{
+	return fw_has_capa(&mvm->fw->ucode_capa,
+			   IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT) &&
+		IWL_MVM_BT_COEX_MPLUT;
+}
+
+static inline
+bool iwl_mvm_is_p2p_scm_uapsd_supported(struct iwl_mvm *mvm)
+{
+	return fw_has_capa(&mvm->fw->ucode_capa,
+			   IWL_UCODE_TLV_CAPA_P2P_SCM_UAPSD) &&
+		!(iwlwifi_mod_params.uapsd_disable &
+		  IWL_DISABLE_UAPSD_P2P_CLIENT);
+}
+
+static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm)
+{
+	return fw_has_capa(&mvm->fw->ucode_capa,
+			   IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT);
+}
+
+static inline bool iwl_mvm_has_new_tx_api(struct iwl_mvm *mvm)
+{
+	/* TODO - replace with TLV once defined */
+	return mvm->trans->cfg->use_tfh;
+}
+
+static inline bool iwl_mvm_has_unified_ucode(struct iwl_mvm *mvm)
+{
+	/* TODO - better define this */
+	return mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000;
+}
+
+static inline bool iwl_mvm_is_cdb_supported(struct iwl_mvm *mvm)
+{
+	/*
+	 * TODO:
+	 * The issue of how to determine CDB APIs and usage is still not fully
+	 * defined.
+	 * There are separate FW builds for CDB and non-CDB, but there
+	 * may also be a runtime check.
+	 * For now there is a TLV for checking the compilation mode, but
+	 * a runtime check will also have to be added here - once defined.
+	 */
+	return fw_has_capa(&mvm->fw->ucode_capa,
+			   IWL_UCODE_TLV_CAPA_CDB_SUPPORT);
+}
+
+static inline bool iwl_mvm_cdb_scan_api(struct iwl_mvm *mvm)
+{
+	/*
+	 * TODO: should this be the same as iwl_mvm_is_cdb_supported()?
+	 * but then there's a little bit of code in scan that won't make
+	 * any sense...
+	 */
+	return mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000;
+}
+
+static inline bool iwl_mvm_has_new_rx_stats_api(struct iwl_mvm *mvm)
+{
+	return fw_has_api(&mvm->fw->ucode_capa,
+			  IWL_UCODE_TLV_API_NEW_RX_STATS);
+}
+
+static inline bool iwl_mvm_has_quota_low_latency(struct iwl_mvm *mvm)
+{
+	return fw_has_api(&mvm->fw->ucode_capa,
+			  IWL_UCODE_TLV_API_QUOTA_LOW_LATENCY);
+}
+
+static inline bool iwl_mvm_has_tlc_offload(const struct iwl_mvm *mvm)
+{
+	return fw_has_capa(&mvm->fw->ucode_capa,
+			   IWL_UCODE_TLV_CAPA_TLC_OFFLOAD);
+}
+
+static inline struct agg_tx_status *
+iwl_mvm_get_agg_status(struct iwl_mvm *mvm, void *tx_resp)
+{
+	if (iwl_mvm_has_new_tx_api(mvm))
+		return &((struct iwl_mvm_tx_resp *)tx_resp)->status;
+	else
+		return ((struct iwl_mvm_tx_resp_v3 *)tx_resp)->status;
+}
+
+static inline bool iwl_mvm_is_tt_in_fw(struct iwl_mvm *mvm)
+{
+#ifdef CONFIG_THERMAL
+	/* these two TLVs are redundant, since the FW only takes
+	 * responsibility for CT-kill after we send at least one
+	 * temperature thresholds report command.
+	 */
+	return fw_has_capa(&mvm->fw->ucode_capa,
+			   IWL_UCODE_TLV_CAPA_CT_KILL_BY_FW) &&
+	       fw_has_capa(&mvm->fw->ucode_capa,
+			   IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT);
+#else /* CONFIG_THERMAL */
+	return false;
+#endif /* CONFIG_THERMAL */
+}
+
+static inline bool iwl_mvm_is_ctdp_supported(struct iwl_mvm *mvm)
+{
+	return fw_has_capa(&mvm->fw->ucode_capa,
+			   IWL_UCODE_TLV_CAPA_CTDP_SUPPORT);
+}
+
+extern const u8 iwl_mvm_ac_to_tx_fifo[];
+extern const u8 iwl_mvm_ac_to_gen2_tx_fifo[];
+
+static inline u8 iwl_mvm_mac_ac_to_tx_fifo(struct iwl_mvm *mvm,
+					   enum ieee80211_ac_numbers ac)
+{
+	return iwl_mvm_has_new_tx_api(mvm) ?
+		iwl_mvm_ac_to_gen2_tx_fifo[ac] : iwl_mvm_ac_to_tx_fifo[ac];
+}
+
+struct iwl_rate_info {
+	u8 plcp;	/* uCode API:  IWL_RATE_6M_PLCP, etc. */
+	u8 plcp_siso;	/* uCode API:  IWL_RATE_SISO_6M_PLCP, etc. */
+	u8 plcp_mimo2;	/* uCode API:  IWL_RATE_MIMO2_6M_PLCP, etc. */
+	u8 plcp_mimo3;  /* uCode API:  IWL_RATE_MIMO3_6M_PLCP, etc. */
+	u8 ieee;	/* MAC header:  IWL_RATE_6M_IEEE, etc. */
+};
+
+void __iwl_mvm_mac_stop(struct iwl_mvm *mvm);
+int __iwl_mvm_mac_start(struct iwl_mvm *mvm);
+
+/******************
+ * MVM Methods
+ ******************/
+/* uCode */
+int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm);
+
+/* Utils */
+int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
+					enum nl80211_band band);
+void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
+			       enum nl80211_band band,
+			       struct ieee80211_tx_rate *r);
+u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
+void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
+u8 first_antenna(u8 mask);
+u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
+void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime);
+
+/* Tx / Host Commands */
+int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm,
+				  struct iwl_host_cmd *cmd);
+int __must_check iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
+				      u32 flags, u16 len, const void *data);
+int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm,
+					 struct iwl_host_cmd *cmd,
+					 u32 *status);
+int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id,
+					     u16 len, const void *data,
+					     u32 *status);
+int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
+		   struct ieee80211_sta *sta);
+int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb);
+void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
+			struct iwl_tx_cmd *tx_cmd,
+			struct ieee80211_tx_info *info, u8 sta_id);
+void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
+			    struct ieee80211_tx_info *info,
+			    struct ieee80211_sta *sta, __le16 fc);
+#ifdef CONFIG_IWLWIFI_DEBUG
+const char *iwl_mvm_get_tx_fail_reason(u32 status);
+#else
+static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
+#endif
+int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags);
+int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags);
+int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id,
+			   u16 tids, u32 flags);
+
+void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
+
+static inline void iwl_mvm_set_tx_cmd_ccmp(struct ieee80211_tx_info *info,
+					   struct iwl_tx_cmd *tx_cmd)
+{
+	struct ieee80211_key_conf *keyconf = info->control.hw_key;
+
+	tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+	memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
+}
+
+static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm)
+{
+	flush_work(&mvm->async_handlers_wk);
+}
+
+/* Statistics */
+void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
+				  struct iwl_rx_packet *pkt);
+void iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
+			   struct iwl_rx_cmd_buffer *rxb);
+int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear);
+void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm);
+
+/* NVM */
+int iwl_nvm_init(struct iwl_mvm *mvm);
+int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm);
+
+static inline u8 iwl_mvm_get_valid_tx_ant(struct iwl_mvm *mvm)
+{
+	return mvm->nvm_data && mvm->nvm_data->valid_tx_ant ?
+	       mvm->fw->valid_tx_ant & mvm->nvm_data->valid_tx_ant :
+	       mvm->fw->valid_tx_ant;
+}
+
+static inline u8 iwl_mvm_get_valid_rx_ant(struct iwl_mvm *mvm)
+{
+	return mvm->nvm_data && mvm->nvm_data->valid_rx_ant ?
+	       mvm->fw->valid_rx_ant & mvm->nvm_data->valid_rx_ant :
+	       mvm->fw->valid_rx_ant;
+}
+
+static inline u32 iwl_mvm_get_phy_config(struct iwl_mvm *mvm)
+{
+	u32 phy_config = ~(FW_PHY_CFG_TX_CHAIN |
+			   FW_PHY_CFG_RX_CHAIN);
+	u32 valid_rx_ant = iwl_mvm_get_valid_rx_ant(mvm);
+	u32 valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm);
+
+	phy_config |= valid_tx_ant << FW_PHY_CFG_TX_CHAIN_POS |
+		      valid_rx_ant << FW_PHY_CFG_RX_CHAIN_POS;
+
+	return mvm->fw->phy_config & phy_config;
+}
+
+int iwl_mvm_up(struct iwl_mvm *mvm);
+int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
+
+int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm);
+bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
+				    struct iwl_bcast_filter_cmd *cmd);
+
+/*
+ * FW notifications / CMD responses handlers
+ * Convention: iwl_mvm_rx_<NAME OF THE CMD>
+ */
+void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
+			struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
+			struct iwl_rx_cmd_buffer *rxb, int queue);
+void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
+			      struct iwl_rx_cmd_buffer *rxb, int queue);
+int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
+			    const u8 *data, u32 count);
+void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+			    int queue);
+void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
+				   struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
+				   struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
+				 struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
+			     struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_shared_mem_cfg_notif(struct iwl_mvm *mvm,
+				     struct iwl_rx_cmd_buffer *rxb);
+
+/* MVM PHY */
+int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
+			 struct cfg80211_chan_def *chandef,
+			 u8 chains_static, u8 chains_dynamic);
+int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
+			     struct cfg80211_chan_def *chandef,
+			     u8 chains_static, u8 chains_dynamic);
+void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm,
+			  struct iwl_mvm_phy_ctxt *ctxt);
+void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm,
+			    struct iwl_mvm_phy_ctxt *ctxt);
+int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm);
+u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef);
+u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef);
+
+/* MAC (virtual interface) programming */
+int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_mac_ctxt_add(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_mac_ctxt_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			     bool force_assoc_off, const u8 *bssid_override);
+int iwl_mvm_mac_ctxt_remove(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+u32 iwl_mvm_mac_get_queues_mask(struct ieee80211_vif *vif);
+int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
+				    struct ieee80211_vif *vif);
+void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
+			     struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
+				     struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
+				    struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm,
+			       struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_window_status_notif(struct iwl_mvm *mvm,
+				 struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
+				    struct ieee80211_vif *vif);
+unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
+					 struct ieee80211_vif *exclude_vif);
+void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
+				      struct iwl_rx_cmd_buffer *rxb);
+/* Bindings */
+int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+
+/* Quota management */
+static inline size_t iwl_mvm_quota_cmd_size(struct iwl_mvm *mvm)
+{
+	return iwl_mvm_has_quota_low_latency(mvm) ?
+		sizeof(struct iwl_time_quota_cmd) :
+		sizeof(struct iwl_time_quota_cmd_v1);
+}
+
+static inline struct iwl_time_quota_data
+*iwl_mvm_quota_cmd_get_quota(struct iwl_mvm *mvm,
+			     struct iwl_time_quota_cmd *cmd,
+			     int i)
+{
+	struct iwl_time_quota_data_v1 *quotas;
+
+	if (iwl_mvm_has_quota_low_latency(mvm))
+		return &cmd->quotas[i];
+
+	quotas = (struct iwl_time_quota_data_v1 *)cmd->quotas;
+	return (struct iwl_time_quota_data *)&quotas[i];
+}
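+
+/*
+ * A hedged usage sketch (illustrative): the accessor lets one loop
+ * serve both the v1 and the low-latency command layouts, e.g. to
+ * invalidate all quota entries before filling them:
+ *
+ *	struct iwl_time_quota_data *qdata;
+ *	int i;
+ *
+ *	for (i = 0; i < MAX_BINDINGS; i++) {
+ *		qdata = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, i);
+ *		qdata->id_and_color = cpu_to_le32(FW_CTXT_INVALID);
+ *	}
+ */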
+
+int iwl_mvm_update_quotas(struct iwl_mvm *mvm, bool force_upload,
+			  struct ieee80211_vif *disabled_vif);
+
+/* Scanning */
+int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			   struct cfg80211_scan_request *req,
+			   struct ieee80211_scan_ies *ies);
+int iwl_mvm_scan_size(struct iwl_mvm *mvm);
+int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify);
+int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
+void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
+void iwl_mvm_scan_timeout_wk(struct work_struct *work);
+
+/* Scheduled scan */
+void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
+					 struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+					      struct iwl_rx_cmd_buffer *rxb);
+int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
+			     struct ieee80211_vif *vif,
+			     struct cfg80211_sched_scan_request *req,
+			     struct ieee80211_scan_ies *ies,
+			     int type);
+void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
+				 struct iwl_rx_cmd_buffer *rxb);
+
+/* UMAC scan */
+int iwl_mvm_config_scan(struct iwl_mvm *mvm);
+void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
+					 struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+					      struct iwl_rx_cmd_buffer *rxb);
+
+/* MVM debugfs */
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
+void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+#else
+static inline int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm,
+					 struct dentry *dbgfs_dir)
+{
+	return 0;
+}
+static inline void
+iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+}
+static inline void
+iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+}
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
+
+/* rate scaling */
+int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init);
+void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg);
+int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate);
+void rs_update_last_rssi(struct iwl_mvm *mvm,
+			 struct iwl_mvm_sta *mvmsta,
+			 struct ieee80211_rx_status *rx_status);
+
+/* power management */
+int iwl_mvm_power_update_device(struct iwl_mvm *mvm);
+int iwl_mvm_power_update_mac(struct iwl_mvm *mvm);
+int iwl_mvm_power_update_ps(struct iwl_mvm *mvm);
+int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+				 char *buf, int bufsz);
+
+void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
+					      struct iwl_rx_cmd_buffer *rxb);
+
+#ifdef CONFIG_IWLWIFI_LEDS
+int iwl_mvm_leds_init(struct iwl_mvm *mvm);
+void iwl_mvm_leds_exit(struct iwl_mvm *mvm);
+void iwl_mvm_leds_sync(struct iwl_mvm *mvm);
+#else
+static inline int iwl_mvm_leds_init(struct iwl_mvm *mvm)
+{
+	return 0;
+}
+static inline void iwl_mvm_leds_exit(struct iwl_mvm *mvm)
+{
+}
+static inline void iwl_mvm_leds_sync(struct iwl_mvm *mvm)
+{
+}
+#endif
+
+/* D3 (WoWLAN, NetDetect) */
+int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
+int iwl_mvm_resume(struct ieee80211_hw *hw);
+void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled);
+void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
+			    struct ieee80211_vif *vif,
+			    struct cfg80211_gtk_rekey_data *data);
+void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
+			      struct ieee80211_vif *vif,
+			      struct inet6_dev *idev);
+void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
+				     struct ieee80211_vif *vif, int idx);
+extern const struct file_operations iwl_dbgfs_d3_test_ops;
+#ifdef CONFIG_PM
+int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
+				     struct ieee80211_vif *vif,
+				     bool host_awake,
+				     u32 cmd_flags);
+void iwl_mvm_d0i3_update_keys(struct iwl_mvm *mvm,
+			      struct ieee80211_vif *vif,
+			      struct iwl_wowlan_status *status);
+void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm,
+				 struct ieee80211_vif *vif);
+#else
+static inline int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
+						   struct ieee80211_vif *vif,
+						   bool host_awake,
+						   u32 cmd_flags)
+{
+	return 0;
+}
+
+static inline void iwl_mvm_d0i3_update_keys(struct iwl_mvm *mvm,
+					    struct ieee80211_vif *vif,
+					    struct iwl_wowlan_status *status)
+{
+}
+
+static inline void
+iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+}
+#endif
+void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
+				struct iwl_wowlan_config_cmd *cmd);
+int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
+			       struct ieee80211_vif *vif,
+			       bool disable_offloading,
+			       bool offload_ns,
+			       u32 cmd_flags);
+
+/* D0i3 */
+void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
+void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
+int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
+bool iwl_mvm_ref_taken(struct iwl_mvm *mvm);
+void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq);
+int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode);
+int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode);
+int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm);
+
+/* BT Coex */
+int iwl_mvm_send_bt_init_conf(struct iwl_mvm *mvm);
+void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
+			      struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			   enum ieee80211_rssi_event_data);
+void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm);
+u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
+				struct ieee80211_sta *sta);
+bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
+				     struct ieee80211_sta *sta);
+bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant);
+bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm);
+bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
+				    enum nl80211_band band);
+u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
+			   struct ieee80211_tx_info *info, u8 ac);
+
+/* beacon filtering */
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void
+iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
+					 struct iwl_beacon_filter_cmd *cmd);
+#else
+static inline void
+iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
+					 struct iwl_beacon_filter_cmd *cmd)
+{}
+#endif
+int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
+				   struct ieee80211_vif *vif,
+				   bool enable, u32 flags);
+int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
+				 struct ieee80211_vif *vif,
+				 u32 flags);
+int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
+				  struct ieee80211_vif *vif,
+				  u32 flags);
+/* SMPS */
+void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+				enum iwl_mvm_smps_type_request req_type,
+				enum ieee80211_smps_mode smps_request);
+bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm);
+
+/* Low latency */
+int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			      bool low_latency,
+			      enum iwl_mvm_low_latency_cause cause);
+/* get SystemLowLatencyMode - only needed for beacon threshold? */
+bool iwl_mvm_low_latency(struct iwl_mvm *mvm);
+bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band);
+
+/* get VMACLowLatencyMode */
+static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
+{
+	/*
+	 * should this consider associated/active/... state?
+	 *
+	 * Normally low-latency should only be active on interfaces
+	 * that are active, but at least with debugfs it can also be
+	 * enabled on interfaces that aren't active. However, when
+	 * interfaces aren't active they aren't added into the
+	 * binding, so this has no real impact. For now, just return
+	 * the current desired low-latency state.
+	 */
+	return mvmvif->low_latency;
+}
+
+static inline
+void iwl_mvm_vif_set_low_latency(struct iwl_mvm_vif *mvmvif, bool set,
+				 enum iwl_mvm_low_latency_cause cause)
+{
+	if (set)
+		mvmvif->low_latency |= cause;
+	else
+		mvmvif->low_latency &= ~cause;
+}
+
+/* hw scheduler queue config */
+bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+			u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
+			unsigned int wdg_timeout);
+int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
+			    u8 sta_id, u8 tid, unsigned int timeout);
+
+int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+			u8 tid, u8 flags);
+int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq);
+
+/* Return a bitmask with all the hw supported queues, except for the
+ * command queue, which can't be flushed.
+ */
+static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
+{
+	return ((BIT(mvm->cfg->base_params->num_of_queues) - 1) &
+		~BIT(IWL_MVM_DQA_CMD_QUEUE));
+}
+
+static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
+{
+	iwl_fw_cancel_timestamp(&mvm->fwrt);
+	iwl_free_fw_paging(&mvm->fwrt);
+	clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
+	iwl_fw_dump_conf_clear(&mvm->fwrt);
+	iwl_trans_stop_device(mvm->trans);
+}
+
+/* Stop/start all mac queues in a given bitmap */
+void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq);
+void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq);
+
+/* Re-configure the SCD for a queue that has already been configured */
+int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
+			 int tid, int frame_limit, u16 ssn);
+
+/* Thermal management and CT-kill */
+void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
+void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp);
+void iwl_mvm_temp_notif(struct iwl_mvm *mvm,
+			struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
+void iwl_mvm_thermal_initialize(struct iwl_mvm *mvm, u32 min_backoff);
+void iwl_mvm_thermal_exit(struct iwl_mvm *mvm);
+void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state);
+int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp);
+void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm);
+int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm);
+int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 budget);
+
+/* Location Aware Regulatory */
+struct iwl_mcc_update_resp *
+iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
+		   enum iwl_mcc_source src_id);
+int iwl_mvm_init_mcc(struct iwl_mvm *mvm);
+void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
+				struct iwl_rx_cmd_buffer *rxb);
+struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
+						  const char *alpha2,
+						  enum iwl_mcc_source src_id,
+						  bool *changed);
+struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
+							  bool *changed);
+int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm);
+void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm);
+
+/* smart fifo */
+int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+		      bool added_vif);
+
+/* TDLS */
+
+/*
+ * We use TID 4 (VI) as a FW-used-only TID when TDLS connections are present.
+ * This TID is marked as used vs the AP and all connected TDLS peers.
+ */
+#define IWL_MVM_TDLS_FW_TID 4
+
+int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm);
+void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			       bool sta_added);
+void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
+					   struct ieee80211_vif *vif);
+int iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
+				struct ieee80211_vif *vif,
+				struct ieee80211_sta *sta, u8 oper_class,
+				struct cfg80211_chan_def *chandef,
+				struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie);
+void iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
+				      struct ieee80211_vif *vif,
+				      struct ieee80211_tdls_ch_sw_params *params);
+void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
+					struct ieee80211_vif *vif,
+					struct ieee80211_sta *sta);
+void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_tdls_ch_switch_work(struct work_struct *work);
+
+void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
+				     struct iwl_mvm_internal_rxq_notif *notif,
+				     u32 size);
+void iwl_mvm_reorder_timer_expired(struct timer_list *t);
+struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
+bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm);
+
+void iwl_mvm_inactivity_check(struct iwl_mvm *mvm);
+
+#define MVM_TCM_PERIOD_MSEC 500
+#define MVM_TCM_PERIOD (HZ * MVM_TCM_PERIOD_MSEC / 1000)
+#define MVM_LL_PERIOD (10 * HZ)
+void iwl_mvm_tcm_work(struct work_struct *work);
+void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm);
+void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel);
+void iwl_mvm_resume_tcm(struct iwl_mvm *mvm);
+void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed);
+
+void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error);
+unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
+				    struct ieee80211_vif *vif,
+				    bool tdls, bool cmd_q);
+void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			     const char *errmsg);
+void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
+					  struct ieee80211_vif *vif,
+					  const struct ieee80211_sta *sta,
+					  u16 tid);
+
+int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b);
+int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_mvm_sta_add_debugfs(struct ieee80211_hw *hw,
+			     struct ieee80211_vif *vif,
+			     struct ieee80211_sta *sta,
+			     struct dentry *dir);
+#endif
+
+#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
new file mode 100644
index 0000000..f2579c9
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -0,0 +1,645 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/firmware.h>
+#include <linux/rtnetlink.h>
+#include "iwl-trans.h"
+#include "iwl-csr.h"
+#include "mvm.h"
+#include "iwl-eeprom-parse.h"
+#include "iwl-eeprom-read.h"
+#include "iwl-nvm-parse.h"
+#include "iwl-prph.h"
+#include "fw/acpi.h"
+
+/* Default NVM size to read */
+#define IWL_NVM_DEFAULT_CHUNK_SIZE (2 * 1024)
+
+#define NVM_WRITE_OPCODE 1
+#define NVM_READ_OPCODE 0
+
+/* Status codes for the NVM chunk load response */
+enum {
+	READ_NVM_CHUNK_SUCCEED = 0,
+	READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
+};
+
+/*
+ * Prepare the NVM host command with pointers to the NVM buffer
+ * and send it to the firmware.
+ */
+static int iwl_nvm_write_chunk(struct iwl_mvm *mvm, u16 section,
+			       u16 offset, u16 length, const u8 *data)
+{
+	struct iwl_nvm_access_cmd nvm_access_cmd = {
+		.offset = cpu_to_le16(offset),
+		.length = cpu_to_le16(length),
+		.type = cpu_to_le16(section),
+		.op_code = NVM_WRITE_OPCODE,
+	};
+	struct iwl_host_cmd cmd = {
+		.id = NVM_ACCESS_CMD,
+		.len = { sizeof(struct iwl_nvm_access_cmd), length },
+		.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
+		.data = { &nvm_access_cmd, data },
+		/* data may come from vmalloc, so use _DUP */
+		.dataflags = { 0, IWL_HCMD_DFL_DUP },
+	};
+	struct iwl_rx_packet *pkt;
+	struct iwl_nvm_access_resp *nvm_resp;
+	int ret;
+
+	ret = iwl_mvm_send_cmd(mvm, &cmd);
+	if (ret)
+		return ret;
+
+	pkt = cmd.resp_pkt;
+	/* Extract & check NVM write response */
+	nvm_resp = (void *)pkt->data;
+	if (le16_to_cpu(nvm_resp->status) != READ_NVM_CHUNK_SUCCEED) {
+		IWL_ERR(mvm,
+			"NVM access write command failed for section %u (status = 0x%x)\n",
+			section, le16_to_cpu(nvm_resp->status));
+		ret = -EIO;
+	}
+
+	iwl_free_resp(&cmd);
+	return ret;
+}
+
+static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
+			      u16 offset, u16 length, u8 *data)
+{
+	struct iwl_nvm_access_cmd nvm_access_cmd = {
+		.offset = cpu_to_le16(offset),
+		.length = cpu_to_le16(length),
+		.type = cpu_to_le16(section),
+		.op_code = NVM_READ_OPCODE,
+	};
+	struct iwl_nvm_access_resp *nvm_resp;
+	struct iwl_rx_packet *pkt;
+	struct iwl_host_cmd cmd = {
+		.id = NVM_ACCESS_CMD,
+		.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
+		.data = { &nvm_access_cmd, },
+	};
+	int ret, bytes_read, offset_read;
+	u8 *resp_data;
+
+	cmd.len[0] = sizeof(struct iwl_nvm_access_cmd);
+
+	ret = iwl_mvm_send_cmd(mvm, &cmd);
+	if (ret)
+		return ret;
+
+	pkt = cmd.resp_pkt;
+
+	/* Extract NVM response */
+	nvm_resp = (void *)pkt->data;
+	ret = le16_to_cpu(nvm_resp->status);
+	bytes_read = le16_to_cpu(nvm_resp->length);
+	offset_read = le16_to_cpu(nvm_resp->offset);
+	resp_data = nvm_resp->data;
+	if (ret) {
+		if ((offset != 0) &&
+		    (ret == READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
+			/*
+			 * Meaning of NOT_VALID_ADDRESS: the driver tried
+			 * to read a chunk from an address that is a
+			 * multiple of 2K and got an error because that
+			 * address is empty. Meaning of (offset != 0): the
+			 * driver already read valid data from another
+			 * chunk, so this case is not an error.
+			 */
+			IWL_DEBUG_EEPROM(mvm->trans->dev,
+					 "NVM access command failed on offset 0x%x since that section size is a multiple of 2K\n",
+					 offset);
+			ret = 0;
+		} else {
+			IWL_DEBUG_EEPROM(mvm->trans->dev,
+					 "NVM access command failed with status %d (device: %s)\n",
+					 ret, mvm->cfg->name);
+			ret = -EIO;
+		}
+		goto exit;
+	}
+
+	if (offset_read != offset) {
+		IWL_ERR(mvm, "NVM ACCESS response with invalid offset %d\n",
+			offset_read);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	/* Write data to NVM */
+	memcpy(data + offset, resp_data, bytes_read);
+	ret = bytes_read;
+
+exit:
+	iwl_free_resp(&cmd);
+	return ret;
+}
+
+static int iwl_nvm_write_section(struct iwl_mvm *mvm, u16 section,
+				 const u8 *data, u16 length)
+{
+	int offset = 0;
+
+	/* copy data in chunks of 2k (and remainder if any) */
+
+	while (offset < length) {
+		int chunk_size, ret;
+
+		chunk_size = min(IWL_NVM_DEFAULT_CHUNK_SIZE,
+				 length - offset);
+
+		ret = iwl_nvm_write_chunk(mvm, section, offset,
+					  chunk_size, data + offset);
+		if (ret < 0)
+			return ret;
+
+		offset += chunk_size;
+	}
+
+	return 0;
+}
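+
+/*
+ * Editorial sketch (not part of the driver): the chunking pattern used
+ * by iwl_nvm_write_section(), as a standalone loop. send_chunk() and
+ * CHUNK_SZ are illustrative stand-ins; the point is that min() lets the
+ * final iteration handle the remainder with no special case.
+ */
+#define CHUNK_SZ 2048
+
+static int send_all_sketch(int (*send_chunk)(int off, int len,
+					     const unsigned char *buf),
+			   const unsigned char *data, int length)
+{
+	int offset = 0;
+
+	while (offset < length) {
+		int chunk = min(CHUNK_SZ, length - offset);
+		int ret = send_chunk(offset, chunk, data + offset);
+
+		if (ret < 0)
+			return ret;	/* abort on the first failure */
+		offset += chunk;
+	}
+	return 0;
+}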
+
+/*
+ * Reads an NVM section completely.
+ * NICs prior to the 7000 family don't have a real NVM, but just read
+ * section 0, which is the EEPROM. Because the uCode does not limit
+ * EEPROM reads, we need to check manually in this case that we don't
+ * overflow and try to read more than the EEPROM size.
+ * For 7000 family NICs, we supply the maximal size we can read, and
+ * the uCode fills the response with as much data as fits,
+ * without overflowing, so no check is needed.
+ */
+static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section,
+				u8 *data, u32 size_read)
+{
+	u16 length, offset = 0;
+	int ret;
+
+	/* Set nvm section read length */
+	length = IWL_NVM_DEFAULT_CHUNK_SIZE;
+
+	ret = length;
+
+	/* Read the NVM until exhausted (reading less than requested) */
+	while (ret == length) {
+		/* Check that no memory assumption fails and causes an overflow */
+		if ((size_read + offset + length) >
+		    mvm->cfg->base_params->eeprom_size) {
+			IWL_ERR(mvm, "EEPROM size is too small for NVM\n");
+			return -ENOBUFS;
+		}
+
+		ret = iwl_nvm_read_chunk(mvm, section, offset, length, data);
+		if (ret < 0) {
+			IWL_DEBUG_EEPROM(mvm->trans->dev,
+					 "Cannot read NVM from section %d offset %d, length %d\n",
+					 section, offset, length);
+			return ret;
+		}
+		offset += ret;
+	}
+
+	iwl_nvm_fixups(mvm->trans->hw_id, section, data, offset);
+
+	IWL_DEBUG_EEPROM(mvm->trans->dev,
+			 "NVM section %d read completed\n", section);
+	return offset;
+}
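+
+/*
+ * Editorial sketch (not part of the driver): the "read until a short
+ * read" termination used by iwl_nvm_read_section(). read_chunk() is an
+ * illustrative stand-in for iwl_nvm_read_chunk(); a return value
+ * smaller than the requested length marks the end of the section.
+ */
+static int read_section_sketch(int (*read_chunk)(int off, int len,
+						 unsigned char *buf),
+			       unsigned char *data, int max_size)
+{
+	int offset = 0, length = 2048, ret = length;
+
+	while (ret == length) {
+		if (offset + length > max_size)
+			return -ENOBUFS;	/* buffer would overflow */
+		ret = read_chunk(offset, length, data + offset);
+		if (ret < 0)
+			return ret;
+		offset += ret;	/* a short read ends the loop */
+	}
+	return offset;		/* total bytes in the section */
+}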
+
+static struct iwl_nvm_data *
+iwl_parse_nvm_sections(struct iwl_mvm *mvm)
+{
+	struct iwl_nvm_section *sections = mvm->nvm_sections;
+	const __be16 *hw;
+	const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku;
+	bool lar_enabled;
+	int regulatory_type;
+
+	/* Checking for required sections */
+	if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) {
+		if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
+		    !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
+			IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n");
+			return NULL;
+		}
+	} else {
+		if (mvm->trans->cfg->nvm_type == IWL_NVM_SDP)
+			regulatory_type = NVM_SECTION_TYPE_REGULATORY_SDP;
+		else
+			regulatory_type = NVM_SECTION_TYPE_REGULATORY;
+
+		/* SW and REGULATORY sections are mandatory */
+		if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
+		    !mvm->nvm_sections[regulatory_type].data) {
+			IWL_ERR(mvm,
+				"Can't parse empty family 8000 OTP/NVM sections\n");
+			return NULL;
+		}
+		/* MAC_OVERRIDE or at least HW section must exist */
+		if (!mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data &&
+		    !mvm->nvm_sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
+			IWL_ERR(mvm,
+				"Can't parse mac_address, empty sections\n");
+			return NULL;
+		}
+
+		/* PHY_SKU section is mandatory in B0 */
+		if (!mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) {
+			IWL_ERR(mvm,
+				"Can't parse phy_sku in B0, empty sections\n");
+			return NULL;
+		}
+	}
+
+	hw = (const __be16 *)sections[mvm->cfg->nvm_hw_section_num].data;
+	sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
+	calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
+	mac_override =
+		(const __le16 *)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data;
+	phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data;
+
+	regulatory = mvm->trans->cfg->nvm_type == IWL_NVM_SDP ?
+		(const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY_SDP].data :
+		(const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;
+
+	lar_enabled = !iwlwifi_mod_params.lar_disable &&
+		      fw_has_capa(&mvm->fw->ucode_capa,
+				  IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
+
+	return iwl_parse_nvm_data(mvm->trans, mvm->cfg, hw, sw, calib,
+				  regulatory, mac_override, phy_sku,
+				  mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant,
+				  lar_enabled);
+}
+
+/* Loads the NVM data stored in mvm->nvm_sections into the NIC */
+int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm)
+{
+	int i, ret = 0;
+	struct iwl_nvm_section *sections = mvm->nvm_sections;
+
+	IWL_DEBUG_EEPROM(mvm->trans->dev, "Write to NVM\n");
+
+	for (i = 0; i < ARRAY_SIZE(mvm->nvm_sections); i++) {
+		if (!mvm->nvm_sections[i].data || !mvm->nvm_sections[i].length)
+			continue;
+		ret = iwl_nvm_write_section(mvm, i, sections[i].data,
+					    sections[i].length);
+		if (ret < 0) {
+			IWL_ERR(mvm, "iwl_mvm_send_cmd failed: %d\n", ret);
+			break;
+		}
+	}
+	return ret;
+}
+
+int iwl_nvm_init(struct iwl_mvm *mvm)
+{
+	int ret, section;
+	u32 size_read = 0;
+	u8 *nvm_buffer, *temp;
+	const char *nvm_file_C = mvm->cfg->default_nvm_file_C_step;
+
+	if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS))
+		return -EINVAL;
+
+	/* Load NVM values from the NIC */
+	IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");
+
+	nvm_buffer = kmalloc(mvm->cfg->base_params->eeprom_size,
+			     GFP_KERNEL);
+	if (!nvm_buffer)
+		return -ENOMEM;
+	for (section = 0; section < NVM_MAX_NUM_SECTIONS; section++) {
+		/* we override the constness for initial read */
+		ret = iwl_nvm_read_section(mvm, section, nvm_buffer,
+					   size_read);
+		if (ret < 0)
+			continue;
+		size_read += ret;
+		temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
+		if (!temp) {
+			ret = -ENOMEM;
+			break;
+		}
+
+		iwl_nvm_fixups(mvm->trans->hw_id, section, temp, ret);
+
+		mvm->nvm_sections[section].data = temp;
+		mvm->nvm_sections[section].length = ret;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+		switch (section) {
+		case NVM_SECTION_TYPE_SW:
+			mvm->nvm_sw_blob.data = temp;
+			mvm->nvm_sw_blob.size  = ret;
+			break;
+		case NVM_SECTION_TYPE_CALIBRATION:
+			mvm->nvm_calib_blob.data = temp;
+			mvm->nvm_calib_blob.size  = ret;
+			break;
+		case NVM_SECTION_TYPE_PRODUCTION:
+			mvm->nvm_prod_blob.data = temp;
+			mvm->nvm_prod_blob.size  = ret;
+			break;
+		case NVM_SECTION_TYPE_PHY_SKU:
+			mvm->nvm_phy_sku_blob.data = temp;
+			mvm->nvm_phy_sku_blob.size  = ret;
+			break;
+		default:
+			if (section == mvm->cfg->nvm_hw_section_num) {
+				mvm->nvm_hw_blob.data = temp;
+				mvm->nvm_hw_blob.size = ret;
+				break;
+			}
+		}
+#endif
+	}
+	if (!size_read)
+		IWL_ERR(mvm, "OTP is blank\n");
+	kfree(nvm_buffer);
+
+	/* Load the external NVM only if an NVM file was selected via the mod param */
+	if (mvm->nvm_file_name) {
+		/* read External NVM file from the mod param */
+		ret = iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name,
+					    mvm->nvm_sections);
+		if (ret) {
+			mvm->nvm_file_name = nvm_file_C;
+
+			if ((ret == -EFAULT || ret == -ENOENT) &&
+			    mvm->nvm_file_name) {
+				/* if reading the NVM file failed, try again with the default */
+				ret = iwl_read_external_nvm(mvm->trans,
+							    mvm->nvm_file_name,
+							    mvm->nvm_sections);
+				if (ret)
+					return ret;
+			} else {
+				return ret;
+			}
+		}
+	}
+
+	/* parse the relevant nvm sections */
+	mvm->nvm_data = iwl_parse_nvm_sections(mvm);
+	if (!mvm->nvm_data)
+		return -ENODATA;
+	IWL_DEBUG_EEPROM(mvm->trans->dev, "nvm version = %x\n",
+			 mvm->nvm_data->nvm_version);
+
+	return 0;
+}
+
+struct iwl_mcc_update_resp *
+iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
+		   enum iwl_mcc_source src_id)
+{
+	struct iwl_mcc_update_cmd mcc_update_cmd = {
+		.mcc = cpu_to_le16(alpha2[0] << 8 | alpha2[1]),
+		.source_id = (u8)src_id,
+	};
+	struct iwl_mcc_update_resp *resp_cp;
+	struct iwl_rx_packet *pkt;
+	struct iwl_host_cmd cmd = {
+		.id = MCC_UPDATE_CMD,
+		.flags = CMD_WANT_SKB,
+		.data = { &mcc_update_cmd },
+	};
+
+	int ret;
+	u32 status;
+	int resp_len, n_channels;
+	u16 mcc;
+	bool resp_v2 = fw_has_capa(&mvm->fw->ucode_capa,
+				   IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
+
+	if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
+		return ERR_PTR(-EOPNOTSUPP);
+
+	cmd.len[0] = sizeof(struct iwl_mcc_update_cmd);
+	if (!resp_v2)
+		cmd.len[0] = sizeof(struct iwl_mcc_update_cmd_v1);
+
+	IWL_DEBUG_LAR(mvm, "send MCC update to FW with '%c%c' src = %d\n",
+		      alpha2[0], alpha2[1], src_id);
+
+	ret = iwl_mvm_send_cmd(mvm, &cmd);
+	if (ret)
+		return ERR_PTR(ret);
+
+	pkt = cmd.resp_pkt;
+
+	/* Extract MCC response */
+	if (resp_v2) {
+		struct iwl_mcc_update_resp *mcc_resp = (void *)pkt->data;
+
+		n_channels =  __le32_to_cpu(mcc_resp->n_channels);
+		resp_len = sizeof(struct iwl_mcc_update_resp) +
+			   n_channels * sizeof(__le32);
+		resp_cp = kmemdup(mcc_resp, resp_len, GFP_KERNEL);
+		if (!resp_cp) {
+			resp_cp = ERR_PTR(-ENOMEM);
+			goto exit;
+		}
+	} else {
+		struct iwl_mcc_update_resp_v1 *mcc_resp_v1 = (void *)pkt->data;
+
+		n_channels =  __le32_to_cpu(mcc_resp_v1->n_channels);
+		resp_len = sizeof(struct iwl_mcc_update_resp) +
+			   n_channels * sizeof(__le32);
+		resp_cp = kzalloc(resp_len, GFP_KERNEL);
+		if (!resp_cp) {
+			resp_cp = ERR_PTR(-ENOMEM);
+			goto exit;
+		}
+
+		resp_cp->status = mcc_resp_v1->status;
+		resp_cp->mcc = mcc_resp_v1->mcc;
+		resp_cp->cap = mcc_resp_v1->cap;
+		resp_cp->source_id = mcc_resp_v1->source_id;
+		resp_cp->n_channels = mcc_resp_v1->n_channels;
+		memcpy(resp_cp->channels, mcc_resp_v1->channels,
+		       n_channels * sizeof(__le32));
+	}
+
+	status = le32_to_cpu(resp_cp->status);
+
+	mcc = le16_to_cpu(resp_cp->mcc);
+
+	/* W/A for a FW/NVM issue - returns 0x00 for the world domain */
+	if (mcc == 0) {
+		mcc = 0x3030;  /* "00" - world */
+		resp_cp->mcc = cpu_to_le16(mcc);
+	}
+
+	IWL_DEBUG_LAR(mvm,
+		      "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') n_chans: %d\n",
+		      status, mcc, mcc >> 8, mcc & 0xff, n_channels);
+
+exit:
+	iwl_free_resp(&cmd);
+	return resp_cp;
+}
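+
+/*
+ * Editorial sketch (not part of the driver): how the two-character
+ * country code travels as a u16 in MCC_UPDATE_CMD, and why the world
+ * domain work-around above patches in 0x3030 ('0' is ASCII 0x30).
+ */
+static u16 mcc_pack_sketch(const char *alpha2)
+{
+	return (alpha2[0] << 8) | alpha2[1];	/* "US" -> 0x5553 */
+}
+
+static void mcc_unpack_sketch(u16 mcc, char *alpha2)
+{
+	alpha2[0] = mcc >> 8;
+	alpha2[1] = mcc & 0xff;
+	alpha2[2] = '\0';			/* 0x3030 -> "00" */
+}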
+
+int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
+{
+	bool tlv_lar;
+	bool nvm_lar;
+	int retval;
+	struct ieee80211_regdomain *regd;
+	char mcc[3];
+
+	if (mvm->cfg->nvm_type == IWL_NVM_EXT) {
+		tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
+				      IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
+		nvm_lar = mvm->nvm_data->lar_enabled;
+		if (tlv_lar != nvm_lar)
+			IWL_INFO(mvm,
+				 "Conflict between TLV & NVM regarding enabling LAR (TLV = %s NVM =%s)\n",
+				 tlv_lar ? "enabled" : "disabled",
+				 nvm_lar ? "enabled" : "disabled");
+	}
+
+	if (!iwl_mvm_is_lar_supported(mvm))
+		return 0;
+
+	/*
+	 * Try to replay the last MCC that was set to the FW. If it doesn't
+	 * exist, queue an update to cfg80211 to retrieve the default alpha2
+	 * from the FW.
+	 */
+	retval = iwl_mvm_init_fw_regd(mvm);
+	if (retval != -ENOENT)
+		return retval;
+
+	/*
+	 * Driver regulatory hint for the initial update; this also informs
+	 * the firmware that we support wifi location updates.
+	 * Disallow scans that might crash the FW while the LAR regdomain
+	 * is not set.
+	 */
+	mvm->lar_regdom_set = false;
+
+	regd = iwl_mvm_get_current_regdomain(mvm, NULL);
+	if (IS_ERR_OR_NULL(regd))
+		return -EIO;
+
+	if (iwl_mvm_is_wifi_mcc_supported(mvm) &&
+	    !iwl_acpi_get_mcc(mvm->dev, mcc)) {
+		kfree(regd);
+		regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc,
+					     MCC_SOURCE_BIOS, NULL);
+		if (IS_ERR_OR_NULL(regd))
+			return -EIO;
+	}
+
+	retval = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
+	kfree(regd);
+	return retval;
+}
+
+void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
+				struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_mcc_chub_notif *notif = (void *)pkt->data;
+	enum iwl_mcc_source src;
+	char mcc[3];
+	struct ieee80211_regdomain *regd;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (iwl_mvm_is_vif_assoc(mvm) && notif->source_id == MCC_SOURCE_WIFI) {
+		IWL_DEBUG_LAR(mvm, "Ignore mcc update while associated\n");
+		return;
+	}
+
+	if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
+		return;
+
+	mcc[0] = le16_to_cpu(notif->mcc) >> 8;
+	mcc[1] = le16_to_cpu(notif->mcc) & 0xff;
+	mcc[2] = '\0';
+	src = notif->source_id;
+
+	IWL_DEBUG_LAR(mvm,
+		      "RX: received chub update mcc cmd (mcc '%s' src %d)\n",
+		      mcc, src);
+	regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, NULL);
+	if (IS_ERR_OR_NULL(regd))
+		return;
+
+	regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);
+	kfree(regd);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c b/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c
new file mode 100644
index 0000000..6338d9c
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c
@@ -0,0 +1,255 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <net/ipv6.h>
+#include <net/addrconf.h>
+#include <linux/bitops.h>
+#include "mvm.h"
+
+void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
+				struct iwl_wowlan_config_cmd *cmd)
+{
+	int i;
+
+	/*
+	 * For QoS counters, we store the value to use next, so subtract 0x10:
+	 * the uCode adds 0x10 *before* using the value, while we increment
+	 * *after* using it (i.e. we store the next value to use).
+	 */
+	for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+		u16 seq = mvm_ap_sta->tid_data[i].seq_number;
+		seq -= 0x10;
+		cmd->qos_seq[i] = cpu_to_le16(seq);
+	}
+}
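+
+/*
+ * Editorial sketch (not part of the driver): in the 802.11 sequence
+ * control field the low 4 bits are the fragment number, so the stored
+ * counter advances in steps of 0x10 per MSDU. Subtracting one step
+ * turns "next value the driver would use" into the form the uCode
+ * expects, since it pre-increments by 0x10 before use.
+ */
+static u16 qos_seq_for_fw_sketch(u16 driver_next)
+{
+	return driver_next - 0x10;	/* uCode adds 0x10 back before use */
+}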
+
+int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
+			       struct ieee80211_vif *vif,
+			       bool disable_offloading,
+			       bool offload_ns,
+			       u32 cmd_flags)
+{
+	union {
+		struct iwl_proto_offload_cmd_v1 v1;
+		struct iwl_proto_offload_cmd_v2 v2;
+		struct iwl_proto_offload_cmd_v3_small v3s;
+		struct iwl_proto_offload_cmd_v3_large v3l;
+	} cmd = {};
+	struct iwl_host_cmd hcmd = {
+		.id = PROT_OFFLOAD_CONFIG_CMD,
+		.flags = cmd_flags,
+		.data[0] = &cmd,
+		.dataflags[0] = IWL_HCMD_DFL_DUP,
+	};
+	struct iwl_proto_offload_cmd_common *common;
+	u32 enabled = 0, size;
+	u32 capa_flags = mvm->fw->ucode_capa.flags;
+#if IS_ENABLED(CONFIG_IPV6)
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	int i;
+	/*
+	 * Skip tentative addresses when NS offload is enabled, to avoid
+	 * violating RFC 4862.
+	 * Keep tentative addresses when NS offload is disabled, so NS packets
+	 * are not filtered out and will wake up the host.
+	 */
+	bool skip_tentative = offload_ns;
+
+	if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL ||
+	    capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
+		struct iwl_ns_config *nsc;
+		struct iwl_targ_addr *addrs;
+		int n_nsc, n_addrs;
+		int c;
+		int num_skipped = 0;
+
+		if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
+			nsc = cmd.v3s.ns_config;
+			n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S;
+			addrs = cmd.v3s.targ_addrs;
+			n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S;
+		} else {
+			nsc = cmd.v3l.ns_config;
+			n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
+			addrs = cmd.v3l.targ_addrs;
+			n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
+		}
+
+		/*
+		 * For each address we have (and that will fit), fill a target
+		 * address struct, sharing one NS offload config between
+		 * addresses that map to the same solicited-node address.
+		 */
+		for (i = 0, c = 0;
+		     i < mvmvif->num_target_ipv6_addrs &&
+		     i < n_addrs && c < n_nsc; i++) {
+			struct in6_addr solicited_addr;
+			int j;
+
+			if (skip_tentative &&
+			    test_bit(i, mvmvif->tentative_addrs)) {
+				num_skipped++;
+				continue;
+			}
+
+			addrconf_addr_solict_mult(&mvmvif->target_ipv6_addrs[i],
+						  &solicited_addr);
+			for (j = 0; j < c; j++)
+				if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr,
+						  &solicited_addr) == 0)
+					break;
+			if (j == c)
+				c++;
+			addrs[i].addr = mvmvif->target_ipv6_addrs[i];
+			addrs[i].config_num = cpu_to_le32(j);
+			nsc[j].dest_ipv6_addr = solicited_addr;
+			memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN);
+		}
+
+		if (mvmvif->num_target_ipv6_addrs - num_skipped)
+			enabled |= IWL_D3_PROTO_IPV6_VALID;
+
+		if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL)
+			cmd.v3s.num_valid_ipv6_addrs =
+				cpu_to_le32(i - num_skipped);
+		else
+			cmd.v3l.num_valid_ipv6_addrs =
+				cpu_to_le32(i - num_skipped);
+	} else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
+		bool found = false;
+
+		BUILD_BUG_ON(sizeof(cmd.v2.target_ipv6_addr[0]) !=
+			     sizeof(mvmvif->target_ipv6_addrs[0]));
+
+		for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
+				    IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2); i++) {
+			if (skip_tentative &&
+			    test_bit(i, mvmvif->tentative_addrs))
+				continue;
+
+			memcpy(cmd.v2.target_ipv6_addr[i],
+			       &mvmvif->target_ipv6_addrs[i],
+			       sizeof(cmd.v2.target_ipv6_addr[i]));
+
+			found = true;
+		}
+		if (found) {
+			enabled |= IWL_D3_PROTO_IPV6_VALID;
+			memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN);
+		}
+	} else {
+		bool found = false;
+		BUILD_BUG_ON(sizeof(cmd.v1.target_ipv6_addr[0]) !=
+			     sizeof(mvmvif->target_ipv6_addrs[0]));
+
+		for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
+				    IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1); i++) {
+			if (skip_tentative &&
+			    test_bit(i, mvmvif->tentative_addrs))
+				continue;
+
+			memcpy(cmd.v1.target_ipv6_addr[i],
+			       &mvmvif->target_ipv6_addrs[i],
+			       sizeof(cmd.v1.target_ipv6_addr[i]));
+
+			found = true;
+		}
+
+		if (found) {
+			enabled |= IWL_D3_PROTO_IPV6_VALID;
+			memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN);
+		}
+	}
+
+	if (offload_ns && (enabled & IWL_D3_PROTO_IPV6_VALID))
+		enabled |= IWL_D3_PROTO_OFFLOAD_NS;
+#endif
+	if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
+		common = &cmd.v3s.common;
+		size = sizeof(cmd.v3s);
+	} else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
+		common = &cmd.v3l.common;
+		size = sizeof(cmd.v3l);
+	} else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
+		common = &cmd.v2.common;
+		size = sizeof(cmd.v2);
+	} else {
+		common = &cmd.v1.common;
+		size = sizeof(cmd.v1);
+	}
+
+	if (vif->bss_conf.arp_addr_cnt) {
+		enabled |= IWL_D3_PROTO_OFFLOAD_ARP | IWL_D3_PROTO_IPV4_VALID;
+		common->host_ipv4_addr = vif->bss_conf.arp_addr_list[0];
+		memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN);
+	}
+
+	if (!disable_offloading)
+		common->enabled = cpu_to_le32(enabled);
+
+	hcmd.len[0] = size;
+	return iwl_mvm_send_cmd(mvm, &hcmd);
+}
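+
+/*
+ * Editorial sketch (not part of the driver): what
+ * addrconf_addr_solict_mult() computes, per RFC 4291. The
+ * solicited-node multicast address is ff02::1:ff00:0/104 with the low
+ * 24 bits copied from the unicast address, which is why several target
+ * addresses above can share a single NS offload config.
+ */
+static void solicited_node_sketch(const u8 uni[16], u8 sol[16])
+{
+	static const u8 prefix[13] = {
+		0xff, 0x02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01, 0xff
+	};
+
+	memcpy(sol, prefix, sizeof(prefix));
+	memcpy(sol + 13, uni + 13, 3);	/* low 24 bits of the unicast addr */
+}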
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
new file mode 100644
index 0000000..0e26619
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -0,0 +1,1731 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <net/mac80211.h>
+
+#include "fw/notif-wait.h"
+#include "iwl-trans.h"
+#include "iwl-op-mode.h"
+#include "fw/img.h"
+#include "iwl-debug.h"
+#include "iwl-drv.h"
+#include "iwl-modparams.h"
+#include "mvm.h"
+#include "iwl-phy-db.h"
+#include "iwl-eeprom-parse.h"
+#include "iwl-csr.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
+#include "rs.h"
+#include "fw/api/scan.h"
+#include "time-event.h"
+#include "fw-api.h"
+#include "fw/api/scan.h"
+#include "fw/acpi.h"
+
+#define DRV_DESCRIPTION	"The new Intel(R) wireless AGN driver for Linux"
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+
+static const struct iwl_op_mode_ops iwl_mvm_ops;
+static const struct iwl_op_mode_ops iwl_mvm_ops_mq;
+
+struct iwl_mvm_mod_params iwlmvm_mod_params = {
+	.power_scheme = IWL_POWER_SCHEME_BPS,
+	.tfd_q_hang_detect = true
+	/* rest of fields are 0 by default */
+};
+
+module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, 0444);
+MODULE_PARM_DESC(init_dbg,
+		 "set to true to debug an ASSERT in INIT fw (default: false)");
+module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, 0444);
+MODULE_PARM_DESC(power_scheme,
+		 "power management scheme: 1-active, 2-balanced, 3-low power, default: 2");
+module_param_named(tfd_q_hang_detect, iwlmvm_mod_params.tfd_q_hang_detect,
+		   bool, 0444);
+MODULE_PARM_DESC(tfd_q_hang_detect,
+		 "TFD queues hang detection (default: true)");
+
+/*
+ * module init and exit functions
+ */
+static int __init iwl_mvm_init(void)
+{
+	int ret;
+
+	ret = iwl_mvm_rate_control_register();
+	if (ret) {
+		pr_err("Unable to register rate control algorithm: %d\n", ret);
+		return ret;
+	}
+
+	ret = iwl_opmode_register("iwlmvm", &iwl_mvm_ops);
+	if (ret)
+		pr_err("Unable to register MVM op_mode: %d\n", ret);
+
+	return ret;
+}
+module_init(iwl_mvm_init);
+
+static void __exit iwl_mvm_exit(void)
+{
+	iwl_opmode_deregister("iwlmvm");
+	iwl_mvm_rate_control_unregister();
+}
+module_exit(iwl_mvm_exit);
+
+static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
+{
+	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+	u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash;
+	u32 reg_val = 0;
+	u32 phy_config = iwl_mvm_get_phy_config(mvm);
+
+	radio_cfg_type = (phy_config & FW_PHY_CFG_RADIO_TYPE) >>
+			 FW_PHY_CFG_RADIO_TYPE_POS;
+	radio_cfg_step = (phy_config & FW_PHY_CFG_RADIO_STEP) >>
+			 FW_PHY_CFG_RADIO_STEP_POS;
+	radio_cfg_dash = (phy_config & FW_PHY_CFG_RADIO_DASH) >>
+			 FW_PHY_CFG_RADIO_DASH_POS;
+
+	/* SKU control */
+	reg_val |= CSR_HW_REV_STEP(mvm->trans->hw_rev) <<
+				CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
+	reg_val |= CSR_HW_REV_DASH(mvm->trans->hw_rev) <<
+				CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
+
+	/* radio configuration */
+	reg_val |= radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
+	reg_val |= radio_cfg_step << CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
+	reg_val |= radio_cfg_dash << CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
+
+	WARN_ON((radio_cfg_type << CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE) &
+		 ~CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE);
+
+	/*
+	 * TODO: Bits 7-8 of CSR in 8000 HW family and higher set the ADC
+	 * sampling, and shouldn't be set to any non-zero value.
+	 * The same is supposed to be true of other HW, but clearing them
+	 * (e.g. on the 7260) causes automatic tests to fail on seemingly
+	 * unrelated errors. This needs further investigation, but for now
+	 * we'll handle the cases separately.
+	 */
+	if (mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
+		reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;
+
+	iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
+				CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
+				CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
+				CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
+				CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
+				CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
+				CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
+				CSR_HW_IF_CONFIG_REG_BIT_MAC_SI,
+				reg_val);
+
+	IWL_DEBUG_INFO(mvm, "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
+		       radio_cfg_step, radio_cfg_dash);
+
+	/*
+	 * W/A: the NIC is stuck in a reset state after early PCIe power off
+	 * (PCIe power is lost before PERST# is asserted), causing the ME FW
+	 * to lose ownership and be unable to obtain it back.
+	 */
+	if (!mvm->trans->cfg->apmg_not_supported)
+		iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
+				       APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
+				       ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
+}
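+
+/*
+ * Editorial sketch (not part of the driver): the mask-and-shift pattern
+ * used above to extract the radio type/step/dash fields. MASK and POS
+ * are illustrative stand-ins for the FW_PHY_CFG_* constant pairs.
+ */
+#define SKETCH_FIELD_GET(val, mask, pos)	(((val) & (mask)) >> (pos))
+/* e.g. mask 0x000F0000 with pos 16 extracts 0xA from 0x000A0000 */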
+
+/**
+ * enum iwl_rx_handler_context - context for an Rx handler
+ * @RX_HANDLER_SYNC: the handler is called in the Rx path, which
+ *	can't acquire mvm->mutex.
+ * @RX_HANDLER_ASYNC_LOCKED: if the handler needs to hold mvm->mutex
+ *	(and only in this case!), it should be set as ASYNC. It will then
+ *	be called from a worker with mvm->mutex held.
+ * @RX_HANDLER_ASYNC_UNLOCKED: if the handler needs to lock the mutex
+ *	itself, it will be called from a worker without mvm->mutex held.
+ */
+enum iwl_rx_handler_context {
+	RX_HANDLER_SYNC,
+	RX_HANDLER_ASYNC_LOCKED,
+	RX_HANDLER_ASYNC_UNLOCKED,
+};
+
+/**
+ * struct iwl_rx_handlers - handler for an FW notification
+ * @cmd_id: command id
+ * @context: see &iwl_rx_handler_context
+ * @fn: the function called when the notification is received
+ */
+struct iwl_rx_handlers {
+	u16 cmd_id;
+	enum iwl_rx_handler_context context;
+	void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+};
+
+#define RX_HANDLER(_cmd_id, _fn, _context)	\
+	{ .cmd_id = _cmd_id, .fn = _fn, .context = _context }
+#define RX_HANDLER_GRP(_grp, _cmd, _fn, _context)	\
+	{ .cmd_id = WIDE_ID(_grp, _cmd), .fn = _fn, .context = _context }
+
+/*
+ * Handlers for fw notifications.
+ * Convention: RX_HANDLER(CMD_NAME, iwl_mvm_rx_CMD_NAME, context).
+ * This list should be ordered by frequency, for performance.
+ *
+ * A handler can run in one of three contexts, see &iwl_rx_handler_context
+ */
+static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
+	RX_HANDLER(TX_CMD, iwl_mvm_rx_tx_cmd, RX_HANDLER_SYNC),
+	RX_HANDLER(BA_NOTIF, iwl_mvm_rx_ba_notif, RX_HANDLER_SYNC),
+
+	RX_HANDLER_GRP(DATA_PATH_GROUP, TLC_MNG_UPDATE_NOTIF,
+		       iwl_mvm_tlc_update_notif, RX_HANDLER_SYNC),
+
+	RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif,
+		   RX_HANDLER_ASYNC_LOCKED),
+	RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif,
+		   RX_HANDLER_ASYNC_LOCKED),
+	RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics,
+		   RX_HANDLER_ASYNC_LOCKED),
+
+	RX_HANDLER(BA_WINDOW_STATUS_NOTIFICATION_ID,
+		   iwl_mvm_window_status_notif, RX_HANDLER_SYNC),
+
+	RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif,
+		   RX_HANDLER_SYNC),
+	RX_HANDLER(MCC_CHUB_UPDATE_CMD, iwl_mvm_rx_chub_update_mcc,
+		   RX_HANDLER_ASYNC_LOCKED),
+
+	RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, RX_HANDLER_SYNC),
+
+	RX_HANDLER(SCAN_ITERATION_COMPLETE,
+		   iwl_mvm_rx_lmac_scan_iter_complete_notif, RX_HANDLER_SYNC),
+	RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
+		   iwl_mvm_rx_lmac_scan_complete_notif,
+		   RX_HANDLER_ASYNC_LOCKED),
+	RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_scan_match_found,
+		   RX_HANDLER_SYNC),
+	RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
+		   RX_HANDLER_ASYNC_LOCKED),
+	RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC,
+		   iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC),
+
+	RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif,
+		   RX_HANDLER_SYNC),
+
+	RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
+		   RX_HANDLER_SYNC),
+
+	RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC),
+	RX_HANDLER(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION,
+		   iwl_mvm_power_uapsd_misbehaving_ap_notif, RX_HANDLER_SYNC),
+	RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif,
+		   RX_HANDLER_ASYNC_LOCKED),
+	RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
+		       iwl_mvm_temp_notif, RX_HANDLER_ASYNC_UNLOCKED),
+	RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
+		       iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC),
+
+	RX_HANDLER(TDLS_CHANNEL_SWITCH_NOTIFICATION, iwl_mvm_rx_tdls_notif,
+		   RX_HANDLER_ASYNC_LOCKED),
+	RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif,
+		   RX_HANDLER_SYNC),
+	RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler,
+		   RX_HANDLER_ASYNC_LOCKED),
+	RX_HANDLER_GRP(DEBUG_GROUP, MFU_ASSERT_DUMP_NTF,
+		       iwl_mvm_mfu_assert_dump_notif, RX_HANDLER_SYNC),
+	RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
+		       iwl_mvm_rx_stored_beacon_notif, RX_HANDLER_SYNC),
+	RX_HANDLER_GRP(DATA_PATH_GROUP, MU_GROUP_MGMT_NOTIF,
+		       iwl_mvm_mu_mimo_grp_notif, RX_HANDLER_SYNC),
+	RX_HANDLER_GRP(DATA_PATH_GROUP, STA_PM_NOTIF,
+		       iwl_mvm_sta_pm_notif, RX_HANDLER_SYNC),
+};
+#undef RX_HANDLER
+#undef RX_HANDLER_GRP
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
+	HCMD_NAME(MVM_ALIVE),
+	HCMD_NAME(REPLY_ERROR),
+	HCMD_NAME(ECHO_CMD),
+	HCMD_NAME(INIT_COMPLETE_NOTIF),
+	HCMD_NAME(PHY_CONTEXT_CMD),
+	HCMD_NAME(DBG_CFG),
+	HCMD_NAME(SCAN_CFG_CMD),
+	HCMD_NAME(SCAN_REQ_UMAC),
+	HCMD_NAME(SCAN_ABORT_UMAC),
+	HCMD_NAME(SCAN_COMPLETE_UMAC),
+	HCMD_NAME(TOF_CMD),
+	HCMD_NAME(TOF_NOTIFICATION),
+	HCMD_NAME(BA_WINDOW_STATUS_NOTIFICATION_ID),
+	HCMD_NAME(ADD_STA_KEY),
+	HCMD_NAME(ADD_STA),
+	HCMD_NAME(REMOVE_STA),
+	HCMD_NAME(FW_GET_ITEM_CMD),
+	HCMD_NAME(TX_CMD),
+	HCMD_NAME(SCD_QUEUE_CFG),
+	HCMD_NAME(TXPATH_FLUSH),
+	HCMD_NAME(MGMT_MCAST_KEY),
+	HCMD_NAME(WEP_KEY),
+	HCMD_NAME(SHARED_MEM_CFG),
+	HCMD_NAME(TDLS_CHANNEL_SWITCH_CMD),
+	HCMD_NAME(MAC_CONTEXT_CMD),
+	HCMD_NAME(TIME_EVENT_CMD),
+	HCMD_NAME(TIME_EVENT_NOTIFICATION),
+	HCMD_NAME(BINDING_CONTEXT_CMD),
+	HCMD_NAME(TIME_QUOTA_CMD),
+	HCMD_NAME(NON_QOS_TX_COUNTER_CMD),
+	HCMD_NAME(LEDS_CMD),
+	HCMD_NAME(LQ_CMD),
+	HCMD_NAME(FW_PAGING_BLOCK_CMD),
+	HCMD_NAME(SCAN_OFFLOAD_REQUEST_CMD),
+	HCMD_NAME(SCAN_OFFLOAD_ABORT_CMD),
+	HCMD_NAME(HOT_SPOT_CMD),
+	HCMD_NAME(SCAN_OFFLOAD_PROFILES_QUERY_CMD),
+	HCMD_NAME(BT_COEX_UPDATE_REDUCED_TXP),
+	HCMD_NAME(BT_COEX_CI),
+	HCMD_NAME(PHY_CONFIGURATION_CMD),
+	HCMD_NAME(CALIB_RES_NOTIF_PHY_DB),
+	HCMD_NAME(PHY_DB_CMD),
+	HCMD_NAME(SCAN_OFFLOAD_COMPLETE),
+	HCMD_NAME(SCAN_OFFLOAD_UPDATE_PROFILES_CMD),
+	HCMD_NAME(POWER_TABLE_CMD),
+	HCMD_NAME(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
+	HCMD_NAME(REPLY_THERMAL_MNG_BACKOFF),
+	HCMD_NAME(DC2DC_CONFIG_CMD),
+	HCMD_NAME(NVM_ACCESS_CMD),
+	HCMD_NAME(BEACON_NOTIFICATION),
+	HCMD_NAME(BEACON_TEMPLATE_CMD),
+	HCMD_NAME(TX_ANT_CONFIGURATION_CMD),
+	HCMD_NAME(BT_CONFIG),
+	HCMD_NAME(STATISTICS_CMD),
+	HCMD_NAME(STATISTICS_NOTIFICATION),
+	HCMD_NAME(EOSP_NOTIFICATION),
+	HCMD_NAME(REDUCE_TX_POWER_CMD),
+	HCMD_NAME(CARD_STATE_NOTIFICATION),
+	HCMD_NAME(MISSED_BEACONS_NOTIFICATION),
+	HCMD_NAME(TDLS_CONFIG_CMD),
+	HCMD_NAME(MAC_PM_POWER_TABLE),
+	HCMD_NAME(TDLS_CHANNEL_SWITCH_NOTIFICATION),
+	HCMD_NAME(MFUART_LOAD_NOTIFICATION),
+	HCMD_NAME(RSS_CONFIG_CMD),
+	HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC),
+	HCMD_NAME(REPLY_RX_PHY_CMD),
+	HCMD_NAME(REPLY_RX_MPDU_CMD),
+	HCMD_NAME(FRAME_RELEASE),
+	HCMD_NAME(BA_NOTIF),
+	HCMD_NAME(MCC_UPDATE_CMD),
+	HCMD_NAME(MCC_CHUB_UPDATE_CMD),
+	HCMD_NAME(MARKER_CMD),
+	HCMD_NAME(BT_PROFILE_NOTIFICATION),
+	HCMD_NAME(BCAST_FILTER_CMD),
+	HCMD_NAME(MCAST_FILTER_CMD),
+	HCMD_NAME(REPLY_SF_CFG_CMD),
+	HCMD_NAME(REPLY_BEACON_FILTERING_CMD),
+	HCMD_NAME(D3_CONFIG_CMD),
+	HCMD_NAME(PROT_OFFLOAD_CONFIG_CMD),
+	HCMD_NAME(OFFLOADS_QUERY_CMD),
+	HCMD_NAME(REMOTE_WAKE_CONFIG_CMD),
+	HCMD_NAME(MATCH_FOUND_NOTIFICATION),
+	HCMD_NAME(DTS_MEASUREMENT_NOTIFICATION),
+	HCMD_NAME(WOWLAN_PATTERNS),
+	HCMD_NAME(WOWLAN_CONFIGURATION),
+	HCMD_NAME(WOWLAN_TSC_RSC_PARAM),
+	HCMD_NAME(WOWLAN_TKIP_PARAM),
+	HCMD_NAME(WOWLAN_KEK_KCK_MATERIAL),
+	HCMD_NAME(WOWLAN_GET_STATUSES),
+	HCMD_NAME(SCAN_ITERATION_COMPLETE),
+	HCMD_NAME(D0I3_END_CMD),
+	HCMD_NAME(LTR_CONFIG),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
+	HCMD_NAME(SHARED_MEM_CFG_CMD),
+	HCMD_NAME(INIT_EXTENDED_CFG_CMD),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
+	HCMD_NAME(CHANNEL_SWITCH_NOA_NOTIF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
+	HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
+	HCMD_NAME(CTDP_CONFIG_CMD),
+	HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD),
+	HCMD_NAME(GEO_TX_POWER_LIMIT),
+	HCMD_NAME(CT_KILL_NOTIFICATION),
+	HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
+	HCMD_NAME(DQA_ENABLE_CMD),
+	HCMD_NAME(UPDATE_MU_GROUPS_CMD),
+	HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
+	HCMD_NAME(STA_HE_CTXT_CMD),
+	HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
+	HCMD_NAME(STA_PM_NOTIF),
+	HCMD_NAME(MU_GROUP_MGMT_NOTIF),
+	HCMD_NAME(RX_QUEUES_NOTIFICATION),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_debug_names[] = {
+	HCMD_NAME(MFU_ASSERT_DUMP_NTF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
+	HCMD_NAME(STORED_BEACON_NTF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_regulatory_and_nvm_names[] = {
+	HCMD_NAME(NVM_ACCESS_COMPLETE),
+	HCMD_NAME(NVM_GET_INFO),
+};
+
+static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
+	[LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
+	[LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
+	[SYSTEM_GROUP] = HCMD_ARR(iwl_mvm_system_names),
+	[MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names),
+	[PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
+	[DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
+	[PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
+	[REGULATORY_AND_NVM_GROUP] =
+		HCMD_ARR(iwl_mvm_regulatory_and_nvm_names),
+};
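+
+/*
+ * Editorial sketch (not part of the driver): why the HCMD_NAME arrays
+ * above must stay sorted - name lookup is a plain binary search on the
+ * command id. The struct layout is an assumption that mirrors
+ * iwl_hcmd_names.
+ */
+struct hcmd_name_sketch {
+	u8 cmd_id;
+	const char *name;
+};
+
+static const char *hcmd_lookup_sketch(const struct hcmd_name_sketch *tbl,
+				      int n, u8 id)
+{
+	int lo = 0, hi = n - 1;
+
+	while (lo <= hi) {
+		int mid = lo + (hi - lo) / 2;
+
+		if (tbl[mid].cmd_id == id)
+			return tbl[mid].name;
+		if (tbl[mid].cmd_id < id)
+			lo = mid + 1;
+		else
+			hi = mid - 1;
+	}
+	return "UNKNOWN";
+}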
+
+/* these forward declarations avoid having to export the functions */
+static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
+static void iwl_mvm_d0i3_exit_work(struct work_struct *wk);
+
+static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm)
+{
+	const struct iwl_pwr_tx_backoff *backoff = mvm->cfg->pwr_tx_backoffs;
+	u64 dflt_pwr_limit;
+
+	if (!backoff)
+		return 0;
+
+	dflt_pwr_limit = iwl_acpi_get_pwr_limit(mvm->dev);
+
+	while (backoff->pwr) {
+		if (dflt_pwr_limit >= backoff->pwr)
+			return backoff->backoff;
+
+		backoff++;
+	}
+
+	return 0;
+}
+
+static void iwl_mvm_tx_unblock_dwork(struct work_struct *work)
+{
+	struct iwl_mvm *mvm =
+		container_of(work, struct iwl_mvm, cs_tx_unblock_dwork.work);
+	struct ieee80211_vif *tx_blocked_vif;
+	struct iwl_mvm_vif *mvmvif;
+
+	mutex_lock(&mvm->mutex);
+
+	tx_blocked_vif =
+		rcu_dereference_protected(mvm->csa_tx_blocked_vif,
+					  lockdep_is_held(&mvm->mutex));
+
+	if (!tx_blocked_vif)
+		goto unlock;
+
+	mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif);
+	iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
+	RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
+unlock:
+	mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_fwrt_dump_start(void *ctx)
+{
+	struct iwl_mvm *mvm = ctx;
+	int ret;
+
+	ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
+	if (ret)
+		return ret;
+
+	mutex_lock(&mvm->mutex);
+
+	return 0;
+}
+
+static void iwl_mvm_fwrt_dump_end(void *ctx)
+{
+	struct iwl_mvm *mvm = ctx;
+
+	mutex_unlock(&mvm->mutex);
+
+	iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
+}
+
+static bool iwl_mvm_fwrt_fw_running(void *ctx)
+{
+	return iwl_mvm_firmware_running(ctx);
+}
+
+static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
+	.dump_start = iwl_mvm_fwrt_dump_start,
+	.dump_end = iwl_mvm_fwrt_dump_end,
+	.fw_running = iwl_mvm_fwrt_fw_running,
+};
+
+static struct iwl_op_mode *
+iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+		      const struct iwl_fw *fw, struct dentry *dbgfs_dir)
+{
+	struct ieee80211_hw *hw;
+	struct iwl_op_mode *op_mode;
+	struct iwl_mvm *mvm;
+	struct iwl_trans_config trans_cfg = {};
+	static const u8 no_reclaim_cmds[] = {
+		TX_CMD,
+	};
+	int err, scan_size;
+	u32 min_backoff;
+
+	/*
+	 * We use IWL_MVM_STATION_COUNT to check the validity of the station
+	 * index all over the driver - check that its value corresponds to the
+	 * array size.
+	 */
+	BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) != IWL_MVM_STATION_COUNT);
+
+	/********************************
+	 * 1. Allocating and configuring HW data
+	 ********************************/
+	hw = ieee80211_alloc_hw(sizeof(struct iwl_op_mode) +
+				sizeof(struct iwl_mvm),
+				&iwl_mvm_hw_ops);
+	if (!hw)
+		return NULL;
+
+	if (cfg->max_rx_agg_size)
+		hw->max_rx_aggregation_subframes = cfg->max_rx_agg_size;
+
+	if (cfg->max_tx_agg_size)
+		hw->max_tx_aggregation_subframes = cfg->max_tx_agg_size;
+
+	op_mode = hw->priv;
+
+	mvm = IWL_OP_MODE_GET_MVM(op_mode);
+	mvm->dev = trans->dev;
+	mvm->trans = trans;
+	mvm->cfg = cfg;
+	mvm->fw = fw;
+	mvm->hw = hw;
+
+	iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm,
+			    dbgfs_dir);
+
+	mvm->init_status = 0;
+
+	if (iwl_mvm_has_new_rx_api(mvm)) {
+		op_mode->ops = &iwl_mvm_ops_mq;
+		trans->rx_mpdu_cmd_hdr_size =
+			(trans->cfg->device_family >=
+			 IWL_DEVICE_FAMILY_22560) ?
+			sizeof(struct iwl_rx_mpdu_desc) :
+			IWL_RX_DESC_SIZE_V1;
+	} else {
+		op_mode->ops = &iwl_mvm_ops;
+		trans->rx_mpdu_cmd_hdr_size =
+			sizeof(struct iwl_rx_mpdu_res_start);
+
+		if (WARN_ON(trans->num_rx_queues > 1))
+			goto out_free;
+	}
+
+	mvm->fw_restart = iwlwifi_mod_params.fw_restart ? -1 : 0;
+
+	mvm->aux_queue = IWL_MVM_DQA_AUX_QUEUE;
+	mvm->snif_queue = IWL_MVM_DQA_INJECT_MONITOR_QUEUE;
+	mvm->probe_queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
+	mvm->p2p_dev_queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
+
+	mvm->sf_state = SF_UNINIT;
+	if (iwl_mvm_has_unified_ucode(mvm))
+		iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_REGULAR);
+	else
+		iwl_fw_set_current_image(&mvm->fwrt, IWL_UCODE_INIT);
+	mvm->drop_bcn_ap_mode = true;
+
+	mutex_init(&mvm->mutex);
+	mutex_init(&mvm->d0i3_suspend_mutex);
+	spin_lock_init(&mvm->async_handlers_lock);
+	INIT_LIST_HEAD(&mvm->time_event_list);
+	INIT_LIST_HEAD(&mvm->aux_roc_te_list);
+	INIT_LIST_HEAD(&mvm->async_handlers_list);
+	spin_lock_init(&mvm->time_event_lock);
+	spin_lock_init(&mvm->queue_info_lock);
+
+	INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
+	INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
+	INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
+	INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
+	INIT_DELAYED_WORK(&mvm->scan_timeout_dwork, iwl_mvm_scan_timeout_wk);
+	INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
+
+	spin_lock_init(&mvm->d0i3_tx_lock);
+	spin_lock_init(&mvm->refs_lock);
+	skb_queue_head_init(&mvm->d0i3_tx);
+	init_waitqueue_head(&mvm->d0i3_exit_waitq);
+	init_waitqueue_head(&mvm->rx_sync_waitq);
+
+	atomic_set(&mvm->queue_sync_counter, 0);
+
+	SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);
+
+	spin_lock_init(&mvm->tcm.lock);
+	INIT_DELAYED_WORK(&mvm->tcm.work, iwl_mvm_tcm_work);
+	mvm->tcm.ts = jiffies;
+	mvm->tcm.ll_ts = jiffies;
+	mvm->tcm.uapsd_nonagg_ts = jiffies;
+
+	INIT_DELAYED_WORK(&mvm->cs_tx_unblock_dwork, iwl_mvm_tx_unblock_dwork);
+
+	/*
+	 * Populate the state variables that the transport layer needs
+	 * to know about.
+	 */
+	trans_cfg.op_mode = op_mode;
+	trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
+	trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
+	switch (iwlwifi_mod_params.amsdu_size) {
+	case IWL_AMSDU_DEF:
+	case IWL_AMSDU_4K:
+		trans_cfg.rx_buf_size = IWL_AMSDU_4K;
+		break;
+	case IWL_AMSDU_8K:
+		trans_cfg.rx_buf_size = IWL_AMSDU_8K;
+		break;
+	case IWL_AMSDU_12K:
+		trans_cfg.rx_buf_size = IWL_AMSDU_12K;
+		break;
+	default:
+		pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME,
+		       iwlwifi_mod_params.amsdu_size);
+		trans_cfg.rx_buf_size = IWL_AMSDU_4K;
+	}
+
+	/* the hardware splits the A-MSDU */
+	if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+		trans_cfg.rx_buf_size = IWL_AMSDU_2K;
+		/* TODO: remove when balanced power mode is fw supported */
+		iwlmvm_mod_params.power_scheme = IWL_POWER_SCHEME_CAM;
+	} else if (mvm->cfg->mq_rx_supported) {
+		trans_cfg.rx_buf_size = IWL_AMSDU_4K;
+	}
+
+	trans->wide_cmd_header = true;
+	trans_cfg.bc_table_dword =
+		mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_22560;
+
+	trans_cfg.command_groups = iwl_mvm_groups;
+	trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
+
+	trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;
+	trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
+	trans_cfg.scd_set_active = true;
+
+	trans_cfg.cb_data_offs = offsetof(struct ieee80211_tx_info,
+					  driver_data[2]);
+
+	trans_cfg.sw_csum_tx = IWL_MVM_SW_TX_CSUM_OFFLOAD;
+
+	/* Set a short watchdog for the command queue */
+	trans_cfg.cmd_q_wdg_timeout =
+		iwl_mvm_get_wd_timeout(mvm, NULL, false, true);
+
+	snprintf(mvm->hw->wiphy->fw_version,
+		 sizeof(mvm->hw->wiphy->fw_version),
+		 "%s", fw->fw_version);
+
+	/* Configure transport layer */
+	iwl_trans_configure(mvm->trans, &trans_cfg);
+
+	trans->rx_mpdu_cmd = REPLY_RX_MPDU_CMD;
+	trans->dbg_dest_tlv = mvm->fw->dbg_dest_tlv;
+	trans->dbg_dest_reg_num = mvm->fw->dbg_dest_reg_num;
+	memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv,
+	       sizeof(trans->dbg_conf_tlv));
+	trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv;
+	trans->dbg_dump_mask = mvm->fw->dbg_dump_mask;
+
+	trans->iml = mvm->fw->iml;
+	trans->iml_len = mvm->fw->iml_len;
+
+	/* set up notification wait support */
+	iwl_notification_wait_init(&mvm->notif_wait);
+
+	/* Init phy db */
+	mvm->phy_db = iwl_phy_db_init(trans);
+	if (!mvm->phy_db) {
+		IWL_ERR(mvm, "Cannot init phy_db\n");
+		goto out_free;
+	}
+
+	IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
+		 mvm->cfg->name, mvm->trans->hw_rev);
+
+	if (iwlwifi_mod_params.nvm_file)
+		mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
+	else
+		IWL_DEBUG_EEPROM(mvm->trans->dev,
+				 "working without external nvm file\n");
+
+	err = iwl_trans_start_hw(mvm->trans);
+	if (err)
+		goto out_free;
+
+	mutex_lock(&mvm->mutex);
+	iwl_mvm_ref(mvm, IWL_MVM_REF_INIT_UCODE);
+	err = iwl_run_init_mvm_ucode(mvm, true);
+	if (!iwlmvm_mod_params.init_dbg || !err)
+		iwl_mvm_stop_device(mvm);
+	iwl_mvm_unref(mvm, IWL_MVM_REF_INIT_UCODE);
+	mutex_unlock(&mvm->mutex);
+	if (err < 0) {
+		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", err);
+		goto out_free;
+	}
+
+	scan_size = iwl_mvm_scan_size(mvm);
+
+	mvm->scan_cmd = kmalloc(scan_size, GFP_KERNEL);
+	if (!mvm->scan_cmd)
+		goto out_free;
+
+	/* Set EBS as successful unless the FW states otherwise. */
+	mvm->last_ebs_successful = true;
+
+	err = iwl_mvm_mac_setup_register(mvm);
+	if (err)
+		goto out_free;
+	mvm->hw_registered = true;
+
+	min_backoff = iwl_mvm_min_backoff(mvm);
+	iwl_mvm_thermal_initialize(mvm, min_backoff);
+
+	err = iwl_mvm_dbgfs_register(mvm, dbgfs_dir);
+	if (err)
+		goto out_unregister;
+
+	if (!iwl_mvm_has_new_rx_stats_api(mvm))
+		memset(&mvm->rx_stats_v3, 0,
+		       sizeof(struct mvm_statistics_rx_v3));
+	else
+		memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));
+
+	/* The transport always starts with a taken reference; we can
+	 * release it now if d0i3 is supported */
+	if (iwl_mvm_is_d0i3_supported(mvm))
+		iwl_trans_unref(mvm->trans);
+
+	iwl_mvm_tof_init(mvm);
+
+	return op_mode;
+
+ out_unregister:
+	if (iwlmvm_mod_params.init_dbg)
+		return op_mode;
+
+	ieee80211_unregister_hw(mvm->hw);
+	mvm->hw_registered = false;
+	iwl_mvm_leds_exit(mvm);
+	iwl_mvm_thermal_exit(mvm);
+ out_free:
+	iwl_fw_flush_dump(&mvm->fwrt);
+
+	if (iwlmvm_mod_params.init_dbg)
+		return op_mode;
+	iwl_phy_db_free(mvm->phy_db);
+	kfree(mvm->scan_cmd);
+	iwl_trans_op_mode_leave(trans);
+
+	ieee80211_free_hw(mvm->hw);
+	return NULL;
+}
+
+static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
+{
+	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+	int i;
+
+	/* If d0i3 is supported, we have released the reference that
+	 * the transport started with, so we should take it back now
+	 * that we are leaving.
+	 */
+	if (iwl_mvm_is_d0i3_supported(mvm))
+		iwl_trans_ref(mvm->trans);
+
+	iwl_mvm_leds_exit(mvm);
+
+	iwl_mvm_thermal_exit(mvm);
+
+	if (mvm->init_status & IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE) {
+		ieee80211_unregister_hw(mvm->hw);
+		mvm->init_status &= ~IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE;
+	}
+
+	kfree(mvm->scan_cmd);
+	kfree(mvm->mcast_filter_cmd);
+	mvm->mcast_filter_cmd = NULL;
+
+#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS)
+	kfree(mvm->d3_resume_sram);
+#endif
+	iwl_trans_op_mode_leave(mvm->trans);
+
+	iwl_phy_db_free(mvm->phy_db);
+	mvm->phy_db = NULL;
+
+	kfree(mvm->nvm_data);
+	for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
+		kfree(mvm->nvm_sections[i].data);
+
+	cancel_delayed_work_sync(&mvm->tcm.work);
+
+	iwl_mvm_tof_clean(mvm);
+
+	mutex_destroy(&mvm->mutex);
+	mutex_destroy(&mvm->d0i3_suspend_mutex);
+
+	ieee80211_free_hw(mvm->hw);
+}
+
+struct iwl_async_handler_entry {
+	struct list_head list;
+	struct iwl_rx_cmd_buffer rxb;
+	enum iwl_rx_handler_context context;
+	void (*fn)(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
+};
+
+void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
+{
+	struct iwl_async_handler_entry *entry, *tmp;
+
+	spin_lock_bh(&mvm->async_handlers_lock);
+	list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
+		iwl_free_rxb(&entry->rxb);
+		list_del(&entry->list);
+		kfree(entry);
+	}
+	spin_unlock_bh(&mvm->async_handlers_lock);
+}
+
+static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
+{
+	struct iwl_mvm *mvm =
+		container_of(wk, struct iwl_mvm, async_handlers_wk);
+	struct iwl_async_handler_entry *entry, *tmp;
+	LIST_HEAD(local_list);
+
+	/* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */
+
+	/*
+	 * Sync with Rx path with a lock. Remove all the entries from this list,
+	 * add them to a local one (lock free), and then handle them.
+	 */
+	spin_lock_bh(&mvm->async_handlers_lock);
+	list_splice_init(&mvm->async_handlers_list, &local_list);
+	spin_unlock_bh(&mvm->async_handlers_lock);
+
+	list_for_each_entry_safe(entry, tmp, &local_list, list) {
+		if (entry->context == RX_HANDLER_ASYNC_LOCKED)
+			mutex_lock(&mvm->mutex);
+		entry->fn(mvm, &entry->rxb);
+		iwl_free_rxb(&entry->rxb);
+		list_del(&entry->list);
+		if (entry->context == RX_HANDLER_ASYNC_LOCKED)
+			mutex_unlock(&mvm->mutex);
+		kfree(entry);
+	}
+}
+
+static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
+					    struct iwl_rx_packet *pkt)
+{
+	struct iwl_fw_dbg_trigger_tlv *trig;
+	struct iwl_fw_dbg_trigger_cmd *cmds_trig;
+	int i;
+
+	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF))
+		return;
+
+	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_FW_NOTIF);
+	cmds_trig = (void *)trig->data;
+
+	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(cmds_trig->cmds); i++) {
+		/* don't collect on CMD 0 */
+		if (!cmds_trig->cmds[i].cmd_id)
+			break;
+
+		if (cmds_trig->cmds[i].cmd_id != pkt->hdr.cmd ||
+		    cmds_trig->cmds[i].group_id != pkt->hdr.group_id)
+			continue;
+
+		iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
+					"CMD 0x%02x.%02x received",
+					pkt->hdr.group_id, pkt->hdr.cmd);
+		break;
+	}
+}
+
+static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
+			      struct iwl_rx_cmd_buffer *rxb,
+			      struct iwl_rx_packet *pkt)
+{
+	int i;
+
+	iwl_mvm_rx_check_trigger(mvm, pkt);
+
+	/*
+	 * Do the notification wait before RX handlers so
+	 * even if the RX handler consumes the RXB we have
+	 * access to it in the notification wait entry.
+	 */
+	iwl_notification_wait_notify(&mvm->notif_wait, pkt);
+
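+	/* run SYNC handlers inline; queue ASYNC ones for the worker */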
+	for (i = 0; i < ARRAY_SIZE(iwl_mvm_rx_handlers); i++) {
+		const struct iwl_rx_handlers *rx_h = &iwl_mvm_rx_handlers[i];
+		struct iwl_async_handler_entry *entry;
+
+		if (rx_h->cmd_id != WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd))
+			continue;
+
+		if (rx_h->context == RX_HANDLER_SYNC) {
+			rx_h->fn(mvm, rxb);
+			return;
+		}
+
+		entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+		/* we can't do much... */
+		if (!entry)
+			return;
+
+		entry->rxb._page = rxb_steal_page(rxb);
+		entry->rxb._offset = rxb->_offset;
+		entry->rxb._rx_page_order = rxb->_rx_page_order;
+		entry->fn = rx_h->fn;
+		entry->context = rx_h->context;
+		spin_lock(&mvm->async_handlers_lock);
+		list_add_tail(&entry->list, &mvm->async_handlers_list);
+		spin_unlock(&mvm->async_handlers_lock);
+		schedule_work(&mvm->async_handlers_wk);
+		break;
+	}
+}
+
+static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
+		       struct napi_struct *napi,
+		       struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+	u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
+
+	if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
+		iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
+	else if (cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_PHY_CMD))
+		iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
+	else
+		iwl_mvm_rx_common(mvm, rxb, pkt);
+}
+
+static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode,
+			  struct napi_struct *napi,
+			  struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+	u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
+
+	if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
+		iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, 0);
+	else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
+					 RX_QUEUES_NOTIFICATION)))
+		iwl_mvm_rx_queue_notif(mvm, rxb, 0);
+	else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE))
+		iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
+	else
+		iwl_mvm_rx_common(mvm, rxb, pkt);
+}
+
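+/*
+ * mac80211 queue stops are reference counted per queue, since several
+ * HW queues may map to a single mac80211 queue.
+ */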
+void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
+{
+	int q;
+
+	if (WARN_ON_ONCE(!mq))
+		return;
+
+	for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
+		if (atomic_inc_return(&mvm->mac80211_queue_stop_count[q]) > 1) {
+			IWL_DEBUG_TX_QUEUES(mvm,
+					    "mac80211 %d already stopped\n", q);
+			continue;
+		}
+
+		ieee80211_stop_queue(mvm->hw, q);
+	}
+}
+
+static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode,
+			     const struct iwl_device_cmd *cmd)
+{
+	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+	/*
+	 * For now, we only set the CMD_WANT_ASYNC_CALLBACK for ADD_STA
+	 * commands that need to block the Tx queues.
+	 */
+	iwl_trans_block_txq_ptrs(mvm->trans, false);
+}
+
+static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
+{
+	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+	unsigned long mq;
+
+	spin_lock_bh(&mvm->queue_info_lock);
+	mq = mvm->hw_queue_to_mac80211[hw_queue];
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	iwl_mvm_stop_mac_queues(mvm, mq);
+}
+
+void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq)
+{
+	int q;
+
+	if (WARN_ON_ONCE(!mq))
+		return;
+
+	for_each_set_bit(q, &mq, IEEE80211_MAX_QUEUES) {
+		if (atomic_dec_return(&mvm->mac80211_queue_stop_count[q]) > 0) {
+			IWL_DEBUG_TX_QUEUES(mvm,
+					    "mac80211 %d still stopped\n", q);
+			continue;
+		}
+
+		ieee80211_wake_queue(mvm->hw, q);
+	}
+}
+
+static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
+{
+	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+	unsigned long mq;
+
+	spin_lock_bh(&mvm->queue_info_lock);
+	mq = mvm->hw_queue_to_mac80211[hw_queue];
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	iwl_mvm_start_mac_queues(mvm, mq);
+}
+
+static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm)
+{
+	bool state = iwl_mvm_is_radio_killed(mvm);
+
+	if (state)
+		wake_up(&mvm->rx_sync_waitq);
+
+	wiphy_rfkill_set_hw_state(mvm->hw->wiphy, state);
+}
+
+void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
+{
+	if (state)
+		set_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
+	else
+		clear_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
+
+	iwl_mvm_set_rfkill_state(mvm);
+}
+
+static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
+{
+	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+	bool calibrating = READ_ONCE(mvm->calibrating);
+
+	if (state)
+		set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
+	else
+		clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
+
+	iwl_mvm_set_rfkill_state(mvm);
+
+	/* iwl_run_init_mvm_ucode is waiting for results, abort it */
+	if (calibrating)
+		iwl_abort_notification_waits(&mvm->notif_wait);
+
+	/*
+	 * Stop the device if we run OPERATIONAL firmware or if we are in the
+	 * middle of the calibrations.
+	 */
+	return state && (mvm->fwrt.cur_fw_img != IWL_UCODE_INIT || calibrating);
+}
+
+static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
+{
+	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+	struct ieee80211_tx_info *info;
+
+	info = IEEE80211_SKB_CB(skb);
+	iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
+	ieee80211_free_txskb(mvm->hw, skb);
+}
+
+struct iwl_mvm_reprobe {
+	struct device *dev;
+	struct work_struct work;
+};
+
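+/*
+ * Runs in process context; the module reference was taken in
+ * iwl_mvm_nic_restart() before scheduling this work.
+ */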
+static void iwl_mvm_reprobe_wk(struct work_struct *wk)
+{
+	struct iwl_mvm_reprobe *reprobe;
+
+	reprobe = container_of(wk, struct iwl_mvm_reprobe, work);
+	if (device_reprobe(reprobe->dev))
+		dev_err(reprobe->dev, "reprobe failed!\n");
+	kfree(reprobe);
+	module_put(THIS_MODULE);
+}
+
+void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
+{
+	iwl_abort_notification_waits(&mvm->notif_wait);
+
+	/*
+	 * This is a bit racy, but in the worst case we tell mac80211 about
+	 * a stopped/aborted scan when that was already done, which is not
+	 * a problem. It is necessary to abort any OS scan here because
+	 * mac80211 requires the scan to be cleared before restarting.
+	 * We'll reset the scan_status to NONE in restart cleanup in
+	 * the next start() call from mac80211. If restart isn't called
+	 * (no fw restart) the scan status will stay busy.
+	 */
+	iwl_mvm_report_scan_aborted(mvm);
+
+	/*
+	 * If we're already restarting, don't cycle restarts.
+	 * If the INIT fw asserted, it will likely fail again.
+	 * If the WoWLAN fw asserted, don't restart either: mac80211
+	 * can't recover from this since we're already half suspended.
+	 */
+	if (!mvm->fw_restart && fw_error) {
+		iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
+					NULL);
+	} else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+		struct iwl_mvm_reprobe *reprobe;
+
+		IWL_ERR(mvm,
+			"Firmware error during reconfiguration - reprobe!\n");
+
+		/*
+		 * Take a module reference to avoid doing this while
+		 * unloading anyway, and to avoid scheduling work that
+		 * runs code which is being removed.
+		 */
+		if (!try_module_get(THIS_MODULE)) {
+			IWL_ERR(mvm, "Module is being unloaded - abort\n");
+			return;
+		}
+
+		reprobe = kzalloc(sizeof(*reprobe), GFP_ATOMIC);
+		if (!reprobe) {
+			module_put(THIS_MODULE);
+			return;
+		}
+		reprobe->dev = mvm->trans->dev;
+		INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
+		schedule_work(&reprobe->work);
+	} else if (mvm->fwrt.cur_fw_img == IWL_UCODE_REGULAR &&
+		   mvm->hw_registered) {
+		/* don't let the transport/FW power down */
+		iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
+
+		if (fw_error && mvm->fw_restart > 0)
+			mvm->fw_restart--;
+		set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
+		ieee80211_restart_hw(mvm->hw);
+	}
+}
+
+static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
+{
+	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+	iwl_mvm_dump_nic_error_log(mvm);
+
+	iwl_mvm_nic_restart(mvm, true);
+}
+
+static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
+{
+	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+	WARN_ON(1);
+	iwl_mvm_nic_restart(mvm, true);
+}
+
+struct iwl_d0i3_iter_data {
+	struct iwl_mvm *mvm;
+	struct ieee80211_vif *connected_vif;
+	u8 ap_sta_id;
+	u8 vif_count;
+	u8 offloading_tid;
+	bool disable_offloading;
+};
+
+static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
+					struct ieee80211_vif *vif,
+					struct iwl_d0i3_iter_data *iter_data)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm_sta *mvmsta;
+	u32 available_tids = 0;
+	u8 tid;
+
+	if (WARN_ON(vif->type != NL80211_IFTYPE_STATION ||
+		    mvmvif->ap_sta_id == IWL_MVM_INVALID_STA))
+		return false;
+
+	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
+	if (!mvmsta)
+		return false;
+
+	spin_lock_bh(&mvmsta->lock);
+	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+		struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+
+		/*
+		 * in case of pending tx packets, don't use this tid
+		 * for offloading in order to prevent reuse of the same
+		 * qos seq counters.
+		 */
+		if (iwl_mvm_tid_queued(mvm, tid_data))
+			continue;
+
+		if (tid_data->state != IWL_AGG_OFF)
+			continue;
+
+		available_tids |= BIT(tid);
+	}
+	spin_unlock_bh(&mvmsta->lock);
+
+	/*
+	 * disallow protocol offloading if we have no available tid
+	 * (with no pending frames and no active aggregation,
+	 * as we don't handle "holes" properly - the scheduler needs the
+	 * frame's seq number and TFD index to match)
+	 */
+	if (!available_tids)
+		return true;
+
+	/* for simplicity, just use the first available tid */
+	iter_data->offloading_tid = ffs(available_tids) - 1;
+	return false;
+}
+
+static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac,
+					struct ieee80211_vif *vif)
+{
+	struct iwl_d0i3_iter_data *data = _data;
+	struct iwl_mvm *mvm = data->mvm;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
+
+	IWL_DEBUG_RPM(mvm, "entering D0i3 - vif %pM\n", vif->addr);
+	if (vif->type != NL80211_IFTYPE_STATION ||
+	    !vif->bss_conf.assoc)
+		return;
+
+	/*
+	 * in case of pending tx packets or active aggregations,
+	 * avoid offloading features in order to prevent reuse of
+	 * the same qos seq counters.
+	 */
+	if (iwl_mvm_disallow_offloading(mvm, vif, data))
+		data->disable_offloading = true;
+
+	iwl_mvm_update_d0i3_power_mode(mvm, vif, true, flags);
+	iwl_mvm_send_proto_offload(mvm, vif, data->disable_offloading,
+				   false, flags);
+
+	/*
+	 * on init/association, mvm already configures POWER_TABLE_CMD
+	 * and REPLY_MCAST_FILTER_CMD, so currently don't
+	 * reconfigure them (we might want to use different
+	 * params later on, though).
+	 */
+	data->ap_sta_id = mvmvif->ap_sta_id;
+	data->vif_count++;
+
+	/*
+	 * no new commands can be sent at this stage, so it's safe
+	 * to save the vif pointer during d0i3 entrance.
+	 */
+	data->connected_vif = vif;
+}
+
+static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
+				    struct iwl_wowlan_config_cmd *cmd,
+				    struct iwl_d0i3_iter_data *iter_data)
+{
+	struct ieee80211_sta *ap_sta;
+	struct iwl_mvm_sta *mvm_ap_sta;
+
+	if (iter_data->ap_sta_id == IWL_MVM_INVALID_STA)
+		return;
+
+	rcu_read_lock();
+
+	ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[iter_data->ap_sta_id]);
+	if (IS_ERR_OR_NULL(ap_sta))
+		goto out;
+
+	mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
+	cmd->is_11n_connection = ap_sta->ht_cap.ht_supported;
+	cmd->offloading_tid = iter_data->offloading_tid;
+	cmd->flags = ENABLE_L3_FILTERING | ENABLE_NBNS_FILTERING |
+		ENABLE_DHCP_FILTERING | ENABLE_STORE_BEACON;
+	/*
+	 * The d0i3 uCode takes care of the nonqos counters,
+	 * so configure only the qos seq ones.
+	 */
+	iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, cmd);
+out:
+	rcu_read_unlock();
+}
+
+int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
+{
+	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
+	int ret;
+	struct iwl_d0i3_iter_data d0i3_iter_data = {
+		.mvm = mvm,
+	};
+	struct iwl_wowlan_config_cmd wowlan_config_cmd = {
+		.wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME |
+					     IWL_WOWLAN_WAKEUP_BEACON_MISS |
+					     IWL_WOWLAN_WAKEUP_LINK_CHANGE),
+	};
+	struct iwl_d3_manager_config d3_cfg_cmd = {
+		.min_sleep_time = cpu_to_le32(1000),
+		.wakeup_flags = cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR),
+	};
+
+	IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");
+
+	if (WARN_ON_ONCE(mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR))
+		return -EINVAL;
+
+	set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
+
+	/*
+	 * iwl_mvm_ref_sync takes a reference before checking the flag,
+	 * so by checking that no reference is held we prevent a state
+	 * in which iwl_mvm_ref_sync succeeds while we configure the
+	 * firmware to enter d0i3.
+	 */
+	if (iwl_mvm_ref_taken(mvm)) {
+		IWL_DEBUG_RPM(mvm->trans, "abort d0i3 due to taken ref\n");
+		clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
+		wake_up(&mvm->d0i3_exit_waitq);
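+		/* a positive return means entry was aborted, not that it failed */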
+		return 1;
+	}
+
+	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+						   IEEE80211_IFACE_ITER_NORMAL,
+						   iwl_mvm_enter_d0i3_iterator,
+						   &d0i3_iter_data);
+	if (d0i3_iter_data.vif_count == 1) {
+		mvm->d0i3_ap_sta_id = d0i3_iter_data.ap_sta_id;
+		mvm->d0i3_offloading = !d0i3_iter_data.disable_offloading;
+	} else {
+		WARN_ON_ONCE(d0i3_iter_data.vif_count > 1);
+		mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
+		mvm->d0i3_offloading = false;
+	}
+
+	iwl_mvm_pause_tcm(mvm, true);
+	/* make sure we have no running tx while configuring the seqno */
+	synchronize_net();
+
+	/* Flush the hw queues, in case something got queued during entry */
+	/* TODO new tx api */
+	if (iwl_mvm_has_new_tx_api(mvm)) {
+		WARN_ONCE(1, "d0i3: Need to implement flush TX queue\n");
+	} else {
+		ret = iwl_mvm_flush_tx_path(mvm, iwl_mvm_flushable_queues(mvm),
+					    flags);
+		if (ret)
+			return ret;
+	}
+
+	/* configure wowlan configuration only if needed */
+	if (mvm->d0i3_ap_sta_id != IWL_MVM_INVALID_STA) {
+		/* wake on beacons only if beacon storing isn't supported */
+		if (!fw_has_capa(&mvm->fw->ucode_capa,
+				 IWL_UCODE_TLV_CAPA_BEACON_STORING))
+			wowlan_config_cmd.wakeup_filter |=
+				cpu_to_le32(IWL_WOWLAN_WAKEUP_BCN_FILTERING);
+
+		iwl_mvm_wowlan_config_key_params(mvm,
+						 d0i3_iter_data.connected_vif,
+						 true, flags);
+
+		iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd,
+					&d0i3_iter_data);
+
+		ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
+					   sizeof(wowlan_config_cmd),
+					   &wowlan_config_cmd);
+		if (ret)
+			return ret;
+	}
+
+	return iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD,
+				    flags | CMD_MAKE_TRANS_IDLE,
+				    sizeof(d3_cfg_cmd), &d3_cfg_cmd);
+}
+
+static void iwl_mvm_exit_d0i3_iterator(void *_data, u8 *mac,
+				       struct ieee80211_vif *vif)
+{
+	struct iwl_mvm *mvm = _data;
+	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO;
+
+	IWL_DEBUG_RPM(mvm, "exiting D0i3 - vif %pM\n", vif->addr);
+	if (vif->type != NL80211_IFTYPE_STATION ||
+	    !vif->bss_conf.assoc)
+		return;
+
+	iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags);
+}
+
+struct iwl_mvm_d0i3_exit_work_iter_data {
+	struct iwl_mvm *mvm;
+	struct iwl_wowlan_status *status;
+	u32 wakeup_reasons;
+};
+
+static void iwl_mvm_d0i3_exit_work_iter(void *_data, u8 *mac,
+					struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_d0i3_exit_work_iter_data *data = _data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	u32 reasons = data->wakeup_reasons;
+
+	/* consider only the relevant station interface */
+	if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
+	    data->mvm->d0i3_ap_sta_id != mvmvif->ap_sta_id)
+		return;
+
+	if (reasons & IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH)
+		iwl_mvm_connection_loss(data->mvm, vif, "D0i3");
+	else if (reasons & IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON)
+		ieee80211_beacon_loss(vif);
+	else
+		iwl_mvm_d0i3_update_keys(data->mvm, vif, data->status);
+}
+
+void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
+{
+	struct ieee80211_sta *sta = NULL;
+	struct iwl_mvm_sta *mvm_ap_sta;
+	int i;
+	bool wake_queues = false;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	spin_lock_bh(&mvm->d0i3_tx_lock);
+
+	if (mvm->d0i3_ap_sta_id == IWL_MVM_INVALID_STA)
+		goto out;
+
+	IWL_DEBUG_RPM(mvm, "re-enqueue packets\n");
+
+	/* get the sta in order to update seq numbers and re-enqueue skbs */
+	sta = rcu_dereference_protected(
+			mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id],
+			lockdep_is_held(&mvm->mutex));
+
+	if (IS_ERR_OR_NULL(sta)) {
+		sta = NULL;
+		goto out;
+	}
+
+	if (mvm->d0i3_offloading && qos_seq) {
+		/* update qos seq numbers if offloading was enabled */
+		mvm_ap_sta = iwl_mvm_sta_from_mac80211(sta);
+		for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+			u16 seq = le16_to_cpu(qos_seq[i]);
+			/* firmware stores last-used one, we store next one */
+			seq += 0x10;
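+			/* adding 0x10 advances the sequence number by one:
+			 * the low 4 bits of the sequence control field are
+			 * the fragment number
+			 */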
+			mvm_ap_sta->tid_data[i].seq_number = seq;
+		}
+	}
+out:
+	/* re-enqueue (or drop) all packets */
+	while (!skb_queue_empty(&mvm->d0i3_tx)) {
+		struct sk_buff *skb = __skb_dequeue(&mvm->d0i3_tx);
+
+		if (!sta || iwl_mvm_tx_skb(mvm, skb, sta))
+			ieee80211_free_txskb(mvm->hw, skb);
+
+		/* if the skb_queue is not empty, we need to wake queues */
+		wake_queues = true;
+	}
+	clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
+	wake_up(&mvm->d0i3_exit_waitq);
+	mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
+	if (wake_queues)
+		ieee80211_wake_queues(mvm->hw);
+
+	spin_unlock_bh(&mvm->d0i3_tx_lock);
+}
+
+static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
+{
+	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work);
+	struct iwl_host_cmd get_status_cmd = {
+		.id = WOWLAN_GET_STATUSES,
+		.flags = CMD_HIGH_PRIO | CMD_WANT_SKB,
+	};
+	struct iwl_mvm_d0i3_exit_work_iter_data iter_data = {
+		.mvm = mvm,
+	};
+
+	struct iwl_wowlan_status *status;
+	int ret;
+	u32 wakeup_reasons = 0;
+	__le16 *qos_seq = NULL;
+
+	mutex_lock(&mvm->mutex);
+	ret = iwl_mvm_send_cmd(mvm, &get_status_cmd);
+	if (ret)
+		goto out;
+
+	status = (void *)get_status_cmd.resp_pkt->data;
+	wakeup_reasons = le32_to_cpu(status->wakeup_reasons);
+	qos_seq = status->qos_seq_ctr;
+
+	IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);
+
+	iter_data.wakeup_reasons = wakeup_reasons;
+	iter_data.status = status;
+	ieee80211_iterate_active_interfaces(mvm->hw,
+					    IEEE80211_IFACE_ITER_NORMAL,
+					    iwl_mvm_d0i3_exit_work_iter,
+					    &iter_data);
+out:
+	iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
+
+	IWL_DEBUG_INFO(mvm, "d0i3 exit completed (wakeup reasons: 0x%x)\n",
+		       wakeup_reasons);
+
+	/* qos_seq might point inside resp_pkt, so free it only now */
+	if (get_status_cmd.resp_pkt)
+		iwl_free_resp(&get_status_cmd);
+
+	/* the FW might have updated the regdomain */
+	iwl_mvm_update_changed_regdom(mvm);
+
+	iwl_mvm_resume_tcm(mvm);
+	iwl_mvm_unref(mvm, IWL_MVM_REF_EXIT_WORK);
+	mutex_unlock(&mvm->mutex);
+}
+
+int _iwl_mvm_exit_d0i3(struct iwl_mvm *mvm)
+{
+	u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE |
+		    CMD_WAKE_UP_TRANS;
+	int ret;
+
+	IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n");
+
+	if (WARN_ON_ONCE(mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR))
+		return -EINVAL;
+
+	mutex_lock(&mvm->d0i3_suspend_mutex);
+	if (test_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags)) {
+		IWL_DEBUG_RPM(mvm, "Deferring d0i3 exit until resume\n");
+		__set_bit(D0I3_PENDING_WAKEUP, &mvm->d0i3_suspend_flags);
+		mutex_unlock(&mvm->d0i3_suspend_mutex);
+		return 0;
+	}
+	mutex_unlock(&mvm->d0i3_suspend_mutex);
+
+	ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
+	if (ret)
+		goto out;
+
+	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+						   IEEE80211_IFACE_ITER_NORMAL,
+						   iwl_mvm_exit_d0i3_iterator,
+						   mvm);
+out:
+	schedule_work(&mvm->d0i3_exit_work);
+	return ret;
+}
+
+int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
+{
+	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+
+	iwl_mvm_ref(mvm, IWL_MVM_REF_EXIT_WORK);
+	return _iwl_mvm_exit_d0i3(mvm);
+}
+
+#define IWL_MVM_COMMON_OPS					\
+	/* these could be differentiated */			\
+	.async_cb = iwl_mvm_async_cb,				\
+	.queue_full = iwl_mvm_stop_sw_queue,			\
+	.queue_not_full = iwl_mvm_wake_sw_queue,		\
+	.hw_rf_kill = iwl_mvm_set_hw_rfkill_state,		\
+	.free_skb = iwl_mvm_free_skb,				\
+	.nic_error = iwl_mvm_nic_error,				\
+	.cmd_queue_full = iwl_mvm_cmd_queue_full,		\
+	.nic_config = iwl_mvm_nic_config,			\
+	.enter_d0i3 = iwl_mvm_enter_d0i3,			\
+	.exit_d0i3 = iwl_mvm_exit_d0i3,				\
+	/* as we only register one, these MUST be common! */	\
+	.start = iwl_op_mode_mvm_start,				\
+	.stop = iwl_op_mode_mvm_stop
+
+static const struct iwl_op_mode_ops iwl_mvm_ops = {
+	IWL_MVM_COMMON_OPS,
+	.rx = iwl_mvm_rx,
+};
+
+static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
+			      struct napi_struct *napi,
+			      struct iwl_rx_cmd_buffer *rxb,
+			      unsigned int queue)
+{
+	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	u16 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
+
+	if (unlikely(cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)))
+		iwl_mvm_rx_frame_release(mvm, napi, rxb, queue);
+	else if (unlikely(cmd == WIDE_ID(DATA_PATH_GROUP,
+					 RX_QUEUES_NOTIFICATION)))
+		iwl_mvm_rx_queue_notif(mvm, rxb, queue);
+	else if (likely(cmd == WIDE_ID(LEGACY_GROUP, REPLY_RX_MPDU_CMD)))
+		iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue);
+}
+
+static const struct iwl_op_mode_ops iwl_mvm_ops_mq = {
+	IWL_MVM_COMMON_OPS,
+	.rx = iwl_mvm_rx_mq,
+	.rx_rss = iwl_mvm_rx_mq_rss,
+};
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
new file mode 100644
index 0000000..7f5434b
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
@@ -0,0 +1,326 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2017           Intel Deutschland GmbH
+ * Copyright(c) 2018           Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2018           Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <net/mac80211.h>
+#include "fw-api.h"
+#include "mvm.h"
+
+/* Maps the driver specific channel width definition to the fw values */
+u8 iwl_mvm_get_channel_width(struct cfg80211_chan_def *chandef)
+{
+	switch (chandef->width) {
+	case NL80211_CHAN_WIDTH_20_NOHT:
+	case NL80211_CHAN_WIDTH_20:
+		return PHY_VHT_CHANNEL_MODE20;
+	case NL80211_CHAN_WIDTH_40:
+		return PHY_VHT_CHANNEL_MODE40;
+	case NL80211_CHAN_WIDTH_80:
+		return PHY_VHT_CHANNEL_MODE80;
+	case NL80211_CHAN_WIDTH_160:
+		return PHY_VHT_CHANNEL_MODE160;
+	default:
+		WARN(1, "Invalid channel width=%u", chandef->width);
+		return PHY_VHT_CHANNEL_MODE20;
+	}
+}
+
+/*
+ * Maps the driver specific control channel position (relative to the center
+ * freq) definitions to the fw values
+ */
+u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef)
+{
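+	/*
+	 * Example: an 80 MHz chandef with center_freq1 = 5210 MHz and the
+	 * control channel at 5180 MHz gives an offset of -30, i.e.
+	 * PHY_VHT_CTRL_POS_2_BELOW.
+	 */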
+	switch (chandef->chan->center_freq - chandef->center_freq1) {
+	case -70:
+		return PHY_VHT_CTRL_POS_4_BELOW;
+	case -50:
+		return PHY_VHT_CTRL_POS_3_BELOW;
+	case -30:
+		return PHY_VHT_CTRL_POS_2_BELOW;
+	case -10:
+		return PHY_VHT_CTRL_POS_1_BELOW;
+	case  10:
+		return PHY_VHT_CTRL_POS_1_ABOVE;
+	case  30:
+		return PHY_VHT_CTRL_POS_2_ABOVE;
+	case  50:
+		return PHY_VHT_CTRL_POS_3_ABOVE;
+	case  70:
+		return PHY_VHT_CTRL_POS_4_ABOVE;
+	default:
+		WARN(1, "Invalid channel definition");
+	case 0:
+		/*
+		 * The FW is expected to check the control channel position only
+		 * when in HT/VHT and the channel width is not 20MHz. Return
+		 * this value as the default one.
+		 */
+		return PHY_VHT_CTRL_POS_1_BELOW;
+	}
+}
+
+/*
+ * Construct the generic fields of the PHY context command
+ */
+static void iwl_mvm_phy_ctxt_cmd_hdr(struct iwl_mvm_phy_ctxt *ctxt,
+				     struct iwl_phy_context_cmd *cmd,
+				     u32 action, u32 apply_time)
+{
+	memset(cmd, 0, sizeof(struct iwl_phy_context_cmd));
+
+	cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(ctxt->id,
+							    ctxt->color));
+	cmd->action = cpu_to_le32(action);
+	cmd->apply_time = cpu_to_le32(apply_time);
+}
+
+/*
+ * Add the phy configuration to the PHY context command
+ */
+static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
+				      struct iwl_phy_context_cmd *cmd,
+				      struct cfg80211_chan_def *chandef,
+				      u8 chains_static, u8 chains_dynamic)
+{
+	u8 active_cnt, idle_cnt;
+
+	/* Set the channel info data */
+	cmd->ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ?
+	      PHY_BAND_24 : PHY_BAND_5);
+
+	cmd->ci.channel = chandef->chan->hw_value;
+	cmd->ci.width = iwl_mvm_get_channel_width(chandef);
+	cmd->ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);
+
+	/* Set the rx chains */
+	idle_cnt = chains_static;
+	active_cnt = chains_dynamic;
+
+	/* In scenarios where we only ever use single-stream rates,
+	 * i.e. legacy 11b/g/a associations, single-stream APs or even
+	 * static SMPS, enable both chains to get diversity, improving
+	 * the case where we're far enough from the AP that attenuation
+	 * between the two antennas is sufficiently different to impact
+	 * performance.
+	 */
+	if (active_cnt == 1 && iwl_mvm_rx_diversity_allowed(mvm)) {
+		idle_cnt = 2;
+		active_cnt = 2;
+	}
+
+	cmd->rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) <<
+					PHY_RX_CHAIN_VALID_POS);
+	cmd->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
+	cmd->rxchain_info |= cpu_to_le32(active_cnt <<
+					 PHY_RX_CHAIN_MIMO_CNT_POS);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	if (unlikely(mvm->dbgfs_rx_phyinfo))
+		cmd->rxchain_info = cpu_to_le32(mvm->dbgfs_rx_phyinfo);
+#endif
+
+	cmd->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
+}
+
+/*
+ * Send a command to apply the current phy configuration. The command is sent
+ * only if something in the configuration changed: either this is the first
+ * time the phy configuration is applied, or it changed since the previous
+ * apply.
+ */
+static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
+				  struct iwl_mvm_phy_ctxt *ctxt,
+				  struct cfg80211_chan_def *chandef,
+				  u8 chains_static, u8 chains_dynamic,
+				  u32 action, u32 apply_time)
+{
+	struct iwl_phy_context_cmd cmd;
+	int ret;
+
+	/* Set the command header fields */
+	iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action, apply_time);
+
+	/* Set the command data */
+	iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef,
+				  chains_static, chains_dynamic);
+
+	ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, 0,
+				   sizeof(struct iwl_phy_context_cmd),
+				   &cmd);
+	if (ret)
+		IWL_ERR(mvm, "PHY ctxt cmd error. ret=%d\n", ret);
+	return ret;
+}
+
+/*
+ * Send a command to add a PHY context based on the current HW configuration.
+ */
+int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
+			 struct cfg80211_chan_def *chandef,
+			 u8 chains_static, u8 chains_dynamic)
+{
+	WARN_ON(!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+		ctxt->ref);
+	lockdep_assert_held(&mvm->mutex);
+
+	ctxt->channel = chandef->chan;
+
+	return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
+				      chains_static, chains_dynamic,
+				      FW_CTXT_ACTION_ADD, 0);
+}
+
+/*
+ * Update the number of references to the given PHY context. This is valid
+ * only if the PHY context was already created, i.e., its reference count > 0.
+ */
+void iwl_mvm_phy_ctxt_ref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
+{
+	lockdep_assert_held(&mvm->mutex);
+	ctxt->ref++;
+}
+
+/*
+ * Send a command to modify the PHY context based on the current HW
+ * configuration. Note that the function does not check that the configuration
+ * changed.
+ */
+int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
+			     struct cfg80211_chan_def *chandef,
+			     u8 chains_static, u8 chains_dynamic)
+{
+	enum iwl_ctxt_action action = FW_CTXT_ACTION_MODIFY;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
+	    ctxt->channel->band != chandef->chan->band) {
+		int ret;
+
+		/* ... remove it here ... */
+		ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
+					     chains_static, chains_dynamic,
+					     FW_CTXT_ACTION_REMOVE, 0);
+		if (ret)
+			return ret;
+
+		/* ... and proceed to add it again */
+		action = FW_CTXT_ACTION_ADD;
+	}
+
+	ctxt->channel = chandef->chan;
+	ctxt->width = chandef->width;
+	return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
+				      chains_static, chains_dynamic,
+				      action, 0);
+}
+
+void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
+{
+	lockdep_assert_held(&mvm->mutex);
+
+	if (WARN_ON_ONCE(!ctxt))
+		return;
+
+	ctxt->ref--;
+
+	/*
+	 * Move unused phys to a default channel. When the phy is moved, the
+	 * fw will clean up the immediate quiet bit if it was previously set;
+	 * otherwise we might not be able to reuse this phy.
+	 */
+	if (ctxt->ref == 0) {
+		struct ieee80211_channel *chan;
+		struct cfg80211_chan_def chandef;
+
+		chan = &mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[0];
+		cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
+		iwl_mvm_phy_ctxt_changed(mvm, ctxt, &chandef, 1, 1);
+	}
+}
+
+static void iwl_mvm_binding_iterator(void *_data, u8 *mac,
+				     struct ieee80211_vif *vif)
+{
+	unsigned long *data = _data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	if (!mvmvif->phy_ctxt)
+		return;
+
+	if (vif->type == NL80211_IFTYPE_STATION ||
+	    vif->type == NL80211_IFTYPE_AP)
+		__set_bit(mvmvif->phy_ctxt->id, data);
+}
+
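+/*
+ * Count the distinct PHY contexts bound to station/AP interfaces by
+ * collecting their ids into a bitmap and weighing it.
+ */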
+int iwl_mvm_phy_ctx_count(struct iwl_mvm *mvm)
+{
+	unsigned long phy_ctxt_counter = 0;
+
+	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+						   IEEE80211_IFACE_ITER_NORMAL,
+						   iwl_mvm_binding_iterator,
+						   &phy_ctxt_counter);
+
+	return hweight8(phy_ctxt_counter);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
new file mode 100644
index 0000000..c11fe26
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -0,0 +1,1063 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/etherdevice.h>
+
+#include <net/mac80211.h>
+
+#include "iwl-debug.h"
+#include "mvm.h"
+#include "iwl-modparams.h"
+#include "fw/api/power.h"
+
+#define POWER_KEEP_ALIVE_PERIOD_SEC    25
+
+static
+int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
+				   struct iwl_beacon_filter_cmd *cmd,
+				   u32 flags)
+{
+	IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n",
+			le32_to_cpu(cmd->ba_enable_beacon_abort));
+	IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n",
+			le32_to_cpu(cmd->ba_escape_timer));
+	IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n",
+			le32_to_cpu(cmd->bf_debug_flag));
+	IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n",
+			le32_to_cpu(cmd->bf_enable_beacon_filter));
+	IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n",
+			le32_to_cpu(cmd->bf_energy_delta));
+	IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n",
+			le32_to_cpu(cmd->bf_escape_timer));
+	IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n",
+			le32_to_cpu(cmd->bf_roaming_energy_delta));
+	IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n",
+			le32_to_cpu(cmd->bf_roaming_state));
+	IWL_DEBUG_POWER(mvm, "bf_temp_threshold is: %d\n",
+			le32_to_cpu(cmd->bf_temp_threshold));
+	IWL_DEBUG_POWER(mvm, "bf_temp_fast_filter is: %d\n",
+			le32_to_cpu(cmd->bf_temp_fast_filter));
+	IWL_DEBUG_POWER(mvm, "bf_temp_slow_filter is: %d\n",
+			le32_to_cpu(cmd->bf_temp_slow_filter));
+
+	return iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, flags,
+				    sizeof(struct iwl_beacon_filter_cmd), cmd);
+}
+
+static
+void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm,
+					  struct ieee80211_vif *vif,
+					  struct iwl_beacon_filter_cmd *cmd,
+					  bool d0i3)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	if (vif->bss_conf.cqm_rssi_thold && !d0i3) {
+		cmd->bf_energy_delta =
+			cpu_to_le32(vif->bss_conf.cqm_rssi_hyst);
+		/* fw uses an absolute value for this */
+		cmd->bf_roaming_state =
+			cpu_to_le32(-vif->bss_conf.cqm_rssi_thold);
+	}
+	cmd->ba_enable_beacon_abort = cpu_to_le32(mvmvif->bf_data.ba_enabled);
+}
+
+static void iwl_mvm_power_log(struct iwl_mvm *mvm,
+			      struct iwl_mac_power_cmd *cmd)
+{
+	IWL_DEBUG_POWER(mvm,
+			"Sending power table command on mac id 0x%X for power level %d, flags = 0x%X\n",
+			cmd->id_and_color, iwlmvm_mod_params.power_scheme,
+			le16_to_cpu(cmd->flags));
+	IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n",
+			le16_to_cpu(cmd->keep_alive_seconds));
+
+	if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) {
+		IWL_DEBUG_POWER(mvm, "Disable power management\n");
+		return;
+	}
+
+	IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n",
+			le32_to_cpu(cmd->rx_data_timeout));
+	IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
+			le32_to_cpu(cmd->tx_data_timeout));
+	if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK))
+		IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n",
+				cmd->skip_dtim_periods);
+	if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
+		IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
+				cmd->lprx_rssi_threshold);
+	if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
+		IWL_DEBUG_POWER(mvm, "uAPSD enabled\n");
+		IWL_DEBUG_POWER(mvm, "Rx timeout (uAPSD) = %u usec\n",
+				le32_to_cpu(cmd->rx_data_timeout_uapsd));
+		IWL_DEBUG_POWER(mvm, "Tx timeout (uAPSD) = %u usec\n",
+				le32_to_cpu(cmd->tx_data_timeout_uapsd));
+		IWL_DEBUG_POWER(mvm, "QNDP TID = %d\n", cmd->qndp_tid);
+		IWL_DEBUG_POWER(mvm, "ACs flags = 0x%x\n", cmd->uapsd_ac_flags);
+		IWL_DEBUG_POWER(mvm, "Max SP = %d\n", cmd->uapsd_max_sp);
+	}
+}
+
+static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
+					  struct ieee80211_vif *vif,
+					  struct iwl_mac_power_cmd *cmd)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	enum ieee80211_ac_numbers ac;
+	bool tid_found = false;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	/* set advanced pm flag with no uapsd ACs to enable ps-poll */
+	if (mvmvif->dbgfs_pm.use_ps_poll) {
+		cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
+		return;
+	}
+#endif
+
+	for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_BK; ac++) {
+		if (!mvmvif->queue_params[ac].uapsd)
+			continue;
+
+		if (mvm->fwrt.cur_fw_img != IWL_UCODE_WOWLAN)
+			cmd->flags |=
+				cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
+
+		cmd->uapsd_ac_flags |= BIT(ac);
+
+		/* QNDP TID - the highest TID with no admission control */
+		if (!tid_found && !mvmvif->queue_params[ac].acm) {
+			tid_found = true;
+			switch (ac) {
+			case IEEE80211_AC_VO:
+				cmd->qndp_tid = 6;
+				break;
+			case IEEE80211_AC_VI:
+				cmd->qndp_tid = 5;
+				break;
+			case IEEE80211_AC_BE:
+				cmd->qndp_tid = 0;
+				break;
+			case IEEE80211_AC_BK:
+				cmd->qndp_tid = 1;
+				break;
+			}
+		}
+	}
+
+	cmd->flags |= cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK);
+
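+	/* snooze can be enabled only when all four ACs are uAPSD */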
+	if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) |
+				    BIT(IEEE80211_AC_VI) |
+				    BIT(IEEE80211_AC_BE) |
+				    BIT(IEEE80211_AC_BK))) {
+		cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
+		cmd->snooze_interval = cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL);
+		cmd->snooze_window =
+			(mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN) ?
+				cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) :
+				cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW);
+	}
+
+	cmd->uapsd_max_sp = mvm->hw->uapsd_max_sp_len;
+
+	if (mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN || cmd->flags &
+	    cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
+		cmd->rx_data_timeout_uapsd =
+			cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
+		cmd->tx_data_timeout_uapsd =
+			cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
+	} else {
+		cmd->rx_data_timeout_uapsd =
+			cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT);
+		cmd->tx_data_timeout_uapsd =
+			cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT);
+	}
+
+	if (cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
+		cmd->heavy_tx_thld_packets =
+			IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS;
+		cmd->heavy_rx_thld_packets =
+			IWL_MVM_PS_SNOOZE_HEAVY_RX_THLD_PACKETS;
+	} else {
+		cmd->heavy_tx_thld_packets =
+			IWL_MVM_PS_HEAVY_TX_THLD_PACKETS;
+		cmd->heavy_rx_thld_packets =
+			IWL_MVM_PS_HEAVY_RX_THLD_PACKETS;
+	}
+	cmd->heavy_tx_thld_percentage =
+		IWL_MVM_PS_HEAVY_TX_THLD_PERCENT;
+	cmd->heavy_rx_thld_percentage =
+		IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
+}
+
+static void iwl_mvm_p2p_standalone_iterator(void *_data, u8 *mac,
+					    struct ieee80211_vif *vif)
+{
+	bool *is_p2p_standalone = _data;
+
+	switch (ieee80211_vif_type_p2p(vif)) {
+	case NL80211_IFTYPE_P2P_GO:
+	case NL80211_IFTYPE_AP:
+		*is_p2p_standalone = false;
+		break;
+	case NL80211_IFTYPE_STATION:
+		if (vif->bss_conf.assoc)
+			*is_p2p_standalone = false;
+		break;
+
+	default:
+		break;
+	}
+}
+
+static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
+				       struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	if (!memcmp(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid,
+		    ETH_ALEN))
+		return false;
+
+	/*
+	 * Avoid using uAPSD if the P2P client is associated to a GO that
+	 * uses opportunistic power save, due to a current FW limitation.
+	 */
+	if (vif->p2p &&
+	    (vif->bss_conf.p2p_noa_attr.oppps_ctwindow &
+	    IEEE80211_P2P_OPPPS_ENABLE_BIT))
+		return false;
+
+	/*
+	 * Avoid using uAPSD if the client is in DCM (two PHY contexts
+	 * in use) - this causes a low latency issue in Miracast.
+	 */
+	if (iwl_mvm_phy_ctx_count(mvm) >= 2)
+		return false;
+
+	if (vif->p2p) {
+		/* Allow U-APSD only if p2p is standalone */
+		bool is_p2p_standalone = true;
+
+		if (!iwl_mvm_is_p2p_scm_uapsd_supported(mvm))
+			return false;
+
+		ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+					IEEE80211_IFACE_ITER_NORMAL,
+					iwl_mvm_p2p_standalone_iterator,
+					&is_p2p_standalone);
+
+		if (!is_p2p_standalone)
+			return false;
+	}
+
+	return true;
+}
+
+static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif)
+{
+	struct ieee80211_chanctx_conf *chanctx_conf;
+	struct ieee80211_channel *chan;
+	bool radar_detect = false;
+
+	rcu_read_lock();
+	chanctx_conf = rcu_dereference(vif->chanctx_conf);
+	WARN_ON(!chanctx_conf);
+	if (chanctx_conf) {
+		chan = chanctx_conf->def.chan;
+		radar_detect = chan->flags & IEEE80211_CHAN_RADAR;
+	}
+	rcu_read_unlock();
+
+	return radar_detect;
+}
+
+static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm,
+					   struct ieee80211_vif *vif,
+					   struct iwl_mac_power_cmd *cmd,
+					   bool host_awake)
+{
+	int dtimper = vif->bss_conf.dtim_period ?: 1;
+	int skip;
+
+	/* disable, in case we're supposed to override */
+	cmd->skip_dtim_periods = 0;
+	cmd->flags &= ~cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+
+	if (iwl_mvm_power_is_radar(vif))
+		return;
+
+	if (dtimper >= 10)
+		return;
+
+	/* TODO: check that multicast wake lock is off */
+
+	if (host_awake) {
+		if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_LP)
+			return;
+		skip = 2;
+	} else {
+		int dtimper_tu = dtimper * vif->bss_conf.beacon_int;
+
+		if (WARN_ON(!dtimper_tu))
+			return;
+		/* configure skip over dtim up to 306TU - 314 msec */
+		skip = max_t(u8, 1, 306 / dtimper_tu);
+	}
+
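+	/* e.g. DTIM 2 at beacon_int 100 TU: dtimper_tu = 200,
+	 * skip = max(1, 306 / 200) = 1, so 2 is sent below
+	 */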
+	/* the firmware really expects "look at every X DTIMs", so add 1 */
+	cmd->skip_dtim_periods = 1 + skip;
+	cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+}
+
+static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
+				    struct ieee80211_vif *vif,
+				    struct iwl_mac_power_cmd *cmd,
+				    bool host_awake)
+{
+	int dtimper, bi;
+	int keep_alive;
+	struct iwl_mvm_vif *mvmvif __maybe_unused =
+		iwl_mvm_vif_from_mac80211(vif);
+
+	cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+							    mvmvif->color));
+	dtimper = vif->bss_conf.dtim_period;
+	bi = vif->bss_conf.beacon_int;
+
+	/*
+	 * Regardless of the power management state, the driver must set
+	 * the keep alive period. The FW will use it for sending keep
+	 * alive NDPs immediately after association. Make sure the keep
+	 * alive period is at least 3 * DTIM.
+	 */
+	keep_alive = DIV_ROUND_UP(ieee80211_tu_to_usec(3 * dtimper * bi),
+				  USEC_PER_SEC);
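+	/* e.g. DTIM 1 at beacon_int 100 TU: 300 TU = 307200 usec, which
+	 * rounds up to 1 sec and is then raised to the 25 sec floor below
+	 */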
+	keep_alive = max(keep_alive, POWER_KEEP_ALIVE_PERIOD_SEC);
+	cmd->keep_alive_seconds = cpu_to_le16(keep_alive);
+
+	if (mvm->ps_disabled)
+		return;
+
+	cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
+
+	if (!vif->bss_conf.ps || !mvmvif->pm_enabled)
+		return;
+
+	if (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p &&
+	    (!fw_has_capa(&mvm->fw->ucode_capa,
+			 IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS) ||
+	     !IWL_MVM_P2P_LOWLATENCY_PS_ENABLE))
+		return;
+
+	cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
+
+	if (vif->bss_conf.beacon_rate &&
+	    (vif->bss_conf.beacon_rate->bitrate == 10 ||
+	     vif->bss_conf.beacon_rate->bitrate == 60)) {
+		cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
+		cmd->lprx_rssi_threshold = POWER_LPRX_RSSI_THRESHOLD;
+	}
+
+	iwl_mvm_power_config_skip_dtim(mvm, vif, cmd, host_awake);
+
+	if (!host_awake) {
+		cmd->rx_data_timeout =
+			cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
+		cmd->tx_data_timeout =
+			cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
+	} else if (iwl_mvm_vif_low_latency(mvmvif) && vif->p2p &&
+		   fw_has_capa(&mvm->fw->ucode_capa,
+			       IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS)) {
+		cmd->tx_data_timeout =
+			cpu_to_le32(IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT);
+		cmd->rx_data_timeout =
+			cpu_to_le32(IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT);
+	} else {
+		cmd->rx_data_timeout =
+			cpu_to_le32(IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT);
+		cmd->tx_data_timeout =
+			cpu_to_le32(IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT);
+	}
+
+	if (iwl_mvm_power_allow_uapsd(mvm, vif))
+		iwl_mvm_power_configure_uapsd(mvm, vif, cmd);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE)
+		cmd->keep_alive_seconds =
+			cpu_to_le16(mvmvif->dbgfs_pm.keep_alive_seconds);
+	if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_OVER_DTIM) {
+		if (mvmvif->dbgfs_pm.skip_over_dtim)
+			cmd->flags |=
+				cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+		else
+			cmd->flags &=
+				cpu_to_le16(~POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+	}
+	if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_RX_DATA_TIMEOUT)
+		cmd->rx_data_timeout =
+			cpu_to_le32(mvmvif->dbgfs_pm.rx_data_timeout);
+	if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_TX_DATA_TIMEOUT)
+		cmd->tx_data_timeout =
+			cpu_to_le32(mvmvif->dbgfs_pm.tx_data_timeout);
+	if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS)
+		cmd->skip_dtim_periods = mvmvif->dbgfs_pm.skip_dtim_periods;
+	if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_ENA) {
+		if (mvmvif->dbgfs_pm.lprx_ena)
+			cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
+		else
+			cmd->flags &= cpu_to_le16(~POWER_FLAGS_LPRX_ENA_MSK);
+	}
+	if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD)
+		cmd->lprx_rssi_threshold = mvmvif->dbgfs_pm.lprx_rssi_threshold;
+	if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SNOOZE_ENABLE) {
+		if (mvmvif->dbgfs_pm.snooze_ena)
+			cmd->flags |=
+				cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
+		else
+			cmd->flags &=
+				cpu_to_le16(~POWER_FLAGS_SNOOZE_ENA_MSK);
+	}
+	if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_UAPSD_MISBEHAVING) {
+		u16 flag = POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK;
+		if (mvmvif->dbgfs_pm.uapsd_misbehaving)
+			cmd->flags |= cpu_to_le16(flag);
+		else
+			cmd->flags &= cpu_to_le16(~flag);
+	}
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
+}
+
+static int iwl_mvm_power_send_cmd(struct iwl_mvm *mvm,
+					 struct ieee80211_vif *vif)
+{
+	struct iwl_mac_power_cmd cmd = {};
+
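+	/* the host is considered awake unless the WoWLAN image is running */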
+	iwl_mvm_power_build_cmd(mvm, vif, &cmd,
+				mvm->fwrt.cur_fw_img != IWL_UCODE_WOWLAN);
+	iwl_mvm_power_log(mvm, &cmd);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	memcpy(&iwl_mvm_vif_from_mac80211(vif)->mac_pwr_cmd, &cmd, sizeof(cmd));
+#endif
+
+	return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, 0,
+				    sizeof(cmd), &cmd);
+}
+
+int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
+{
+	struct iwl_device_power_cmd cmd = {
+		.flags = 0,
+	};
+
+	if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
+		mvm->ps_disabled = true;
+
+	if (!mvm->ps_disabled)
+		cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	if ((mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN) ?
+			mvm->disable_power_off_d3 : mvm->disable_power_off)
+		cmd.flags &=
+			cpu_to_le16(~DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
+#endif
+	IWL_DEBUG_POWER(mvm,
+			"Sending device power command with flags = 0x%X\n",
+			cmd.flags);
+
+	return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, 0, sizeof(cmd),
+				    &cmd);
+}
+
+void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	if (memcmp(vif->bss_conf.bssid, mvmvif->uapsd_misbehaving_bssid,
+		   ETH_ALEN))
+		eth_zero_addr(mvmvif->uapsd_misbehaving_bssid);
+}
+
+static void iwl_mvm_power_uapsd_misbehav_ap_iterator(void *_data, u8 *mac,
+						     struct ieee80211_vif *vif)
+{
+	u8 *ap_sta_id = _data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	/* The ap_sta_id is not expected to change during the current
+	 * association, so no explicit protection is needed.
+	 */
+	if (mvmvif->ap_sta_id == *ap_sta_id)
+		memcpy(mvmvif->uapsd_misbehaving_bssid, vif->bss_conf.bssid,
+		       ETH_ALEN);
+}
+
+void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
+					      struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_uapsd_misbehaving_ap_notif *notif = (void *)pkt->data;
+	u8 ap_sta_id = le32_to_cpu(notif->sta_id);
+
+	ieee80211_iterate_active_interfaces_atomic(
+		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+		iwl_mvm_power_uapsd_misbehav_ap_iterator, &ap_sta_id);
+}
+
+struct iwl_power_vifs {
+	struct iwl_mvm *mvm;
+	struct ieee80211_vif *bss_vif;
+	struct ieee80211_vif *p2p_vif;
+	struct ieee80211_vif *ap_vif;
+	struct ieee80211_vif *monitor_vif;
+	bool p2p_active;
+	bool bss_active;
+	bool ap_active;
+	bool monitor_active;
+};
+
+static void iwl_mvm_power_disable_pm_iterator(void *_data, u8 *mac,
+					      struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	mvmvif->pm_enabled = false;
+}
+
+static void iwl_mvm_power_ps_disabled_iterator(void *_data, u8 *mac,
+					       struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	bool *disable_ps = _data;
+
+	if (mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < NUM_PHY_CTX)
+		*disable_ps |= mvmvif->ps_disabled;
+}
+
+static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac,
+					    struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_power_vifs *power_iterator = _data;
+	bool active = mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < NUM_PHY_CTX;
+
+	switch (ieee80211_vif_type_p2p(vif)) {
+	case NL80211_IFTYPE_P2P_DEVICE:
+		break;
+
+	case NL80211_IFTYPE_P2P_GO:
+	case NL80211_IFTYPE_AP:
+		/* only a single MAC of the same type */
+		WARN_ON(power_iterator->ap_vif);
+		power_iterator->ap_vif = vif;
+		if (active)
+			power_iterator->ap_active = true;
+		break;
+
+	case NL80211_IFTYPE_MONITOR:
+		/* only a single MAC of the same type */
+		WARN_ON(power_iterator->monitor_vif);
+		power_iterator->monitor_vif = vif;
+		if (active)
+			power_iterator->monitor_active = true;
+		break;
+
+	case NL80211_IFTYPE_P2P_CLIENT:
+		/* only a single MAC of the same type */
+		WARN_ON(power_iterator->p2p_vif);
+		power_iterator->p2p_vif = vif;
+		if (active)
+			power_iterator->p2p_active = true;
+		break;
+
+	case NL80211_IFTYPE_STATION:
+		power_iterator->bss_vif = vif;
+		if (active)
+			power_iterator->bss_active = true;
+		break;
+
+	default:
+		break;
+	}
+}
+
+static void iwl_mvm_power_set_pm(struct iwl_mvm *mvm,
+				 struct iwl_power_vifs *vifs)
+{
+	struct iwl_mvm_vif *bss_mvmvif = NULL;
+	struct iwl_mvm_vif *p2p_mvmvif = NULL;
+	struct iwl_mvm_vif *ap_mvmvif = NULL;
+	bool client_same_channel = false;
+	bool ap_same_channel = false;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	/* set pm_enabled to false on all vifs */
+	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+					IEEE80211_IFACE_ITER_NORMAL,
+					iwl_mvm_power_disable_pm_iterator,
+					NULL);
+
+	if (vifs->bss_vif)
+		bss_mvmvif = iwl_mvm_vif_from_mac80211(vifs->bss_vif);
+
+	if (vifs->p2p_vif)
+		p2p_mvmvif = iwl_mvm_vif_from_mac80211(vifs->p2p_vif);
+
+	if (vifs->ap_vif)
+		ap_mvmvif = iwl_mvm_vif_from_mac80211(vifs->ap_vif);
+
+	/* don't allow PM if any TDLS stations exist */
+	if (iwl_mvm_tdls_sta_count(mvm, NULL))
+		return;
+
+	/* enable PM on the BSS vif if it is stand-alone */
+	if (vifs->bss_active && !vifs->p2p_active && !vifs->ap_active) {
+		bss_mvmvif->pm_enabled = true;
+		return;
+	}
+
+	/* enable PM on the P2P client vif if it is stand-alone */
+	if (vifs->p2p_active && !vifs->bss_active && !vifs->ap_active) {
+		p2p_mvmvif->pm_enabled = true;
+		return;
+	}
+
+	if (vifs->bss_active && vifs->p2p_active)
+		client_same_channel = (bss_mvmvif->phy_ctxt->id ==
+				       p2p_mvmvif->phy_ctxt->id);
+	if (vifs->bss_active && vifs->ap_active)
+		ap_same_channel = (bss_mvmvif->phy_ctxt->id ==
+				   ap_mvmvif->phy_ctxt->id);
+
+	/* the clients are not stand-alone: enable PM only if in DCM */
+	if (!(client_same_channel || ap_same_channel)) {
+		if (vifs->bss_active)
+			bss_mvmvif->pm_enabled = true;
+		if (vifs->p2p_active)
+			p2p_mvmvif->pm_enabled = true;
+		return;
+	}
+
+	/*
+	 * There is only one channel in the system and there are only
+	 * bss and p2p clients that share it
+	 */
+	if (client_same_channel && !vifs->ap_active) {
+		/* the BSS and P2P clients share the same channel */
+		bss_mvmvif->pm_enabled = true;
+		p2p_mvmvif->pm_enabled = true;
+	}
+}
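+
+/*
+ * Illustration of the policy above: a BSS client on PHY 0 and a P2P
+ * client on PHY 1 are in DCM, so both get PM; if they instead share
+ * PHY 0 and no AP/GO is active, the "single channel, clients only"
+ * case applies and both still get PM. Any TDLS station keeps PM
+ * disabled on all vifs, and an AP/GO sharing the BSS client's channel
+ * keeps it disabled as well.
+ */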
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
+				 struct ieee80211_vif *vif, char *buf,
+				 int bufsz)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mac_power_cmd cmd = {};
+	int pos = 0;
+
+	mutex_lock(&mvm->mutex);
+	memcpy(&cmd, &mvmvif->mac_pwr_cmd, sizeof(cmd));
+	mutex_unlock(&mvm->mutex);
+
+	pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
+			 iwlmvm_mod_params.power_scheme);
+	pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
+			 le16_to_cpu(cmd.flags));
+	pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n",
+			 le16_to_cpu(cmd.keep_alive_seconds));
+
+	if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)))
+		return pos;
+
+	pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
+			 (cmd.flags &
+			 cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ? 1 : 0);
+	pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
+			 cmd.skip_dtim_periods);
+	if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) {
+		pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n",
+				 le32_to_cpu(cmd.rx_data_timeout));
+		pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n",
+				 le32_to_cpu(cmd.tx_data_timeout));
+	}
+	if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
+		pos += scnprintf(buf+pos, bufsz-pos,
+				 "lprx_rssi_threshold = %d\n",
+				 cmd.lprx_rssi_threshold);
+
+	if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)))
+		return pos;
+
+	pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout_uapsd = %d\n",
+			 le32_to_cpu(cmd.rx_data_timeout_uapsd));
+	pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout_uapsd = %d\n",
+			 le32_to_cpu(cmd.tx_data_timeout_uapsd));
+	pos += scnprintf(buf+pos, bufsz-pos, "qndp_tid = %d\n", cmd.qndp_tid);
+	pos += scnprintf(buf+pos, bufsz-pos, "uapsd_ac_flags = 0x%x\n",
+			 cmd.uapsd_ac_flags);
+	pos += scnprintf(buf+pos, bufsz-pos, "uapsd_max_sp = %d\n",
+			 cmd.uapsd_max_sp);
+	pos += scnprintf(buf+pos, bufsz-pos, "heavy_tx_thld_packets = %d\n",
+			 cmd.heavy_tx_thld_packets);
+	pos += scnprintf(buf+pos, bufsz-pos, "heavy_rx_thld_packets = %d\n",
+			 cmd.heavy_rx_thld_packets);
+	pos += scnprintf(buf+pos, bufsz-pos, "heavy_tx_thld_percentage = %d\n",
+			 cmd.heavy_tx_thld_percentage);
+	pos += scnprintf(buf+pos, bufsz-pos, "heavy_rx_thld_percentage = %d\n",
+			 cmd.heavy_rx_thld_percentage);
+	pos += scnprintf(buf+pos, bufsz-pos, "uapsd_misbehaving_enable = %d\n",
+			 (cmd.flags &
+			  cpu_to_le16(POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK)) ?
+			 1 : 0);
+
+	if (!(cmd.flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)))
+		return pos;
+
+	pos += scnprintf(buf+pos, bufsz-pos, "snooze_interval = %d\n",
+			 cmd.snooze_interval);
+	pos += scnprintf(buf+pos, bufsz-pos, "snooze_window = %d\n",
+			 cmd.snooze_window);
+
+	return pos;
+}
+
+void
+iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
+					 struct iwl_beacon_filter_cmd *cmd)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf;
+
+	if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ENERGY_DELTA)
+		cmd->bf_energy_delta = cpu_to_le32(dbgfs_bf->bf_energy_delta);
+	if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA)
+		cmd->bf_roaming_energy_delta =
+				cpu_to_le32(dbgfs_bf->bf_roaming_energy_delta);
+	if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_STATE)
+		cmd->bf_roaming_state = cpu_to_le32(dbgfs_bf->bf_roaming_state);
+	if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_THRESHOLD)
+		cmd->bf_temp_threshold =
+				cpu_to_le32(dbgfs_bf->bf_temp_threshold);
+	if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_FAST_FILTER)
+		cmd->bf_temp_fast_filter =
+				cpu_to_le32(dbgfs_bf->bf_temp_fast_filter);
+	if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_SLOW_FILTER)
+		cmd->bf_temp_slow_filter =
+				cpu_to_le32(dbgfs_bf->bf_temp_slow_filter);
+	if (dbgfs_bf->mask & MVM_DEBUGFS_BF_DEBUG_FLAG)
+		cmd->bf_debug_flag = cpu_to_le32(dbgfs_bf->bf_debug_flag);
+	if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ESCAPE_TIMER)
+		cmd->bf_escape_timer = cpu_to_le32(dbgfs_bf->bf_escape_timer);
+	if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ESCAPE_TIMER)
+		cmd->ba_escape_timer = cpu_to_le32(dbgfs_bf->ba_escape_timer);
+	if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT)
+		cmd->ba_enable_beacon_abort =
+				cpu_to_le32(dbgfs_bf->ba_enable_beacon_abort);
+}
+#endif
+
+static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
+					 struct ieee80211_vif *vif,
+					 struct iwl_beacon_filter_cmd *cmd,
+					 u32 cmd_flags,
+					 bool d0i3)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	int ret;
+
+	if (mvmvif != mvm->bf_allowed_vif || !vif->bss_conf.dtim_period ||
+	    vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+		return 0;
+
+	iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd, d0i3);
+	if (!d0i3)
+		iwl_mvm_beacon_filter_debugfs_parameters(vif, cmd);
+	ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd, cmd_flags);
+
+	/* don't change bf_enabled in case of temporary d0i3 configuration */
+	if (!ret && !d0i3)
+		mvmvif->bf_data.bf_enabled = true;
+
+	return ret;
+}
+
+int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
+				 struct ieee80211_vif *vif,
+				 u32 flags)
+{
+	struct iwl_beacon_filter_cmd cmd = {
+		IWL_BF_CMD_CONFIG_DEFAULTS,
+		.bf_enable_beacon_filter = cpu_to_le32(1),
+	};
+
+	return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags, false);
+}
+
+static int _iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
+					  struct ieee80211_vif *vif,
+					  u32 flags, bool d0i3)
+{
+	struct iwl_beacon_filter_cmd cmd = {};
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	int ret;
+
+	if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+		return 0;
+
+	ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags);
+
+	/* don't change bf_enabled in case of temporary d0i3 configuration */
+	if (!ret && !d0i3)
+		mvmvif->bf_data.bf_enabled = false;
+
+	return ret;
+}
+
+int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
+				  struct ieee80211_vif *vif,
+				  u32 flags)
+{
+	return _iwl_mvm_disable_beacon_filter(mvm, vif, flags, false);
+}
+
+static int iwl_mvm_power_set_ps(struct iwl_mvm *mvm)
+{
+	bool disable_ps;
+	int ret;
+
+	/* disable PS if in CAM (continuously active mode) */
+	disable_ps = (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM);
+	/* ...or if any of the vifs require PS to be off */
+	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+					IEEE80211_IFACE_ITER_NORMAL,
+					iwl_mvm_power_ps_disabled_iterator,
+					&disable_ps);
+
+	/* update device power state if it has changed */
+	if (mvm->ps_disabled != disable_ps) {
+		bool old_ps_disabled = mvm->ps_disabled;
+
+		mvm->ps_disabled = disable_ps;
+		ret = iwl_mvm_power_update_device(mvm);
+		if (ret) {
+			mvm->ps_disabled = old_ps_disabled;
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm,
+				struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_beacon_filter_cmd cmd = {
+		IWL_BF_CMD_CONFIG_DEFAULTS,
+		.bf_enable_beacon_filter = cpu_to_le32(1),
+	};
+
+	if (!mvmvif->bf_data.bf_enabled)
+		return 0;
+
+	if (mvm->fwrt.cur_fw_img == IWL_UCODE_WOWLAN)
+		cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
+
+	mvmvif->bf_data.ba_enabled = !(!mvmvif->pm_enabled ||
+				       mvm->ps_disabled ||
+				       !vif->bss_conf.ps ||
+				       iwl_mvm_vif_low_latency(mvmvif));
+
+	return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0, false);
+}
+
+int iwl_mvm_power_update_ps(struct iwl_mvm *mvm)
+{
+	struct iwl_power_vifs vifs = {
+		.mvm = mvm,
+	};
+	int ret;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	/* get vifs info */
+	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+					IEEE80211_IFACE_ITER_NORMAL,
+					iwl_mvm_power_get_vifs_iterator, &vifs);
+
+	ret = iwl_mvm_power_set_ps(mvm);
+	if (ret)
+		return ret;
+
+	if (vifs.bss_vif)
+		return iwl_mvm_power_set_ba(mvm, vifs.bss_vif);
+
+	return 0;
+}
+
+int iwl_mvm_power_update_mac(struct iwl_mvm *mvm)
+{
+	struct iwl_power_vifs vifs = {
+		.mvm = mvm,
+	};
+	int ret;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	/* get vifs info */
+	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+					IEEE80211_IFACE_ITER_NORMAL,
+					iwl_mvm_power_get_vifs_iterator, &vifs);
+
+	iwl_mvm_power_set_pm(mvm, &vifs);
+
+	ret = iwl_mvm_power_set_ps(mvm);
+	if (ret)
+		return ret;
+
+	if (vifs.bss_vif) {
+		ret = iwl_mvm_power_send_cmd(mvm, vifs.bss_vif);
+		if (ret)
+			return ret;
+	}
+
+	if (vifs.p2p_vif) {
+		ret = iwl_mvm_power_send_cmd(mvm, vifs.p2p_vif);
+		if (ret)
+			return ret;
+	}
+
+	if (vifs.bss_vif)
+		return iwl_mvm_power_set_ba(mvm, vifs.bss_vif);
+
+	return 0;
+}
+
+int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
+				   struct ieee80211_vif *vif,
+				   bool enable, u32 flags)
+{
+	int ret;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mac_power_cmd cmd = {};
+
+	if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+		return 0;
+
+	if (!vif->bss_conf.assoc)
+		return 0;
+
+	iwl_mvm_power_build_cmd(mvm, vif, &cmd, !enable);
+
+	iwl_mvm_power_log(mvm, &cmd);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	memcpy(&mvmvif->mac_pwr_cmd, &cmd, sizeof(cmd));
+#endif
+	ret = iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, flags,
+				   sizeof(cmd), &cmd);
+	if (ret)
+		return ret;
+
+	/* configure beacon filtering */
+	if (mvmvif != mvm->bf_allowed_vif)
+		return 0;
+
+	if (enable) {
+		struct iwl_beacon_filter_cmd cmd_bf = {
+			IWL_BF_CMD_CONFIG_D0I3,
+			.bf_enable_beacon_filter = cpu_to_le32(1),
+		};
+		/*
+		 * When beacon storing is supported, disable beacon filtering
+		 * altogether - the latest beacon will be sent when exiting
+		 * d0i3
+		 */
+		if (fw_has_capa(&mvm->fw->ucode_capa,
+				IWL_UCODE_TLV_CAPA_BEACON_STORING))
+			ret = _iwl_mvm_disable_beacon_filter(mvm, vif, flags,
+							     true);
+		else
+			ret = _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd_bf,
+							    flags, true);
+	} else {
+		if (mvmvif->bf_data.bf_enabled)
+			ret = iwl_mvm_enable_beacon_filter(mvm, vif, flags);
+		else
+			ret = iwl_mvm_disable_beacon_filter(mvm, vif, flags);
+	}
+
+	return ret;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/quota.c b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
new file mode 100644
index 0000000..690559b
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/quota.c
@@ -0,0 +1,359 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <net/mac80211.h>
+#include "fw-api.h"
+#include "mvm.h"
+
+#define QUOTA_100	IWL_MVM_MAX_QUOTA
+#define QUOTA_LOWLAT_MIN ((QUOTA_100 * IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT) / 100)
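+
+/*
+ * E.g. assuming the driver defaults of IWL_MVM_MAX_QUOTA = 128
+ * fragments and IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT = 64, a lone
+ * low-latency binding is guaranteed QUOTA_LOWLAT_MIN =
+ * (128 * 64) / 100 = 81 fragments of the scheduling session.
+ */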
+
+struct iwl_mvm_quota_iterator_data {
+	int n_interfaces[MAX_BINDINGS];
+	int colors[MAX_BINDINGS];
+	int low_latency[MAX_BINDINGS];
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	int dbgfs_min[MAX_BINDINGS];
+#endif
+	int n_low_latency_bindings;
+	struct ieee80211_vif *disabled_vif;
+};
+
+static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
+				   struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_quota_iterator_data *data = _data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	u16 id;
+
+	/* skip disabled interfaces here immediately */
+	if (vif == data->disabled_vif)
+		return;
+
+	if (!mvmvif->phy_ctxt)
+		return;
+
+	/* currently, PHY ID == binding ID */
+	id = mvmvif->phy_ctxt->id;
+
+	/* need at least one binding per PHY */
+	BUILD_BUG_ON(NUM_PHY_CTX > MAX_BINDINGS);
+
+	if (WARN_ON_ONCE(id >= MAX_BINDINGS))
+		return;
+
+	switch (vif->type) {
+	case NL80211_IFTYPE_STATION:
+		if (vif->bss_conf.assoc)
+			break;
+		return;
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_ADHOC:
+		if (mvmvif->ap_ibss_active)
+			break;
+		return;
+	case NL80211_IFTYPE_MONITOR:
+		if (mvmvif->monitor_active)
+			break;
+		return;
+	case NL80211_IFTYPE_P2P_DEVICE:
+		return;
+	default:
+		WARN_ON_ONCE(1);
+		return;
+	}
+
+	if (data->colors[id] < 0)
+		data->colors[id] = mvmvif->phy_ctxt->color;
+	else
+		WARN_ON_ONCE(data->colors[id] != mvmvif->phy_ctxt->color);
+
+	data->n_interfaces[id]++;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	if (mvmvif->dbgfs_quota_min)
+		data->dbgfs_min[id] = max(data->dbgfs_min[id],
+					  mvmvif->dbgfs_quota_min);
+#endif
+
+	if (iwl_mvm_vif_low_latency(mvmvif) && !data->low_latency[id]) {
+		data->n_low_latency_bindings++;
+		data->low_latency[id] = true;
+	}
+}
+
+static void iwl_mvm_adjust_quota_for_noa(struct iwl_mvm *mvm,
+					 struct iwl_time_quota_cmd *cmd)
+{
+#ifdef CONFIG_NL80211_TESTMODE
+	struct iwl_mvm_vif *mvmvif;
+	int i, phy_id = -1, beacon_int = 0;
+
+	if (!mvm->noa_duration || !mvm->noa_vif)
+		return;
+
+	mvmvif = iwl_mvm_vif_from_mac80211(mvm->noa_vif);
+	if (!mvmvif->ap_ibss_active)
+		return;
+
+	phy_id = mvmvif->phy_ctxt->id;
+	beacon_int = mvm->noa_vif->bss_conf.beacon_int;
+
+	for (i = 0; i < MAX_BINDINGS; i++) {
+		struct iwl_time_quota_data *data =
+					iwl_mvm_quota_cmd_get_quota(mvm, cmd,
+								    i);
+		u32 id_n_c = le32_to_cpu(data->id_and_color);
+		u32 id = (id_n_c & FW_CTXT_ID_MSK) >> FW_CTXT_ID_POS;
+		u32 quota = le32_to_cpu(data->quota);
+
+		if (id != phy_id)
+			continue;
+
+		quota *= (beacon_int - mvm->noa_duration);
+		quota /= beacon_int;
+
+		IWL_DEBUG_QUOTA(mvm, "quota: adjust for NoA from %d to %d\n",
+				le32_to_cpu(data->quota), quota);
+
+		data->quota = cpu_to_le32(quota);
+	}
+#endif
+}
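+
+/*
+ * NoA scaling example: with a beacon interval of 100 TU and a NoA
+ * duration of 25 TU, the matching binding's quota above is multiplied
+ * by (100 - 25) / 100, i.e. reduced to 75% of its original value.
+ */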
+
+int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
+			  bool force_update,
+			  struct ieee80211_vif *disabled_vif)
+{
+	struct iwl_time_quota_cmd cmd = {};
+	int i, idx, err, num_active_macs, quota, quota_rem, n_non_lowlat;
+	struct iwl_mvm_quota_iterator_data data = {
+		.n_interfaces = {},
+		.colors = { -1, -1, -1, -1 },
+		.disabled_vif = disabled_vif,
+	};
+	struct iwl_time_quota_cmd *last = &mvm->last_quota_cmd;
+	struct iwl_time_quota_data *qdata, *last_data;
+	bool send = false;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA))
+		return 0;
+
+	/* nothing to do during HW restart - quotas are updated again
+	 * upon completion
+	 */
+	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+		return 0;
+
+	/* iterator data above must match */
+	BUILD_BUG_ON(MAX_BINDINGS != 4);
+
+	ieee80211_iterate_active_interfaces_atomic(
+		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+		iwl_mvm_quota_iterator, &data);
+
+	/*
+	 * The FW's scheduling session consists of
+	 * IWL_MVM_MAX_QUOTA fragments. Divide these fragments
+	 * equally between all the bindings that require quota
+	 */
+	num_active_macs = 0;
+	for (i = 0; i < MAX_BINDINGS; i++) {
+		qdata = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, i);
+		qdata->id_and_color = cpu_to_le32(FW_CTXT_INVALID);
+		num_active_macs += data.n_interfaces[i];
+	}
+
+	n_non_lowlat = num_active_macs;
+
+	if (data.n_low_latency_bindings == 1) {
+		for (i = 0; i < MAX_BINDINGS; i++) {
+			if (data.low_latency[i]) {
+				n_non_lowlat -= data.n_interfaces[i];
+				break;
+			}
+		}
+	}
+
+	if (data.n_low_latency_bindings == 1 && n_non_lowlat) {
+		/*
+		 * Reserve quota for the low latency binding in case there
+		 * are several data bindings but only a single low latency
+		 * one. Split the rest of the quota equally between the
+		 * other data interfaces.
+		 */
+		quota = (QUOTA_100 - QUOTA_LOWLAT_MIN) / n_non_lowlat;
+		quota_rem = QUOTA_100 - n_non_lowlat * quota -
+			    QUOTA_LOWLAT_MIN;
+		IWL_DEBUG_QUOTA(mvm,
+				"quota: low-latency binding active, remaining quota per other binding: %d\n",
+				quota);
+	} else if (num_active_macs) {
+		/*
+		 * There are 0 or more than 1 low latency bindings, or all the
+		 * data interfaces belong to the single low latency binding.
+		 * Split the quota equally between the data interfaces.
+		 */
+		quota = QUOTA_100 / num_active_macs;
+		quota_rem = QUOTA_100 % num_active_macs;
+		IWL_DEBUG_QUOTA(mvm,
+				"quota: splitting evenly per binding: %d\n",
+				quota);
+	} else {
+		/* values don't really matter - won't be used */
+		quota = 0;
+		quota_rem = 0;
+	}
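+
+	/*
+	 * Worked example (assuming QUOTA_100 = 128): three active MACs
+	 * spread over two bindings, none low latency, gives
+	 * quota = 128 / 3 = 42 and quota_rem = 2; the binding carrying
+	 * two interfaces is assigned 84 fragments and the other 42, and
+	 * the remainder of 2 goes to the first non-zero binding below.
+	 */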
+
+	for (idx = 0, i = 0; i < MAX_BINDINGS; i++) {
+		if (data.colors[i] < 0)
+			continue;
+
+		qdata = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, idx);
+
+		qdata->id_and_color =
+			cpu_to_le32(FW_CMD_ID_AND_COLOR(i, data.colors[i]));
+
+		if (data.n_interfaces[i] <= 0)
+			qdata->quota = cpu_to_le32(0);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+		else if (data.dbgfs_min[i])
+			qdata->quota =
+				cpu_to_le32(data.dbgfs_min[i] * QUOTA_100 / 100);
+#endif
+		else if (data.n_low_latency_bindings == 1 && n_non_lowlat &&
+			 data.low_latency[i])
+			/*
+			 * There is more than one binding, but only one of the
+			 * bindings is in low latency. For this case, allocate
+			 * the minimal required quota for the low latency
+			 * binding.
+			 */
+			qdata->quota = cpu_to_le32(QUOTA_LOWLAT_MIN);
+		else
+			qdata->quota =
+				cpu_to_le32(quota * data.n_interfaces[i]);
+
+		WARN_ONCE(le32_to_cpu(qdata->quota) > QUOTA_100,
+			  "Binding=%d, quota=%u > max=%u\n",
+			  idx, le32_to_cpu(qdata->quota), QUOTA_100);
+
+		qdata->max_duration = cpu_to_le32(0);
+
+		idx++;
+	}
+
+	/* Give the remainder of the session to the first data binding */
+	for (i = 0; i < MAX_BINDINGS; i++) {
+		qdata = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, i);
+		if (le32_to_cpu(qdata->quota) != 0) {
+			le32_add_cpu(&qdata->quota, quota_rem);
+			IWL_DEBUG_QUOTA(mvm,
+					"quota: giving remainder of %d to binding %d\n",
+					quota_rem, i);
+			break;
+		}
+	}
+
+	iwl_mvm_adjust_quota_for_noa(mvm, &cmd);
+
+	/* compare against the last command sent; also check that we still
+	 * have a non-zero quota for all valid bindings
+	 */
+	for (i = 0; i < MAX_BINDINGS; i++) {
+		qdata = iwl_mvm_quota_cmd_get_quota(mvm, &cmd, i);
+		last_data = iwl_mvm_quota_cmd_get_quota(mvm, last, i);
+		if (qdata->id_and_color != last_data->id_and_color)
+			send = true;
+		if (qdata->max_duration != last_data->max_duration)
+			send = true;
+		if (abs((int)le32_to_cpu(qdata->quota) -
+			(int)le32_to_cpu(last_data->quota))
+						> IWL_MVM_QUOTA_THRESHOLD)
+			send = true;
+		if (qdata->id_and_color == cpu_to_le32(FW_CTXT_INVALID))
+			continue;
+		WARN_ONCE(qdata->quota == 0,
+			  "zero quota on binding %d\n", i);
+	}
+
+	if (!send && !force_update) {
+		/* don't send a practically unchanged command, the firmware has
+		 * to re-initialize a lot of state and that can have an adverse
+		 * impact on it
+		 */
+		return 0;
+	}
+
+	err = iwl_mvm_send_cmd_pdu(mvm, TIME_QUOTA_CMD, 0,
+				   iwl_mvm_quota_cmd_size(mvm), &cmd);
+
+	if (err)
+		IWL_ERR(mvm, "Failed to send quota: %d\n", err);
+	else
+		mvm->last_quota_cmd = cmd;
+	return err;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
new file mode 100644
index 0000000..8169d14
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -0,0 +1,369 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017        Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017        Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "rs.h"
+#include "fw-api.h"
+#include "sta.h"
+#include "iwl-op-mode.h"
+#include "mvm.h"
+
+static u8 rs_fw_bw_from_sta_bw(struct ieee80211_sta *sta)
+{
+	switch (sta->bandwidth) {
+	case IEEE80211_STA_RX_BW_160:
+		return IWL_TLC_MNG_CH_WIDTH_160MHZ;
+	case IEEE80211_STA_RX_BW_80:
+		return IWL_TLC_MNG_CH_WIDTH_80MHZ;
+	case IEEE80211_STA_RX_BW_40:
+		return IWL_TLC_MNG_CH_WIDTH_40MHZ;
+	case IEEE80211_STA_RX_BW_20:
+	default:
+		return IWL_TLC_MNG_CH_WIDTH_20MHZ;
+	}
+}
+
+static u8 rs_fw_set_active_chains(u8 chains)
+{
+	u8 fw_chains = 0;
+
+	if (chains & ANT_A)
+		fw_chains |= IWL_TLC_MNG_CHAIN_A_MSK;
+	if (chains & ANT_B)
+		fw_chains |= IWL_TLC_MNG_CHAIN_B_MSK;
+	if (chains & ANT_C)
+		WARN(false,
+		     "tlc offload doesn't support antenna C. chains: 0x%x\n",
+		     chains);
+
+	return fw_chains;
+}
+
+static u8 rs_fw_sgi_cw_support(struct ieee80211_sta *sta)
+{
+	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+	struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+	u8 supp = 0;
+
+	if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
+		supp |= BIT(IWL_TLC_MNG_CH_WIDTH_20MHZ);
+	if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
+		supp |= BIT(IWL_TLC_MNG_CH_WIDTH_40MHZ);
+	if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_80)
+		supp |= BIT(IWL_TLC_MNG_CH_WIDTH_80MHZ);
+	if (vht_cap->cap & IEEE80211_VHT_CAP_SHORT_GI_160)
+		supp |= BIT(IWL_TLC_MNG_CH_WIDTH_160MHZ);
+
+	return supp;
+}
+
+static u16 rs_fw_set_config_flags(struct iwl_mvm *mvm,
+				  struct ieee80211_sta *sta)
+{
+	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+	struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+	bool vht_ena = vht_cap && vht_cap->vht_supported;
+	u16 flags = 0;
+
+	if (mvm->cfg->ht_params->stbc &&
+	    (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
+	    ((ht_cap && (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC)) ||
+	     (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))))
+		flags |= IWL_TLC_MNG_CFG_FLAGS_STBC_MSK;
+
+	if (mvm->cfg->ht_params->ldpc &&
+	    ((ht_cap && (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)) ||
+	     (vht_ena && (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))))
+		flags |= IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK;
+
+	return flags;
+}
+
+static
+int rs_fw_vht_highest_rx_mcs_index(const struct ieee80211_sta_vht_cap *vht_cap,
+				   int nss)
+{
+	u16 rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) &
+		(0x3 << (2 * (nss - 1)));
+	rx_mcs >>= (2 * (nss - 1));
+
+	switch (rx_mcs) {
+	case IEEE80211_VHT_MCS_SUPPORT_0_7:
+		return IWL_TLC_MNG_HT_RATE_MCS7;
+	case IEEE80211_VHT_MCS_SUPPORT_0_8:
+		return IWL_TLC_MNG_HT_RATE_MCS8;
+	case IEEE80211_VHT_MCS_SUPPORT_0_9:
+		return IWL_TLC_MNG_HT_RATE_MCS9;
+	default:
+		WARN_ON_ONCE(1);
+		break;
+	}
+
+	return 0;
+}
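+
+/*
+ * The VHT RX MCS map packs two bits per spatial stream (NSS 1 in bits
+ * 1:0 up to NSS 8 in bits 15:14), which is what the mask and shift
+ * above isolate. E.g. a map of 0xfffe advertises MCS 0-9 on one
+ * spatial stream (bits 1:0 = 2) and nothing on the others (all
+ * remaining two-bit fields = 3, "not supported").
+ */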
+
+static void
+rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta,
+			    const struct ieee80211_sta_vht_cap *vht_cap,
+			    struct iwl_tlc_config_cmd *cmd)
+{
+	u16 supp;
+	int i, highest_mcs;
+
+	for (i = 0; i < sta->rx_nss; i++) {
+		if (i == MAX_NSS)
+			break;
+
+		highest_mcs = rs_fw_vht_highest_rx_mcs_index(vht_cap, i + 1);
+		if (!highest_mcs)
+			continue;
+
+		supp = BIT(highest_mcs + 1) - 1;
+		if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
+			supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9);
+
+		cmd->ht_rates[i][0] = cpu_to_le16(supp);
+		if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
+			cmd->ht_rates[i][1] = cmd->ht_rates[i][0];
+	}
+}
+
+static u16 rs_fw_he_ieee80211_mcs_to_rs_mcs(u16 mcs)
+{
+	switch (mcs) {
+	case IEEE80211_HE_MCS_SUPPORT_0_7:
+		return BIT(IWL_TLC_MNG_HT_RATE_MCS7 + 1) - 1;
+	case IEEE80211_HE_MCS_SUPPORT_0_9:
+		return BIT(IWL_TLC_MNG_HT_RATE_MCS9 + 1) - 1;
+	case IEEE80211_HE_MCS_SUPPORT_0_11:
+		return BIT(IWL_TLC_MNG_HT_RATE_MCS11 + 1) - 1;
+	case IEEE80211_HE_MCS_NOT_SUPPORTED:
+		return 0;
+	}
+
+	WARN(1, "invalid HE MCS %d\n", mcs);
+	return 0;
+}
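+
+/*
+ * The value returned above is a bitmap with one bit per enabled MCS;
+ * e.g. IEEE80211_HE_MCS_SUPPORT_0_9 becomes 0x3ff (MCS 0 through 9
+ * set), assuming IWL_TLC_MNG_HT_RATE_MCS9 equals 9.
+ */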
+
+static void
+rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta,
+			   const struct ieee80211_sta_he_cap *he_cap,
+			   struct iwl_tlc_config_cmd *cmd)
+{
+	u16 mcs_160 = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_160);
+	u16 mcs_80 = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_80);
+	int i;
+
+	for (i = 0; i < sta->rx_nss && i < MAX_NSS; i++) {
+		u16 _mcs_160 = (mcs_160 >> (2 * i)) & 0x3;
+		u16 _mcs_80 = (mcs_80 >> (2 * i)) & 0x3;
+
+		cmd->ht_rates[i][0] =
+			cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_80));
+		cmd->ht_rates[i][1] =
+			cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_160));
+	}
+}
+
+static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
+				 struct ieee80211_supported_band *sband,
+				 struct iwl_tlc_config_cmd *cmd)
+{
+	int i;
+	unsigned long tmp;
+	unsigned long supp; /* must be unsigned long for for_each_set_bit */
+	const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+	const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+	const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
+
+	/* non HT rates */
+	supp = 0;
+	tmp = sta->supp_rates[sband->band];
+	for_each_set_bit(i, &tmp, BITS_PER_LONG)
+		supp |= BIT(sband->bitrates[i].hw_value);
+
+	cmd->non_ht_rates = cpu_to_le16(supp);
+	cmd->mode = IWL_TLC_MNG_MODE_NON_HT;
+
+	/* HT/VHT rates */
+	if (he_cap && he_cap->has_he) {
+		cmd->mode = IWL_TLC_MNG_MODE_HE;
+		rs_fw_he_set_enabled_rates(sta, he_cap, cmd);
+	} else if (vht_cap && vht_cap->vht_supported) {
+		cmd->mode = IWL_TLC_MNG_MODE_VHT;
+		rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd);
+	} else if (ht_cap && ht_cap->ht_supported) {
+		cmd->mode = IWL_TLC_MNG_MODE_HT;
+		cmd->ht_rates[0][0] = cpu_to_le16(ht_cap->mcs.rx_mask[0]);
+		cmd->ht_rates[1][0] = cpu_to_le16(ht_cap->mcs.rx_mask[1]);
+	}
+}
+
+void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
+			      struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_tlc_update_notif *notif;
+	struct ieee80211_sta *sta;
+	struct iwl_mvm_sta *mvmsta;
+	struct iwl_lq_sta_rs_fw *lq_sta;
+	u32 flags;
+
+	rcu_read_lock();
+
+	notif = (void *)pkt->data;
+	sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]);
+	if (IS_ERR_OR_NULL(sta)) {
+		IWL_ERR(mvm, "Invalid sta id (%d) in FW TLC notification\n",
+			notif->sta_id);
+		goto out;
+	}
+
+	mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+	if (!mvmsta) {
+		IWL_ERR(mvm, "Invalid sta id (%d) in FW TLC notification\n",
+			notif->sta_id);
+		goto out;
+	}
+
+	flags = le32_to_cpu(notif->flags);
+
+	lq_sta = &mvmsta->lq_sta.rs_fw;
+
+	if (flags & IWL_TLC_NOTIF_FLAG_RATE) {
+		lq_sta->last_rate_n_flags = le32_to_cpu(notif->rate);
+		IWL_DEBUG_RATE(mvm, "new rate_n_flags: 0x%X\n",
+			       lq_sta->last_rate_n_flags);
+	}
+
+	if (flags & IWL_TLC_NOTIF_FLAG_AMSDU) {
+		u16 size = le32_to_cpu(notif->amsdu_size);
+
+		if (WARN_ON(sta->max_amsdu_len < size))
+			goto out;
+
+		mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled);
+		mvmsta->max_amsdu_len = size;
+
+		IWL_DEBUG_RATE(mvm,
+			       "AMSDU update. AMSDU size: %d, AMSDU selected size: %d, AMSDU TID bitmap 0x%X\n",
+			       le32_to_cpu(notif->amsdu_size), size,
+			       mvmsta->amsdu_enabled);
+	}
+out:
+	rcu_read_unlock();
+}
+
+void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+		     enum nl80211_band band)
+{
+	struct ieee80211_hw *hw = mvm->hw;
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
+	u32 cmd_id = iwl_cmd_id(TLC_MNG_CONFIG_CMD, DATA_PATH_GROUP, 0);
+	struct ieee80211_supported_band *sband;
+	struct iwl_tlc_config_cmd cfg_cmd = {
+		.sta_id = mvmsta->sta_id,
+		.max_ch_width = rs_fw_bw_from_sta_bw(sta),
+		.flags = cpu_to_le16(rs_fw_set_config_flags(mvm, sta)),
+		.chains = rs_fw_set_active_chains(iwl_mvm_get_valid_tx_ant(mvm)),
+		.max_mpdu_len = cpu_to_le16(sta->max_amsdu_len),
+		.sgi_ch_width_supp = rs_fw_sgi_cw_support(sta),
+		.amsdu = iwl_mvm_is_csum_supported(mvm),
+	};
+	int ret;
+
+	memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers));
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	iwl_mvm_reset_frame_stats(mvm);
+#endif
+	sband = hw->wiphy->bands[band];
+	rs_fw_set_supp_rates(sta, sband, &cfg_cmd);
+
+	ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cfg_cmd), &cfg_cmd);
+	if (ret)
+		IWL_ERR(mvm, "Failed to send rate scale config (%d)\n", ret);
+}
+
+int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+			bool enable)
+{
+	/* TODO: need to introduce a new FW cmd since LQ cmd is not relevant */
+	IWL_DEBUG_RATE(mvm, "tx protection - not implemented yet.\n");
+	return 0;
+}
+
+void iwl_mvm_rs_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta)
+{
+	struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw;
+
+	IWL_DEBUG_RATE(mvm, "create station rate scale window\n");
+
+	lq_sta->pers.drv = mvm;
+	lq_sta->pers.sta_id = mvmsta->sta_id;
+	lq_sta->pers.chains = 0;
+	memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal));
+	lq_sta->pers.last_rssi = S8_MIN;
+	lq_sta->last_rate_n_flags = 0;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+	lq_sta->pers.dbg_fixed_rate = 0;
+#endif
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
new file mode 100644
index 0000000..f2830b5
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -0,0 +1,4165 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+
+#include <linux/workqueue.h>
+#include "rs.h"
+#include "fw-api.h"
+#include "sta.h"
+#include "iwl-op-mode.h"
+#include "mvm.h"
+#include "debugfs.h"
+
+#define IWL_RATE_MAX_WINDOW		62	/* # tx in history window */
+
+/* Calculations of success ratio are done in fixed point where 12800 is 100%.
+ * Use this macro when dealing with threshold constants set as a percentage
+ */
+#define RS_PERCENT(x) (128 * (x))
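+/*
+ * E.g. RS_PERCENT(50) evaluates to 6400, which corresponds to a window
+ * success_ratio of exactly 50% in the 12800-is-100% fixed point used
+ * throughout this file.
+ */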
+
+static u8 rs_ht_to_legacy[] = {
+	[IWL_RATE_MCS_0_INDEX] = IWL_RATE_6M_INDEX,
+	[IWL_RATE_MCS_1_INDEX] = IWL_RATE_9M_INDEX,
+	[IWL_RATE_MCS_2_INDEX] = IWL_RATE_12M_INDEX,
+	[IWL_RATE_MCS_3_INDEX] = IWL_RATE_18M_INDEX,
+	[IWL_RATE_MCS_4_INDEX] = IWL_RATE_24M_INDEX,
+	[IWL_RATE_MCS_5_INDEX] = IWL_RATE_36M_INDEX,
+	[IWL_RATE_MCS_6_INDEX] = IWL_RATE_48M_INDEX,
+	[IWL_RATE_MCS_7_INDEX] = IWL_RATE_54M_INDEX,
+	[IWL_RATE_MCS_8_INDEX] = IWL_RATE_54M_INDEX,
+	[IWL_RATE_MCS_9_INDEX] = IWL_RATE_54M_INDEX,
+};
+
+static const u8 ant_toggle_lookup[] = {
+	[ANT_NONE] = ANT_NONE,
+	[ANT_A] = ANT_B,
+	[ANT_B] = ANT_A,
+	[ANT_AB] = ANT_AB,
+};
+
+#define IWL_DECLARE_RATE_INFO(r, s, rp, rn)			      \
+	[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP,	      \
+				    IWL_RATE_HT_SISO_MCS_##s##_PLCP,  \
+				    IWL_RATE_HT_MIMO2_MCS_##s##_PLCP, \
+				    IWL_RATE_VHT_SISO_MCS_##s##_PLCP, \
+				    IWL_RATE_VHT_MIMO2_MCS_##s##_PLCP,\
+				    IWL_RATE_##rp##M_INDEX,	      \
+				    IWL_RATE_##rn##M_INDEX }
+
+#define IWL_DECLARE_MCS_RATE(s)						  \
+	[IWL_RATE_MCS_##s##_INDEX] = { IWL_RATE_INVM_PLCP,		  \
+				       IWL_RATE_HT_SISO_MCS_##s##_PLCP,	  \
+				       IWL_RATE_HT_MIMO2_MCS_##s##_PLCP,  \
+				       IWL_RATE_VHT_SISO_MCS_##s##_PLCP,  \
+				       IWL_RATE_VHT_MIMO2_MCS_##s##_PLCP, \
+				       IWL_RATE_INVM_INDEX,	          \
+				       IWL_RATE_INVM_INDEX }
+
+/*
+ * Parameter order:
+ *   rate, ht rate, prev rate, next rate
+ *
+ * If there isn't a valid next or previous rate then INV is used, which
+ * maps to IWL_RATE_INVALID.
+ */
+static const struct iwl_rs_rate_info iwl_rates[IWL_RATE_COUNT] = {
+	IWL_DECLARE_RATE_INFO(1, INV, INV, 2),   /*  1mbps */
+	IWL_DECLARE_RATE_INFO(2, INV, 1, 5),     /*  2mbps */
+	IWL_DECLARE_RATE_INFO(5, INV, 2, 11),    /*5.5mbps */
+	IWL_DECLARE_RATE_INFO(11, INV, 9, 12),   /* 11mbps */
+	IWL_DECLARE_RATE_INFO(6, 0, 5, 11),      /*  6mbps ; MCS 0 */
+	IWL_DECLARE_RATE_INFO(9, INV, 6, 11),    /*  9mbps */
+	IWL_DECLARE_RATE_INFO(12, 1, 11, 18),    /* 12mbps ; MCS 1 */
+	IWL_DECLARE_RATE_INFO(18, 2, 12, 24),    /* 18mbps ; MCS 2 */
+	IWL_DECLARE_RATE_INFO(24, 3, 18, 36),    /* 24mbps ; MCS 3 */
+	IWL_DECLARE_RATE_INFO(36, 4, 24, 48),    /* 36mbps ; MCS 4 */
+	IWL_DECLARE_RATE_INFO(48, 5, 36, 54),    /* 48mbps ; MCS 5 */
+	IWL_DECLARE_RATE_INFO(54, 6, 48, INV),   /* 54mbps ; MCS 6 */
+	IWL_DECLARE_MCS_RATE(7),                 /* MCS 7 */
+	IWL_DECLARE_MCS_RATE(8),                 /* MCS 8 */
+	IWL_DECLARE_MCS_RATE(9),                 /* MCS 9 */
+};
+
+enum rs_action {
+	RS_ACTION_STAY = 0,
+	RS_ACTION_DOWNSCALE = -1,
+	RS_ACTION_UPSCALE = 1,
+};
+
+enum rs_column_mode {
+	RS_INVALID = 0,
+	RS_LEGACY,
+	RS_SISO,
+	RS_MIMO2,
+};
+
+#define MAX_NEXT_COLUMNS 7
+#define MAX_COLUMN_CHECKS 3
+
+struct rs_tx_column;
+
+typedef bool (*allow_column_func_t) (struct iwl_mvm *mvm,
+				     struct ieee80211_sta *sta,
+				     struct rs_rate *rate,
+				     const struct rs_tx_column *next_col);
+
+struct rs_tx_column {
+	enum rs_column_mode mode;
+	u8 ant;
+	bool sgi;
+	enum rs_column next_columns[MAX_NEXT_COLUMNS];
+	allow_column_func_t checks[MAX_COLUMN_CHECKS];
+};
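+
+/*
+ * A column is eligible only if every entry in its checks[] array
+ * returns true, and next_columns[] lists, in order of preference, the
+ * columns the rate-scaling search may move to from the current one.
+ */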
+
+static bool rs_ant_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+			 struct rs_rate *rate,
+			 const struct rs_tx_column *next_col)
+{
+	return iwl_mvm_bt_coex_is_ant_avail(mvm, next_col->ant);
+}
+
+static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+			  struct rs_rate *rate,
+			  const struct rs_tx_column *next_col)
+{
+	if (!sta->ht_cap.ht_supported)
+		return false;
+
+	if (sta->smps_mode == IEEE80211_SMPS_STATIC)
+		return false;
+
+	if (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) < 2)
+		return false;
+
+	if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
+		return false;
+
+	if (mvm->nvm_data->sku_cap_mimo_disabled)
+		return false;
+
+	return true;
+}
+
+static bool rs_siso_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+			  struct rs_rate *rate,
+			  const struct rs_tx_column *next_col)
+{
+	if (!sta->ht_cap.ht_supported)
+		return false;
+
+	return true;
+}
+
+static bool rs_sgi_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+			 struct rs_rate *rate,
+			 const struct rs_tx_column *next_col)
+{
+	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+	struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+
+	if (is_ht20(rate) && (ht_cap->cap &
+			     IEEE80211_HT_CAP_SGI_20))
+		return true;
+	if (is_ht40(rate) && (ht_cap->cap &
+			     IEEE80211_HT_CAP_SGI_40))
+		return true;
+	if (is_ht80(rate) && (vht_cap->cap &
+			     IEEE80211_VHT_CAP_SHORT_GI_80))
+		return true;
+	if (is_ht160(rate) && (vht_cap->cap &
+			     IEEE80211_VHT_CAP_SHORT_GI_160))
+		return true;
+
+	return false;
+}
+
+static const struct rs_tx_column rs_tx_columns[] = {
+	[RS_COLUMN_LEGACY_ANT_A] = {
+		.mode = RS_LEGACY,
+		.ant = ANT_A,
+		.next_columns = {
+			RS_COLUMN_LEGACY_ANT_B,
+			RS_COLUMN_SISO_ANT_A,
+			RS_COLUMN_MIMO2,
+			RS_COLUMN_INVALID,
+			RS_COLUMN_INVALID,
+			RS_COLUMN_INVALID,
+			RS_COLUMN_INVALID,
+		},
+		.checks = {
+			rs_ant_allow,
+		},
+	},
+	[RS_COLUMN_LEGACY_ANT_B] = {
+		.mode = RS_LEGACY,
+		.ant = ANT_B,
+		.next_columns = {
+			RS_COLUMN_LEGACY_ANT_A,
+			RS_COLUMN_SISO_ANT_B,
+			RS_COLUMN_MIMO2,
+			RS_COLUMN_INVALID,
+			RS_COLUMN_INVALID,
+			RS_COLUMN_INVALID,
+			RS_COLUMN_INVALID,
+		},
+		.checks = {
+			rs_ant_allow,
+		},
+	},
+	[RS_COLUMN_SISO_ANT_A] = {
+		.mode = RS_SISO,
+		.ant = ANT_A,
+		.next_columns = {
+			RS_COLUMN_SISO_ANT_B,
+			RS_COLUMN_MIMO2,
+			RS_COLUMN_SISO_ANT_A_SGI,
+			RS_COLUMN_LEGACY_ANT_A,
+			RS_COLUMN_LEGACY_ANT_B,
+			RS_COLUMN_INVALID,
+			RS_COLUMN_INVALID,
+		},
+		.checks = {
+			rs_siso_allow,
+			rs_ant_allow,
+		},
+	},
+	[RS_COLUMN_SISO_ANT_B] = {
+		.mode = RS_SISO,
+		.ant = ANT_B,
+		.next_columns = {
+			RS_COLUMN_SISO_ANT_A,
+			RS_COLUMN_MIMO2,
+			RS_COLUMN_SISO_ANT_B_SGI,
+			RS_COLUMN_LEGACY_ANT_A,
+			RS_COLUMN_LEGACY_ANT_B,
+			RS_COLUMN_INVALID,
+			RS_COLUMN_INVALID,
+		},
+		.checks = {
+			rs_siso_allow,
+			rs_ant_allow,
+		},
+	},
+	[RS_COLUMN_SISO_ANT_A_SGI] = {
+		.mode = RS_SISO,
+		.ant = ANT_A,
+		.sgi = true,
+		.next_columns = {
+			RS_COLUMN_SISO_ANT_B_SGI,
+			RS_COLUMN_MIMO2_SGI,
+			RS_COLUMN_SISO_ANT_A,
+			RS_COLUMN_LEGACY_ANT_A,
+			RS_COLUMN_LEGACY_ANT_B,
+			RS_COLUMN_INVALID,
+			RS_COLUMN_INVALID,
+		},
+		.checks = {
+			rs_siso_allow,
+			rs_ant_allow,
+			rs_sgi_allow,
+		},
+	},
+	[RS_COLUMN_SISO_ANT_B_SGI] = {
+		.mode = RS_SISO,
+		.ant = ANT_B,
+		.sgi = true,
+		.next_columns = {
+			RS_COLUMN_SISO_ANT_A_SGI,
+			RS_COLUMN_MIMO2_SGI,
+			RS_COLUMN_SISO_ANT_B,
+			RS_COLUMN_LEGACY_ANT_A,
+			RS_COLUMN_LEGACY_ANT_B,
+			RS_COLUMN_INVALID,
+			RS_COLUMN_INVALID,
+		},
+		.checks = {
+			rs_siso_allow,
+			rs_ant_allow,
+			rs_sgi_allow,
+		},
+	},
+	[RS_COLUMN_MIMO2] = {
+		.mode = RS_MIMO2,
+		.ant = ANT_AB,
+		.next_columns = {
+			RS_COLUMN_SISO_ANT_A,
+			RS_COLUMN_MIMO2_SGI,
+			RS_COLUMN_LEGACY_ANT_A,
+			RS_COLUMN_LEGACY_ANT_B,
+			RS_COLUMN_INVALID,
+			RS_COLUMN_INVALID,
+			RS_COLUMN_INVALID,
+		},
+		.checks = {
+			rs_mimo_allow,
+		},
+	},
+	[RS_COLUMN_MIMO2_SGI] = {
+		.mode = RS_MIMO2,
+		.ant = ANT_AB,
+		.sgi = true,
+		.next_columns = {
+			RS_COLUMN_SISO_ANT_A_SGI,
+			RS_COLUMN_MIMO2,
+			RS_COLUMN_LEGACY_ANT_A,
+			RS_COLUMN_LEGACY_ANT_B,
+			RS_COLUMN_INVALID,
+			RS_COLUMN_INVALID,
+			RS_COLUMN_INVALID,
+		},
+		.checks = {
+			rs_mimo_allow,
+			rs_sgi_allow,
+		},
+	},
+};
+
+static inline u8 rs_extract_rate(u32 rate_n_flags)
+{
+	/* also works for HT because bits 7:6 are zero there */
+	return (u8)(rate_n_flags & RATE_LEGACY_RATE_MSK);
+}
+
+static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
+{
+	int idx = 0;
+
+	if (rate_n_flags & RATE_MCS_HT_MSK) {
+		idx = rate_n_flags & RATE_HT_MCS_RATE_CODE_MSK;
+		idx += IWL_RATE_MCS_0_INDEX;
+
+		/* skip 9M, which is not supported in HT */
+		if (idx >= IWL_RATE_9M_INDEX)
+			idx += 1;
+		if ((idx >= IWL_FIRST_HT_RATE) && (idx <= IWL_LAST_HT_RATE))
+			return idx;
+	} else if (rate_n_flags & RATE_MCS_VHT_MSK ||
+		   rate_n_flags & RATE_MCS_HE_MSK) {
+		idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
+		idx += IWL_RATE_MCS_0_INDEX;
+
+		/* skip 9M, which is not supported in VHT */
+		if (idx >= IWL_RATE_9M_INDEX)
+			idx++;
+		if ((idx >= IWL_FIRST_VHT_RATE) && (idx <= IWL_LAST_VHT_RATE))
+			return idx;
+		if ((rate_n_flags & RATE_MCS_HE_MSK) &&
+		    (idx <= IWL_LAST_HE_RATE))
+			return idx;
+	} else {
+		/* legacy rate format, search for match in table */
+
+		u8 legacy_rate = rs_extract_rate(rate_n_flags);
+
+		for (idx = 0; idx < ARRAY_SIZE(iwl_rates); idx++)
+			if (iwl_rates[idx].plcp == legacy_rate)
+				return idx;
+	}
+
+	return IWL_RATE_INVALID;
+}
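+
+/*
+ * Note the "+ 1" above: the unified rate table has no HT/VHT entry in
+ * the 9M slot (9 Mbps exists only as a legacy OFDM rate), so MCS 0
+ * shares the 6M slot and every MCS index at or past the 9M slot is
+ * shifted up by one.
+ */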
+
+static void rs_rate_scale_perform(struct iwl_mvm *mvm,
+				  struct ieee80211_sta *sta,
+				  struct iwl_lq_sta *lq_sta,
+				  int tid, bool ndp);
+static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
+			   struct ieee80211_sta *sta,
+			   struct iwl_lq_sta *lq_sta,
+			   const struct rs_rate *initial_rate);
+static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
+
+/*
+ * The following tables contain the expected throughput metrics for all rates
+ *
+ *	1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
+ *
+ * where invalid entries are zeros.
+ *
+ * CCK rates are only valid in the legacy table and will only be used in the
+ * 2.4 GHz (G) band.
+ */
+
+static const u16 expected_tpt_legacy[IWL_RATE_COUNT] = {
+	7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0, 0, 0
+};
+
+/* Expected TpT tables. 4 indexes:
+ * 0 - NGI, 1 - SGI, 2 - AGG+NGI, 3 - AGG+SGI
+ */
+static const u16 expected_tpt_siso_20MHz[4][IWL_RATE_COUNT] = {
+	{0, 0, 0, 0, 42, 0,  76, 102, 124, 159, 183, 193, 202, 216, 0},
+	{0, 0, 0, 0, 46, 0,  82, 110, 132, 168, 192, 202, 210, 225, 0},
+	{0, 0, 0, 0, 49, 0,  97, 145, 192, 285, 375, 420, 464, 551, 0},
+	{0, 0, 0, 0, 54, 0, 108, 160, 213, 315, 415, 465, 513, 608, 0},
+};
+
+static const u16 expected_tpt_siso_40MHz[4][IWL_RATE_COUNT] = {
+	{0, 0, 0, 0,  77, 0, 127, 160, 184, 220, 242, 250,  257,  269,  275},
+	{0, 0, 0, 0,  83, 0, 135, 169, 193, 229, 250, 257,  264,  275,  280},
+	{0, 0, 0, 0, 101, 0, 199, 295, 389, 570, 744, 828,  911, 1070, 1173},
+	{0, 0, 0, 0, 112, 0, 220, 326, 429, 629, 819, 912, 1000, 1173, 1284},
+};
+
+static const u16 expected_tpt_siso_80MHz[4][IWL_RATE_COUNT] = {
+	{0, 0, 0, 0, 130, 0, 191, 223, 244,  273,  288,  294,  298,  305,  308},
+	{0, 0, 0, 0, 138, 0, 200, 231, 251,  279,  293,  298,  302,  308,  312},
+	{0, 0, 0, 0, 217, 0, 429, 634, 834, 1220, 1585, 1760, 1931, 2258, 2466},
+	{0, 0, 0, 0, 241, 0, 475, 701, 921, 1343, 1741, 1931, 2117, 2468, 2691},
+};
+
+static const u16 expected_tpt_siso_160MHz[4][IWL_RATE_COUNT] = {
+	{0, 0, 0, 0, 191, 0, 244, 288,  298,  308,  313,  318,  323,  328,  330},
+	{0, 0, 0, 0, 200, 0, 251, 293,  302,  312,  317,  322,  327,  332,  334},
+	{0, 0, 0, 0, 439, 0, 875, 1307, 1736, 2584, 3419, 3831, 4240, 5049, 5581},
+	{0, 0, 0, 0, 488, 0, 972, 1451, 1925, 2864, 3785, 4240, 4691, 5581, 6165},
+};
+
+static const u16 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
+	{0, 0, 0, 0,  74, 0, 123, 155, 179, 213, 235, 243, 250,  261, 0},
+	{0, 0, 0, 0,  81, 0, 131, 164, 187, 221, 242, 250, 256,  267, 0},
+	{0, 0, 0, 0,  98, 0, 193, 286, 375, 550, 718, 799, 878, 1032, 0},
+	{0, 0, 0, 0, 109, 0, 214, 316, 414, 607, 790, 879, 965, 1132, 0},
+};
+
+static const u16 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
+	{0, 0, 0, 0, 123, 0, 182, 214, 235,  264,  279,  285,  289,  296,  300},
+	{0, 0, 0, 0, 131, 0, 191, 222, 242,  270,  284,  289,  293,  300,  303},
+	{0, 0, 0, 0, 200, 0, 390, 571, 741, 1067, 1365, 1505, 1640, 1894, 2053},
+	{0, 0, 0, 0, 221, 0, 430, 630, 816, 1169, 1490, 1641, 1784, 2053, 2221},
+};
+
+static const u16 expected_tpt_mimo2_80MHz[4][IWL_RATE_COUNT] = {
+	{0, 0, 0, 0, 182, 0, 240,  264,  278,  299,  308,  311,  313,  317,  319},
+	{0, 0, 0, 0, 190, 0, 247,  269,  282,  302,  310,  313,  315,  319,  320},
+	{0, 0, 0, 0, 428, 0, 833, 1215, 1577, 2254, 2863, 3147, 3418, 3913, 4219},
+	{0, 0, 0, 0, 474, 0, 920, 1338, 1732, 2464, 3116, 3418, 3705, 4225, 4545},
+};
+
+static const u16 expected_tpt_mimo2_160MHz[4][IWL_RATE_COUNT] = {
+	{0, 0, 0, 0, 240, 0, 278,  308,  313,  319,  322,  324,  328,  330,   334},
+	{0, 0, 0, 0, 247, 0, 282,  310,  315,  320,  323,  325,  329,  332,   338},
+	{0, 0, 0, 0, 875, 0, 1735, 2582, 3414, 5043, 6619, 7389, 8147, 9629,  10592},
+	{0, 0, 0, 0, 971, 0, 1925, 2861, 3779, 5574, 7304, 8147, 8976, 10592, 11640},
+};
+
+/* rate in Mbps and its modulation/coding description */
+static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
+	{  "1", "BPSK DSSS"},
+	{  "2", "QPSK DSSS"},
+	{"5.5", "BPSK CCK"},
+	{ "11", "QPSK CCK"},
+	{  "6", "BPSK 1/2"},
+	{  "9", "BPSK 1/2"},
+	{ "12", "QPSK 1/2"},
+	{ "18", "QPSK 3/4"},
+	{ "24", "16QAM 1/2"},
+	{ "36", "16QAM 3/4"},
+	{ "48", "64QAM 2/3"},
+	{ "54", "64QAM 3/4"},
+	{ "60", "64QAM 5/6"},
+};
+
+#define MCS_INDEX_PER_STREAM	(8)
+
+static const char *rs_pretty_ant(u8 ant)
+{
+	static const char * const ant_name[] = {
+		[ANT_NONE] = "None",
+		[ANT_A]    = "A",
+		[ANT_B]    = "B",
+		[ANT_AB]   = "AB",
+		[ANT_C]    = "C",
+		[ANT_AC]   = "AC",
+		[ANT_BC]   = "BC",
+		[ANT_ABC]  = "ABC",
+	};
+
+	if (ant > ANT_ABC)
+		return "UNKNOWN";
+
+	return ant_name[ant];
+}
+
+static const char *rs_pretty_lq_type(enum iwl_table_type type)
+{
+	static const char * const lq_types[] = {
+		[LQ_NONE] = "NONE",
+		[LQ_LEGACY_A] = "LEGACY_A",
+		[LQ_LEGACY_G] = "LEGACY_G",
+		[LQ_HT_SISO] = "HT SISO",
+		[LQ_HT_MIMO2] = "HT MIMO",
+		[LQ_VHT_SISO] = "VHT SISO",
+		[LQ_VHT_MIMO2] = "VHT MIMO",
+		[LQ_HE_SISO] = "HE SISO",
+		[LQ_HE_MIMO2] = "HE MIMO",
+	};
+
+	if (type < LQ_NONE || type >= LQ_MAX)
+		return "UNKNOWN";
+
+	return lq_types[type];
+}
+
+static char *rs_pretty_rate(const struct rs_rate *rate)
+{
+	static char buf[40];
+	static const char * const legacy_rates[] = {
+		[IWL_RATE_1M_INDEX] = "1M",
+		[IWL_RATE_2M_INDEX] = "2M",
+		[IWL_RATE_5M_INDEX] = "5.5M",
+		[IWL_RATE_11M_INDEX] = "11M",
+		[IWL_RATE_6M_INDEX] = "6M",
+		[IWL_RATE_9M_INDEX] = "9M",
+		[IWL_RATE_12M_INDEX] = "12M",
+		[IWL_RATE_18M_INDEX] = "18M",
+		[IWL_RATE_24M_INDEX] = "24M",
+		[IWL_RATE_36M_INDEX] = "36M",
+		[IWL_RATE_48M_INDEX] = "48M",
+		[IWL_RATE_54M_INDEX] = "54M",
+	};
+	static const char *const ht_vht_rates[] = {
+		[IWL_RATE_MCS_0_INDEX] = "MCS0",
+		[IWL_RATE_MCS_1_INDEX] = "MCS1",
+		[IWL_RATE_MCS_2_INDEX] = "MCS2",
+		[IWL_RATE_MCS_3_INDEX] = "MCS3",
+		[IWL_RATE_MCS_4_INDEX] = "MCS4",
+		[IWL_RATE_MCS_5_INDEX] = "MCS5",
+		[IWL_RATE_MCS_6_INDEX] = "MCS6",
+		[IWL_RATE_MCS_7_INDEX] = "MCS7",
+		[IWL_RATE_MCS_8_INDEX] = "MCS8",
+		[IWL_RATE_MCS_9_INDEX] = "MCS9",
+	};
+	const char *rate_str;
+
+	if (is_type_legacy(rate->type) && (rate->index <= IWL_RATE_54M_INDEX))
+		rate_str = legacy_rates[rate->index];
+	else if ((is_type_ht(rate->type) || is_type_vht(rate->type)) &&
+		 (rate->index >= IWL_RATE_MCS_0_INDEX) &&
+		 (rate->index <= IWL_RATE_MCS_9_INDEX))
+		rate_str = ht_vht_rates[rate->index];
+	else
+		rate_str = "BAD_RATE";
+
+	sprintf(buf, "(%s|%s|%s)", rs_pretty_lq_type(rate->type),
+		rs_pretty_ant(rate->ant), rate_str);
+	return buf;
+}
+
+static inline void rs_dump_rate(struct iwl_mvm *mvm, const struct rs_rate *rate,
+				const char *prefix)
+{
+	IWL_DEBUG_RATE(mvm,
+		       "%s: %s BW: %d SGI: %d LDPC: %d STBC: %d\n",
+		       prefix, rs_pretty_rate(rate), rate->bw,
+		       rate->sgi, rate->ldpc, rate->stbc);
+}
+
+static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
+{
+	window->data = 0;
+	window->success_counter = 0;
+	window->success_ratio = IWL_INVALID_VALUE;
+	window->counter = 0;
+	window->average_tpt = IWL_INVALID_VALUE;
+}
+
+static void rs_rate_scale_clear_tbl_windows(struct iwl_mvm *mvm,
+					    struct iwl_scale_tbl_info *tbl)
+{
+	int i;
+
+	IWL_DEBUG_RATE(mvm, "Clearing up window stats\n");
+	for (i = 0; i < IWL_RATE_COUNT; i++)
+		rs_rate_scale_clear_window(&tbl->win[i]);
+
+	for (i = 0; i < ARRAY_SIZE(tbl->tpc_win); i++)
+		rs_rate_scale_clear_window(&tbl->tpc_win[i]);
+}
+
+static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
+{
+	return (ant_type & valid_antenna) == ant_type;
+}
+
+static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm,
+				     struct iwl_lq_sta *lq_data, u8 tid,
+				     struct ieee80211_sta *sta)
+{
+	int ret = -EAGAIN;
+
+	IWL_DEBUG_HT(mvm, "Starting Tx agg: STA: %pM tid: %d\n",
+		     sta->addr, tid);
+
+	/* start a BA session; it stays active until the peer sends a delBA */
+	ret = ieee80211_start_tx_ba_session(sta, tid, 0);
+	if (ret == -EAGAIN) {
+		/*
+		 * The driver and mac80211 are out of sync; this might be
+		 * caused by a firmware reload. Stop the TX BA session here.
+		 */
+		IWL_ERR(mvm, "Fail start Tx agg on tid: %d\n",
+			tid);
+		ieee80211_stop_tx_ba_session(sta, tid);
+	}
+	return ret;
+}
+
+static void rs_tl_turn_on_agg(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+			      u8 tid, struct iwl_lq_sta *lq_sta,
+			      struct ieee80211_sta *sta)
+{
+	struct iwl_mvm_tid_data *tid_data;
+
+	/*
+	 * In AP mode, tid can be equal to IWL_MAX_TID_COUNT
+	 * when the frame is not QoS
+	 */
+	if (WARN_ON_ONCE(tid > IWL_MAX_TID_COUNT)) {
+		IWL_ERR(mvm, "tid exceeds max TID count: %d/%d\n",
+			tid, IWL_MAX_TID_COUNT);
+		return;
+	} else if (tid == IWL_MAX_TID_COUNT) {
+		return;
+	}
+
+	tid_data = &mvmsta->tid_data[tid];
+	if (mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED &&
+	    tid_data->state == IWL_AGG_OFF &&
+	    (lq_sta->tx_agg_tid_en & BIT(tid)) &&
+	    tid_data->tx_count_last >= IWL_MVM_RS_AGG_START_THRESHOLD) {
+		IWL_DEBUG_RATE(mvm, "try to aggregate tid %d\n", tid);
+		if (rs_tl_turn_on_agg_for_tid(mvm, lq_sta, tid, sta) == 0)
+			tid_data->state = IWL_AGG_QUEUED;
+	}
+}
+
+static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
+{
+	return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
+	       !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
+	       !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
+}
+
+/*
+ * Get the expected throughput from an iwl_scale_tbl_info, guarding
+ * against a NULL expected_tpt table
+ */
+static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
+{
+	if (tbl->expected_tpt)
+		return tbl->expected_tpt[rs_index];
+	return 0;
+}
+
+/**
+ * _rs_collect_tx_data - Update the success/failure sliding window
+ *
+ * We keep a sliding window of the last 62 packets transmitted
+ * at this rate.  window->data contains the bitmask of successful
+ * packets.
+ */
+static int _rs_collect_tx_data(struct iwl_mvm *mvm,
+			       struct iwl_scale_tbl_info *tbl,
+			       int scale_index, int attempts, int successes,
+			       struct iwl_rate_scale_data *window)
+{
+	static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
+	s32 fail_count, tpt;
+
+	/* Get expected throughput */
+	tpt = get_expected_tpt(tbl, scale_index);
+
+	/*
+	 * Keep track of only the latest 62 tx frame attempts in this rate's
+	 * history window; anything older isn't really relevant any more.
+	 * If we have filled up the sliding window, drop the oldest attempt;
+	 * if the oldest attempt (highest bit in bitmap) shows "success",
+	 * subtract "1" from the success counter (this is the main reason
+	 * we keep these bitmaps!).
+	 */
+	while (attempts > 0) {
+		if (window->counter >= IWL_RATE_MAX_WINDOW) {
+			/* remove earliest */
+			window->counter = IWL_RATE_MAX_WINDOW - 1;
+
+			if (window->data & mask) {
+				window->data &= ~mask;
+				window->success_counter--;
+			}
+		}
+
+		/* Increment frames-attempted counter */
+		window->counter++;
+
+		/* Shift bitmap by one frame to throw away oldest history */
+		window->data <<= 1;
+
+		/* Mark the most recent #successes attempts as successful */
+		if (successes > 0) {
+			window->success_counter++;
+			window->data |= 0x1;
+			successes--;
+		}
+
+		attempts--;
+	}
+
+	/* Calculate current success ratio, avoid divide-by-0! */
+	if (window->counter > 0)
+		window->success_ratio = 128 * (100 * window->success_counter)
+					/ window->counter;
+	else
+		window->success_ratio = IWL_INVALID_VALUE;
+
+	fail_count = window->counter - window->success_counter;
+
+	/* Calculate average throughput, if we have enough history. */
+	if ((fail_count >= IWL_MVM_RS_RATE_MIN_FAILURE_TH) ||
+	    (window->success_counter >= IWL_MVM_RS_RATE_MIN_SUCCESS_TH))
+		window->average_tpt = (window->success_ratio * tpt + 64) / 128;
+	else
+		window->average_tpt = IWL_INVALID_VALUE;
+
+	return 0;
+}
+
+static int rs_collect_tpc_data(struct iwl_mvm *mvm,
+			       struct iwl_lq_sta *lq_sta,
+			       struct iwl_scale_tbl_info *tbl,
+			       int scale_index, int attempts, int successes,
+			       u8 reduced_txp)
+{
+	struct iwl_rate_scale_data *window = NULL;
+
+	if (WARN_ON_ONCE(reduced_txp > TPC_MAX_REDUCTION))
+		return -EINVAL;
+
+	window = &tbl->tpc_win[reduced_txp];
+	return  _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes,
+				    window);
+}
+
+static void rs_update_tid_tpt_stats(struct iwl_mvm *mvm,
+				    struct iwl_mvm_sta *mvmsta,
+				    u8 tid, int successes)
+{
+	struct iwl_mvm_tid_data *tid_data;
+
+	if (tid >= IWL_MAX_TID_COUNT)
+		return;
+
+	tid_data = &mvmsta->tid_data[tid];
+
+	/*
+	 * Measure whether there are enough successful transmits per second.
+	 * These statistics are used only to decide if we can start a
+	 * BA session, so they should be updated only when A-MPDU is
+	 * off.
+	 */
+	if (tid_data->state != IWL_AGG_OFF)
+		return;
+
+	if (time_is_before_jiffies(tid_data->tpt_meas_start + HZ) ||
+	    (tid_data->tx_count >= IWL_MVM_RS_AGG_START_THRESHOLD)) {
+		tid_data->tx_count_last = tid_data->tx_count;
+		tid_data->tx_count = 0;
+		tid_data->tpt_meas_start = jiffies;
+	} else {
+		tid_data->tx_count += successes;
+	}
+}
+
+static int rs_collect_tlc_data(struct iwl_mvm *mvm,
+			       struct iwl_mvm_sta *mvmsta, u8 tid,
+			       struct iwl_scale_tbl_info *tbl,
+			       int scale_index, int attempts, int successes)
+{
+	struct iwl_rate_scale_data *window = NULL;
+
+	if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
+		return -EINVAL;
+
+	if (tbl->column != RS_COLUMN_INVALID) {
+		struct lq_sta_pers *pers = &mvmsta->lq_sta.rs_drv.pers;
+
+		pers->tx_stats[tbl->column][scale_index].total += attempts;
+		pers->tx_stats[tbl->column][scale_index].success += successes;
+	}
+
+	rs_update_tid_tpt_stats(mvm, mvmsta, tid, successes);
+
+	/* Select window for current tx bit rate */
+	window = &(tbl->win[scale_index]);
+	return _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes,
+				   window);
+}
+
+/* Convert rs_rate object into ucode rate bitmask */
+static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm,
+				  struct rs_rate *rate)
+{
+	u32 ucode_rate = 0;
+	int index = rate->index;
+
+	ucode_rate |= ((rate->ant << RATE_MCS_ANT_POS) &
+			 RATE_MCS_ANT_ABC_MSK);
+
+	if (is_legacy(rate)) {
+		ucode_rate |= iwl_rates[index].plcp;
+		if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
+			ucode_rate |= RATE_MCS_CCK_MSK;
+		return ucode_rate;
+	}
+
+	if (is_ht(rate)) {
+		if (index < IWL_FIRST_HT_RATE || index > IWL_LAST_HT_RATE) {
+			IWL_ERR(mvm, "Invalid HT rate index %d\n", index);
+			index = IWL_LAST_HT_RATE;
+		}
+		ucode_rate |= RATE_MCS_HT_MSK;
+
+		if (is_ht_siso(rate))
+			ucode_rate |= iwl_rates[index].plcp_ht_siso;
+		else if (is_ht_mimo2(rate))
+			ucode_rate |= iwl_rates[index].plcp_ht_mimo2;
+		else
+			WARN_ON_ONCE(1);
+	} else if (is_vht(rate)) {
+		if (index < IWL_FIRST_VHT_RATE || index > IWL_LAST_VHT_RATE) {
+			IWL_ERR(mvm, "Invalid VHT rate index %d\n", index);
+			index = IWL_LAST_VHT_RATE;
+		}
+		ucode_rate |= RATE_MCS_VHT_MSK;
+		if (is_vht_siso(rate))
+			ucode_rate |= iwl_rates[index].plcp_vht_siso;
+		else if (is_vht_mimo2(rate))
+			ucode_rate |= iwl_rates[index].plcp_vht_mimo2;
+		else
+			WARN_ON_ONCE(1);
+
+	} else {
+		IWL_ERR(mvm, "Invalid rate->type %d\n", rate->type);
+	}
+
+	if (is_siso(rate) && rate->stbc) {
+		/* To enable STBC we need to set both a flag and ANT_AB */
+		ucode_rate |= RATE_MCS_ANT_AB_MSK;
+		ucode_rate |= RATE_MCS_STBC_MSK;
+	}
+
+	ucode_rate |= rate->bw;
+	if (rate->sgi)
+		ucode_rate |= RATE_MCS_SGI_MSK;
+	if (rate->ldpc)
+		ucode_rate |= RATE_MCS_LDPC_MSK;
+
+	return ucode_rate;
+}
+
+/* Convert a ucode rate into an rs_rate object */
+static int rs_rate_from_ucode_rate(const u32 ucode_rate,
+				   enum nl80211_band band,
+				   struct rs_rate *rate)
+{
+	u32 ant_msk = ucode_rate & RATE_MCS_ANT_ABC_MSK;
+	u8 num_of_ant = get_num_of_ant_from_rate(ucode_rate);
+	u8 nss;
+
+	memset(rate, 0, sizeof(*rate));
+	rate->index = iwl_hwrate_to_plcp_idx(ucode_rate);
+
+	if (rate->index == IWL_RATE_INVALID)
+		return -EINVAL;
+
+	rate->ant = (ant_msk >> RATE_MCS_ANT_POS);
+
+	/* Legacy */
+	if (!(ucode_rate & RATE_MCS_HT_MSK) &&
+	    !(ucode_rate & RATE_MCS_VHT_MSK) &&
+	    !(ucode_rate & RATE_MCS_HE_MSK)) {
+		if (num_of_ant == 1) {
+			if (band == NL80211_BAND_5GHZ)
+				rate->type = LQ_LEGACY_A;
+			else
+				rate->type = LQ_LEGACY_G;
+		}
+
+		return 0;
+	}
+
+	/* HT, VHT or HE */
+	if (ucode_rate & RATE_MCS_SGI_MSK)
+		rate->sgi = true;
+	if (ucode_rate & RATE_MCS_LDPC_MSK)
+		rate->ldpc = true;
+	if (ucode_rate & RATE_MCS_STBC_MSK)
+		rate->stbc = true;
+	if (ucode_rate & RATE_MCS_BF_MSK)
+		rate->bfer = true;
+
+	rate->bw = ucode_rate & RATE_MCS_CHAN_WIDTH_MSK;
+
+	if (ucode_rate & RATE_MCS_HT_MSK) {
+		nss = ((ucode_rate & RATE_HT_MCS_NSS_MSK) >>
+		       RATE_HT_MCS_NSS_POS) + 1;
+
+		if (nss == 1) {
+			rate->type = LQ_HT_SISO;
+			WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1,
+				  "stbc %d bfer %d",
+				  rate->stbc, rate->bfer);
+		} else if (nss == 2) {
+			rate->type = LQ_HT_MIMO2;
+			WARN_ON_ONCE(num_of_ant != 2);
+		} else {
+			WARN_ON_ONCE(1);
+		}
+	} else if (ucode_rate & RATE_MCS_VHT_MSK) {
+		nss = ((ucode_rate & RATE_VHT_MCS_NSS_MSK) >>
+		       RATE_VHT_MCS_NSS_POS) + 1;
+
+		if (nss == 1) {
+			rate->type = LQ_VHT_SISO;
+			WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1,
+				  "stbc %d bfer %d",
+				  rate->stbc, rate->bfer);
+		} else if (nss == 2) {
+			rate->type = LQ_VHT_MIMO2;
+			WARN_ON_ONCE(num_of_ant != 2);
+		} else {
+			WARN_ON_ONCE(1);
+		}
+	} else if (ucode_rate & RATE_MCS_HE_MSK) {
+		nss = ((ucode_rate & RATE_VHT_MCS_NSS_MSK) >>
+		      RATE_VHT_MCS_NSS_POS) + 1;
+
+		if (nss == 1) {
+			rate->type = LQ_HE_SISO;
+			WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1,
+				  "stbc %d bfer %d", rate->stbc, rate->bfer);
+		} else if (nss == 2) {
+			rate->type = LQ_HE_MIMO2;
+			WARN_ON_ONCE(num_of_ant != 2);
+		} else {
+			WARN_ON_ONCE(1);
+		}
+	}
+
+	WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_80 &&
+		     !is_he(rate) && !is_vht(rate));
+
+	return 0;
+}
+
+/*
+ * Switch to another antenna/antennas and return 1;
+ * if no other valid antenna is found, return 0.
+ */
+static int rs_toggle_antenna(u32 valid_ant, struct rs_rate *rate)
+{
+	u8 new_ant_type;
+
+	if (!rate->ant || WARN_ON_ONCE(rate->ant & ANT_C))
+		return 0;
+
+	if (!rs_is_valid_ant(valid_ant, rate->ant))
+		return 0;
+
+	new_ant_type = ant_toggle_lookup[rate->ant];
+
+	while ((new_ant_type != rate->ant) &&
+	       !rs_is_valid_ant(valid_ant, new_ant_type))
+		new_ant_type = ant_toggle_lookup[new_ant_type];
+
+	if (new_ant_type == rate->ant)
+		return 0;
+
+	rate->ant = new_ant_type;
+
+	return 1;
+}
+
+static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
+				  struct rs_rate *rate)
+{
+	if (is_legacy(rate))
+		return lq_sta->active_legacy_rate;
+	else if (is_siso(rate))
+		return lq_sta->active_siso_rate;
+	else if (is_mimo2(rate))
+		return lq_sta->active_mimo2_rate;
+
+	WARN_ON_ONCE(1);
+	return 0;
+}
+
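+/*
+ * Returns the adjacent supported rates packed into a u16: the next
+ * higher rate index in the upper byte and the next lower in the lower
+ * byte, with IWL_RATE_INVALID marking a missing neighbour.  Callers
+ * unpack it as low = high_low & 0xff; high = (high_low >> 8) & 0xff.
+ */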
+static u16 rs_get_adjacent_rate(struct iwl_mvm *mvm, u8 index, u16 rate_mask,
+				int rate_type)
+{
+	u8 high = IWL_RATE_INVALID;
+	u8 low = IWL_RATE_INVALID;
+
+	/* 802.11a or non-legacy (HT/VHT) walks to the literally adjacent
+	 * rate in the rate table */
+	if (is_type_a_band(rate_type) || !is_type_legacy(rate_type)) {
+		int i;
+		u32 mask;
+
+		/* Find the previous rate that is in the rate mask */
+		i = index - 1;
+		if (i >= 0)
+			mask = BIT(i);
+		for (; i >= 0; i--, mask >>= 1) {
+			if (rate_mask & mask) {
+				low = i;
+				break;
+			}
+		}
+
+		/* Find the next rate that is in the rate mask */
+		i = index + 1;
+		for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) {
+			if (rate_mask & mask) {
+				high = i;
+				break;
+			}
+		}
+
+		return (high << 8) | low;
+	}
+
+	low = index;
+	while (low != IWL_RATE_INVALID) {
+		low = iwl_rates[low].prev_rs;
+		if (low == IWL_RATE_INVALID)
+			break;
+		if (rate_mask & (1 << low))
+			break;
+	}
+
+	high = index;
+	while (high != IWL_RATE_INVALID) {
+		high = iwl_rates[high].next_rs;
+		if (high == IWL_RATE_INVALID)
+			break;
+		if (rate_mask & (1 << high))
+			break;
+	}
+
+	return (high << 8) | low;
+}
+
+static inline bool rs_rate_supported(struct iwl_lq_sta *lq_sta,
+				     struct rs_rate *rate)
+{
+	return BIT(rate->index) & rs_get_supported_rates(lq_sta, rate);
+}
+
+/* Get the next supported lower rate in the current column.
+ * Return true if bottom rate in the current column was reached
+ */
+static bool rs_get_lower_rate_in_column(struct iwl_lq_sta *lq_sta,
+					struct rs_rate *rate)
+{
+	u8 low;
+	u16 high_low;
+	u16 rate_mask;
+	struct iwl_mvm *mvm = lq_sta->pers.drv;
+
+	rate_mask = rs_get_supported_rates(lq_sta, rate);
+	high_low = rs_get_adjacent_rate(mvm, rate->index, rate_mask,
+					rate->type);
+	low = high_low & 0xff;
+
+	/* Bottom rate of column reached */
+	if (low == IWL_RATE_INVALID)
+		return true;
+
+	rate->index = low;
+	return false;
+}
+
+/* Get the next rate to use following a column downgrade */
+static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
+					  struct rs_rate *rate)
+{
+	struct iwl_mvm *mvm = lq_sta->pers.drv;
+
+	if (is_legacy(rate)) {
+		/* No column to downgrade from Legacy */
+		return;
+	} else if (is_siso(rate)) {
+		/* Downgrade to Legacy if we were in SISO */
+		if (lq_sta->band == NL80211_BAND_5GHZ)
+			rate->type = LQ_LEGACY_A;
+		else
+			rate->type = LQ_LEGACY_G;
+
+		rate->bw = RATE_MCS_CHAN_WIDTH_20;
+
+		WARN_ON_ONCE(rate->index < IWL_RATE_MCS_0_INDEX ||
+			     rate->index > IWL_RATE_MCS_9_INDEX);
+
+		rate->index = rs_ht_to_legacy[rate->index];
+		rate->ldpc = false;
+	} else {
+		/* Downgrade to SISO with same MCS if in MIMO  */
+		rate->type = is_vht_mimo2(rate) ?
+			LQ_VHT_SISO : LQ_HT_SISO;
+	}
+
+	if (num_of_ant(rate->ant) > 1)
+		rate->ant = first_antenna(iwl_mvm_get_valid_tx_ant(mvm));
+
+	/* Relevant in both switching to SISO or Legacy */
+	rate->sgi = false;
+
+	if (!rs_rate_supported(lq_sta, rate))
+		rs_get_lower_rate_in_column(lq_sta, rate);
+}
+
+/* Check if both rates share the same column */
+static inline bool rs_rate_column_match(struct rs_rate *a,
+					struct rs_rate *b)
+{
+	bool ant_match;
+
+	if (a->stbc || a->bfer)
+		ant_match = (b->ant == ANT_A || b->ant == ANT_B);
+	else
+		ant_match = (a->ant == b->ant);
+
+	return (a->type == b->type) && (a->bw == b->bw) && (a->sgi == b->sgi)
+		&& ant_match;
+}
+
+static inline enum rs_column rs_get_column_from_rate(struct rs_rate *rate)
+{
+	if (is_legacy(rate)) {
+		if (rate->ant == ANT_A)
+			return RS_COLUMN_LEGACY_ANT_A;
+
+		if (rate->ant == ANT_B)
+			return RS_COLUMN_LEGACY_ANT_B;
+
+		goto err;
+	}
+
+	if (is_siso(rate)) {
+		if (rate->ant == ANT_A || rate->stbc || rate->bfer)
+			return rate->sgi ? RS_COLUMN_SISO_ANT_A_SGI :
+				RS_COLUMN_SISO_ANT_A;
+
+		if (rate->ant == ANT_B)
+			return rate->sgi ? RS_COLUMN_SISO_ANT_B_SGI :
+				RS_COLUMN_SISO_ANT_B;
+
+		goto err;
+	}
+
+	if (is_mimo(rate))
+		return rate->sgi ? RS_COLUMN_MIMO2_SGI : RS_COLUMN_MIMO2;
+
+err:
+	return RS_COLUMN_INVALID;
+}
+
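+/*
+ * Extract the TID from the QoS control field.  Non-QoS data frames map
+ * to IWL_MAX_TID_COUNT, which rs_tl_turn_on_agg() treats as "don't
+ * aggregate".
+ */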
+static u8 rs_get_tid(struct ieee80211_hdr *hdr)
+{
+	u8 tid = IWL_MAX_TID_COUNT;
+
+	if (ieee80211_is_data_qos(hdr->frame_control)) {
+		u8 *qc = ieee80211_get_qos_ctl(hdr);
+		tid = qc[0] & 0xf;
+	}
+
+	if (unlikely(tid > IWL_MAX_TID_COUNT))
+		tid = IWL_MAX_TID_COUNT;
+
+	return tid;
+}
+
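+/*
+ * Main Tx-status entry point for driver-based rate scaling: feed the
+ * per-rate history windows (TLC) and per-txp-reduction windows (TPC)
+ * from the frame/A-MPDU status, then run rs_rate_scale_perform() to
+ * decide on the next rate.
+ */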
+void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+			  int tid, struct ieee80211_tx_info *info, bool ndp)
+{
+	int legacy_success;
+	int retries;
+	int i;
+	struct iwl_lq_cmd *table;
+	u32 lq_hwrate;
+	struct rs_rate lq_rate, tx_resp_rate;
+	struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
+	u32 tlc_info = (uintptr_t)info->status.status_driver_data[0];
+	u8 reduced_txp = tlc_info & RS_DRV_DATA_TXP_MSK;
+	u8 lq_color = RS_DRV_DATA_LQ_COLOR_GET(tlc_info);
+	u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1];
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
+
+	/* Treat uninitialized rate scaling data same as non-existing. */
+	if (!lq_sta) {
+		IWL_DEBUG_RATE(mvm, "Station rate scaling not created yet.\n");
+		return;
+	} else if (!lq_sta->pers.drv) {
+		IWL_DEBUG_RATE(mvm, "Rate scaling not initialized yet.\n");
+		return;
+	}
+
+	/* This packet was aggregated but doesn't carry status info */
+	if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
+	    !(info->flags & IEEE80211_TX_STAT_AMPDU))
+		return;
+
+	if (rs_rate_from_ucode_rate(tx_resp_hwrate, info->band,
+				    &tx_resp_rate)) {
+		WARN_ON_ONCE(1);
+		return;
+	}
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+	/* Disable the last-tx check if we are debugging with a fixed rate,
+	 * but still update the tx stats */
+	if (lq_sta->pers.dbg_fixed_rate) {
+		int index = tx_resp_rate.index;
+		enum rs_column column;
+		int attempts, success;
+
+		column = rs_get_column_from_rate(&tx_resp_rate);
+		if (WARN_ONCE(column == RS_COLUMN_INVALID,
+			      "Can't map rate 0x%x to column",
+			      tx_resp_hwrate))
+			return;
+
+		if (info->flags & IEEE80211_TX_STAT_AMPDU) {
+			attempts = info->status.ampdu_len;
+			success = info->status.ampdu_ack_len;
+		} else {
+			attempts = info->status.rates[0].count;
+			success = !!(info->flags & IEEE80211_TX_STAT_ACK);
+		}
+
+		lq_sta->pers.tx_stats[column][index].total += attempts;
+		lq_sta->pers.tx_stats[column][index].success += success;
+
+		IWL_DEBUG_RATE(mvm, "Fixed rate 0x%x success %d attempts %d\n",
+			       tx_resp_hwrate, success, attempts);
+		return;
+	}
+#endif
+
+	if (time_after(jiffies,
+		       (unsigned long)(lq_sta->last_tx +
+				       (IWL_MVM_RS_IDLE_TIMEOUT * HZ)))) {
+		IWL_DEBUG_RATE(mvm, "Tx idle for too long. reinit rs\n");
+		iwl_mvm_rs_rate_init(mvm, sta, info->band);
+		return;
+	}
+	lq_sta->last_tx = jiffies;
+
+	/* Ignore this Tx frame response if its initial rate doesn't match
+	 * that of latest Link Quality command.  There may be stragglers
+	 * from a previous Link Quality command, but we're no longer interested
+	 * in those; they're either from the "active" mode while we're trying
+	 * to check "search" mode, or a prior "search" mode after we've moved
+	 * to a new "search" mode (which might become the new "active" mode).
+	 */
+	table = &lq_sta->lq;
+	lq_hwrate = le32_to_cpu(table->rs_table[0]);
+	if (rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate)) {
+		WARN_ON_ONCE(1);
+		return;
+	}
+
+	/* Here we actually compare this rate to the latest LQ command */
+	if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) {
+		IWL_DEBUG_RATE(mvm,
+			       "tx resp color 0x%x does not match 0x%x\n",
+			       lq_color, LQ_FLAG_COLOR_GET(table->flags));
+
+		/*
+		 * Since the rates mismatch, the last LQ command may have
+		 * failed.  After IWL_MISSED_RATE_MAX mismatches, resync the
+		 * uCode with the driver.
+		 */
+		lq_sta->missed_rate_counter++;
+		if (lq_sta->missed_rate_counter > IWL_MVM_RS_MISSED_RATE_MAX) {
+			lq_sta->missed_rate_counter = 0;
+			IWL_DEBUG_RATE(mvm,
+				       "Too many rates mismatch. Send sync LQ. rs_state %d\n",
+				       lq_sta->rs_state);
+			iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
+		}
+		/* Regardless, ignore this status info for outdated rate */
+		return;
+	} else
+		/* Rate did match, so reset the missed_rate_counter */
+		lq_sta->missed_rate_counter = 0;
+
+	if (!lq_sta->search_better_tbl) {
+		curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+		other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+	} else {
+		curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+		other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+	}
+
+	if (WARN_ON_ONCE(!rs_rate_column_match(&lq_rate, &curr_tbl->rate))) {
+		IWL_DEBUG_RATE(mvm,
+			       "Neither active nor search matches tx rate\n");
+		tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+		rs_dump_rate(mvm, &tmp_tbl->rate, "ACTIVE");
+		tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+		rs_dump_rate(mvm, &tmp_tbl->rate, "SEARCH");
+		rs_dump_rate(mvm, &lq_rate, "ACTUAL");
+
+		/*
+		 * no matching table found, let's by-pass the data collection
+		 * and continue to perform rate scale to find the rate table
+		 */
+		rs_stay_in_table(lq_sta, true);
+		goto done;
+	}
+
+	/*
+	 * Updating the frame history depends on whether packets were
+	 * aggregated.
+	 *
+	 * For aggregation, all packets were transmitted at the same rate,
+	 * which is the first entry in the rate scale table.
+	 */
+	if (info->flags & IEEE80211_TX_STAT_AMPDU) {
+		rs_collect_tpc_data(mvm, lq_sta, curr_tbl, tx_resp_rate.index,
+				    info->status.ampdu_len,
+				    info->status.ampdu_ack_len,
+				    reduced_txp);
+
+		/* ampdu_ack_len = 0 marks no BA was received. For TLC, treat
+		 * it as a single frame loss as we don't want the success ratio
+		 * to dip too quickly because a BA wasn't received.
+		 * For TPC, there's no need for this optimisation since we want
+		 * to recover very quickly from a bad power reduction and
+		 * therefore we'd like the success ratio to take an immediate hit
+		 * when failing to get a BA, so we'd switch back to a lower or
+		 * zero power reduction. When FW transmits agg with a rate
+		 * different from the initial rate, it will not use reduced txp
+		 * and will send BA notification twice (one empty with reduced
+		 * txp equal to the value from LQ and one with reduced txp 0).
+		 * We need to update counters for each txp level accordingly.
+		 */
+		if (info->status.ampdu_ack_len == 0)
+			info->status.ampdu_len = 1;
+
+		rs_collect_tlc_data(mvm, mvmsta, tid, curr_tbl, tx_resp_rate.index,
+				    info->status.ampdu_len,
+				    info->status.ampdu_ack_len);
+
+		/* Update success/fail counts if not searching for new mode */
+		if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
+			lq_sta->total_success += info->status.ampdu_ack_len;
+			lq_sta->total_failed += (info->status.ampdu_len -
+					info->status.ampdu_ack_len);
+		}
+	} else {
+		/* For legacy, update the frame history for each Tx retry. */
+		retries = info->status.rates[0].count - 1;
+		/* HW doesn't send more than 15 retries */
+		retries = min(retries, 15);
+
+		/* The last transmission may have been successful */
+		legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
+		/* Collect data for each rate used during failed TX attempts */
+		for (i = 0; i <= retries; ++i) {
+			lq_hwrate = le32_to_cpu(table->rs_table[i]);
+			if (rs_rate_from_ucode_rate(lq_hwrate, info->band,
+						    &lq_rate)) {
+				WARN_ON_ONCE(1);
+				return;
+			}
+
+			/*
+			 * Only collect stats if retried rate is in the same RS
+			 * table as active/search.
+			 */
+			if (rs_rate_column_match(&lq_rate, &curr_tbl->rate))
+				tmp_tbl = curr_tbl;
+			else if (rs_rate_column_match(&lq_rate,
+						      &other_tbl->rate))
+				tmp_tbl = other_tbl;
+			else
+				continue;
+
+			rs_collect_tpc_data(mvm, lq_sta, tmp_tbl,
+					    tx_resp_rate.index, 1,
+					    i < retries ? 0 : legacy_success,
+					    reduced_txp);
+			rs_collect_tlc_data(mvm, mvmsta, tid, tmp_tbl,
+					    tx_resp_rate.index, 1,
+					    i < retries ? 0 : legacy_success);
+		}
+
+		/* Update success/fail counts if not searching for new mode */
+		if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
+			lq_sta->total_success += legacy_success;
+			lq_sta->total_failed += retries + (1 - legacy_success);
+		}
+	}
+	/* The last TX rate is cached in lq_sta; it's set in if/else above */
+	lq_sta->last_rate_n_flags = lq_hwrate;
+	IWL_DEBUG_RATE(mvm, "reduced txpower: %d\n", reduced_txp);
+done:
+	/* See if there's a better rate or modulation mode to try. */
+	if (sta->supp_rates[info->band])
+		rs_rate_scale_perform(mvm, sta, lq_sta, tid, ndp);
+}
+
+/*
+ * mac80211 sends us Tx status
+ */
+static void rs_drv_mac80211_tx_status(void *mvm_r,
+				      struct ieee80211_supported_band *sband,
+				      struct ieee80211_sta *sta, void *priv_sta,
+				      struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct iwl_op_mode *op_mode = mvm_r;
+	struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+	if (!iwl_mvm_sta_from_mac80211(sta)->vif)
+		return;
+
+	if (!ieee80211_is_data(hdr->frame_control) ||
+	    info->flags & IEEE80211_TX_CTL_NO_ACK)
+		return;
+
+	iwl_mvm_rs_tx_status(mvm, sta, rs_get_tid(hdr), info,
+			     ieee80211_is_qos_nullfunc(hdr->frame_control));
+}
+
+/*
+ * Begin a period of staying with a selected modulation mode.
+ * Set "stay_in_tbl" flag to prevent any mode switches.
+ * Set frame tx success limits according to legacy vs. high-throughput,
+ * and reset overall (spanning all rates) tx success history statistics.
+ * These control how long we stay using same modulation mode before
+ * searching for a new mode.
+ */
+static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy,
+				 struct iwl_lq_sta *lq_sta)
+{
+	IWL_DEBUG_RATE(mvm, "Moving to RS_STATE_STAY_IN_COLUMN\n");
+	lq_sta->rs_state = RS_STATE_STAY_IN_COLUMN;
+	if (is_legacy) {
+		lq_sta->table_count_limit = IWL_MVM_RS_LEGACY_TABLE_COUNT;
+		lq_sta->max_failure_limit = IWL_MVM_RS_LEGACY_FAILURE_LIMIT;
+		lq_sta->max_success_limit = IWL_MVM_RS_LEGACY_SUCCESS_LIMIT;
+	} else {
+		lq_sta->table_count_limit = IWL_MVM_RS_NON_LEGACY_TABLE_COUNT;
+		lq_sta->max_failure_limit = IWL_MVM_RS_NON_LEGACY_FAILURE_LIMIT;
+		lq_sta->max_success_limit = IWL_MVM_RS_NON_LEGACY_SUCCESS_LIMIT;
+	}
+	lq_sta->table_count = 0;
+	lq_sta->total_failed = 0;
+	lq_sta->total_success = 0;
+	lq_sta->flush_timer = jiffies;
+	lq_sta->visited_columns = 0;
+}
+
+static inline int rs_get_max_rate_from_mask(unsigned long rate_mask)
+{
+	if (rate_mask)
+		return find_last_bit(&rate_mask, BITS_PER_LONG);
+	return IWL_RATE_INVALID;
+}
+
+static int rs_get_max_allowed_rate(struct iwl_lq_sta *lq_sta,
+				   const struct rs_tx_column *column)
+{
+	switch (column->mode) {
+	case RS_LEGACY:
+		return lq_sta->max_legacy_rate_idx;
+	case RS_SISO:
+		return lq_sta->max_siso_rate_idx;
+	case RS_MIMO2:
+		return lq_sta->max_mimo2_rate_idx;
+	default:
+		WARN_ON_ONCE(1);
+	}
+
+	return lq_sta->max_legacy_rate_idx;
+}
+
+static const u16 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta,
+					    const struct rs_tx_column *column,
+					    u32 bw)
+{
+	/* Used to choose among HT tables */
+	const u16 (*ht_tbl_pointer)[IWL_RATE_COUNT];
+
+	if (WARN_ON_ONCE(column->mode != RS_LEGACY &&
+			 column->mode != RS_SISO &&
+			 column->mode != RS_MIMO2))
+		return expected_tpt_legacy;
+
+	/* Legacy rates have only one table */
+	if (column->mode == RS_LEGACY)
+		return expected_tpt_legacy;
+
+	ht_tbl_pointer = expected_tpt_mimo2_20MHz;
+	/* Choose among many HT tables depending on number of streams
+	 * (SISO/MIMO2), channel width (20/40/80/160MHz), SGI, and
+	 * aggregation status */
+	if (column->mode == RS_SISO) {
+		switch (bw) {
+		case RATE_MCS_CHAN_WIDTH_20:
+			ht_tbl_pointer = expected_tpt_siso_20MHz;
+			break;
+		case RATE_MCS_CHAN_WIDTH_40:
+			ht_tbl_pointer = expected_tpt_siso_40MHz;
+			break;
+		case RATE_MCS_CHAN_WIDTH_80:
+			ht_tbl_pointer = expected_tpt_siso_80MHz;
+			break;
+		case RATE_MCS_CHAN_WIDTH_160:
+			ht_tbl_pointer = expected_tpt_siso_160MHz;
+			break;
+		default:
+			WARN_ON_ONCE(1);
+		}
+	} else if (column->mode == RS_MIMO2) {
+		switch (bw) {
+		case RATE_MCS_CHAN_WIDTH_20:
+			ht_tbl_pointer = expected_tpt_mimo2_20MHz;
+			break;
+		case RATE_MCS_CHAN_WIDTH_40:
+			ht_tbl_pointer = expected_tpt_mimo2_40MHz;
+			break;
+		case RATE_MCS_CHAN_WIDTH_80:
+			ht_tbl_pointer = expected_tpt_mimo2_80MHz;
+			break;
+		case RATE_MCS_CHAN_WIDTH_160:
+			ht_tbl_pointer = expected_tpt_mimo2_160MHz;
+			break;
+		default:
+			WARN_ON_ONCE(1);
+		}
+	} else {
+		WARN_ON_ONCE(1);
+	}
+
+	if (!column->sgi && !lq_sta->is_agg)		/* Normal */
+		return ht_tbl_pointer[0];
+	else if (column->sgi && !lq_sta->is_agg)        /* SGI */
+		return ht_tbl_pointer[1];
+	else if (!column->sgi && lq_sta->is_agg)        /* AGG */
+		return ht_tbl_pointer[2];
+	else						/* AGG+SGI */
+		return ht_tbl_pointer[3];
+}
+
+static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
+				      struct iwl_scale_tbl_info *tbl)
+{
+	struct rs_rate *rate = &tbl->rate;
+	const struct rs_tx_column *column = &rs_tx_columns[tbl->column];
+
+	tbl->expected_tpt = rs_get_expected_tpt_table(lq_sta, column, rate->bw);
+}
+
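+/*
+ * Find a start rate for the "search" column: the lowest supported rate
+ * whose expected throughput beats the target, where the target is 100%
+ * of the expected throughput at the current rate (if the success ratio
+ * is high) or the last measured throughput otherwise.  Walks upwards
+ * via rs_get_adjacent_rate().
+ */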
+static s32 rs_get_best_rate(struct iwl_mvm *mvm,
+			    struct iwl_lq_sta *lq_sta,
+			    struct iwl_scale_tbl_info *tbl,	/* "search" */
+			    unsigned long rate_mask, s8 index)
+{
+	struct iwl_scale_tbl_info *active_tbl =
+	    &(lq_sta->lq_info[lq_sta->active_tbl]);
+	s32 success_ratio = active_tbl->win[index].success_ratio;
+	u16 expected_current_tpt = active_tbl->expected_tpt[index];
+	const u16 *tpt_tbl = tbl->expected_tpt;
+	u16 high_low;
+	u32 target_tpt;
+	int rate_idx;
+
+	if (success_ratio >= RS_PERCENT(IWL_MVM_RS_SR_NO_DECREASE)) {
+		target_tpt = 100 * expected_current_tpt;
+		IWL_DEBUG_RATE(mvm,
+			       "SR %d high. Find rate exceeding EXPECTED_CURRENT %d\n",
+			       success_ratio, target_tpt);
+	} else {
+		target_tpt = lq_sta->last_tpt;
+		IWL_DEBUG_RATE(mvm,
+			       "SR %d not that good. Find rate exceeding ACTUAL_TPT %d\n",
+			       success_ratio, target_tpt);
+	}
+
+	rate_idx = find_first_bit(&rate_mask, BITS_PER_LONG);
+
+	while (rate_idx != IWL_RATE_INVALID) {
+		if (target_tpt < (100 * tpt_tbl[rate_idx]))
+			break;
+
+		high_low = rs_get_adjacent_rate(mvm, rate_idx, rate_mask,
+						tbl->rate.type);
+
+		rate_idx = (high_low >> 8) & 0xff;
+	}
+
+	IWL_DEBUG_RATE(mvm, "Best rate found %d target_tp %d expected_new %d\n",
+		       rate_idx, target_tpt,
+		       rate_idx != IWL_RATE_INVALID ?
+		       100 * tpt_tbl[rate_idx] : IWL_INVALID_VALUE);
+
+	return rate_idx;
+}
+
+static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta)
+{
+	switch (sta->bandwidth) {
+	case IEEE80211_STA_RX_BW_160:
+		return RATE_MCS_CHAN_WIDTH_160;
+	case IEEE80211_STA_RX_BW_80:
+		return RATE_MCS_CHAN_WIDTH_80;
+	case IEEE80211_STA_RX_BW_40:
+		return RATE_MCS_CHAN_WIDTH_40;
+	case IEEE80211_STA_RX_BW_20:
+	default:
+		return RATE_MCS_CHAN_WIDTH_20;
+	}
+}
+
+/*
+ * Check whether we should continue using same modulation mode, or
+ * begin search for a new mode, based on:
+ * 1) # tx successes or failures while using this mode
+ * 2) # times calling this function
+ * 3) elapsed time in this mode (not used, for now)
+ */
+static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
+{
+	struct iwl_scale_tbl_info *tbl;
+	int active_tbl;
+	int flush_interval_passed = 0;
+	struct iwl_mvm *mvm;
+
+	mvm = lq_sta->pers.drv;
+	active_tbl = lq_sta->active_tbl;
+
+	tbl = &(lq_sta->lq_info[active_tbl]);
+
+	/* If we've been disallowing search, see if we should now allow it */
+	if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
+		/* Elapsed time using current modulation mode */
+		if (lq_sta->flush_timer)
+			flush_interval_passed =
+				time_after(jiffies,
+					   (unsigned long)(lq_sta->flush_timer +
+							   (IWL_MVM_RS_STAY_IN_COLUMN_TIMEOUT * HZ)));
+
+		/*
+		 * Check if we should allow search for new modulation mode.
+		 * If many frames have failed or succeeded, or we've used
+		 * this same modulation for a long time, allow search, and
+		 * reset history stats that keep track of whether we should
+		 * allow a new search.  Also (below) reset all bitmaps and
+		 * stats in active history.
+		 */
+		if (force_search ||
+		    (lq_sta->total_failed > lq_sta->max_failure_limit) ||
+		    (lq_sta->total_success > lq_sta->max_success_limit) ||
+		    ((!lq_sta->search_better_tbl) &&
+		     (lq_sta->flush_timer) && (flush_interval_passed))) {
+			IWL_DEBUG_RATE(mvm,
+				       "LQ: stay is expired %d %d %d\n",
+				     lq_sta->total_failed,
+				     lq_sta->total_success,
+				     flush_interval_passed);
+
+			/* Allow search for new mode */
+			lq_sta->rs_state = RS_STATE_SEARCH_CYCLE_STARTED;
+			IWL_DEBUG_RATE(mvm,
+				       "Moving to RS_STATE_SEARCH_CYCLE_STARTED\n");
+			lq_sta->total_failed = 0;
+			lq_sta->total_success = 0;
+			lq_sta->flush_timer = 0;
+			/* mark the current column as visited */
+			lq_sta->visited_columns = BIT(tbl->column);
+		/*
+		 * Else, if we've used this modulation mode for enough
+		 * repetitions (regardless of elapsed time or success/failure),
+		 * reset history bitmaps and rate-specific stats for all rates
+		 * in the active table.
+		 */
+		} else {
+			lq_sta->table_count++;
+			if (lq_sta->table_count >=
+			    lq_sta->table_count_limit) {
+				lq_sta->table_count = 0;
+
+				IWL_DEBUG_RATE(mvm,
+					       "LQ: stay in table clear win\n");
+				rs_rate_scale_clear_tbl_windows(mvm, tbl);
+			}
+		}
+
+		/* If transitioning to allow "search", reset all history
+		 * bitmaps and stats in active table (this will become the new
+		 * "search" table). */
+		if (lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED) {
+			rs_rate_scale_clear_tbl_windows(mvm, tbl);
+		}
+	}
+}
+
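+/*
+ * Enable A-MSDU only while the link looks healthy: an HT/VHT rate of
+ * MCS5 or above and no downscale decision; aggregating on a marginal
+ * link would only amplify losses.
+ */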
+static void rs_set_amsdu_len(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+			     struct iwl_scale_tbl_info *tbl,
+			     enum rs_action scale_action)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+	/*
+	 * When TLC offload is not active, amsdu_enabled is either 0xFFFF
+	 * or 0, since there is no per-TID algorithm.
+	 */
+	if ((!is_vht(&tbl->rate) && !is_ht(&tbl->rate)) ||
+	    tbl->rate.index < IWL_RATE_MCS_5_INDEX ||
+	    scale_action == RS_ACTION_DOWNSCALE)
+		mvmsta->amsdu_enabled = 0;
+	else
+		mvmsta->amsdu_enabled = 0xFFFF;
+
+	mvmsta->max_amsdu_len = sta->max_amsdu_len;
+}
+
+/*
+ * setup rate table in uCode
+ */
+static void rs_update_rate_tbl(struct iwl_mvm *mvm,
+			       struct ieee80211_sta *sta,
+			       struct iwl_lq_sta *lq_sta,
+			       struct iwl_scale_tbl_info *tbl)
+{
+	rs_fill_lq_cmd(mvm, sta, lq_sta, &tbl->rate);
+	iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, false);
+}
+
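+/*
+ * Far-range heuristic (gated by IWL_MVM_RS_80_20_FAR_RANGE_TWEAK):
+ * instead of letting an 80MHz SISO link drop below MCS0, switch to
+ * 20MHz MCS4, and only return to 80MHz MCS1 once a higher 20MHz rate
+ * has proven sustainable.
+ */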
+static bool rs_tweak_rate_tbl(struct iwl_mvm *mvm,
+			      struct ieee80211_sta *sta,
+			      struct iwl_lq_sta *lq_sta,
+			      struct iwl_scale_tbl_info *tbl,
+			      enum rs_action scale_action)
+{
+	if (sta->bandwidth != IEEE80211_STA_RX_BW_80)
+		return false;
+
+	if (!is_vht_siso(&tbl->rate))
+		return false;
+
+	if ((tbl->rate.bw == RATE_MCS_CHAN_WIDTH_80) &&
+	    (tbl->rate.index == IWL_RATE_MCS_0_INDEX) &&
+	    (scale_action == RS_ACTION_DOWNSCALE)) {
+		tbl->rate.bw = RATE_MCS_CHAN_WIDTH_20;
+		tbl->rate.index = IWL_RATE_MCS_4_INDEX;
+		IWL_DEBUG_RATE(mvm, "Switch 80Mhz SISO MCS0 -> 20Mhz MCS4\n");
+		goto tweaked;
+	}
+
+	/* Go back to 80MHz MCS1 only if we've established that 20MHz MCS5 is
+	 * sustainable, i.e. we're past the test window.  We can't go back
+	 * while MCS5 is merely being tested, as that always happens right
+	 * after switching to 20MHz MCS4 because the rate stats are cleared.
+	 */
+	if ((tbl->rate.bw == RATE_MCS_CHAN_WIDTH_20) &&
+	    (((tbl->rate.index == IWL_RATE_MCS_5_INDEX) &&
+	     (scale_action == RS_ACTION_STAY)) ||
+	     ((tbl->rate.index > IWL_RATE_MCS_5_INDEX) &&
+	      (scale_action == RS_ACTION_UPSCALE)))) {
+		tbl->rate.bw = RATE_MCS_CHAN_WIDTH_80;
+		tbl->rate.index = IWL_RATE_MCS_1_INDEX;
+		IWL_DEBUG_RATE(mvm, "Switch 20Mhz SISO MCS5 -> 80Mhz MCS1\n");
+		goto tweaked;
+	}
+
+	return false;
+
+tweaked:
+	rs_set_expected_tpt_table(lq_sta, tbl);
+	rs_rate_scale_clear_tbl_windows(mvm, tbl);
+	return true;
+}
+
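+/*
+ * Scan the current column's next_columns[] candidates and return the
+ * first one that (a) hasn't been visited in this search cycle, (b) is
+ * supported by the chip's antenna configuration, (c) passes all of the
+ * column's checks and (d) whose best expected throughput can beat the
+ * last measured throughput.
+ */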
+static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
+					 struct iwl_lq_sta *lq_sta,
+					 struct ieee80211_sta *sta,
+					 struct iwl_scale_tbl_info *tbl)
+{
+	int i, j, max_rate;
+	enum rs_column next_col_id;
+	const struct rs_tx_column *curr_col = &rs_tx_columns[tbl->column];
+	const struct rs_tx_column *next_col;
+	allow_column_func_t allow_func;
+	u8 valid_ants = iwl_mvm_get_valid_tx_ant(mvm);
+	const u16 *expected_tpt_tbl;
+	u16 tpt, max_expected_tpt;
+
+	for (i = 0; i < MAX_NEXT_COLUMNS; i++) {
+		next_col_id = curr_col->next_columns[i];
+
+		if (next_col_id == RS_COLUMN_INVALID)
+			continue;
+
+		if (lq_sta->visited_columns & BIT(next_col_id)) {
+			IWL_DEBUG_RATE(mvm, "Skip already visited column %d\n",
+				       next_col_id);
+			continue;
+		}
+
+		next_col = &rs_tx_columns[next_col_id];
+
+		if (!rs_is_valid_ant(valid_ants, next_col->ant)) {
+			IWL_DEBUG_RATE(mvm,
+				       "Skip column %d as ANT config isn't supported by chip. valid_ants 0x%x column ant 0x%x\n",
+				       next_col_id, valid_ants, next_col->ant);
+			continue;
+		}
+
+		for (j = 0; j < MAX_COLUMN_CHECKS; j++) {
+			allow_func = next_col->checks[j];
+			if (allow_func && !allow_func(mvm, sta, &tbl->rate,
+						      next_col))
+				break;
+		}
+
+		if (j != MAX_COLUMN_CHECKS) {
+			IWL_DEBUG_RATE(mvm,
+				       "Skip column %d: not allowed (check %d failed)\n",
+				       next_col_id, j);
+
+			continue;
+		}
+
+		tpt = lq_sta->last_tpt / 100;
+		expected_tpt_tbl = rs_get_expected_tpt_table(lq_sta, next_col,
+						     rs_bw_from_sta_bw(sta));
+		if (WARN_ON_ONCE(!expected_tpt_tbl))
+			continue;
+
+		max_rate = rs_get_max_allowed_rate(lq_sta, next_col);
+		if (max_rate == IWL_RATE_INVALID) {
+			IWL_DEBUG_RATE(mvm,
+				       "Skip column %d: no rate is allowed in this column\n",
+				       next_col_id);
+			continue;
+		}
+
+		max_expected_tpt = expected_tpt_tbl[max_rate];
+		if (tpt >= max_expected_tpt) {
+			IWL_DEBUG_RATE(mvm,
+				       "Skip column %d: can't beat current TPT. Max expected %d current %d\n",
+				       next_col_id, max_expected_tpt, tpt);
+			continue;
+		}
+
+		IWL_DEBUG_RATE(mvm,
+			       "Found potential column %d. Max expected %d current %d\n",
+			       next_col_id, max_expected_tpt, tpt);
+		break;
+	}
+
+	if (i == MAX_NEXT_COLUMNS)
+		return RS_COLUMN_INVALID;
+
+	return next_col_id;
+}
+
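+/*
+ * Populate the "search" table for the chosen column and pick a starting
+ * rate.  Returns 0 on success, or -1 (with rate->type set to LQ_NONE)
+ * if no usable rate exists in the new column.
+ */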
+static int rs_switch_to_column(struct iwl_mvm *mvm,
+			       struct iwl_lq_sta *lq_sta,
+			       struct ieee80211_sta *sta,
+			       enum rs_column col_id)
+{
+	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+	struct iwl_scale_tbl_info *search_tbl =
+				&(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+	struct rs_rate *rate = &search_tbl->rate;
+	const struct rs_tx_column *column = &rs_tx_columns[col_id];
+	const struct rs_tx_column *curr_column = &rs_tx_columns[tbl->column];
+	unsigned long rate_mask = 0;
+	u32 rate_idx = 0;
+
+	memcpy(search_tbl, tbl, offsetof(struct iwl_scale_tbl_info, win));
+
+	rate->sgi = column->sgi;
+	rate->ant = column->ant;
+
+	if (column->mode == RS_LEGACY) {
+		if (lq_sta->band == NL80211_BAND_5GHZ)
+			rate->type = LQ_LEGACY_A;
+		else
+			rate->type = LQ_LEGACY_G;
+
+		rate->bw = RATE_MCS_CHAN_WIDTH_20;
+		rate->ldpc = false;
+		rate_mask = lq_sta->active_legacy_rate;
+	} else if (column->mode == RS_SISO) {
+		rate->type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
+		rate_mask = lq_sta->active_siso_rate;
+	} else if (column->mode == RS_MIMO2) {
+		rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
+		rate_mask = lq_sta->active_mimo2_rate;
+	} else {
+		WARN_ONCE(1, "Bad column mode");
+	}
+
+	if (column->mode != RS_LEGACY) {
+		rate->bw = rs_bw_from_sta_bw(sta);
+		rate->ldpc = lq_sta->ldpc;
+	}
+
+	search_tbl->column = col_id;
+	rs_set_expected_tpt_table(lq_sta, search_tbl);
+
+	lq_sta->visited_columns |= BIT(col_id);
+
+	/* Get the best matching rate if we're changing modes. e.g.
+	 * SISO->MIMO, LEGACY->SISO, MIMO->SISO
+	 */
+	if (curr_column->mode != column->mode) {
+		rate_idx = rs_get_best_rate(mvm, lq_sta, search_tbl,
+					    rate_mask, rate->index);
+
+		if ((rate_idx == IWL_RATE_INVALID) ||
+		    !(BIT(rate_idx) & rate_mask)) {
+			IWL_DEBUG_RATE(mvm,
+				       "can not switch with index %d"
+				       " rate mask %lx\n",
+				       rate_idx, rate_mask);
+
+			goto err;
+		}
+
+		rate->index = rate_idx;
+	}
+
+	IWL_DEBUG_RATE(mvm, "Switched to column %d: Index %d\n",
+		       col_id, rate->index);
+
+	return 0;
+
+err:
+	rate->type = LQ_NONE;
+	return -1;
+}
+
+static enum rs_action rs_get_rate_action(struct iwl_mvm *mvm,
+					 struct iwl_scale_tbl_info *tbl,
+					 s32 sr, int low, int high,
+					 int current_tpt,
+					 int low_tpt, int high_tpt)
+{
+	enum rs_action action = RS_ACTION_STAY;
+
+	if ((sr <= RS_PERCENT(IWL_MVM_RS_SR_FORCE_DECREASE)) ||
+	    (current_tpt == 0)) {
+		IWL_DEBUG_RATE(mvm,
+			       "Decrease rate because of low SR\n");
+		return RS_ACTION_DOWNSCALE;
+	}
+
+	if ((low_tpt == IWL_INVALID_VALUE) &&
+	    (high_tpt == IWL_INVALID_VALUE) &&
+	    (high != IWL_RATE_INVALID)) {
+		IWL_DEBUG_RATE(mvm,
+			       "No data about high/low rates. Increase rate\n");
+		return RS_ACTION_UPSCALE;
+	}
+
+	if ((high_tpt == IWL_INVALID_VALUE) &&
+	    (high != IWL_RATE_INVALID) &&
+	    (low_tpt != IWL_INVALID_VALUE) &&
+	    (low_tpt < current_tpt)) {
+		IWL_DEBUG_RATE(mvm,
+			       "No data about high rate and low rate is worse. Increase rate\n");
+		return RS_ACTION_UPSCALE;
+	}
+
+	if ((high_tpt != IWL_INVALID_VALUE) &&
+	    (high_tpt > current_tpt)) {
+		IWL_DEBUG_RATE(mvm,
+			       "Higher rate is better. Increate rate\n");
+		return RS_ACTION_UPSCALE;
+	}
+
+	if ((low_tpt != IWL_INVALID_VALUE) &&
+	    (high_tpt != IWL_INVALID_VALUE) &&
+	    (low_tpt < current_tpt) &&
+	    (high_tpt < current_tpt)) {
+		IWL_DEBUG_RATE(mvm,
+			       "Both high and low are worse. Maintain rate\n");
+		return RS_ACTION_STAY;
+	}
+
+	if ((low_tpt != IWL_INVALID_VALUE) &&
+	    (low_tpt > current_tpt)) {
+		IWL_DEBUG_RATE(mvm,
+			       "Lower rate is better\n");
+		action = RS_ACTION_DOWNSCALE;
+		goto out;
+	}
+
+	if ((low_tpt == IWL_INVALID_VALUE) &&
+	    (low != IWL_RATE_INVALID)) {
+		IWL_DEBUG_RATE(mvm,
+			       "No data about lower rate\n");
+		action = RS_ACTION_DOWNSCALE;
+		goto out;
+	}
+
+	IWL_DEBUG_RATE(mvm, "Maintain rate\n");
+
+out:
+	if ((action == RS_ACTION_DOWNSCALE) && (low != IWL_RATE_INVALID)) {
+		if (sr >= RS_PERCENT(IWL_MVM_RS_SR_NO_DECREASE)) {
+			IWL_DEBUG_RATE(mvm,
+				       "SR is above NO DECREASE. Avoid downscale\n");
+			action = RS_ACTION_STAY;
+		} else if (current_tpt > (100 * tbl->expected_tpt[low])) {
+			IWL_DEBUG_RATE(mvm,
+				       "Current TPT is higher than max expected in low rate. Avoid downscale\n");
+			action = RS_ACTION_STAY;
+		} else {
+			IWL_DEBUG_RATE(mvm, "Decrease rate\n");
+		}
+	}
+
+	return action;
+}
+
+static bool rs_stbc_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+			  struct iwl_lq_sta *lq_sta)
+{
+	/* stbc_capable is set when our chip supports Tx STBC and the peer
+	 * is an HT/VHT STA that supports STBC of at least 1*SS
+	 */
+	if (!lq_sta->stbc_capable)
+		return false;
+
+	if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
+		return false;
+
+	return true;
+}
+
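+/*
+ * Compute the neighbouring txp reduction levels.  Note that "weaker"
+ * means more power reduction (a larger index) and "stronger" means
+ * less; e.g. starting at index 0 (full power), *stronger is
+ * TPC_INVALID since we can't increase power further.
+ */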
+static void rs_get_adjacent_txp(struct iwl_mvm *mvm, int index,
+				int *weaker, int *stronger)
+{
+	*weaker = index + IWL_MVM_RS_TPC_TX_POWER_STEP;
+	if (*weaker > TPC_MAX_REDUCTION)
+		*weaker = TPC_INVALID;
+
+	*stronger = index - IWL_MVM_RS_TPC_TX_POWER_STEP;
+	if (*stronger < 0)
+		*stronger = TPC_INVALID;
+}
+
+static bool rs_tpc_allowed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			   struct rs_rate *rate, enum nl80211_band band)
+{
+	int index = rate->index;
+	bool cam = (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM);
+	bool sta_ps_disabled = (vif->type == NL80211_IFTYPE_STATION &&
+				!vif->bss_conf.ps);
+
+	IWL_DEBUG_RATE(mvm, "cam: %d sta_ps_disabled %d\n",
+		       cam, sta_ps_disabled);
+	/*
+	 * Allow TPC only if power management is enabled, or if BT coex
+	 * activity grade allows it and we are on 2.4GHz.
+	 */
+	if ((cam || sta_ps_disabled) &&
+	    !iwl_mvm_bt_coex_is_tpc_allowed(mvm, band))
+		return false;
+
+	IWL_DEBUG_RATE(mvm, "check rate, table type: %d\n", rate->type);
+	if (is_legacy(rate))
+		return index == IWL_RATE_54M_INDEX;
+	if (is_ht(rate))
+		return index == IWL_RATE_MCS_7_INDEX;
+	if (is_vht(rate))
+		return index == IWL_RATE_MCS_7_INDEX ||
+		       index == IWL_RATE_MCS_8_INDEX ||
+		       index == IWL_RATE_MCS_9_INDEX;
+
+	WARN_ON_ONCE(1);
+	return false;
+}
+
+enum tpc_action {
+	TPC_ACTION_STAY,
+	TPC_ACTION_DECREASE,
+	TPC_ACTION_INCREASE,
+	TPC_ACTION_NO_RESTRICTION,
+};
+
+static enum tpc_action rs_get_tpc_action(struct iwl_mvm *mvm,
+					 s32 sr, int weak, int strong,
+					 int current_tpt,
+					 int weak_tpt, int strong_tpt)
+{
+	/* stay until we have valid tpt */
+	if (current_tpt == IWL_INVALID_VALUE) {
+		IWL_DEBUG_RATE(mvm, "no current tpt. stay.\n");
+		return TPC_ACTION_STAY;
+	}
+
+	/* Too many failures: remove all txp restrictions (full power) */
+	if (sr <= RS_PERCENT(IWL_MVM_RS_TPC_SR_FORCE_INCREASE) ||
+	    current_tpt == 0) {
+		IWL_DEBUG_RATE(mvm, "increase txp because of weak SR\n");
+		return TPC_ACTION_NO_RESTRICTION;
+	}
+
+	/* try decreasing first if applicable */
+	if (sr >= RS_PERCENT(IWL_MVM_RS_TPC_SR_NO_INCREASE) &&
+	    weak != TPC_INVALID) {
+		if (weak_tpt == IWL_INVALID_VALUE &&
+		    (strong_tpt == IWL_INVALID_VALUE ||
+		     current_tpt >= strong_tpt)) {
+			IWL_DEBUG_RATE(mvm,
+				       "no weak txp measurement. decrease txp\n");
+			return TPC_ACTION_DECREASE;
+		}
+
+		if (weak_tpt > current_tpt) {
+			IWL_DEBUG_RATE(mvm,
+				       "lower txp has better tpt. decrease txp\n");
+			return TPC_ACTION_DECREASE;
+		}
+	}
+
+	/* next, increase if needed */
+	if (sr < RS_PERCENT(IWL_MVM_RS_TPC_SR_NO_INCREASE) &&
+	    strong != TPC_INVALID) {
+		if (weak_tpt == IWL_INVALID_VALUE &&
+		    strong_tpt != IWL_INVALID_VALUE &&
+		    current_tpt < strong_tpt) {
+			IWL_DEBUG_RATE(mvm,
+				       "higher txp has better tpt. increase txp\n");
+			return TPC_ACTION_INCREASE;
+		}
+
+		if (weak_tpt < current_tpt &&
+		    (strong_tpt == IWL_INVALID_VALUE ||
+		     strong_tpt > current_tpt)) {
+			IWL_DEBUG_RATE(mvm,
+				       "lower txp has worse tpt. increase txp\n");
+			return TPC_ACTION_INCREASE;
+		}
+	}
+
+	IWL_DEBUG_RATE(mvm, "no need to increase or decrease txp - stay\n");
+	return TPC_ACTION_STAY;
+}
+
+static bool rs_tpc_perform(struct iwl_mvm *mvm,
+			   struct ieee80211_sta *sta,
+			   struct iwl_lq_sta *lq_sta,
+			   struct iwl_scale_tbl_info *tbl)
+{
+	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+	struct ieee80211_vif *vif = mvm_sta->vif;
+	struct ieee80211_chanctx_conf *chanctx_conf;
+	enum nl80211_band band;
+	struct iwl_rate_scale_data *window;
+	struct rs_rate *rate = &tbl->rate;
+	enum tpc_action action;
+	s32 sr;
+	u8 cur = lq_sta->lq.reduced_tpc;
+	int current_tpt;
+	int weak, strong;
+	int weak_tpt = IWL_INVALID_VALUE, strong_tpt = IWL_INVALID_VALUE;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+	if (lq_sta->pers.dbg_fixed_txp_reduction <= TPC_MAX_REDUCTION) {
+		IWL_DEBUG_RATE(mvm, "fixed tpc: %d\n",
+			       lq_sta->pers.dbg_fixed_txp_reduction);
+		lq_sta->lq.reduced_tpc = lq_sta->pers.dbg_fixed_txp_reduction;
+		return cur != lq_sta->pers.dbg_fixed_txp_reduction;
+	}
+#endif
+
+	rcu_read_lock();
+	chanctx_conf = rcu_dereference(vif->chanctx_conf);
+	if (WARN_ON(!chanctx_conf))
+		band = NUM_NL80211_BANDS;
+	else
+		band = chanctx_conf->def.chan->band;
+	rcu_read_unlock();
+
+	if (!rs_tpc_allowed(mvm, vif, rate, band)) {
+		IWL_DEBUG_RATE(mvm,
+			       "tpc is not allowed. remove txp restrictions\n");
+		lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION;
+		return cur != TPC_NO_REDUCTION;
+	}
+
+	rs_get_adjacent_txp(mvm, cur, &weak, &strong);
+
+	/* Collect measured throughputs for current and adjacent txp levels */
+	window = tbl->tpc_win;
+	sr = window[cur].success_ratio;
+	current_tpt = window[cur].average_tpt;
+	if (weak != TPC_INVALID)
+		weak_tpt = window[weak].average_tpt;
+	if (strong != TPC_INVALID)
+		strong_tpt = window[strong].average_tpt;
+
+	IWL_DEBUG_RATE(mvm,
+		       "(TPC: %d): cur_tpt %d SR %d weak %d strong %d weak_tpt %d strong_tpt %d\n",
+		       cur, current_tpt, sr, weak, strong,
+		       weak_tpt, strong_tpt);
+
+	action = rs_get_tpc_action(mvm, sr, weak, strong,
+				   current_tpt, weak_tpt, strong_tpt);
+
+	/* override actions if we are on the edge */
+	if (weak == TPC_INVALID && action == TPC_ACTION_DECREASE) {
+		IWL_DEBUG_RATE(mvm, "already in lowest txp, stay\n");
+		action = TPC_ACTION_STAY;
+	} else if (strong == TPC_INVALID &&
+		   (action == TPC_ACTION_INCREASE ||
+		    action == TPC_ACTION_NO_RESTRICTION)) {
+		IWL_DEBUG_RATE(mvm, "already in highest txp, stay\n");
+		action = TPC_ACTION_STAY;
+	}
+
+	switch (action) {
+	case TPC_ACTION_DECREASE:
+		lq_sta->lq.reduced_tpc = weak;
+		return true;
+	case TPC_ACTION_INCREASE:
+		lq_sta->lq.reduced_tpc = strong;
+		return true;
+	case TPC_ACTION_NO_RESTRICTION:
+		lq_sta->lq.reduced_tpc = TPC_NO_REDUCTION;
+		return true;
+	case TPC_ACTION_STAY:
+		/* do nothing */
+		break;
+	}
+	return false;
+}
+
+/*
+ * Do rate scaling and search for new modulation mode.
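+ *
+ * Called for every Tx status update: evaluate the history window of the
+ * current rate, pick an up/down/stay action via rs_get_rate_action(),
+ * push a new LQ command to the uCode if the rate changed, and
+ * periodically start a search cycle across the other columns.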
+ */
+static void rs_rate_scale_perform(struct iwl_mvm *mvm,
+				  struct ieee80211_sta *sta,
+				  struct iwl_lq_sta *lq_sta,
+				  int tid, bool ndp)
+{
+	int low = IWL_RATE_INVALID;
+	int high = IWL_RATE_INVALID;
+	int index;
+	struct iwl_rate_scale_data *window = NULL;
+	int current_tpt = IWL_INVALID_VALUE;
+	int low_tpt = IWL_INVALID_VALUE;
+	int high_tpt = IWL_INVALID_VALUE;
+	u32 fail_count;
+	enum rs_action scale_action = RS_ACTION_STAY;
+	u16 rate_mask;
+	u8 update_lq = 0;
+	struct iwl_scale_tbl_info *tbl, *tbl1;
+	u8 active_tbl = 0;
+	u8 done_search = 0;
+	u16 high_low;
+	s32 sr;
+	u8 prev_agg = lq_sta->is_agg;
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct rs_rate *rate;
+
+	lq_sta->is_agg = !!mvmsta->agg_tids;
+
+	/*
+	 * Select rate-scale / modulation-mode table to work with in
+	 * the rest of this function:  "search" if searching for better
+	 * modulation mode, or "active" if doing rate scaling within a mode.
+	 */
+	if (!lq_sta->search_better_tbl)
+		active_tbl = lq_sta->active_tbl;
+	else
+		active_tbl = 1 - lq_sta->active_tbl;
+
+	tbl = &(lq_sta->lq_info[active_tbl]);
+	rate = &tbl->rate;
+
+	if (prev_agg != lq_sta->is_agg) {
+		IWL_DEBUG_RATE(mvm,
+			       "Aggregation changed: prev %d current %d. Update expected TPT table\n",
+			       prev_agg, lq_sta->is_agg);
+		rs_set_expected_tpt_table(lq_sta, tbl);
+		rs_rate_scale_clear_tbl_windows(mvm, tbl);
+	}
+
+	/* current tx rate */
+	index = rate->index;
+
+	/* rates available for this association, and for modulation mode */
+	rate_mask = rs_get_supported_rates(lq_sta, rate);
+
+	if (!(BIT(index) & rate_mask)) {
+		IWL_ERR(mvm, "Current Rate is not valid\n");
+		if (lq_sta->search_better_tbl) {
+			/* revert to active table if search table is not valid */
+			rate->type = LQ_NONE;
+			lq_sta->search_better_tbl = 0;
+			tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+			rs_update_rate_tbl(mvm, sta, lq_sta, tbl);
+		}
+		return;
+	}
+
+	/* Get expected throughput table and history window for current rate */
+	if (!tbl->expected_tpt) {
+		IWL_ERR(mvm, "tbl->expected_tpt is NULL\n");
+		return;
+	}
+
+	/* TODO: handle rate_idx_mask and rate_idx_mcs_mask */
+	window = &(tbl->win[index]);
+
+	/*
+	 * If there is not enough history to calculate actual average
+	 * throughput, keep analyzing results of more tx frames, without
+	 * changing rate or mode (bypass most of the rest of this function).
+	 * Set up new rate table in uCode only if old rate is not supported
+	 * in current association (use new rate found above).
+	 */
+	fail_count = window->counter - window->success_counter;
+	if ((fail_count < IWL_MVM_RS_RATE_MIN_FAILURE_TH) &&
+	    (window->success_counter < IWL_MVM_RS_RATE_MIN_SUCCESS_TH)) {
+		IWL_DEBUG_RATE(mvm,
+			       "%s: Test Window: succ %d total %d\n",
+			       rs_pretty_rate(rate),
+			       window->success_counter, window->counter);
+
+		/* Can't calculate this yet; not enough history */
+		window->average_tpt = IWL_INVALID_VALUE;
+
+		/* Should we stay with this modulation mode,
+		 * or search for a new one? */
+		rs_stay_in_table(lq_sta, false);
+
+		return;
+	}
+
+	/* If we are searching for better modulation mode, check success. */
+	if (lq_sta->search_better_tbl) {
+		/* If good success, continue using the "search" mode;
+		 * no need to send new link quality command, since we're
+		 * continuing to use the setup that we've been trying. */
+		if (window->average_tpt > lq_sta->last_tpt) {
+			IWL_DEBUG_RATE(mvm,
+				       "SWITCHING TO NEW TABLE SR: %d "
+				       "cur-tpt %d old-tpt %d\n",
+				       window->success_ratio,
+				       window->average_tpt,
+				       lq_sta->last_tpt);
+
+			/* Swap tables; "search" becomes "active" */
+			lq_sta->active_tbl = active_tbl;
+			current_tpt = window->average_tpt;
+		/* Else poor success; go back to mode in "active" table */
+		} else {
+			IWL_DEBUG_RATE(mvm,
+				       "GOING BACK TO THE OLD TABLE: SR %d "
+				       "cur-tpt %d old-tpt %d\n",
+				       window->success_ratio,
+				       window->average_tpt,
+				       lq_sta->last_tpt);
+
+			/* Nullify "search" table */
+			rate->type = LQ_NONE;
+
+			/* Revert to "active" table */
+			active_tbl = lq_sta->active_tbl;
+			tbl = &(lq_sta->lq_info[active_tbl]);
+
+			/* Revert to "active" rate and throughput info */
+			index = tbl->rate.index;
+			current_tpt = lq_sta->last_tpt;
+
+			/* Need to set up a new rate table in uCode */
+			update_lq = 1;
+		}
+
+		/* Either way, we've made a decision; modulation mode
+		 * search is done, allow rate adjustment next time. */
+		lq_sta->search_better_tbl = 0;
+		done_search = 1;	/* Don't switch modes below! */
+		goto lq_update;
+	}
+
+	/* (Else) not in search of better modulation mode, try for better
+	 * starting rate, while staying in this mode. */
+	high_low = rs_get_adjacent_rate(mvm, index, rate_mask, rate->type);
+	low = high_low & 0xff;
+	high = (high_low >> 8) & 0xff;
+
+	/* TODO: handle rate_idx_mask and rate_idx_mcs_mask */
+
+	sr = window->success_ratio;
+
+	/* Collect measured throughputs for current and adjacent rates */
+	current_tpt = window->average_tpt;
+	if (low != IWL_RATE_INVALID)
+		low_tpt = tbl->win[low].average_tpt;
+	if (high != IWL_RATE_INVALID)
+		high_tpt = tbl->win[high].average_tpt;
+
+	IWL_DEBUG_RATE(mvm,
+		       "%s: cur_tpt %d SR %d low %d high %d low_tpt %d high_tpt %d\n",
+		       rs_pretty_rate(rate), current_tpt, sr,
+		       low, high, low_tpt, high_tpt);
+
+	scale_action = rs_get_rate_action(mvm, tbl, sr, low, high,
+					  current_tpt, low_tpt, high_tpt);
+
+	/* Force a search in case BT doesn't like us being in MIMO */
+	if (is_mimo(rate) &&
+	    !iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta)) {
+		IWL_DEBUG_RATE(mvm,
+			       "BT Coex forbids MIMO. Search for new config\n");
+		rs_stay_in_table(lq_sta, true);
+		goto lq_update;
+	}
+
+	switch (scale_action) {
+	case RS_ACTION_DOWNSCALE:
+		/* Decrease starting rate, update uCode's rate table */
+		if (low != IWL_RATE_INVALID) {
+			update_lq = 1;
+			index = low;
+		} else {
+			IWL_DEBUG_RATE(mvm,
+				       "At the bottom rate. Can't decrease\n");
+		}
+
+		break;
+	case RS_ACTION_UPSCALE:
+		/* Increase starting rate, update uCode's rate table */
+		if (high != IWL_RATE_INVALID) {
+			update_lq = 1;
+			index = high;
+		} else {
+			IWL_DEBUG_RATE(mvm,
+				       "At the top rate. Can't increase\n");
+		}
+
+		break;
+	case RS_ACTION_STAY:
+		/* No change */
+		if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN)
+			update_lq = rs_tpc_perform(mvm, sta, lq_sta, tbl);
+		break;
+	default:
+		break;
+	}
+
+lq_update:
+	/* Replace uCode's rate table for the destination station. */
+	if (update_lq) {
+		tbl->rate.index = index;
+		if (IWL_MVM_RS_80_20_FAR_RANGE_TWEAK)
+			rs_tweak_rate_tbl(mvm, sta, lq_sta, tbl, scale_action);
+		rs_set_amsdu_len(mvm, sta, tbl, scale_action);
+		rs_update_rate_tbl(mvm, sta, lq_sta, tbl);
+	}
+
+	rs_stay_in_table(lq_sta, false);
+
+	/*
+	 * Search for new modulation mode if we're:
+	 * 1)  Not changing rates right now
+	 * 2)  Not just finishing up a search
+	 * 3)  Allowing a new search
+	 */
+	if (!update_lq && !done_search &&
+	    lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED
+	    && window->counter) {
+		enum rs_column next_column;
+
+		/* Save current throughput to compare with "search" throughput */
+		lq_sta->last_tpt = current_tpt;
+
+		IWL_DEBUG_RATE(mvm,
+			       "Start Search: update_lq %d done_search %d rs_state %d win->counter %d\n",
+			       update_lq, done_search, lq_sta->rs_state,
+			       window->counter);
+
+		next_column = rs_get_next_column(mvm, lq_sta, sta, tbl);
+		if (next_column != RS_COLUMN_INVALID) {
+			int ret = rs_switch_to_column(mvm, lq_sta, sta,
+						      next_column);
+			if (!ret)
+				lq_sta->search_better_tbl = 1;
+		} else {
+			IWL_DEBUG_RATE(mvm,
+				       "No more columns to explore in search cycle. Go to RS_STATE_SEARCH_CYCLE_ENDED\n");
+			lq_sta->rs_state = RS_STATE_SEARCH_CYCLE_ENDED;
+		}
+
+		/* If new "search" mode was selected, set up in uCode table */
+		if (lq_sta->search_better_tbl) {
+			/* Access the "search" table, clear its history. */
+			tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+			rs_rate_scale_clear_tbl_windows(mvm, tbl);
+
+			/* Use new "search" start rate */
+			index = tbl->rate.index;
+
+			rs_dump_rate(mvm, &tbl->rate,
+				     "Switch to SEARCH TABLE:");
+			rs_update_rate_tbl(mvm, sta, lq_sta, tbl);
+		} else {
+			done_search = 1;
+		}
+	}
+
+	if (!ndp)
+		rs_tl_turn_on_agg(mvm, mvmsta, tid, lq_sta, sta);
+
+	if (done_search && lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_ENDED) {
+		tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
+		rs_set_stay_in_table(mvm, is_legacy(&tbl1->rate), lq_sta);
+	}
+}
+
+struct rs_init_rate_info {
+	s8 rssi;
+	u8 rate_idx;
+};
+
+static const struct rs_init_rate_info rs_optimal_rates_24ghz_legacy[] = {
+	{ -60, IWL_RATE_54M_INDEX },
+	{ -64, IWL_RATE_48M_INDEX },
+	{ -68, IWL_RATE_36M_INDEX },
+	{ -80, IWL_RATE_24M_INDEX },
+	{ -84, IWL_RATE_18M_INDEX },
+	{ -85, IWL_RATE_12M_INDEX },
+	{ -86, IWL_RATE_11M_INDEX },
+	{ -88, IWL_RATE_5M_INDEX  },
+	{ -90, IWL_RATE_2M_INDEX  },
+	{ S8_MIN, IWL_RATE_1M_INDEX },
+};
+
+static const struct rs_init_rate_info rs_optimal_rates_5ghz_legacy[] = {
+	{ -60, IWL_RATE_54M_INDEX },
+	{ -64, IWL_RATE_48M_INDEX },
+	{ -72, IWL_RATE_36M_INDEX },
+	{ -80, IWL_RATE_24M_INDEX },
+	{ -84, IWL_RATE_18M_INDEX },
+	{ -85, IWL_RATE_12M_INDEX },
+	{ -87, IWL_RATE_9M_INDEX  },
+	{ S8_MIN, IWL_RATE_6M_INDEX },
+};
+
+static const struct rs_init_rate_info rs_optimal_rates_ht[] = {
+	{ -60, IWL_RATE_MCS_7_INDEX },
+	{ -64, IWL_RATE_MCS_6_INDEX },
+	{ -68, IWL_RATE_MCS_5_INDEX },
+	{ -72, IWL_RATE_MCS_4_INDEX },
+	{ -80, IWL_RATE_MCS_3_INDEX },
+	{ -84, IWL_RATE_MCS_2_INDEX },
+	{ -85, IWL_RATE_MCS_1_INDEX },
+	{ S8_MIN, IWL_RATE_MCS_0_INDEX},
+};
+
+/* MCS index 9 is not valid for 20MHz VHT channel width,
+ * but is ok for 40, 80 and 160MHz channels.
+ */
+static const struct rs_init_rate_info rs_optimal_rates_vht_20mhz[] = {
+	{ -60, IWL_RATE_MCS_8_INDEX },
+	{ -64, IWL_RATE_MCS_7_INDEX },
+	{ -68, IWL_RATE_MCS_6_INDEX },
+	{ -72, IWL_RATE_MCS_5_INDEX },
+	{ -80, IWL_RATE_MCS_4_INDEX },
+	{ -84, IWL_RATE_MCS_3_INDEX },
+	{ -85, IWL_RATE_MCS_2_INDEX },
+	{ -87, IWL_RATE_MCS_1_INDEX },
+	{ S8_MIN, IWL_RATE_MCS_0_INDEX},
+};
+
+static const struct rs_init_rate_info rs_optimal_rates_vht[] = {
+	{ -60, IWL_RATE_MCS_9_INDEX },
+	{ -64, IWL_RATE_MCS_8_INDEX },
+	{ -68, IWL_RATE_MCS_7_INDEX },
+	{ -72, IWL_RATE_MCS_6_INDEX },
+	{ -80, IWL_RATE_MCS_5_INDEX },
+	{ -84, IWL_RATE_MCS_4_INDEX },
+	{ -85, IWL_RATE_MCS_3_INDEX },
+	{ -87, IWL_RATE_MCS_2_INDEX },
+	{ -88, IWL_RATE_MCS_1_INDEX },
+	{ S8_MIN, IWL_RATE_MCS_0_INDEX },
+};
+
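+/*
+ * Each table above maps the last Rx RSSI to the highest rate index we
+ * expect to sustain; rs_get_optimal_rate() returns the first entry
+ * whose RSSI threshold the station meets, e.g. an RSSI of -70 dBm on a
+ * VHT 80MHz link selects IWL_RATE_MCS_6_INDEX.
+ */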
+#define IWL_RS_LOW_RSSI_THRESHOLD (-76) /* dBm */
+
+/* Init the optimal rate based on STA caps
+ * This combined with rssi is used to report the last tx rate
+ * to userspace when we haven't transmitted enough frames.
+ */
+static void rs_init_optimal_rate(struct iwl_mvm *mvm,
+				 struct ieee80211_sta *sta,
+				 struct iwl_lq_sta *lq_sta)
+{
+	struct rs_rate *rate = &lq_sta->optimal_rate;
+
+	if (lq_sta->max_mimo2_rate_idx != IWL_RATE_INVALID)
+		rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
+	else if (lq_sta->max_siso_rate_idx != IWL_RATE_INVALID)
+		rate->type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
+	else if (lq_sta->band == NL80211_BAND_5GHZ)
+		rate->type = LQ_LEGACY_A;
+	else
+		rate->type = LQ_LEGACY_G;
+
+	rate->bw = rs_bw_from_sta_bw(sta);
+	rate->sgi = rs_sgi_allow(mvm, sta, rate, NULL);
+
+	/* ANT/LDPC/STBC aren't relevant for the rate reported to userspace */
+
+	if (is_mimo(rate)) {
+		lq_sta->optimal_rate_mask = lq_sta->active_mimo2_rate;
+	} else if (is_siso(rate)) {
+		lq_sta->optimal_rate_mask = lq_sta->active_siso_rate;
+	} else {
+		lq_sta->optimal_rate_mask = lq_sta->active_legacy_rate;
+
+		if (lq_sta->band == NL80211_BAND_5GHZ) {
+			lq_sta->optimal_rates = rs_optimal_rates_5ghz_legacy;
+			lq_sta->optimal_nentries =
+				ARRAY_SIZE(rs_optimal_rates_5ghz_legacy);
+		} else {
+			lq_sta->optimal_rates = rs_optimal_rates_24ghz_legacy;
+			lq_sta->optimal_nentries =
+				ARRAY_SIZE(rs_optimal_rates_24ghz_legacy);
+		}
+	}
+
+	if (is_vht(rate)) {
+		if (rate->bw == RATE_MCS_CHAN_WIDTH_20) {
+			lq_sta->optimal_rates = rs_optimal_rates_vht_20mhz;
+			lq_sta->optimal_nentries =
+				ARRAY_SIZE(rs_optimal_rates_vht_20mhz);
+		} else {
+			lq_sta->optimal_rates = rs_optimal_rates_vht;
+			lq_sta->optimal_nentries =
+				ARRAY_SIZE(rs_optimal_rates_vht);
+		}
+	} else if (is_ht(rate)) {
+		lq_sta->optimal_rates = rs_optimal_rates_ht;
+		lq_sta->optimal_nentries = ARRAY_SIZE(rs_optimal_rates_ht);
+	}
+}
+
+/* Compute the optimal rate index based on RSSI */
+static struct rs_rate *rs_get_optimal_rate(struct iwl_mvm *mvm,
+					   struct iwl_lq_sta *lq_sta)
+{
+	struct rs_rate *rate = &lq_sta->optimal_rate;
+	int i;
+
+	rate->index = find_first_bit(&lq_sta->optimal_rate_mask,
+				     BITS_PER_LONG);
+
+	for (i = 0; i < lq_sta->optimal_nentries; i++) {
+		int rate_idx = lq_sta->optimal_rates[i].rate_idx;
+
+		if ((lq_sta->pers.last_rssi >= lq_sta->optimal_rates[i].rssi) &&
+		    (BIT(rate_idx) & lq_sta->optimal_rate_mask)) {
+			rate->index = rate_idx;
+			break;
+		}
+	}
+
+	return rate;
+}
+
+/* Choose an initial legacy rate and antenna to use based on the RSSI
+ * of the last Rx.
+ */
+static void rs_get_initial_rate(struct iwl_mvm *mvm,
+				struct ieee80211_sta *sta,
+				struct iwl_lq_sta *lq_sta,
+				enum nl80211_band band,
+				struct rs_rate *rate)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	int i, nentries;
+	unsigned long active_rate;
+	s8 best_rssi = S8_MIN;
+	u8 best_ant = ANT_NONE;
+	u8 valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm);
+	const struct rs_init_rate_info *initial_rates;
+
+	for (i = 0; i < ARRAY_SIZE(lq_sta->pers.chain_signal); i++) {
+		if (!(lq_sta->pers.chains & BIT(i)))
+			continue;
+
+		if (lq_sta->pers.chain_signal[i] > best_rssi) {
+			best_rssi = lq_sta->pers.chain_signal[i];
+			best_ant = BIT(i);
+		}
+	}
+
+	IWL_DEBUG_RATE(mvm, "Best ANT: %s Best RSSI: %d\n",
+		       rs_pretty_ant(best_ant), best_rssi);
+
+	if (best_ant != ANT_A && best_ant != ANT_B)
+		rate->ant = first_antenna(valid_tx_ant);
+	else
+		rate->ant = best_ant;
+
+	rate->sgi = false;
+	rate->ldpc = false;
+	rate->bw = RATE_MCS_CHAN_WIDTH_20;
+
+	rate->index = find_first_bit(&lq_sta->active_legacy_rate,
+				     BITS_PER_LONG);
+
+	if (band == NL80211_BAND_5GHZ) {
+		rate->type = LQ_LEGACY_A;
+		initial_rates = rs_optimal_rates_5ghz_legacy;
+		nentries = ARRAY_SIZE(rs_optimal_rates_5ghz_legacy);
+	} else {
+		rate->type = LQ_LEGACY_G;
+		initial_rates = rs_optimal_rates_24ghz_legacy;
+		nentries = ARRAY_SIZE(rs_optimal_rates_24ghz_legacy);
+	}
+
+	if (!IWL_MVM_RS_RSSI_BASED_INIT_RATE)
+		goto out;
+
+	/* Start from a higher rate if the corresponding debug capability
+	 * is enabled. The rate is chosen according to AP capabilities.
+	 * For VHT/HT, fall back to legacy rates when the RSSI is low.
+	 */
+	if (sta->vht_cap.vht_supported &&
+	    best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) {
+		/*
+		 * In AP mode, when a new station associates, rs is initialized
+		 * immediately upon association completion, before the phy
+		 * context is updated with the association parameters, so the
+		 * sta bandwidth might be wider than the phy context allows.
+		 * To avoid this issue, always initialize rs with a 20 MHz
+		 * bandwidth rate, and after authorization, when the phy context
+		 * is already up-to-date, re-init rs with the correct bw.
+		 */
+		u32 bw = mvmsta->sta_state < IEEE80211_STA_AUTHORIZED ?
+				RATE_MCS_CHAN_WIDTH_20 : rs_bw_from_sta_bw(sta);
+
+		switch (bw) {
+		case RATE_MCS_CHAN_WIDTH_40:
+		case RATE_MCS_CHAN_WIDTH_80:
+		case RATE_MCS_CHAN_WIDTH_160:
+			initial_rates = rs_optimal_rates_vht;
+			nentries = ARRAY_SIZE(rs_optimal_rates_vht);
+			break;
+		case RATE_MCS_CHAN_WIDTH_20:
+			initial_rates = rs_optimal_rates_vht_20mhz;
+			nentries = ARRAY_SIZE(rs_optimal_rates_vht_20mhz);
+			break;
+		default:
+			IWL_ERR(mvm, "Invalid BW %d\n", sta->bandwidth);
+			goto out;
+		}
+
+		active_rate = lq_sta->active_siso_rate;
+		rate->type = LQ_VHT_SISO;
+		rate->bw = bw;
+	} else if (sta->ht_cap.ht_supported &&
+		   best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) {
+		initial_rates = rs_optimal_rates_ht;
+		nentries = ARRAY_SIZE(rs_optimal_rates_ht);
+		active_rate = lq_sta->active_siso_rate;
+		rate->type = LQ_HT_SISO;
+	} else {
+		active_rate = lq_sta->active_legacy_rate;
+	}
+
+	for (i = 0; i < nentries; i++) {
+		int rate_idx = initial_rates[i].rate_idx;
+
+		if ((best_rssi >= initial_rates[i].rssi) &&
+		    (BIT(rate_idx) & active_rate)) {
+			rate->index = rate_idx;
+			break;
+		}
+	}
+
+out:
+	rs_dump_rate(mvm, rate, "INITIAL");
+}
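+
+/*
+ * Worked example for rs_get_initial_rate(): a VHT STA at 80 MHz that is
+ * already authorized, with a best per-chain RSSI of -65 dBm (above
+ * IWL_RS_LOW_RSSI_THRESHOLD), walks rs_optimal_rates_vht and settles on
+ * IWL_RATE_MCS_7_INDEX (-65 >= -68), assuming MCS7 is set in
+ * active_siso_rate. At or below the threshold, the legacy tables are used
+ * instead.
+ */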
+
+/* Save info about RSSI of last Rx */
+void rs_update_last_rssi(struct iwl_mvm *mvm,
+			 struct iwl_mvm_sta *mvmsta,
+			 struct ieee80211_rx_status *rx_status)
+{
+	struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
+	int i;
+
+	lq_sta->pers.chains = rx_status->chains;
+	lq_sta->pers.chain_signal[0] = rx_status->chain_signal[0];
+	lq_sta->pers.chain_signal[1] = rx_status->chain_signal[1];
+	lq_sta->pers.chain_signal[2] = rx_status->chain_signal[2];
+	lq_sta->pers.last_rssi = S8_MIN;
+
+	for (i = 0; i < ARRAY_SIZE(lq_sta->pers.chain_signal); i++) {
+		if (!(lq_sta->pers.chains & BIT(i)))
+			continue;
+
+		if (lq_sta->pers.chain_signal[i] > lq_sta->pers.last_rssi)
+			lq_sta->pers.last_rssi = lq_sta->pers.chain_signal[i];
+	}
+}
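+
+/*
+ * E.g. with rx_status->chains == 0x3 (chains A and B) and chain_signal
+ * {-62, -58, 0}: the loop above maximizes over set chains only, so
+ * last_rssi becomes -58 and the unused third entry is ignored.
+ */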
+
+/**
+ * rs_initialize_lq - Initialize a station's hardware rate table
+ *
+ * The uCode's station table contains a table of fallback rates
+ * for automatic fallback during transmission.
+ *
+ * NOTE: This sets up a default set of values.  These will be replaced later
+ *       if the driver's iwl-agn-rs rate scaling algorithm is used, instead of
+ *       rc80211_simple.
+ *
+ * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
+ *       calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
+ *       which requires station table entry to exist).
+ */
+static void rs_initialize_lq(struct iwl_mvm *mvm,
+			     struct ieee80211_sta *sta,
+			     struct iwl_lq_sta *lq_sta,
+			     enum nl80211_band band)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_scale_tbl_info *tbl;
+	struct rs_rate *rate;
+	u8 active_tbl = 0;
+
+	if (!sta || !lq_sta)
+		return;
+
+	if (!lq_sta->search_better_tbl)
+		active_tbl = lq_sta->active_tbl;
+	else
+		active_tbl = 1 - lq_sta->active_tbl;
+
+	tbl = &(lq_sta->lq_info[active_tbl]);
+	rate = &tbl->rate;
+
+	rs_get_initial_rate(mvm, sta, lq_sta, band, rate);
+	rs_init_optimal_rate(mvm, sta, lq_sta);
+
+	WARN_ONCE(rate->ant != ANT_A && rate->ant != ANT_B,
+		  "ant: 0x%x, chains 0x%x, fw tx ant: 0x%x, nvm tx ant: 0x%x\n",
+		  rate->ant, lq_sta->pers.chains, mvm->fw->valid_tx_ant,
+		  mvm->nvm_data ? mvm->nvm_data->valid_tx_ant : ANT_INVALID);
+
+	tbl->column = rs_get_column_from_rate(rate);
+
+	rs_set_expected_tpt_table(lq_sta, tbl);
+	rs_fill_lq_cmd(mvm, sta, lq_sta, rate);
+	/* TODO restore station should remember the lq cmd */
+	iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq,
+			    mvmsta->sta_state < IEEE80211_STA_AUTHORIZED);
+}
+
+static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
+			    void *mvm_sta,
+			    struct ieee80211_tx_rate_control *txrc)
+{
+	struct iwl_op_mode *op_mode = mvm_r;
+	struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
+	struct sk_buff *skb = txrc->skb;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct iwl_lq_sta *lq_sta;
+	struct rs_rate *optimal_rate;
+	u32 last_ucode_rate;
+
+	if (sta && !iwl_mvm_sta_from_mac80211(sta)->vif) {
+		/* if vif isn't initialized mvm doesn't know about
+		 * this station, so don't do anything with it
+		 */
+		sta = NULL;
+		mvm_sta = NULL;
+	}
+
+	/* Send management frames and NO_ACK data using lowest rate. */
+	if (rate_control_send_low(sta, mvm_sta, txrc))
+		return;
+
+	if (!mvm_sta)
+		return;
+
+	lq_sta = mvm_sta;
+	iwl_mvm_hwrate_to_tx_rate(lq_sta->last_rate_n_flags,
+				  info->band, &info->control.rates[0]);
+	info->control.rates[0].count = 1;
+
+	/* Report the optimal rate based on rssi and STA caps if we haven't
+	 * converged yet (too little traffic) or exploring other modulations
+	 */
+	if (lq_sta->rs_state != RS_STATE_STAY_IN_COLUMN) {
+		optimal_rate = rs_get_optimal_rate(mvm, lq_sta);
+		last_ucode_rate = ucode_rate_from_rs_rate(mvm,
+							  optimal_rate);
+		iwl_mvm_hwrate_to_tx_rate(last_ucode_rate, info->band,
+					  &txrc->reported_rate);
+	}
+}
+
+static void *rs_drv_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
+			      gfp_t gfp)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_op_mode *op_mode = (struct iwl_op_mode *)mvm_rate;
+	struct iwl_mvm *mvm  = IWL_OP_MODE_GET_MVM(op_mode);
+	struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
+
+	IWL_DEBUG_RATE(mvm, "create station rate scale window\n");
+
+	lq_sta->pers.drv = mvm;
+#ifdef CONFIG_MAC80211_DEBUGFS
+	lq_sta->pers.dbg_fixed_rate = 0;
+	lq_sta->pers.dbg_fixed_txp_reduction = TPC_INVALID;
+	lq_sta->pers.ss_force = RS_SS_FORCE_NONE;
+#endif
+	lq_sta->pers.chains = 0;
+	memset(lq_sta->pers.chain_signal, 0, sizeof(lq_sta->pers.chain_signal));
+	lq_sta->pers.last_rssi = S8_MIN;
+
+	return lq_sta;
+}
+
+static int rs_vht_highest_rx_mcs_index(struct ieee80211_sta_vht_cap *vht_cap,
+				       int nss)
+{
+	u16 rx_mcs = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) &
+		(0x3 << (2 * (nss - 1)));
+	rx_mcs >>= (2 * (nss - 1));
+
+	if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_7)
+		return IWL_RATE_MCS_7_INDEX;
+	else if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_8)
+		return IWL_RATE_MCS_8_INDEX;
+	else if (rx_mcs == IEEE80211_VHT_MCS_SUPPORT_0_9)
+		return IWL_RATE_MCS_9_INDEX;
+
+	WARN_ON_ONCE(rx_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED);
+	return -1;
+}
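+
+/*
+ * E.g. a peer advertising rx_mcs_map == 0xfffa (MCS 0-9 for NSS 1 and 2,
+ * higher NSS unsupported): for nss == 1 the function extracts
+ * 0xfffa & 0x3 == 2 (IEEE80211_VHT_MCS_SUPPORT_0_9) and returns
+ * IWL_RATE_MCS_9_INDEX; for nss == 3 it would extract 3
+ * (IEEE80211_VHT_MCS_NOT_SUPPORTED) and return -1.
+ */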
+
+static void rs_vht_set_enabled_rates(struct ieee80211_sta *sta,
+				     struct ieee80211_sta_vht_cap *vht_cap,
+				     struct iwl_lq_sta *lq_sta)
+{
+	int i;
+	int highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 1);
+
+	if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
+		for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
+			if (i == IWL_RATE_9M_INDEX)
+				continue;
+
+			/* VHT MCS9 isn't valid at 20 MHz for NSS=1,2 */
+			if (i == IWL_RATE_MCS_9_INDEX &&
+			    sta->bandwidth == IEEE80211_STA_RX_BW_20)
+				continue;
+
+			lq_sta->active_siso_rate |= BIT(i);
+		}
+	}
+
+	if (sta->rx_nss < 2)
+		return;
+
+	highest_mcs = rs_vht_highest_rx_mcs_index(vht_cap, 2);
+	if (highest_mcs >= IWL_RATE_MCS_0_INDEX) {
+		for (i = IWL_RATE_MCS_0_INDEX; i <= highest_mcs; i++) {
+			if (i == IWL_RATE_9M_INDEX)
+				continue;
+
+			/* VHT MCS9 isn't valid at 20 MHz for NSS=1,2 */
+			if (i == IWL_RATE_MCS_9_INDEX &&
+			    sta->bandwidth == IEEE80211_STA_RX_BW_20)
+				continue;
+
+			lq_sta->active_mimo2_rate |= BIT(i);
+		}
+	}
+}
+
+static void rs_ht_init(struct iwl_mvm *mvm,
+		       struct ieee80211_sta *sta,
+		       struct iwl_lq_sta *lq_sta,
+		       struct ieee80211_sta_ht_cap *ht_cap)
+{
+	/* active_siso_rate mask includes 9 MBits (bit 5),
+	 * and CCK (bits 0-3), supp_rates[] does not;
+	 * shift to convert format, force 9 MBits off.
+	 */
+	lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
+	lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
+	lq_sta->active_siso_rate &= ~((u16)0x2);
+	lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
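+	/* E.g. rx_mask[0] == 0xff (MCS 0-7): 0xff << 1 == 0x1fe, OR-ing bit 0
+	 * back gives 0x1ff, clearing the 9M bit gives 0x1fd, and shifting by
+	 * IWL_FIRST_OFDM_RATE (the 6M index, i.e. 4) yields 0x1fd0 - the same
+	 * "6 - 60 MBits, no 9, no CCK" mask that rs_program_fix_rate() uses.
+	 */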
+
+	lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
+	lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
+	lq_sta->active_mimo2_rate &= ~((u16)0x2);
+	lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
+
+	if (mvm->cfg->ht_params->ldpc &&
+	    (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING))
+		lq_sta->ldpc = true;
+
+	if (mvm->cfg->ht_params->stbc &&
+	    (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
+	    (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC))
+		lq_sta->stbc_capable = true;
+
+	lq_sta->is_vht = false;
+}
+
+static void rs_vht_init(struct iwl_mvm *mvm,
+			struct ieee80211_sta *sta,
+			struct iwl_lq_sta *lq_sta,
+			struct ieee80211_sta_vht_cap *vht_cap)
+{
+	rs_vht_set_enabled_rates(sta, vht_cap, lq_sta);
+
+	if (mvm->cfg->ht_params->ldpc &&
+	    (vht_cap->cap & IEEE80211_VHT_CAP_RXLDPC))
+		lq_sta->ldpc = true;
+
+	if (mvm->cfg->ht_params->stbc &&
+	    (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
+	    (vht_cap->cap & IEEE80211_VHT_CAP_RXSTBC_MASK))
+		lq_sta->stbc_capable = true;
+
+	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
+	    (num_of_ant(iwl_mvm_get_valid_tx_ant(mvm)) > 1) &&
+	    (vht_cap->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE))
+		lq_sta->bfer_capable = true;
+
+	lq_sta->is_vht = true;
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm)
+{
+	spin_lock_bh(&mvm->drv_stats_lock);
+	memset(&mvm->drv_rx_stats, 0, sizeof(mvm->drv_rx_stats));
+	spin_unlock_bh(&mvm->drv_stats_lock);
+}
+
+void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
+{
+	u8 nss = 0;
+
+	spin_lock(&mvm->drv_stats_lock);
+
+	if (agg)
+		mvm->drv_rx_stats.agg_frames++;
+
+	mvm->drv_rx_stats.success_frames++;
+
+	switch (rate & RATE_MCS_CHAN_WIDTH_MSK) {
+	case RATE_MCS_CHAN_WIDTH_20:
+		mvm->drv_rx_stats.bw_20_frames++;
+		break;
+	case RATE_MCS_CHAN_WIDTH_40:
+		mvm->drv_rx_stats.bw_40_frames++;
+		break;
+	case RATE_MCS_CHAN_WIDTH_80:
+		mvm->drv_rx_stats.bw_80_frames++;
+		break;
+	case RATE_MCS_CHAN_WIDTH_160:
+		mvm->drv_rx_stats.bw_160_frames++;
+		break;
+	default:
+		WARN_ONCE(1, "bad BW. rate 0x%x", rate);
+	}
+
+	if (rate & RATE_MCS_HT_MSK) {
+		mvm->drv_rx_stats.ht_frames++;
+		nss = ((rate & RATE_HT_MCS_NSS_MSK) >> RATE_HT_MCS_NSS_POS) + 1;
+	} else if (rate & RATE_MCS_VHT_MSK) {
+		mvm->drv_rx_stats.vht_frames++;
+		nss = ((rate & RATE_VHT_MCS_NSS_MSK) >>
+		       RATE_VHT_MCS_NSS_POS) + 1;
+	} else {
+		mvm->drv_rx_stats.legacy_frames++;
+	}
+
+	if (nss == 1)
+		mvm->drv_rx_stats.siso_frames++;
+	else if (nss == 2)
+		mvm->drv_rx_stats.mimo2_frames++;
+
+	if (rate & RATE_MCS_SGI_MSK)
+		mvm->drv_rx_stats.sgi_frames++;
+	else
+		mvm->drv_rx_stats.ngi_frames++;
+
+	mvm->drv_rx_stats.last_rates[mvm->drv_rx_stats.last_frame_idx] = rate;
+	mvm->drv_rx_stats.last_frame_idx =
+		(mvm->drv_rx_stats.last_frame_idx + 1) %
+			ARRAY_SIZE(mvm->drv_rx_stats.last_rates);
+
+	spin_unlock(&mvm->drv_stats_lock);
+}
+#endif
+
+/*
+ * Called after adding a new station to initialize rate scaling
+ */
+static void rs_drv_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+			     enum nl80211_band band)
+{
+	int i, j;
+	struct ieee80211_hw *hw = mvm->hw;
+	struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+	struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta.rs_drv;
+	struct ieee80211_supported_band *sband;
+	unsigned long supp; /* must be unsigned long for for_each_set_bit */
+
+	/* clear all non-persistent lq data */
+	memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers));
+
+	sband = hw->wiphy->bands[band];
+
+	lq_sta->lq.sta_id = mvmsta->sta_id;
+	mvmsta->amsdu_enabled = 0;
+	mvmsta->max_amsdu_len = sta->max_amsdu_len;
+
+	for (j = 0; j < LQ_SIZE; j++)
+		rs_rate_scale_clear_tbl_windows(mvm, &lq_sta->lq_info[j]);
+
+	lq_sta->flush_timer = 0;
+	lq_sta->last_tx = jiffies;
+
+	IWL_DEBUG_RATE(mvm,
+		       "LQ: *** rate scale station global init for station %d ***\n",
+		       mvmsta->sta_id);
+	/* TODO: what is a good starting rate for STA? About middle? Maybe not
+	 * the lowest or the highest rate.. Could consider using RSSI from
+	 * previous packets? Need to have IEEE 802.1X auth succeed immediately
+	 * after assoc.. */
+
+	lq_sta->missed_rate_counter = IWL_MVM_RS_MISSED_RATE_MAX;
+	lq_sta->band = sband->band;
+	/*
+	 * active legacy rates as per supported rates bitmap
+	 */
+	supp = sta->supp_rates[sband->band];
+	lq_sta->active_legacy_rate = 0;
+	for_each_set_bit(i, &supp, BITS_PER_LONG)
+		lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value);
+
+	/* TODO: should probably account for rx_highest for both HT/VHT */
+	if (!vht_cap || !vht_cap->vht_supported)
+		rs_ht_init(mvm, sta, lq_sta, ht_cap);
+	else
+		rs_vht_init(mvm, sta, lq_sta, vht_cap);
+
+	lq_sta->max_legacy_rate_idx =
+		rs_get_max_rate_from_mask(lq_sta->active_legacy_rate);
+	lq_sta->max_siso_rate_idx =
+		rs_get_max_rate_from_mask(lq_sta->active_siso_rate);
+	lq_sta->max_mimo2_rate_idx =
+		rs_get_max_rate_from_mask(lq_sta->active_mimo2_rate);
+
+	IWL_DEBUG_RATE(mvm,
+		       "LEGACY=%lX SISO=%lX MIMO2=%lX VHT=%d LDPC=%d STBC=%d BFER=%d\n",
+		       lq_sta->active_legacy_rate,
+		       lq_sta->active_siso_rate,
+		       lq_sta->active_mimo2_rate,
+		       lq_sta->is_vht, lq_sta->ldpc, lq_sta->stbc_capable,
+		       lq_sta->bfer_capable);
+	IWL_DEBUG_RATE(mvm, "MAX RATE: LEGACY=%d SISO=%d MIMO2=%d\n",
+		       lq_sta->max_legacy_rate_idx,
+		       lq_sta->max_siso_rate_idx,
+		       lq_sta->max_mimo2_rate_idx);
+
+	/* These values will be overridden later */
+	lq_sta->lq.single_stream_ant_msk =
+		first_antenna(iwl_mvm_get_valid_tx_ant(mvm));
+	lq_sta->lq.dual_stream_ant_msk = ANT_AB;
+
+	/* as default allow aggregation for all tids */
+	lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
+	lq_sta->is_agg = 0;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	iwl_mvm_reset_frame_stats(mvm);
+#endif
+	rs_initialize_lq(mvm, sta, lq_sta, band);
+}
+
+static void rs_drv_rate_update(void *mvm_r,
+			       struct ieee80211_supported_band *sband,
+			       struct cfg80211_chan_def *chandef,
+			       struct ieee80211_sta *sta,
+			       void *priv_sta, u32 changed)
+{
+	struct iwl_op_mode *op_mode = mvm_r;
+	struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
+	u8 tid;
+
+	if (!iwl_mvm_sta_from_mac80211(sta)->vif)
+		return;
+
+	/* Stop any ongoing aggregations as rs starts off assuming no agg */
+	for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
+		ieee80211_stop_tx_ba_session(sta, tid);
+
+	iwl_mvm_rs_rate_init(mvm, sta, sband->band);
+}
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm,
+					    struct iwl_lq_cmd *lq_cmd,
+					    enum nl80211_band band,
+					    u32 ucode_rate)
+{
+	struct rs_rate rate;
+	int i;
+	int num_rates = ARRAY_SIZE(lq_cmd->rs_table);
+	__le32 ucode_rate_le32 = cpu_to_le32(ucode_rate);
+	u8 ant = (ucode_rate & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS;
+
+	for (i = 0; i < num_rates; i++)
+		lq_cmd->rs_table[i] = ucode_rate_le32;
+
+	if (rs_rate_from_ucode_rate(ucode_rate, band, &rate)) {
+		WARN_ON_ONCE(1);
+		return;
+	}
+
+	if (is_mimo(&rate))
+		lq_cmd->mimo_delim = num_rates - 1;
+	else
+		lq_cmd->mimo_delim = 0;
+
+	lq_cmd->reduced_tpc = 0;
+
+	if (num_of_ant(ant) == 1)
+		lq_cmd->single_stream_ant_msk = ant;
+
+	if (!mvm->trans->cfg->gen2)
+		lq_cmd->agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+	else
+		lq_cmd->agg_frame_cnt_limit =
+			LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
+}
+#endif /* CONFIG_MAC80211_DEBUGFS */
+
+static void rs_fill_rates_for_column(struct iwl_mvm *mvm,
+				     struct iwl_lq_sta *lq_sta,
+				     struct rs_rate *rate,
+				     __le32 *rs_table, int *rs_table_index,
+				     int num_rates, int num_retries,
+				     u8 valid_tx_ant, bool toggle_ant)
+{
+	int i, j;
+	__le32 ucode_rate;
+	bool bottom_reached = false;
+	int prev_rate_idx = rate->index;
+	int end = LINK_QUAL_MAX_RETRY_NUM;
+	int index = *rs_table_index;
+
+	for (i = 0; i < num_rates && index < end; i++) {
+		for (j = 0; j < num_retries && index < end; j++, index++) {
+			ucode_rate = cpu_to_le32(ucode_rate_from_rs_rate(mvm,
+									 rate));
+			rs_table[index] = ucode_rate;
+			if (toggle_ant)
+				rs_toggle_antenna(valid_tx_ant, rate);
+		}
+
+		prev_rate_idx = rate->index;
+		bottom_reached = rs_get_lower_rate_in_column(lq_sta, rate);
+		if (bottom_reached && !is_legacy(rate))
+			break;
+	}
+
+	if (!bottom_reached && !is_legacy(rate))
+		rate->index = prev_rate_idx;
+
+	*rs_table_index = index;
+}
+
+/* Building the rate table is non-trivial. When we're in MIMO2/VHT/80Mhz/SGI
+ * column the rate table should look like this:
+ *
+ * rate[0] 0x400D019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI
+ * rate[1] 0x400D019 VHT | ANT: AB BW: 80Mhz MCS: 9 NSS: 2 SGI
+ * rate[2] 0x400D018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI
+ * rate[3] 0x400D018 VHT | ANT: AB BW: 80Mhz MCS: 8 NSS: 2 SGI
+ * rate[4] 0x400D017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI
+ * rate[5] 0x400D017 VHT | ANT: AB BW: 80Mhz MCS: 7 NSS: 2 SGI
+ * rate[6] 0x4005007 VHT | ANT: A BW: 80Mhz MCS: 7 NSS: 1 NGI
+ * rate[7] 0x4009006 VHT | ANT: B BW: 80Mhz MCS: 6 NSS: 1 NGI
+ * rate[8] 0x4005005 VHT | ANT: A BW: 80Mhz MCS: 5 NSS: 1 NGI
+ * rate[9] 0x800B Legacy | ANT: B Rate: 36 Mbps
+ * rate[10] 0x4009 Legacy | ANT: A Rate: 24 Mbps
+ * rate[11] 0x8007 Legacy | ANT: B Rate: 18 Mbps
+ * rate[12] 0x4005 Legacy | ANT: A Rate: 12 Mbps
+ * rate[13] 0x800F Legacy | ANT: B Rate: 9 Mbps
+ * rate[14] 0x400D Legacy | ANT: A Rate: 6 Mbps
+ * rate[15] 0x800D Legacy | ANT: B Rate: 6 Mbps
+ */
+static void rs_build_rates_table(struct iwl_mvm *mvm,
+				 struct ieee80211_sta *sta,
+				 struct iwl_lq_sta *lq_sta,
+				 const struct rs_rate *initial_rate)
+{
+	struct rs_rate rate;
+	int num_rates, num_retries, index = 0;
+	u8 valid_tx_ant = 0;
+	struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
+	bool toggle_ant = false;
+	u32 color;
+
+	memcpy(&rate, initial_rate, sizeof(rate));
+
+	valid_tx_ant = iwl_mvm_get_valid_tx_ant(mvm);
+
+	/* TODO: remove old API when min FW API hits 14 */
+	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS) &&
+	    rs_stbc_allow(mvm, sta, lq_sta))
+		rate.stbc = true;
+
+	if (is_siso(&rate)) {
+		num_rates = IWL_MVM_RS_INITIAL_SISO_NUM_RATES;
+		num_retries = IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE;
+	} else if (is_mimo(&rate)) {
+		num_rates = IWL_MVM_RS_INITIAL_MIMO_NUM_RATES;
+		num_retries = IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE;
+	} else {
+		num_rates = IWL_MVM_RS_INITIAL_LEGACY_NUM_RATES;
+		num_retries = IWL_MVM_RS_INITIAL_LEGACY_RETRIES;
+		toggle_ant = true;
+	}
+
+	rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index,
+				 num_rates, num_retries, valid_tx_ant,
+				 toggle_ant);
+
+	rs_get_lower_rate_down_column(lq_sta, &rate);
+
+	if (is_siso(&rate)) {
+		num_rates = IWL_MVM_RS_SECONDARY_SISO_NUM_RATES;
+		num_retries = IWL_MVM_RS_SECONDARY_SISO_RETRIES;
+		lq_cmd->mimo_delim = index;
+	} else if (is_legacy(&rate)) {
+		num_rates = IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES;
+		num_retries = IWL_MVM_RS_SECONDARY_LEGACY_RETRIES;
+	} else {
+		WARN_ON_ONCE(1);
+	}
+
+	toggle_ant = true;
+
+	rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index,
+				 num_rates, num_retries, valid_tx_ant,
+				 toggle_ant);
+
+	rs_get_lower_rate_down_column(lq_sta, &rate);
+
+	num_rates = IWL_MVM_RS_SECONDARY_LEGACY_NUM_RATES;
+	num_retries = IWL_MVM_RS_SECONDARY_LEGACY_RETRIES;
+
+	rs_fill_rates_for_column(mvm, lq_sta, &rate, lq_cmd->rs_table, &index,
+				 num_rates, num_retries, valid_tx_ant,
+				 toggle_ant);
+
+	/* update the color of the LQ command (as a counter at bits 1-3) */
+	color = LQ_FLAGS_COLOR_INC(LQ_FLAG_COLOR_GET(lq_cmd->flags));
+	lq_cmd->flags = LQ_FLAG_COLOR_SET(lq_cmd->flags, color);
+}
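+
+/*
+ * The LQ_FLAG_COLOR_* helpers (from the fw API headers) keep a small counter
+ * in bits 1-3 of the LQ flags; bumping it on every rebuild lets tx status
+ * carrying an older color (see RS_DRV_DATA_LQ_COLOR_GET in rs.h) be
+ * recognized as belonging to a stale rate table. The counter presumably just
+ * wraps within its 3 bits.
+ */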
+
+struct rs_bfer_active_iter_data {
+	struct ieee80211_sta *exclude_sta;
+	struct iwl_mvm_sta *bfer_mvmsta;
+};
+
+static void rs_bfer_active_iter(void *_data,
+				struct ieee80211_sta *sta)
+{
+	struct rs_bfer_active_iter_data *data = _data;
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_lq_cmd *lq_cmd = &mvmsta->lq_sta.rs_drv.lq;
+	u32 ss_params = le32_to_cpu(lq_cmd->ss_params);
+
+	if (sta == data->exclude_sta)
+		return;
+
+	/* The current sta has BFER allowed */
+	if (ss_params & LQ_SS_BFER_ALLOWED) {
+		WARN_ON_ONCE(data->bfer_mvmsta != NULL);
+
+		data->bfer_mvmsta = mvmsta;
+	}
+}
+
+static int rs_bfer_priority(struct iwl_mvm_sta *sta)
+{
+	int prio = -1;
+	enum nl80211_iftype viftype = ieee80211_vif_type_p2p(sta->vif);
+
+	switch (viftype) {
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_P2P_GO:
+		prio = 3;
+		break;
+	case NL80211_IFTYPE_P2P_CLIENT:
+		prio = 2;
+		break;
+	case NL80211_IFTYPE_STATION:
+		prio = 1;
+		break;
+	default:
+		WARN_ONCE(true, "viftype %d sta_id %d", viftype, sta->sta_id);
+		prio = -1;
+	}
+
+	return prio;
+}
+
+/* Returns >0 if sta1 has a higher BFER priority compared to sta2 */
+static int rs_bfer_priority_cmp(struct iwl_mvm_sta *sta1,
+				struct iwl_mvm_sta *sta2)
+{
+	int prio1 = rs_bfer_priority(sta1);
+	int prio2 = rs_bfer_priority(sta2);
+
+	if (prio1 > prio2)
+		return 1;
+	if (prio1 < prio2)
+		return -1;
+	return 0;
+}
+
+static void rs_set_lq_ss_params(struct iwl_mvm *mvm,
+				struct ieee80211_sta *sta,
+				struct iwl_lq_sta *lq_sta,
+				const struct rs_rate *initial_rate)
+{
+	struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct rs_bfer_active_iter_data data = {
+		.exclude_sta = sta,
+		.bfer_mvmsta = NULL,
+	};
+	struct iwl_mvm_sta *bfer_mvmsta = NULL;
+	u32 ss_params = LQ_SS_PARAMS_VALID;
+
+	if (!iwl_mvm_bt_coex_is_mimo_allowed(mvm, sta))
+		goto out;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+	/* Check if forcing the decision is configured.
+	 * Note that SISO is forced by not allowing STBC or BFER
+	 */
+	if (lq_sta->pers.ss_force == RS_SS_FORCE_STBC)
+		ss_params |= (LQ_SS_STBC_1SS_ALLOWED | LQ_SS_FORCE);
+	else if (lq_sta->pers.ss_force == RS_SS_FORCE_BFER)
+		ss_params |= (LQ_SS_BFER_ALLOWED | LQ_SS_FORCE);
+
+	if (lq_sta->pers.ss_force != RS_SS_FORCE_NONE) {
+		IWL_DEBUG_RATE(mvm, "Forcing single stream Tx decision %d\n",
+			       lq_sta->pers.ss_force);
+		goto out;
+	}
+#endif
+
+	if (lq_sta->stbc_capable)
+		ss_params |= LQ_SS_STBC_1SS_ALLOWED;
+
+	if (!lq_sta->bfer_capable)
+		goto out;
+
+	ieee80211_iterate_stations_atomic(mvm->hw,
+					  rs_bfer_active_iter,
+					  &data);
+	bfer_mvmsta = data.bfer_mvmsta;
+
+	/* This code is safe as it doesn't run concurrently for different
+	 * stations. This is guaranteed by the fact that calls to
+	 * ieee80211_tx_status don't run concurrently for a single HW.
+	 */
+	if (!bfer_mvmsta) {
+		IWL_DEBUG_RATE(mvm, "No sta with BFER allowed found. Allow\n");
+
+		ss_params |= LQ_SS_BFER_ALLOWED;
+		goto out;
+	}
+
+	IWL_DEBUG_RATE(mvm, "Found existing sta %d with BFER activated\n",
+		       bfer_mvmsta->sta_id);
+
+	/* Disallow BFER on another STA if active and we're a higher priority */
+	if (rs_bfer_priority_cmp(mvmsta, bfer_mvmsta) > 0) {
+		struct iwl_lq_cmd *bfersta_lq_cmd =
+			&bfer_mvmsta->lq_sta.rs_drv.lq;
+		u32 bfersta_ss_params = le32_to_cpu(bfersta_lq_cmd->ss_params);
+
+		bfersta_ss_params &= ~LQ_SS_BFER_ALLOWED;
+		bfersta_lq_cmd->ss_params = cpu_to_le32(bfersta_ss_params);
+		iwl_mvm_send_lq_cmd(mvm, bfersta_lq_cmd, false);
+
+		ss_params |= LQ_SS_BFER_ALLOWED;
+		IWL_DEBUG_RATE(mvm,
+			       "Lower priority BFER sta found (%d). Switch BFER\n",
+			       bfer_mvmsta->sta_id);
+	}
+out:
+	lq_cmd->ss_params = cpu_to_le32(ss_params);
+}
+
+static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
+			   struct ieee80211_sta *sta,
+			   struct iwl_lq_sta *lq_sta,
+			   const struct rs_rate *initial_rate)
+{
+	struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
+	struct iwl_mvm_sta *mvmsta;
+	struct iwl_mvm_vif *mvmvif;
+
+	lq_cmd->agg_disable_start_th = IWL_MVM_RS_AGG_DISABLE_START;
+	lq_cmd->agg_time_limit =
+		cpu_to_le16(IWL_MVM_RS_AGG_TIME_LIMIT);
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+	if (lq_sta->pers.dbg_fixed_rate) {
+		rs_build_rates_table_from_fixed(mvm, lq_cmd,
+						lq_sta->band,
+						lq_sta->pers.dbg_fixed_rate);
+		return;
+	}
+#endif
+	if (WARN_ON_ONCE(!sta || !initial_rate))
+		return;
+
+	rs_build_rates_table(mvm, sta, lq_sta, initial_rate);
+
+	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_LQ_SS_PARAMS))
+		rs_set_lq_ss_params(mvm, sta, lq_sta, initial_rate);
+
+	mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+
+	if (num_of_ant(initial_rate->ant) == 1)
+		lq_cmd->single_stream_ant_msk = initial_rate->ant;
+
+	lq_cmd->agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
+
+	/*
+	 * In case of low latency, tell the firmware to leave a frame in the
+	 * Tx Fifo so that it can start a transaction in the same TxOP. This
+	 * basically allows the firmware to send bursts.
+	 */
+	if (iwl_mvm_vif_low_latency(mvmvif))
+		lq_cmd->agg_frame_cnt_limit--;
+
+	if (mvmsta->vif->p2p)
+		lq_cmd->flags |= LQ_FLAG_USE_RTS_MSK;
+
+	lq_cmd->agg_time_limit =
+			cpu_to_le16(iwl_mvm_coex_agg_time_limit(mvm, sta));
+}
+
+static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+{
+	return hw->priv;
+}
+
+/* The rate control API requires a free function to be implemented */
+static void rs_free(void *mvm_rate)
+{
+}
+
+static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta)
+{
+	struct iwl_op_mode *op_mode __maybe_unused = mvm_r;
+	struct iwl_mvm *mvm __maybe_unused = IWL_OP_MODE_GET_MVM(op_mode);
+
+	IWL_DEBUG_RATE(mvm, "enter\n");
+	IWL_DEBUG_RATE(mvm, "leave\n");
+}
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
+{
+	char *type, *bw;
+	u8 mcs = 0, nss = 0;
+	u8 ant = (rate & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS;
+
+	if (!(rate & RATE_MCS_HT_MSK) &&
+	    !(rate & RATE_MCS_VHT_MSK) &&
+	    !(rate & RATE_MCS_HE_MSK)) {
+		int index = iwl_hwrate_to_plcp_idx(rate);
+
+		return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps\n",
+				 rs_pretty_ant(ant),
+				 index == IWL_RATE_INVALID ? "BAD" :
+				 iwl_rate_mcs[index].mbps);
+	}
+
+	if (rate & RATE_MCS_VHT_MSK) {
+		type = "VHT";
+		mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
+		nss = ((rate & RATE_VHT_MCS_NSS_MSK)
+		       >> RATE_VHT_MCS_NSS_POS) + 1;
+	} else if (rate & RATE_MCS_HT_MSK) {
+		type = "HT";
+		mcs = rate & RATE_HT_MCS_INDEX_MSK;
+		nss = ((rate & RATE_HT_MCS_NSS_MSK)
+		       >> RATE_HT_MCS_NSS_POS) + 1;
+	} else if (rate & RATE_MCS_HE_MSK) {
+		type = "HE";
+		mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
+		nss = ((rate & RATE_VHT_MCS_NSS_MSK)
+		       >> RATE_VHT_MCS_NSS_POS) + 1;
+	} else {
+		type = "Unknown"; /* shouldn't happen */
+	}
+
+	switch (rate & RATE_MCS_CHAN_WIDTH_MSK) {
+	case RATE_MCS_CHAN_WIDTH_20:
+		bw = "20Mhz";
+		break;
+	case RATE_MCS_CHAN_WIDTH_40:
+		bw = "40Mhz";
+		break;
+	case RATE_MCS_CHAN_WIDTH_80:
+		bw = "80Mhz";
+		break;
+	case RATE_MCS_CHAN_WIDTH_160:
+		bw = "160Mhz";
+		break;
+	default:
+		bw = "BAD BW";
+	}
+
+	return scnprintf(buf, bufsz,
+			 "%s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s\n",
+			 type, rs_pretty_ant(ant), bw, mcs, nss,
+			 (rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ",
+			 (rate & RATE_MCS_STBC_MSK) ? "STBC " : "",
+			 (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "",
+			 (rate & RATE_MCS_BF_MSK) ? "BF " : "");
+}
+
+/**
+ * Program the device to use a fixed rate for frame transmission.
+ * This is for debugging/testing only;
+ * once the device starts using a fixed rate, we need to reload the module
+ * to bring back normal operation.
+ */
+static void rs_program_fix_rate(struct iwl_mvm *mvm,
+				struct iwl_lq_sta *lq_sta)
+{
+	lq_sta->active_legacy_rate = 0x0FFF;	/* 1 - 54 MBits, includes CCK */
+	lq_sta->active_siso_rate   = 0x1FD0;	/* 6 - 60 MBits, no 9, no CCK */
+	lq_sta->active_mimo2_rate  = 0x1FD0;	/* 6 - 60 MBits, no 9, no CCK */
+
+	IWL_DEBUG_RATE(mvm, "sta_id %d rate 0x%X\n",
+		       lq_sta->lq.sta_id, lq_sta->pers.dbg_fixed_rate);
+
+	if (lq_sta->pers.dbg_fixed_rate) {
+		rs_fill_lq_cmd(mvm, NULL, lq_sta, NULL);
+		iwl_mvm_send_lq_cmd(lq_sta->pers.drv, &lq_sta->lq, false);
+	}
+}
+
+static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
+			const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct iwl_lq_sta *lq_sta = file->private_data;
+	struct iwl_mvm *mvm;
+	char buf[64];
+	size_t buf_size;
+	u32 parsed_rate;
+
+	mvm = lq_sta->pers.drv;
+	memset(buf, 0, sizeof(buf));
+	buf_size = min(count, sizeof(buf) -  1);
+	if (copy_from_user(buf, user_buf, buf_size))
+		return -EFAULT;
+
+	if (sscanf(buf, "%x", &parsed_rate) == 1)
+		lq_sta->pers.dbg_fixed_rate = parsed_rate;
+	else
+		lq_sta->pers.dbg_fixed_rate = 0;
+
+	rs_program_fix_rate(mvm, lq_sta);
+
+	return count;
+}
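+
+/*
+ * Usage sketch (exact path depends on the mac80211 debugfs layout, typically
+ * under the per-station directory in /sys/kernel/debug/ieee80211/):
+ *
+ *   echo 0x<rate_n_flags> > rate_scale_table    force a fixed rate
+ *   echo 0 > rate_scale_table                   clear dbg_fixed_rate
+ *
+ * Note that clearing only resets the debug field; no fresh LQ command is
+ * sent, which is why rs_program_fix_rate() above warns that a module reload
+ * is needed to restore normal operation.
+ */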
+
+static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
+			char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char *buff;
+	int desc = 0;
+	int i = 0;
+	ssize_t ret;
+	static const size_t bufsz = 2048;
+
+	struct iwl_lq_sta *lq_sta = file->private_data;
+	struct iwl_mvm_sta *mvmsta =
+		container_of(lq_sta, struct iwl_mvm_sta, lq_sta.rs_drv);
+	struct iwl_mvm *mvm;
+	struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+	struct rs_rate *rate = &tbl->rate;
+	u32 ss_params;
+
+	mvm = lq_sta->pers.drv;
+	buff = kmalloc(bufsz, GFP_KERNEL);
+	if (!buff)
+		return -ENOMEM;
+
+	desc += scnprintf(buff + desc, bufsz - desc,
+			  "sta_id %d\n", lq_sta->lq.sta_id);
+	desc += scnprintf(buff + desc, bufsz - desc,
+			  "failed=%d success=%d rate=0%lX\n",
+			  lq_sta->total_failed, lq_sta->total_success,
+			  lq_sta->active_legacy_rate);
+	desc += scnprintf(buff + desc, bufsz - desc, "fixed rate 0x%X\n",
+			  lq_sta->pers.dbg_fixed_rate);
+	desc += scnprintf(buff + desc, bufsz - desc, "valid_tx_ant %s%s%s\n",
+	    (iwl_mvm_get_valid_tx_ant(mvm) & ANT_A) ? "ANT_A," : "",
+	    (iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : "",
+	    (iwl_mvm_get_valid_tx_ant(mvm) & ANT_C) ? "ANT_C" : "");
+	desc += scnprintf(buff + desc, bufsz - desc, "lq type %s\n",
+			  (is_legacy(rate)) ? "legacy" :
+			  is_vht(rate) ? "VHT" : "HT");
+	if (!is_legacy(rate)) {
+		desc += scnprintf(buff + desc, bufsz - desc, " %s",
+		   (is_siso(rate)) ? "SISO" : "MIMO2");
+		desc += scnprintf(buff + desc, bufsz - desc, " %s",
+				(is_ht20(rate)) ? "20MHz" :
+				(is_ht40(rate)) ? "40MHz" :
+				(is_ht80(rate)) ? "80MHz" :
+				(is_ht160(rate)) ? "160MHz" : "BAD BW");
+		desc += scnprintf(buff + desc, bufsz - desc, " %s %s %s %s\n",
+				(rate->sgi) ? "SGI" : "NGI",
+				(rate->ldpc) ? "LDPC" : "BCC",
+				(lq_sta->is_agg) ? "AGG on" : "",
+				(mvmsta->amsdu_enabled) ? "AMSDU on" : "");
+	}
+	desc += scnprintf(buff + desc, bufsz - desc, "last tx rate=0x%X\n",
+			lq_sta->last_rate_n_flags);
+	desc += scnprintf(buff + desc, bufsz - desc,
+			"general: flags=0x%X mimo-d=%d s-ant=0x%x d-ant=0x%x\n",
+			lq_sta->lq.flags,
+			lq_sta->lq.mimo_delim,
+			lq_sta->lq.single_stream_ant_msk,
+			lq_sta->lq.dual_stream_ant_msk);
+
+	desc += scnprintf(buff + desc, bufsz - desc,
+			"agg: time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
+			le16_to_cpu(lq_sta->lq.agg_time_limit),
+			lq_sta->lq.agg_disable_start_th,
+			lq_sta->lq.agg_frame_cnt_limit);
+
+	desc += scnprintf(buff + desc, bufsz - desc, "reduced tpc=%d\n",
+			  lq_sta->lq.reduced_tpc);
+	ss_params = le32_to_cpu(lq_sta->lq.ss_params);
+	desc += scnprintf(buff + desc, bufsz - desc,
+			"single stream params: %s%s%s%s\n",
+			(ss_params & LQ_SS_PARAMS_VALID) ?
+			"VALID" : "INVALID",
+			(ss_params & LQ_SS_BFER_ALLOWED) ?
+			", BFER" : "",
+			(ss_params & LQ_SS_STBC_1SS_ALLOWED) ?
+			", STBC" : "",
+			(ss_params & LQ_SS_FORCE) ?
+			", FORCE" : "");
+	desc += scnprintf(buff + desc, bufsz - desc,
+			"Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
+			lq_sta->lq.initial_rate_index[0],
+			lq_sta->lq.initial_rate_index[1],
+			lq_sta->lq.initial_rate_index[2],
+			lq_sta->lq.initial_rate_index[3]);
+
+	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+		u32 r = le32_to_cpu(lq_sta->lq.rs_table[i]);
+
+		desc += scnprintf(buff + desc, bufsz - desc,
+				  " rate[%d] 0x%X ", i, r);
+		desc += rs_pretty_print_rate(buff + desc, bufsz - desc, r);
+	}
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+	kfree(buff);
+	return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
+	.write = rs_sta_dbgfs_scale_table_write,
+	.read = rs_sta_dbgfs_scale_table_read,
+	.open = simple_open,
+	.llseek = default_llseek,
+};
+
+static ssize_t rs_sta_dbgfs_stats_table_read(struct file *file,
+			char __user *user_buf, size_t count, loff_t *ppos)
+{
+	char *buff;
+	int desc = 0;
+	int i, j;
+	ssize_t ret;
+	struct iwl_scale_tbl_info *tbl;
+	struct rs_rate *rate;
+	struct iwl_lq_sta *lq_sta = file->private_data;
+
+	buff = kmalloc(1024, GFP_KERNEL);
+	if (!buff)
+		return -ENOMEM;
+
+	for (i = 0; i < LQ_SIZE; i++) {
+		tbl = &(lq_sta->lq_info[i]);
+		rate = &tbl->rate;
+		desc += sprintf(buff+desc,
+				"%s type=%d SGI=%d BW=%s DUP=0\n"
+				"index=%d\n",
+				lq_sta->active_tbl == i ? "*" : "x",
+				rate->type,
+				rate->sgi,
+				is_ht20(rate) ? "20MHz" :
+				is_ht40(rate) ? "40MHz" :
+				is_ht80(rate) ? "80MHz" :
+				is_ht160(rate) ? "160MHz" : "ERR",
+				rate->index);
+		for (j = 0; j < IWL_RATE_COUNT; j++) {
+			desc += sprintf(buff+desc,
+				"counter=%d success=%d %%=%d\n",
+				tbl->win[j].counter,
+				tbl->win[j].success_counter,
+				tbl->win[j].success_ratio);
+		}
+	}
+	ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+	kfree(buff);
+	return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
+	.read = rs_sta_dbgfs_stats_table_read,
+	.open = simple_open,
+	.llseek = default_llseek,
+};
+
+static ssize_t rs_sta_dbgfs_drv_tx_stats_read(struct file *file,
+					      char __user *user_buf,
+					      size_t count, loff_t *ppos)
+{
+	static const char * const column_name[] = {
+		[RS_COLUMN_LEGACY_ANT_A] = "LEGACY_ANT_A",
+		[RS_COLUMN_LEGACY_ANT_B] = "LEGACY_ANT_B",
+		[RS_COLUMN_SISO_ANT_A] = "SISO_ANT_A",
+		[RS_COLUMN_SISO_ANT_B] = "SISO_ANT_B",
+		[RS_COLUMN_SISO_ANT_A_SGI] = "SISO_ANT_A_SGI",
+		[RS_COLUMN_SISO_ANT_B_SGI] = "SISO_ANT_B_SGI",
+		[RS_COLUMN_MIMO2] = "MIMO2",
+		[RS_COLUMN_MIMO2_SGI] = "MIMO2_SGI",
+	};
+
+	static const char * const rate_name[] = {
+		[IWL_RATE_1M_INDEX] = "1M",
+		[IWL_RATE_2M_INDEX] = "2M",
+		[IWL_RATE_5M_INDEX] = "5.5M",
+		[IWL_RATE_11M_INDEX] = "11M",
+		[IWL_RATE_6M_INDEX] = "6M|MCS0",
+		[IWL_RATE_9M_INDEX] = "9M",
+		[IWL_RATE_12M_INDEX] = "12M|MCS1",
+		[IWL_RATE_18M_INDEX] = "18M|MCS2",
+		[IWL_RATE_24M_INDEX] = "24M|MCS3",
+		[IWL_RATE_36M_INDEX] = "36M|MCS4",
+		[IWL_RATE_48M_INDEX] = "48M|MCS5",
+		[IWL_RATE_54M_INDEX] = "54M|MCS6",
+		[IWL_RATE_MCS_7_INDEX] = "MCS7",
+		[IWL_RATE_MCS_8_INDEX] = "MCS8",
+		[IWL_RATE_MCS_9_INDEX] = "MCS9",
+		[IWL_RATE_MCS_10_INDEX] = "MCS10",
+		[IWL_RATE_MCS_11_INDEX] = "MCS11",
+	};
+
+	char *buff, *pos, *endpos;
+	int col, rate;
+	ssize_t ret;
+	struct iwl_lq_sta *lq_sta = file->private_data;
+	struct rs_rate_stats *stats;
+	static const size_t bufsz = 1024;
+
+	buff = kmalloc(bufsz, GFP_KERNEL);
+	if (!buff)
+		return -ENOMEM;
+
+	pos = buff;
+	endpos = pos + bufsz;
+
+	pos += scnprintf(pos, endpos - pos, "COLUMN,");
+	for (rate = 0; rate < IWL_RATE_COUNT; rate++)
+		pos += scnprintf(pos, endpos - pos, "%s,", rate_name[rate]);
+	pos += scnprintf(pos, endpos - pos, "\n");
+
+	for (col = 0; col < RS_COLUMN_COUNT; col++) {
+		pos += scnprintf(pos, endpos - pos,
+				 "%s,", column_name[col]);
+
+		for (rate = 0; rate < IWL_RATE_COUNT; rate++) {
+			stats = &(lq_sta->pers.tx_stats[col][rate]);
+			pos += scnprintf(pos, endpos - pos,
+					 "%llu/%llu,",
+					 stats->success,
+					 stats->total);
+		}
+		pos += scnprintf(pos, endpos - pos, "\n");
+	}
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
+	kfree(buff);
+	return ret;
+}
+
+static ssize_t rs_sta_dbgfs_drv_tx_stats_write(struct file *file,
+					       const char __user *user_buf,
+					       size_t count, loff_t *ppos)
+{
+	struct iwl_lq_sta *lq_sta = file->private_data;
+
+	memset(lq_sta->pers.tx_stats, 0, sizeof(lq_sta->pers.tx_stats));
+
+	return count;
+}
+
+static const struct file_operations rs_sta_dbgfs_drv_tx_stats_ops = {
+	.read = rs_sta_dbgfs_drv_tx_stats_read,
+	.write = rs_sta_dbgfs_drv_tx_stats_write,
+	.open = simple_open,
+	.llseek = default_llseek,
+};
+
+static ssize_t iwl_dbgfs_ss_force_read(struct file *file,
+				       char __user *user_buf,
+				       size_t count, loff_t *ppos)
+{
+	struct iwl_lq_sta *lq_sta = file->private_data;
+	char buf[12];
+	int bufsz = sizeof(buf);
+	int pos = 0;
+	static const char * const ss_force_name[] = {
+		[RS_SS_FORCE_NONE] = "none",
+		[RS_SS_FORCE_STBC] = "stbc",
+		[RS_SS_FORCE_BFER] = "bfer",
+		[RS_SS_FORCE_SISO] = "siso",
+	};
+
+	pos += scnprintf(buf+pos, bufsz-pos, "%s\n",
+			 ss_force_name[lq_sta->pers.ss_force]);
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_ss_force_write(struct iwl_lq_sta *lq_sta, char *buf,
+					size_t count, loff_t *ppos)
+{
+	struct iwl_mvm *mvm = lq_sta->pers.drv;
+	int ret = 0;
+
+	if (!strncmp("none", buf, 4)) {
+		lq_sta->pers.ss_force = RS_SS_FORCE_NONE;
+	} else if (!strncmp("siso", buf, 4)) {
+		lq_sta->pers.ss_force = RS_SS_FORCE_SISO;
+	} else if (!strncmp("stbc", buf, 4)) {
+		if (lq_sta->stbc_capable) {
+			lq_sta->pers.ss_force = RS_SS_FORCE_STBC;
+		} else {
+			IWL_ERR(mvm,
+				"can't force STBC. peer doesn't support\n");
+			ret = -EINVAL;
+		}
+	} else if (!strncmp("bfer", buf, 4)) {
+		if (lq_sta->bfer_capable) {
+			lq_sta->pers.ss_force = RS_SS_FORCE_BFER;
+		} else {
+			IWL_ERR(mvm,
+				"can't force BFER. peer doesn't support\n");
+			ret = -EINVAL;
+		}
+	} else {
+		IWL_ERR(mvm, "valid values none|siso|stbc|bfer\n");
+		ret = -EINVAL;
+	}
+	return ret ?: count;
+}
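+
+/*
+ * Usage sketch for the "ss_force" debugfs file registered below: write one
+ * of "none"/"siso"/"stbc"/"bfer" to force the single-stream Tx decision,
+ * e.g. "echo stbc > ss_force". "stbc" and "bfer" are rejected with -EINVAL
+ * when the peer lacks the corresponding capability.
+ */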
+
+#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
+	_MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_lq_sta)
+#define MVM_DEBUGFS_ADD_FILE_RS(name, parent, mode) do {		\
+		if (!debugfs_create_file(#name, mode, parent, lq_sta,	\
+					 &iwl_dbgfs_##name##_ops))	\
+			goto err;					\
+	} while (0)
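+
+/*
+ * For reference, MVM_DEBUGFS_ADD_FILE_RS(ss_force, dir, 0600) expands to:
+ *
+ *   if (!debugfs_create_file("ss_force", 0600, dir, lq_sta,
+ *			      &iwl_dbgfs_ss_force_ops))
+ *	goto err;
+ */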
+
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(ss_force, 32);
+
+static void rs_drv_add_sta_debugfs(void *mvm, void *priv_sta,
+				   struct dentry *dir)
+{
+	struct iwl_lq_sta *lq_sta = priv_sta;
+	struct iwl_mvm_sta *mvmsta;
+
+	mvmsta = container_of(lq_sta, struct iwl_mvm_sta, lq_sta.rs_drv);
+
+	if (!mvmsta->vif)
+		return;
+
+	debugfs_create_file("rate_scale_table", 0600, dir,
+			    lq_sta, &rs_sta_dbgfs_scale_table_ops);
+	debugfs_create_file("rate_stats_table", 0400, dir,
+			    lq_sta, &rs_sta_dbgfs_stats_table_ops);
+	debugfs_create_file("drv_tx_stats", 0600, dir,
+			    lq_sta, &rs_sta_dbgfs_drv_tx_stats_ops);
+	debugfs_create_u8("tx_agg_tid_enable", 0600, dir,
+			  &lq_sta->tx_agg_tid_en);
+	debugfs_create_u8("reduced_tpc", 0600, dir,
+			  &lq_sta->pers.dbg_fixed_txp_reduction);
+
+	MVM_DEBUGFS_ADD_FILE_RS(ss_force, dir, 0600);
+	return;
+err:
+	IWL_ERR((struct iwl_mvm *)mvm, "Can't create debugfs entity\n");
+}
+
+void rs_remove_sta_debugfs(void *mvm, void *mvm_sta)
+{
+}
+#endif
+
+/*
+ * Initialization of rate scaling information is done by the driver after
+ * the station is added. Since mac80211 calls this function before a
+ * station is added, we ignore it.
+ */
+static void rs_rate_init_ops(void *mvm_r,
+			     struct ieee80211_supported_band *sband,
+			     struct cfg80211_chan_def *chandef,
+			     struct ieee80211_sta *sta, void *mvm_sta)
+{
+}
+
+/* ops for rate scaling implemented in the driver */
+static const struct rate_control_ops rs_mvm_ops_drv = {
+	.name = RS_NAME,
+	.tx_status = rs_drv_mac80211_tx_status,
+	.get_rate = rs_drv_get_rate,
+	.rate_init = rs_rate_init_ops,
+	.alloc = rs_alloc,
+	.free = rs_free,
+	.alloc_sta = rs_drv_alloc_sta,
+	.free_sta = rs_free_sta,
+	.rate_update = rs_drv_rate_update,
+#ifdef CONFIG_MAC80211_DEBUGFS
+	.add_sta_debugfs = rs_drv_add_sta_debugfs,
+	.remove_sta_debugfs = rs_remove_sta_debugfs,
+#endif
+};
+
+void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+			  enum nl80211_band band)
+{
+	if (iwl_mvm_has_tlc_offload(mvm))
+		rs_fw_rate_init(mvm, sta, band);
+	else
+		rs_drv_rate_init(mvm, sta, band);
+}
+
+int iwl_mvm_rate_control_register(void)
+{
+	return ieee80211_rate_control_register(&rs_mvm_ops_drv);
+}
+
+void iwl_mvm_rate_control_unregister(void)
+{
+	ieee80211_rate_control_unregister(&rs_mvm_ops_drv);
+}
+
+static int rs_drv_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+				bool enable)
+{
+	struct iwl_lq_cmd *lq = &mvmsta->lq_sta.rs_drv.lq;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (enable) {
+		if (mvmsta->tx_protection == 0)
+			lq->flags |= LQ_FLAG_USE_RTS_MSK;
+		mvmsta->tx_protection++;
+	} else {
+		mvmsta->tx_protection--;
+		if (mvmsta->tx_protection == 0)
+			lq->flags &= ~LQ_FLAG_USE_RTS_MSK;
+	}
+
+	return iwl_mvm_send_lq_cmd(mvm, lq, false);
+}
+
+/**
+ * iwl_mvm_tx_protection - ask FW to enable RTS/CTS protection
+ * @mvm: The mvm object
+ * @mvmsta: The station
+ * @enable: Enable Tx protection?
+ */
+int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+			  bool enable)
+{
+	if (iwl_mvm_has_tlc_offload(mvm))
+		return rs_fw_tx_protection(mvm, mvmsta, enable);
+	else
+		return rs_drv_tx_protection(mvm, mvmsta, enable);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
new file mode 100644
index 0000000..d2cf484
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -0,0 +1,469 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __rs_h__
+#define __rs_h__
+
+#include <net/mac80211.h>
+
+#include "iwl-config.h"
+
+#include "fw-api.h"
+#include "iwl-trans.h"
+
+#define RS_NAME "iwl-mvm-rs"
+
+struct iwl_rs_rate_info {
+	u8 plcp;	  /* uCode API:  IWL_RATE_6M_PLCP, etc. */
+	u8 plcp_ht_siso;  /* uCode API:  IWL_RATE_SISO_6M_PLCP, etc. */
+	u8 plcp_ht_mimo2; /* uCode API:  IWL_RATE_MIMO2_6M_PLCP, etc. */
+	u8 plcp_vht_siso;
+	u8 plcp_vht_mimo2;
+	u8 prev_rs;      /* previous rate used in rs algo */
+	u8 next_rs;      /* next rate used in rs algo */
+};
+
+#define IWL_RATE_60M_PLCP 3
+
+enum {
+	IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
+	IWL_RATE_INVALID = IWL_RATE_COUNT,
+};
+
+#define LINK_QUAL_MAX_RETRY_NUM 16
+
+enum {
+	IWL_RATE_6M_INDEX_TABLE = 0,
+	IWL_RATE_9M_INDEX_TABLE,
+	IWL_RATE_12M_INDEX_TABLE,
+	IWL_RATE_18M_INDEX_TABLE,
+	IWL_RATE_24M_INDEX_TABLE,
+	IWL_RATE_36M_INDEX_TABLE,
+	IWL_RATE_48M_INDEX_TABLE,
+	IWL_RATE_54M_INDEX_TABLE,
+	IWL_RATE_1M_INDEX_TABLE,
+	IWL_RATE_2M_INDEX_TABLE,
+	IWL_RATE_5M_INDEX_TABLE,
+	IWL_RATE_11M_INDEX_TABLE,
+	IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1,
+};
+
+/* #define vs. enum to keep from defaulting to 'large integer' */
+#define	IWL_RATE_6M_MASK   (1 << IWL_RATE_6M_INDEX)
+#define	IWL_RATE_9M_MASK   (1 << IWL_RATE_9M_INDEX)
+#define	IWL_RATE_12M_MASK  (1 << IWL_RATE_12M_INDEX)
+#define	IWL_RATE_18M_MASK  (1 << IWL_RATE_18M_INDEX)
+#define	IWL_RATE_24M_MASK  (1 << IWL_RATE_24M_INDEX)
+#define	IWL_RATE_36M_MASK  (1 << IWL_RATE_36M_INDEX)
+#define	IWL_RATE_48M_MASK  (1 << IWL_RATE_48M_INDEX)
+#define	IWL_RATE_54M_MASK  (1 << IWL_RATE_54M_INDEX)
+#define IWL_RATE_60M_MASK  (1 << IWL_RATE_60M_INDEX)
+#define	IWL_RATE_1M_MASK   (1 << IWL_RATE_1M_INDEX)
+#define	IWL_RATE_2M_MASK   (1 << IWL_RATE_2M_INDEX)
+#define	IWL_RATE_5M_MASK   (1 << IWL_RATE_5M_INDEX)
+#define	IWL_RATE_11M_MASK  (1 << IWL_RATE_11M_INDEX)
+
+
+/* uCode API values for HT/VHT bit rates */
+enum {
+	IWL_RATE_HT_SISO_MCS_0_PLCP = 0,
+	IWL_RATE_HT_SISO_MCS_1_PLCP = 1,
+	IWL_RATE_HT_SISO_MCS_2_PLCP = 2,
+	IWL_RATE_HT_SISO_MCS_3_PLCP = 3,
+	IWL_RATE_HT_SISO_MCS_4_PLCP = 4,
+	IWL_RATE_HT_SISO_MCS_5_PLCP = 5,
+	IWL_RATE_HT_SISO_MCS_6_PLCP = 6,
+	IWL_RATE_HT_SISO_MCS_7_PLCP = 7,
+	IWL_RATE_HT_MIMO2_MCS_0_PLCP = 0x8,
+	IWL_RATE_HT_MIMO2_MCS_1_PLCP = 0x9,
+	IWL_RATE_HT_MIMO2_MCS_2_PLCP = 0xA,
+	IWL_RATE_HT_MIMO2_MCS_3_PLCP = 0xB,
+	IWL_RATE_HT_MIMO2_MCS_4_PLCP = 0xC,
+	IWL_RATE_HT_MIMO2_MCS_5_PLCP = 0xD,
+	IWL_RATE_HT_MIMO2_MCS_6_PLCP = 0xE,
+	IWL_RATE_HT_MIMO2_MCS_7_PLCP = 0xF,
+	IWL_RATE_VHT_SISO_MCS_0_PLCP = 0,
+	IWL_RATE_VHT_SISO_MCS_1_PLCP = 1,
+	IWL_RATE_VHT_SISO_MCS_2_PLCP = 2,
+	IWL_RATE_VHT_SISO_MCS_3_PLCP = 3,
+	IWL_RATE_VHT_SISO_MCS_4_PLCP = 4,
+	IWL_RATE_VHT_SISO_MCS_5_PLCP = 5,
+	IWL_RATE_VHT_SISO_MCS_6_PLCP = 6,
+	IWL_RATE_VHT_SISO_MCS_7_PLCP = 7,
+	IWL_RATE_VHT_SISO_MCS_8_PLCP = 8,
+	IWL_RATE_VHT_SISO_MCS_9_PLCP = 9,
+	IWL_RATE_VHT_MIMO2_MCS_0_PLCP = 0x10,
+	IWL_RATE_VHT_MIMO2_MCS_1_PLCP = 0x11,
+	IWL_RATE_VHT_MIMO2_MCS_2_PLCP = 0x12,
+	IWL_RATE_VHT_MIMO2_MCS_3_PLCP = 0x13,
+	IWL_RATE_VHT_MIMO2_MCS_4_PLCP = 0x14,
+	IWL_RATE_VHT_MIMO2_MCS_5_PLCP = 0x15,
+	IWL_RATE_VHT_MIMO2_MCS_6_PLCP = 0x16,
+	IWL_RATE_VHT_MIMO2_MCS_7_PLCP = 0x17,
+	IWL_RATE_VHT_MIMO2_MCS_8_PLCP = 0x18,
+	IWL_RATE_VHT_MIMO2_MCS_9_PLCP = 0x19,
+	IWL_RATE_HT_SISO_MCS_INV_PLCP,
+	IWL_RATE_HT_MIMO2_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+	IWL_RATE_VHT_SISO_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+	IWL_RATE_VHT_MIMO2_MCS_INV_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+	IWL_RATE_HT_SISO_MCS_8_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+	IWL_RATE_HT_SISO_MCS_9_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+	IWL_RATE_HT_MIMO2_MCS_8_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+	IWL_RATE_HT_MIMO2_MCS_9_PLCP = IWL_RATE_HT_SISO_MCS_INV_PLCP,
+};
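+
+/*
+ * Note the encoding pattern above: the HT MIMO2 PLCP codes are the HT SISO
+ * codes with bit 3 set (0x8-0xF), and the VHT MIMO2 codes are the VHT SISO
+ * codes with bit 4 set (0x10-0x19).
+ */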
+
+#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
+
+#define IWL_INVALID_VALUE    -1
+
+#define TPC_MAX_REDUCTION		15
+#define TPC_NO_REDUCTION		0
+#define TPC_INVALID			0xff
+
+#define LINK_QUAL_AGG_FRAME_LIMIT_DEF	(63)
+#define LINK_QUAL_AGG_FRAME_LIMIT_MAX	(63)
+/*
+ * FIXME - various places in firmware API still use u8,
+ * e.g. LQ command and SCD config command.
+ * This should be 256 instead.
+ */
+#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF	(255)
+#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX	(255)
+#define LINK_QUAL_AGG_FRAME_LIMIT_MIN	(0)
+
+#define LQ_SIZE		2	/* 2 mode tables:  "Active" and "Search" */
+
+/* load per tid defines for A-MPDU activation */
+#define IWL_AGG_TPT_THREHOLD	0
+#define IWL_AGG_ALL_TID		0xff
+
+enum iwl_table_type {
+	LQ_NONE,
+	LQ_LEGACY_G,	/* legacy types */
+	LQ_LEGACY_A,
+	LQ_HT_SISO,	/* HT types */
+	LQ_HT_MIMO2,
+	LQ_VHT_SISO,    /* VHT types */
+	LQ_VHT_MIMO2,
+	LQ_HE_SISO,     /* HE types */
+	LQ_HE_MIMO2,
+	LQ_MAX,
+};
+
+struct rs_rate {
+	int index;
+	enum iwl_table_type type;
+	u8 ant;
+	u32 bw;
+	bool sgi;
+	bool ldpc;
+	bool stbc;
+	bool bfer;
+};
+
+
+#define is_type_legacy(type) (((type) == LQ_LEGACY_G) || \
+			      ((type) == LQ_LEGACY_A))
+#define is_type_ht_siso(type) ((type) == LQ_HT_SISO)
+#define is_type_ht_mimo2(type) ((type) == LQ_HT_MIMO2)
+#define is_type_vht_siso(type) ((type) == LQ_VHT_SISO)
+#define is_type_vht_mimo2(type) ((type) == LQ_VHT_MIMO2)
+#define is_type_he_siso(type) ((type) == LQ_HE_SISO)
+#define is_type_he_mimo2(type) ((type) == LQ_HE_MIMO2)
+#define is_type_siso(type) (is_type_ht_siso(type) || is_type_vht_siso(type) || \
+			    is_type_he_siso(type))
+#define is_type_mimo2(type) (is_type_ht_mimo2(type) || \
+			     is_type_vht_mimo2(type) || is_type_he_mimo2(type))
+#define is_type_mimo(type) (is_type_mimo2(type))
+#define is_type_ht(type) (is_type_ht_siso(type) || is_type_ht_mimo2(type))
+#define is_type_vht(type) (is_type_vht_siso(type) || is_type_vht_mimo2(type))
+#define is_type_he(type) (is_type_he_siso(type) || is_type_he_mimo2(type))
+#define is_type_a_band(type) ((type) == LQ_LEGACY_A)
+#define is_type_g_band(type) ((type) == LQ_LEGACY_G)
+
+#define is_legacy(rate)       is_type_legacy((rate)->type)
+#define is_ht_siso(rate)      is_type_ht_siso((rate)->type)
+#define is_ht_mimo2(rate)     is_type_ht_mimo2((rate)->type)
+#define is_vht_siso(rate)     is_type_vht_siso((rate)->type)
+#define is_vht_mimo2(rate)    is_type_vht_mimo2((rate)->type)
+#define is_siso(rate)         is_type_siso((rate)->type)
+#define is_mimo2(rate)        is_type_mimo2((rate)->type)
+#define is_mimo(rate)         is_type_mimo((rate)->type)
+#define is_ht(rate)           is_type_ht((rate)->type)
+#define is_vht(rate)          is_type_vht((rate)->type)
+#define is_he(rate)           is_type_he((rate)->type)
+#define is_a_band(rate)       is_type_a_band((rate)->type)
+#define is_g_band(rate)       is_type_g_band((rate)->type)
+
+#define is_ht20(rate)         ((rate)->bw == RATE_MCS_CHAN_WIDTH_20)
+#define is_ht40(rate)         ((rate)->bw == RATE_MCS_CHAN_WIDTH_40)
+#define is_ht80(rate)         ((rate)->bw == RATE_MCS_CHAN_WIDTH_80)
+#define is_ht160(rate)        ((rate)->bw == RATE_MCS_CHAN_WIDTH_160)
+
+#define IWL_MAX_MCS_DISPLAY_SIZE	12
+
+struct iwl_rate_mcs_info {
+	char	mbps[IWL_MAX_MCS_DISPLAY_SIZE];
+	char	mcs[IWL_MAX_MCS_DISPLAY_SIZE];
+};
+
+/**
+ * struct iwl_lq_sta_rs_fw - rate and related statistics for RS in FW
+ * @last_rate_n_flags: last rate reported by FW
+ * @sta_id: the id of the station
+#ifdef CONFIG_MAC80211_DEBUGFS
+ * @dbg_fixed_rate: for debug, use fixed rate if not 0
+ * @dbg_agg_frame_count_lim: for debug, max number of frames in A-MPDU
+#endif
+ * @chains: bitmask of chains reported in %chain_signal
+ * @chain_signal: per chain signal strength
+ * @last_rssi: last rssi reported
+ * @drv: pointer back to the driver data
+ */
+
+struct iwl_lq_sta_rs_fw {
+	/* last tx rate_n_flags */
+	u32 last_rate_n_flags;
+
+	/* persistent fields - initialized only once - keep last! */
+	struct lq_sta_pers_rs_fw {
+		u32 sta_id;
+#ifdef CONFIG_MAC80211_DEBUGFS
+		u32 dbg_fixed_rate;
+		u16 dbg_agg_frame_count_lim;
+#endif
+		u8 chains;
+		s8 chain_signal[IEEE80211_MAX_CHAINS];
+		s8 last_rssi;
+		struct iwl_mvm *drv;
+	} pers;
+};
+
+/**
+ * struct iwl_rate_scale_data -- tx success history for one rate
+ */
+struct iwl_rate_scale_data {
+	u64 data;		/* bitmap of successful frames */
+	s32 success_counter;	/* number of frames successful */
+	s32 success_ratio;	/* per-cent * 128  */
+	s32 counter;		/* number of frames attempted */
+	s32 average_tpt;	/* success ratio * expected throughput */
+};
+
+/* Possible Tx columns
+ * Tx Column = a combo of legacy/siso/mimo x antenna x SGI
+ */
+enum rs_column {
+	RS_COLUMN_LEGACY_ANT_A = 0,
+	RS_COLUMN_LEGACY_ANT_B,
+	RS_COLUMN_SISO_ANT_A,
+	RS_COLUMN_SISO_ANT_B,
+	RS_COLUMN_SISO_ANT_A_SGI,
+	RS_COLUMN_SISO_ANT_B_SGI,
+	RS_COLUMN_MIMO2,
+	RS_COLUMN_MIMO2_SGI,
+
+	RS_COLUMN_LAST = RS_COLUMN_MIMO2_SGI,
+	RS_COLUMN_COUNT = RS_COLUMN_LAST + 1,
+	RS_COLUMN_INVALID,
+};
+
+enum rs_ss_force_opt {
+	RS_SS_FORCE_NONE = 0,
+	RS_SS_FORCE_STBC,
+	RS_SS_FORCE_BFER,
+	RS_SS_FORCE_SISO,
+};
+
+/* Packet stats per rate */
+struct rs_rate_stats {
+	u64 success;
+	u64 total;
+};
+
+/**
+ * struct iwl_scale_tbl_info -- tx params and success history for all rates
+ *
+ * There are two of these in struct iwl_lq_sta,
+ * one for "active", and one for "search".
+ */
+struct iwl_scale_tbl_info {
+	struct rs_rate rate;
+	enum rs_column column;
+	const u16 *expected_tpt;	/* throughput metrics; expected_tpt_G, etc. */
+	struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
+	/* per txpower-reduction history */
+	struct iwl_rate_scale_data tpc_win[TPC_MAX_REDUCTION + 1];
+};
+
+enum {
+	RS_STATE_SEARCH_CYCLE_STARTED,
+	RS_STATE_SEARCH_CYCLE_ENDED,
+	RS_STATE_STAY_IN_COLUMN,
+};
+
+/**
+ * struct iwl_lq_sta -- driver's rate scaling private structure
+ *
+ * A pointer to this gets passed back and forth between driver and mac80211.
+ */
+struct iwl_lq_sta {
+	u8 active_tbl;		/* index of active table, range 0-1 */
+	u8 rs_state;            /* RS_STATE_* */
+	u8 search_better_tbl;	/* 1: currently trying alternate mode */
+	s32 last_tpt;
+
+	/* The following determine when to search for a new mode */
+	u32 table_count_limit;
+	u32 max_failure_limit;	/* # failed frames before new search */
+	u32 max_success_limit;	/* # successful frames before new search */
+	u32 table_count;
+	u32 total_failed;	/* total failed frames, any/all rates */
+	u32 total_success;	/* total successful frames, any/all rates */
+	u64 flush_timer;	/* time staying in mode before new search */
+
+	u32 visited_columns;    /* Bitmask marking which Tx columns were
+				 * explored during a search cycle
+				 */
+	u64 last_tx;
+	bool is_vht;
+	bool ldpc;              /* LDPC Rx is supported by the STA */
+	bool stbc_capable;      /* Tx STBC is supported by chip and Rx by STA */
+	bool bfer_capable;      /* Remote supports beamformee and we BFer */
+
+	enum nl80211_band band;
+
+	/* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
+	unsigned long active_legacy_rate;
+	unsigned long active_siso_rate;
+	unsigned long active_mimo2_rate;
+
+	/* Highest rate per Tx mode */
+	u8 max_legacy_rate_idx;
+	u8 max_siso_rate_idx;
+	u8 max_mimo2_rate_idx;
+
+	/* Optimal rate based on RSSI and STA caps.
+	 * Used only to reflect link speed to userspace.
+	 */
+	struct rs_rate optimal_rate;
+	unsigned long optimal_rate_mask;
+	const struct rs_init_rate_info *optimal_rates;
+	int optimal_nentries;
+
+	u8 missed_rate_counter;
+
+	struct iwl_lq_cmd lq;
+	struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
+	u8 tx_agg_tid_en;
+
+	/* last tx rate_n_flags */
+	u32 last_rate_n_flags;
+	/* packets destined for this STA are aggregated */
+	u8 is_agg;
+
+	/* tx power reduce for this sta */
+	int tpc_reduce;
+
+	/* persistent fields - initialized only once - keep last! */
+	struct lq_sta_pers {
+#ifdef CONFIG_MAC80211_DEBUGFS
+		u32 dbg_fixed_rate;
+		u8 dbg_fixed_txp_reduction;
+
+		/* force STBC/BFER/SISO for testing */
+		enum rs_ss_force_opt ss_force;
+#endif
+		u8 chains;
+		s8 chain_signal[IEEE80211_MAX_CHAINS];
+		s8 last_rssi;
+		struct rs_rate_stats tx_stats[RS_COLUMN_COUNT][IWL_RATE_COUNT];
+		struct iwl_mvm *drv;
+	} pers;
+};
+
+/* ieee80211_tx_info's status_driver_data[0] is packed with the LQ color and
+ * txp. Note: this is the iwlmvm <-> mac80211 interface.
+ * bits 0-7: reduced tx power
+ * bits 8-10: LQ command's color
+ */
+#define RS_DRV_DATA_TXP_MSK 0xff
+#define RS_DRV_DATA_LQ_COLOR_POS 8
+#define RS_DRV_DATA_LQ_COLOR_MSK (7 << RS_DRV_DATA_LQ_COLOR_POS)
+#define RS_DRV_DATA_LQ_COLOR_GET(_f) (((_f) & RS_DRV_DATA_LQ_COLOR_MSK) >>\
+				      RS_DRV_DATA_LQ_COLOR_POS)
+#define RS_DRV_DATA_PACK(_c, _p) ((void *)(uintptr_t)\
+				  (((uintptr_t)_p) |\
+				   ((_c) << RS_DRV_DATA_LQ_COLOR_POS)))
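+
+/*
+ * Example: RS_DRV_DATA_PACK(5, 0x10) stores (void *)0x510; masking with
+ * RS_DRV_DATA_TXP_MSK recovers the reduced tx power (0x10) and
+ * RS_DRV_DATA_LQ_COLOR_GET() recovers the LQ color (5).
+ */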
+
+/* Initialize station's rate scaling information after adding station */
+void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+			  enum nl80211_band band);
+
+/* Notify RS about Tx status */
+void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+			  int tid, struct ieee80211_tx_info *info, bool ndp);
+
+/**
+ * iwl_mvm_rate_control_register - Register the rate control algorithm callbacks
+ *
+ * Since the rate control algorithm is hardware-specific, there is no need
+ * or reason to place it as a stand-alone module.  The driver can call
+ * iwl_mvm_rate_control_register in order to register the rate control
+ * callbacks with the mac80211 subsystem.  This should be performed prior to
+ * calling ieee80211_register_hw.
+ */
+int iwl_mvm_rate_control_register(void);
+
+/**
+ * iwl_mvm_rate_control_unregister - Unregister the rate control callbacks
+ *
+ * This should be called after calling ieee80211_unregister_hw, but before
+ * the driver is unloaded.
+ */
+void iwl_mvm_rate_control_unregister(void);
+
+struct iwl_mvm_sta;
+
+int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+			  bool enable);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_mvm_reset_frame_stats(struct iwl_mvm *mvm);
+#endif
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+void rs_remove_sta_debugfs(void *mvm, void *mvm_sta);
+#endif
+
+void iwl_mvm_rs_add_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta);
+void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+		     enum nl80211_band band);
+int rs_fw_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+			bool enable);
+void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
+			      struct iwl_rx_cmd_buffer *rxb);
+#endif /* __rs__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
new file mode 100644
index 0000000..bfb1634
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -0,0 +1,909 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include "iwl-trans.h"
+#include "mvm.h"
+#include "fw-api.h"
+
+/*
+ * iwl_mvm_rx_rx_phy_cmd - REPLY_RX_PHY_CMD handler
+ *
+ * Copies the phy information into mvm->last_phy_info; it will be used when
+ * the actual data arrives from the fw in the next packet.
+ */
+void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+
+	memcpy(&mvm->last_phy_info, pkt->data, sizeof(mvm->last_phy_info));
+	mvm->ampdu_ref++;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	if (mvm->last_phy_info.phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) {
+		spin_lock(&mvm->drv_stats_lock);
+		mvm->drv_rx_stats.ampdu_count++;
+		spin_unlock(&mvm->drv_stats_lock);
+	}
+#endif
+}
+
+/*
+ * iwl_mvm_pass_packet_to_mac80211 - builds the packet for mac80211
+ *
+ * Adds the rxb to a new skb and gives it to mac80211
+ */
+static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
+					    struct ieee80211_sta *sta,
+					    struct napi_struct *napi,
+					    struct sk_buff *skb,
+					    struct ieee80211_hdr *hdr, u16 len,
+					    u8 crypt_len,
+					    struct iwl_rx_cmd_buffer *rxb)
+{
+	unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+	unsigned int fraglen;
+
+	/*
+	 * The 'hdrlen' (plus the 8 bytes for the SNAP and the crypt_len,
+	 * but those are all multiples of 4 long) all goes away, but we
+	 * want the *end* of it, which is going to be the start of the IP
+	 * header, to be aligned when it gets pulled in.
+	 * The beginning of the skb->data is aligned on at least a 4-byte
+	 * boundary after allocation. Everything here is aligned at least
+	 * on a 2-byte boundary so we can just take hdrlen & 3 and pad by
+	 * the result.
+	 */
+	skb_reserve(skb, hdrlen & 3);
+
+	/* If frame is small enough to fit in skb->head, pull it completely.
+	 * If not, only pull ieee80211_hdr (including crypto if present, and
+	 * an additional 8 bytes for SNAP/ethertype, see below) so that
+	 * splice() or TCP coalesce are more efficient.
+	 *
+	 * Since, in addition, ieee80211_data_to_8023() always pulls in at
+	 * least 8 bytes (possibly more for mesh) we can do the same here
+	 * to save the cost of doing it later. That still doesn't pull in
+	 * the actual IP header since the typical case has a SNAP header.
+	 * If the latter changes (there are efforts in the standards group
+	 * to do so) we should revisit this and ieee80211_data_to_8023().
+	 */
+	hdrlen = (len <= skb_tailroom(skb)) ? len : hdrlen + crypt_len + 8;
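+	/* e.g. a 96-byte frame that fits in the tailroom is pulled in
+	 * whole, while a 1500-byte frame pulls only hdrlen + crypt_len
+	 * + 8 and leaves the rest for the page fragment added below
+	 */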
+
+	skb_put_data(skb, hdr, hdrlen);
+	fraglen = len - hdrlen;
+
+	if (fraglen) {
+		int offset = (void *)hdr + hdrlen -
+			     rxb_addr(rxb) + rxb_offset(rxb);
+
+		skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
+				fraglen, rxb->truesize);
+	}
+
+	ieee80211_rx_napi(mvm->hw, sta, skb, napi);
+}
+
+/*
+ * iwl_mvm_get_signal_strength - use new rx PHY INFO API
+ * values are reported by the fw as positive values - need to negate
+ * to obtain their dBm.  Account for missing antennas by replacing 0
+ * values by S8_MIN (-128 dBm): practically zero power and below any
+ * feasible measurement.
+ */
+static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
+					struct iwl_rx_phy_info *phy_info,
+					struct ieee80211_rx_status *rx_status)
+{
+	int energy_a, energy_b, energy_c, max_energy;
+	u32 val;
+
+	val =
+	    le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]);
+	energy_a = (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >>
+						IWL_RX_INFO_ENERGY_ANT_A_POS;
+	energy_a = energy_a ? -energy_a : S8_MIN;
+	energy_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >>
+						IWL_RX_INFO_ENERGY_ANT_B_POS;
+	energy_b = energy_b ? -energy_b : S8_MIN;
+	energy_c = (val & IWL_RX_INFO_ENERGY_ANT_C_MSK) >>
+						IWL_RX_INFO_ENERGY_ANT_C_POS;
+	energy_c = energy_c ? -energy_c : S8_MIN;
+	max_energy = max(energy_a, energy_b);
+	max_energy = max(max_energy, energy_c);
+
+	IWL_DEBUG_STATS(mvm, "energy In A %d B %d C %d , and max %d\n",
+			energy_a, energy_b, energy_c, max_energy);
+
+	rx_status->signal = max_energy;
+	rx_status->chains = (le16_to_cpu(phy_info->phy_flags) &
+				RX_RES_PHY_FLAGS_ANTENNA)
+					>> RX_RES_PHY_FLAGS_ANTENNA_POS;
+	rx_status->chain_signal[0] = energy_a;
+	rx_status->chain_signal[1] = energy_b;
+	rx_status->chain_signal[2] = energy_c;
+}
+
+/*
+ * iwl_mvm_set_mac80211_rx_flag - translate fw status to mac80211 format
+ * @mvm: the mvm object
+ * @hdr: 80211 header
+ * @stats: status in mac80211's format
+ * @rx_pkt_status: status coming from fw
+ *
+ * returns a non-zero value if the packet should be dropped
+ */
+static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm,
+					struct ieee80211_hdr *hdr,
+					struct ieee80211_rx_status *stats,
+					u32 rx_pkt_status,
+					u8 *crypt_len)
+{
+	if (!ieee80211_has_protected(hdr->frame_control) ||
+	    (rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
+			     RX_MPDU_RES_STATUS_SEC_NO_ENC)
+		return 0;
+
+	/* packet was encrypted with unknown alg */
+	if ((rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
+					RX_MPDU_RES_STATUS_SEC_ENC_ERR)
+		return 0;
+
+	switch (rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) {
+	case RX_MPDU_RES_STATUS_SEC_CCM_ENC:
+		/* alg is CCM: check MIC only */
+		if (!(rx_pkt_status & RX_MPDU_RES_STATUS_MIC_OK))
+			return -1;
+
+		stats->flag |= RX_FLAG_DECRYPTED;
+		*crypt_len = IEEE80211_CCMP_HDR_LEN;
+		return 0;
+
+	case RX_MPDU_RES_STATUS_SEC_TKIP_ENC:
+		/* Don't drop the frame and decrypt it in SW */
+		if (!fw_has_api(&mvm->fw->ucode_capa,
+				IWL_UCODE_TLV_API_DEPRECATE_TTAK) &&
+		    !(rx_pkt_status & RX_MPDU_RES_STATUS_TTAK_OK))
+			return 0;
+		*crypt_len = IEEE80211_TKIP_IV_LEN;
+		/* fall through if TTAK OK */
+
+	case RX_MPDU_RES_STATUS_SEC_WEP_ENC:
+		if (!(rx_pkt_status & RX_MPDU_RES_STATUS_ICV_OK))
+			return -1;
+
+		stats->flag |= RX_FLAG_DECRYPTED;
+		if ((rx_pkt_status & RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
+				RX_MPDU_RES_STATUS_SEC_WEP_ENC)
+			*crypt_len = IEEE80211_WEP_IV_LEN;
+		return 0;
+
+	case RX_MPDU_RES_STATUS_SEC_EXT_ENC:
+		if (!(rx_pkt_status & RX_MPDU_RES_STATUS_MIC_OK))
+			return -1;
+		stats->flag |= RX_FLAG_DECRYPTED;
+		return 0;
+
+	default:
+		/* Expected in monitor (not having the keys) */
+		if (!mvm->monitor_on)
+			IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status);
+	}
+
+	return 0;
+}
+
+static void iwl_mvm_rx_handle_tcm(struct iwl_mvm *mvm,
+				  struct ieee80211_sta *sta,
+				  struct ieee80211_hdr *hdr, u32 len,
+				  struct iwl_rx_phy_info *phy_info,
+				  u32 rate_n_flags)
+{
+	struct iwl_mvm_sta *mvmsta;
+	struct iwl_mvm_tcm_mac *mdata;
+	int mac;
+	int ac = IEEE80211_AC_BE; /* treat non-QoS as BE */
+	struct iwl_mvm_vif *mvmvif;
+	/* expected throughput in 100Kbps, single stream, 20 MHz */
+	static const u8 thresh_tpt[] = {
+		9, 18, 30, 42, 60, 78, 90, 96, 120, 135,
+	};
+	u16 thr;
+
+	if (ieee80211_is_data_qos(hdr->frame_control))
+		ac = tid_to_mac80211_ac[ieee80211_get_tid(hdr)];
+
+	mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
+
+	if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
+		schedule_delayed_work(&mvm->tcm.work, 0);
+	mdata = &mvm->tcm.data[mac];
+	mdata->rx.pkts[ac]++;
+
+	/* count the airtime only once for each ampdu */
+	if (mdata->rx.last_ampdu_ref != mvm->ampdu_ref) {
+		mdata->rx.last_ampdu_ref = mvm->ampdu_ref;
+		mdata->rx.airtime += le16_to_cpu(phy_info->frame_time);
+	}
+
+	if (!(rate_n_flags & (RATE_MCS_HT_MSK | RATE_MCS_VHT_MSK)))
+		return;
+
+	mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+
+	if (mdata->opened_rx_ba_sessions ||
+	    mdata->uapsd_nonagg_detect.detected ||
+	    (!mvmvif->queue_params[IEEE80211_AC_VO].uapsd &&
+	     !mvmvif->queue_params[IEEE80211_AC_VI].uapsd &&
+	     !mvmvif->queue_params[IEEE80211_AC_BE].uapsd &&
+	     !mvmvif->queue_params[IEEE80211_AC_BK].uapsd) ||
+	    mvmsta->sta_id != mvmvif->ap_sta_id)
+		return;
+
+	if (rate_n_flags & RATE_MCS_HT_MSK) {
+		thr = thresh_tpt[rate_n_flags & RATE_HT_MCS_RATE_CODE_MSK];
+		thr *= 1 + ((rate_n_flags & RATE_HT_MCS_NSS_MSK) >>
+					RATE_HT_MCS_NSS_POS);
+	} else {
+		if (WARN_ON((rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK) >=
+				ARRAY_SIZE(thresh_tpt)))
+			return;
+		thr = thresh_tpt[rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK];
+		thr *= 1 + ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
+					RATE_VHT_MCS_NSS_POS);
+	}
+
+	thr <<= ((rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) >>
+				RATE_MCS_CHAN_WIDTH_POS);
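+	/* e.g. VHT MCS 9 with 2 spatial streams at 80 MHz: thr = 135 * 2
+	 * = 270, shifted left by 2 for the width, i.e. 1080 in the
+	 * 100 Kbps units of thresh_tpt[] (~108 Mbps)
+	 */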
+
+	mdata->uapsd_nonagg_detect.rx_bytes += len;
+	ewma_rate_add(&mdata->uapsd_nonagg_detect.rate, thr);
+}
+
+static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
+			    struct sk_buff *skb,
+			    u32 status)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+
+	if (mvmvif->features & NETIF_F_RXCSUM &&
+	    status & RX_MPDU_RES_STATUS_CSUM_DONE &&
+	    status & RX_MPDU_RES_STATUS_CSUM_OK)
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+/*
+ * iwl_mvm_rx_rx_mpdu - REPLY_RX_MPDU_CMD handler
+ *
+ * Handles the actual data of the Rx packet from the fw
+ */
+void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
+			struct iwl_rx_cmd_buffer *rxb)
+{
+	struct ieee80211_hdr *hdr;
+	struct ieee80211_rx_status *rx_status;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_rx_phy_info *phy_info;
+	struct iwl_rx_mpdu_res_start *rx_res;
+	struct ieee80211_sta *sta = NULL;
+	struct sk_buff *skb;
+	u32 len;
+	u32 rate_n_flags;
+	u32 rx_pkt_status;
+	u8 crypt_len = 0;
+	bool take_ref;
+
+	phy_info = &mvm->last_phy_info;
+	rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data;
+	hdr = (struct ieee80211_hdr *)(pkt->data + sizeof(*rx_res));
+	len = le16_to_cpu(rx_res->byte_count);
+	rx_pkt_status = le32_to_cpup((__le32 *)
+		(pkt->data + sizeof(*rx_res) + len));
+
+	/* Don't use dev_alloc_skb(); we'll have enough headroom once
+	 * the ieee80211_hdr is pulled.
+	 */
+	skb = alloc_skb(128, GFP_ATOMIC);
+	if (!skb) {
+		IWL_ERR(mvm, "alloc_skb failed\n");
+		return;
+	}
+
+	rx_status = IEEE80211_SKB_RXCB(skb);
+
+	/*
+	 * drop the packet if the HW failed to decrypt it
+	 */
+	if (iwl_mvm_set_mac80211_rx_flag(mvm, hdr, rx_status, rx_pkt_status,
+					 &crypt_len)) {
+		IWL_DEBUG_DROP(mvm, "Bad decryption results 0x%08x\n",
+			       rx_pkt_status);
+		kfree_skb(skb);
+		return;
+	}
+
+	/*
+	 * Keep packets with CRC errors (and with overrun) for monitor mode
+	 * (otherwise the firmware discards them) but mark them as bad.
+	 */
+	if (!(rx_pkt_status & RX_MPDU_RES_STATUS_CRC_OK) ||
+	    !(rx_pkt_status & RX_MPDU_RES_STATUS_OVERRUN_OK)) {
+		IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
+		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+	}
+
+	/* This will be used in several places later */
+	rate_n_flags = le32_to_cpu(phy_info->rate_n_flags);
+
+	/* rx_status carries information about the packet to mac80211 */
+	rx_status->mactime = le64_to_cpu(phy_info->timestamp);
+	rx_status->device_timestamp = le32_to_cpu(phy_info->system_timestamp);
+	rx_status->band =
+		(phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
+				NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+	rx_status->freq =
+		ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel),
+					       rx_status->band);
+
+	/* TSF as indicated by the firmware is at INA time */
+	rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
+
+	iwl_mvm_get_signal_strength(mvm, phy_info, rx_status);
+
+	IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status->signal,
+			      (unsigned long long)rx_status->mactime);
+
+	rcu_read_lock();
+	if (rx_pkt_status & RX_MPDU_RES_STATUS_SRC_STA_FOUND) {
+		u32 id = rx_pkt_status & RX_MPDU_RES_STATUS_STA_ID_MSK;
+
+		id >>= RX_MDPU_RES_STATUS_STA_ID_SHIFT;
+
+		if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) {
+			sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
+			if (IS_ERR(sta))
+				sta = NULL;
+		}
+	} else if (!is_multicast_ether_addr(hdr->addr2)) {
+		/* This is fine since we prevent two stations with the same
+		 * address from being added.
+		 */
+		sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL);
+	}
+
+	if (sta) {
+		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+		struct ieee80211_vif *tx_blocked_vif =
+			rcu_dereference(mvm->csa_tx_blocked_vif);
+
+		/* We have tx blocked stations (with CS bit). If we heard
+		 * frames from a blocked station on a new channel we can
+		 * TX to it again.
+		 */
+		if (unlikely(tx_blocked_vif) &&
+		    mvmsta->vif == tx_blocked_vif) {
+			struct iwl_mvm_vif *mvmvif =
+				iwl_mvm_vif_from_mac80211(tx_blocked_vif);
+
+			if (mvmvif->csa_target_freq == rx_status->freq)
+				iwl_mvm_sta_modify_disable_tx_ap(mvm, sta,
+								 false);
+		}
+
+		rs_update_last_rssi(mvm, mvmsta, rx_status);
+
+		if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
+		    ieee80211_is_beacon(hdr->frame_control)) {
+			struct iwl_fw_dbg_trigger_tlv *trig;
+			struct iwl_fw_dbg_trigger_low_rssi *rssi_trig;
+			bool trig_check;
+			s32 rssi;
+
+			trig = iwl_fw_dbg_get_trigger(mvm->fw,
+						      FW_DBG_TRIGGER_RSSI);
+			rssi_trig = (void *)trig->data;
+			rssi = le32_to_cpu(rssi_trig->rssi);
+
+			trig_check =
+				iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
+							      ieee80211_vif_to_wdev(mvmsta->vif),
+							      trig);
+			if (trig_check && rx_status->signal < rssi)
+				iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
+							NULL);
+		}
+
+		if (!mvm->tcm.paused && len >= sizeof(*hdr) &&
+		    !is_multicast_ether_addr(hdr->addr1) &&
+		    ieee80211_is_data(hdr->frame_control))
+			iwl_mvm_rx_handle_tcm(mvm, sta, hdr, len, phy_info,
+					      rate_n_flags);
+
+		if (ieee80211_is_data(hdr->frame_control))
+			iwl_mvm_rx_csum(sta, skb, rx_pkt_status);
+	}
+	rcu_read_unlock();
+
+	/* set the preamble flag if appropriate */
+	if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_SHORT_PREAMBLE))
+		rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
+
+	if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_AGG)) {
+		/*
+		 * We know which subframes of an A-MPDU belong
+		 * together since we get a single PHY response
+		 * from the firmware for all of them
+		 */
+		rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
+		rx_status->ampdu_reference = mvm->ampdu_ref;
+	}
+
+	/* Set up the HT phy flags */
+	switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+	case RATE_MCS_CHAN_WIDTH_20:
+		break;
+	case RATE_MCS_CHAN_WIDTH_40:
+		rx_status->bw = RATE_INFO_BW_40;
+		break;
+	case RATE_MCS_CHAN_WIDTH_80:
+		rx_status->bw = RATE_INFO_BW_80;
+		break;
+	case RATE_MCS_CHAN_WIDTH_160:
+		rx_status->bw = RATE_INFO_BW_160;
+		break;
+	}
+	if (!(rate_n_flags & RATE_MCS_CCK_MSK) &&
+	    rate_n_flags & RATE_MCS_SGI_MSK)
+		rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+	if (rate_n_flags & RATE_HT_MCS_GF_MSK)
+		rx_status->enc_flags |= RX_ENC_FLAG_HT_GF;
+	if (rate_n_flags & RATE_MCS_LDPC_MSK)
+		rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
+	if (rate_n_flags & RATE_MCS_HT_MSK) {
+		u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
+				RATE_MCS_STBC_POS;
+		rx_status->encoding = RX_ENC_HT;
+		rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
+		rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+	} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+		u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
+				RATE_MCS_STBC_POS;
+		rx_status->nss =
+			((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
+						RATE_VHT_MCS_NSS_POS) + 1;
+		rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
+		rx_status->encoding = RX_ENC_VHT;
+		rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+		if (rate_n_flags & RATE_MCS_BF_MSK)
+			rx_status->enc_flags |= RX_ENC_FLAG_BF;
+	} else {
+		int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
+							       rx_status->band);
+
+		if (WARN(rate < 0 || rate > 0xFF,
+			 "Invalid rate flags 0x%x, band %d,\n",
+			 rate_n_flags, rx_status->band)) {
+			kfree_skb(skb);
+			return;
+		}
+		rx_status->rate_idx = rate;
+	}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	iwl_mvm_update_frame_stats(mvm, rate_n_flags,
+				   rx_status->flag & RX_FLAG_AMPDU_DETAILS);
+#endif
+
+	if (unlikely((ieee80211_is_beacon(hdr->frame_control) ||
+		      ieee80211_is_probe_resp(hdr->frame_control)) &&
+		     mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED))
+		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND;
+
+	if (unlikely(ieee80211_is_beacon(hdr->frame_control) ||
+		     ieee80211_is_probe_resp(hdr->frame_control)))
+		rx_status->boottime_ns = ktime_get_boot_ns();
+
+	/* Take a reference briefly to kick off a d0i3 entry delay so
+	 * we can handle bursts of RX packets without toggling the
+	 * state too often.  But don't do this for beacons if we are
+	 * going to idle because the beacon filtering changes we make
+	 * cause the firmware to send us collateral beacons. */
+	take_ref = !(test_bit(STATUS_TRANS_GOING_IDLE, &mvm->trans->status) &&
+		     ieee80211_is_beacon(hdr->frame_control));
+
+	if (take_ref)
+		iwl_mvm_ref(mvm, IWL_MVM_REF_RX);
+
+	iwl_mvm_pass_packet_to_mac80211(mvm, sta, napi, skb, hdr, len,
+					crypt_len, rxb);
+
+	if (take_ref)
+		iwl_mvm_unref(mvm, IWL_MVM_REF_RX);
+}
+
+struct iwl_mvm_stat_data {
+	struct iwl_mvm *mvm;
+	__le32 mac_id;
+	u8 beacon_filter_average_energy;
+	void *general;
+};
+
+static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
+				  struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_stat_data *data = _data;
+	struct iwl_mvm *mvm = data->mvm;
+	int sig = -data->beacon_filter_average_energy;
+	int last_event;
+	int thold = vif->bss_conf.cqm_rssi_thold;
+	int hyst = vif->bss_conf.cqm_rssi_hyst;
+	u16 id = le32_to_cpu(data->mac_id);
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	/* This doesn't need the MAC ID check since it's not taking the
+	 * data copied into the "data" struct, but rather the data from
+	 * the notification directly.
+	 */
+	if (data->general) {
+		u16 vif_id = mvmvif->id;
+
+		if (iwl_mvm_is_cdb_supported(mvm)) {
+			struct mvm_statistics_general_cdb *general =
+				data->general;
+
+			mvmvif->beacon_stats.num_beacons =
+				le32_to_cpu(general->beacon_counter[vif_id]);
+			mvmvif->beacon_stats.avg_signal =
+				-general->beacon_average_energy[vif_id];
+		} else {
+			struct mvm_statistics_general_v8 *general =
+				data->general;
+
+			mvmvif->beacon_stats.num_beacons =
+				le32_to_cpu(general->beacon_counter[vif_id]);
+			mvmvif->beacon_stats.avg_signal =
+				-general->beacon_average_energy[vif_id];
+		}
+	}
+
+	if (mvmvif->id != id)
+		return;
+
+	if (vif->type != NL80211_IFTYPE_STATION)
+		return;
+
+	if (sig == 0) {
+		IWL_DEBUG_RX(mvm, "RSSI is 0 - skip signal based decision\n");
+		return;
+	}
+
+	mvmvif->bf_data.ave_beacon_signal = sig;
+
+	/* BT Coex */
+	if (mvmvif->bf_data.bt_coex_min_thold !=
+	    mvmvif->bf_data.bt_coex_max_thold) {
+		last_event = mvmvif->bf_data.last_bt_coex_event;
+		if (sig > mvmvif->bf_data.bt_coex_max_thold &&
+		    (last_event <= mvmvif->bf_data.bt_coex_min_thold ||
+		     last_event == 0)) {
+			mvmvif->bf_data.last_bt_coex_event = sig;
+			IWL_DEBUG_RX(mvm, "cqm_iterator bt coex high %d\n",
+				     sig);
+			iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_HIGH);
+		} else if (sig < mvmvif->bf_data.bt_coex_min_thold &&
+			   (last_event >= mvmvif->bf_data.bt_coex_max_thold ||
+			    last_event == 0)) {
+			mvmvif->bf_data.last_bt_coex_event = sig;
+			IWL_DEBUG_RX(mvm, "cqm_iterator bt coex low %d\n",
+				     sig);
+			iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_LOW);
+		}
+	}
+
+	if (!(vif->driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI))
+		return;
+
+	/* CQM Notification */
+	last_event = mvmvif->bf_data.last_cqm_event;
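+	/* e.g. with thold = -70 and hyst = 3, a low event reported at
+	 * sig = -72 repeats only once the signal drops below -75
+	 */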
+	if (thold && sig < thold && (last_event == 0 ||
+				     sig < last_event - hyst)) {
+		mvmvif->bf_data.last_cqm_event = sig;
+		IWL_DEBUG_RX(mvm, "cqm_iterator cqm low %d\n",
+			     sig);
+		ieee80211_cqm_rssi_notify(
+			vif,
+			NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
+			sig,
+			GFP_KERNEL);
+	} else if (sig > thold &&
+		   (last_event == 0 || sig > last_event + hyst)) {
+		mvmvif->bf_data.last_cqm_event = sig;
+		IWL_DEBUG_RX(mvm, "cqm_iterator cqm high %d\n",
+			     sig);
+		ieee80211_cqm_rssi_notify(
+			vif,
+			NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
+			sig,
+			GFP_KERNEL);
+	}
+}
+
+static inline void
+iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
+{
+	struct iwl_fw_dbg_trigger_tlv *trig;
+	struct iwl_fw_dbg_trigger_stats *trig_stats;
+	u32 trig_offset, trig_thold;
+
+	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_STATS))
+		return;
+
+	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_STATS);
+	trig_stats = (void *)trig->data;
+
+	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
+		return;
+
+	trig_offset = le32_to_cpu(trig_stats->stop_offset);
+	trig_thold = le32_to_cpu(trig_stats->stop_threshold);
+
+	if (WARN_ON_ONCE(trig_offset >= iwl_rx_packet_payload_len(pkt)))
+		return;
+
+	if (le32_to_cpup((__le32 *) (pkt->data + trig_offset)) < trig_thold)
+		return;
+
+	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, NULL);
+}
+
+void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
+				  struct iwl_rx_packet *pkt)
+{
+	struct iwl_mvm_stat_data data = {
+		.mvm = mvm,
+	};
+	int expected_size;
+	int i;
+	u8 *energy;
+	__le32 *bytes;
+	__le32 *air_time;
+	__le32 flags;
+
+	if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
+		if (iwl_mvm_has_new_rx_api(mvm))
+			expected_size = sizeof(struct iwl_notif_statistics_v11);
+		else
+			expected_size = sizeof(struct iwl_notif_statistics_v10);
+	} else {
+		expected_size = sizeof(struct iwl_notif_statistics_cdb);
+	}
+
+	if (WARN_ONCE(iwl_rx_packet_payload_len(pkt) != expected_size,
+		      "received invalid statistics size (%d)!\n",
+		      iwl_rx_packet_payload_len(pkt)))
+		return;
+
+	if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
+		struct iwl_notif_statistics_v11 *stats = (void *)&pkt->data;
+
+		data.mac_id = stats->rx.general.mac_id;
+		data.beacon_filter_average_energy =
+			stats->general.common.beacon_filter_average_energy;
+
+		mvm->rx_stats_v3 = stats->rx;
+
+		mvm->radio_stats.rx_time =
+			le64_to_cpu(stats->general.common.rx_time);
+		mvm->radio_stats.tx_time =
+			le64_to_cpu(stats->general.common.tx_time);
+		mvm->radio_stats.on_time_rf =
+			le64_to_cpu(stats->general.common.on_time_rf);
+		mvm->radio_stats.on_time_scan =
+			le64_to_cpu(stats->general.common.on_time_scan);
+
+		data.general = &stats->general;
+
+		flags = stats->flag;
+	} else {
+		struct iwl_notif_statistics_cdb *stats = (void *)&pkt->data;
+
+		data.mac_id = stats->rx.general.mac_id;
+		data.beacon_filter_average_energy =
+			stats->general.common.beacon_filter_average_energy;
+
+		mvm->rx_stats = stats->rx;
+
+		mvm->radio_stats.rx_time =
+			le64_to_cpu(stats->general.common.rx_time);
+		mvm->radio_stats.tx_time =
+			le64_to_cpu(stats->general.common.tx_time);
+		mvm->radio_stats.on_time_rf =
+			le64_to_cpu(stats->general.common.on_time_rf);
+		mvm->radio_stats.on_time_scan =
+			le64_to_cpu(stats->general.common.on_time_scan);
+
+		data.general = &stats->general;
+
+		flags = stats->flag;
+	}
+
+	iwl_mvm_rx_stats_check_trigger(mvm, pkt);
+
+	ieee80211_iterate_active_interfaces(mvm->hw,
+					    IEEE80211_IFACE_ITER_NORMAL,
+					    iwl_mvm_stat_iterator,
+					    &data);
+
+	if (!iwl_mvm_has_new_rx_api(mvm))
+		return;
+
+	if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
+		struct iwl_notif_statistics_v11 *v11 = (void *)&pkt->data;
+
+		energy = (void *)&v11->load_stats.avg_energy;
+		bytes = (void *)&v11->load_stats.byte_count;
+		air_time = (void *)&v11->load_stats.air_time;
+	} else {
+		struct iwl_notif_statistics_cdb *stats = (void *)&pkt->data;
+
+		energy = (void *)&stats->load_stats.avg_energy;
+		bytes = (void *)&stats->load_stats.byte_count;
+		air_time = (void *)&stats->load_stats.air_time;
+	}
+
+	rcu_read_lock();
+	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+		struct iwl_mvm_sta *sta;
+
+		if (!energy[i])
+			continue;
+
+		sta = iwl_mvm_sta_from_staid_rcu(mvm, i);
+		if (!sta)
+			continue;
+		sta->avg_energy = energy[i];
+	}
+	rcu_read_unlock();
+
+	/*
+	 * Don't update in case the statistics are not cleared, since
+	 * we would end up counting the same airtime twice, once in the
+	 * TCM request and once in the statistics notification.
+	 */
+	if (!(le32_to_cpu(flags) & IWL_STATISTICS_REPLY_FLG_CLEAR))
+		return;
+
+	spin_lock(&mvm->tcm.lock);
+	for (i = 0; i < NUM_MAC_INDEX_DRIVER; i++) {
+		struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[i];
+		u32 airtime = le32_to_cpu(air_time[i]);
+		u32 rx_bytes = le32_to_cpu(bytes[i]);
+
+		mdata->uapsd_nonagg_detect.rx_bytes += rx_bytes;
+		if (airtime) {
+			/* re-init every time to store rate from FW */
+			ewma_rate_init(&mdata->uapsd_nonagg_detect.rate);
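+			/* rx_bytes * 8 / airtime is bits per usec,
+			 * i.e. Mbit/s, assuming the fw reports
+			 * air_time in usec
+			 */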
+			ewma_rate_add(&mdata->uapsd_nonagg_detect.rate,
+				      rx_bytes * 8 / airtime);
+		}
+
+		mdata->rx.airtime += airtime;
+	}
+	spin_unlock(&mvm->tcm.lock);
+}
+
+void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+	iwl_mvm_handle_rx_statistics(mvm, rxb_addr(rxb));
+}
+
+void iwl_mvm_window_status_notif(struct iwl_mvm *mvm,
+				 struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_ba_window_status_notif *notif = (void *)pkt->data;
+	int i;
+	u32 pkt_len = iwl_rx_packet_payload_len(pkt);
+
+	if (WARN_ONCE(pkt_len != sizeof(*notif),
+		      "Received window status notification of wrong size (%u)\n",
+		      pkt_len))
+		return;
+
+	rcu_read_lock();
+	for (i = 0; i < BA_WINDOW_STREAMS_MAX; i++) {
+		struct ieee80211_sta *sta;
+		u8 sta_id, tid;
+		u64 bitmap;
+		u32 ssn;
+		u16 ratid;
+		u16 received_mpdu;
+
+		ratid = le16_to_cpu(notif->ra_tid[i]);
+		/* check that this TID is valid */
+		if (!(ratid & BA_WINDOW_STATUS_VALID_MSK))
+			continue;
+
+		received_mpdu = le16_to_cpu(notif->mpdu_rx_count[i]);
+		if (received_mpdu == 0)
+			continue;
+
+		tid = ratid & BA_WINDOW_STATUS_TID_MSK;
+		/* get the station */
+		sta_id = (ratid & BA_WINDOW_STATUS_STA_ID_MSK)
+			 >> BA_WINDOW_STATUS_STA_ID_POS;
+		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+		if (IS_ERR_OR_NULL(sta))
+			continue;
+		bitmap = le64_to_cpu(notif->bitmap[i]);
+		ssn = le32_to_cpu(notif->start_seq_num[i]);
+
+		/* update mac80211 with the bitmap for the reordering buffer */
+		ieee80211_mark_rx_ba_filtered_frames(sta, tid, ssn, bitmap,
+						     received_mpdu);
+	}
+	rcu_read_unlock();
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
new file mode 100644
index 0000000..b53148f
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -0,0 +1,1471 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include "iwl-trans.h"
+#include "mvm.h"
+#include "fw-api.h"
+
+static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
+				   int queue, struct ieee80211_sta *sta)
+{
+	struct iwl_mvm_sta *mvmsta;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb);
+	struct iwl_mvm_key_pn *ptk_pn;
+	int res;
+	u8 tid, keyidx;
+	u8 pn[IEEE80211_CCMP_PN_LEN];
+	u8 *extiv;
+
+	/* do PN checking */
+
+	/* multicast and non-data frames arrive only on the default queue */
+	if (!ieee80211_is_data(hdr->frame_control) ||
+	    is_multicast_ether_addr(hdr->addr1))
+		return 0;
+
+	/* do not check PN for open AP */
+	if (!(stats->flag & RX_FLAG_DECRYPTED))
+		return 0;
+
+	/*
+	 * avoid checking for default queue - we don't want to replicate
+	 * all the logic that's necessary for checking the PN on fragmented
+	 * frames, leave that to mac80211
+	 */
+	if (queue == 0)
+		return 0;
+
+	/* if we are here, this is surely either CCMP or GCMP */
+	if (IS_ERR_OR_NULL(sta)) {
+		IWL_ERR(mvm,
+			"expected hw-decrypted unicast frame for station\n");
+		return -1;
+	}
+
+	mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+	extiv = (u8 *)hdr + ieee80211_hdrlen(hdr->frame_control);
+	keyidx = extiv[3] >> 6;
+
+	ptk_pn = rcu_dereference(mvmsta->ptk_pn[keyidx]);
+	if (!ptk_pn)
+		return -1;
+
+	if (ieee80211_is_data_qos(hdr->frame_control))
+		tid = ieee80211_get_tid(hdr);
+	else
+		tid = 0;
+
+	/* we don't use HCCA/802.11 QoS TSPECs, so drop such frames */
+	if (tid >= IWL_MAX_TID_COUNT)
+		return -1;
+
+	/* load pn */
+	pn[0] = extiv[7];
+	pn[1] = extiv[6];
+	pn[2] = extiv[5];
+	pn[3] = extiv[4];
+	pn[4] = extiv[1];
+	pn[5] = extiv[0];
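+	/* the CCMP/GCMP extension IV carries PN0/PN1 in bytes 0-1 and
+	 * PN2..PN5 in bytes 4-7; reordering into big-endian (MSB first)
+	 * lets the memcmp() below compare packet numbers numerically
+	 */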
+
+	res = memcmp(pn, ptk_pn->q[queue].pn[tid], IEEE80211_CCMP_PN_LEN);
+	if (res < 0)
+		return -1;
+	if (!res && !(stats->flag & RX_FLAG_ALLOW_SAME_PN))
+		return -1;
+
+	memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
+	stats->flag |= RX_FLAG_PN_VALIDATED;
+
+	return 0;
+}
+
+/* iwl_mvm_create_skb - adds the rxb to a new skb */
+static void iwl_mvm_create_skb(struct sk_buff *skb, struct ieee80211_hdr *hdr,
+			       u16 len, u8 crypt_len,
+			       struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
+	unsigned int headlen, fraglen, pad_len = 0;
+	unsigned int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+
+	if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) {
+		len -= 2;
+		pad_len = 2;
+	}
+
+	/* If frame is small enough to fit in skb->head, pull it completely.
+	 * If not, only pull ieee80211_hdr (including crypto if present, and
+	 * an additional 8 bytes for SNAP/ethertype, see below) so that
+	 * splice() or TCP coalesce are more efficient.
+	 *
+	 * Since, in addition, ieee80211_data_to_8023() always pulls in at
+	 * least 8 bytes (possibly more for mesh) we can do the same here
+	 * to save the cost of doing it later. That still doesn't pull in
+	 * the actual IP header since the typical case has a SNAP header.
+	 * If the latter changes (there are efforts in the standards group
+	 * to do so) we should revisit this and ieee80211_data_to_8023().
+	 */
+	headlen = (len <= skb_tailroom(skb)) ? len :
+					       hdrlen + crypt_len + 8;
+
+	/* The firmware may align the packet to DWORD.
+	 * The padding is inserted after the IV.
+	 * After copying the header + IV skip the padding if
+	 * present before copying packet data.
+	 */
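+	/* e.g. a 26-byte QoS header with an 8-byte CCMP IV: hdrlen grows
+	 * to 34 below, the 2 pad bytes sit right after the IV, and the
+	 * second skb_put_data() skips them via pad_len
+	 */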
+	hdrlen += crypt_len;
+	skb_put_data(skb, hdr, hdrlen);
+	skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen);
+
+	fraglen = len - headlen;
+
+	if (fraglen) {
+		int offset = (void *)hdr + headlen + pad_len -
+			     rxb_addr(rxb) + rxb_offset(rxb);
+
+		skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
+				fraglen, rxb->truesize);
+	}
+}
+
+/* iwl_mvm_pass_packet_to_mac80211 - passes the packet to mac80211 */
+static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
+					    struct napi_struct *napi,
+					    struct sk_buff *skb, int queue,
+					    struct ieee80211_sta *sta)
+{
+	struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
+
+	if (iwl_mvm_check_pn(mvm, skb, queue, sta)) {
+		kfree_skb(skb);
+	} else {
+		unsigned int radiotap_len = 0;
+
+		if (rx_status->flag & RX_FLAG_RADIOTAP_HE)
+			radiotap_len += sizeof(struct ieee80211_radiotap_he);
+		if (rx_status->flag & RX_FLAG_RADIOTAP_HE_MU)
+			radiotap_len += sizeof(struct ieee80211_radiotap_he_mu);
+		__skb_push(skb, radiotap_len);
+		ieee80211_rx_napi(mvm->hw, sta, skb, napi);
+	}
+}
+
+static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
+					struct ieee80211_rx_status *rx_status,
+					u32 rate_n_flags, int energy_a,
+					int energy_b)
+{
+	int max_energy;
+	u32 rate_flags = rate_n_flags;
+
+	energy_a = energy_a ? -energy_a : S8_MIN;
+	energy_b = energy_b ? -energy_b : S8_MIN;
+	max_energy = max(energy_a, energy_b);
+
+	IWL_DEBUG_STATS(mvm, "energy In A %d B %d, and max %d\n",
+			energy_a, energy_b, max_energy);
+
+	rx_status->signal = max_energy;
+	rx_status->chains =
+		(rate_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS;
+	rx_status->chain_signal[0] = energy_a;
+	rx_status->chain_signal[1] = energy_b;
+	rx_status->chain_signal[2] = S8_MIN;
+}
+
+static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
+			     struct ieee80211_rx_status *stats, u16 phy_info,
+			     struct iwl_rx_mpdu_desc *desc,
+			     u32 pkt_flags, int queue, u8 *crypt_len)
+{
+	u16 status = le16_to_cpu(desc->status);
+
+	/*
+	 * Drop UNKNOWN frames in aggregation, unless in monitor mode
+	 * (where we don't have the keys).
+	 * We limit this to aggregation because in TKIP this is a valid
+	 * scenario, since we may not have the (correct) TTAK (phase 1
+	 * key) in the firmware.
+	 */
+	if (phy_info & IWL_RX_MPDU_PHY_AMPDU &&
+	    (status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
+	    IWL_RX_MPDU_STATUS_SEC_UNKNOWN && !mvm->monitor_on)
+		return -1;
+
+	if (!ieee80211_has_protected(hdr->frame_control) ||
+	    (status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
+	    IWL_RX_MPDU_STATUS_SEC_NONE)
+		return 0;
+
+	/* TODO: handle packets encrypted with unknown alg */
+
+	switch (status & IWL_RX_MPDU_STATUS_SEC_MASK) {
+	case IWL_RX_MPDU_STATUS_SEC_CCM:
+	case IWL_RX_MPDU_STATUS_SEC_GCM:
+		BUILD_BUG_ON(IEEE80211_CCMP_PN_LEN != IEEE80211_GCMP_PN_LEN);
+		/* alg is CCM or GCM: check MIC only */
+		if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
+			return -1;
+
+		stats->flag |= RX_FLAG_DECRYPTED;
+		if (pkt_flags & FH_RSCSR_RADA_EN)
+			stats->flag |= RX_FLAG_MIC_STRIPPED;
+		*crypt_len = IEEE80211_CCMP_HDR_LEN;
+		return 0;
+	case IWL_RX_MPDU_STATUS_SEC_TKIP:
+		/* Don't drop the frame and decrypt it in SW */
+		if (!fw_has_api(&mvm->fw->ucode_capa,
+				IWL_UCODE_TLV_API_DEPRECATE_TTAK) &&
+		    !(status & IWL_RX_MPDU_RES_STATUS_TTAK_OK))
+			return 0;
+
+		*crypt_len = IEEE80211_TKIP_IV_LEN;
+		/* fall through if TTAK OK */
+	case IWL_RX_MPDU_STATUS_SEC_WEP:
+		if (!(status & IWL_RX_MPDU_STATUS_ICV_OK))
+			return -1;
+
+		stats->flag |= RX_FLAG_DECRYPTED;
+		if ((status & IWL_RX_MPDU_STATUS_SEC_MASK) ==
+				IWL_RX_MPDU_STATUS_SEC_WEP)
+			*crypt_len = IEEE80211_WEP_IV_LEN;
+
+		if (pkt_flags & FH_RSCSR_RADA_EN)
+			stats->flag |= RX_FLAG_ICV_STRIPPED;
+
+		return 0;
+	case IWL_RX_MPDU_STATUS_SEC_EXT_ENC:
+		if (!(status & IWL_RX_MPDU_STATUS_MIC_OK))
+			return -1;
+		stats->flag |= RX_FLAG_DECRYPTED;
+		return 0;
+	default:
+		/* Expected in monitor (not having the keys) */
+		if (!mvm->monitor_on)
+			IWL_ERR(mvm, "Unhandled alg: 0x%x\n", status);
+	}
+
+	return 0;
+}
+
+static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
+			    struct sk_buff *skb,
+			    struct iwl_rx_mpdu_desc *desc)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+	u16 flags = le16_to_cpu(desc->l3l4_flags);
+	u8 l3_prot = (u8)((flags & IWL_RX_L3L4_L3_PROTO_MASK) >>
+			  IWL_RX_L3_PROTO_POS);
+
+	if (mvmvif->features & NETIF_F_RXCSUM &&
+	    flags & IWL_RX_L3L4_TCP_UDP_CSUM_OK &&
+	    (flags & IWL_RX_L3L4_IP_HDR_CSUM_OK ||
+	     l3_prot == IWL_RX_L3_TYPE_IPV6 ||
+	     l3_prot == IWL_RX_L3_TYPE_IPV6_FRAG))
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+/*
+ * Returns true if a packet is a duplicate and should be dropped.
+ * Updates A-MSDU PN tracking info.
+ */
+static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
+			   struct ieee80211_rx_status *rx_status,
+			   struct ieee80211_hdr *hdr,
+			   struct iwl_rx_mpdu_desc *desc)
+{
+	struct iwl_mvm_sta *mvm_sta;
+	struct iwl_mvm_rxq_dup_data *dup_data;
+	u8 tid, sub_frame_idx;
+
+	if (WARN_ON(IS_ERR_OR_NULL(sta)))
+		return false;
+
+	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+	dup_data = &mvm_sta->dup_data[queue];
+
+	/*
+	 * Drop duplicate 802.11 retransmissions
+	 * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
+	 */
+	if (ieee80211_is_ctl(hdr->frame_control) ||
+	    ieee80211_is_qos_nullfunc(hdr->frame_control) ||
+	    is_multicast_ether_addr(hdr->addr1)) {
+		rx_status->flag |= RX_FLAG_DUP_VALIDATED;
+		return false;
+	}
+
+	if (ieee80211_is_data_qos(hdr->frame_control))
+		/* frame has qos control */
+		tid = ieee80211_get_tid(hdr);
+	else
+		tid = IWL_MAX_TID_COUNT;
+
+	/* If this wasn't part of an A-MSDU, the sub-frame index will be 0 */
+	sub_frame_idx = desc->amsdu_info &
+		IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
+
+	if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
+		     dup_data->last_seq[tid] == hdr->seq_ctrl &&
+		     dup_data->last_sub_frame[tid] >= sub_frame_idx))
+		return true;
+
+	/* Allow the same PN as the first subframe for following subframes */
+	if (dup_data->last_seq[tid] == hdr->seq_ctrl &&
+	    sub_frame_idx > dup_data->last_sub_frame[tid] &&
+	    desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU)
+		rx_status->flag |= RX_FLAG_ALLOW_SAME_PN;
+
+	dup_data->last_seq[tid] = hdr->seq_ctrl;
+	dup_data->last_sub_frame[tid] = sub_frame_idx;
+
+	rx_status->flag |= RX_FLAG_DUP_VALIDATED;
+
+	return false;
+}
+
+int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
+			    const u8 *data, u32 count)
+{
+	struct iwl_rxq_sync_cmd *cmd;
+	u32 data_size = sizeof(*cmd) + count;
+	int ret;
+
+	/* should be DWORD aligned */
+	if (WARN_ON(count & 3 || count > IWL_MULTI_QUEUE_SYNC_MSG_MAX_SIZE))
+		return -EINVAL;
+
+	cmd = kzalloc(data_size, GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	cmd->rxq_mask = cpu_to_le32(rxq_mask);
+	cmd->count =  cpu_to_le32(count);
+	cmd->flags = 0;
+	memcpy(cmd->payload, data, count);
+
+	ret = iwl_mvm_send_cmd_pdu(mvm,
+				   WIDE_ID(DATA_PATH_GROUP,
+					   TRIGGER_RX_QUEUES_NOTIF_CMD),
+				   0, data_size, cmd);
+
+	kfree(cmd);
+	return ret;
+}
+
+/*
+ * Returns true if sn2 - buffer_size < sn1 < sn2.
+ * To be used only in order to compare reorder buffer head with NSSN.
+ * We fully trust NSSN unless it is behind us due to reorder timeout.
+ * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
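+ * E.g. with buffer_size 64: sn1 = 90, sn2 = 100 yields true, while
+ * sn1 = 20, sn2 = 100 yields false - 20 is more than buffer_size
+ * behind 100.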
+ */
+static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size)
+{
+	return ieee80211_sn_less(sn1, sn2) &&
+	       !ieee80211_sn_less(sn1, sn2 - buffer_size);
+}
+
+#define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10)
+
+static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
+				   struct ieee80211_sta *sta,
+				   struct napi_struct *napi,
+				   struct iwl_mvm_baid_data *baid_data,
+				   struct iwl_mvm_reorder_buffer *reorder_buf,
+				   u16 nssn)
+{
+	struct iwl_mvm_reorder_buf_entry *entries =
+		&baid_data->entries[reorder_buf->queue *
+				    baid_data->entries_per_queue];
+	u16 ssn = reorder_buf->head_sn;
+
+	lockdep_assert_held(&reorder_buf->lock);
+
+	/* ignore nssn smaller than head sn - this can happen due to timeout */
+	if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
+		goto set_timer;
+
+	while (iwl_mvm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
+		int index = ssn % reorder_buf->buf_size;
+		struct sk_buff_head *skb_list = &entries[index].e.frames;
+		struct sk_buff *skb;
+
+		ssn = ieee80211_sn_inc(ssn);
+
+		/*
+		 * Empty the list. It will have more than one frame for an
+		 * A-MSDU. An empty list is valid as well, since the nssn
+		 * indicates that frames were received.
+		 */
+		while ((skb = __skb_dequeue(skb_list))) {
+			iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb,
+							reorder_buf->queue,
+							sta);
+			reorder_buf->num_stored--;
+		}
+	}
+	reorder_buf->head_sn = nssn;
+
+set_timer:
+	if (reorder_buf->num_stored && !reorder_buf->removed) {
+		u16 index = reorder_buf->head_sn % reorder_buf->buf_size;
+
+		while (skb_queue_empty(&entries[index].e.frames))
+			index = (index + 1) % reorder_buf->buf_size;
+		/* modify timer to match next frame's expiration time */
+		mod_timer(&reorder_buf->reorder_timer,
+			  entries[index].e.reorder_time + 1 +
+			  RX_REORDER_BUF_TIMEOUT_MQ);
+	} else {
+		del_timer(&reorder_buf->reorder_timer);
+	}
+}
+
+void iwl_mvm_reorder_timer_expired(struct timer_list *t)
+{
+	struct iwl_mvm_reorder_buffer *buf = from_timer(buf, t, reorder_timer);
+	struct iwl_mvm_baid_data *baid_data =
+		iwl_mvm_baid_data_from_reorder_buf(buf);
+	struct iwl_mvm_reorder_buf_entry *entries =
+		&baid_data->entries[buf->queue * baid_data->entries_per_queue];
+	int i;
+	u16 sn = 0, index = 0;
+	bool expired = false;
+	bool cont = false;
+
+	spin_lock(&buf->lock);
+
+	if (!buf->num_stored || buf->removed) {
+		spin_unlock(&buf->lock);
+		return;
+	}
+
+	for (i = 0; i < buf->buf_size ; i++) {
+		index = (buf->head_sn + i) % buf->buf_size;
+
+		if (skb_queue_empty(&entries[index].e.frames)) {
+			/*
+			 * If there is a hole and the next frame didn't expire
+			 * we want to break and not advance SN
+			 */
+			cont = false;
+			continue;
+		}
+		if (!cont &&
+		    !time_after(jiffies, entries[index].e.reorder_time +
+					 RX_REORDER_BUF_TIMEOUT_MQ))
+			break;
+
+		expired = true;
+		/* continue until the next hole after these expired frames */
+		cont = true;
+		sn = ieee80211_sn_add(buf->head_sn, i + 1);
+	}
+
+	if (expired) {
+		struct ieee80211_sta *sta;
+		struct iwl_mvm_sta *mvmsta;
+		u8 sta_id = baid_data->sta_id;
+
+		rcu_read_lock();
+		sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[sta_id]);
+		mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+		/* SN is set to the last expired frame + 1 */
+		IWL_DEBUG_HT(buf->mvm,
+			     "Releasing expired frames for sta %u, sn %d\n",
+			     sta_id, sn);
+		iwl_mvm_event_frame_timeout_callback(buf->mvm, mvmsta->vif,
+						     sta, baid_data->tid);
+		iwl_mvm_release_frames(buf->mvm, sta, NULL, baid_data, buf, sn);
+		rcu_read_unlock();
+	} else {
+		/*
+		 * If no frame expired and there are stored frames, index is now
+		 * pointing to the first unexpired frame - modify the timer
+		 * according to this frame.
+		 */
+		mod_timer(&buf->reorder_timer,
+			  entries[index].e.reorder_time +
+			  1 + RX_REORDER_BUF_TIMEOUT_MQ);
+	}
+	spin_unlock(&buf->lock);
+}
+
+static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
+			   struct iwl_mvm_delba_data *data)
+{
+	struct iwl_mvm_baid_data *ba_data;
+	struct ieee80211_sta *sta;
+	struct iwl_mvm_reorder_buffer *reorder_buf;
+	u8 baid = data->baid;
+
+	if (WARN_ONCE(baid >= IWL_MAX_BAID, "invalid BAID: %x\n", baid))
+		return;
+
+	rcu_read_lock();
+
+	ba_data = rcu_dereference(mvm->baid_map[baid]);
+	if (WARN_ON_ONCE(!ba_data))
+		goto out;
+
+	sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
+	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+		goto out;
+
+	reorder_buf = &ba_data->reorder_buf[queue];
+
+	/* release all frames that are in the reorder buffer to the stack */
+	spin_lock_bh(&reorder_buf->lock);
+	iwl_mvm_release_frames(mvm, sta, NULL, ba_data, reorder_buf,
+			       ieee80211_sn_add(reorder_buf->head_sn,
+						reorder_buf->buf_size));
+	spin_unlock_bh(&reorder_buf->lock);
+	del_timer_sync(&reorder_buf->reorder_timer);
+
+out:
+	rcu_read_unlock();
+}
+
+void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
+			    int queue)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_rxq_sync_notification *notif;
+	struct iwl_mvm_internal_rxq_notif *internal_notif;
+
+	notif = (void *)pkt->data;
+	internal_notif = (void *)notif->payload;
+
+	if (internal_notif->sync &&
+	    mvm->queue_sync_cookie != internal_notif->cookie) {
+		WARN_ONCE(1, "Received expired RX queue sync message\n");
+		return;
+	}
+
+	switch (internal_notif->type) {
+	case IWL_MVM_RXQ_EMPTY:
+		break;
+	case IWL_MVM_RXQ_NOTIF_DEL_BA:
+		iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data);
+		break;
+	default:
+		WARN_ONCE(1, "Invalid identifier %d", internal_notif->type);
+	}
+
+	if (internal_notif->sync &&
+	    !atomic_dec_return(&mvm->queue_sync_counter))
+		wake_up(&mvm->rx_sync_waitq);
+}
+
+/*
+ * Returns true if the MPDU was buffered/dropped, false if it should be passed
+ * to the upper layer.
+ */
+static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
+			    struct napi_struct *napi,
+			    int queue,
+			    struct ieee80211_sta *sta,
+			    struct sk_buff *skb,
+			    struct iwl_rx_mpdu_desc *desc)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct iwl_mvm_sta *mvm_sta;
+	struct iwl_mvm_baid_data *baid_data;
+	struct iwl_mvm_reorder_buffer *buffer;
+	struct sk_buff *tail;
+	u32 reorder = le32_to_cpu(desc->reorder_data);
+	bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU;
+	bool last_subframe =
+		desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME;
+	u8 tid = ieee80211_get_tid(hdr);
+	u8 sub_frame_idx = desc->amsdu_info &
+			   IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
+	struct iwl_mvm_reorder_buf_entry *entries;
+	int index;
+	u16 nssn, sn;
+	u8 baid;
+
+	baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >>
+		IWL_RX_MPDU_REORDER_BAID_SHIFT;
+
+	/*
+	 * This also covers the case of receiving a Block Ack Request
+	 * outside a BA session; we'll pass it to mac80211 and that
+	 * then sends a delBA action frame.
+	 */
+	if (baid == IWL_RX_REORDER_DATA_INVALID_BAID)
+		return false;
+
+	/* no sta yet */
+	if (WARN_ONCE(IS_ERR_OR_NULL(sta),
+		      "Got valid BAID without a valid station assigned\n"))
+		return false;
+
+	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
+	/* not a data packet or a bar */
+	if (!ieee80211_is_back_req(hdr->frame_control) &&
+	    (!ieee80211_is_data_qos(hdr->frame_control) ||
+	     is_multicast_ether_addr(hdr->addr1)))
+		return false;
+
+	if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
+		return false;
+
+	baid_data = rcu_dereference(mvm->baid_map[baid]);
+	if (!baid_data) {
+		IWL_DEBUG_RX(mvm,
+			     "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n",
+			      baid, reorder);
+		return false;
+	}
+
+	if (WARN(tid != baid_data->tid || mvm_sta->sta_id != baid_data->sta_id,
+		 "baid 0x%x is mapped to sta:%d tid:%d, but was received for sta:%d tid:%d\n",
+		 baid, baid_data->sta_id, baid_data->tid, mvm_sta->sta_id,
+		 tid))
+		return false;
+
+	nssn = reorder & IWL_RX_MPDU_REORDER_NSSN_MASK;
+	sn = (reorder & IWL_RX_MPDU_REORDER_SN_MASK) >>
+		IWL_RX_MPDU_REORDER_SN_SHIFT;
+
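+	/*
+	 * All queues share one flat entries[] allocation; queue N's slots
+	 * start at N * entries_per_queue.
+	 */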
+	buffer = &baid_data->reorder_buf[queue];
+	entries = &baid_data->entries[queue * baid_data->entries_per_queue];
+
+	spin_lock_bh(&buffer->lock);
+
+	if (!buffer->valid) {
+		if (reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN) {
+			spin_unlock_bh(&buffer->lock);
+			return false;
+		}
+		buffer->valid = true;
+	}
+
+	if (ieee80211_is_back_req(hdr->frame_control)) {
+		iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, nssn);
+		goto drop;
+	}
+
+	/*
+	 * If there was a significant jump in the nssn - adjust.
+	 * If the SN is smaller than the NSSN it might need to first go into
+	 * the reorder buffer, in which case we just release up to it and the
+	 * rest of the function will take care of storing it and releasing up to
+	 * the nssn
+	 */
+	if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
+				buffer->buf_size) ||
+	    !ieee80211_sn_less(sn, buffer->head_sn + buffer->buf_size)) {
+		u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn;
+
+		iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer,
+				       min_sn);
+	}
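+	/*
+	 * Numeric example: with head_sn = 100 and buf_size = 64 the window
+	 * of storable SNs is [100, 163], so an SN or NSSN of 164 or more is
+	 * such a jump and frames up to min(sn, nssn) were just released.
+	 */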
+
+	/* drop any outdated packets */
+	if (ieee80211_sn_less(sn, buffer->head_sn))
+		goto drop;
+
+	/* release immediately if allowed by nssn and no stored frames */
+	if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
+		if (iwl_mvm_is_sn_less(buffer->head_sn, nssn,
+				       buffer->buf_size) &&
+		   (!amsdu || last_subframe))
+			buffer->head_sn = nssn;
+		/* No need to update AMSDU last SN - we are moving the head */
+		spin_unlock_bh(&buffer->lock);
+		return false;
+	}
+
+	/*
+	 * Release immediately if there are no stored frames and the SN is
+	 * equal to the head. This can happen when the reorder timer has
+	 * fired and left the NSSN behind head_sn: when the next frame in
+	 * sequence arrives, the NSSN alone would not let us release it,
+	 * even though there is no hole and we can move forward.
+	 */
+	if (!buffer->num_stored && sn == buffer->head_sn) {
+		if (!amsdu || last_subframe)
+			buffer->head_sn = ieee80211_sn_inc(buffer->head_sn);
+		/* No need to update AMSDU last SN - we are moving the head */
+		spin_unlock_bh(&buffer->lock);
+		return false;
+	}
+
+	index = sn % buffer->buf_size;
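+	/* e.g. with buf_size 64, SN 1003 lands in slot 1003 % 64 = 43 */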
+
+	/*
+	 * Check if we already stored this frame.
+	 * As an A-MSDU is either received as a whole or not at all, the
+	 * logic is simple: if we have frames at that position in the buffer
+	 * and the last frame that originated from an A-MSDU had a different
+	 * SN, then this is a retransmission. If the SN is the same, an
+	 * incrementing subframe index means it is the same A-MSDU -
+	 * otherwise it is a retransmission.
+	 */
+	tail = skb_peek_tail(&entries[index].e.frames);
+	if (tail && !amsdu)
+		goto drop;
+	else if (tail && (sn != buffer->last_amsdu ||
+			  buffer->last_sub_index >= sub_frame_idx))
+		goto drop;
+
+	/* put in reorder buffer */
+	__skb_queue_tail(&entries[index].e.frames, skb);
+	buffer->num_stored++;
+	entries[index].e.reorder_time = jiffies;
+
+	if (amsdu) {
+		buffer->last_amsdu = sn;
+		buffer->last_sub_index = sub_frame_idx;
+	}
+
+	/*
+	 * We cannot trust NSSN for AMSDU sub-frames that are not the last.
+	 * The reason is that NSSN advances on the first sub-frame, and may
+	 * cause the reorder buffer to advance before all the sub-frames arrive.
+	 * Example: the reorder buffer contains SN 0 & 2, and we receive an
+	 * A-MSDU with SN 1. The NSSN for the first sub-frame will be 3, so
+	 * the driver releases SN 0, 1 and 2. When sub-frame 1 arrives, the
+	 * reorder buffer is already ahead and it will be dropped.
+	 * If the last sub-frame is not on this queue, we will get a frame
+	 * release notification with an up-to-date NSSN.
+	 */
+	if (!amsdu || last_subframe)
+		iwl_mvm_release_frames(mvm, sta, napi, baid_data, buffer, nssn);
+
+	spin_unlock_bh(&buffer->lock);
+	return true;
+
+drop:
+	kfree_skb(skb);
+	spin_unlock_bh(&buffer->lock);
+	return true;
+}
+
+static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm,
+				    u32 reorder_data, u8 baid)
+{
+	unsigned long now = jiffies;
+	unsigned long timeout;
+	struct iwl_mvm_baid_data *data;
+
+	rcu_read_lock();
+
+	data = rcu_dereference(mvm->baid_map[baid]);
+	if (!data) {
+		IWL_DEBUG_RX(mvm,
+			     "Got valid BAID but no baid allocated, bypass the re-ordering buffer. Baid %d reorder 0x%x\n",
+			      baid, reorder_data);
+		goto out;
+	}
+
+	if (!data->timeout)
+		goto out;
+
+	timeout = data->timeout;
+	/*
+	 * Do not update last rx all the time to avoid cache bouncing
+	 * between the rx queues.
+	 * Update it every timeout. Worst case is the session will
+	 * expire after ~ 2 * timeout, which doesn't matter that much.
+	 */
+	if (time_before(data->last_rx + TU_TO_JIFFIES(timeout), now))
+		/* Update is atomic */
+		data->last_rx = now;
+
+out:
+	rcu_read_unlock();
+}
+
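+/*
+ * Reverse the bytes of a MAC address in place, e.g. 00:11:22:33:44:55
+ * becomes 55:44:33:22:11:00; used to undo the 9000-family HW reversing
+ * addr3/addr4 of de-aggregated A-MSDUs.
+ */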
+static void iwl_mvm_flip_address(u8 *addr)
+{
+	int i;
+	u8 mac_addr[ETH_ALEN];
+
+	for (i = 0; i < ETH_ALEN; i++)
+		mac_addr[i] = addr[ETH_ALEN - i - 1];
+	ether_addr_copy(addr, mac_addr);
+}
+
+void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
+			struct iwl_rx_cmd_buffer *rxb, int queue)
+{
+	struct ieee80211_rx_status *rx_status;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
+	struct ieee80211_hdr *hdr;
+	u32 len = le16_to_cpu(desc->mpdu_len);
+	u32 rate_n_flags, gp2_on_air_rise;
+	u16 phy_info = le16_to_cpu(desc->phy_info);
+	struct ieee80211_sta *sta = NULL;
+	struct sk_buff *skb;
+	u8 crypt_len = 0, channel, energy_a, energy_b;
+	struct ieee80211_radiotap_he *he = NULL;
+	struct ieee80211_radiotap_he_mu *he_mu = NULL;
+	u32 he_type = 0xffffffff;
+	/* this is invalid e.g. because puncture type doesn't allow 0b11 */
+#define HE_PHY_DATA_INVAL ((u64)-1)
+	u64 he_phy_data = HE_PHY_DATA_INVAL;
+	size_t desc_size;
+
+	if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
+		return;
+
+	if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+		rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags);
+		channel = desc->v3.channel;
+		gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise);
+		energy_a = desc->v3.energy_a;
+		energy_b = desc->v3.energy_b;
+		desc_size = sizeof(*desc);
+	} else {
+		rate_n_flags = le32_to_cpu(desc->v1.rate_n_flags);
+		channel = desc->v1.channel;
+		gp2_on_air_rise = le32_to_cpu(desc->v1.gp2_on_air_rise);
+		energy_a = desc->v1.energy_a;
+		energy_b = desc->v1.energy_b;
+		desc_size = IWL_RX_DESC_SIZE_V1;
+	}
+
+	hdr = (void *)(pkt->data + desc_size);
+	/* Don't use dev_alloc_skb(), we'll have enough headroom once
+	 * the ieee80211_hdr is pulled.
+	 */
+	skb = alloc_skb(128, GFP_ATOMIC);
+	if (!skb) {
+		IWL_ERR(mvm, "alloc_skb failed\n");
+		return;
+	}
+
+	if (desc->mac_flags2 & IWL_RX_MPDU_MFLG2_PAD) {
+		/*
+		 * If the device inserted padding it means that (it thought)
+		 * the 802.11 header wasn't a multiple of 4 bytes long. In
+		 * this case, reserve two bytes at the start of the SKB to
+		 * align the payload properly in case we end up copying it.
+		 */
+		skb_reserve(skb, 2);
+	}
+
+	rx_status = IEEE80211_SKB_RXCB(skb);
+
+	if (rate_n_flags & RATE_MCS_HE_MSK) {
+		static const struct ieee80211_radiotap_he known = {
+			.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+					     IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
+					     IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
+					     IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN),
+			.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
+					     IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN),
+		};
+		static const struct ieee80211_radiotap_he_mu mu_known = {
+			.flags1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN |
+					      IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN |
+					      IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN |
+					      IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN),
+			.flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN),
+		};
+		unsigned int radiotap_len = 0;
+
+		he = skb_put_data(skb, &known, sizeof(known));
+		radiotap_len += sizeof(known);
+		rx_status->flag |= RX_FLAG_RADIOTAP_HE;
+
+		he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
+
+		if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) {
+			if (mvm->trans->cfg->device_family >=
+			    IWL_DEVICE_FAMILY_22560)
+				he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
+			else
+				he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
+
+			if (he_type == RATE_MCS_HE_TYPE_MU) {
+				he_mu = skb_put_data(skb, &mu_known,
+						     sizeof(mu_known));
+				radiotap_len += sizeof(mu_known);
+				rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU;
+			}
+		}
+
+		/* temporarily hide the radiotap data */
+		__skb_pull(skb, radiotap_len);
+	}
+
+	if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, phy_info, desc,
+			      le32_to_cpu(pkt->len_n_flags), queue,
+			      &crypt_len)) {
+		kfree_skb(skb);
+		return;
+	}
+
+	/*
+	 * Keep packets with CRC errors (and with overrun) for monitor mode
+	 * (otherwise the firmware discards them) but mark them as bad.
+	 */
+	if (!(desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_CRC_OK)) ||
+	    !(desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_OVERRUN_OK))) {
+		IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n",
+			     le16_to_cpu(desc->status));
+		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+	}
+	/* set the preamble flag if appropriate */
+	if (phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
+		rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
+
+	if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
+		u64 tsf_on_air_rise;
+
+		if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+			tsf_on_air_rise = le64_to_cpu(desc->v3.tsf_on_air_rise);
+		else
+			tsf_on_air_rise = le64_to_cpu(desc->v1.tsf_on_air_rise);
+
+		rx_status->mactime = tsf_on_air_rise;
+		/* TSF as indicated by the firmware is at INA time */
+		rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
+	} else if (he_type == RATE_MCS_HE_TYPE_SU) {
+		u64 he_phy_data;
+
+		if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+			he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
+		else
+			he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
+
+		he->data1 |=
+			cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN);
+		if (FIELD_GET(IWL_RX_HE_PHY_UPLINK,
+			      he_phy_data))
+			he->data3 |=
+				cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA3_UL_DL);
+
+		if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
+			rx_status->ampdu_reference = mvm->ampdu_ref;
+			mvm->ampdu_ref++;
+
+			rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
+			rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
+			if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF,
+				      he_phy_data))
+				rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
+		}
+	} else if (he_mu && he_phy_data != HE_PHY_DATA_INVAL) {
+		he_mu->flags1 |=
+			le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIBG_SYM_OR_USER_NUM_MASK,
+						   he_phy_data),
+					 IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS);
+		he_mu->flags1 |=
+			le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_DCM,
+						   he_phy_data),
+					 IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM);
+		he_mu->flags1 |=
+			le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_MCS_MASK,
+						   he_phy_data),
+					 IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS);
+		he_mu->flags2 |=
+			le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_COMPRESSION,
+						   he_phy_data),
+					 IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP);
+		he_mu->flags2 |=
+			le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_PREAMBLE_PUNC_TYPE_MASK,
+						   he_phy_data),
+					 IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW);
+	}
+	rx_status->device_timestamp = gp2_on_air_rise;
+	rx_status->band = channel > 14 ? NL80211_BAND_5GHZ :
+		NL80211_BAND_2GHZ;
+	rx_status->freq = ieee80211_channel_to_frequency(channel,
+							 rx_status->band);
+	iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, energy_a,
+				    energy_b);
+
+	/* update aggregation data for monitor sake on default queue */
+	if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
+		bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
+		u64 he_phy_data;
+
+		if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+			he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
+		else
+			he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
+
+		rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
+		rx_status->ampdu_reference = mvm->ampdu_ref;
+		/* toggle is switched whenever new aggregation starts */
+		if (toggle_bit != mvm->ampdu_toggle) {
+			mvm->ampdu_ref++;
+			mvm->ampdu_toggle = toggle_bit;
+
+			if (he_phy_data != HE_PHY_DATA_INVAL &&
+			    he_type == RATE_MCS_HE_TYPE_MU) {
+				rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
+				if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF,
+					      he_phy_data))
+					rx_status->flag |=
+						RX_FLAG_AMPDU_EOF_BIT;
+			}
+		}
+	}
+
+	rcu_read_lock();
+
+	if (desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) {
+		u8 id = desc->sta_id_flags & IWL_RX_MPDU_SIF_STA_ID_MASK;
+
+		if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) {
+			sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
+			if (IS_ERR(sta))
+				sta = NULL;
+		}
+	} else if (!is_multicast_ether_addr(hdr->addr2)) {
+		/*
+		 * This is fine since we prevent two stations with the same
+		 * address from being added.
+		 */
+		sta = ieee80211_find_sta_by_ifaddr(mvm->hw, hdr->addr2, NULL);
+	}
+
+	if (sta) {
+		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+		struct ieee80211_vif *tx_blocked_vif =
+			rcu_dereference(mvm->csa_tx_blocked_vif);
+		u8 baid = (u8)((le32_to_cpu(desc->reorder_data) &
+			       IWL_RX_MPDU_REORDER_BAID_MASK) >>
+			       IWL_RX_MPDU_REORDER_BAID_SHIFT);
+
+		if (!mvm->tcm.paused && len >= sizeof(*hdr) &&
+		    !is_multicast_ether_addr(hdr->addr1) &&
+		    ieee80211_is_data(hdr->frame_control) &&
+		    time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
+			schedule_delayed_work(&mvm->tcm.work, 0);
+
+		/*
+		 * We have tx blocked stations (with CS bit). If we heard
+		 * frames from a blocked station on a new channel we can
+		 * TX to it again.
+		 */
+		if (unlikely(tx_blocked_vif) &&
+		    tx_blocked_vif == mvmsta->vif) {
+			struct iwl_mvm_vif *mvmvif =
+				iwl_mvm_vif_from_mac80211(tx_blocked_vif);
+
+			if (mvmvif->csa_target_freq == rx_status->freq)
+				iwl_mvm_sta_modify_disable_tx_ap(mvm, sta,
+								 false);
+		}
+
+		rs_update_last_rssi(mvm, mvmsta, rx_status);
+
+		if (iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_RSSI) &&
+		    ieee80211_is_beacon(hdr->frame_control)) {
+			struct iwl_fw_dbg_trigger_tlv *trig;
+			struct iwl_fw_dbg_trigger_low_rssi *rssi_trig;
+			bool trig_check;
+			s32 rssi;
+
+			trig = iwl_fw_dbg_get_trigger(mvm->fw,
+						      FW_DBG_TRIGGER_RSSI);
+			rssi_trig = (void *)trig->data;
+			rssi = le32_to_cpu(rssi_trig->rssi);
+
+			trig_check =
+				iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
+							      ieee80211_vif_to_wdev(mvmsta->vif),
+							      trig);
+			if (trig_check && rx_status->signal < rssi)
+				iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
+							NULL);
+		}
+
+		if (ieee80211_is_data(hdr->frame_control))
+			iwl_mvm_rx_csum(sta, skb, desc);
+
+		if (iwl_mvm_is_dup(sta, queue, rx_status, hdr, desc)) {
+			kfree_skb(skb);
+			goto out;
+		}
+
+		/*
+		 * Our hardware de-aggregates AMSDUs but copies the mac header
+		 * as-is to the de-aggregated MPDUs. We need to turn off the
+		 * AMSDU bit in the QoS control ourselves.
+		 * In addition, HW reverses addr3 and addr4 - reverse it back.
+		 */
+		if ((desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU) &&
+		    !WARN_ON(!ieee80211_is_data_qos(hdr->frame_control))) {
+			u8 *qc = ieee80211_get_qos_ctl(hdr);
+
+			*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+
+			if (mvm->trans->cfg->device_family ==
+			    IWL_DEVICE_FAMILY_9000) {
+				iwl_mvm_flip_address(hdr->addr3);
+
+				if (ieee80211_has_a4(hdr->frame_control))
+					iwl_mvm_flip_address(hdr->addr4);
+			}
+		}
+		if (baid != IWL_RX_REORDER_DATA_INVALID_BAID) {
+			u32 reorder_data = le32_to_cpu(desc->reorder_data);
+
+			iwl_mvm_agg_rx_received(mvm, reorder_data, baid);
+		}
+	}
+
+	switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+	case RATE_MCS_CHAN_WIDTH_20:
+		break;
+	case RATE_MCS_CHAN_WIDTH_40:
+		rx_status->bw = RATE_INFO_BW_40;
+		break;
+	case RATE_MCS_CHAN_WIDTH_80:
+		rx_status->bw = RATE_INFO_BW_80;
+		break;
+	case RATE_MCS_CHAN_WIDTH_160:
+		rx_status->bw = RATE_INFO_BW_160;
+		break;
+	}
+
+	if (he_type == RATE_MCS_HE_TYPE_EXT_SU &&
+	    rate_n_flags & RATE_MCS_HE_106T_MSK) {
+		rx_status->bw = RATE_INFO_BW_HE_RU;
+		rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+	}
+
+	if (rate_n_flags & RATE_MCS_HE_MSK &&
+	    phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD &&
+	    he_type == RATE_MCS_HE_TYPE_MU) {
+		/*
+		 * Unfortunately, we have to leave the mac80211 data
+		 * incorrect for the case that we receive an HE-MU
+		 * transmission and *don't* have the he_mu pointer,
+		 * i.e. we don't have the phy data (due to the bits
+		 * being used for TSF). This shouldn't happen, though,
+		 * as management frames where we need the TSF/timers
+		 * are not transmitted in HE-MU, I think.
+		 */
+		u8 ru = FIELD_GET(IWL_RX_HE_PHY_RU_ALLOC_MASK, he_phy_data);
+		u8 offs = 0;
+
+		rx_status->bw = RATE_INFO_BW_HE_RU;
+
+		switch (ru) {
+		case 0 ... 36:
+			rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
+			offs = ru;
+			break;
+		case 37 ... 52:
+			rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
+			offs = ru - 37;
+			break;
+		case 53 ... 60:
+			rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
+			offs = ru - 53;
+			break;
+		case 61 ... 64:
+			rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
+			offs = ru - 61;
+			break;
+		case 65 ... 66:
+			rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
+			offs = ru - 65;
+			break;
+		case 67:
+			rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
+			break;
+		case 68:
+			rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
+			break;
+		}
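+		/* e.g. ru = 40 decodes as a 52-tone RU at offset 40 - 37 = 3 */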
+		he->data2 |=
+			le16_encode_bits(offs,
+					 IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
+		he->data2 |=
+			cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN);
+		if (he_phy_data & IWL_RX_HE_PHY_RU_ALLOC_SEC80)
+			he->data2 |=
+				cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC);
+	} else if (he) {
+		he->data1 |=
+			cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
+	}
+
+	if (!(rate_n_flags & RATE_MCS_CCK_MSK) &&
+	    rate_n_flags & RATE_MCS_SGI_MSK)
+		rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+	if (rate_n_flags & RATE_HT_MCS_GF_MSK)
+		rx_status->enc_flags |= RX_ENC_FLAG_HT_GF;
+	if (rate_n_flags & RATE_MCS_LDPC_MSK)
+		rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
+	if (rate_n_flags & RATE_MCS_HT_MSK) {
+		u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
+				RATE_MCS_STBC_POS;
+		rx_status->encoding = RX_ENC_HT;
+		rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
+		rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+	} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+		u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
+				RATE_MCS_STBC_POS;
+		rx_status->nss =
+			((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
+						RATE_VHT_MCS_NSS_POS) + 1;
+		rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
+		rx_status->encoding = RX_ENC_VHT;
+		rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+		if (rate_n_flags & RATE_MCS_BF_MSK)
+			rx_status->enc_flags |= RX_ENC_FLAG_BF;
+	} else if (he) {
+		u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
+				RATE_MCS_STBC_POS;
+		rx_status->nss =
+			((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
+						RATE_VHT_MCS_NSS_POS) + 1;
+		rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
+		rx_status->encoding = RX_ENC_HE;
+		rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
+		if (rate_n_flags & RATE_MCS_BF_MSK)
+			rx_status->enc_flags |= RX_ENC_FLAG_BF;
+
+		rx_status->he_dcm =
+			!!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
+
+#define CHECK_TYPE(F)							\
+	BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F !=	\
+		     (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS))
+
+		CHECK_TYPE(SU);
+		CHECK_TYPE(EXT_SU);
+		CHECK_TYPE(MU);
+		CHECK_TYPE(TRIG);
+
+		he->data1 |= cpu_to_le16(he_type >> RATE_MCS_HE_TYPE_POS);
+
+		if (rate_n_flags & RATE_MCS_BF_MSK)
+			he->data5 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA5_TXBF);
+
+		switch ((rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >>
+			RATE_MCS_HE_GI_LTF_POS) {
+		case 0:
+			rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+			break;
+		case 1:
+			rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+			break;
+		case 2:
+			rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+			break;
+		case 3:
+			if (rate_n_flags & RATE_MCS_SGI_MSK)
+				rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+			else
+				rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+			break;
+		}
+
+		switch (he_type) {
+		case RATE_MCS_HE_TYPE_SU: {
+			u16 val;
+
+			/* LTF syms correspond to streams */
+			he->data2 |=
+				cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
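+			/*
+			 * val indexes the valid HE-LTF symbol counts
+			 * {1, 2, 4, 6, 8}; e.g. 3 spatial streams need
+			 * 4 LTF symbols, hence val = 2.
+			 */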
+			switch (rx_status->nss) {
+			case 1:
+				val = 0;
+				break;
+			case 2:
+				val = 1;
+				break;
+			case 3:
+			case 4:
+				val = 2;
+				break;
+			case 5:
+			case 6:
+				val = 3;
+				break;
+			case 7:
+			case 8:
+				val = 4;
+				break;
+			default:
+				WARN_ONCE(1, "invalid nss: %d\n",
+					  rx_status->nss);
+				val = 0;
+			}
+			he->data5 |=
+				le16_encode_bits(val,
+						 IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS);
+			}
+			break;
+		case RATE_MCS_HE_TYPE_MU: {
+			u16 val;
+			u64 he_phy_data;
+
+			if (mvm->trans->cfg->device_family >=
+			    IWL_DEVICE_FAMILY_22560)
+				he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
+			else
+				he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
+
+			if (he_phy_data == HE_PHY_DATA_INVAL)
+				break;
+
+			val = FIELD_GET(IWL_RX_HE_PHY_HE_LTF_NUM_MASK,
+					he_phy_data);
+
+			he->data2 |=
+				cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
+			he->data5 |=
+				cpu_to_le16(FIELD_PREP(
+					IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS,
+					val));
+			}
+			break;
+		case RATE_MCS_HE_TYPE_EXT_SU:
+		case RATE_MCS_HE_TYPE_TRIG:
+			/* not supported yet */
+			break;
+		}
+	} else {
+		int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
+							       rx_status->band);
+
+		if (WARN(rate < 0 || rate > 0xFF,
+			 "Invalid rate flags 0x%x, band %d,\n",
+			 rate_n_flags, rx_status->band)) {
+			kfree_skb(skb);
+			goto out;
+		}
+		rx_status->rate_idx = rate;
+	}
+
+	/* management stuff on default queue */
+	if (!queue) {
+		if (unlikely((ieee80211_is_beacon(hdr->frame_control) ||
+			      ieee80211_is_probe_resp(hdr->frame_control)) &&
+			     mvm->sched_scan_pass_all ==
+			     SCHED_SCAN_PASS_ALL_ENABLED))
+			mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND;
+
+		if (unlikely(ieee80211_is_beacon(hdr->frame_control) ||
+			     ieee80211_is_probe_resp(hdr->frame_control)))
+			rx_status->boottime_ns = ktime_get_boot_ns();
+	}
+
+	iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb);
+	if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
+		iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
+out:
+	rcu_read_unlock();
+}
+
+void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
+			      struct iwl_rx_cmd_buffer *rxb, int queue)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_frame_release *release = (void *)pkt->data;
+	struct ieee80211_sta *sta;
+	struct iwl_mvm_reorder_buffer *reorder_buf;
+	struct iwl_mvm_baid_data *ba_data;
+
+	int baid = release->baid;
+
+	IWL_DEBUG_HT(mvm, "Frame release notification for BAID %u, NSSN %d\n",
+		     release->baid, le16_to_cpu(release->nssn));
+
+	if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
+		return;
+
+	rcu_read_lock();
+
+	ba_data = rcu_dereference(mvm->baid_map[baid]);
+	if (WARN_ON_ONCE(!ba_data))
+		goto out;
+
+	sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
+	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+		goto out;
+
+	reorder_buf = &ba_data->reorder_buf[queue];
+
+	spin_lock_bh(&reorder_buf->lock);
+	iwl_mvm_release_frames(mvm, sta, napi, ba_data, reorder_buf,
+			       le16_to_cpu(release->nssn));
+	spin_unlock_bh(&reorder_buf->lock);
+
+out:
+	rcu_read_unlock();
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
new file mode 100644
index 0000000..11ecdf6
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -0,0 +1,2031 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/etherdevice.h>
+#include <net/mac80211.h>
+
+#include "mvm.h"
+#include "fw/api/scan.h"
+#include "iwl-io.h"
+
+#define IWL_DENSE_EBS_SCAN_RATIO 5
+#define IWL_SPARSE_EBS_SCAN_RATIO 1
+
+#define IWL_SCAN_DWELL_ACTIVE		10
+#define IWL_SCAN_DWELL_PASSIVE		110
+#define IWL_SCAN_DWELL_FRAGMENTED	44
+#define IWL_SCAN_DWELL_EXTENDED		90
+#define IWL_SCAN_NUM_OF_FRAGS		3
+
+
+/* adaptive dwell max budget time [TU] for full scan */
+#define IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
+/* adaptive dwell max budget time [TU] for directed scan */
+#define IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
+/* adaptive dwell default APs number */
+#define IWL_SCAN_ADWELL_DEFAULT_N_APS 2
+/* adaptive dwell default APs number in social channels (1, 6, 11) */
+#define IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
+
+struct iwl_mvm_scan_timing_params {
+	u32 suspend_time;
+	u32 max_out_time;
+};
+
+static struct iwl_mvm_scan_timing_params scan_timing[] = {
+	[IWL_SCAN_TYPE_UNASSOC] = {
+		.suspend_time = 0,
+		.max_out_time = 0,
+	},
+	[IWL_SCAN_TYPE_WILD] = {
+		.suspend_time = 30,
+		.max_out_time = 120,
+	},
+	[IWL_SCAN_TYPE_MILD] = {
+		.suspend_time = 120,
+		.max_out_time = 120,
+	},
+	[IWL_SCAN_TYPE_FRAGMENTED] = {
+		.suspend_time = 95,
+		.max_out_time = 44,
+	},
+};
+
+struct iwl_mvm_scan_params {
+	/* For CDB this is low band scan type, for non-CDB - type. */
+	enum iwl_mvm_scan_type type;
+	enum iwl_mvm_scan_type hb_type;
+	u32 n_channels;
+	u16 delay;
+	int n_ssids;
+	struct cfg80211_ssid *ssids;
+	struct ieee80211_channel **channels;
+	u32 flags;
+	u8 *mac_addr;
+	u8 *mac_addr_mask;
+	bool no_cck;
+	bool pass_all;
+	int n_match_sets;
+	struct iwl_scan_probe_req preq;
+	struct cfg80211_match_set *match_sets;
+	int n_scan_plans;
+	struct cfg80211_sched_scan_plan *scan_plans;
+	u32 measurement_dwell;
+};
+
+static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm)
+{
+	struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
+
+	if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm))
+		return (void *)&cmd->v8.data;
+
+	if (iwl_mvm_is_adaptive_dwell_supported(mvm))
+		return (void *)&cmd->v7.data;
+
+	if (iwl_mvm_cdb_scan_api(mvm))
+		return (void *)&cmd->v6.data;
+
+	return (void *)&cmd->v1.data;
+}
+
+static inline struct iwl_scan_umac_chan_param *
+iwl_mvm_get_scan_req_umac_channel(struct iwl_mvm *mvm)
+{
+	struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
+
+	if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm))
+		return &cmd->v8.channel;
+
+	if (iwl_mvm_is_adaptive_dwell_supported(mvm))
+		return &cmd->v7.channel;
+
+	if (iwl_mvm_cdb_scan_api(mvm))
+		return &cmd->v6.channel;
+
+	return &cmd->v1.channel;
+}
+
+static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
+{
+	if (mvm->scan_rx_ant != ANT_NONE)
+		return mvm->scan_rx_ant;
+	return iwl_mvm_get_valid_rx_ant(mvm);
+}
+
+static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
+{
+	u16 rx_chain;
+	u8 rx_ant;
+
+	rx_ant = iwl_mvm_scan_rx_ant(mvm);
+	rx_chain = rx_ant << PHY_RX_CHAIN_VALID_POS;
+	rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
+	rx_chain |= rx_ant << PHY_RX_CHAIN_FORCE_SEL_POS;
+	rx_chain |= 0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS;
+	return cpu_to_le16(rx_chain);
+}
+
+static __le32 iwl_mvm_scan_rxon_flags(enum nl80211_band band)
+{
+	if (band == NL80211_BAND_2GHZ)
+		return cpu_to_le32(PHY_BAND_24);
+	else
+		return cpu_to_le32(PHY_BAND_5);
+}
+
+static inline __le32
+iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum nl80211_band band,
+			  bool no_cck)
+{
+	u32 tx_ant;
+
+	mvm->scan_last_antenna_idx =
+		iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
+				     mvm->scan_last_antenna_idx);
+	tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS;
+
+	if (band == NL80211_BAND_2GHZ && !no_cck)
+		return cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK |
+				   tx_ant);
+	else
+		return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant);
+}
+
+static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
+					    struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	int *global_cnt = data;
+
+	if (vif->type != NL80211_IFTYPE_P2P_DEVICE && mvmvif->phy_ctxt &&
+	    mvmvif->phy_ctxt->id < NUM_PHY_CTX)
+		*global_cnt += 1;
+}
+
+static enum iwl_mvm_traffic_load iwl_mvm_get_traffic_load(struct iwl_mvm *mvm)
+{
+	return mvm->tcm.result.global_load;
+}
+
+static enum iwl_mvm_traffic_load
+iwl_mvm_get_traffic_load_band(struct iwl_mvm *mvm, enum nl80211_band band)
+{
+	return mvm->tcm.result.band_load[band];
+}
+
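+/*
+ * Scan type selection: with no interface holding a PHY context we are
+ * effectively unassociated and can scan freely. Under high load or low
+ * latency a fragmented scan is preferred if the FW supports it; "mild"
+ * (longer suspend_time between off-channel visits) covers medium load
+ * or low latency, and "wild" covers the rest.
+ */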
+static enum
+iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device,
+					 enum iwl_mvm_traffic_load load,
+					 bool low_latency)
+{
+	int global_cnt = 0;
+
+	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+					    IEEE80211_IFACE_ITER_NORMAL,
+					    iwl_mvm_scan_condition_iterator,
+					    &global_cnt);
+	if (!global_cnt)
+		return IWL_SCAN_TYPE_UNASSOC;
+
+	if ((load == IWL_MVM_TRAFFIC_HIGH || low_latency) && !p2p_device &&
+	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_FRAGMENTED_SCAN))
+		return IWL_SCAN_TYPE_FRAGMENTED;
+
+	if (load >= IWL_MVM_TRAFFIC_MEDIUM || low_latency)
+		return IWL_SCAN_TYPE_MILD;
+
+	return IWL_SCAN_TYPE_WILD;
+}
+
+static enum
+iwl_mvm_scan_type iwl_mvm_get_scan_type(struct iwl_mvm *mvm, bool p2p_device)
+{
+	enum iwl_mvm_traffic_load load;
+	bool low_latency;
+
+	load = iwl_mvm_get_traffic_load(mvm);
+	low_latency = iwl_mvm_low_latency(mvm);
+
+	return _iwl_mvm_get_scan_type(mvm, p2p_device, load, low_latency);
+}
+
+static enum
+iwl_mvm_scan_type iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm,
+					     bool p2p_device,
+					     enum nl80211_band band)
+{
+	enum iwl_mvm_traffic_load load;
+	bool low_latency;
+
+	load = iwl_mvm_get_traffic_load_band(mvm, band);
+	low_latency = iwl_mvm_low_latency_band(mvm, band);
+
+	return _iwl_mvm_get_scan_type(mvm, p2p_device, load, low_latency);
+}
+
+static int
+iwl_mvm_get_measurement_dwell(struct iwl_mvm *mvm,
+			      struct cfg80211_scan_request *req,
+			      struct iwl_mvm_scan_params *params)
+{
+	u32 duration = scan_timing[params->type].max_out_time;
+
+	if (!req->duration)
+		return 0;
+
+	if (iwl_mvm_is_cdb_supported(mvm)) {
+		u32 hb_time = scan_timing[params->hb_type].max_out_time;
+
+		duration = min_t(u32, duration, hb_time);
+	}
+
+	if (req->duration_mandatory && req->duration > duration) {
+		IWL_DEBUG_SCAN(mvm,
+			       "Measurement scan - too long dwell %hu (max out time %u)\n",
+			       req->duration,
+			       duration);
+		return -EOPNOTSUPP;
+	}
+
+	return min_t(u32, (u32)req->duration, duration);
+}
+
+static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
+{
+	/* require rrm scan whenever the fw supports it */
+	return fw_has_capa(&mvm->fw->ucode_capa,
+			   IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT);
+}
+
+static int iwl_mvm_max_scan_ie_fw_cmd_room(struct iwl_mvm *mvm)
+{
+	int max_probe_len;
+
+	max_probe_len = SCAN_OFFLOAD_PROBE_REQ_SIZE;
+
+	/* we create the 802.11 header and SSID element */
+	max_probe_len -= 24 + 2;
+
+	/* DS parameter set element is added on 2.4GHZ band if required */
+	if (iwl_mvm_rrm_scan_needed(mvm))
+		max_probe_len -= 3;
+
+	return max_probe_len;
+}
+
+int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm)
+{
+	int max_ie_len = iwl_mvm_max_scan_ie_fw_cmd_room(mvm);
+
+	/* TODO: [BUG] This function should return the maximum allowed size of
+	 * scan IEs, however the LMAC scan api contains both 2GHZ and 5GHZ IEs
+	 * in the same command. So the correct implementation of this function
+	 * is just iwl_mvm_max_scan_ie_fw_cmd_room() / 2. Currently the scan
+	 * command has only 512 bytes and it would leave us with about 240
+	 * bytes for scan IEs, which is clearly not enough. So meanwhile
+	 * we will report an incorrect value. This may result in a failure to
+	 * issue a scan in unified_scan_lmac and unified_sched_scan_lmac
+	 * functions with -ENOBUFS, if a large enough probe is provided.
+	 */
+	return max_ie_len;
+}
+
+void iwl_mvm_rx_lmac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+					      struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data;
+
+	IWL_DEBUG_SCAN(mvm,
+		       "Scan offload iteration complete: status=0x%x scanned channels=%d\n",
+		       notif->status, notif->scanned_channels);
+
+	if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) {
+		IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n");
+		ieee80211_sched_scan_results(mvm->hw);
+		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
+	}
+}
+
+void iwl_mvm_rx_scan_match_found(struct iwl_mvm *mvm,
+				 struct iwl_rx_cmd_buffer *rxb)
+{
+	IWL_DEBUG_SCAN(mvm, "Scheduled scan results\n");
+	ieee80211_sched_scan_results(mvm->hw);
+}
+
+static const char *iwl_mvm_ebs_status_str(enum iwl_scan_ebs_status status)
+{
+	switch (status) {
+	case IWL_SCAN_EBS_SUCCESS:
+		return "successful";
+	case IWL_SCAN_EBS_INACTIVE:
+		return "inactive";
+	case IWL_SCAN_EBS_FAILED:
+	case IWL_SCAN_EBS_CHAN_NOT_FOUND:
+	default:
+		return "failed";
+	}
+}
+
+void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
+					 struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_periodic_scan_complete *scan_notif = (void *)pkt->data;
+	bool aborted = (scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
+
+	/* If this happens, the firmware has mistakenly sent an LMAC
+	 * notification during UMAC scans -- warn and ignore it.
+	 */
+	if (WARN_ON_ONCE(fw_has_capa(&mvm->fw->ucode_capa,
+				     IWL_UCODE_TLV_CAPA_UMAC_SCAN)))
+		return;
+
+	/* scan status must be locked for proper checking */
+	lockdep_assert_held(&mvm->mutex);
+
+	/* We first check if we were stopping a scan, in which case we
+	 * just clear the stopping flag.  Then we check if it was a
+	 * firmware initiated stop, in which case we need to inform
+	 * mac80211.
+	 * Note that we can have a stopping and a running scan
+	 * simultaneously, but we can't have two different types of
+	 * scans stopping or running at the same time (since LMAC
+	 * doesn't support it).
+	 */
+
+	if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_SCHED) {
+		WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR);
+
+		IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
+			       aborted ? "aborted" : "completed",
+			       iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+		IWL_DEBUG_SCAN(mvm,
+			       "Last line %d, Last iteration %d, Time after last iteration %d\n",
+			       scan_notif->last_schedule_line,
+			       scan_notif->last_schedule_iteration,
+			       __le32_to_cpu(scan_notif->time_after_last_iter));
+
+		mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_SCHED;
+	} else if (mvm->scan_status & IWL_MVM_SCAN_STOPPING_REGULAR) {
+		IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s\n",
+			       aborted ? "aborted" : "completed",
+			       iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+
+		mvm->scan_status &= ~IWL_MVM_SCAN_STOPPING_REGULAR;
+	} else if (mvm->scan_status & IWL_MVM_SCAN_SCHED) {
+		WARN_ON_ONCE(mvm->scan_status & IWL_MVM_SCAN_REGULAR);
+
+		IWL_DEBUG_SCAN(mvm, "Scheduled scan %s, EBS status %s\n",
+			       aborted ? "aborted" : "completed",
+			       iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+		IWL_DEBUG_SCAN(mvm,
+			       "Last line %d, Last iteration %d, Time after last iteration %d (FW)\n",
+			       scan_notif->last_schedule_line,
+			       scan_notif->last_schedule_iteration,
+			       __le32_to_cpu(scan_notif->time_after_last_iter));
+
+		mvm->scan_status &= ~IWL_MVM_SCAN_SCHED;
+		ieee80211_sched_scan_stopped(mvm->hw);
+		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
+	} else if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) {
+		struct cfg80211_scan_info info = {
+			.aborted = aborted,
+		};
+
+		IWL_DEBUG_SCAN(mvm, "Regular scan %s, EBS status %s (FW)\n",
+			       aborted ? "aborted" : "completed",
+			       iwl_mvm_ebs_status_str(scan_notif->ebs_status));
+
+		mvm->scan_status &= ~IWL_MVM_SCAN_REGULAR;
+		ieee80211_scan_completed(mvm->hw, &info);
+		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+		cancel_delayed_work(&mvm->scan_timeout_dwork);
+		iwl_mvm_resume_tcm(mvm);
+	} else {
+		IWL_ERR(mvm,
+			"got scan complete notification but no scan is running\n");
+	}
+
+	mvm->last_ebs_successful =
+			scan_notif->ebs_status == IWL_SCAN_EBS_SUCCESS ||
+			scan_notif->ebs_status == IWL_SCAN_EBS_INACTIVE;
+}
+
+static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
+{
+	int i;
+
+	for (i = 0; i < PROBE_OPTION_MAX; i++) {
+		if (!ssid_list[i].len)
+			break;
+		if (ssid_list[i].len == ssid_len &&
+		    !memcmp(ssid_list[i].ssid, ssid, ssid_len))
+			return i;
+	}
+	return -1;
+}
+
+/* We insert the SSIDs in an inverted order, because the FW will
+ * invert it back.
+ */
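+/*
+ * Illustrative example: with match sets {A, B} and scan SSIDs {B, C},
+ * ssids[] is built as B, A (match sets, inverted), then C is appended;
+ * ssid_bitmap gets BIT(2) for C plus BIT(0) for the direct scan of B,
+ * which already sits at index 0.
+ */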
+static void iwl_scan_build_ssids(struct iwl_mvm_scan_params *params,
+				 struct iwl_ssid_ie *ssids,
+				 u32 *ssid_bitmap)
+{
+	int i, j;
+	int index;
+
+	/*
+	 * copy SSIDs from match list.
+	 * iwl_config_sched_scan_profiles() uses the order of these ssids to
+	 * config match list.
+	 */
+	for (i = 0, j = params->n_match_sets - 1;
+	     j >= 0 && i < PROBE_OPTION_MAX;
+	     i++, j--) {
+		/* skip empty SSID matchsets */
+		if (!params->match_sets[j].ssid.ssid_len)
+			continue;
+		ssids[i].id = WLAN_EID_SSID;
+		ssids[i].len = params->match_sets[j].ssid.ssid_len;
+		memcpy(ssids[i].ssid, params->match_sets[j].ssid.ssid,
+		       ssids[i].len);
+	}
+
+	/* add SSIDs from scan SSID list */
+	*ssid_bitmap = 0;
+	for (j = params->n_ssids - 1;
+	     j >= 0 && i < PROBE_OPTION_MAX;
+	     i++, j--) {
+		index = iwl_ssid_exist(params->ssids[j].ssid,
+				       params->ssids[j].ssid_len,
+				       ssids);
+		if (index < 0) {
+			ssids[i].id = WLAN_EID_SSID;
+			ssids[i].len = params->ssids[j].ssid_len;
+			memcpy(ssids[i].ssid, params->ssids[j].ssid,
+			       ssids[i].len);
+			*ssid_bitmap |= BIT(i);
+		} else {
+			*ssid_bitmap |= BIT(index);
+		}
+	}
+}
+
+static int
+iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
+				   struct cfg80211_sched_scan_request *req)
+{
+	struct iwl_scan_offload_profile *profile;
+	struct iwl_scan_offload_profile_cfg *profile_cfg;
+	struct iwl_scan_offload_blacklist *blacklist;
+	struct iwl_host_cmd cmd = {
+		.id = SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
+		.len[1] = sizeof(*profile_cfg),
+		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+		.dataflags[1] = IWL_HCMD_DFL_NOCOPY,
+	};
+	int blacklist_len;
+	int i;
+	int ret;
+
+	if (WARN_ON(req->n_match_sets > IWL_SCAN_MAX_PROFILES))
+		return -EIO;
+
+	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SHORT_BL)
+		blacklist_len = IWL_SCAN_SHORT_BLACKLIST_LEN;
+	else
+		blacklist_len = IWL_SCAN_MAX_BLACKLIST_LEN;
+
+	blacklist = kcalloc(blacklist_len, sizeof(*blacklist), GFP_KERNEL);
+	if (!blacklist)
+		return -ENOMEM;
+
+	profile_cfg = kzalloc(sizeof(*profile_cfg), GFP_KERNEL);
+	if (!profile_cfg) {
+		ret = -ENOMEM;
+		goto free_blacklist;
+	}
+
+	cmd.data[0] = blacklist;
+	cmd.len[0] = sizeof(*blacklist) * blacklist_len;
+	cmd.data[1] = profile_cfg;
+
+	/* No blacklist configuration */
+
+	profile_cfg->num_profiles = req->n_match_sets;
+	profile_cfg->active_clients = SCAN_CLIENT_SCHED_SCAN;
+	profile_cfg->pass_match = SCAN_CLIENT_SCHED_SCAN;
+	profile_cfg->match_notify = SCAN_CLIENT_SCHED_SCAN;
+	if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len)
+		profile_cfg->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN;
+
+	for (i = 0; i < req->n_match_sets; i++) {
+		profile = &profile_cfg->profiles[i];
+		profile->ssid_index = i;
+		/* Support any cipher and auth algorithm */
+		profile->unicast_cipher = 0xff;
+		profile->auth_alg = 0xff;
+		profile->network_type = IWL_NETWORK_TYPE_ANY;
+		profile->band_selection = IWL_SCAN_OFFLOAD_SELECT_ANY;
+		profile->client_bitmap = SCAN_CLIENT_SCHED_SCAN;
+	}
+
+	IWL_DEBUG_SCAN(mvm, "Sending scheduled scan profile config\n");
+
+	ret = iwl_mvm_send_cmd(mvm, &cmd);
+	kfree(profile_cfg);
+free_blacklist:
+	kfree(blacklist);
+
+	return ret;
+}
+
+static bool iwl_mvm_scan_pass_all(struct iwl_mvm *mvm,
+				  struct cfg80211_sched_scan_request *req)
+{
+	if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) {
+		IWL_DEBUG_SCAN(mvm,
+			       "Sending scheduled scan with filtering, n_match_sets %d\n",
+			       req->n_match_sets);
+		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
+		return false;
+	}
+
+	IWL_DEBUG_SCAN(mvm, "Sending Scheduled scan without filtering\n");
+
+	mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
+	return true;
+}
+
+static int iwl_mvm_lmac_scan_abort(struct iwl_mvm *mvm)
+{
+	int ret;
+	struct iwl_host_cmd cmd = {
+		.id = SCAN_OFFLOAD_ABORT_CMD,
+	};
+	u32 status = CAN_ABORT_STATUS;
+
+	ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
+	if (ret)
+		return ret;
+
+	if (status != CAN_ABORT_STATUS) {
+		/*
+		 * The scan abort will return 1 for success or
+		 * 2 for "failure".  A failure condition can be
+		 * due to simply not being in an active scan which
+		 * can occur if we send the scan abort before the
+		 * microcode has notified us that a scan is completed.
+		 */
+		IWL_DEBUG_SCAN(mvm, "SCAN OFFLOAD ABORT ret %d.\n", status);
+		ret = -ENOENT;
+	}
+
+	return ret;
+}
+
+static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm,
+				     struct iwl_scan_req_tx_cmd *tx_cmd,
+				     bool no_cck)
+{
+	tx_cmd[0].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
+					 TX_CMD_FLG_BT_DIS);
+	tx_cmd[0].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
+							   NL80211_BAND_2GHZ,
+							   no_cck);
+	tx_cmd[0].sta_id = mvm->aux_sta.sta_id;
+
+	tx_cmd[1].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
+					 TX_CMD_FLG_BT_DIS);
+	tx_cmd[1].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
+							   NL80211_BAND_5GHZ,
+							   no_cck);
+	tx_cmd[1].sta_id = mvm->aux_sta.sta_id;
+}
+
+static void
+iwl_mvm_lmac_scan_cfg_channels(struct iwl_mvm *mvm,
+			       struct ieee80211_channel **channels,
+			       int n_channels, u32 ssid_bitmap,
+			       struct iwl_scan_req_lmac *cmd)
+{
+	struct iwl_scan_channel_cfg_lmac *channel_cfg = (void *)&cmd->data;
+	int i;
+
+	for (i = 0; i < n_channels; i++) {
+		channel_cfg[i].channel_num =
+			cpu_to_le16(channels[i]->hw_value);
+		channel_cfg[i].iter_count = cpu_to_le16(1);
+		channel_cfg[i].iter_interval = 0;
+		channel_cfg[i].flags =
+			cpu_to_le32(IWL_UNIFIED_SCAN_CHANNEL_PARTIAL |
+				    ssid_bitmap);
+	}
+}
+
+static u8 *iwl_mvm_copy_and_insert_ds_elem(struct iwl_mvm *mvm, const u8 *ies,
+					   size_t len, u8 *const pos)
+{
+	static const u8 before_ds_params[] = {
+			WLAN_EID_SSID,
+			WLAN_EID_SUPP_RATES,
+			WLAN_EID_REQUEST,
+			WLAN_EID_EXT_SUPP_RATES,
+	};
+	size_t offs;
+	u8 *newpos = pos;
+
+	if (!iwl_mvm_rrm_scan_needed(mvm)) {
+		memcpy(newpos, ies, len);
+		return newpos + len;
+	}
+
+	offs = ieee80211_ie_split(ies, len,
+				  before_ds_params,
+				  ARRAY_SIZE(before_ds_params),
+				  0);
+
+	memcpy(newpos, ies, offs);
+	newpos += offs;
+
+	/* Add a placeholder for DS Parameter Set element */
+	*newpos++ = WLAN_EID_DS_PARAMS;
+	*newpos++ = 1;
+	*newpos++ = 0;
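+	/* i.e. EID 3, length 1, with the channel number left as 0 here */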
+
+	memcpy(newpos, ies + offs, len - offs);
+	newpos += len - offs;
+
+	return newpos;
+}
+
+#define WFA_TPC_IE_LEN	9
+
+static void iwl_mvm_add_tpc_report_ie(u8 *pos)
+{
+	pos[0] = WLAN_EID_VENDOR_SPECIFIC;
+	pos[1] = WFA_TPC_IE_LEN - 2;
+	pos[2] = (WLAN_OUI_MICROSOFT >> 16) & 0xff;
+	pos[3] = (WLAN_OUI_MICROSOFT >> 8) & 0xff;
+	pos[4] = WLAN_OUI_MICROSOFT & 0xff;
+	pos[5] = WLAN_OUI_TYPE_MICROSOFT_TPC;
+	pos[6] = 0;
+	/* pos[7] - tx power will be inserted by the FW */
+	pos[7] = 0;
+	pos[8] = 0;
+}
+
+static void
+iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			 struct ieee80211_scan_ies *ies,
+			 struct iwl_mvm_scan_params *params)
+{
+	struct ieee80211_mgmt *frame = (void *)params->preq.buf;
+	u8 *pos, *newpos;
+	const u8 *mac_addr = params->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ?
+		params->mac_addr : NULL;
+
+	/*
+	 * Unfortunately, right now the offload scan doesn't support randomising
+	 * within the firmware, so until the firmware API is ready we implement
+	 * it in the driver. This means that the scan iterations won't really be
+	 * random, only when it's restarted, but at least that helps a bit.
+	 */
+	if (mac_addr)
+		get_random_mask_addr(frame->sa, mac_addr,
+				     params->mac_addr_mask);
+	else
+		memcpy(frame->sa, vif->addr, ETH_ALEN);
+
+	frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
+	eth_broadcast_addr(frame->da);
+	eth_broadcast_addr(frame->bssid);
+	frame->seq_ctrl = 0;
+
+	pos = frame->u.probe_req.variable;
+	*pos++ = WLAN_EID_SSID;
+	*pos++ = 0;
+
+	params->preq.mac_header.offset = 0;
+	params->preq.mac_header.len = cpu_to_le16(24 + 2);
+
+	/* Insert ds parameter set element on 2.4 GHz band */
+	newpos = iwl_mvm_copy_and_insert_ds_elem(mvm,
+						 ies->ies[NL80211_BAND_2GHZ],
+						 ies->len[NL80211_BAND_2GHZ],
+						 pos);
+	params->preq.band_data[0].offset = cpu_to_le16(pos - params->preq.buf);
+	params->preq.band_data[0].len = cpu_to_le16(newpos - pos);
+	pos = newpos;
+
+	memcpy(pos, ies->ies[NL80211_BAND_5GHZ],
+	       ies->len[NL80211_BAND_5GHZ]);
+	params->preq.band_data[1].offset = cpu_to_le16(pos - params->preq.buf);
+	params->preq.band_data[1].len =
+		cpu_to_le16(ies->len[NL80211_BAND_5GHZ]);
+	pos += ies->len[NL80211_BAND_5GHZ];
+
+	memcpy(pos, ies->common_ies, ies->common_ie_len);
+	params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf);
+
+	if (iwl_mvm_rrm_scan_needed(mvm) &&
+	    !fw_has_capa(&mvm->fw->ucode_capa,
+			 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) {
+		iwl_mvm_add_tpc_report_ie(pos + ies->common_ie_len);
+		params->preq.common_data.len = cpu_to_le16(ies->common_ie_len +
+							   WFA_TPC_IE_LEN);
+	} else {
+		params->preq.common_data.len = cpu_to_le16(ies->common_ie_len);
+	}
+}
+
+static void iwl_mvm_scan_lmac_dwell(struct iwl_mvm *mvm,
+				    struct iwl_scan_req_lmac *cmd,
+				    struct iwl_mvm_scan_params *params)
+{
+	cmd->active_dwell = IWL_SCAN_DWELL_ACTIVE;
+	cmd->passive_dwell = IWL_SCAN_DWELL_PASSIVE;
+	cmd->fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
+	cmd->extended_dwell = IWL_SCAN_DWELL_EXTENDED;
+	cmd->max_out_time = cpu_to_le32(scan_timing[params->type].max_out_time);
+	cmd->suspend_time = cpu_to_le32(scan_timing[params->type].suspend_time);
+	cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+}
+
+static inline bool iwl_mvm_scan_fits(struct iwl_mvm *mvm, int n_ssids,
+				     struct ieee80211_scan_ies *ies,
+				     int n_channels)
+{
+	return ((n_ssids <= PROBE_OPTION_MAX) &&
+		(n_channels <= mvm->fw->ucode_capa.n_scan_channels) &&
+		(ies->common_ie_len +
+		 ies->len[NL80211_BAND_2GHZ] +
+		 ies->len[NL80211_BAND_5GHZ] <=
+		 iwl_mvm_max_scan_ie_fw_cmd_room(mvm)));
+}
+
+static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm,
+					struct ieee80211_vif *vif)
+{
+	const struct iwl_ucode_capabilities *capa = &mvm->fw->ucode_capa;
+
+	/* We can only use EBS if:
+	 *	1. the feature is supported;
+	 *	2. the last EBS was successful;
+	 *	3. if only single scan, the single scan EBS API is supported;
+	 *	4. it's not a p2p find operation.
+	 */
+	return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) &&
+		mvm->last_ebs_successful && IWL_MVM_ENABLE_EBS &&
+		vif->type != NL80211_IFTYPE_P2P_DEVICE);
+}
+
+static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params)
+{
+	return params->n_scan_plans == 1 &&
+		params->scan_plans[0].iterations == 1;
+}
+
+static int iwl_mvm_scan_lmac_flags(struct iwl_mvm *mvm,
+				   struct iwl_mvm_scan_params *params,
+				   struct ieee80211_vif *vif)
+{
+	int flags = 0;
+
+	if (params->n_ssids == 0)
+		flags |= IWL_MVM_LMAC_SCAN_FLAG_PASSIVE;
+
+	if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
+		flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
+
+	if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
+		flags |= IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED;
+
+	if (iwl_mvm_rrm_scan_needed(mvm) &&
+	    fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
+		flags |= IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED;
+
+	if (params->pass_all)
+		flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
+	else
+		flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	if (mvm->scan_iter_notif_enabled)
+		flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;
+#endif
+
+	if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
+		flags |= IWL_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE;
+
+	if (iwl_mvm_is_regular_scan(params) &&
+	    vif->type != NL80211_IFTYPE_P2P_DEVICE &&
+	    params->type != IWL_SCAN_TYPE_FRAGMENTED)
+		flags |= IWL_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL;
+
+	return flags;
+}
+
+static int iwl_mvm_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			     struct iwl_mvm_scan_params *params)
+{
+	struct iwl_scan_req_lmac *cmd = mvm->scan_cmd;
+	struct iwl_scan_probe_req *preq =
+		(void *)(cmd->data + sizeof(struct iwl_scan_channel_cfg_lmac) *
+			 mvm->fw->ucode_capa.n_scan_channels);
+	u32 ssid_bitmap = 0;
+	int i;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	memset(cmd, 0, ksize(cmd));
+
+	if (WARN_ON(params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
+		return -EINVAL;
+
+	iwl_mvm_scan_lmac_dwell(mvm, cmd, params);
+
+	cmd->rx_chain_select = iwl_mvm_scan_rx_chain(mvm);
+	cmd->iter_num = cpu_to_le32(1);
+	cmd->n_channels = (u8)params->n_channels;
+
+	cmd->delay = cpu_to_le32(params->delay);
+
+	cmd->scan_flags = cpu_to_le32(iwl_mvm_scan_lmac_flags(mvm, params,
+							      vif));
+
+	cmd->flags = iwl_mvm_scan_rxon_flags(params->channels[0]->band);
+	cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
+					MAC_FILTER_IN_BEACON);
+	iwl_mvm_scan_fill_tx_cmd(mvm, cmd->tx_cmd, params->no_cck);
+	iwl_scan_build_ssids(params, cmd->direct_scan, &ssid_bitmap);
+
+	/* this API uses bits 1-20 instead of 0-19 */
+	ssid_bitmap <<= 1;
+
+	for (i = 0; i < params->n_scan_plans; i++) {
+		struct cfg80211_sched_scan_plan *scan_plan =
+			&params->scan_plans[i];
+
+		cmd->schedule[i].delay =
+			cpu_to_le16(scan_plan->interval);
+		cmd->schedule[i].iterations = scan_plan->iterations;
+		cmd->schedule[i].full_scan_mul = 1;
+	}
+
+	/*
+	 * If the number of iterations of the last scan plan is set to
+	 * zero, it should run infinitely. However, this is not always the case.
+	 * For example, when regular scan is requested the driver sets one scan
+	 * plan with one iteration.
+	 */
+	if (!cmd->schedule[i - 1].iterations)
+		cmd->schedule[i - 1].iterations = 0xff;
+
+	if (iwl_mvm_scan_use_ebs(mvm, vif)) {
+		cmd->channel_opt[0].flags =
+			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+		cmd->channel_opt[0].non_ebs_ratio =
+			cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
+		cmd->channel_opt[1].flags =
+			cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+				    IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+				    IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+		cmd->channel_opt[1].non_ebs_ratio =
+			cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
+	}
+
+	iwl_mvm_lmac_scan_cfg_channels(mvm, params->channels,
+				       params->n_channels, ssid_bitmap, cmd);
+
+	*preq = params->preq;
+
+	return 0;
+}
+
+static int rate_to_scan_rate_flag(unsigned int rate)
+{
+	static const int rate_to_scan_rate[IWL_RATE_COUNT] = {
+		[IWL_RATE_1M_INDEX]	= SCAN_CONFIG_RATE_1M,
+		[IWL_RATE_2M_INDEX]	= SCAN_CONFIG_RATE_2M,
+		[IWL_RATE_5M_INDEX]	= SCAN_CONFIG_RATE_5M,
+		[IWL_RATE_11M_INDEX]	= SCAN_CONFIG_RATE_11M,
+		[IWL_RATE_6M_INDEX]	= SCAN_CONFIG_RATE_6M,
+		[IWL_RATE_9M_INDEX]	= SCAN_CONFIG_RATE_9M,
+		[IWL_RATE_12M_INDEX]	= SCAN_CONFIG_RATE_12M,
+		[IWL_RATE_18M_INDEX]	= SCAN_CONFIG_RATE_18M,
+		[IWL_RATE_24M_INDEX]	= SCAN_CONFIG_RATE_24M,
+		[IWL_RATE_36M_INDEX]	= SCAN_CONFIG_RATE_36M,
+		[IWL_RATE_48M_INDEX]	= SCAN_CONFIG_RATE_48M,
+		[IWL_RATE_54M_INDEX]	= SCAN_CONFIG_RATE_54M,
+	};
+
+	return rate_to_scan_rate[rate];
+}
+
+static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm)
+{
+	struct ieee80211_supported_band *band;
+	unsigned int rates = 0;
+	int i;
+
+	band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
+	for (i = 0; i < band->n_bitrates; i++)
+		rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
+	band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
+	for (i = 0; i < band->n_bitrates; i++)
+		rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
+
+	/* Set both basic rates and supported rates */
+	rates |= SCAN_CONFIG_SUPPORTED_RATE(rates);
+
+	return cpu_to_le32(rates);
+}
+
+static void iwl_mvm_fill_scan_dwell(struct iwl_mvm *mvm,
+				    struct iwl_scan_dwell *dwell)
+{
+	dwell->active = IWL_SCAN_DWELL_ACTIVE;
+	dwell->passive = IWL_SCAN_DWELL_PASSIVE;
+	dwell->fragmented = IWL_SCAN_DWELL_FRAGMENTED;
+	dwell->extended = IWL_SCAN_DWELL_EXTENDED;
+}
+
+static void iwl_mvm_fill_channels(struct iwl_mvm *mvm, u8 *channels)
+{
+	struct ieee80211_supported_band *band;
+	int i, j = 0;
+
+	band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
+	for (i = 0; i < band->n_channels; i++, j++)
+		channels[j] = band->channels[i].hw_value;
+	band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
+	for (i = 0; i < band->n_channels; i++, j++)
+		channels[j] = band->channels[i].hw_value;
+}
+
+static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
+					u32 flags, u8 channel_flags)
+{
+	enum iwl_mvm_scan_type type = iwl_mvm_get_scan_type(mvm, false);
+	struct iwl_scan_config_v1 *cfg = config;
+
+	cfg->flags = cpu_to_le32(flags);
+	cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
+	cfg->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
+	cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm);
+	cfg->out_of_channel_time = cpu_to_le32(scan_timing[type].max_out_time);
+	cfg->suspend_time = cpu_to_le32(scan_timing[type].suspend_time);
+
+	iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell);
+
+	memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
+
+	cfg->bcast_sta_id = mvm->aux_sta.sta_id;
+	cfg->channel_flags = channel_flags;
+
+	iwl_mvm_fill_channels(mvm, cfg->channel_array);
+}
+
+static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
+				     u32 flags, u8 channel_flags)
+{
+	struct iwl_scan_config *cfg = config;
+
+	cfg->flags = cpu_to_le32(flags);
+	cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
+	cfg->rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
+	cfg->legacy_rates = iwl_mvm_scan_config_rates(mvm);
+
+	if (iwl_mvm_is_cdb_supported(mvm)) {
+		enum iwl_mvm_scan_type lb_type, hb_type;
+
+		lb_type = iwl_mvm_get_scan_type_band(mvm, false,
+						     NL80211_BAND_2GHZ);
+		hb_type = iwl_mvm_get_scan_type_band(mvm, false,
+						     NL80211_BAND_5GHZ);
+
+		cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] =
+			cpu_to_le32(scan_timing[lb_type].max_out_time);
+		cfg->suspend_time[SCAN_LB_LMAC_IDX] =
+			cpu_to_le32(scan_timing[lb_type].suspend_time);
+
+		cfg->out_of_channel_time[SCAN_HB_LMAC_IDX] =
+			cpu_to_le32(scan_timing[hb_type].max_out_time);
+		cfg->suspend_time[SCAN_HB_LMAC_IDX] =
+			cpu_to_le32(scan_timing[hb_type].suspend_time);
+	} else {
+		enum iwl_mvm_scan_type type =
+			iwl_mvm_get_scan_type(mvm, false);
+
+		cfg->out_of_channel_time[SCAN_LB_LMAC_IDX] =
+			cpu_to_le32(scan_timing[type].max_out_time);
+		cfg->suspend_time[SCAN_LB_LMAC_IDX] =
+			cpu_to_le32(scan_timing[type].suspend_time);
+	}
+
+	iwl_mvm_fill_scan_dwell(mvm, &cfg->dwell);
+
+	memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
+
+	cfg->bcast_sta_id = mvm->aux_sta.sta_id;
+	cfg->channel_flags = channel_flags;
+
+	iwl_mvm_fill_channels(mvm, cfg->channel_array);
+}
+
+int iwl_mvm_config_scan(struct iwl_mvm *mvm)
+{
+	void *cfg;
+	int ret, cmd_size;
+	struct iwl_host_cmd cmd = {
+		.id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+	};
+	enum iwl_mvm_scan_type type;
+	enum iwl_mvm_scan_type hb_type = IWL_SCAN_TYPE_NOT_SET;
+	int num_channels =
+		mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels +
+		mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels;
+	u32 flags;
+	u8 channel_flags;
+
+	if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
+		return -ENOBUFS;
+
+	if (iwl_mvm_is_cdb_supported(mvm)) {
+		type = iwl_mvm_get_scan_type_band(mvm, false,
+						  NL80211_BAND_2GHZ);
+		hb_type = iwl_mvm_get_scan_type_band(mvm, false,
+						     NL80211_BAND_5GHZ);
+		if (type == mvm->scan_type && hb_type == mvm->hb_scan_type)
+			return 0;
+	} else {
+		type = iwl_mvm_get_scan_type(mvm, false);
+		if (type == mvm->scan_type)
+			return 0;
+	}
+
+	if (iwl_mvm_cdb_scan_api(mvm))
+		cmd_size = sizeof(struct iwl_scan_config);
+	else
+		cmd_size = sizeof(struct iwl_scan_config_v1);
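+	/* plus one u8 channel number per supported scan channel */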
+	cmd_size += mvm->fw->ucode_capa.n_scan_channels;
+
+	cfg = kzalloc(cmd_size, GFP_KERNEL);
+	if (!cfg)
+		return -ENOMEM;
+
+	flags = SCAN_CONFIG_FLAG_ACTIVATE |
+		 SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
+		 SCAN_CONFIG_FLAG_SET_TX_CHAINS |
+		 SCAN_CONFIG_FLAG_SET_RX_CHAINS |
+		 SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
+		 SCAN_CONFIG_FLAG_SET_ALL_TIMES |
+		 SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
+		 SCAN_CONFIG_FLAG_SET_MAC_ADDR |
+		 SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
+		 SCAN_CONFIG_N_CHANNELS(num_channels) |
+		 (type == IWL_SCAN_TYPE_FRAGMENTED ?
+		  SCAN_CONFIG_FLAG_SET_FRAGMENTED :
+		  SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
+
+	channel_flags = IWL_CHANNEL_FLAG_EBS |
+			IWL_CHANNEL_FLAG_ACCURATE_EBS |
+			IWL_CHANNEL_FLAG_EBS_ADD |
+			IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
+
+	/*
+	 * Check for fragmented scan on LMAC2 (high band);
+	 * LMAC1 (low band) was checked above.
+	 */
+	if (iwl_mvm_cdb_scan_api(mvm)) {
+		if (iwl_mvm_is_cdb_supported(mvm))
+			flags |= (hb_type == IWL_SCAN_TYPE_FRAGMENTED) ?
+				 SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
+				 SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
+		iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags);
+	} else {
+		iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags);
+	}
+
+	cmd.data[0] = cfg;
+	cmd.len[0] = cmd_size;
+	cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
+
+	IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n");
+
+	ret = iwl_mvm_send_cmd(mvm, &cmd);
+	if (!ret) {
+		mvm->scan_type = type;
+		mvm->hb_scan_type = hb_type;
+	}
+
+	kfree(cfg);
+	return ret;
+}
+
+static int iwl_mvm_scan_uid_by_status(struct iwl_mvm *mvm, int status)
+{
+	int i;
+
+	for (i = 0; i < mvm->max_scans; i++)
+		if (mvm->scan_uid_status[i] == status)
+			return i;
+
+	return -ENOENT;
+}
+
+static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
+				    struct iwl_scan_req_umac *cmd,
+				    struct iwl_mvm_scan_params *params)
+{
+	struct iwl_mvm_scan_timing_params *timing, *hb_timing;
+	u8 active_dwell, passive_dwell;
+
+	timing = &scan_timing[params->type];
+	active_dwell = params->measurement_dwell ?
+		params->measurement_dwell : IWL_SCAN_DWELL_ACTIVE;
+	passive_dwell = params->measurement_dwell ?
+		params->measurement_dwell : IWL_SCAN_DWELL_PASSIVE;
+
+	if (iwl_mvm_is_adaptive_dwell_supported(mvm)) {
+		cmd->v7.adwell_default_n_aps_social =
+			IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
+		cmd->v7.adwell_default_n_aps =
+			IWL_SCAN_ADWELL_DEFAULT_N_APS;
+
+		/* if a custom max budget was configured via debugfs */
+		if (IWL_MVM_ADWELL_MAX_BUDGET)
+			cmd->v7.adwell_max_budget =
+				cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
+		else if (params->ssids && params->ssids[0].ssid_len)
+			cmd->v7.adwell_max_budget =
+				cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
+		else
+			cmd->v7.adwell_max_budget =
+				cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
+
+		cmd->v7.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+		cmd->v7.max_out_time[SCAN_LB_LMAC_IDX] =
+			cpu_to_le32(timing->max_out_time);
+		cmd->v7.suspend_time[SCAN_LB_LMAC_IDX] =
+			cpu_to_le32(timing->suspend_time);
+
+		if (iwl_mvm_is_cdb_supported(mvm)) {
+			hb_timing = &scan_timing[params->hb_type];
+
+			cmd->v7.max_out_time[SCAN_HB_LMAC_IDX] =
+				cpu_to_le32(hb_timing->max_out_time);
+			cmd->v7.suspend_time[SCAN_HB_LMAC_IDX] =
+				cpu_to_le32(hb_timing->suspend_time);
+		}
+
+		if (!iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) {
+			cmd->v7.active_dwell = active_dwell;
+			cmd->v7.passive_dwell = passive_dwell;
+			cmd->v7.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
+		} else {
+			cmd->v8.active_dwell[SCAN_LB_LMAC_IDX] = active_dwell;
+			cmd->v8.passive_dwell[SCAN_LB_LMAC_IDX] = passive_dwell;
+			if (iwl_mvm_is_cdb_supported(mvm)) {
+				cmd->v8.active_dwell[SCAN_HB_LMAC_IDX] =
+					active_dwell;
+				cmd->v8.passive_dwell[SCAN_HB_LMAC_IDX] =
+					passive_dwell;
+			}
+		}
+	} else {
+		cmd->v1.extended_dwell = params->measurement_dwell ?
+			params->measurement_dwell : IWL_SCAN_DWELL_EXTENDED;
+		cmd->v1.active_dwell = active_dwell;
+		cmd->v1.passive_dwell = passive_dwell;
+		cmd->v1.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
+
+		if (iwl_mvm_is_cdb_supported(mvm)) {
+			hb_timing = &scan_timing[params->hb_type];
+
+			cmd->v6.max_out_time[SCAN_HB_LMAC_IDX] =
+					cpu_to_le32(hb_timing->max_out_time);
+			cmd->v6.suspend_time[SCAN_HB_LMAC_IDX] =
+					cpu_to_le32(hb_timing->suspend_time);
+		}
+
+		if (iwl_mvm_cdb_scan_api(mvm)) {
+			cmd->v6.scan_priority =
+				cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+			cmd->v6.max_out_time[SCAN_LB_LMAC_IDX] =
+				cpu_to_le32(timing->max_out_time);
+			cmd->v6.suspend_time[SCAN_LB_LMAC_IDX] =
+				cpu_to_le32(timing->suspend_time);
+		} else {
+			cmd->v1.scan_priority =
+				cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+			cmd->v1.max_out_time =
+				cpu_to_le32(timing->max_out_time);
+			cmd->v1.suspend_time =
+				cpu_to_le32(timing->suspend_time);
+		}
+	}
+
+	if (iwl_mvm_is_regular_scan(params))
+		cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+	else
+		cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
+}
+
+static void
+iwl_mvm_umac_scan_cfg_channels(struct iwl_mvm *mvm,
+			       struct ieee80211_channel **channels,
+			       int n_channels, u32 ssid_bitmap,
+			       struct iwl_scan_channel_cfg_umac *channel_cfg)
+{
+	int i;
+
+	for (i = 0; i < n_channels; i++) {
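+		/* the low bits of the channel flags select which of the
+		 * direct-probe SSIDs are used on this channel; all channels
+		 * get the same set here.
+		 */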
+		channel_cfg[i].flags = cpu_to_le32(ssid_bitmap);
+		channel_cfg[i].channel_num = channels[i]->hw_value;
+		channel_cfg[i].iter_count = 1;
+		channel_cfg[i].iter_interval = 0;
+	}
+}
+
+static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
+				   struct iwl_mvm_scan_params *params,
+				   struct ieee80211_vif *vif)
+{
+	u16 flags = 0;
+
+	if (params->n_ssids == 0)
+		flags = IWL_UMAC_SCAN_GEN_FLAGS_PASSIVE;
+
+	if (params->n_ssids == 1 && params->ssids[0].ssid_len != 0)
+		flags |= IWL_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT;
+
+	if (params->type == IWL_SCAN_TYPE_FRAGMENTED)
+		flags |= IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED;
+
+	if (iwl_mvm_is_cdb_supported(mvm) &&
+	    params->hb_type == IWL_SCAN_TYPE_FRAGMENTED)
+		flags |= IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED;
+
+	if (iwl_mvm_rrm_scan_needed(mvm) &&
+	    fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
+		flags |= IWL_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED;
+
+	if (params->pass_all)
+		flags |= IWL_UMAC_SCAN_GEN_FLAGS_PASS_ALL;
+	else
+		flags |= IWL_UMAC_SCAN_GEN_FLAGS_MATCH;
+
+	if (!iwl_mvm_is_regular_scan(params))
+		flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC;
+
+	if (params->measurement_dwell)
+		flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	if (mvm->scan_iter_notif_enabled)
+		flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
+#endif
+
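+	/* Pass-all sched scan relies on iteration-complete notifications
+	 * to push results up to mac80211 as each iteration finishes.
+	 */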
+	if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
+		flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
+
+	if (iwl_mvm_is_adaptive_dwell_supported(mvm) && IWL_MVM_ADWELL_ENABLE &&
+	    vif->type != NL80211_IFTYPE_P2P_DEVICE)
+		flags |= IWL_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL;
+
+	/*
+	 * Extended dwell is relevant only for the low band for now, as it
+	 * is used only on the social channels (1, 6, 11), so checking the
+	 * low-band scan type is sufficient even for CDB.
+	 */
+	if (iwl_mvm_is_regular_scan(params) &&
+	    vif->type != NL80211_IFTYPE_P2P_DEVICE &&
+	    params->type != IWL_SCAN_TYPE_FRAGMENTED &&
+	    !iwl_mvm_is_adaptive_dwell_supported(mvm) &&
+	    !iwl_mvm_is_oce_supported(mvm))
+		flags |= IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL;
+
+	if (iwl_mvm_is_oce_supported(mvm)) {
+		if ((params->flags &
+		     NL80211_SCAN_FLAG_OCE_PROBE_REQ_HIGH_TX_RATE))
+			flags |= IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_HIGH_TX_RATE;
+		/* Since IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL and
+		 * NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION share
+		 * the same bit, make sure this bit is used here only when
+		 * IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL cannot be used.
+		 */
+		if ((params->flags &
+		     NL80211_SCAN_FLAG_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION) &&
+		     !WARN_ON_ONCE(!iwl_mvm_is_adaptive_dwell_supported(mvm)))
+			flags |= IWL_UMAC_SCAN_GEN_FLAGS_PROB_REQ_DEFER_SUPP;
+		if ((params->flags & NL80211_SCAN_FLAG_FILS_MAX_CHANNEL_TIME))
+			flags |= IWL_UMAC_SCAN_GEN_FLAGS_MAX_CHNL_TIME;
+	}
+
+	return flags;
+}
+
+static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			     struct iwl_mvm_scan_params *params,
+			     int type)
+{
+	struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
+	struct iwl_scan_umac_chan_param *chan_param;
+	void *cmd_data = iwl_mvm_get_scan_req_umac_data(mvm);
+	struct iwl_scan_req_umac_tail *sec_part = cmd_data +
+		sizeof(struct iwl_scan_channel_cfg_umac) *
+			mvm->fw->ucode_capa.n_scan_channels;
+	int uid, i;
+	u32 ssid_bitmap = 0;
+	u8 channel_flags = 0;
+	u16 gen_flags;
+	struct iwl_mvm_vif *scan_vif = iwl_mvm_vif_from_mac80211(vif);
+
+	chan_param = iwl_mvm_get_scan_req_umac_channel(mvm);
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (WARN_ON(params->n_scan_plans > IWL_MAX_SCHED_SCAN_PLANS))
+		return -EINVAL;
+
+	uid = iwl_mvm_scan_uid_by_status(mvm, 0);
+	if (uid < 0)
+		return uid;
+
+	memset(cmd, 0, ksize(cmd));
+
+	iwl_mvm_scan_umac_dwell(mvm, cmd, params);
+
+	mvm->scan_uid_status[uid] = type;
+
+	cmd->uid = cpu_to_le32(uid);
+	gen_flags = iwl_mvm_scan_umac_flags(mvm, params, vif);
+	cmd->general_flags = cpu_to_le16(gen_flags);
+	if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm)) {
+		if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_FRAGMENTED)
+			cmd->v8.num_of_fragments[SCAN_LB_LMAC_IDX] =
+							IWL_SCAN_NUM_OF_FRAGS;
+		if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED)
+			cmd->v8.num_of_fragments[SCAN_HB_LMAC_IDX] =
+							IWL_SCAN_NUM_OF_FRAGS;
+	}
+
+	cmd->scan_start_mac_id = scan_vif->id;
+
+	if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT)
+		cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
+
+	if (iwl_mvm_scan_use_ebs(mvm, vif))
+		channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
+				IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+				IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
+
+	chan_param->flags = channel_flags;
+	chan_param->count = params->n_channels;
+
+	iwl_scan_build_ssids(params, sec_part->direct_scan, &ssid_bitmap);
+
+	iwl_mvm_umac_scan_cfg_channels(mvm, params->channels,
+				       params->n_channels, ssid_bitmap,
+				       cmd_data);
+
+	for (i = 0; i < params->n_scan_plans; i++) {
+		struct cfg80211_sched_scan_plan *scan_plan =
+			&params->scan_plans[i];
+
+		sec_part->schedule[i].iter_count = scan_plan->iterations;
+		sec_part->schedule[i].interval =
+			cpu_to_le16(scan_plan->interval);
+	}
+
+	/*
+	 * Per cfg80211, an iteration count of zero in the last scan plan
+	 * means "run forever"; translate that to the firmware's maximum
+	 * (0xff). Note that the last plan is not always infinite: a
+	 * regular scan, for example, uses a single plan with exactly one
+	 * iteration.
+	 */
+	if (!sec_part->schedule[i - 1].iter_count)
+		sec_part->schedule[i - 1].iter_count = 0xff;
+
+	sec_part->delay = cpu_to_le16(params->delay);
+	sec_part->preq = params->preq;
+
+	return 0;
+}
+
+static int iwl_mvm_num_scans(struct iwl_mvm *mvm)
+{
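+	/* each running scan type owns one bit in scan_status */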
+	return hweight32(mvm->scan_status & IWL_MVM_SCAN_MASK);
+}
+
+static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
+{
+	bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+					 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+
+	/* This looks a bit arbitrary, but the idea is that if we run
+	 * out of possible simultaneous scans and userspace is trying
+	 * to run a scan type that is already running, we return
+	 * -EBUSY.  But if userspace wants to start a different type
+	 * of scan, we stop the opposite type to make space for the
+	 * new request.  The reason is backwards compatibility with
+	 * old versions of wpa_supplicant that wouldn't stop a
+	 * scheduled scan before starting a normal scan.
+	 */
+
+	if (iwl_mvm_num_scans(mvm) < mvm->max_scans)
+		return 0;
+
+	/* Use a switch, even though this is a bitmask, so that more
+	 * than one bit set will fall into the default case and we warn.
+	 */
+	switch (type) {
+	case IWL_MVM_SCAN_REGULAR:
+		if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
+			return -EBUSY;
+		return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
+	case IWL_MVM_SCAN_SCHED:
+		if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
+			return -EBUSY;
+		return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
+	case IWL_MVM_SCAN_NETDETECT:
+		/* For non-unified images, there's no need to stop
+		 * anything for net-detect since the firmware is
+		 * restarted anyway.  This way, any sched scans that
+		 * were running will be restarted when we resume.
+		 */
+		if (!unified_image)
+			return 0;
+
+		/* If this is a unified image and we ran out of scans,
+		 * we need to stop something.  Prefer stopping regular
+		 * scans, because the results are useless at this
+		 * point, and we should be able to keep running
+		 * another scheduled scan while suspended.
+		 */
+		if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK)
+			return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR,
+						 true);
+		if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK)
+			return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED,
+						 true);
+
+		/* fall through, something is wrong if no scan was
+		 * running but we ran out of scans.
+		 */
+	default:
+		WARN_ON(1);
+		break;
+	}
+
+	return -EIO;
+}
+
+#define SCAN_TIMEOUT 20000	/* msecs */
+
+void iwl_mvm_scan_timeout_wk(struct work_struct *work)
+{
+	struct delayed_work *delayed_work = to_delayed_work(work);
+	struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm,
+					   scan_timeout_dwork);
+
+	IWL_ERR(mvm, "regular scan timed out\n");
+
+	iwl_force_nmi(mvm->trans);
+}
+
+static void iwl_mvm_fill_scan_type(struct iwl_mvm *mvm,
+				   struct iwl_mvm_scan_params *params,
+				   bool p2p)
+{
+	if (iwl_mvm_is_cdb_supported(mvm)) {
+		params->type =
+			iwl_mvm_get_scan_type_band(mvm, p2p,
+						   NL80211_BAND_2GHZ);
+		params->hb_type =
+			iwl_mvm_get_scan_type_band(mvm, p2p,
+						   NL80211_BAND_5GHZ);
+	} else {
+		params->type = iwl_mvm_get_scan_type(mvm, p2p);
+	}
+}
+
+int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			   struct cfg80211_scan_request *req,
+			   struct ieee80211_scan_ies *ies)
+{
+	struct iwl_host_cmd hcmd = {
+		.len = { iwl_mvm_scan_size(mvm), },
+		.data = { mvm->scan_cmd, },
+		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
+	};
+	struct iwl_mvm_scan_params params = {};
+	int ret;
+	struct cfg80211_sched_scan_plan scan_plan = { .iterations = 1 };
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
+		IWL_ERR(mvm, "scan while LAR regdomain is not set\n");
+		return -EBUSY;
+	}
+
+	ret = iwl_mvm_check_running_scans(mvm, IWL_MVM_SCAN_REGULAR);
+	if (ret)
+		return ret;
+
+	/* we should have failed registration if scan_cmd was NULL */
+	if (WARN_ON(!mvm->scan_cmd))
+		return -ENOMEM;
+
+	if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels))
+		return -ENOBUFS;
+
+	params.n_ssids = req->n_ssids;
+	params.flags = req->flags;
+	params.n_channels = req->n_channels;
+	params.delay = 0;
+	params.ssids = req->ssids;
+	params.channels = req->channels;
+	params.mac_addr = req->mac_addr;
+	params.mac_addr_mask = req->mac_addr_mask;
+	params.no_cck = req->no_cck;
+	params.pass_all = true;
+	params.n_match_sets = 0;
+	params.match_sets = NULL;
+
+	params.scan_plans = &scan_plan;
+	params.n_scan_plans = 1;
+
+	iwl_mvm_fill_scan_type(mvm, &params,
+			       vif->type == NL80211_IFTYPE_P2P_DEVICE);
+
+	ret = iwl_mvm_get_measurement_dwell(mvm, req, &params);
+	if (ret < 0)
+		return ret;
+
+	params.measurement_dwell = ret;
+
+	iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
+
+	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+		hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
+		ret = iwl_mvm_scan_umac(mvm, vif, &params,
+					IWL_MVM_SCAN_REGULAR);
+	} else {
+		hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
+		ret = iwl_mvm_scan_lmac(mvm, vif, &params);
+	}
+
+	if (ret)
+		return ret;
+
+	iwl_mvm_pause_tcm(mvm, false);
+
+	ret = iwl_mvm_send_cmd(mvm, &hcmd);
+	if (ret) {
+		/* If the scan failed, it usually means that the FW was unable
+		 * to allocate the time events. Warn on it, but maybe we
+		 * should try to send the command again with different params.
+		 */
+		IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
+		iwl_mvm_resume_tcm(mvm);
+		return ret;
+	}
+
+	IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
+	mvm->scan_status |= IWL_MVM_SCAN_REGULAR;
+	mvm->scan_vif = iwl_mvm_vif_from_mac80211(vif);
+	iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
+
+	schedule_delayed_work(&mvm->scan_timeout_dwork,
+			      msecs_to_jiffies(SCAN_TIMEOUT));
+
+	return 0;
+}
+
+int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
+			     struct ieee80211_vif *vif,
+			     struct cfg80211_sched_scan_request *req,
+			     struct ieee80211_scan_ies *ies,
+			     int type)
+{
+	struct iwl_host_cmd hcmd = {
+		.len = { iwl_mvm_scan_size(mvm), },
+		.data = { mvm->scan_cmd, },
+		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
+	};
+	struct iwl_mvm_scan_params params = {};
+	int ret;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (iwl_mvm_is_lar_supported(mvm) && !mvm->lar_regdom_set) {
+		IWL_ERR(mvm, "sched-scan while LAR regdomain is not set\n");
+		return -EBUSY;
+	}
+
+	ret = iwl_mvm_check_running_scans(mvm, type);
+	if (ret)
+		return ret;
+
+	/* we should have failed registration if scan_cmd was NULL */
+	if (WARN_ON(!mvm->scan_cmd))
+		return -ENOMEM;
+
+	if (!iwl_mvm_scan_fits(mvm, req->n_ssids, ies, req->n_channels))
+		return -ENOBUFS;
+
+	params.n_ssids = req->n_ssids;
+	params.flags = req->flags;
+	params.n_channels = req->n_channels;
+	params.ssids = req->ssids;
+	params.channels = req->channels;
+	params.mac_addr = req->mac_addr;
+	params.mac_addr_mask = req->mac_addr_mask;
+	params.no_cck = false;
+	params.pass_all = iwl_mvm_scan_pass_all(mvm, req);
+	params.n_match_sets = req->n_match_sets;
+	params.match_sets = req->match_sets;
+	if (!req->n_scan_plans)
+		return -EINVAL;
+
+	params.n_scan_plans = req->n_scan_plans;
+	params.scan_plans = req->scan_plans;
+
+	iwl_mvm_fill_scan_type(mvm, &params,
+			       vif->type == NL80211_IFTYPE_P2P_DEVICE);
+
+	/* In theory, LMAC scans can handle a 32-bit delay, but since
+	 * waiting for over 18 hours to start the scan is a bit silly
+	 * and to keep it aligned with UMAC scans (which only support
+	 * 16-bit delays), trim it down to 16 bits.
+	 */
+	if (req->delay > U16_MAX) {
+		IWL_DEBUG_SCAN(mvm,
+			       "delay value is > 16-bits, set to max possible\n");
+		params.delay = U16_MAX;
+	} else {
+		params.delay = req->delay;
+	}
+
+	ret = iwl_mvm_config_sched_scan_profiles(mvm, req);
+	if (ret)
+		return ret;
+
+	iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
+
+	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+		hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
+		ret = iwl_mvm_scan_umac(mvm, vif, &params, type);
+	} else {
+		hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
+		ret = iwl_mvm_scan_lmac(mvm, vif, &params);
+	}
+
+	if (ret)
+		return ret;
+
+	ret = iwl_mvm_send_cmd(mvm, &hcmd);
+	if (!ret) {
+		IWL_DEBUG_SCAN(mvm,
+			       "Sched scan request was sent successfully\n");
+		mvm->scan_status |= type;
+	} else {
+		/* If the scan failed, it usually means that the FW was unable
+		 * to allocate the time events. Warn on it, but maybe we
+		 * should try to send the command again with different params.
+		 */
+		IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret);
+	}
+
+	return ret;
+}
+
+void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
+					 struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_umac_scan_complete *notif = (void *)pkt->data;
+	u32 uid = __le32_to_cpu(notif->uid);
+	bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED);
+
+	if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status)))
+		return;
+
+	/* if the scan is already stopping, we don't need to notify mac80211 */
+	if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
+		struct cfg80211_scan_info info = {
+			.aborted = aborted,
+			.scan_start_tsf = mvm->scan_start,
+		};
+
+		memcpy(info.tsf_bssid, mvm->scan_vif->bssid, ETH_ALEN);
+		ieee80211_scan_completed(mvm->hw, &info);
+		mvm->scan_vif = NULL;
+		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+		cancel_delayed_work(&mvm->scan_timeout_dwork);
+		iwl_mvm_resume_tcm(mvm);
+	} else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
+		ieee80211_sched_scan_stopped(mvm->hw);
+		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
+	}
+
+	mvm->scan_status &= ~mvm->scan_uid_status[uid];
+	IWL_DEBUG_SCAN(mvm,
+		       "Scan completed, uid %u type %u, status %s, EBS status %s\n",
+		       uid, mvm->scan_uid_status[uid],
+		       notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
+				"completed" : "aborted",
+		       iwl_mvm_ebs_status_str(notif->ebs_status));
+	IWL_DEBUG_SCAN(mvm,
+		       "Last line %d, Last iteration %d, Time from last iteration %d\n",
+		       notif->last_schedule, notif->last_iter,
+		       __le32_to_cpu(notif->time_from_last_iter));
+
+	if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS &&
+	    notif->ebs_status != IWL_SCAN_EBS_INACTIVE)
+		mvm->last_ebs_successful = false;
+
+	mvm->scan_uid_status[uid] = 0;
+}
+
+void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
+					      struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_umac_scan_iter_complete_notif *notif = (void *)pkt->data;
+
+	mvm->scan_start = le64_to_cpu(notif->start_tsf);
+
+	IWL_DEBUG_SCAN(mvm,
+		       "UMAC Scan iteration complete: status=0x%x scanned_channels=%d\n",
+		       notif->status, notif->scanned_channels);
+
+	if (mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_FOUND) {
+		IWL_DEBUG_SCAN(mvm, "Pass all scheduled scan results found\n");
+		ieee80211_sched_scan_results(mvm->hw);
+		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_ENABLED;
+	}
+
+	IWL_DEBUG_SCAN(mvm,
+		       "UMAC Scan iteration complete: scan started at %llu (TSF)\n",
+		       mvm->scan_start);
+}
+
+static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
+{
+	struct iwl_umac_scan_abort cmd = {};
+	int uid, ret;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	/* We should always get a valid index here, because we already
+	 * checked that this type of scan was running in the generic
+	 * code.
+	 */
+	uid = iwl_mvm_scan_uid_by_status(mvm, type);
+	if (WARN_ON_ONCE(uid < 0))
+		return uid;
+
+	cmd.uid = cpu_to_le32(uid);
+
+	IWL_DEBUG_SCAN(mvm, "Sending scan abort, uid %u\n", uid);
+
+	ret = iwl_mvm_send_cmd_pdu(mvm,
+				   iwl_cmd_id(SCAN_ABORT_UMAC,
+					      IWL_ALWAYS_LONG_GROUP, 0),
+				   0, sizeof(cmd), &cmd);
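+	/* Mark this uid as "stopping" so that the scan completion
+	 * notification is not reported to mac80211 (see the handler above).
+	 */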
+	if (!ret)
+		mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
+
+	return ret;
+}
+
+static int iwl_mvm_scan_stop_wait(struct iwl_mvm *mvm, int type)
+{
+	struct iwl_notification_wait wait_scan_done;
+	static const u16 scan_done_notif[] = { SCAN_COMPLETE_UMAC,
+					      SCAN_OFFLOAD_COMPLETE, };
+	int ret;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
+				   scan_done_notif,
+				   ARRAY_SIZE(scan_done_notif),
+				   NULL, NULL);
+
+	IWL_DEBUG_SCAN(mvm, "Preparing to stop scan, type %x\n", type);
+
+	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
+		ret = iwl_mvm_umac_scan_abort(mvm, type);
+	else
+		ret = iwl_mvm_lmac_scan_abort(mvm);
+
+	if (ret) {
+		IWL_DEBUG_SCAN(mvm, "couldn't stop scan type %d\n", type);
+		iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
+		return ret;
+	}
+
+	ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
+
+	return ret;
+}
+
+int iwl_mvm_scan_size(struct iwl_mvm *mvm)
+{
+	int base_size = IWL_SCAN_REQ_UMAC_SIZE_V1;
+
+	if (iwl_mvm_is_adaptive_dwell_v2_supported(mvm))
+		base_size = IWL_SCAN_REQ_UMAC_SIZE_V8;
+	else if (iwl_mvm_is_adaptive_dwell_supported(mvm))
+		base_size = IWL_SCAN_REQ_UMAC_SIZE_V7;
+	else if (iwl_mvm_cdb_scan_api(mvm))
+		base_size = IWL_SCAN_REQ_UMAC_SIZE_V6;
+
+	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
+		return base_size +
+			sizeof(struct iwl_scan_channel_cfg_umac) *
+				mvm->fw->ucode_capa.n_scan_channels +
+			sizeof(struct iwl_scan_req_umac_tail);
+
+	return sizeof(struct iwl_scan_req_lmac) +
+		sizeof(struct iwl_scan_channel_cfg_lmac) *
+		mvm->fw->ucode_capa.n_scan_channels +
+		sizeof(struct iwl_scan_probe_req);
+}
+
+/*
+ * This function is used in the nic restart flow to inform mac80211 about
+ * scans that were aborted by the restart flow or by a firmware assert.
+ */
+void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
+{
+	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
+		int uid, i;
+
+		uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_REGULAR);
+		if (uid >= 0) {
+			struct cfg80211_scan_info info = {
+				.aborted = true,
+			};
+
+			ieee80211_scan_completed(mvm->hw, &info);
+			mvm->scan_uid_status[uid] = 0;
+		}
+		uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_SCHED);
+		if (uid >= 0 && !mvm->fw_restart) {
+			ieee80211_sched_scan_stopped(mvm->hw);
+			mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
+			mvm->scan_uid_status[uid] = 0;
+		}
+
+		/* We shouldn't have any UIDs still set.  Loop over all the
+		 * UIDs to make sure there's nothing left there and warn if
+		 * any is found.
+		 */
+		for (i = 0; i < mvm->max_scans; i++) {
+			if (WARN_ONCE(mvm->scan_uid_status[i],
+				      "UMAC scan UID %d status was not cleaned\n",
+				      i))
+				mvm->scan_uid_status[i] = 0;
+		}
+	} else {
+		if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) {
+			struct cfg80211_scan_info info = {
+				.aborted = true,
+			};
+
+			ieee80211_scan_completed(mvm->hw, &info);
+		}
+
+		/* Sched scan will be restarted by mac80211 in
+		 * restart_hw, so do not report if FW is about to be
+		 * restarted.
+		 */
+		if ((mvm->scan_status & IWL_MVM_SCAN_SCHED) &&
+		    !mvm->fw_restart) {
+			ieee80211_sched_scan_stopped(mvm->hw);
+			mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
+		}
+	}
+}
+
+int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify)
+{
+	int ret;
+
+	if (!(mvm->scan_status & type))
+		return 0;
+
+	if (iwl_mvm_is_radio_killed(mvm)) {
+		ret = 0;
+		goto out;
+	}
+
+	ret = iwl_mvm_scan_stop_wait(mvm, type);
+	if (!ret)
+		mvm->scan_status |= type << IWL_MVM_SCAN_STOPPING_SHIFT;
+out:
+	/* Clear the scan status so that subsequent scan requests will
+	 * succeed; the scan was already marked as stopping above, so the
+	 * Rx handler won't do anything with its completion notification.
+	 */
+	mvm->scan_status &= ~type;
+
+	if (type == IWL_MVM_SCAN_REGULAR) {
+		/* Since the rx handler won't do anything now, we have
+		 * to release the scan reference here.
+		 */
+		iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+		cancel_delayed_work(&mvm->scan_timeout_dwork);
+		if (notify) {
+			struct cfg80211_scan_info info = {
+				.aborted = true,
+			};
+
+			ieee80211_scan_completed(mvm->hw, &info);
+		}
+	} else if (notify) {
+		ieee80211_sched_scan_stopped(mvm->hw);
+		mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
+	}
+
+	return ret;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
new file mode 100644
index 0000000..539b06b
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
@@ -0,0 +1,336 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "mvm.h"
+
+/* For counting bound interfaces */
+struct iwl_mvm_active_iface_iterator_data {
+	struct ieee80211_vif *ignore_vif;
+	u8 sta_vif_ap_sta_id;
+	enum iwl_sf_state sta_vif_state;
+	int num_active_macs;
+};
+
+/*
+ * Count bound interfaces that are not P2P, excluding data->ignore_vif.
+ * If a bound station vif exists, its SF state and AP station id are
+ * recorded in data->sta_vif_state and data->sta_vif_ap_sta_id.
+ */
+static void iwl_mvm_bound_iface_iterator(void *_data, u8 *mac,
+					 struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_active_iface_iterator_data *data = _data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	if (vif == data->ignore_vif || !mvmvif->phy_ctxt ||
+	    vif->type == NL80211_IFTYPE_P2P_DEVICE)
+		return;
+
+	data->num_active_macs++;
+
+	if (vif->type == NL80211_IFTYPE_STATION) {
+		data->sta_vif_ap_sta_id = mvmvif->ap_sta_id;
+		if (vif->bss_conf.assoc)
+			data->sta_vif_state = SF_FULL_ON;
+		else
+			data->sta_vif_state = SF_INIT_OFF;
+	}
+}
+
+/*
+ * Aging and idle timeouts for the different possible scenarios
+ * in the default configuration.
+ */
+static const
+__le32 sf_full_timeout_def[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = {
+	{
+		cpu_to_le32(SF_SINGLE_UNICAST_AGING_TIMER_DEF),
+		cpu_to_le32(SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
+	},
+	{
+		cpu_to_le32(SF_AGG_UNICAST_AGING_TIMER_DEF),
+		cpu_to_le32(SF_AGG_UNICAST_IDLE_TIMER_DEF)
+	},
+	{
+		cpu_to_le32(SF_MCAST_AGING_TIMER_DEF),
+		cpu_to_le32(SF_MCAST_IDLE_TIMER_DEF)
+	},
+	{
+		cpu_to_le32(SF_BA_AGING_TIMER_DEF),
+		cpu_to_le32(SF_BA_IDLE_TIMER_DEF)
+	},
+	{
+		cpu_to_le32(SF_TX_RE_AGING_TIMER_DEF),
+		cpu_to_le32(SF_TX_RE_IDLE_TIMER_DEF)
+	},
+};
+
+/*
+ * Aging and idle timeouts for the different possible scenarios
+ * in single BSS MAC configuration.
+ */
+static const __le32 sf_full_timeout[SF_NUM_SCENARIO][SF_NUM_TIMEOUT_TYPES] = {
+	{
+		cpu_to_le32(SF_SINGLE_UNICAST_AGING_TIMER),
+		cpu_to_le32(SF_SINGLE_UNICAST_IDLE_TIMER)
+	},
+	{
+		cpu_to_le32(SF_AGG_UNICAST_AGING_TIMER),
+		cpu_to_le32(SF_AGG_UNICAST_IDLE_TIMER)
+	},
+	{
+		cpu_to_le32(SF_MCAST_AGING_TIMER),
+		cpu_to_le32(SF_MCAST_IDLE_TIMER)
+	},
+	{
+		cpu_to_le32(SF_BA_AGING_TIMER),
+		cpu_to_le32(SF_BA_IDLE_TIMER)
+	},
+	{
+		cpu_to_le32(SF_TX_RE_AGING_TIMER),
+		cpu_to_le32(SF_TX_RE_IDLE_TIMER)
+	},
+};
+
+static void iwl_mvm_fill_sf_command(struct iwl_mvm *mvm,
+				    struct iwl_sf_cfg_cmd *sf_cmd,
+				    struct ieee80211_sta *sta)
+{
+	int i, j, watermark;
+
+	sf_cmd->watermark[SF_LONG_DELAY_ON] = cpu_to_le32(SF_W_MARK_SCAN);
+
+	/*
+	 * If we are in the association flow, check the antenna configuration
+	 * capabilities of the AP station and choose the watermark accordingly.
+	 */
+	if (sta) {
+		if (sta->ht_cap.ht_supported || sta->vht_cap.vht_supported) {
+			switch (sta->rx_nss) {
+			case 1:
+				watermark = SF_W_MARK_SISO;
+				break;
+			case 2:
+				watermark = SF_W_MARK_MIMO2;
+				break;
+			default:
+				watermark = SF_W_MARK_MIMO3;
+				break;
+			}
+		} else {
+			watermark = SF_W_MARK_LEGACY;
+		}
+	} else {
+		/* default watermark value for unassociated mode */
+		watermark = SF_W_MARK_MIMO2;
+	}
+	sf_cmd->watermark[SF_FULL_ON] = cpu_to_le32(watermark);
+
+	for (i = 0; i < SF_NUM_SCENARIO; i++) {
+		for (j = 0; j < SF_NUM_TIMEOUT_TYPES; j++) {
+			sf_cmd->long_delay_timeouts[i][j] =
+					cpu_to_le32(SF_LONG_DELAY_AGING_TIMER);
+		}
+	}
+
+	if (sta) {
+		BUILD_BUG_ON(sizeof(sf_full_timeout) !=
+			     sizeof(__le32) * SF_NUM_SCENARIO *
+			     SF_NUM_TIMEOUT_TYPES);
+
+		memcpy(sf_cmd->full_on_timeouts, sf_full_timeout,
+		       sizeof(sf_full_timeout));
+	} else {
+		BUILD_BUG_ON(sizeof(sf_full_timeout_def) !=
+			     sizeof(__le32) * SF_NUM_SCENARIO *
+			     SF_NUM_TIMEOUT_TYPES);
+
+		memcpy(sf_cmd->full_on_timeouts, sf_full_timeout_def,
+		       sizeof(sf_full_timeout_def));
+	}
+}
+
+static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
+			     enum iwl_sf_state new_state)
+{
+	struct iwl_sf_cfg_cmd sf_cmd = {
+		.state = cpu_to_le32(new_state),
+	};
+	struct ieee80211_sta *sta;
+	int ret = 0;
+
+	if (mvm->cfg->disable_dummy_notification)
+		sf_cmd.state |= cpu_to_le32(SF_CFG_DUMMY_NOTIF_OFF);
+
+	/*
+	 * If an associated AP sta changed its antenna configuration, the state
+	 * will remain FULL_ON but SF parameters need to be reconsidered.
+	 */
+	if (new_state != SF_FULL_ON && mvm->sf_state == new_state)
+		return 0;
+
+	switch (new_state) {
+	case SF_UNINIT:
+		iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
+		break;
+	case SF_FULL_ON:
+		if (sta_id == IWL_MVM_INVALID_STA) {
+			IWL_ERR(mvm,
+				"No station: Cannot switch SF to FULL_ON\n");
+			return -EINVAL;
+		}
+		rcu_read_lock();
+		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+		if (IS_ERR_OR_NULL(sta)) {
+			IWL_ERR(mvm, "Invalid station id\n");
+			rcu_read_unlock();
+			return -EINVAL;
+		}
+		iwl_mvm_fill_sf_command(mvm, &sf_cmd, sta);
+		rcu_read_unlock();
+		break;
+	case SF_INIT_OFF:
+		iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
+		break;
+	default:
+		WARN_ONCE(1, "Invalid state: %d. not sending Smart Fifo cmd\n",
+			  new_state);
+		return -EINVAL;
+	}
+
+	ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_SF_CFG_CMD, CMD_ASYNC,
+				   sizeof(sf_cmd), &sf_cmd);
+	if (!ret)
+		mvm->sf_state = new_state;
+
+	return ret;
+}
+
+/*
+ * Update Smart fifo:
+ * Count bound interfaces that are not to be removed, ignoring p2p devices,
+ * and set new state accordingly.
+ */
+int iwl_mvm_sf_update(struct iwl_mvm *mvm, struct ieee80211_vif *changed_vif,
+		      bool remove_vif)
+{
+	enum iwl_sf_state new_state;
+	u8 sta_id = IWL_MVM_INVALID_STA;
+	struct iwl_mvm_vif *mvmvif = NULL;
+	struct iwl_mvm_active_iface_iterator_data data = {
+		.ignore_vif = changed_vif,
+		.sta_vif_state = SF_UNINIT,
+		.sta_vif_ap_sta_id = IWL_MVM_INVALID_STA,
+	};
+
+	/*
+	 * Ignore the call if we are in HW Restart flow, or if the handled
+	 * vif is a p2p device.
+	 */
+	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
+	    (changed_vif && changed_vif->type == NL80211_IFTYPE_P2P_DEVICE))
+		return 0;
+
+	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+						   IEEE80211_IFACE_ITER_NORMAL,
+						   iwl_mvm_bound_iface_iterator,
+						   &data);
+
+	/* If changed_vif exists and is not to be removed, add to the count */
+	if (changed_vif && !remove_vif)
+		data.num_active_macs++;
+
+	switch (data.num_active_macs) {
+	case 0:
+		/* If there are no active macs - change state to SF_INIT_OFF */
+		new_state = SF_INIT_OFF;
+		break;
+	case 1:
+		if (remove_vif) {
+			/* The one active mac left is of type station
+			 * and we filled the relevant data during iteration
+			 */
+			new_state = data.sta_vif_state;
+			sta_id = data.sta_vif_ap_sta_id;
+		} else {
+			if (WARN_ON(!changed_vif))
+				return -EINVAL;
+			if (changed_vif->type != NL80211_IFTYPE_STATION) {
+				new_state = SF_UNINIT;
+			} else if (changed_vif->bss_conf.assoc &&
+				   changed_vif->bss_conf.dtim_period) {
+				mvmvif = iwl_mvm_vif_from_mac80211(changed_vif);
+				sta_id = mvmvif->ap_sta_id;
+				new_state = SF_FULL_ON;
+			} else {
+				new_state = SF_INIT_OFF;
+			}
+		}
+		break;
+	default:
+		/* If there are multiple active macs - change to SF_UNINIT */
+		new_state = SF_UNINIT;
+	}
+	return iwl_mvm_sf_config(mvm, sta_id, new_state);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
new file mode 100644
index 0000000..18db1ed
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -0,0 +1,3650 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <net/mac80211.h>
+
+#include "mvm.h"
+#include "sta.h"
+#include "rs.h"
+
+/*
+ * Newer versions of the ADD_STA command added new fields at the end of
+ * the structure, so sending the size of the relevant API version's
+ * structure is enough to support both API versions.
+ */
+static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
+{
+	if (iwl_mvm_has_new_rx_api(mvm) ||
+	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
+		return sizeof(struct iwl_mvm_add_sta_cmd);
+	else
+		return sizeof(struct iwl_mvm_add_sta_cmd_v7);
+}
+
+static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
+				    enum nl80211_iftype iftype)
+{
+	int sta_id;
+	u32 reserved_ids = 0;
+
+	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
+	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
+
+	lockdep_assert_held(&mvm->mutex);
+
+	/* d0i3/d3 assumes the AP's sta_id (of the station vif) is 0; reserve it. */
+	if (iftype != NL80211_IFTYPE_STATION)
+		reserved_ids = BIT(0);
+
+	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
+	for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
+		if (BIT(sta_id) & reserved_ids)
+			continue;
+
+		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+					       lockdep_is_held(&mvm->mutex)))
+			return sta_id;
+	}
+	return IWL_MVM_INVALID_STA;
+}
+
+/* send station add/update command to firmware */
+int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+			   bool update, unsigned int flags)
+{
+	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
+		.sta_id = mvm_sta->sta_id,
+		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
+		.add_modify = update ? 1 : 0,
+		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
+						 STA_FLG_MIMO_EN_MSK |
+						 STA_FLG_RTS_MIMO_PROT),
+		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
+	};
+	int ret;
+	u32 status;
+	u32 agg_size = 0, mpdu_dens = 0;
+
+	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
+		add_sta_cmd.station_type = mvm_sta->sta_type;
+
+	if (!update || (flags & STA_MODIFY_QUEUES)) {
+		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
+
+		if (!iwl_mvm_has_new_tx_api(mvm)) {
+			add_sta_cmd.tfd_queue_msk =
+				cpu_to_le32(mvm_sta->tfd_queue_msk);
+
+			if (flags & STA_MODIFY_QUEUES)
+				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
+		} else {
+			WARN_ON(flags & STA_MODIFY_QUEUES);
+		}
+	}
+
+	switch (sta->bandwidth) {
+	case IEEE80211_STA_RX_BW_160:
+		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
+		/* fall through */
+	case IEEE80211_STA_RX_BW_80:
+		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
+		/* fall through */
+	case IEEE80211_STA_RX_BW_40:
+		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
+		/* fall through */
+	case IEEE80211_STA_RX_BW_20:
+		if (sta->ht_cap.ht_supported)
+			add_sta_cmd.station_flags |=
+				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
+		break;
+	}
+
+	switch (sta->rx_nss) {
+	case 1:
+		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
+		break;
+	case 2:
+		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
+		break;
+	case 3 ... 8:
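+		/* anything above two spatial streams is reported as MIMO3 */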
+		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
+		break;
+	}
+
+	switch (sta->smps_mode) {
+	case IEEE80211_SMPS_AUTOMATIC:
+	case IEEE80211_SMPS_NUM_MODES:
+		WARN_ON(1);
+		break;
+	case IEEE80211_SMPS_STATIC:
+		/* override NSS */
+		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
+		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
+		break;
+	case IEEE80211_SMPS_DYNAMIC:
+		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
+		break;
+	case IEEE80211_SMPS_OFF:
+		/* nothing */
+		break;
+	}
+
+	if (sta->ht_cap.ht_supported) {
+		add_sta_cmd.station_flags_msk |=
+			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
+				    STA_FLG_AGG_MPDU_DENS_MSK);
+
+		mpdu_dens = sta->ht_cap.ampdu_density;
+	}
+
+	if (sta->vht_cap.vht_supported) {
+		agg_size = sta->vht_cap.cap &
+			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
+		agg_size >>=
+			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+	} else if (sta->ht_cap.ht_supported) {
+		agg_size = sta->ht_cap.ampdu_factor;
+	}
+
+	add_sta_cmd.station_flags |=
+		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
+	add_sta_cmd.station_flags |=
+		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
+	if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
+		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);
+
+	if (sta->wme) {
+		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;
+
+		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
+			add_sta_cmd.uapsd_acs |= BIT(AC_BK);
+		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+			add_sta_cmd.uapsd_acs |= BIT(AC_BE);
+		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
+			add_sta_cmd.uapsd_acs |= BIT(AC_VI);
+		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
+			add_sta_cmd.uapsd_acs |= BIT(AC_VO);
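+		/* the low nibble carries the trigger-enabled ACs and the
+		 * high nibble the delivery-enabled ACs; U-APSD uses the
+		 * same set for both here.
+		 */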
+		add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
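+		/* max_sp is in units of 2 frames; 0 means "deliver all
+		 * buffered frames", expressed here as 128.
+		 */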
+		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
+	}
+
+	status = ADD_STA_SUCCESS;
+	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+					  iwl_mvm_add_sta_cmd_size(mvm),
+					  &add_sta_cmd, &status);
+	if (ret)
+		return ret;
+
+	switch (status & IWL_ADD_STA_STATUS_MASK) {
+	case ADD_STA_SUCCESS:
+		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
+		break;
+	default:
+		ret = -EIO;
+		IWL_ERR(mvm, "ADD_STA failed\n");
+		break;
+	}
+
+	return ret;
+}
+
+static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
+{
+	struct iwl_mvm_baid_data *data =
+		from_timer(data, t, session_timer);
+	struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
+	struct iwl_mvm_baid_data *ba_data;
+	struct ieee80211_sta *sta;
+	struct iwl_mvm_sta *mvm_sta;
+	unsigned long timeout;
+
+	rcu_read_lock();
+
+	ba_data = rcu_dereference(*rcu_ptr);
+
+	if (WARN_ON(!ba_data))
+		goto unlock;
+
+	if (!ba_data->timeout)
+		goto unlock;
+
+	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
+	if (time_is_after_jiffies(timeout)) {
+		mod_timer(&ba_data->session_timer, timeout);
+		goto unlock;
+	}
+
+	/* Timer expired */
+	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
+
+	/*
+	 * sta should be valid unless the following happens:
+	 * the firmware asserts, which triggers a reconfig flow, but
+	 * the reconfig fails before we set the pointer to sta in the
+	 * fw_id_to_mac_id pointer table. mac80211 can't stop the
+	 * A-MPDU session, so the timer keeps running; when it expires,
+	 * sta is NULL.
+	 */
+	if (!sta)
+		goto unlock;
+
+	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
+				      sta->addr, ba_data->tid);
+unlock:
+	rcu_read_unlock();
+}
+
+/* Disable aggregations for a bitmap of TIDs for a given station */
+static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
+					unsigned long disable_agg_tids,
+					bool remove_queue)
+{
+	struct iwl_mvm_add_sta_cmd cmd = {};
+	struct ieee80211_sta *sta;
+	struct iwl_mvm_sta *mvmsta;
+	u32 status;
+	u8 sta_id;
+	int ret;
+
+	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+		return -EINVAL;
+
+	spin_lock_bh(&mvm->queue_info_lock);
+	sta_id = mvm->queue_info[queue].ra_sta_id;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	rcu_read_lock();
+
+	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
+		rcu_read_unlock();
+		return -EINVAL;
+	}
+
+	mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+	mvmsta->tid_disable_agg |= disable_agg_tids;
+
+	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
+	cmd.sta_id = mvmsta->sta_id;
+	cmd.add_modify = STA_MODE_MODIFY;
+	cmd.modify_mask = STA_MODIFY_QUEUES;
+	if (disable_agg_tids)
+		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
+	if (remove_queue)
+		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
+	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
+	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
+
+	rcu_read_unlock();
+
+	/* Notify FW of queue removal from the STA queues */
+	status = ADD_STA_SUCCESS;
+	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+					  iwl_mvm_add_sta_cmd_size(mvm),
+					  &cmd, &status);
+
+	return ret;
+}
+
+static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
+{
+	struct ieee80211_sta *sta;
+	struct iwl_mvm_sta *mvmsta;
+	unsigned long tid_bitmap;
+	unsigned long agg_tids = 0;
+	u8 sta_id;
+	int tid;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+		return -EINVAL;
+
+	spin_lock_bh(&mvm->queue_info_lock);
+	sta_id = mvm->queue_info[queue].ra_sta_id;
+	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+					lockdep_is_held(&mvm->mutex));
+
+	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+		return -EINVAL;
+
+	mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+	spin_lock_bh(&mvmsta->lock);
+	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
+			agg_tids |= BIT(tid);
+	}
+	spin_unlock_bh(&mvmsta->lock);
+
+	return agg_tids;
+}
+
+/*
+ * Remove a queue from a station's resources.
+ * Note that this only marks the queue as free. It DOESN'T delete a BA
+ * agreement and doesn't disable the queue.
+ */
+static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
+{
+	struct ieee80211_sta *sta;
+	struct iwl_mvm_sta *mvmsta;
+	unsigned long tid_bitmap;
+	unsigned long disable_agg_tids = 0;
+	u8 sta_id;
+	int tid;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+		return -EINVAL;
+
+	spin_lock_bh(&mvm->queue_info_lock);
+	sta_id = mvm->queue_info[queue].ra_sta_id;
+	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	rcu_read_lock();
+
+	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
+		rcu_read_unlock();
+		return 0;
+	}
+
+	mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+	spin_lock_bh(&mvmsta->lock);
+	/* Unmap MAC queues and TIDs from this queue */
+	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
+			disable_agg_tids |= BIT(tid);
+		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
+	}
+
+	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
+	spin_unlock_bh(&mvmsta->lock);
+
+	rcu_read_unlock();
+
+	return disable_agg_tids;
+}
+
+static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
+				       bool same_sta)
+{
+	struct iwl_mvm_sta *mvmsta;
+	u8 txq_curr_ac, sta_id, tid;
+	unsigned long disable_agg_tids = 0;
+	int ret;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+		return -EINVAL;
+
+	spin_lock_bh(&mvm->queue_info_lock);
+	txq_curr_ac = mvm->queue_info[queue].mac80211_ac;
+	sta_id = mvm->queue_info[queue].ra_sta_id;
+	tid = mvm->queue_info[queue].txq_tid;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
+	if (WARN_ON(!mvmsta))
+		return -EINVAL;
+
+	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
+	/* Disable the queue */
+	if (disable_agg_tids)
+		iwl_mvm_invalidate_sta_queue(mvm, queue,
+					     disable_agg_tids, false);
+
+	ret = iwl_mvm_disable_txq(mvm, queue,
+				  mvmsta->vif->hw_queue[txq_curr_ac],
+				  tid, 0);
+	if (ret) {
+		/* Re-mark the inactive queue as inactive */
+		spin_lock_bh(&mvm->queue_info_lock);
+		mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
+		spin_unlock_bh(&mvm->queue_info_lock);
+		IWL_ERR(mvm,
+			"Failed to free inactive queue %d (ret=%d)\n",
+			queue, ret);
+
+		return ret;
+	}
+
+	/* If TXQ is allocated to another STA, update removal in FW */
+	if (!same_sta)
+		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);
+
+	return 0;
+}
+
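+/*
+ * Pick one of the station's already-allocated DATA queues to be shared
+ * with a new TID of the given AC, following the priority order described
+ * inside the function.
+ */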
+static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
+				    unsigned long tfd_queue_mask, u8 ac)
+{
+	int queue = 0;
+	u8 ac_to_queue[IEEE80211_NUM_ACS];
+	int i;
+
+	lockdep_assert_held(&mvm->queue_info_lock);
+	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+		return -EINVAL;
+
+	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));
+
+	/* See what ACs the existing queues for this STA have */
+	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
+		/* Only DATA queues can be shared */
+		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
+		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
+			continue;
+
+		/* Don't try and take queues being reconfigured */
+		if (mvm->queue_info[i].status ==
+		    IWL_MVM_QUEUE_RECONFIGURING)
+			continue;
+
+		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
+	}
+
+	/*
+	 * The queue to share is chosen only from DATA queues as follows (in
+	 * descending priority):
+	 * 1. An AC_BE queue
+	 * 2. Same AC queue
+	 * 3. Highest AC queue that is lower than new AC
+	 * 4. Any existing AC (there always is at least 1 DATA queue)
+	 */
+
+	/* Priority 1: An AC_BE queue */
+	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
+		queue = ac_to_queue[IEEE80211_AC_BE];
+	/* Priority 2: Same AC queue */
+	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
+		queue = ac_to_queue[ac];
+	/* Priority 3a: If new AC is VO and VI exists - use VI */
+	else if (ac == IEEE80211_AC_VO &&
+		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
+		queue = ac_to_queue[IEEE80211_AC_VI];
+	/* Priority 3b: No BE so only AC less than the new one is BK */
+	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
+		queue = ac_to_queue[IEEE80211_AC_BK];
+	/* Priority 4a: No BE nor BK - use VI if exists */
+	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
+		queue = ac_to_queue[IEEE80211_AC_VI];
+	/* Priority 4b: No BE, BK nor VI - use VO if exists */
+	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
+		queue = ac_to_queue[IEEE80211_AC_VO];
+
+	/* Make sure queue found (or not) is legal */
+	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
+	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
+	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
+		IWL_ERR(mvm, "No DATA queues available to share\n");
+		return -ENOSPC;
+	}
+
+	/* Make sure the queue isn't in the middle of being reconfigured */
+	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_RECONFIGURING) {
+		IWL_ERR(mvm,
+			"TXQ %d is in the middle of re-config - try again\n",
+			queue);
+		return -EBUSY;
+	}
+
+	return queue;
+}
+
+/*
+ * If a given queue has a higher AC than the TID stream that is being compared
+ * to, the queue needs to be redirected to the lower AC. This function does
+ * that in such a case; otherwise - if no redirection is required - it does
+ * nothing, unless the %force param is true.
+ */
+int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
+			       int ac, int ssn, unsigned int wdg_timeout,
+			       bool force)
+{
+	struct iwl_scd_txq_cfg_cmd cmd = {
+		.scd_queue = queue,
+		.action = SCD_CFG_DISABLE_QUEUE,
+	};
+	bool shared_queue;
+	unsigned long mq;
+	int ret;
+
+	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+		return -EINVAL;
+
+	/*
+	 * If the AC is lower than current one - FIFO needs to be redirected to
+	 * the lowest one of the streams in the queue. Check if this is needed
+	 * here.
+	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
+	 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
+	 * we need to check if the numerical value of X is LARGER than of Y.
+	 */
+	spin_lock_bh(&mvm->queue_info_lock);
+	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
+		spin_unlock_bh(&mvm->queue_info_lock);
+
+		IWL_DEBUG_TX_QUEUES(mvm,
+				    "No redirection needed on TXQ #%d\n",
+				    queue);
+		return 0;
+	}
+
+	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
+	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
+	cmd.tid = mvm->queue_info[queue].txq_tid;
+	mq = mvm->hw_queue_to_mac80211[queue];
+	shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
+			    queue, iwl_mvm_ac_to_tx_fifo[ac]);
+
+	/* Stop MAC queues and wait for this queue to empty */
+	iwl_mvm_stop_mac_queues(mvm, mq);
+	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
+	if (ret) {
+		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
+			queue);
+		ret = -EIO;
+		goto out;
+	}
+
+	/* Before redirecting the queue we need to de-activate it */
+	iwl_trans_txq_disable(mvm->trans, queue, false);
+	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
+	if (ret)
+		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
+			ret);
+
+	/* Make sure the SCD wrptr is correctly set before reconfiguring */
+	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
+
+	/* Update the TID "owner" of the queue */
+	spin_lock_bh(&mvm->queue_info_lock);
+	mvm->queue_info[queue].txq_tid = tid;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */
+
+	/* Redirect to lower AC */
+	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
+			     cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);
+
+	/* Update AC marking of the queue */
+	spin_lock_bh(&mvm->queue_info_lock);
+	mvm->queue_info[queue].mac80211_ac = ac;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	/*
+	 * Mark the queue as shared in the transport if it is shared.
+	 * Note this has to be done after queue enablement because enablement
+	 * can also set this value, and there is no indication of shared
+	 * queues there.
+	 */
+	if (shared_queue)
+		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
+
+out:
+	/* Continue using the MAC queues */
+	iwl_mvm_start_mac_queues(mvm, mq);
+
+	return ret;
+}
+
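+/* On the new (TVQM) TX path, simply allocate a queue per STA/TID pair */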
+static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
+					struct ieee80211_sta *sta, u8 ac,
+					int tid)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	unsigned int wdg_timeout =
+		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+	u8 mac_queue = mvmsta->vif->hw_queue[ac];
+	int queue = -1;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	IWL_DEBUG_TX_QUEUES(mvm,
+			    "Allocating queue for sta %d on tid %d\n",
+			    mvmsta->sta_id, tid);
+	queue = iwl_mvm_tvqm_enable_txq(mvm, mac_queue, mvmsta->sta_id, tid,
+					wdg_timeout);
+	if (queue < 0)
+		return queue;
+
+	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);
+
+	spin_lock_bh(&mvmsta->lock);
+	mvmsta->tid_data[tid].txq_id = queue;
+	mvmsta->tid_data[tid].is_tid_active = true;
+	spin_unlock_bh(&mvmsta->lock);
+
+	return 0;
+}
+
+static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
+				   struct ieee80211_sta *sta, u8 ac, int tid,
+				   struct ieee80211_hdr *hdr)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_trans_txq_scd_cfg cfg = {
+		.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
+		.sta_id = mvmsta->sta_id,
+		.tid = tid,
+		.frame_limit = IWL_FRAME_LIMIT,
+	};
+	unsigned int wdg_timeout =
+		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+	u8 mac_queue = mvmsta->vif->hw_queue[ac];
+	int queue = -1;
+	bool using_inactive_queue = false, same_sta = false;
+	unsigned long disable_agg_tids = 0;
+	enum iwl_mvm_agg_state queue_state;
+	bool shared_queue = false, inc_ssn;
+	int ssn;
+	unsigned long tfd_queue_mask;
+	int ret;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (iwl_mvm_has_new_tx_api(mvm))
+		return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
+
+	spin_lock_bh(&mvmsta->lock);
+	tfd_queue_mask = mvmsta->tfd_queue_msk;
+	spin_unlock_bh(&mvmsta->lock);
+
+	spin_lock_bh(&mvm->queue_info_lock);
+
+	/*
+	 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
+	 * exists
+	 */
+	if (!ieee80211_is_data_qos(hdr->frame_control) ||
+	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
+		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
+						IWL_MVM_DQA_MIN_MGMT_QUEUE,
+						IWL_MVM_DQA_MAX_MGMT_QUEUE);
+		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
+			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
+					    queue);
+
+		/* If no such queue is found, we'll use a DATA queue instead */
+	}
+
+	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
+	    (mvm->queue_info[mvmsta->reserved_queue].status ==
+	     IWL_MVM_QUEUE_RESERVED ||
+	     mvm->queue_info[mvmsta->reserved_queue].status ==
+	     IWL_MVM_QUEUE_INACTIVE)) {
+		queue = mvmsta->reserved_queue;
+		mvm->queue_info[queue].reserved = true;
+		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
+	}
+
+	if (queue < 0)
+		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
+						IWL_MVM_DQA_MIN_DATA_QUEUE,
+						IWL_MVM_DQA_MAX_DATA_QUEUE);
+
+	/*
+	 * Check if this queue is already allocated but inactive.
+	 * In such a case, we'll need to first free this queue before enabling
+	 * it again, so we'll mark it as reserved to make sure no new traffic
+	 * arrives on it
+	 */
+	if (queue > 0 &&
+	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
+		mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
+		using_inactive_queue = true;
+		same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
+		IWL_DEBUG_TX_QUEUES(mvm,
+				    "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
+				    queue, mvmsta->sta_id, tid);
+	}
+
+	/* No free queue - we'll have to share */
+	if (queue <= 0) {
+		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
+		if (queue > 0) {
+			shared_queue = true;
+			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
+		}
+	}
+
+	/*
+	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
+	 * to make sure no one else takes it.
+	 * This will allow avoiding re-acquiring the lock at the end of the
+	 * configuration. On error we'll mark it back as free.
+	 */
+	if ((queue > 0) && !shared_queue)
+		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
+
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	/* This shouldn't happen - out of queues */
+	if (WARN_ON(queue <= 0)) {
+		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
+			tid, cfg.sta_id);
+		return queue;
+	}
+
+	/*
+	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
+	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
+	 * as aggregatable.
+	 * Mark all DATA queues as allowing to be aggregated at some point
+	 */
+	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
+			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
+
+	/*
+	 * If this queue was previously inactive (idle) - we need to free it
+	 * first
+	 */
+	if (using_inactive_queue) {
+		ret = iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
+		if (ret)
+			return ret;
+	}
+
+	IWL_DEBUG_TX_QUEUES(mvm,
+			    "Allocating %squeue #%d to sta %d on tid %d\n",
+			    shared_queue ? "shared " : "", queue,
+			    mvmsta->sta_id, tid);
+
+	if (shared_queue) {
+		/* Disable any open aggs on this queue */
+		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);
+
+		if (disable_agg_tids) {
+			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
+					    queue);
+			iwl_mvm_invalidate_sta_queue(mvm, queue,
+						     disable_agg_tids, false);
+		}
+	}
+
+	ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+	inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue,
+				     ssn, &cfg, wdg_timeout);
+	if (inc_ssn) {
+		ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
+		le16_add_cpu(&hdr->seq_ctrl, 0x10);
+	}
+
+	/*
+	 * Mark the queue as shared in the transport if it is shared.
+	 * Note this has to be done after queue enablement because enablement
+	 * can also set this value, and there is no indication of shared
+	 * queues there.
+	 */
+	if (shared_queue)
+		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
+
+	spin_lock_bh(&mvmsta->lock);
+	/*
+	 * This looks racy, but it is not. We have only one packet for
+	 * this ra/tid in our Tx path since we stop the Qdisc when we
+	 * need to allocate a new TFD queue.
+	 */
+	if (inc_ssn)
+		mvmsta->tid_data[tid].seq_number += 0x10;
+	mvmsta->tid_data[tid].txq_id = queue;
+	mvmsta->tid_data[tid].is_tid_active = true;
+	mvmsta->tfd_queue_msk |= BIT(queue);
+	queue_state = mvmsta->tid_data[tid].state;
+
+	if (mvmsta->reserved_queue == queue)
+		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
+	spin_unlock_bh(&mvmsta->lock);
+
+	if (!shared_queue) {
+		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
+		if (ret)
+			goto out_err;
+
+		/* If we need to re-enable aggregations... */
+		if (queue_state == IWL_AGG_ON) {
+			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
+			if (ret)
+				goto out_err;
+		}
+	} else {
+		/* Redirect queue, if needed */
+		ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
+						 wdg_timeout, false);
+		if (ret)
+			goto out_err;
+	}
+
+	return 0;
+
+out_err:
+	iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);
+
+	return ret;
+}
+
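+/*
+ * Re-assign the "owner" TID of a queue: pick any TID still mapped to the
+ * queue and update the FW (SCD_CFG_UPDATE_QUEUE_TID) so the queue uses
+ * that TID's FIFO.
+ */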
+static void iwl_mvm_change_queue_owner(struct iwl_mvm *mvm, int queue)
+{
+	struct iwl_scd_txq_cfg_cmd cmd = {
+		.scd_queue = queue,
+		.action = SCD_CFG_UPDATE_QUEUE_TID,
+	};
+	int tid;
+	unsigned long tid_bitmap;
+	int ret;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+		return;
+
+	spin_lock_bh(&mvm->queue_info_lock);
+	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
+		return;
+
+	/* Find any TID for queue */
+	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
+	cmd.tid = tid;
+	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
+
+	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
+	if (ret) {
+		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
+			queue, ret);
+		return;
+	}
+
+	spin_lock_bh(&mvm->queue_info_lock);
+	mvm->queue_info[queue].txq_tid = tid;
+	spin_unlock_bh(&mvm->queue_info_lock);
+	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
+			    queue, tid);
+}
+
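+/*
+ * Turn a shared queue that is left with a single TID back into a regular
+ * queue: redirect it to that TID's AC and, if the TID had an open
+ * aggregation session, re-enable aggregation on the queue.
+ */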
+static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
+{
+	struct ieee80211_sta *sta;
+	struct iwl_mvm_sta *mvmsta;
+	u8 sta_id;
+	int tid = -1;
+	unsigned long tid_bitmap;
+	unsigned int wdg_timeout;
+	int ssn;
+	int ret;
+
+	/* queue sharing is disabled on new TX path */
+	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+		return;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	spin_lock_bh(&mvm->queue_info_lock);
+	sta_id = mvm->queue_info[queue].ra_sta_id;
+	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	/* Find TID for queue, and make sure it is the only one on the queue */
+	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
+	if (tid_bitmap != BIT(tid)) {
+		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
+			queue, tid_bitmap);
+		return;
+	}
+
+	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
+			    tid);
+
+	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+					lockdep_is_held(&mvm->mutex));
+
+	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+		return;
+
+	mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+
+	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
+
+	ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid,
+					 tid_to_mac80211_ac[tid], ssn,
+					 wdg_timeout, true);
+	if (ret) {
+		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
+		return;
+	}
+
+	/* If aggs should be turned back on - do it */
+	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
+		struct iwl_mvm_add_sta_cmd cmd = {0};
+
+		mvmsta->tid_disable_agg &= ~BIT(tid);
+
+		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
+		cmd.sta_id = mvmsta->sta_id;
+		cmd.add_modify = STA_MODE_MODIFY;
+		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
+		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
+		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
+
+		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
+					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
+		if (!ret) {
+			IWL_DEBUG_TX_QUEUES(mvm,
+					    "TXQ #%d is now aggregated again\n",
+					    queue);
+
+			/* Mark queue internally as aggregating again */
+			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
+		}
+	}
+
+	spin_lock_bh(&mvm->queue_info_lock);
+	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
+	spin_unlock_bh(&mvm->queue_info_lock);
+}
+
+static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
+{
+	if (tid == IWL_MAX_TID_COUNT)
+		return IEEE80211_AC_VO; /* MGMT */
+
+	return tid_to_mac80211_ac[tid];
+}
+
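+/*
+ * Transmit the deferred frames of a given STA/TID: allocate a TXQ if none
+ * is assigned yet, then send the queued frames (or free them if no queue
+ * could be allocated) and wake the corresponding MAC queue.
+ */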
+static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
+				       struct ieee80211_sta *sta, int tid)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+	struct sk_buff *skb;
+	struct ieee80211_hdr *hdr;
+	struct sk_buff_head deferred_tx;
+	u8 mac_queue;
+	bool no_queue = false; /* Marks if there is a problem with the queue */
+	u8 ac;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	skb = skb_peek(&tid_data->deferred_tx_frames);
+	if (!skb)
+		return;
+	hdr = (void *)skb->data;
+
+	ac = iwl_mvm_tid_to_ac_queue(tid);
+	mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;
+
+	if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE &&
+	    iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
+		IWL_ERR(mvm,
+			"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
+			mvmsta->sta_id, tid);
+
+		/*
+		 * Mark queue as problematic so later the deferred traffic is
+		 * freed, as we can do nothing with it
+		 */
+		no_queue = true;
+	}
+
+	__skb_queue_head_init(&deferred_tx);
+
+	/* Disable bottom-halves when entering TX path */
+	local_bh_disable();
+	spin_lock(&mvmsta->lock);
+	skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
+	mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
+	spin_unlock(&mvmsta->lock);
+
+	while ((skb = __skb_dequeue(&deferred_tx)))
+		if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
+			ieee80211_free_txskb(mvm->hw, skb);
+	local_bh_enable();
+
+	/* Wake queue */
+	iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
+}
+
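+/*
+ * Worker handling queue maintenance: check for inactive queues, unshare
+ * or re-own queues that need it, and allocate queues for all stations
+ * that have deferred traffic pending.
+ */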
+void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
+{
+	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
+					   add_stream_wk);
+	struct ieee80211_sta *sta;
+	struct iwl_mvm_sta *mvmsta;
+	unsigned long deferred_tid_traffic;
+	int queue, sta_id, tid;
+
+	/* Check inactivity of queues */
+	iwl_mvm_inactivity_check(mvm);
+
+	mutex_lock(&mvm->mutex);
+
+	/* No queue reconfiguration in TVQM mode */
+	if (iwl_mvm_has_new_tx_api(mvm))
+		goto alloc_queues;
+
+	/* Reconfigure queues requiring reconfiguration */
+	for (queue = 0; queue < ARRAY_SIZE(mvm->queue_info); queue++) {
+		bool reconfig;
+		bool change_owner;
+
+		spin_lock_bh(&mvm->queue_info_lock);
+		reconfig = (mvm->queue_info[queue].status ==
+			    IWL_MVM_QUEUE_RECONFIGURING);
+
+		/*
+		 * We need to take into account a situation in which a TXQ was
+		 * allocated to TID x, and then turned shared by adding TIDs y
+		 * and z. If TID x becomes inactive and is removed from the TXQ,
+		 * ownership must be given to one of the remaining TIDs.
+		 * This is mainly because if TID x continues - a new queue can't
+		 * be allocated for it as long as it is an owner of another TXQ.
+		 */
+		change_owner = !(mvm->queue_info[queue].tid_bitmap &
+				 BIT(mvm->queue_info[queue].txq_tid)) &&
+			       (mvm->queue_info[queue].status ==
+				IWL_MVM_QUEUE_SHARED);
+		spin_unlock_bh(&mvm->queue_info_lock);
+
+		if (reconfig)
+			iwl_mvm_unshare_queue(mvm, queue);
+		else if (change_owner)
+			iwl_mvm_change_queue_owner(mvm, queue);
+	}
+
+alloc_queues:
+	/* Go over all stations with deferred traffic */
+	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
+			 IWL_MVM_STATION_COUNT) {
+		clear_bit(sta_id, mvm->sta_deferred_frames);
+		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+						lockdep_is_held(&mvm->mutex));
+		if (IS_ERR_OR_NULL(sta))
+			continue;
+
+		mvmsta = iwl_mvm_sta_from_mac80211(sta);
+		deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;
+
+		for_each_set_bit(tid, &deferred_tid_traffic,
+				 IWL_MAX_TID_COUNT + 1)
+			iwl_mvm_tx_deferred_stream(mvm, sta, tid);
+	}
+
+	mutex_unlock(&mvm->mutex);
+}
+
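+/*
+ * Reserve a TXQ for a station that is being added, preferring the
+ * dedicated BSS client queue when it is free, so that a queue shortage is
+ * detected before the station is added rather than when traffic arrives.
+ */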
+static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
+				      struct ieee80211_sta *sta,
+				      enum nl80211_iftype vif_type)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	int queue;
+	bool using_inactive_queue = false, same_sta = false;
+
+	/* queue reserving is disabled on new TX path */
+	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+		return 0;
+
+	/*
+	 * Check for inactive queues, so we don't reach a situation where we
+	 * can't add a STA due to a shortage in queues that doesn't really exist
+	 */
+	iwl_mvm_inactivity_check(mvm);
+
+	spin_lock_bh(&mvm->queue_info_lock);
+
+	/* Make sure we have free resources for this STA */
+	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
+	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
+	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
+	     IWL_MVM_QUEUE_FREE))
+		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
+	else
+		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
+						IWL_MVM_DQA_MIN_DATA_QUEUE,
+						IWL_MVM_DQA_MAX_DATA_QUEUE);
+	if (queue < 0) {
+		spin_unlock_bh(&mvm->queue_info_lock);
+		IWL_ERR(mvm, "No available queues for new station\n");
+		return -ENOSPC;
+	} else if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
+		/*
+		 * If this queue is already allocated but inactive we'll need to
+		 * first free this queue before enabling it again, we'll mark
+		 * it as reserved to make sure no new traffic arrives on it
+		 */
+		using_inactive_queue = true;
+		same_sta = mvm->queue_info[queue].ra_sta_id == mvmsta->sta_id;
+	}
+	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
+
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	mvmsta->reserved_queue = queue;
+
+	if (using_inactive_queue)
+		iwl_mvm_free_inactive_queue(mvm, queue, same_sta);
+
+	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
+			    queue, mvmsta->sta_id);
+
+	return 0;
+}
+
+/*
+ * In DQA mode, after a HW restart the queues should be allocated as before, in
+ * order to avoid race conditions when there are shared queues. This function
+ * does the re-mapping and queue allocation.
+ *
+ * Note that re-enabling aggregations isn't done in this function.
+ */
+static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
+						 struct iwl_mvm_sta *mvm_sta)
+{
+	unsigned int wdg_timeout =
+			iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
+	int i;
+	struct iwl_trans_txq_scd_cfg cfg = {
+		.sta_id = mvm_sta->sta_id,
+		.frame_limit = IWL_FRAME_LIMIT,
+	};
+
+	/* Make sure reserved queue is still marked as such (if allocated) */
+	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
+		mvm->queue_info[mvm_sta->reserved_queue].status =
+			IWL_MVM_QUEUE_RESERVED;
+
+	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
+		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
+		int txq_id = tid_data->txq_id;
+		int ac;
+		u8 mac_queue;
+
+		if (txq_id == IWL_MVM_INVALID_QUEUE)
+			continue;
+
+		skb_queue_head_init(&tid_data->deferred_tx_frames);
+
+		ac = tid_to_mac80211_ac[i];
+		mac_queue = mvm_sta->vif->hw_queue[ac];
+
+		if (iwl_mvm_has_new_tx_api(mvm)) {
+			IWL_DEBUG_TX_QUEUES(mvm,
+					    "Re-mapping sta %d tid %d\n",
+					    mvm_sta->sta_id, i);
+			txq_id = iwl_mvm_tvqm_enable_txq(mvm, mac_queue,
+							 mvm_sta->sta_id,
+							 i, wdg_timeout);
+			tid_data->txq_id = txq_id;
+
+			/*
+			 * Since we don't set the seq number after reset, and
+			 * the HW sets it now, a FW reset will cause the seq
+			 * num to start at 0 again. The driver therefore needs
+			 * to update its internal copy as well, so it stays in
+			 * sync with the real value.
+			 */
+			tid_data->seq_number = 0;
+		} else {
+			u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
+
+			cfg.tid = i;
+			cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
+			cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
+					 txq_id ==
+					 IWL_MVM_DQA_BSS_CLIENT_QUEUE);
+
+			IWL_DEBUG_TX_QUEUES(mvm,
+					    "Re-mapping sta %d tid %d to queue %d\n",
+					    mvm_sta->sta_id, i, txq_id);
+
+			iwl_mvm_enable_txq(mvm, txq_id, mac_queue, seq, &cfg,
+					   wdg_timeout);
+			mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
+		}
+	}
+}
+
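+/*
+ * Send the ADD_STA command for an internal station (e.g. aux, sniffer,
+ * broadcast or multicast station) and check the FW status in the reply.
+ */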
+static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
+				      struct iwl_mvm_int_sta *sta,
+				      const u8 *addr,
+				      u16 mac_id, u16 color)
+{
+	struct iwl_mvm_add_sta_cmd cmd;
+	int ret;
+	u32 status = ADD_STA_SUCCESS;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.sta_id = sta->sta_id;
+	cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
+							     color));
+	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
+		cmd.station_type = sta->type;
+
+	if (!iwl_mvm_has_new_tx_api(mvm))
+		cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
+	cmd.tid_disable_tx = cpu_to_le16(0xffff);
+
+	if (addr)
+		memcpy(cmd.addr, addr, ETH_ALEN);
+
+	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+					  iwl_mvm_add_sta_cmd_size(mvm),
+					  &cmd, &status);
+	if (ret)
+		return ret;
+
+	switch (status & IWL_ADD_STA_STATUS_MASK) {
+	case ADD_STA_SUCCESS:
+		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
+		return 0;
+	default:
+		ret = -EIO;
+		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
+			status);
+		break;
+	}
+	return ret;
+}
+
+int iwl_mvm_add_sta(struct iwl_mvm *mvm,
+		    struct ieee80211_vif *vif,
+		    struct ieee80211_sta *sta)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_mvm_rxq_dup_data *dup_data;
+	int i, ret, sta_id;
+	bool sta_update = false;
+	unsigned int sta_flags = 0;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+		sta_id = iwl_mvm_find_free_sta_id(mvm,
+						  ieee80211_vif_type_p2p(vif));
+	else
+		sta_id = mvm_sta->sta_id;
+
+	if (sta_id == IWL_MVM_INVALID_STA)
+		return -ENOSPC;
+
+	spin_lock_init(&mvm_sta->lock);
+
+	/* if this is a HW restart re-alloc existing queues */
+	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+		struct iwl_mvm_int_sta tmp_sta = {
+			.sta_id = sta_id,
+			.type = mvm_sta->sta_type,
+		};
+
+		/*
+		 * First add an empty station since allocating
+		 * a queue requires a valid station
+		 */
+		ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
+						 mvmvif->id, mvmvif->color);
+		if (ret)
+			goto err;
+
+		iwl_mvm_realloc_queues_after_restart(mvm, mvm_sta);
+		sta_update = true;
+		sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
+		goto update_fw;
+	}
+
+	mvm_sta->sta_id = sta_id;
+	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
+						      mvmvif->color);
+	mvm_sta->vif = vif;
+	if (!mvm->trans->cfg->gen2)
+		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+	else
+		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
+	mvm_sta->tx_protection = 0;
+	mvm_sta->tt_tx_protection = false;
+	mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;
+
+	/* HW restart, don't assume the memory has been zeroed */
+	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
+	mvm_sta->tfd_queue_msk = 0;
+
+	/* for HW restart - reset everything but the sequence number */
+	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
+		u16 seq = mvm_sta->tid_data[i].seq_number;
+		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
+		mvm_sta->tid_data[i].seq_number = seq;
+
+		/*
+		 * Mark all queues for this STA as unallocated and defer TX
+		 * frames until the queue is allocated
+		 */
+		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
+		skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
+	}
+	mvm_sta->deferred_traffic_tid_map = 0;
+	mvm_sta->agg_tids = 0;
+
+	if (iwl_mvm_has_new_rx_api(mvm) &&
+	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+		int q;
+
+		dup_data = kcalloc(mvm->trans->num_rx_queues,
+				   sizeof(*dup_data), GFP_KERNEL);
+		if (!dup_data)
+			return -ENOMEM;
+		/*
+		 * Initialize all the last_seq values to 0xffff which can never
+		 * compare equal to the frame's seq_ctrl in the check in
+		 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
+		 * number and fragmented packets don't reach that function.
+		 *
+		 * This thus allows receiving a packet with seqno 0 and the
+		 * retry bit set as the very first packet on a new TID.
+		 */
+		for (q = 0; q < mvm->trans->num_rx_queues; q++)
+			memset(dup_data[q].last_seq, 0xff,
+			       sizeof(dup_data[q].last_seq));
+		mvm_sta->dup_data = dup_data;
+	}
+
+	if (!iwl_mvm_has_new_tx_api(mvm)) {
+		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
+						 ieee80211_vif_type_p2p(vif));
+		if (ret)
+			goto err;
+	}
+
+	/*
+	 * If rs is registered with mac80211, then "add station" will be
+	 * handled via the corresponding ops; otherwise we need to notify
+	 * rate scaling here.
+	 */
+	if (iwl_mvm_has_tlc_offload(mvm))
+		iwl_mvm_rs_add_sta(mvm, mvm_sta);
+
+update_fw:
+	ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
+	if (ret)
+		goto err;
+
+	if (vif->type == NL80211_IFTYPE_STATION) {
+		if (!sta->tdls) {
+			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
+			mvmvif->ap_sta_id = sta_id;
+		} else {
+			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
+		}
+	}
+
+	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
+
+	return 0;
+
+err:
+	return ret;
+}
+
+int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+		      bool drain)
+{
+	struct iwl_mvm_add_sta_cmd cmd = {};
+	int ret;
+	u32 status;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
+	cmd.sta_id = mvmsta->sta_id;
+	cmd.add_modify = STA_MODE_MODIFY;
+	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
+	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
+
+	status = ADD_STA_SUCCESS;
+	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+					  iwl_mvm_add_sta_cmd_size(mvm),
+					  &cmd, &status);
+	if (ret)
+		return ret;
+
+	switch (status & IWL_ADD_STA_STATUS_MASK) {
+	case ADD_STA_SUCCESS:
+		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
+			       mvmsta->sta_id);
+		break;
+	default:
+		ret = -EIO;
+		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
+			mvmsta->sta_id);
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * Remove a station from the FW table. Before sending the command to remove
+ * the station validate that the station is indeed known to the driver (sanity
+ * only).
+ */
+static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
+{
+	struct ieee80211_sta *sta;
+	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
+		.sta_id = sta_id,
+	};
+	int ret;
+
+	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+					lockdep_is_held(&mvm->mutex));
+
+	/* Note: internal stations are marked as error values */
+	if (!sta) {
+		IWL_ERR(mvm, "Invalid station id\n");
+		return -EINVAL;
+	}
+
+	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
+				   sizeof(rm_sta_cmd), &rm_sta_cmd);
+	if (ret) {
+		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
+		return ret;
+	}
+
+	return 0;
+}
+
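+/* Disable and unmap all TXQs that are still assigned to this station's TIDs */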
+static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
+				       struct ieee80211_vif *vif,
+				       struct iwl_mvm_sta *mvm_sta)
+{
+	int ac;
+	int i;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
+		if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
+			continue;
+
+		ac = iwl_mvm_tid_to_ac_queue(i);
+		iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
+				    vif->hw_queue[ac], i, 0);
+		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
+	}
+}
+
+int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
+				  struct iwl_mvm_sta *mvm_sta)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
+		u16 txq_id;
+		int ret;
+
+		spin_lock_bh(&mvm_sta->lock);
+		txq_id = mvm_sta->tid_data[i].txq_id;
+		spin_unlock_bh(&mvm_sta->lock);
+
+		if (txq_id == IWL_MVM_INVALID_QUEUE)
+			continue;
+
+		ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
+		   struct ieee80211_vif *vif,
+		   struct ieee80211_sta *sta)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+	u8 sta_id = mvm_sta->sta_id;
+	int ret;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (iwl_mvm_has_new_rx_api(mvm))
+		kfree(mvm_sta->dup_data);
+
+	ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
+	if (ret)
+		return ret;
+
+	/* flush its queues here since we are freeing mvm_sta */
+	ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
+	if (ret)
+		return ret;
+	if (iwl_mvm_has_new_tx_api(mvm)) {
+		ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
+	} else {
+		u32 q_mask = mvm_sta->tfd_queue_msk;
+
+		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
+						     q_mask);
+	}
+	if (ret)
+		return ret;
+
+	ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
+
+	iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
+
+	/* If there is a TXQ still marked as reserved - free it */
+	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
+		u8 reserved_txq = mvm_sta->reserved_queue;
+		enum iwl_mvm_queue_status *status;
+
+		/*
+		 * If no traffic has gone through the reserved TXQ - it
+		 * is still marked as IWL_MVM_QUEUE_RESERVED, and
+		 * should be manually marked as free again
+		 */
+		spin_lock_bh(&mvm->queue_info_lock);
+		status = &mvm->queue_info[reserved_txq].status;
+		if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
+			 (*status != IWL_MVM_QUEUE_FREE),
+			 "sta_id %d reserved txq %d status %d",
+			 sta_id, reserved_txq, *status)) {
+			spin_unlock_bh(&mvm->queue_info_lock);
+			return -EINVAL;
+		}
+
+		*status = IWL_MVM_QUEUE_FREE;
+		spin_unlock_bh(&mvm->queue_info_lock);
+	}
+
+	if (vif->type == NL80211_IFTYPE_STATION &&
+	    mvmvif->ap_sta_id == sta_id) {
+		/* if associated - we can't remove the AP STA now */
+		if (vif->bss_conf.assoc)
+			return ret;
+
+		/* unassoc - go ahead - remove the AP STA now */
+		mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
+
+		/* clear d0i3_ap_sta_id if no longer relevant */
+		if (mvm->d0i3_ap_sta_id == sta_id)
+			mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;
+	}
+
+	/*
+	 * This shouldn't happen - the TDLS channel switch should be canceled
+	 * before the STA is removed.
+	 */
+	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
+		mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
+		cancel_delayed_work(&mvm->tdls_cs.dwork);
+	}
+
+	/*
+	 * Make sure that the tx response code sees the station as -EBUSY and
+	 * calls the drain worker.
+	 */
+	spin_lock_bh(&mvm_sta->lock);
+	spin_unlock_bh(&mvm_sta->lock);
+
+	ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
+	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
+
+	return ret;
+}
+
+int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
+		      struct ieee80211_vif *vif,
+		      u8 sta_id)
+{
+	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
+
+	lockdep_assert_held(&mvm->mutex);
+
+	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
+	return ret;
+}
+
+int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
+			     struct iwl_mvm_int_sta *sta,
+			     u32 qmask, enum nl80211_iftype iftype,
+			     enum iwl_sta_type type)
+{
+	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
+	    sta->sta_id == IWL_MVM_INVALID_STA) {
+		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
+		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
+			return -ENOSPC;
+	}
+
+	sta->tfd_queue_msk = qmask;
+	sta->type = type;
+
+	/* put a non-NULL value so iterating over the stations won't stop */
+	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
+	return 0;
+}
+
+void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
+{
+	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
+	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
+	sta->sta_id = IWL_MVM_INVALID_STA;
+}
+
+static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
+					  u8 sta_id, u8 fifo)
+{
+	unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
+					mvm->cfg->base_params->wd_timeout :
+					IWL_WATCHDOG_DISABLED;
+
+	if (iwl_mvm_has_new_tx_api(mvm)) {
+		int tvqm_queue =
+			iwl_mvm_tvqm_enable_txq(mvm, *queue, sta_id,
+						IWL_MAX_TID_COUNT,
+						wdg_timeout);
+		*queue = tvqm_queue;
+	} else {
+		struct iwl_trans_txq_scd_cfg cfg = {
+			.fifo = fifo,
+			.sta_id = sta_id,
+			.tid = IWL_MAX_TID_COUNT,
+			.aggregate = false,
+			.frame_limit = IWL_FRAME_LIMIT,
+		};
+
+		iwl_mvm_enable_txq(mvm, *queue, *queue, 0, &cfg, wdg_timeout);
+	}
+}
+
+int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
+{
+	int ret;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	/* Allocate aux station and assign to it the aux queue */
+	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
+				       NL80211_IFTYPE_UNSPECIFIED,
+				       IWL_STA_AUX_ACTIVITY);
+	if (ret)
+		return ret;
+
+	/* Map Aux queue to fifo - needs to happen before adding Aux station */
+	if (!iwl_mvm_has_new_tx_api(mvm))
+		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
+					      mvm->aux_sta.sta_id,
+					      IWL_MVM_TX_FIFO_MCAST);
+
+	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
+					 MAC_INDEX_AUX, 0);
+	if (ret) {
+		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
+		return ret;
+	}
+
+	/*
+	 * For 22000 firmware and on we cannot add a queue to a station unknown
+	 * to the firmware, so enable the queue here - after the station was
+	 * added.
+	 */
+	if (iwl_mvm_has_new_tx_api(mvm))
+		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
+					      mvm->aux_sta.sta_id,
+					      IWL_MVM_TX_FIFO_MCAST);
+
+	return 0;
+}
+
+int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	int ret;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	/* Map snif queue to fifo - must happen before adding snif station */
+	if (!iwl_mvm_has_new_tx_api(mvm))
+		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
+					      mvm->snif_sta.sta_id,
+					      IWL_MVM_TX_FIFO_BE);
+
+	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
+					 mvmvif->id, 0);
+	if (ret)
+		return ret;
+
+	/*
+	 * For 22000 firmware and on we cannot add a queue to a station unknown
+	 * to the firmware, so enable the queue here - after the station was
+	 * added.
+	 */
+	if (iwl_mvm_has_new_tx_api(mvm))
+		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
+					      mvm->snif_sta.sta_id,
+					      IWL_MVM_TX_FIFO_BE);
+
+	return 0;
+}
+
+int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+	int ret;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	iwl_mvm_disable_txq(mvm, mvm->snif_queue, mvm->snif_queue,
+			    IWL_MAX_TID_COUNT, 0);
+	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
+	if (ret)
+		IWL_WARN(mvm, "Failed sending remove station\n");
+
+	return ret;
+}
+
+void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
+{
+	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
+}
+
+void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
+{
+	lockdep_assert_held(&mvm->mutex);
+
+	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
+}
+
+/*
+ * Send the add station command for the vif's broadcast station.
+ * Assumes that the station was already allocated.
+ *
+ * @mvm: the mvm component
+ * @vif: the interface to which the broadcast station is added
+ */
+int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
+	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
+	const u8 *baddr = _baddr;
+	int queue;
+	int ret;
+	unsigned int wdg_timeout =
+		iwl_mvm_get_wd_timeout(mvm, vif, false, false);
+	struct iwl_trans_txq_scd_cfg cfg = {
+		.fifo = IWL_MVM_TX_FIFO_VO,
+		.sta_id = mvmvif->bcast_sta.sta_id,
+		.tid = IWL_MAX_TID_COUNT,
+		.aggregate = false,
+		.frame_limit = IWL_FRAME_LIMIT,
+	};
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (!iwl_mvm_has_new_tx_api(mvm)) {
+		if (vif->type == NL80211_IFTYPE_AP ||
+		    vif->type == NL80211_IFTYPE_ADHOC)
+			queue = mvm->probe_queue;
+		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+			queue = mvm->p2p_dev_queue;
+		else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
+			return -EINVAL;
+
+		bsta->tfd_queue_msk |= BIT(queue);
+
+		iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0,
+				   &cfg, wdg_timeout);
+	}
+
+	if (vif->type == NL80211_IFTYPE_ADHOC)
+		baddr = vif->bss_conf.bssid;
+
+	if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
+		return -ENOSPC;
+
+	ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
+					 mvmvif->id, mvmvif->color);
+	if (ret)
+		return ret;
+
+	/*
+	 * For 22000 firmware and on we cannot add a queue to a station unknown
+	 * to the firmware, so enable the queue here - after the station was
+	 * added.
+	 */
+	if (iwl_mvm_has_new_tx_api(mvm)) {
+		queue = iwl_mvm_tvqm_enable_txq(mvm, vif->hw_queue[0],
+						bsta->sta_id,
+						IWL_MAX_TID_COUNT,
+						wdg_timeout);
+
+		if (vif->type == NL80211_IFTYPE_AP ||
+		    vif->type == NL80211_IFTYPE_ADHOC)
+			mvm->probe_queue = queue;
+		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
+			mvm->p2p_dev_queue = queue;
+	}
+
+	return 0;
+}
+
+static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
+					  struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	int queue;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);
+
+	switch (vif->type) {
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_ADHOC:
+		queue = mvm->probe_queue;
+		break;
+	case NL80211_IFTYPE_P2P_DEVICE:
+		queue = mvm->p2p_dev_queue;
+		break;
+	default:
+		WARN(1, "Can't free bcast queue on vif type %d\n",
+		     vif->type);
+		return;
+	}
+
+	iwl_mvm_disable_txq(mvm, queue, vif->hw_queue[0], IWL_MAX_TID_COUNT, 0);
+	if (iwl_mvm_has_new_tx_api(mvm))
+		return;
+
+	WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
+	mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
+}
+
+/* Send the FW a request to remove the station from its internal data
+ * structures, but DO NOT remove the entry from the local data structures. */
+int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	int ret;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	iwl_mvm_free_bcast_sta_queues(mvm, vif);
+
+	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
+	if (ret)
+		IWL_WARN(mvm, "Failed sending remove station\n");
+	return ret;
+}
+
+int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	lockdep_assert_held(&mvm->mutex);
+
+	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
+					ieee80211_vif_type_p2p(vif),
+					IWL_STA_GENERAL_PURPOSE);
+}
+
+/* Allocate a new station entry for the broadcast station to the given vif,
+ * and send it to the FW.
+ * Note that each P2P mac should have its own broadcast station.
+ *
+ * @mvm: the mvm component
+ * @vif: the interface to which the broadcast station is added
+ */
+int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
+	int ret;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
+	if (ret)
+		return ret;
+
+	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
+
+	if (ret)
+		iwl_mvm_dealloc_int_sta(mvm, bsta);
+
+	return ret;
+}
+
+void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
+}
+
+/*
+ * Send the FW a request to remove the station from its internal data
+ * structures, and in addition remove it from the local data structure.
+ */
+int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+	int ret;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
+
+	iwl_mvm_dealloc_bcast_sta(mvm, vif);
+
+	return ret;
+}
+
+/*
+ * Allocate a new station entry for the multicast station to the given vif,
+ * and send it to the FW.
+ * Note that each AP/GO mac should have its own multicast station.
+ *
+ * @mvm: the mvm component
+ * @vif: the interface to which the multicast station is added
+ */
+int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
+	static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
+	const u8 *maddr = _maddr;
+	struct iwl_trans_txq_scd_cfg cfg = {
+		.fifo = IWL_MVM_TX_FIFO_MCAST,
+		.sta_id = msta->sta_id,
+		.tid = 0,
+		.aggregate = false,
+		.frame_limit = IWL_FRAME_LIMIT,
+	};
+	unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
+	int ret;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
+		    vif->type != NL80211_IFTYPE_ADHOC))
+		return -ENOTSUPP;
+
+	/*
+	 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
+	 * invalid, so make sure we use the queue we want.
+	 * Note that this is done here as we want to avoid making DQA
+	 * changes in mac80211 layer.
+	 */
+	if (vif->type == NL80211_IFTYPE_ADHOC) {
+		vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
+		mvmvif->cab_queue = vif->cab_queue;
+	}
+
+	/*
+	 * While in previous FWs we had to exclude the cab queue from the TFD
+	 * queue mask, now it is needed there like any other queue.
+	 */
+	if (!iwl_mvm_has_new_tx_api(mvm) &&
+	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
+		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
+				   &cfg, timeout);
+		msta->tfd_queue_msk |= BIT(vif->cab_queue);
+	}
+	ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
+					 mvmvif->id, mvmvif->color);
+	if (ret) {
+		iwl_mvm_dealloc_int_sta(mvm, msta);
+		return ret;
+	}
+
+	/*
+	 * Enable cab queue after the ADD_STA command is sent.
+	 * This is needed for 22000 firmware, which won't accept an
+	 * SCD_QUEUE_CFG command with an unknown station id, and for FW that
+	 * doesn't support the station API, since the cab queue is not
+	 * included in the tfd_queue_mask.
+	 */
+	if (iwl_mvm_has_new_tx_api(mvm)) {
+		int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
+						    msta->sta_id,
+						    0,
+						    timeout);
+		mvmvif->cab_queue = queue;
+	} else if (!fw_has_api(&mvm->fw->ucode_capa,
+			       IWL_UCODE_TLV_API_STA_TYPE))
+		iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
+				   &cfg, timeout);
+
+	return 0;
+}
+
+/*
+ * Send the FW a request to remove the station from its internal data
+ * structures, and in addition remove it from the local data structure.
+ */
+int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	int ret;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
+
+	iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
+			    0, 0);
+
+	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
+	if (ret)
+		IWL_WARN(mvm, "Failed sending remove station\n");
+
+	return ret;
+}
+
+#define IWL_MAX_RX_BA_SESSIONS 16
+
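+/*
+ * Synchronously notify all RX queues that a BAID is being removed, so no
+ * queue keeps using the BA session's reorder data afterwards.
+ */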
+static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
+{
+	struct iwl_mvm_delba_notif notif = {
+		.metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
+		.metadata.sync = 1,
+		.delba.baid = baid,
+	};
+	iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
+};
+
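+/*
+ * Tear down the reorder buffers of a BA session on all RX queues: purge
+ * any frames that are unexpectedly still stored and disarm the reorder
+ * timers.
+ */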
+static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
+				 struct iwl_mvm_baid_data *data)
+{
+	int i;
+
+	iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
+
+	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
+		int j;
+		struct iwl_mvm_reorder_buffer *reorder_buf =
+			&data->reorder_buf[i];
+		struct iwl_mvm_reorder_buf_entry *entries =
+			&data->entries[i * data->entries_per_queue];
+
+		spin_lock_bh(&reorder_buf->lock);
+		if (likely(!reorder_buf->num_stored)) {
+			spin_unlock_bh(&reorder_buf->lock);
+			continue;
+		}
+
+		/*
+		 * This shouldn't happen in regular DELBA since the internal
+		 * delBA notification should trigger a release of all frames in
+		 * the reorder buffer.
+		 */
+		WARN_ON(1);
+
+		for (j = 0; j < reorder_buf->buf_size; j++)
+			__skb_queue_purge(&entries[j].e.frames);
+		/*
+		 * Prevent timer re-arm. This prevents a very far-fetched case
+		 * where we timed out on the notification. There may be prior
+		 * RX frames pending in the RX queue before the notification
+		 * that might get processed between now and the actual deletion
+		 * and we would re-arm the timer although we are deleting the
+		 * reorder buffer.
+		 */
+		reorder_buf->removed = true;
+		spin_unlock_bh(&reorder_buf->lock);
+		del_timer_sync(&reorder_buf->reorder_timer);
+	}
+}
+
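+/* Initialize the per-RX-queue reorder buffers for a new BA session */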
+static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
+					struct iwl_mvm_baid_data *data,
+					u16 ssn, u16 buf_size)
+{
+	int i;
+
+	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
+		struct iwl_mvm_reorder_buffer *reorder_buf =
+			&data->reorder_buf[i];
+		struct iwl_mvm_reorder_buf_entry *entries =
+			&data->entries[i * data->entries_per_queue];
+		int j;
+
+		reorder_buf->num_stored = 0;
+		reorder_buf->head_sn = ssn;
+		reorder_buf->buf_size = buf_size;
+		/* rx reorder timer */
+		timer_setup(&reorder_buf->reorder_timer,
+			    iwl_mvm_reorder_timer_expired, 0);
+		spin_lock_init(&reorder_buf->lock);
+		reorder_buf->mvm = mvm;
+		reorder_buf->queue = i;
+		reorder_buf->valid = false;
+		for (j = 0; j < reorder_buf->buf_size; j++)
+			__skb_queue_head_init(&entries[j].e.frames);
+	}
+}
+
+int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+		       int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
+{
+	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_mvm_add_sta_cmd cmd = {};
+	struct iwl_mvm_baid_data *baid_data = NULL;
+	int ret;
+	u32 status;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
+		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
+		return -ENOSPC;
+	}
+
+	if (iwl_mvm_has_new_rx_api(mvm) && start) {
+		u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
+
+		/* sparse doesn't like the __align() so don't check */
+#ifndef __CHECKER__
+		/*
+		 * The division below will be OK if either the cache line size
+		 * can be divided by the entry size (ALIGN will round up) or
+		 * if the entry size can be divided by the cache line size, in
+		 * which case the ALIGN() will do nothing.
+		 */
+		BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
+			     sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
+#endif
+
+		/*
+		 * Upward align the reorder buffer size to fill an entire cache
+		 * line for each queue, to avoid sharing cache lines between
+		 * different queues.
+		 */
+		reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);
+
+		/*
+		 * Allocate here so if allocation fails we can bail out early
+		 * before starting the BA session in the firmware
+		 */
+		baid_data = kzalloc(sizeof(*baid_data) +
+				    mvm->trans->num_rx_queues *
+				    reorder_buf_size,
+				    GFP_KERNEL);
+		if (!baid_data)
+			return -ENOMEM;
+
+		/*
+		 * This division is why we need the above BUILD_BUG_ON(),
+		 * if that doesn't hold then this will not be right.
+		 */
+		baid_data->entries_per_queue =
+			reorder_buf_size / sizeof(baid_data->entries[0]);
+	}
+
+	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
+	cmd.sta_id = mvm_sta->sta_id;
+	cmd.add_modify = STA_MODE_MODIFY;
+	if (start) {
+		cmd.add_immediate_ba_tid = (u8) tid;
+		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
+		cmd.rx_ba_window = cpu_to_le16(buf_size);
+	} else {
+		cmd.remove_immediate_ba_tid = (u8) tid;
+	}
+	cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
+				  STA_MODIFY_REMOVE_BA_TID;
+
+	status = ADD_STA_SUCCESS;
+	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+					  iwl_mvm_add_sta_cmd_size(mvm),
+					  &cmd, &status);
+	if (ret)
+		goto out_free;
+
+	switch (status & IWL_ADD_STA_STATUS_MASK) {
+	case ADD_STA_SUCCESS:
+		IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
+			     start ? "start" : "stopp");
+		break;
+	case ADD_STA_IMMEDIATE_BA_FAILURE:
+		IWL_WARN(mvm, "RX BA Session refused by fw\n");
+		ret = -ENOSPC;
+		break;
+	default:
+		ret = -EIO;
+		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
+			start ? "start" : "stopp", status);
+		break;
+	}
+
+	if (ret)
+		goto out_free;
+
+	if (start) {
+		u8 baid;
+
+		mvm->rx_ba_sessions++;
+
+		if (!iwl_mvm_has_new_rx_api(mvm))
+			return 0;
+
+		if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
+			ret = -EINVAL;
+			goto out_free;
+		}
+		baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
+			    IWL_ADD_STA_BAID_SHIFT);
+		baid_data->baid = baid;
+		baid_data->timeout = timeout;
+		baid_data->last_rx = jiffies;
+		baid_data->rcu_ptr = &mvm->baid_map[baid];
+		timer_setup(&baid_data->session_timer,
+			    iwl_mvm_rx_agg_session_expired, 0);
+		baid_data->mvm = mvm;
+		baid_data->tid = tid;
+		baid_data->sta_id = mvm_sta->sta_id;
+
+		mvm_sta->tid_to_baid[tid] = baid;
+		if (timeout)
+			mod_timer(&baid_data->session_timer,
+				  TU_TO_EXP_TIME(timeout * 2));
+
+		iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
+		/*
+		 * protect the BA data with RCU to cover a case where our
+		 * internal RX sync mechanism will timeout (not that it's
+		 * supposed to happen) and we will free the session data while
+		 * RX is being processed in parallel
+		 */
+		IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
+			     mvm_sta->sta_id, tid, baid);
+		WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
+		rcu_assign_pointer(mvm->baid_map[baid], baid_data);
+	} else  {
+		u8 baid = mvm_sta->tid_to_baid[tid];
+
+		if (mvm->rx_ba_sessions > 0)
+			/* check that restart flow didn't zero the counter */
+			mvm->rx_ba_sessions--;
+		if (!iwl_mvm_has_new_rx_api(mvm))
+			return 0;
+
+		if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
+			return -EINVAL;
+
+		baid_data = rcu_access_pointer(mvm->baid_map[baid]);
+		if (WARN_ON(!baid_data))
+			return -EINVAL;
+
+		/* synchronize all rx queues so we can safely delete */
+		iwl_mvm_free_reorder(mvm, baid_data);
+		del_timer_sync(&baid_data->session_timer);
+		RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
+		kfree_rcu(baid_data, rcu_head);
+		IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
+	}
+	return 0;
+
+out_free:
+	kfree(baid_data);
+	return ret;
+}
+
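+/*
+ * Update the FW's TX aggregation state for a TID via ADD_STA. In DQA mode
+ * the queue itself is kept on aggregation termination; only the TID
+ * disable mask and the TFD queue mask are updated.
+ */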
+int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+		       int tid, u8 queue, bool start)
+{
+	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_mvm_add_sta_cmd cmd = {};
+	int ret;
+	u32 status;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (start) {
+		mvm_sta->tfd_queue_msk |= BIT(queue);
+		mvm_sta->tid_disable_agg &= ~BIT(tid);
+	} else {
+		/* In DQA-mode the queue isn't removed on agg termination */
+		mvm_sta->tid_disable_agg |= BIT(tid);
+	}
+
+	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
+	cmd.sta_id = mvm_sta->sta_id;
+	cmd.add_modify = STA_MODE_MODIFY;
+	if (!iwl_mvm_has_new_tx_api(mvm))
+		cmd.modify_mask = STA_MODIFY_QUEUES;
+	cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
+	cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
+	cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
+
+	status = ADD_STA_SUCCESS;
+	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+					  iwl_mvm_add_sta_cmd_size(mvm),
+					  &cmd, &status);
+	if (ret)
+		return ret;
+
+	switch (status & IWL_ADD_STA_STATUS_MASK) {
+	case ADD_STA_SUCCESS:
+		break;
+	default:
+		ret = -EIO;
+		IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
+			start ? "start" : "stopp", status);
+		break;
+	}
+
+	return ret;
+}
+
+const u8 tid_to_mac80211_ac[] = {
+	IEEE80211_AC_BE,
+	IEEE80211_AC_BK,
+	IEEE80211_AC_BK,
+	IEEE80211_AC_BE,
+	IEEE80211_AC_VI,
+	IEEE80211_AC_VI,
+	IEEE80211_AC_VO,
+	IEEE80211_AC_VO,
+	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
+};
+
+static const u8 tid_to_ucode_ac[] = {
+	AC_BE,
+	AC_BK,
+	AC_BK,
+	AC_BE,
+	AC_VI,
+	AC_VI,
+	AC_VO,
+	AC_VO,
+};
+
+int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_mvm_tid_data *tid_data;
+	u16 normalized_ssn;
+	int txq_id;
+	int ret;
+
+	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
+		return -EINVAL;
+
+	if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
+	    mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
+		IWL_ERR(mvm,
+			"Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
+			mvmsta->tid_data[tid].state);
+		return -ENXIO;
+	}
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
+	    iwl_mvm_has_new_tx_api(mvm)) {
+		u8 ac = tid_to_mac80211_ac[tid];
+
+		ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
+		if (ret)
+			return ret;
+	}
+
+	spin_lock_bh(&mvmsta->lock);
+
+	/* possible race condition - we entered D0i3 while starting agg */
+	if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
+		spin_unlock_bh(&mvmsta->lock);
+		IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
+		return -EIO;
+	}
+
+	spin_lock(&mvm->queue_info_lock);
+
+	/*
+	 * Note the possible cases:
+	 *  1. An enabled TXQ - TXQ needs to become agg'ed
+	 *  2. The TXQ hasn't yet been enabled, so find a free one and mark
+	 *	it as reserved
+	 */
+	txq_id = mvmsta->tid_data[tid].txq_id;
+	if (txq_id == IWL_MVM_INVALID_QUEUE) {
+		txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
+						 IWL_MVM_DQA_MIN_DATA_QUEUE,
+						 IWL_MVM_DQA_MAX_DATA_QUEUE);
+		if (txq_id < 0) {
+			ret = txq_id;
+			IWL_ERR(mvm, "Failed to allocate agg queue\n");
+			goto release_locks;
+		}
+
+		/* TXQ hasn't yet been enabled, so mark it only as reserved */
+		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
+	} else if (unlikely(mvm->queue_info[txq_id].status ==
+			    IWL_MVM_QUEUE_SHARED)) {
+		ret = -ENXIO;
+		IWL_DEBUG_TX_QUEUES(mvm,
+				    "Can't start tid %d agg on shared queue!\n",
+				    tid);
+		goto release_locks;
+	}
+
+	spin_unlock(&mvm->queue_info_lock);
+
+	IWL_DEBUG_TX_QUEUES(mvm,
+			    "AGG for tid %d will be on queue #%d\n",
+			    tid, txq_id);
+
+	tid_data = &mvmsta->tid_data[tid];
+	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
+	tid_data->txq_id = txq_id;
+	*ssn = tid_data->ssn;
+
+	IWL_DEBUG_TX_QUEUES(mvm,
+			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
+			    mvmsta->sta_id, tid, txq_id, tid_data->ssn,
+			    tid_data->next_reclaimed);
+
+	/*
+	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we need to
+	 * align the SSN wrap-around before comparing the relevant values.
+	 */
+	normalized_ssn = tid_data->ssn;
+	if (mvm->trans->cfg->gen2)
+		normalized_ssn &= 0xff;
+
+	if (normalized_ssn == tid_data->next_reclaimed) {
+		tid_data->state = IWL_AGG_STARTING;
+		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+	} else {
+		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
+	}
+
+	ret = 0;
+	goto out;
+
+release_locks:
+	spin_unlock(&mvm->queue_info_lock);
+out:
+	spin_unlock_bh(&mvmsta->lock);
+
+	return ret;
+}
+
+int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			    struct ieee80211_sta *sta, u16 tid, u16 buf_size,
+			    bool amsdu)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+	unsigned int wdg_timeout =
+		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
+	int queue, ret;
+	bool alloc_queue = true;
+	enum iwl_mvm_queue_status queue_status;
+	u16 ssn;
+
+	struct iwl_trans_txq_scd_cfg cfg = {
+		.sta_id = mvmsta->sta_id,
+		.tid = tid,
+		.frame_limit = buf_size,
+		.aggregate = true,
+	};
+
+	/*
+	 * When FW supports TLC_OFFLOAD, it also implements Tx aggregation
+	 * manager, so this function should never be called in this case.
+	 */
+	if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
+		return -EINVAL;
+
+	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
+		     != IWL_MAX_TID_COUNT);
+
+	spin_lock_bh(&mvmsta->lock);
+	ssn = tid_data->ssn;
+	queue = tid_data->txq_id;
+	tid_data->state = IWL_AGG_ON;
+	mvmsta->agg_tids |= BIT(tid);
+	tid_data->ssn = 0xffff;
+	tid_data->amsdu_in_ampdu_allowed = amsdu;
+	spin_unlock_bh(&mvmsta->lock);
+
+	if (iwl_mvm_has_new_tx_api(mvm)) {
+		/*
+		 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
+		 * would have failed, so if we are here there is no need to
+		 * allocate a queue.
+		 * However, if the aggregation size is different from the
+		 * default size, the scheduler should be reconfigured.
+		 * We cannot do this with the new TX API, so return unsupported
+		 * for now, until it is offloaded to firmware.
+		 * Note that if SCD default value changes - this condition
+		 * should be updated as well.
+		 */
+		if (buf_size < IWL_FRAME_LIMIT)
+			return -ENOTSUPP;
+
+		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
+		if (ret)
+			return -EIO;
+		goto out;
+	}
+
+	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
+
+	spin_lock_bh(&mvm->queue_info_lock);
+	queue_status = mvm->queue_info[queue].status;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	/* Maybe there is no need to even alloc a queue... */
+	if (queue_status == IWL_MVM_QUEUE_READY)
+		alloc_queue = false;
+
+	/*
+	 * Only reconfig the SCD for the queue if the window size has
+	 * changed from current (become smaller)
+	 */
+	if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
+		/*
+		 * If reconfiguring an existing queue, it first must be
+		 * drained
+		 */
+		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
+						     BIT(queue));
+		if (ret) {
+			IWL_ERR(mvm,
+				"Error draining queue before reconfig\n");
+			return ret;
+		}
+
+		ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
+					   mvmsta->sta_id, tid,
+					   buf_size, ssn);
+		if (ret) {
+			IWL_ERR(mvm,
+				"Error reconfiguring TXQ #%d\n", queue);
+			return ret;
+		}
+	}
+
+	if (alloc_queue)
+		iwl_mvm_enable_txq(mvm, queue,
+				   vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
+				   &cfg, wdg_timeout);
+
+	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
+	if (queue_status != IWL_MVM_QUEUE_SHARED) {
+		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
+		if (ret)
+			return -EIO;
+	}
+
+	/* No need to mark as reserved */
+	spin_lock_bh(&mvm->queue_info_lock);
+	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+out:
+	/*
+	 * Even though in theory the peer could have different
+	 * aggregation reorder buffer sizes for different sessions,
+	 * our ucode doesn't allow for that and has a global limit
+	 * for each station. Therefore, use the minimum of all the
+	 * aggregation sessions and our default value.
+	 */
+	mvmsta->max_agg_bufsize =
+		min(mvmsta->max_agg_bufsize, buf_size);
+	mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
+
+	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
+		     sta->addr, tid);
+
+	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false);
+}
+
+static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
+					struct iwl_mvm_sta *mvmsta,
+					struct iwl_mvm_tid_data *tid_data)
+{
+	u16 txq_id = tid_data->txq_id;
+
+	if (iwl_mvm_has_new_tx_api(mvm))
+		return;
+
+	spin_lock_bh(&mvm->queue_info_lock);
+	/*
+	 * The TXQ is marked as reserved only if no traffic came through yet.
+	 * This means no traffic has been sent on this TID (agg'd or not), so
+	 * we no longer have use for the queue. Since it hasn't even been
+	 * allocated through iwl_mvm_enable_txq, we can just mark it back as
+	 * free.
+	 */
+	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
+		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
+		tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
+	}
+
+	spin_unlock_bh(&mvm->queue_info_lock);
+}
+
+int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			    struct ieee80211_sta *sta, u16 tid)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+	u16 txq_id;
+	int err;
+
+	/*
+	 * If mac80211 is cleaning its state, then say that we finished since
+	 * our state has been cleared anyway.
+	 */
+	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+		return 0;
+	}
+
+	spin_lock_bh(&mvmsta->lock);
+
+	txq_id = tid_data->txq_id;
+
+	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
+			    mvmsta->sta_id, tid, txq_id, tid_data->state);
+
+	mvmsta->agg_tids &= ~BIT(tid);
+
+	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
+
+	switch (tid_data->state) {
+	case IWL_AGG_ON:
+		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
+
+		IWL_DEBUG_TX_QUEUES(mvm,
+				    "ssn = %d, next_recl = %d\n",
+				    tid_data->ssn, tid_data->next_reclaimed);
+
+		tid_data->ssn = 0xffff;
+		tid_data->state = IWL_AGG_OFF;
+		spin_unlock_bh(&mvmsta->lock);
+
+		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+
+		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
+		return 0;
+	case IWL_AGG_STARTING:
+	case IWL_EMPTYING_HW_QUEUE_ADDBA:
+		/*
+		 * The agg session has been stopped before it was set up. This
+		 * can happen when the AddBA timer times out for example.
+		 */
+
+		/* No barriers since we are under mutex */
+		lockdep_assert_held(&mvm->mutex);
+
+		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+		tid_data->state = IWL_AGG_OFF;
+		err = 0;
+		break;
+	default:
+		IWL_ERR(mvm,
+			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
+			mvmsta->sta_id, tid, tid_data->state);
+		IWL_ERR(mvm,
+			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
+		err = -EINVAL;
+	}
+
+	spin_unlock_bh(&mvmsta->lock);
+
+	return err;
+}
+
+int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			    struct ieee80211_sta *sta, u16 tid)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+	u16 txq_id;
+	enum iwl_mvm_agg_state old_state;
+
+	/*
+	 * First set the agg state to OFF to avoid calling
+	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
+	 */
+	spin_lock_bh(&mvmsta->lock);
+	txq_id = tid_data->txq_id;
+	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
+			    mvmsta->sta_id, tid, txq_id, tid_data->state);
+	old_state = tid_data->state;
+	tid_data->state = IWL_AGG_OFF;
+	mvmsta->agg_tids &= ~BIT(tid);
+	spin_unlock_bh(&mvmsta->lock);
+
+	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
+
+	if (old_state >= IWL_AGG_ON) {
+		iwl_mvm_drain_sta(mvm, mvmsta, true);
+
+		if (iwl_mvm_has_new_tx_api(mvm)) {
+			if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
+						   BIT(tid), 0))
+				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
+			iwl_trans_wait_txq_empty(mvm->trans, txq_id);
+		} else {
+			if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
+				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
+			iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
+		}
+
+		iwl_mvm_drain_sta(mvm, mvmsta, false);
+
+		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
+	}
+
+	return 0;
+}
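+
+/*
+ * Editorial sketch (not part of this file): how mac80211's ampdu_action
+ * callback is expected to dispatch into the aggregation entry points above.
+ * The real dispatch lives in mvm/mac80211.c; this abridged version only
+ * illustrates the mapping and omits locking and error handling.
+ *
+ *	switch (params->action) {
+ *	case IEEE80211_AMPDU_RX_START:
+ *		ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true,
+ *					 buf_size, timeout);
+ *		break;
+ *	case IEEE80211_AMPDU_RX_STOP:
+ *		ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false,
+ *					 buf_size, timeout);
+ *		break;
+ *	case IEEE80211_AMPDU_TX_START:
+ *		ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn);
+ *		break;
+ *	case IEEE80211_AMPDU_TX_OPERATIONAL:
+ *		ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid,
+ *					      buf_size, amsdu);
+ *		break;
+ *	case IEEE80211_AMPDU_TX_STOP_CONT:
+ *		ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid);
+ *		break;
+ *	case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ *	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ *		ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid);
+ *		break;
+ *	}
+ */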
+
+static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
+{
+	int i, max = -1, max_offs = -1;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	/* Pick the unused key offset with the highest 'deleted'
+	 * counter. Every time a key is deleted, all the counters
+	 * are incremented and the one that was just deleted is
+	 * reset to zero. Thus, the highest counter is the one
+	 * that was deleted longest ago. Pick that one.
+	 */
+	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
+		if (test_bit(i, mvm->fw_key_table))
+			continue;
+		if (mvm->fw_key_deleted[i] > max) {
+			max = mvm->fw_key_deleted[i];
+			max_offs = i;
+		}
+	}
+
+	if (max_offs < 0)
+		return STA_KEY_IDX_INVALID;
+
+	return max_offs;
+}
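+
+/*
+ * Worked example (editorial): suppose offsets 0 and 1 are set in
+ * fw_key_table and the free offsets have fw_key_deleted counters
+ * { [2] = 5, [3] = 2, [4] = 9, ... }. Offsets 0 and 1 are skipped, and
+ * offset 4 is returned, since its key was deleted longest ago (highest
+ * counter). If all offsets are in use, STA_KEY_IDX_INVALID is returned.
+ */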
+
+static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
+					       struct ieee80211_vif *vif,
+					       struct ieee80211_sta *sta)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	if (sta)
+		return iwl_mvm_sta_from_mac80211(sta);
+
+	/*
+	 * The device expects GTKs for station interfaces to be
+	 * installed as GTKs for the AP station. If we have no
+	 * station ID, then use AP's station ID.
+	 */
+	if (vif->type == NL80211_IFTYPE_STATION &&
+	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
+		u8 sta_id = mvmvif->ap_sta_id;
+
+		sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
+					    lockdep_is_held(&mvm->mutex));
+
+		/*
+		 * It is possible that the 'sta' parameter is NULL,
+		 * for example when a GTK is removed - the sta_id will then
+		 * be the AP ID, and no station was passed by mac80211.
+		 */
+		if (IS_ERR_OR_NULL(sta))
+			return NULL;
+
+		return iwl_mvm_sta_from_mac80211(sta);
+	}
+
+	return NULL;
+}
+
+static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
+				u32 sta_id,
+				struct ieee80211_key_conf *key, bool mcast,
+				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
+				u8 key_offset, bool mfp)
+{
+	union {
+		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
+		struct iwl_mvm_add_sta_key_cmd cmd;
+	} u = {};
+	__le16 key_flags;
+	int ret;
+	u32 status;
+	u16 keyidx;
+	u64 pn = 0;
+	int i, size;
+	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
+				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
+
+	if (sta_id == IWL_MVM_INVALID_STA)
+		return -EINVAL;
+
+	keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
+		 STA_KEY_FLG_KEYID_MSK;
+	key_flags = cpu_to_le16(keyidx);
+	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
+
+	switch (key->cipher) {
+	case WLAN_CIPHER_SUITE_TKIP:
+		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
+		if (new_api) {
+			memcpy((void *)&u.cmd.tx_mic_key,
+			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
+			       IWL_MIC_KEY_SIZE);
+
+			memcpy((void *)&u.cmd.rx_mic_key,
+			       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
+			       IWL_MIC_KEY_SIZE);
+			pn = atomic64_read(&key->tx_pn);
+
+		} else {
+			u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
+			for (i = 0; i < 5; i++)
+				u.cmd_v1.tkip_rx_ttak[i] =
+					cpu_to_le16(tkip_p1k[i]);
+		}
+		memcpy(u.cmd.common.key, key->key, key->keylen);
+		break;
+	case WLAN_CIPHER_SUITE_CCMP:
+		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
+		memcpy(u.cmd.common.key, key->key, key->keylen);
+		if (new_api)
+			pn = atomic64_read(&key->tx_pn);
+		break;
+	case WLAN_CIPHER_SUITE_WEP104:
+		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
+		/* fall through */
+	case WLAN_CIPHER_SUITE_WEP40:
+		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
+		memcpy(u.cmd.common.key + 3, key->key, key->keylen);
+		break;
+	case WLAN_CIPHER_SUITE_GCMP_256:
+		key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
+		/* fall through */
+	case WLAN_CIPHER_SUITE_GCMP:
+		key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
+		memcpy(u.cmd.common.key, key->key, key->keylen);
+		if (new_api)
+			pn = atomic64_read(&key->tx_pn);
+		break;
+	default:
+		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
+		memcpy(u.cmd.common.key, key->key, key->keylen);
+	}
+
+	if (mcast)
+		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
+	if (mfp)
+		key_flags |= cpu_to_le16(STA_KEY_MFP);
+
+	u.cmd.common.key_offset = key_offset;
+	u.cmd.common.key_flags = key_flags;
+	u.cmd.common.sta_id = sta_id;
+
+	if (new_api) {
+		u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
+		size = sizeof(u.cmd);
+	} else {
+		size = sizeof(u.cmd_v1);
+	}
+
+	status = ADD_STA_SUCCESS;
+	if (cmd_flags & CMD_ASYNC)
+		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
+					   &u.cmd);
+	else
+		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
+						  &u.cmd, &status);
+
+	switch (status) {
+	case ADD_STA_SUCCESS:
+		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
+		break;
+	default:
+		ret = -EIO;
+		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
+		break;
+	}
+
+	return ret;
+}
+
+static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
+				 struct ieee80211_key_conf *keyconf,
+				 u8 sta_id, bool remove_key)
+{
+	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
+
+	/* verify the key details match the required command's expectations */
+	if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
+		    (keyconf->keyidx != 4 && keyconf->keyidx != 5) ||
+		    (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
+		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
+		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
+		return -EINVAL;
+
+	if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
+		    keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
+		return -EINVAL;
+
+	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
+	igtk_cmd.sta_id = cpu_to_le32(sta_id);
+
+	if (remove_key) {
+		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
+	} else {
+		struct ieee80211_key_seq seq;
+		const u8 *pn;
+
+		switch (keyconf->cipher) {
+		case WLAN_CIPHER_SUITE_AES_CMAC:
+			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
+			break;
+		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
+		if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
+			igtk_cmd.ctrl_flags |=
+				cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
+		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
+		pn = seq.aes_cmac.pn;
+		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
+						       ((u64) pn[4] << 8) |
+						       ((u64) pn[3] << 16) |
+						       ((u64) pn[2] << 24) |
+						       ((u64) pn[1] << 32) |
+						       ((u64) pn[0] << 40));
+	}
+
+	IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n",
+		       remove_key ? "removing" : "installing",
+		       igtk_cmd.sta_id);
+
+	if (!iwl_mvm_has_new_rx_api(mvm)) {
+		struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
+			.ctrl_flags = igtk_cmd.ctrl_flags,
+			.key_id = igtk_cmd.key_id,
+			.sta_id = igtk_cmd.sta_id,
+			.receive_seq_cnt = igtk_cmd.receive_seq_cnt
+		};
+
+		memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
+		       ARRAY_SIZE(igtk_cmd_v1.igtk));
+		return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
+					    sizeof(igtk_cmd_v1), &igtk_cmd_v1);
+	}
+	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
+				    sizeof(igtk_cmd), &igtk_cmd);
+}
+
+
+static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
+				       struct ieee80211_vif *vif,
+				       struct ieee80211_sta *sta)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	if (sta)
+		return sta->addr;
+
+	if (vif->type == NL80211_IFTYPE_STATION &&
+	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
+		u8 sta_id = mvmvif->ap_sta_id;
+		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+						lockdep_is_held(&mvm->mutex));
+		return sta->addr;
+	}
+
+	return NULL;
+}
+
+static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
+				 struct ieee80211_vif *vif,
+				 struct ieee80211_sta *sta,
+				 struct ieee80211_key_conf *keyconf,
+				 u8 key_offset,
+				 bool mcast)
+{
+	int ret;
+	const u8 *addr;
+	struct ieee80211_key_seq seq;
+	u16 p1k[5];
+	u32 sta_id;
+	bool mfp = false;
+
+	if (sta) {
+		struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
+		sta_id = mvm_sta->sta_id;
+		mfp = sta->mfp;
+	} else if (vif->type == NL80211_IFTYPE_AP &&
+		   !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+		sta_id = mvmvif->mcast_sta.sta_id;
+	} else {
+		IWL_ERR(mvm, "Failed to find station id\n");
+		return -EINVAL;
+	}
+
+	switch (keyconf->cipher) {
+	case WLAN_CIPHER_SUITE_TKIP:
+		if (vif->type == NL80211_IFTYPE_AP) {
+			ret = -EINVAL;
+			break;
+		}
+		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
+		/* get phase 1 key from mac80211 */
+		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
+		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
+		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
+					   seq.tkip.iv32, p1k, 0, key_offset,
+					   mfp);
+		break;
+	case WLAN_CIPHER_SUITE_CCMP:
+	case WLAN_CIPHER_SUITE_WEP40:
+	case WLAN_CIPHER_SUITE_WEP104:
+	case WLAN_CIPHER_SUITE_GCMP:
+	case WLAN_CIPHER_SUITE_GCMP_256:
+		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
+					   0, NULL, 0, key_offset, mfp);
+		break;
+	default:
+		ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
+					   0, NULL, 0, key_offset, mfp);
+	}
+
+	return ret;
+}
+
+static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
+				    struct ieee80211_key_conf *keyconf,
+				    bool mcast)
+{
+	union {
+		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
+		struct iwl_mvm_add_sta_key_cmd cmd;
+	} u = {};
+	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
+				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
+	__le16 key_flags;
+	int ret, size;
+	u32 status;
+
+	/* This is a valid situation for GTK removal */
+	if (sta_id == IWL_MVM_INVALID_STA)
+		return 0;
+
+	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
+				 STA_KEY_FLG_KEYID_MSK);
+	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
+	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
+
+	if (mcast)
+		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
+
+	/*
+	 * The fields assigned here are in the same location at the start
+	 * of the command, so we can do this union trick.
+	 */
+	u.cmd.common.key_flags = key_flags;
+	u.cmd.common.key_offset = keyconf->hw_key_idx;
+	u.cmd.common.sta_id = sta_id;
+
+	size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
+
+	status = ADD_STA_SUCCESS;
+	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
+					  &status);
+
+	switch (status) {
+	case ADD_STA_SUCCESS:
+		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
+		break;
+	default:
+		ret = -EIO;
+		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
+		break;
+	}
+
+	return ret;
+}
+
+int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
+			struct ieee80211_vif *vif,
+			struct ieee80211_sta *sta,
+			struct ieee80211_key_conf *keyconf,
+			u8 key_offset)
+{
+	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
+	struct iwl_mvm_sta *mvm_sta;
+	u8 sta_id = IWL_MVM_INVALID_STA;
+	int ret;
+	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (vif->type != NL80211_IFTYPE_AP ||
+	    keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+		/* Get the station id from the mvm local station table */
+		mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
+		if (!mvm_sta) {
+			IWL_ERR(mvm, "Failed to find station\n");
+			return -EINVAL;
+		}
+		sta_id = mvm_sta->sta_id;
+
+		/*
+		 * It is possible that the 'sta' parameter is NULL, and thus
+		 * there is a need to retrieve the sta from the local station
+		 * table.
+		 */
+		if (!sta) {
+			sta = rcu_dereference_protected(
+				mvm->fw_id_to_mac_id[sta_id],
+				lockdep_is_held(&mvm->mutex));
+			if (IS_ERR_OR_NULL(sta)) {
+				IWL_ERR(mvm, "Invalid station id\n");
+				return -EINVAL;
+			}
+		}
+
+		if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
+			return -EINVAL;
+	} else {
+		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+		sta_id = mvmvif->mcast_sta.sta_id;
+	}
+
+	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
+		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
+		goto end;
+	}
+
+	/* If the key_offset is not pre-assigned, we need to find a
+	 * new offset to use.  In normal cases, the offset is not
+	 * pre-assigned, but during HW_RESTART we want to reuse the
+	 * same indices, so we pass them when this function is called.
+	 *
+	 * In D3 entry, we need to hardcode the indices (because the
+	 * firmware hardcodes the PTK offset to 0).  In this case, we
+	 * need to make sure we don't overwrite the hw_key_idx in the
+	 * keyconf structure, because otherwise we cannot configure
+	 * the original ones back when resuming.
+	 */
+	if (key_offset == STA_KEY_IDX_INVALID) {
+		key_offset  = iwl_mvm_set_fw_key_idx(mvm);
+		if (key_offset == STA_KEY_IDX_INVALID)
+			return -ENOSPC;
+		keyconf->hw_key_idx = key_offset;
+	}
+
+	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
+	if (ret)
+		goto end;
+
+	/*
+	 * For WEP, the same key is used for multicast and unicast. Upload it
+	 * again, using the same key offset, and now pointing the other one
+	 * to the same key slot (offset).
+	 * If this fails, remove the original as well.
+	 */
+	if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+	     keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
+	    sta) {
+		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
+					    key_offset, !mcast);
+		if (ret) {
+			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
+			goto end;
+		}
+	}
+
+	__set_bit(key_offset, mvm->fw_key_table);
+
+end:
+	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
+		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
+		      sta ? sta->addr : zero_addr, ret);
+	return ret;
+}
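+
+/*
+ * Editorial sketch: how a caller is expected to pick @key_offset, per the
+ * comment above. Outside of HW restart, STA_KEY_IDX_INVALID is passed and
+ * iwl_mvm_set_fw_key_idx() picks a free slot; during restart, the previously
+ * assigned hw_key_idx is reused. Illustrative only - the real logic lives in
+ * the mac80211 set_key flow.
+ *
+ *	u8 key_offset = STA_KEY_IDX_INVALID;
+ *
+ *	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ *		key_offset = keyconf->hw_key_idx;
+ *
+ *	ret = iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset);
+ */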
+
+int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
+			   struct ieee80211_vif *vif,
+			   struct ieee80211_sta *sta,
+			   struct ieee80211_key_conf *keyconf)
+{
+	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
+	struct iwl_mvm_sta *mvm_sta;
+	u8 sta_id = IWL_MVM_INVALID_STA;
+	int ret, i;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	/* Get the station from the mvm local station table */
+	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
+	if (mvm_sta)
+		sta_id = mvm_sta->sta_id;
+	else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
+		sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;
+
+	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
+		      keyconf->keyidx, sta_id);
+
+	if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+			keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256))
+		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
+
+	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
+		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
+			keyconf->hw_key_idx);
+		return -ENOENT;
+	}
+
+	/* track which key was deleted last */
+	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
+		if (mvm->fw_key_deleted[i] < U8_MAX)
+			mvm->fw_key_deleted[i]++;
+	}
+	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
+
+	if (sta && !mvm_sta) {
+		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
+		return 0;
+	}
+
+	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
+	if (ret)
+		return ret;
+
+	/* delete WEP key twice to get rid of (now useless) offset */
+	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
+		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);
+
+	return ret;
+}
+
+void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
+			     struct ieee80211_vif *vif,
+			     struct ieee80211_key_conf *keyconf,
+			     struct ieee80211_sta *sta, u32 iv32,
+			     u16 *phase1key)
+{
+	struct iwl_mvm_sta *mvm_sta;
+	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
+	bool mfp = sta ? sta->mfp : false;
+
+	rcu_read_lock();
+
+	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
+	if (WARN_ON_ONCE(!mvm_sta))
+		goto unlock;
+	iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
+			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
+			     mfp);
+
+ unlock:
+	rcu_read_unlock();
+}
+
+void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
+				struct ieee80211_sta *sta)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_mvm_add_sta_cmd cmd = {
+		.add_modify = STA_MODE_MODIFY,
+		.sta_id = mvmsta->sta_id,
+		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
+		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
+	};
+	int ret;
+
+	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
+				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
+	if (ret)
+		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
+}
+
+void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
+				       struct ieee80211_sta *sta,
+				       enum ieee80211_frame_release_type reason,
+				       u16 cnt, u16 tids, bool more_data,
+				       bool single_sta_queue)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_mvm_add_sta_cmd cmd = {
+		.add_modify = STA_MODE_MODIFY,
+		.sta_id = mvmsta->sta_id,
+		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
+		.sleep_tx_count = cpu_to_le16(cnt),
+		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
+	};
+	int tid, ret;
+	unsigned long _tids = tids;
+
+	/* convert TIDs to ACs - we don't support TSPEC so that's OK
+	 * Note that this field is reserved and unused by firmware not
+	 * supporting GO uAPSD, so it's safe to always do this.
+	 */
+	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
+		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
+
+	/* If we're releasing frames from aggregation or dqa queues then check
+	 * if all the queues that we're releasing frames from, combined, have:
+	 *  - more frames than the service period, in which case more_data
+	 *    needs to be set
+	 *  - fewer than 'cnt' frames, in which case we need to adjust the
+	 *    firmware command (but do that unconditionally)
+	 */
+	if (single_sta_queue) {
+		int remaining = cnt;
+		int sleep_tx_count;
+
+		spin_lock_bh(&mvmsta->lock);
+		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
+			struct iwl_mvm_tid_data *tid_data;
+			u16 n_queued;
+
+			tid_data = &mvmsta->tid_data[tid];
+
+			n_queued = iwl_mvm_tid_queued(mvm, tid_data);
+			if (n_queued > remaining) {
+				more_data = true;
+				remaining = 0;
+				break;
+			}
+			remaining -= n_queued;
+		}
+		sleep_tx_count = cnt - remaining;
+		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
+			mvmsta->sleep_tx_count = sleep_tx_count;
+		spin_unlock_bh(&mvmsta->lock);
+
+		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
+		if (WARN_ON(cnt - remaining == 0)) {
+			ieee80211_sta_eosp(sta);
+			return;
+		}
+	}
+
+	/* Note: this is ignored by firmware not supporting GO uAPSD */
+	if (more_data)
+		cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;
+
+	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
+		mvmsta->next_status_eosp = true;
+		cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
+	} else {
+		cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
+	}
+
+	/* block the Tx queues until the FW updated the sleep Tx count */
+	iwl_trans_block_txq_ptrs(mvm->trans, true);
+
+	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
+				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
+				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
+	if (ret)
+		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
+}
+
+void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
+			   struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
+	struct ieee80211_sta *sta;
+	u32 sta_id = le32_to_cpu(notif->sta_id);
+
+	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
+		return;
+
+	rcu_read_lock();
+	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+	if (!IS_ERR_OR_NULL(sta))
+		ieee80211_sta_eosp(sta);
+	rcu_read_unlock();
+}
+
+void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
+				   struct iwl_mvm_sta *mvmsta, bool disable)
+{
+	struct iwl_mvm_add_sta_cmd cmd = {
+		.add_modify = STA_MODE_MODIFY,
+		.sta_id = mvmsta->sta_id,
+		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
+		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
+		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
+	};
+	int ret;
+
+	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
+				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
+	if (ret)
+		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
+}
+
+void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
+				      struct ieee80211_sta *sta,
+				      bool disable)
+{
+	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
+	spin_lock_bh(&mvm_sta->lock);
+
+	if (mvm_sta->disable_tx == disable) {
+		spin_unlock_bh(&mvm_sta->lock);
+		return;
+	}
+
+	mvm_sta->disable_tx = disable;
+
+	/* Tell mac80211 to start/stop queuing tx for this station */
+	ieee80211_sta_block_awake(mvm->hw, sta, disable);
+
+	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
+
+	spin_unlock_bh(&mvm_sta->lock);
+}
+
+static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
+					      struct iwl_mvm_vif *mvmvif,
+					      struct iwl_mvm_int_sta *sta,
+					      bool disable)
+{
+	u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
+	struct iwl_mvm_add_sta_cmd cmd = {
+		.add_modify = STA_MODE_MODIFY,
+		.sta_id = sta->sta_id,
+		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
+		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
+		.mac_id_n_color = cpu_to_le32(id),
+	};
+	int ret;
+
+	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0,
+				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
+	if (ret)
+		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
+}
+
+void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
+				       struct iwl_mvm_vif *mvmvif,
+				       bool disable)
+{
+	struct ieee80211_sta *sta;
+	struct iwl_mvm_sta *mvm_sta;
+	int i;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	/* Block/unblock all the stations of the given mvmvif */
+	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+						lockdep_is_held(&mvm->mutex));
+		if (IS_ERR_OR_NULL(sta))
+			continue;
+
+		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+		if (mvm_sta->mac_id_n_color !=
+		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
+			continue;
+
+		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
+	}
+
+	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
+		return;
+
+	/* Need to block/unblock also multicast station */
+	if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
+		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
+						  &mvmvif->mcast_sta, disable);
+
+	/*
+	 * Only unblock the broadcast station (FW blocks it for immediate
+	 * quiet, not the driver)
+	 */
+	if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
+		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
+						  &mvmvif->bcast_sta, disable);
+}
+
+void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm_sta *mvmsta;
+
+	rcu_read_lock();
+
+	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
+
+	if (!WARN_ON(!mvmsta))
+		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
+
+	rcu_read_unlock();
+}
+
+u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
+{
+	u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
+
+	/*
+	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we need to
+	 * align the SSN wrap-around before comparing the relevant values.
+	 */
+	if (mvm->trans->cfg->gen2)
+		sn &= 0xff;
+
+	return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
new file mode 100644
index 0000000..0fc2111
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -0,0 +1,579 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __sta_h__
+#define __sta_h__
+
+#include <linux/spinlock.h>
+#include <net/mac80211.h>
+#include <linux/wait.h>
+
+#include "iwl-trans.h" /* for IWL_MAX_TID_COUNT */
+#include "fw-api.h" /* IWL_MVM_STATION_COUNT */
+#include "rs.h"
+
+struct iwl_mvm;
+struct iwl_mvm_vif;
+
+/**
+ * DOC: DQA - Dynamic Queue Allocation - introduction
+ *
+ * Dynamic Queue Allocation (AKA "DQA") is a feature implemented in iwlwifi
+ * driver to allow dynamic allocation of queues on-demand, rather than allocate
+ * them statically ahead of time. Ideally, we would like to allocate one queue
+ * per RA/TID, thus allowing an AP - for example - to send BE traffic to STA2
+ * even if it also needs to send traffic to a sleeping STA1, without being
+ * blocked by the sleeping station.
+ *
+ * Although the queues in DQA mode are dynamically allocated, there are still
+ * some queues that are statically allocated:
+ *	TXQ #0 - command queue
+ *	TXQ #1 - aux frames
+ *	TXQ #2 - P2P device frames
+ *	TXQ #3 - P2P GO/SoftAP GCAST/BCAST frames
+ *	TXQ #4 - BSS DATA frames queue
+ *	TXQ #5-8 - Non-QoS and MGMT frames queue pool
+ *	TXQ #9 - P2P GO/SoftAP probe responses
+ *	TXQ #10-31 - DATA frames queue pool
+ * The queues are dynamically taken from either the MGMT frames queue pool or
+ * the DATA frames one. See %iwl_mvm_dqa_txq for more information on each
+ * queue.
+ *
+ * When a frame for a previously unseen RA/TID comes in, it needs to be deferred
+ * until a queue is allocated for it; only then can it be TXed. Therefore, it
+ * is placed into %iwl_mvm_tid_data.deferred_tx_frames, and a worker called
+ * %mvm->add_stream_wk later allocates the queues and TXes the deferred frames.
+ *
+ * For convenience, MGMT is considered as if it has TID=8, and goes to the
+ * MGMT queues in the pool. If no free MGMT queue is left to allocate, a queue
+ * will be allocated from the DATA pool instead. Since QoS NDPs can create a
+ * problem for aggregations, they too will use an MGMT queue.
+ *
+ * When adding a STA, a DATA queue is reserved for it so that it can TX from
+ * it. If no such free queue exists for reserving, the STA addition will fail.
+ *
+ * If the DATA queue pool gets exhausted, no new STA will be accepted, and if a
+ * new RA/TID comes in for an existing STA, one of the STA's queues will become
+ * shared and will serve more than the single TID (but always for the same RA!).
+ *
+ * When a RA/TID needs to become aggregated, no new queue needs to be
+ * allocated; the existing queue is simply marked as aggregated via the
+ * ADD_STA command. Note, however, that a shared queue cannot be aggregated;
+ * only after the other TIDs become inactive and are removed can the queue be
+ * reconfigured and become aggregated.
+ *
+ * When removing a station, its queues are returned to the pool for reuse. Here
+ * we also need to make sure that we are synced with the worker thread that TXes
+ * the deferred frames so we don't get into a situation where the queues are
+ * removed and then the worker puts deferred frames onto the released queues or
+ * tries to allocate new queues for a STA we don't need anymore.
+ */
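+
+/*
+ * Editorial sketch of the deferred-TX path described above - illustrative
+ * only, with locking omitted; the real flow lives in the Tx path and in
+ * %mvm->add_stream_wk:
+ *
+ *	if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE) {
+ *		skb_queue_tail(&tid_data->deferred_tx_frames, skb);
+ *		mvmsta->deferred_traffic_tid_map |= BIT(tid);
+ *		schedule_work(&mvm->add_stream_wk);
+ *		return;
+ *	}
+ */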
+
+/**
+ * DOC: station table - introduction
+ *
+ * The station table is a list of data structures that represent the stations.
+ * In STA/P2P client mode, the driver will hold one station for the AP/GO.
+ * In GO/AP mode, the driver will have as many stations as associated clients.
+ * All these stations are reflected in the fw's station table. The driver
+ * keeps the fw's station table up to date with the ADD_STA command. Stations
+ * can be removed by the REMOVE_STA command.
+ *
+ * All the data related to a station is held in the structure %iwl_mvm_sta
+ * which is embedded in mac80211's %ieee80211_sta (in the drv_priv area).
+ * This data includes the index of the station in the fw, per tid information
+ * (sequence numbers, Block-ack state machine, etc...). The stations are
+ * created and deleted by the %sta_state callback from %ieee80211_ops.
+ *
+ * The driver holds a map, %fw_id_to_mac_id, that allows fetching a
+ * %ieee80211_sta (and the %iwl_mvm_sta embedded into it) based on a fw
+ * station index. That way, the driver is able to get the TID-related data in
+ * O(1) in time-sensitive paths (Tx / Tx response / BA notification). These
+ * paths are triggered by the fw, and the driver needs to get a pointer to the
+ * %ieee80211_sta structure. This map helps to get that pointer quickly.
+ */
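+
+/*
+ * Editorial sketch of the O(1) lookup described above (it mirrors, e.g.,
+ * iwl_mvm_rx_eosp_notif() in sta.c):
+ *
+ *	rcu_read_lock();
+ *	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+ *	if (!IS_ERR_OR_NULL(sta))
+ *		mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ *	rcu_read_unlock();
+ */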
+
+/**
+ * DOC: station table - locking
+ *
+ * As stated before, the station is created / deleted by mac80211's %sta_state
+ * callback from %ieee80211_ops, which can sleep. The next paragraph explains
+ * the locking of a single station; the ones after it relate to the station
+ * table.
+ *
+ * The station holds the sequence number per tid, so this data needs to be
+ * accessed in the Tx path (which is softIRQ). It also holds the Block-Ack
+ * information (the state machine and the logic that checks if the queues
+ * were drained), so it also needs to be accessible from the Tx response flow.
+ * In short, the station needs to be accessed from sleepable context as well
+ * as from tasklets, so the station itself needs a spinlock.
+ *
+ * The writers of %fw_id_to_mac_id map are serialized by the global mutex of
+ * the mvm op_mode. This is possible since %sta_state can sleep.
+ * The pointers in this map are RCU protected, hence we won't replace the
+ * station while we have Tx / Tx response / BA notification running.
+ *
+ * If a station is deleted while it still has packets in its A-MPDU queues,
+ * then the reclaim flow will notice that there is no station in the map for
+ * sta_id and it will dump the responses.
+ */
+
+/**
+ * DOC: station table - internal stations
+ *
+ * The FW needs a few internal stations that are not reflected in
+ * mac80211, such as broadcast station in AP / GO mode, or AUX sta for
+ * scanning and P2P device (during the GO negotiation).
+ * For these kinds of stations we have the %iwl_mvm_int_sta struct, which
+ * holds the data relevant for them from both %iwl_mvm_sta and %ieee80211_sta.
+ * Usually the data for these stations is static, so no locking is required,
+ * and there is no TID data, as that is also not needed.
+ * One thing to note is that these stations have an ID in the fw, but not
+ * in mac80211. In order to "reserve" a sta_id for them in %fw_id_to_mac_id,
+ * we fill ERR_PTR(-EINVAL) into this mapping, and all other dereferencing of
+ * pointers from this mapping needs to check that the value is not an error
+ * or NULL.
+ *
+ * Currently there is only one auxiliary station for scanning, set up during
+ * init.
+ */
+
+/**
+ * DOC: station table - AP Station in STA mode
+ *
+ * %iwl_mvm_vif includes the index of the AP station in the fw's STA table:
+ * %ap_sta_id. To get the pointer to the corresponding %ieee80211_sta,
+ * &fw_id_to_mac_id can be used. Due to the way the fw works, we must not remove
+ * the AP station from the fw before setting the MAC context as unassociated.
+ * Hence, %fw_id_to_mac_id[%ap_sta_id] will be NULLed when the AP station is
+ * removed by mac80211, but the station won't be removed in the fw until the
+ * VIF is set as unassociated. Then, %ap_sta_id will be invalidated.
+ */
+
+/**
+ * DOC: station table - Drain vs. Flush
+ *
+ * Flush means that all the frames in the SCD queue are dumped regardless of
+ * the station to which they were sent. We do that when we disassociate and
+ * before we remove the STA of the AP. The flush can be done synchronously
+ * against the fw.
+ * Drain means that the fw will drop all the frames sent to a specific station.
+ * This is useful when a client (if we are IBSS / GO or AP) disassociates.
+ */
+
+/**
+ * DOC: station table - fw restart
+ *
+ * When the fw asserts, or we have any other issue that requires to reset the
+ * driver, we require mac80211 to reconfigure the driver. Since the private
+ * data of the stations is embedded in mac80211's %ieee80211_sta, that data
+ * will not be zeroed and needs to be reinitialized manually.
+ * %IWL_MVM_STATUS_IN_HW_RESTART is set during restart and hints that we
+ * must not allocate a new sta_id but reuse the previous one. This
+ * means that the stations being re-added after the reset will have the same
+ * place in the fw as before the reset. We do need to zero the %fw_id_to_mac_id
+ * map, since the stations aren't in the fw any more. Internal stations that
+ * are not added by mac80211 will be re-added in the init flow that is called
+ * after the restart: mac80211 calls %iwl_mvm_mac_start, which calls
+ * %iwl_mvm_up.
+ */
+
+/**
+ * DOC: AP mode - PS
+ *
+ * When a station is asleep, the fw will set it as "asleep". All frames on
+ * shared queues (i.e. non-aggregation queues) to that station will be dropped
+ * by the fw (%TX_STATUS_FAIL_DEST_PS failure code).
+ *
+ * AMPDUs are in a separate queue that is stopped by the fw. We just need to
+ * let mac80211 know when there are frames in these queues so that it can
+ * properly handle trigger frames.
+ *
+ * When a trigger frame is received, mac80211 tells the driver to send frames
+ * from the AMPDU queues or sends frames to non-aggregation queues itself,
+ * depending on which ACs are delivery-enabled and what TID has frames to
+ * transmit. Note that mac80211 has all the knowledge (all the non-agg
+ * frames are buffered / filtered, and the driver tells mac80211 about agg
+ * frames). The driver needs to tell the fw to let frames out even if the
+ * station is asleep. This is done by %iwl_mvm_sta_modify_sleep_tx_count.
+ *
+ * When we receive a frame from that station with PM bit unset, the driver
+ * needs to let the fw know that this station isn't asleep any more. This is
+ * done by %iwl_mvm_sta_modify_ps_wake in response to mac80211 signaling the
+ * station's wakeup.
+ *
+ * For a GO, the Service Period might be cut short due to an absence period
+ * of the GO. In this (and all other cases) the firmware notifies us with the
+ * EOSP_NOTIFICATION, and we notify mac80211 of that. Further frames that we
+ * already sent to the device will be rejected again.
+ *
+ * See also "AP support for powersaving clients" in mac80211.h.
+ */
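+
+/*
+ * Editorial sketch of the driver-side calls described above (abridged;
+ * the real call sites are in the mac80211 ops). When mac80211 asks to
+ * release @cnt frames on @tids during a service period:
+ *
+ *	iwl_mvm_sta_modify_sleep_tx_count(mvm, sta,
+ *					  IEEE80211_FRAME_RELEASE_UAPSD,
+ *					  cnt, tids, more_data,
+ *					  single_sta_queue);
+ *
+ * and when a frame with the PM bit clear arrives (the station woke up):
+ *
+ *	iwl_mvm_sta_modify_ps_wake(mvm, sta);
+ */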
+
+/**
+ * enum iwl_mvm_agg_state
+ *
+ * The state machine of the BA agreement establishment / tear down.
+ * These states relate to a specific RA / TID.
+ *
+ * @IWL_AGG_OFF: aggregation is not used
+ * @IWL_AGG_QUEUED: aggregation start work has been queued
+ * @IWL_AGG_STARTING: aggregation is starting (between start and oper)
+ * @IWL_AGG_ON: aggregation session is up
+ * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the
+ *	HW queue to be emptied of packets for this RA / TID.
+ * @IWL_EMPTYING_HW_QUEUE_DELBA: tearing down a BA session - waiting for the
+ *	HW queue to be emptied of packets for this RA / TID.
+ */
+enum iwl_mvm_agg_state {
+	IWL_AGG_OFF = 0,
+	IWL_AGG_QUEUED,
+	IWL_AGG_STARTING,
+	IWL_AGG_ON,
+	IWL_EMPTYING_HW_QUEUE_ADDBA,
+	IWL_EMPTYING_HW_QUEUE_DELBA,
+};
+
+/**
+ * struct iwl_mvm_tid_data - holds the states for each RA / TID
+ * @deferred_tx_frames: deferred TX frames for this RA/TID
+ * @seq_number: the next WiFi sequence number to use
+ * @next_reclaimed: the WiFi sequence number of the next packet to be acked.
+ *	This is basically (last acked packet++).
+ * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
+ *	Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
+ * @lq_color: the color of the LQ command as it appears in tx response.
+ * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed.
+ * @state: state of the BA agreement establishment / tear down.
+ * @txq_id: Tx queue used by the BA session / DQA
+ * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
+ *	the first packet to be sent in legacy HW queue in Tx AGG stop flow.
+ *	Basically when next_reclaimed reaches ssn, we can tell mac80211 that
+ *	we are ready to finish the Tx AGG stop / start flow.
+ * @tx_time: medium time consumed by this A-MPDU
+ * @is_tid_active: has this TID sent traffic in the last
+ *	%IWL_MVM_DQA_QUEUE_TIMEOUT time period. If %txq_id is invalid, this
+ *	field should be ignored.
+ * @tpt_meas_start: time at which the throughput measurement started; reset
+ *	every second (HZ jiffies)
+ * @tx_count_last: number of frames transmitted during the last second
+ * @tx_count: counts the number of frames transmitted since the last reset of
+ *	 tpt_meas_start
+ */
+struct iwl_mvm_tid_data {
+	struct sk_buff_head deferred_tx_frames;
+	u16 seq_number;
+	u16 next_reclaimed;
+	/* The rest is Tx AGG related */
+	u32 rate_n_flags;
+	u8 lq_color;
+	bool amsdu_in_ampdu_allowed;
+	enum iwl_mvm_agg_state state;
+	u16 txq_id;
+	u16 ssn;
+	u16 tx_time;
+	bool is_tid_active;
+	unsigned long tpt_meas_start;
+	u32 tx_count_last;
+	u32 tx_count;
+};
+
+struct iwl_mvm_key_pn {
+	struct rcu_head rcu_head;
+	struct {
+		u8 pn[IWL_MAX_TID_COUNT][IEEE80211_CCMP_PN_LEN];
+	} ____cacheline_aligned_in_smp q[];
+};
+
+struct iwl_mvm_delba_data {
+	u32 baid;
+} __packed;
+
+struct iwl_mvm_delba_notif {
+	struct iwl_mvm_internal_rxq_notif metadata;
+	struct iwl_mvm_delba_data delba;
+} __packed;
+
+/**
+ * struct iwl_mvm_rxq_dup_data - per station per rx queue data
+ * @last_seq: last sequence per tid for duplicate packet detection
+ * @last_sub_frame: last subframe packet
+ */
+struct iwl_mvm_rxq_dup_data {
+	__le16 last_seq[IWL_MAX_TID_COUNT + 1];
+	u8 last_sub_frame[IWL_MAX_TID_COUNT + 1];
+} ____cacheline_aligned_in_smp;
+
+/**
+ * struct iwl_mvm_sta - representation of a station in the driver
+ * @sta_id: the index of the station in the fw (will be replaced by id_n_color)
+ * @tfd_queue_msk: the tfd queues used by the station
+ * @mac_id_n_color: the MAC context this station is linked to
+ * @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for
+ *	tid.
+ * @max_agg_bufsize: the maximal size of the AGG buffer for this station
+ * @sta_type: station type
+ * @sta_state: station state according to enum %ieee80211_sta_state
+ * @bt_reduced_txpower: is reduced tx power enabled for this station
+ * @next_status_eosp: the next reclaimed packet is a PS-Poll response and
+ *	we need to signal the EOSP
+ * @lock: lock to protect the whole struct. Since %tid_data is accessed from
+ *	the Tx and Tx response flows, it needs a spinlock.
+ * @tid_data: per tid data + mgmt. Look at %iwl_mvm_tid_data.
+ * @tid_to_baid: a simple map of TID to baid
+ * @lq_sta: holds rate scaling data, either for the case when RS is done in
+ *	the driver - %rs_drv or in the FW - %rs_fw.
+ * @reserved_queue: the queue reserved for this STA for DQA purposes.
+ *	Every STA is given one reserved queue to allow it to operate. If no
+ *	such queue can be guaranteed, the STA addition will fail.
+ * @tx_protection: reference counter for controlling the Tx protection.
+ * @tt_tx_protection: is Tx protection enabled due to thermal throttling?
+ * @disable_tx: is tx to this STA disabled?
+ * @amsdu_enabled: bitmap of TX AMSDU allowed TIDs.
+ *	In case TLC offload is not active it is either 0xFFFF or 0.
+ * @max_amsdu_len: max AMSDU length
+ * @agg_tids: bitmap of tids whose status is operational aggregated (IWL_AGG_ON)
+ * @sleep_tx_count: the number of frames that we told the firmware to let out
+ *	even when that station is asleep. This is useful in case the queue
+ *	gets empty before all the frames were sent, which can happen when
+ *	we are sending frames from an AMPDU queue and there was a hole in
+ *	the BA window. To be used for UAPSD only.
+ * @ptk_pn: per-queue PTK PN data structures
+ * @dup_data: per queue duplicate packet detection data
+ * @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID
+ *
+ * When mac80211 creates a station it reserves some space (hw->sta_data_size)
+ * in the structure for use by the driver. This structure is placed in that
+ * space.
+ *
+ */
+struct iwl_mvm_sta {
+	u32 sta_id;
+	u32 tfd_queue_msk;
+	u32 mac_id_n_color;
+	u16 tid_disable_agg;
+	u16 max_agg_bufsize;
+	enum iwl_sta_type sta_type;
+	enum ieee80211_sta_state sta_state;
+	bool bt_reduced_txpower;
+	bool next_status_eosp;
+	spinlock_t lock;
+	struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT + 1];
+	u8 tid_to_baid[IWL_MAX_TID_COUNT];
+	union {
+		struct iwl_lq_sta_rs_fw rs_fw;
+		struct iwl_lq_sta rs_drv;
+	} lq_sta;
+	struct ieee80211_vif *vif;
+	struct iwl_mvm_key_pn __rcu *ptk_pn[4];
+	struct iwl_mvm_rxq_dup_data *dup_data;
+
+	u16 deferred_traffic_tid_map;
+
+	u8 reserved_queue;
+
+	/* Temporary, until the new TLC will control the Tx protection */
+	s8 tx_protection;
+	bool tt_tx_protection;
+
+	bool disable_tx;
+	u16 amsdu_enabled;
+	u16 max_amsdu_len;
+	bool sleeping;
+	u8 agg_tids;
+	u8 sleep_tx_count;
+	u8 avg_energy;
+};
+
+u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data);
+
+static inline struct iwl_mvm_sta *
+iwl_mvm_sta_from_mac80211(struct ieee80211_sta *sta)
+{
+	return (void *)sta->drv_priv;
+}
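+
+/*
+ * Editorial note: the cast above relies on mac80211 reserving
+ * sizeof(struct iwl_mvm_sta) bytes of drv_priv in each %ieee80211_sta,
+ * which the driver requests at registration time (illustrative):
+ *
+ *	hw->sta_data_size = sizeof(struct iwl_mvm_sta);
+ */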
+
+/**
+ * struct iwl_mvm_int_sta - representation of an internal station (auxiliary or
+ * broadcast)
+ * @sta_id: the index of the station in the fw (will be replaced by id_n_color)
+ * @type: station type
+ * @tfd_queue_msk: the tfd queues used by the station
+ */
+struct iwl_mvm_int_sta {
+	u32 sta_id;
+	enum iwl_sta_type type;
+	u32 tfd_queue_msk;
+};
+
+/**
+ * iwl_mvm_sta_send_to_fw - send the STA info to the FW
+ *
+ * @mvm: the iwl_mvm* to use
+ * @sta: the STA
+ * @update: this is true if the FW is being updated about a STA it already knows
+ *	about. Otherwise (if this is a new STA), this should be false.
+ * @flags: if update==true, this marks what is being changed via ORs of values
+ *	from enum iwl_sta_modify_flag. Otherwise, this is ignored.
+ */
+int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+			   bool update, unsigned int flags);
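
A usage sketch of the update/flags contract described above. The call sites are invented for illustration; STA_MODIFY_QUEUES is assumed to be one of the values of enum iwl_sta_modify_flag:

/* a new station: the FW doesn't know it yet, so flags must be 0 */
ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);

/* later, updating only the queue configuration of a known station */
ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);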
+int iwl_mvm_add_sta(struct iwl_mvm *mvm,
+		    struct ieee80211_vif *vif,
+		    struct ieee80211_sta *sta);
+
+static inline int iwl_mvm_update_sta(struct iwl_mvm *mvm,
+				     struct ieee80211_vif *vif,
+				     struct ieee80211_sta *sta)
+{
+	return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0);
+}
+
+int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
+				  struct iwl_mvm_sta *mvm_sta);
+int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
+		   struct ieee80211_vif *vif,
+		   struct ieee80211_sta *sta);
+int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
+		      struct ieee80211_vif *vif,
+		      u8 sta_id);
+int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
+			struct ieee80211_vif *vif,
+			struct ieee80211_sta *sta,
+			struct ieee80211_key_conf *keyconf,
+			u8 key_offset);
+int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
+			   struct ieee80211_vif *vif,
+			   struct ieee80211_sta *sta,
+			   struct ieee80211_key_conf *keyconf);
+
+void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
+			     struct ieee80211_vif *vif,
+			     struct ieee80211_key_conf *keyconf,
+			     struct ieee80211_sta *sta, u32 iv32,
+			     u16 *phase1key);
+
+void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
+			   struct iwl_rx_cmd_buffer *rxb);
+
+/* AMPDU */
+int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+		       int tid, u16 ssn, bool start, u16 buf_size, u16 timeout);
+int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			    struct ieee80211_sta *sta, u16 tid, u16 buf_size,
+			    bool amsdu);
+int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			    struct ieee80211_sta *sta, u16 tid);
+int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			    struct ieee80211_sta *sta, u16 tid);
+
+int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+		       int tid, u8 queue, bool start);
+
+int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm);
+void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm);
+
+int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
+			     struct iwl_mvm_int_sta *sta,
+			     u32 qmask, enum nl80211_iftype iftype,
+			     enum iwl_sta_type type);
+void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta);
+int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm);
+
+void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
+				struct ieee80211_sta *sta);
+void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
+				       struct ieee80211_sta *sta,
+				       enum ieee80211_frame_release_type reason,
+				       u16 cnt, u16 tids, bool more_data,
+				       bool single_sta_queue);
+int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+		      bool drain);
+void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
+				   struct iwl_mvm_sta *mvmsta, bool disable);
+void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
+				      struct ieee80211_sta *sta,
+				      bool disable);
+void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
+				       struct iwl_mvm_vif *mvmvif,
+				       bool disable);
+void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
+
+int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
+			       int ac, int ssn, unsigned int wdg_timeout,
+			       bool force);
+
+#endif /* __sta_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
new file mode 100644
index 0000000..67f360c
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
@@ -0,0 +1,739 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(C) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(C) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/etherdevice.h>
+#include "mvm.h"
+#include "time-event.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
+
+#define TU_TO_US(x) ((x) * 1024)
+#define TU_TO_MS(x) (TU_TO_US(x) / 1000)
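
A TU (time unit) is 1024 µs, so these macros are plain scaling; a worked example with hypothetical beacon parameters:

/* hypothetical values: dtim_period = 2, beacon_int = 100 TU */
u32 tu = 2 * 100;	/* 200 TU */
u32 us = TU_TO_US(tu);	/* 200 * 1024 = 204800 us */
u32 ms = TU_TO_MS(tu);	/* 204800 / 1000 = 204 ms */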
+
+void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm)
+{
+	struct ieee80211_sta *sta;
+	struct iwl_mvm_sta *mvmsta;
+	int i;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+						lockdep_is_held(&mvm->mutex));
+		if (!sta || IS_ERR(sta) || !sta->tdls)
+			continue;
+
+		mvmsta = iwl_mvm_sta_from_mac80211(sta);
+		ieee80211_tdls_oper_request(mvmsta->vif, sta->addr,
+				NL80211_TDLS_TEARDOWN,
+				WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED,
+				GFP_KERNEL);
+	}
+}
+
+int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+	struct ieee80211_sta *sta;
+	struct iwl_mvm_sta *mvmsta;
+	int count = 0;
+	int i;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+						lockdep_is_held(&mvm->mutex));
+		if (!sta || IS_ERR(sta) || !sta->tdls)
+			continue;
+
+		if (vif) {
+			mvmsta = iwl_mvm_sta_from_mac80211(sta);
+			if (mvmsta->vif != vif)
+				continue;
+		}
+
+		count++;
+	}
+
+	return count;
+}
+
+static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+	struct iwl_rx_packet *pkt;
+	struct iwl_tdls_config_res *resp;
+	struct iwl_tdls_config_cmd tdls_cfg_cmd = {};
+	struct iwl_host_cmd cmd = {
+		.id = TDLS_CONFIG_CMD,
+		.flags = CMD_WANT_SKB,
+		.data = { &tdls_cfg_cmd, },
+		.len = { sizeof(struct iwl_tdls_config_cmd), },
+	};
+	struct ieee80211_sta *sta;
+	int ret, i, cnt;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	lockdep_assert_held(&mvm->mutex);
+
+	tdls_cfg_cmd.id_and_color =
+		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+	tdls_cfg_cmd.tx_to_ap_tid = IWL_MVM_TDLS_FW_TID;
+	tdls_cfg_cmd.tx_to_ap_ssn = cpu_to_le16(0); /* not used for now */
+
+	/* for now the Tx cmd is empty and unused */
+
+	/* populate TDLS peer data */
+	cnt = 0;
+	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
+						lockdep_is_held(&mvm->mutex));
+		if (IS_ERR_OR_NULL(sta) || !sta->tdls)
+			continue;
+
+		tdls_cfg_cmd.sta_info[cnt].sta_id = i;
+		tdls_cfg_cmd.sta_info[cnt].tx_to_peer_tid =
+							IWL_MVM_TDLS_FW_TID;
+		tdls_cfg_cmd.sta_info[cnt].tx_to_peer_ssn = cpu_to_le16(0);
+		tdls_cfg_cmd.sta_info[cnt].is_initiator =
+				cpu_to_le32(sta->tdls_initiator ? 1 : 0);
+
+		cnt++;
+	}
+
+	tdls_cfg_cmd.tdls_peer_count = cnt;
+	IWL_DEBUG_TDLS(mvm, "send TDLS config to FW for %d peers\n", cnt);
+
+	ret = iwl_mvm_send_cmd(mvm, &cmd);
+	if (WARN_ON_ONCE(ret))
+		return;
+
+	pkt = cmd.resp_pkt;
+
+	WARN_ON_ONCE(iwl_rx_packet_payload_len(pkt) != sizeof(*resp));
+
+	/* we don't really care about the response at this point */
+
+	iwl_free_resp(&cmd);
+}
+
+void iwl_mvm_recalc_tdls_state(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			       bool sta_added)
+{
+	int tdls_sta_cnt = iwl_mvm_tdls_sta_count(mvm, vif);
+
+	/* when the first peer joins, send a power update first */
+	if (tdls_sta_cnt == 1 && sta_added)
+		iwl_mvm_power_update_mac(mvm);
+
+	/* Configure the FW with TDLS peer info only if the TDLS channel
+	 * switch capability is set.
+	 * TDLS config data is currently used only in the TDLS channel switch
+	 * code. It is also supposed to serve the TDLS buffer station, which
+	 * is not yet implemented in the FW.
+	 */
+	if (fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH))
+		iwl_mvm_tdls_config(mvm, vif);
+
+	/* when the last peer leaves, send a power update last */
+	if (tdls_sta_cnt == 0 && !sta_added)
+		iwl_mvm_power_update_mac(mvm);
+}
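
A hypothetical call-site sketch (condensed; the real flow lives in the station add/remove paths): the function encodes the ordering rule from the comments above, so a caller only passes whether a peer was just added or removed:

/* after a TDLS peer was successfully added to the FW */
if (sta->tdls)
	iwl_mvm_recalc_tdls_state(mvm, vif, true);

/* after a TDLS peer was removed from the FW */
if (sta->tdls)
	iwl_mvm_recalc_tdls_state(mvm, vif, false);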
+
+void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
+					   struct ieee80211_vif *vif)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int;
+
+	/*
+	 * iwl_mvm_protect_session() reads directly from the device
+	 * (the system time), so make sure it is available.
+	 */
+	if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_TDLS))
+		return;
+
+	mutex_lock(&mvm->mutex);
+	/* Protect the session to hear the TDLS setup response on the channel */
+	iwl_mvm_protect_session(mvm, vif, duration, duration, 100, true);
+	mutex_unlock(&mvm->mutex);
+
+	iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_TDLS);
+}
+
+static const char *
+iwl_mvm_tdls_cs_state_str(enum iwl_mvm_tdls_cs_state state)
+{
+	switch (state) {
+	case IWL_MVM_TDLS_SW_IDLE:
+		return "IDLE";
+	case IWL_MVM_TDLS_SW_REQ_SENT:
+		return "REQ SENT";
+	case IWL_MVM_TDLS_SW_RESP_RCVD:
+		return "RESP RECEIVED";
+	case IWL_MVM_TDLS_SW_REQ_RCVD:
+		return "REQ RECEIVED";
+	case IWL_MVM_TDLS_SW_ACTIVE:
+		return "ACTIVE";
+	}
+
+	return NULL;
+}
+
+static void iwl_mvm_tdls_update_cs_state(struct iwl_mvm *mvm,
+					 enum iwl_mvm_tdls_cs_state state)
+{
+	if (mvm->tdls_cs.state == state)
+		return;
+
+	IWL_DEBUG_TDLS(mvm, "TDLS channel switch state: %s -> %s\n",
+		       iwl_mvm_tdls_cs_state_str(mvm->tdls_cs.state),
+		       iwl_mvm_tdls_cs_state_str(state));
+	mvm->tdls_cs.state = state;
+
+	/* we only send requests to our switching peer - update sent time */
+	if (state == IWL_MVM_TDLS_SW_REQ_SENT)
+		mvm->tdls_cs.peer.sent_timestamp =
+			iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
+
+	if (state == IWL_MVM_TDLS_SW_IDLE)
+		mvm->tdls_cs.cur_sta_id = IWL_MVM_INVALID_STA;
+}
+
+void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_tdls_channel_switch_notif *notif = (void *)pkt->data;
+	struct ieee80211_sta *sta;
+	unsigned int delay;
+	struct iwl_mvm_sta *mvmsta;
+	struct ieee80211_vif *vif;
+	u32 sta_id = le32_to_cpu(notif->sta_id);
+
+	lockdep_assert_held(&mvm->mutex);
+
+	/* the FW can fail the channel switch; go back to idle in that case */
+	if (!le32_to_cpu(notif->status)) {
+		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
+		return;
+	}
+
+	if (WARN_ON(sta_id >= IWL_MVM_STATION_COUNT))
+		return;
+
+	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+					lockdep_is_held(&mvm->mutex));
+	/* the station may not be here, but if it is, it must be a TDLS peer */
+	if (IS_ERR_OR_NULL(sta) || WARN_ON(!sta->tdls))
+		return;
+
+	mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	vif = mvmsta->vif;
+
+	/*
+	 * Update state and possibly switch again after this is over (DTIM).
+	 * Also convert TU to msec.
+	 */
+	delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
+	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
+			 msecs_to_jiffies(delay));
+
+	iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_ACTIVE);
+}
+
+static int
+iwl_mvm_tdls_check_action(struct iwl_mvm *mvm,
+			  enum iwl_tdls_channel_switch_type type,
+			  const u8 *peer, bool peer_initiator, u32 timestamp)
+{
+	bool same_peer = false;
+	int ret = 0;
+
+	/* get the existing peer if it's there */
+	if (mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE &&
+	    mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
+		struct ieee80211_sta *sta = rcu_dereference_protected(
+				mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
+				lockdep_is_held(&mvm->mutex));
+		if (!IS_ERR_OR_NULL(sta))
+			same_peer = ether_addr_equal(peer, sta->addr);
+	}
+
+	switch (mvm->tdls_cs.state) {
+	case IWL_MVM_TDLS_SW_IDLE:
+		/*
+		 * might be spurious packet from the peer after the switch is
+		 * already done
+		 */
+		if (type == TDLS_MOVE_CH)
+			ret = -EINVAL;
+		break;
+	case IWL_MVM_TDLS_SW_REQ_SENT:
+		/* only allow requests from the same peer */
+		if (!same_peer)
+			ret = -EBUSY;
+		else if (type == TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH &&
+			 !peer_initiator)
+			/*
+			 * We received a ch-switch request while an outgoing
+			 * one is pending. Allow it if the peer is the link
+			 * initiator.
+			 */
+			ret = -EBUSY;
+		else if (type == TDLS_SEND_CHAN_SW_REQ)
+			/* wait for idle before sending another request */
+			ret = -EBUSY;
+		else if (timestamp <= mvm->tdls_cs.peer.sent_timestamp)
+			/* we got a stale response - ignore it */
+			ret = -EINVAL;
+		break;
+	case IWL_MVM_TDLS_SW_RESP_RCVD:
+		/*
+		 * we are waiting for the FW to give an "active" notification,
+		 * so ignore requests in the meantime
+		 */
+		ret = -EBUSY;
+		break;
+	case IWL_MVM_TDLS_SW_REQ_RCVD:
+		/* as above, allow the link initiator to proceed */
+		if (type == TDLS_SEND_CHAN_SW_REQ) {
+			if (!same_peer)
+				ret = -EBUSY;
+			else if (peer_initiator) /* they are the initiator */
+				ret = -EBUSY;
+		} else if (type == TDLS_MOVE_CH) {
+			ret = -EINVAL;
+		}
+		break;
+	case IWL_MVM_TDLS_SW_ACTIVE:
+		/*
+		 * the only valid request when active is a request to return
+		 * to the base channel by the current off-channel peer
+		 */
+		if (type != TDLS_MOVE_CH || !same_peer)
+			ret = -EBUSY;
+		break;
+	}
+
+	if (ret)
+		IWL_DEBUG_TDLS(mvm,
+			       "Invalid TDLS action %d state %d peer %pM same_peer %d initiator %d\n",
+			       type, mvm->tdls_cs.state, peer, same_peer,
+			       peer_initiator);
+
+	return ret;
+}
+
+static int
+iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
+				   struct ieee80211_vif *vif,
+				   enum iwl_tdls_channel_switch_type type,
+				   const u8 *peer, bool peer_initiator,
+				   u8 oper_class,
+				   struct cfg80211_chan_def *chandef,
+				   u32 timestamp, u16 switch_time,
+				   u16 switch_timeout, struct sk_buff *skb,
+				   u32 ch_sw_tm_ie)
+{
+	struct ieee80211_sta *sta;
+	struct iwl_mvm_sta *mvmsta;
+	struct ieee80211_tx_info *info;
+	struct ieee80211_hdr *hdr;
+	struct iwl_tdls_channel_switch_cmd cmd = {0};
+	int ret;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator,
+					timestamp);
+	if (ret)
+		return ret;
+
+	if (!skb || WARN_ON(skb->len > IWL_TDLS_CH_SW_FRAME_MAX_SIZE)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	cmd.switch_type = type;
+	cmd.timing.frame_timestamp = cpu_to_le32(timestamp);
+	cmd.timing.switch_time = cpu_to_le32(switch_time);
+	cmd.timing.switch_timeout = cpu_to_le32(switch_timeout);
+
+	rcu_read_lock();
+	sta = ieee80211_find_sta(vif, peer);
+	if (!sta) {
+		rcu_read_unlock();
+		ret = -ENOENT;
+		goto out;
+	}
+	mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	cmd.peer_sta_id = cpu_to_le32(mvmsta->sta_id);
+
+	if (!chandef) {
+		if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
+		    mvm->tdls_cs.peer.chandef.chan) {
+			/* actually moving to the channel */
+			chandef = &mvm->tdls_cs.peer.chandef;
+		} else if (mvm->tdls_cs.state == IWL_MVM_TDLS_SW_ACTIVE &&
+			   type == TDLS_MOVE_CH) {
+			/* we need to return to base channel */
+			struct ieee80211_chanctx_conf *chanctx =
+					rcu_dereference(vif->chanctx_conf);
+
+			if (WARN_ON_ONCE(!chanctx)) {
+				rcu_read_unlock();
+				goto out;
+			}
+
+			chandef = &chanctx->def;
+		}
+	}
+
+	if (chandef) {
+		cmd.ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ?
+			       PHY_BAND_24 : PHY_BAND_5);
+		cmd.ci.channel = chandef->chan->hw_value;
+		cmd.ci.width = iwl_mvm_get_channel_width(chandef);
+		cmd.ci.ctrl_pos = iwl_mvm_get_ctrl_pos(chandef);
+	}
+
+	/* keep quota calculation simple for now - 50% of DTIM for TDLS */
+	cmd.timing.max_offchan_duration =
+			cpu_to_le32(TU_TO_US(vif->bss_conf.dtim_period *
+					     vif->bss_conf.beacon_int) / 2);
+
+	/* Switch time is the first field in the switch-timing IE, so skip
+	 * the two-byte IE header (element ID + length) to point at it.
+	 */
+	cmd.frame.switch_time_offset = cpu_to_le32(ch_sw_tm_ie + 2);
+
+	info = IEEE80211_SKB_CB(skb);
+	hdr = (void *)skb->data;
+	if (info->control.hw_key) {
+		if (info->control.hw_key->cipher != WLAN_CIPHER_SUITE_CCMP) {
+			rcu_read_unlock();
+			ret = -EINVAL;
+			goto out;
+		}
+		iwl_mvm_set_tx_cmd_ccmp(info, &cmd.frame.tx_cmd);
+	}
+
+	iwl_mvm_set_tx_cmd(mvm, skb, &cmd.frame.tx_cmd, info,
+			   mvmsta->sta_id);
+
+	iwl_mvm_set_tx_cmd_rate(mvm, &cmd.frame.tx_cmd, info, sta,
+				hdr->frame_control);
+	rcu_read_unlock();
+
+	memcpy(cmd.frame.data, skb->data, skb->len);
+
+	ret = iwl_mvm_send_cmd_pdu(mvm, TDLS_CHANNEL_SWITCH_CMD, 0,
+				   sizeof(cmd), &cmd);
+	if (ret) {
+		IWL_ERR(mvm, "Failed to send TDLS_CHANNEL_SWITCH cmd: %d\n",
+			ret);
+		goto out;
+	}
+
+	/* channel switch has started, update state */
+	if (type != TDLS_MOVE_CH) {
+		mvm->tdls_cs.cur_sta_id = mvmsta->sta_id;
+		iwl_mvm_tdls_update_cs_state(mvm,
+					     type == TDLS_SEND_CHAN_SW_REQ ?
+					     IWL_MVM_TDLS_SW_REQ_SENT :
+					     IWL_MVM_TDLS_SW_REQ_RCVD);
+	} else {
+		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_RESP_RCVD);
+	}
+
+out:
+
+	/* channel switch failed - we are idle */
+	if (ret)
+		iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
+
+	return ret;
+}
+
+void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
+{
+	struct iwl_mvm *mvm;
+	struct ieee80211_sta *sta;
+	struct iwl_mvm_sta *mvmsta;
+	struct ieee80211_vif *vif;
+	unsigned int delay;
+	int ret;
+
+	mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work);
+	mutex_lock(&mvm->mutex);
+
+	/* called after an active channel switch has finished or timed out */
+	iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
+
+	/* station might be gone, in that case do nothing */
+	if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA)
+		goto out;
+
+	sta = rcu_dereference_protected(
+				mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
+				lockdep_is_held(&mvm->mutex));
+	/* the station may not be here, but if it is, it must be a TDLS peer */
+	if (!sta || IS_ERR(sta) || WARN_ON(!sta->tdls))
+		goto out;
+
+	mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	vif = mvmsta->vif;
+	ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
+						 TDLS_SEND_CHAN_SW_REQ,
+						 sta->addr,
+						 mvm->tdls_cs.peer.initiator,
+						 mvm->tdls_cs.peer.op_class,
+						 &mvm->tdls_cs.peer.chandef,
+						 0, 0, 0,
+						 mvm->tdls_cs.peer.skb,
+						 mvm->tdls_cs.peer.ch_sw_tm_ie);
+	if (ret)
+		IWL_ERR(mvm, "Not sending TDLS channel switch: %d\n", ret);
+
+	/* retry after a DTIM if we failed sending now */
+	delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
+	schedule_delayed_work(&mvm->tdls_cs.dwork, msecs_to_jiffies(delay));
+out:
+	mutex_unlock(&mvm->mutex);
+}
+
+int
+iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
+			    struct ieee80211_vif *vif,
+			    struct ieee80211_sta *sta, u8 oper_class,
+			    struct cfg80211_chan_def *chandef,
+			    struct sk_buff *tmpl_skb, u32 ch_sw_tm_ie)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	struct iwl_mvm_sta *mvmsta;
+	unsigned int delay;
+	int ret;
+
+	mutex_lock(&mvm->mutex);
+
+	IWL_DEBUG_TDLS(mvm, "TDLS channel switch with %pM ch %d width %d\n",
+		       sta->addr, chandef->chan->center_freq, chandef->width);
+
+	/* we only support a single peer for channel switching */
+	if (mvm->tdls_cs.peer.sta_id != IWL_MVM_INVALID_STA) {
+		IWL_DEBUG_TDLS(mvm,
+			       "Existing peer. Can't start switch with %pM\n",
+			       sta->addr);
+		ret = -EBUSY;
+		goto out;
+	}
+
+	ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
+						 TDLS_SEND_CHAN_SW_REQ,
+						 sta->addr, sta->tdls_initiator,
+						 oper_class, chandef, 0, 0, 0,
+						 tmpl_skb, ch_sw_tm_ie);
+	if (ret)
+		goto out;
+
+	/*
+	 * Mark the peer as "in tdls switch" for this vif. We only allow a
+	 * single such peer per vif.
+	 */
+	mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL);
+	if (!mvm->tdls_cs.peer.skb) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	mvm->tdls_cs.peer.sta_id = mvmsta->sta_id;
+	mvm->tdls_cs.peer.chandef = *chandef;
+	mvm->tdls_cs.peer.initiator = sta->tdls_initiator;
+	mvm->tdls_cs.peer.op_class = oper_class;
+	mvm->tdls_cs.peer.ch_sw_tm_ie = ch_sw_tm_ie;
+
+	/*
+	 * Wait for 2 DTIM periods before attempting the next switch. The next
+	 * switch will be made sooner if the current one completes before that.
+	 */
+	delay = 2 * TU_TO_MS(vif->bss_conf.dtim_period *
+			     vif->bss_conf.beacon_int);
+	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
+			 msecs_to_jiffies(delay));
+
+out:
+	mutex_unlock(&mvm->mutex);
+	return ret;
+}
+
+void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
+					struct ieee80211_vif *vif,
+					struct ieee80211_sta *sta)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	struct ieee80211_sta *cur_sta;
+	bool wait_for_phy = false;
+
+	mutex_lock(&mvm->mutex);
+
+	IWL_DEBUG_TDLS(mvm, "TDLS cancel channel switch with %pM\n", sta->addr);
+
+	/* we only support a single peer for channel switching */
+	if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) {
+		IWL_DEBUG_TDLS(mvm, "No ch switch peer - %pM\n", sta->addr);
+		goto out;
+	}
+
+	cur_sta = rcu_dereference_protected(
+				mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
+				lockdep_is_held(&mvm->mutex));
+	/* make sure it's the same peer */
+	if (cur_sta != sta)
+		goto out;
+
+	/*
+	 * If we're currently in a switch because of the now canceled peer,
+	 * wait a DTIM here to make sure the phy is back on the base channel.
+	 * We can't otherwise force it.
+	 */
+	if (mvm->tdls_cs.cur_sta_id == mvm->tdls_cs.peer.sta_id &&
+	    mvm->tdls_cs.state != IWL_MVM_TDLS_SW_IDLE)
+		wait_for_phy = true;
+
+	mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
+	dev_kfree_skb(mvm->tdls_cs.peer.skb);
+	mvm->tdls_cs.peer.skb = NULL;
+
+out:
+	mutex_unlock(&mvm->mutex);
+
+	/* make sure the phy is on the base channel */
+	if (wait_for_phy)
+		msleep(TU_TO_MS(vif->bss_conf.dtim_period *
+				vif->bss_conf.beacon_int));
+
+	/* flush the channel switch state */
+	flush_delayed_work(&mvm->tdls_cs.dwork);
+
+	IWL_DEBUG_TDLS(mvm, "TDLS ending channel switch with %pM\n", sta->addr);
+}
+
+void
+iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
+				 struct ieee80211_vif *vif,
+				 struct ieee80211_tdls_ch_sw_params *params)
+{
+	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+	enum iwl_tdls_channel_switch_type type;
+	unsigned int delay;
+	const char *action_str =
+		params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ?
+		"REQ" : "RESP";
+
+	mutex_lock(&mvm->mutex);
+
+	IWL_DEBUG_TDLS(mvm,
+		       "Received TDLS ch switch action %s from %pM status %d\n",
+		       action_str, params->sta->addr, params->status);
+
+	/*
+	 * we got a non-zero status from a peer we were switching to - move to
+	 * the idle state and retry again later
+	 */
+	if (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_RESPONSE &&
+	    params->status != 0 &&
+	    mvm->tdls_cs.state == IWL_MVM_TDLS_SW_REQ_SENT &&
+	    mvm->tdls_cs.cur_sta_id != IWL_MVM_INVALID_STA) {
+		struct ieee80211_sta *cur_sta;
+
+		/* make sure it's the same peer */
+		cur_sta = rcu_dereference_protected(
+				mvm->fw_id_to_mac_id[mvm->tdls_cs.cur_sta_id],
+				lockdep_is_held(&mvm->mutex));
+		if (cur_sta == params->sta) {
+			iwl_mvm_tdls_update_cs_state(mvm,
+						     IWL_MVM_TDLS_SW_IDLE);
+			goto retry;
+		}
+	}
+
+	type = (params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST) ?
+	       TDLS_SEND_CHAN_SW_RESP_AND_MOVE_CH : TDLS_MOVE_CH;
+
+	iwl_mvm_tdls_config_channel_switch(mvm, vif, type, params->sta->addr,
+					   params->sta->tdls_initiator, 0,
+					   params->chandef, params->timestamp,
+					   params->switch_time,
+					   params->switch_timeout,
+					   params->tmpl_skb,
+					   params->ch_sw_tm_ie);
+
+retry:
+	/* register a timeout in case we don't succeed in switching */
+	delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
+	mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
+			 msecs_to_jiffies(delay));
+	mutex_unlock(&mvm->mutex);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/testmode.h b/drivers/net/wireless/intel/iwlwifi/mvm/testmode.h
new file mode 100644
index 0000000..cbbc16f
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/testmode.h
@@ -0,0 +1,97 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __IWL_MVM_TESTMODE_H__
+#define __IWL_MVM_TESTMODE_H__
+
+/**
+ * enum iwl_mvm_testmode_attrs - testmode attributes inside NL80211_ATTR_TESTDATA
+ * @IWL_MVM_TM_ATTR_UNSPEC: (invalid attribute)
+ * @IWL_MVM_TM_ATTR_CMD: sub command, see &enum iwl_mvm_testmode_commands (u32)
+ * @IWL_MVM_TM_ATTR_NOA_DURATION: requested NoA duration (u32)
+ * @IWL_MVM_TM_ATTR_BEACON_FILTER_STATE: beacon filter state (0 or 1, u32)
+ */
+enum iwl_mvm_testmode_attrs {
+	IWL_MVM_TM_ATTR_UNSPEC,
+	IWL_MVM_TM_ATTR_CMD,
+	IWL_MVM_TM_ATTR_NOA_DURATION,
+	IWL_MVM_TM_ATTR_BEACON_FILTER_STATE,
+
+	/* keep last */
+	NUM_IWL_MVM_TM_ATTRS,
+	IWL_MVM_TM_ATTR_MAX = NUM_IWL_MVM_TM_ATTRS - 1,
+};
+
+/**
+ * enum iwl_mvm_testmode_commands - MVM testmode commands
+ * @IWL_MVM_TM_CMD_SET_NOA: set NoA on GO vif for testing
+ * @IWL_MVM_TM_CMD_SET_BEACON_FILTER: turn beacon filtering off/on
+ */
+enum iwl_mvm_testmode_commands {
+	IWL_MVM_TM_CMD_SET_NOA,
+	IWL_MVM_TM_CMD_SET_BEACON_FILTER,
+};
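
A hedged sketch of how an op mode might consume these attributes out of NL80211_ATTR_TESTDATA. Only the enum values come from this header; the policy, the handler, and its signature are invented for illustration:

#include <net/netlink.h>

static const struct nla_policy iwl_mvm_tm_policy[NUM_IWL_MVM_TM_ATTRS] = {
	[IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 },
	[IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 },
	[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 },
};

static int iwl_mvm_tm_dispatch(void *data, int len)
{
	struct nlattr *tb[NUM_IWL_MVM_TM_ATTRS];
	int err;

	/* parse the attribute stream carried in NL80211_ATTR_TESTDATA */
	err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len,
			iwl_mvm_tm_policy, NULL);
	if (err)
		return err;

	if (!tb[IWL_MVM_TM_ATTR_CMD])
		return -EINVAL;

	switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) {
	case IWL_MVM_TM_CMD_SET_NOA:
		/* IWL_MVM_TM_ATTR_NOA_DURATION carries the duration (u32) */
		return 0;
	case IWL_MVM_TM_CMD_SET_BEACON_FILTER:
		/* IWL_MVM_TM_ATTR_BEACON_FILTER_STATE carries 0 or 1 (u32) */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}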
+
+#endif /* __IWL_MVM_TESTMODE_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
new file mode 100644
index 0000000..cd91bc4
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -0,0 +1,921 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/jiffies.h>
+#include <net/mac80211.h>
+
+#include "fw/notif-wait.h"
+#include "iwl-trans.h"
+#include "fw-api.h"
+#include "time-event.h"
+#include "mvm.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
+
+/*
+ * For the high priority TE use a time event type that has similar priority to
+ * the FW's action scan priority.
+ */
+#define IWL_MVM_ROC_TE_TYPE_NORMAL TE_P2P_DEVICE_DISCOVERABLE
+#define IWL_MVM_ROC_TE_TYPE_MGMT_TX TE_P2P_CLIENT_ASSOC
+
+void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
+			   struct iwl_mvm_time_event_data *te_data)
+{
+	lockdep_assert_held(&mvm->time_event_lock);
+
+	if (!te_data->vif)
+		return;
+
+	list_del(&te_data->list);
+	te_data->running = false;
+	te_data->uid = 0;
+	te_data->id = TE_MAX;
+	te_data->vif = NULL;
+}
+
+void iwl_mvm_roc_done_wk(struct work_struct *wk)
+{
+	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
+
+	/*
+	 * Clear the ROC_RUNNING / ROC_AUX_RUNNING status bits.
+	 * This will cause the TX path to drop offchannel transmissions.
+	 * That would also be done by mac80211, but it is racy, in particular
+	 * in the case that the time event actually completed in the firmware
+	 * (which is handled in iwl_mvm_te_handle_notif).
+	 */
+	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status))
+		iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
+	if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
+		iwl_mvm_unref(mvm, IWL_MVM_REF_ROC_AUX);
+
+	synchronize_net();
+
+	/*
+	 * Flush the offchannel queue -- this is called when the time
+	 * event finishes or is canceled, so that frames queued for it
+	 * won't get stuck on the queue and be transmitted in the next
+	 * time event.
+	 * We have to send the command asynchronously since this cannot
+	 * be under the mutex for locking reasons, but that's not an
+	 * issue as it will have to complete before the next command is
+	 * executed, and a new time event means a new command.
+	 */
+	iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC);
+
+	/* Do the same for the P2P device queue (STA) */
+	if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) {
+		struct iwl_mvm_vif *mvmvif;
+
+		/*
+		 * NB: access to this pointer would be racy, but the flush bit
+		 * can only be set when we had a P2P-Device VIF, and we have a
+		 * flush of this work in iwl_mvm_prepare_mac_removal() so it's
+		 * not really racy.
+		 */
+
+		if (!WARN_ON(!mvm->p2p_device_vif)) {
+			mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif);
+			iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true,
+					  CMD_ASYNC);
+		}
+	}
+}
+
+static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
+{
+	/*
+	 * Of course, our status bit is just as racy as mac80211, so in
+	 * addition, fire off the work struct which will drop all frames
+	 * from the hardware queues that made it through the race. First
+	 * it will of course synchronize the TX path to make sure that
+	 * any *new* TX will be rejected.
+	 */
+	schedule_work(&mvm->roc_done_wk);
+}
+
+static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm)
+{
+	struct ieee80211_vif *csa_vif;
+
+	rcu_read_lock();
+
+	csa_vif = rcu_dereference(mvm->csa_vif);
+	if (!csa_vif || !csa_vif->csa_active)
+		goto out_unlock;
+
+	IWL_DEBUG_TE(mvm, "CSA NOA started\n");
+
+	/*
+	 * CSA NoA is started but we still have beacons to
+	 * transmit on the current channel.
+	 * So we just do nothing here and the switch
+	 * will be performed on the last TBTT.
+	 */
+	if (!ieee80211_csa_is_complete(csa_vif)) {
+		IWL_WARN(mvm, "CSA NOA started too early\n");
+		goto out_unlock;
+	}
+
+	ieee80211_csa_finish(csa_vif);
+
+	rcu_read_unlock();
+
+	RCU_INIT_POINTER(mvm->csa_vif, NULL);
+
+	return;
+
+out_unlock:
+	rcu_read_unlock();
+}
+
+static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
+					struct ieee80211_vif *vif,
+					const char *errmsg)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	if (vif->type != NL80211_IFTYPE_STATION)
+		return false;
+
+	if (!mvmvif->csa_bcn_pending && vif->bss_conf.assoc &&
+	    vif->bss_conf.dtim_period)
+		return false;
+	if (errmsg)
+		IWL_ERR(mvm, "%s\n", errmsg);
+
+	iwl_mvm_connection_loss(mvm, vif, errmsg);
+	return true;
+}
+
+static void
+iwl_mvm_te_handle_notify_csa(struct iwl_mvm *mvm,
+			     struct iwl_mvm_time_event_data *te_data,
+			     struct iwl_time_event_notif *notif)
+{
+	struct ieee80211_vif *vif = te_data->vif;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	if (!notif->status)
+		IWL_DEBUG_TE(mvm, "CSA time event failed to start\n");
+
+	switch (te_data->vif->type) {
+	case NL80211_IFTYPE_AP:
+		if (!notif->status)
+			mvmvif->csa_failed = true;
+		iwl_mvm_csa_noa_start(mvm);
+		break;
+	case NL80211_IFTYPE_STATION:
+		if (!notif->status) {
+			iwl_mvm_connection_loss(mvm, vif,
+						"CSA TE failed to start");
+			break;
+		}
+		iwl_mvm_csa_client_absent(mvm, te_data->vif);
+		ieee80211_chswitch_done(te_data->vif, true);
+		break;
+	default:
+		/* should never happen */
+		WARN_ON_ONCE(1);
+		break;
+	}
+
+	/* we don't need it anymore */
+	iwl_mvm_te_clear_data(mvm, te_data);
+}
+
+static void iwl_mvm_te_check_trigger(struct iwl_mvm *mvm,
+				     struct iwl_time_event_notif *notif,
+				     struct iwl_mvm_time_event_data *te_data)
+{
+	struct iwl_fw_dbg_trigger_tlv *trig;
+	struct iwl_fw_dbg_trigger_time_event *te_trig;
+	int i;
+
+	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT))
+		return;
+
+	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TIME_EVENT);
+	te_trig = (void *)trig->data;
+
+	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
+					   ieee80211_vif_to_wdev(te_data->vif),
+					   trig))
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(te_trig->time_events); i++) {
+		u32 trig_te_id = le32_to_cpu(te_trig->time_events[i].id);
+		u32 trig_action_bitmap =
+			le32_to_cpu(te_trig->time_events[i].action_bitmap);
+		u32 trig_status_bitmap =
+			le32_to_cpu(te_trig->time_events[i].status_bitmap);
+
+		if (trig_te_id != te_data->id ||
+		    !(trig_action_bitmap & le32_to_cpu(notif->action)) ||
+		    !(trig_status_bitmap & BIT(le32_to_cpu(notif->status))))
+			continue;
+
+		iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
+					"Time event %d Action 0x%x received status: %d",
+					te_data->id,
+					le32_to_cpu(notif->action),
+					le32_to_cpu(notif->status));
+		break;
+	}
+}
+
+/*
+ * Handles a FW notification for an event that is known to the driver.
+ *
+ * @mvm: the mvm component
+ * @te_data: the time event data
+ * @notif: the notification data corresponding the time event data.
+ */
+static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
+				    struct iwl_mvm_time_event_data *te_data,
+				    struct iwl_time_event_notif *notif)
+{
+	lockdep_assert_held(&mvm->time_event_lock);
+
+	IWL_DEBUG_TE(mvm, "Handle time event notif - UID = 0x%x action %d\n",
+		     le32_to_cpu(notif->unique_id),
+		     le32_to_cpu(notif->action));
+
+	iwl_mvm_te_check_trigger(mvm, notif, te_data);
+
+	/*
+	 * The FW sends the start/end time event notifications even for events
+	 * that it fails to schedule. This is indicated in the status field of
+	 * the notification. This happens in cases that the scheduler cannot
+	 * find a schedule that can handle the event (for example requesting a
+	 * P2P Device discoverability, while there are other higher priority
+	 * events in the system).
+	 */
+	if (!le32_to_cpu(notif->status)) {
+		const char *msg;
+
+		if (notif->action & cpu_to_le32(TE_V2_NOTIF_HOST_EVENT_START))
+			msg = "Time Event start notification failure";
+		else
+			msg = "Time Event end notification failure";
+
+		IWL_DEBUG_TE(mvm, "%s\n", msg);
+
+		if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, msg)) {
+			iwl_mvm_te_clear_data(mvm, te_data);
+			return;
+		}
+	}
+
+	if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) {
+		IWL_DEBUG_TE(mvm,
+			     "TE ended - current time %lu, estimated end %lu\n",
+			     jiffies, te_data->end_jiffies);
+
+		switch (te_data->vif->type) {
+		case NL80211_IFTYPE_P2P_DEVICE:
+			ieee80211_remain_on_channel_expired(mvm->hw);
+			iwl_mvm_roc_finished(mvm);
+			break;
+		case NL80211_IFTYPE_STATION:
+			/*
+			 * By now, we should have finished association
+			 * and know the dtim period.
+			 */
+			iwl_mvm_te_check_disconnect(mvm, te_data->vif,
+				"No beacon heard and the time event is over already...");
+			break;
+		default:
+			break;
+		}
+
+		iwl_mvm_te_clear_data(mvm, te_data);
+	} else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) {
+		te_data->running = true;
+		te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration);
+
+		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+			set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
+			iwl_mvm_ref(mvm, IWL_MVM_REF_ROC);
+			ieee80211_ready_on_channel(mvm->hw);
+		} else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
+			iwl_mvm_te_handle_notify_csa(mvm, te_data, notif);
+		}
+	} else {
+		IWL_WARN(mvm, "Got TE with unknown action\n");
+	}
+}
+
+/*
+ * Handle an Aux ROC time event
+ */
+static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
+					   struct iwl_time_event_notif *notif)
+{
+	struct iwl_mvm_time_event_data *te_data, *tmp;
+	bool aux_roc_te = false;
+
+	list_for_each_entry_safe(te_data, tmp, &mvm->aux_roc_te_list, list) {
+		if (le32_to_cpu(notif->unique_id) == te_data->uid) {
+			aux_roc_te = true;
+			break;
+		}
+	}
+	if (!aux_roc_te) /* Not an Aux ROC time event */
+		return -EINVAL;
+
+	iwl_mvm_te_check_trigger(mvm, notif, te_data);
+
+	IWL_DEBUG_TE(mvm,
+		     "Aux ROC time event notification - UID = 0x%x action %d (error = %d)\n",
+		     le32_to_cpu(notif->unique_id),
+		     le32_to_cpu(notif->action), le32_to_cpu(notif->status));
+
+	if (!le32_to_cpu(notif->status) ||
+	    le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_END) {
+		/* End TE, notify mac80211 */
+		ieee80211_remain_on_channel_expired(mvm->hw);
+		iwl_mvm_roc_finished(mvm); /* flush aux queue */
+		list_del(&te_data->list); /* remove from list */
+		te_data->running = false;
+		te_data->vif = NULL;
+		te_data->uid = 0;
+		te_data->id = TE_MAX;
+	} else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
+		set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
+		te_data->running = true;
+		iwl_mvm_ref(mvm, IWL_MVM_REF_ROC_AUX);
+		ieee80211_ready_on_channel(mvm->hw); /* Start TE */
+	} else {
+		IWL_DEBUG_TE(mvm,
+			     "ERROR: Unknown Aux ROC Time Event (action = %d)\n",
+			     le32_to_cpu(notif->action));
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * The Rx handler for time event notifications
+ */
+void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
+				 struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_time_event_notif *notif = (void *)pkt->data;
+	struct iwl_mvm_time_event_data *te_data, *tmp;
+
+	IWL_DEBUG_TE(mvm, "Time event notification - UID = 0x%x action %d\n",
+		     le32_to_cpu(notif->unique_id),
+		     le32_to_cpu(notif->action));
+
+	spin_lock_bh(&mvm->time_event_lock);
+	/* Aux ROC time events are handled separately; if this was one, we're done */
+	if (!iwl_mvm_aux_roc_te_handle_notif(mvm, notif))
+		goto unlock;
+
+	list_for_each_entry_safe(te_data, tmp, &mvm->time_event_list, list) {
+		if (le32_to_cpu(notif->unique_id) == te_data->uid)
+			iwl_mvm_te_handle_notif(mvm, te_data, notif);
+	}
+unlock:
+	spin_unlock_bh(&mvm->time_event_lock);
+}
+
+static bool iwl_mvm_te_notif(struct iwl_notif_wait_data *notif_wait,
+			     struct iwl_rx_packet *pkt, void *data)
+{
+	struct iwl_mvm *mvm =
+		container_of(notif_wait, struct iwl_mvm, notif_wait);
+	struct iwl_mvm_time_event_data *te_data = data;
+	struct iwl_time_event_notif *resp;
+	int resp_len = iwl_rx_packet_payload_len(pkt);
+
+	if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_NOTIFICATION))
+		return true;
+
+	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
+		IWL_ERR(mvm, "Invalid TIME_EVENT_NOTIFICATION response\n");
+		return true;
+	}
+
+	resp = (void *)pkt->data;
+
+	/* te_data->uid is already set in the TIME_EVENT_CMD response */
+	if (le32_to_cpu(resp->unique_id) != te_data->uid)
+		return false;
+
+	IWL_DEBUG_TE(mvm, "TIME_EVENT_NOTIFICATION response - UID = 0x%x\n",
+		     te_data->uid);
+	if (!resp->status)
+		IWL_ERR(mvm,
+			"TIME_EVENT_NOTIFICATION received but not executed\n");
+
+	return true;
+}
+
+static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
+					struct iwl_rx_packet *pkt, void *data)
+{
+	struct iwl_mvm *mvm =
+		container_of(notif_wait, struct iwl_mvm, notif_wait);
+	struct iwl_mvm_time_event_data *te_data = data;
+	struct iwl_time_event_resp *resp;
+	int resp_len = iwl_rx_packet_payload_len(pkt);
+
+	if (WARN_ON(pkt->hdr.cmd != TIME_EVENT_CMD))
+		return true;
+
+	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
+		IWL_ERR(mvm, "Invalid TIME_EVENT_CMD response\n");
+		return true;
+	}
+
+	resp = (void *)pkt->data;
+
+	/* we should never get a response to another TIME_EVENT_CMD here */
+	if (WARN_ON_ONCE(le32_to_cpu(resp->id) != te_data->id))
+		return false;
+
+	te_data->uid = le32_to_cpu(resp->unique_id);
+	IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n",
+		     te_data->uid);
+	return true;
+}
+
+static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
+				       struct ieee80211_vif *vif,
+				       struct iwl_mvm_time_event_data *te_data,
+				       struct iwl_time_event_cmd *te_cmd)
+{
+	static const u16 time_event_response[] = { TIME_EVENT_CMD };
+	struct iwl_notification_wait wait_time_event;
+	int ret;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	IWL_DEBUG_TE(mvm, "Add new TE, duration %d TU\n",
+		     le32_to_cpu(te_cmd->duration));
+
+	spin_lock_bh(&mvm->time_event_lock);
+	if (WARN_ON(te_data->id != TE_MAX)) {
+		spin_unlock_bh(&mvm->time_event_lock);
+		return -EIO;
+	}
+	te_data->vif = vif;
+	te_data->duration = le32_to_cpu(te_cmd->duration);
+	te_data->id = le32_to_cpu(te_cmd->id);
+	list_add_tail(&te_data->list, &mvm->time_event_list);
+	spin_unlock_bh(&mvm->time_event_lock);
+
+	/*
+	 * Use a notification wait, which really just processes the
+	 * command response and doesn't wait for anything, in order
+	 * to be able to process the response and get the UID inside
+	 * the RX path. Using CMD_WANT_SKB doesn't work because it
+	 * stores the buffer and then wakes up this thread, by which
+	 * time another notification (that the time event started)
+	 * might already be processed unsuccessfully.
+	 */
+	iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event,
+				   time_event_response,
+				   ARRAY_SIZE(time_event_response),
+				   iwl_mvm_time_event_response, te_data);
+
+	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
+					    sizeof(*te_cmd), te_cmd);
+	if (ret) {
+		IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
+		iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
+		goto out_clear_te;
+	}
+
+	/* No need to wait for anything, so just pass 1 (0 isn't valid) */
+	ret = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1);
+	/* should never fail */
+	WARN_ON_ONCE(ret);
+
+	if (ret) {
+ out_clear_te:
+		spin_lock_bh(&mvm->time_event_lock);
+		iwl_mvm_te_clear_data(mvm, te_data);
+		spin_unlock_bh(&mvm->time_event_lock);
+	}
+	return ret;
+}
+
+void iwl_mvm_protect_session(struct iwl_mvm *mvm,
+			     struct ieee80211_vif *vif,
+			     u32 duration, u32 min_duration,
+			     u32 max_delay, bool wait_for_notif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
+	const u16 te_notif_response[] = { TIME_EVENT_NOTIFICATION };
+	struct iwl_notification_wait wait_te_notif;
+	struct iwl_time_event_cmd time_cmd = {};
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (te_data->running &&
+	    time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
+		IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
+			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
+		return;
+	}
+
+	if (te_data->running) {
+		IWL_DEBUG_TE(mvm, "extend 0x%x: only %u ms left\n",
+			     te_data->uid,
+			     jiffies_to_msecs(te_data->end_jiffies - jiffies));
+		/*
+		 * we don't have enough time
+		 * cancel the current TE and issue a new one
+		 * Of course it would be better to remove the old one only
+		 * when the new one is added, but we don't care if we are off
+		 * channel for a bit. All we need to do, is not to return
+		 * before we actually begin to be on the channel.
+		 */
+		iwl_mvm_stop_session_protection(mvm, vif);
+	}
+
+	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
+	time_cmd.id_and_color =
+		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+	time_cmd.id = cpu_to_le32(TE_BSS_STA_AGGRESSIVE_ASSOC);
+
+	time_cmd.apply_time = cpu_to_le32(0);
+
+	time_cmd.max_frags = TE_V2_FRAG_NONE;
+	time_cmd.max_delay = cpu_to_le32(max_delay);
+	/* TODO: why do we need to set interval = bi if it is not periodic? */
+	time_cmd.interval = cpu_to_le32(1);
+	time_cmd.duration = cpu_to_le32(duration);
+	time_cmd.repeat = 1;
+	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
+				      TE_V2_NOTIF_HOST_EVENT_END |
+				      TE_V2_START_IMMEDIATELY);
+
+	if (!wait_for_notif) {
+		iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
+		return;
+	}
+
+	/*
+	 * Create notification_wait for the TIME_EVENT_NOTIFICATION to use
+	 * right after we send the time event
+	 */
+	iwl_init_notification_wait(&mvm->notif_wait, &wait_te_notif,
+				   te_notif_response,
+				   ARRAY_SIZE(te_notif_response),
+				   iwl_mvm_te_notif, te_data);
+
+	/* If TE was sent OK - wait for the notification that started */
+	if (iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd)) {
+		IWL_ERR(mvm, "Failed to add TE to protect session\n");
+		iwl_remove_notification(&mvm->notif_wait, &wait_te_notif);
+	} else if (iwl_wait_notification(&mvm->notif_wait, &wait_te_notif,
+					 TU_TO_JIFFIES(max_delay))) {
+		IWL_ERR(mvm, "Failed to protect session until TE\n");
+	}
+}
+
+static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
+					struct iwl_mvm_time_event_data *te_data,
+					u32 *uid)
+{
+	u32 id;
+
+	/*
+	 * It is possible that by the time we got to this point the time
+	 * event was already removed.
+	 */
+	spin_lock_bh(&mvm->time_event_lock);
+
+	/* Save time event uid before clearing its data */
+	*uid = te_data->uid;
+	id = te_data->id;
+
+	/*
+	 * The clear_data function handles time events that were already removed
+	 */
+	iwl_mvm_te_clear_data(mvm, te_data);
+	spin_unlock_bh(&mvm->time_event_lock);
+
+	/*
+	 * It is possible that by the time we try to remove it, the time event
+	 * has already ended and removed. In such a case there is no need to
+	 * send a removal command.
+	 */
+	if (id == TE_MAX) {
+		IWL_DEBUG_TE(mvm, "TE 0x%x has already ended\n", *uid);
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * Explicit request to remove an aux ROC time event. The removal of a time
+ * event needs to be synchronized with the flow of a time event's end
+ * notification, which also removes the time event from the op mode
+ * data structures.
+ */
+static void iwl_mvm_remove_aux_roc_te(struct iwl_mvm *mvm,
+				      struct iwl_mvm_vif *mvmvif,
+				      struct iwl_mvm_time_event_data *te_data)
+{
+	struct iwl_hs20_roc_req aux_cmd = {};
+	u32 uid;
+	int ret;
+
+	if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
+		return;
+
+	aux_cmd.event_unique_id = cpu_to_le32(uid);
+	aux_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
+	aux_cmd.id_and_color =
+		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+	IWL_DEBUG_TE(mvm, "Removing BSS AUX ROC TE 0x%x\n",
+		     le32_to_cpu(aux_cmd.event_unique_id));
+	ret = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0,
+				   sizeof(aux_cmd), &aux_cmd);
+
+	if (WARN_ON(ret))
+		return;
+}
+
+/*
+ * Explicit request to remove a time event. The removal of a time event needs to
+ * be synchronized with the flow of a time event's end notification, which also
+ * removes the time event from the op mode data structures.
+ */
+void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
+			       struct iwl_mvm_vif *mvmvif,
+			       struct iwl_mvm_time_event_data *te_data)
+{
+	struct iwl_time_event_cmd time_cmd = {};
+	u32 uid;
+	int ret;
+
+	if (!__iwl_mvm_remove_time_event(mvm, te_data, &uid))
+		return;
+
+	/* When we remove a TE, the UID is to be set in the id field */
+	time_cmd.id = cpu_to_le32(uid);
+	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_REMOVE);
+	time_cmd.id_and_color =
+		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+
+	IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
+	ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, 0,
+				   sizeof(time_cmd), &time_cmd);
+	if (WARN_ON(ret))
+		return;
+}
+
+void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
+				     struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
+	u32 id;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	spin_lock_bh(&mvm->time_event_lock);
+	id = te_data->id;
+	spin_unlock_bh(&mvm->time_event_lock);
+
+	if (id != TE_BSS_STA_AGGRESSIVE_ASSOC) {
+		IWL_DEBUG_TE(mvm,
+			     "don't remove TE with id=%u (not session protection)\n",
+			     id);
+		return;
+	}
+
+	iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
+}
+
+int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			  int duration, enum ieee80211_roc_type type)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
+	struct iwl_time_event_cmd time_cmd = {};
+
+	lockdep_assert_held(&mvm->mutex);
+	if (te_data->running) {
+		IWL_WARN(mvm, "P2P_DEVICE remain on channel already running\n");
+		return -EBUSY;
+	}
+
+	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
+	time_cmd.id_and_color =
+		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+
+	switch (type) {
+	case IEEE80211_ROC_TYPE_NORMAL:
+		time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_NORMAL);
+		break;
+	case IEEE80211_ROC_TYPE_MGMT_TX:
+		time_cmd.id = cpu_to_le32(IWL_MVM_ROC_TE_TYPE_MGMT_TX);
+		break;
+	default:
+		WARN_ONCE(1, "Got an invalid ROC type\n");
+		return -EINVAL;
+	}
+
+	time_cmd.apply_time = cpu_to_le32(0);
+	time_cmd.interval = cpu_to_le32(1);
+
+	/*
+	 * The P2P Device TEs can have lower priority than other events
+	 * that are being scheduled by the driver/fw, and thus they might not
+	 * be scheduled. To improve the chances of them being scheduled, allow
+	 * them to be fragmented, and in addition allow them to be delayed.
+	 */
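+	/*
+	 * Illustrative numbers, assuming the usual 1 TU = 1024 usec
+	 * conversion in MSEC_TO_TU: a 500 ms ROC request gives
+	 * MSEC_TO_TU(500) ~= 488 TU, so max_frags = min(488 / 50,
+	 * TE_V2_FRAG_ENDLESS) = 9 fragments, and the start may be
+	 * delayed by up to MSEC_TO_TU(250) ~= 244 TU.
+	 */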
+	time_cmd.max_frags = min(MSEC_TO_TU(duration)/50, TE_V2_FRAG_ENDLESS);
+	time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2));
+	time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
+	time_cmd.repeat = 1;
+	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
+				      TE_V2_NOTIF_HOST_EVENT_END |
+				      TE_V2_START_IMMEDIATELY);
+
+	return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
+}
+
+static struct iwl_mvm_time_event_data *iwl_mvm_get_roc_te(struct iwl_mvm *mvm)
+{
+	struct iwl_mvm_time_event_data *te_data;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	spin_lock_bh(&mvm->time_event_lock);
+
+	/*
+	 * Iterate over the list of time events and find the time event that is
+	 * associated with a P2P_DEVICE interface.
+	 * This assumes that a P2P_DEVICE interface can have only a single time
+ * event at any given time, and that this time event corresponds to a ROC
+ * request.
+	 */
+	list_for_each_entry(te_data, &mvm->time_event_list, list) {
+		if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE)
+			goto out;
+	}
+
+	/* There can be at most one AUX ROC time event; we just use the
+	 * list to simplify/unify the code. Remove it if it exists.
+	 */
+	te_data = list_first_entry_or_null(&mvm->aux_roc_te_list,
+					   struct iwl_mvm_time_event_data,
+					   list);
+out:
+	spin_unlock_bh(&mvm->time_event_lock);
+	return te_data;
+}
+
+void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm)
+{
+	struct iwl_mvm_time_event_data *te_data;
+	u32 uid;
+
+	te_data = iwl_mvm_get_roc_te(mvm);
+	if (te_data)
+		__iwl_mvm_remove_time_event(mvm, te_data, &uid);
+}
+
+void iwl_mvm_stop_roc(struct iwl_mvm *mvm)
+{
+	struct iwl_mvm_vif *mvmvif;
+	struct iwl_mvm_time_event_data *te_data;
+
+	te_data = iwl_mvm_get_roc_te(mvm);
+	if (!te_data) {
+		IWL_WARN(mvm, "No remain on channel event\n");
+		return;
+	}
+
+	mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
+
+	if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+		iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
+		set_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status);
+	} else {
+		iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
+	}
+
+	iwl_mvm_roc_finished(mvm);
+}
+
+int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
+				struct ieee80211_vif *vif,
+				u32 duration, u32 apply_time)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
+	struct iwl_time_event_cmd time_cmd = {};
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (te_data->running) {
+		u32 id;
+
+		spin_lock_bh(&mvm->time_event_lock);
+		id = te_data->id;
+		spin_unlock_bh(&mvm->time_event_lock);
+
+		if (id == TE_CHANNEL_SWITCH_PERIOD) {
+			IWL_DEBUG_TE(mvm, "CS period is already scheduled\n");
+			return -EBUSY;
+		}
+
+		/*
+		 * Remove the session protection time event to allow the
+		 * channel switch. If we got here, we just heard a beacon so
+		 * the session protection is not needed anymore anyway.
+		 */
+		iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
+	}
+
+	time_cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
+	time_cmd.id_and_color =
+		cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
+	time_cmd.id = cpu_to_le32(TE_CHANNEL_SWITCH_PERIOD);
+	time_cmd.apply_time = cpu_to_le32(apply_time);
+	time_cmd.max_frags = TE_V2_FRAG_NONE;
+	time_cmd.duration = cpu_to_le32(duration);
+	time_cmd.repeat = 1;
+	time_cmd.interval = cpu_to_le32(1);
+	time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
+				      TE_V2_ABSENCE);
+	if (!apply_time)
+		time_cmd.policy |= cpu_to_le16(TE_V2_START_IMMEDIATELY);
+
+	return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
new file mode 100644
index 0000000..3d2e8b6
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
@@ -0,0 +1,250 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#ifndef __time_event_h__
+#define __time_event_h__
+
+#include "fw-api.h"
+
+#include "mvm.h"
+
+/**
+ * DOC: Time Events - what is it?
+ *
+ * Time Events are a fw feature that allows the driver to control the presence
+ * of the device on the channel. Since the fw supports multiple channels
+ * concurrently, the fw may choose to jump to another channel at any time.
+ * In order to make sure that the fw is on a specific channel at a certain time
+ * and for a certain duration, the driver needs to issue a time event.
+ *
+ * The simplest example is for BSS association. The driver issues a time event,
+ * waits for it to start, and only then tells mac80211 that we can start the
+ * association. This way, we make sure that the association will be done
+ * smoothly and won't be interrupted by channel switch decided within the fw.
+ */
+
+/**
+ * DOC: The flow against the fw
+ *
+ * When the driver needs to make sure we are in a certain channel, at a certain
+ * time and for a certain duration, it sends a Time Event. The flow against the
+ * fw goes like this:
+ *	1) Driver sends a TIME_EVENT_CMD to the fw
+ *	2) Driver gets the response for that command. This response contains the
+ *	   Unique ID (UID) of the event.
+ *	3) The fw sends notification when the event starts.
+ *
+ * Of course the API provides various options that allow control over the
+ * parameters of the flow:
+ *	What is the duration of the event?
+ *	What is the start time of the event?
+ *	Is there an end-time for the event?
+ *	How much can the event be delayed?
+ *	Can the event be split?
+ *	If so, what is the maximal number of chunks?
+ *	etc...
+ */
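+
+/*
+ * Illustrative sketch of the flow above (simplified, not part of the
+ * driver; duration_tu is a placeholder):
+ *
+ *	struct iwl_time_event_cmd cmd = {};
+ *
+ *	cmd.action = cpu_to_le32(FW_CTXT_ACTION_ADD);
+ *	cmd.duration = cpu_to_le32(duration_tu);
+ *	ret = iwl_mvm_time_event_send_add(mvm, vif, te_data, &cmd);
+ *
+ * The response to the TIME_EVENT_CMD carries the UID (kept in
+ * te_data->uid), and a later TIME_EVENT_NOTIFICATION reports the start.
+ */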
+
+/**
+ * DOC: Abstraction to the driver
+ *
+ * In order to simplify the use of time events for the rest of the driver,
+ * we abstract the use of time events. This component provides the functions
+ * needed by the driver.
+ */
+
+#define IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS 600
+#define IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS 400
+
+/**
+ * iwl_mvm_protect_session - start / extend the session protection.
+ * @mvm: the mvm component
+ * @vif: the virtual interface for which the session is issued
+ * @duration: the duration of the session in TU.
+ * @min_duration: a new session is started if the current session will end
+ *	in less than min_duration.
+ * @max_delay: maximum delay before starting the time event (in TU)
+ * @wait_for_notif: true to wait for the time event notification, i.e. block
+ *	until the time event has been scheduled, before returning
+ *
+ * This function can be used to start a session protection which means that the
+ * fw will stay on the channel for %duration TU. This function
+ * can block (sleep) until the session starts. This function can also be used
+ * to extend a currently running session.
+ * This function is meant to be used for BSS association, for example, where we
+ * want to make sure that the fw stays on the channel during the association.
+ */
+void iwl_mvm_protect_session(struct iwl_mvm *mvm,
+			     struct ieee80211_vif *vif,
+			     u32 duration, u32 min_duration,
+			     u32 max_delay, bool wait_for_notif);
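+
+/*
+ * Usage sketch (the values here are illustrative, not the ones the
+ * driver actually uses): before starting a BSS association, a caller
+ * might do
+ *
+ *	iwl_mvm_protect_session(mvm, vif, 300, 100, 500, false);
+ *
+ * i.e. keep the fw on the channel for 300 TU, reuse a running session
+ * if it still has at least 100 TU left, and allow the fw to delay the
+ * start by up to 500 TU.
+ */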
+
+/**
+ * iwl_mvm_stop_session_protection - cancel the session protection.
+ * @mvm: the mvm component
+ * @vif: the virtual interface for which the session is issued
+ *
+ * This function cancels the session protection, which is an act of good
+ * citizenship. If the protection is not needed any more it should be
+ * canceled, because the other bindings wait for the medium during that time.
+ * This function doesn't sleep.
+ */
+void iwl_mvm_stop_session_protection(struct iwl_mvm *mvm,
+				      struct ieee80211_vif *vif);
+
+/*
+ * iwl_mvm_rx_time_event_notif - handles %TIME_EVENT_NOTIFICATION.
+ */
+void iwl_mvm_rx_time_event_notif(struct iwl_mvm *mvm,
+				 struct iwl_rx_cmd_buffer *rxb);
+
+/**
+ * iwl_mvm_start_p2p_roc - start remain on channel for p2p device functionality
+ * @mvm: the mvm component
+ * @vif: the virtual interface for which the ROC is requested. It is assumed
+ *	that the vif type is NL80211_IFTYPE_P2P_DEVICE
+ * @duration: the requested duration in milliseconds for the fw to be on the
+ *	channel that is bound to the vif.
+ * @type: the remain on channel request type
+ *
+ * This function can be used to issue a remain on channel session,
+ * which means that the fw will stay on the channel for the requested %duration
+ * milliseconds. The function is async, meaning that it only issues the ROC
+ * request but does not wait for it to start. Once the FW is ready to serve the
+ * ROC request, it will issue a notification to the driver that it is on the
+ * requested channel. Once the FW completes the ROC request it will issue
+ * another notification to the driver.
+ */
+int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			  int duration, enum ieee80211_roc_type type);
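+
+/*
+ * Usage sketch (illustrative values): a 200 ms remain-on-channel for a
+ * normal ROC request would be issued as
+ *
+ *	ret = iwl_mvm_start_p2p_roc(mvm, vif, 200,
+ *				    IEEE80211_ROC_TYPE_NORMAL);
+ *
+ * after which the caller waits for the start/end notifications.
+ */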
+
+/**
+ * iwl_mvm_stop_roc - stop remain on channel functionality
+ * @mvm: the mvm component
+ *
+ * This function can be used to cancel an ongoing ROC session.
+ * The function is async, it will instruct the FW to stop serving the ROC
+ * session, but will not wait for the actual stopping of the session.
+ */
+void iwl_mvm_stop_roc(struct iwl_mvm *mvm);
+
+/**
+ * iwl_mvm_remove_time_event - general function to clean up a time event
+ * @mvm: the mvm component
+ * @mvmvif: the vif to which the time event belongs
+ * @te_data: the time event data that corresponds to that time event
+ *
+ * This function can be used to cancel a time event regardless of its type.
+ * It is useful for cleaning up time events running before removing an
+ * interface.
+ */
+void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
+			       struct iwl_mvm_vif *mvmvif,
+			       struct iwl_mvm_time_event_data *te_data);
+
+/**
+ * iwl_mvm_te_clear_data - remove time event from list
+ * @mvm: the mvm component
+ * @te_data: the time event data to remove
+ *
+ * This function is mostly internal; it is made available here only
+ * for firmware restart purposes.
+ */
+void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
+			   struct iwl_mvm_time_event_data *te_data);
+
+void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm);
+void iwl_mvm_roc_done_wk(struct work_struct *wk);
+
+/**
+ * iwl_mvm_schedule_csa_period - request channel switch absence period
+ * @mvm: the mvm component
+ * @vif: the virtual interface for which the channel switch is issued
+ * @duration: the duration of the NoA in TU.
+ * @apply_time: NoA start time in GP2.
+ *
+ * This function is used to schedule a NoA time event and to perform
+ * the channel switch flow.
+ */
+int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
+				struct ieee80211_vif *vif,
+				u32 duration, u32 apply_time);
+
+/**
+ * iwl_mvm_te_scheduled - check if the fw received the TE cmd
+ * @te_data: the time event data that corresponds to that time event
+ *
+ * This function returns true iff this TE is added to the fw.
+ */
+static inline bool
+iwl_mvm_te_scheduled(struct iwl_mvm_time_event_data *te_data)
+{
+	if (!te_data)
+		return false;
+
+	return !!te_data->uid;
+}
+
+#endif /* __time_event_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tof.c b/drivers/net/wireless/intel/iwlwifi/mvm/tof.c
new file mode 100644
index 0000000..2d0b8a3
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tof.c
@@ -0,0 +1,310 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "mvm.h"
+#include "fw/api/tof.h"
+
+#define IWL_MVM_TOF_RANGE_REQ_MAX_ID 256
+
+void iwl_mvm_tof_init(struct iwl_mvm *mvm)
+{
+	struct iwl_mvm_tof_data *tof_data = &mvm->tof_data;
+
+	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+		return;
+
+	memset(tof_data, 0, sizeof(*tof_data));
+
+	tof_data->tof_cfg.sub_grp_cmd_id = cpu_to_le32(TOF_CONFIG_CMD);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	if (IWL_MVM_TOF_IS_RESPONDER) {
+		tof_data->responder_cfg.sub_grp_cmd_id =
+			cpu_to_le32(TOF_RESPONDER_CONFIG_CMD);
+		tof_data->responder_cfg.sta_id = IWL_MVM_INVALID_STA;
+	}
+#endif
+
+	tof_data->range_req.sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_REQ_CMD);
+	tof_data->range_req.req_timeout = 1;
+	tof_data->range_req.initiator = 1;
+	tof_data->range_req.report_policy = 3;
+
+	tof_data->range_req_ext.sub_grp_cmd_id =
+		cpu_to_le32(TOF_RANGE_REQ_EXT_CMD);
+
+	mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+	mvm->init_status |= IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE;
+}
+
+void iwl_mvm_tof_clean(struct iwl_mvm *mvm)
+{
+	struct iwl_mvm_tof_data *tof_data = &mvm->tof_data;
+
+	if (!fw_has_capa(&mvm->fw->ucode_capa,
+			 IWL_UCODE_TLV_CAPA_TOF_SUPPORT) ||
+	    !(mvm->init_status & IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE))
+		return;
+
+	memset(tof_data, 0, sizeof(*tof_data));
+	mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+	mvm->init_status &= ~IWL_MVM_INIT_STATUS_TOF_INIT_COMPLETE;
+}
+
+static void iwl_tof_iterator(void *_data, u8 *mac,
+			     struct ieee80211_vif *vif)
+{
+	bool *enabled = _data;
+
+	/* a non-BSS vif exists */
+	if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION)
+		*enabled = false;
+}
+
+int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm)
+{
+	struct iwl_tof_config_cmd *cmd = &mvm->tof_data.tof_cfg;
+	bool enabled = true; /* cleared by the iterator on a non-BSS vif */
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+		return -EINVAL;
+
+	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+						   IEEE80211_IFACE_ITER_NORMAL,
+						   iwl_tof_iterator, &enabled);
+	if (!enabled) {
+		IWL_DEBUG_INFO(mvm, "ToF is not supported (non bss vif)\n");
+		return -EINVAL;
+	}
+
+	mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
+						    IWL_ALWAYS_LONG_GROUP, 0),
+				    0, sizeof(*cmd), cmd);
+}
+
+int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id)
+{
+	struct iwl_tof_range_abort_cmd cmd = {
+		.sub_grp_cmd_id = cpu_to_le32(TOF_RANGE_ABORT_CMD),
+		.request_id = id,
+	};
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+		return -EINVAL;
+
+	if (id != mvm->tof_data.active_range_request) {
+		IWL_ERR(mvm, "Invalid range request id %d (active %d)\n",
+			id, mvm->tof_data.active_range_request);
+		return -EINVAL;
+	}
+
+	/* after abort is sent there's no active request anymore */
+	mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+
+	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
+						    IWL_ALWAYS_LONG_GROUP, 0),
+				    0, sizeof(cmd), &cmd);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm,
+			      struct ieee80211_vif *vif)
+{
+	struct iwl_tof_responder_config_cmd *cmd = &mvm->tof_data.responder_cfg;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+		return -EINVAL;
+
+	if (vif->p2p || vif->type != NL80211_IFTYPE_AP ||
+	    !mvmvif->ap_ibss_active) {
+		IWL_ERR(mvm, "Cannot start responder, not in AP mode\n");
+		return -EIO;
+	}
+
+	cmd->sta_id = mvmvif->bcast_sta.sta_id;
+	memcpy(cmd->bssid, vif->addr, ETH_ALEN);
+	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
+						    IWL_ALWAYS_LONG_GROUP, 0),
+				    0, sizeof(*cmd), cmd);
+}
+#endif
+
+int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm,
+				  struct ieee80211_vif *vif)
+{
+	struct iwl_host_cmd cmd = {
+		.id = iwl_cmd_id(TOF_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+		.len = { sizeof(mvm->tof_data.range_req), },
+		/* no copy because of the command size */
+		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
+	};
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+		return -EINVAL;
+
+	if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
+		IWL_ERR(mvm, "Cannot send range request, not in STA mode\n");
+		return -EIO;
+	}
+
+	/* nesting of range requests is not supported in FW */
+	if (mvm->tof_data.active_range_request !=
+		IWL_MVM_TOF_RANGE_REQ_MAX_ID) {
+		IWL_ERR(mvm, "Cannot send range req, already active req %d\n",
+			mvm->tof_data.active_range_request);
+		return -EIO;
+	}
+
+	mvm->tof_data.active_range_request = mvm->tof_data.range_req.request_id;
+
+	cmd.data[0] = &mvm->tof_data.range_req;
+	return iwl_mvm_send_cmd(mvm, &cmd);
+}
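+
+/*
+ * Illustrative caller flow (simplified, error handling omitted): a ToF
+ * measurement is a three-step exchange.
+ *
+ *	iwl_mvm_tof_config_cmd(mvm);			(configure ToF)
+ *	iwl_mvm_tof_range_request_cmd(mvm, vif);	(start ranging)
+ *
+ * The fw answers asynchronously; iwl_mvm_tof_resp_handler() below then
+ * consumes the TOF_RANGE_RESPONSE_NOTIF and clears the active request.
+ */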
+
+int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm,
+				      struct ieee80211_vif *vif)
+{
+	lockdep_assert_held(&mvm->mutex);
+
+	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TOF_SUPPORT))
+		return -EINVAL;
+
+	if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
+		IWL_ERR(mvm, "Cannot send ext range req, not in STA mode\n");
+		return -EIO;
+	}
+
+	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(TOF_CMD,
+						    IWL_ALWAYS_LONG_GROUP, 0),
+				    0, sizeof(mvm->tof_data.range_req_ext),
+				    &mvm->tof_data.range_req_ext);
+}
+
+static int iwl_mvm_tof_range_resp(struct iwl_mvm *mvm, void *data)
+{
+	struct iwl_tof_range_rsp_ntfy *resp = (void *)data;
+
+	if (resp->request_id != mvm->tof_data.active_range_request) {
+		IWL_ERR(mvm, "Request id mismatch, got %d, active %d\n",
+			resp->request_id, mvm->tof_data.active_range_request);
+		return -EIO;
+	}
+
+	memcpy(&mvm->tof_data.range_resp, resp,
+	       sizeof(struct iwl_tof_range_rsp_ntfy));
+	mvm->tof_data.active_range_request = IWL_MVM_TOF_RANGE_REQ_MAX_ID;
+
+	return 0;
+}
+
+static int iwl_mvm_tof_mcsi_notif(struct iwl_mvm *mvm, void *data)
+{
+	struct iwl_tof_mcsi_notif *resp = (struct iwl_tof_mcsi_notif *)data;
+
+	IWL_DEBUG_INFO(mvm, "MCSI notification, token %d\n", resp->token);
+	return 0;
+}
+
+static int iwl_mvm_tof_nb_report_notif(struct iwl_mvm *mvm, void *data)
+{
+	struct iwl_tof_neighbor_report *report =
+		(struct iwl_tof_neighbor_report *)data;
+
+	IWL_DEBUG_INFO(mvm, "NB report, bssid %pM, token %d, status 0x%x\n",
+		       report->bssid, report->request_token, report->status);
+	return 0;
+}
+
+void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm,
+			      struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_tof_gen_resp_cmd *resp = (void *)pkt->data;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	switch (le32_to_cpu(resp->sub_grp_cmd_id)) {
+	case TOF_RANGE_RESPONSE_NOTIF:
+		iwl_mvm_tof_range_resp(mvm, resp->data);
+		break;
+	case TOF_MCSI_DEBUG_NOTIF:
+		iwl_mvm_tof_mcsi_notif(mvm, resp->data);
+		break;
+	case TOF_NEIGHBOR_REPORT_RSP_NOTIF:
+		iwl_mvm_tof_nb_report_notif(mvm, resp->data);
+		break;
+	default:
+		IWL_ERR(mvm, "Unknown sub-group command 0x%x\n",
+			le32_to_cpu(resp->sub_grp_cmd_id));
+		break;
+	}
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tof.h b/drivers/net/wireless/intel/iwlwifi/mvm/tof.h
new file mode 100644
index 0000000..2ff560a
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tof.h
@@ -0,0 +1,94 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __tof_h__
+#define __tof_h__
+
+#include "fw/api/tof.h"
+
+struct iwl_mvm_tof_data {
+	struct iwl_tof_config_cmd tof_cfg;
+	struct iwl_tof_range_req_cmd range_req;
+	struct iwl_tof_range_req_ext_cmd range_req_ext;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+	struct iwl_tof_responder_config_cmd responder_cfg;
+#endif
+	struct iwl_tof_range_rsp_ntfy range_resp;
+	u8 last_abort_id;
+	u16 active_range_request;
+};
+
+void iwl_mvm_tof_init(struct iwl_mvm *mvm);
+void iwl_mvm_tof_clean(struct iwl_mvm *mvm);
+int iwl_mvm_tof_config_cmd(struct iwl_mvm *mvm);
+int iwl_mvm_tof_range_abort_cmd(struct iwl_mvm *mvm, u8 id);
+int iwl_mvm_tof_range_request_cmd(struct iwl_mvm *mvm,
+				  struct ieee80211_vif *vif);
+void iwl_mvm_tof_resp_handler(struct iwl_mvm *mvm,
+			      struct iwl_rx_cmd_buffer *rxb);
+int iwl_mvm_tof_range_request_ext_cmd(struct iwl_mvm *mvm,
+				      struct ieee80211_vif *vif);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_mvm_tof_responder_cmd(struct iwl_mvm *mvm,
+			      struct ieee80211_vif *vif);
+#endif
+#endif /* __tof_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
new file mode 100644
index 0000000..1232f63
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -0,0 +1,907 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/sort.h>
+
+#include "mvm.h"
+
+#define IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT	HZ
+
+void iwl_mvm_enter_ctkill(struct iwl_mvm *mvm)
+{
+	struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
+	u32 duration = tt->params.ct_kill_duration;
+
+	if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
+		return;
+
+	IWL_ERR(mvm, "Enter CT Kill\n");
+	iwl_mvm_set_hw_ctkill_state(mvm, true);
+
+	if (!iwl_mvm_is_tt_in_fw(mvm)) {
+		tt->throttle = false;
+		tt->dynamic_smps = false;
+	}
+
+	/* Don't schedule an exit work if we're in test mode, since
+	 * the temperature will not change unless we manually set it
+	 * again (or disable testing).
+	 */
+	if (!mvm->temperature_test)
+		schedule_delayed_work(&tt->ct_kill_exit,
+				      round_jiffies_relative(duration * HZ));
+}
+
+static void iwl_mvm_exit_ctkill(struct iwl_mvm *mvm)
+{
+	if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
+		return;
+
+	IWL_ERR(mvm, "Exit CT Kill\n");
+	iwl_mvm_set_hw_ctkill_state(mvm, false);
+}
+
+void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp)
+{
+	/* ignore the notification if we are in test mode */
+	if (mvm->temperature_test)
+		return;
+
+	if (mvm->temperature == temp)
+		return;
+
+	mvm->temperature = temp;
+	iwl_mvm_tt_handler(mvm);
+}
+
+static int iwl_mvm_temp_notif_parse(struct iwl_mvm *mvm,
+				    struct iwl_rx_packet *pkt)
+{
+	struct iwl_dts_measurement_notif_v1 *notif_v1;
+	int len = iwl_rx_packet_payload_len(pkt);
+	int temp;
+
+	/* we can use notif_v1 only, because v2 only adds an additional
+	 * parameter, which is not used in this function.
+	 */
+	if (WARN_ON_ONCE(len < sizeof(*notif_v1))) {
+		IWL_ERR(mvm, "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
+		return -EINVAL;
+	}
+
+	notif_v1 = (void *)pkt->data;
+
+	temp = le32_to_cpu(notif_v1->temp);
+
+	/* shouldn't be negative, but since it's s32, make sure it isn't */
+	if (WARN_ON_ONCE(temp < 0))
+		temp = 0;
+
+	IWL_DEBUG_TEMP(mvm, "DTS_MEASUREMENT_NOTIFICATION - %d\n", temp);
+
+	return temp;
+}
+
+static bool iwl_mvm_temp_notif_wait(struct iwl_notif_wait_data *notif_wait,
+				    struct iwl_rx_packet *pkt, void *data)
+{
+	struct iwl_mvm *mvm =
+		container_of(notif_wait, struct iwl_mvm, notif_wait);
+	int *temp = data;
+	int ret;
+
+	ret = iwl_mvm_temp_notif_parse(mvm, pkt);
+	if (ret < 0)
+		return true;
+
+	*temp = ret;
+
+	return true;
+}
+
+void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_dts_measurement_notif_v2 *notif_v2;
+	int len = iwl_rx_packet_payload_len(pkt);
+	int temp;
+	u32 ths_crossed;
+
+	/* the notification is handled synchronously in ctkill, so skip here */
+	if (test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status))
+		return;
+
+	temp = iwl_mvm_temp_notif_parse(mvm, pkt);
+
+	if (!iwl_mvm_is_tt_in_fw(mvm)) {
+		if (temp >= 0)
+			iwl_mvm_tt_temp_changed(mvm, temp);
+		return;
+	}
+
+	if (WARN_ON_ONCE(len < sizeof(*notif_v2))) {
+		IWL_ERR(mvm, "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
+		return;
+	}
+
+	notif_v2 = (void *)pkt->data;
+	ths_crossed = le32_to_cpu(notif_v2->threshold_idx);
+
+	/* 0xFF in ths_crossed means the notification is not related
+	 * to a trip, so we can ignore it here.
+	 */
+	if (ths_crossed == 0xFF)
+		return;
+
+	IWL_DEBUG_TEMP(mvm, "Temp = %d Threshold crossed = %d\n",
+		       temp, ths_crossed);
+
+#ifdef CONFIG_THERMAL
+	if (WARN_ON(ths_crossed >= IWL_MAX_DTS_TRIPS))
+		return;
+
+	if (mvm->tz_device.tzone) {
+		struct iwl_mvm_thermal_device *tz_dev = &mvm->tz_device;
+
+		thermal_notify_framework(tz_dev->tzone,
+					 tz_dev->fw_trips_index[ths_crossed]);
+	}
+#endif /* CONFIG_THERMAL */
+}
+
+void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct ct_kill_notif *notif;
+	int len = iwl_rx_packet_payload_len(pkt);
+
+	if (WARN_ON_ONCE(len != sizeof(*notif))) {
+		IWL_ERR(mvm, "Invalid CT_KILL_NOTIFICATION\n");
+		return;
+	}
+
+	notif = (struct ct_kill_notif *)pkt->data;
+	IWL_DEBUG_TEMP(mvm, "CT Kill notification temperature = %d\n",
+		       notif->temperature);
+
+	iwl_mvm_enter_ctkill(mvm);
+}
+
+static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
+{
+	struct iwl_dts_measurement_cmd cmd = {
+		.flags = cpu_to_le32(DTS_TRIGGER_CMD_FLAGS_TEMP),
+	};
+	struct iwl_ext_dts_measurement_cmd extcmd = {
+		.control_mode = cpu_to_le32(DTS_AUTOMATIC),
+	};
+	u32 cmdid;
+
+	cmdid = iwl_cmd_id(CMD_DTS_MEASUREMENT_TRIGGER_WIDE,
+			   PHY_OPS_GROUP, 0);
+
+	if (!fw_has_capa(&mvm->fw->ucode_capa,
+			 IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE))
+		return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0, sizeof(cmd), &cmd);
+
+	return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0, sizeof(extcmd), &extcmd);
+}
+
+int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp)
+{
+	struct iwl_notification_wait wait_temp_notif;
+	static u16 temp_notif[] = { WIDE_ID(PHY_OPS_GROUP,
+					    DTS_MEASUREMENT_NOTIF_WIDE) };
+	int ret;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	iwl_init_notification_wait(&mvm->notif_wait, &wait_temp_notif,
+				   temp_notif, ARRAY_SIZE(temp_notif),
+				   iwl_mvm_temp_notif_wait, temp);
+
+	ret = iwl_mvm_get_temp_cmd(mvm);
+	if (ret) {
+		IWL_ERR(mvm, "Failed to get the temperature (err=%d)\n", ret);
+		iwl_remove_notification(&mvm->notif_wait, &wait_temp_notif);
+		return ret;
+	}
+
+	ret = iwl_wait_notification(&mvm->notif_wait, &wait_temp_notif,
+				    IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT);
+	if (ret)
+		IWL_ERR(mvm, "Getting the temperature timed out\n");
+
+	return ret;
+}
+
+static void check_exit_ctkill(struct work_struct *work)
+{
+	struct iwl_mvm_tt_mgmt *tt;
+	struct iwl_mvm *mvm;
+	u32 duration;
+	s32 temp;
+	int ret;
+
+	tt = container_of(work, struct iwl_mvm_tt_mgmt, ct_kill_exit.work);
+	mvm = container_of(tt, struct iwl_mvm, thermal_throttle);
+
+	if (iwl_mvm_is_tt_in_fw(mvm)) {
+		iwl_mvm_exit_ctkill(mvm);
+
+		return;
+	}
+
+	duration = tt->params.ct_kill_duration;
+
+	mutex_lock(&mvm->mutex);
+
+	if (__iwl_mvm_mac_start(mvm))
+		goto reschedule;
+
+	/* make sure the device is available for direct read/writes */
+	if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_CHECK_CTKILL)) {
+		__iwl_mvm_mac_stop(mvm);
+		goto reschedule;
+	}
+
+	ret = iwl_mvm_get_temp(mvm, &temp);
+
+	iwl_mvm_unref(mvm, IWL_MVM_REF_CHECK_CTKILL);
+
+	__iwl_mvm_mac_stop(mvm);
+
+	if (ret)
+		goto reschedule;
+
+	IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", temp);
+
+	if (temp <= tt->params.ct_kill_exit) {
+		mutex_unlock(&mvm->mutex);
+		iwl_mvm_exit_ctkill(mvm);
+		return;
+	}
+
+reschedule:
+	mutex_unlock(&mvm->mutex);
+	schedule_delayed_work(&mvm->thermal_throttle.ct_kill_exit,
+			      round_jiffies(duration * HZ));
+}
+
+static void iwl_mvm_tt_smps_iterator(void *_data, u8 *mac,
+				     struct ieee80211_vif *vif)
+{
+	struct iwl_mvm *mvm = _data;
+	enum ieee80211_smps_mode smps_mode;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (mvm->thermal_throttle.dynamic_smps)
+		smps_mode = IEEE80211_SMPS_DYNAMIC;
+	else
+		smps_mode = IEEE80211_SMPS_AUTOMATIC;
+
+	if (vif->type != NL80211_IFTYPE_STATION)
+		return;
+
+	iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT, smps_mode);
+}
+
+static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable)
+{
+	struct iwl_mvm_sta *mvmsta;
+	int i, err;
+
+	for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+		mvmsta = iwl_mvm_sta_from_staid_protected(mvm, i);
+		if (!mvmsta)
+			continue;
+
+		if (enable == mvmsta->tt_tx_protection)
+			continue;
+		err = iwl_mvm_tx_protection(mvm, mvmsta, enable);
+		if (err) {
+			IWL_ERR(mvm, "Failed to %s Tx protection\n",
+				enable ? "enable" : "disable");
+		} else {
+			IWL_DEBUG_TEMP(mvm, "%s Tx protection\n",
+				       enable ? "Enable" : "Disable");
+			mvmsta->tt_tx_protection = enable;
+		}
+	}
+}
+
+void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
+{
+	struct iwl_host_cmd cmd = {
+		.id = REPLY_THERMAL_MNG_BACKOFF,
+		.len = { sizeof(u32), },
+		.data = { &backoff, },
+	};
+
+	backoff = max(backoff, mvm->thermal_throttle.min_backoff);
+
+	if (iwl_mvm_send_cmd(mvm, &cmd) == 0) {
+		IWL_DEBUG_TEMP(mvm, "Set Thermal Tx backoff to: %u\n",
+			       backoff);
+		mvm->thermal_throttle.tx_backoff = backoff;
+	} else {
+		IWL_ERR(mvm, "Failed to change Thermal Tx backoff\n");
+	}
+}
+
+void iwl_mvm_tt_handler(struct iwl_mvm *mvm)
+{
+	struct iwl_tt_params *params = &mvm->thermal_throttle.params;
+	struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
+	s32 temperature = mvm->temperature;
+	bool throttle_enable = false;
+	int i;
+	u32 tx_backoff;
+
+	IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", mvm->temperature);
+
+	if (params->support_ct_kill && temperature >= params->ct_kill_entry) {
+		iwl_mvm_enter_ctkill(mvm);
+		return;
+	}
+
+	if (params->support_ct_kill &&
+	    temperature <= params->ct_kill_exit) {
+		iwl_mvm_exit_ctkill(mvm);
+		return;
+	}
+
+	if (params->support_dynamic_smps) {
+		if (!tt->dynamic_smps &&
+		    temperature >= params->dynamic_smps_entry) {
+			IWL_DEBUG_TEMP(mvm, "Enable dynamic SMPS\n");
+			tt->dynamic_smps = true;
+			ieee80211_iterate_active_interfaces_atomic(
+					mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+					iwl_mvm_tt_smps_iterator, mvm);
+			throttle_enable = true;
+		} else if (tt->dynamic_smps &&
+			   temperature <= params->dynamic_smps_exit) {
+			IWL_DEBUG_TEMP(mvm, "Disable dynamic SMPS\n");
+			tt->dynamic_smps = false;
+			ieee80211_iterate_active_interfaces_atomic(
+					mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+					iwl_mvm_tt_smps_iterator, mvm);
+		}
+	}
+
+	if (params->support_tx_protection) {
+		if (temperature >= params->tx_protection_entry) {
+			iwl_mvm_tt_tx_protection(mvm, true);
+			throttle_enable = true;
+		} else if (temperature <= params->tx_protection_exit) {
+			iwl_mvm_tt_tx_protection(mvm, false);
+		}
+	}
+
+	if (params->support_tx_backoff) {
+		tx_backoff = tt->min_backoff;
+		for (i = 0; i < TT_TX_BACKOFF_SIZE; i++) {
+			if (temperature < params->tx_backoff[i].temperature)
+				break;
+			tx_backoff = max(tt->min_backoff,
+					 params->tx_backoff[i].backoff);
+		}
+		if (tx_backoff != tt->min_backoff)
+			throttle_enable = true;
+		if (tt->tx_backoff != tx_backoff)
+			iwl_mvm_tt_tx_backoff(mvm, tx_backoff);
+	}
+
+	if (!tt->throttle && throttle_enable) {
+		IWL_WARN(mvm,
+			 "Due to high temperature thermal throttling initiated\n");
+		tt->throttle = true;
+	} else if (tt->throttle && !tt->dynamic_smps &&
+		   tt->tx_backoff == tt->min_backoff &&
+		   temperature <= params->tx_protection_exit) {
+		IWL_WARN(mvm,
+			 "Temperature is back to normal thermal throttling stopped\n");
+		tt->throttle = false;
+	}
+}
+
+static const struct iwl_tt_params iwl_mvm_default_tt_params = {
+	.ct_kill_entry = 118,
+	.ct_kill_exit = 96,
+	.ct_kill_duration = 5,
+	.dynamic_smps_entry = 114,
+	.dynamic_smps_exit = 110,
+	.tx_protection_entry = 114,
+	.tx_protection_exit = 108,
+	.tx_backoff = {
+		{.temperature = 112, .backoff = 200},
+		{.temperature = 113, .backoff = 600},
+		{.temperature = 114, .backoff = 1200},
+		{.temperature = 115, .backoff = 2000},
+		{.temperature = 116, .backoff = 4000},
+		{.temperature = 117, .backoff = 10000},
+	},
+	.support_ct_kill = true,
+	.support_dynamic_smps = true,
+	.support_tx_protection = true,
+	.support_tx_backoff = true,
+};
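+
+/*
+ * Reading the default table above (illustrative): at 114 C the driver
+ * enables dynamic SMPS and Tx protection and applies a Tx backoff value
+ * of 1200; the throttling is undone below 110 C / 108 C respectively,
+ * and CT-kill, entered at 118 C, is only left once the NIC cools to 96 C.
+ */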
+
+/* budgets in mW */
+static const u32 iwl_mvm_cdev_budgets[] = {
+	2000,	/* cooling state 0 */
+	1800,	/* cooling state 1 */
+	1600,	/* cooling state 2 */
+	1400,	/* cooling state 3 */
+	1200,	/* cooling state 4 */
+	1000,	/* cooling state 5 */
+	900,	/* cooling state 6 */
+	800,	/* cooling state 7 */
+	700,	/* cooling state 8 */
+	650,	/* cooling state 9 */
+	600,	/* cooling state 10 */
+	550,	/* cooling state 11 */
+	500,	/* cooling state 12 */
+	450,	/* cooling state 13 */
+	400,	/* cooling state 14 */
+	350,	/* cooling state 15 */
+	300,	/* cooling state 16 */
+	250,	/* cooling state 17 */
+	200,	/* cooling state 18 */
+	150,	/* cooling state 19 */
+};
+
+int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 state)
+{
+	struct iwl_mvm_ctdp_cmd cmd = {
+		.operation = cpu_to_le32(op),
+		.budget = cpu_to_le32(iwl_mvm_cdev_budgets[state]),
+		.window_size = 0,
+	};
+	int ret;
+	u32 status;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	status = 0;
+	ret = iwl_mvm_send_cmd_pdu_status(mvm, WIDE_ID(PHY_OPS_GROUP,
+						       CTDP_CONFIG_CMD),
+					  sizeof(cmd), &cmd, &status);
+
+	if (ret) {
+		IWL_ERR(mvm, "cTDP command failed (err=%d)\n", ret);
+		return ret;
+	}
+
+	switch (op) {
+	case CTDP_CMD_OPERATION_START:
+#ifdef CONFIG_THERMAL
+		mvm->cooling_dev.cur_state = state;
+#endif /* CONFIG_THERMAL */
+		break;
+	case CTDP_CMD_OPERATION_REPORT:
+		IWL_DEBUG_TEMP(mvm, "cTDP avg energy in mWatt = %d\n", status);
+		/* when the function is called with CTDP_CMD_OPERATION_REPORT
+		 * option the function should return the average budget value
+		 * that is received from the FW.
+		 * The budget can't be less or equal to 0, so it's possible
+		 * to distinguish between error values and budgets.
+		 */
+		return status;
+	case CTDP_CMD_OPERATION_STOP:
+		IWL_DEBUG_TEMP(mvm, "cTDP stopped successfully\n");
+		break;
+	}
+
+	return 0;
+}
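+
+/*
+ * Usage sketch (illustrative): capping the average power budget at
+ * 1000 mW corresponds to cooling state 5 above, so a caller holding
+ * mvm->mutex could do
+ *
+ *	ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START, 5);
+ *
+ * while iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_REPORT, 0)
+ * returns the average budget reported by the fw instead of 0.
+ */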
+
+#ifdef CONFIG_THERMAL
+static int compare_temps(const void *a, const void *b)
+{
+	return ((s16)le16_to_cpu(*(__le16 *)a) -
+		(s16)le16_to_cpu(*(__le16 *)b));
+}
+
+int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm)
+{
+	struct temp_report_ths_cmd cmd = {0};
+	int ret, i, j, idx = 0;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (!mvm->tz_device.tzone)
+		return -EINVAL;
+
+	/* The driver holds an array of temperature trips that is unsorted
+	 * and uncompressed; the FW should get it compressed and sorted
+	 */
+
+	/* compress temp_trips into the cmd array, removing uninitialized values */
+	for (i = 0; i < IWL_MAX_DTS_TRIPS; i++) {
+		if (mvm->tz_device.temp_trips[i] != S16_MIN) {
+			cmd.thresholds[idx++] =
+				cpu_to_le16(mvm->tz_device.temp_trips[i]);
+		}
+	}
+	cmd.num_temps = cpu_to_le32(idx);
+
+	if (!idx)
+		goto send;
+
+	/* sort the cmd array */
+	sort(cmd.thresholds, idx, sizeof(s16), compare_temps, NULL);
+
+	/* we should save the indexes of the trips because we sort
+	 * and compress the original array
+	 */
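+	/*
+	 * Illustrative example (values assumed): with temp_trips =
+	 * {S16_MIN, 85, 70}, the compressed and sorted thresholds are
+	 * {70, 85}, so fw_trips_index becomes {2, 1}: fw index 0 maps
+	 * back to driver trip 2 and fw index 1 to driver trip 1.
+	 */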
+	for (i = 0; i < idx; i++) {
+		for (j = 0; j < IWL_MAX_DTS_TRIPS; j++) {
+			if (le16_to_cpu(cmd.thresholds[i]) ==
+				mvm->tz_device.temp_trips[j])
+				mvm->tz_device.fw_trips_index[i] = j;
+		}
+	}
+
+send:
+	ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP,
+						TEMP_REPORTING_THRESHOLDS_CMD),
+				   0, sizeof(cmd), &cmd);
+	if (ret)
+		IWL_ERR(mvm, "TEMP_REPORT_THS_CMD command failed (err=%d)\n",
+			ret);
+
+	return ret;
+}
+
+static int iwl_mvm_tzone_get_temp(struct thermal_zone_device *device,
+				  int *temperature)
+{
+	struct iwl_mvm *mvm = (struct iwl_mvm *)device->devdata;
+	int ret;
+	int temp;
+
+	mutex_lock(&mvm->mutex);
+
+	if (!iwl_mvm_firmware_running(mvm) ||
+	    mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
+		ret = -ENODATA;
+		goto out;
+	}
+
+	ret = iwl_mvm_get_temp(mvm, &temp);
+	if (ret)
+		goto out;
+
+	*temperature = temp * 1000;
+
+out:
+	mutex_unlock(&mvm->mutex);
+	return ret;
+}
+
+static int iwl_mvm_tzone_get_trip_temp(struct thermal_zone_device *device,
+				       int trip, int *temp)
+{
+	struct iwl_mvm *mvm = (struct iwl_mvm *)device->devdata;
+
+	if (trip < 0 || trip >= IWL_MAX_DTS_TRIPS)
+		return -EINVAL;
+
+	*temp = mvm->tz_device.temp_trips[trip] * 1000;
+
+	return 0;
+}
+
+static int iwl_mvm_tzone_get_trip_type(struct thermal_zone_device *device,
+				       int trip, enum thermal_trip_type *type)
+{
+	if (trip < 0 || trip >= IWL_MAX_DTS_TRIPS)
+		return -EINVAL;
+
+	*type = THERMAL_TRIP_PASSIVE;
+
+	return 0;
+}
+
+static int iwl_mvm_tzone_set_trip_temp(struct thermal_zone_device *device,
+				       int trip, int temp)
+{
+	struct iwl_mvm *mvm = (struct iwl_mvm *)device->devdata;
+	struct iwl_mvm_thermal_device *tzone;
+	int i, ret;
+	s16 temperature;
+
+	mutex_lock(&mvm->mutex);
+
+	if (!iwl_mvm_firmware_running(mvm) ||
+	    mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
+		ret = -EIO;
+		goto out;
+	}
+
+	if (trip < 0 || trip >= IWL_MAX_DTS_TRIPS) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if ((temp / 1000) > S16_MAX) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	temperature = (s16)(temp / 1000);
+	tzone = &mvm->tz_device;
+
+	if (!tzone) {
+		ret = -EIO;
+		goto out;
+	}
+
+	/* no updates */
+	if (tzone->temp_trips[trip] == temperature) {
+		ret = 0;
+		goto out;
+	}
+
+	/* already existing temperature */
+	for (i = 0; i < IWL_MAX_DTS_TRIPS; i++) {
+		if (tzone->temp_trips[i] == temperature) {
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	tzone->temp_trips[trip] = temperature;
+
+	ret = iwl_mvm_send_temp_report_ths_cmd(mvm);
+out:
+	mutex_unlock(&mvm->mutex);
+	return ret;
+}
+
+static struct thermal_zone_device_ops tzone_ops = {
+	.get_temp = iwl_mvm_tzone_get_temp,
+	.get_trip_temp = iwl_mvm_tzone_get_trip_temp,
+	.get_trip_type = iwl_mvm_tzone_get_trip_type,
+	.set_trip_temp = iwl_mvm_tzone_set_trip_temp,
+};
+
+/* make all trips writable */
+#define IWL_WRITABLE_TRIPS_MSK (BIT(IWL_MAX_DTS_TRIPS) - 1)
+
+static void iwl_mvm_thermal_zone_register(struct iwl_mvm *mvm)
+{
+	int i;
+	char name[] = "iwlwifi";
+
+	if (!iwl_mvm_is_tt_in_fw(mvm)) {
+		mvm->tz_device.tzone = NULL;
+
+		return;
+	}
+
+	BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH);
+
+	mvm->tz_device.tzone = thermal_zone_device_register(name,
+							IWL_MAX_DTS_TRIPS,
+							IWL_WRITABLE_TRIPS_MSK,
+							mvm, &tzone_ops,
+							NULL, 0, 0);
+	if (IS_ERR(mvm->tz_device.tzone)) {
+		IWL_DEBUG_TEMP(mvm,
+			       "Failed to register to thermal zone (err = %ld)\n",
+			       PTR_ERR(mvm->tz_device.tzone));
+		mvm->tz_device.tzone = NULL;
+		return;
+	}
+
+	/* 0 is a valid temperature,
+	 * so initialize the array with S16_MIN, which is an invalid
+	 * temperature
+	 */
+	for (i = 0; i < IWL_MAX_DTS_TRIPS; i++)
+		mvm->tz_device.temp_trips[i] = S16_MIN;
+}
+
+static int iwl_mvm_tcool_get_max_state(struct thermal_cooling_device *cdev,
+				       unsigned long *state)
+{
+	*state = ARRAY_SIZE(iwl_mvm_cdev_budgets) - 1;
+
+	return 0;
+}
+
+static int iwl_mvm_tcool_get_cur_state(struct thermal_cooling_device *cdev,
+				       unsigned long *state)
+{
+	struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata);
+
+	*state = mvm->cooling_dev.cur_state;
+
+	return 0;
+}
+
+static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev,
+				       unsigned long new_state)
+{
+	struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata);
+	int ret;
+
+	mutex_lock(&mvm->mutex);
+
+	if (!iwl_mvm_firmware_running(mvm) ||
+	    mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
+		ret = -EIO;
+		goto unlock;
+	}
+
+	if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) {
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
+				   new_state);
+
+unlock:
+	mutex_unlock(&mvm->mutex);
+	return ret;
+}
+
+static const struct thermal_cooling_device_ops tcooling_ops = {
+	.get_max_state = iwl_mvm_tcool_get_max_state,
+	.get_cur_state = iwl_mvm_tcool_get_cur_state,
+	.set_cur_state = iwl_mvm_tcool_set_cur_state,
+};
+
+static void iwl_mvm_cooling_device_register(struct iwl_mvm *mvm)
+{
+	char name[] = "iwlwifi";
+
+	if (!iwl_mvm_is_ctdp_supported(mvm))
+		return;
+
+	BUILD_BUG_ON(ARRAY_SIZE(name) >= THERMAL_NAME_LENGTH);
+
+	mvm->cooling_dev.cdev =
+		thermal_cooling_device_register(name,
+						mvm,
+						&tcooling_ops);
+
+	if (IS_ERR(mvm->cooling_dev.cdev)) {
+		IWL_DEBUG_TEMP(mvm,
+			       "Failed to register to cooling device (err = %ld)\n",
+			       PTR_ERR(mvm->cooling_dev.cdev));
+		mvm->cooling_dev.cdev = NULL;
+		return;
+	}
+}
+
+static void iwl_mvm_thermal_zone_unregister(struct iwl_mvm *mvm)
+{
+	if (!iwl_mvm_is_tt_in_fw(mvm) || !mvm->tz_device.tzone)
+		return;
+
+	IWL_DEBUG_TEMP(mvm, "Thermal zone device unregister\n");
+	if (mvm->tz_device.tzone) {
+		thermal_zone_device_unregister(mvm->tz_device.tzone);
+		mvm->tz_device.tzone = NULL;
+	}
+}
+
+static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
+{
+	if (!iwl_mvm_is_ctdp_supported(mvm) || !mvm->cooling_dev.cdev)
+		return;
+
+	IWL_DEBUG_TEMP(mvm, "Cooling device unregister\n");
+	if (mvm->cooling_dev.cdev) {
+		thermal_cooling_device_unregister(mvm->cooling_dev.cdev);
+		mvm->cooling_dev.cdev = NULL;
+	}
+}
+#endif /* CONFIG_THERMAL */
+
+void iwl_mvm_thermal_initialize(struct iwl_mvm *mvm, u32 min_backoff)
+{
+	struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
+
+	IWL_DEBUG_TEMP(mvm, "Initialize Thermal Throttling\n");
+
+	if (mvm->cfg->thermal_params)
+		tt->params = *mvm->cfg->thermal_params;
+	else
+		tt->params = iwl_mvm_default_tt_params;
+
+	tt->throttle = false;
+	tt->dynamic_smps = false;
+	tt->min_backoff = min_backoff;
+	INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill);
+
+#ifdef CONFIG_THERMAL
+	iwl_mvm_cooling_device_register(mvm);
+	iwl_mvm_thermal_zone_register(mvm);
+#endif
+	mvm->init_status |= IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE;
+}
+
+void iwl_mvm_thermal_exit(struct iwl_mvm *mvm)
+{
+	if (!(mvm->init_status & IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE))
+		return;
+
+	cancel_delayed_work_sync(&mvm->thermal_throttle.ct_kill_exit);
+	IWL_DEBUG_TEMP(mvm, "Exit Thermal Throttling\n");
+
+#ifdef CONFIG_THERMAL
+	iwl_mvm_cooling_device_unregister(mvm);
+	iwl_mvm_thermal_zone_unregister(mvm);
+#endif
+	mvm->init_status &= ~IWL_MVM_INIT_STATUS_THERMAL_INIT_COMPLETE;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
new file mode 100644
index 0000000..2d21f0a
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -0,0 +1,1978 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/ieee80211.h>
+#include <linux/etherdevice.h>
+#include <linux/tcp.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+#include "iwl-trans.h"
+#include "iwl-eeprom-parse.h"
+#include "mvm.h"
+#include "sta.h"
+
+static void
+iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
+			  u16 tid, u16 ssn)
+{
+	struct iwl_fw_dbg_trigger_tlv *trig;
+	struct iwl_fw_dbg_trigger_ba *ba_trig;
+
+	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+		return;
+
+	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
+	ba_trig = (void *)trig->data;
+
+	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
+		return;
+
+	if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
+		return;
+
+	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
+				"BAR sent to %pM, tid %d, ssn %d",
+				addr, tid, ssn);
+}
+
+#define OPT_HDR(type, skb, off) \
+	(type *)(skb_network_header(skb) + (off))
+
+static u16 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
+			   struct ieee80211_hdr *hdr,
+			   struct ieee80211_tx_info *info,
+			   u16 offload_assist)
+{
+#if IS_ENABLED(CONFIG_INET)
+	u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
+	u8 protocol = 0;
+
+	/*
+	 * Do not compute checksum if already computed or if transport will
+	 * compute it
+	 */
+	if (skb->ip_summed != CHECKSUM_PARTIAL || IWL_MVM_SW_TX_CSUM_OFFLOAD)
+		goto out;
+
+	/* We do not expect to be requested to csum stuff we do not support */
+	if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) ||
+		      (skb->protocol != htons(ETH_P_IP) &&
+		       skb->protocol != htons(ETH_P_IPV6)),
+		      "No support for requested checksum\n")) {
+		skb_checksum_help(skb);
+		goto out;
+	}
+
+	if (skb->protocol == htons(ETH_P_IP)) {
+		protocol = ip_hdr(skb)->protocol;
+	} else {
+#if IS_ENABLED(CONFIG_IPV6)
+		struct ipv6hdr *ipv6h =
+			(struct ipv6hdr *)skb_network_header(skb);
+		unsigned int off = sizeof(*ipv6h);
+
+		protocol = ipv6h->nexthdr;
+		while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
+			struct ipv6_opt_hdr *hp;
+
+			/* only supported extension headers */
+			if (protocol != NEXTHDR_ROUTING &&
+			    protocol != NEXTHDR_HOP &&
+			    protocol != NEXTHDR_DEST) {
+				skb_checksum_help(skb);
+				goto out;
+			}
+
+			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
+			protocol = hp->nexthdr;
+			off += ipv6_optlen(hp);
+		}
+		/* if we get here - protocol now should be TCP/UDP */
+#endif
+	}
+
+	if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
+		WARN_ON_ONCE(1);
+		skb_checksum_help(skb);
+		goto out;
+	}
+
+	/* enable L4 csum */
+	offload_assist |= BIT(TX_CMD_OFFLD_L4_EN);
+
+	/*
+	 * Set the offset to the IP header: the 8-byte SNAP header that
+	 * precedes it, i.e. 4 words. We don't support tunneling, so there
+	 * is no inner header to take care of. The size is in 16-bit words.
+	 */
+	offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);
+
+	/* Do IPv4 csum for A-MSDU only (there is no IP csum for IPv6) */
+	if (skb->protocol == htons(ETH_P_IP) &&
+	    (offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) {
+		ip_hdr(skb)->check = 0;
+		offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
+	}
+
+	/* reset UDP/TCP header csum */
+	if (protocol == IPPROTO_TCP)
+		tcp_hdr(skb)->check = 0;
+	else
+		udp_hdr(skb)->check = 0;
+
+	/*
+	 * mac header len should include IV, size is in words unless
+	 * the IV is added by the firmware like in WEP.
+	 * In new Tx API, the IV is always added by the firmware.
+	 */
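+	/* e.g. a 26-byte QoS data header plus an 8-byte CCMP IV gives
+	 * mh_len = 34 bytes, i.e. 17 of the 16-bit words stored in the
+	 * MH_SIZE field below
+	 */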
+	if (!iwl_mvm_has_new_tx_api(mvm) && info->control.hw_key &&
+	    info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP40 &&
+	    info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP104)
+		mh_len += info->control.hw_key->iv_len;
+	mh_len /= 2;
+	offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;
+
+out:
+#endif
+	return offload_assist;
+}
+
+/*
+ * Sets most of the Tx cmd's fields
+ */
+void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
+			struct iwl_tx_cmd *tx_cmd,
+			struct ieee80211_tx_info *info, u8 sta_id)
+{
+	struct ieee80211_hdr *hdr = (void *)skb->data;
+	__le16 fc = hdr->frame_control;
+	u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
+	u32 len = skb->len + FCS_LEN;
+	u16 offload_assist = 0;
+	u8 ac;
+
+	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+		tx_flags |= TX_CMD_FLG_ACK;
+	else
+		tx_flags &= ~TX_CMD_FLG_ACK;
+
+	if (ieee80211_is_probe_resp(fc))
+		tx_flags |= TX_CMD_FLG_TSF;
+
+	if (ieee80211_has_morefrags(fc))
+		tx_flags |= TX_CMD_FLG_MORE_FRAG;
+
+	if (ieee80211_is_data_qos(fc)) {
+		u8 *qc = ieee80211_get_qos_ctl(hdr);
+		tx_cmd->tid_tspec = qc[0] & 0xf;
+		tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
+		if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
+			offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
+	} else if (ieee80211_is_back_req(fc)) {
+		struct ieee80211_bar *bar = (void *)skb->data;
+		u16 control = le16_to_cpu(bar->control);
+		u16 ssn = le16_to_cpu(bar->start_seq_num);
+
+		tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
+		tx_cmd->tid_tspec = (control &
+				     IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
+			IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
+		WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
+		iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec,
+					  ssn);
+	} else {
+		tx_cmd->tid_tspec = IWL_TID_NON_QOS;
+		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
+			tx_flags |= TX_CMD_FLG_SEQ_CTL;
+		else
+			tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
+	}
+
+	/* Default to 0 (BE) when tid_tspec is set to IWL_TID_NON_QOS */
+	if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT)
+		ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
+	else
+		ac = tid_to_mac80211_ac[0];
+
+	tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
+			TX_CMD_FLG_BT_PRIO_POS;
+
+	if (ieee80211_is_mgmt(fc)) {
+		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
+			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC);
+		else if (ieee80211_is_action(fc))
+			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
+		else
+			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
+
+		/* The spec allows Action frames in A-MPDU, but we don't
+		 * support that
+		 */
+		WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
+	} else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
+		tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
+	} else {
+		tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
+	}
+
+	if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
+	    !is_multicast_ether_addr(ieee80211_get_DA(hdr)))
+		tx_flags |= TX_CMD_FLG_PROT_REQUIRE;
+
+	if (fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) &&
+	    ieee80211_action_contains_tpc(skb))
+		tx_flags |= TX_CMD_FLG_WRITE_TX_POWER;
+
+	tx_cmd->tx_flags = cpu_to_le32(tx_flags);
+	/* Total # bytes to be transmitted - PCIe code will adjust for A-MSDU */
+	tx_cmd->len = cpu_to_le16((u16)skb->len);
+	tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
+	tx_cmd->sta_id = sta_id;
+
+	/* padding is inserted later in transport */
+	if (ieee80211_hdrlen(fc) % 4 &&
+	    !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
+		offload_assist |= BIT(TX_CMD_OFFLD_PAD);
+
+	tx_cmd->offload_assist |=
+		cpu_to_le16(iwl_mvm_tx_csum(mvm, skb, hdr, info,
+					    offload_assist));
+}
+
+static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
+			       struct ieee80211_tx_info *info,
+			       struct ieee80211_sta *sta)
+{
+	int rate_idx;
+	u8 rate_plcp;
+	u32 rate_flags;
+
+	/* HT rate doesn't make sense for a non data frame */
+	WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS,
+		  "Got an HT rate (flags:0x%x/mcs:%d) for a non data frame\n",
+		  info->control.rates[0].flags,
+		  info->control.rates[0].idx);
+
+	rate_idx = info->control.rates[0].idx;
+	/* if the rate isn't a well known legacy rate, take the lowest one */
+	if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
+		rate_idx = rate_lowest_index(
+				&mvm->nvm_data->bands[info->band], sta);
+
+	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
+	if (info->band == NL80211_BAND_5GHZ)
+		rate_idx += IWL_FIRST_OFDM_RATE;
+
+	/* For 2.4 GHZ band, check that there is no need to remap */
+	BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
+
+	/* Get PLCP rate for tx_cmd->rate_n_flags */
+	rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);
+
+	if (info->band == NL80211_BAND_2GHZ &&
+	    !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
+		rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
+	else
+		rate_flags =
+			BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
+
+	/* Set CCK flag as needed */
+	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
+		rate_flags |= RATE_MCS_CCK_MSK;
+
+	return (u32)rate_plcp | rate_flags;
+}
+
+/*
+ * Sets the fields in the Tx cmd that are rate related
+ */
+void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
+			    struct ieee80211_tx_info *info,
+			    struct ieee80211_sta *sta, __le16 fc)
+{
+	/* Set retry limit on RTS packets */
+	tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT;
+
+	/* Set retry limit on DATA packets and Probe Responses */
+	if (ieee80211_is_probe_resp(fc)) {
+		tx_cmd->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT;
+		tx_cmd->rts_retry_limit =
+			min(tx_cmd->data_retry_limit, tx_cmd->rts_retry_limit);
+	} else if (ieee80211_is_back_req(fc)) {
+		tx_cmd->data_retry_limit = IWL_BAR_DFAULT_RETRY_LIMIT;
+	} else {
+		tx_cmd->data_retry_limit = IWL_DEFAULT_TX_RETRY;
+	}
+
+	/*
+	 * for data packets, rate info comes from the table inside the fw. This
+	 * table is controlled by LINK_QUALITY commands
+	 */
+
+	if (ieee80211_is_data(fc) && sta) {
+		tx_cmd->initial_rate_index = 0;
+		tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
+		return;
+	} else if (ieee80211_is_back_req(fc)) {
+		tx_cmd->tx_flags |=
+			cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
+	}
+
+	mvm->mgmt_last_antenna_idx =
+		iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
+				     mvm->mgmt_last_antenna_idx);
+
+	/* Set the rate in the TX cmd */
+	tx_cmd->rate_n_flags = cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta));
+}
+
+static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info,
+					 u8 *crypto_hdr)
+{
+	struct ieee80211_key_conf *keyconf = info->control.hw_key;
+	u64 pn;
+
+	pn = atomic64_inc_return(&keyconf->tx_pn);
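+	/* IEEE 802.11 CCMP/GCMP header layout: bytes 0-1 carry PN0-PN1,
+	 * byte 2 is reserved, byte 3 holds the Ext IV bit (0x20) and the
+	 * key index in bits 6-7, and bytes 4-7 carry PN2-PN5
+	 */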
+	crypto_hdr[0] = pn;
+	crypto_hdr[2] = 0;
+	crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
+	crypto_hdr[1] = pn >> 8;
+	crypto_hdr[4] = pn >> 16;
+	crypto_hdr[5] = pn >> 24;
+	crypto_hdr[6] = pn >> 32;
+	crypto_hdr[7] = pn >> 40;
+}
+
+/*
+ * Sets the fields in the Tx cmd that are crypto related
+ */
+static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
+				      struct ieee80211_tx_info *info,
+				      struct iwl_tx_cmd *tx_cmd,
+				      struct sk_buff *skb_frag,
+				      int hdrlen)
+{
+	struct ieee80211_key_conf *keyconf = info->control.hw_key;
+	u8 *crypto_hdr = skb_frag->data + hdrlen;
+	enum iwl_tx_cmd_sec_ctrl type = TX_CMD_SEC_CCM;
+	u64 pn;
+
+	switch (keyconf->cipher) {
+	case WLAN_CIPHER_SUITE_CCMP:
+		iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
+		iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
+		break;
+
+	case WLAN_CIPHER_SUITE_TKIP:
+		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
+		pn = atomic64_inc_return(&keyconf->tx_pn);
+		ieee80211_tkip_add_iv(crypto_hdr, keyconf, pn);
+		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
+		break;
+
+	case WLAN_CIPHER_SUITE_WEP104:
+		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
+		/* fall through */
+	case WLAN_CIPHER_SUITE_WEP40:
+		tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
+			((keyconf->keyidx << TX_CMD_SEC_WEP_KEY_IDX_POS) &
+			  TX_CMD_SEC_WEP_KEY_IDX_MSK);
+
+		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
+		break;
+	case WLAN_CIPHER_SUITE_GCMP:
+	case WLAN_CIPHER_SUITE_GCMP_256:
+		type = TX_CMD_SEC_GCMP;
+		/* Fall through */
+	case WLAN_CIPHER_SUITE_CCMP_256:
+		/* TODO: Taking the key from the table might introduce a race
+		 * when PTK rekeying is done, having an old packets with a PN
+		 * based on the old key but the message encrypted with a new
+		 * one.
+		 * Need to handle this.
+		 */
+		tx_cmd->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE;
+		tx_cmd->key[0] = keyconf->hw_key_idx;
+		iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
+		break;
+	default:
+		tx_cmd->sec_ctl |= TX_CMD_SEC_EXT;
+	}
+}
+
+/*
+ * Allocates the Tx cmd and fills its fields for the given skb; the
+ * returned command is later stored in the skb's driver data
+ */
+static struct iwl_device_cmd *
+iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
+		      struct ieee80211_tx_info *info, int hdrlen,
+		      struct ieee80211_sta *sta, u8 sta_id)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct iwl_device_cmd *dev_cmd;
+	struct iwl_tx_cmd *tx_cmd;
+
+	dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);
+
+	if (unlikely(!dev_cmd))
+		return NULL;
+
+	/* Make sure we zero enough of dev_cmd */
+	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) > sizeof(*tx_cmd));
+	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) > sizeof(*tx_cmd));
+
+	memset(dev_cmd, 0, sizeof(dev_cmd->hdr) + sizeof(*tx_cmd));
+	dev_cmd->hdr.cmd = TX_CMD;
+
+	if (iwl_mvm_has_new_tx_api(mvm)) {
+		u16 offload_assist = 0;
+		u32 rate_n_flags = 0;
+		u16 flags = 0;
+
+		if (ieee80211_is_data_qos(hdr->frame_control)) {
+			u8 *qc = ieee80211_get_qos_ctl(hdr);
+
+			if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
+				offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
+		}
+
+		offload_assist = iwl_mvm_tx_csum(mvm, skb, hdr, info,
+						 offload_assist);
+
+		/* padding is inserted later in transport */
+		if (ieee80211_hdrlen(hdr->frame_control) % 4 &&
+		    !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
+			offload_assist |= BIT(TX_CMD_OFFLD_PAD);
+
+		if (!info->control.hw_key)
+			flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
+
+		/* For data packets rate info comes from the fw */
+		if (!(ieee80211_is_data(hdr->frame_control) && sta)) {
+			flags |= IWL_TX_FLAGS_CMD_RATE;
+			rate_n_flags = iwl_mvm_get_tx_rate(mvm, info, sta);
+		}
+
+		if (mvm->trans->cfg->device_family >=
+		    IWL_DEVICE_FAMILY_22560) {
+			struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload;
+
+			cmd->offload_assist |= cpu_to_le32(offload_assist);
+
+			/* Total # bytes to be transmitted */
+			cmd->len = cpu_to_le16((u16)skb->len);
+
+			/* Copy MAC header from skb into command buffer */
+			memcpy(cmd->hdr, hdr, hdrlen);
+
+			cmd->flags = cpu_to_le16(flags);
+			cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
+		} else {
+			struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;
+
+			cmd->offload_assist |= cpu_to_le16(offload_assist);
+
+			/* Total # bytes to be transmitted */
+			cmd->len = cpu_to_le16((u16)skb->len);
+
+			/* Copy MAC header from skb into command buffer */
+			memcpy(cmd->hdr, hdr, hdrlen);
+
+			cmd->flags = cpu_to_le32(flags);
+			cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
+		}
+		goto out;
+	}
+
+	tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
+
+	if (info->control.hw_key)
+		iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen);
+
+	iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id);
+
+	iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);
+
+	/* Copy MAC header from skb into command buffer */
+	memcpy(tx_cmd->hdr, hdr, hdrlen);
+
+out:
+	return dev_cmd;
+}
+
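+/*
+ * Zero the status area of the skb's tx_info and stash the device command
+ * pointer in driver_data[1]; control, status and driver_data share a union
+ * in struct ieee80211_tx_info, so info->control must not be touched after
+ * this point.
+ */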
+static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
+				       struct iwl_device_cmd *cmd)
+{
+	struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
+
+	memset(&skb_info->status, 0, sizeof(skb_info->status));
+	memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data));
+
+	skb_info->driver_data[1] = cmd;
+}
+
+static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
+				      struct ieee80211_tx_info *info, __le16 fc)
+{
+	struct iwl_mvm_vif *mvmvif;
+
+	mvmvif = iwl_mvm_vif_from_mac80211(info->control.vif);
+
+	switch (info->control.vif->type) {
+	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_ADHOC:
+		/*
+		 * Non-bufferable frames use the broadcast station, thus they
+		 * use the probe queue.
+		 * Also take care of the case where we send a deauth to a
+		 * station that we don't have, or similarly an association
+		 * response (with non-success status) for a station we can't
+		 * accept.
+		 * Also, disassoc frames might happen, particularly with
+		 * reason 7 ("Class 3 frame received from nonassociated STA").
+		 */
+		if (ieee80211_is_mgmt(fc) &&
+		    (!ieee80211_is_bufferable_mmpdu(fc) ||
+		     ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc)))
+			return mvm->probe_queue;
+		if (info->hw_queue == info->control.vif->cab_queue)
+			return mvmvif->cab_queue;
+
+		WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
+			  "fc=0x%02x", le16_to_cpu(fc));
+		return mvm->probe_queue;
+	case NL80211_IFTYPE_P2P_DEVICE:
+		if (ieee80211_is_mgmt(fc))
+			return mvm->p2p_dev_queue;
+		if (info->hw_queue == info->control.vif->cab_queue)
+			return mvmvif->cab_queue;
+
+		WARN_ON_ONCE(1);
+		return mvm->p2p_dev_queue;
+	default:
+		WARN_ONCE(1, "Not a ctrl vif, no available queue\n");
+		return -1;
+	}
+}
+
+int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_tx_info info;
+	struct iwl_device_cmd *dev_cmd;
+	u8 sta_id;
+	int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+	int queue;
+
+	/* IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets, which can be sent
+	 * on two different types of vifs, P2P and STATION. P2P uses the
+	 * offchannel queue; STATION (HS2.0) uses the auxiliary context of the
+	 * FW and hence needs to be sent on the aux queue.
+	 */
+	if (skb_info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
+	    skb_info->control.vif->type == NL80211_IFTYPE_STATION)
+		skb_info->hw_queue = mvm->aux_queue;
+
+	memcpy(&info, skb->cb, sizeof(info));
+
+	if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
+		return -1;
+
+	if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
+			 (!info.control.vif ||
+			  info.hw_queue != info.control.vif->cab_queue)))
+		return -1;
+
+	queue = info.hw_queue;
+
+	/*
+	 * If the interface on which the frame is sent is the P2P_DEVICE
+	 * or an AP/GO interface use the broadcast station associated
+	 * with it; otherwise if the interface is a managed interface
+	 * use the AP station associated with it for multicast traffic
+	 * (this is not possible for unicast packets as a TDLS discovery
+	 * response is sent without a station entry); otherwise use the
+	 * AUX station.
+	 */
+	sta_id = mvm->aux_sta.sta_id;
+	if (info.control.vif) {
+		struct iwl_mvm_vif *mvmvif =
+			iwl_mvm_vif_from_mac80211(info.control.vif);
+
+		if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
+		    info.control.vif->type == NL80211_IFTYPE_AP ||
+		    info.control.vif->type == NL80211_IFTYPE_ADHOC) {
+			if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE)
+				sta_id = mvmvif->bcast_sta.sta_id;
+			else
+				sta_id = mvmvif->mcast_sta.sta_id;
+
+			queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
+							   hdr->frame_control);
+			if (queue < 0)
+				return -1;
+		} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
+			   is_multicast_ether_addr(hdr->addr1)) {
+			u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id);
+
+			if (ap_sta_id != IWL_MVM_INVALID_STA)
+				sta_id = ap_sta_id;
+		} else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
+			queue = mvm->snif_queue;
+			sta_id = mvm->snif_sta.sta_id;
+		}
+	}
+
+	IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue);
+
+	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id);
+	if (!dev_cmd)
+		return -1;
+
+	/* From now on, we cannot access info->control */
+	iwl_mvm_skb_prepare_status(skb, dev_cmd);
+
+	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) {
+		iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
+		return -1;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_INET
+
+static int
+iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
+		       netdev_features_t netdev_flags,
+		       struct sk_buff_head *mpdus_skb)
+{
+	struct sk_buff *tmp, *next;
+	struct ieee80211_hdr *hdr = (void *)skb->data;
+	char cb[sizeof(skb->cb)];
+	u16 i = 0;
+	unsigned int tcp_payload_len;
+	unsigned int mss = skb_shinfo(skb)->gso_size;
+	bool ipv4 = (skb->protocol == htons(ETH_P_IP));
+	u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
+
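+	/* temporarily inflate gso_size so that skb_gso_segment() emits one
+	 * segment per future A-MSDU, each holding num_subframes * mss bytes
+	 * of TCP payload; the original gso_size is restored right after
+	 */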
+	skb_shinfo(skb)->gso_size = num_subframes * mss;
+	memcpy(cb, skb->cb, sizeof(cb));
+
+	next = skb_gso_segment(skb, netdev_flags);
+	skb_shinfo(skb)->gso_size = mss;
+	if (WARN_ON_ONCE(IS_ERR(next)))
+		return -EINVAL;
+	else if (next)
+		consume_skb(skb);
+
+	while (next) {
+		tmp = next;
+		next = tmp->next;
+
+		memcpy(tmp->cb, cb, sizeof(tmp->cb));
+		/*
+		 * Compute the length of all the data added for the A-MSDU.
+		 * This will be used to compute the length to write in the TX
+		 * command. We have: SNAP + IP + TCP for n - 1 subframes and
+		 * ETH header for n subframes.
+		 */
+		tcp_payload_len = skb_tail_pointer(tmp) -
+			skb_transport_header(tmp) -
+			tcp_hdrlen(tmp) + tmp->data_len;
+
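+		/* each segment will later be split into num_subframes MPDUs,
+		 * so stride the IPv4 ID by num_subframes to keep the IDs of
+		 * the subframes built by the transport unique
+		 */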
+		if (ipv4)
+			ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);
+
+		if (tcp_payload_len > mss) {
+			skb_shinfo(tmp)->gso_size = mss;
+		} else {
+			if (ieee80211_is_data_qos(hdr->frame_control)) {
+				u8 *qc;
+
+				if (ipv4)
+					ip_send_check(ip_hdr(tmp));
+
+				qc = ieee80211_get_qos_ctl((void *)tmp->data);
+				*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+			}
+			skb_shinfo(tmp)->gso_size = 0;
+		}
+
+		tmp->prev = NULL;
+		tmp->next = NULL;
+
+		__skb_queue_tail(mpdus_skb, tmp);
+		i++;
+	}
+
+	return 0;
+}
+
+static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
+			  struct ieee80211_tx_info *info,
+			  struct ieee80211_sta *sta,
+			  struct sk_buff_head *mpdus_skb)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct ieee80211_hdr *hdr = (void *)skb->data;
+	unsigned int mss = skb_shinfo(skb)->gso_size;
+	unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len;
+	u16 snap_ip_tcp, pad;
+	unsigned int dbg_max_amsdu_len;
+	netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG;
+	u8 tid, txf;
+
+	snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
+		tcp_hdrlen(skb);
+
+	dbg_max_amsdu_len = READ_ONCE(mvm->max_amsdu_len);
+
+	if (!mvmsta->max_amsdu_len ||
+	    !ieee80211_is_data_qos(hdr->frame_control) ||
+	    (!mvmsta->amsdu_enabled && !dbg_max_amsdu_len))
+		return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
+
+	/*
+	 * Do not build an A-MSDU for IPv6 with extension headers; ask the
+	 * stack to segment and checksum the generated MPDUs for us.
+	 */
+	if (skb->protocol == htons(ETH_P_IPV6) &&
+	    ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
+	    IPPROTO_TCP) {
+		netdev_flags &= ~NETIF_F_CSUM_MASK;
+		return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
+	}
+
+	tid = ieee80211_get_tid(hdr);
+	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
+		return -EINVAL;
+
+	/*
+	 * No need to lock amsdu_in_ampdu_allowed since it can't be modified
+	 * during a BA session.
+	 */
+	if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+	    !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed)
+		return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
+
+	if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(mvmsta->vif)) ||
+	    !(mvmsta->amsdu_enabled & BIT(tid)))
+		return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
+
+	max_amsdu_len = mvmsta->max_amsdu_len;
+
+	/* the Tx FIFO to which this A-MSDU will be routed */
+	txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, tid_to_mac80211_ac[tid]);
+
+	/*
+	 * Don't send an AMSDU that will be longer than the TXF.
+	 * Add a security margin of 256 for the TX command + headers.
+	 * We also want to have the start of the next packet inside the
+	 * fifo to be able to send bursts.
+	 */
+	max_amsdu_len = min_t(unsigned int, max_amsdu_len,
+			      mvm->fwrt.smem_cfg.lmac[0].txfifo_size[txf] -
+			      256);
+
+	if (unlikely(dbg_max_amsdu_len))
+		max_amsdu_len = min_t(unsigned int, max_amsdu_len,
+				      dbg_max_amsdu_len);
+
+	/*
+	 * Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not
+	 * supported. This is a spec requirement (IEEE 802.11-2015
+	 * section 8.7.3 NOTE 3).
+	 */
+	if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+	    !sta->vht_cap.vht_supported)
+		max_amsdu_len = min_t(unsigned int, max_amsdu_len, 4095);
+
+	/* Sub frame header + SNAP + IP header + TCP header + MSS */
+	subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss;
+	pad = (4 - subf_len) & 0x3;
+
+	/*
+	 * If we have N subframes in the A-MSDU, then the A-MSDU's size is
+	 * N * subf_len + (N - 1) * pad.
+	 */
+	num_subframes = (max_amsdu_len + pad) / (subf_len + pad);
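+	/* illustrative numbers: for a minimal IPv4 TCP flow with mss = 1460,
+	 * snap_ip_tcp = 8 + 20 + 20 = 48, so subf_len = 14 + 48 + 1460 = 1522
+	 * and pad = 2; a max_amsdu_len of 7935 then yields
+	 * (7935 + 2) / (1522 + 2) = 5 subframes
+	 */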
+
+	if (sta->max_amsdu_subframes &&
+	    num_subframes > sta->max_amsdu_subframes)
+		num_subframes = sta->max_amsdu_subframes;
+
+	tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
+		tcp_hdrlen(skb) + skb->data_len;
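+	/* i.e. the TCP payload in the linear area plus whatever sits in the
+	 * paged fragments
+	 */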
+
+	/*
+	 * Make sure we have enough TBs for the A-MSDU:
+	 *	2 for each subframe
+	 *	1 more for each fragment
+	 *	1 more for the potential data in the header
+	 */
+	if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) >
+	    mvm->trans->max_skb_frags)
+		num_subframes = 1;
+
+	if (num_subframes > 1)
+		*ieee80211_get_qos_ctl(hdr) |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+
+	/* This skb fits in one single A-MSDU */
+	if (num_subframes * mss >= tcp_payload_len) {
+		__skb_queue_tail(mpdus_skb, skb);
+		return 0;
+	}
+
+	/*
+	 * Trick the segmentation function to make it
+	 * create SKBs that can fit into one A-MSDU.
+	 */
+	return iwl_mvm_tx_tso_segment(skb, num_subframes, netdev_flags,
+				      mpdus_skb);
+}
+#else /* CONFIG_INET */
+static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
+			  struct ieee80211_tx_info *info,
+			  struct ieee80211_sta *sta,
+			  struct sk_buff_head *mpdus_skb)
+{
+	/* Impossible to get TSO with CONFIG_INET */
+	WARN_ON(1);
+
+	return -1;
+}
+#endif
+
+static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
+				  struct iwl_mvm_sta *mvm_sta, u8 tid,
+				  struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	u8 mac_queue = info->hw_queue;
+	struct sk_buff_head *deferred_tx_frames;
+
+	lockdep_assert_held(&mvm_sta->lock);
+
+	mvm_sta->deferred_traffic_tid_map |= BIT(tid);
+	set_bit(mvm_sta->sta_id, mvm->sta_deferred_frames);
+
+	deferred_tx_frames = &mvm_sta->tid_data[tid].deferred_tx_frames;
+
+	skb_queue_tail(deferred_tx_frames, skb);
+
+	/*
+	 * The first deferred frame should've stopped the MAC queues, so we
+	 * should never get a second deferred frame for the RA/TID.
+	 * In case of GSO the first packet may have been split, so don't warn.
+	 */
+	if (skb_queue_len(deferred_tx_frames) == 1) {
+		iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue));
+		schedule_work(&mvm->add_stream_wk);
+	}
+}
+
+/* Check if there are any timed-out TIDs on a given shared TXQ */
+static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
+{
+	unsigned long queue_tid_bitmap = mvm->queue_info[txq_id].tid_bitmap;
+	unsigned long now = jiffies;
+	int tid;
+
+	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+		return false;
+
+	for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+		if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] +
+				IWL_MVM_DQA_QUEUE_TIMEOUT, now))
+			return true;
+	}
+
+	return false;
+}
+
+static void iwl_mvm_tx_airtime(struct iwl_mvm *mvm,
+			       struct iwl_mvm_sta *mvmsta,
+			       int airtime)
+{
+	int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
+	struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
+
+	if (mvm->tcm.paused)
+		return;
+
+	if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
+		schedule_delayed_work(&mvm->tcm.work, 0);
+
+	mdata->tx.airtime += airtime;
+}
+
+static void iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm,
+				  struct iwl_mvm_sta *mvmsta, int tid)
+{
+	u32 ac = tid_to_mac80211_ac[tid];
+	int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
+	struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
+
+	mdata->tx.pkts[ac]++;
+}
+
+/*
+ * Prepares the Tx command for an MPDU and hands it to the transport
+ */
+static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
+			   struct ieee80211_tx_info *info,
+			   struct ieee80211_sta *sta)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct iwl_mvm_sta *mvmsta;
+	struct iwl_device_cmd *dev_cmd;
+	__le16 fc;
+	u16 seq_number = 0;
+	u8 tid = IWL_MAX_TID_COUNT;
+	u16 txq_id = info->hw_queue;
+	bool is_ampdu = false;
+	int hdrlen;
+
+	mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	fc = hdr->frame_control;
+	hdrlen = ieee80211_hdrlen(fc);
+
+	if (WARN_ON_ONCE(!mvmsta))
+		return -1;
+
+	if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
+		return -1;
+
+	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
+					sta, mvmsta->sta_id);
+	if (!dev_cmd)
+		goto drop;
+
+	/*
+	 * we handle that entirely ourselves -- for uAPSD the firmware
+	 * will always send a notification, and for PS-Poll responses
+	 * we'll notify mac80211 when getting frame status
+	 */
+	info->flags &= ~IEEE80211_TX_STATUS_EOSP;
+
+	spin_lock(&mvmsta->lock);
+
+	/* nullfunc frames should go to the MGMT queue regardless of QoS;
+	 * the !ieee80211_is_qos_nullfunc(fc) condition keeps the default
+	 * MGMT TID assignment for them
+	 */
+	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
+		tid = ieee80211_get_tid(hdr);
+		if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
+			goto drop_unlock_sta;
+
+		is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
+		if (WARN_ON_ONCE(is_ampdu &&
+				 mvmsta->tid_data[tid].state != IWL_AGG_ON))
+			goto drop_unlock_sta;
+
+		seq_number = mvmsta->tid_data[tid].seq_number;
+		seq_number &= IEEE80211_SCTL_SEQ;
+
+		if (!iwl_mvm_has_new_tx_api(mvm)) {
+			struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
+
+			hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+			hdr->seq_ctrl |= cpu_to_le16(seq_number);
+			/* update the tx_cmd hdr as it was already copied */
+			tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl;
+		}
+	}
+
+	txq_id = mvmsta->tid_data[tid].txq_id;
+
+	WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
+
+	/* Check if TXQ needs to be allocated or re-activated */
+	if (unlikely(txq_id == IWL_MVM_INVALID_QUEUE ||
+		     !mvmsta->tid_data[tid].is_tid_active)) {
+		/* If TXQ needs to be allocated... */
+		if (txq_id == IWL_MVM_INVALID_QUEUE) {
+			iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
+
+			/*
+			 * The frame is now deferred, and the worker scheduled
+			 * will re-allocate it, so we can free it for now.
+			 */
+			iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
+			spin_unlock(&mvmsta->lock);
+			return 0;
+		}
+
+		/* queue should always be active in new TX path */
+		WARN_ON(iwl_mvm_has_new_tx_api(mvm));
+
+		/* If we are here - TXQ exists and needs to be re-activated */
+		spin_lock(&mvm->queue_info_lock);
+		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
+		mvmsta->tid_data[tid].is_tid_active = true;
+		spin_unlock(&mvm->queue_info_lock);
+
+		IWL_DEBUG_TX_QUEUES(mvm, "Re-activating queue %d for TX\n",
+				    txq_id);
+	}
+
+	if (!iwl_mvm_has_new_tx_api(mvm)) {
+		/* Keep track of the time of the last frame for this RA/TID */
+		mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;
+
+		/*
+		 * If we have timed-out TIDs - schedule the worker that will
+		 * reconfig the queues and update them
+		 *
+		 * Note that the mvm->queue_info_lock isn't being taken here in
+		 * order to not serialize the TX flow. This isn't dangerous
+		 * because scheduling mvm->add_stream_wk can't ruin the state,
+		 * and if we DON'T schedule it due to some race condition then
+		 * the next time we get here we will.
+		 */
+		if (unlikely(mvm->queue_info[txq_id].status ==
+			     IWL_MVM_QUEUE_SHARED &&
+			     iwl_mvm_txq_should_update(mvm, txq_id)))
+			schedule_work(&mvm->add_stream_wk);
+	}
+
+	IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
+		     tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));
+
+	/* From now on, we cannot access info->control */
+	iwl_mvm_skb_prepare_status(skb, dev_cmd);
+
+	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
+		goto drop_unlock_sta;
+
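+	/* seq_ctrl carries the fragment number in bits 0-3 and the sequence
+	 * number in bits 4-15, so adding 0x10 advances the SN by one
+	 */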
+	if (tid < IWL_MAX_TID_COUNT && !ieee80211_has_morefrags(fc))
+		mvmsta->tid_data[tid].seq_number = seq_number + 0x10;
+
+	spin_unlock(&mvmsta->lock);
+
+	iwl_mvm_tx_pkt_queued(mvm, mvmsta, tid == IWL_MAX_TID_COUNT ? 0 : tid);
+
+	return 0;
+
+drop_unlock_sta:
+	iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
+	spin_unlock(&mvmsta->lock);
+drop:
+	return -1;
+}
+
+int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
+		   struct ieee80211_sta *sta)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct ieee80211_tx_info info;
+	struct sk_buff_head mpdus_skbs;
+	unsigned int payload_len;
+	int ret;
+
+	if (WARN_ON_ONCE(!mvmsta))
+		return -1;
+
+	if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_INVALID_STA))
+		return -1;
+
+	memcpy(&info, skb->cb, sizeof(info));
+
+	if (!skb_is_gso(skb))
+		return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
+
+	payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
+		tcp_hdrlen(skb) + skb->data_len;
+
+	if (payload_len <= skb_shinfo(skb)->gso_size)
+		return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
+
+	__skb_queue_head_init(&mpdus_skbs);
+
+	ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs);
+	if (ret)
+		return ret;
+
+	if (WARN_ON(skb_queue_empty(&mpdus_skbs)))
+		return ret;
+
+	while (!skb_queue_empty(&mpdus_skbs)) {
+		skb = __skb_dequeue(&mpdus_skbs);
+
+		ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
+		if (ret) {
+			__skb_queue_purge(&mpdus_skbs);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
+				      struct ieee80211_sta *sta, u8 tid)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+	struct ieee80211_vif *vif = mvmsta->vif;
+	u16 normalized_ssn;
+
+	lockdep_assert_held(&mvmsta->lock);
+
+	if ((tid_data->state == IWL_AGG_ON ||
+	     tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
+	    iwl_mvm_tid_queued(mvm, tid_data) == 0) {
+		/*
+		 * Now that this aggregation or DQA queue is empty, tell
+		 * mac80211 so it knows we no longer have frames buffered for
+		 * the station on this TID (for the TIM bitmap calculation).
+		 */
+		ieee80211_sta_set_buffered(sta, tid, false);
+	}
+
+	/*
+	 * In 22000 HW, the next_reclaimed index is only 8 bits, so we need
+	 * to align the wrap-around of the ssn so we compare relevant values.
+	 */
+	normalized_ssn = tid_data->ssn;
+	if (mvm->trans->cfg->gen2)
+		normalized_ssn &= 0xff;
+
+	if (normalized_ssn != tid_data->next_reclaimed)
+		return;
+
+	switch (tid_data->state) {
+	case IWL_EMPTYING_HW_QUEUE_ADDBA:
+		IWL_DEBUG_TX_QUEUES(mvm,
+				    "Can continue addBA flow ssn = next_recl = %d\n",
+				    tid_data->next_reclaimed);
+		tid_data->state = IWL_AGG_STARTING;
+		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+		break;
+
+	case IWL_EMPTYING_HW_QUEUE_DELBA:
+		IWL_DEBUG_TX_QUEUES(mvm,
+				    "Can continue DELBA flow ssn = next_recl = %d\n",
+				    tid_data->next_reclaimed);
+		tid_data->state = IWL_AGG_OFF;
+		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+		break;
+
+	default:
+		break;
+	}
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+const char *iwl_mvm_get_tx_fail_reason(u32 status)
+{
+#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
+#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
+
+	switch (status & TX_STATUS_MSK) {
+	case TX_STATUS_SUCCESS:
+		return "SUCCESS";
+	TX_STATUS_POSTPONE(DELAY);
+	TX_STATUS_POSTPONE(FEW_BYTES);
+	TX_STATUS_POSTPONE(BT_PRIO);
+	TX_STATUS_POSTPONE(QUIET_PERIOD);
+	TX_STATUS_POSTPONE(CALC_TTAK);
+	TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
+	TX_STATUS_FAIL(SHORT_LIMIT);
+	TX_STATUS_FAIL(LONG_LIMIT);
+	TX_STATUS_FAIL(UNDERRUN);
+	TX_STATUS_FAIL(DRAIN_FLOW);
+	TX_STATUS_FAIL(RFKILL_FLUSH);
+	TX_STATUS_FAIL(LIFE_EXPIRE);
+	TX_STATUS_FAIL(DEST_PS);
+	TX_STATUS_FAIL(HOST_ABORTED);
+	TX_STATUS_FAIL(BT_RETRY);
+	TX_STATUS_FAIL(STA_INVALID);
+	TX_STATUS_FAIL(FRAG_DROPPED);
+	TX_STATUS_FAIL(TID_DISABLE);
+	TX_STATUS_FAIL(FIFO_FLUSHED);
+	TX_STATUS_FAIL(SMALL_CF_POLL);
+	TX_STATUS_FAIL(FW_DROP);
+	TX_STATUS_FAIL(STA_COLOR_MISMATCH);
+	}
+
+	return "UNKNOWN";
+
+#undef TX_STATUS_FAIL
+#undef TX_STATUS_POSTPONE
+}
+#endif /* CONFIG_IWLWIFI_DEBUG */
+
+void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
+			       enum nl80211_band band,
+			       struct ieee80211_tx_rate *r)
+{
+	if (rate_n_flags & RATE_HT_MCS_GF_MSK)
+		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+	switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+	case RATE_MCS_CHAN_WIDTH_20:
+		break;
+	case RATE_MCS_CHAN_WIDTH_40:
+		r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+		break;
+	case RATE_MCS_CHAN_WIDTH_80:
+		r->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
+		break;
+	case RATE_MCS_CHAN_WIDTH_160:
+		r->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH;
+		break;
+	}
+	if (rate_n_flags & RATE_MCS_SGI_MSK)
+		r->flags |= IEEE80211_TX_RC_SHORT_GI;
+	if (rate_n_flags & RATE_MCS_HT_MSK) {
+		r->flags |= IEEE80211_TX_RC_MCS;
+		r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
+	} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+		ieee80211_rate_set_vht(
+			r, rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK,
+			((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
+						RATE_VHT_MCS_NSS_POS) + 1);
+		r->flags |= IEEE80211_TX_RC_VHT_MCS;
+	} else {
+		r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
+							     band);
+	}
+}
+
+/*
+ * Translate the ucode response to mac80211 tx status control values.
+ */
+static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags,
+					struct ieee80211_tx_info *info)
+{
+	struct ieee80211_tx_rate *r = &info->status.rates[0];
+
+	info->status.antenna =
+		((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
+	iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r);
+}
+
+static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
+					    u32 status)
+{
+	struct iwl_fw_dbg_trigger_tlv *trig;
+	struct iwl_fw_dbg_trigger_tx_status *status_trig;
+	int i;
+
+	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TX_STATUS))
+		return;
+
+	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TX_STATUS);
+	status_trig = (void *)trig->data;
+
+	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt, NULL, trig))
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) {
+		/* don't collect on status 0 */
+		if (!status_trig->statuses[i].status)
+			break;
+
+		if (status_trig->statuses[i].status != (status & TX_STATUS_MSK))
+			continue;
+
+		iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
+					"Tx status %d was received",
+					status & TX_STATUS_MSK);
+		break;
+	}
+}
+
+/**
+ * iwl_mvm_get_scd_ssn - returns the SSN of the SCD
+ * @mvm: the mvm object
+ * @tx_resp: the Tx response from the fw (agg or non-agg)
+ *
+ * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since
+ * it can't know that everything will go well until the end of the AMPDU, it
+ * can't know in advance the number of MPDUs that will be sent in the current
+ * batch. This is why it writes the agg Tx response while it fetches the MPDUs.
+ * Hence, it can't know in advance what the SSN of the SCD will be at the end
+ * of the batch. This is why the SSN of the SCD is written at the end of the
+ * whole struct at a variable offset. This function knows how to cope with the
+ * variable offset and returns the SSN of the SCD.
+ */
+static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm,
+				      struct iwl_mvm_tx_resp *tx_resp)
+{
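+	/* struct agg_tx_status is one __le32 wide, so stepping a __le32
+	 * pointer by frame_count skips the per-frame status array and lands
+	 * on the trailing scd_ssn word; only its low 12 bits are meaningful
+	 */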
+	return le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
+			    tx_resp->frame_count) & 0xfff;
+}
+
+static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
+				     struct iwl_rx_packet *pkt)
+{
+	struct ieee80211_sta *sta;
+	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+	int txq_id = SEQ_TO_QUEUE(sequence);
+	/* struct iwl_mvm_tx_resp_v3 is almost the same */
+	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
+	int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
+	int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
+	struct agg_tx_status *agg_status =
+		iwl_mvm_get_agg_status(mvm, tx_resp);
+	u32 status = le16_to_cpu(agg_status->status);
+	u16 ssn = iwl_mvm_get_scd_ssn(mvm, tx_resp);
+	struct iwl_mvm_sta *mvmsta;
+	struct sk_buff_head skbs;
+	u8 skb_freed = 0;
+	u8 lq_color;
+	u16 next_reclaimed, seq_ctl;
+	bool is_ndp = false;
+
+	__skb_queue_head_init(&skbs);
+
+	if (iwl_mvm_has_new_tx_api(mvm))
+		txq_id = le16_to_cpu(tx_resp->tx_queue);
+
+	seq_ctl = le16_to_cpu(tx_resp->seq_ctl);
+
+	/* we can free until ssn % q.n_bd not inclusive */
+	iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs);
+
+	while (!skb_queue_empty(&skbs)) {
+		struct sk_buff *skb = __skb_dequeue(&skbs);
+		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+		struct ieee80211_hdr *hdr = (void *)skb->data;
+		bool flushed = false;
+
+		skb_freed++;
+
+		iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
+
+		memset(&info->status, 0, sizeof(info->status));
+
+		/* inform mac80211 about what happened with the frame */
+		switch (status & TX_STATUS_MSK) {
+		case TX_STATUS_SUCCESS:
+		case TX_STATUS_DIRECT_DONE:
+			info->flags |= IEEE80211_TX_STAT_ACK;
+			break;
+		case TX_STATUS_FAIL_FIFO_FLUSHED:
+		case TX_STATUS_FAIL_DRAIN_FLOW:
+			flushed = true;
+			break;
+		case TX_STATUS_FAIL_DEST_PS:
+			/* the FW should have stopped the queue and not
+			 * returned this status
+			 */
+			WARN_ON(1);
+			info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+			break;
+		default:
+			break;
+		}
+
+		iwl_mvm_tx_status_check_trigger(mvm, status);
+
+		info->status.rates[0].count = tx_resp->failure_frame + 1;
+		iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate),
+					    info);
+		info->status.status_driver_data[1] =
+			(void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate);
+
+		/* Single frame failure in an AMPDU queue => send BAR */
+		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+		    !(info->flags & IEEE80211_TX_STAT_ACK) &&
+		    !(info->flags & IEEE80211_TX_STAT_TX_FILTERED) && !flushed)
+			info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
+		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+
+		/* W/A FW bug: seq_ctl is wrong upon failure / BAR frame */
+		if (ieee80211_is_back_req(hdr->frame_control))
+			seq_ctl = 0;
+		else if (status != TX_STATUS_SUCCESS)
+			seq_ctl = le16_to_cpu(hdr->seq_ctrl);
+
+		if (unlikely(!seq_ctl)) {
+			struct ieee80211_hdr *hdr = (void *)skb->data;
+
+			/*
+			 * If it is an NDP, we can't update next_reclaimed
+			 * since its sequence control is 0. Note that for that
+			 * same reason, NDPs are never sent to A-MPDU'able
+			 * queues, so we can never have more than one freed
+			 * frame for a single Tx response (see WARN_ON below).
+			 */
+			if (ieee80211_is_qos_nullfunc(hdr->frame_control))
+				is_ndp = true;
+		}
+
+		/*
+		 * TODO: this is not accurate if we are freeing more than one
+		 * packet.
+		 */
+		info->status.tx_time =
+			le16_to_cpu(tx_resp->wireless_media_time);
+		BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
+		lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
+		info->status.status_driver_data[0] =
+			RS_DRV_DATA_PACK(lq_color, tx_resp->reduced_tpc);
+
+		ieee80211_tx_status(mvm->hw, skb);
+	}
+
+	/* This is an aggregation queue or might become one, so we use
+	 * the ssn since: ssn = wifi seq_num % 256.
+	 * The seq_ctl is the sequence control of the packet to which
+	 * this Tx response relates. But if there is a hole in the
+	 * bitmap of the BA we received, this Tx response may allow us to
+	 * reclaim the hole and all the subsequent packets that were
+	 * already acked. In that case, seq_ctl != ssn, and the next
+	 * packet to be reclaimed will be ssn and not seq_ctl. In that
+	 * case, several packets will be reclaimed even if
+	 * frame_count = 1.
+	 *
+	 * The ssn is the index (% 256) of the latest packet that has been
+	 * treated (acked / dropped) + 1.
+	 */
+	next_reclaimed = ssn;
+
+	IWL_DEBUG_TX_REPLY(mvm,
+			   "TXQ %d status %s (0x%08x)\n",
+			   txq_id, iwl_mvm_get_tx_fail_reason(status), status);
+
+	IWL_DEBUG_TX_REPLY(mvm,
+			   "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d next_reclaimed=0x%x seq_ctl=0x%x\n",
+			   le32_to_cpu(tx_resp->initial_rate),
+			   tx_resp->failure_frame, SEQ_TO_INDEX(sequence),
+			   ssn, next_reclaimed, seq_ctl);
+
+	rcu_read_lock();
+
+	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+	/*
+	 * sta can't be NULL otherwise it'd mean that the sta has been freed in
+	 * the firmware while we still have packets for it in the Tx queues.
+	 */
+	if (WARN_ON_ONCE(!sta))
+		goto out;
+
+	if (!IS_ERR(sta)) {
+		mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+		iwl_mvm_tx_airtime(mvm, mvmsta,
+				   le16_to_cpu(tx_resp->wireless_media_time));
+
+		if (tid != IWL_TID_NON_QOS && tid != IWL_MGMT_TID) {
+			struct iwl_mvm_tid_data *tid_data =
+				&mvmsta->tid_data[tid];
+			bool send_eosp_ndp = false;
+
+			spin_lock_bh(&mvmsta->lock);
+
+			if (!is_ndp) {
+				tid_data->next_reclaimed = next_reclaimed;
+				IWL_DEBUG_TX_REPLY(mvm,
+						   "Next reclaimed packet:%d\n",
+						   next_reclaimed);
+			} else {
+				IWL_DEBUG_TX_REPLY(mvm,
+						   "NDP - don't update next_reclaimed\n");
+			}
+
+			iwl_mvm_check_ratid_empty(mvm, sta, tid);
+
+			if (mvmsta->sleep_tx_count) {
+				mvmsta->sleep_tx_count--;
+				if (mvmsta->sleep_tx_count &&
+				    !iwl_mvm_tid_queued(mvm, tid_data)) {
+					/*
+					 * The number of frames in the queue
+					 * dropped to 0 even if we sent fewer
+					 * frames than we thought we had on the
+					 * Tx queue.
+					 * This means we had holes in the BA
+					 * window that we just filled, ask
+					 * mac80211 to send EOSP since the
+					 * firmware won't know how to do that.
+					 * Send NDP and the firmware will send
+					 * EOSP notification that will trigger
+					 * a call to ieee80211_sta_eosp().
+					 */
+					send_eosp_ndp = true;
+				}
+			}
+
+			spin_unlock_bh(&mvmsta->lock);
+			if (send_eosp_ndp) {
+				iwl_mvm_sta_modify_sleep_tx_count(mvm, sta,
+					IEEE80211_FRAME_RELEASE_UAPSD,
+					1, tid, false, false);
+				mvmsta->sleep_tx_count = 0;
+				ieee80211_send_eosp_nullfunc(sta, tid);
+			}
+		}
+
+		if (mvmsta->next_status_eosp) {
+			mvmsta->next_status_eosp = false;
+			ieee80211_sta_eosp(sta);
+		}
+	} else {
+		mvmsta = NULL;
+	}
+
+out:
+	rcu_read_unlock();
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+#define AGG_TX_STATE_(x) case AGG_TX_STATE_ ## x: return #x
+static const char *iwl_get_agg_tx_status(u16 status)
+{
+	switch (status & AGG_TX_STATE_STATUS_MSK) {
+	AGG_TX_STATE_(TRANSMITTED);
+	AGG_TX_STATE_(UNDERRUN);
+	AGG_TX_STATE_(BT_PRIO);
+	AGG_TX_STATE_(FEW_BYTES);
+	AGG_TX_STATE_(ABORT);
+	AGG_TX_STATE_(TX_ON_AIR_DROP);
+	AGG_TX_STATE_(LAST_SENT_TRY_CNT);
+	AGG_TX_STATE_(LAST_SENT_BT_KILL);
+	AGG_TX_STATE_(SCD_QUERY);
+	AGG_TX_STATE_(TEST_BAD_CRC32);
+	AGG_TX_STATE_(RESPONSE);
+	AGG_TX_STATE_(DUMP_TX);
+	AGG_TX_STATE_(DELAY_TX);
+	}
+
+	return "UNKNOWN";
+}
+
+static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
+				      struct iwl_rx_packet *pkt)
+{
+	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
+	struct agg_tx_status *frame_status =
+		iwl_mvm_get_agg_status(mvm, tx_resp);
+	int i;
+
+	for (i = 0; i < tx_resp->frame_count; i++) {
+		u16 fstatus = le16_to_cpu(frame_status[i].status);
+
+		IWL_DEBUG_TX_REPLY(mvm,
+				   "status %s (0x%04x), try-count (%d) seq (0x%x)\n",
+				   iwl_get_agg_tx_status(fstatus),
+				   fstatus & AGG_TX_STATE_STATUS_MSK,
+				   (fstatus & AGG_TX_STATE_TRY_CNT_MSK) >>
+					AGG_TX_STATE_TRY_CNT_POS,
+				   le16_to_cpu(frame_status[i].sequence));
+	}
+}
+#else
+static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
+				      struct iwl_rx_packet *pkt)
+{}
+#endif /* CONFIG_IWLWIFI_DEBUG */
+
+static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
+				  struct iwl_rx_packet *pkt)
+{
+	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
+	int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
+	int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
+	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+	struct iwl_mvm_sta *mvmsta;
+	int queue = SEQ_TO_QUEUE(sequence);
+
+	if (WARN_ON_ONCE(queue < IWL_MVM_DQA_MIN_DATA_QUEUE &&
+			 (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)))
+		return;
+
+	if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS))
+		return;
+
+	iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt);
+
+	rcu_read_lock();
+
+	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
+
+	if (!WARN_ON_ONCE(!mvmsta)) {
+		mvmsta->tid_data[tid].rate_n_flags =
+			le32_to_cpu(tx_resp->initial_rate);
+		mvmsta->tid_data[tid].tx_time =
+			le16_to_cpu(tx_resp->wireless_media_time);
+		mvmsta->tid_data[tid].lq_color =
+			TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
+		iwl_mvm_tx_airtime(mvm, mvmsta,
+				   le16_to_cpu(tx_resp->wireless_media_time));
+	}
+
+	rcu_read_unlock();
+}
+
+void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
+
+	if (tx_resp->frame_count == 1)
+		iwl_mvm_rx_tx_cmd_single(mvm, pkt);
+	else
+		iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
+}
+
+static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
+			       int txq, int index,
+			       struct ieee80211_tx_info *ba_info, u32 rate)
+{
+	struct sk_buff_head reclaimed_skbs;
+	struct iwl_mvm_tid_data *tid_data;
+	struct ieee80211_sta *sta;
+	struct iwl_mvm_sta *mvmsta;
+	struct sk_buff *skb;
+	int freed;
+
+	if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
+		      tid > IWL_MAX_TID_COUNT,
+		      "sta_id %d tid %d", sta_id, tid))
+		return;
+
+	rcu_read_lock();
+
+	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+	/* Reclaiming frames for a station that has been deleted? */
+	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
+		rcu_read_unlock();
+		return;
+	}
+
+	mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	tid_data = &mvmsta->tid_data[tid];
+
+	if (tid_data->txq_id != txq) {
+		IWL_ERR(mvm,
+			"invalid BA notification: Q %d, tid %d\n",
+			tid_data->txq_id, tid);
+		rcu_read_unlock();
+		return;
+	}
+
+	spin_lock_bh(&mvmsta->lock);
+
+	__skb_queue_head_init(&reclaimed_skbs);
+
+	/*
+	 * Release all TFDs before the SSN, i.e. all TFDs in front of
+	 * block-ack window (we assume that they've been successfully
+	 * transmitted ... if not, it's too late anyway).
+	 */
+	iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs);
+
+	tid_data->next_reclaimed = index;
+
+	iwl_mvm_check_ratid_empty(mvm, sta, tid);
+
+	freed = 0;
+
+	/* pack the lq color from tid_data along with the reduced txp */
+	ba_info->status.status_driver_data[0] =
+		RS_DRV_DATA_PACK(tid_data->lq_color,
+				 ba_info->status.status_driver_data[0]);
+	ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;
+
+	skb_queue_walk(&reclaimed_skbs, skb) {
+		struct ieee80211_hdr *hdr = (void *)skb->data;
+		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+		if (ieee80211_is_data_qos(hdr->frame_control))
+			freed++;
+		else
+			WARN_ON_ONCE(tid != IWL_MAX_TID_COUNT);
+
+		iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
+
+		memset(&info->status, 0, sizeof(info->status));
+		/* Packet was transmitted successfully, failures come as single
+		 * frames because before failing a frame the firmware transmits
+		 * it without aggregation at least once.
+		 */
+		info->flags |= IEEE80211_TX_STAT_ACK;
+
+		/* this is the first skb we deliver in this batch;
+		 * put the rate scaling data there
+		 */
+		if (freed == 1) {
+			info->flags |= IEEE80211_TX_STAT_AMPDU;
+			memcpy(&info->status, &ba_info->status,
+			       sizeof(ba_info->status));
+			iwl_mvm_hwrate_to_tx_status(rate, info);
+		}
+	}
+
+	spin_unlock_bh(&mvmsta->lock);
+
+	/* We got a BA notif with 0 acked, or the scd_ssn didn't progress,
+	 * which is possible (i.e. the first MPDU in the aggregation wasn't
+	 * acked). Still, it's important to update RS about sent vs. acked.
+	 */
+	if (skb_queue_empty(&reclaimed_skbs)) {
+		struct ieee80211_chanctx_conf *chanctx_conf = NULL;
+
+		if (mvmsta->vif)
+			chanctx_conf =
+				rcu_dereference(mvmsta->vif->chanctx_conf);
+
+		if (WARN_ON_ONCE(!chanctx_conf))
+			goto out;
+
+		ba_info->band = chanctx_conf->def.chan->band;
+		iwl_mvm_hwrate_to_tx_status(rate, ba_info);
+
+		if (!iwl_mvm_has_tlc_offload(mvm)) {
+			IWL_DEBUG_TX_REPLY(mvm,
+					   "No reclaim. Update rs directly\n");
+			iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false);
+		}
+	}
+
+out:
+	rcu_read_unlock();
+
+	while (!skb_queue_empty(&reclaimed_skbs)) {
+		skb = __skb_dequeue(&reclaimed_skbs);
+		ieee80211_tx_status(mvm->hw, skb);
+	}
+}
+
+void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	int sta_id, tid, txq, index;
+	struct ieee80211_tx_info ba_info = {};
+	struct iwl_mvm_ba_notif *ba_notif;
+	struct iwl_mvm_tid_data *tid_data;
+	struct iwl_mvm_sta *mvmsta;
+
+	ba_info.flags = IEEE80211_TX_STAT_AMPDU;
+
+	if (iwl_mvm_has_new_tx_api(mvm)) {
+		struct iwl_mvm_compressed_ba_notif *ba_res =
+			(void *)pkt->data;
+		u8 lq_color = TX_RES_RATE_TABLE_COL_GET(ba_res->tlc_rate_info);
+		int i;
+
+		sta_id = ba_res->sta_id;
+		ba_info.status.ampdu_ack_len = (u8)le16_to_cpu(ba_res->done);
+		ba_info.status.ampdu_len = (u8)le16_to_cpu(ba_res->txed);
+		ba_info.status.tx_time =
+			(u16)le32_to_cpu(ba_res->wireless_time);
+		ba_info.status.status_driver_data[0] =
+			(void *)(uintptr_t)ba_res->reduced_txp;
+
+		if (!le16_to_cpu(ba_res->tfd_cnt))
+			goto out;
+
+		rcu_read_lock();
+
+		mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
+		if (!mvmsta)
+			goto out_unlock;
+
+		/* Free per TID */
+		for (i = 0; i < le16_to_cpu(ba_res->tfd_cnt); i++) {
+			struct iwl_mvm_compressed_ba_tfd *ba_tfd =
+				&ba_res->tfd[i];
+
+			tid = ba_tfd->tid;
+			if (tid == IWL_MGMT_TID)
+				tid = IWL_MAX_TID_COUNT;
+
+			mvmsta->tid_data[tid].lq_color = lq_color;
+			iwl_mvm_tx_reclaim(mvm, sta_id, tid,
+					   (int)(le16_to_cpu(ba_tfd->q_num)),
+					   le16_to_cpu(ba_tfd->tfd_index),
+					   &ba_info,
+					   le32_to_cpu(ba_res->tx_rate));
+		}
+
+		iwl_mvm_tx_airtime(mvm, mvmsta,
+				   le32_to_cpu(ba_res->wireless_time));
+out_unlock:
+		rcu_read_unlock();
+out:
+		IWL_DEBUG_TX_REPLY(mvm,
+				   "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
+				   sta_id, le32_to_cpu(ba_res->flags),
+				   le16_to_cpu(ba_res->txed),
+				   le16_to_cpu(ba_res->done));
+		return;
+	}
+
+	ba_notif = (void *)pkt->data;
+	sta_id = ba_notif->sta_id;
+	tid = ba_notif->tid;
+	/* "flow" corresponds to Tx queue */
+	txq = le16_to_cpu(ba_notif->scd_flow);
+	/* "ssn" is start of block-ack Tx window, corresponds to index
+	 * (in Tx queue's circular buffer) of first TFD/frame in window */
+	index = le16_to_cpu(ba_notif->scd_ssn);
+
+	rcu_read_lock();
+	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
+	if (WARN_ON_ONCE(!mvmsta)) {
+		rcu_read_unlock();
+		return;
+	}
+
+	tid_data = &mvmsta->tid_data[tid];
+
+	ba_info.status.ampdu_ack_len = ba_notif->txed_2_done;
+	ba_info.status.ampdu_len = ba_notif->txed;
+	ba_info.status.tx_time = tid_data->tx_time;
+	ba_info.status.status_driver_data[0] =
+		(void *)(uintptr_t)ba_notif->reduced_txp;
+
+	rcu_read_unlock();
+
+	iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info,
+			   tid_data->rate_n_flags);
+
+	IWL_DEBUG_TX_REPLY(mvm,
+			   "BA_NOTIFICATION Received from %pM, sta_id = %d\n",
+			   ba_notif->sta_addr, ba_notif->sta_id);
+
+	IWL_DEBUG_TX_REPLY(mvm,
+			   "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
+			   ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
+			   le64_to_cpu(ba_notif->bitmap), txq, index,
+			   ba_notif->txed, ba_notif->txed_2_done);
+
+	IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
+			   ba_notif->reduced_txp);
+}
+
+/*
+ * Note that there are transports that buffer frames before they reach
+ * the firmware. This means that after flush_tx_path is called, the
+ * queue might not be empty. The race-free way to handle this is to:
+ * 1) set the station as draining
+ * 2) flush the Tx path
+ * 3) wait for the transport queues to be empty
+ */
+int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags)
+{
+	int ret;
+	struct iwl_tx_path_flush_cmd_v1 flush_cmd = {
+		.queues_ctl = cpu_to_le32(tfd_msk),
+		.flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
+	};
+
+	WARN_ON(iwl_mvm_has_new_tx_api(mvm));
+
+	ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
+				   sizeof(flush_cmd), &flush_cmd);
+	if (ret)
+		IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
+	return ret;
+}
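+
+/*
+ * A minimal sketch of the race-free drain sequence described above
+ * (illustrative only - iwl_mvm_drain_sta() and
+ * iwl_trans_wait_tx_queues_empty() exist elsewhere in this driver, but
+ * the exact call shape below is assumed, not prescribed):
+ *
+ *	iwl_mvm_drain_sta(mvm, mvmsta, true);			(1) drain
+ *	iwl_mvm_flush_tx_path(mvm, mvmsta->tfd_queue_msk, 0);	(2) flush
+ *	iwl_trans_wait_tx_queues_empty(mvm->trans,
+ *				       mvmsta->tfd_queue_msk);	(3) wait
+ *	iwl_mvm_drain_sta(mvm, mvmsta, false);
+ */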
+
+int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id,
+			   u16 tids, u32 flags)
+{
+	int ret;
+	struct iwl_tx_path_flush_cmd flush_cmd = {
+		.sta_id = cpu_to_le32(sta_id),
+		.tid_mask = cpu_to_le16(tids),
+	};
+
+	WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
+
+	ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, flags,
+				   sizeof(flush_cmd), &flush_cmd);
+	if (ret)
+		IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
+	return ret;
+}
+
+int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags)
+{
+	struct iwl_mvm_int_sta *int_sta = sta;
+	struct iwl_mvm_sta *mvm_sta = sta;
+
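+	/*
+	 * The caller may pass either an iwl_mvm_int_sta or an iwl_mvm_sta
+	 * through the void pointer; reading sta_id via either type is only
+	 * safe because the field sits at the same offset in both structures,
+	 * which the BUILD_BUG_ON below checks at compile time.
+	 */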
+	BUILD_BUG_ON(offsetof(struct iwl_mvm_int_sta, sta_id) !=
+		     offsetof(struct iwl_mvm_sta, sta_id));
+
+	if (iwl_mvm_has_new_tx_api(mvm))
+		return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id,
+					      0xff | BIT(IWL_MGMT_TID), flags);
+
+	if (internal)
+		return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk,
+					     flags);
+
+	return iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, flags);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
new file mode 100644
index 0000000..b002a7a
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -0,0 +1,1862 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <net/mac80211.h>
+
+#include "iwl-debug.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
+#include "iwl-csr.h"
+#include "mvm.h"
+#include "fw/api/rs.h"
+
+/*
+ * Will return 0 even if the cmd failed when RFKILL is asserted, unless
+ * CMD_WANT_SKB is set in cmd->flags.
+ */
+int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
+{
+	int ret;
+
+#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
+	if (WARN_ON(mvm->d3_test_active))
+		return -EIO;
+#endif
+
+	/*
+	 * Synchronous commands from this op-mode must hold
+	 * the mutex, this ensures we don't try to send two
+	 * (or more) synchronous commands at a time.
+	 */
+	if (!(cmd->flags & CMD_ASYNC)) {
+		lockdep_assert_held(&mvm->mutex);
+		if (!(cmd->flags & CMD_SEND_IN_IDLE))
+			iwl_mvm_ref(mvm, IWL_MVM_REF_SENDING_CMD);
+	}
+
+	ret = iwl_trans_send_cmd(mvm->trans, cmd);
+
+	if (!(cmd->flags & (CMD_ASYNC | CMD_SEND_IN_IDLE)))
+		iwl_mvm_unref(mvm, IWL_MVM_REF_SENDING_CMD);
+
+	/*
+	 * If the caller wants the SKB, then don't hide any problems, the
+	 * caller might access the response buffer which will be NULL if
+	 * the command failed.
+	 */
+	if (cmd->flags & CMD_WANT_SKB)
+		return ret;
+
+	/* Silently ignore failures if RFKILL is asserted */
+	if (!ret || ret == -ERFKILL)
+		return 0;
+	return ret;
+}
+
+int iwl_mvm_send_cmd_pdu(struct iwl_mvm *mvm, u32 id,
+			 u32 flags, u16 len, const void *data)
+{
+	struct iwl_host_cmd cmd = {
+		.id = id,
+		.len = { len, },
+		.data = { data, },
+		.flags = flags,
+	};
+
+	return iwl_mvm_send_cmd(mvm, &cmd);
+}
+
+/*
+ * We assume that the caller set the status to the success value
+ */
+int iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd,
+			    u32 *status)
+{
+	struct iwl_rx_packet *pkt;
+	struct iwl_cmd_response *resp;
+	int ret, resp_len;
+
+	lockdep_assert_held(&mvm->mutex);
+
+#if defined(CONFIG_IWLWIFI_DEBUGFS) && defined(CONFIG_PM_SLEEP)
+	if (WARN_ON(mvm->d3_test_active))
+		return -EIO;
+#endif
+
+	/*
+	 * Only synchronous commands can wait for status,
+	 * we use WANT_SKB so the caller can't.
+	 */
+	if (WARN_ONCE(cmd->flags & (CMD_ASYNC | CMD_WANT_SKB),
+		      "cmd flags %x", cmd->flags))
+		return -EINVAL;
+
+	cmd->flags |= CMD_WANT_SKB;
+
+	ret = iwl_trans_send_cmd(mvm->trans, cmd);
+	if (ret == -ERFKILL) {
+		/*
+		 * The command failed because of RFKILL, don't update
+		 * the status, leave it as success and return 0.
+		 */
+		return 0;
+	} else if (ret) {
+		return ret;
+	}
+
+	pkt = cmd->resp_pkt;
+
+	resp_len = iwl_rx_packet_payload_len(pkt);
+	if (WARN_ON_ONCE(resp_len != sizeof(*resp))) {
+		ret = -EIO;
+		goto out_free_resp;
+	}
+
+	resp = (void *)pkt->data;
+	*status = le32_to_cpu(resp->status);
+ out_free_resp:
+	iwl_free_resp(cmd);
+	return ret;
+}
+
+/*
+ * We assume that the caller set the status to the success value
+ */
+int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len,
+				const void *data, u32 *status)
+{
+	struct iwl_host_cmd cmd = {
+		.id = id,
+		.len = { len, },
+		.data = { data, },
+	};
+
+	return iwl_mvm_send_cmd_status(mvm, &cmd, status);
+}
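+
+/*
+ * Illustrative call pattern for the *_status() helpers above. The caller
+ * initializes the status to the success value first, as the comments
+ * note; ADD_STA/ADD_STA_SUCCESS are used here only as an example of a
+ * command that reports status this way:
+ *
+ *	u32 status = ADD_STA_SUCCESS;
+ *	int ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+ *					      &cmd, &status);
+ *	if (ret)
+ *		return ret;		(transport-level failure)
+ *	if (status != ADD_STA_SUCCESS)
+ *		...			(firmware rejected the command)
+ */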
+
+#define IWL_DECLARE_RATE_INFO(r) \
+	[IWL_RATE_##r##M_INDEX] = IWL_RATE_##r##M_PLCP
+
+/*
+ * Translate from fw_rate_index (IWL_RATE_XXM_INDEX) to PLCP
+ */
+static const u8 fw_rate_idx_to_plcp[IWL_RATE_COUNT] = {
+	IWL_DECLARE_RATE_INFO(1),
+	IWL_DECLARE_RATE_INFO(2),
+	IWL_DECLARE_RATE_INFO(5),
+	IWL_DECLARE_RATE_INFO(11),
+	IWL_DECLARE_RATE_INFO(6),
+	IWL_DECLARE_RATE_INFO(9),
+	IWL_DECLARE_RATE_INFO(12),
+	IWL_DECLARE_RATE_INFO(18),
+	IWL_DECLARE_RATE_INFO(24),
+	IWL_DECLARE_RATE_INFO(36),
+	IWL_DECLARE_RATE_INFO(48),
+	IWL_DECLARE_RATE_INFO(54),
+};
+
+int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
+					enum nl80211_band band)
+{
+	int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
+	int idx;
+	int band_offset = 0;
+
+	/* Legacy rate format, search for match in table */
+	if (band == NL80211_BAND_5GHZ)
+		band_offset = IWL_FIRST_OFDM_RATE;
+	for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
+		if (fw_rate_idx_to_plcp[idx] == rate)
+			return idx - band_offset;
+
+	return -1;
+}
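+
+/*
+ * Worked example for the translation above: on 5 GHz the four CCK rates
+ * (1, 2, 5.5 and 11 Mbps) don't exist, so band_offset shifts the result
+ * down by IWL_FIRST_OFDM_RATE. A PLCP value matching 6 Mbps therefore
+ * translates to mac80211 index 4 on 2.4 GHz but index 0 on 5 GHz.
+ */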
+
+u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx)
+{
+	/* Get PLCP rate for tx_cmd->rate_n_flags */
+	return fw_rate_idx_to_plcp[rate_idx];
+}
+
+void iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	struct iwl_error_resp *err_resp = (void *)pkt->data;
+
+	IWL_ERR(mvm, "FW Error notification: type 0x%08X cmd_id 0x%02X\n",
+		le32_to_cpu(err_resp->error_type), err_resp->cmd_id);
+	IWL_ERR(mvm, "FW Error notification: seq 0x%04X service 0x%08X\n",
+		le16_to_cpu(err_resp->bad_cmd_seq_num),
+		le32_to_cpu(err_resp->error_service));
+	IWL_ERR(mvm, "FW Error notification: timestamp 0x%16llX\n",
+		le64_to_cpu(err_resp->timestamp));
+}
+
+/*
+ * Returns the first antenna as ANT_[ABC], as defined in iwl-config.h.
+ * The parameter should also be a combination of ANT_[ABC].
+ */
+u8 first_antenna(u8 mask)
+{
+	BUILD_BUG_ON(ANT_A != BIT(0)); /* using ffs is wrong if not */
+	if (WARN_ON_ONCE(!mask)) /* ffs will return 0 if mask is zeroed */
+		return BIT(0);
+	return BIT(ffs(mask) - 1);
+}
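+
+/*
+ * Example: first_antenna(ANT_B | ANT_C) returns ANT_B, since ANT_B is
+ * BIT(1) and is the lowest bit set in the mask.
+ */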
+
+/*
+ * Toggles between TX antennas to send the probe request on.
+ * Receives the bitmask of valid TX antennas and the *index* used
+ * for the last TX, and returns the next valid *index* to use.
+ * To set it in the tx_cmd, the caller must use BIT(idx).
+ */
+u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
+{
+	u8 ind = last_idx;
+	int i;
+
+	for (i = 0; i < MAX_ANT_NUM; i++) {
+		ind = (ind + 1) % MAX_ANT_NUM;
+		if (valid & BIT(ind))
+			return ind;
+	}
+
+	WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid);
+	return last_idx;
+}
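+
+/*
+ * Example: with valid = ANT_A | ANT_C (0x5) and last_idx = 0 (antenna A),
+ * the loop skips index 1 (not in the valid mask) and returns index 2, so
+ * the caller would put BIT(2) in the tx_cmd.
+ */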
+
+static const struct {
+	const char *name;
+	u8 num;
+} advanced_lookup[] = {
+	{ "NMI_INTERRUPT_WDG", 0x34 },
+	{ "SYSASSERT", 0x35 },
+	{ "UCODE_VERSION_MISMATCH", 0x37 },
+	{ "BAD_COMMAND", 0x38 },
+	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
+	{ "FATAL_ERROR", 0x3D },
+	{ "NMI_TRM_HW_ERR", 0x46 },
+	{ "NMI_INTERRUPT_TRM", 0x4C },
+	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
+	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
+	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
+	{ "NMI_INTERRUPT_HOST", 0x66 },
+	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
+	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
+	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
+	{ "ADVANCED_SYSASSERT", 0 },
+};
+
+static const char *desc_lookup(u32 num)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++)
+		if (advanced_lookup[i].num == num)
+			return advanced_lookup[i].name;
+
+	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
+	return advanced_lookup[i].name;
+}
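+
+/*
+ * Example: desc_lookup(0x38) returns "BAD_COMMAND", while a value that
+ * appears nowhere in the table falls through to the final
+ * "ADVANCED_SYSASSERT" entry.
+ */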
+
+/*
+ * Note: This structure is read from the device with IO accesses,
+ * and the reading already does the endian conversion. As it is
+ * read with u32-sized accesses, any members with a different size
+ * need to be ordered correctly though!
+ */
+struct iwl_error_event_table_v1 {
+	u32 valid;		/* (nonzero) valid, (0) log is empty */
+	u32 error_id;		/* type of error */
+	u32 pc;			/* program counter */
+	u32 blink1;		/* branch link */
+	u32 blink2;		/* branch link */
+	u32 ilink1;		/* interrupt link */
+	u32 ilink2;		/* interrupt link */
+	u32 data1;		/* error-specific data */
+	u32 data2;		/* error-specific data */
+	u32 data3;		/* error-specific data */
+	u32 bcon_time;		/* beacon timer */
+	u32 tsf_low;		/* network timestamp function timer */
+	u32 tsf_hi;		/* network timestamp function timer */
+	u32 gp1;		/* GP1 timer register */
+	u32 gp2;		/* GP2 timer register */
+	u32 gp3;		/* GP3 timer register */
+	u32 ucode_ver;		/* uCode version */
+	u32 hw_ver;		/* HW Silicon version */
+	u32 brd_ver;		/* HW board version */
+	u32 log_pc;		/* log program counter */
+	u32 frame_ptr;		/* frame pointer */
+	u32 stack_ptr;		/* stack pointer */
+	u32 hcmd;		/* last host command header */
+	u32 isr0;		/* isr status register LMPM_NIC_ISR0:
+				 * rxtx_flag */
+	u32 isr1;		/* isr status register LMPM_NIC_ISR1:
+				 * host_flag */
+	u32 isr2;		/* isr status register LMPM_NIC_ISR2:
+				 * enc_flag */
+	u32 isr3;		/* isr status register LMPM_NIC_ISR3:
+				 * time_flag */
+	u32 isr4;		/* isr status register LMPM_NIC_ISR4:
+				 * wico interrupt */
+	u32 isr_pref;		/* isr status register LMPM_NIC_PREF_STAT */
+	u32 wait_event;		/* wait event() caller address */
+	u32 l2p_control;	/* L2pControlField */
+	u32 l2p_duration;	/* L2pDurationField */
+	u32 l2p_mhvalid;	/* L2pMhValidBits */
+	u32 l2p_addr_match;	/* L2pAddrMatchStat */
+	u32 lmpm_pmg_sel;	/* indicate which clocks are turned on
+				 * (LMPM_PMG_SEL) */
+	u32 u_timestamp;	/* indicates the date and time of the
+				 * compilation */
+	u32 flow_handler;	/* FH read/write pointers, RX credit */
+} __packed /* LOG_ERROR_TABLE_API_S_VER_1 */;
+
+struct iwl_error_event_table {
+	u32 valid;		/* (nonzero) valid, (0) log is empty */
+	u32 error_id;		/* type of error */
+	u32 trm_hw_status0;	/* TRM HW status */
+	u32 trm_hw_status1;	/* TRM HW status */
+	u32 blink2;		/* branch link */
+	u32 ilink1;		/* interrupt link */
+	u32 ilink2;		/* interrupt link */
+	u32 data1;		/* error-specific data */
+	u32 data2;		/* error-specific data */
+	u32 data3;		/* error-specific data */
+	u32 bcon_time;		/* beacon timer */
+	u32 tsf_low;		/* network timestamp function timer */
+	u32 tsf_hi;		/* network timestamp function timer */
+	u32 gp1;		/* GP1 timer register */
+	u32 gp2;		/* GP2 timer register */
+	u32 fw_rev_type;	/* firmware revision type */
+	u32 major;		/* uCode version major */
+	u32 minor;		/* uCode version minor */
+	u32 hw_ver;		/* HW Silicon version */
+	u32 brd_ver;		/* HW board version */
+	u32 log_pc;		/* log program counter */
+	u32 frame_ptr;		/* frame pointer */
+	u32 stack_ptr;		/* stack pointer */
+	u32 hcmd;		/* last host command header */
+	u32 isr0;		/* isr status register LMPM_NIC_ISR0:
+				 * rxtx_flag */
+	u32 isr1;		/* isr status register LMPM_NIC_ISR1:
+				 * host_flag */
+	u32 isr2;		/* isr status register LMPM_NIC_ISR2:
+				 * enc_flag */
+	u32 isr3;		/* isr status register LMPM_NIC_ISR3:
+				 * time_flag */
+	u32 isr4;		/* isr status register LMPM_NIC_ISR4:
+				 * wico interrupt */
+	u32 last_cmd_id;	/* last HCMD id handled by the firmware */
+	u32 wait_event;		/* wait event() caller address */
+	u32 l2p_control;	/* L2pControlField */
+	u32 l2p_duration;	/* L2pDurationField */
+	u32 l2p_mhvalid;	/* L2pMhValidBits */
+	u32 l2p_addr_match;	/* L2pAddrMatchStat */
+	u32 lmpm_pmg_sel;	/* indicate which clocks are turned on
+				 * (LMPM_PMG_SEL) */
+	u32 u_timestamp;	/* indicates the date and time of the
+				 * compilation */
+	u32 flow_handler;	/* FH read/write pointers, RX credit */
+} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
+
+/*
+ * UMAC error struct - relevant starting from family 8000 chip.
+ * Note: This structure is read from the device with IO accesses,
+ * and the reading already does the endian conversion. As it is
+ * read with u32-sized accesses, any members with a different size
+ * need to be ordered correctly though!
+ */
+struct iwl_umac_error_event_table {
+	u32 valid;		/* (nonzero) valid, (0) log is empty */
+	u32 error_id;		/* type of error */
+	u32 blink1;		/* branch link */
+	u32 blink2;		/* branch link */
+	u32 ilink1;		/* interrupt link */
+	u32 ilink2;		/* interrupt link */
+	u32 data1;		/* error-specific data */
+	u32 data2;		/* error-specific data */
+	u32 data3;		/* error-specific data */
+	u32 umac_major;
+	u32 umac_minor;
+	u32 frame_pointer;	/* core register 27 */
+	u32 stack_pointer;	/* core register 28 */
+	u32 cmd_header;		/* latest host cmd sent to UMAC */
+	u32 nic_isr_pref;	/* ISR status register */
+} __packed;
+
+#define ERROR_START_OFFSET  (1 * sizeof(u32))
+#define ERROR_ELEM_SIZE     (7 * sizeof(u32))
+
+static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
+{
+	struct iwl_trans *trans = mvm->trans;
+	struct iwl_umac_error_event_table table;
+
+	if (!mvm->support_umac_log)
+		return;
+
+	iwl_trans_read_mem_bytes(trans, mvm->umac_error_event_table, &table,
+				 sizeof(table));
+
+	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
+		IWL_ERR(trans, "Start IWL Error Log Dump:\n");
+		IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
+			mvm->status, table.valid);
+	}
+
+	IWL_ERR(mvm, "0x%08X | %s\n", table.error_id,
+		desc_lookup(table.error_id));
+	IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1);
+	IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2);
+	IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1);
+	IWL_ERR(mvm, "0x%08X | umac interruptlink2\n", table.ilink2);
+	IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1);
+	IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2);
+	IWL_ERR(mvm, "0x%08X | umac data3\n", table.data3);
+	IWL_ERR(mvm, "0x%08X | umac major\n", table.umac_major);
+	IWL_ERR(mvm, "0x%08X | umac minor\n", table.umac_minor);
+	IWL_ERR(mvm, "0x%08X | frame pointer\n", table.frame_pointer);
+	IWL_ERR(mvm, "0x%08X | stack pointer\n", table.stack_pointer);
+	IWL_ERR(mvm, "0x%08X | last host cmd\n", table.cmd_header);
+	IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref);
+}
+
+static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base)
+{
+	struct iwl_trans *trans = mvm->trans;
+	struct iwl_error_event_table table;
+	u32 val;
+
+	if (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) {
+		if (!base)
+			base = mvm->fw->init_errlog_ptr;
+	} else {
+		if (!base)
+			base = mvm->fw->inst_errlog_ptr;
+	}
+
+	if (base < 0x400000) {
+		IWL_ERR(mvm,
+			"Not valid error log pointer 0x%08X for %s uCode\n",
+			base,
+			(mvm->fwrt.cur_fw_img == IWL_UCODE_INIT)
+			? "Init" : "RT");
+		return;
+	}
+
+	/* check if there is a HW error */
+	val = iwl_trans_read_mem32(trans, base);
+	if (((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50)) {
+		int err;
+
+		IWL_ERR(trans, "HW error, resetting before reading\n");
+
+		/* reset the device */
+		iwl_trans_sw_reset(trans);
+
+		/* set INIT_DONE flag */
+		iwl_set_bit(trans, CSR_GP_CNTRL,
+			    BIT(trans->cfg->csr->flag_init_done));
+
+		/* and wait for clock stabilization */
+		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+			udelay(2);
+
+		err = iwl_poll_bit(trans, CSR_GP_CNTRL,
+				   BIT(trans->cfg->csr->flag_mac_clock_ready),
+				   BIT(trans->cfg->csr->flag_mac_clock_ready),
+				   25000);
+		if (err < 0) {
+			IWL_DEBUG_INFO(trans,
+				       "Failed to reset the card for the dump\n");
+			return;
+		}
+	}
+
+	iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
+
+	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
+		IWL_ERR(trans, "Start IWL Error Log Dump:\n");
+		IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
+			mvm->status, table.valid);
+	}
+
+	/* Do not change this output - scripts rely on it */
+
+	IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
+
+	trace_iwlwifi_dev_ucode_error(trans->dev, &table, table.hw_ver,
+				      table.brd_ver);
+	IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
+		desc_lookup(table.error_id));
+	IWL_ERR(mvm, "0x%08X | trm_hw_status0\n", table.trm_hw_status0);
+	IWL_ERR(mvm, "0x%08X | trm_hw_status1\n", table.trm_hw_status1);
+	IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
+	IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
+	IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
+	IWL_ERR(mvm, "0x%08X | data1\n", table.data1);
+	IWL_ERR(mvm, "0x%08X | data2\n", table.data2);
+	IWL_ERR(mvm, "0x%08X | data3\n", table.data3);
+	IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time);
+	IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low);
+	IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
+	IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
+	IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
+	IWL_ERR(mvm, "0x%08X | uCode revision type\n", table.fw_rev_type);
+	IWL_ERR(mvm, "0x%08X | uCode version major\n", table.major);
+	IWL_ERR(mvm, "0x%08X | uCode version minor\n", table.minor);
+	IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
+	IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver);
+	IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd);
+	IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0);
+	IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1);
+	IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
+	IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
+	IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
+	IWL_ERR(mvm, "0x%08X | last cmd Id\n", table.last_cmd_id);
+	IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
+	IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
+	IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
+	IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
+	IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
+	IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
+	IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
+	IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
+}
+
+void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
+{
+	if (!test_bit(STATUS_DEVICE_ENABLED, &mvm->trans->status)) {
+		IWL_ERR(mvm,
+			"DEVICE_ENABLED bit is not set. Aborting dump.\n");
+		return;
+	}
+
+	iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[0]);
+
+	if (mvm->error_event_table[1])
+		iwl_mvm_dump_lmac_error_log(mvm, mvm->error_event_table[1]);
+
+	iwl_mvm_dump_umac_error_log(mvm);
+}
+
+int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq)
+{
+	int i;
+
+	lockdep_assert_held(&mvm->queue_info_lock);
+
+	/* This should not be hit with new TX path */
+	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+		return -ENOSPC;
+
+	/* Start by looking for a free queue */
+	for (i = minq; i <= maxq; i++)
+		if (mvm->queue_info[i].hw_queue_refcount == 0 &&
+		    mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
+			return i;
+
+	/*
+	 * If no free queue was found - settle for an inactive one to
+	 * reconfigure. Make sure that the inactive queue either already
+	 * belongs to this STA, or that if it belongs to another one - it
+	 * isn't a reserved queue.
+	 */
+	for (i = minq; i <= maxq; i++)
+		if (mvm->queue_info[i].status == IWL_MVM_QUEUE_INACTIVE &&
+		    (sta_id == mvm->queue_info[i].ra_sta_id ||
+		     !mvm->queue_info[i].reserved))
+			return i;
+
+	return -ENOSPC;
+}
+
+int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
+			 int tid, int frame_limit, u16 ssn)
+{
+	struct iwl_scd_txq_cfg_cmd cmd = {
+		.scd_queue = queue,
+		.action = SCD_CFG_ENABLE_QUEUE,
+		.window = frame_limit,
+		.sta_id = sta_id,
+		.ssn = cpu_to_le16(ssn),
+		.tx_fifo = fifo,
+		.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
+			      queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
+		.tid = tid,
+	};
+	int ret;
+
+	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+		return -EINVAL;
+
+	spin_lock_bh(&mvm->queue_info_lock);
+	if (WARN(mvm->queue_info[queue].hw_queue_refcount == 0,
+		 "Trying to reconfig unallocated queue %d\n", queue)) {
+		spin_unlock_bh(&mvm->queue_info_lock);
+		return -ENXIO;
+	}
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);
+
+	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
+	WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
+		  queue, fifo, ret);
+
+	return ret;
+}
+
+static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue,
+				       int mac80211_queue, u8 sta_id, u8 tid)
+{
+	bool enable_queue = true;
+
+	spin_lock_bh(&mvm->queue_info_lock);
+
+	/* Make sure this TID isn't already enabled */
+	if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
+		spin_unlock_bh(&mvm->queue_info_lock);
+		IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
+			queue, tid);
+		return false;
+	}
+
+	/* Update mappings and refcounts */
+	if (mvm->queue_info[queue].hw_queue_refcount > 0)
+		enable_queue = false;
+
+	if (mac80211_queue != IEEE80211_INVAL_HW_QUEUE) {
+		WARN(mac80211_queue >=
+		     BITS_PER_BYTE * sizeof(mvm->hw_queue_to_mac80211[0]),
+		     "cannot track mac80211 queue %d (queue %d, sta %d, tid %d)\n",
+		     mac80211_queue, queue, sta_id, tid);
+		mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
+	}
+
+	mvm->queue_info[queue].hw_queue_refcount++;
+	mvm->queue_info[queue].tid_bitmap |= BIT(tid);
+	mvm->queue_info[queue].ra_sta_id = sta_id;
+
+	if (enable_queue) {
+		if (tid != IWL_MAX_TID_COUNT)
+			mvm->queue_info[queue].mac80211_ac =
+				tid_to_mac80211_ac[tid];
+		else
+			mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;
+
+		mvm->queue_info[queue].txq_tid = tid;
+	}
+
+	IWL_DEBUG_TX_QUEUES(mvm,
+			    "Enabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
+			    queue, mvm->queue_info[queue].hw_queue_refcount,
+			    mvm->hw_queue_to_mac80211[queue]);
+
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	return enable_queue;
+}
+
+int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, int mac80211_queue,
+			    u8 sta_id, u8 tid, unsigned int timeout)
+{
+	struct iwl_tx_queue_cfg_cmd cmd = {
+		.flags = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
+		.sta_id = sta_id,
+		.tid = tid,
+	};
+	int queue, size = IWL_DEFAULT_QUEUE_SIZE;
+
+	if (cmd.tid == IWL_MAX_TID_COUNT) {
+		cmd.tid = IWL_MGMT_TID;
+		size = IWL_MGMT_QUEUE_SIZE;
+	}
+	queue = iwl_trans_txq_alloc(mvm->trans, (void *)&cmd,
+				    SCD_QUEUE_CFG, size, timeout);
+
+	if (queue < 0) {
+		IWL_DEBUG_TX_QUEUES(mvm,
+				    "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
+				    sta_id, tid, queue);
+		return queue;
+	}
+
+	IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
+			    queue, sta_id, tid);
+
+	mvm->hw_queue_to_mac80211[queue] |= BIT(mac80211_queue);
+	IWL_DEBUG_TX_QUEUES(mvm,
+			    "Enabling TXQ #%d (mac80211 map:0x%x)\n",
+			    queue, mvm->hw_queue_to_mac80211[queue]);
+
+	return queue;
+}
+
+bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+			u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
+			unsigned int wdg_timeout)
+{
+	struct iwl_scd_txq_cfg_cmd cmd = {
+		.scd_queue = queue,
+		.action = SCD_CFG_ENABLE_QUEUE,
+		.window = cfg->frame_limit,
+		.sta_id = cfg->sta_id,
+		.ssn = cpu_to_le16(ssn),
+		.tx_fifo = cfg->fifo,
+		.aggregate = cfg->aggregate,
+		.tid = cfg->tid,
+	};
+	bool inc_ssn;
+
+	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+		return false;
+
+	/* Send the enabling command if we need to */
+	if (!iwl_mvm_update_txq_mapping(mvm, queue, mac80211_queue,
+					cfg->sta_id, cfg->tid))
+		return false;
+
+	inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
+					   NULL, wdg_timeout);
+	if (inc_ssn)
+		le16_add_cpu(&cmd.ssn, 1);
+
+	WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
+	     "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);
+
+	return inc_ssn;
+}
+
+int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
+			u8 tid, u8 flags)
+{
+	struct iwl_scd_txq_cfg_cmd cmd = {
+		.scd_queue = queue,
+		.action = SCD_CFG_DISABLE_QUEUE,
+	};
+	bool remove_mac_queue = mac80211_queue != IEEE80211_INVAL_HW_QUEUE;
+	int ret;
+
+	if (WARN_ON(remove_mac_queue && mac80211_queue >= IEEE80211_MAX_QUEUES))
+		return -EINVAL;
+
+	if (iwl_mvm_has_new_tx_api(mvm)) {
+		spin_lock_bh(&mvm->queue_info_lock);
+
+		if (remove_mac_queue)
+			mvm->hw_queue_to_mac80211[queue] &=
+				~BIT(mac80211_queue);
+
+		spin_unlock_bh(&mvm->queue_info_lock);
+
+		iwl_trans_txq_free(mvm->trans, queue);
+
+		return 0;
+	}
+
+	spin_lock_bh(&mvm->queue_info_lock);
+
+	if (WARN_ON(mvm->queue_info[queue].hw_queue_refcount == 0)) {
+		spin_unlock_bh(&mvm->queue_info_lock);
+		return 0;
+	}
+
+	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
+
+	/*
+	 * If there is another TID with the same AC - don't remove the MAC queue
+	 * from the mapping
+	 */
+	if (tid < IWL_MAX_TID_COUNT) {
+		unsigned long tid_bitmap =
+			mvm->queue_info[queue].tid_bitmap;
+		int ac = tid_to_mac80211_ac[tid];
+		int i;
+
+		for_each_set_bit(i, &tid_bitmap, IWL_MAX_TID_COUNT) {
+			if (tid_to_mac80211_ac[i] == ac)
+				remove_mac_queue = false;
+		}
+	}
+
+	if (remove_mac_queue)
+		mvm->hw_queue_to_mac80211[queue] &=
+			~BIT(mac80211_queue);
+	mvm->queue_info[queue].hw_queue_refcount--;
+
+	cmd.action = mvm->queue_info[queue].hw_queue_refcount ?
+		SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
+	if (cmd.action == SCD_CFG_DISABLE_QUEUE)
+		mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
+
+	IWL_DEBUG_TX_QUEUES(mvm,
+			    "Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
+			    queue,
+			    mvm->queue_info[queue].hw_queue_refcount,
+			    mvm->hw_queue_to_mac80211[queue]);
+
+	/* If the queue is still enabled - nothing left to do in this func */
+	if (cmd.action == SCD_CFG_ENABLE_QUEUE) {
+		spin_unlock_bh(&mvm->queue_info_lock);
+		return 0;
+	}
+
+	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
+	cmd.tid = mvm->queue_info[queue].txq_tid;
+
+	/* Make sure queue info is correct even though we overwrite it */
+	WARN(mvm->queue_info[queue].hw_queue_refcount ||
+	     mvm->queue_info[queue].tid_bitmap ||
+	     mvm->hw_queue_to_mac80211[queue],
+	     "TXQ #%d info out-of-sync - refcount=%d, mac map=0x%x, tid=0x%x\n",
+	     queue, mvm->queue_info[queue].hw_queue_refcount,
+	     mvm->hw_queue_to_mac80211[queue],
+	     mvm->queue_info[queue].tid_bitmap);
+
+	/* If we are here - the queue is freed and we can zero out these vals */
+	mvm->queue_info[queue].hw_queue_refcount = 0;
+	mvm->queue_info[queue].tid_bitmap = 0;
+	mvm->hw_queue_to_mac80211[queue] = 0;
+
+	/* Regardless of whether this TXQ was reserved for a STA - mark it
+	 * as unreserved */
+	mvm->queue_info[queue].reserved = false;
+
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	iwl_trans_txq_disable(mvm->trans, queue, false);
+	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
+				   sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);
+
+	if (ret)
+		IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
+			queue, ret);
+	return ret;
+}
+
+/**
+ * iwl_mvm_send_lq_cmd() - Send link quality command
+ * @init: This command is sent as part of station initialization right
+ *        after station has been added.
+ *
+ * The link quality command is sent as the last step of station creation.
+ * This is the special case in which init is set, and we call a callback
+ * to clear the state indicating that station creation is in progress.
+ */
+int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init)
+{
+	struct iwl_host_cmd cmd = {
+		.id = LQ_CMD,
+		.len = { sizeof(struct iwl_lq_cmd), },
+		.flags = init ? 0 : CMD_ASYNC,
+		.data = { lq, },
+	};
+
+	if (WARN_ON(lq->sta_id == IWL_MVM_INVALID_STA ||
+		    iwl_mvm_has_tlc_offload(mvm)))
+		return -EINVAL;
+
+	return iwl_mvm_send_cmd(mvm, &cmd);
+}
+
+/**
+ * iwl_mvm_update_smps - Get a request to change the SMPS mode
+ * @req_type: The part of the driver that requested the change.
+ * @smps_request: The requested SMPS mode.
+ *
+ * Get a request to change the SMPS mode, and set the mode according to
+ * all outstanding requests in the driver.
+ */
+void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			 enum iwl_mvm_smps_type_request req_type,
+			 enum ieee80211_smps_mode smps_request)
+{
+	struct iwl_mvm_vif *mvmvif;
+	enum ieee80211_smps_mode smps_mode;
+	int i;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	/* SMPS is irrelevant for NICs that don't have at least 2 RX antennas */
+	if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
+		return;
+
+	if (vif->type == NL80211_IFTYPE_AP)
+		smps_mode = IEEE80211_SMPS_OFF;
+	else
+		smps_mode = IEEE80211_SMPS_AUTOMATIC;
+
+	mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	mvmvif->smps_requests[req_type] = smps_request;
+	for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
+		if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC) {
+			smps_mode = IEEE80211_SMPS_STATIC;
+			break;
+		}
+		if (mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
+			smps_mode = IEEE80211_SMPS_DYNAMIC;
+	}
+
+	ieee80211_request_smps(vif, smps_mode);
+}
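+
+/*
+ * Example of the aggregation above: if one requester asks for
+ * IEEE80211_SMPS_DYNAMIC and another for IEEE80211_SMPS_STATIC, STATIC
+ * wins and is what gets passed to mac80211; a lone DYNAMIC request in
+ * turn overrides the AUTOMATIC (or, for AP, OFF) default.
+ */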
+
+int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear)
+{
+	struct iwl_statistics_cmd scmd = {
+		.flags = clear ? cpu_to_le32(IWL_STATISTICS_FLG_CLEAR) : 0,
+	};
+	struct iwl_host_cmd cmd = {
+		.id = STATISTICS_CMD,
+		.len[0] = sizeof(scmd),
+		.data[0] = &scmd,
+		.flags = CMD_WANT_SKB,
+	};
+	int ret;
+
+	ret = iwl_mvm_send_cmd(mvm, &cmd);
+	if (ret)
+		return ret;
+
+	iwl_mvm_handle_rx_statistics(mvm, cmd.resp_pkt);
+	iwl_free_resp(&cmd);
+
+	if (clear)
+		iwl_mvm_accu_radio_stats(mvm);
+
+	return 0;
+}
+
+void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm)
+{
+	mvm->accu_radio_stats.rx_time += mvm->radio_stats.rx_time;
+	mvm->accu_radio_stats.tx_time += mvm->radio_stats.tx_time;
+	mvm->accu_radio_stats.on_time_rf += mvm->radio_stats.on_time_rf;
+	mvm->accu_radio_stats.on_time_scan += mvm->radio_stats.on_time_scan;
+}
+
+static void iwl_mvm_diversity_iter(void *_data, u8 *mac,
+				   struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	bool *result = _data;
+	int i;
+
+	for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
+		if (mvmvif->smps_requests[i] == IEEE80211_SMPS_STATIC ||
+		    mvmvif->smps_requests[i] == IEEE80211_SMPS_DYNAMIC)
+			*result = false;
+	}
+}
+
+bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
+{
+	bool result = true;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
+		return false;
+
+	if (mvm->cfg->rx_with_siso_diversity)
+		return false;
+
+	ieee80211_iterate_active_interfaces_atomic(
+			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+			iwl_mvm_diversity_iter, &result);
+
+	return result;
+}
+
+int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			       bool low_latency,
+			       enum iwl_mvm_low_latency_cause cause)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	int res;
+	bool prev;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	prev = iwl_mvm_vif_low_latency(mvmvif);
+	iwl_mvm_vif_set_low_latency(mvmvif, low_latency, cause);
+
+	low_latency = iwl_mvm_vif_low_latency(mvmvif);
+
+	if (low_latency == prev)
+		return 0;
+
+	if (fw_has_capa(&mvm->fw->ucode_capa,
+			IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) {
+		struct iwl_mac_low_latency_cmd cmd = {
+			.mac_id = cpu_to_le32(mvmvif->id)
+		};
+
+		if (low_latency) {
+			/* currently we don't care about the direction */
+			cmd.low_latency_rx = 1;
+			cmd.low_latency_tx = 1;
+		}
+		res = iwl_mvm_send_cmd_pdu(mvm,
+					   iwl_cmd_id(LOW_LATENCY_CMD,
+						      MAC_CONF_GROUP, 0),
+					   0, sizeof(cmd), &cmd);
+		if (res)
+			IWL_ERR(mvm, "Failed to send low latency command\n");
+	}
+
+	res = iwl_mvm_update_quotas(mvm, false, NULL);
+	if (res)
+		return res;
+
+	iwl_mvm_bt_coex_vif_change(mvm);
+
+	return iwl_mvm_power_update_mac(mvm);
+}
+
+struct iwl_mvm_low_latency_iter {
+	bool result;
+	bool result_per_band[NUM_NL80211_BANDS];
+};
+
+static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_low_latency_iter *result = _data;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	enum nl80211_band band;
+
+	if (iwl_mvm_vif_low_latency(mvmvif)) {
+		result->result = true;
+
+		if (!mvmvif->phy_ctxt)
+			return;
+
+		band = mvmvif->phy_ctxt->channel->band;
+		result->result_per_band[band] = true;
+	}
+}
+
+bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
+{
+	struct iwl_mvm_low_latency_iter data = {};
+
+	ieee80211_iterate_active_interfaces_atomic(
+			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+			iwl_mvm_ll_iter, &data);
+
+	return data.result;
+}
+
+bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band)
+{
+	struct iwl_mvm_low_latency_iter data = {};
+
+	ieee80211_iterate_active_interfaces_atomic(
+			mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+			iwl_mvm_ll_iter, &data);
+
+	return data.result_per_band[band];
+}
+
+struct iwl_bss_iter_data {
+	struct ieee80211_vif *vif;
+	bool error;
+};
+
+static void iwl_mvm_bss_iface_iterator(void *_data, u8 *mac,
+				       struct ieee80211_vif *vif)
+{
+	struct iwl_bss_iter_data *data = _data;
+
+	if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+		return;
+
+	if (data->vif) {
+		data->error = true;
+		return;
+	}
+
+	data->vif = vif;
+}
+
+struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm)
+{
+	struct iwl_bss_iter_data bss_iter_data = {};
+
+	ieee80211_iterate_active_interfaces_atomic(
+		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+		iwl_mvm_bss_iface_iterator, &bss_iter_data);
+
+	if (bss_iter_data.error) {
+		IWL_ERR(mvm, "More than one managed interface active!\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	return bss_iter_data.vif;
+}
+
+struct iwl_sta_iter_data {
+	bool assoc;
+};
+
+static void iwl_mvm_sta_iface_iterator(void *_data, u8 *mac,
+				       struct ieee80211_vif *vif)
+{
+	struct iwl_sta_iter_data *data = _data;
+
+	if (vif->type != NL80211_IFTYPE_STATION)
+		return;
+
+	if (vif->bss_conf.assoc)
+		data->assoc = true;
+}
+
+bool iwl_mvm_is_vif_assoc(struct iwl_mvm *mvm)
+{
+	struct iwl_sta_iter_data data = {
+		.assoc = false,
+	};
+
+	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+						   IEEE80211_IFACE_ITER_NORMAL,
+						   iwl_mvm_sta_iface_iterator,
+						   &data);
+	return data.assoc;
+}
+
+unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
+				    struct ieee80211_vif *vif,
+				    bool tdls, bool cmd_q)
+{
+	struct iwl_fw_dbg_trigger_tlv *trigger;
+	struct iwl_fw_dbg_trigger_txq_timer *txq_timer;
+	unsigned int default_timeout =
+		cmd_q ? IWL_DEF_WD_TIMEOUT : mvm->cfg->base_params->wd_timeout;
+
+	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS)) {
+		/*
+		 * We can't know when the station is asleep or awake, so we
+		 * must disable the queue hang detection.
+		 */
+		if (fw_has_capa(&mvm->fw->ucode_capa,
+				IWL_UCODE_TLV_CAPA_STA_PM_NOTIF) &&
+		    vif && vif->type == NL80211_IFTYPE_AP)
+			return IWL_WATCHDOG_DISABLED;
+		return iwlmvm_mod_params.tfd_q_hang_detect ?
+			default_timeout : IWL_WATCHDOG_DISABLED;
+	}
+
+	trigger = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TXQ_TIMERS);
+	txq_timer = (void *)trigger->data;
+
+	if (tdls)
+		return le32_to_cpu(txq_timer->tdls);
+
+	if (cmd_q)
+		return le32_to_cpu(txq_timer->command_queue);
+
+	if (WARN_ON(!vif))
+		return default_timeout;
+
+	switch (ieee80211_vif_type_p2p(vif)) {
+	case NL80211_IFTYPE_ADHOC:
+		return le32_to_cpu(txq_timer->ibss);
+	case NL80211_IFTYPE_STATION:
+		return le32_to_cpu(txq_timer->bss);
+	case NL80211_IFTYPE_AP:
+		return le32_to_cpu(txq_timer->softap);
+	case NL80211_IFTYPE_P2P_CLIENT:
+		return le32_to_cpu(txq_timer->p2p_client);
+	case NL80211_IFTYPE_P2P_GO:
+		return le32_to_cpu(txq_timer->p2p_go);
+	case NL80211_IFTYPE_P2P_DEVICE:
+		return le32_to_cpu(txq_timer->p2p_device);
+	case NL80211_IFTYPE_MONITOR:
+		return default_timeout;
+	default:
+		WARN_ON(1);
+		return mvm->cfg->base_params->wd_timeout;
+	}
+}
+
+void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			     const char *errmsg)
+{
+	struct iwl_fw_dbg_trigger_tlv *trig;
+	struct iwl_fw_dbg_trigger_mlme *trig_mlme;
+
+	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
+		goto out;
+
+	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
+	trig_mlme = (void *)trig->data;
+	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
+					   ieee80211_vif_to_wdev(vif), trig))
+		goto out;
+
+	if (trig_mlme->stop_connection_loss &&
+	    --trig_mlme->stop_connection_loss)
+		goto out;
+
+	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, "%s", errmsg);
+
+out:
+	ieee80211_connection_loss(vif);
+}
+
+/*
+ * Remove inactive TIDs of a given queue.
+ * If all queue TIDs are inactive - mark the queue as inactive.
+ * If only some of the queue TIDs are inactive - unmap them from the queue.
+ */
+static void iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
+					 struct iwl_mvm_sta *mvmsta, int queue,
+					 unsigned long tid_bitmap)
+{
+	int tid;
+
+	lockdep_assert_held(&mvmsta->lock);
+	lockdep_assert_held(&mvm->queue_info_lock);
+
+	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
+		return;
+
+	/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
+	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+		/* If some TFDs are still queued - don't mark TID as inactive */
+		if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
+			tid_bitmap &= ~BIT(tid);
+
+		/* Don't mark as inactive any TID that has an active BA */
+		if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
+			tid_bitmap &= ~BIT(tid);
+	}
+
+	/* If all TIDs in the queue are inactive - mark queue as inactive. */
+	if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
+		mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
+
+		for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1)
+			mvmsta->tid_data[tid].is_tid_active = false;
+
+		IWL_DEBUG_TX_QUEUES(mvm, "Queue %d marked as inactive\n",
+				    queue);
+		return;
+	}
+
+	/*
+	 * If we are here, this is a shared queue and not all TIDs timed out.
+	 * Remove the ones that did.
+	 */
+	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+		int mac_queue = mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]];
+
+		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
+		mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac_queue);
+		mvm->queue_info[queue].hw_queue_refcount--;
+		mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);
+		mvmsta->tid_data[tid].is_tid_active = false;
+
+		IWL_DEBUG_TX_QUEUES(mvm,
+				    "Removing inactive TID %d from shared Q:%d\n",
+				    tid, queue);
+	}
+
+	IWL_DEBUG_TX_QUEUES(mvm,
+			    "TXQ #%d left with tid bitmap 0x%x\n", queue,
+			    mvm->queue_info[queue].tid_bitmap);
+
+	/*
+	 * There may be different TIDs with the same mac queues, so make
+	 * sure all TIDs have existing corresponding mac queues enabled
+	 */
+	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+		mvm->hw_queue_to_mac80211[queue] |=
+			BIT(mvmsta->vif->hw_queue[tid_to_mac80211_ac[tid]]);
+	}
+
+	/* If the queue is marked as shared - "unshare" it */
+	if (mvm->queue_info[queue].hw_queue_refcount == 1 &&
+	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
+		mvm->queue_info[queue].status = IWL_MVM_QUEUE_RECONFIGURING;
+		IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
+				    queue);
+	}
+}
+
+void iwl_mvm_inactivity_check(struct iwl_mvm *mvm)
+{
+	unsigned long timeout_queues_map = 0;
+	unsigned long now = jiffies;
+	int i;
+
+	if (iwl_mvm_has_new_tx_api(mvm))
+		return;
+
+	spin_lock_bh(&mvm->queue_info_lock);
+	for (i = 0; i < IWL_MAX_HW_QUEUES; i++)
+		if (mvm->queue_info[i].hw_queue_refcount > 0)
+			timeout_queues_map |= BIT(i);
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	rcu_read_lock();
+
+	/*
+	 * If a queue times out - mark it as INACTIVE (don't remove it right
+	 * away if we don't have to). This is an optimization in case traffic
+	 * comes back later, and we don't HAVE to use a currently-inactive
+	 * queue
+	 */
+	for_each_set_bit(i, &timeout_queues_map, IWL_MAX_HW_QUEUES) {
+		struct ieee80211_sta *sta;
+		struct iwl_mvm_sta *mvmsta;
+		u8 sta_id;
+		int tid;
+		unsigned long inactive_tid_bitmap = 0;
+		unsigned long queue_tid_bitmap;
+
+		spin_lock_bh(&mvm->queue_info_lock);
+		queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
+
+		/* If TXQ isn't in active use anyway - nothing to do here... */
+		if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
+		    mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED) {
+			spin_unlock_bh(&mvm->queue_info_lock);
+			continue;
+		}
+
+		/* Check to see if there are inactive TIDs on this queue */
+		for_each_set_bit(tid, &queue_tid_bitmap,
+				 IWL_MAX_TID_COUNT + 1) {
+			if (time_after(mvm->queue_info[i].last_frame_time[tid] +
+				       IWL_MVM_DQA_QUEUE_TIMEOUT, now))
+				continue;
+
+			inactive_tid_bitmap |= BIT(tid);
+		}
+		spin_unlock_bh(&mvm->queue_info_lock);
+
+		/* If all TIDs are active - finish check on this queue */
+		if (!inactive_tid_bitmap)
+			continue;
+
+		/*
+		 * If we are here - the queue hasn't been served recently and
+		 * is still in use.
+		 */
+
+		sta_id = mvm->queue_info[i].ra_sta_id;
+		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+		/*
+		 * If the STA doesn't exist anymore, it isn't an error. It could
+		 * be that it was removed since getting the queues, and in this
+		 * case it should've inactivated its queues anyway.
+		 */
+		if (IS_ERR_OR_NULL(sta))
+			continue;
+
+		mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+		spin_lock_bh(&mvmsta->lock);
+		spin_lock(&mvm->queue_info_lock);
+		iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
+					     inactive_tid_bitmap);
+		spin_unlock(&mvm->queue_info_lock);
+		spin_unlock_bh(&mvmsta->lock);
+	}
+
+	rcu_read_unlock();
+}
+
+void iwl_mvm_event_frame_timeout_callback(struct iwl_mvm *mvm,
+					  struct ieee80211_vif *vif,
+					  const struct ieee80211_sta *sta,
+					  u16 tid)
+{
+	struct iwl_fw_dbg_trigger_tlv *trig;
+	struct iwl_fw_dbg_trigger_ba *ba_trig;
+
+	if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_BA))
+		return;
+
+	trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_BA);
+	ba_trig = (void *)trig->data;
+	if (!iwl_fw_dbg_trigger_check_stop(&mvm->fwrt,
+					   ieee80211_vif_to_wdev(vif), trig))
+		return;
+
+	if (!(le16_to_cpu(ba_trig->frame_timeout) & BIT(tid)))
+		return;
+
+	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
+				"Frame from %pM timed out, tid %d",
+				sta->addr, tid);
+}
+
+u8 iwl_mvm_tcm_load_percentage(u32 airtime, u32 elapsed)
+{
+	if (!elapsed)
+		return 0;
+
+	return (100 * airtime / elapsed) / USEC_PER_MSEC;
+}
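+
+/*
+ * Example: 250000 usec of airtime over a 1000 msec window yields
+ * (100 * 250000 / 1000) / USEC_PER_MSEC = 25, i.e. a 25% load
+ * (airtime is counted in usec, elapsed time in msec).
+ */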
+
+static enum iwl_mvm_traffic_load
+iwl_mvm_tcm_load(struct iwl_mvm *mvm, u32 airtime, unsigned long elapsed)
+{
+	u8 load = iwl_mvm_tcm_load_percentage(airtime, elapsed);
+
+	if (load > IWL_MVM_TCM_LOAD_HIGH_THRESH)
+		return IWL_MVM_TRAFFIC_HIGH;
+	if (load > IWL_MVM_TCM_LOAD_MEDIUM_THRESH)
+		return IWL_MVM_TRAFFIC_MEDIUM;
+
+	return IWL_MVM_TRAFFIC_LOW;
+}
+
+struct iwl_mvm_tcm_iter_data {
+	struct iwl_mvm *mvm;
+	bool any_sent;
+};
+
+static void iwl_mvm_tcm_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_tcm_iter_data *data = _data;
+	struct iwl_mvm *mvm = data->mvm;
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	bool low_latency, prev = mvmvif->low_latency & LOW_LATENCY_TRAFFIC;
+
+	if (mvmvif->id >= NUM_MAC_INDEX_DRIVER)
+		return;
+
+	low_latency = mvm->tcm.result.low_latency[mvmvif->id];
+
+	if (!mvm->tcm.result.change[mvmvif->id] &&
+	    prev == low_latency) {
+		iwl_mvm_update_quotas(mvm, false, NULL);
+		return;
+	}
+
+	if (prev != low_latency) {
+		/* this sends traffic load and updates quota as well */
+		iwl_mvm_update_low_latency(mvm, vif, low_latency,
+					   LOW_LATENCY_TRAFFIC);
+	} else {
+		iwl_mvm_update_quotas(mvm, false, NULL);
+	}
+
+	data->any_sent = true;
+}
+
+static void iwl_mvm_tcm_results(struct iwl_mvm *mvm)
+{
+	struct iwl_mvm_tcm_iter_data data = {
+		.mvm = mvm,
+		.any_sent = false,
+	};
+
+	mutex_lock(&mvm->mutex);
+
+	ieee80211_iterate_active_interfaces(
+		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+		iwl_mvm_tcm_iter, &data);
+
+	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
+		iwl_mvm_config_scan(mvm);
+
+	mutex_unlock(&mvm->mutex);
+}
+
+static void iwl_mvm_tcm_uapsd_nonagg_detected_wk(struct work_struct *wk)
+{
+	struct iwl_mvm *mvm;
+	struct iwl_mvm_vif *mvmvif;
+	struct ieee80211_vif *vif;
+
+	mvmvif = container_of(wk, struct iwl_mvm_vif,
+			      uapsd_nonagg_detected_wk.work);
+	vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
+	mvm = mvmvif->mvm;
+
+	if (mvm->tcm.data[mvmvif->id].opened_rx_ba_sessions)
+		return;
+
+	/* remember that this AP is broken */
+	memcpy(mvm->uapsd_noagg_bssids[mvm->uapsd_noagg_bssid_write_idx].addr,
+	       vif->bss_conf.bssid, ETH_ALEN);
+	mvm->uapsd_noagg_bssid_write_idx++;
+	if (mvm->uapsd_noagg_bssid_write_idx >= IWL_MVM_UAPSD_NOAGG_LIST_LEN)
+		mvm->uapsd_noagg_bssid_write_idx = 0;
+
+	iwl_mvm_connection_loss(mvm, vif,
+				"AP isn't using AMPDU with uAPSD enabled");
+}
+
+static void iwl_mvm_uapsd_agg_disconnect_iter(void *data, u8 *mac,
+					      struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	struct iwl_mvm *mvm = mvmvif->mvm;
+	int *mac_id = data;
+
+	if (vif->type != NL80211_IFTYPE_STATION)
+		return;
+
+	if (mvmvif->id != *mac_id)
+		return;
+
+	if (!vif->bss_conf.assoc)
+		return;
+
+	if (!mvmvif->queue_params[IEEE80211_AC_VO].uapsd &&
+	    !mvmvif->queue_params[IEEE80211_AC_VI].uapsd &&
+	    !mvmvif->queue_params[IEEE80211_AC_BE].uapsd &&
+	    !mvmvif->queue_params[IEEE80211_AC_BK].uapsd)
+		return;
+
+	if (mvm->tcm.data[*mac_id].uapsd_nonagg_detect.detected)
+		return;
+
+	mvm->tcm.data[*mac_id].uapsd_nonagg_detect.detected = true;
+	IWL_INFO(mvm,
+		 "detected AP should do aggregation but isn't, likely due to U-APSD\n");
+	schedule_delayed_work(&mvmvif->uapsd_nonagg_detected_wk, 15 * HZ);
+}
+
+static void iwl_mvm_check_uapsd_agg_expected_tpt(struct iwl_mvm *mvm,
+						 unsigned int elapsed,
+						 int mac)
+{
+	u64 bytes = mvm->tcm.data[mac].uapsd_nonagg_detect.rx_bytes;
+	u64 tpt;
+	unsigned long rate;
+
+	rate = ewma_rate_read(&mvm->tcm.data[mac].uapsd_nonagg_detect.rate);
+
+	if (!rate || mvm->tcm.data[mac].opened_rx_ba_sessions ||
+	    mvm->tcm.data[mac].uapsd_nonagg_detect.detected)
+		return;
+
+	if (iwl_mvm_has_new_rx_api(mvm)) {
+		tpt = 8 * bytes; /* kbps */
+		do_div(tpt, elapsed);
+		rate *= 1000; /* kbps */
+		if (tpt < 22 * rate / 100)
+			return;
+	} else {
+		/*
+		 * the rate here is actually the threshold, in 100Kbps units,
+		 * so do the needed conversion from bytes to 100Kbps:
+		 * 100kb = bits / (100 * 1000),
+		 * 100kbps = 100kb / (msecs / 1000) ==
+		 *           (bits / (100 * 1000)) / (msecs / 1000) ==
+		 *           bits / (100 * msecs)
+		 */
+		tpt = (8 * bytes);
+		do_div(tpt, elapsed * 100);
+		if (tpt < rate)
+			return;
+	}
+
+	ieee80211_iterate_active_interfaces_atomic(
+		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+		iwl_mvm_uapsd_agg_disconnect_iter, &mac);
+}
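+
+/*
+ * Worked example for the new-RX-API branch above: with an EWMA rate of
+ * 100 (taken to be Mbps, so scaled to 100000 kbps), the threshold is
+ * 22% of that, i.e. 22000 kbps - detection only proceeds when the
+ * measured throughput (8 * bytes / elapsed-msecs, in kbps) reaches at
+ * least ~22 Mbps.
+ */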
+
+static void iwl_mvm_tcm_iterator(void *_data, u8 *mac,
+				 struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+	u32 *band = _data;
+
+	if (!mvmvif->phy_ctxt)
+		return;
+
+	band[mvmvif->id] = mvmvif->phy_ctxt->channel->band;
+}
+
+static unsigned long iwl_mvm_calc_tcm_stats(struct iwl_mvm *mvm,
+					    unsigned long ts,
+					    bool handle_uapsd)
+{
+	unsigned int elapsed = jiffies_to_msecs(ts - mvm->tcm.ts);
+	unsigned int uapsd_elapsed =
+		jiffies_to_msecs(ts - mvm->tcm.uapsd_nonagg_ts);
+	u32 total_airtime = 0;
+	u32 band_airtime[NUM_NL80211_BANDS] = {0};
+	u32 band[NUM_MAC_INDEX_DRIVER] = {0};
+	int ac, mac, i;
+	bool low_latency = false;
+	enum iwl_mvm_traffic_load load, band_load;
+	bool handle_ll = time_after(ts, mvm->tcm.ll_ts + MVM_LL_PERIOD);
+
+	if (handle_ll)
+		mvm->tcm.ll_ts = ts;
+	if (handle_uapsd)
+		mvm->tcm.uapsd_nonagg_ts = ts;
+
+	mvm->tcm.result.elapsed = elapsed;
+
+	ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+						   IEEE80211_IFACE_ITER_NORMAL,
+						   iwl_mvm_tcm_iterator,
+						   &band);
+
+	for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
+		struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
+		u32 vo_vi_pkts = 0;
+		u32 airtime = mdata->rx.airtime + mdata->tx.airtime;
+
+		total_airtime += airtime;
+		band_airtime[band[mac]] += airtime;
+
+		load = iwl_mvm_tcm_load(mvm, airtime, elapsed);
+		mvm->tcm.result.change[mac] = load != mvm->tcm.result.load[mac];
+		mvm->tcm.result.load[mac] = load;
+		mvm->tcm.result.airtime[mac] = airtime;
+
+		for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_VI; ac++)
+			vo_vi_pkts += mdata->rx.pkts[ac] +
+				      mdata->tx.pkts[ac];
+
+		/* enable immediately with enough packets but defer disabling */
+		if (vo_vi_pkts > IWL_MVM_TCM_LOWLAT_ENABLE_THRESH)
+			mvm->tcm.result.low_latency[mac] = true;
+		else if (handle_ll)
+			mvm->tcm.result.low_latency[mac] = false;
+
+		if (handle_ll) {
+			/* clear old data */
+			memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
+			memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
+		}
+		low_latency |= mvm->tcm.result.low_latency[mac];
+
+		if (!mvm->tcm.result.low_latency[mac] && handle_uapsd)
+			iwl_mvm_check_uapsd_agg_expected_tpt(mvm, uapsd_elapsed,
+							     mac);
+		/* clear old data */
+		if (handle_uapsd)
+			mdata->uapsd_nonagg_detect.rx_bytes = 0;
+		memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
+		memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));
+	}
+
+	load = iwl_mvm_tcm_load(mvm, total_airtime, elapsed);
+	mvm->tcm.result.global_change = load != mvm->tcm.result.global_load;
+	mvm->tcm.result.global_load = load;
+
+	for (i = 0; i < NUM_NL80211_BANDS; i++) {
+		band_load = iwl_mvm_tcm_load(mvm, band_airtime[i], elapsed);
+		mvm->tcm.result.band_load[i] = band_load;
+	}
+
+	/*
+	 * If the current load isn't low we need to force re-evaluation
+	 * in the TCM period, so that we can return to low load if there
+	 * was no traffic at all (and thus iwl_mvm_recalc_tcm didn't get
+	 * triggered by traffic).
+	 */
+	if (load != IWL_MVM_TRAFFIC_LOW)
+		return MVM_TCM_PERIOD;
+	/*
+	 * If low-latency is active we need to force re-evaluation after
+	 * (the longer) MVM_LL_PERIOD, so that we can disable low-latency
+	 * when there's no traffic at all.
+	 */
+	if (low_latency)
+		return MVM_LL_PERIOD;
+	/*
+	 * Otherwise, we don't need to run the work struct because we're
+	 * in the default "idle" state - traffic indication is low (which
+	 * also covers the "no traffic" case) and low-latency is disabled
+	 * so there's no state that may need to be disabled when there's
+	 * no traffic at all.
+	 *
+	 * Note that this has no impact on the regular scheduling of the
+	 * updates triggered by traffic - those happen whenever one of the
+	 * two timeouts expire (if there's traffic at all.)
+	 */
+	return 0;
+}
+
+void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm)
+{
+	unsigned long ts = jiffies;
+	bool handle_uapsd =
+		time_after(ts, mvm->tcm.uapsd_nonagg_ts +
+			       msecs_to_jiffies(IWL_MVM_UAPSD_NONAGG_PERIOD));
+
+	spin_lock(&mvm->tcm.lock);
+	if (mvm->tcm.paused || !time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
+		spin_unlock(&mvm->tcm.lock);
+		return;
+	}
+	spin_unlock(&mvm->tcm.lock);
+
+	if (handle_uapsd && iwl_mvm_has_new_rx_api(mvm)) {
+		mutex_lock(&mvm->mutex);
+		if (iwl_mvm_request_statistics(mvm, true))
+			handle_uapsd = false;
+		mutex_unlock(&mvm->mutex);
+	}
+
+	spin_lock(&mvm->tcm.lock);
+	/* re-check if somebody else won the recheck race */
+	if (!mvm->tcm.paused && time_after(ts, mvm->tcm.ts + MVM_TCM_PERIOD)) {
+		/* calculate statistics */
+		unsigned long work_delay = iwl_mvm_calc_tcm_stats(mvm, ts,
+								  handle_uapsd);
+
+		/* the memset needs to be visible before the timestamp */
+		smp_mb();
+		mvm->tcm.ts = ts;
+		if (work_delay)
+			schedule_delayed_work(&mvm->tcm.work, work_delay);
+	}
+	spin_unlock(&mvm->tcm.lock);
+
+	iwl_mvm_tcm_results(mvm);
+}
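+
+/*
+ * A note on the locking above: tcm.lock is a spinlock, so it is
+ * dropped before taking mvm->mutex for the (sleeping) statistics
+ * request; the paused/timestamp condition is then re-checked under
+ * the lock, since another context may have done the recalculation
+ * in the meantime.
+ */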
+
+void iwl_mvm_tcm_work(struct work_struct *work)
+{
+	struct delayed_work *delayed_work = to_delayed_work(work);
+	struct iwl_mvm *mvm = container_of(delayed_work, struct iwl_mvm,
+					   tcm.work);
+
+	iwl_mvm_recalc_tcm(mvm);
+}
+
+void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel)
+{
+	spin_lock_bh(&mvm->tcm.lock);
+	mvm->tcm.paused = true;
+	spin_unlock_bh(&mvm->tcm.lock);
+	if (with_cancel)
+		cancel_delayed_work_sync(&mvm->tcm.work);
+}
+
+void iwl_mvm_resume_tcm(struct iwl_mvm *mvm)
+{
+	int mac;
+
+	spin_lock_bh(&mvm->tcm.lock);
+	mvm->tcm.ts = jiffies;
+	mvm->tcm.ll_ts = jiffies;
+	for (mac = 0; mac < NUM_MAC_INDEX_DRIVER; mac++) {
+		struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[mac];
+
+		memset(&mdata->rx.pkts, 0, sizeof(mdata->rx.pkts));
+		memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts));
+		memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime));
+		memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime));
+	}
+	/* The TCM data needs to be reset before "paused" flag changes */
+	smp_mb();
+	mvm->tcm.paused = false;
+	spin_unlock_bh(&mvm->tcm.lock);
+}
+
+void iwl_mvm_tcm_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	INIT_DELAYED_WORK(&mvmvif->uapsd_nonagg_detected_wk,
+			  iwl_mvm_tcm_uapsd_nonagg_detected_wk);
+}
+
+void iwl_mvm_tcm_rm_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+	cancel_delayed_work_sync(&mvmvif->uapsd_nonagg_detected_wk);
+}
+
+void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, u32 *gp2, u64 *boottime)
+{
+	bool ps_disabled;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	/* Disable power save when reading GP2 */
+	ps_disabled = mvm->ps_disabled;
+	if (!ps_disabled) {
+		mvm->ps_disabled = true;
+		iwl_mvm_power_update_device(mvm);
+	}
+
+	*gp2 = iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
+	*boottime = ktime_get_boot_ns();
+
+	if (!ps_disabled) {
+		mvm->ps_disabled = ps_disabled;
+		iwl_mvm_power_update_device(mvm);
+	}
+}
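+
+/*
+ * A minimal caller sketch (hypothetical, for illustration only): both
+ * clocks are sampled back-to-back so a device GP2 timestamp can later
+ * be correlated with the boottime clock:
+ *
+ *	u32 gp2;
+ *	u64 boottime_ns;
+ *
+ *	mutex_lock(&mvm->mutex);
+ *	iwl_mvm_get_sync_time(mvm, &gp2, &boottime_ns);
+ *	mutex_unlock(&mvm->mutex);
+ */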
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
new file mode 100644
index 0000000..2146fda
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -0,0 +1,207 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include "iwl-trans.h"
+#include "iwl-fh.h"
+#include "iwl-context-info-gen3.h"
+#include "internal.h"
+#include "iwl-prph.h"
+
+int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
+				 const struct fw_img *fw)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_context_info_gen3 *ctxt_info_gen3;
+	struct iwl_prph_scratch *prph_scratch;
+	struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
+	struct iwl_prph_info *prph_info;
+	void *iml_img;
+	u32 control_flags = 0;
+	int ret;
+
+	/* Allocate prph scratch */
+	prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch),
+					  &trans_pcie->prph_scratch_dma_addr,
+					  GFP_KERNEL);
+	if (!prph_scratch)
+		return -ENOMEM;
+
+	prph_sc_ctrl = &prph_scratch->ctrl_cfg;
+
+	prph_sc_ctrl->version.version = 0;
+	prph_sc_ctrl->version.mac_id =
+		cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
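+	/* size is in DWs */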
+	prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4);
+
+	control_flags = IWL_PRPH_SCRATCH_RB_SIZE_4K |
+			IWL_PRPH_SCRATCH_MTR_MODE |
+			(IWL_PRPH_MTR_FORMAT_256B &
+			 IWL_PRPH_SCRATCH_MTR_FORMAT) |
+			IWL_PRPH_SCRATCH_EARLY_DEBUG_EN |
+			IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
+	prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags);
+
+	/* initialize RX default queue */
+	prph_sc_ctrl->rbd_cfg.free_rbd_addr =
+		cpu_to_le64(trans_pcie->rxq->bd_dma);
+
+	/* Configure debug, for integration */
+	iwl_pcie_alloc_fw_monitor(trans, 0);
+	prph_sc_ctrl->hwm_cfg.hwm_base_addr =
+		cpu_to_le64(trans_pcie->fw_mon_phys);
+	prph_sc_ctrl->hwm_cfg.hwm_size =
+		cpu_to_le32(trans_pcie->fw_mon_size);
+
+	/* allocate ucode sections in dram and set addresses */
+	ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram);
+	if (ret) {
+		dma_free_coherent(trans->dev,
+				  sizeof(*prph_scratch),
+				  prph_scratch,
+				  trans_pcie->prph_scratch_dma_addr);
+		return ret;
+	}
+
+	/*
+	 * Allocate prph information. Currently nothing is assigned to
+	 * the prph info; it will get assigned later.
+	 */
+	prph_info = dma_alloc_coherent(trans->dev, sizeof(*prph_info),
+				       &trans_pcie->prph_info_dma_addr,
+				       GFP_KERNEL);
+	if (!prph_info)
+		return -ENOMEM;
+
+	/* Allocate context info */
+	ctxt_info_gen3 = dma_alloc_coherent(trans->dev,
+					    sizeof(*ctxt_info_gen3),
+					    &trans_pcie->ctxt_info_dma_addr,
+					    GFP_KERNEL);
+	if (!ctxt_info_gen3)
+		return -ENOMEM;
+
+	ctxt_info_gen3->prph_info_base_addr =
+		cpu_to_le64(trans_pcie->prph_info_dma_addr);
+	ctxt_info_gen3->prph_scratch_base_addr =
+		cpu_to_le64(trans_pcie->prph_scratch_dma_addr);
+	ctxt_info_gen3->prph_scratch_size =
+		cpu_to_le32(sizeof(*prph_scratch));
+	ctxt_info_gen3->cr_head_idx_arr_base_addr =
+		cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
+	ctxt_info_gen3->tr_tail_idx_arr_base_addr =
+		cpu_to_le64(trans_pcie->rxq->tr_tail_dma);
+	ctxt_info_gen3->cr_tail_idx_arr_base_addr =
+		cpu_to_le64(trans_pcie->rxq->cr_tail_dma);
+	ctxt_info_gen3->cr_idx_arr_size =
+		cpu_to_le16(IWL_NUM_OF_COMPLETION_RINGS);
+	ctxt_info_gen3->tr_idx_arr_size =
+		cpu_to_le16(IWL_NUM_OF_TRANSFER_RINGS);
+	ctxt_info_gen3->mtr_base_addr =
+		cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
+	ctxt_info_gen3->mcr_base_addr =
+		cpu_to_le64(trans_pcie->rxq->used_bd_dma);
+	ctxt_info_gen3->mtr_size =
+		cpu_to_le16(TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS));
+	ctxt_info_gen3->mcr_size =
+		cpu_to_le16(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE));
+
+	trans_pcie->ctxt_info_gen3 = ctxt_info_gen3;
+	trans_pcie->prph_info = prph_info;
+	trans_pcie->prph_scratch = prph_scratch;
+
+	/* Allocate IML */
+	iml_img = dma_alloc_coherent(trans->dev, trans->iml_len,
+				     &trans_pcie->iml_dma_addr, GFP_KERNEL);
+	if (!iml_img)
+		return -ENOMEM;
+
+	memcpy(iml_img, trans->iml, trans->iml_len);
+
+	iwl_enable_interrupts(trans);
+
+	/* kick FW self load */
+	iwl_write64(trans, CSR_CTXT_INFO_ADDR,
+		    trans_pcie->ctxt_info_dma_addr);
+	iwl_write64(trans, CSR_IML_DATA_ADDR,
+		    trans_pcie->iml_dma_addr);
+	iwl_write32(trans, CSR_IML_SIZE_ADDR, trans->iml_len);
+	iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL, CSR_AUTO_FUNC_BOOT_ENA);
+	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT);
+
+	return 0;
+}
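+
+/*
+ * A minimal unwind sketch for the later allocation failures above,
+ * which as written return -ENOMEM while leaving the earlier buffers
+ * allocated; pairing each failure with the matching dma_free_coherent()
+ * calls (mirroring iwl_pcie_ctxt_info_gen3_free()) would look roughly
+ * like:
+ *
+ *	if (!iml_img) {
+ *		dma_free_coherent(trans->dev, sizeof(*ctxt_info_gen3),
+ *				  ctxt_info_gen3,
+ *				  trans_pcie->ctxt_info_dma_addr);
+ *		dma_free_coherent(trans->dev, sizeof(*prph_info),
+ *				  prph_info,
+ *				  trans_pcie->prph_info_dma_addr);
+ *		dma_free_coherent(trans->dev, sizeof(*prph_scratch),
+ *				  prph_scratch,
+ *				  trans_pcie->prph_scratch_dma_addr);
+ *		return -ENOMEM;
+ *	}
+ */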
+
+void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	if (!trans_pcie->ctxt_info_gen3)
+		return;
+
+	dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
+			  trans_pcie->ctxt_info_gen3,
+			  trans_pcie->ctxt_info_dma_addr);
+	trans_pcie->ctxt_info_dma_addr = 0;
+	trans_pcie->ctxt_info_gen3 = NULL;
+
+	iwl_pcie_ctxt_info_free_fw_img(trans);
+
+	dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch),
+			  trans_pcie->prph_scratch,
+			  trans_pcie->prph_scratch_dma_addr);
+	trans_pcie->prph_scratch_dma_addr = 0;
+	trans_pcie->prph_scratch = NULL;
+
+	dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_info),
+			  trans_pcie->prph_info,
+			  trans_pcie->prph_info_dma_addr);
+	trans_pcie->prph_info_dma_addr = 0;
+	trans_pcie->prph_info = NULL;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
new file mode 100644
index 0000000..b2cd7ef
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -0,0 +1,238 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include "iwl-trans.h"
+#include "iwl-fh.h"
+#include "iwl-context-info.h"
+#include "internal.h"
+#include "iwl-prph.h"
+
+void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
+	int i;
+
+	if (!dram->paging) {
+		WARN_ON(dram->paging_cnt);
+		return;
+	}
+
+	/* free paging */
+	for (i = 0; i < dram->paging_cnt; i++)
+		dma_free_coherent(trans->dev, dram->paging[i].size,
+				  dram->paging[i].block,
+				  dram->paging[i].physical);
+
+	kfree(dram->paging);
+	dram->paging_cnt = 0;
+	dram->paging = NULL;
+}
+
+int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
+			 const struct fw_img *fw,
+			 struct iwl_context_info_dram *ctxt_dram)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
+	int i, ret, lmac_cnt, umac_cnt, paging_cnt;
+
+	if (WARN(dram->paging,
+		 "paging shouldn't already be initialized (%d pages)\n",
+		 dram->paging_cnt))
+		iwl_pcie_ctxt_info_free_paging(trans);
+
+	lmac_cnt = iwl_pcie_get_num_sections(fw, 0);
+	/* add 1 due to separator */
+	umac_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + 1);
+	/* add 2 due to separators */
+	paging_cnt = iwl_pcie_get_num_sections(fw, lmac_cnt + umac_cnt + 2);
+
+	dram->fw = kcalloc(umac_cnt + lmac_cnt, sizeof(*dram->fw), GFP_KERNEL);
+	if (!dram->fw)
+		return -ENOMEM;
+	dram->paging = kcalloc(paging_cnt, sizeof(*dram->paging), GFP_KERNEL);
+	if (!dram->paging)
+		return -ENOMEM;
+
+	/* initialize lmac sections */
+	for (i = 0; i < lmac_cnt; i++) {
+		ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[i],
+						   &dram->fw[dram->fw_cnt]);
+		if (ret)
+			return ret;
+		ctxt_dram->lmac_img[i] =
+			cpu_to_le64(dram->fw[dram->fw_cnt].physical);
+		dram->fw_cnt++;
+	}
+
+	/* initialize umac sections */
+	for (i = 0; i < umac_cnt; i++) {
+		/* access FW with +1 to make up for lmac separator */
+		ret = iwl_pcie_ctxt_info_alloc_dma(trans,
+						   &fw->sec[dram->fw_cnt + 1],
+						   &dram->fw[dram->fw_cnt]);
+		if (ret)
+			return ret;
+		ctxt_dram->umac_img[i] =
+			cpu_to_le64(dram->fw[dram->fw_cnt].physical);
+		dram->fw_cnt++;
+	}
+
+	/*
+	 * Initialize paging.
+	 * Paging memory isn't stored in dram->fw like the umac and lmac
+	 * sections - it is stored separately, since the timing of its
+	 * release is different: while fw memory can be released on alive,
+	 * paging memory can only be freed when the device goes down.
+	 * Given that, the logic here for accessing the fw image is a bit
+	 * different - fw_cnt doesn't change, so the loop counter is added
+	 * to it.
+	 */
+	for (i = 0; i < paging_cnt; i++) {
+		/* access FW with +2 to make up for lmac & umac separators */
+		int fw_idx = dram->fw_cnt + i + 2;
+
+		ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[fw_idx],
+						   &dram->paging[i]);
+		if (ret)
+			return ret;
+
+		ctxt_dram->virtual_img[i] =
+			cpu_to_le64(dram->paging[i].physical);
+		dram->paging_cnt++;
+	}
+
+	return 0;
+}
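+
+/*
+ * For reference, the fw->sec[] layout implied by the separator
+ * arithmetic above:
+ *
+ *	sec[0 .. lmac_cnt - 1]                     lmac sections
+ *	sec[lmac_cnt]                              separator
+ *	sec[lmac_cnt + 1 .. lmac_cnt + umac_cnt]   umac sections
+ *	sec[lmac_cnt + umac_cnt + 1]               separator
+ *	sec[lmac_cnt + umac_cnt + 2 ..]            paging sections
+ */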
+
+int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
+			    const struct fw_img *fw)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_context_info *ctxt_info;
+	struct iwl_context_info_rbd_cfg *rx_cfg;
+	u32 control_flags = 0;
+	int ret;
+
+	ctxt_info = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info),
+				       &trans_pcie->ctxt_info_dma_addr,
+				       GFP_KERNEL);
+	if (!ctxt_info)
+		return -ENOMEM;
+
+	ctxt_info->version.version = 0;
+	ctxt_info->version.mac_id =
+		cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
+	/* size is in DWs */
+	ctxt_info->version.size = cpu_to_le16(sizeof(*ctxt_info) / 4);
+
+	BUILD_BUG_ON(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) > 0xF);
+	control_flags = IWL_CTXT_INFO_RB_SIZE_4K |
+			IWL_CTXT_INFO_TFD_FORMAT_LONG |
+			RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE) <<
+			IWL_CTXT_INFO_RB_CB_SIZE_POS;
+	ctxt_info->control.control_flags = cpu_to_le32(control_flags);
+
+	/* initialize RX default queue */
+	rx_cfg = &ctxt_info->rbd_cfg;
+	rx_cfg->free_rbd_addr = cpu_to_le64(trans_pcie->rxq->bd_dma);
+	rx_cfg->used_rbd_addr = cpu_to_le64(trans_pcie->rxq->used_bd_dma);
+	rx_cfg->status_wr_ptr = cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
+
+	/* initialize TX command queue */
+	ctxt_info->hcmd_cfg.cmd_queue_addr =
+		cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
+	ctxt_info->hcmd_cfg.cmd_queue_size =
+		TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS);
+
+	/* allocate ucode sections in dram and set addresses */
+	ret = iwl_pcie_init_fw_sec(trans, fw, &ctxt_info->dram);
+	if (ret) {
+		dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
+				  ctxt_info, trans_pcie->ctxt_info_dma_addr);
+		return ret;
+	}
+
+	trans_pcie->ctxt_info = ctxt_info;
+
+	iwl_enable_interrupts(trans);
+
+	/* Configure debug, if exists */
+	if (trans->dbg_dest_tlv)
+		iwl_pcie_apply_destination(trans);
+
+	/* kick FW self load */
+	iwl_write64(trans, CSR_CTXT_INFO_BA, trans_pcie->ctxt_info_dma_addr);
+	iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);
+
+	/*
+	 * Context info will be released upon receiving the alive
+	 * notification, or on failure to get one.
+	 */
+
+	return 0;
+}
+
+void iwl_pcie_ctxt_info_free(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	if (!trans_pcie->ctxt_info)
+		return;
+
+	dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
+			  trans_pcie->ctxt_info,
+			  trans_pcie->ctxt_info_dma_addr);
+	trans_pcie->ctxt_info_dma_addr = 0;
+	trans_pcie->ctxt_info = NULL;
+
+	iwl_pcie_ctxt_info_free_fw_img(trans);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
new file mode 100644
index 0000000..5d65500
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -0,0 +1,1277 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016-2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * All rights reserved.
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/pci.h>
+#include <linux/acpi.h>
+
+#include "fw/acpi.h"
+
+#include "iwl-trans.h"
+#include "iwl-drv.h"
+#include "internal.h"
+
+#define IWL_PCI_DEVICE(dev, subdev, cfg) \
+	.vendor = PCI_VENDOR_ID_INTEL,  .device = (dev), \
+	.subvendor = PCI_ANY_ID, .subdevice = (subdev), \
+	.driver_data = (kernel_ulong_t)&(cfg)
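+
+/*
+ * For example, {IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)} from
+ * the table below expands to the pci_device_id initializer:
+ *
+ *	{ .vendor = PCI_VENDOR_ID_INTEL, .device = 0x4232,
+ *	  .subvendor = PCI_ANY_ID, .subdevice = 0x1201,
+ *	  .driver_data = (kernel_ulong_t)&(iwl5100_agn_cfg) }
+ */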
+
+/* Hardware specific file defines the PCI IDs table for that hardware module */
+static const struct pci_device_id iwl_hw_card_ids[] = {
+#if IS_ENABLED(CONFIG_IWLDVM)
+	{IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1204, iwl5100_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1304, iwl5100_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1205, iwl5100_bgn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1305, iwl5100_bgn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1206, iwl5100_abg_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1306, iwl5100_abg_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1221, iwl5100_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1321, iwl5100_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1224, iwl5100_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1324, iwl5100_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1225, iwl5100_bgn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1325, iwl5100_bgn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1226, iwl5100_abg_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4232, 0x1326, iwl5100_abg_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4237, 0x1211, iwl5100_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4237, 0x1311, iwl5100_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4237, 0x1214, iwl5100_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4237, 0x1314, iwl5100_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4237, 0x1215, iwl5100_bgn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4237, 0x1315, iwl5100_bgn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4237, 0x1216, iwl5100_abg_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4237, 0x1316, iwl5100_abg_cfg)}, /* Half Mini Card */
+
+/* 5300 Series WiFi */
+	{IWL_PCI_DEVICE(0x4235, 0x1021, iwl5300_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4235, 0x1121, iwl5300_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4235, 0x1024, iwl5300_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4235, 0x1124, iwl5300_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4235, 0x1001, iwl5300_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4235, 0x1101, iwl5300_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4235, 0x1004, iwl5300_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4235, 0x1104, iwl5300_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4236, 0x1011, iwl5300_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4236, 0x1111, iwl5300_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x4236, 0x1014, iwl5300_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x4236, 0x1114, iwl5300_agn_cfg)}, /* Half Mini Card */
+
+/* 5350 Series WiFi/WiMax */
+	{IWL_PCI_DEVICE(0x423A, 0x1001, iwl5350_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x423A, 0x1021, iwl5350_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x423B, 0x1011, iwl5350_agn_cfg)}, /* Mini Card */
+
+/* 5150 Series WiFi/WiMax */
+	{IWL_PCI_DEVICE(0x423C, 0x1201, iwl5150_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x423C, 0x1301, iwl5150_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x423C, 0x1206, iwl5150_abg_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x423C, 0x1326, iwl5150_abg_cfg)}, /* Half Mini Card */
+
+	{IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */
+	{IWL_PCI_DEVICE(0x423D, 0x1216, iwl5150_abg_cfg)}, /* Mini Card */
+	{IWL_PCI_DEVICE(0x423D, 0x1316, iwl5150_abg_cfg)}, /* Half Mini Card */
+
+/* 6x00 Series */
+	{IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)},
+	{IWL_PCI_DEVICE(0x422B, 0x1108, iwl6000_3agn_cfg)},
+	{IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)},
+	{IWL_PCI_DEVICE(0x422B, 0x1128, iwl6000_3agn_cfg)},
+	{IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)},
+	{IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)},
+	{IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)},
+	{IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)},
+	{IWL_PCI_DEVICE(0x4238, 0x1118, iwl6000_3agn_cfg)},
+	{IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
+
+/* 6x05 Series */
+	{IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)},
+	{IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)},
+	{IWL_PCI_DEVICE(0x0082, 0x1308, iwl6005_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)},
+	{IWL_PCI_DEVICE(0x0082, 0x1328, iwl6005_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x0085, 0x1318, iwl6005_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)},
+	{IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)},
+	{IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)},
+	{IWL_PCI_DEVICE(0x0085, 0xC228, iwl6005_2agn_sff_cfg)},
+	{IWL_PCI_DEVICE(0x0082, 0x4820, iwl6005_2agn_d_cfg)},
+	{IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_mow1_cfg)},/* low 5GHz active */
+	{IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_mow2_cfg)},/* high 5GHz active */
+
+/* 6x30 Series */
+	{IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x008A, 0x5307, iwl1030_bg_cfg)},
+	{IWL_PCI_DEVICE(0x008A, 0x5325, iwl1030_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x008A, 0x5327, iwl1030_bg_cfg)},
+	{IWL_PCI_DEVICE(0x008B, 0x5315, iwl1030_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x008B, 0x5317, iwl1030_bg_cfg)},
+	{IWL_PCI_DEVICE(0x0090, 0x5211, iwl6030_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x0090, 0x5215, iwl6030_2bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0090, 0x5216, iwl6030_2abg_cfg)},
+	{IWL_PCI_DEVICE(0x0091, 0x5201, iwl6030_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x0091, 0x5205, iwl6030_2bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0091, 0x5206, iwl6030_2abg_cfg)},
+	{IWL_PCI_DEVICE(0x0091, 0x5207, iwl6030_2bg_cfg)},
+	{IWL_PCI_DEVICE(0x0091, 0x5221, iwl6030_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x0091, 0x5225, iwl6030_2bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0091, 0x5226, iwl6030_2abg_cfg)},
+
+/* 6x50 WiFi/WiMax Series */
+	{IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x0087, 0x1306, iwl6050_2abg_cfg)},
+	{IWL_PCI_DEVICE(0x0087, 0x1321, iwl6050_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x0087, 0x1326, iwl6050_2abg_cfg)},
+	{IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_2abg_cfg)},
+
+/* 6150 WiFi/WiMax Series */
+	{IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0885, 0x1307, iwl6150_bg_cfg)},
+	{IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0885, 0x1327, iwl6150_bg_cfg)},
+	{IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0886, 0x1317, iwl6150_bg_cfg)},
+
+/* 1000 Series WiFi */
+	{IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0083, 0x1305, iwl1000_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0083, 0x1225, iwl1000_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0083, 0x1325, iwl1000_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0084, 0x1215, iwl1000_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0084, 0x1315, iwl1000_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0083, 0x1206, iwl1000_bg_cfg)},
+	{IWL_PCI_DEVICE(0x0083, 0x1306, iwl1000_bg_cfg)},
+	{IWL_PCI_DEVICE(0x0083, 0x1226, iwl1000_bg_cfg)},
+	{IWL_PCI_DEVICE(0x0083, 0x1326, iwl1000_bg_cfg)},
+	{IWL_PCI_DEVICE(0x0084, 0x1216, iwl1000_bg_cfg)},
+	{IWL_PCI_DEVICE(0x0084, 0x1316, iwl1000_bg_cfg)},
+
+/* 100 Series WiFi */
+	{IWL_PCI_DEVICE(0x08AE, 0x1005, iwl100_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x08AE, 0x1007, iwl100_bg_cfg)},
+	{IWL_PCI_DEVICE(0x08AF, 0x1015, iwl100_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x08AF, 0x1017, iwl100_bg_cfg)},
+	{IWL_PCI_DEVICE(0x08AE, 0x1025, iwl100_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x08AE, 0x1027, iwl100_bg_cfg)},
+
+/* 130 Series WiFi */
+	{IWL_PCI_DEVICE(0x0896, 0x5005, iwl130_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0896, 0x5007, iwl130_bg_cfg)},
+	{IWL_PCI_DEVICE(0x0897, 0x5015, iwl130_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0897, 0x5017, iwl130_bg_cfg)},
+	{IWL_PCI_DEVICE(0x0896, 0x5025, iwl130_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0896, 0x5027, iwl130_bg_cfg)},
+
+/* 2x00 Series */
+	{IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0890, 0x4822, iwl2000_2bgn_d_cfg)},
+
+/* 2x30 Series */
+	{IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)},
+
+/* 6x35 Series */
+	{IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x088E, 0x406A, iwl6035_2agn_sff_cfg)},
+	{IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x088F, 0x426A, iwl6035_2agn_sff_cfg)},
+	{IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x088E, 0x446A, iwl6035_2agn_sff_cfg)},
+	{IWL_PCI_DEVICE(0x088E, 0x4860, iwl6035_2agn_cfg)},
+	{IWL_PCI_DEVICE(0x088F, 0x5260, iwl6035_2agn_cfg)},
+
+/* 105 Series */
+	{IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0894, 0x0822, iwl105_bgn_d_cfg)},
+
+/* 135 Series */
+	{IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)},
+	{IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)},
+#endif /* CONFIG_IWLDVM */
+
+#if IS_ENABLED(CONFIG_IWLMVM)
+/* 7260 Series */
+	{IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4072, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4170, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4C60, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4C70, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4060, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x406A, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4160, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4062, iwl7260_n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4162, iwl7260_n_cfg)},
+	{IWL_PCI_DEVICE(0x08B2, 0x4270, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B2, 0x4272, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B2, 0x4260, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B2, 0x426A, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B2, 0x4262, iwl7260_n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4470, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4472, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4460, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x446A, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg_high_temp)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg_high_temp)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg_high_temp)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4570, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4560, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B2, 0x4370, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B2, 0x4360, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x5070, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x5072, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x5170, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x5770, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x402A, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC070, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC072, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC170, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC060, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC06A, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC160, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC062, iwl7260_n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC162, iwl7260_n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC770, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC760, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B2, 0xC270, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xCC70, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xCC60, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B2, 0xC272, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B2, 0xC260, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B2, 0xC26A, iwl7260_n_cfg)},
+	{IWL_PCI_DEVICE(0x08B2, 0xC262, iwl7260_n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC470, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC472, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC460, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC462, iwl7260_n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC570, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC560, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B2, 0xC370, iwl7260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC360, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC020, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC02A, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B2, 0xC220, iwl7260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B1, 0xC420, iwl7260_2n_cfg)},
+
+/* 3160 Series */
+	{IWL_PCI_DEVICE(0x08B3, 0x0070, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B3, 0x0072, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B3, 0x0170, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B3, 0x0172, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B3, 0x0060, iwl3160_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B3, 0x0062, iwl3160_n_cfg)},
+	{IWL_PCI_DEVICE(0x08B4, 0x0270, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B4, 0x0272, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B3, 0x0470, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B3, 0x0472, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B4, 0x0370, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B3, 0x8070, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B3, 0x8072, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B3, 0x8170, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B3, 0x8172, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B3, 0x8060, iwl3160_2n_cfg)},
+	{IWL_PCI_DEVICE(0x08B3, 0x8062, iwl3160_n_cfg)},
+	{IWL_PCI_DEVICE(0x08B4, 0x8270, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B4, 0x8370, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B4, 0x8272, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B3, 0x8470, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B3, 0x8570, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B3, 0x1070, iwl3160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x08B3, 0x1170, iwl3160_2ac_cfg)},
+
+/* 3165 Series */
+	{IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x3166, 0x4212, iwl3165_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)},
+
+/* 3168 Series */
+	{IWL_PCI_DEVICE(0x24FB, 0x2010, iwl3168_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FB, 0x2110, iwl3168_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FB, 0x2050, iwl3168_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FB, 0x2150, iwl3168_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FB, 0x0000, iwl3168_2ac_cfg)},
+
+/* 7265 Series */
+	{IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5100, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)},
+	{IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5C10, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5510, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2n_cfg)},
+	{IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5102, iwl7265_n_cfg)},
+	{IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x900A, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095B, 0x9210, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095B, 0x9310, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5190, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x5F10, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095B, 0x5212, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095B, 0x520A, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x9000, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x9400, iwl7265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x095A, 0x9E10, iwl7265_2ac_cfg)},
+
+/* 8000 Series */
+	{IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x1010, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x10B0, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x0130, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x1130, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x0132, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x1132, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x0110, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x01F0, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x0012, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x1012, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x1110, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x0050, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x0250, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x1050, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x0150, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x1150, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0xD0B0, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0xB0B0, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x8110, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x9010, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x9110, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F4, 0x8030, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F4, 0x9030, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x8130, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x9130, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x8132, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x9132, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x8050, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x8150, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x9050, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x9150, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x0004, iwl8260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x0044, iwl8260_2n_cfg)},
+	{IWL_PCI_DEVICE(0x24F5, 0x0010, iwl4165_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F6, 0x0030, iwl4165_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x0810, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x0910, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x0850, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x0950, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24F3, 0x4010, iwl8260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x0110, iwl8265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x1110, iwl8265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x1130, iwl8265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x0130, iwl8265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x1010, iwl8265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x10D0, iwl8265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x0050, iwl8265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x0150, iwl8265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x9010, iwl8265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x8110, iwl8265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x8050, iwl8265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x9110, iwl8265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x8130, iwl8265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x0910, iwl8265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x0930, iwl8265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x0950, iwl8265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x0850, iwl8265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x1014, iwl8265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x3E02, iwl8275_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x3E01, iwl8275_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x1012, iwl8275_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x0012, iwl8275_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x0014, iwl8265_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
+
+/* 9000 Series */
+	{IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0060, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0064, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x00A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x00A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0230, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0238, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x023C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0260, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x0264, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x02A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x02A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x003C, iwl9560_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x0064, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x0210, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x0214, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x0230, iwl9560_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x0234, iwl9560_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x0264, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x1010, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x1030, iwl9560_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2526, 0x1550, iwl9260_killer_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2526, 0x1552, iwl9560_killer_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2526, 0x8014, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x271C, 0x0214, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2720, 0x00A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2720, 0x0230, iwl9560_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2720, 0x1010, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2720, 0x1210, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2720, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2720, 0x1552, iwl9560_killer_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2720, 0x4034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2720, 0x4234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2720, 0x42A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x0230, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x0234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x0238, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x023C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x1010, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x30DC, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x1552, iwl9560_killer_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg_shared_clk)},
+	{IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_shared_clk)},
+	{IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg_shared_clk)},
+	{IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg_shared_clk)},
+	{IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg_shared_clk)},
+	{IWL_PCI_DEVICE(0x31DC, 0x0064, iwl9461_2ac_cfg_shared_clk)},
+	{IWL_PCI_DEVICE(0x31DC, 0x00A0, iwl9462_2ac_cfg_shared_clk)},
+	{IWL_PCI_DEVICE(0x31DC, 0x00A4, iwl9462_2ac_cfg_shared_clk)},
+	{IWL_PCI_DEVICE(0x31DC, 0x0230, iwl9560_2ac_cfg_shared_clk)},
+	{IWL_PCI_DEVICE(0x31DC, 0x0234, iwl9560_2ac_cfg_shared_clk)},
+	{IWL_PCI_DEVICE(0x31DC, 0x0238, iwl9560_2ac_cfg_shared_clk)},
+	{IWL_PCI_DEVICE(0x31DC, 0x023C, iwl9560_2ac_cfg_shared_clk)},
+	{IWL_PCI_DEVICE(0x31DC, 0x0260, iwl9461_2ac_cfg_shared_clk)},
+	{IWL_PCI_DEVICE(0x31DC, 0x0264, iwl9461_2ac_cfg_shared_clk)},
+	{IWL_PCI_DEVICE(0x31DC, 0x02A0, iwl9462_2ac_cfg_shared_clk)},
+	{IWL_PCI_DEVICE(0x31DC, 0x02A4, iwl9462_2ac_cfg_shared_clk)},
+	{IWL_PCI_DEVICE(0x31DC, 0x1010, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x31DC, 0x1030, iwl9560_2ac_cfg_shared_clk)},
+	{IWL_PCI_DEVICE(0x31DC, 0x1210, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x31DC, 0x1551, iwl9560_killer_s_2ac_cfg_shared_clk)},
+	{IWL_PCI_DEVICE(0x31DC, 0x1552, iwl9560_killer_2ac_cfg_shared_clk)},
+	{IWL_PCI_DEVICE(0x31DC, 0x2030, iwl9560_2ac_cfg_shared_clk)},
+	{IWL_PCI_DEVICE(0x31DC, 0x2034, iwl9560_2ac_cfg_shared_clk)},
+	{IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_shared_clk)},
+	{IWL_PCI_DEVICE(0x31DC, 0x4034, iwl9560_2ac_cfg_shared_clk)},
+	{IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_shared_clk)},
+	{IWL_PCI_DEVICE(0x31DC, 0x4234, iwl9560_2ac_cfg_shared_clk)},
+	{IWL_PCI_DEVICE(0x31DC, 0x42A4, iwl9462_2ac_cfg_shared_clk)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0060, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0064, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x00A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x00A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0230, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0238, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x023C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0260, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0264, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x02A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x1010, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x34F0, 0x1030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x1210, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x34F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x4034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x40A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x0060, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x0064, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x00A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x00A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x0230, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x0234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x0238, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x023C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x0260, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x0264, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x02A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x02A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x1010, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x1210, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x4234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x42A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x00A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x0230, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x0234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x0238, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x023C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x0260, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x0264, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x02A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x02A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x1010, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x1210, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x43F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x40A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x4234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x42A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0064, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x00A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x00A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0230, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0238, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x023C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0260, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0264, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x02A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x02A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x1010, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x1030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x1210, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x2030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x2034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x4030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x4034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x4234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x42A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x00A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x0230, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x0234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x0238, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x023C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x0260, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x0264, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x02A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x02A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x1010, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x1210, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x40A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x4234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x42A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA370, 0x0064, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA370, 0x00A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA370, 0x00A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA370, 0x0230, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA370, 0x0234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA370, 0x0238, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA370, 0x023C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA370, 0x0260, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA370, 0x0264, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA370, 0x02A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA370, 0x02A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA370, 0x1010, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA370, 0x1210, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0xA370, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA370, 0x1552, iwl9560_killer_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA370, 0x2030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA370, 0x2034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA370, 0x40A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA370, 0x4234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA370, 0x42A4, iwl9462_2ac_cfg_soc)},
+
+/* 22000 Series */
+	{IWL_PCI_DEVICE(0x2720, 0x0000, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0x2720, 0x0040, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0x2720, 0x0078, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0x2720, 0x0070, iwl22000_2ac_cfg_hr_cdb)},
+	{IWL_PCI_DEVICE(0x2720, 0x0030, iwl22000_2ac_cfg_hr_cdb)},
+	{IWL_PCI_DEVICE(0x2720, 0x1080, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)},
+	{IWL_PCI_DEVICE(0x2720, 0x0310, iwl22000_2ac_cfg_hr_cdb)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0040, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0078, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ac_cfg_jf)},
+	{IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22560_2ax_cfg_su_cdb)},
+	{IWL_PCI_DEVICE(0x40C0, 0x0010, iwl22560_2ax_cfg_su_cdb)},
+	{IWL_PCI_DEVICE(0x40c0, 0x0090, iwl22560_2ax_cfg_su_cdb)},
+	{IWL_PCI_DEVICE(0x40C0, 0x0310, iwl22560_2ax_cfg_su_cdb)},
+	{IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22560_2ax_cfg_su_cdb)},
+	{IWL_PCI_DEVICE(0x43F0, 0x0040, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0x43F0, 0x0070, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0x43F0, 0x0078, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x0000, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x0040, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x0070, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x0078, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x00B0, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x0A10, iwl22000_2ax_cfg_hr)},
+
+#endif /* CONFIG_IWLMVM */
+
+	{0}
+};
+MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
+
+/* PCI registers */
+#define PCI_CFG_RETRY_TIMEOUT	0x041
+
+static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
+	const struct iwl_cfg *cfg_7265d __maybe_unused = NULL;
+	struct iwl_trans *iwl_trans;
+	int ret;
+
+	if (WARN_ONCE(!cfg->csr, "CSR addresses aren't configured\n"))
+		return -EINVAL;
+
+	iwl_trans = iwl_trans_pcie_alloc(pdev, ent, cfg);
+	if (IS_ERR(iwl_trans))
+		return PTR_ERR(iwl_trans);
+
+#if IS_ENABLED(CONFIG_IWLMVM)
+	/*
+	 * special-case 7265D, it has the same PCI IDs.
+	 *
+	 * Note that because we already pass the cfg to the transport above,
+	 * all the parameters that the transport uses must, until that is
+	 * changed, be identical to the ones in the 7265D configuration.
+	 */
+	if (cfg == &iwl7265_2ac_cfg)
+		cfg_7265d = &iwl7265d_2ac_cfg;
+	else if (cfg == &iwl7265_2n_cfg)
+		cfg_7265d = &iwl7265d_2n_cfg;
+	else if (cfg == &iwl7265_n_cfg)
+		cfg_7265d = &iwl7265d_n_cfg;
+	if (cfg_7265d &&
+	    (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) {
+		cfg = cfg_7265d;
+		iwl_trans->cfg = cfg_7265d;
+	}
+
+	if (iwl_trans->cfg->rf_id && cfg == &iwl22000_2ac_cfg_hr_cdb &&
+	    iwl_trans->hw_rev != CSR_HW_REV_TYPE_HR_CDB) {
+		u32 rf_id_chp = CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id);
+		u32 jf_chp_id = CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF);
+		u32 hr_chp_id = CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR);
+
+		if (rf_id_chp == jf_chp_id) {
+			if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ)
+				cfg = &iwl22000_2ax_cfg_qnj_jf_b0;
+			else
+				cfg = &iwl22000_2ac_cfg_jf;
+		} else if (rf_id_chp == hr_chp_id) {
+			if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ)
+				cfg = &iwl22000_2ax_cfg_qnj_hr_a0;
+			else
+				cfg = &iwl22000_2ac_cfg_hr;
+		}
+		iwl_trans->cfg = cfg;
+	}
+#endif
+
+	pci_set_drvdata(pdev, iwl_trans);
+	iwl_trans->drv = iwl_drv_start(iwl_trans);
+
+	if (IS_ERR(iwl_trans->drv)) {
+		ret = PTR_ERR(iwl_trans->drv);
+		goto out_free_trans;
+	}
+
+	/* register transport layer debugfs here */
+	ret = iwl_trans_pcie_dbgfs_register(iwl_trans);
+	if (ret)
+		goto out_free_drv;
+
+	/* if RTPM is in use, enable it in our device */
+	if (iwl_trans->runtime_pm_mode != IWL_PLAT_PM_MODE_DISABLED) {
+		/* We explicitly set the device to active here to
+		 * clear contingent errors.
+		 */
+		pm_runtime_set_active(&pdev->dev);
+
+		pm_runtime_set_autosuspend_delay(&pdev->dev,
+					 iwlwifi_mod_params.d0i3_timeout);
+		pm_runtime_use_autosuspend(&pdev->dev);
+
+		/* We are not supposed to call pm_runtime_allow() by
+		 * ourselves, but let userspace enable runtime PM via
+		 * sysfs.  However, since we don't enable this from
+		 * userspace yet, we need to allow/forbid() ourselves.
+		 */
+		pm_runtime_allow(&pdev->dev);
+	}
+
+	/* The PCI device starts with a reference taken and we are
+	 * supposed to release it here.  But to simplify the
+	 * interaction with the opmode, we don't do it now, but let
+	 * the opmode release it when it's ready.
+	 */
+
+	return 0;
+
+out_free_drv:
+	iwl_drv_stop(iwl_trans->drv);
+out_free_trans:
+	iwl_trans_pcie_free(iwl_trans);
+	return ret;
+}
+
+static void iwl_pci_remove(struct pci_dev *pdev)
+{
+	struct iwl_trans *trans = pci_get_drvdata(pdev);
+
+	/* if RTPM was in use, restore it to the state before probe */
+	if (trans->runtime_pm_mode != IWL_PLAT_PM_MODE_DISABLED) {
+		/* We should not call forbid here, but we do for now.
+		 * Check the comment to pm_runtime_allow() in
+		 * iwl_pci_probe().
+		 */
+		pm_runtime_forbid(trans->dev);
+	}
+
+	iwl_drv_stop(trans->drv);
+
+	iwl_trans_pcie_free(trans);
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int iwl_pci_suspend(struct device *device)
+{
+	/* Before you put code here, think about WoWLAN. You cannot check here
+	 * whether WoWLAN is enabled or not, and your code will run even if
+	 * WoWLAN is enabled - don't kill the NIC, someone may need it in Sx.
+	 */
+
+	return 0;
+}
+
+static int iwl_pci_resume(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct iwl_trans *trans = pci_get_drvdata(pdev);
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	/* Before you put code here, think about WoWLAN. You cannot check here
+	 * whether WoWLAN is enabled or not, and your code will run even if
+	 * WoWLAN is enabled - the NIC may be alive.
+	 */
+
+	/*
+	 * We disable the RETRY_TIMEOUT register (0x41) to keep
+	 * PCI Tx retries from interfering with C3 CPU state.
+	 */
+	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
+	if (!trans->op_mode)
+		return 0;
+
+	/* In WOWLAN, let iwl_trans_pcie_d3_resume do the rest of the work */
+	if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+		return 0;
+
+	/* reconfigure the MSI-X mapping to get the correct IRQ for rfkill */
+	iwl_pcie_conf_msix_hw(trans_pcie);
+
+	/*
+	 * Enable rfkill interrupt (in order to keep track of the rfkill
+	 * status). Must be locked to avoid processing a possible rfkill
+	 * interrupt while in iwl_pcie_check_hw_rf_kill().
+	 */
+	mutex_lock(&trans_pcie->mutex);
+	iwl_enable_rfkill_int(trans);
+	iwl_pcie_check_hw_rf_kill(trans);
+	mutex_unlock(&trans_pcie->mutex);
+
+	return 0;
+}
+
+int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int ret;
+
+	if (test_bit(STATUS_FW_ERROR, &trans->status))
+		return 0;
+
+	set_bit(STATUS_TRANS_GOING_IDLE, &trans->status);
+
+	/* config the fw */
+	ret = iwl_op_mode_enter_d0i3(trans->op_mode);
+	if (ret == 1) {
+		IWL_DEBUG_RPM(trans, "aborting d0i3 entrance\n");
+		clear_bit(STATUS_TRANS_GOING_IDLE, &trans->status);
+		return -EBUSY;
+	}
+	if (ret)
+		goto err;
+
+	ret = wait_event_timeout(trans_pcie->d0i3_waitq,
+				 test_bit(STATUS_TRANS_IDLE, &trans->status),
+				 msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
+	if (!ret) {
+		IWL_ERR(trans, "Timeout entering D0i3\n");
+		ret = -ETIMEDOUT;
+		goto err;
+	}
+
+	clear_bit(STATUS_TRANS_GOING_IDLE, &trans->status);
+
+	return 0;
+err:
+	clear_bit(STATUS_TRANS_GOING_IDLE, &trans->status);
+	iwl_trans_fw_error(trans);
+	return ret;
+}
+
+int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int ret;
+
+	/* sometimes a D0i3 entry is not followed through */
+	if (!test_bit(STATUS_TRANS_IDLE, &trans->status))
+		return 0;
+
+	/* config the fw */
+	ret = iwl_op_mode_exit_d0i3(trans->op_mode);
+	if (ret)
+		goto err;
+
+	/* we clear STATUS_TRANS_IDLE only when D0I3_END command is completed */
+
+	ret = wait_event_timeout(trans_pcie->d0i3_waitq,
+				 !test_bit(STATUS_TRANS_IDLE, &trans->status),
+				 msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
+	if (!ret) {
+		IWL_ERR(trans, "Timeout exiting D0i3\n");
+		ret = -ETIMEDOUT;
+		goto err;
+	}
+
+	return 0;
+err:
+	clear_bit(STATUS_TRANS_IDLE, &trans->status);
+	iwl_trans_fw_error(trans);
+	return ret;
+}
+
+#ifdef CONFIG_IWLWIFI_PCIE_RTPM
+static int iwl_pci_runtime_suspend(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct iwl_trans *trans = pci_get_drvdata(pdev);
+	int ret;
+
+	IWL_DEBUG_RPM(trans, "entering runtime suspend\n");
+
+	if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+		ret = iwl_pci_fw_enter_d0i3(trans);
+		if (ret < 0)
+			return ret;
+	}
+
+	trans->system_pm_mode = IWL_PLAT_PM_MODE_D0I3;
+
+	iwl_trans_d3_suspend(trans, false, false);
+
+	return 0;
+}
+
+static int iwl_pci_runtime_resume(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct iwl_trans *trans = pci_get_drvdata(pdev);
+	enum iwl_d3_status d3_status;
+
+	IWL_DEBUG_RPM(trans, "exiting runtime suspend (resume)\n");
+
+	iwl_trans_d3_resume(trans, &d3_status, false, false);
+
+	if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+		return iwl_pci_fw_exit_d0i3(trans);
+
+	return 0;
+}
+
+static int iwl_pci_system_prepare(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct iwl_trans *trans = pci_get_drvdata(pdev);
+
+	IWL_DEBUG_RPM(trans, "preparing for system suspend\n");
+
+	/* This is called before entering system suspend and before
+	 * the runtime resume is called.  Set the suspending flag to
+	 * prevent the wakelock from being taken.
+	 */
+	trans->suspending = true;
+
+	/* Wake the device up from runtime suspend before going to
+	 * platform suspend.  This is needed because we don't know
+	 * whether the wowlan "any" trigger is set and, if it's not,
+	 * mac80211 will disconnect (in which case, we can't be in D0i3).
+	 */
+	pm_runtime_resume(device);
+
+	return 0;
+}
+
+static void iwl_pci_system_complete(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct iwl_trans *trans = pci_get_drvdata(pdev);
+
+	IWL_DEBUG_RPM(trans, "completing system suspend\n");
+
+	/* This is called as a counterpart to the prepare op.  It is
+	 * called either when suspending fails or when suspend
+	 * completed successfully.  Now there's no risk of grabbing
+	 * the wakelock anymore, so we can release the suspending
+	 * flag.
+	 */
+	trans->suspending = false;
+}
+#endif /* CONFIG_IWLWIFI_PCIE_RTPM */
+
+static const struct dev_pm_ops iwl_dev_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(iwl_pci_suspend,
+				iwl_pci_resume)
+#ifdef CONFIG_IWLWIFI_PCIE_RTPM
+	SET_RUNTIME_PM_OPS(iwl_pci_runtime_suspend,
+			   iwl_pci_runtime_resume,
+			   NULL)
+	.prepare = iwl_pci_system_prepare,
+	.complete = iwl_pci_system_complete,
+#endif /* CONFIG_IWLWIFI_PCIE_RTPM */
+};
+
+#define IWL_PM_OPS	(&iwl_dev_pm_ops)
+
+#else /* CONFIG_PM_SLEEP */
+
+#define IWL_PM_OPS	NULL
+
+#endif /* CONFIG_PM_SLEEP */
+
+static struct pci_driver iwl_pci_driver = {
+	.name = DRV_NAME,
+	.id_table = iwl_hw_card_ids,
+	.probe = iwl_pci_probe,
+	.remove = iwl_pci_remove,
+	.driver.pm = IWL_PM_OPS,
+};
+
+int __must_check iwl_pci_register_driver(void)
+{
+	int ret;
+
+	ret = pci_register_driver(&iwl_pci_driver);
+	if (ret)
+		pr_err("Unable to initialize PCI module\n");
+
+	return ret;
+}
+
+void iwl_pci_unregister_driver(void)
+{
+	pci_unregister_driver(&iwl_pci_driver);
+}
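+
+/*
+ * Usage sketch (illustrative only, not part of this patch): the
+ * register/unregister pair above is expected to be driven from the driver
+ * core's module init/exit path, roughly along these lines (the
+ * iwl_drv_init/iwl_drv_exit names are assumed and live outside this file):
+ *
+ *	static int __init iwl_drv_init(void)
+ *	{
+ *		return iwl_pci_register_driver();
+ *	}
+ *
+ *	static void __exit iwl_drv_exit(void)
+ *	{
+ *		iwl_pci_unregister_driver();
+ *	}
+ */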
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
new file mode 100644
index 0000000..b63d44b
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -0,0 +1,1106 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#ifndef __iwl_trans_int_pcie_h__
+#define __iwl_trans_int_pcie_h__
+
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/skbuff.h>
+#include <linux/wait.h>
+#include <linux/pci.h>
+#include <linux/timer.h>
+#include <linux/cpu.h>
+
+#include "iwl-fh.h"
+#include "iwl-csr.h"
+#include "iwl-trans.h"
+#include "iwl-debug.h"
+#include "iwl-io.h"
+#include "iwl-op-mode.h"
+#include "iwl-drv.h"
+
+/* We need 2 entries for the TX command and header, and another one might
+ * be needed for potential data in the SKB's head. The remaining ones can
+ * be used for frags.
+ */
+#define IWL_PCIE_MAX_FRAGS(x) (x->max_tbs - 3)
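+/*
+ * Illustrative arithmetic (numbers assumed, not defined in this file): a
+ * legacy TFD carries 20 TBs, so IWL_PCIE_MAX_FRAGS() would leave
+ * 20 - 3 = 17 TBs for frags; the real budget always follows the
+ * per-family max_tbs value.
+ */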
+
+/*
+ * RX related structures and functions
+ */
+#define RX_NUM_QUEUES 1
+#define RX_POST_REQ_ALLOC 2
+#define RX_CLAIM_REQ_ALLOC 8
+#define RX_PENDING_WATERMARK 16
+#define FIRST_RX_QUEUE 512
+
+struct iwl_host_cmd;
+
+/* This file includes the declarations that are internal to the
+ * trans_pcie layer */
+
+/**
+ * struct iwl_rx_mem_buffer
+ * @page_dma: bus address of rxb page
+ * @page: driver's pointer to the rxb page
+ * @invalid: rxb is in driver ownership - not owned by HW
+ * @vid: index of this rxb in the global table
+ * @list: list entry for putting this RBD on the free/used lists
+ * @size: size used from the buffer
+ */
+struct iwl_rx_mem_buffer {
+	dma_addr_t page_dma;
+	struct page *page;
+	u16 vid;
+	bool invalid;
+	struct list_head list;
+	u32 size;
+};
+
+/**
+ * struct isr_statistics - interrupt statistics
+ */
+struct isr_statistics {
+	u32 hw;
+	u32 sw;
+	u32 err_code;
+	u32 sch;
+	u32 alive;
+	u32 rfkill;
+	u32 ctkill;
+	u32 wakeup;
+	u32 rx;
+	u32 tx;
+	u32 unhandled;
+};
+
+#define IWL_CD_STTS_OPTIMIZED_POS	0
+#define IWL_CD_STTS_OPTIMIZED_MSK	0x01
+#define IWL_CD_STTS_TRANSFER_STATUS_POS	1
+#define IWL_CD_STTS_TRANSFER_STATUS_MSK	0x0E
+#define IWL_CD_STTS_WIFI_STATUS_POS	4
+#define IWL_CD_STTS_WIFI_STATUS_MSK	0xF0
+
+/**
+ * enum iwl_completion_desc_transfer_status -  transfer status (bits 1-3)
+ * @IWL_CD_STTS_END_TRANSFER: successful transfer complete.
+ *	In sniffer mode, when split is used, set in last CD completion. (RX)
+ * @IWL_CD_STTS_OVERFLOW: In sniffer mode, when using split - used for
+ *	all CD completion. (RX)
+ * @IWL_CD_STTS_ABORTED: CR abort / close flow. (RX)
+ */
+enum iwl_completion_desc_transfer_status {
+	IWL_CD_STTS_UNUSED,
+	IWL_CD_STTS_UNUSED_2,
+	IWL_CD_STTS_END_TRANSFER,
+	IWL_CD_STTS_OVERFLOW,
+	IWL_CD_STTS_ABORTED,
+	IWL_CD_STTS_ERROR,
+};
+
+/**
+ * enum iwl_completion_desc_wifi_status - wifi status (bits 4-7)
+ * @IWL_CD_STTS_VALID: the packet is valid (RX)
+ * @IWL_CD_STTS_FCS_ERR: frame check sequence error (RX)
+ * @IWL_CD_STTS_SEC_KEY_ERR: error handling the security key of rx (RX)
+ * @IWL_CD_STTS_DECRYPTION_ERR: error decrypting the frame (RX)
+ * @IWL_CD_STTS_DUP: duplicate packet (RX)
+ * @IWL_CD_STTS_ICV_MIC_ERR: MIC error (RX)
+ * @IWL_CD_STTS_INTERNAL_SNAP_ERR: problems removing the snap (RX)
+ * @IWL_CD_STTS_SEC_PORT_FAIL: security port fail (RX)
+ * @IWL_CD_STTS_BA_OLD_SN: block ack received old SN (RX)
+ * @IWL_CD_STTS_QOS_NULL: QoS null packet (RX)
+ * @IWL_CD_STTS_MAC_HDR_ERR: MAC header conversion error (RX)
+ * @IWL_CD_STTS_MAX_RETRANS: reached max number of retransmissions (TX)
+ * @IWL_CD_STTS_EX_LIFETIME: exceeded lifetime (TX)
+ * @IWL_CD_STTS_NOT_USED: completed but not used (RX)
+ * @IWL_CD_STTS_REPLAY_ERR: pn check failed, replay error (RX)
+ */
+enum iwl_completion_desc_wifi_status {
+	IWL_CD_STTS_VALID,
+	IWL_CD_STTS_FCS_ERR,
+	IWL_CD_STTS_SEC_KEY_ERR,
+	IWL_CD_STTS_DECRYPTION_ERR,
+	IWL_CD_STTS_DUP,
+	IWL_CD_STTS_ICV_MIC_ERR,
+	IWL_CD_STTS_INTERNAL_SNAP_ERR,
+	IWL_CD_STTS_SEC_PORT_FAIL,
+	IWL_CD_STTS_BA_OLD_SN,
+	IWL_CD_STTS_QOS_NULL,
+	IWL_CD_STTS_MAC_HDR_ERR,
+	IWL_CD_STTS_MAX_RETRANS,
+	IWL_CD_STTS_EX_LIFETIME,
+	IWL_CD_STTS_NOT_USED,
+	IWL_CD_STTS_REPLAY_ERR,
+};
+
+#define IWL_RX_TD_TYPE_MSK	0xff000000
+#define IWL_RX_TD_SIZE_MSK	0x00ffffff
+#define IWL_RX_TD_SIZE_2K	BIT(11)
+#define IWL_RX_TD_TYPE		0
+
+/**
+ * struct iwl_rx_transfer_desc - transfer descriptor
+ * @type_n_size: buffer type (bit 0: external buff valid,
+ *	bit 1: optional footer valid, bits 2-7: reserved)
+ *	and buffer size
+ * @addr: ptr to free buffer start address
+ * @rbid: unique tag of the buffer
+ * @reserved: reserved
+ */
+struct iwl_rx_transfer_desc {
+	__le32 type_n_size;
+	__le64 addr;
+	__le16 rbid;
+	__le16 reserved;
+} __packed;
+
+#define IWL_RX_CD_SIZE		0xffffff00
+
+/**
+ * struct iwl_rx_completion_desc - completion descriptor
+ * @type: buffer type (bit 0: external buff valid,
+ *	bit 1: optional footer valid, bits 2-7: reserved)
+ * @status: status of the completion
+ * @reserved1: reserved
+ * @rbid: unique tag of the received buffer
+ * @size: buffer size, masked by IWL_RX_CD_SIZE
+ * @reserved2: reserved
+ */
+struct iwl_rx_completion_desc {
+	u8 type;
+	u8 status;
+	__le16 reserved1;
+	__le16 rbid;
+	__le32 size;
+	u8 reserved2[22];
+} __packed;
+
+/**
+ * struct iwl_rxq - Rx queue
+ * @id: queue index
+ * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
+ *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
+ *	In 22560 devices it is a pointer to a list of iwl_rx_transfer_desc's
+ * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
+ * @used_bd: driver's pointer to the buffer of used receive buffer
+ *	descriptors (rbd)
+ * @used_bd_dma: physical address of the buffer of used receive buffer
+ *	descriptors (rbd)
+ * @tr_tail: driver's pointer to the transmission ring tail buffer
+ * @tr_tail_dma: physical address of the buffer for the transmission ring tail
+ * @cr_tail: driver's pointer to the completion ring tail buffer
+ * @cr_tail_dma: physical address of the buffer for the completion ring tail
+ * @read: Shared index to newest available Rx buffer
+ * @write: Shared index to oldest written Rx packet
+ * @free_count: Number of pre-allocated buffers in rx_free
+ * @used_count: Number of RBDs handed to the allocator to use for allocation
+ * @write_actual: last write index actually written to the HW (multiple of 8)
+ * @rx_free: list of RBDs with allocated RB ready for use
+ * @rx_used: list of RBDs with no RB attached
+ * @need_update: flag to indicate we need to update read/write index
+ * @rb_stts: driver's pointer to receive buffer status
+ * @rb_stts_dma: bus address of receive buffer status
+ * @lock: protects the rxq state and its RBD lists
+ * @queue: actual rx queue. Not used for multi-rx queue.
+ *
+ * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
+ */
+struct iwl_rxq {
+	int id;
+	void *bd;
+	dma_addr_t bd_dma;
+	union {
+		void *used_bd;
+		__le32 *bd_32;
+		struct iwl_rx_completion_desc *cd;
+	};
+	dma_addr_t used_bd_dma;
+	__le16 *tr_tail;
+	dma_addr_t tr_tail_dma;
+	__le16 *cr_tail;
+	dma_addr_t cr_tail_dma;
+	u32 read;
+	u32 write;
+	u32 free_count;
+	u32 used_count;
+	u32 write_actual;
+	u32 queue_size;
+	struct list_head rx_free;
+	struct list_head rx_used;
+	bool need_update;
+	void *rb_stts;
+	dma_addr_t rb_stts_dma;
+	spinlock_t lock;
+	struct napi_struct napi;
+	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
+};
+
+/**
+ * struct iwl_rb_allocator - Rx allocator
+ * @req_pending: number of requests the allocator has not processed yet
+ * @req_ready: number of requests honored and ready for claiming
+ * @rbd_allocated: RBDs with pages allocated and ready to be handed to
+ *	the queue. This is a list of &struct iwl_rx_mem_buffer
+ * @rbd_empty: RBDs with no page attached for allocator use. This is a list
+ *	of &struct iwl_rx_mem_buffer
+ * @lock: protects the rbd_allocated and rbd_empty lists
+ * @alloc_wq: work queue for background calls
+ * @rx_alloc: work struct for background calls
+ */
+struct iwl_rb_allocator {
+	atomic_t req_pending;
+	atomic_t req_ready;
+	struct list_head rbd_allocated;
+	struct list_head rbd_empty;
+	spinlock_t lock;
+	struct workqueue_struct *alloc_wq;
+	struct work_struct rx_alloc;
+};
+
+struct iwl_dma_ptr {
+	dma_addr_t dma;
+	void *addr;
+	size_t size;
+};
+
+/**
+ * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
+ * @trans: the transport (for the queue size)
+ * @index: current index
+ */
+static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
+{
+	return ++index & (trans->cfg->base_params->max_tfd_queue_size - 1);
+}
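+/*
+ * Worked example: max_tfd_queue_size is a power of two, so the mask above
+ * wraps without a branch; with a 256-entry queue, incrementing index 255
+ * yields (255 + 1) & 255 == 0.
+ */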
+
+/**
+ * iwl_get_closed_rb_stts - get closed rb stts from different structs
+ * @trans: the transport (to tell the device family apart)
+ * @rxq: the rxq to get the rb stts from
+ */
+static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
+					    struct iwl_rxq *rxq)
+{
+	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+		__le16 *rb_stts = rxq->rb_stts;
+
+		return READ_ONCE(*rb_stts);
+	} else {
+		struct iwl_rb_status *rb_stts = rxq->rb_stts;
+
+		return READ_ONCE(rb_stts->closed_rb_num);
+	}
+}
+
+/**
+ * iwl_queue_dec_wrap - decrement queue index, wrap back to end
+ * @index -- current index
+ */
+static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
+{
+	return --index & (trans->cfg->base_params->max_tfd_queue_size - 1);
+}
+
+struct iwl_cmd_meta {
+	/* only for SYNC commands, iff the reply skb is wanted */
+	struct iwl_host_cmd *source;
+	u32 flags;
+	u32 tbs;
+};
+
+
+#define TFD_TX_CMD_SLOTS 256
+#define TFD_CMD_SLOTS 32
+
+/*
+ * The FH will write back to the first TB only, so we need to copy some data
+ * into the buffer regardless of whether it should be mapped or not.
+ * This indicates how big the first TB must be to include the scratch buffer
+ * and the assigned PN.
+ * Since the PN is 8 bytes long and sits at offset 12, the size works out
+ * to 12 + 8 = 20. Making it bigger would make allocations bigger and
+ * copying slower, so that's probably not useful.
+ */
+#define IWL_FIRST_TB_SIZE	20
+#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)
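+/*
+ * Note on the alignment: ALIGN(20, 64) evaluates to 64, so each entry of
+ * first_tb_bufs occupies a full 64-byte slot even though only the first
+ * IWL_FIRST_TB_SIZE bytes carry data.
+ */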
+
+struct iwl_pcie_txq_entry {
+	struct iwl_device_cmd *cmd;
+	struct sk_buff *skb;
+	/* buffer to free after command completes */
+	const void *free_buf;
+	struct iwl_cmd_meta meta;
+};
+
+struct iwl_pcie_first_tb_buf {
+	u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
+};
+
+/**
+ * struct iwl_txq - Tx Queue for DMA
+ * @tfds: transmit frame descriptors (DMA memory)
+ * @first_tb_bufs: start of command headers, including scratch buffers, for
+ *	the writeback -- this is DMA memory and an array holding one buffer
+ *	for each command on the queue
+ * @first_tb_dma: DMA address for the first_tb_bufs start
+ * @entries: transmit entries (driver state)
+ * @lock: queue lock
+ * @stuck_timer: timer that fires if queue gets stuck
+ * @trans_pcie: pointer back to transport (for timer)
+ * @need_update: indicates need to update read/write index
+ * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
+ * @wd_timeout: queue watchdog timeout (jiffies) - per queue
+ * @frozen: tx stuck queue timer is frozen
+ * @frozen_expiry_remainder: remember how long until the timer fires
+ * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
+ * @write_ptr: first empty entry (index), i.e. host_w
+ * @read_ptr: last used entry (index), i.e. host_r
+ * @dma_addr: physical addr for BDs
+ * @n_window: size of the usable SW queue window
+ * @id: queue id
+ * @low_mark: low watermark, resume queue if free space more than this
+ * @high_mark: high watermark, stop queue if free space less than this
+ *
+ * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
+ * descriptors) and required locking structures.
+ *
+ * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
+ * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256
+ * (unless the HW changes in the future). For the normal TX queues,
+ * n_window, which is the size of the software queue data, is also 256;
+ * however, for the command queue, n_window is only
+ * 32 since we don't need so many commands pending. Since the HW
+ * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
+ * This means that we end up with the following:
+ *  HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
+ *  SW entries:           | 0      | ... | 31          |
+ * where N is a number between 0 and 7. This means that the SW
+ * data is a window overlaid over the HW queue.
+ */
+struct iwl_txq {
+	void *tfds;
+	struct iwl_pcie_first_tb_buf *first_tb_bufs;
+	dma_addr_t first_tb_dma;
+	struct iwl_pcie_txq_entry *entries;
+	spinlock_t lock;
+	unsigned long frozen_expiry_remainder;
+	struct timer_list stuck_timer;
+	struct iwl_trans_pcie *trans_pcie;
+	bool need_update;
+	bool frozen;
+	bool ampdu;
+	int block;
+	unsigned long wd_timeout;
+	struct sk_buff_head overflow_q;
+	struct iwl_dma_ptr bc_tbl;
+
+	int write_ptr;
+	int read_ptr;
+	dma_addr_t dma_addr;
+	int n_window;
+	u32 id;
+	int low_mark;
+	int high_mark;
+};
+
+static inline dma_addr_t
+iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx)
+{
+	return txq->first_tb_dma +
+	       sizeof(struct iwl_pcie_first_tb_buf) * idx;
+}
+
+struct iwl_tso_hdr_page {
+	struct page *page;
+	u8 *pos;
+};
+
+/**
+ * enum iwl_shared_irq_flags - level of sharing for irq
+ * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes.
+ * @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
+ */
+enum iwl_shared_irq_flags {
+	IWL_SHARED_IRQ_NON_RX		= BIT(0),
+	IWL_SHARED_IRQ_FIRST_RSS	= BIT(1),
+};
+
+/**
+ * enum iwl_image_response_code - image response values
+ * @IWL_IMAGE_RESP_DEF: the default value of the register
+ * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
+ * @IWL_IMAGE_RESP_FAIL: iml reading failed
+ */
+enum iwl_image_response_code {
+	IWL_IMAGE_RESP_DEF		= 0,
+	IWL_IMAGE_RESP_SUCCESS		= 1,
+	IWL_IMAGE_RESP_FAIL		= 2,
+};
+
+/**
+ * struct iwl_dram_data
+ * @physical: page phy pointer
+ * @block: pointer to the allocated block/page
+ * @size: size of the block/page
+ */
+struct iwl_dram_data {
+	dma_addr_t physical;
+	void *block;
+	int size;
+};
+
+/**
+ * struct iwl_self_init_dram - dram data used by self init process
+ * @fw: lmac and umac dram data
+ * @fw_cnt: total number of items in array
+ * @paging: paging dram data
+ * @paging_cnt: total number of items in array
+ */
+struct iwl_self_init_dram {
+	struct iwl_dram_data *fw;
+	int fw_cnt;
+	struct iwl_dram_data *paging;
+	int paging_cnt;
+};
+
+/**
+ * struct iwl_trans_pcie - PCIe transport specific data
+ * @rxq: all the RX queue data
+ * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
+ * @global_table: table mapping received VID from hw to rxb
+ * @rba: allocator for RX replenishing
+ * @ctxt_info: context information for FW self init
+ * @ctxt_info_gen3: context information for gen3 devices
+ * @prph_info: prph info for self init
+ * @prph_scratch: prph scratch for self init
+ * @ctxt_info_dma_addr: dma addr of context information
+ * @prph_info_dma_addr: dma addr of prph info
+ * @prph_scratch_dma_addr: dma addr of prph scratch
+ * @init_dram: DRAM data of firmware image (including paging).
+ *	Context information addresses will be taken from here.
+ *	This is driver's local copy for keeping track of size and
+ *	count for allocating and freeing the memory.
+ * @trans: pointer to the generic transport area
+ * @scd_base_addr: scheduler sram base address in SRAM
+ * @scd_bc_tbls: pointer to the byte count table of the scheduler
+ * @kw: keep warm address
+ * @pci_dev: basic pci-network driver stuff
+ * @hw_base: pci hardware address support
+ * @ucode_write_complete: indicates that the ucode has been copied.
+ * @ucode_write_waitq: wait queue for uCode load
+ * @cmd_queue: command queue number
+ * @rx_buf_size: Rx buffer size
+ * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
+ * @scd_set_active: should the transport configure the SCD for HCMD queue
+ * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
+ *	frame.
+ * @rx_page_order: page order for receive buffer size
+ * @reg_lock: protect hw register access
+ * @mutex: to protect stop_device / start_fw / start_hw
+ * @cmd_in_flight: true when we have a host command in flight
+ * @fw_mon_phys: physical address of the buffer for the firmware monitor
+ * @fw_mon_page: points to the first page of the buffer for the firmware monitor
+ * @fw_mon_size: size of the buffer for the firmware monitor
+ * @msix_entries: array of MSI-X entries
+ * @msix_enabled: true if managed to enable MSI-X
+ * @shared_vec_mask: the type of causes the shared vector handles
+ *	(see iwl_shared_irq_flags).
+ * @alloc_vecs: the number of interrupt vectors allocated by the OS
+ * @def_irq: default irq for non rx causes
+ * @fh_init_mask: initial unmasked fh causes
+ * @hw_init_mask: initial unmasked hw causes
+ * @fh_mask: current unmasked fh causes
+ * @hw_mask: current unmasked hw causes
+ * @in_rescan: true if we have triggered a device rescan
+ * @scheduled_for_removal: true if we have scheduled a device removal
+ */
+struct iwl_trans_pcie {
+	struct iwl_rxq *rxq;
+	struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
+	struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
+	struct iwl_rb_allocator rba;
+	union {
+		struct iwl_context_info *ctxt_info;
+		struct iwl_context_info_gen3 *ctxt_info_gen3;
+	};
+	struct iwl_prph_info *prph_info;
+	struct iwl_prph_scratch *prph_scratch;
+	dma_addr_t ctxt_info_dma_addr;
+	dma_addr_t prph_info_dma_addr;
+	dma_addr_t prph_scratch_dma_addr;
+	dma_addr_t iml_dma_addr;
+	struct iwl_self_init_dram init_dram;
+	struct iwl_trans *trans;
+
+	struct net_device napi_dev;
+
+	struct iwl_tso_hdr_page __percpu *tso_hdr_page;
+
+	/* INT ICT Table */
+	__le32 *ict_tbl;
+	dma_addr_t ict_tbl_dma;
+	int ict_index;
+	bool use_ict;
+	bool is_down, opmode_down;
+	bool debug_rfkill;
+	struct isr_statistics isr_stats;
+
+	spinlock_t irq_lock;
+	struct mutex mutex;
+	u32 inta_mask;
+	u32 scd_base_addr;
+	struct iwl_dma_ptr scd_bc_tbls;
+	struct iwl_dma_ptr kw;
+
+	struct iwl_txq *txq_memory;
+	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
+	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
+	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
+
+	/* PCI bus related data */
+	struct pci_dev *pci_dev;
+	void __iomem *hw_base;
+
+	bool ucode_write_complete;
+	wait_queue_head_t ucode_write_waitq;
+	wait_queue_head_t wait_command_queue;
+	wait_queue_head_t d0i3_waitq;
+
+	u8 page_offs, dev_cmd_offs;
+
+	u8 cmd_queue;
+	u8 cmd_fifo;
+	unsigned int cmd_q_wdg_timeout;
+	u8 n_no_reclaim_cmds;
+	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
+	u8 max_tbs;
+	u16 tfd_size;
+
+	enum iwl_amsdu_size rx_buf_size;
+	bool bc_table_dword;
+	bool scd_set_active;
+	bool sw_csum_tx;
+	bool pcie_dbg_dumped_once;
+	u32 rx_page_order;
+
+	/* protects hw register access */
+	spinlock_t reg_lock;
+	bool cmd_hold_nic_awake;
+	bool ref_cmd_in_flight;
+
+	dma_addr_t fw_mon_phys;
+	struct page *fw_mon_page;
+	u32 fw_mon_size;
+
+	struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
+	bool msix_enabled;
+	u8 shared_vec_mask;
+	u32 alloc_vecs;
+	u32 def_irq;
+	u32 fh_init_mask;
+	u32 hw_init_mask;
+	u32 fh_mask;
+	u32 hw_mask;
+	cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
+	u16 tx_cmd_queue_size;
+	bool in_rescan;
+	bool scheduled_for_removal;
+};
+
+static inline struct iwl_trans_pcie *
+IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
+{
+	return (void *)trans->trans_specific;
+}
+
+static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
+				      struct msix_entry *entry)
+{
+	/*
+	 * Before sending the interrupt the HW disables it to prevent
+	 * a nested interrupt. This is done by writing 1 to the corresponding
+	 * bit in the mask register. After handling the interrupt, it should be
+	 * re-enabled by clearing this bit. This register is defined as a
+	 * write-1-clear (W1C) register, meaning that the bit is cleared
+	 * by writing 1 to it.
+	 */
+	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
+}
+
+static inline struct iwl_trans *
+iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
+{
+	return container_of((void *)trans_pcie, struct iwl_trans,
+			    trans_specific);
+}
+
+/*
+ * Convention: trans API functions: iwl_trans_pcie_XXX
+ *	Other functions: iwl_pcie_XXX
+ */
+struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
+				       const struct pci_device_id *ent,
+				       const struct iwl_cfg *cfg);
+void iwl_trans_pcie_free(struct iwl_trans *trans);
+
+/*****************************************************
+* RX
+******************************************************/
+int iwl_pcie_rx_init(struct iwl_trans *trans);
+int iwl_pcie_gen2_rx_init(struct iwl_trans *trans);
+irqreturn_t iwl_pcie_msix_isr(int irq, void *data);
+irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
+irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
+irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
+int iwl_pcie_rx_stop(struct iwl_trans *trans);
+void iwl_pcie_rx_free(struct iwl_trans *trans);
+void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
+void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
+int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
+void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
+			    struct iwl_rxq *rxq);
+
+/*****************************************************
+* ICT - interrupt handling
+******************************************************/
+irqreturn_t iwl_pcie_isr(int irq, void *data);
+int iwl_pcie_alloc_ict(struct iwl_trans *trans);
+void iwl_pcie_free_ict(struct iwl_trans *trans);
+void iwl_pcie_reset_ict(struct iwl_trans *trans);
+void iwl_pcie_disable_ict(struct iwl_trans *trans);
+
+/*****************************************************
+* TX / HCMD
+******************************************************/
+int iwl_pcie_tx_init(struct iwl_trans *trans);
+int iwl_pcie_gen2_tx_init(struct iwl_trans *trans);
+void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
+int iwl_pcie_tx_stop(struct iwl_trans *trans);
+void iwl_pcie_tx_free(struct iwl_trans *trans);
+bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
+			       const struct iwl_trans_txq_scd_cfg *cfg,
+			       unsigned int wdg_timeout);
+void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
+				bool configure_scd);
+void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
+					bool shared_mode);
+void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
+				  struct iwl_txq *txq);
+int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
+		      struct iwl_device_cmd *dev_cmd, int txq_id);
+void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
+int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
+void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
+			    struct iwl_rx_cmd_buffer *rxb);
+void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+			    struct sk_buff_head *skbs);
+void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
+
+static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
+					  u8 idx)
+{
+	if (trans->cfg->use_tfh) {
+		struct iwl_tfh_tfd *tfd = _tfd;
+		struct iwl_tfh_tb *tb = &tfd->tbs[idx];
+
+		return le16_to_cpu(tb->tb_len);
+	} else {
+		struct iwl_tfd *tfd = _tfd;
+		struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+
+		return le16_to_cpu(tb->hi_n_len) >> 4;
+	}
+}
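+/*
+ * Worked example (layout as understood here): a legacy TFD's hi_n_len
+ * packs the upper 4 bits of the DMA address in bits 0..3 and the 12-bit
+ * TB length in bits 4..15, hence the >> 4 above; e.g. hi_n_len == 0x0142
+ * describes a 20-byte TB.
+ */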
+
+/*****************************************************
+* Error handling
+******************************************************/
+void iwl_pcie_dump_csr(struct iwl_trans *trans);
+
+/*****************************************************
+* Helpers
+******************************************************/
+static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	clear_bit(STATUS_INT_ENABLED, &trans->status);
+	if (!trans_pcie->msix_enabled) {
+		/* disable interrupts from uCode/NIC to host */
+		iwl_write32(trans, CSR_INT_MASK, 0x00000000);
+
+		/* acknowledge/clear/reset any interrupts still pending
+		 * from uCode or flow handler (Rx/Tx DMA) */
+		iwl_write32(trans, CSR_INT, 0xffffffff);
+		iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
+	} else {
+		/* disable all the interrupt we might use */
+		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
+			    trans_pcie->fh_init_mask);
+		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
+			    trans_pcie->hw_init_mask);
+	}
+	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
+}
+
+#define IWL_NUM_OF_COMPLETION_RINGS	31
+#define IWL_NUM_OF_TRANSFER_RINGS	527
+
+static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
+					    int start)
+{
+	int i = 0;
+
+	while (start < fw->num_sec &&
+	       fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
+	       fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
+		start++;
+		i++;
+	}
+
+	return i;
+}
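+/*
+ * Worked example (hypothetical image): for sections laid out as
+ * { sec0, sec1, CPU1_CPU2_SEPARATOR_SECTION, sec2 },
+ * iwl_pcie_get_num_sections(fw, 0) returns 2, and calling it again with
+ * start == 3 (one past the separator) returns 1.
+ */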
+
+static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
+					       const struct fw_desc *sec,
+					       struct iwl_dram_data *dram)
+{
+	dram->block = dma_alloc_coherent(trans->dev, sec->len,
+					 &dram->physical,
+					 GFP_KERNEL);
+	if (!dram->block)
+		return -ENOMEM;
+
+	dram->size = sec->len;
+	memcpy(dram->block, sec->data, sec->len);
+
+	return 0;
+}
+
+static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
+	int i;
+
+	if (!dram->fw) {
+		WARN_ON(dram->fw_cnt);
+		return;
+	}
+
+	for (i = 0; i < dram->fw_cnt; i++)
+		dma_free_coherent(trans->dev, dram->fw[i].size,
+				  dram->fw[i].block, dram->fw[i].physical);
+
+	kfree(dram->fw);
+	dram->fw_cnt = 0;
+	dram->fw = NULL;
+}
+
+static inline void iwl_disable_interrupts(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	spin_lock(&trans_pcie->irq_lock);
+	_iwl_disable_interrupts(trans);
+	spin_unlock(&trans_pcie->irq_lock);
+}
+
+static inline void _iwl_enable_interrupts(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
+	set_bit(STATUS_INT_ENABLED, &trans->status);
+	if (!trans_pcie->msix_enabled) {
+		trans_pcie->inta_mask = CSR_INI_SET_MASK;
+		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
+	} else {
+		/*
+		 * fh/hw_mask keeps all the unmasked causes.
+		 * Unlike MSI, in MSI-X a cause is enabled when its bit is unset.
+		 */
+		trans_pcie->hw_mask = trans_pcie->hw_init_mask;
+		trans_pcie->fh_mask = trans_pcie->fh_init_mask;
+		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
+			    ~trans_pcie->fh_mask);
+		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
+			    ~trans_pcie->hw_mask);
+	}
+}
+
+static inline void iwl_enable_interrupts(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	spin_lock(&trans_pcie->irq_lock);
+	_iwl_enable_interrupts(trans);
+	spin_unlock(&trans_pcie->irq_lock);
+}
+static inline void iwl_enable_hw_int_msk_msix(struct iwl_trans *trans, u32 msk)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD, ~msk);
+	trans_pcie->hw_mask = msk;
+}
+
+static inline void iwl_enable_fh_int_msk_msix(struct iwl_trans *trans, u32 msk)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~msk);
+	trans_pcie->fh_mask = msk;
+}
+
+static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
+	if (!trans_pcie->msix_enabled) {
+		trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
+		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
+	} else {
+		iwl_write32(trans, CSR_MSIX_HW_INT_MASK_AD,
+			    trans_pcie->hw_init_mask);
+		iwl_enable_fh_int_msk_msix(trans,
+					   MSIX_FH_INT_CAUSES_D2S_CH0_NUM);
+	}
+}
+
+static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
+{
+	return index & (q->n_window - 1);
+}
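+/*
+ * Worked example: on the command queue n_window is TFD_CMD_SLOTS (32), so
+ * a HW index of 71 (0x47) maps to SW entry 71 & 31 == 7, the window
+ * overlay described above struct iwl_txq.
+ */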
+
+static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
+				     struct iwl_txq *txq, int idx)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	if (trans->cfg->use_tfh)
+		idx = iwl_pcie_get_cmd_index(txq, idx);
+
+	return txq->tfds + trans_pcie->tfd_size * idx;
+}
+
+static inline const char *queue_name(struct device *dev,
+				     struct iwl_trans_pcie *trans_p, int i)
+{
+	if (trans_p->shared_vec_mask) {
+		int vec = trans_p->shared_vec_mask &
+			  IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
+
+		if (i == 0)
+			return DRV_NAME ": shared IRQ";
+
+		return devm_kasprintf(dev, GFP_KERNEL,
+				      DRV_NAME ": queue %d", i + vec);
+	}
+	if (i == 0)
+		return DRV_NAME ": default queue";
+
+	if (i == trans_p->alloc_vecs - 1)
+		return DRV_NAME ": exception";
+
+	return devm_kasprintf(dev, GFP_KERNEL,
+			      DRV_NAME  ": queue %d", i);
+}
+
+static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
+	if (!trans_pcie->msix_enabled) {
+		trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
+		iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
+	} else {
+		iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD,
+			    trans_pcie->fh_init_mask);
+		iwl_enable_hw_int_msk_msix(trans,
+					   MSIX_HW_INT_CAUSES_REG_RF_KILL);
+	}
+
+	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_9000) {
+		/*
+		 * On 9000-series devices this bit isn't enabled by default, so
+		 * when we power down the device we need to set the bit to
+		 * allow it to wake up the PCI-E bus for RF-kill interrupts.
+		 */
+		iwl_set_bit(trans, CSR_GP_CNTRL,
+			    CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
+	}
+}
+
+void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);
+
+static inline void iwl_wake_queue(struct iwl_trans *trans,
+				  struct iwl_txq *txq)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
+		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
+		iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
+	}
+}
+
+static inline void iwl_stop_queue(struct iwl_trans *trans,
+				  struct iwl_txq *txq)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
+		iwl_op_mode_queue_full(trans->op_mode, txq->id);
+		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
+	} else {
+		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
+				    txq->id);
+	}
+}
+
+static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
+{
+	int index = iwl_pcie_get_cmd_index(q, i);
+	int r = iwl_pcie_get_cmd_index(q, q->read_ptr);
+	int w = iwl_pcie_get_cmd_index(q, q->write_ptr);
+
+	return w >= r ?
+		(index >= r && index < w) :
+		!(index < r && index >= w);
+}
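+/*
+ * Worked example: with r == 10 and w == 20, indexes 10..19 are "used";
+ * if the pointers have wrapped so that r == 250 and w == 5 (256-entry
+ * queue), the used range is 250..255 together with 0..4.
+ */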
+
+static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	lockdep_assert_held(&trans_pcie->mutex);
+
+	if (trans_pcie->debug_rfkill)
+		return true;
+
+	return !(iwl_read32(trans, CSR_GP_CNTRL) &
+		CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
+}
+
+static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
+						  u32 reg, u32 mask, u32 value)
+{
+	u32 v;
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+	WARN_ON_ONCE(value & ~mask);
+#endif
+
+	v = iwl_read32(trans, reg);
+	v &= ~mask;
+	v |= value;
+	iwl_write32(trans, reg, v);
+}
+
+static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
+					      u32 reg, u32 mask)
+{
+	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
+}
+
+static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
+					    u32 reg, u32 mask)
+{
+	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
+}
+
+void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
+#else
+static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
+{
+	return 0;
+}
+#endif
+
+int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans);
+int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);
+
+void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable);
+
+void iwl_pcie_rx_allocator_work(struct work_struct *data);
+
+/* common functions that are used by gen2 transport */
+void iwl_pcie_apm_config(struct iwl_trans *trans);
+int iwl_pcie_prepare_card_hw(struct iwl_trans *trans);
+void iwl_pcie_synchronize_irqs(struct iwl_trans *trans);
+bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
+void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
+				       bool was_in_rfkill);
+void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
+int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q);
+void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
+void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
+int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
+		      int slots_num, bool cmd_queue);
+int iwl_pcie_txq_alloc(struct iwl_trans *trans,
+		       struct iwl_txq *txq, int slots_num,  bool cmd_queue);
+int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
+			   struct iwl_dma_ptr *ptr, size_t size);
+void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
+void iwl_pcie_apply_destination(struct iwl_trans *trans);
+void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
+			    struct sk_buff *skb);
+#ifdef CONFIG_INET
+struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
+#endif
+
+/* common functions that are used by gen3 transport */
+void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);
+
+/* transport gen 2 exported functions */
+int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
+				 const struct fw_img *fw, bool run_in_rfkill);
+void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
+int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
+				 struct iwl_tx_queue_cfg_cmd *cmd,
+				 int cmd_id, int size,
+				 unsigned int timeout);
+void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
+int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
+			   struct iwl_device_cmd *dev_cmd, int txq_id);
+int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
+				  struct iwl_host_cmd *cmd);
+void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans,
+				     bool low_power);
+void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power);
+void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id);
+void iwl_pcie_gen2_tx_free(struct iwl_trans *trans);
+void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans);
+#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
new file mode 100644
index 0000000..d4a31e0
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -0,0 +1,2154 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/gfp.h>
+
+#include "iwl-prph.h"
+#include "iwl-io.h"
+#include "internal.h"
+#include "iwl-op-mode.h"
+#include "iwl-context-info-gen3.h"
+
+/******************************************************************************
+ *
+ * RX path functions
+ *
+ ******************************************************************************/
+
+/*
+ * Rx theory of operation
+ *
+ * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
+ * each of which points to a Receive Buffer to be filled by the NIC.  These get
+ * used not only for Rx frames, but for any command response or notification
+ * from the NIC.  The driver and NIC manage the Rx buffers by means
+ * of indexes into the circular buffer.
+ *
+ * Rx Queue Indexes
+ * The host/firmware share two index registers for managing the Rx buffers.
+ *
+ * The READ index maps to the first position that the firmware may be writing
+ * to -- the driver can read up to (but not including) this position and get
+ * good data.
+ * The READ index is managed by the firmware once the card is enabled.
+ *
+ * The WRITE index maps to the last position the driver has read from -- the
+ * position preceding WRITE is the last slot the firmware can place a packet.
+ *
+ * The queue is empty (no good data) if WRITE = READ - 1, and is full if
+ * WRITE = READ.
+ *
+ * During initialization, the host sets up the READ queue position to the first
+ * INDEX position, and WRITE to the last (READ - 1 wrapped)
+ *
+ * When the firmware places a packet in a buffer, it will advance the READ index
+ * and fire the RX interrupt.  The driver can then query the READ index and
+ * process as many packets as possible, moving the WRITE index forward as it
+ * resets the Rx queue buffers with new memory.
+ *
+ * The management in the driver is as follows:
+ * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
+ *   When the interrupt handler is called, the request is processed.
+ *   The page is either stolen - transferred to the upper layer
+ *   or reused - added immediately to the iwl->rxq->rx_free list.
+ * + When the page is stolen - the driver updates the matching queue's used
+ *   count, detaches the RBD and transfers it to the queue used list.
+ *   When there are two used RBDs - they are transferred to the allocator empty
+ *   list. Work is then scheduled for the allocator to start allocating
+ *   eight buffers.
+ *   When there are another 6 used RBDs - they are transferred to the allocator
+ *   empty list and the driver tries to claim the pre-allocated buffers and
+ *   add them to iwl->rxq->rx_free. If it fails - it continues to claim them
+ *   until ready.
+ *   When there are 8+ buffers in the free list - either from allocation or from
+ *   8 reused unstolen pages - restock is called to update the FW and indexes.
+ * + In order to make sure the allocator always has RBDs to use for allocation
+ *   the allocator has an initial pool of size num_queues * (8 - 2) - the
+ *   maximum number of RBDs that can be missing per allocation request (a
+ *   request is posted with 2 empty RBDs; there is no guarantee when the
+ *   other 6 RBDs are supplied). The queues supply the allocator with the
+ *   recycled remainder of the RBDs.
+ * + A received packet is processed and handed to the kernel network stack,
+ *   detached from the iwl->rxq.  The driver 'processed' index is updated.
+ * + If there are no allocated buffers in iwl->rxq->rx_free,
+ *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
+ *   If there were enough free buffers and RX_STALLED is set it is cleared.
+ *
+ *
+ * Driver sequence:
+ *
+ * iwl_rxq_alloc()            Allocates rx_free
+ * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
+ *                            iwl_pcie_rxq_restock.
+ *                            Used only during initialization.
+ * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
+ *                            queue, updates firmware pointers, and updates
+ *                            the WRITE index.
+ * iwl_pcie_rx_allocator()     Background work for allocating pages.
+ *
+ * -- enable interrupts --
+ * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
+ *                            READ INDEX, detaching the SKB from the pool.
+ *                            Moves the packet buffer from queue to rx_used.
+ *                            Posts and claims requests to the allocator.
+ *                            Calls iwl_pcie_rxq_restock to refill any empty
+ *                            slots.
+ *
+ * RBD life-cycle:
+ *
+ * Init:
+ * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
+ *
+ * Regular Receive interrupt:
+ * Page Stolen:
+ * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
+ * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
+ * Page not Stolen:
+ * rxq.queue -> rxq.rx_free -> rxq.queue
+ * ...
+ *
+ */
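
A minimal userspace model of the READ/WRITE index protocol described above (an
illustrative sketch, not driver code; the queue size and all names are
hypothetical):

#include <assert.h>

#define TOY_QSZ 256u			/* power of two, like the RX queue */

struct toy_rxq {
	unsigned int read;		/* advanced by the "firmware" side */
	unsigned int write;		/* advanced by the "driver" side */
};

/* full: WRITE == READ; empty: WRITE == READ - 1 (mod queue size) */
static int toy_full(const struct toy_rxq *q)
{
	return q->write == q->read;
}

static int toy_empty(const struct toy_rxq *q)
{
	return ((q->write + 1) & (TOY_QSZ - 1)) == q->read;
}

int main(void)
{
	/* init state per the comment: READ at 0, WRITE at READ - 1 wrapped */
	struct toy_rxq q = { .read = 0, .write = TOY_QSZ - 1 };

	assert(toy_empty(&q) && !toy_full(&q));
	return 0;
}
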
+
+/*
+ * iwl_rxq_space - Return number of free slots available in queue.
+ */
+static int iwl_rxq_space(const struct iwl_rxq *rxq)
+{
+	/* Make sure rx queue size is a power of 2 */
+	WARN_ON(rxq->queue_size & (rxq->queue_size - 1));
+
+	/*
+	 * There can be up to (queue_size - 1) free slots, to avoid ambiguity
+	 * between empty and completely full queues.
+	 * The following is equivalent to modulo by queue_size and is well
+	 * defined for negative dividends.
+	 */
+	return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
+}
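
The masked subtraction depends on the queue size being a power of two:
unsigned wrap-around followed by the mask still yields the correct count. A
self-contained check with hypothetical values:

#include <stdio.h>

int main(void)
{
	unsigned int size = 256, read = 3, write = 250;

	/* read - write - 1 wraps around in unsigned arithmetic;
	 * masking with (size - 1) recovers the free-slot count, 8. */
	printf("%u\n", (read - write - 1) & (size - 1));
	return 0;
}
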
+
+/*
+ * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
+ */
+static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
+{
+	return cpu_to_le32((u32)(dma_addr >> 8));
+}
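
The shift is safe because the receive buffers are page-backed and therefore
far more than 256-byte aligned (the restock path below even warns if the low
12 bits of the DMA address are set), so no address bits are lost. A sketch of
that assumption, with hypothetical names:

#include <stdint.h>
#include <assert.h>

static uint32_t toy_addr2rbd(uint64_t dma_addr)
{
	assert((dma_addr & 0xff) == 0);	/* at least 256-byte aligned */
	return (uint32_t)(dma_addr >> 8);
}
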
+
+/*
+ * iwl_pcie_rx_stop - stops the Rx DMA
+ */
+int iwl_pcie_rx_stop(struct iwl_trans *trans)
+{
+	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+		/* TODO: remove this for 22560 once fw does it */
+		iwl_write_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
+		return iwl_poll_prph_bit(trans, RFH_GEN_STATUS_GEN3,
+					 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
+	} else if (trans->cfg->mq_rx_supported) {
+		iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
+		return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
+					   RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
+	} else {
+		iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+		return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
+					   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
+					   1000);
+	}
+}
+
+/*
+ * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
+ */
+static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
+				    struct iwl_rxq *rxq)
+{
+	u32 reg;
+
+	lockdep_assert_held(&rxq->lock);
+
+	/*
+	 * explicitly wake up the NIC if:
+	 * 1. shadow registers aren't enabled
+	 * 2. there is a chance that the NIC is asleep
+	 */
+	if (!trans->cfg->base_params->shadow_reg_enable &&
+	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
+		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
+
+		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+			IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
+				       reg);
+			iwl_set_bit(trans, CSR_GP_CNTRL,
+				    BIT(trans->cfg->csr->flag_mac_access_req));
+			rxq->need_update = true;
+			return;
+		}
+	}
+
+	rxq->write_actual = round_down(rxq->write, 8);
+	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+		iwl_write32(trans, HBUS_TARG_WRPTR,
+			    (rxq->write_actual |
+			     ((FIRST_RX_QUEUE + rxq->id) << 16)));
+	else if (trans->cfg->mq_rx_supported)
+		iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
+			    rxq->write_actual);
+	else
+		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
+}
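
The round_down() to a multiple of 8 means the device only ever sees the write
pointer advance in batches of eight RBDs. As a toy illustration of the mask
arithmetic (not the kernel macro):

#define toy_round_down(x, y)	((x) & ~((y) - 1))	/* y: power of two */
/* toy_round_down(13, 8) == 8; toy_round_down(16, 8) == 16 */
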
+
+static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int i;
+
+	for (i = 0; i < trans->num_rx_queues; i++) {
+		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+
+		if (!rxq->need_update)
+			continue;
+		spin_lock(&rxq->lock);
+		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
+		rxq->need_update = false;
+		spin_unlock(&rxq->lock);
+	}
+}
+
+static void iwl_pcie_restock_bd(struct iwl_trans *trans,
+				struct iwl_rxq *rxq,
+				struct iwl_rx_mem_buffer *rxb)
+{
+	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+		struct iwl_rx_transfer_desc *bd = rxq->bd;
+
+		bd[rxq->write].type_n_size =
+			cpu_to_le32((IWL_RX_TD_TYPE & IWL_RX_TD_TYPE_MSK) |
+			((IWL_RX_TD_SIZE_2K >> 8) & IWL_RX_TD_SIZE_MSK));
+		bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
+		bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
+	} else {
+		__le64 *bd = rxq->bd;
+
+		bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
+	}
+}
+
+/*
+ * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
+ */
+static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
+				  struct iwl_rxq *rxq)
+{
+	struct iwl_rx_mem_buffer *rxb;
+
+	/*
+	 * If the device isn't enabled - no need to try to add buffers...
+	 * This can happen when we stop the device and still have an interrupt
+	 * pending. We stop the APM before we sync the interrupts because we
+	 * have to (see comment there). On the other hand, since the APM is
+	 * stopped, we cannot access the HW (in particular not prph).
+	 * So don't try to restock if the APM has been already stopped.
+	 */
+	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+		return;
+
+	spin_lock(&rxq->lock);
+	while (rxq->free_count) {
+		/* Get next free Rx buffer, remove from free list */
+		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
+				       list);
+		list_del(&rxb->list);
+		rxb->invalid = false;
+		/* first 12 bits are expected to be empty */
+		WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
+		/* Point to Rx buffer via next RBD in circular buffer */
+		iwl_pcie_restock_bd(trans, rxq, rxb);
+		rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
+		rxq->free_count--;
+	}
+	spin_unlock(&rxq->lock);
+
+	/*
+	 * If we've added more space for the firmware to place data, tell it.
+	 * Increment device's write pointer in multiples of 8.
+	 */
+	if (rxq->write_actual != (rxq->write & ~0x7)) {
+		spin_lock(&rxq->lock);
+		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
+		spin_unlock(&rxq->lock);
+	}
+}
+
+/*
+ * iwl_pcie_rxsq_restock - restock implementation for single queue rx
+ */
+static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
+				  struct iwl_rxq *rxq)
+{
+	struct iwl_rx_mem_buffer *rxb;
+
+	/*
+	 * If the device isn't enabled - no need to try to add buffers...
+	 * This can happen when we stop the device and still have an interrupt
+	 * pending. We stop the APM before we sync the interrupts because we
+	 * have to (see comment there). On the other hand, since the APM is
+	 * stopped, we cannot access the HW (in particular not prph).
+	 * So don't try to restock if the APM has been already stopped.
+	 */
+	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+		return;
+
+	spin_lock(&rxq->lock);
+	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
+		__le32 *bd = (__le32 *)rxq->bd;
+		/* The overwritten rxb must be a used one */
+		rxb = rxq->queue[rxq->write];
+		BUG_ON(rxb && rxb->page);
+
+		/* Get next free Rx buffer, remove from free list */
+		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
+				       list);
+		list_del(&rxb->list);
+		rxb->invalid = false;
+
+		/* Point to Rx buffer via next RBD in circular buffer */
+		bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
+		rxq->queue[rxq->write] = rxb;
+		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
+		rxq->free_count--;
+	}
+	spin_unlock(&rxq->lock);
+
+	/* If we've added more space for the firmware to place data, tell it.
+	 * Increment device's write pointer in multiples of 8. */
+	if (rxq->write_actual != (rxq->write & ~0x7)) {
+		spin_lock(&rxq->lock);
+		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
+		spin_unlock(&rxq->lock);
+	}
+}
+
+/*
+ * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
+ *
+ * If there are slots in the RX queue that need to be restocked,
+ * and we have free pre-allocated buffers, fill the ranks as much
+ * as we can, pulling from rx_free.
+ *
+ * This moves the 'write' index forward to catch up with 'processed', and
+ * also updates the memory address in the firmware to reference the new
+ * target buffer.
+ */
+static
+void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
+{
+	if (trans->cfg->mq_rx_supported)
+		iwl_pcie_rxmq_restock(trans, rxq);
+	else
+		iwl_pcie_rxsq_restock(trans, rxq);
+}
+
+/*
+ * iwl_pcie_rx_alloc_page - allocates and returns a page.
+ */
+static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
+					   gfp_t priority)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct page *page;
+	gfp_t gfp_mask = priority;
+
+	if (trans_pcie->rx_page_order > 0)
+		gfp_mask |= __GFP_COMP;
+
+	/* Alloc a new receive buffer */
+	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
+	if (!page) {
+		if (net_ratelimit())
+			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
+				       trans_pcie->rx_page_order);
+		/*
+		 * Issue an error if we don't have enough pre-allocated
+		 * buffers.
+		 */
+		if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
+			IWL_CRIT(trans,
+				 "Failed to alloc_pages\n");
+		return NULL;
+	}
+	return page;
+}
+
+/*
+ * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
+ *
+ * A used RBD is an Rx buffer that has been given to the stack. To use it again
+ * a page must be allocated and the RBD must point to the page. This function
+ * doesn't change the HW pointer but handles the list of pages that is used by
+ * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
+ * allocated buffers.
+ */
+void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
+			    struct iwl_rxq *rxq)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rx_mem_buffer *rxb;
+	struct page *page;
+
+	while (1) {
+		spin_lock(&rxq->lock);
+		if (list_empty(&rxq->rx_used)) {
+			spin_unlock(&rxq->lock);
+			return;
+		}
+		spin_unlock(&rxq->lock);
+
+		/* Alloc a new receive buffer */
+		page = iwl_pcie_rx_alloc_page(trans, priority);
+		if (!page)
+			return;
+
+		spin_lock(&rxq->lock);
+
+		if (list_empty(&rxq->rx_used)) {
+			spin_unlock(&rxq->lock);
+			__free_pages(page, trans_pcie->rx_page_order);
+			return;
+		}
+		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
+				       list);
+		list_del(&rxb->list);
+		spin_unlock(&rxq->lock);
+
+		BUG_ON(rxb->page);
+		rxb->page = page;
+		/* Get physical address of the RB */
+		rxb->page_dma =
+			dma_map_page(trans->dev, page, 0,
+				     PAGE_SIZE << trans_pcie->rx_page_order,
+				     DMA_FROM_DEVICE);
+		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+			rxb->page = NULL;
+			spin_lock(&rxq->lock);
+			list_add(&rxb->list, &rxq->rx_used);
+			spin_unlock(&rxq->lock);
+			__free_pages(page, trans_pcie->rx_page_order);
+			return;
+		}
+
+		spin_lock(&rxq->lock);
+
+		list_add_tail(&rxb->list, &rxq->rx_free);
+		rxq->free_count++;
+
+		spin_unlock(&rxq->lock);
+	}
+}
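
Note how the function drops rxq->lock around the page allocation, which may
sleep for GFP_KERNEL requests, and re-checks the list after re-acquiring it. A
minimal pthread model of that pattern (hypothetical names, not driver code):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t toy_lock = PTHREAD_MUTEX_INITIALIZER;
static int toy_used_empty;	/* stand-in for list_empty(&rxq->rx_used) */

static void toy_alloc_one(void)
{
	void *page;

	pthread_mutex_lock(&toy_lock);
	if (toy_used_empty) {			/* nothing to refill */
		pthread_mutex_unlock(&toy_lock);
		return;
	}
	pthread_mutex_unlock(&toy_lock);

	page = malloc(4096);			/* may block: done unlocked */
	if (!page)
		return;

	pthread_mutex_lock(&toy_lock);
	if (toy_used_empty) {			/* state may have changed */
		pthread_mutex_unlock(&toy_lock);
		free(page);
		return;
	}
	/* ... detach an RBD, attach the page, move to the free list ... */
	pthread_mutex_unlock(&toy_lock);
}
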
+
+void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int i;
+
+	for (i = 0; i < RX_POOL_SIZE; i++) {
+		if (!trans_pcie->rx_pool[i].page)
+			continue;
+		dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
+			       PAGE_SIZE << trans_pcie->rx_page_order,
+			       DMA_FROM_DEVICE);
+		__free_pages(trans_pcie->rx_pool[i].page,
+			     trans_pcie->rx_page_order);
+		trans_pcie->rx_pool[i].page = NULL;
+	}
+}
+
+/*
+ * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
+ *
+ * Allocates 8 pages for each received request.
+ * Called as a scheduled work item.
+ */
+static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rb_allocator *rba = &trans_pcie->rba;
+	struct list_head local_empty;
+	int pending = atomic_xchg(&rba->req_pending, 0);
+
+	IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);
+
+	/* If we were scheduled - there is at least one request */
+	spin_lock(&rba->lock);
+	/* swap out the rba->rbd_empty to a local list */
+	list_replace_init(&rba->rbd_empty, &local_empty);
+	spin_unlock(&rba->lock);
+
+	while (pending) {
+		int i;
+		LIST_HEAD(local_allocated);
+		gfp_t gfp_mask = GFP_KERNEL;
+
+		/* Do not post a warning if there are only a few requests */
+		if (pending < RX_PENDING_WATERMARK)
+			gfp_mask |= __GFP_NOWARN;
+
+		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
+			struct iwl_rx_mem_buffer *rxb;
+			struct page *page;
+
+			/* List should never be empty - each reused RBD is
+			 * returned to the list, and initial pool covers any
+			 * possible gap between the time the page is allocated
+			 * to the time the RBD is added.
+			 */
+			BUG_ON(list_empty(&local_empty));
+			/* Get the first rxb from the rbd list */
+			rxb = list_first_entry(&local_empty,
+					       struct iwl_rx_mem_buffer, list);
+			BUG_ON(rxb->page);
+
+			/* Alloc a new receive buffer */
+			page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
+			if (!page)
+				continue;
+			rxb->page = page;
+
+			/* Get physical address of the RB */
+			rxb->page_dma = dma_map_page(trans->dev, page, 0,
+					PAGE_SIZE << trans_pcie->rx_page_order,
+					DMA_FROM_DEVICE);
+			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+				rxb->page = NULL;
+				__free_pages(page, trans_pcie->rx_page_order);
+				continue;
+			}
+
+			/* move the allocated entry to the out list */
+			list_move(&rxb->list, &local_allocated);
+			i++;
+		}
+
+		pending--;
+		if (!pending) {
+			pending = atomic_xchg(&rba->req_pending, 0);
+			IWL_DEBUG_RX(trans,
+				     "Pending allocation requests = %d\n",
+				     pending);
+		}
+
+		spin_lock(&rba->lock);
+		/* add the allocated rbds to the allocator allocated list */
+		list_splice_tail(&local_allocated, &rba->rbd_allocated);
+		/* get more empty RBDs for current pending requests */
+		list_splice_tail_init(&rba->rbd_empty, &local_empty);
+		spin_unlock(&rba->lock);
+
+		atomic_inc(&rba->req_ready);
+	}
+
+	spin_lock(&rba->lock);
+	/* return unused rbds to the allocator empty list */
+	list_splice_tail(&local_empty, &rba->rbd_empty);
+	spin_unlock(&rba->lock);
+}
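
The hand-off between the queues and this worker reduces to two counters:
req_pending (batches posted by the queues) and req_ready (batches the worker
has completed). A simplified C11-atomics model of the same protocol, with
hypothetical names:

#include <stdatomic.h>

static atomic_int toy_req_pending, toy_req_ready;

static void toy_post_request(void)	/* queue side: 2 RBDs handed over */
{
	atomic_fetch_add(&toy_req_pending, 1);
	/* ... then schedule the worker, as queue_work() does ... */
}

static void toy_allocator_work(void)	/* worker side */
{
	int pending = atomic_exchange(&toy_req_pending, 0);

	while (pending--) {
		/* ... allocate one batch of 8 pages here ... */
		atomic_fetch_add(&toy_req_ready, 1);
	}
}

static int toy_try_claim(void)		/* queue side: claim one batch */
{
	int old = atomic_load(&toy_req_ready);

	/* same effect as atomic_dec_if_positive() */
	while (old > 0)
		if (atomic_compare_exchange_weak(&toy_req_ready, &old,
						 old - 1))
			return 1;
	return 0;
}
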
+
+/*
+ * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
+ *
+ * Called by the queue when it has posted an allocation request and
+ * has freed 8 RBDs in order to restock itself.
+ * This function directly moves the allocated RBs to the queue's ownership
+ * and updates the relevant counters.
+ */
+static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
+				      struct iwl_rxq *rxq)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rb_allocator *rba = &trans_pcie->rba;
+	int i;
+
+	lockdep_assert_held(&rxq->lock);
+
+	/*
+	 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
+	 * If req_ready is 0 atomic_dec_if_positive will return -1 and this
+	 * function will return early, as there are no ready requests.
+	 * atomic_dec_if_positive will perform the *actual* decrement only if
+	 * req_ready > 0, i.e. there are ready requests and the function
+	 * hands one request to the caller.
+	 */
+	if (atomic_dec_if_positive(&rba->req_ready) < 0)
+		return;
+
+	spin_lock(&rba->lock);
+	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
+		/* Get next free Rx buffer, remove it from free list */
+		struct iwl_rx_mem_buffer *rxb =
+			list_first_entry(&rba->rbd_allocated,
+					 struct iwl_rx_mem_buffer, list);
+
+		list_move(&rxb->list, &rxq->rx_free);
+	}
+	spin_unlock(&rba->lock);
+
+	rxq->used_count -= RX_CLAIM_REQ_ALLOC;
+	rxq->free_count += RX_CLAIM_REQ_ALLOC;
+}
+
+void iwl_pcie_rx_allocator_work(struct work_struct *data)
+{
+	struct iwl_rb_allocator *rba_p =
+		container_of(data, struct iwl_rb_allocator, rx_alloc);
+	struct iwl_trans_pcie *trans_pcie =
+		container_of(rba_p, struct iwl_trans_pcie, rba);
+
+	iwl_pcie_rx_allocator(trans_pcie->trans);
+}
+
+static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
+{
+	struct iwl_rx_transfer_desc *rx_td;
+
+	if (use_rx_td)
+		return sizeof(*rx_td);
+	else
+		return trans->cfg->mq_rx_supported ? sizeof(__le64) :
+			sizeof(__le32);
+}
+
+static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
+				  struct iwl_rxq *rxq)
+{
+	struct device *dev = trans->dev;
+	bool use_rx_td = (trans->cfg->device_family >=
+			  IWL_DEVICE_FAMILY_22560);
+	int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
+
+	if (rxq->bd)
+		dma_free_coherent(trans->dev,
+				  free_size * rxq->queue_size,
+				  rxq->bd, rxq->bd_dma);
+	rxq->bd_dma = 0;
+	rxq->bd = NULL;
+
+	if (rxq->rb_stts)
+		dma_free_coherent(trans->dev,
+				  use_rx_td ? sizeof(__le16) :
+				  sizeof(struct iwl_rb_status),
+				  rxq->rb_stts, rxq->rb_stts_dma);
+	rxq->rb_stts_dma = 0;
+	rxq->rb_stts = NULL;
+
+	if (rxq->used_bd)
+		dma_free_coherent(trans->dev,
+				  (use_rx_td ? sizeof(*rxq->cd) :
+				   sizeof(__le32)) * rxq->queue_size,
+				  rxq->used_bd, rxq->used_bd_dma);
+	rxq->used_bd_dma = 0;
+	rxq->used_bd = NULL;
+
+	if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
+		return;
+
+	if (rxq->tr_tail)
+		dma_free_coherent(dev, sizeof(__le16),
+				  rxq->tr_tail, rxq->tr_tail_dma);
+	rxq->tr_tail_dma = 0;
+	rxq->tr_tail = NULL;
+
+	if (rxq->cr_tail)
+		dma_free_coherent(dev, sizeof(__le16),
+				  rxq->cr_tail, rxq->cr_tail_dma);
+	rxq->cr_tail_dma = 0;
+	rxq->cr_tail = NULL;
+}
+
+static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
+				  struct iwl_rxq *rxq)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct device *dev = trans->dev;
+	int i;
+	int free_size;
+	bool use_rx_td = (trans->cfg->device_family >=
+			  IWL_DEVICE_FAMILY_22560);
+
+	spin_lock_init(&rxq->lock);
+	if (trans->cfg->mq_rx_supported)
+		rxq->queue_size = MQ_RX_TABLE_SIZE;
+	else
+		rxq->queue_size = RX_QUEUE_SIZE;
+
+	free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
+
+	/*
+	 * Allocate the circular buffer of Read Buffer Descriptors
+	 * (RBDs)
+	 */
+	rxq->bd = dma_zalloc_coherent(dev,
+				      free_size * rxq->queue_size,
+				      &rxq->bd_dma, GFP_KERNEL);
+	if (!rxq->bd)
+		goto err;
+
+	if (trans->cfg->mq_rx_supported) {
+		rxq->used_bd = dma_zalloc_coherent(dev,
+						   (use_rx_td ?
+						   sizeof(*rxq->cd) :
+						   sizeof(__le32)) *
+						   rxq->queue_size,
+						   &rxq->used_bd_dma,
+						   GFP_KERNEL);
+		if (!rxq->used_bd)
+			goto err;
+	}
+
+	/* Allocate the driver's pointer to receive buffer status */
+	rxq->rb_stts = dma_zalloc_coherent(dev, use_rx_td ?
+					   sizeof(__le16) :
+					   sizeof(struct iwl_rb_status),
+					   &rxq->rb_stts_dma,
+					   GFP_KERNEL);
+	if (!rxq->rb_stts)
+		goto err;
+
+	if (!use_rx_td)
+		return 0;
+
+	/* Allocate the driver's pointer to TR tail */
+	rxq->tr_tail = dma_zalloc_coherent(dev, sizeof(__le16),
+					   &rxq->tr_tail_dma,
+					   GFP_KERNEL);
+	if (!rxq->tr_tail)
+		goto err;
+
+	/* Allocate the driver's pointer to CR tail */
+	rxq->cr_tail = dma_zalloc_coherent(dev, sizeof(__le16),
+					   &rxq->cr_tail_dma,
+					   GFP_KERNEL);
+	if (!rxq->cr_tail)
+		goto err;
+	/*
+	 * W/A for a 22560 device step Z0 bug: the value must be non-zero.
+	 * TODO: remove this when we stop supporting step Z0.
+	 */
+	*rxq->cr_tail = cpu_to_le16(500);
+
+	return 0;
+
+err:
+	for (i = 0; i < trans->num_rx_queues; i++) {
+		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+
+		iwl_pcie_free_rxq_dma(trans, rxq);
+	}
+	kfree(trans_pcie->rxq);
+
+	return -ENOMEM;
+}
+
+static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rb_allocator *rba = &trans_pcie->rba;
+	int i, ret;
+
+	if (WARN_ON(trans_pcie->rxq))
+		return -EINVAL;
+
+	trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
+				  GFP_KERNEL);
+	if (!trans_pcie->rxq)
+		return -EINVAL;
+
+	spin_lock_init(&rba->lock);
+
+	for (i = 0; i < trans->num_rx_queues; i++) {
+		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+
+		ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u32 rb_size;
+	unsigned long flags;
+	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
+
+	switch (trans_pcie->rx_buf_size) {
+	case IWL_AMSDU_4K:
+		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
+		break;
+	case IWL_AMSDU_8K:
+		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
+		break;
+	case IWL_AMSDU_12K:
+		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
+		break;
+	default:
+		WARN_ON(1);
+		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
+	}
+
+	if (!iwl_trans_grab_nic_access(trans, &flags))
+		return;
+
+	/* Stop Rx DMA */
+	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+	/* reset and flush pointers */
+	iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
+	iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
+	iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);
+
+	/* Reset driver's Rx queue write index */
+	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+
+	/* Tell device where to find RBD circular buffer in DRAM */
+	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+		    (u32)(rxq->bd_dma >> 8));
+
+	/* Tell device where in DRAM to update its Rx status */
+	iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
+		    rxq->rb_stts_dma >> 4);
+
+	/* Enable Rx DMA
+	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
+	 *      the credit mechanism in 5000 HW RX FIFO
+	 * Direct rx interrupts to hosts
+	 * Rx buffer size 4 or 8k or 12k
+	 * RB timeout 0x10
+	 * 256 RBDs
+	 */
+	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
+		    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+		    FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
+		    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+		    rb_size |
+		    (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
+		    (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+
+	iwl_trans_release_nic_access(trans, &flags);
+
+	/* Set interrupt coalescing timer to default (2048 usecs) */
+	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+
+	/* W/A for interrupt coalescing bug in 7260 and 3160 */
+	if (trans->cfg->host_interrupt_operation_mode)
+		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
+}
+
+void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable)
+{
+	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_9000)
+		return;
+
+	if (CSR_HW_REV_STEP(trans->hw_rev) != SILICON_A_STEP)
+		return;
+
+	if (!trans->cfg->integrated)
+		return;
+
+	/*
+	 * Turn on the chicken-bits that cause MAC wakeup for RX-related
+	 * values.
+	 * This costs some power, but needed for W/A 9000 integrated A-step
+	 * bug where shadow registers are not in the retention list and their
+	 * value is lost when NIC powers down
+	 */
+	iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
+		    CSR_MAC_SHADOW_REG_CTRL_RX_WAKE);
+	iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTL2,
+		    CSR_MAC_SHADOW_REG_CTL2_RX_WAKE);
+}
+
+static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u32 rb_size, enabled = 0;
+	unsigned long flags;
+	int i;
+
+	switch (trans_pcie->rx_buf_size) {
+	case IWL_AMSDU_2K:
+		rb_size = RFH_RXF_DMA_RB_SIZE_2K;
+		break;
+	case IWL_AMSDU_4K:
+		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
+		break;
+	case IWL_AMSDU_8K:
+		rb_size = RFH_RXF_DMA_RB_SIZE_8K;
+		break;
+	case IWL_AMSDU_12K:
+		rb_size = RFH_RXF_DMA_RB_SIZE_12K;
+		break;
+	default:
+		WARN_ON(1);
+		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
+	}
+
+	if (!iwl_trans_grab_nic_access(trans, &flags))
+		return;
+
+	/* Stop Rx DMA */
+	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
+	/* disable free and used rx queue operation */
+	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);
+
+	for (i = 0; i < trans->num_rx_queues; i++) {
+		/* Tell device where to find RBD free table in DRAM */
+		iwl_write_prph64_no_grab(trans,
+					 RFH_Q_FRBDCB_BA_LSB(i),
+					 trans_pcie->rxq[i].bd_dma);
+		/* Tell device where to find RBD used table in DRAM */
+		iwl_write_prph64_no_grab(trans,
+					 RFH_Q_URBDCB_BA_LSB(i),
+					 trans_pcie->rxq[i].used_bd_dma);
+		/* Tell device where in DRAM to update its Rx status */
+		iwl_write_prph64_no_grab(trans,
+					 RFH_Q_URBD_STTS_WPTR_LSB(i),
+					 trans_pcie->rxq[i].rb_stts_dma);
+		/* Reset device index tables */
+		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
+		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
+		iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);
+
+		enabled |= BIT(i) | BIT(i + 16);
+	}
+
+	/*
+	 * Enable Rx DMA
+	 * Rx buffer size 4 or 8k or 12k
+	 * Min RB size 4 or 8
+	 * Drop frames that exceed RB size
+	 * 512 RBDs
+	 */
+	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
+			       RFH_DMA_EN_ENABLE_VAL | rb_size |
+			       RFH_RXF_DMA_MIN_RB_4_8 |
+			       RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
+			       RFH_RXF_DMA_RBDCB_SIZE_512);
+
+	/*
+	 * Activate DMA snooping.
+	 * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe
+	 * Default queue is 0
+	 */
+	iwl_write_prph_no_grab(trans, RFH_GEN_CFG,
+			       RFH_GEN_CFG_RFH_DMA_SNOOP |
+			       RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
+			       RFH_GEN_CFG_SERVICE_DMA_SNOOP |
+			       RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
+					       trans->cfg->integrated ?
+					       RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
+					       RFH_GEN_CFG_RB_CHUNK_SIZE_128));
+	/* Enable the relevant rx queues */
+	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);
+
+	iwl_trans_release_nic_access(trans, &flags);
+
+	/* Set interrupt coalescing timer to default (2048 usecs) */
+	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+
+	iwl_pcie_enable_rx_wake(trans, true);
+}
+
+void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
+{
+	lockdep_assert_held(&rxq->lock);
+
+	INIT_LIST_HEAD(&rxq->rx_free);
+	INIT_LIST_HEAD(&rxq->rx_used);
+	rxq->free_count = 0;
+	rxq->used_count = 0;
+}
+
+int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
+{
+	WARN_ON(1);
+	return 0;
+}
+
+static int _iwl_pcie_rx_init(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rxq *def_rxq;
+	struct iwl_rb_allocator *rba = &trans_pcie->rba;
+	int i, err, queue_size, allocator_pool_size, num_alloc;
+
+	if (!trans_pcie->rxq) {
+		err = iwl_pcie_rx_alloc(trans);
+		if (err)
+			return err;
+	}
+	def_rxq = trans_pcie->rxq;
+
+	cancel_work_sync(&rba->rx_alloc);
+
+	spin_lock(&rba->lock);
+	atomic_set(&rba->req_pending, 0);
+	atomic_set(&rba->req_ready, 0);
+	INIT_LIST_HEAD(&rba->rbd_allocated);
+	INIT_LIST_HEAD(&rba->rbd_empty);
+	spin_unlock(&rba->lock);
+
+	/* free all first - we might be reconfigured for a different size */
+	iwl_pcie_free_rbs_pool(trans);
+
+	for (i = 0; i < RX_QUEUE_SIZE; i++)
+		def_rxq->queue[i] = NULL;
+
+	for (i = 0; i < trans->num_rx_queues; i++) {
+		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+
+		rxq->id = i;
+
+		spin_lock(&rxq->lock);
+		/*
+		 * Set read write pointer to reflect that we have processed
+		 * and used all buffers, but have not restocked the Rx queue
+		 * with fresh buffers
+		 */
+		rxq->read = 0;
+		rxq->write = 0;
+		rxq->write_actual = 0;
+		memset(rxq->rb_stts, 0,
+		       (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
+		       sizeof(__le16) : sizeof(struct iwl_rb_status));
+
+		iwl_pcie_rx_init_rxb_lists(rxq);
+
+		if (!rxq->napi.poll)
+			netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
+				       iwl_pcie_dummy_napi_poll, 64);
+
+		spin_unlock(&rxq->lock);
+	}
+
+	/* move the pool to the default queue and allocator ownerships */
+	queue_size = trans->cfg->mq_rx_supported ?
+		     MQ_RX_NUM_RBDS : RX_QUEUE_SIZE;
+	allocator_pool_size = trans->num_rx_queues *
+		(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
+	num_alloc = queue_size + allocator_pool_size;
+	BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
+		     ARRAY_SIZE(trans_pcie->rx_pool));
+	for (i = 0; i < num_alloc; i++) {
+		struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
+
+		if (i < allocator_pool_size)
+			list_add(&rxb->list, &rba->rbd_empty);
+		else
+			list_add(&rxb->list, &def_rxq->rx_used);
+		trans_pcie->global_table[i] = rxb;
+		rxb->vid = (u16)(i + 1);
+		rxb->invalid = true;
+	}
+
+	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);
+
+	return 0;
+}
+
+int iwl_pcie_rx_init(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int ret = _iwl_pcie_rx_init(trans);
+
+	if (ret)
+		return ret;
+
+	if (trans->cfg->mq_rx_supported)
+		iwl_pcie_rx_mq_hw_init(trans);
+	else
+		iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);
+
+	iwl_pcie_rxq_restock(trans, trans_pcie->rxq);
+
+	spin_lock(&trans_pcie->rxq->lock);
+	iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
+	spin_unlock(&trans_pcie->rxq->lock);
+
+	return 0;
+}
+
+int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
+{
+	/*
+	 * We don't configure the RFH.
+	 * Restock will be done at alive, after firmware configured the RFH.
+	 */
+	return _iwl_pcie_rx_init(trans);
+}
+
+void iwl_pcie_rx_free(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rb_allocator *rba = &trans_pcie->rba;
+	int i;
+
+	/*
+	 * if rxq is NULL, it means that nothing has been allocated,
+	 * exit now
+	 */
+	if (!trans_pcie->rxq) {
+		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
+		return;
+	}
+
+	cancel_work_sync(&rba->rx_alloc);
+
+	iwl_pcie_free_rbs_pool(trans);
+
+	for (i = 0; i < trans->num_rx_queues; i++) {
+		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+
+		iwl_pcie_free_rxq_dma(trans, rxq);
+
+		if (rxq->napi.poll)
+			netif_napi_del(&rxq->napi);
+	}
+	kfree(trans_pcie->rxq);
+}
+
+static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
+					  struct iwl_rb_allocator *rba)
+{
+	spin_lock(&rba->lock);
+	list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
+	spin_unlock(&rba->lock);
+}
+
+/*
+ * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
+ *
+ * Called when a RBD can be reused. The RBD is transferred to the allocator.
+ * When there are 2 empty RBDs - a request for allocation is posted
+ */
+static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
+				  struct iwl_rx_mem_buffer *rxb,
+				  struct iwl_rxq *rxq, bool emergency)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rb_allocator *rba = &trans_pcie->rba;
+
+	/* Move the RBD to the used list; it will be moved to the allocator
+	 * in batches before claiming or posting a request. */
+	list_add_tail(&rxb->list, &rxq->rx_used);
+
+	if (unlikely(emergency))
+		return;
+
+	/* Count the allocator owned RBDs */
+	rxq->used_count++;
+
+	/* If we have RX_POST_REQ_ALLOC newly released rx buffers -
+	 * issue a request to the allocator. The modulo by RX_CLAIM_REQ_ALLOC
+	 * covers the case where we failed to claim RX_CLAIM_REQ_ALLOC
+	 * buffers earlier but still need to post another request.
+	 */
+	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
+		/* Move the 2 RBDs to the allocator's ownership. The allocator
+		 * has another 6 from the pool for the request completion. */
+		iwl_pcie_rx_move_to_allocator(rxq, rba);
+
+		atomic_inc(&rba->req_pending);
+		queue_work(rba->alloc_wq, &rba->rx_alloc);
+	}
+}
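
Given the batch sizes from the theory-of-operation comment (a request is
posted once 2 RBDs are used, claims take 8 at a time), the modulo test fires
on the 2nd, 10th, 18th, ... reused RBD, so a request is still posted even when
an earlier claim was missed. A sketch with illustrative constants:

#define TOY_CLAIM_REQ_ALLOC	8	/* values per the commentary above */
#define TOY_POST_REQ_ALLOC	2

static int toy_should_post(unsigned int used_count)
{
	return (used_count % TOY_CLAIM_REQ_ALLOC) == TOY_POST_REQ_ALLOC;
}
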
+
+static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
+				struct iwl_rxq *rxq,
+				struct iwl_rx_mem_buffer *rxb,
+				bool emergency)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+	bool page_stolen = false;
+	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
+	u32 offset = 0;
+
+	if (WARN_ON(!rxb))
+		return;
+
+	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);
+
+	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
+		struct iwl_rx_packet *pkt;
+		u16 sequence;
+		bool reclaim;
+		int index, cmd_index, len;
+		struct iwl_rx_cmd_buffer rxcb = {
+			._offset = offset,
+			._rx_page_order = trans_pcie->rx_page_order,
+			._page = rxb->page,
+			._page_stolen = false,
+			.truesize = max_len,
+		};
+
+		pkt = rxb_addr(&rxcb);
+
+		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) {
+			IWL_DEBUG_RX(trans,
+				     "Q %d: RB end marker at offset %d\n",
+				     rxq->id, offset);
+			break;
+		}
+
+		WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
+			FH_RSCSR_RXQ_POS != rxq->id,
+		     "frame on invalid queue - is on %d and indicates %d\n",
+		     rxq->id,
+		     (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
+			FH_RSCSR_RXQ_POS);
+
+		IWL_DEBUG_RX(trans,
+			     "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n",
+			     rxq->id, offset,
+			     iwl_get_cmd_string(trans,
+						iwl_cmd_id(pkt->hdr.cmd,
+							   pkt->hdr.group_id,
+							   0)),
+			     pkt->hdr.group_id, pkt->hdr.cmd,
+			     le16_to_cpu(pkt->hdr.sequence));
+
+		len = iwl_rx_packet_len(pkt);
+		len += sizeof(u32); /* account for status word */
+		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
+		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);
+
+		/* Reclaim a command buffer only if this packet is a response
+		 *   to a (driver-originated) command.
+		 * If the packet (e.g. Rx frame) originated from uCode,
+		 *   there is no command buffer to reclaim.
+		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
+		 *   but apparently a few don't get set; catch them here. */
+		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
+		if (reclaim && !pkt->hdr.group_id) {
+			int i;
+
+			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
+				if (trans_pcie->no_reclaim_cmds[i] ==
+							pkt->hdr.cmd) {
+					reclaim = false;
+					break;
+				}
+			}
+		}
+
+		sequence = le16_to_cpu(pkt->hdr.sequence);
+		index = SEQ_TO_INDEX(sequence);
+		cmd_index = iwl_pcie_get_cmd_index(txq, index);
+
+		if (rxq->id == 0)
+			iwl_op_mode_rx(trans->op_mode, &rxq->napi,
+				       &rxcb);
+		else
+			iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
+					   &rxcb, rxq->id);
+
+		if (reclaim) {
+			kzfree(txq->entries[cmd_index].free_buf);
+			txq->entries[cmd_index].free_buf = NULL;
+		}
+
+		/*
+		 * After here, we should always check rxcb._page_stolen,
+		 * if it is true then one of the handlers took the page.
+		 */
+
+		if (reclaim) {
+			/* Invoke any callbacks, transfer the buffer to caller,
+			 * and fire off the (possibly) blocking
+			 * iwl_trans_send_cmd()
+			 * as we reclaim the driver command queue */
+			if (!rxcb._page_stolen)
+				iwl_pcie_hcmd_complete(trans, &rxcb);
+			else
+				IWL_WARN(trans, "Claim null rxb?\n");
+		}
+
+		page_stolen |= rxcb._page_stolen;
+		if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+			break;
+		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
+	}
+
+	/* page was stolen from us -- free our reference */
+	if (page_stolen) {
+		__free_pages(rxb->page, trans_pcie->rx_page_order);
+		rxb->page = NULL;
+	}
+
+	/* Reuse the page if possible. For notification packets and
+	 * SKBs that fail to Rx correctly, add them back into the
+	 * rx_free list for reuse later. */
+	if (rxb->page != NULL) {
+		rxb->page_dma =
+			dma_map_page(trans->dev, rxb->page, 0,
+				     PAGE_SIZE << trans_pcie->rx_page_order,
+				     DMA_FROM_DEVICE);
+		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+			/*
+			 * free the page(s) as well to not break
+			 * the invariant that the items on the used
+			 * list have no page(s)
+			 */
+			__free_pages(rxb->page, trans_pcie->rx_page_order);
+			rxb->page = NULL;
+			iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
+		} else {
+			list_add_tail(&rxb->list, &rxq->rx_free);
+			rxq->free_count++;
+		}
+	} else
+		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
+}
+
+static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
+						  struct iwl_rxq *rxq, int i)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rx_mem_buffer *rxb;
+	u16 vid;
+
+	if (!trans->cfg->mq_rx_supported) {
+		rxb = rxq->queue[i];
+		rxq->queue[i] = NULL;
+		return rxb;
+	}
+
+	/* used_bd entries are 32 or 16 bits wide, but only 12 bits carry the vid */
+	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+		vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF;
+	else
+		vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF;
+
+	if (!vid || vid > ARRAY_SIZE(trans_pcie->global_table))
+		goto out_err;
+
+	rxb = trans_pcie->global_table[vid - 1];
+	if (rxb->invalid)
+		goto out_err;
+
+	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+		rxb->size = le32_to_cpu(rxq->cd[i].size) & IWL_RX_CD_SIZE;
+
+	rxb->invalid = true;
+
+	return rxb;
+
+out_err:
+	WARN(1, "Invalid rxb from HW %u\n", (u32)vid);
+	iwl_force_nmi(trans);
+	return NULL;
+}
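
A condensed model of the vid scheme: vids are 1-based so 0 can never be a
valid value from the hardware, and the lookup subtracts 1. Table size and
names are illustrative:

#include <stddef.h>

#define TOY_TABLE_SIZE 512

static void *toy_vid_lookup(void *table[], unsigned int raw)
{
	unsigned int vid = raw & 0x0FFF;	/* only 12 bits carry the vid */

	if (!vid || vid > TOY_TABLE_SIZE)
		return NULL;			/* 0 is never handed out */
	return table[vid - 1];			/* vids are 1-based */
}
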
+
+/*
+ * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
+ */
+static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rxq *rxq = &trans_pcie->rxq[queue];
+	u32 r, i, count = 0;
+	bool emergency = false;
+
+restart:
+	spin_lock(&rxq->lock);
+	/* uCode's read index (stored in shared DRAM) indicates the last Rx
+	 * buffer that the driver may process (last buffer filled by ucode). */
+	r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
+	i = rxq->read;
+
+	/* W/A 9000 device step A0 wrap-around bug */
+	r &= (rxq->queue_size - 1);
+
+	/* Rx interrupt, but nothing sent from uCode */
+	if (i == r)
+		IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
+
+	while (i != r) {
+		struct iwl_rb_allocator *rba = &trans_pcie->rba;
+		struct iwl_rx_mem_buffer *rxb;
+		/* number of RBDs still waiting for page allocation */
+		u32 rb_pending_alloc =
+			atomic_read(&trans_pcie->rba.req_pending) *
+			RX_CLAIM_REQ_ALLOC;
+
+		if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 &&
+			     !emergency)) {
+			iwl_pcie_rx_move_to_allocator(rxq, rba);
+			emergency = true;
+		}
+
+		rxb = iwl_pcie_get_rxb(trans, rxq, i);
+		if (!rxb)
+			goto out;
+
+		IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
+		iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency);
+
+		i = (i + 1) & (rxq->queue_size - 1);
+
+		/*
+		 * If we have RX_CLAIM_REQ_ALLOC released rx buffers -
+		 * try to claim the pre-allocated buffers from the allocator.
+		 * If not ready - will try to reclaim next time.
+		 * There is no need to reschedule work - allocator exits only
+		 * on success
+		 */
+		if (rxq->used_count >= RX_CLAIM_REQ_ALLOC)
+			iwl_pcie_rx_allocator_get(trans, rxq);
+
+		if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
+			/* Add the remaining empty RBDs for allocator use */
+			iwl_pcie_rx_move_to_allocator(rxq, rba);
+		} else if (emergency) {
+			count++;
+			if (count == 8) {
+				count = 0;
+				if (rb_pending_alloc < rxq->queue_size / 3)
+					emergency = false;
+
+				rxq->read = i;
+				spin_unlock(&rxq->lock);
+				iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
+				iwl_pcie_rxq_restock(trans, rxq);
+				goto restart;
+			}
+		}
+	}
+out:
+	/* Backtrack one entry */
+	rxq->read = i;
+	/* update cr tail with the rxq read pointer */
+	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+		*rxq->cr_tail = cpu_to_le16(r);
+	spin_unlock(&rxq->lock);
+
+	/*
+	 * handle a case where in emergency there are some unallocated RBDs.
+	 * those RBDs are in the used list, but are not tracked by the queue's
+	 * used_count which counts allocator owned RBDs.
+	 * unallocated emergency RBDs must be allocated on exit, otherwise
+	 * when called again the function may not be in emergency mode and
+	 * they will be handed to the allocator with no tracking in the RBD
+	 * allocator counters, which will lead to them never being claimed back
+	 * by the queue.
+	 * by allocating them here, they are now in the queue free list, and
+	 * will be restocked by the next call of iwl_pcie_rxq_restock.
+	 */
+	if (unlikely(emergency && count))
+		iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
+
+	if (rxq->napi.poll)
+		napi_gro_flush(&rxq->napi, false);
+
+	iwl_pcie_rxq_restock(trans, rxq);
+}
+
+static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
+{
+	u8 queue = entry->entry;
+	struct msix_entry *entries = entry - queue;
+
+	return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
+}
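
The pointer arithmetic relies on entry->entry being the element's own index
within msix_entries[], so subtracting it steps back to element 0, from which
container_of() recovers the enclosing structure. A standalone model with
hypothetical types:

#include <stddef.h>

#define toy_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_entry { unsigned int entry; };

struct toy_trans {
	int other_state;
	struct toy_entry entries[4];
};

static struct toy_trans *toy_get_trans(struct toy_entry *e)
{
	struct toy_entry *first = e - e->entry;	/* back to entries[0] */

	return toy_container_of(first, struct toy_trans, entries);
}
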
+
+/*
+ * iwl_pcie_irq_rx_msix_handler - Main entry function for receiving responses from fw
+ * This interrupt handler should be used with RSS queue only.
+ */
+irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
+{
+	struct msix_entry *entry = dev_id;
+	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
+	struct iwl_trans *trans = trans_pcie->trans;
+
+	trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0);
+
+	if (WARN_ON(entry->entry >= trans->num_rx_queues))
+		return IRQ_NONE;
+
+	lock_map_acquire(&trans->sync_cmd_lockdep_map);
+
+	local_bh_disable();
+	iwl_pcie_rx_handle(trans, entry->entry);
+	local_bh_enable();
+
+	iwl_pcie_clear_irq(trans, entry);
+
+	lock_map_release(&trans->sync_cmd_lockdep_map);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
+ */
+static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int i;
+
+	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
+	if (trans->cfg->internal_wimax_coex &&
+	    !trans->cfg->apmg_not_supported &&
+	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
+			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
+	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
+			    APMG_PS_CTRL_VAL_RESET_REQ))) {
+		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+		iwl_op_mode_wimax_active(trans->op_mode);
+		wake_up(&trans_pcie->wait_command_queue);
+		return;
+	}
+
+	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
+		if (!trans_pcie->txq[i])
+			continue;
+		del_timer(&trans_pcie->txq[i]->stuck_timer);
+	}
+
+	/* The STATUS_FW_ERROR bit is set in this function. This must happen
+	 * before we wake up the command caller, to ensure a proper cleanup. */
+	iwl_trans_fw_error(trans);
+
+	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+	wake_up(&trans_pcie->wait_command_queue);
+}
+
+static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
+{
+	u32 inta;
+
+	lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
+
+	trace_iwlwifi_dev_irq(trans->dev);
+
+	/* Discover which interrupts are active/pending */
+	inta = iwl_read32(trans, CSR_INT);
+
+	/* the thread will service interrupts and re-enable them */
+	return inta;
+}
+
+/* a device (PCI-E) page is 4096 bytes long */
+#define ICT_SHIFT	12
+#define ICT_SIZE	(1 << ICT_SHIFT)
+#define ICT_COUNT	(ICT_SIZE / sizeof(u32))
+
+/* Interrupt handler using the ICT table. With this handler the driver
+ * stops using the INTA register to get the device's interrupt cause, as
+ * reading that register is expensive. Instead, the device writes its
+ * interrupt causes into the ICT DRAM table and increments an index, then
+ * fires the interrupt. The driver ORs all ICT table entries from the
+ * current index up to the first entry with a 0 value; the result is the
+ * set of interrupts to service. The driver then sets the entries back to
+ * 0 and updates the index.
+ */
+static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u32 inta;
+	u32 val = 0;
+	u32 read;
+
+	trace_iwlwifi_dev_irq(trans->dev);
+
+	/* Ignore interrupt if there's nothing in NIC to service.
+	 * This may be due to IRQ shared with another device,
+	 * or due to sporadic interrupts thrown from our NIC. */
+	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
+	if (!read)
+		return 0;
+
+	/*
+	 * Collect all entries up to the first 0, starting from ict_index;
+	 * note we already read at ict_index.
+	 */
+	do {
+		val |= read;
+		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
+				trans_pcie->ict_index, read);
+		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
+		trans_pcie->ict_index =
+			((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
+
+		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
+					   read);
+	} while (read);
+
+	/* We should not get this value, just ignore it. */
+	if (val == 0xffffffff)
+		val = 0;
+
+	/*
+	 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
+	 * (bit 15 before shifting it to 31) to clear when using interrupt
+	 * coalescing. fortunately, bits 18 and 19 stay set when this happens
+	 * so we use them to decide on the real state of the Rx bit.
+	 * In other words, bit 15 is set if bit 18 or bit 19 is set.
+	 */
+	if (val & 0xC0000)
+		val |= 0x8000;
+
+	inta = (0xff & val) | ((0xff00 & val) << 16);
+	return inta;
+}
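
Each ICT entry packs two byte lanes of CSR_INT into its low 16 bits, and the
final line above moves the high byte back up to bits 24..31. A self-contained
check of the unpacking, using a hypothetical value:

#include <stdint.h>
#include <assert.h>

static uint32_t toy_ict_to_inta(uint32_t val)
{
	return (0xff & val) | ((0xff00 & val) << 16);
}

int main(void)
{
	/* low byte stays put; byte 1 moves up to bits 24..31 */
	assert(toy_ict_to_inta(0x8042) == 0x80000042);
	return 0;
}
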
+
+void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
+	bool hw_rfkill, prev, report;
+
+	mutex_lock(&trans_pcie->mutex);
+	prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
+	hw_rfkill = iwl_is_rfkill_set(trans);
+	if (hw_rfkill) {
+		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
+		set_bit(STATUS_RFKILL_HW, &trans->status);
+	}
+	if (trans_pcie->opmode_down)
+		report = hw_rfkill;
+	else
+		report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
+
+	IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
+		 hw_rfkill ? "disable radio" : "enable radio");
+
+	isr_stats->rfkill++;
+
+	if (prev != report)
+		iwl_trans_pcie_rf_kill(trans, report);
+	mutex_unlock(&trans_pcie->mutex);
+
+	if (hw_rfkill) {
+		if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
+				       &trans->status))
+			IWL_DEBUG_RF_KILL(trans,
+					  "Rfkill while SYNC HCMD in flight\n");
+		wake_up(&trans_pcie->wait_command_queue);
+	} else {
+		clear_bit(STATUS_RFKILL_HW, &trans->status);
+		if (trans_pcie->opmode_down)
+			clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
+	}
+}
+
+irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
+{
+	struct iwl_trans *trans = dev_id;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
+	u32 inta = 0;
+	u32 handled = 0;
+
+	lock_map_acquire(&trans->sync_cmd_lockdep_map);
+
+	spin_lock(&trans_pcie->irq_lock);
+
+	/* dram interrupt table not set yet,
+	 * use legacy interrupt.
+	 */
+	if (likely(trans_pcie->use_ict))
+		inta = iwl_pcie_int_cause_ict(trans);
+	else
+		inta = iwl_pcie_int_cause_non_ict(trans);
+
+	if (iwl_have_debug_level(IWL_DL_ISR)) {
+		IWL_DEBUG_ISR(trans,
+			      "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
+			      inta, trans_pcie->inta_mask,
+			      iwl_read32(trans, CSR_INT_MASK),
+			      iwl_read32(trans, CSR_FH_INT_STATUS));
+		if (inta & (~trans_pcie->inta_mask))
+			IWL_DEBUG_ISR(trans,
+				      "We got a masked interrupt (0x%08x)\n",
+				      inta & (~trans_pcie->inta_mask));
+	}
+
+	inta &= trans_pcie->inta_mask;
+
+	/*
+	 * Ignore interrupt if there's nothing in NIC to service.
+	 * This may be due to IRQ shared with another device,
+	 * or due to sporadic interrupts thrown from our NIC.
+	 */
+	if (unlikely(!inta)) {
+		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
+		/*
+		 * Re-enable interrupts here since we don't
+		 * have anything to service
+		 */
+		if (test_bit(STATUS_INT_ENABLED, &trans->status))
+			_iwl_enable_interrupts(trans);
+		spin_unlock(&trans_pcie->irq_lock);
+		lock_map_release(&trans->sync_cmd_lockdep_map);
+		return IRQ_NONE;
+	}
+
+	if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
+		/*
+		 * Hardware disappeared. It might have
+		 * already raised an interrupt.
+		 */
+		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
+		spin_unlock(&trans_pcie->irq_lock);
+		goto out;
+	}
+
+	/* Ack/clear/reset pending uCode interrupts.
+	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
+	 */
+	/* There is a hardware bug in the interrupt mask function that some
+	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
+	 * they are disabled in the CSR_INT_MASK register. Furthermore the
+	 * ICT interrupt handling mechanism has another bug that might cause
+	 * these unmasked interrupts to fail to be detected. We work around the
+	 * hardware bugs here by ACKing all the possible interrupts so that
+	 * interrupt coalescing can still be achieved.
+	 */
+	iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);
+
+	if (iwl_have_debug_level(IWL_DL_ISR))
+		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
+			      inta, iwl_read32(trans, CSR_INT_MASK));
+
+	spin_unlock(&trans_pcie->irq_lock);
+
+	/* Now service all interrupt bits discovered above. */
+	if (inta & CSR_INT_BIT_HW_ERR) {
+		IWL_ERR(trans, "Hardware error detected.  Restarting.\n");
+
+		/* Tell the device to stop sending interrupts */
+		iwl_disable_interrupts(trans);
+
+		isr_stats->hw++;
+		iwl_pcie_irq_handle_error(trans);
+
+		handled |= CSR_INT_BIT_HW_ERR;
+
+		goto out;
+	}
+
+	if (iwl_have_debug_level(IWL_DL_ISR)) {
+		/* NIC fires this, but we don't use it, redundant with WAKEUP */
+		if (inta & CSR_INT_BIT_SCD) {
+			IWL_DEBUG_ISR(trans,
+				      "Scheduler finished to transmit the frame/frames.\n");
+			isr_stats->sch++;
+		}
+
+		/* Alive notification via Rx interrupt will do the real work */
+		if (inta & CSR_INT_BIT_ALIVE) {
+			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
+			isr_stats->alive++;
+			if (trans->cfg->gen2) {
+				/*
+				 * We can restock, since firmware configured
+				 * the RFH
+				 */
+				iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
+			}
+		}
+	}
+
+	/* Safely ignore these bits for debug checks below */
+	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
+
+	/* HW RF KILL switch toggled */
+	if (inta & CSR_INT_BIT_RF_KILL) {
+		iwl_pcie_handle_rfkill_irq(trans);
+		handled |= CSR_INT_BIT_RF_KILL;
+	}
+
+	/* Chip got too hot and stopped itself */
+	if (inta & CSR_INT_BIT_CT_KILL) {
+		IWL_ERR(trans, "Microcode CT kill error detected.\n");
+		isr_stats->ctkill++;
+		handled |= CSR_INT_BIT_CT_KILL;
+	}
+
+	/* Error detected by uCode */
+	if (inta & CSR_INT_BIT_SW_ERR) {
+		IWL_ERR(trans, "Microcode SW error detected. Restarting 0x%X.\n",
+			inta);
+		isr_stats->sw++;
+		iwl_pcie_irq_handle_error(trans);
+		handled |= CSR_INT_BIT_SW_ERR;
+	}
+
+	/* uCode wakes up after power-down sleep */
+	if (inta & CSR_INT_BIT_WAKEUP) {
+		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
+		iwl_pcie_rxq_check_wrptr(trans);
+		iwl_pcie_txq_check_wrptrs(trans);
+
+		isr_stats->wakeup++;
+
+		handled |= CSR_INT_BIT_WAKEUP;
+	}
+
+	/* All uCode command responses, including Tx command responses,
+	 * Rx "responses" (frame-received notification), and other
+	 * notifications from uCode come through here*/
+	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
+		    CSR_INT_BIT_RX_PERIODIC)) {
+		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
+		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
+			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
+			iwl_write32(trans, CSR_FH_INT_STATUS,
+					CSR_FH_INT_RX_MASK);
+		}
+		if (inta & CSR_INT_BIT_RX_PERIODIC) {
+			handled |= CSR_INT_BIT_RX_PERIODIC;
+			iwl_write32(trans,
+				CSR_INT, CSR_INT_BIT_RX_PERIODIC);
+		}
+		/* Sending an RX interrupt requires many steps to be done
+		 * in the device:
+		 * 1- write interrupt to current index in ICT table.
+		 * 2- dma RX frame.
+		 * 3- update RX shared data to indicate last write index.
+		 * 4- send interrupt.
+		 * This could lead to an RX race: the driver could receive an
+		 * RX interrupt before the shared data reflects it; the
+		 * periodic interrupt will detect any dangling Rx activity.
+		 */
+
+		/* Disable periodic interrupt; we use it as just a one-shot. */
+		iwl_write8(trans, CSR_INT_PERIODIC_REG,
+			    CSR_INT_PERIODIC_DIS);
+
+		/*
+		 * Enable periodic interrupt in 8 msec only if we received
+		 * real RX interrupt (instead of just periodic int), to catch
+		 * any dangling Rx interrupt.  If it was just the periodic
+		 * interrupt, there was no dangling Rx activity, and no need
+		 * to extend the periodic interrupt; one-shot is enough.
+		 */
+		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
+			iwl_write8(trans, CSR_INT_PERIODIC_REG,
+				   CSR_INT_PERIODIC_ENA);
+
+		isr_stats->rx++;
+
+		local_bh_disable();
+		iwl_pcie_rx_handle(trans, 0);
+		local_bh_enable();
+	}
+
+	/* This "Tx" DMA channel is used only for loading uCode */
+	if (inta & CSR_INT_BIT_FH_TX) {
+		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
+		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
+		isr_stats->tx++;
+		handled |= CSR_INT_BIT_FH_TX;
+		/* Wake up uCode load routine, now that load is complete */
+		trans_pcie->ucode_write_complete = true;
+		wake_up(&trans_pcie->ucode_write_waitq);
+	}
+
+	if (inta & ~handled) {
+		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
+		isr_stats->unhandled++;
+	}
+
+	if (inta & ~(trans_pcie->inta_mask)) {
+		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
+			 inta & ~trans_pcie->inta_mask);
+	}
+
+	spin_lock(&trans_pcie->irq_lock);
+	/* only re-enable all interrupts if they were disabled by the irq */
+	if (test_bit(STATUS_INT_ENABLED, &trans->status))
+		_iwl_enable_interrupts(trans);
+	/* we are loading the firmware, enable FH_TX interrupt only */
+	else if (handled & CSR_INT_BIT_FH_TX)
+		iwl_enable_fw_load_int(trans);
+	/* Re-enable RF_KILL if it occurred */
+	else if (handled & CSR_INT_BIT_RF_KILL)
+		iwl_enable_rfkill_int(trans);
+	spin_unlock(&trans_pcie->irq_lock);
+
+out:
+	lock_map_release(&trans->sync_cmd_lockdep_map);
+	return IRQ_HANDLED;
+}
+
+/******************************************************************************
+ *
+ * ICT functions
+ *
+ ******************************************************************************/
+
+/* Free dram table */
+void iwl_pcie_free_ict(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	if (trans_pcie->ict_tbl) {
+		dma_free_coherent(trans->dev, ICT_SIZE,
+				  trans_pcie->ict_tbl,
+				  trans_pcie->ict_tbl_dma);
+		trans_pcie->ict_tbl = NULL;
+		trans_pcie->ict_tbl_dma = 0;
+	}
+}
+
+/*
+ * Allocate the DRAM-shared ICT table: an aligned memory block of
+ * ICT_SIZE bytes. Also reset all data related to ICT-table interrupts.
+ */
+int iwl_pcie_alloc_ict(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	trans_pcie->ict_tbl =
+		dma_zalloc_coherent(trans->dev, ICT_SIZE,
+				   &trans_pcie->ict_tbl_dma,
+				   GFP_KERNEL);
+	if (!trans_pcie->ict_tbl)
+		return -ENOMEM;
+
+	/* just an API sanity check ... it is guaranteed to be aligned */
+	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
+		iwl_pcie_free_ict(trans);
+		return -EINVAL;
+	}
+
+	return 0;
+}
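+
+/*
+ * Note on the alignment check above: the table's DMA address is later
+ * programmed into CSR_DRAM_INT_TBL_REG shifted right by ICT_SHIFT (see
+ * iwl_pcie_reset_ict() below), with the freed-up low bits reused for
+ * control flags. Assuming ICT_SIZE == BIT(ICT_SHIFT), as that shift
+ * suggests, an unaligned block would lose address bits in the shift,
+ * hence the WARN_ON() rather than silent acceptance.
+ */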
+
+/* Device is going up: inform it that the ICT interrupt table will be
+ * used, and tell the driver to start using ICT interrupts.
+ */
+void iwl_pcie_reset_ict(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u32 val;
+
+	if (!trans_pcie->ict_tbl)
+		return;
+
+	spin_lock(&trans_pcie->irq_lock);
+	_iwl_disable_interrupts(trans);
+
+	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
+
+	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
+
+	val |= CSR_DRAM_INT_TBL_ENABLE |
+	       CSR_DRAM_INIT_TBL_WRAP_CHECK |
+	       CSR_DRAM_INIT_TBL_WRITE_POINTER;
+
+	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG = 0x%x\n", val);
+
+	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
+	trans_pcie->use_ict = true;
+	trans_pcie->ict_index = 0;
+	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
+	_iwl_enable_interrupts(trans);
+	spin_unlock(&trans_pcie->irq_lock);
+}
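+
+/*
+ * Note on the sequence above: the whole switch to ICT mode happens with
+ * interrupts disabled under irq_lock - zero the table, program the
+ * (ICT_SHIFT-compressed) DMA address plus the enable/wrap-check/
+ * write-pointer flags into CSR_DRAM_INT_TBL_REG, then ACK any causes that
+ * fired in the meantime by writing inta_mask to CSR_INT before
+ * re-enabling. That last write relies on CSR_INT being write-1-to-clear,
+ * as the ACKs in the interrupt handlers above also assume.
+ */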
+
+/* Device is going down: disable ICT interrupt usage */
+void iwl_pcie_disable_ict(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	spin_lock(&trans_pcie->irq_lock);
+	trans_pcie->use_ict = false;
+	spin_unlock(&trans_pcie->irq_lock);
+}
+
+irqreturn_t iwl_pcie_isr(int irq, void *data)
+{
+	struct iwl_trans *trans = data;
+
+	if (!trans)
+		return IRQ_NONE;
+
+	/* Disable (but don't clear!) interrupts here to avoid
+	 * back-to-back ISRs and sporadic interrupts from our NIC.
+	 * If we have something to service, the tasklet will re-enable ints.
+	 * If we *don't* have something, we'll re-enable before leaving here.
+	 */
+	iwl_write32(trans, CSR_INT_MASK, 0x00000000);
+
+	return IRQ_WAKE_THREAD;
+}
+
+irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
+{
+	return IRQ_WAKE_THREAD;
+}
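+
+/*
+ * Both ISRs above follow the threaded-interrupt pattern: the hard-irq half
+ * does the bare minimum - for the non-MSI-X case, masking CSR_INT_MASK so
+ * the line stops re-asserting, per the comment in iwl_pcie_isr() - and
+ * returns IRQ_WAKE_THREAD, deferring all cause handling to the threaded
+ * handlers (the legacy handler above and iwl_pcie_irq_msix_handler below).
+ */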
+
+irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
+{
+	struct msix_entry *entry = dev_id;
+	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
+	struct iwl_trans *trans = trans_pcie->trans;
+	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
+	u32 inta_fh, inta_hw;
+
+	lock_map_acquire(&trans->sync_cmd_lockdep_map);
+
+	spin_lock(&trans_pcie->irq_lock);
+	inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
+	inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
+	/*
+	 * Clear the causes registers to avoid handling the same cause twice.
+	 */
+	iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
+	iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
+	spin_unlock(&trans_pcie->irq_lock);
+
+	trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw);
+
+	if (unlikely(!(inta_fh | inta_hw))) {
+		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
+		lock_map_release(&trans->sync_cmd_lockdep_map);
+		return IRQ_NONE;
+	}
+
+	if (iwl_have_debug_level(IWL_DL_ISR))
+		IWL_DEBUG_ISR(trans, "ISR inta_fh 0x%08x, enabled 0x%08x\n",
+			      inta_fh,
+			      iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
+
+	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
+	    inta_fh & MSIX_FH_INT_CAUSES_Q0) {
+		local_bh_disable();
+		iwl_pcie_rx_handle(trans, 0);
+		local_bh_enable();
+	}
+
+	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
+	    inta_fh & MSIX_FH_INT_CAUSES_Q1) {
+		local_bh_disable();
+		iwl_pcie_rx_handle(trans, 1);
+		local_bh_enable();
+	}
+
+	/* This "Tx" DMA channel is used only for loading uCode */
+	if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
+		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
+		isr_stats->tx++;
+		/*
+		 * Wake up uCode load routine,
+		 * now that load is complete
+		 */
+		trans_pcie->ucode_write_complete = true;
+		wake_up(&trans_pcie->ucode_write_waitq);
+	}
+
+	/* Error detected by uCode */
+	if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
+	    (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
+	    (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
+		IWL_ERR(trans,
+			"Microcode SW error detected. Restarting 0x%X.\n",
+			inta_fh);
+		isr_stats->sw++;
+		iwl_pcie_irq_handle_error(trans);
+	}
+
+	/* After checking the FH register, check the HW register */
+	if (iwl_have_debug_level(IWL_DL_ISR))
+		IWL_DEBUG_ISR(trans,
+			      "ISR inta_hw 0x%08x, enabled 0x%08x\n",
+			      inta_hw,
+			      iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
+
+	/* Alive notification via Rx interrupt will do the real work */
+	if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
+		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
+		isr_stats->alive++;
+		if (trans->cfg->gen2) {
+			/* We can restock, since firmware configured the RFH */
+			iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
+		}
+	}
+
+	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560 &&
+	    inta_hw & MSIX_HW_INT_CAUSES_REG_IPC) {
+		/* Reflect IML transfer status */
+		int res = iwl_read32(trans, CSR_IML_RESP_ADDR);
+
+		IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res);
+		if (res == IWL_IMAGE_RESP_FAIL) {
+			isr_stats->sw++;
+			iwl_pcie_irq_handle_error(trans);
+		}
+	} else if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
+		/* uCode wakes up after power-down sleep */
+		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
+		iwl_pcie_rxq_check_wrptr(trans);
+		iwl_pcie_txq_check_wrptrs(trans);
+
+		isr_stats->wakeup++;
+	}
+
+	/* Chip got too hot and stopped itself */
+	if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
+		IWL_ERR(trans, "Microcode CT kill error detected.\n");
+		isr_stats->ctkill++;
+	}
+
+	/* HW RF KILL switch toggled */
+	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
+		iwl_pcie_handle_rfkill_irq(trans);
+
+	if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
+		IWL_ERR(trans,
+			"Hardware error detected. Restarting.\n");
+
+		isr_stats->hw++;
+		iwl_pcie_irq_handle_error(trans);
+	}
+
+	iwl_pcie_clear_irq(trans, entry);
+
+	lock_map_release(&trans->sync_cmd_lockdep_map);
+
+	return IRQ_HANDLED;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
new file mode 100644
index 0000000..2bc6721
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -0,0 +1,368 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include "iwl-trans.h"
+#include "iwl-prph.h"
+#include "iwl-context-info.h"
+#include "iwl-context-info-gen3.h"
+#include "internal.h"
+
+/*
+ * Start up NIC's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
+ * NOTE:  This does not load uCode nor start the embedded processor
+ */
+static int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)
+{
+	int ret = 0;
+
+	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");
+
+	/*
+	 * Use "set_bit" below rather than "write", to preserve any hardware
+	 * bits already set by default after reset.
+	 */
+
+	/*
+	 * Disable L0s without affecting L1;
+	 * don't wait for ICH L0s (ICH bug W/A)
+	 */
+	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
+		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
+
+	/* Set FH wait threshold to maximum (HW error during stress W/A) */
+	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
+
+	/*
+	 * Enable HAP INTA (interrupt from management bus) to
+	 * wake device's PCI Express link L1a -> L0s
+	 */
+	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
+
+	iwl_pcie_apm_config(trans);
+
+	/*
+	 * Set "initialization complete" bit to move adapter from
+	 * D0U* --> D0A* (powered-up active) state.
+	 */
+	iwl_set_bit(trans, CSR_GP_CNTRL,
+		    BIT(trans->cfg->csr->flag_init_done));
+
+	/*
+	 * Wait for clock stabilization; once stabilized, access to
+	 * device-internal resources is supported, e.g. iwl_write_prph()
+	 * and accesses to uCode SRAM.
+	 */
+	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+			   BIT(trans->cfg->csr->flag_mac_clock_ready),
+			   BIT(trans->cfg->csr->flag_mac_clock_ready),
+			   25000);
+	if (ret < 0) {
+		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
+		return ret;
+	}
+
+	set_bit(STATUS_DEVICE_ENABLED, &trans->status);
+
+	return 0;
+}
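+
+/*
+ * The D0U* -> D0A* handshake above is: set the "init done" flag, then
+ * poll flag_mac_clock_ready with a 25000 budget (presumably microseconds,
+ * i.e. 25 ms) before any prph/SRAM access is attempted. The non-gen2
+ * iwl_pcie_apm_init() in trans.c below performs the same handshake.
+ */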
+
+static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
+{
+	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
+
+	if (op_mode_leave) {
+		if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+			iwl_pcie_gen2_apm_init(trans);
+
+		/* inform ME that we are leaving */
+		iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+			    CSR_RESET_LINK_PWR_MGMT_DISABLED);
+		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+			    CSR_HW_IF_CONFIG_REG_PREPARE |
+			    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
+		mdelay(1);
+		iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+			      CSR_RESET_LINK_PWR_MGMT_DISABLED);
+		mdelay(5);
+	}
+
+	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
+
+	/* Stop device's DMA activity */
+	iwl_pcie_apm_stop_master(trans);
+
+	iwl_trans_sw_reset(trans);
+
+	/*
+	 * Clear "initialization complete" bit to move adapter from
+	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+	 */
+	iwl_clear_bit(trans, CSR_GP_CNTRL,
+		      BIT(trans->cfg->csr->flag_init_done));
+}
+
+void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	lockdep_assert_held(&trans_pcie->mutex);
+
+	if (trans_pcie->is_down)
+		return;
+
+	trans_pcie->is_down = true;
+
+	/* Stop dbgc before stopping device */
+	iwl_write_prph(trans, DBGC_IN_SAMPLE, 0);
+	udelay(100);
+	iwl_write_prph(trans, DBGC_OUT_CTRL, 0);
+
+	/* tell the device to stop sending interrupts */
+	iwl_disable_interrupts(trans);
+
+	/* device going down, stop using ICT table */
+	iwl_pcie_disable_ict(trans);
+
+	/*
+	 * If a HW restart happens during firmware loading,
+	 * then the firmware loading might call this function
+	 * and later it might be called again due to the
+	 * restart. So don't process again if the device is
+	 * already dead.
+	 */
+	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+		IWL_DEBUG_INFO(trans,
+			       "DEVICE_ENABLED bit was set and is now cleared\n");
+		iwl_pcie_gen2_tx_stop(trans);
+		iwl_pcie_rx_stop(trans);
+	}
+
+	iwl_pcie_ctxt_info_free_paging(trans);
+	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560)
+		iwl_pcie_ctxt_info_gen3_free(trans);
+	else
+		iwl_pcie_ctxt_info_free(trans);
+
+	/* Make sure (redundant) we've released our request to stay awake */
+	iwl_clear_bit(trans, CSR_GP_CNTRL,
+		      BIT(trans->cfg->csr->flag_mac_access_req));
+
+	/* Stop the device, and put it in low power state */
+	iwl_pcie_gen2_apm_stop(trans, false);
+
+	iwl_trans_sw_reset(trans);
+
+	/*
+	 * Upon stop, the IVAR table gets erased, so msi-x won't
+	 * work. This causes a bug in RF-KILL flows, since the interrupt
+	 * that enables radio won't fire on the correct irq, and the
+	 * driver won't be able to handle the interrupt.
+	 * Configure the IVAR table again after reset.
+	 */
+	iwl_pcie_conf_msix_hw(trans_pcie);
+
+	/*
+	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
+	 * This is a bug in certain versions of the hardware.
+	 * Certain devices also keep sending the HW RF kill interrupt all
+	 * the time unless it is ACKed, even when the interrupt should be
+	 * masked. Re-ACK all the interrupts here.
+	 */
+	iwl_disable_interrupts(trans);
+
+	/* clear all status bits */
+	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+	clear_bit(STATUS_INT_ENABLED, &trans->status);
+	clear_bit(STATUS_TPOWER_PMI, &trans->status);
+
+	/*
+	 * Even if we stop the HW, we still want the RF kill
+	 * interrupt
+	 */
+	iwl_enable_rfkill_int(trans);
+
+	/* re-take ownership to prevent other users from stealing the device */
+	iwl_pcie_prepare_card_hw(trans);
+}
+
+void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	bool was_in_rfkill;
+
+	mutex_lock(&trans_pcie->mutex);
+	trans_pcie->opmode_down = true;
+	was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
+	_iwl_trans_pcie_gen2_stop_device(trans, low_power);
+	iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
+	mutex_unlock(&trans_pcie->mutex);
+}
+
+static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	/* TODO: most of the logic can be removed in A0 - but not in Z0 */
+	spin_lock(&trans_pcie->irq_lock);
+	iwl_pcie_gen2_apm_init(trans);
+	spin_unlock(&trans_pcie->irq_lock);
+
+	iwl_op_mode_nic_config(trans->op_mode);
+
+	/* Allocate the RX queue, or reset if it is already allocated */
+	if (iwl_pcie_gen2_rx_init(trans))
+		return -ENOMEM;
+
+	/* Allocate or reset and init all Tx and Command queues */
+	if (iwl_pcie_gen2_tx_init(trans))
+		return -ENOMEM;
+
+	/* enable shadow regs in HW */
+	iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
+	IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
+
+	return 0;
+}
+
+void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	iwl_pcie_reset_ict(trans);
+
+	/* make sure all queues are not stopped/used */
+	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
+	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+
+	/* now that we got the alive notification we can free the fw image &
+	 * the context info; the paging memory cannot be freed, since the FW
+	 * will still use it
+	 */
+	iwl_pcie_ctxt_info_free(trans);
+}
+
+int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
+				 const struct fw_img *fw, bool run_in_rfkill)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	bool hw_rfkill;
+	int ret;
+
+	/* This may fail if AMT took ownership of the device */
+	if (iwl_pcie_prepare_card_hw(trans)) {
+		IWL_WARN(trans, "Exit HW not ready\n");
+		ret = -EIO;
+		goto out;
+	}
+
+	iwl_enable_rfkill_int(trans);
+
+	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
+
+	/*
+	 * We enabled the RF-Kill interrupt and the handler may very
+	 * well be running. Disable the interrupts to make sure no other
+	 * interrupt can be fired.
+	 */
+	iwl_disable_interrupts(trans);
+
+	/* Make sure it finished running */
+	iwl_pcie_synchronize_irqs(trans);
+
+	mutex_lock(&trans_pcie->mutex);
+
+	/* If platform's RF_KILL switch is NOT set to KILL */
+	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
+	if (hw_rfkill && !run_in_rfkill) {
+		ret = -ERFKILL;
+		goto out;
+	}
+
+	/* Someone called stop_device, don't try to start_fw */
+	if (trans_pcie->is_down) {
+		IWL_WARN(trans,
+			 "Can't start_fw since the HW hasn't been started\n");
+		ret = -EIO;
+		goto out;
+	}
+
+	/* make sure rfkill handshake bits are cleared */
+	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
+		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+	/* clear (again), then enable host interrupts */
+	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
+
+	ret = iwl_pcie_gen2_nic_init(trans);
+	if (ret) {
+		IWL_ERR(trans, "Unable to init nic\n");
+		goto out;
+	}
+
+	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560)
+		ret = iwl_pcie_ctxt_info_gen3_init(trans, fw);
+	else
+		ret = iwl_pcie_ctxt_info_init(trans, fw);
+	if (ret)
+		goto out;
+
+	/* re-check RF-Kill state since we may have missed the interrupt */
+	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
+	if (hw_rfkill && !run_in_rfkill)
+		ret = -ERFKILL;
+
+out:
+	mutex_unlock(&trans_pcie->mutex);
+	return ret;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
new file mode 100644
index 0000000..7d319b6
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -0,0 +1,3464 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include <linux/interrupt.h>
+#include <linux/debugfs.h>
+#include <linux/sched.h>
+#include <linux/bitops.h>
+#include <linux/gfp.h>
+#include <linux/vmalloc.h>
+#include <linux/pm_runtime.h>
+#include <linux/module.h>
+
+#include "iwl-drv.h"
+#include "iwl-trans.h"
+#include "iwl-csr.h"
+#include "iwl-prph.h"
+#include "iwl-scd.h"
+#include "iwl-agn-hw.h"
+#include "fw/error-dump.h"
+#include "fw/dbg.h"
+#include "internal.h"
+#include "iwl-fh.h"
+
+/* extended range in FW SRAM */
+#define IWL_FW_MEM_EXTENDED_START	0x40000
+#define IWL_FW_MEM_EXTENDED_END		0x57FFF
+
+static void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
+{
+#define PCI_DUMP_SIZE	64
+#define PREFIX_LEN	32
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct pci_dev *pdev = trans_pcie->pci_dev;
+	u32 i, pos, alloc_size, *ptr, *buf;
+	char *prefix;
+
+	if (trans_pcie->pcie_dbg_dumped_once)
+		return;
+
+	/* Should be a multiple of 4 */
+	BUILD_BUG_ON(PCI_DUMP_SIZE > 4096 || PCI_DUMP_SIZE & 0x3);
+	/* Alloc a max size buffer */
+	if (PCI_ERR_ROOT_ERR_SRC +  4 > PCI_DUMP_SIZE)
+		alloc_size = PCI_ERR_ROOT_ERR_SRC +  4 + PREFIX_LEN;
+	else
+		alloc_size = PCI_DUMP_SIZE + PREFIX_LEN;
+	buf = kmalloc(alloc_size, GFP_ATOMIC);
+	if (!buf)
+		return;
+	prefix = (char *)buf + alloc_size - PREFIX_LEN;
+
+	IWL_ERR(trans, "iwlwifi transaction failed, dumping registers\n");
+
+	/* Print wifi device registers */
+	sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
+	IWL_ERR(trans, "iwlwifi device config registers:\n");
+	for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++)
+		if (pci_read_config_dword(pdev, i, ptr))
+			goto err_read;
+	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
+
+	IWL_ERR(trans, "iwlwifi device memory mapped registers:\n");
+	for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++)
+		*ptr = iwl_read32(trans, i);
+	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
+
+	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
+	if (pos) {
+		IWL_ERR(trans, "iwlwifi device AER capability structure:\n");
+		for (i = 0, ptr = buf; i < PCI_ERR_ROOT_COMMAND; i += 4, ptr++)
+			if (pci_read_config_dword(pdev, pos + i, ptr))
+				goto err_read;
+		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
+			       32, 4, buf, i, 0);
+	}
+
+	/* Print parent device registers next */
+	if (!pdev->bus->self)
+		goto out;
+
+	pdev = pdev->bus->self;
+	sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
+
+	IWL_ERR(trans, "iwlwifi parent port (%s) config registers:\n",
+		pci_name(pdev));
+	for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++)
+		if (pci_read_config_dword(pdev, i, ptr))
+			goto err_read;
+	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
+
+	/* Print root port AER registers */
+	pos = 0;
+	pdev = pcie_find_root_port(pdev);
+	if (pdev)
+		pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
+	if (pos) {
+		IWL_ERR(trans, "iwlwifi root port (%s) AER cap structure:\n",
+			pci_name(pdev));
+		sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
+		for (i = 0, ptr = buf; i <= PCI_ERR_ROOT_ERR_SRC; i += 4, ptr++)
+			if (pci_read_config_dword(pdev, pos + i, ptr))
+				goto err_read;
+		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32,
+			       4, buf, i, 0);
+	}
+	goto out;
+
+err_read:
+	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
+	IWL_ERR(trans, "Read failed at 0x%X\n", i);
+out:
+	trans_pcie->pcie_dbg_dumped_once = 1;
+	kfree(buf);
+}
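+
+/*
+ * Buffer trick in the function above: one kmalloc'ed area serves both as
+ * the dword dump buffer and, in its last PREFIX_LEN bytes, as the
+ * "iwlwifi <bdf>: " prefix string handed to print_hex_dump() - hence the
+ * "+ PREFIX_LEN" in the alloc_size arithmetic and the prefix pointer
+ * carved from the tail of buf.
+ */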
+
+static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
+{
+	/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
+	iwl_set_bit(trans, trans->cfg->csr->addr_sw_reset,
+		    BIT(trans->cfg->csr->flag_sw_reset));
+	usleep_range(5000, 6000);
+}
+
+static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	if (!trans_pcie->fw_mon_page)
+		return;
+
+	dma_unmap_page(trans->dev, trans_pcie->fw_mon_phys,
+		       trans_pcie->fw_mon_size, DMA_FROM_DEVICE);
+	__free_pages(trans_pcie->fw_mon_page,
+		     get_order(trans_pcie->fw_mon_size));
+	trans_pcie->fw_mon_page = NULL;
+	trans_pcie->fw_mon_phys = 0;
+	trans_pcie->fw_mon_size = 0;
+}
+
+void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct page *page = NULL;
+	dma_addr_t phys;
+	u32 size = 0;
+	u8 power;
+
+	if (!max_power) {
+		/* default max_power is maximum */
+		max_power = 26;
+	} else {
+		max_power += 11;
+	}
+
+	if (WARN(max_power > 26,
+		 "External buffer size for monitor is too big %d, check the FW TLV\n",
+		 max_power))
+		return;
+
+	if (trans_pcie->fw_mon_page) {
+		dma_sync_single_for_device(trans->dev, trans_pcie->fw_mon_phys,
+					   trans_pcie->fw_mon_size,
+					   DMA_FROM_DEVICE);
+		return;
+	}
+
+	phys = 0;
+	for (power = max_power; power >= 11; power--) {
+		int order;
+
+		size = BIT(power);
+		order = get_order(size);
+		page = alloc_pages(__GFP_COMP | __GFP_NOWARN | __GFP_ZERO,
+				   order);
+		if (!page)
+			continue;
+
+		phys = dma_map_page(trans->dev, page, 0, PAGE_SIZE << order,
+				    DMA_FROM_DEVICE);
+		if (dma_mapping_error(trans->dev, phys)) {
+			__free_pages(page, order);
+			page = NULL;
+			continue;
+		}
+		IWL_INFO(trans,
+			 "Allocated 0x%08x bytes (order %d) for firmware monitor.\n",
+			 size, order);
+		break;
+	}
+
+	if (WARN_ON_ONCE(!page))
+		return;
+
+	if (power != max_power)
+		IWL_ERR(trans,
+			"Sorry - debug buffer is only %luK while you requested %luK\n",
+			(unsigned long)BIT(power - 10),
+			(unsigned long)BIT(max_power - 10));
+
+	trans_pcie->fw_mon_page = page;
+	trans_pcie->fw_mon_phys = phys;
+	trans_pcie->fw_mon_size = size;
+}
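+
+/*
+ * Allocation strategy above, by example: with the default max_power of 26
+ * the first attempt is a 64 MB (BIT(26)) physically contiguous buffer; on
+ * failure the size halves each iteration down to 2 KB (BIT(11)), and the
+ * first order that both alloc_pages() and dma_map_page() accept wins. The
+ * "Sorry" message reports the downgrade in KB via BIT(power - 10).
+ */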
+
+static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
+{
+	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
+		    ((reg & 0x0000ffff) | (2 << 28)));
+	return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
+}
+
+static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
+{
+	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
+	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
+		    ((reg & 0x0000ffff) | (3 << 28)));
+}
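+
+/*
+ * The two helpers above implement indirect access to shared (SHR) APMG
+ * registers through the HEEP control word: the target register goes in
+ * the low 16 bits and an opcode in the top nibble - 2 for a read request,
+ * 3 for a write commit - with data moved through
+ * HEEP_CTRL_WRD_PCIEX_DATA_REG. The encoding is inferred from the
+ * (2 << 28) / (3 << 28) constants in the code itself.
+ */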
+
+static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
+{
+	if (trans->cfg->apmg_not_supported)
+		return;
+
+	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
+		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
+				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
+				       ~APMG_PS_CTRL_MSK_PWR_SRC);
+	else
+		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
+				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
+				       ~APMG_PS_CTRL_MSK_PWR_SRC);
+}
+
+/* PCI registers */
+#define PCI_CFG_RETRY_TIMEOUT	0x041
+
+void iwl_pcie_apm_config(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u16 lctl;
+	u16 cap;
+
+	/*
+	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
+	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
+	 * If so (likely), disable L0S, so the device moves directly L0->L1;
+	 *    this costs a negligible amount of power savings.
+	 * If not (unlikely), enable L0S, so there is at least some
+	 *    power savings, even without L1.
+	 */
+	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
+	if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
+		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
+	else
+		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
+	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
+
+	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
+	trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
+	IWL_DEBUG_POWER(trans, "L1 %sabled - LTR %sabled\n",
+			(lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
+			trans->ltr_enabled ? "En" : "Dis");
+}
+
+/*
+ * Start up NIC's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
+ * NOTE:  This does not load uCode nor start the embedded processor
+ */
+static int iwl_pcie_apm_init(struct iwl_trans *trans)
+{
+	int ret;
+
+	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");
+
+	/*
+	 * Use "set_bit" below rather than "write", to preserve any hardware
+	 * bits already set by default after reset.
+	 */
+
+	/* Disable L0S exit timer (platform NMI Work/Around) */
+	if (trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
+		iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
+			    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+
+	/*
+	 * Disable L0s without affecting L1;
+	 *  don't wait for ICH L0s (ICH bug W/A)
+	 */
+	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
+		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
+
+	/* Set FH wait threshold to maximum (HW error during stress W/A) */
+	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
+
+	/*
+	 * Enable HAP INTA (interrupt from management bus) to
+	 * wake device's PCI Express link L1a -> L0s
+	 */
+	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
+
+	iwl_pcie_apm_config(trans);
+
+	/* Configure analog phase-lock-loop before activating to D0A */
+	if (trans->cfg->base_params->pll_cfg)
+		iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
+
+	/*
+	 * Set "initialization complete" bit to move adapter from
+	 * D0U* --> D0A* (powered-up active) state.
+	 */
+	iwl_set_bit(trans, CSR_GP_CNTRL,
+		    BIT(trans->cfg->csr->flag_init_done));
+
+	/*
+	 * Wait for clock stabilization; once stabilized, access to
+	 * device-internal resources is supported, e.g. iwl_write_prph()
+	 * and accesses to uCode SRAM.
+	 */
+	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+			   BIT(trans->cfg->csr->flag_mac_clock_ready),
+			   BIT(trans->cfg->csr->flag_mac_clock_ready),
+			   25000);
+	if (ret < 0) {
+		IWL_ERR(trans, "Failed to init the card\n");
+		return ret;
+	}
+
+	if (trans->cfg->host_interrupt_operation_mode) {
+		/*
+		 * This is a bit of an abuse: this is needed for 7260 / 3160
+		 * only, so we check host_interrupt_operation_mode even though
+		 * the workaround is not related to it.
+		 *
+		 * Enable the oscillator to count wake up time for L1 exit. This
+		 * consumes slightly more power (100uA) - but allows to be sure
+		 * that we wake up from L1 on time.
+		 *
+		 * This looks weird: read twice the same register, discard the
+		 * value, set a bit, and yet again, read that same register
+		 * just to discard the value. But that's the way the hardware
+		 * seems to like it.
+		 */
+		iwl_read_prph(trans, OSC_CLK);
+		iwl_read_prph(trans, OSC_CLK);
+		iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
+		iwl_read_prph(trans, OSC_CLK);
+		iwl_read_prph(trans, OSC_CLK);
+	}
+
+	/*
+	 * Enable DMA clock and wait for it to stabilize.
+	 *
+	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
+	 * bits do not disable clocks.  This preserves any hardware
+	 * bits already set by default in "CLK_CTRL_REG" after reset.
+	 */
+	if (!trans->cfg->apmg_not_supported) {
+		iwl_write_prph(trans, APMG_CLK_EN_REG,
+			       APMG_CLK_VAL_DMA_CLK_RQT);
+		udelay(20);
+
+		/* Disable L1-Active */
+		iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
+				  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+
+		/* Clear the interrupt in APMG if the NIC is in RFKILL */
+		iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
+			       APMG_RTC_INT_STT_RFKILL);
+	}
+
+	set_bit(STATUS_DEVICE_ENABLED, &trans->status);
+
+	return 0;
+}
+
+/*
+ * Enable LP XTAL to avoid HW bug where device may consume much power if
+ * FW is not loaded after device reset. LP XTAL is disabled by default
+ * after device HW reset. Do it only if XTAL is fed by internal source.
+ * Configure device's "persistence" mode to avoid resetting XTAL again when
+ * SHRD_HW_RST occurs in S3.
+ */
+static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
+{
+	int ret;
+	u32 apmg_gp1_reg;
+	u32 apmg_xtal_cfg_reg;
+	u32 dl_cfg_reg;
+
+	/* Force XTAL ON */
+	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
+				 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+
+	iwl_trans_pcie_sw_reset(trans);
+
+	/*
+	 * Set "initialization complete" bit to move adapter from
+	 * D0U* --> D0A* (powered-up active) state.
+	 */
+	iwl_set_bit(trans, CSR_GP_CNTRL,
+		    BIT(trans->cfg->csr->flag_init_done));
+
+	/*
+	 * Wait for clock stabilization; once stabilized, access to
+	 * device-internal resources is possible.
+	 */
+	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+			   BIT(trans->cfg->csr->flag_mac_clock_ready),
+			   BIT(trans->cfg->csr->flag_mac_clock_ready),
+			   25000);
+	if (WARN_ON(ret < 0)) {
+		IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n");
+		/* Release XTAL ON request */
+		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+					   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+		return;
+	}
+
+	/*
+	 * Clear "disable persistence" to avoid LP XTAL resetting when
+	 * SHRD_HW_RST is applied in S3.
+	 */
+	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
+				    APMG_PCIDEV_STT_VAL_PERSIST_DIS);
+
+	/*
+	 * Force APMG XTAL to be active to prevent its disabling by HW
+	 * caused by APMG idle state.
+	 */
+	apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
+						    SHR_APMG_XTAL_CFG_REG);
+	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
+				 apmg_xtal_cfg_reg |
+				 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
+
+	iwl_trans_pcie_sw_reset(trans);
+
+	/* Enable LP XTAL by indirect access through CSR */
+	apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
+	iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
+				 SHR_APMG_GP1_WF_XTAL_LP_EN |
+				 SHR_APMG_GP1_CHICKEN_BIT_SELECT);
+
+	/* Clear delay line clock power up */
+	dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
+	iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
+				 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);
+
+	/*
+	 * Enable persistence mode to avoid LP XTAL resetting when
+	 * SHRD_HW_RST is applied in S3.
+	 */
+	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+		    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
+
+	/*
+	 * Clear "initialization complete" bit to move adapter from
+	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+	 */
+	iwl_clear_bit(trans, CSR_GP_CNTRL,
+		      BIT(trans->cfg->csr->flag_init_done));
+
+	/* Activates XTAL resources monitor */
+	__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
+				 CSR_MONITOR_XTAL_RESOURCES);
+
+	/* Release XTAL ON request */
+	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+				   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
+	udelay(10);
+
+	/* Release APMG XTAL */
+	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
+				 apmg_xtal_cfg_reg &
+				 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
+}
+
+void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
+{
+	int ret;
+
+	/* stop device's busmaster DMA activity */
+	iwl_set_bit(trans, trans->cfg->csr->addr_sw_reset,
+		    BIT(trans->cfg->csr->flag_stop_master));
+
+	ret = iwl_poll_bit(trans, trans->cfg->csr->addr_sw_reset,
+			   BIT(trans->cfg->csr->flag_master_dis),
+			   BIT(trans->cfg->csr->flag_master_dis), 100);
+	if (ret < 0)
+		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
+
+	IWL_DEBUG_INFO(trans, "stop master\n");
+}
+
+static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
+{
+	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
+
+	if (op_mode_leave) {
+		if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+			iwl_pcie_apm_init(trans);
+
+		/* inform ME that we are leaving */
+		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
+			iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
+					  APMG_PCIDEV_STT_VAL_WAKE_ME);
+		else if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) {
+			iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+				    CSR_RESET_LINK_PWR_MGMT_DISABLED);
+			iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+				    CSR_HW_IF_CONFIG_REG_PREPARE |
+				    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
+			mdelay(1);
+			iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+				      CSR_RESET_LINK_PWR_MGMT_DISABLED);
+		}
+		mdelay(5);
+	}
+
+	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
+
+	/* Stop device's DMA activity */
+	iwl_pcie_apm_stop_master(trans);
+
+	if (trans->cfg->lp_xtal_workaround) {
+		iwl_pcie_apm_lp_xtal_enable(trans);
+		return;
+	}
+
+	iwl_trans_pcie_sw_reset(trans);
+
+	/*
+	 * Clear "initialization complete" bit to move adapter from
+	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+	 */
+	iwl_clear_bit(trans, CSR_GP_CNTRL,
+		      BIT(trans->cfg->csr->flag_init_done));
+}
+
+static int iwl_pcie_nic_init(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int ret;
+
+	/* nic_init */
+	spin_lock(&trans_pcie->irq_lock);
+	ret = iwl_pcie_apm_init(trans);
+	spin_unlock(&trans_pcie->irq_lock);
+
+	if (ret)
+		return ret;
+
+	iwl_pcie_set_pwr(trans, false);
+
+	iwl_op_mode_nic_config(trans->op_mode);
+
+	/* Allocate the RX queue, or reset if it is already allocated */
+	iwl_pcie_rx_init(trans);
+
+	/* Allocate or reset and init all Tx and Command queues */
+	if (iwl_pcie_tx_init(trans))
+		return -ENOMEM;
+
+	if (trans->cfg->base_params->shadow_reg_enable) {
+		/* enable shadow regs in HW */
+		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
+		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
+	}
+
+	return 0;
+}
+
+#define HW_READY_TIMEOUT (50)
+
+/* Note: returns poll_bit return value, which is >= 0 on success */
+static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
+{
+	int ret;
+
+	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
+
+	/* See if we got it */
+	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
+			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+			   HW_READY_TIMEOUT);
+
+	if (ret >= 0)
+		iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);
+
+	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
+	return ret;
+}
+
+/* Note: returns standard 0/-ERROR code */
+int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
+{
+	int ret;
+	int t = 0;
+	int iter;
+
+	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
+
+	ret = iwl_pcie_set_hw_ready(trans);
+	/* If the card is ready, exit 0 */
+	if (ret >= 0)
+		return 0;
+
+	iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+		    CSR_RESET_LINK_PWR_MGMT_DISABLED);
+	usleep_range(1000, 2000);
+
+	for (iter = 0; iter < 10; iter++) {
+		/* If HW is not ready, prepare the conditions to check again */
+		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+			    CSR_HW_IF_CONFIG_REG_PREPARE);
+
+		do {
+			ret = iwl_pcie_set_hw_ready(trans);
+			if (ret >= 0)
+				return 0;
+
+			usleep_range(200, 1000);
+			t += 200;
+		} while (t < 150000);
+		msleep(25);
+	}
+
+	IWL_ERR(trans, "Couldn't prepare the card\n");
+
+	return ret;
+}
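+
+/*
+ * Rough worst-case budget of the loop above: the inner do/while re-polls
+ * iwl_pcie_set_hw_ready() every 200-1000 us until t (incremented by 200
+ * per pass) reaches 150000, i.e. up to 750 polls; the outer loop
+ * re-asserts PREPARE and retries up to 10 times with 25 ms pauses. Note
+ * that t is never reset, so the full inner budget is only spent on the
+ * first outer iteration - later iterations poll once each.
+ */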
+
+/*
+ * ucode
+ */
+static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans,
+					    u32 dst_addr, dma_addr_t phy_addr,
+					    u32 byte_cnt)
+{
+	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
+		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
+
+	iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
+		    dst_addr);
+
+	iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
+		    phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
+
+	iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
+		    (iwl_get_dma_hi_addr(phy_addr)
+			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
+
+	iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
+		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
+		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
+		    FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
+
+	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
+		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
+		    FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
+}
+
+static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
+					u32 dst_addr, dma_addr_t phy_addr,
+					u32 byte_cnt)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	unsigned long flags;
+	int ret;
+
+	trans_pcie->ucode_write_complete = false;
+
+	if (!iwl_trans_grab_nic_access(trans, &flags))
+		return -EIO;
+
+	iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
+					byte_cnt);
+	iwl_trans_release_nic_access(trans, &flags);
+
+	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
+				 trans_pcie->ucode_write_complete, 5 * HZ);
+	if (!ret) {
+		IWL_ERR(trans, "Failed to load firmware chunk!\n");
+		iwl_trans_pcie_dump_regs(trans);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
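+
+/*
+ * Hand-off protocol for a single chunk, as implemented above: clear
+ * ucode_write_complete, program the FH service channel under NIC access
+ * (iwl_pcie_load_firmware_chunk_fh()), then sleep on ucode_write_waitq
+ * for up to 5 seconds (5 * HZ). The matching wake-up is the
+ * CSR_INT_BIT_FH_TX / D2S_CH0 path in the interrupt handlers, which sets
+ * ucode_write_complete = true.
+ */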
+
+static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
+			    const struct fw_desc *section)
+{
+	u8 *v_addr;
+	dma_addr_t p_addr;
+	u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
+	int ret = 0;
+
+	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
+		     section_num);
+
+	v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
+				    GFP_KERNEL | __GFP_NOWARN);
+	if (!v_addr) {
+		IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
+		chunk_sz = PAGE_SIZE;
+		v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
+					    &p_addr, GFP_KERNEL);
+		if (!v_addr)
+			return -ENOMEM;
+	}
+
+	for (offset = 0; offset < section->len; offset += chunk_sz) {
+		u32 copy_size, dst_addr;
+		bool extended_addr = false;
+
+		copy_size = min_t(u32, chunk_sz, section->len - offset);
+		dst_addr = section->offset + offset;
+
+		if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
+		    dst_addr <= IWL_FW_MEM_EXTENDED_END)
+			extended_addr = true;
+
+		if (extended_addr)
+			iwl_set_bits_prph(trans, LMPM_CHICK,
+					  LMPM_CHICK_EXTENDED_ADDR_SPACE);
+
+		memcpy(v_addr, (u8 *)section->data + offset, copy_size);
+		ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
+						   copy_size);
+
+		if (extended_addr)
+			iwl_clear_bits_prph(trans, LMPM_CHICK,
+					    LMPM_CHICK_EXTENDED_ADDR_SPACE);
+
+		if (ret) {
+			IWL_ERR(trans,
+				"Could not load the [%d] uCode section\n",
+				section_num);
+			break;
+		}
+	}
+
+	dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
+	return ret;
+}
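+
+/*
+ * Chunking above: a single bounce buffer of min(FH_MEM_TB_MAX_LENGTH,
+ * section->len) bytes is reused for every chunk, falling back to one
+ * PAGE_SIZE buffer if the large coherent allocation fails. Each chunk is
+ * memcpy'd in and DMA'd by the FH, with the LMPM extended address-space
+ * window toggled around chunks that target the 0x40000-0x57FFF SRAM
+ * range defined at the top of this file.
+ */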
+
+static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
+					   const struct fw_img *image,
+					   int cpu,
+					   int *first_ucode_section)
+{
+	int shift_param;
+	int i, ret = 0, sec_num = 0x1;
+	u32 val, last_read_idx = 0;
+
+	if (cpu == 1) {
+		shift_param = 0;
+		*first_ucode_section = 0;
+	} else {
+		shift_param = 16;
+		(*first_ucode_section)++;
+	}
+
+	for (i = *first_ucode_section; i < image->num_sec; i++) {
+		last_read_idx = i;
+
+		/*
+		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
+		 * CPU1 to CPU2.
+		 * PAGING_SEPARATOR_SECTION delimiter - separate between
+		 * CPU2 non paged to CPU2 paging sec.
+		 */
+		if (!image->sec[i].data ||
+		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
+		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
+			IWL_DEBUG_FW(trans,
+				     "Break since Data not valid or Empty section, sec = %d\n",
+				     i);
+			break;
+		}
+
+		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
+		if (ret)
+			return ret;
+
+		/* Notify ucode of loaded section number and status */
+		val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
+		val = val | (sec_num << shift_param);
+		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
+
+		sec_num = (sec_num << 1) | 0x1;
+	}
+
+	*first_ucode_section = last_read_idx;
+
+	iwl_enable_interrupts(trans);
+
+	if (trans->cfg->use_tfh) {
+		if (cpu == 1)
+			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
+				       0xFFFF);
+		else
+			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
+				       0xFFFFFFFF);
+	} else {
+		if (cpu == 1)
+			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
+					   0xFFFF);
+		else
+			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
+					   0xFFFFFFFF);
+	}
+
+	return 0;
+}
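+
+/*
+ * The FH_UCODE_LOAD_STATUS bookkeeping above, by example: sec_num starts
+ * at 0x1 and after each section becomes (sec_num << 1) | 1, i.e. 0x1,
+ * 0x3, 0x7, ... - a growing mask with one bit per completed section.
+ * CPU1 sections land in the low half-word (shift_param = 0), CPU2 in the
+ * high half-word (shift_param = 16), and the final 0xFFFF / 0xFFFFFFFF
+ * writes tell the ucode the respective CPU's image is complete.
+ */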
+
+static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
+				      const struct fw_img *image,
+				      int cpu,
+				      int *first_ucode_section)
+{
+	int i, ret = 0;
+	u32 last_read_idx = 0;
+
+	if (cpu == 1)
+		*first_ucode_section = 0;
+	else
+		(*first_ucode_section)++;
+
+	for (i = *first_ucode_section; i < image->num_sec; i++) {
+		last_read_idx = i;
+
+		/*
+		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
+		 * CPU1 to CPU2.
+		 * PAGING_SEPARATOR_SECTION delimiter - separate between
+		 * CPU2 non paged to CPU2 paging sec.
+		 */
+		if (!image->sec[i].data ||
+		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
+		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
+			IWL_DEBUG_FW(trans,
+				     "Break since Data not valid or Empty section, sec = %d\n",
+				     i);
+			break;
+		}
+
+		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
+		if (ret)
+			return ret;
+	}
+
+	*first_ucode_section = last_read_idx;
+
+	return 0;
+}
+
+void iwl_pcie_apply_destination(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg_dest_tlv;
+	int i;
+
+	IWL_INFO(trans, "Applying debug destination %s\n",
+		 get_fw_dbg_mode_string(dest->monitor_mode));
+
+	if (dest->monitor_mode == EXTERNAL_MODE)
+		iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
+	else
+		IWL_WARN(trans, "PCI should have external buffer debug\n");
+
+	for (i = 0; i < trans->dbg_dest_reg_num; i++) {
+		u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
+		u32 val = le32_to_cpu(dest->reg_ops[i].val);
+
+		switch (dest->reg_ops[i].op) {
+		case CSR_ASSIGN:
+			iwl_write32(trans, addr, val);
+			break;
+		case CSR_SETBIT:
+			iwl_set_bit(trans, addr, BIT(val));
+			break;
+		case CSR_CLEARBIT:
+			iwl_clear_bit(trans, addr, BIT(val));
+			break;
+		case PRPH_ASSIGN:
+			iwl_write_prph(trans, addr, val);
+			break;
+		case PRPH_SETBIT:
+			iwl_set_bits_prph(trans, addr, BIT(val));
+			break;
+		case PRPH_CLEARBIT:
+			iwl_clear_bits_prph(trans, addr, BIT(val));
+			break;
+		case PRPH_BLOCKBIT:
+			if (iwl_read_prph(trans, addr) & BIT(val)) {
+				IWL_ERR(trans,
+					"BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
+					val, addr);
+				goto monitor;
+			}
+			break;
+		default:
+			IWL_ERR(trans, "FW debug - unknown OP %d\n",
+				dest->reg_ops[i].op);
+			break;
+		}
+	}
+
+monitor:
+	if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
+		iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
+			       trans_pcie->fw_mon_phys >> dest->base_shift);
+		if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
+			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
+				       (trans_pcie->fw_mon_phys +
+					trans_pcie->fw_mon_size - 256) >>
+						dest->end_shift);
+		else
+			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
+				       (trans_pcie->fw_mon_phys +
+					trans_pcie->fw_mon_size) >>
+						dest->end_shift);
+	}
+}
+
+static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
+				const struct fw_img *image)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int ret = 0;
+	int first_ucode_section;
+
+	IWL_DEBUG_FW(trans, "working with %s CPU\n",
+		     image->is_dual_cpus ? "Dual" : "Single");
+
+	/* load to FW the binary non secured sections of CPU1 */
+	ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
+	if (ret)
+		return ret;
+
+	if (image->is_dual_cpus) {
+		/* set CPU2 header address */
+		iwl_write_prph(trans,
+			       LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
+			       LMPM_SECURE_CPU2_HDR_MEM_SPACE);
+
+		/* load to FW the binary sections of CPU2 */
+		ret = iwl_pcie_load_cpu_sections(trans, image, 2,
+						 &first_ucode_section);
+		if (ret)
+			return ret;
+	}
+
+	/* supported for 7000 only for the moment */
+	if (iwlwifi_mod_params.fw_monitor &&
+	    trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+		iwl_pcie_alloc_fw_monitor(trans, 0);
+
+		if (trans_pcie->fw_mon_size) {
+			iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
+				       trans_pcie->fw_mon_phys >> 4);
+			iwl_write_prph(trans, MON_BUFF_END_ADDR,
+				       (trans_pcie->fw_mon_phys +
+					trans_pcie->fw_mon_size) >> 4);
+		}
+	} else if (trans->dbg_dest_tlv) {
+		iwl_pcie_apply_destination(trans);
+	}
+
+	iwl_enable_interrupts(trans);
+
+	/* release CPU reset */
+	iwl_write32(trans, CSR_RESET, 0);
+
+	return 0;
+}
+
+static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
+					  const struct fw_img *image)
+{
+	int ret = 0;
+	int first_ucode_section;
+
+	IWL_DEBUG_FW(trans, "working with %s CPU\n",
+		     image->is_dual_cpus ? "Dual" : "Single");
+
+	if (trans->dbg_dest_tlv)
+		iwl_pcie_apply_destination(trans);
+
+	IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n",
+			iwl_read_prph(trans, WFPM_GP2));
+
+	/*
+	 * Set default value. On resume reading the values that were
+	 * zeroed can provide debug data on the resume flow.
+	 * This is for debugging only and has no functional impact.
+	 */
+	iwl_write_prph(trans, WFPM_GP2, 0x01010101);
+
+	/* configure the ucode to be ready to get the secured image */
+	/* release CPU reset */
+	iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
+
+	/* load to FW the binary Secured sections of CPU1 */
+	ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
+					      &first_ucode_section);
+	if (ret)
+		return ret;
+
+	/* load to FW the binary sections of CPU2 */
+	return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
+					       &first_ucode_section);
+}
+
+bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie =  IWL_TRANS_GET_PCIE_TRANS(trans);
+	bool hw_rfkill = iwl_is_rfkill_set(trans);
+	bool prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
+	bool report;
+
+	if (hw_rfkill) {
+		set_bit(STATUS_RFKILL_HW, &trans->status);
+		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
+	} else {
+		clear_bit(STATUS_RFKILL_HW, &trans->status);
+		if (trans_pcie->opmode_down)
+			clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
+	}
+
+	report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
+
+	if (prev != report)
+		iwl_trans_pcie_rf_kill(trans, report);
+
+	return hw_rfkill;
+}
+
+struct iwl_causes_list {
+	u32 cause_num;
+	u32 mask_reg;
+	u8 addr;
+};
+
+static struct iwl_causes_list causes_list[] = {
+	{MSIX_FH_INT_CAUSES_D2S_CH0_NUM,	CSR_MSIX_FH_INT_MASK_AD, 0},
+	{MSIX_FH_INT_CAUSES_D2S_CH1_NUM,	CSR_MSIX_FH_INT_MASK_AD, 0x1},
+	{MSIX_FH_INT_CAUSES_S2D,		CSR_MSIX_FH_INT_MASK_AD, 0x3},
+	{MSIX_FH_INT_CAUSES_FH_ERR,		CSR_MSIX_FH_INT_MASK_AD, 0x5},
+	{MSIX_HW_INT_CAUSES_REG_ALIVE,		CSR_MSIX_HW_INT_MASK_AD, 0x10},
+	{MSIX_HW_INT_CAUSES_REG_WAKEUP,		CSR_MSIX_HW_INT_MASK_AD, 0x11},
+	{MSIX_HW_INT_CAUSES_REG_CT_KILL,	CSR_MSIX_HW_INT_MASK_AD, 0x16},
+	{MSIX_HW_INT_CAUSES_REG_RF_KILL,	CSR_MSIX_HW_INT_MASK_AD, 0x17},
+	{MSIX_HW_INT_CAUSES_REG_PERIODIC,	CSR_MSIX_HW_INT_MASK_AD, 0x18},
+	{MSIX_HW_INT_CAUSES_REG_SW_ERR,		CSR_MSIX_HW_INT_MASK_AD, 0x29},
+	{MSIX_HW_INT_CAUSES_REG_SCD,		CSR_MSIX_HW_INT_MASK_AD, 0x2A},
+	{MSIX_HW_INT_CAUSES_REG_FH_TX,		CSR_MSIX_HW_INT_MASK_AD, 0x2B},
+	{MSIX_HW_INT_CAUSES_REG_HW_ERR,		CSR_MSIX_HW_INT_MASK_AD, 0x2D},
+	{MSIX_HW_INT_CAUSES_REG_HAP,		CSR_MSIX_HW_INT_MASK_AD, 0x2E},
+};
+
+static struct iwl_causes_list causes_list_v2[] = {
+	{MSIX_FH_INT_CAUSES_D2S_CH0_NUM,	CSR_MSIX_FH_INT_MASK_AD, 0},
+	{MSIX_FH_INT_CAUSES_D2S_CH1_NUM,	CSR_MSIX_FH_INT_MASK_AD, 0x1},
+	{MSIX_FH_INT_CAUSES_S2D,		CSR_MSIX_FH_INT_MASK_AD, 0x3},
+	{MSIX_FH_INT_CAUSES_FH_ERR,		CSR_MSIX_FH_INT_MASK_AD, 0x5},
+	{MSIX_HW_INT_CAUSES_REG_ALIVE,		CSR_MSIX_HW_INT_MASK_AD, 0x10},
+	{MSIX_HW_INT_CAUSES_REG_IPC,		CSR_MSIX_HW_INT_MASK_AD, 0x11},
+	{MSIX_HW_INT_CAUSES_REG_SW_ERR_V2,	CSR_MSIX_HW_INT_MASK_AD, 0x15},
+	{MSIX_HW_INT_CAUSES_REG_CT_KILL,	CSR_MSIX_HW_INT_MASK_AD, 0x16},
+	{MSIX_HW_INT_CAUSES_REG_RF_KILL,	CSR_MSIX_HW_INT_MASK_AD, 0x17},
+	{MSIX_HW_INT_CAUSES_REG_PERIODIC,	CSR_MSIX_HW_INT_MASK_AD, 0x18},
+	{MSIX_HW_INT_CAUSES_REG_SCD,		CSR_MSIX_HW_INT_MASK_AD, 0x2A},
+	{MSIX_HW_INT_CAUSES_REG_FH_TX,		CSR_MSIX_HW_INT_MASK_AD, 0x2B},
+	{MSIX_HW_INT_CAUSES_REG_HW_ERR,		CSR_MSIX_HW_INT_MASK_AD, 0x2D},
+	{MSIX_HW_INT_CAUSES_REG_HAP,		CSR_MSIX_HW_INT_MASK_AD, 0x2E},
+};
+
+static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
+	struct iwl_causes_list *causes =
+		(trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) ?
+		causes_list : causes_list_v2;
+	int i, arr_size =
+		(trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) ?
+		ARRAY_SIZE(causes_list) : ARRAY_SIZE(causes_list_v2);
+
+	/*
+	 * Access all non-RX causes and map them to the default irq.
+	 * In case we are missing at least one interrupt vector,
+	 * the first interrupt vector will serve non-RX and FBQ causes.
+	 */
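+	/*
+	 * Illustration (assuming MSIX_NON_AUTO_CLEAR_CAUSE is BIT(7)):
+	 * with def_irq == 0, each IVAR byte written below is 0x80,
+	 * i.e. "route to vector 0, do not auto-clear".
+	 */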
+	for (i = 0; i < arr_size; i++) {
+		iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
+		iwl_clear_bit(trans, causes[i].mask_reg,
+			      causes[i].cause_num);
+	}
+}
+
+static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u32 offset =
+		trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
+	u32 val, idx;
+
+	/*
+	 * The first RX queue - fallback queue, which is designated for
+	 * management frames, command responses etc., is always mapped to
+	 * the first interrupt vector. The other RX queues are mapped to
+	 * the other (N - 2) interrupt vectors.
+	 */
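+	/*
+	 * For example (sketch), with num_rx_queues == 3 and no shared RSS
+	 * vector (offset == 0), the loop below maps RX queues 1 and 2 to
+	 * causes Q(1) and Q(2); queue 0 is then bound to cause Q(0).
+	 */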
+	val = BIT(MSIX_FH_INT_CAUSES_Q(0));
+	for (idx = 1; idx < trans->num_rx_queues; idx++) {
+		iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
+			   MSIX_FH_INT_CAUSES_Q(idx - offset));
+		val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
+	}
+	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);
+
+	val = MSIX_FH_INT_CAUSES_Q(0);
+	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
+		val |= MSIX_NON_AUTO_CLEAR_CAUSE;
+	iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);
+
+	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
+		iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
+}
+
+void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
+{
+	struct iwl_trans *trans = trans_pcie->trans;
+
+	if (!trans_pcie->msix_enabled) {
+		if (trans->cfg->mq_rx_supported &&
+		    test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+			iwl_write_prph(trans, UREG_CHICK,
+				       UREG_CHICK_MSI_ENABLE);
+		return;
+	}
+	/*
+	 * The IVAR table needs to be configured again after reset,
+	 * but if the device is disabled, we can't write to
+	 * prph.
+	 */
+	if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+		iwl_write_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);
+
+	/*
+	 * Each cause from the causes list above and the RX causes is
+	 * represented as a byte in the IVAR table. The first nibble
+	 * represents the bound interrupt vector of the cause, the second
+	 * nibble carries the "no auto clear" flag for this cause. That
+	 * flag is set if the cause's interrupt vector is bound to serve
+	 * other causes as well.
+	 */
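+	/*
+	 * E.g. (illustrative, assuming the "no auto clear" flag is the top
+	 * bit of the byte): a cause bound to vector 2 whose vector also
+	 * serves other causes would be written as 0x82.
+	 */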
+	iwl_pcie_map_rx_causes(trans);
+
+	iwl_pcie_map_non_rx_causes(trans);
+}
+
+static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
+{
+	struct iwl_trans *trans = trans_pcie->trans;
+
+	iwl_pcie_conf_msix_hw(trans_pcie);
+
+	if (!trans_pcie->msix_enabled)
+		return;
+
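+	/*
+	 * The hardware mask registers hold the currently *masked* causes,
+	 * so the complement read back here is the set of causes that are
+	 * enabled right after init.
+	 */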
+	trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
+	trans_pcie->fh_mask = trans_pcie->fh_init_mask;
+	trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
+	trans_pcie->hw_mask = trans_pcie->hw_init_mask;
+}
+
+static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	lockdep_assert_held(&trans_pcie->mutex);
+
+	if (trans_pcie->is_down)
+		return;
+
+	trans_pcie->is_down = true;
+
+	/* Stop dbgc before stopping device */
+	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+		iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100);
+	} else {
+		iwl_write_prph(trans, DBGC_IN_SAMPLE, 0);
+		udelay(100);
+		iwl_write_prph(trans, DBGC_OUT_CTRL, 0);
+	}
+
+	/* tell the device to stop sending interrupts */
+	iwl_disable_interrupts(trans);
+
+	/* device going down, Stop using ICT table */
+	iwl_pcie_disable_ict(trans);
+
+	/*
+	 * If a HW restart happens during firmware loading,
+	 * then the firmware loading might call this function
+	 * and later it might be called again due to the
+	 * restart. So don't process again if the device is
+	 * already dead.
+	 */
+	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+		IWL_DEBUG_INFO(trans,
+			       "DEVICE_ENABLED bit was set and is now cleared\n");
+		iwl_pcie_tx_stop(trans);
+		iwl_pcie_rx_stop(trans);
+
+		/* Power-down device's busmaster DMA clocks */
+		if (!trans->cfg->apmg_not_supported) {
+			iwl_write_prph(trans, APMG_CLK_DIS_REG,
+				       APMG_CLK_VAL_DMA_CLK_RQT);
+			udelay(5);
+		}
+	}
+
+	/* Make sure (redundant) we've released our request to stay awake */
+	iwl_clear_bit(trans, CSR_GP_CNTRL,
+		      BIT(trans->cfg->csr->flag_mac_access_req));
+
+	/* Stop the device, and put it in low power state */
+	iwl_pcie_apm_stop(trans, false);
+
+	iwl_trans_pcie_sw_reset(trans);
+
+	/*
+	 * Upon stop, the IVAR table gets erased, so msi-x won't
+	 * work. This causes a bug in RF-KILL flows, since the interrupt
+	 * that enables radio won't fire on the correct irq, and the
+	 * driver won't be able to handle the interrupt.
+	 * Configure the IVAR table again after reset.
+	 */
+	iwl_pcie_conf_msix_hw(trans_pcie);
+
+	/*
+	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
+	 * This is a bug in certain versions of the hardware.
+	 * Certain devices also keep sending the HW RF kill interrupt all
+	 * the time, unless it is ACKed, even though it should be masked.
+	 * Re-ACK all the interrupts here.
+	 */
+	iwl_disable_interrupts(trans);
+
+	/* clear all status bits */
+	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+	clear_bit(STATUS_INT_ENABLED, &trans->status);
+	clear_bit(STATUS_TPOWER_PMI, &trans->status);
+
+	/*
+	 * Even if we stop the HW, we still want the RF kill
+	 * interrupt
+	 */
+	iwl_enable_rfkill_int(trans);
+
+	/* re-take ownership to prevent other users from stealing the device */
+	iwl_pcie_prepare_card_hw(trans);
+}
+
+void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	if (trans_pcie->msix_enabled) {
+		int i;
+
+		for (i = 0; i < trans_pcie->alloc_vecs; i++)
+			synchronize_irq(trans_pcie->msix_entries[i].vector);
+	} else {
+		synchronize_irq(trans_pcie->pci_dev->irq);
+	}
+}
+
+static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
+				   const struct fw_img *fw, bool run_in_rfkill)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	bool hw_rfkill;
+	int ret;
+
+	/* This may fail if AMT took ownership of the device */
+	if (iwl_pcie_prepare_card_hw(trans)) {
+		IWL_WARN(trans, "Exit HW not ready\n");
+		ret = -EIO;
+		goto out;
+	}
+
+	iwl_enable_rfkill_int(trans);
+
+	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
+
+	/*
+	 * We enabled the RF-Kill interrupt and the handler may very
+	 * well be running. Disable the interrupts to make sure no other
+	 * interrupt can be fired.
+	 */
+	iwl_disable_interrupts(trans);
+
+	/* Make sure it finished running */
+	iwl_pcie_synchronize_irqs(trans);
+
+	mutex_lock(&trans_pcie->mutex);
+
+	/* If platform's RF_KILL switch is NOT set to KILL */
+	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
+	if (hw_rfkill && !run_in_rfkill) {
+		ret = -ERFKILL;
+		goto out;
+	}
+
+	/* Someone called stop_device, don't try to start_fw */
+	if (trans_pcie->is_down) {
+		IWL_WARN(trans,
+			 "Can't start_fw since the HW hasn't been started\n");
+		ret = -EIO;
+		goto out;
+	}
+
+	/* make sure rfkill handshake bits are cleared */
+	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
+		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+	/* clear (again), then enable host interrupts */
+	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
+
+	ret = iwl_pcie_nic_init(trans);
+	if (ret) {
+		IWL_ERR(trans, "Unable to init nic\n");
+		goto out;
+	}
+
+	/*
+	 * Now, we load the firmware and don't want to be interrupted, even
+	 * by the RF-Kill interrupt (hence mask all the interrupts besides the
+	 * FH_TX interrupt which is needed to load the firmware). If the
+	 * RF-Kill switch is toggled, we will find out after having loaded
+	 * the firmware and return the proper value to the caller.
+	 */
+	iwl_enable_fw_load_int(trans);
+
+	/* really make sure rfkill handshake bits are cleared */
+	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+
+	/* Load the given image to the HW */
+	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
+		ret = iwl_pcie_load_given_ucode_8000(trans, fw);
+	else
+		ret = iwl_pcie_load_given_ucode(trans, fw);
+
+	/* re-check RF-Kill state since we may have missed the interrupt */
+	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
+	if (hw_rfkill && !run_in_rfkill)
+		ret = -ERFKILL;
+
+out:
+	mutex_unlock(&trans_pcie->mutex);
+	return ret;
+}
+
+static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
+{
+	iwl_pcie_reset_ict(trans);
+	iwl_pcie_tx_start(trans, scd_addr);
+}
+
+void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
+				       bool was_in_rfkill)
+{
+	bool hw_rfkill;
+
+	/*
+	 * Check again since the RF kill state may have changed while
+	 * all the interrupts were disabled, in this case we couldn't
+	 * receive the RF kill interrupt and update the state in the
+	 * op_mode.
+	 * Don't call the op_mode if the rfkill state hasn't changed.
+	 * This allows the op_mode to call stop_device from the rfkill
+	 * notification without endless recursion. Under very rare
+	 * circumstances, we might have a small recursion if the rfkill
+	 * state changed exactly now while we were called from stop_device.
+	 * This is very unlikely but can happen and is supported.
+	 */
+	hw_rfkill = iwl_is_rfkill_set(trans);
+	if (hw_rfkill) {
+		set_bit(STATUS_RFKILL_HW, &trans->status);
+		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
+	} else {
+		clear_bit(STATUS_RFKILL_HW, &trans->status);
+		clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
+	}
+	if (hw_rfkill != was_in_rfkill)
+		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+}
+
+static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	bool was_in_rfkill;
+
+	mutex_lock(&trans_pcie->mutex);
+	trans_pcie->opmode_down = true;
+	was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
+	_iwl_trans_pcie_stop_device(trans, low_power);
+	iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
+	mutex_unlock(&trans_pcie->mutex);
+}
+
+void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
+{
+	struct iwl_trans_pcie __maybe_unused *trans_pcie =
+		IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	lockdep_assert_held(&trans_pcie->mutex);
+
+	IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",
+		 state ? "disabled" : "enabled");
+	if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) {
+		if (trans->cfg->gen2)
+			_iwl_trans_pcie_gen2_stop_device(trans, true);
+		else
+			_iwl_trans_pcie_stop_device(trans, true);
+	}
+}
+
+static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
+				      bool reset)
+{
+	if (!reset) {
+		/* Enable persistence mode to avoid reset */
+		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+			    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
+	}
+
+	iwl_disable_interrupts(trans);
+
+	/*
+	 * in testing mode, the host stays awake and the
+	 * hardware won't be reset (not even partially)
+	 */
+	if (test)
+		return;
+
+	iwl_pcie_disable_ict(trans);
+
+	iwl_pcie_synchronize_irqs(trans);
+
+	iwl_clear_bit(trans, CSR_GP_CNTRL,
+		      BIT(trans->cfg->csr->flag_mac_access_req));
+	iwl_clear_bit(trans, CSR_GP_CNTRL,
+		      BIT(trans->cfg->csr->flag_init_done));
+
+	iwl_pcie_enable_rx_wake(trans, false);
+
+	if (reset) {
+		/*
+		 * reset TX queues -- some of their registers reset during S3
+		 * so if we don't reset everything here the D3 image would try
+		 * to execute some invalid memory upon resume
+		 */
+		iwl_trans_pcie_tx_reset(trans);
+	}
+
+	iwl_pcie_set_pwr(trans, true);
+}
+
+static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
+				    enum iwl_d3_status *status,
+				    bool test, bool reset)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u32 val;
+	int ret;
+
+	if (test) {
+		iwl_enable_interrupts(trans);
+		*status = IWL_D3_STATUS_ALIVE;
+		return 0;
+	}
+
+	iwl_pcie_enable_rx_wake(trans, true);
+
+	iwl_set_bit(trans, CSR_GP_CNTRL,
+		    BIT(trans->cfg->csr->flag_mac_access_req));
+	iwl_set_bit(trans, CSR_GP_CNTRL,
+		    BIT(trans->cfg->csr->flag_init_done));
+
+	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
+		udelay(2);
+
+	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+			   BIT(trans->cfg->csr->flag_mac_clock_ready),
+			   BIT(trans->cfg->csr->flag_mac_clock_ready),
+			   25000);
+	if (ret < 0) {
+		IWL_ERR(trans, "Failed to resume the device (mac ready)\n");
+		return ret;
+	}
+
+	/*
+	 * Reconfigure the IVAR table in MSI-X mode, or reset the ICT table
+	 * in MSI mode, since the HW reset erased them.
+	 * Also enable interrupts - none will fire yet, as the device
+	 * doesn't know we're waking it up; that only happens once the
+	 * opmode actually tells it, after this call.
+	 */
+	iwl_pcie_conf_msix_hw(trans_pcie);
+	if (!trans_pcie->msix_enabled)
+		iwl_pcie_reset_ict(trans);
+	iwl_enable_interrupts(trans);
+
+	iwl_pcie_set_pwr(trans, false);
+
+	if (!reset) {
+		iwl_clear_bit(trans, CSR_GP_CNTRL,
+			      BIT(trans->cfg->csr->flag_mac_access_req));
+	} else {
+		iwl_trans_pcie_tx_reset(trans);
+
+		ret = iwl_pcie_rx_init(trans);
+		if (ret) {
+			IWL_ERR(trans,
+				"Failed to resume the device (RX reset)\n");
+			return ret;
+		}
+	}
+
+	IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n",
+			iwl_read_prph(trans, WFPM_GP2));
+
+	val = iwl_read32(trans, CSR_RESET);
+	if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
+		*status = IWL_D3_STATUS_RESET;
+	else
+		*status = IWL_D3_STATUS_ALIVE;
+
+	return 0;
+}
+
+static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
+					struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int max_irqs, num_irqs, i, ret;
+	u16 pci_cmd;
+
+	if (!trans->cfg->mq_rx_supported)
+		goto enable_msi;
+
+	max_irqs = min_t(u32, num_online_cpus() + 2, IWL_MAX_RX_HW_QUEUES);
+	for (i = 0; i < max_irqs; i++)
+		trans_pcie->msix_entries[i].entry = i;
+
+	num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
+					 MSIX_MIN_INTERRUPT_VECTORS,
+					 max_irqs);
+	if (num_irqs < 0) {
+		IWL_DEBUG_INFO(trans,
+			       "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n",
+			       num_irqs);
+		goto enable_msi;
+	}
+	trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0;
+
+	IWL_DEBUG_INFO(trans,
+		       "MSI-X enabled. %d interrupt vectors were allocated\n",
+		       num_irqs);
+
+	/*
+	 * In case the OS provides fewer interrupts than requested, different
+	 * causes will share the same interrupt vector as follows:
+	 * One interrupt less: non-RX causes shared with FBQ.
+	 * Two interrupts less: non-RX causes shared with FBQ and RSS.
+	 * More than two interrupts less: we will use fewer RSS queues.
+	 */
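+	/*
+	 * Worked example: on a 2-CPU system max_irqs is 4 (assuming
+	 * IWL_MAX_RX_HW_QUEUES is larger). If the OS grants 3 vectors
+	 * (max_irqs - 1), non-RX causes share the first vector with the
+	 * FBQ and num_rx_queues becomes 3; if it grants only 2, the first
+	 * RSS queue shares that vector as well.
+	 */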
+	if (num_irqs <= max_irqs - 2) {
+		trans_pcie->trans->num_rx_queues = num_irqs + 1;
+		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
+			IWL_SHARED_IRQ_FIRST_RSS;
+	} else if (num_irqs == max_irqs - 1) {
+		trans_pcie->trans->num_rx_queues = num_irqs;
+		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
+	} else {
+		trans_pcie->trans->num_rx_queues = num_irqs - 1;
+	}
+	WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES);
+
+	trans_pcie->alloc_vecs = num_irqs;
+	trans_pcie->msix_enabled = true;
+	return;
+
+enable_msi:
+	ret = pci_enable_msi(pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
+		/* enable rfkill interrupt: hw bug workaround */
+		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
+		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
+			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
+			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
+		}
+	}
+}
+
+static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
+{
+	int iter_rx_q, i, ret, cpu, offset;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1;
+	iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i;
+	offset = 1 + i;
+	for (; i < iter_rx_q ; i++) {
+		/*
+		 * cpumask_next() returns the first online CPU after
+		 * (i - offset), spreading the RX interrupt vectors
+		 * across the online CPUs.
+		 */
+		cpu = cpumask_next(i - offset, cpu_online_mask);
+		cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]);
+		ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector,
+					    &trans_pcie->affinity_mask[i]);
+		if (ret)
+			IWL_ERR(trans_pcie->trans,
+				"Failed to set affinity mask for IRQ %d\n",
+				i);
+	}
+}
+
+static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
+				      struct iwl_trans_pcie *trans_pcie)
+{
+	int i;
+
+	for (i = 0; i < trans_pcie->alloc_vecs; i++) {
+		int ret;
+		struct msix_entry *msix_entry;
+		const char *qname = queue_name(&pdev->dev, trans_pcie, i);
+
+		if (!qname)
+			return -ENOMEM;
+
+		msix_entry = &trans_pcie->msix_entries[i];
+		ret = devm_request_threaded_irq(&pdev->dev,
+						msix_entry->vector,
+						iwl_pcie_msix_isr,
+						(i == trans_pcie->def_irq) ?
+						iwl_pcie_irq_msix_handler :
+						iwl_pcie_irq_rx_msix_handler,
+						IRQF_SHARED,
+						qname,
+						msix_entry);
+		if (ret) {
+			IWL_ERR(trans_pcie->trans,
+				"Error allocating IRQ %d\n", i);
+
+			return ret;
+		}
+	}
+	iwl_pcie_irq_set_affinity(trans_pcie->trans);
+
+	return 0;
+}
+
+static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int err;
+
+	lockdep_assert_held(&trans_pcie->mutex);
+
+	err = iwl_pcie_prepare_card_hw(trans);
+	if (err) {
+		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
+		return err;
+	}
+
+	iwl_trans_pcie_sw_reset(trans);
+
+	err = iwl_pcie_apm_init(trans);
+	if (err)
+		return err;
+
+	iwl_pcie_init_msix(trans_pcie);
+
+	/* From now on, the op_mode will be kept updated about RF kill state */
+	iwl_enable_rfkill_int(trans);
+
+	trans_pcie->opmode_down = false;
+
+	/* Set is_down to false here so that... */
+	trans_pcie->is_down = false;
+
+	/* ...rfkill can call stop_device and set it false if needed */
+	iwl_pcie_check_hw_rf_kill(trans);
+
+	/* Make sure we sync here, because we'll need full access later */
+	if (low_power)
+		pm_runtime_resume(trans->dev);
+
+	return 0;
+}
+
+static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int ret;
+
+	mutex_lock(&trans_pcie->mutex);
+	ret = _iwl_trans_pcie_start_hw(trans, low_power);
+	mutex_unlock(&trans_pcie->mutex);
+
+	return ret;
+}
+
+static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	mutex_lock(&trans_pcie->mutex);
+
+	/* disable interrupts - don't enable HW RF kill interrupt */
+	iwl_disable_interrupts(trans);
+
+	iwl_pcie_apm_stop(trans, true);
+
+	iwl_disable_interrupts(trans);
+
+	iwl_pcie_disable_ict(trans);
+
+	mutex_unlock(&trans_pcie->mutex);
+
+	iwl_pcie_synchronize_irqs(trans);
+}
+
+static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
+{
+	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
+}
+
+static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
+{
+	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
+}
+
+static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
+{
+	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
+}
+
+static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
+{
+	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
+			       ((reg & 0x000FFFFF) | (3 << 24)));
+	return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
+}
+
+static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
+				      u32 val)
+{
+	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
+			       ((addr & 0x000FFFFF) | (3 << 24)));
+	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
+}
+
+static void iwl_trans_pcie_configure(struct iwl_trans *trans,
+				     const struct iwl_trans_config *trans_cfg)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	trans_pcie->cmd_queue = trans_cfg->cmd_queue;
+	trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
+	trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
+	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
+		trans_pcie->n_no_reclaim_cmds = 0;
+	else
+		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
+	if (trans_pcie->n_no_reclaim_cmds)
+		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
+		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));
+
+	trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
+	trans_pcie->rx_page_order =
+		iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);
+
+	trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
+	trans_pcie->scd_set_active = trans_cfg->scd_set_active;
+	trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx;
+
+	trans_pcie->page_offs = trans_cfg->cb_data_offs;
+	trans_pcie->dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
+
+	trans->command_groups = trans_cfg->command_groups;
+	trans->command_groups_size = trans_cfg->command_groups_size;
+
+	/* Initialize NAPI here - it should be before registering to mac80211
+	 * in the opmode but after the HW struct is allocated.
+	 * As this function may be called again in some corner cases, don't
+	 * do anything if NAPI was already initialized.
+	 */
+	if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY)
+		init_dummy_netdev(&trans_pcie->napi_dev);
+}
+
+void iwl_trans_pcie_free(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int i;
+
+	iwl_pcie_synchronize_irqs(trans);
+
+	if (trans->cfg->gen2)
+		iwl_pcie_gen2_tx_free(trans);
+	else
+		iwl_pcie_tx_free(trans);
+	iwl_pcie_rx_free(trans);
+
+	if (trans_pcie->rba.alloc_wq) {
+		destroy_workqueue(trans_pcie->rba.alloc_wq);
+		trans_pcie->rba.alloc_wq = NULL;
+	}
+
+	if (trans_pcie->msix_enabled) {
+		for (i = 0; i < trans_pcie->alloc_vecs; i++) {
+			irq_set_affinity_hint(
+				trans_pcie->msix_entries[i].vector,
+				NULL);
+		}
+
+		trans_pcie->msix_enabled = false;
+	} else {
+		iwl_pcie_free_ict(trans);
+	}
+
+	iwl_pcie_free_fw_monitor(trans);
+
+	for_each_possible_cpu(i) {
+		struct iwl_tso_hdr_page *p =
+			per_cpu_ptr(trans_pcie->tso_hdr_page, i);
+
+		if (p->page)
+			__free_page(p->page);
+	}
+
+	free_percpu(trans_pcie->tso_hdr_page);
+	mutex_destroy(&trans_pcie->mutex);
+	iwl_trans_free(trans);
+}
+
+static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
+{
+	if (state)
+		set_bit(STATUS_TPOWER_PMI, &trans->status);
+	else
+		clear_bit(STATUS_TPOWER_PMI, &trans->status);
+}
+
+struct iwl_trans_pcie_removal {
+	struct pci_dev *pdev;		/* device scheduled for removal */
+	struct work_struct work;	/* runs iwl_trans_pcie_removal_wk() */
+};
+
+static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
+{
+	struct iwl_trans_pcie_removal *removal =
+		container_of(wk, struct iwl_trans_pcie_removal, work);
+	struct pci_dev *pdev = removal->pdev;
+	char *prop[] = {"EVENT=INACCESSIBLE", NULL};
+
+	dev_err(&pdev->dev, "Device gone - attempting removal\n");
+	kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop);
+	pci_lock_rescan_remove();
+	pci_dev_put(pdev);
+	pci_stop_and_remove_bus_device(pdev);
+	pci_unlock_rescan_remove();
+
+	kfree(removal);
+	module_put(THIS_MODULE);
+}
+
+static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans,
+					   unsigned long *flags)
+{
+	int ret;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	spin_lock_irqsave(&trans_pcie->reg_lock, *flags);
+
+	if (trans_pcie->cmd_hold_nic_awake)
+		goto out;
+
+	/* this bit wakes up the NIC */
+	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
+				 BIT(trans->cfg->csr->flag_mac_access_req));
+	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000)
+		udelay(2);
+
+	/*
+	 * These bits say the device is running, and should keep running for
+	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
+	 * but they do not indicate that embedded SRAM is restored yet;
+	 * HW with volatile SRAM must save/restore contents to/from
+	 * host DRAM when sleeping/waking for power-saving.
+	 * Each direction takes approximately 1/4 millisecond; with this
+	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
+	 * series of register accesses are expected (e.g. reading Event Log),
+	 * to keep device from sleeping.
+	 *
+	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
+	 * SRAM is okay/restored.  We don't check that here because this call
+	 * is just for hardware register access; but GP1 MAC_SLEEP
+	 * check is a good idea before accessing the SRAM of HW with
+	 * volatile SRAM (e.g. reading Event Log).
+	 *
+	 * 5000 series and later (including 1000 series) have non-volatile SRAM,
+	 * and do not save/restore SRAM when power cycling.
+	 */
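+	/*
+	 * Typical caller pattern (sketch) for such a burst of accesses:
+	 *
+	 *	unsigned long flags;
+	 *
+	 *	if (iwl_trans_grab_nic_access(trans, &flags)) {
+	 *		... series of iwl_read32()/iwl_write32() ...
+	 *		iwl_trans_release_nic_access(trans, &flags);
+	 *	}
+	 *
+	 * see e.g. iwl_trans_pcie_read_mem() below.
+	 */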
+	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+			   BIT(trans->cfg->csr->flag_val_mac_access_en),
+			   (BIT(trans->cfg->csr->flag_mac_clock_ready) |
+			    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
+	if (unlikely(ret < 0)) {
+		u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL);
+
+		WARN_ONCE(1,
+			  "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
+			  cntrl);
+
+		iwl_trans_pcie_dump_regs(trans);
+
+		if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) {
+			struct iwl_trans_pcie_removal *removal;
+
+			if (trans_pcie->scheduled_for_removal)
+				goto err;
+
+			IWL_ERR(trans, "Device gone - scheduling removal!\n");
+
+			/*
+			 * get a module reference to avoid doing this
+			 * while unloading anyway and to avoid
+			 * scheduling a work with code that's being
+			 * removed.
+			 */
+			if (!try_module_get(THIS_MODULE)) {
+				IWL_ERR(trans,
+					"Module is being unloaded - abort\n");
+				goto err;
+			}
+
+			removal = kzalloc(sizeof(*removal), GFP_ATOMIC);
+			if (!removal) {
+				module_put(THIS_MODULE);
+				goto err;
+			}
+			/*
+			 * We don't need to clear this flag, because
+			 * the trans will be freed and reallocated.
+			 */
+			trans_pcie->scheduled_for_removal = true;
+
+			removal->pdev = to_pci_dev(trans->dev);
+			INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk);
+			pci_dev_get(removal->pdev);
+			schedule_work(&removal->work);
+		} else {
+			iwl_write32(trans, CSR_RESET,
+				    CSR_RESET_REG_FLAG_FORCE_NMI);
+		}
+
+err:
+		spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
+		return false;
+	}
+
+out:
+	/*
+	 * Fool sparse by faking that we release the lock - sparse will
+	 * track nic_access anyway.
+	 */
+	__release(&trans_pcie->reg_lock);
+	return true;
+}
+
+static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
+					      unsigned long *flags)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	lockdep_assert_held(&trans_pcie->reg_lock);
+
+	/*
+	 * Fool sparse by faking that we acquire the lock - sparse will
+	 * track nic_access anyway.
+	 */
+	__acquire(&trans_pcie->reg_lock);
+
+	if (trans_pcie->cmd_hold_nic_awake)
+		goto out;
+
+	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+				   BIT(trans->cfg->csr->flag_mac_access_req));
+	/*
+	 * Above we read the CSR_GP_CNTRL register, which will flush
+	 * any previous writes, but we need the write that clears the
+	 * MAC_ACCESS_REQ bit to be performed before any other writes
+	 * scheduled on different CPUs (after we drop reg_lock).
+	 */
+	mmiowb();
+out:
+	spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
+}
+
+static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
+				   void *buf, int dwords)
+{
+	unsigned long flags;
+	int offs, ret = 0;
+	u32 *vals = buf;
+
+	if (iwl_trans_grab_nic_access(trans, &flags)) {
+		iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
+		for (offs = 0; offs < dwords; offs++)
+			vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
+		iwl_trans_release_nic_access(trans, &flags);
+	} else {
+		ret = -EBUSY;
+	}
+	return ret;
+}
+
+static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
+				    const void *buf, int dwords)
+{
+	unsigned long flags;
+	int offs, ret = 0;
+	const u32 *vals = buf;
+
+	if (iwl_trans_grab_nic_access(trans, &flags)) {
+		iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
+		for (offs = 0; offs < dwords; offs++)
+			iwl_write32(trans, HBUS_TARG_MEM_WDAT,
+				    vals ? vals[offs] : 0);
+		iwl_trans_release_nic_access(trans, &flags);
+	} else {
+		ret = -EBUSY;
+	}
+	return ret;
+}
+
+static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
+					    unsigned long txqs,
+					    bool freeze)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int queue;
+
+	for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
+		struct iwl_txq *txq = trans_pcie->txq[queue];
+		unsigned long now;
+
+		spin_lock_bh(&txq->lock);
+
+		now = jiffies;
+
+		if (txq->frozen == freeze)
+			goto next_queue;
+
+		IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
+				    freeze ? "Freezing" : "Waking", queue);
+
+		txq->frozen = freeze;
+
+		if (txq->read_ptr == txq->write_ptr)
+			goto next_queue;
+
+		if (freeze) {
+			if (unlikely(time_after(now,
+						txq->stuck_timer.expires))) {
+				/*
+				 * The timer should have fired, maybe it is
+				 * spinning right now on the lock.
+				 */
+				goto next_queue;
+			}
+			/* remember how long until the timer fires */
+			txq->frozen_expiry_remainder =
+				txq->stuck_timer.expires - now;
+			del_timer(&txq->stuck_timer);
+			goto next_queue;
+		}
+
+		/*
+		 * Wake a non-empty queue -> arm timer with the
+		 * remainder before it froze
+		 */
+		mod_timer(&txq->stuck_timer,
+			  now + txq->frozen_expiry_remainder);
+
+next_queue:
+		spin_unlock_bh(&txq->lock);
+	}
+}
+
+static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int i;
+
+	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
+		struct iwl_txq *txq = trans_pcie->txq[i];
+
+		if (i == trans_pcie->cmd_queue)
+			continue;
+
+		spin_lock_bh(&txq->lock);
+
+		if (!block && !(WARN_ON_ONCE(!txq->block))) {
+			txq->block--;
+			if (!txq->block) {
+				iwl_write32(trans, HBUS_TARG_WRPTR,
+					    txq->write_ptr | (i << 8));
+			}
+		} else if (block) {
+			txq->block++;
+		}
+
+		spin_unlock_bh(&txq->lock);
+	}
+}
+
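+/* time (in ms) to wait for a TX queue to drain in wait_txq_empty below */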
+#define IWL_FLUSH_WAIT_MS	2000
+
+void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
+{
+	u32 txq_id = txq->id;
+	u32 status;
+	bool active;
+	u8 fifo;
+
+	if (trans->cfg->use_tfh) {
+		IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
+			txq->read_ptr, txq->write_ptr);
+		/* TODO: access new SCD registers and dump them */
+		return;
+	}
+
+	status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
+	fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
+	active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
+
+	IWL_ERR(trans,
+		"Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
+		txq_id, active ? "" : "in", fifo,
+		jiffies_to_msecs(txq->wd_timeout),
+		txq->read_ptr, txq->write_ptr,
+		iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
+			(trans->cfg->base_params->max_tfd_queue_size - 1),
+		iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
+			(trans->cfg->base_params->max_tfd_queue_size - 1),
+		iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
+}
+
+static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
+				       struct iwl_trans_rxq_dma_data *data)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	if (queue >= trans->num_rx_queues || !trans_pcie->rxq)
+		return -EINVAL;
+
+	data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma;
+	data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma;
+	data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma;
+	data->fr_bd_wid = 0;
+
+	return 0;
+}
+
+static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_txq *txq;
+	unsigned long now = jiffies;
+	u8 wr_ptr;
+
+	if (!test_bit(txq_idx, trans_pcie->queue_used))
+		return -EINVAL;
+
+	IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
+	txq = trans_pcie->txq[txq_idx];
+	wr_ptr = READ_ONCE(txq->write_ptr);
+
+	while (txq->read_ptr != READ_ONCE(txq->write_ptr) &&
+	       !time_after(jiffies,
+			   now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
+		u8 write_ptr = READ_ONCE(txq->write_ptr);
+
+		if (WARN_ONCE(wr_ptr != write_ptr,
+			      "WR pointer moved while flushing %d -> %d\n",
+			      wr_ptr, write_ptr))
+			return -ETIMEDOUT;
+		usleep_range(1000, 2000);
+	}
+
+	if (txq->read_ptr != txq->write_ptr) {
+		IWL_ERR(trans,
+			"fail to flush all tx fifo queues Q %d\n", txq_idx);
+		iwl_trans_pcie_log_scd_error(trans, txq);
+		return -ETIMEDOUT;
+	}
+
+	IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", txq_idx);
+
+	return 0;
+}
+
+static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int cnt;
+	int ret = 0;
+
+	/* waiting for all the tx frames to complete might take a while */
+	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
+
+		if (cnt == trans_pcie->cmd_queue)
+			continue;
+		if (!test_bit(cnt, trans_pcie->queue_used))
+			continue;
+		if (!(BIT(cnt) & txq_bm))
+			continue;
+
+		ret = iwl_trans_pcie_wait_txq_empty(trans, cnt);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+
+static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
+					 u32 mask, u32 value)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	unsigned long flags;
+
+	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
+	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+}
+
+static void iwl_trans_pcie_ref(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	if (iwlwifi_mod_params.d0i3_disable)
+		return;
+
+	pm_runtime_get(&trans_pcie->pci_dev->dev);
+
+#ifdef CONFIG_PM
+	IWL_DEBUG_RPM(trans, "runtime usage count: %d\n",
+		      atomic_read(&trans_pcie->pci_dev->dev.power.usage_count));
+#endif /* CONFIG_PM */
+}
+
+static void iwl_trans_pcie_unref(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	if (iwlwifi_mod_params.d0i3_disable)
+		return;
+
+	pm_runtime_mark_last_busy(&trans_pcie->pci_dev->dev);
+	pm_runtime_put_autosuspend(&trans_pcie->pci_dev->dev);
+
+#ifdef CONFIG_PM
+	IWL_DEBUG_RPM(trans, "runtime usage count: %d\n",
+		      atomic_read(&trans_pcie->pci_dev->dev.power.usage_count));
+#endif /* CONFIG_PM */
+}
+
+static const char *get_csr_string(int cmd)
+{
+#define IWL_CMD(x) case x: return #x
+	switch (cmd) {
+	IWL_CMD(CSR_HW_IF_CONFIG_REG);
+	IWL_CMD(CSR_INT_COALESCING);
+	IWL_CMD(CSR_INT);
+	IWL_CMD(CSR_INT_MASK);
+	IWL_CMD(CSR_FH_INT_STATUS);
+	IWL_CMD(CSR_GPIO_IN);
+	IWL_CMD(CSR_RESET);
+	IWL_CMD(CSR_GP_CNTRL);
+	IWL_CMD(CSR_HW_REV);
+	IWL_CMD(CSR_EEPROM_REG);
+	IWL_CMD(CSR_EEPROM_GP);
+	IWL_CMD(CSR_OTP_GP_REG);
+	IWL_CMD(CSR_GIO_REG);
+	IWL_CMD(CSR_GP_UCODE_REG);
+	IWL_CMD(CSR_GP_DRIVER_REG);
+	IWL_CMD(CSR_UCODE_DRV_GP1);
+	IWL_CMD(CSR_UCODE_DRV_GP2);
+	IWL_CMD(CSR_LED_REG);
+	IWL_CMD(CSR_DRAM_INT_TBL_REG);
+	IWL_CMD(CSR_GIO_CHICKEN_BITS);
+	IWL_CMD(CSR_ANA_PLL_CFG);
+	IWL_CMD(CSR_HW_REV_WA_REG);
+	IWL_CMD(CSR_MONITOR_STATUS_REG);
+	IWL_CMD(CSR_DBG_HPET_MEM_REG);
+	default:
+		return "UNKNOWN";
+	}
+#undef IWL_CMD
+}
+
+void iwl_pcie_dump_csr(struct iwl_trans *trans)
+{
+	int i;
+	static const u32 csr_tbl[] = {
+		CSR_HW_IF_CONFIG_REG,
+		CSR_INT_COALESCING,
+		CSR_INT,
+		CSR_INT_MASK,
+		CSR_FH_INT_STATUS,
+		CSR_GPIO_IN,
+		CSR_RESET,
+		CSR_GP_CNTRL,
+		CSR_HW_REV,
+		CSR_EEPROM_REG,
+		CSR_EEPROM_GP,
+		CSR_OTP_GP_REG,
+		CSR_GIO_REG,
+		CSR_GP_UCODE_REG,
+		CSR_GP_DRIVER_REG,
+		CSR_UCODE_DRV_GP1,
+		CSR_UCODE_DRV_GP2,
+		CSR_LED_REG,
+		CSR_DRAM_INT_TBL_REG,
+		CSR_GIO_CHICKEN_BITS,
+		CSR_ANA_PLL_CFG,
+		CSR_MONITOR_STATUS_REG,
+		CSR_HW_REV_WA_REG,
+		CSR_DBG_HPET_MEM_REG
+	};
+	IWL_ERR(trans, "CSR values:\n");
+	IWL_ERR(trans,
+		"(2nd byte of CSR_INT_COALESCING is CSR_INT_PERIODIC_REG)\n");
+	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
+		IWL_ERR(trans, "  %25s: 0X%08x\n",
+			get_csr_string(csr_tbl[i]),
+			iwl_read32(trans, csr_tbl[i]));
+	}
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+/* creation and removal of debugfs files */
+#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
+	if (!debugfs_create_file(#name, mode, parent, trans,		\
+				 &iwl_dbgfs_##name##_ops))		\
+		goto err;						\
+} while (0)
+
+/* file operations */
+#define DEBUGFS_READ_FILE_OPS(name)					\
+static const struct file_operations iwl_dbgfs_##name##_ops = {		\
+	.read = iwl_dbgfs_##name##_read,				\
+	.open = simple_open,						\
+	.llseek = generic_file_llseek,					\
+};
+
+#define DEBUGFS_WRITE_FILE_OPS(name)                                    \
+static const struct file_operations iwl_dbgfs_##name##_ops = {          \
+	.write = iwl_dbgfs_##name##_write,                              \
+	.open = simple_open,						\
+	.llseek = generic_file_llseek,					\
+};
+
+#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
+static const struct file_operations iwl_dbgfs_##name##_ops = {		\
+	.write = iwl_dbgfs_##name##_write,				\
+	.read = iwl_dbgfs_##name##_read,				\
+	.open = simple_open,						\
+	.llseek = generic_file_llseek,					\
+};
+
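+/*
+ * For instance, DEBUGFS_READ_WRITE_FILE_OPS(interrupt) below expands to a
+ * "struct file_operations iwl_dbgfs_interrupt_ops" wired to
+ * iwl_dbgfs_interrupt_read() and iwl_dbgfs_interrupt_write().
+ */
+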
+static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
+				       char __user *user_buf,
+				       size_t count, loff_t *ppos)
+{
+	struct iwl_trans *trans = file->private_data;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_txq *txq;
+	char *buf;
+	int pos = 0;
+	int cnt;
+	int ret;
+	size_t bufsz;
+
+	bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues;
+
+	if (!trans_pcie->txq_memory)
+		return -EAGAIN;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
+		txq = trans_pcie->txq[cnt];
+		pos += scnprintf(buf + pos, bufsz - pos,
+				 "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
+				 cnt, txq->read_ptr, txq->write_ptr,
+				 !!test_bit(cnt, trans_pcie->queue_used),
+				 !!test_bit(cnt, trans_pcie->queue_stopped),
+				 txq->need_update, txq->frozen,
+				 (cnt == trans_pcie->cmd_queue ? " HCMD" : ""));
+	}
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
+				       char __user *user_buf,
+				       size_t count, loff_t *ppos)
+{
+	struct iwl_trans *trans = file->private_data;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	char *buf;
+	int pos = 0, i, ret;
+	size_t bufsz = sizeof(char) * 121 * trans->num_rx_queues;
+
+	if (!trans_pcie->rxq)
+		return -EAGAIN;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) {
+		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+
+		pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n",
+				 i);
+		pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n",
+				 rxq->read);
+		pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n",
+				 rxq->write);
+		pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n",
+				 rxq->write_actual);
+		pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n",
+				 rxq->need_update);
+		pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
+				 rxq->free_count);
+		if (rxq->rb_stts) {
+			u32 r =	__le16_to_cpu(iwl_get_closed_rb_stts(trans,
+								     rxq));
+			pos += scnprintf(buf + pos, bufsz - pos,
+					 "\tclosed_rb_num: %u\n",
+					 r & 0x0FFF);
+		} else {
+			pos += scnprintf(buf + pos, bufsz - pos,
+					 "\tclosed_rb_num: Not Allocated\n");
+		}
+	}
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+
+	return ret;
+}
+
+static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
+					char __user *user_buf,
+					size_t count, loff_t *ppos)
+{
+	struct iwl_trans *trans = file->private_data;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
+
+	int pos = 0;
+	char *buf;
+	int bufsz = 24 * 64; /* 24 items * 64 char per item */
+	ssize_t ret;
+
+	buf = kzalloc(bufsz, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	pos += scnprintf(buf + pos, bufsz - pos,
+			"Interrupt Statistics Report:\n");
+
+	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
+		isr_stats->hw);
+	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
+		isr_stats->sw);
+	if (isr_stats->sw || isr_stats->hw) {
+		pos += scnprintf(buf + pos, bufsz - pos,
+			"\tLast Restarting Code:  0x%X\n",
+			isr_stats->err_code);
+	}
+#ifdef CONFIG_IWLWIFI_DEBUG
+	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
+		isr_stats->sch);
+	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
+		isr_stats->alive);
+#endif
+	pos += scnprintf(buf + pos, bufsz - pos,
+		"HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
+		isr_stats->ctkill);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
+		isr_stats->wakeup);
+
+	pos += scnprintf(buf + pos, bufsz - pos,
+		"Rx command responses:\t\t %u\n", isr_stats->rx);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
+		isr_stats->tx);
+
+	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
+		isr_stats->unhandled);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
+					 const char __user *user_buf,
+					 size_t count, loff_t *ppos)
+{
+	struct iwl_trans *trans = file->private_data;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
+	u32 reset_flag;
+	int ret;
+
+	ret = kstrtou32_from_user(user_buf, count, 16, &reset_flag);
+	if (ret)
+		return ret;
+	if (reset_flag == 0)
+		memset(isr_stats, 0, sizeof(*isr_stats));
+
+	return count;
+}
+
+static ssize_t iwl_dbgfs_csr_write(struct file *file,
+				   const char __user *user_buf,
+				   size_t count, loff_t *ppos)
+{
+	struct iwl_trans *trans = file->private_data;
+
+	iwl_pcie_dump_csr(trans);
+
+	return count;
+}
+
+static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
+				     char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	struct iwl_trans *trans = file->private_data;
+	char *buf = NULL;
+	ssize_t ret;
+
+	ret = iwl_dump_fh(trans, &buf);
+	if (ret < 0)
+		return ret;
+	if (!buf)
+		return -EINVAL;
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
+	kfree(buf);
+	return ret;
+}
+
+static ssize_t iwl_dbgfs_rfkill_read(struct file *file,
+				     char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	struct iwl_trans *trans = file->private_data;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	char buf[100];
+	int pos;
+
+	pos = scnprintf(buf, sizeof(buf), "debug: %d\nhw: %d\n",
+			trans_pcie->debug_rfkill,
+			!(iwl_read32(trans, CSR_GP_CNTRL) &
+				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW));
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_rfkill_write(struct file *file,
+				      const char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	struct iwl_trans *trans = file->private_data;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	bool old = trans_pcie->debug_rfkill;
+	int ret;
+
+	ret = kstrtobool_from_user(user_buf, count, &trans_pcie->debug_rfkill);
+	if (ret)
+		return ret;
+	if (old == trans_pcie->debug_rfkill)
+		return count;
+	IWL_WARN(trans, "changing debug rfkill %d->%d\n",
+		 old, trans_pcie->debug_rfkill);
+	iwl_pcie_handle_rfkill_irq(trans);
+
+	return count;
+}
+
+DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
+DEBUGFS_READ_FILE_OPS(fh_reg);
+DEBUGFS_READ_FILE_OPS(rx_queue);
+DEBUGFS_READ_FILE_OPS(tx_queue);
+DEBUGFS_WRITE_FILE_OPS(csr);
+DEBUGFS_READ_WRITE_FILE_OPS(rfkill);
+
+/* Create the debugfs files and directories */
+int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
+{
+	struct dentry *dir = trans->dbgfs_dir;
+
+	DEBUGFS_ADD_FILE(rx_queue, dir, 0400);
+	DEBUGFS_ADD_FILE(tx_queue, dir, 0400);
+	DEBUGFS_ADD_FILE(interrupt, dir, 0600);
+	DEBUGFS_ADD_FILE(csr, dir, 0200);
+	DEBUGFS_ADD_FILE(fh_reg, dir, 0400);
+	DEBUGFS_ADD_FILE(rfkill, dir, 0600);
+	return 0;
+
+err:
+	IWL_ERR(trans, "failed to create the trans debugfs entry\n");
+	return -ENOMEM;
+}
+#endif /*CONFIG_IWLWIFI_DEBUGFS */
+
+static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u32 cmdlen = 0;
+	int i;
+
+	for (i = 0; i < trans_pcie->max_tbs; i++)
+		cmdlen += iwl_pcie_tfd_tb_get_len(trans, tfd, i);
+
+	return cmdlen;
+}
+
+static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
+				   struct iwl_fw_error_dump_data **data,
+				   int allocated_rb_nums)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
+	/* Dump RBs is supported only for pre-9000 devices (1 queue) */
+	struct iwl_rxq *rxq = &trans_pcie->rxq[0];
+	u32 i, r, j, rb_len = 0;
+
+	spin_lock(&rxq->lock);
+
+	r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
+
+	for (i = rxq->read, j = 0;
+	     i != r && j < allocated_rb_nums;
+	     i = (i + 1) & RX_QUEUE_MASK, j++) {
+		struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
+		struct iwl_fw_error_dump_rb *rb;
+
+		dma_unmap_page(trans->dev, rxb->page_dma, max_len,
+			       DMA_FROM_DEVICE);
+
+		rb_len += sizeof(**data) + sizeof(*rb) + max_len;
+
+		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
+		(*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
+		rb = (void *)(*data)->data;
+		rb->index = cpu_to_le32(i);
+		memcpy(rb->data, page_address(rxb->page), max_len);
+		/* remap the page so the RX path can reuse and free it */
+		rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0,
+						     max_len,
+						     DMA_FROM_DEVICE);
+
+		*data = iwl_fw_error_next_data(*data);
+	}
+
+	spin_unlock(&rxq->lock);
+
+	return rb_len;
+}
+
+/* number of bytes of CSR register space to dump */
+#define IWL_CSR_TO_DUMP (0x250)
+
+static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
+				   struct iwl_fw_error_dump_data **data)
+{
+	u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP;
+	__le32 *val;
+	int i;
+
+	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
+	(*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);
+	val = (void *)(*data)->data;
+
+	for (i = 0; i < IWL_CSR_TO_DUMP; i += 4)
+		*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
+
+	*data = iwl_fw_error_next_data(*data);
+
+	return csr_len;
+}
+
+static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
+				       struct iwl_fw_error_dump_data **data)
+{
+	u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
+	unsigned long flags;
+	__le32 *val;
+	int i;
+
+	if (!iwl_trans_grab_nic_access(trans, &flags))
+		return 0;
+
+	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
+	(*data)->len = cpu_to_le32(fh_regs_len);
+	val = (void *)(*data)->data;
+
+	if (!trans->cfg->gen2)
+		for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND;
+		     i += sizeof(u32))
+			*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
+	else
+		for (i = FH_MEM_LOWER_BOUND_GEN2; i < FH_MEM_UPPER_BOUND_GEN2;
+		     i += sizeof(u32))
+			*val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans,
+								      i));
+
+	iwl_trans_release_nic_access(trans, &flags);
+
+	*data = iwl_fw_error_next_data(*data);
+
+	return sizeof(**data) + fh_regs_len;
+}
+
+static u32
+iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
+				 struct iwl_fw_error_dump_fw_mon *fw_mon_data,
+				 u32 monitor_len)
+{
+	u32 buf_size_in_dwords = (monitor_len >> 2);
+	u32 *buffer = (u32 *)fw_mon_data->data;
+	unsigned long flags;
+	u32 i;
+
+	if (!iwl_trans_grab_nic_access(trans, &flags))
+		return 0;
+
+	iwl_write_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
+	for (i = 0; i < buf_size_in_dwords; i++)
+		buffer[i] = iwl_read_prph_no_grab(trans,
+				MON_DMARB_RD_DATA_ADDR);
+	iwl_write_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0);
+
+	iwl_trans_release_nic_access(trans, &flags);
+
+	return monitor_len;
+}
+
+static u32
+iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
+			    struct iwl_fw_error_dump_data **data,
+			    u32 monitor_len)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u32 len = 0;
+
+	if ((trans_pcie->fw_mon_page &&
+	     trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
+	    trans->dbg_dest_tlv) {
+		struct iwl_fw_error_dump_fw_mon *fw_mon_data;
+		u32 base, write_ptr, wrap_cnt;
+
+		/* If there was a dest TLV - use the values from there */
+		if (trans->dbg_dest_tlv) {
+			write_ptr =
+				le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
+			wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
+			base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
+		} else {
+			base = MON_BUFF_BASE_ADDR;
+			write_ptr = MON_BUFF_WRPTR;
+			wrap_cnt = MON_BUFF_CYCLE_CNT;
+		}
+
+		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
+		fw_mon_data = (void *)(*data)->data;
+		fw_mon_data->fw_mon_wr_ptr =
+			cpu_to_le32(iwl_read_prph(trans, write_ptr));
+		fw_mon_data->fw_mon_cycle_cnt =
+			cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
+		fw_mon_data->fw_mon_base_ptr =
+			cpu_to_le32(iwl_read_prph(trans, base));
+
+		len += sizeof(**data) + sizeof(*fw_mon_data);
+		if (trans_pcie->fw_mon_page) {
+			/*
+			 * The firmware is now asserted, it won't write anything
+			 * to the buffer. CPU can take ownership to fetch the
+			 * data. The buffer will be handed back to the device
+			 * before the firmware will be restarted.
+			 */
+			dma_sync_single_for_cpu(trans->dev,
+						trans_pcie->fw_mon_phys,
+						trans_pcie->fw_mon_size,
+						DMA_FROM_DEVICE);
+			memcpy(fw_mon_data->data,
+			       page_address(trans_pcie->fw_mon_page),
+			       trans_pcie->fw_mon_size);
+
+			monitor_len = trans_pcie->fw_mon_size;
+		} else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
+			/*
+			 * Update pointers to reflect actual values after
+			 * shifting
+			 */
+			if (trans->dbg_dest_tlv->version) {
+				base = (iwl_read_prph(trans, base) &
+					IWL_LDBG_M2S_BUF_BA_MSK) <<
+				       trans->dbg_dest_tlv->base_shift;
+				base *= IWL_M2S_UNIT_SIZE;
+				base += trans->cfg->smem_offset;
+			} else {
+				base = iwl_read_prph(trans, base) <<
+				       trans->dbg_dest_tlv->base_shift;
+			}
+
+			iwl_trans_read_mem(trans, base, fw_mon_data->data,
+					   monitor_len / sizeof(u32));
+		} else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
+			monitor_len =
+				iwl_trans_pci_dump_marbh_monitor(trans,
+								 fw_mon_data,
+								 monitor_len);
+		} else {
+			/* Didn't match anything - output no monitor data */
+			monitor_len = 0;
+		}
+
+		len += monitor_len;
+		(*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
+	}
+
+	return len;
+}
+
+static struct iwl_trans_dump_data
+*iwl_trans_pcie_dump_data(struct iwl_trans *trans,
+			  const struct iwl_fw_dbg_trigger_tlv *trigger)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_fw_error_dump_data *data;
+	struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue];
+	struct iwl_fw_error_dump_txcmd *txcmd;
+	struct iwl_trans_dump_data *dump_data;
+	u32 len, num_rbs = 0;
+	u32 monitor_len;
+	int i, ptr;
+	bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
+			!trans->cfg->mq_rx_supported &&
+			trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
+
+	/* transport dump header */
+	len = sizeof(*dump_data);
+
+	/* host commands */
+	len += sizeof(*data) +
+		cmdq->n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);
+
+	/* FW monitor */
+	if (trans_pcie->fw_mon_page) {
+		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
+		       trans_pcie->fw_mon_size;
+		monitor_len = trans_pcie->fw_mon_size;
+	} else if (trans->dbg_dest_tlv) {
+		u32 base, end, cfg_reg;
+
+		if (trans->dbg_dest_tlv->version == 1) {
+			cfg_reg = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
+			cfg_reg = iwl_read_prph(trans, cfg_reg);
+			base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) <<
+				trans->dbg_dest_tlv->base_shift;
+			base *= IWL_M2S_UNIT_SIZE;
+			base += trans->cfg->smem_offset;
+
+			monitor_len =
+				(cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >>
+				trans->dbg_dest_tlv->end_shift;
+			monitor_len *= IWL_M2S_UNIT_SIZE;
+		} else {
+			base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
+			end = le32_to_cpu(trans->dbg_dest_tlv->end_reg);
+
+			base = iwl_read_prph(trans, base) <<
+			       trans->dbg_dest_tlv->base_shift;
+			end = iwl_read_prph(trans, end) <<
+			      trans->dbg_dest_tlv->end_shift;
+
+			/* Make "end" point to the actual end */
+			if (trans->cfg->device_family >=
+			    IWL_DEVICE_FAMILY_8000 ||
+			    trans->dbg_dest_tlv->monitor_mode == MARBH_MODE)
+				end += (1 << trans->dbg_dest_tlv->end_shift);
+			monitor_len = end - base;
+		}
+		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
+		       monitor_len;
+	} else {
+		monitor_len = 0;
+	}
+
+	if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) {
+		if (!(trans->dbg_dump_mask &
+		      BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)))
+			return NULL;
+
+		dump_data = vzalloc(len);
+		if (!dump_data)
+			return NULL;
+
+		data = (void *)dump_data->data;
+		len = iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
+		dump_data->len = len;
+
+		return dump_data;
+	}
+
+	/* CSR registers */
+	if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
+		len += sizeof(*data) + IWL_CSR_TO_DUMP;
+
+	/* FH registers */
+	if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) {
+		if (trans->cfg->gen2)
+			len += sizeof(*data) +
+			       (FH_MEM_UPPER_BOUND_GEN2 -
+				FH_MEM_LOWER_BOUND_GEN2);
+		else
+			len += sizeof(*data) +
+			       (FH_MEM_UPPER_BOUND -
+				FH_MEM_LOWER_BOUND);
+	}
+
+	if (dump_rbs) {
+		/* Dump RBs is supported only for pre-9000 devices (1 queue) */
+		struct iwl_rxq *rxq = &trans_pcie->rxq[0];
+		/* RBs */
+		num_rbs =
+			le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq))
+			& 0x0FFF;
+		num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
+		len += num_rbs * (sizeof(*data) +
+				  sizeof(struct iwl_fw_error_dump_rb) +
+				  (PAGE_SIZE << trans_pcie->rx_page_order));
+	}
+
+	/* Paged memory for gen2 HW */
+	if (trans->cfg->gen2 &&
+	    trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
+		for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++)
+			len += sizeof(*data) +
+			       sizeof(struct iwl_fw_error_dump_paging) +
+			       trans_pcie->init_dram.paging[i].size;
+
+	dump_data = vzalloc(len);
+	if (!dump_data)
+		return NULL;
+
+	len = 0;
+	data = (void *)dump_data->data;
+
+	if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD)) {
+		u16 tfd_size = trans_pcie->tfd_size;
+
+		data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
+		txcmd = (void *)data->data;
+		spin_lock_bh(&cmdq->lock);
+		ptr = cmdq->write_ptr;
+		for (i = 0; i < cmdq->n_window; i++) {
+			u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr);
+			u32 caplen, cmdlen;
+
+			cmdlen = iwl_trans_pcie_get_cmdlen(trans,
+							   cmdq->tfds +
+							   tfd_size * ptr);
+			caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
+
+			if (cmdlen) {
+				len += sizeof(*txcmd) + caplen;
+				txcmd->cmdlen = cpu_to_le32(cmdlen);
+				txcmd->caplen = cpu_to_le32(caplen);
+				memcpy(txcmd->data, cmdq->entries[idx].cmd,
+				       caplen);
+				txcmd = (void *)((u8 *)txcmd->data + caplen);
+			}
+
+			ptr = iwl_queue_dec_wrap(trans, ptr);
+		}
+		spin_unlock_bh(&cmdq->lock);
+
+		data->len = cpu_to_le32(len);
+		len += sizeof(*data);
+		data = iwl_fw_error_next_data(data);
+	}
+
+	if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
+		len += iwl_trans_pcie_dump_csr(trans, &data);
+	if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS))
+		len += iwl_trans_pcie_fh_regs_dump(trans, &data);
+	if (dump_rbs)
+		len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
+
+	/* Paged memory for gen2 HW */
+	if (trans->cfg->gen2 &&
+	    trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
+		for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++) {
+			struct iwl_fw_error_dump_paging *paging;
+			dma_addr_t addr =
+				trans_pcie->init_dram.paging[i].physical;
+			u32 page_len = trans_pcie->init_dram.paging[i].size;
+
+			data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
+			data->len = cpu_to_le32(sizeof(*paging) + page_len);
+			paging = (void *)data->data;
+			paging->index = cpu_to_le32(i);
+			dma_sync_single_for_cpu(trans->dev, addr, page_len,
+						DMA_BIDIRECTIONAL);
+			memcpy(paging->data,
+			       trans_pcie->init_dram.paging[i].block, page_len);
+			data = iwl_fw_error_next_data(data);
+
+			len += sizeof(*data) + sizeof(*paging) + page_len;
+		}
+	}
+	if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
+		len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
+
+	dump_data->len = len;
+
+	return dump_data;
+}
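+
+/*
+ * Illustration (not part of the driver): the dump buffer built above is a
+ * sequence of TLV-style records. A hypothetical consumer would walk it
+ * like this, assuming the iwl_fw_error_dump_data layout used above:
+ *
+ *	struct iwl_fw_error_dump_data *d = (void *)dump_data->data;
+ *
+ *	while ((u8 *)d < (u8 *)dump_data->data + dump_data->len) {
+ *		handle(le32_to_cpu(d->type), d->data, le32_to_cpu(d->len));
+ *		d = iwl_fw_error_next_data(d);
+ *	}
+ */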
+
+#ifdef CONFIG_PM_SLEEP
+static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
+{
+	if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 &&
+	    (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3))
+		return iwl_pci_fw_enter_d0i3(trans);
+
+	return 0;
+}
+
+static void iwl_trans_pcie_resume(struct iwl_trans *trans)
+{
+	if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 &&
+	    (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3))
+		iwl_pci_fw_exit_d0i3(trans);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#define IWL_TRANS_COMMON_OPS						\
+	.op_mode_leave = iwl_trans_pcie_op_mode_leave,			\
+	.write8 = iwl_trans_pcie_write8,				\
+	.write32 = iwl_trans_pcie_write32,				\
+	.read32 = iwl_trans_pcie_read32,				\
+	.read_prph = iwl_trans_pcie_read_prph,				\
+	.write_prph = iwl_trans_pcie_write_prph,			\
+	.read_mem = iwl_trans_pcie_read_mem,				\
+	.write_mem = iwl_trans_pcie_write_mem,				\
+	.configure = iwl_trans_pcie_configure,				\
+	.set_pmi = iwl_trans_pcie_set_pmi,				\
+	.sw_reset = iwl_trans_pcie_sw_reset,				\
+	.grab_nic_access = iwl_trans_pcie_grab_nic_access,		\
+	.release_nic_access = iwl_trans_pcie_release_nic_access,	\
+	.set_bits_mask = iwl_trans_pcie_set_bits_mask,			\
+	.ref = iwl_trans_pcie_ref,					\
+	.unref = iwl_trans_pcie_unref,					\
+	.dump_data = iwl_trans_pcie_dump_data,				\
+	.dump_regs = iwl_trans_pcie_dump_regs,				\
+	.d3_suspend = iwl_trans_pcie_d3_suspend,			\
+	.d3_resume = iwl_trans_pcie_d3_resume
+
+#ifdef CONFIG_PM_SLEEP
+#define IWL_TRANS_PM_OPS						\
+	.suspend = iwl_trans_pcie_suspend,				\
+	.resume = iwl_trans_pcie_resume,
+#else
+#define IWL_TRANS_PM_OPS
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct iwl_trans_ops trans_ops_pcie = {
+	IWL_TRANS_COMMON_OPS,
+	IWL_TRANS_PM_OPS
+	.start_hw = iwl_trans_pcie_start_hw,
+	.fw_alive = iwl_trans_pcie_fw_alive,
+	.start_fw = iwl_trans_pcie_start_fw,
+	.stop_device = iwl_trans_pcie_stop_device,
+
+	.send_cmd = iwl_trans_pcie_send_hcmd,
+
+	.tx = iwl_trans_pcie_tx,
+	.reclaim = iwl_trans_pcie_reclaim,
+
+	.txq_disable = iwl_trans_pcie_txq_disable,
+	.txq_enable = iwl_trans_pcie_txq_enable,
+
+	.txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode,
+
+	.wait_tx_queues_empty = iwl_trans_pcie_wait_txqs_empty,
+
+	.freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer,
+	.block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs,
+};
+
+static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
+	IWL_TRANS_COMMON_OPS,
+	IWL_TRANS_PM_OPS
+	.start_hw = iwl_trans_pcie_start_hw,
+	.fw_alive = iwl_trans_pcie_gen2_fw_alive,
+	.start_fw = iwl_trans_pcie_gen2_start_fw,
+	.stop_device = iwl_trans_pcie_gen2_stop_device,
+
+	.send_cmd = iwl_trans_pcie_gen2_send_hcmd,
+
+	.tx = iwl_trans_pcie_gen2_tx,
+	.reclaim = iwl_trans_pcie_reclaim,
+
+	.txq_alloc = iwl_trans_pcie_dyn_txq_alloc,
+	.txq_free = iwl_trans_pcie_dyn_txq_free,
+	.wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
+	.rxq_dma_data = iwl_trans_pcie_rxq_dma_data,
+};
+
+struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
+				       const struct pci_device_id *ent,
+				       const struct iwl_cfg *cfg)
+{
+	struct iwl_trans_pcie *trans_pcie;
+	struct iwl_trans *trans;
+	int ret, addr_size;
+
+	ret = pcim_enable_device(pdev);
+	if (ret)
+		return ERR_PTR(ret);
+
+	if (cfg->gen2)
+		trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
+					&pdev->dev, cfg, &trans_ops_pcie_gen2);
+	else
+		trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
+					&pdev->dev, cfg, &trans_ops_pcie);
+	if (!trans)
+		return ERR_PTR(-ENOMEM);
+
+	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	trans_pcie->trans = trans;
+	trans_pcie->opmode_down = true;
+	spin_lock_init(&trans_pcie->irq_lock);
+	spin_lock_init(&trans_pcie->reg_lock);
+	mutex_init(&trans_pcie->mutex);
+	init_waitqueue_head(&trans_pcie->ucode_write_waitq);
+	trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
+	if (!trans_pcie->tso_hdr_page) {
+		ret = -ENOMEM;
+		goto out_no_pci;
+	}
+
+	if (!cfg->base_params->pcie_l1_allowed) {
+		/*
+		 * W/A - seems to solve weird behavior. We need to remove this
+		 * if we don't want to stay in L1 all the time; that wastes a
+		 * lot of power.
+		 */
+		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
+				       PCIE_LINK_STATE_L1 |
+				       PCIE_LINK_STATE_CLKPM);
+	}
+
+	if (cfg->use_tfh) {
+		addr_size = 64;
+		trans_pcie->max_tbs = IWL_TFH_NUM_TBS;
+		trans_pcie->tfd_size = sizeof(struct iwl_tfh_tfd);
+	} else {
+		addr_size = 36;
+		trans_pcie->max_tbs = IWL_NUM_OF_TBS;
+		trans_pcie->tfd_size = sizeof(struct iwl_tfd);
+	}
+	trans->max_skb_frags = IWL_PCIE_MAX_FRAGS(trans_pcie);
+
+	pci_set_master(pdev);
+
+	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size));
+	if (!ret)
+		ret = pci_set_consistent_dma_mask(pdev,
+						  DMA_BIT_MASK(addr_size));
+	if (ret) {
+		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (!ret)
+			ret = pci_set_consistent_dma_mask(pdev,
+							  DMA_BIT_MASK(32));
+		/* both attempts failed: */
+		if (ret) {
+			dev_err(&pdev->dev, "No suitable DMA available\n");
+			goto out_no_pci;
+		}
+	}
+
+	ret = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME);
+	if (ret) {
+		dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n");
+		goto out_no_pci;
+	}
+
+	trans_pcie->hw_base = pcim_iomap_table(pdev)[0];
+	if (!trans_pcie->hw_base) {
+		dev_err(&pdev->dev, "pcim_iomap_table failed\n");
+		ret = -ENODEV;
+		goto out_no_pci;
+	}
+
+	/* We disable the RETRY_TIMEOUT register (0x41) to keep
+	 * PCI Tx retries from interfering with C3 CPU state */
+	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
+	trans_pcie->pci_dev = pdev;
+	iwl_disable_interrupts(trans);
+
+	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
+	/*
+	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
+	 * changed, and now the revision step also includes bits 0-1 (no more
+	 * "dash" value). To keep hw_rev backwards compatible, we'll store it
+	 * in the old format.
+	 */
+	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_8000) {
+		unsigned long flags;
+
+		trans->hw_rev = (trans->hw_rev & 0xfff0) |
+				(CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
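+		/*
+		 * Illustrative, assuming CSR_HW_REV_STEP() extracts bits 2-3:
+		 * the << 2 inside the macro call moves the new step field
+		 * (bits 0-1) into the position the macro expects, and the
+		 * final << 2 stores it back where the old "dash" value lived,
+		 * while the & 0xfff0 clears the stale low nibble.
+		 */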
+
+		ret = iwl_pcie_prepare_card_hw(trans);
+		if (ret) {
+			IWL_WARN(trans, "Exit HW not ready\n");
+			goto out_no_pci;
+		}
+
+		/*
+		 * In order to recognize the C step, the driver should read
+		 * the chip version id located in the AUX bus MISC address
+		 * space.
+		 */
+		iwl_set_bit(trans, CSR_GP_CNTRL,
+			    BIT(trans->cfg->csr->flag_init_done));
+		udelay(2);
+
+		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+				   BIT(trans->cfg->csr->flag_mac_clock_ready),
+				   BIT(trans->cfg->csr->flag_mac_clock_ready),
+				   25000);
+		if (ret < 0) {
+			IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n");
+			goto out_no_pci;
+		}
+
+		if (iwl_trans_grab_nic_access(trans, &flags)) {
+			u32 hw_step;
+
+			hw_step = iwl_read_prph_no_grab(trans, WFPM_CTRL_REG);
+			hw_step |= ENABLE_WFPM;
+			iwl_write_prph_no_grab(trans, WFPM_CTRL_REG, hw_step);
+			hw_step = iwl_read_prph_no_grab(trans, AUX_MISC_REG);
+			hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF;
+			if (hw_step == 0x3)
+				trans->hw_rev = (trans->hw_rev & 0xFFFFFFF3) |
+						(SILICON_C_STEP << 2);
+			iwl_trans_release_nic_access(trans, &flags);
+		}
+	}
+
+	/*
+	 * 9000-series integrated A-step has a problem with suspend/resume
+	 * and sometimes even causes the whole platform to get stuck. This
+	 * workaround makes the hardware not go into the problematic state.
+	 */
+	if (trans->cfg->integrated &&
+	    trans->cfg->device_family == IWL_DEVICE_FAMILY_9000 &&
+	    CSR_HW_REV_STEP(trans->hw_rev) == SILICON_A_STEP)
+		iwl_set_bit(trans, CSR_HOST_CHICKEN,
+			    CSR_HOST_CHICKEN_PM_IDLE_SRC_DIS_SB_PME);
+
+#if IS_ENABLED(CONFIG_IWLMVM)
+	trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID);
+
+	if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+	    CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
+		u32 hw_status;
+
+		hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS);
+		if (CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_B_STEP)
+			/*
+			 * B step fw is the same for physical card and FPGA
+			 */
+			trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
+		else if ((hw_status & UMAG_GEN_HW_IS_FPGA) &&
+			 CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_A_STEP) {
+			trans->cfg = &iwl22000_2ax_cfg_qnj_hr_a0_f0;
+		} else {
+			/*
+			 * A step, no FPGA
+			 */
+			trans->cfg = &iwl22000_2ac_cfg_hr;
+		}
+	}
+#endif
+
+	iwl_pcie_set_interrupt_capa(pdev, trans);
+	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
+	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
+		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
+
+	/* Initialize the wait queue for commands */
+	init_waitqueue_head(&trans_pcie->wait_command_queue);
+
+	init_waitqueue_head(&trans_pcie->d0i3_waitq);
+
+	if (trans_pcie->msix_enabled) {
+		ret = iwl_pcie_init_msix_handler(pdev, trans_pcie);
+		if (ret)
+			goto out_no_pci;
+	} else {
+		ret = iwl_pcie_alloc_ict(trans);
+		if (ret)
+			goto out_no_pci;
+
+		ret = devm_request_threaded_irq(&pdev->dev, pdev->irq,
+						iwl_pcie_isr,
+						iwl_pcie_irq_handler,
+						IRQF_SHARED, DRV_NAME, trans);
+		if (ret) {
+			IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
+			goto out_free_ict;
+		}
+		trans_pcie->inta_mask = CSR_INI_SET_MASK;
+	}
+
+	trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
+						   WQ_HIGHPRI | WQ_UNBOUND, 1);
+	INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
+
+#ifdef CONFIG_IWLWIFI_PCIE_RTPM
+	trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3;
+#else
+	trans->runtime_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+#endif /* CONFIG_IWLWIFI_PCIE_RTPM */
+
+	return trans;
+
+out_free_ict:
+	iwl_pcie_free_ict(trans);
+out_no_pci:
+	free_percpu(trans_pcie->tso_hdr_page);
+	iwl_trans_free(trans);
+	return ERR_PTR(ret);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
new file mode 100644
index 0000000..b99f33f
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -0,0 +1,1291 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018        Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <linux/pm_runtime.h>
+#include <net/tso.h>
+#include <linux/tcp.h>
+
+#include "iwl-debug.h"
+#include "iwl-csr.h"
+#include "iwl-io.h"
+#include "internal.h"
+#include "fw/api/tx.h"
+
+/*
+ * iwl_pcie_gen2_tx_stop - Stop all Tx DMA channels
+ */
+void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int txq_id;
+
+	/*
+	 * This function can be called before the op_mode disabled the
+	 * queues. This happens when we have an rfkill interrupt.
+	 * Since we stop Tx altogether - mark the queues as stopped.
+	 */
+	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
+	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+
+	/* Unmap DMA from host system and free skb's */
+	for (txq_id = 0; txq_id < ARRAY_SIZE(trans_pcie->txq); txq_id++) {
+		if (!trans_pcie->txq[txq_id])
+			continue;
+		iwl_pcie_gen2_txq_unmap(trans, txq_id);
+	}
+}
+
+/*
+ * iwl_pcie_txq_update_byte_tbl - Set up entry in Tx byte-count array
+ */
+static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
+					  struct iwl_txq *txq, u16 byte_cnt,
+					  int num_tbs)
+{
+	struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
+	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
+	struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
+	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
+	u8 filled_tfd_size, num_fetch_chunks;
+	u16 len = byte_cnt;
+	__le16 bc_ent;
+
+	if (trans_pcie->bc_table_dword)
+		len = DIV_ROUND_UP(len, 4);
+
+	if (WARN_ON(len > 0xFFF || idx >= txq->n_window))
+		return;
+
+	filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
+				   num_tbs * sizeof(struct iwl_tfh_tb);
+	/*
+	 * filled_tfd_size contains the number of filled bytes in the TFD.
+	 * Dividing it by 64 will give the number of chunks to fetch
+	 * to SRAM - 0 for one chunk, 1 for 2 and so on.
+	 * If, for example, the TFD contains only 3 TBs then 32 bytes
+	 * of the TFD are used, and only one chunk of 64 bytes should
+	 * be fetched.
+	 */
+	num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
+
+	bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
+	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+		scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
+	else
+		scd_bc_tbl->tfd_offset[idx] = bc_ent;
+}
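+
+/*
+ * Worked example (illustrative): a 300-byte frame in a TFD with 3 TBs,
+ * with bc_table_dword set, gives len = DIV_ROUND_UP(300, 4) = 75; the
+ * filled TFD size is well under 64 bytes, so num_fetch_chunks = 0 and
+ * bc_ent = cpu_to_le16(75 | (0 << 12)).
+ */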
+
+/*
+ * iwl_pcie_gen2_txq_inc_wr_ptr - Send new write index to hardware
+ */
+static void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
+					 struct iwl_txq *txq)
+{
+	lockdep_assert_held(&txq->lock);
+
+	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);
+
+	/*
+	 * if not in power-save mode, uCode will never sleep when we're
+	 * trying to tx (during RFKILL, we're not trying to tx).
+	 */
+	iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
+}
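+
+/*
+ * Illustrative: HBUS_TARG_WRPTR here packs the queue id in the upper
+ * halfword, so queue 4 at write index 25 writes 0x00040019.
+ */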
+
+static u8 iwl_pcie_gen2_get_num_tbs(struct iwl_trans *trans,
+				    struct iwl_tfh_tfd *tfd)
+{
+	return le16_to_cpu(tfd->num_tbs) & 0x1f;
+}
+
+static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,
+				    struct iwl_cmd_meta *meta,
+				    struct iwl_tfh_tfd *tfd)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int i, num_tbs;
+
+	/* Sanity check on number of chunks */
+	num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd);
+
+	if (num_tbs > trans_pcie->max_tbs) {
+		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
+		return;
+	}
+
+	/* first TB is never freed - it's the bidirectional DMA data */
+	for (i = 1; i < num_tbs; i++) {
+		if (meta->tbs & BIT(i))
+			dma_unmap_page(trans->dev,
+				       le64_to_cpu(tfd->tbs[i].addr),
+				       le16_to_cpu(tfd->tbs[i].tb_len),
+				       DMA_TO_DEVICE);
+		else
+			dma_unmap_single(trans->dev,
+					 le64_to_cpu(tfd->tbs[i].addr),
+					 le16_to_cpu(tfd->tbs[i].tb_len),
+					 DMA_TO_DEVICE);
+	}
+
+	tfd->num_tbs = 0;
+}
+
+static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
+{
+	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
+	 * idx is bounded by n_window
+	 */
+	int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
+
+	lockdep_assert_held(&txq->lock);
+
+	iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
+				iwl_pcie_get_tfd(trans, txq, idx));
+
+	/* free SKB */
+	if (txq->entries) {
+		struct sk_buff *skb;
+
+		skb = txq->entries[idx].skb;
+
+		/* Can be called from irqs-disabled context
+		 * If skb is not NULL, it means that the whole queue is being
+		 * freed and that the queue is not empty - free the skb
+		 */
+		if (skb) {
+			iwl_op_mode_free_skb(trans->op_mode, skb);
+			txq->entries[idx].skb = NULL;
+		}
+	}
+}
+
+static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
+				struct iwl_tfh_tfd *tfd, dma_addr_t addr,
+				u16 len)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd);
+	struct iwl_tfh_tb *tb = &tfd->tbs[idx];
+
+	/* Each TFD can point to a maximum of max_tbs Tx buffers */
+	if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->max_tbs) {
+		IWL_ERR(trans, "Error can not send more than %d chunks\n",
+			trans_pcie->max_tbs);
+		return -EINVAL;
+	}
+
+	put_unaligned_le64(addr, &tb->addr);
+	tb->tb_len = cpu_to_le16(len);
+
+	tfd->num_tbs = cpu_to_le16(idx + 1);
+
+	return idx;
+}
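+
+/*
+ * Note (illustrative): the returned index is the slot just filled, so
+ * callers that map skb fragments record it in the meta bitmap, e.g.
+ *
+ *	tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len);
+ *	out_meta->tbs |= BIT(tb_idx);
+ *
+ * which later tells iwl_pcie_gen2_tfd_unmap() to use dma_unmap_page()
+ * rather than dma_unmap_single() for that TB.
+ */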
+
+static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
+				     struct sk_buff *skb,
+				     struct iwl_tfh_tfd *tfd, int start_len,
+				     u8 hdr_len, struct iwl_device_cmd *dev_cmd)
+{
+#ifdef CONFIG_INET
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
+	struct ieee80211_hdr *hdr = (void *)skb->data;
+	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
+	unsigned int mss = skb_shinfo(skb)->gso_size;
+	u16 length, iv_len, amsdu_pad;
+	u8 *start_hdr;
+	struct iwl_tso_hdr_page *hdr_page;
+	struct page **page_ptr;
+	struct tso_t tso;
+
+	/* if the packet is protected, then it must be CCMP or GCMP */
+	iv_len = ieee80211_has_protected(hdr->frame_control) ?
+		IEEE80211_CCMP_HDR_LEN : 0;
+
+	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
+			     &dev_cmd->hdr, start_len, 0);
+
+	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
+	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
+	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
+	amsdu_pad = 0;
+
+	/* total amount of header we may need for this A-MSDU */
+	hdr_room = DIV_ROUND_UP(total_len, mss) *
+		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
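+	/*
+	 * Worked example (illustrative): with total_len = 4200, mss = 1400
+	 * and snap_ip_tcp_hdrlen = 60, three subframes are needed, so up to
+	 * 3 * (3 + 60 + 14) + iv_len bytes of rebuilt headers may be
+	 * consumed from the TSO header page.
+	 */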
+
+	/* Our device supports 9 segments at most; they will fit in one page */
+	hdr_page = get_page_hdr(trans, hdr_room);
+	if (!hdr_page)
+		return -ENOMEM;
+
+	get_page(hdr_page->page);
+	start_hdr = hdr_page->pos;
+	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+	*page_ptr = hdr_page->page;
+	memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
+	hdr_page->pos += iv_len;
+
+	/*
+	 * Pull the ieee80211 header + IV to be able to use TSO core;
+	 * we will restore it for the tx_status flow.
+	 */
+	skb_pull(skb, hdr_len + iv_len);
+
+	/*
+	 * Remove the length of all the headers that we don't actually
+	 * have in the MPDU by themselves, but that we duplicate into
+	 * all the different MSDUs inside the A-MSDU.
+	 */
+	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
+
+	tso_start(skb, &tso);
+
+	while (total_len) {
+		/* this is the data left for this subframe */
+		unsigned int data_left = min_t(unsigned int, mss, total_len);
+		struct sk_buff *csum_skb = NULL;
+		unsigned int tb_len;
+		dma_addr_t tb_phys;
+		u8 *subf_hdrs_start = hdr_page->pos;
+
+		total_len -= data_left;
+
+		memset(hdr_page->pos, 0, amsdu_pad);
+		hdr_page->pos += amsdu_pad;
+		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
+				  data_left)) & 0x3;
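+		/*
+		 * Illustrative: padding keeps each subframe 4-byte aligned;
+		 * e.g. if ethhdr + SNAP/IP/TCP + data totals 1502 bytes, the
+		 * pad before the next subframe is (4 - 1502) & 3 = 2.
+		 */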
+		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
+		hdr_page->pos += ETH_ALEN;
+		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
+		hdr_page->pos += ETH_ALEN;
+
+		length = snap_ip_tcp_hdrlen + data_left;
+		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
+		hdr_page->pos += sizeof(length);
+
+		/*
+		 * This will copy the SNAP as well which will be considered
+		 * as MAC header.
+		 */
+		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
+
+		hdr_page->pos += snap_ip_tcp_hdrlen;
+
+		tb_len = hdr_page->pos - start_hdr;
+		tb_phys = dma_map_single(trans->dev, start_hdr,
+					 tb_len, DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
+			dev_kfree_skb(csum_skb);
+			goto out_err;
+		}
+		iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
+		trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr, tb_len);
+		/* add this subframe's headers' length to the tx_cmd */
+		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
+
+		/* prepare the start_hdr for the next subframe */
+		start_hdr = hdr_page->pos;
+
+		/* put the payload */
+		while (data_left) {
+			tb_len = min_t(unsigned int, tso.size, data_left);
+			tb_phys = dma_map_single(trans->dev, tso.data,
+						 tb_len, DMA_TO_DEVICE);
+			if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
+				dev_kfree_skb(csum_skb);
+				goto out_err;
+			}
+			iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
+			trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data,
+						       tb_len);
+
+			data_left -= tb_len;
+			tso_build_data(skb, &tso, tb_len);
+		}
+	}
+
+	/* re-add the WiFi header and IV */
+	skb_push(skb, hdr_len + iv_len);
+
+	return 0;
+
+out_err:
+#endif
+	return -EINVAL;
+}
+
+static struct
+iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
+					  struct iwl_txq *txq,
+					  struct iwl_device_cmd *dev_cmd,
+					  struct sk_buff *skb,
+					  struct iwl_cmd_meta *out_meta,
+					  int hdr_len,
+					  int tx_cmd_len)
+{
+	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
+	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
+	dma_addr_t tb_phys;
+	int len;
+	void *tb1_addr;
+
+	tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
+
+	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
+
+	/*
+	 * The second TB (tb1) points to the remainder of the TX command
+	 * and the 802.11 header - dword aligned size
+	 * (This calculation modifies the TX command, so do it before the
+	 * setup of the first TB)
+	 */
+	len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
+	      IWL_FIRST_TB_SIZE;
+
+	/* do not align A-MSDU to dword as the subframe header aligns it */
+
+	/* map the data for TB1 */
+	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
+	tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+		goto out_err;
+	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len);
+
+	if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
+				      len + IWL_FIRST_TB_SIZE,
+				      hdr_len, dev_cmd))
+		goto out_err;
+
+	/* building the A-MSDU might have changed this data, memcpy it now */
+	memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE);
+	return tfd;
+
+out_err:
+	iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
+	return NULL;
+}
+
+static struct
+iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
+				    struct iwl_txq *txq,
+				    struct iwl_device_cmd *dev_cmd,
+				    struct sk_buff *skb,
+				    struct iwl_cmd_meta *out_meta,
+				    int hdr_len,
+				    int tx_cmd_len)
+{
+	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
+	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
+	dma_addr_t tb_phys;
+	int i, len, tb1_len, tb2_len;
+	void *tb1_addr;
+
+	tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
+
+	/* The first TB points to bi-directional DMA data */
+	memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE);
+
+	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
+
+	/*
+	 * The second TB (tb1) points to the remainder of the TX command
+	 * and the 802.11 header - dword aligned size
+	 * (This calculation modifies the TX command, so do it before the
+	 * setup of the first TB)
+	 */
+	len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
+	      IWL_FIRST_TB_SIZE;
+
+	tb1_len = ALIGN(len, 4);
+
+	/* map the data for TB1 */
+	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
+	tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+		goto out_err;
+	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
+
+	/* set up TFD's third entry to point to remainder of skb's head */
+	tb2_len = skb_headlen(skb) - hdr_len;
+
+	if (tb2_len > 0) {
+		tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
+					 tb2_len, DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+			goto out_err;
+		iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len);
+	}
+
+	/* set up the remaining entries to point to the data */
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		int tb_idx;
+
+		if (!skb_frag_size(frag))
+			continue;
+
+		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
+					   skb_frag_size(frag), DMA_TO_DEVICE);
+
+		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+			goto out_err;
+		tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys,
+					      skb_frag_size(frag));
+
+		out_meta->tbs |= BIT(tb_idx);
+	}
+
+	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
+			     IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
+	trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len);
+
+	return tfd;
+
+out_err:
+	iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
+	return NULL;
+}
+
+static
+struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
+					    struct iwl_txq *txq,
+					    struct iwl_device_cmd *dev_cmd,
+					    struct sk_buff *skb,
+					    struct iwl_cmd_meta *out_meta)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
+	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
+	int len, hdr_len;
+	bool amsdu;
+
+	/* There must be data left over for TB1 or this code must be changed */
+	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
+
+	memset(tfd, 0, sizeof(*tfd));
+
+	if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
+		len = sizeof(struct iwl_tx_cmd_gen2);
+	else
+		len = sizeof(struct iwl_tx_cmd_gen3);
+
+	amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
+			(*ieee80211_get_qos_ctl(hdr) &
+			 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
+
+	hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+	if (amsdu)
+		return iwl_pcie_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
+						    out_meta, hdr_len, len);
+
+	return iwl_pcie_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
+				      hdr_len, len);
+}
+
+int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
+			   struct iwl_device_cmd *dev_cmd, int txq_id)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_cmd_meta *out_meta;
+	struct iwl_txq *txq = trans_pcie->txq[txq_id];
+	u16 cmd_len;
+	int idx;
+	void *tfd;
+
+	if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
+		      "TX on unused queue %d\n", txq_id))
+		return -EINVAL;
+
+	if (skb_is_nonlinear(skb) &&
+	    skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
+	    __skb_linearize(skb))
+		return -ENOMEM;
+
+	spin_lock(&txq->lock);
+
+	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+		struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
+			(void *)dev_cmd->payload;
+
+		cmd_len = le16_to_cpu(tx_cmd_gen3->len);
+	} else {
+		struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
+			(void *)dev_cmd->payload;
+
+		cmd_len = le16_to_cpu(tx_cmd_gen2->len);
+	}
+
+	if (iwl_queue_space(trans, txq) < txq->high_mark) {
+		iwl_stop_queue(trans, txq);
+
+		/* don't put the packet on the ring if there is no room */
+		if (unlikely(iwl_queue_space(trans, txq) < 3)) {
+			struct iwl_device_cmd **dev_cmd_ptr;
+
+			dev_cmd_ptr = (void *)((u8 *)skb->cb +
+					       trans_pcie->dev_cmd_offs);
+
+			*dev_cmd_ptr = dev_cmd;
+			__skb_queue_tail(&txq->overflow_q, skb);
+			spin_unlock(&txq->lock);
+			return 0;
+		}
+	}
+
+	idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
+
+	/* Set up driver data for this TFD */
+	txq->entries[idx].skb = skb;
+	txq->entries[idx].cmd = dev_cmd;
+
+	dev_cmd->hdr.sequence =
+		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
+			    INDEX_TO_SEQ(idx)));
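+	/*
+	 * Illustrative, assuming QUEUE_TO_SEQ()/INDEX_TO_SEQ() keep the
+	 * usual layout (queue id in the high bits, index in the low bits):
+	 * the firmware echoes this sequence in the Tx response, which is
+	 * how the reclaim path finds this exact entry again.
+	 */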
+
+	/* Set up first empty entry in queue's array of Tx/cmd buffers */
+	out_meta = &txq->entries[idx].meta;
+	out_meta->flags = 0;
+
+	tfd = iwl_pcie_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
+	if (!tfd) {
+		spin_unlock(&txq->lock);
+		return -1;
+	}
+
+	/* Set up entry for this TFD in Tx byte-count array */
+	iwl_pcie_gen2_update_byte_tbl(trans_pcie, txq, cmd_len,
+				      iwl_pcie_gen2_get_num_tbs(trans, tfd));
+
+	/* start timer if queue currently empty */
+	if (txq->read_ptr == txq->write_ptr) {
+		if (txq->wd_timeout)
+			mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+		IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id);
+		iwl_trans_ref(trans);
+	}
+
+	/* Tell device the write index *just past* this latest filled TFD */
+	txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
+	iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
+	/*
+	 * At this point the frame is "transmitted" successfully
+	 * and we will get a TX status notification eventually.
+	 */
+	spin_unlock(&txq->lock);
+	return 0;
+}
+
+/*************** HOST COMMAND QUEUE FUNCTIONS   *****/
+
+/*
+ * iwl_pcie_gen2_enqueue_hcmd - enqueue a uCode command
+ * @trans: transport private data
+ * @cmd: a pointer to the ucode command structure
+ *
+ * The function returns < 0 values to indicate the operation
+ * failed. On success, it returns the index (>= 0) of the command in
+ * the command queue.
+ */
+static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
+				      struct iwl_host_cmd *cmd)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+	struct iwl_device_cmd *out_cmd;
+	struct iwl_cmd_meta *out_meta;
+	unsigned long flags;
+	void *dup_buf = NULL;
+	dma_addr_t phys_addr;
+	int i, cmd_pos, idx;
+	u16 copy_size, cmd_size, tb0_size;
+	bool had_nocopy = false;
+	u8 group_id = iwl_cmd_groupid(cmd->id);
+	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
+	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
+	struct iwl_tfh_tfd *tfd;
+
+	copy_size = sizeof(struct iwl_cmd_header_wide);
+	cmd_size = sizeof(struct iwl_cmd_header_wide);
+
+	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
+		cmddata[i] = cmd->data[i];
+		cmdlen[i] = cmd->len[i];
+
+		if (!cmd->len[i])
+			continue;
+
+		/* need at least IWL_FIRST_TB_SIZE copied */
+		if (copy_size < IWL_FIRST_TB_SIZE) {
+			int copy = IWL_FIRST_TB_SIZE - copy_size;
+
+			if (copy > cmdlen[i])
+				copy = cmdlen[i];
+			cmdlen[i] -= copy;
+			cmddata[i] += copy;
+			copy_size += copy;
+		}
+
+		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
+			had_nocopy = true;
+			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
+				idx = -EINVAL;
+				goto free_dup_buf;
+			}
+		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
+			/*
+			 * This is also a chunk that isn't copied
+			 * to the static buffer so set had_nocopy.
+			 */
+			had_nocopy = true;
+
+			/* only allowed once */
+			if (WARN_ON(dup_buf)) {
+				idx = -EINVAL;
+				goto free_dup_buf;
+			}
+
+			dup_buf = kmemdup(cmddata[i], cmdlen[i],
+					  GFP_ATOMIC);
+			if (!dup_buf)
+				return -ENOMEM;
+		} else {
+			/* NOCOPY must not be followed by normal! */
+			if (WARN_ON(had_nocopy)) {
+				idx = -EINVAL;
+				goto free_dup_buf;
+			}
+			copy_size += cmdlen[i];
+		}
+		cmd_size += cmd->len[i];
+	}
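+
+	/*
+	 * Worked example (illustrative, assuming an 8-byte wide header and
+	 * IWL_FIRST_TB_SIZE = 20): a single 100-byte copied chunk ends with
+	 * copy_size = cmd_size = 108; had the chunk been NOCOPY, only the
+	 * 12 bytes stolen above for TB0 would count, i.e. copy_size = 20
+	 * while cmd_size stays 108.
+	 */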
+
+	/*
+	 * If any of the command structures end up being larger than the
+	 * TFD_MAX_PAYLOAD_SIZE and they aren't dynamically allocated into
+	 * separate TFDs, then we will need to increase the size of the buffers
+	 */
+	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
+		 "Command %s (%#x) is too large (%d bytes)\n",
+		 iwl_get_cmd_string(trans, cmd->id), cmd->id, copy_size)) {
+		idx = -EINVAL;
+		goto free_dup_buf;
+	}
+
+	spin_lock_bh(&txq->lock);
+
+	idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
+	tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
+	memset(tfd, 0, sizeof(*tfd));
+
+	if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+		spin_unlock_bh(&txq->lock);
+
+		IWL_ERR(trans, "No space in command queue\n");
+		iwl_op_mode_cmd_queue_full(trans->op_mode);
+		idx = -ENOSPC;
+		goto free_dup_buf;
+	}
+
+	out_cmd = txq->entries[idx].cmd;
+	out_meta = &txq->entries[idx].meta;
+
+	/* re-initialize to NULL */
+	memset(out_meta, 0, sizeof(*out_meta));
+	if (cmd->flags & CMD_WANT_SKB)
+		out_meta->source = cmd;
+
+	/* set up the header */
+	out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
+	out_cmd->hdr_wide.group_id = group_id;
+	out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
+	out_cmd->hdr_wide.length =
+		cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
+	out_cmd->hdr_wide.reserved = 0;
+	out_cmd->hdr_wide.sequence =
+		cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
+					 INDEX_TO_SEQ(txq->write_ptr));
+
+	cmd_pos = sizeof(struct iwl_cmd_header_wide);
+	copy_size = sizeof(struct iwl_cmd_header_wide);
+
+	/* and copy the data that needs to be copied */
+	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
+		int copy;
+
+		if (!cmd->len[i])
+			continue;
+
+		/* copy everything if not nocopy/dup */
+		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
+					   IWL_HCMD_DFL_DUP))) {
+			copy = cmd->len[i];
+
+			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
+			cmd_pos += copy;
+			copy_size += copy;
+			continue;
+		}
+
+		/*
+		 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
+		 * in total (for bi-directional DMA), but copy up to what
+		 * we can fit into the payload for debug dump purposes.
+		 */
+		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);
+
+		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
+		cmd_pos += copy;
+
+		/* However, keep copy_size accounted the proper way - we need it below */
+		if (copy_size < IWL_FIRST_TB_SIZE) {
+			copy = IWL_FIRST_TB_SIZE - copy_size;
+
+			if (copy > cmd->len[i])
+				copy = cmd->len[i];
+			copy_size += copy;
+		}
+	}
+
+	IWL_DEBUG_HC(trans,
+		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
+		     iwl_get_cmd_string(trans, cmd->id), group_id,
+		     out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
+		     cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);
+
+	/* start the TFD with the minimum copy bytes */
+	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
+	memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
+	iwl_pcie_gen2_set_tb(trans, tfd, iwl_pcie_get_first_tb_dma(txq, idx),
+			     tb0_size);
+
+	/* map first command fragment, if any remains */
+	if (copy_size > tb0_size) {
+		phys_addr = dma_map_single(trans->dev,
+					   ((u8 *)&out_cmd->hdr) + tb0_size,
+					   copy_size - tb0_size,
+					   DMA_TO_DEVICE);
+		if (dma_mapping_error(trans->dev, phys_addr)) {
+			idx = -ENOMEM;
+			iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
+			goto out;
+		}
+		iwl_pcie_gen2_set_tb(trans, tfd, phys_addr,
+				     copy_size - tb0_size);
+	}
+
+	/* map the remaining (adjusted) nocopy/dup fragments */
+	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
+		const void *data = cmddata[i];
+
+		if (!cmdlen[i])
+			continue;
+		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
+					   IWL_HCMD_DFL_DUP)))
+			continue;
+		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
+			data = dup_buf;
+		phys_addr = dma_map_single(trans->dev, (void *)data,
+					   cmdlen[i], DMA_TO_DEVICE);
+		if (dma_mapping_error(trans->dev, phys_addr)) {
+			idx = -ENOMEM;
+			iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
+			goto out;
+		}
+		iwl_pcie_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]);
+	}
+
+	BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
+	out_meta->flags = cmd->flags;
+	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
+		kzfree(txq->entries[idx].free_buf);
+	txq->entries[idx].free_buf = dup_buf;
+
+	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
+
+	/* start timer if queue currently empty */
+	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
+		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+
+	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+	if (!(cmd->flags & CMD_SEND_IN_IDLE) &&
+	    !trans_pcie->ref_cmd_in_flight) {
+		trans_pcie->ref_cmd_in_flight = true;
+		IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
+		iwl_trans_ref(trans);
+	}
+	/* Increment and update queue's write index */
+	txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
+	iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
+	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+
+out:
+	spin_unlock_bh(&txq->lock);
+free_dup_buf:
+	if (idx < 0)
+		kfree(dup_buf);
+	return idx;
+}
+
+#define HOST_COMPLETE_TIMEOUT	(2 * HZ)
+
+static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
+					struct iwl_host_cmd *cmd)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
+	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+	int cmd_idx;
+	int ret;
+
+	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);
+
+	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
+				  &trans->status),
+		 "Command %s: a command is already active!\n", cmd_str))
+		return -EIO;
+
+	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
+
+	if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) {
+		ret = wait_event_timeout(trans_pcie->d0i3_waitq,
+				 pm_runtime_active(&trans_pcie->pci_dev->dev),
+				 msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
+		if (!ret) {
+			IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n");
+			return -ETIMEDOUT;
+		}
+	}
+
+	cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
+	if (cmd_idx < 0) {
+		ret = cmd_idx;
+		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
+			cmd_str, ret);
+		return ret;
+	}
+
+	ret = wait_event_timeout(trans_pcie->wait_command_queue,
+				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
+					   &trans->status),
+				 HOST_COMPLETE_TIMEOUT);
+	if (!ret) {
+		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
+			cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
+
+		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
+			txq->read_ptr, txq->write_ptr);
+
+		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
+			       cmd_str);
+		ret = -ETIMEDOUT;
+
+		iwl_force_nmi(trans);
+		iwl_trans_fw_error(trans);
+
+		goto cancel;
+	}
+
+	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
+		IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
+		dump_stack();
+		ret = -EIO;
+		goto cancel;
+	}
+
+	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
+	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
+		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
+		ret = -ERFKILL;
+		goto cancel;
+	}
+
+	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
+		IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
+		ret = -EIO;
+		goto cancel;
+	}
+
+	return 0;
+
+cancel:
+	if (cmd->flags & CMD_WANT_SKB) {
+		/*
+		 * Cancel the CMD_WANT_SKB flag for the cmd in the
+		 * TX cmd queue. Otherwise in case the cmd comes
+		 * in later, it will possibly set an invalid
+		 * address (cmd->meta.source).
+		 */
+		txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
+	}
+
+	if (cmd->resp_pkt) {
+		iwl_free_resp(cmd);
+		cmd->resp_pkt = NULL;
+	}
+
+	return ret;
+}
+
+int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
+				  struct iwl_host_cmd *cmd)
+{
+	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
+	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
+		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
+				  cmd->id);
+		return -ERFKILL;
+	}
+
+	if (cmd->flags & CMD_ASYNC) {
+		int ret;
+
+		/* An asynchronous command cannot expect an SKB to be set. */
+		if (WARN_ON(cmd->flags & CMD_WANT_SKB))
+			return -EINVAL;
+
+		ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
+		if (ret < 0) {
+			IWL_ERR(trans,
+				"Error sending %s: enqueue_hcmd failed: %d\n",
+				iwl_get_cmd_string(trans, cmd->id), ret);
+			return ret;
+		}
+		return 0;
+	}
+
+	return iwl_pcie_gen2_send_hcmd_sync(trans, cmd);
+}
+
+/*
+ * iwl_pcie_gen2_txq_unmap -  Unmap any remaining DMA mappings and free skb's
+ */
+void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_txq *txq = trans_pcie->txq[txq_id];
+
+	spin_lock_bh(&txq->lock);
+	while (txq->write_ptr != txq->read_ptr) {
+		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
+				   txq_id, txq->read_ptr);
+
+		if (txq_id != trans_pcie->cmd_queue) {
+			int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
+			struct sk_buff *skb = txq->entries[idx].skb;
+
+			if (WARN_ON_ONCE(!skb))
+				continue;
+
+			iwl_pcie_free_tso_page(trans_pcie, skb);
+		}
+		iwl_pcie_gen2_free_tfd(trans, txq);
+		txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
+
+		if (txq->read_ptr == txq->write_ptr) {
+			unsigned long flags;
+
+			spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+			if (txq_id != trans_pcie->cmd_queue) {
+				IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
+					      txq->id);
+				iwl_trans_unref(trans);
+			} else if (trans_pcie->ref_cmd_in_flight) {
+				trans_pcie->ref_cmd_in_flight = false;
+				IWL_DEBUG_RPM(trans,
+					      "clear ref_cmd_in_flight\n");
+				iwl_trans_unref(trans);
+			}
+			spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+		}
+	}
+
+	while (!skb_queue_empty(&txq->overflow_q)) {
+		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
+
+		iwl_op_mode_free_skb(trans->op_mode, skb);
+	}
+
+	spin_unlock_bh(&txq->lock);
+
+	/* just in case - this queue may have been stopped */
+	iwl_wake_queue(trans, txq);
+}
+
+static void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
+					  struct iwl_txq *txq)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct device *dev = trans->dev;
+
+	/* De-alloc circular buffer of TFDs */
+	if (txq->tfds) {
+		dma_free_coherent(dev,
+				  trans_pcie->tfd_size * txq->n_window,
+				  txq->tfds, txq->dma_addr);
+		dma_free_coherent(dev,
+				  sizeof(*txq->first_tb_bufs) * txq->n_window,
+				  txq->first_tb_bufs, txq->first_tb_dma);
+	}
+
+	kfree(txq->entries);
+	iwl_pcie_free_dma_ptr(trans, &txq->bc_tbl);
+	kfree(txq);
+}
+
+/*
+ * iwl_pcie_txq_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_txq *txq = trans_pcie->txq[txq_id];
+	int i;
+
+	if (WARN_ON(!txq))
+		return;
+
+	iwl_pcie_gen2_txq_unmap(trans, txq_id);
+
+	/* De-alloc array of command/tx buffers */
+	if (txq_id == trans_pcie->cmd_queue)
+		for (i = 0; i < txq->n_window; i++) {
+			kzfree(txq->entries[i].cmd);
+			kzfree(txq->entries[i].free_buf);
+		}
+	del_timer_sync(&txq->stuck_timer);
+
+	iwl_pcie_gen2_txq_free_memory(trans, txq);
+
+	trans_pcie->txq[txq_id] = NULL;
+
+	clear_bit(txq_id, trans_pcie->queue_used);
+}
+
+int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
+				 struct iwl_tx_queue_cfg_cmd *cmd,
+				 int cmd_id, int size,
+				 unsigned int timeout)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_tx_queue_cfg_rsp *rsp;
+	struct iwl_txq *txq;
+	struct iwl_host_cmd hcmd = {
+		.id = cmd_id,
+		.len = { sizeof(*cmd) },
+		.data = { cmd, },
+		.flags = CMD_WANT_SKB,
+	};
+	int ret, qid;
+	u32 wr_ptr;
+
+	txq = kzalloc(sizeof(*txq), GFP_KERNEL);
+	if (!txq)
+		return -ENOMEM;
+	ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl,
+				     (trans->cfg->device_family >=
+				      IWL_DEVICE_FAMILY_22560) ?
+				     sizeof(struct iwl_gen3_bc_tbl) :
+				     sizeof(struct iwlagn_scd_bc_tbl));
+	if (ret) {
+		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
+		kfree(txq);
+		return -ENOMEM;
+	}
+
+	ret = iwl_pcie_txq_alloc(trans, txq, size, false);
+	if (ret) {
+		IWL_ERR(trans, "Tx queue alloc failed\n");
+		goto error;
+	}
+	ret = iwl_pcie_txq_init(trans, txq, size, false);
+	if (ret) {
+		IWL_ERR(trans, "Tx queue init failed\n");
+		goto error;
+	}
+
+	txq->wd_timeout = msecs_to_jiffies(timeout);
+
+	cmd->tfdq_addr = cpu_to_le64(txq->dma_addr);
+	cmd->byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
+	cmd->cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
+
+	ret = iwl_trans_send_cmd(trans, &hcmd);
+	if (ret)
+		goto error;
+
+	if (WARN_ON(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp))) {
+		ret = -EINVAL;
+		goto error_free_resp;
+	}
+
+	rsp = (void *)hcmd.resp_pkt->data;
+	qid = le16_to_cpu(rsp->queue_number);
+	wr_ptr = le16_to_cpu(rsp->write_pointer);
+
+	if (qid >= ARRAY_SIZE(trans_pcie->txq)) {
+		WARN_ONCE(1, "queue index %d unsupported", qid);
+		ret = -EIO;
+		goto error_free_resp;
+	}
+
+	if (test_and_set_bit(qid, trans_pcie->queue_used)) {
+		WARN_ONCE(1, "queue %d already used", qid);
+		ret = -EIO;
+		goto error_free_resp;
+	}
+
+	txq->id = qid;
+	trans_pcie->txq[qid] = txq;
+	wr_ptr &= (trans->cfg->base_params->max_tfd_queue_size - 1);
+
+	/* Place first TFD at index corresponding to start sequence number */
+	txq->read_ptr = wr_ptr;
+	txq->write_ptr = wr_ptr;
+	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
+			   (txq->write_ptr) | (qid << 16));
+	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
+
+	iwl_free_resp(&hcmd);
+	return qid;
+
+error_free_resp:
+	iwl_free_resp(&hcmd);
+error:
+	iwl_pcie_gen2_txq_free_memory(trans, txq);
+	return ret;
+}
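+
+/*
+ * Usage sketch (illustrative, not part of the driver): an op_mode fills
+ * the config command and lets the firmware pick the queue id, roughly:
+ *
+ *	struct iwl_tx_queue_cfg_cmd cmd = {
+ *		.flags = cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
+ *		.sta_id = sta_id,
+ *		.tid = tid,
+ *	};
+ *	int queue = iwl_trans_txq_alloc(trans, &cmd, SCD_QUEUE_CFG,
+ *					size, wdg_timeout);
+ *	if (queue < 0)
+ *		return queue;
+ *
+ * (the wrapper name and command id above are assumptions here).
+ */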
+
+void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	/*
+	 * Upon HW Rfkill - we stop the device, and then stop the queues
+	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
+	 * allow the op_mode to call txq_disable after it already called
+	 * stop_device.
+	 */
+	if (!test_and_clear_bit(queue, trans_pcie->queue_used)) {
+		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
+			  "queue %d not used", queue);
+		return;
+	}
+
+	iwl_pcie_gen2_txq_unmap(trans, queue);
+
+	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
+}
+
+void iwl_pcie_gen2_tx_free(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int i;
+
+	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+
+	/* Free all TX queues */
+	for (i = 0; i < ARRAY_SIZE(trans_pcie->txq); i++) {
+		if (!trans_pcie->txq[i])
+			continue;
+
+		iwl_pcie_gen2_txq_free(trans, i);
+	}
+}
+
+int iwl_pcie_gen2_tx_init(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_txq *cmd_queue;
+	int txq_id = trans_pcie->cmd_queue, ret;
+
+	/* alloc and init the command queue */
+	if (!trans_pcie->txq[txq_id]) {
+		cmd_queue = kzalloc(sizeof(*cmd_queue), GFP_KERNEL);
+		if (!cmd_queue) {
+			IWL_ERR(trans, "Not enough memory for command queue\n");
+			return -ENOMEM;
+		}
+		trans_pcie->txq[txq_id] = cmd_queue;
+		ret = iwl_pcie_txq_alloc(trans, cmd_queue, TFD_CMD_SLOTS, true);
+		if (ret) {
+			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
+			goto error;
+		}
+	} else {
+		cmd_queue = trans_pcie->txq[txq_id];
+	}
+
+	ret = iwl_pcie_txq_init(trans, cmd_queue, TFD_CMD_SLOTS, true);
+	if (ret) {
+		IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
+		goto error;
+	}
+	trans_pcie->txq[txq_id]->id = txq_id;
+	set_bit(txq_id, trans_pcie->queue_used);
+
+	return 0;
+
+error:
+	iwl_pcie_gen2_tx_free(trans);
+	return ret;
+}
+
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
new file mode 100644
index 0000000..93f0d38
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -0,0 +1,2478 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ *  Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/etherdevice.h>
+#include <linux/ieee80211.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/pm_runtime.h>
+#include <net/ip6_checksum.h>
+#include <net/tso.h>
+
+#include "iwl-debug.h"
+#include "iwl-csr.h"
+#include "iwl-prph.h"
+#include "iwl-io.h"
+#include "iwl-scd.h"
+#include "iwl-op-mode.h"
+#include "internal.h"
+#include "fw/api/tx.h"
+
+#define IWL_TX_CRC_SIZE 4
+#define IWL_TX_DELIMITER_SIZE 4
+
+/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
+ * DMA services
+ *
+ * Theory of operation
+ *
+ * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
+ * of buffer descriptors, each of which points to one or more data buffers for
+ * the device to read from or fill.  Driver and device exchange status of each
+ * queue via "read" and "write" pointers.  Driver keeps minimum of 2 empty
+ * entries in each circular buffer, to protect against confusing empty and full
+ * queue states.
+ *
+ * The device reads or writes the data in the queues via the device's several
+ * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
+ *
+ * For the Tx queue, there are low mark and high mark limits. If, after
+ * queuing a packet for Tx, the free space becomes less than the low mark,
+ * the Tx queue is stopped. When reclaiming packets (on the 'tx done' IRQ),
+ * if the free space becomes greater than the high mark, the Tx queue is
+ * resumed.
+ *
+ ***************************************************/
+
+int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q)
+{
+	unsigned int max;
+	unsigned int used;
+
+	/*
+	 * To avoid ambiguity between empty and completely full queues, there
+	 * should always be less than max_tfd_queue_size elements in the queue.
+	 * If q->n_window is smaller than max_tfd_queue_size, there is no need
+	 * to reserve any queue entries for this purpose.
+	 */
+	if (q->n_window < trans->cfg->base_params->max_tfd_queue_size)
+		max = q->n_window;
+	else
+		max = trans->cfg->base_params->max_tfd_queue_size - 1;
+
+	/*
+	 * max_tfd_queue_size is a power of 2, so the following is equivalent to
+	 * modulo by max_tfd_queue_size and is well defined.
+	 */
+	used = (q->write_ptr - q->read_ptr) &
+		(trans->cfg->base_params->max_tfd_queue_size - 1);
+
+	if (WARN_ON(used > max))
+		return 0;
+
+	return max - used;
+}
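+
+/*
+ * Worked example (illustrative): with max_tfd_queue_size = 256 and
+ * n_window = 256, max = 255; write_ptr = 10, read_ptr = 250 gives
+ * used = (10 - 250) & 255 = 16, so 255 - 16 = 239 entries are free.
+ */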
+
+/*
+ * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
+ */
+static int iwl_queue_init(struct iwl_txq *q, int slots_num)
+{
+	q->n_window = slots_num;
+
+	/* slots_num must be power-of-two size, otherwise
+	 * iwl_pcie_get_cmd_index is broken. */
+	if (WARN_ON(!is_power_of_2(slots_num)))
+		return -EINVAL;
+
+	q->low_mark = q->n_window / 4;
+	if (q->low_mark < 4)
+		q->low_mark = 4;
+
+	q->high_mark = q->n_window / 8;
+	if (q->high_mark < 2)
+		q->high_mark = 2;
+
+	q->write_ptr = 0;
+	q->read_ptr = 0;
+
+	return 0;
+}
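+
+/*
+ * Worked example (illustrative): a 256-slot data queue gets
+ * low_mark = 64 and high_mark = 32; the smallest queues hit the
+ * clamps, e.g. 8 slots yield low_mark = 4 and high_mark = 2.
+ */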
+
+int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
+			   struct iwl_dma_ptr *ptr, size_t size)
+{
+	if (WARN_ON(ptr->addr))
+		return -EINVAL;
+
+	ptr->addr = dma_alloc_coherent(trans->dev, size,
+				       &ptr->dma, GFP_KERNEL);
+	if (!ptr->addr)
+		return -ENOMEM;
+	ptr->size = size;
+	return 0;
+}
+
+void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
+{
+	if (unlikely(!ptr->addr))
+		return;
+
+	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
+	memset(ptr, 0, sizeof(*ptr));
+}
+
+static void iwl_pcie_txq_stuck_timer(struct timer_list *t)
+{
+	struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
+	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
+	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
+
+	spin_lock(&txq->lock);
+	/* check if triggered erroneously */
+	if (txq->read_ptr == txq->write_ptr) {
+		spin_unlock(&txq->lock);
+		return;
+	}
+	spin_unlock(&txq->lock);
+
+	iwl_trans_pcie_log_scd_error(trans, txq);
+
+	iwl_force_nmi(trans);
+}
+
+/*
+ * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
+ */
+static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
+					     struct iwl_txq *txq, u16 byte_cnt,
+					     int num_tbs)
+{
+	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int write_ptr = txq->write_ptr;
+	int txq_id = txq->id;
+	u8 sec_ctl = 0;
+	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
+	__le16 bc_ent;
+	struct iwl_tx_cmd *tx_cmd =
+		(void *)txq->entries[txq->write_ptr].cmd->payload;
+	u8 sta_id = tx_cmd->sta_id;
+
+	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
+
+	sec_ctl = tx_cmd->sec_ctl;
+
+	switch (sec_ctl & TX_CMD_SEC_MSK) {
+	case TX_CMD_SEC_CCM:
+		len += IEEE80211_CCMP_MIC_LEN;
+		break;
+	case TX_CMD_SEC_TKIP:
+		len += IEEE80211_TKIP_ICV_LEN;
+		break;
+	case TX_CMD_SEC_WEP:
+		len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
+		break;
+	}
+	if (trans_pcie->bc_table_dword)
+		len = DIV_ROUND_UP(len, 4);
+
+	if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
+		return;
+
+	bc_ent = cpu_to_le16(len | (sta_id << 12));
+
+	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
+
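+	/* the first TFD_QUEUE_SIZE_BC_DUP entries are mirrored after the end
+	 * of the table so the scheduler can read past the wrap point */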
+	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
+		scd_bc_tbl[txq_id].
+			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
+}
+
+static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
+					    struct iwl_txq *txq)
+{
+	struct iwl_trans_pcie *trans_pcie =
+		IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
+	int txq_id = txq->id;
+	int read_ptr = txq->read_ptr;
+	u8 sta_id = 0;
+	__le16 bc_ent;
+	struct iwl_tx_cmd *tx_cmd =
+		(void *)txq->entries[read_ptr].cmd->payload;
+
+	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
+
+	if (txq_id != trans_pcie->cmd_queue)
+		sta_id = tx_cmd->sta_id;
+
+	bc_ent = cpu_to_le16(1 | (sta_id << 12));
+
+	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
+
+	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
+		scd_bc_tbl[txq_id].
+			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
+}
+
+/*
+ * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
+ */
+static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
+				    struct iwl_txq *txq)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u32 reg = 0;
+	int txq_id = txq->id;
+
+	lockdep_assert_held(&txq->lock);
+
+	/*
+	 * explicitly wake up the NIC if:
+	 * 1. shadow registers aren't enabled
+	 * 2. NIC is woken up for CMD regardless of shadow outside this function
+	 * 3. there is a chance that the NIC is asleep
+	 */
+	if (!trans->cfg->base_params->shadow_reg_enable &&
+	    txq_id != trans_pcie->cmd_queue &&
+	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
+		/*
+		 * wake up nic if it's powered down ...
+		 * uCode will wake up, and interrupt us again, so next
+		 * time we'll skip this part.
+		 */
+		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
+
+		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+			IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
+				       txq_id, reg);
+			iwl_set_bit(trans, CSR_GP_CNTRL,
+				    BIT(trans->cfg->csr->flag_mac_access_req));
+			txq->need_update = true;
+			return;
+		}
+	}
+
+	/*
+	 * if not in power-save mode, uCode will never sleep when we're
+	 * trying to tx (during RFKILL, we're not trying to tx).
+	 */
+	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);
+	if (!txq->block)
+		iwl_write32(trans, HBUS_TARG_WRPTR,
+			    txq->write_ptr | (txq_id << 8));
+}
+
+void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int i;
+
+	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
+		struct iwl_txq *txq = trans_pcie->txq[i];
+
+		if (!test_bit(i, trans_pcie->queue_used))
+			continue;
+
+		spin_lock_bh(&txq->lock);
+		if (txq->need_update) {
+			iwl_pcie_txq_inc_wr_ptr(trans, txq);
+			txq->need_update = false;
+		}
+		spin_unlock_bh(&txq->lock);
+	}
+}
+
+static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans,
+						  void *_tfd, u8 idx)
+{
+	if (trans->cfg->use_tfh) {
+		struct iwl_tfh_tfd *tfd = _tfd;
+		struct iwl_tfh_tb *tb = &tfd->tbs[idx];
+
+		return (dma_addr_t)(le64_to_cpu(tb->addr));
+	} else {
+		struct iwl_tfd *tfd = _tfd;
+		struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+		dma_addr_t addr = get_unaligned_le32(&tb->lo);
+		dma_addr_t hi_len;
+
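+		/* legacy TFDs carry a 36-bit DMA address: 32 bits in tb->lo
+		 * plus 4 high bits in the low nibble of hi_n_len */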
+		if (sizeof(dma_addr_t) <= sizeof(u32))
+			return addr;
+
+		hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;
+
+		/*
+		 * shift by 16 twice to avoid warnings on 32-bit
+		 * (where this code never runs anyway due to the
+		 * if statement above)
+		 */
+		return addr | ((hi_len << 16) << 16);
+	}
+}
+
+static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd,
+				       u8 idx, dma_addr_t addr, u16 len)
+{
+	struct iwl_tfd *tfd_fh = (void *)tfd;
+	struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx];
+
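+	/* hi_n_len packs the 12-bit TB length into bits 4..15 and the upper
+	 * 4 bits of the 36-bit DMA address into bits 0..3 */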
+	u16 hi_n_len = len << 4;
+
+	put_unaligned_le32(addr, &tb->lo);
+	hi_n_len |= iwl_get_dma_hi_addr(addr);
+
+	tb->hi_n_len = cpu_to_le16(hi_n_len);
+
+	tfd_fh->num_tbs = idx + 1;
+}
+
+static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_trans *trans, void *_tfd)
+{
+	if (trans->cfg->use_tfh) {
+		struct iwl_tfh_tfd *tfd = _tfd;
+
+		return le16_to_cpu(tfd->num_tbs) & 0x1f;
+	} else {
+		struct iwl_tfd *tfd = _tfd;
+
+		return tfd->num_tbs & 0x1f;
+	}
+}
+
+static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
+			       struct iwl_cmd_meta *meta,
+			       struct iwl_txq *txq, int index)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int i, num_tbs;
+	void *tfd = iwl_pcie_get_tfd(trans, txq, index);
+
+	/* Sanity check on number of chunks */
+	num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
+
+	if (num_tbs > trans_pcie->max_tbs) {
+		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
+		/* @todo issue a fatal error; this is quite a serious situation */
+		return;
+	}
+
+	/* first TB is never freed - it's the bidirectional DMA data */
+
+	for (i = 1; i < num_tbs; i++) {
+		if (meta->tbs & BIT(i))
+			dma_unmap_page(trans->dev,
+				       iwl_pcie_tfd_tb_get_addr(trans, tfd, i),
+				       iwl_pcie_tfd_tb_get_len(trans, tfd, i),
+				       DMA_TO_DEVICE);
+		else
+			dma_unmap_single(trans->dev,
+					 iwl_pcie_tfd_tb_get_addr(trans, tfd,
+								  i),
+					 iwl_pcie_tfd_tb_get_len(trans, tfd,
+								 i),
+					 DMA_TO_DEVICE);
+	}
+
+	if (trans->cfg->use_tfh) {
+		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
+
+		tfd_fh->num_tbs = 0;
+	} else {
+		struct iwl_tfd *tfd_fh = (void *)tfd;
+
+		tfd_fh->num_tbs = 0;
+	}
+}
+
+/*
+ * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
+ * @trans - transport private data
+ * @txq - tx queue
+ *
+ * Does NOT advance any TFD circular buffer read/write indexes
+ * Does NOT free the TFD itself (which is within circular buffer)
+ */
+void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
+{
+	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
+	 * idx is bounded by n_window
+	 */
+	int rd_ptr = txq->read_ptr;
+	int idx = iwl_pcie_get_cmd_index(txq, rd_ptr);
+
+	lockdep_assert_held(&txq->lock);
+
+	/* We have only q->n_window txq->entries, but we use
+	 * TFD_QUEUE_SIZE_MAX tfds
+	 */
+	iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);
+
+	/* free SKB */
+	if (txq->entries) {
+		struct sk_buff *skb;
+
+		skb = txq->entries[idx].skb;
+
+		/* Can be called from irqs-disabled context
+		 * If skb is not NULL, it means that the whole queue is being
+		 * freed and that the queue is not empty - free the skb
+		 */
+		if (skb) {
+			iwl_op_mode_free_skb(trans->op_mode, skb);
+			txq->entries[idx].skb = NULL;
+		}
+	}
+}
+
+static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
+				  dma_addr_t addr, u16 len, bool reset)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	void *tfd;
+	u32 num_tbs;
+
+	tfd = txq->tfds + trans_pcie->tfd_size * txq->write_ptr;
+
+	if (reset)
+		memset(tfd, 0, trans_pcie->tfd_size);
+
+	num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
+
+	/* Each TFD can point to a maximum of max_tbs Tx buffers */
+	if (num_tbs >= trans_pcie->max_tbs) {
+		IWL_ERR(trans, "Error can not send more than %d chunks\n",
+			trans_pcie->max_tbs);
+		return -EINVAL;
+	}
+
+	if (WARN(addr & ~IWL_TX_DMA_MASK,
+		 "Unaligned address = %llx\n", (unsigned long long)addr))
+		return -EINVAL;
+
+	iwl_pcie_tfd_set_tb(trans, tfd, num_tbs, addr, len);
+
+	return num_tbs;
+}
+
+int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
+		       int slots_num, bool cmd_queue)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	size_t tfd_sz = trans_pcie->tfd_size *
+		trans->cfg->base_params->max_tfd_queue_size;
+	size_t tb0_buf_sz;
+	int i;
+
+	if (WARN_ON(txq->entries || txq->tfds))
+		return -EINVAL;
+
+	if (trans->cfg->use_tfh)
+		tfd_sz = trans_pcie->tfd_size * slots_num;
+
+	timer_setup(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, 0);
+	txq->trans_pcie = trans_pcie;
+
+	txq->n_window = slots_num;
+
+	txq->entries = kcalloc(slots_num,
+			       sizeof(struct iwl_pcie_txq_entry),
+			       GFP_KERNEL);
+
+	if (!txq->entries)
+		goto error;
+
+	if (cmd_queue)
+		for (i = 0; i < slots_num; i++) {
+			txq->entries[i].cmd =
+				kmalloc(sizeof(struct iwl_device_cmd),
+					GFP_KERNEL);
+			if (!txq->entries[i].cmd)
+				goto error;
+		}
+
+	/* Circular buffer of transmit frame descriptors (TFDs),
+	 * shared with device */
+	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
+				       &txq->dma_addr, GFP_KERNEL);
+	if (!txq->tfds)
+		goto error;
+
+	BUILD_BUG_ON(IWL_FIRST_TB_SIZE_ALIGN != sizeof(*txq->first_tb_bufs));
+
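+	/* per-slot buffers backing TB0, the first (bi-directional) TB of
+	 * each TFD; the start of the command/frame is copied here */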
+	tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;
+
+	txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
+					      &txq->first_tb_dma,
+					      GFP_KERNEL);
+	if (!txq->first_tb_bufs)
+		goto err_free_tfds;
+
+	return 0;
+err_free_tfds:
+	dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
+error:
+	if (txq->entries && cmd_queue)
+		for (i = 0; i < slots_num; i++)
+			kfree(txq->entries[i].cmd);
+	kfree(txq->entries);
+	txq->entries = NULL;
+
+	return -ENOMEM;
+}
+
+int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
+		      int slots_num, bool cmd_queue)
+{
+	int ret;
+	u32 tfd_queue_max_size = trans->cfg->base_params->max_tfd_queue_size;
+
+	txq->need_update = false;
+
+	/* max_tfd_queue_size must be power-of-two size, otherwise
+	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
+	if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
+		      "Max tfd queue size must be a power of two, but is %d",
+		      tfd_queue_max_size))
+		return -EINVAL;
+
+	/* Initialize queue's high/low-water marks, and head/tail indexes */
+	ret = iwl_queue_init(txq, slots_num);
+	if (ret)
+		return ret;
+
+	spin_lock_init(&txq->lock);
+
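+	/* the command queue is used differently from the data queues, so
+	 * give its lock a separate lockdep class to keep lockdep from
+	 * conflating the two */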
+	if (cmd_queue) {
+		static struct lock_class_key iwl_pcie_cmd_queue_lock_class;
+
+		lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class);
+	}
+
+	__skb_queue_head_init(&txq->overflow_q);
+
+	return 0;
+}
+
+void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
+			    struct sk_buff *skb)
+{
+	struct page **page_ptr;
+
+	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+
+	if (*page_ptr) {
+		__free_page(*page_ptr);
+		*page_ptr = NULL;
+	}
+}
+
+static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	lockdep_assert_held(&trans_pcie->reg_lock);
+
+	if (trans_pcie->ref_cmd_in_flight) {
+		trans_pcie->ref_cmd_in_flight = false;
+		IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n");
+		iwl_trans_unref(trans);
+	}
+
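+	/* on NICs with the APMG wake-up workaround, release the
+	 * MAC_ACCESS_REQ that was taken when the command was sent */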
+	if (!trans->cfg->base_params->apmg_wake_up_wa)
+		return;
+	if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
+		return;
+
+	trans_pcie->cmd_hold_nic_awake = false;
+	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+				   BIT(trans->cfg->csr->flag_mac_access_req));
+}
+
+/*
+ * iwl_pcie_txq_unmap -  Unmap any remaining DMA mappings and free skb's
+ */
+static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_txq *txq = trans_pcie->txq[txq_id];
+
+	spin_lock_bh(&txq->lock);
+	while (txq->write_ptr != txq->read_ptr) {
+		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
+				   txq_id, txq->read_ptr);
+
+		if (txq_id != trans_pcie->cmd_queue) {
+			struct sk_buff *skb = txq->entries[txq->read_ptr].skb;
+
+			if (WARN_ON_ONCE(!skb))
+				continue;
+
+			iwl_pcie_free_tso_page(trans_pcie, skb);
+		}
+		iwl_pcie_txq_free_tfd(trans, txq);
+		txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
+
+		if (txq->read_ptr == txq->write_ptr) {
+			unsigned long flags;
+
+			spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+			if (txq_id != trans_pcie->cmd_queue) {
+				IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
+					      txq->id);
+				iwl_trans_unref(trans);
+			} else {
+				iwl_pcie_clear_cmd_in_flight(trans);
+			}
+			spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+		}
+	}
+
+	while (!skb_queue_empty(&txq->overflow_q)) {
+		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
+
+		iwl_op_mode_free_skb(trans->op_mode, skb);
+	}
+
+	spin_unlock_bh(&txq->lock);
+
+	/* just in case - this queue may have been stopped */
+	iwl_wake_queue(trans, txq);
+}
+
+/*
+ * iwl_pcie_txq_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_txq *txq = trans_pcie->txq[txq_id];
+	struct device *dev = trans->dev;
+	int i;
+
+	if (WARN_ON(!txq))
+		return;
+
+	iwl_pcie_txq_unmap(trans, txq_id);
+
+	/* De-alloc array of command/tx buffers */
+	if (txq_id == trans_pcie->cmd_queue)
+		for (i = 0; i < txq->n_window; i++) {
+			kzfree(txq->entries[i].cmd);
+			kzfree(txq->entries[i].free_buf);
+		}
+
+	/* De-alloc circular buffer of TFDs */
+	if (txq->tfds) {
+		dma_free_coherent(dev,
+				  trans_pcie->tfd_size *
+				  trans->cfg->base_params->max_tfd_queue_size,
+				  txq->tfds, txq->dma_addr);
+		txq->dma_addr = 0;
+		txq->tfds = NULL;
+
+		dma_free_coherent(dev,
+				  sizeof(*txq->first_tb_bufs) * txq->n_window,
+				  txq->first_tb_bufs, txq->first_tb_dma);
+	}
+
+	kfree(txq->entries);
+	txq->entries = NULL;
+
+	del_timer_sync(&txq->stuck_timer);
+
+	/* 0-fill queue descriptor structure */
+	memset(txq, 0, sizeof(*txq));
+}
+
+void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int nq = trans->cfg->base_params->num_of_queues;
+	int chan;
+	u32 reg_val;
+	int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
+				SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);
+
+	/* make sure all queues are not stopped/used */
+	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
+	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+
+	trans_pcie->scd_base_addr =
+		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
+
+	WARN_ON(scd_base_addr != 0 &&
+		scd_base_addr != trans_pcie->scd_base_addr);
+
+	/* reset context data, TX status and translation data */
+	iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
+				   SCD_CONTEXT_MEM_LOWER_BOUND,
+			    NULL, clear_dwords);
+
+	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
+		       trans_pcie->scd_bc_tbls.dma >> 10);
+
+	/* The chain extension of the SCD doesn't work well. This feature is
+	 * enabled by default by the HW, so we need to disable it manually.
+	 */
+	if (trans->cfg->base_params->scd_chain_ext_wa)
+		iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
+
+	iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
+				trans_pcie->cmd_fifo,
+				trans_pcie->cmd_q_wdg_timeout);
+
+	/* Activate all Tx DMA/FIFO channels */
+	iwl_scd_activate_fifos(trans);
+
+	/* Enable DMA channel */
+	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
+		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
+				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
+
+	/* Update FH chicken bits */
+	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
+	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
+			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
+
+	/* Enable L1-Active */
+	if (trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
+		iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
+				    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+}
+
+void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int txq_id;
+
+	/*
+	 * we should never get here in gen2 trans mode; return early to
+	 * avoid invalid accesses
+	 */
+	if (WARN_ON_ONCE(trans->cfg->gen2))
+		return;
+
+	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+	     txq_id++) {
+		struct iwl_txq *txq = trans_pcie->txq[txq_id];
+
+		if (trans->cfg->use_tfh)
+			iwl_write_direct64(trans,
+					   FH_MEM_CBBC_QUEUE(trans, txq_id),
+					   txq->dma_addr);
+		else
+			iwl_write_direct32(trans,
+					   FH_MEM_CBBC_QUEUE(trans, txq_id),
+					   txq->dma_addr >> 8);
+		iwl_pcie_txq_unmap(trans, txq_id);
+		txq->read_ptr = 0;
+		txq->write_ptr = 0;
+	}
+
+	/* Tell NIC where to find the "keep warm" buffer */
+	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
+			   trans_pcie->kw.dma >> 4);
+
+	/*
+	 * Send 0 as the scd_base_addr since the device may have been reset
+	 * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will
+	 * contain garbage.
+	 */
+	iwl_pcie_tx_start(trans, 0);
+}
+
+static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	unsigned long flags;
+	int ch, ret;
+	u32 mask = 0;
+
+	spin_lock(&trans_pcie->irq_lock);
+
+	if (!iwl_trans_grab_nic_access(trans, &flags))
+		goto out;
+
+	/* Stop each Tx DMA channel */
+	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
+		iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
+		mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);
+	}
+
+	/* Wait for DMA channels to be idle */
+	ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000);
+	if (ret < 0)
+		IWL_ERR(trans,
+			"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
+			ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG));
+
+	iwl_trans_release_nic_access(trans, &flags);
+
+out:
+	spin_unlock(&trans_pcie->irq_lock);
+}
+
+/*
+ * iwl_pcie_tx_stop - Stop all Tx DMA channels
+ */
+int iwl_pcie_tx_stop(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int txq_id;
+
+	/* Turn off all Tx DMA fifos */
+	iwl_scd_deactivate_fifos(trans);
+
+	/* Turn off all Tx DMA channels */
+	iwl_pcie_tx_stop_fh(trans);
+
+	/*
+	 * This function can be called before the op_mode disabled the
+	 * queues. This happens when we have an rfkill interrupt.
+	 * Since we stop Tx altogether - mark the queues as stopped.
+	 */
+	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
+	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+
+	/* This can happen: start_hw, stop_device */
+	if (!trans_pcie->txq_memory)
+		return 0;
+
+	/* Unmap DMA from host system and free skb's */
+	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+	     txq_id++)
+		iwl_pcie_txq_unmap(trans, txq_id);
+
+	return 0;
+}
+
+/*
+ * iwl_trans_tx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+void iwl_pcie_tx_free(struct iwl_trans *trans)
+{
+	int txq_id;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+
+	/* Tx queues */
+	if (trans_pcie->txq_memory) {
+		for (txq_id = 0;
+		     txq_id < trans->cfg->base_params->num_of_queues;
+		     txq_id++) {
+			iwl_pcie_txq_free(trans, txq_id);
+			trans_pcie->txq[txq_id] = NULL;
+		}
+	}
+
+	kfree(trans_pcie->txq_memory);
+	trans_pcie->txq_memory = NULL;
+
+	iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);
+
+	iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
+}
+
+/*
+ * iwl_pcie_tx_alloc - allocate TX context
+ * Allocate all Tx DMA structures and initialize them
+ */
+static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
+{
+	int ret;
+	int txq_id, slots_num;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u16 bc_tbls_size = trans->cfg->base_params->num_of_queues;
+
+	bc_tbls_size *= (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
+		sizeof(struct iwl_gen3_bc_tbl) :
+		sizeof(struct iwlagn_scd_bc_tbl);
+
+	/* It is not allowed to alloc twice, so warn when this happens.
+	 * We cannot rely on the previous allocation, so free and fail. */
+	if (WARN_ON(trans_pcie->txq_memory)) {
+		ret = -EINVAL;
+		goto error;
+	}
+
+	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
+				     bc_tbls_size);
+	if (ret) {
+		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
+		goto error;
+	}
+
+	/* Alloc keep-warm buffer */
+	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
+	if (ret) {
+		IWL_ERR(trans, "Keep Warm allocation failed\n");
+		goto error;
+	}
+
+	trans_pcie->txq_memory = kcalloc(trans->cfg->base_params->num_of_queues,
+					 sizeof(struct iwl_txq), GFP_KERNEL);
+	if (!trans_pcie->txq_memory) {
+		IWL_ERR(trans, "Not enough memory for txq\n");
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
+	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+	     txq_id++) {
+		bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
+
+		slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+		trans_pcie->txq[txq_id] = &trans_pcie->txq_memory[txq_id];
+		ret = iwl_pcie_txq_alloc(trans, trans_pcie->txq[txq_id],
+					 slots_num, cmd_queue);
+		if (ret) {
+			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
+			goto error;
+		}
+		trans_pcie->txq[txq_id]->id = txq_id;
+	}
+
+	return 0;
+
+error:
+	iwl_pcie_tx_free(trans);
+
+	return ret;
+}
+
+int iwl_pcie_tx_init(struct iwl_trans *trans)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int ret;
+	int txq_id, slots_num;
+	bool alloc = false;
+
+	if (!trans_pcie->txq_memory) {
+		ret = iwl_pcie_tx_alloc(trans);
+		if (ret)
+			goto error;
+		alloc = true;
+	}
+
+	spin_lock(&trans_pcie->irq_lock);
+
+	/* Turn off all Tx DMA fifos */
+	iwl_scd_deactivate_fifos(trans);
+
+	/* Tell NIC where to find the "keep warm" buffer */
+	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
+			   trans_pcie->kw.dma >> 4);
+
+	spin_unlock(&trans_pcie->irq_lock);
+
+	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
+	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+	     txq_id++) {
+		bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
+
+		slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+		ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id],
+					slots_num, cmd_queue);
+		if (ret) {
+			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
+			goto error;
+		}
+
+		/*
+		 * Tell the NIC where to find the circular buffer of TFDs for
+		 * a given Tx queue, and enable the DMA channel used for that
+		 * queue: write the physical base address of the circular
+		 * buffer (the TFD queue in DRAM).
+		 */
+		iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
+				   trans_pcie->txq[txq_id]->dma_addr >> 8);
+	}
+
+	iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
+	if (trans->cfg->base_params->num_of_queues > 20)
+		iwl_set_bits_prph(trans, SCD_GP_CTRL,
+				  SCD_GP_CTRL_ENABLE_31_QUEUES);
+
+	return 0;
+error:
+	/* Upon error, free only if we allocated something */
+	if (alloc)
+		iwl_pcie_tx_free(trans);
+	return ret;
+}
+
+static inline void iwl_pcie_txq_progress(struct iwl_txq *txq)
+{
+	lockdep_assert_held(&txq->lock);
+
+	if (!txq->wd_timeout)
+		return;
+
+	/*
+	 * station is asleep and we send data - that must
+	 * be uAPSD or PS-Poll. Don't rearm the timer.
+	 */
+	if (txq->frozen)
+		return;
+
+	/*
+	 * if empty delete timer, otherwise move timer forward
+	 * since we're making progress on this queue
+	 */
+	if (txq->read_ptr == txq->write_ptr)
+		del_timer(&txq->stuck_timer);
+	else
+		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+}
+
+/* Frees buffers until index _not_ inclusive */
+void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+			    struct sk_buff_head *skbs)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_txq *txq = trans_pcie->txq[txq_id];
+	int tfd_num = iwl_pcie_get_cmd_index(txq, ssn);
+	int read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
+	int last_to_free;
+
+	/* This function is not meant to release the cmd queue */
+	if (WARN_ON(txq_id == trans_pcie->cmd_queue))
+		return;
+
+	spin_lock_bh(&txq->lock);
+
+	if (!test_bit(txq_id, trans_pcie->queue_used)) {
+		IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
+				    txq_id, ssn);
+		goto out;
+	}
+
+	if (read_ptr == tfd_num)
+		goto out;
+
+	IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
+			   txq_id, txq->read_ptr, tfd_num, ssn);
+
+	/* Since we free until index _not_ inclusive, the one before index is
+	 * the last we will free. That entry must currently be in use. */
+	last_to_free = iwl_queue_dec_wrap(trans, tfd_num);
+
+	if (!iwl_queue_used(txq, last_to_free)) {
+		IWL_ERR(trans,
+			"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
+			__func__, txq_id, last_to_free,
+			trans->cfg->base_params->max_tfd_queue_size,
+			txq->write_ptr, txq->read_ptr);
+		goto out;
+	}
+
+	if (WARN_ON(!skb_queue_empty(skbs)))
+		goto out;
+
+	for (;
+	     read_ptr != tfd_num;
+	     txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr),
+	     read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr)) {
+		struct sk_buff *skb = txq->entries[read_ptr].skb;
+
+		if (WARN_ON_ONCE(!skb))
+			continue;
+
+		iwl_pcie_free_tso_page(trans_pcie, skb);
+
+		__skb_queue_tail(skbs, skb);
+
+		txq->entries[read_ptr].skb = NULL;
+
+		if (!trans->cfg->use_tfh)
+			iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
+
+		iwl_pcie_txq_free_tfd(trans, txq);
+	}
+
+	iwl_pcie_txq_progress(txq);
+
+	if (iwl_queue_space(trans, txq) > txq->low_mark &&
+	    test_bit(txq_id, trans_pcie->queue_stopped)) {
+		struct sk_buff_head overflow_skbs;
+
+		__skb_queue_head_init(&overflow_skbs);
+		skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);
+
+		/*
+		 * This is tricky: we are in the reclaim path, which is not
+		 * re-entrant, so no one else will try to access the txq
+		 * data from that path. We also stopped tx, so no new frames
+		 * can come in. Bottom line: it is safe to unlock and
+		 * re-lock later.
+		 */
+		spin_unlock_bh(&txq->lock);
+
+		while (!skb_queue_empty(&overflow_skbs)) {
+			struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
+			struct iwl_device_cmd *dev_cmd_ptr;
+
+			dev_cmd_ptr = *(void **)((u8 *)skb->cb +
+						 trans_pcie->dev_cmd_offs);
+
+			/*
+			 * Note that we can very well be overflowing again.
+			 * In that case, iwl_queue_space will be small again
+			 * and we won't wake mac80211's queue.
+			 */
+			iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
+		}
+		spin_lock_bh(&txq->lock);
+
+		if (iwl_queue_space(trans, txq) > txq->low_mark)
+			iwl_wake_queue(trans, txq);
+	}
+
+	if (txq->read_ptr == txq->write_ptr) {
+		IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", txq->id);
+		iwl_trans_unref(trans);
+	}
+
+out:
+	spin_unlock_bh(&txq->lock);
+}
+
+static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
+				      const struct iwl_host_cmd *cmd)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	const struct iwl_cfg *cfg = trans->cfg;
+	int ret;
+
+	lockdep_assert_held(&trans_pcie->reg_lock);
+
+	if (!(cmd->flags & CMD_SEND_IN_IDLE) &&
+	    !trans_pcie->ref_cmd_in_flight) {
+		trans_pcie->ref_cmd_in_flight = true;
+		IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
+		iwl_trans_ref(trans);
+	}
+
+	/*
+	 * wake up the NIC to make sure that the firmware will see the host
+	 * command - we will let the NIC sleep once all the host commands
+	 * returned. This needs to be done only on NICs that have
+	 * apmg_wake_up_wa set.
+	 */
+	if (cfg->base_params->apmg_wake_up_wa &&
+	    !trans_pcie->cmd_hold_nic_awake) {
+		__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
+					 BIT(cfg->csr->flag_mac_access_req));
+
+		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+				   BIT(cfg->csr->flag_val_mac_access_en),
+				   (BIT(cfg->csr->flag_mac_clock_ready) |
+				    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
+				   15000);
+		if (ret < 0) {
+			__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+					BIT(cfg->csr->flag_mac_access_req));
+			IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
+			return -EIO;
+		}
+		trans_pcie->cmd_hold_nic_awake = true;
+	}
+
+	return 0;
+}
+
+/*
+ * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
+ *
+ * When FW advances 'R' index, all entries between old and new 'R' index
+ * need to be reclaimed. As a result, some free space forms. If there is
+ * enough free space (> low mark), wake the stack that feeds us.
+ */
+static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_txq *txq = trans_pcie->txq[txq_id];
+	unsigned long flags;
+	int nfreed = 0;
+	u16 r;
+
+	lockdep_assert_held(&txq->lock);
+
+	idx = iwl_pcie_get_cmd_index(txq, idx);
+	r = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
+
+	if (idx >= trans->cfg->base_params->max_tfd_queue_size ||
+	    (!iwl_queue_used(txq, idx))) {
+		IWL_ERR(trans,
+			"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
+			__func__, txq_id, idx,
+			trans->cfg->base_params->max_tfd_queue_size,
+			txq->write_ptr, txq->read_ptr);
+		return;
+	}
+
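+	/* advance read_ptr up to (and including) idx; reclaiming more than
+	 * one entry here means the firmware skipped a command completion */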
+	for (idx = iwl_queue_inc_wrap(trans, idx); r != idx;
+	     r = iwl_queue_inc_wrap(trans, r)) {
+		txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
+
+		if (nfreed++ > 0) {
+			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
+				idx, txq->write_ptr, r);
+			iwl_force_nmi(trans);
+		}
+	}
+
+	if (txq->read_ptr == txq->write_ptr) {
+		spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+		iwl_pcie_clear_cmd_in_flight(trans);
+		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+	}
+
+	iwl_pcie_txq_progress(txq);
+}
+
+static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
+				 u16 txq_id)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u32 tbl_dw_addr;
+	u32 tbl_dw;
+	u16 scd_q2ratid;
+
+	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
+
+	tbl_dw_addr = trans_pcie->scd_base_addr +
+			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
+
+	tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);
+
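+	/* each translation-table dword holds the RA/TID mapping for two
+	 * queues; update only the 16-bit half that belongs to this queue */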
+	if (txq_id & 0x1)
+		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
+	else
+		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
+
+	iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);
+
+	return 0;
+}
+
+/* Receiver address (actually, Rx station's index into station table),
+ * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
+#define BUILD_RAxTID(sta_id, tid)	(((sta_id) << 4) + (tid))
+
+bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
+			       const struct iwl_trans_txq_scd_cfg *cfg,
+			       unsigned int wdg_timeout)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_txq *txq = trans_pcie->txq[txq_id];
+	int fifo = -1;
+	bool scd_bug = false;
+
+	if (test_and_set_bit(txq_id, trans_pcie->queue_used))
+		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
+
+	txq->wd_timeout = msecs_to_jiffies(wdg_timeout);
+
+	if (cfg) {
+		fifo = cfg->fifo;
+
+		/* Disable the scheduler prior to configuring the cmd queue */
+		if (txq_id == trans_pcie->cmd_queue &&
+		    trans_pcie->scd_set_active)
+			iwl_scd_enable_set_active(trans, 0);
+
+		/* Stop this Tx queue before configuring it */
+		iwl_scd_txq_set_inactive(trans, txq_id);
+
+		/* Set this queue as a chain-building queue unless it is CMD */
+		if (txq_id != trans_pcie->cmd_queue)
+			iwl_scd_txq_set_chain(trans, txq_id);
+
+		if (cfg->aggregate) {
+			u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);
+
+			/* Map receiver-address / traffic-ID to this queue */
+			iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);
+
+			/* enable aggregations for the queue */
+			iwl_scd_txq_enable_agg(trans, txq_id);
+			txq->ampdu = true;
+		} else {
+			/*
+			 * disable aggregations for the queue, this will also
+			 * make the ra_tid mapping configuration irrelevant
+			 * since it is now a non-AGG queue.
+			 */
+			iwl_scd_txq_disable_agg(trans, txq_id);
+
+			ssn = txq->read_ptr;
+		}
+	} else {
+		/*
+		 * If we need to move the SCD write pointer by steps of
+		 * 0x40, 0x80 or 0xc0, it gets stuck. Avoid this and let
+		 * the op_mode know by returning true later.
+		 * Do this only when cfg is NULL, since this trick can be
+		 * done only if DQA is enabled, which is true for mvm only;
+		 * and mvm never sets a cfg pointer.
+		 * This is really ugly, but this is the easiest way out for
+		 * this sad hardware issue.
+		 * This bug has been fixed on devices 9000 and up.
+		 */
+		scd_bug = !trans->cfg->mq_rx_supported &&
+			!((ssn - txq->write_ptr) & 0x3f) &&
+			(ssn != txq->write_ptr);
+		if (scd_bug)
+			ssn++;
+	}
+
+	/* Place first TFD at index corresponding to start sequence number.
+	 * Assumes that ssn_idx is valid (!= 0xFFF) */
+	txq->read_ptr = (ssn & 0xff);
+	txq->write_ptr = (ssn & 0xff);
+	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
+			   (ssn & 0xff) | (txq_id << 8));
+
+	if (cfg) {
+		u8 frame_limit = cfg->frame_limit;
+
+		iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);
+
+		/* Set up Tx window size and frame limit for this queue */
+		iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
+				SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
+		iwl_trans_write_mem32(trans,
+			trans_pcie->scd_base_addr +
+			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
+			SCD_QUEUE_CTX_REG2_VAL(WIN_SIZE, frame_limit) |
+			SCD_QUEUE_CTX_REG2_VAL(FRAME_LIMIT, frame_limit));
+
+		/* Set up status area in SRAM, map to Tx DMA/FIFO, activate */
+		iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
+			       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+			       (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
+			       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
+			       SCD_QUEUE_STTS_REG_MSK);
+
+		/* enable the scheduler for this queue (only) */
+		if (txq_id == trans_pcie->cmd_queue &&
+		    trans_pcie->scd_set_active)
+			iwl_scd_enable_set_active(trans, BIT(txq_id));
+
+		IWL_DEBUG_TX_QUEUES(trans,
+				    "Activate queue %d on FIFO %d WrPtr: %d\n",
+				    txq_id, fifo, ssn & 0xff);
+	} else {
+		IWL_DEBUG_TX_QUEUES(trans,
+				    "Activate queue %d WrPtr: %d\n",
+				    txq_id, ssn & 0xff);
+	}
+
+	return scd_bug;
+}
+
+void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
+					bool shared_mode)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_txq *txq = trans_pcie->txq[txq_id];
+
+	txq->ampdu = !shared_mode;
+}
+
+void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
+				bool configure_scd)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u32 stts_addr = trans_pcie->scd_base_addr +
+			SCD_TX_STTS_QUEUE_OFFSET(txq_id);
+	static const u32 zero_val[4] = {};
+
+	trans_pcie->txq[txq_id]->frozen_expiry_remainder = 0;
+	trans_pcie->txq[txq_id]->frozen = false;
+
+	/*
+	 * Upon HW Rfkill - we stop the device, and then stop the queues
+	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
+	 * allow the op_mode to call txq_disable after it already called
+	 * stop_device.
+	 */
+	if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
+		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
+			  "queue %d not used", txq_id);
+		return;
+	}
+
+	if (configure_scd) {
+		iwl_scd_txq_set_inactive(trans, txq_id);
+
+		iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
+				    ARRAY_SIZE(zero_val));
+	}
+
+	iwl_pcie_txq_unmap(trans, txq_id);
+	trans_pcie->txq[txq_id]->ampdu = false;
+
+	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
+}
+
+/*************** HOST COMMAND QUEUE FUNCTIONS   *****/
+
+/*
+ * iwl_pcie_enqueue_hcmd - enqueue a uCode command
+ * @trans: transport private data
+ * @cmd: a pointer to the ucode command structure
+ *
+ * The function returns < 0 values to indicate the operation
+ * failed. On success, it returns the index (>= 0) of command in the
+ * command queue.
+ */
+static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
+				 struct iwl_host_cmd *cmd)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+	struct iwl_device_cmd *out_cmd;
+	struct iwl_cmd_meta *out_meta;
+	unsigned long flags;
+	void *dup_buf = NULL;
+	dma_addr_t phys_addr;
+	int idx;
+	u16 copy_size, cmd_size, tb0_size;
+	bool had_nocopy = false;
+	u8 group_id = iwl_cmd_groupid(cmd->id);
+	int i, ret;
+	u32 cmd_pos;
+	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
+	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
+
+	if (WARN(!trans->wide_cmd_header &&
+		 group_id > IWL_ALWAYS_LONG_GROUP,
+		 "unsupported wide command %#x\n", cmd->id))
+		return -EINVAL;
+
+	if (group_id != 0) {
+		copy_size = sizeof(struct iwl_cmd_header_wide);
+		cmd_size = sizeof(struct iwl_cmd_header_wide);
+	} else {
+		copy_size = sizeof(struct iwl_cmd_header);
+		cmd_size = sizeof(struct iwl_cmd_header);
+	}
+
+	/* need one for the header if the first is NOCOPY */
+	BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);
+
+	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
+		cmddata[i] = cmd->data[i];
+		cmdlen[i] = cmd->len[i];
+
+		if (!cmd->len[i])
+			continue;
+
+		/* need at least IWL_FIRST_TB_SIZE copied */
+		if (copy_size < IWL_FIRST_TB_SIZE) {
+			int copy = IWL_FIRST_TB_SIZE - copy_size;
+
+			if (copy > cmdlen[i])
+				copy = cmdlen[i];
+			cmdlen[i] -= copy;
+			cmddata[i] += copy;
+			copy_size += copy;
+		}
+
+		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
+			had_nocopy = true;
+			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
+				idx = -EINVAL;
+				goto free_dup_buf;
+			}
+		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
+			/*
+			 * This is also a chunk that isn't copied
+			 * to the static buffer so set had_nocopy.
+			 */
+			had_nocopy = true;
+
+			/* only allowed once */
+			if (WARN_ON(dup_buf)) {
+				idx = -EINVAL;
+				goto free_dup_buf;
+			}
+
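+			/* duplicate the chunk so the DMA mapping does not
+			 * depend on the lifetime of the caller's buffer */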
+			dup_buf = kmemdup(cmddata[i], cmdlen[i],
+					  GFP_ATOMIC);
+			if (!dup_buf)
+				return -ENOMEM;
+		} else {
+			/* NOCOPY must not be followed by normal! */
+			if (WARN_ON(had_nocopy)) {
+				idx = -EINVAL;
+				goto free_dup_buf;
+			}
+			copy_size += cmdlen[i];
+		}
+		cmd_size += cmd->len[i];
+	}
+
+	/*
+	 * If any of the command structures end up being larger than
+	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
+	 * allocated into separate TFDs, then we will need to
+	 * increase the size of the buffers.
+	 */
+	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
+		 "Command %s (%#x) is too large (%d bytes)\n",
+		 iwl_get_cmd_string(trans, cmd->id),
+		 cmd->id, copy_size)) {
+		idx = -EINVAL;
+		goto free_dup_buf;
+	}
+
+	spin_lock_bh(&txq->lock);
+
+	if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+		spin_unlock_bh(&txq->lock);
+
+		IWL_ERR(trans, "No space in command queue\n");
+		iwl_op_mode_cmd_queue_full(trans->op_mode);
+		idx = -ENOSPC;
+		goto free_dup_buf;
+	}
+
+	idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
+	out_cmd = txq->entries[idx].cmd;
+	out_meta = &txq->entries[idx].meta;
+
+	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
+	if (cmd->flags & CMD_WANT_SKB)
+		out_meta->source = cmd;
+
+	/* set up the header */
+	if (group_id != 0) {
+		out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
+		out_cmd->hdr_wide.group_id = group_id;
+		out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
+		out_cmd->hdr_wide.length =
+			cpu_to_le16(cmd_size -
+				    sizeof(struct iwl_cmd_header_wide));
+		out_cmd->hdr_wide.reserved = 0;
+		out_cmd->hdr_wide.sequence =
+			cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
+						 INDEX_TO_SEQ(txq->write_ptr));
+
+		cmd_pos = sizeof(struct iwl_cmd_header_wide);
+		copy_size = sizeof(struct iwl_cmd_header_wide);
+	} else {
+		out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
+		out_cmd->hdr.sequence =
+			cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
+						 INDEX_TO_SEQ(txq->write_ptr));
+		out_cmd->hdr.group_id = 0;
+
+		cmd_pos = sizeof(struct iwl_cmd_header);
+		copy_size = sizeof(struct iwl_cmd_header);
+	}
+
+	/* and copy the data that needs to be copied */
+	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
+		int copy;
+
+		if (!cmd->len[i])
+			continue;
+
+		/* copy everything if not nocopy/dup */
+		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
+					   IWL_HCMD_DFL_DUP))) {
+			copy = cmd->len[i];
+
+			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
+			cmd_pos += copy;
+			copy_size += copy;
+			continue;
+		}
+
+		/*
+		 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
+		 * in total (for bi-directional DMA), but copy up to what
+		 * we can fit into the payload for debug dump purposes.
+		 */
+		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);
+
+		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
+		cmd_pos += copy;
+
+		/* However, treat copy_size the proper way; we need it below */
+		if (copy_size < IWL_FIRST_TB_SIZE) {
+			copy = IWL_FIRST_TB_SIZE - copy_size;
+
+			if (copy > cmd->len[i])
+				copy = cmd->len[i];
+			copy_size += copy;
+		}
+	}
+
+	IWL_DEBUG_HC(trans,
+		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
+		     iwl_get_cmd_string(trans, cmd->id),
+		     group_id, out_cmd->hdr.cmd,
+		     le16_to_cpu(out_cmd->hdr.sequence),
+		     cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);
+
+	/* start the TFD with the minimum copy bytes */
+	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
+	memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
+	iwl_pcie_txq_build_tfd(trans, txq,
+			       iwl_pcie_get_first_tb_dma(txq, idx),
+			       tb0_size, true);
+
+	/* map first command fragment, if any remains */
+	if (copy_size > tb0_size) {
+		phys_addr = dma_map_single(trans->dev,
+					   ((u8 *)&out_cmd->hdr) + tb0_size,
+					   copy_size - tb0_size,
+					   DMA_TO_DEVICE);
+		if (dma_mapping_error(trans->dev, phys_addr)) {
+			iwl_pcie_tfd_unmap(trans, out_meta, txq,
+					   txq->write_ptr);
+			idx = -ENOMEM;
+			goto out;
+		}
+
+		iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
+				       copy_size - tb0_size, false);
+	}
+
+	/* map the remaining (adjusted) nocopy/dup fragments */
+	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
+		const void *data = cmddata[i];
+
+		if (!cmdlen[i])
+			continue;
+		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
+					   IWL_HCMD_DFL_DUP)))
+			continue;
+		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
+			data = dup_buf;
+		phys_addr = dma_map_single(trans->dev, (void *)data,
+					   cmdlen[i], DMA_TO_DEVICE);
+		if (dma_mapping_error(trans->dev, phys_addr)) {
+			iwl_pcie_tfd_unmap(trans, out_meta, txq,
+					   txq->write_ptr);
+			idx = -ENOMEM;
+			goto out;
+		}
+
+		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
+	}
+
+	BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
+	out_meta->flags = cmd->flags;
+	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
+		kzfree(txq->entries[idx].free_buf);
+	txq->entries[idx].free_buf = dup_buf;
+
+	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
+
+	/* start timer if queue currently empty */
+	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
+		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+
+	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+	ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
+	if (ret < 0) {
+		idx = ret;
+		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+		goto out;
+	}
+
+	/* Increment and update queue's write index */
+	txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
+	iwl_pcie_txq_inc_wr_ptr(trans, txq);
+
+	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+
+ out:
+	spin_unlock_bh(&txq->lock);
+ free_dup_buf:
+	if (idx < 0)
+		kfree(dup_buf);
+	return idx;
+}
+
+/*
+ * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
+ * @rxb: Rx buffer to reclaim
+ */
+void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
+			    struct iwl_rx_cmd_buffer *rxb)
+{
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
+	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+	u8 group_id;
+	u32 cmd_id;
+	int txq_id = SEQ_TO_QUEUE(sequence);
+	int index = SEQ_TO_INDEX(sequence);
+	int cmd_index;
+	struct iwl_device_cmd *cmd;
+	struct iwl_cmd_meta *meta;
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+
+	/* If a Tx command is being handled and it isn't in the actual
+	 * command queue, then a command routing bug has been introduced
+	 * in the queue management code. */
+	if (WARN(txq_id != trans_pcie->cmd_queue,
+		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
+		 txq_id, trans_pcie->cmd_queue, sequence, txq->read_ptr,
+		 txq->write_ptr)) {
+		iwl_print_hex_error(trans, pkt, 32);
+		return;
+	}
+
+	spin_lock_bh(&txq->lock);
+
+	cmd_index = iwl_pcie_get_cmd_index(txq, index);
+	cmd = txq->entries[cmd_index].cmd;
+	meta = &txq->entries[cmd_index].meta;
+	group_id = cmd->hdr.group_id;
+	cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);
+
+	iwl_pcie_tfd_unmap(trans, meta, txq, index);
+
+	/* Input error checking is done when commands are added to queue. */
+	if (meta->flags & CMD_WANT_SKB) {
+		struct page *p = rxb_steal_page(rxb);
+
+		meta->source->resp_pkt = pkt;
+		meta->source->_rx_page_addr = (unsigned long)page_address(p);
+		meta->source->_rx_page_order = trans_pcie->rx_page_order;
+	}
+
+	if (meta->flags & CMD_WANT_ASYNC_CALLBACK)
+		iwl_op_mode_async_cb(trans->op_mode, cmd);
+
+	iwl_pcie_cmdq_reclaim(trans, txq_id, index);
+
+	if (!(meta->flags & CMD_ASYNC)) {
+		if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
+			IWL_WARN(trans,
+				 "HCMD_ACTIVE already clear for command %s\n",
+				 iwl_get_cmd_string(trans, cmd_id));
+		}
+		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
+			       iwl_get_cmd_string(trans, cmd_id));
+		wake_up(&trans_pcie->wait_command_queue);
+	}
+
+	if (meta->flags & CMD_MAKE_TRANS_IDLE) {
+		IWL_DEBUG_INFO(trans, "complete %s - mark trans as idle\n",
+			       iwl_get_cmd_string(trans, cmd->hdr.cmd));
+		set_bit(STATUS_TRANS_IDLE, &trans->status);
+		wake_up(&trans_pcie->d0i3_waitq);
+	}
+
+	if (meta->flags & CMD_WAKE_UP_TRANS) {
+		IWL_DEBUG_INFO(trans, "complete %s - clear trans idle flag\n",
+			       iwl_get_cmd_string(trans, cmd->hdr.cmd));
+		clear_bit(STATUS_TRANS_IDLE, &trans->status);
+		wake_up(&trans_pcie->d0i3_waitq);
+	}
+
+	meta->flags = 0;
+
+	spin_unlock_bh(&txq->lock);
+}
+
+#define HOST_COMPLETE_TIMEOUT	(2 * HZ)
+
+static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
+				    struct iwl_host_cmd *cmd)
+{
+	int ret;
+
+	/* An asynchronous command cannot expect an SKB to be set. */
+	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
+		return -EINVAL;
+
+	ret = iwl_pcie_enqueue_hcmd(trans, cmd);
+	if (ret < 0) {
+		IWL_ERR(trans,
+			"Error sending %s: enqueue_hcmd failed: %d\n",
+			iwl_get_cmd_string(trans, cmd->id), ret);
+		return ret;
+	}
+	return 0;
+}
+
+static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
+				   struct iwl_host_cmd *cmd)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
+	int cmd_idx;
+	int ret;
+
+	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
+		       iwl_get_cmd_string(trans, cmd->id));
+
+	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
+				  &trans->status),
+		 "Command %s: a command is already active!\n",
+		 iwl_get_cmd_string(trans, cmd->id)))
+		return -EIO;
+
+	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
+		       iwl_get_cmd_string(trans, cmd->id));
+
+	if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) {
+		ret = wait_event_timeout(trans_pcie->d0i3_waitq,
+				 pm_runtime_active(&trans_pcie->pci_dev->dev),
+				 msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
+		if (!ret) {
+			IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n");
+			return -ETIMEDOUT;
+		}
+	}
+
+	cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
+	if (cmd_idx < 0) {
+		ret = cmd_idx;
+		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+		IWL_ERR(trans,
+			"Error sending %s: enqueue_hcmd failed: %d\n",
+			iwl_get_cmd_string(trans, cmd->id), ret);
+		return ret;
+	}
+
+	ret = wait_event_timeout(trans_pcie->wait_command_queue,
+				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
+					   &trans->status),
+				 HOST_COMPLETE_TIMEOUT);
+	if (!ret) {
+		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
+			iwl_get_cmd_string(trans, cmd->id),
+			jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
+
+		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
+			txq->read_ptr, txq->write_ptr);
+
+		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
+			       iwl_get_cmd_string(trans, cmd->id));
+		ret = -ETIMEDOUT;
+
+		iwl_force_nmi(trans);
+		iwl_trans_fw_error(trans);
+
+		goto cancel;
+	}
+
+	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
+		iwl_trans_dump_regs(trans);
+		IWL_ERR(trans, "FW error in SYNC CMD %s\n",
+			iwl_get_cmd_string(trans, cmd->id));
+		dump_stack();
+		ret = -EIO;
+		goto cancel;
+	}
+
+	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
+	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
+		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
+		ret = -ERFKILL;
+		goto cancel;
+	}
+
+	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
+		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
+			iwl_get_cmd_string(trans, cmd->id));
+		ret = -EIO;
+		goto cancel;
+	}
+
+	return 0;
+
+cancel:
+	if (cmd->flags & CMD_WANT_SKB) {
+		/*
+		 * Cancel the CMD_WANT_SKB flag for the cmd in the
+		 * TX cmd queue. Otherwise in case the cmd comes
+		 * in later, it will possibly set an invalid
+		 * address (cmd->meta.source).
+		 */
+		txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
+	}
+
+	if (cmd->resp_pkt) {
+		iwl_free_resp(cmd);
+		cmd->resp_pkt = NULL;
+	}
+
+	return ret;
+}
+
+int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
+{
+	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
+	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
+		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
+				  cmd->id);
+		return -ERFKILL;
+	}
+
+	if (cmd->flags & CMD_ASYNC)
+		return iwl_pcie_send_hcmd_async(trans, cmd);
+
+	/* We can still fail on RFKILL, which may be asserted while we wait */
+	return iwl_pcie_send_hcmd_sync(trans, cmd);
+}
+
+static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
+			     struct iwl_txq *txq, u8 hdr_len,
+			     struct iwl_cmd_meta *out_meta,
+			     struct iwl_device_cmd *dev_cmd, u16 tb1_len)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	u16 tb2_len;
+	int i;
+
+	/*
+	 * Set up TFD's third entry to point directly to remainder
+	 * of skb's head, if any
+	 */
+	tb2_len = skb_headlen(skb) - hdr_len;
+
+	if (tb2_len > 0) {
+		dma_addr_t tb2_phys = dma_map_single(trans->dev,
+						     skb->data + hdr_len,
+						     tb2_len, DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
+			iwl_pcie_tfd_unmap(trans, out_meta, txq,
+					   txq->write_ptr);
+			return -EINVAL;
+		}
+		iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
+	}
+
+	/* set up the remaining entries to point to the data */
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		dma_addr_t tb_phys;
+		int tb_idx;
+
+		if (!skb_frag_size(frag))
+			continue;
+
+		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
+					   skb_frag_size(frag), DMA_TO_DEVICE);
+
+		if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
+			iwl_pcie_tfd_unmap(trans, out_meta, txq,
+					   txq->write_ptr);
+			return -EINVAL;
+		}
+		tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
+						skb_frag_size(frag), false);
+
+		out_meta->tbs |= BIT(tb_idx);
+	}
+
+	trace_iwlwifi_dev_tx(trans->dev, skb,
+			     iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
+			     trans_pcie->tfd_size,
+			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
+			     hdr_len);
+	trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len);
+	return 0;
+}
+
+#ifdef CONFIG_INET
+struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page);
+
+	if (!p->page)
+		goto alloc;
+
+	/* enough room on this page */
+	if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE)
+		return p;
+
+	/* We don't have enough room on this page, get a new one. */
+	__free_page(p->page);
+
+alloc:
+	p->page = alloc_page(GFP_ATOMIC);
+	if (!p->page)
+		return NULL;
+	p->pos = page_address(p->page);
+	return p;
+}
+
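+/*
+ * Seed tcph->check with the inverted pseudo-header checksum so that
+ * folding in the checksum of the TCP header and payload later yields
+ * the final value.
+ */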
+static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph,
+					bool ipv6, unsigned int len)
+{
+	if (ipv6) {
+		struct ipv6hdr *iphv6 = iph;
+
+		tcph->check = ~csum_ipv6_magic(&iphv6->saddr, &iphv6->daddr,
+					       len + tcph->doff * 4,
+					       IPPROTO_TCP, 0);
+	} else {
+		struct iphdr *iphv4 = iph;
+
+		ip_send_check(iphv4);
+		tcph->check = ~csum_tcpudp_magic(iphv4->saddr, iphv4->daddr,
+						 len + tcph->doff * 4,
+						 IPPROTO_TCP, 0);
+	}
+}
+
+static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
+				   struct iwl_txq *txq, u8 hdr_len,
+				   struct iwl_cmd_meta *out_meta,
+				   struct iwl_device_cmd *dev_cmd, u16 tb1_len)
+{
+	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
+	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
+	struct ieee80211_hdr *hdr = (void *)skb->data;
+	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
+	unsigned int mss = skb_shinfo(skb)->gso_size;
+	u16 length, iv_len, amsdu_pad;
+	u8 *start_hdr;
+	struct iwl_tso_hdr_page *hdr_page;
+	struct page **page_ptr;
+	int ret;
+	struct tso_t tso;
+
+	/* if the packet is protected, then it must be CCMP or GCMP */
+	BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN);
+	iv_len = ieee80211_has_protected(hdr->frame_control) ?
+		IEEE80211_CCMP_HDR_LEN : 0;
+
+	trace_iwlwifi_dev_tx(trans->dev, skb,
+			     iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
+			     trans_pcie->tfd_size,
+			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);
+
+	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
+	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
+	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
+	amsdu_pad = 0;
+
+	/* total amount of header we may need for this A-MSDU */
+	hdr_room = DIV_ROUND_UP(total_len, mss) *
+		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
+
+	/* Our device supports at most 9 segments, so they will fit in one page */
+	hdr_page = get_page_hdr(trans, hdr_room);
+	if (!hdr_page)
+		return -ENOMEM;
+
+	get_page(hdr_page->page);
+	start_hdr = hdr_page->pos;
+	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+	*page_ptr = hdr_page->page;
+	memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
+	hdr_page->pos += iv_len;
+
+	/*
+	 * Pull the ieee80211 header + IV to be able to use TSO core,
+	 * we will restore it for the tx_status flow.
+	 */
+	skb_pull(skb, hdr_len + iv_len);
+
+	/*
+	 * Remove the length of all the headers that we don't actually
+	 * have in the MPDU by themselves, but that we duplicate into
+	 * all the different MSDUs inside the A-MSDU.
+	 */
+	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
+
+	tso_start(skb, &tso);
+
+	while (total_len) {
+		/* this is the data left for this subframe */
+		unsigned int data_left =
+			min_t(unsigned int, mss, total_len);
+		struct sk_buff *csum_skb = NULL;
+		unsigned int hdr_tb_len;
+		dma_addr_t hdr_tb_phys;
+		struct tcphdr *tcph;
+		u8 *iph, *subf_hdrs_start = hdr_page->pos;
+
+		total_len -= data_left;
+
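+		/* each A-MSDU subframe must start on a 4-byte boundary, so
+		 * pad out the previous subframe before building this one */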
+		memset(hdr_page->pos, 0, amsdu_pad);
+		hdr_page->pos += amsdu_pad;
+		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
+				  data_left)) & 0x3;
+		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
+		hdr_page->pos += ETH_ALEN;
+		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
+		hdr_page->pos += ETH_ALEN;
+
+		length = snap_ip_tcp_hdrlen + data_left;
+		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
+		hdr_page->pos += sizeof(length);
+
+		/*
+		 * This will copy the SNAP as well, which will be considered
+		 * part of the MAC header.
+		 */
+		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
+		iph = hdr_page->pos + 8;
+		tcph = (void *)(iph + ip_hdrlen);
+
+		/* For testing on current hardware only */
+		if (trans_pcie->sw_csum_tx) {
+			csum_skb = alloc_skb(data_left + tcp_hdrlen(skb),
+					     GFP_ATOMIC);
+			if (!csum_skb) {
+				ret = -ENOMEM;
+				goto out_unmap;
+			}
+
+			iwl_compute_pseudo_hdr_csum(iph, tcph,
+						    skb->protocol ==
+							htons(ETH_P_IPV6),
+						    data_left);
+
+			skb_put_data(csum_skb, tcph, tcp_hdrlen(skb));
+			skb_reset_transport_header(csum_skb);
+			csum_skb->csum_start =
+				(unsigned char *)tcp_hdr(csum_skb) -
+						 csum_skb->head;
+		}
+
+		hdr_page->pos += snap_ip_tcp_hdrlen;
+
+		hdr_tb_len = hdr_page->pos - start_hdr;
+		hdr_tb_phys = dma_map_single(trans->dev, start_hdr,
+					     hdr_tb_len, DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) {
+			dev_kfree_skb(csum_skb);
+			ret = -EINVAL;
+			goto out_unmap;
+		}
+		iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
+				       hdr_tb_len, false);
+		trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr,
+					       hdr_tb_len);
+		/* add this subframe's headers' length to the tx_cmd */
+		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
+
+		/* prepare the start_hdr for the next subframe */
+		start_hdr = hdr_page->pos;
+
+		/* put the payload */
+		while (data_left) {
+			unsigned int size = min_t(unsigned int, tso.size,
+						  data_left);
+			dma_addr_t tb_phys;
+
+			if (trans_pcie->sw_csum_tx)
+				skb_put_data(csum_skb, tso.data, size);
+
+			tb_phys = dma_map_single(trans->dev, tso.data,
+						 size, DMA_TO_DEVICE);
+			if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
+				dev_kfree_skb(csum_skb);
+				ret = -EINVAL;
+				goto out_unmap;
+			}
+
+			iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
+					       size, false);
+			trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data,
+						       size);
+
+			data_left -= size;
+			tso_build_data(skb, &tso, size);
+		}
+
+		/* For testing on early hardware only */
+		if (trans_pcie->sw_csum_tx) {
+			__wsum csum;
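+			/*
+			 * The header TB is already DMA-mapped, so sync it
+			 * back to the CPU, patch the folded checksum into
+			 * the TCP header, and hand it back to the device.
+			 */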
+
+			csum = skb_checksum(csum_skb,
+					    skb_checksum_start_offset(csum_skb),
+					    csum_skb->len -
+					    skb_checksum_start_offset(csum_skb),
+					    0);
+			dev_kfree_skb(csum_skb);
+			dma_sync_single_for_cpu(trans->dev, hdr_tb_phys,
+						hdr_tb_len, DMA_TO_DEVICE);
+			tcph->check = csum_fold(csum);
+			dma_sync_single_for_device(trans->dev, hdr_tb_phys,
+						   hdr_tb_len, DMA_TO_DEVICE);
+		}
+	}
+
+	/* re-add the WiFi header and IV */
+	skb_push(skb, hdr_len + iv_len);
+
+	return 0;
+
+out_unmap:
+	iwl_pcie_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
+	return ret;
+}
+#else /* CONFIG_INET */
+static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
+				   struct iwl_txq *txq, u8 hdr_len,
+				   struct iwl_cmd_meta *out_meta,
+				   struct iwl_device_cmd *dev_cmd, u16 tb1_len)
+{
+	/* No A-MSDU without CONFIG_INET */
+	WARN_ON(1);
+
+	return -1;
+}
+#endif /* CONFIG_INET */
+
+int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
+		      struct iwl_device_cmd *dev_cmd, int txq_id)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct ieee80211_hdr *hdr;
+	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
+	struct iwl_cmd_meta *out_meta;
+	struct iwl_txq *txq;
+	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
+	void *tb1_addr;
+	void *tfd;
+	u16 len, tb1_len;
+	bool wait_write_ptr;
+	__le16 fc;
+	u8 hdr_len;
+	u16 wifi_seq;
+	bool amsdu;
+
+	txq = trans_pcie->txq[txq_id];
+
+	if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
+		      "TX on unused queue %d\n", txq_id))
+		return -EINVAL;
+
+	if (unlikely(trans_pcie->sw_csum_tx &&
+		     skb->ip_summed == CHECKSUM_PARTIAL)) {
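+		/*
+		 * This flow bypasses HW checksum offload, so finish the
+		 * partial checksum in software: checksum from csum_start to
+		 * the end of the frame and fold the result into the slot
+		 * at csum_offset.
+		 */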
+		int offs = skb_checksum_start_offset(skb);
+		int csum_offs = offs + skb->csum_offset;
+		__wsum csum;
+
+		if (skb_ensure_writable(skb, csum_offs + sizeof(__sum16)))
+			return -1;
+
+		csum = skb_checksum(skb, offs, skb->len - offs, 0);
+		*(__sum16 *)(skb->data + csum_offs) = csum_fold(csum);
+
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	}
+
+	if (skb_is_nonlinear(skb) &&
+	    skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
+	    __skb_linearize(skb))
+		return -ENOMEM;
+
+	/* mac80211 always puts the full header into the SKB's head,
+	 * so there's no need to check if it's readable there
+	 */
+	hdr = (struct ieee80211_hdr *)skb->data;
+	fc = hdr->frame_control;
+	hdr_len = ieee80211_hdrlen(fc);
+
+	spin_lock(&txq->lock);
+
+	if (iwl_queue_space(trans, txq) < txq->high_mark) {
+		iwl_stop_queue(trans, txq);
+
+		/* don't put the packet on the ring if there is no room */
+		if (unlikely(iwl_queue_space(trans, txq) < 3)) {
+			struct iwl_device_cmd **dev_cmd_ptr;
+
+			dev_cmd_ptr = (void *)((u8 *)skb->cb +
+					       trans_pcie->dev_cmd_offs);
+
+			*dev_cmd_ptr = dev_cmd;
+			__skb_queue_tail(&txq->overflow_q, skb);
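+			/*
+			 * The reclaim flow re-sends frames from overflow_q
+			 * once space frees up on the ring.
+			 */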
+
+			spin_unlock(&txq->lock);
+			return 0;
+		}
+	}
+
+	/* In AGG mode, the index in the ring must correspond to the WiFi
+	 * sequence number. This is a HW requirement that helps the SCD
+	 * parse the BA.
+	 * Check here that the packets are in the right place on the ring.
+	 */
+	wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+	WARN_ONCE(txq->ampdu &&
+		  (wifi_seq & 0xff) != txq->write_ptr,
+		  "Q: %d WiFi Seq %d tfdNum %d",
+		  txq_id, wifi_seq, txq->write_ptr);
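+	/*
+	 * E.g. a frame with WiFi SN 0x123 must land at ring index 0x23
+	 * (the SN masked to the 256-entry ring) so the SCD can map BA
+	 * bitmap bits back to TFDs.
+	 */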
+
+	/* Set up driver data for this TFD */
+	txq->entries[txq->write_ptr].skb = skb;
+	txq->entries[txq->write_ptr].cmd = dev_cmd;
+
+	dev_cmd->hdr.sequence =
+		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
+			    INDEX_TO_SEQ(txq->write_ptr)));
+
+	tb0_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr);
+	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
+		       offsetof(struct iwl_tx_cmd, scratch);
+
+	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
+	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
+
+	/* Set up first empty entry in queue's array of Tx/cmd buffers */
+	out_meta = &txq->entries[txq->write_ptr].meta;
+	out_meta->flags = 0;
+
+	/*
+	 * The second TB (tb1) points to the remainder of the TX command
+	 * and the 802.11 header - dword aligned size
+	 * (This calculation modifies the TX command, so do it before the
+	 * setup of the first TB)
+	 */
+	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
+	      hdr_len - IWL_FIRST_TB_SIZE;
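+	/*
+	 * TB0 carries the first IWL_FIRST_TB_SIZE bytes of the command
+	 * (copied into first_tb_bufs below); TB1 carries the rest of the
+	 * TX command plus the 802.11 header.
+	 */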
+	/* do not align A-MSDU to dword as the subframe header aligns it */
+	amsdu = ieee80211_is_data_qos(fc) &&
+		(*ieee80211_get_qos_ctl(hdr) &
+		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
+	if (trans_pcie->sw_csum_tx || !amsdu) {
+		tb1_len = ALIGN(len, 4);
+		/* Tell NIC about any 2-byte padding after MAC header */
+		if (tb1_len != len)
+			tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD);
+	} else {
+		tb1_len = len;
+	}
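+	/*
+	 * E.g. a 26-byte QoS header typically leaves len two bytes short of
+	 * a dword boundary; ALIGN() adds the pad and TX_CMD_FLG_MH_PAD tells
+	 * the NIC to skip those bytes after the MAC header.
+	 */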
+
+	/*
+	 * The first TB points to bi-directional DMA data; we'll
+	 * memcpy the data into it later.
+	 */
+	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
+			       IWL_FIRST_TB_SIZE, true);
+
+	/* there must be data left over for TB1 or this code must be changed */
+	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);
+
+	/* map the data for TB1 */
+	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
+	tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
+		goto out_err;
+	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
+
+	/*
+	 * If gso_size wasn't set, don't give the frame "amsdu treatment"
+	 * (adding subframes, etc.).
+	 * This can happen in some testing flows when the A-MSDU was already
+	 * pre-built and we just need to send the resulting skb.
+	 */
+	if (amsdu && skb_shinfo(skb)->gso_size) {
+		if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
+						     out_meta, dev_cmd,
+						     tb1_len)))
+			goto out_err;
+	} else if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
+				       out_meta, dev_cmd, tb1_len))) {
+		goto out_err;
+	}
+
+	/* building the A-MSDU might have changed this data, so memcpy it now */
+	memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
+	       IWL_FIRST_TB_SIZE);
+
+	tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
+	/* Set up entry for this TFD in Tx byte-count array */
+	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
+					 iwl_pcie_tfd_get_num_tbs(trans, tfd));
+
+	wait_write_ptr = ieee80211_has_morefrags(fc);
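+	/*
+	 * If more fragments follow, defer the write pointer update (the
+	 * doorbell) until the final fragment has been queued.
+	 */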
+
+	/* start timer if queue currently empty */
+	if (txq->read_ptr == txq->write_ptr) {
+		if (txq->wd_timeout) {
+			/*
+			 * If the TXQ is active, arm the timer now; if not,
+			 * save the timeout in the remainder so that the
+			 * timer will be armed with the right value when the
+			 * station wakes up.
+			 */
+			if (!txq->frozen)
+				mod_timer(&txq->stuck_timer,
+					  jiffies + txq->wd_timeout);
+			else
+				txq->frozen_expiry_remainder = txq->wd_timeout;
+		}
+		IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id);
+		iwl_trans_ref(trans);
+	}
+
+	/* Tell device the write index *just past* this latest filled TFD */
+	txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
+	if (!wait_write_ptr)
+		iwl_pcie_txq_inc_wr_ptr(trans, txq);
+
+	/*
+	 * At this point the frame is "transmitted" successfully
+	 * and we will get a TX status notification eventually.
+	 */
+	spin_unlock(&txq->lock);
+	return 0;
+out_err:
+	spin_unlock(&txq->lock);
+	return -1;
+}