GIT a437af9970a1e1a4abfa776673c6bc5bb53f18e3 master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6.git#ALL

---
diff --git a/Documentation/networking/README.ipw2100 b/Documentation/networking/README.ipw2100
new file mode 100644
--- /dev/null
+++ b/Documentation/networking/README.ipw2100
@@ -0,0 +1,246 @@
+
+===========================
+Intel(R) PRO/Wireless 2100 Network Connection Driver for Linux
+README.ipw2100
+
+March 14, 2005
+
+===========================
+Index
+---------------------------
+0. Introduction
+1. Release 1.1.0 Current Supported Features
+2. Command Line Parameters
+3. Sysfs Helper Files
+4. Radio Kill Switch
+5. Dynamic Firmware
+6. Power Management
+7. Support
+8. License
+
+
+===========================
+0. Introduction
+---------------------------
+
+This document provides a brief overview of the features supported by the 
+IPW2100 driver project.  The main project website, where the latest 
+development version of the driver can be found, is:
+
+	http://ipw2100.sourceforge.net
+
+There you can find not only the latest releases, but also information about
+potential fixes and patches, as well as links to the development mailing list
+for the driver project.
+
+
+===========================
+1. Release 1.1.0 Current Supported Features
+---------------------------     
+- Managed (BSS) and Ad-Hoc (IBSS)
+- WEP (shared key and open)
+- Wireless Tools support 
+- 802.1x (tested with XSupplicant 1.0.1)
+
+Enabled (but not supported) features:
+- Monitor/RFMon mode
+- WPA/WPA2
+
+The distinction between officially supported and enabled is a reflection
+of the amount of validation and interoperability testing that has been
+performed on a given feature.
+
+
+===========================
+2. Command Line Parameters
+---------------------------     
+
+If the driver is built as a module, the following optional parameters can be
+passed on the command line when loading the module with modprobe, using this
+syntax:
+
+	modprobe ipw2100 [<option>=<VAL1><,VAL2>...]
+
+For example, to disable the radio on driver loading, enter:
+
+	modprobe ipw2100 disable=1
+
+The ipw2100 driver supports the following module parameters:
+
+Name		Value		Example:
+debug		0x0-0xffffffff	debug=1024
+mode		0,1,2		mode=1   /* AdHoc */
+channel		int		channel=3 /* Only valid in AdHoc or Monitor */
+associate	boolean		associate=0 /* Do NOT auto associate */
+disable		boolean		disable=1 /* Do not power the HW */
+
+
+===========================
+3. Sysfs Helper Files
+---------------------------     
+
+There are several ways to control the behavior of the driver.  Many of the 
+general capabilities are exposed through the Wireless Tools (iwconfig).  There
+are a few capabilities that are exposed through entries in the Linux Sysfs.
+
+
+----- Driver Level ------
+For the driver level files, look in /sys/bus/pci/drivers/ipw2100/
+
+  debug_level
+
+	This controls the same global as the 'debug' module parameter.  For
+	information on the various debugging levels available, run the 'dvals'
+	script found in the driver source directory.
+
+	NOTE:  'debug_level' is only enabled if CONFIG_IPW2100_DEBUG is turned
+	       on.
+
+----- Device Level ------
+For the device level files look in
+	
+	/sys/bus/pci/drivers/ipw2100/{PCI-ID}/
+
+For example:
+	/sys/bus/pci/drivers/ipw2100/0000:02:01.0
+
+The following device level sysfs entries are available:
+
+  rf_kill
+	read - 
+	0 = RF kill not enabled (radio on)
+	1 = SW based RF kill active (radio off)
+	2 = HW based RF kill active (radio off)
+	3 = Both HW and SW RF kill active (radio off)
+	write -
+	0 = If SW based RF kill active, turn the radio back on
+	1 = If radio is on, activate SW based RF kill
+
+	NOTE: If you enable the SW based RF kill and then toggle the HW
+	      based RF kill from ON -> OFF -> ON, the radio will NOT come back on.
+
+
+===========================
+4. Radio Kill Switch
+---------------------------
+Most laptops provide the ability for the user to physically disable the radio.
+Some vendors have implemented this as a physical switch that requires no
+software to turn the radio off and on.  On other laptops, however, the switch
+is controlled through a button being pressed and a software driver then making
+calls to turn the radio off and on.  This is referred to as a "software based
+RF kill switch".
+
+See the Sysfs helper file 'rf_kill' for determining the state of the RF switch
+on your system.
+
+
+===========================
+5. Dynamic Firmware
+---------------------------     
+As the firmware is licensed under a restricted use license, it cannot be
+included within the kernel sources.  To enable the IPW2100 you will need a 
+firmware image to load into the wireless NIC's processors.
+
+You can obtain these images from <http://ipw2100.sf.net/firmware.php>.
+
+See INSTALL for instructions on installing the firmware.
+
+
+===========================
+6. Power Management
+---------------------------     
+The IPW2100 supports the configuration of the Power Save Protocol 
+through a private wireless extension interface.  The IPW2100 supports 
+the following different modes:
+
+	off	No power management.  Radio is always on.
+	on	Automatic power management
+	1-5	Different levels of power management.  The higher the 
+		number, the greater the power savings, but with an impact on
+		packet latencies. 
+
+Power management works by powering down the radio after a certain 
+interval of time has passed where no packets are passed through the 
+radio.  Once powered down, the radio remains in that state for a given 
+period of time.  For higher power savings, the interval between last 
+packet processed to sleep is shorter and the sleep period is longer.
+
+When the radio is asleep, the access point sending data to the station 
+must buffer packets at the AP until the station wakes up and requests 
+any buffered packets.  If you have an AP that does not correctly support 
+the PSP protocol you may experience packet loss or very poor performance 
+while power management is enabled.  If this is the case, you will need 
+to try to find a firmware update for your AP, or disable power
+management (via `iwconfig eth1 power off`).
+
+To configure the power level on the IPW2100 you use a combination of 
+iwconfig and iwpriv.  iwconfig is used to turn power management on, off, 
+and set it to auto.
+
+	iwconfig eth1 power off    Disables radio power down
+	iwconfig eth1 power on     Enables radio power management to 
+				   last set level (defaults to AUTO)
+	iwpriv eth1 set_power 0    Sets power level to AUTO and enables 
+				   power management if not previously 
+				   enabled.
+	iwpriv eth1 set_power 1-5  Set the power level as specified, 
+				   enabling power management if not 
+				   previously enabled.
+
+You can view the current power level setting via:
+	
+	iwpriv eth1 get_power
+
+It will return the current period or timeout that is configured as a string
+in the form of xxxx/yyyy (z) where xxxx is the timeout interval (amount of
+time after packet processing), yyyy is the period to sleep (amount of time to 
+wait before powering the radio and querying the access point for buffered
+packets), and z is the 'power level'.  If power management is turned off the
+xxxx/yyyy will be replaced with 'off' -- the level reported will be the active
+level if `iwconfig eth1 power on` is invoked.
+
+
+===========================
+7. Support
+---------------------------     
+
+For general development information and support,
+go to:
+	
+    http://ipw2100.sf.net/
+
+The ipw2100 1.1.0 driver and firmware can be downloaded from:  
+
+    http://support.intel.com
+
+For installation support on the ipw2100 1.1.0 driver on Linux kernels 
+2.6.8 or greater, email support is available from:  
+
+    http://supportmail.intel.com
+
+===========================
+8. License
+---------------------------     
+
+  Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify it 
+  under the terms of the GNU General Public License (version 2) as 
+  published by the Free Software Foundation.
+  
+  This program is distributed in the hope that it will be useful, but WITHOUT 
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for 
+  more details.
+  
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59 
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+  
+  The full GNU General Public License is included in this distribution in the
+  file called LICENSE.
+  
+  License Contact Information:
+  James P. Ketrenos <ipw2100-admin@linux.intel.com>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
diff --git a/Documentation/networking/README.ipw2200 b/Documentation/networking/README.ipw2200
new file mode 100644
--- /dev/null
+++ b/Documentation/networking/README.ipw2200
@@ -0,0 +1,300 @@
+
+Intel(R) PRO/Wireless 2915ABG Driver for Linux in support of:
+
+Intel(R) PRO/Wireless 2200BG Network Connection 
+Intel(R) PRO/Wireless 2915ABG Network Connection 
+
+Note: The Intel(R) PRO/Wireless 2915ABG Driver for Linux and the Intel(R)
+PRO/Wireless 2200BG Driver for Linux are a single, unified driver that works
+on both hardware adapters listed above.  In this document the Intel(R)
+PRO/Wireless 2915ABG Driver for Linux will be used to refer to the
+unified driver.
+
+Copyright (C) 2004-2005, Intel Corporation
+
+README.ipw2200
+
+Version: 1.0.0
+Date   : January 31, 2005
+
+
+Index
+-----------------------------------------------
+1.   Introduction
+1.1. Overview of Features
+1.2. Command Line Parameters
+1.3. Wireless Extension Private Methods
+1.4. Sysfs Helper Files
+2.   About the Version Numbers
+3.   Support
+4.   License
+
+
+1.   Introduction
+-----------------------------------------------
+The following sections attempt to provide a brief introduction to using 
+the Intel(R) PRO/Wireless 2915ABG Driver for Linux.
+
+This document is not meant to be a comprehensive manual on 
+understanding or using wireless technologies, but should be sufficient 
+to get you moving without wires on Linux.
+
+For information on building and installing the driver, see the INSTALL
+file.
+
+
+1.1. Overview of Features
+-----------------------------------------------
+The current release (1.0.0) supports the following features:
+
++ BSS mode (Infrastructure, Managed)
++ IBSS mode (Ad-Hoc)
++ WEP (OPEN and SHARED KEY mode)
++ 802.1x EAP via wpa_supplicant and xsupplicant
++ Wireless Extension support 
++ Full B and G rate support (2200 and 2915)
++ Full A rate support (2915 only)
++ Transmit power control
++ S state support (ACPI suspend/resume)
++ long/short preamble support
+
+
+
+1.2. Command Line Parameters
+-----------------------------------------------
+
+Like many modules used in the Linux kernel, the Intel(R) PRO/Wireless 
+2915ABG Driver for Linux allows certain configuration options to be 
+provided as module parameters.  The most common way to specify a module 
+parameter is via the command line.  
+
+The general form is:
+
+% modprobe ipw2200 parameter=value
+
+Where the supported parameters are:
+
+  associate
+	Set to 0 to disable the auto scan-and-associate functionality of the
+	driver.  If disabled, the driver will not attempt to scan 
+	for and associate to a network until it has been configured with 
+	one or more properties for the target network, for example configuring 
+	the network SSID.  Default is 1 (auto-associate).
+	
+	Example: % modprobe ipw2200 associate=0
+
+  auto_create
+	Set to 0 to disable the auto creation of an Ad-Hoc network 
+	matching the channel and network name parameters provided.  
+	Default is 1.
+
+  channel
+	Channel number for association.  The normal method for setting
+	the channel would be to use the standard wireless tools
+	(e.g. `iwconfig eth1 channel 10`), but it is sometimes useful
+	to set this while debugging.  Channel 0 means 'ANY'.
+
+  debug
+	If using a debug build, this is used to control the amount of debug
+	info that is logged.  See the 'dval' and 'load' scripts for more info
+	on how to use this (the dval and load scripts are provided as part
+	of the ipw2200 development snapshot releases available from the
+	SourceForge project at http://ipw2200.sf.net).
+
+  mode
+	Can be used to set the default mode of the adapter.  
+	0 = Managed, 1 = Ad-Hoc
+
+
+1.3. Wireless Extension Private Methods
+-----------------------------------------------
+
+Because the standard Wireless Tools interface is designed around generic
+hardware, certain capabilities are not exposed through it.  For this reason,
+drivers may declare custom, or private, methods.  The Intel(R) PRO/Wireless
+2915ABG Driver for Linux defines several of these to configure various
+settings.
+
+The general form of using the private wireless methods is:
+
+	% iwpriv $IFNAME method parameters
+
+Where $IFNAME is the interface name the device is registered with 
+(typically eth1, customized via one of the various network interface
+name managers, such as ifrename).
+
+The supported private methods are:
+
+  get_mode
+	Can be used to report which IEEE mode the driver is
+	configured to support.  Example:
+	
+	% iwpriv eth1 get_mode
+	eth1	get_mode:802.11bg (6)
+
+  set_mode
+	Can be used to configure which IEEE mode the driver will 
+	support.  
+
+	Usage:
+	% iwpriv eth1 set_mode {mode}
+	Where {mode} is a number in the range 1-7:
+	1	802.11a (2915 only)
+	2	802.11b
+	3	802.11ab (2915 only)
+	4	802.11g 
+	5	802.11ag (2915 only)
+	6	802.11bg
+	7	802.11abg (2915 only)
+
+  get_preamble
+	Can be used to report configuration of preamble length.
+
+  set_preamble
+	Can be used to set the configuration of preamble length:
+
+	Usage:
+	% iwpriv eth1 set_preamble {mode}
+	Where {mode} is one of:
+	1	Long preamble only
+	0	Auto (long or short based on connection)
+	
+
+1.4. Sysfs Helper Files
+-----------------------------------------------
+
+The Linux kernel provides a pseudo file system that can be used to 
+access various components of the operating system.  The Intel(R) 
+PRO/Wireless 2915ABG Driver for Linux exposes several configuration 
+parameters through this mechanism.
+
+An entry in the sysfs can support reading and/or writing.  You can 
+typically query the contents of a sysfs entry through the use of cat, 
+and can set the contents via echo.  For example:
+
+% cat /sys/bus/pci/drivers/ipw2200/debug_level
+
+Will report the current debug level of the driver's logging subsystem 
+(only available if CONFIG_IPW_DEBUG was configured when the driver was 
+built).
+
+You can set the debug level via:
+
+% echo $VALUE > /sys/bus/pci/drivers/ipw2200/debug_level
+
+Where $VALUE would be a number in the case of this sysfs entry.  The 
+input to sysfs files does not have to be a number.  For example, the 
+firmware loader used by hotplug utilizes sysfs entries for transferring 
+the firmware image from user space into the driver.
+
+The Intel(R) PRO/Wireless 2915ABG Driver for Linux exposes sysfs entries 
+at two levels -- driver level, which applies to all instances of the
+driver (in the event that there is more than one device installed) and
+device level, which applies only to the single specific instance.
+
+
+1.4.1 Driver Level Sysfs Helper Files
+-----------------------------------------------
+
+For the driver level files, look in /sys/bus/pci/drivers/ipw2200/
+
+  debug_level  
+	
+	This controls the same global as the 'debug' module parameter
+
+
+1.4.2 Device Level Sysfs Helper Files
+-----------------------------------------------
+
+For the device level files, look in
+	
+	/sys/bus/pci/drivers/ipw2200/{PCI-ID}/
+
+For example:
+	/sys/bus/pci/drivers/ipw2200/0000:02:01.0
+
+The following device level sysfs entries are available:
+
+  rf_kill
+	read - 
+	0 = RF kill not enabled (radio on)
+	1 = SW based RF kill active (radio off)
+	2 = HW based RF kill active (radio off)
+	3 = Both HW and SW RF kill active (radio off)
+	write -
+	0 = If SW based RF kill active, turn the radio back on
+	1 = If radio is on, activate SW based RF kill
+
+	NOTE: If you enable the SW based RF kill and then toggle the HW
+	      based RF kill from ON -> OFF -> ON, the radio will NOT come back on.
+	
+  ucode 
+	read-only access to the ucode version number
+
+
+2.   About the Version Numbers
+-----------------------------------------------
+
+Due to the nature of open source development projects, there are 
+frequently changes being incorporated that have not gone through 
+a complete validation process.  These changes are incorporated into 
+development snapshot releases.
+
+Releases are numbered with a three level scheme: 
+
+	major.minor.development
+
+Any version where the 'development' portion is 0 (for example
+1.0.0, 1.1.0, etc.) indicates a stable version that will be made 
+available for kernel inclusion.
+
+Any version where the 'development' portion is not a 0 (for
+example 1.0.1, 1.1.5, etc.) indicates a development version that is
+being made available for testing and cutting edge users.  The stability 
+and functionality of the development releases are not known.  We make
+an effort to keep all snapshots reasonably stable, but due to the
+frequency of their release, and the desire to get those releases 
+available as quickly as possible, unknown anomalies should be expected.
+
+The major version number will be incremented when significant changes
+are made to the driver.  Currently, there are no major changes planned.
+
+
+3.  Support
+-----------------------------------------------
+
+For installation support of the 1.0.0 version, you can contact 
+http://supportmail.intel.com, or you can use the open source project 
+support.
+
+For general information and support, go to:
+	
+    http://ipw2200.sf.net/
+
+
+4.  License
+-----------------------------------------------
+
+  Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify it 
+  under the terms of the GNU General Public License version 2 as 
+  published by the Free Software Foundation.
+  
+  This program is distributed in the hope that it will be useful, but WITHOUT 
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for 
+  more details.
+  
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59 
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+  
+  The full GNU General Public License is included in this distribution in the
+  file called LICENSE.
+  
+  Contact Information:
+  James P. Ketrenos <ipw2100-admin@linux.intel.com>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+
diff --git a/Documentation/networking/cxgb.txt b/Documentation/networking/cxgb.txt
new file mode 100644
--- /dev/null
+++ b/Documentation/networking/cxgb.txt
@@ -0,0 +1,352 @@
+                 Chelsio N210 10Gb Ethernet Network Controller
+
+                         Driver Release Notes for Linux
+
+                                 Version 2.1.1
+
+                                 June 20, 2005
+
+CONTENTS
+========
+ INTRODUCTION
+ FEATURES
+ PERFORMANCE
+ DRIVER MESSAGES
+ KNOWN ISSUES
+ SUPPORT
+
+
+INTRODUCTION
+============
+
+ This document describes the Linux driver for the Chelsio 10Gb Ethernet Network
+ Controller. This driver supports the Chelsio N210 NIC and is backward
+ compatible with the Chelsio N110 model 10Gb NICs.
+
+
+FEATURES
+========
+
+ Adaptive Interrupts (adaptive-rx)
+ ---------------------------------
+
+  This feature provides an adaptive algorithm that adjusts the interrupt
+  coalescing parameters, allowing the driver to dynamically adapt the latency
+  settings to achieve the highest performance during various types of network
+  load.
+
+  The interface used to control this feature is ethtool. Please see the
+  ethtool manpage for additional usage information.
+
+  By default, adaptive-rx is disabled.
+  To enable adaptive-rx:
+
+      ethtool -C <interface> adaptive-rx on
+
+  To disable adaptive-rx, use ethtool:
+
+      ethtool -C <interface> adaptive-rx off
+
+  After disabling adaptive-rx, the timer latency value will be set to 50us.
+  You may set the timer latency after disabling adaptive-rx:
+
+      ethtool -C <interface> rx-usecs <microseconds>
+
+  An example to set the timer latency value to 100us on eth0:
+
+      ethtool -C eth0 rx-usecs 100
+
+  You may also provide a timer latency value while disabling adaptive-rx:
+
+      ethtool -C <interface> adaptive-rx off rx-usecs <microseconds>
+
+  If adaptive-rx is disabled and a timer latency value is specified, the timer
+  will be set to the specified value until changed by the user or until
+  adaptive-rx is enabled.
+
+  To view the status of the adaptive-rx and timer latency values:
+
+      ethtool -c <interface>
+
+
+ TCP Segmentation Offloading (TSO) Support
+ -----------------------------------------
+
+  This feature, also known as "large send", enables a system's protocol stack
+  to offload portions of outbound TCP processing to a network interface card
+  thereby reducing system CPU utilization and enhancing performance.
+
+  The interface used to control this feature is ethtool version 1.8 or higher.
+  Please see the ethtool manpage for additional usage information.
+
+  By default, TSO is enabled.
+  To disable TSO:
+
+      ethtool -K <interface> tso off
+
+  To enable TSO:
+
+      ethtool -K <interface> tso on
+
+  To view the status of TSO:
+
+      ethtool -k <interface>
+
+
+PERFORMANCE
+===========
+
+ The following information is provided as an example of how to change system
+ parameters for "performance tuning" and what values to use. You may or may not
+ want to change these system parameters, depending on your server/workstation
+ application. Doing so is not warranted in any way by Chelsio Communications,
+ and is done at "YOUR OWN RISK". Chelsio will not be held responsible for loss
+ of data or damage to equipment.
+
+ Your distribution may have a different way of doing things, or you may prefer
+ a different method. These commands are shown only to provide an example of
+ what to do and are by no means definitive.
+
+ Making any of the following system changes will only last until you reboot
+ your system. You may want to write a script that runs at boot-up which
+ includes the optimal settings for your system.
+
+  Setting PCI Latency Timer:
+      setpci -d 1425:* 0x0c.l=0x0000F800
+
+  Disabling TCP timestamp:
+      sysctl -w net.ipv4.tcp_timestamps=0
+
+  Disabling SACK:
+      sysctl -w net.ipv4.tcp_sack=0
+
+  Setting large number of incoming connection requests:
+      sysctl -w net.ipv4.tcp_max_syn_backlog=3000
+
+  Setting maximum receive socket buffer size:
+      sysctl -w net.core.rmem_max=1024000
+
+  Setting maximum send socket buffer size:
+      sysctl -w net.core.wmem_max=1024000
+
+  Set smp_affinity (on a multiprocessor system) to a single CPU:
+      echo 1 > /proc/irq/<interrupt_number>/smp_affinity
+
+  Setting default receive socket buffer size:
+      sysctl -w net.core.rmem_default=524287
+
+  Setting default send socket buffer size:
+      sysctl -w net.core.wmem_default=524287
+
+  Setting maximum option memory buffers:
+      sysctl -w net.core.optmem_max=524287
+
+  Setting maximum backlog (# of unprocessed packets before kernel drops):
+      sysctl -w net.core.netdev_max_backlog=300000
+
+  Setting TCP read buffers (min/default/max):
+      sysctl -w net.ipv4.tcp_rmem="10000000 10000000 10000000"
+
+  Setting TCP write buffers (min/pressure/max):
+      sysctl -w net.ipv4.tcp_wmem="10000000 10000000 10000000"
+
+  Setting TCP buffer space (min/pressure/max):
+      sysctl -w net.ipv4.tcp_mem="10000000 10000000 10000000"
+
+  TCP window size for single connections:
+   The receive buffer (RX_WINDOW) size must be at least as large as the
+   Bandwidth-Delay Product of the communication link between the sender and
+   receiver. Due to the variations of RTT, you may want to increase the buffer
+   size up to 2 times the Bandwidth-Delay Product. Reference page 289 of
+   "TCP/IP Illustrated, Volume 1, The Protocols" by W. Richard Stevens.
+   At 10Gb speeds, use the following formula:
+       RX_WINDOW >= 1.25MBytes * RTT(in milliseconds)
+       Example for RTT with 100us: RX_WINDOW = (1,250,000 * 0.1) = 125,000
+   RX_WINDOW sizes of 256KB - 512KB should be sufficient.
+   Setting the min, max, and default receive buffer (RX_WINDOW) size:
+       sysctl -w net.ipv4.tcp_rmem="<min> <default> <max>"
+
+  TCP window size for multiple connections:
+   The receive buffer (RX_WINDOW) size may be calculated the same as single
+   connections, but should be divided by the number of connections. The
+   smaller window prevents congestion and facilitates better pacing,
+   especially if/when MAC level flow control does not work well or when it is
+   not supported on the machine. Experimentation may be necessary to attain
+   the correct value. This method is provided as a starting point for the
+   correct receive buffer size.
+   Setting the min, max, and default receive buffer (RX_WINDOW) size is
+   performed in the same manner as for a single connection.
+
+
+DRIVER MESSAGES
+===============
+
+ The following messages are the most common messages logged by syslog. These
+ may be found in /var/log/messages.
+
+  Driver up:
+     Chelsio Network Driver - version 2.1.1
+
+  NIC detected:
+     eth#: Chelsio N210 1x10GBaseX NIC (rev #), PCIX 133MHz/64-bit
+
+  Link up:
+     eth#: link is up at 10 Gbps, full duplex
+
+  Link down:
+     eth#: link is down
+
+
+KNOWN ISSUES
+============
+
+ These issues have been identified during testing. The following information
+ is provided as a workaround for each problem. In some cases, the problem is
+ inherent to Linux or to a particular Linux distribution and/or hardware
+ platform.
+
+  1. Large number of TCP retransmits on a multiprocessor (SMP) system.
+
+      On a system with multiple CPUs, the interrupt (IRQ) for the network
+      controller may be bound to more than one CPU. This will cause TCP
+      retransmits if the packet data were to be split across different CPUs
+      and re-assembled in a different order than expected.
+
+      To eliminate the TCP retransmits, set smp_affinity on the particular
+      interrupt to a single CPU. You can locate the interrupt (IRQ) used on
+      the N110/N210 by using ifconfig:
+          ifconfig <dev_name> | grep Interrupt
+      Set the smp_affinity to a single CPU:
+          echo 1 > /proc/irq/<interrupt_number>/smp_affinity
+
+      It is highly suggested that you do not run the irqbalance daemon on your
+      system, as this will change any smp_affinity setting you have applied.
+      The irqbalance daemon runs on a 10 second interval and binds interrupts
+      to the least loaded CPU determined by the daemon. To disable this daemon:
+          chkconfig --level 2345 irqbalance off
+
+      By default, some Linux distributions enable the kernel feature,
+      irqbalance, which performs the same function as the daemon. To disable
+      this feature, add the following line to your bootloader:
+          noirqbalance
+
+          Example using the Grub bootloader:
+              title Red Hat Enterprise Linux AS (2.4.21-27.ELsmp)
+              root (hd0,0)
+              kernel /vmlinuz-2.4.21-27.ELsmp ro root=/dev/hda3 noirqbalance
+              initrd /initrd-2.4.21-27.ELsmp.img
+
+  2. After running insmod, the driver is loaded and the incorrect network
+     interface is brought up without running ifup.
+
+      When using 2.4.x kernels, including RHEL kernels, the Linux kernel
+      invokes a script named "hotplug". This script is primarily used to
+      automatically bring up USB devices when they are plugged in, however,
+      the script also attempts to automatically bring up a network interface
+      after loading the kernel module. The hotplug script does this by scanning
+      the ifcfg-eth# config files in /etc/sysconfig/network-scripts, looking
+      for HWADDR=<mac_address>.
+
+      If the hotplug script does not find the HWADDR within any of the
+      ifcfg-eth# files, it will bring up the device with the next available
+      interface name. If this interface is already configured for a different
+      network card, your new interface will have an incorrect IP address and
+      network settings.
+
+      To solve this issue, you can add the HWADDR=<mac_address> key to the
+      interface config file of your network controller.
+
+      To disable this "hotplug" feature, you may add the driver (module name)
+      to the "blacklist" file located in /etc/hotplug. It has been noted that
+      this does not work for network devices because the net.agent script
+      does not use the blacklist file. Simply remove, or rename, the net.agent
+      script located in /etc/hotplug to disable this feature.
+
+  3. Transport Protocol (TP) hangs when running heavy multi-connection traffic
+     on an AMD Opteron system with HyperTransport PCI-X Tunnel chipset.
+
+      If your AMD Opteron system uses the AMD-8131 HyperTransport PCI-X Tunnel
+      chipset, you may experience the "133-MHz Mode Split Completion Data
+      Corruption" bug identified by AMD while using a 133MHz PCI-X card on the
+      PCI-X bus.
+
+      AMD states, "Under highly specific conditions, the AMD-8131 PCI-X Tunnel
+      can provide stale data via split completion cycles to a PCI-X card that
+      is operating at 133 Mhz", causing data corruption.
+
+      AMD provides three workarounds for this problem; however, Chelsio
+      recommends the first option for best performance with this bug:
+
+        For 133Mhz secondary bus operation, limit the transaction length and
+        the number of outstanding transactions, via BIOS configuration
+        programming of the PCI-X card, to the following:
+
+           Data Length (bytes): 1k
+           Total allowed outstanding transactions: 2
+
+      Please refer to AMD 8131-HT/PCI-X Errata 26310 Rev 3.08 August 2004,
+      section 56, "133-MHz Mode Split Completion Data Corruption" for more
+      details on this bug and the workarounds suggested by AMD.
+
+      It may be possible to work outside AMD's recommended PCI-X settings; try
+      increasing the Data Length to 2k bytes for increased performance. If you
+      have issues with these settings, please revert to the "safe" settings
+      and duplicate the problem before submitting a bug or asking for support.
+
+      NOTE: The default setting on most systems is 8 outstanding transactions
+            and 2k bytes data length.
+
+  4. On multiprocessor systems, it has been noted that an application which
+     is handling 10Gb networking can switch between CPUs causing degraded
+     and/or unstable performance.
+
+      If running on an SMP system and taking performance measurements, it
+      is suggested you either run the latest netperf-2.4.0+ or use a binding
+      tool such as Tim Hockin's procstate utilities (runon)
+      <http://www.hockin.org/~thockin/procstate/>.
+
+      Binding netserver and netperf (or other applications) to particular
+      CPUs can make a significant difference in performance measurements.
+      You may need to experiment to determine which CPU to bind the
+      application to in order to achieve the best performance for your system.
+
+      If you are developing an application designed for 10Gb networking,
+      please keep in mind you may want to look at kernel functions
+      sched_setaffinity & sched_getaffinity to bind your application.
+
+      If you are just running user-space applications such as ftp, telnet,
+      etc., you may want to try the runon tool provided by Tim Hockin's
+      procstate utility. You could also try binding the interface to a
+      particular CPU: runon 0 ifup eth0
+
+
+SUPPORT
+=======
+
+ If you have problems with the software or hardware, please contact our
+ customer support team via email at support@chelsio.com or check our website
+ at http://www.chelsio.com
+
+===============================================================================
+
+ Chelsio Communications
+ 370 San Aleso Ave.
+ Suite 100
+ Sunnyvale, CA 94085
+ http://www.chelsio.com
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License, version 2, as
+published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License along
+with this program; if not, write to the Free Software Foundation, Inc.,
+59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+
+THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+
+ Copyright (c) 2003-2005 Chelsio Communications. All rights reserved.
+
+===============================================================================
diff --git a/Documentation/networking/phy.txt b/Documentation/networking/phy.txt
new file mode 100644
--- /dev/null
+++ b/Documentation/networking/phy.txt
@@ -0,0 +1,288 @@
+
+-------
+PHY Abstraction Layer
+(Updated 2005-07-21)
+
+Purpose
+
+ Most network devices consist of a set of registers which provide an interface
+ to a MAC layer, which communicates with the physical connection through a
+ PHY.  The PHY concerns itself with negotiating link parameters with the link
+ partner on the other side of the network connection (typically, an ethernet
+ cable), and provides a register interface to allow drivers to determine what
+ settings were chosen, and to configure what settings are allowed.
+
+ While these devices are distinct from the network devices, and conform to a
+ standard layout for the registers, it has been common practice to integrate
+ the PHY management code with the network driver.  This has resulted in large
+ amounts of redundant code.  Also, on embedded systems with multiple (and
+ sometimes quite different) ethernet controllers connected to the same 
+ management bus, it is difficult to ensure safe use of the bus.
+
+ Since the PHYs are devices, and the management busses through which they are
+ accessed are, in fact, busses, the PHY Abstraction Layer treats them as such.
+ In doing so, it has these goals:
+
+   1) Increase code-reuse
+   2) Increase overall code-maintainability
+   3) Speed development time for new network drivers, and for new systems
+ 
+ Basically, this layer is meant to provide an interface to PHY devices which
+ allows network driver writers to write as little code as possible, while
+ still providing a full feature set.
+
+The MDIO bus
+
+ Most network devices are connected to a PHY by means of a management bus.
+ Different devices use different busses (though some share common interfaces).
+ In order to take advantage of the PAL, each bus interface needs to be
+ registered as a distinct device.
+
+ 1) read and write functions must be implemented.  Their prototypes are:
+
+     int write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
+     int read(struct mii_bus *bus, int mii_id, int regnum);
+
+   mii_id is the address on the bus for the PHY, and regnum is the register
+   number.  These functions are guaranteed not to be called from interrupt
+   time, so it is safe for them to block, waiting for an interrupt to signal
+   the operation is complete.
+ 
+ 2) A reset function is necessary.  This is used to return the bus to an
+   initialized state.
+
+ 3) A probe function is needed.  This function should set up anything the bus
+   driver needs, setup the mii_bus structure, and register with the PAL using
+   mdiobus_register.  Similarly, there's a remove function to undo all of
+   that (use mdiobus_unregister).
+ 
+ 4) Like any driver, the device_driver structure must be configured, and
+   init/exit functions are used to register the driver.
+
+ 5) The bus must also be declared somewhere as a device, and registered.
+
+ As an example of how one driver implemented an MDIO bus driver, see
+ drivers/net/gianfar_mii.c and arch/ppc/syslib/mpc85xx_devices.c
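+
+ As a rough sketch of the same steps (all names here are hypothetical, and the
+ mii_bus field names should be double-checked against include/linux/phy.h and
+ the drivers above), a bus driver's probe might look like:
+
+   /* Hypothetical register accessors; a real driver would program the MDIO
+    * controller's registers and may sleep waiting for completion. */
+   static int mybus_read(struct mii_bus *bus, int mii_id, int regnum)
+   {
+           struct mybus_priv *priv = bus->priv;
+
+           return mybus_hw_read(priv->regs, mii_id, regnum);
+   }
+
+   static int mybus_write(struct mii_bus *bus, int mii_id, int regnum,
+                          u16 value)
+   {
+           struct mybus_priv *priv = bus->priv;
+
+           return mybus_hw_write(priv->regs, mii_id, regnum, value);
+   }
+
+   static int mybus_probe(struct device *dev)
+   {
+           struct mybus_priv *priv = mybus_setup_hw(dev);  /* hypothetical */
+           struct mii_bus *bus = &priv->bus;
+
+           bus->name = "mybus MDIO bus";
+           bus->id = 0;            /* first half of the "bus:addr" PHY name */
+           bus->read = &mybus_read;
+           bus->write = &mybus_write;
+           bus->reset = &mybus_reset;
+           bus->priv = priv;
+
+           return mdiobus_register(bus);
+   }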
+
+Connecting to a PHY
+
+ Sometime during startup, the network driver needs to establish a connection
+ between the PHY device and the network device.  By this time, the PHY's bus
+ and its drivers must all have been loaded, so the PHY is ready to be connected.
+ At this point, there are several ways to connect to the PHY:
+
+ 1) The PAL handles everything, and only calls the network driver when
+   the link state changes, so it can react.
+
+ 2) The PAL handles everything except interrupts (usually because the
+   controller has the interrupt registers).
+
+ 3) The PAL handles everything, but checks in with the driver every second,
+   allowing the network driver to react first to any changes before the PAL
+   does.
+ 
+ 4) The PAL serves only as a library of functions, with the network device
+   manually calling functions to update status, and configure the PHY
+
+
+Letting the PHY Abstraction Layer do Everything
+
+ If you choose option 1 (the hope is that every driver can, though the other
+ options remain for drivers that can't), connecting to the PHY is simple:
+
+ First, you need a function to react to changes in the link state.  This
+ function follows this protocol:
+
+   static void adjust_link(struct net_device *dev);
+ 
+ Next, you need to know the device name of the PHY connected to this device. 
+ The name will look something like "phy0:0", where the first number is the
+ bus id, and the second is the PHY's address on that bus.
+ 
+ Now, to connect, just call this function:
+ 
+   phydev = phy_connect(dev, phy_name, &adjust_link, flags);
+
+ phydev is a pointer to the phy_device structure which represents the PHY.  If
+ phy_connect is successful, it will return the pointer.  dev, here, is the
+ pointer to your net_device.  Once done, this function will have started the
+ PHY's software state machine, and registered for the PHY's interrupt, if it
+ has one.  The phydev structure will be populated with information about the
+ current state, though the PHY will not yet be truly operational at this
+ point.
+
+ flags is a u32 which can optionally contain phy-specific flags.
+ This is useful if the system has put hardware restrictions on
+ the PHY/controller, of which the PHY needs to be aware.
+
+ Now just make sure that phydev->supported and phydev->advertising have any
+ values pruned from them which don't make sense for your controller (a 10/100
+ controller may be connected to a gigabit capable PHY, so you would need to
+ mask off SUPPORTED_1000baseT*).  See include/linux/ethtool.h for definitions
+ for these bitfields. Note that you should not SET any bits, or the PHY may
+ get put into an unsupported state.
+
+ Lastly, once the controller is ready to handle network traffic, you call
+ phy_start(phydev).  This tells the PAL that you are ready, and configures the
+ PHY to connect to the network.  If you want to handle your own interrupts,
+ just set phydev->irq to PHY_IGNORE_INTERRUPT before you call phy_start.
+ Similarly, if you don't want to use interrupts, set phydev->irq to PHY_POLL.
+
+ When you want to disconnect from the network (even if just briefly), you call
+ phy_stop(phydev).
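+
+ Putting these steps together, a driver's open and stop paths might look
+ roughly like the sketch below.  The driver name, private structure, and the
+ PHY name "phy0:1" are hypothetical; the PAL calls are the ones described
+ above:
+
+   static void mydrv_adjust_link(struct net_device *dev)
+   {
+           struct mydrv_priv *priv = netdev_priv(dev);
+
+           /* Reprogram the MAC from priv->phydev->speed, ->duplex and
+            * ->link here, then report the change. */
+           phy_print_status(priv->phydev);
+   }
+
+   static int mydrv_open(struct net_device *dev)
+   {
+           struct mydrv_priv *priv = netdev_priv(dev);
+
+           priv->phydev = phy_connect(dev, "phy0:1", &mydrv_adjust_link, 0);
+           if (IS_ERR(priv->phydev))   /* assuming an ERR_PTR on failure */
+                   return PTR_ERR(priv->phydev);
+
+           /* Prune modes a 10/100-only controller cannot handle. */
+           priv->phydev->supported &= PHY_BASIC_FEATURES;
+           priv->phydev->advertising = priv->phydev->supported;
+
+           phy_start(priv->phydev);    /* ready to handle traffic */
+           return 0;
+   }
+
+   static int mydrv_stop(struct net_device *dev)
+   {
+           struct mydrv_priv *priv = netdev_priv(dev);
+
+           phy_stop(priv->phydev);     /* disconnect from the network */
+           return 0;
+   }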
+
+Keeping Close Tabs on the PAL
+
+ It is possible that the PAL's built-in state machine needs a little help to
+ keep your network device and the PHY properly in sync.  If so, you can
+ register a helper function when connecting to the PHY, which will be called
+ every second before the state machine reacts to any changes.  To do this, you
+ need to manually call phy_attach() and phy_prepare_link(), and then call
+ phy_start_machine() with the second argument set to point to your special
+ handler.
+
+ Currently there are no examples of how to use this functionality, and testing
+ on it has been limited because the author does not have any drivers which use
+ it (they all use option 1).  So Caveat Emptor.
+
+Doing it all yourself
+
+ There's a remote chance that the PAL's built-in state machine cannot track
+ the complex interactions between the PHY and your network device.  If this is
+ so, you can simply call phy_attach(), and not call phy_start_machine or
+ phy_prepare_link().  This will mean that phydev->state is entirely yours to
+ handle (phy_start and phy_stop toggle between some of the states, so you
+ might need to avoid them).
+
+ An effort has been made to make sure that useful functionality can be
+ accessed without the state-machine running, and most of these functions are
+ descended from functions which did not interact with a complex state-machine.
+ However, again, no effort has been made so far to test running without the
+ state machine, so proceed with caution.
+
+ Here is a brief rundown of the functions:
+
+ int phy_read(struct phy_device *phydev, u16 regnum);
+ int phy_write(struct phy_device *phydev, u16 regnum, u16 val);
+
+   Simple read/write primitives.  They invoke the bus's read/write function
+   pointers.
+
+ void phy_print_status(struct phy_device *phydev);
+ 
+   A convenience function to print out the PHY status neatly.
+
+ int phy_clear_interrupt(struct phy_device *phydev);
+ int phy_config_interrupt(struct phy_device *phydev, u32 interrupts);
+   
+   Clear the PHY's interrupt, and configure which ones are allowed,
+   respectively.  Currently only supports all on, or all off.
+ 
+ int phy_enable_interrupts(struct phy_device *phydev);
+ int phy_disable_interrupts(struct phy_device *phydev);
+
+   Functions which enable/disable PHY interrupts, clearing them
+   before and after, respectively.
+
+ int phy_start_interrupts(struct phy_device *phydev);
+ int phy_stop_interrupts(struct phy_device *phydev);
+
+   Requests the IRQ for the PHY interrupts, then enables them for
+   start, or disables then frees them for stop.
+
+ struct phy_device * phy_attach(struct net_device *dev, const char *phy_id,
+		 u32 flags);
+
+   Attaches a network device to a particular PHY, binding the PHY to a generic
+   driver if none was found during bus initialization.  Passes in
+   any phy-specific flags as needed.
+
+ int phy_start_aneg(struct phy_device *phydev);
+   
+   Using variables inside the phydev structure, either configures advertising
+   and resets autonegotiation, or disables autonegotiation, and configures
+   forced settings.
+
+ static inline int phy_read_status(struct phy_device *phydev);
+
+   Fills the phydev structure with up-to-date information about the current
+   settings in the PHY.
+
+ void phy_sanitize_settings(struct phy_device *phydev);
+   
+   Resolves differences between currently desired settings, and
+   supported settings for the given PHY device.  Does not make
+   the changes in the hardware, though.
+
+ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
+ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
+
+   Ethtool convenience functions.
+
+ int phy_mii_ioctl(struct phy_device *phydev,
+                 struct mii_ioctl_data *mii_data, int cmd);
+
+   The MII ioctl.  Note that this function will completely screw up the state
+   machine if you write registers like BMCR, BMSR, ADVERTISE, etc.  Best to
+   use this only to write registers which are not standard, and don't set off
+   a renegotiation.
+
+
+PHY Device Drivers
+
+ With the PHY Abstraction Layer, adding support for new PHYs is
+ quite easy.  In some cases, no work is required at all!  However,
+ many PHYs require a little hand-holding to get up-and-running.
+
+Generic PHY driver
+
+ If the desired PHY doesn't have any errata, quirks, or special
+ features you want to support, then it may be best to not add
+ support, and let the PHY Abstraction Layer's Generic PHY Driver
+ do all of the work.  
+
+Writing a PHY driver
+
+ If you do need to write a PHY driver, the first thing to do is
+ make sure it can be matched with an appropriate PHY device.
+ This is done during bus initialization by reading the device's
+ UID (stored in registers 2 and 3), then comparing it to each
+ driver's phy_id field by ANDing it with each driver's
+ phy_id_mask field.  Also, it needs a name.  Here's an example:
+
+   static struct phy_driver dm9161_driver = {
+           .phy_id         = 0x0181b880,
+           .name           = "Davicom DM9161E",
+           .phy_id_mask    = 0x0ffffff0,
+           ...
+   };
+
+ Next, you need to specify what features (speed, duplex, autoneg,
+ etc) your PHY device and driver support.  Most PHYs support
+ PHY_BASIC_FEATURES, but you can look in include/linux/mii.h for other
+ features.
+
+ Each driver consists of a number of function pointers:
+
+   config_init: configures PHY into a sane state after a reset.
+     For instance, a Davicom PHY requires descrambling disabled.
+   probe: Does any setup needed by the driver
+   suspend/resume: power management
+   config_aneg: Changes the speed/duplex/negotiation settings
+   read_status: Reads the current speed/duplex/negotiation settings
+   ack_interrupt: Clear a pending interrupt
+   config_intr: Enable or disable interrupts
+   remove: Does any driver take-down
+
+ Of these, only config_aneg and read_status are required to be
+ assigned by the driver code.  The rest are optional.  Also, it is
+ preferred to use the generic phy driver's versions of these two
+ functions if at all possible: genphy_read_status and
+ genphy_config_aneg.  If this is not possible, it is likely that
+ you only need to perform some actions before and after invoking
+ these functions, and so your functions will wrap the generic
+ ones.
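+
+ As a sketch of how the pieces fit together (the ID values and helper names
+ here are hypothetical, and the phy_driver fields should be checked against
+ include/linux/phy.h), a minimal driver built on the generic functions might
+ look like:
+
+   static struct phy_driver myphy_driver = {
+           .phy_id         = 0x01234560,   /* hypothetical UID */
+           .phy_id_mask    = 0x0ffffff0,
+           .name           = "Example PHY",
+           .features       = PHY_BASIC_FEATURES,
+           .config_init    = &myphy_config_init,   /* errata fixups, etc. */
+           .config_aneg    = &genphy_config_aneg,
+           .read_status    = &genphy_read_status,
+           .driver         = { .owner = THIS_MODULE },
+   };
+
+   static int __init myphy_init(void)
+   {
+           return phy_driver_register(&myphy_driver);
+   }
+
+   static void __exit myphy_exit(void)
+   {
+           phy_driver_unregister(&myphy_driver);
+   }
+
+   module_init(myphy_init);
+   module_exit(myphy_exit);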
+
+ Feel free to look at the Marvell, Cicada, and Davicom drivers in
+ drivers/net/phy/ for examples (the lxt and qsemi drivers have
+ not been tested as of this writing).
diff --git a/MAINTAINERS b/MAINTAINERS
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -991,6 +991,13 @@ M:	mike.miller@hp.com
 L:	iss_storagedev@hp.com
 S:	Supported
  
+HOST AP DRIVER
+P:	Jouni Malinen
+M:	jkmaline@cc.hut.fi
+L:	hostap@shmoo.com
+W:	http://hostap.epitest.fi/
+S:	Maintained
+
 HP100:	Driver for HP 10/100 Mbit/s Voice Grade Network Adapter Series
 P:	Jaroslav Kysela
 M:	perex@suse.cz
@@ -2092,6 +2099,12 @@ M:	support@simtec.co.uk
 W:	http://www.simtec.co.uk/products/EB2410ITX/
 S:	Supported
 
+SIS 190 ETHERNET DRIVER
+P:	Francois Romieu
+M:	romieu@fr.zoreil.com
+L:	netdev@vger.kernel.org
+S:	Maintained
+
 SIS 5513 IDE CONTROLLER DRIVER
 P:	Lionel Bouton
 M:	Lionel.Bouton@inet6.fr
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -131,6 +131,8 @@ config NET_SB1000
 
 	source "drivers/net/arcnet/Kconfig"
 
+source "drivers/net/phy/Kconfig"
+
 #
 #	Ethernet
 #
@@ -1921,6 +1923,16 @@ config R8169_VLAN
 	  
 	  If in doubt, say Y.
 
+config SIS190
+	tristate "SiS190 gigabit ethernet support"
+	depends on PCI
+	select CRC32
+	---help---
+	  Say Y here if you have a SiS 190 PCI Gigabit Ethernet adapter.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called sis190.  This is recommended.
+
 config SKGE
 	tristate "New SysKonnect GigaEthernet support (EXPERIMENTAL)"
 	depends on PCI && EXPERIMENTAL
@@ -1932,7 +1944,25 @@ config SKGE
 
 	  It does not support the link failover and network management 
 	  features that "portable" vendor supplied sk98lin driver does.
-	
+
+
+config SKY2
+	tristate "SysKonnect Yukon2 support (EXPERIMENTAL)"
+	depends on PCI && EXPERIMENTAL
+	select CRC32
+	---help---
+	  This driver support the Marvell Yukon 2 Gigabit Ethernet adapter.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called sky2.  This is recommended.
+
+config SKY2_EC_A1
+	bool "Support old Yukon-EC A1 chipset"
+	depends on SKY2
+	---help---
+	  Include support for early revisions of the Yukon EC chipset
+	  that required extra workarounds. If in doubt, say N.
+
 config SK98LIN
 	tristate "Marvell Yukon Chipset / SysKonnect SK-98xx Support"
 	depends on PCI
@@ -2091,6 +2121,25 @@ endmenu
 menu "Ethernet (10000 Mbit)"
 	depends on !UML
 
+config CHELSIO_T1
+	tristate "Chelsio 10Gb Ethernet support"
+	depends on PCI
+	help
+	  This driver supports Chelsio N110 and N210 models 10Gb Ethernet
+	  cards. More information about adapter features and performance
+	  tuning is in <file:Documentation/networking/cxgb.txt>.
+
+	  For general information about Chelsio and our products, visit
+	  our website at <http://www.chelsio.com>.
+
+	  For customer support, please visit our customer support page at
+	  <http://www.chelsio.com/support.htm>.
+
+	  Please send feedback to <linux-bugs@chelsio.com>.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called cxgb.
+
 config IXGB
 	tristate "Intel(R) PRO/10GbE support"
 	depends on PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -9,6 +9,7 @@ endif
 obj-$(CONFIG_E1000) += e1000/
 obj-$(CONFIG_IBM_EMAC) += ibm_emac/
 obj-$(CONFIG_IXGB) += ixgb/
+obj-$(CONFIG_CHELSIO_T1) += chelsio/
 obj-$(CONFIG_BONDING) += bonding/
 obj-$(CONFIG_GIANFAR) += gianfar_driver.o
 
@@ -42,6 +43,7 @@ obj-$(CONFIG_EEPRO100) += eepro100.o
 obj-$(CONFIG_E100) += e100.o
 obj-$(CONFIG_TLAN) += tlan.o
 obj-$(CONFIG_EPIC100) += epic100.o
+obj-$(CONFIG_SIS190) += sis190.o
 obj-$(CONFIG_SIS900) += sis900.o
 obj-$(CONFIG_YELLOWFIN) += yellowfin.o
 obj-$(CONFIG_ACENIC) += acenic.o
@@ -54,6 +56,7 @@ obj-$(CONFIG_TIGON3) += tg3.o
 obj-$(CONFIG_BNX2) += bnx2.o
 obj-$(CONFIG_TC35815) += tc35815.o
 obj-$(CONFIG_SKGE) += skge.o
+obj-$(CONFIG_SKY2) += sky2.o
 obj-$(CONFIG_SK98LIN) += sk98lin/
 obj-$(CONFIG_SKFP) += skfp/
 obj-$(CONFIG_VIA_RHINE) += via-rhine.o
@@ -65,6 +68,7 @@ obj-$(CONFIG_ADAPTEC_STARFIRE) += starfi
 #
 
 obj-$(CONFIG_MII) += mii.o
+obj-$(CONFIG_PHYLIB) += phy/
 
 obj-$(CONFIG_SUNDANCE) += sundance.o
 obj-$(CONFIG_HAMACHI) += hamachi.o
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -323,12 +323,6 @@ extern struct net_device *proteon_probe(
 extern struct net_device *smctr_probe(int unit);
 
 static struct devprobe2 tr_probes2[] __initdata = {
-#ifdef CONFIG_SKISA
-	{sk_isa_probe, 0},
-#endif
-#ifdef CONFIG_PROTEON
-	{proteon_probe, 0},
-#endif
 #ifdef CONFIG_SMCTR
 	{smctr_probe, 0},
 #endif
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1106,18 +1106,13 @@ static int alb_handle_addr_collision_on_
 			}
 		}
 
-		if (found) {
-			/* a slave was found that is using the mac address
-			 * of the new slave
-			 */
-			printk(KERN_ERR DRV_NAME
-			       ": Error: the hw address of slave %s is not "
-			       "unique - cannot enslave it!",
-			       slave->dev->name);
-			return -EINVAL;
-		}
+		if (!found)
+			return 0;
 
-		return 0;
+		/* Try setting slave mac to bond address and fall-through
+		   to code handling that situation below... */
+		alb_set_slave_mac_addr(slave, bond->dev->dev_addr,
+				       bond->alb_info.rlb_enabled);
 	}
 
 	/* The slave's address is equal to the address of the bond.
diff --git a/drivers/net/chelsio/Makefile b/drivers/net/chelsio/Makefile
new file mode 100644
--- /dev/null
+++ b/drivers/net/chelsio/Makefile
@@ -0,0 +1,11 @@
+#
+# Chelsio 10Gb NIC driver for Linux.
+#
+
+obj-$(CONFIG_CHELSIO_T1) += cxgb.o
+
+EXTRA_CFLAGS += -I$(TOPDIR)/drivers/net/chelsio $(DEBUG_FLAGS)
+
+
+cxgb-objs := cxgb2.o espi.o pm3393.o sge.o subr.o mv88x201x.o
+
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h
new file mode 100644
--- /dev/null
+++ b/drivers/net/chelsio/common.h
@@ -0,0 +1,314 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: common.h                                                            *
+ * $Revision: 1.21 $                                                         *
+ * $Date: 2005/06/22 00:43:25 $                                              *
+ * Description:                                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#ifndef _CXGB_COMMON_H_
+#define _CXGB_COMMON_H_
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <linux/pci_ids.h>
+
+#define DRV_DESCRIPTION "Chelsio 10Gb Ethernet Driver"
+#define DRV_NAME "cxgb"
+#define DRV_VERSION "2.1.1"
+#define PFX      DRV_NAME ": "
+
+#define CH_ERR(fmt, ...)   printk(KERN_ERR PFX fmt, ## __VA_ARGS__)
+#define CH_WARN(fmt, ...)  printk(KERN_WARNING PFX fmt, ## __VA_ARGS__)
+#define CH_ALERT(fmt, ...) printk(KERN_ALERT PFX fmt, ## __VA_ARGS__)
+
+#define CH_DEVICE(devid, ssid, idx) \
+	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
+
+#define SUPPORTED_PAUSE       (1 << 13)
+#define SUPPORTED_LOOPBACK    (1 << 15)
+
+#define ADVERTISED_PAUSE      (1 << 13)
+#define ADVERTISED_ASYM_PAUSE (1 << 14)
+
+typedef struct adapter adapter_t;
+
+void t1_elmer0_ext_intr(adapter_t *adapter);
+void t1_link_changed(adapter_t *adapter, int port_id, int link_status,
+			int speed, int duplex, int fc);
+
+struct t1_rx_mode {
+	struct net_device *dev;
+	u32 idx;
+	struct dev_mc_list *list;
+};
+
+#define t1_rx_mode_promisc(rm)	(rm->dev->flags & IFF_PROMISC)
+#define t1_rx_mode_allmulti(rm)	(rm->dev->flags & IFF_ALLMULTI)
+#define t1_rx_mode_mc_cnt(rm)	(rm->dev->mc_count)
+
+static inline u8 *t1_get_next_mcaddr(struct t1_rx_mode *rm)
+{
+	u8 *addr = NULL;
+
+	if (rm->idx++ < rm->dev->mc_count) {
+		addr = rm->list->dmi_addr;
+		rm->list = rm->list->next;
+	}
+	return addr;
+}
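+
+/*
+ * Usage sketch (illustrative, not part of the driver): a MAC's set_rx_mode
+ * implementation would typically walk the list built above as
+ *
+ *	u8 *addr;
+ *
+ *	while ((addr = t1_get_next_mcaddr(rm)) != NULL)
+ *		program_mc_filter(mac, addr);
+ *
+ * where program_mc_filter stands in for the MAC-specific hash or exact-match
+ * programming.
+ */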
+
+#define	MAX_NPORTS 4
+
+#define SPEED_INVALID 0xffff
+#define DUPLEX_INVALID 0xff
+
+enum {
+	CHBT_BOARD_N110,
+	CHBT_BOARD_N210
+};
+
+enum {
+	CHBT_TERM_T1,
+	CHBT_TERM_T2
+};
+
+enum {
+	CHBT_MAC_PM3393,
+};
+
+enum {
+	CHBT_PHY_88X2010,
+};
+
+enum {
+	PAUSE_RX      = 1 << 0,
+	PAUSE_TX      = 1 << 1,
+	PAUSE_AUTONEG = 1 << 2
+};
+
+/* Revisions of T1 chip */
+enum {
+	TERM_T1A   = 0,
+	TERM_T1B   = 1,
+	TERM_T2    = 3
+};
+
+struct sge_params {
+	unsigned int cmdQ_size[2];
+	unsigned int freelQ_size[2];
+	unsigned int large_buf_capacity;
+	unsigned int rx_coalesce_usecs;
+	unsigned int last_rx_coalesce_raw;
+	unsigned int default_rx_coalesce_usecs;
+	unsigned int sample_interval_usecs;
+	unsigned int coalesce_enable;
+	unsigned int polling;
+};
+
+struct chelsio_pci_params {
+	unsigned short speed;
+	unsigned char  width;
+	unsigned char  is_pcix;
+};
+
+struct adapter_params {
+	struct sge_params sge;
+	struct chelsio_pci_params pci;
+
+	const struct board_info *brd_info;
+
+	unsigned int   nports;          /* # of ethernet ports */
+	unsigned int   stats_update_period;
+	unsigned short chip_revision;
+	unsigned char  chip_version;
+};
+
+struct link_config {
+	unsigned int   supported;        /* link capabilities */
+	unsigned int   advertising;      /* advertised capabilities */
+	unsigned short requested_speed;  /* speed user has requested */
+	unsigned short speed;            /* actual link speed */
+	unsigned char  requested_duplex; /* duplex user has requested */
+	unsigned char  duplex;           /* actual link duplex */
+	unsigned char  requested_fc;     /* flow control user has requested */
+	unsigned char  fc;               /* actual link flow control */
+	unsigned char  autoneg;          /* autonegotiating? */
+};
+
+struct cmac;
+struct cphy;
+
+struct port_info {
+	struct net_device *dev;
+	struct cmac *mac;
+	struct cphy *phy;
+	struct link_config link_config;
+	struct net_device_stats netstats;
+};
+
+struct sge;
+struct peespi;
+
+struct adapter {
+	u8 *regs;
+	struct pci_dev *pdev;
+	unsigned long registered_device_map;
+	unsigned long open_device_map;
+	unsigned long flags;
+
+	const char *name;
+	int msg_enable;
+	u32 mmio_len;
+
+	struct work_struct ext_intr_handler_task;
+	struct adapter_params params;
+
+	struct vlan_group *vlan_grp;
+
+	/* Terminator modules. */
+	struct sge    *sge;
+	struct peespi *espi;
+
+	struct port_info port[MAX_NPORTS];
+	struct work_struct stats_update_task;
+	struct timer_list stats_update_timer;
+
+	struct semaphore mib_mutex;
+	spinlock_t tpi_lock;
+	spinlock_t work_lock;
+	/* guards async operations */
+	spinlock_t async_lock ____cacheline_aligned;
+	u32 slow_intr_mask;
+};
+
+enum {                                           /* adapter flags */
+	FULL_INIT_DONE        = 1 << 0,
+	TSO_CAPABLE           = 1 << 2,
+	TCP_CSUM_CAPABLE      = 1 << 3,
+	UDP_CSUM_CAPABLE      = 1 << 4,
+	VLAN_ACCEL_CAPABLE    = 1 << 5,
+	RX_CSUM_ENABLED       = 1 << 6,
+};
+
+struct mdio_ops;
+struct gmac;
+struct gphy;
+
+struct board_info {
+	unsigned char           board;
+	unsigned char           port_number;
+	unsigned long           caps;
+	unsigned char           chip_term;
+	unsigned char           chip_mac;
+	unsigned char           chip_phy;
+	unsigned int            clock_core;
+	unsigned int            clock_mc3;
+	unsigned int            clock_mc4;
+	unsigned int            espi_nports;
+	unsigned int            clock_cspi;
+	unsigned int            clock_elmer0;
+	unsigned char           mdio_mdien;
+	unsigned char           mdio_mdiinv;
+	unsigned char           mdio_mdc;
+	unsigned char           mdio_phybaseaddr;
+	struct gmac            *gmac;
+	struct gphy            *gphy;
+	struct mdio_ops	       *mdio_ops;
+	const char             *desc;
+};
+
+extern struct pci_device_id t1_pci_tbl[];
+
+static inline int adapter_matches_type(const adapter_t *adapter,
+				       int version, int revision)
+{
+	return adapter->params.chip_version == version &&
+	       adapter->params.chip_revision == revision;
+}
+
+#define t1_is_T1B(adap) adapter_matches_type(adap, CHBT_TERM_T1, TERM_T1B)
+#define is_T2(adap)     adapter_matches_type(adap, CHBT_TERM_T2, TERM_T2)
+
+/* Returns true if an adapter supports VLAN acceleration and TSO */
+static inline int vlan_tso_capable(const adapter_t *adapter)
+{
+	return !t1_is_T1B(adapter);
+}
+
+#define for_each_port(adapter, iter) \
+	for (iter = 0; iter < (adapter)->params.nports; ++iter)
+
+#define board_info(adapter) ((adapter)->params.brd_info)
+#define is_10G(adapter) (board_info(adapter)->caps & SUPPORTED_10000baseT_Full)
+
+static inline unsigned int core_ticks_per_usec(const adapter_t *adap)
+{
+	return board_info(adap)->clock_core / 1000000;
+}
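+
+/*
+ * Illustrative only: assuming clock_core is given in Hz (as the division by
+ * 10^6 above implies), a delay expressed in microseconds converts to core
+ * clock ticks as
+ *
+ *	ticks = usecs * core_ticks_per_usec(adapter);
+ */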
+
+extern int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
+extern int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value);
+
+extern void t1_interrupts_enable(adapter_t *adapter);
+extern void t1_interrupts_disable(adapter_t *adapter);
+extern void t1_interrupts_clear(adapter_t *adapter);
+extern int elmer0_ext_intr_handler(adapter_t *adapter);
+extern int t1_slow_intr_handler(adapter_t *adapter);
+
+extern int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
+extern const struct board_info *t1_get_board_info(unsigned int board_id);
+extern const struct board_info *t1_get_board_info_from_ids(unsigned int devid,
+						    unsigned short ssid);
+extern int t1_seeprom_read(adapter_t *adapter, u32 addr, u32 *data);
+extern int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
+		     struct adapter_params *p);
+extern int t1_init_hw_modules(adapter_t *adapter);
+extern int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi);
+extern void t1_free_sw_modules(adapter_t *adapter);
+extern void t1_fatal_err(adapter_t *adapter);
+
+extern void t1_tp_set_udp_checksum_offload(adapter_t *adapter, int enable);
+extern void t1_tp_set_tcp_checksum_offload(adapter_t *adapter, int enable);
+extern void t1_tp_set_ip_checksum_offload(adapter_t *adapter, int enable);
+
+#endif /* _CXGB_COMMON_H_ */
diff --git a/drivers/net/chelsio/cphy.h b/drivers/net/chelsio/cphy.h
new file mode 100644
--- /dev/null
+++ b/drivers/net/chelsio/cphy.h
@@ -0,0 +1,148 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: cphy.h                                                              *
+ * $Revision: 1.7 $                                                          *
+ * $Date: 2005/06/21 18:29:47 $                                              *
+ * Description:                                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#ifndef _CXGB_CPHY_H_
+#define _CXGB_CPHY_H_
+
+#include "common.h"
+
+struct mdio_ops {
+	void (*init)(adapter_t *adapter, const struct board_info *bi);
+	int  (*read)(adapter_t *adapter, int phy_addr, int mmd_addr,
+		     int reg_addr, unsigned int *val);
+	int  (*write)(adapter_t *adapter, int phy_addr, int mmd_addr,
+		      int reg_addr, unsigned int val);
+};
+
+/* PHY interrupt types */
+enum {
+	cphy_cause_link_change = 0x1,
+	cphy_cause_error = 0x2
+};
+
+struct cphy;
+
+/* PHY operations */
+struct cphy_ops {
+	void (*destroy)(struct cphy *);
+	int (*reset)(struct cphy *, int wait);
+
+	int (*interrupt_enable)(struct cphy *);
+	int (*interrupt_disable)(struct cphy *);
+	int (*interrupt_clear)(struct cphy *);
+	int (*interrupt_handler)(struct cphy *);
+
+	int (*autoneg_enable)(struct cphy *);
+	int (*autoneg_disable)(struct cphy *);
+	int (*autoneg_restart)(struct cphy *);
+
+	int (*advertise)(struct cphy *phy, unsigned int advertise_map);
+	int (*set_loopback)(struct cphy *, int on);
+	int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex);
+	int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed,
+			       int *duplex, int *fc);
+};
+
+/* A PHY instance */
+struct cphy {
+	int addr;                            /* PHY address */
+	adapter_t *adapter;                  /* associated adapter */
+	struct cphy_ops *ops;                /* PHY operations */
+	int (*mdio_read)(adapter_t *adapter, int phy_addr, int mmd_addr,
+			 int reg_addr, unsigned int *val);
+	int (*mdio_write)(adapter_t *adapter, int phy_addr, int mmd_addr,
+			  int reg_addr, unsigned int val);
+	struct cphy_instance *instance;
+};
+
+/* Convenience MDIO read/write wrappers */
+static inline int mdio_read(struct cphy *cphy, int mmd, int reg,
+			    unsigned int *valp)
+{
+	return cphy->mdio_read(cphy->adapter, cphy->addr, mmd, reg, valp);
+}
+
+static inline int mdio_write(struct cphy *cphy, int mmd, int reg,
+			     unsigned int val)
+{
+	return cphy->mdio_write(cphy->adapter, cphy->addr, mmd, reg, val);
+}
+
+static inline int simple_mdio_read(struct cphy *cphy, int reg,
+				   unsigned int *valp)
+{
+	return mdio_read(cphy, 0, reg, valp);
+}
+
+static inline int simple_mdio_write(struct cphy *cphy, int reg,
+				    unsigned int val)
+{
+	return mdio_write(cphy, 0, reg, val);
+}
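+
+/*
+ * Usage sketch (illustrative): a PHY driver built on these wrappers reads
+ * and modifies a register without touching the adapter or PHY address
+ * directly, e.g.
+ *
+ *	unsigned int val;
+ *
+ *	mdio_read(cphy, mmd, reg, &val);
+ *	mdio_write(cphy, mmd, reg, val | some_bit);
+ *
+ * where mmd selects the MDIO manageable device; the simple_* variants pass
+ * MMD 0 for PHYs that are not addressed per-MMD.
+ */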
+
+/* Convenience initializer */
+static inline void cphy_init(struct cphy *phy, adapter_t *adapter,
+			     int phy_addr, struct cphy_ops *phy_ops,
+			     struct mdio_ops *mdio_ops)
+{
+	phy->adapter = adapter;
+	phy->addr    = phy_addr;
+	phy->ops     = phy_ops;
+	if (mdio_ops) {
+		phy->mdio_read  = mdio_ops->read;
+		phy->mdio_write = mdio_ops->write;
+	}
+}
+
+/* Operations of the PHY-instance factory */
+struct gphy {
+	/* Construct a PHY instance with the given PHY address */
+	struct cphy *(*create)(adapter_t *adapter, int phy_addr,
+			       struct mdio_ops *mdio_ops);
+
+	/*
+	 * Reset the PHY chip.  This resets the whole PHY chip, not individual
+	 * ports.
+	 */
+	int (*reset)(adapter_t *adapter);
+};
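+
+/*
+ * Usage sketch (illustrative): during software initialization the board's
+ * PHY factory would typically be invoked once per port, roughly
+ *
+ *	struct cphy *phy = bi->gphy->create(adapter, phy_addr, bi->mdio_ops);
+ *
+ * with phy_addr derived from the board's mdio_phybaseaddr.
+ */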
+
+extern struct gphy t1_mv88x201x_ops;
+extern struct gphy t1_dummy_phy_ops;
+
+#endif /* _CXGB_CPHY_H_ */
diff --git a/drivers/net/chelsio/cpl5_cmd.h b/drivers/net/chelsio/cpl5_cmd.h
new file mode 100644
--- /dev/null
+++ b/drivers/net/chelsio/cpl5_cmd.h
@@ -0,0 +1,145 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: cpl5_cmd.h                                                          *
+ * $Revision: 1.6 $                                                          *
+ * $Date: 2005/06/21 18:29:47 $                                              *
+ * Description:                                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#ifndef _CXGB_CPL5_CMD_H_
+#define _CXGB_CPL5_CMD_H_
+
+#include <asm/byteorder.h>
+
+#if !defined(__LITTLE_ENDIAN_BITFIELD) && !defined(__BIG_ENDIAN_BITFIELD)
+#error "Adjust your <asm/byteorder.h> defines"
+#endif
+
+enum CPL_opcode {
+	CPL_RX_PKT            = 0xAD,
+	CPL_TX_PKT            = 0xB2,
+	CPL_TX_PKT_LSO        = 0xB6,
+};
+
+enum {                /* TX_PKT_LSO ethernet types */
+	CPL_ETH_II,
+	CPL_ETH_II_VLAN,
+	CPL_ETH_802_3,
+	CPL_ETH_802_3_VLAN
+};
+
+struct cpl_rx_data {
+	u32 rsvd0;
+	u32 len;
+	u32 seq;
+	u16 urg;
+	u8  rsvd1;
+	u8  status;
+};
+
+/*
+ * We want this header to require no more than 2-byte alignment.
+ * All fields are u8 or u16 except for the length.  However that field is not
+ * used so we break it into 2 16-bit parts to easily meet our alignment needs.
+ */
+struct cpl_tx_pkt {
+	u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8 iff:4;
+	u8 ip_csum_dis:1;
+	u8 l4_csum_dis:1;
+	u8 vlan_valid:1;
+	u8 rsvd:1;
+#else
+	u8 rsvd:1;
+	u8 vlan_valid:1;
+	u8 l4_csum_dis:1;
+	u8 ip_csum_dis:1;
+	u8 iff:4;
+#endif
+	u16 vlan;
+	u16 len_hi;
+	u16 len_lo;
+};
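+
+/*
+ * Illustrative only: per the comment above, a sender that did need the
+ * length would fill the two 16-bit halves as
+ *
+ *	pkt->len_hi = (u16)(len >> 16);
+ *	pkt->len_lo = (u16)len;
+ *
+ * keeping the structure free of 32-bit members and hence only 2-byte aligned.
+ */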
+
+struct cpl_tx_pkt_lso {
+	u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8 iff:4;
+	u8 ip_csum_dis:1;
+	u8 l4_csum_dis:1;
+	u8 vlan_valid:1;
+	u8 rsvd:1;
+#else
+	u8 rsvd:1;
+	u8 vlan_valid:1;
+	u8 l4_csum_dis:1;
+	u8 ip_csum_dis:1;
+	u8 iff:4;
+#endif
+	u16 vlan;
+	u32 len;
+
+	u32 rsvd2;
+	u8 rsvd3;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8 tcp_hdr_words:4;
+	u8 ip_hdr_words:4;
+#else
+	u8 ip_hdr_words:4;
+	u8 tcp_hdr_words:4;
+#endif
+	u16 eth_type_mss;
+};
+
+struct cpl_rx_pkt {
+	u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8 iff:4;
+	u8 csum_valid:1;
+	u8 bad_pkt:1;
+	u8 vlan_valid:1;
+	u8 rsvd:1;
+#else
+	u8 rsvd:1;
+	u8 vlan_valid:1;
+	u8 bad_pkt:1;
+	u8 csum_valid:1;
+	u8 iff:4;
+#endif
+	u16 csum;
+	u16 vlan;
+	u16 len;
+};
+
+#endif /* _CXGB_CPL5_CMD_H_ */
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
new file mode 100644
--- /dev/null
+++ b/drivers/net/chelsio/cxgb2.c
@@ -0,0 +1,1256 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: cxgb2.c                                                             *
+ * $Revision: 1.25 $                                                         *
+ * $Date: 2005/06/22 00:43:25 $                                              *
+ * Description:                                                              *
+ *  Chelsio 10Gb Ethernet Driver.                                            *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#include "common.h"
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/mii.h>
+#include <linux/sockios.h>
+#include <linux/proc_fs.h>
+#include <linux/dma-mapping.h>
+#include <asm/uaccess.h>
+
+#include "cpl5_cmd.h"
+#include "regs.h"
+#include "gmac.h"
+#include "cphy.h"
+#include "sge.h"
+#include "espi.h"
+
+#ifdef work_struct
+#include <linux/tqueue.h>
+#define INIT_WORK INIT_TQUEUE
+#define schedule_work schedule_task
+#define flush_scheduled_work flush_scheduled_tasks
+
+static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
+{
+	mod_timer(&ap->stats_update_timer, jiffies + secs * HZ);
+}
+
+static inline void cancel_mac_stats_update(struct adapter *ap)
+{
+	del_timer_sync(&ap->stats_update_timer);
+	flush_scheduled_tasks();
+}
+
+/*
+ * Stats update timer for 2.4.  It schedules a task to do the actual update as
+ * we need to access MAC statistics in process context.
+ */
+static void mac_stats_timer(unsigned long data)
+{
+	struct adapter *ap = (struct adapter *)data;
+
+	schedule_task(&ap->stats_update_task);
+}
+#else
+#include <linux/workqueue.h>
+
+static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
+{
+	schedule_delayed_work(&ap->stats_update_task, secs * HZ);
+}
+
+static inline void cancel_mac_stats_update(struct adapter *ap)
+{
+	cancel_delayed_work(&ap->stats_update_task);
+}
+#endif
+
+#define MAX_CMDQ_ENTRIES 16384
+#define MAX_CMDQ1_ENTRIES 1024
+#define MAX_RX_BUFFERS 16384
+#define MAX_RX_JUMBO_BUFFERS 16384
+#define MAX_TX_BUFFERS_HIGH	16384U
+#define MAX_TX_BUFFERS_LOW	1536U
+#define MIN_FL_ENTRIES 32
+
+#define PORT_MASK ((1 << MAX_NPORTS) - 1)
+
+#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
+			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
+			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
+
+/*
+ * The EEPROM is actually bigger but only the first few bytes are used so we
+ * only report those.
+ */
+#define EEPROM_SIZE 32
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_LICENSE("GPL");
+
+static int dflt_msg_enable = DFLT_MSG_ENABLE;
+
+MODULE_PARM(dflt_msg_enable, "i");
+MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 message enable bitmap");
+
+
+static const char pci_speed[][4] = {
+	"33", "66", "100", "133"
+};
+
+/*
+ * Setup MAC to receive the types of packets we want.
+ */
+static void t1_set_rxmode(struct net_device *dev)
+{
+	struct adapter *adapter = dev->priv;
+	struct cmac *mac = adapter->port[dev->if_port].mac;
+	struct t1_rx_mode rm;
+
+	rm.dev = dev;
+	rm.idx = 0;
+	rm.list = dev->mc_list;
+	mac->ops->set_rx_mode(mac, &rm);
+}
+
+static void link_report(struct port_info *p)
+{
+	if (!netif_carrier_ok(p->dev))
+		printk(KERN_INFO "%s: link down\n", p->dev->name);
+	else {
+		const char *s = "10Mbps";
+
+		switch (p->link_config.speed) {
+			case SPEED_10000: s = "10Gbps"; break;
+			case SPEED_1000:  s = "1000Mbps"; break;
+			case SPEED_100:   s = "100Mbps"; break;
+		}
+
+		printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
+		       p->dev->name, s,
+		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
+	}
+}
+
+void t1_link_changed(struct adapter *adapter, int port_id, int link_stat,
+			int speed, int duplex, int pause)
+{
+	struct port_info *p = &adapter->port[port_id];
+
+	if (link_stat != netif_carrier_ok(p->dev)) {
+		if (link_stat)
+			netif_carrier_on(p->dev);
+		else
+			netif_carrier_off(p->dev);
+		link_report(p);
+	}
+}
+
+static void link_start(struct port_info *p)
+{
+	struct cmac *mac = p->mac;
+
+	mac->ops->reset(mac);
+	if (mac->ops->macaddress_set)
+		mac->ops->macaddress_set(mac, p->dev->dev_addr);
+	t1_set_rxmode(p->dev);
+	t1_link_start(p->phy, mac, &p->link_config);
+	mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
+}
+
+static void enable_hw_csum(struct adapter *adapter)
+{
+	if (adapter->flags & TSO_CAPABLE)
+		t1_tp_set_ip_checksum_offload(adapter, 1); /* for TSO only */
+	t1_tp_set_tcp_checksum_offload(adapter, 1);
+}
+
+/*
+ * Things to do upon first use of a card.
+ * This must run with the rtnl lock held.
+ */
+static int cxgb_up(struct adapter *adapter)
+{
+	int err = 0;
+
+	if (!(adapter->flags & FULL_INIT_DONE)) {
+		err = t1_init_hw_modules(adapter);
+		if (err)
+			goto out_err;
+
+		enable_hw_csum(adapter);
+		adapter->flags |= FULL_INIT_DONE;
+	}
+
+	t1_interrupts_clear(adapter);
+	if ((err = request_irq(adapter->pdev->irq,
+			       t1_select_intr_handler(adapter), SA_SHIRQ,
+			       adapter->name, adapter))) {
+		goto out_err;
+	}
+	t1_sge_start(adapter->sge);
+	t1_interrupts_enable(adapter);
+ out_err:
+	return err;
+}
+
+/*
+ * Release resources when all the ports have been stopped.
+ */
+static void cxgb_down(struct adapter *adapter)
+{
+	t1_sge_stop(adapter->sge);
+	t1_interrupts_disable(adapter);
+	free_irq(adapter->pdev->irq, adapter);
+}
+
+static int cxgb_open(struct net_device *dev)
+{
+	int err;
+	struct adapter *adapter = dev->priv;
+	int other_ports = adapter->open_device_map & PORT_MASK;
+
+	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
+		return err;
+
+	__set_bit(dev->if_port, &adapter->open_device_map);
+	link_start(&adapter->port[dev->if_port]);
+	netif_start_queue(dev);
+	if (!other_ports && adapter->params.stats_update_period)
+		schedule_mac_stats_update(adapter,
+					  adapter->params.stats_update_period);
+	return 0;
+}
+
+static int cxgb_close(struct net_device *dev)
+{
+	struct adapter *adapter = dev->priv;
+	struct port_info *p = &adapter->port[dev->if_port];
+	struct cmac *mac = p->mac;
+
+	netif_stop_queue(dev);
+	mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
+	netif_carrier_off(dev);
+
+	clear_bit(dev->if_port, &adapter->open_device_map);
+	if (adapter->params.stats_update_period &&
+	    !(adapter->open_device_map & PORT_MASK)) {
+		/* Stop statistics accumulation. */
+		smp_mb__after_clear_bit();
+		spin_lock(&adapter->work_lock);   /* sync with update task */
+		spin_unlock(&adapter->work_lock);
+		cancel_mac_stats_update(adapter);
+	}
+
+	if (!adapter->open_device_map)
+		cxgb_down(adapter);
+	return 0;
+}
+
+static struct net_device_stats *t1_get_stats(struct net_device *dev)
+{
+	struct adapter *adapter = dev->priv;
+	struct port_info *p = &adapter->port[dev->if_port];
+	struct net_device_stats *ns = &p->netstats;
+	const struct cmac_statistics *pstats;
+
+	/* Do a full update of the MAC stats */
+	pstats = p->mac->ops->statistics_update(p->mac,
+						      MAC_STATS_UPDATE_FULL);
+
+	ns->tx_packets = pstats->TxUnicastFramesOK +
+		pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;
+
+	ns->rx_packets = pstats->RxUnicastFramesOK +
+		pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;
+
+	ns->tx_bytes = pstats->TxOctetsOK;
+	ns->rx_bytes = pstats->RxOctetsOK;
+
+	ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
+		pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
+	ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
+		pstats->RxFCSErrors + pstats->RxAlignErrors +
+		pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
+		pstats->RxSymbolErrors + pstats->RxRuntErrors;
+
+	ns->multicast  = pstats->RxMulticastFramesOK;
+	ns->collisions = pstats->TxTotalCollisions;
+
+	/* detailed rx_errors */
+	ns->rx_length_errors = pstats->RxFrameTooLongErrors +
+		pstats->RxJabberErrors;
+	ns->rx_over_errors   = 0;
+	ns->rx_crc_errors    = pstats->RxFCSErrors;
+	ns->rx_frame_errors  = pstats->RxAlignErrors;
+	ns->rx_fifo_errors   = 0;
+	ns->rx_missed_errors = 0;
+
+	/* detailed tx_errors */
+	ns->tx_aborted_errors   = pstats->TxFramesAbortedDueToXSCollisions;
+	ns->tx_carrier_errors   = 0;
+	ns->tx_fifo_errors      = pstats->TxUnderrun;
+	ns->tx_heartbeat_errors = 0;
+	ns->tx_window_errors    = pstats->TxLateCollisions;
+	return ns;
+}
+
+static u32 get_msglevel(struct net_device *dev)
+{
+	struct adapter *adapter = dev->priv;
+
+	return adapter->msg_enable;
+}
+
+static void set_msglevel(struct net_device *dev, u32 val)
+{
+	struct adapter *adapter = dev->priv;
+
+	adapter->msg_enable = val;
+}
+
+static char stats_strings[][ETH_GSTRING_LEN] = {
+        "TxOctetsOK",
+        "TxOctetsBad",
+        "TxUnicastFramesOK",
+        "TxMulticastFramesOK",
+        "TxBroadcastFramesOK",
+        "TxPauseFrames",
+        "TxFramesWithDeferredXmissions",
+        "TxLateCollisions",
+        "TxTotalCollisions",
+        "TxFramesAbortedDueToXSCollisions",
+        "TxUnderrun",
+        "TxLengthErrors",
+        "TxInternalMACXmitError",
+        "TxFramesWithExcessiveDeferral",
+        "TxFCSErrors",
+
+        "RxOctetsOK",
+        "RxOctetsBad",
+        "RxUnicastFramesOK",
+        "RxMulticastFramesOK",
+        "RxBroadcastFramesOK",
+        "RxPauseFrames",
+        "RxFCSErrors",
+        "RxAlignErrors",
+        "RxSymbolErrors",
+        "RxDataErrors",
+        "RxSequenceErrors",
+        "RxRuntErrors",
+        "RxJabberErrors",
+        "RxInternalMACRcvError",
+        "RxInRangeLengthErrors",
+        "RxOutOfRangeLengthField",
+        "RxFrameTooLongErrors",
+
+	"TSO",
+	"VLANextractions",
+	"VLANinsertions",
+	"RxCsumGood",
+	"TxCsumOffload",
+	"RxDrops",
+
+	"respQ_empty",
+	"respQ_overflow",
+	"freelistQ_empty",
+	"pkt_too_big",
+	"pkt_mismatch",
+	"cmdQ_full0",
+	"cmdQ_full1",
+	"tx_ipfrags",
+	"tx_reg_pkts",
+	"tx_lso_pkts",
+	"tx_do_cksum",
+	
+	"espi_DIP2ParityErr",
+	"espi_DIP4Err",
+	"espi_RxDrops",
+	"espi_TxDrops",
+	"espi_RxOvfl",
+	"espi_ParityErr"
+};
+ 
+#define T2_REGMAP_SIZE (3 * 1024)
+
+static int get_regs_len(struct net_device *dev)
+{
+	return T2_REGMAP_SIZE;
+}
+
+static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+	struct adapter *adapter = dev->priv;
+
+	strcpy(info->driver, DRV_NAME);
+	strcpy(info->version, DRV_VERSION);
+	strcpy(info->fw_version, "N/A");
+	strcpy(info->bus_info, pci_name(adapter->pdev));
+}
+
+static int get_stats_count(struct net_device *dev)
+{
+	return ARRAY_SIZE(stats_strings);
+}
+
+static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+	if (stringset == ETH_SS_STATS)
+		memcpy(data, stats_strings, sizeof(stats_strings));
+}
+
+static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
+		      u64 *data)
+{
+	struct adapter *adapter = dev->priv;
+	struct cmac *mac = adapter->port[dev->if_port].mac;
+	const struct cmac_statistics *s;
+	const struct sge_port_stats *ss;
+	const struct sge_intr_counts *t;
+
+	s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
+	ss = t1_sge_get_port_stats(adapter->sge, dev->if_port);
+	t = t1_sge_get_intr_counts(adapter->sge);
+
+        *data++ = s->TxOctetsOK;
+        *data++ = s->TxOctetsBad;
+        *data++ = s->TxUnicastFramesOK;
+        *data++ = s->TxMulticastFramesOK;
+        *data++ = s->TxBroadcastFramesOK;
+        *data++ = s->TxPauseFrames;
+        *data++ = s->TxFramesWithDeferredXmissions;
+        *data++ = s->TxLateCollisions;
+        *data++ = s->TxTotalCollisions;
+        *data++ = s->TxFramesAbortedDueToXSCollisions;
+        *data++ = s->TxUnderrun;
+        *data++ = s->TxLengthErrors;
+        *data++ = s->TxInternalMACXmitError;
+        *data++ = s->TxFramesWithExcessiveDeferral;
+        *data++ = s->TxFCSErrors;
+
+        *data++ = s->RxOctetsOK;
+        *data++ = s->RxOctetsBad;
+        *data++ = s->RxUnicastFramesOK;
+        *data++ = s->RxMulticastFramesOK;
+        *data++ = s->RxBroadcastFramesOK;
+        *data++ = s->RxPauseFrames;
+        *data++ = s->RxFCSErrors;
+        *data++ = s->RxAlignErrors;
+        *data++ = s->RxSymbolErrors;
+        *data++ = s->RxDataErrors;
+        *data++ = s->RxSequenceErrors;
+        *data++ = s->RxRuntErrors;
+        *data++ = s->RxJabberErrors;
+        *data++ = s->RxInternalMACRcvError;
+        *data++ = s->RxInRangeLengthErrors;
+        *data++ = s->RxOutOfRangeLengthField;
+        *data++ = s->RxFrameTooLongErrors;
+
+	*data++ = ss->tso;
+	*data++ = ss->vlan_xtract;
+	*data++ = ss->vlan_insert;
+	*data++ = ss->rx_cso_good;
+	*data++ = ss->tx_cso;
+	*data++ = ss->rx_drops;
+
+	*data++ = (u64)t->respQ_empty;
+	*data++ = (u64)t->respQ_overflow;
+	*data++ = (u64)t->freelistQ_empty;
+	*data++ = (u64)t->pkt_too_big;
+	*data++ = (u64)t->pkt_mismatch;
+	*data++ = (u64)t->cmdQ_full[0];
+	*data++ = (u64)t->cmdQ_full[1];
+	*data++ = (u64)t->tx_ipfrags;
+	*data++ = (u64)t->tx_reg_pkts;
+	*data++ = (u64)t->tx_lso_pkts;
+	*data++ = (u64)t->tx_do_cksum;
+}
+
+static inline void reg_block_dump(struct adapter *ap, void *buf,
+				  unsigned int start, unsigned int end)
+{
+	u32 *p = buf + start;
+
+	for ( ; start <= end; start += sizeof(u32))
+		*p++ = readl(ap->regs + start);
+}
+
+static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
+		     void *buf)
+{
+	struct adapter *ap = dev->priv;
+
+	/*
+	 * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
+	 */
+	regs->version = 2;
+
+	memset(buf, 0, T2_REGMAP_SIZE);
+	reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
+}
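+
+/*
+ * Illustrative only: following the version scheme described in get_regs(),
+ * a consumer of the dump would recover the chip identity as
+ *
+ *	chip_version  = regs->version & 0x3ff;
+ *	chip_revision = (regs->version >> 10) & 0x3f;
+ */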
+
+static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct adapter *adapter = dev->priv;
+	struct port_info *p = &adapter->port[dev->if_port];
+
+	cmd->supported = p->link_config.supported;
+	cmd->advertising = p->link_config.advertising;
+
+	if (netif_carrier_ok(dev)) {
+		cmd->speed = p->link_config.speed;
+		cmd->duplex = p->link_config.duplex;
+	} else {
+		cmd->speed = -1;
+		cmd->duplex = -1;
+	}
+
+	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
+	cmd->phy_address = p->phy->addr;
+	cmd->transceiver = XCVR_EXTERNAL;
+	cmd->autoneg = p->link_config.autoneg;
+	cmd->maxtxpkt = 0;
+	cmd->maxrxpkt = 0;
+	return 0;
+}
+
+static int speed_duplex_to_caps(int speed, int duplex)
+{
+	int cap = 0;
+
+	switch (speed) {
+	case SPEED_10:
+		if (duplex == DUPLEX_FULL)
+			cap = SUPPORTED_10baseT_Full;
+		else
+			cap = SUPPORTED_10baseT_Half;
+		break;
+	case SPEED_100:
+		if (duplex == DUPLEX_FULL)
+			cap = SUPPORTED_100baseT_Full;
+		else
+			cap = SUPPORTED_100baseT_Half;
+		break;
+	case SPEED_1000:
+		if (duplex == DUPLEX_FULL)
+			cap = SUPPORTED_1000baseT_Full;
+		else
+			cap = SUPPORTED_1000baseT_Half;
+		break;
+	case SPEED_10000:
+		if (duplex == DUPLEX_FULL)
+			cap = SUPPORTED_10000baseT_Full;
+	}
+	return cap;
+}
+
+#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
+		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
+		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
+		      ADVERTISED_10000baseT_Full)
+
+static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct adapter *adapter = dev->priv;
+	struct port_info *p = &adapter->port[dev->if_port];
+	struct link_config *lc = &p->link_config;
+
+	if (!(lc->supported & SUPPORTED_Autoneg))
+		return -EOPNOTSUPP;             /* can't change speed/duplex */
+
+	if (cmd->autoneg == AUTONEG_DISABLE) {
+		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
+
+		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
+			return -EINVAL;
+		lc->requested_speed = cmd->speed;
+		lc->requested_duplex = cmd->duplex;
+		lc->advertising = 0;
+	} else {
+		cmd->advertising &= ADVERTISED_MASK;
+		if (cmd->advertising & (cmd->advertising - 1))
+			cmd->advertising = lc->supported;
+		cmd->advertising &= lc->supported;
+		if (!cmd->advertising)
+			return -EINVAL;
+		lc->requested_speed = SPEED_INVALID;
+		lc->requested_duplex = DUPLEX_INVALID;
+		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
+	}
+	lc->autoneg = cmd->autoneg;
+	if (netif_running(dev))
+		t1_link_start(p->phy, p->mac, lc);
+	return 0;
+}
+
+static void get_pauseparam(struct net_device *dev,
+			   struct ethtool_pauseparam *epause)
+{
+	struct adapter *adapter = dev->priv;
+	struct port_info *p = &adapter->port[dev->if_port];
+
+	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
+	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
+	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
+}
+
+static int set_pauseparam(struct net_device *dev,
+			  struct ethtool_pauseparam *epause)
+{
+	struct adapter *adapter = dev->priv;
+	struct port_info *p = &adapter->port[dev->if_port];
+	struct link_config *lc = &p->link_config;
+
+	if (epause->autoneg == AUTONEG_DISABLE)
+		lc->requested_fc = 0;
+	else if (lc->supported & SUPPORTED_Autoneg)
+		lc->requested_fc = PAUSE_AUTONEG;
+	else
+		return -EINVAL;
+
+	if (epause->rx_pause)
+		lc->requested_fc |= PAUSE_RX;
+	if (epause->tx_pause)
+		lc->requested_fc |= PAUSE_TX;
+	if (lc->autoneg == AUTONEG_ENABLE) {
+		if (netif_running(dev))
+			t1_link_start(p->phy, p->mac, lc);
+	} else {
+		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+		if (netif_running(dev))
+			p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
+							 lc->fc);
+	}
+	return 0;
+}
+
+static u32 get_rx_csum(struct net_device *dev)
+{
+	struct adapter *adapter = dev->priv;
+
+	return (adapter->flags & RX_CSUM_ENABLED) != 0;
+}
+
+static int set_rx_csum(struct net_device *dev, u32 data)
+{
+	struct adapter *adapter = dev->priv;
+
+	if (data)
+		adapter->flags |= RX_CSUM_ENABLED;
+	else
+		adapter->flags &= ~RX_CSUM_ENABLED;
+	return 0;
+}
+
+static int set_tso(struct net_device *dev, u32 value)
+{
+	struct adapter *adapter = dev->priv;
+
+	if (!(adapter->flags & TSO_CAPABLE))
+		return value ? -EOPNOTSUPP : 0;
+	return ethtool_op_set_tso(dev, value);
+}
+
+static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+{
+	struct adapter *adapter = dev->priv;
+	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
+
+	e->rx_max_pending = MAX_RX_BUFFERS;
+	e->rx_mini_max_pending = 0;
+	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
+	e->tx_max_pending = MAX_CMDQ_ENTRIES;
+
+	e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
+	e->rx_mini_pending = 0;
+	e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
+	e->tx_pending = adapter->params.sge.cmdQ_size[0];
+}
+
+static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+{
+	struct adapter *adapter = dev->priv;
+	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
+
+	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
+	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
+	    e->tx_pending > MAX_CMDQ_ENTRIES ||
+	    e->rx_pending < MIN_FL_ENTRIES ||
+	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
+	    e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
+		return -EINVAL;
+
+	if (adapter->flags & FULL_INIT_DONE)
+		return -EBUSY;
+
+	adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
+	adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
+	adapter->params.sge.cmdQ_size[0] = e->tx_pending;
+	adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
+		MAX_CMDQ1_ENTRIES : e->tx_pending;
+	return 0;
+}
+
+static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+	struct adapter *adapter = dev->priv;
+
+	/*
+	 * If RX coalescing is requested we use NAPI, otherwise interrupts.
+	 * This choice can be made only when all ports and the TOE are off.
+	 */
+	if (adapter->open_device_map == 0)
+		adapter->params.sge.polling = c->use_adaptive_rx_coalesce;
+
+	if (adapter->params.sge.polling) {
+		adapter->params.sge.rx_coalesce_usecs = 0;
+	} else {
+		adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
+	}
+	adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
+	adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
+	t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
+	return 0;
+}
+
+static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+	struct adapter *adapter = dev->priv;
+
+	c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
+	c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
+	c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
+	return 0;
+}
+
+static int get_eeprom_len(struct net_device *dev)
+{
+	return EEPROM_SIZE;
+}
+
+#define EEPROM_MAGIC(ap) \
+	(PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))
+
+static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
+		      u8 *data)
+{
+	int i;
+	u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
+	struct adapter *adapter = dev->priv;
+
+	e->magic = EEPROM_MAGIC(adapter);
+	for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
+		t1_seeprom_read(adapter, i, (u32 *)&buf[i]);
+	memcpy(data, buf + e->offset, e->len);
+	return 0;
+}
+
+static struct ethtool_ops t1_ethtool_ops = {
+	.get_settings      = get_settings,
+	.set_settings      = set_settings,
+	.get_drvinfo       = get_drvinfo,
+	.get_msglevel      = get_msglevel,
+	.set_msglevel      = set_msglevel,
+	.get_ringparam     = get_sge_param,
+	.set_ringparam     = set_sge_param,
+	.get_coalesce      = get_coalesce,
+	.set_coalesce      = set_coalesce,
+	.get_eeprom_len    = get_eeprom_len,
+	.get_eeprom        = get_eeprom,
+	.get_pauseparam    = get_pauseparam,
+	.set_pauseparam    = set_pauseparam,
+	.get_rx_csum       = get_rx_csum,
+	.set_rx_csum       = set_rx_csum,
+	.get_tx_csum       = ethtool_op_get_tx_csum,
+	.set_tx_csum       = ethtool_op_set_tx_csum,
+	.get_sg            = ethtool_op_get_sg,
+	.set_sg            = ethtool_op_set_sg,
+	.get_link          = ethtool_op_get_link,
+	.get_strings       = get_strings,
+	.get_stats_count   = get_stats_count,
+	.get_ethtool_stats = get_stats,
+	.get_regs_len      = get_regs_len,
+	.get_regs          = get_regs,
+	.get_tso           = ethtool_op_get_tso,
+	.set_tso           = set_tso,
+};
+
+static void cxgb_proc_cleanup(struct adapter *adapter,
+					struct proc_dir_entry *dir)
+{
+	const char *name;
+	name = adapter->name;
+	remove_proc_entry(name, dir);
+}
+//#define chtoe_setup_toedev(adapter) NULL
+#define update_mtu_tab(adapter)
+#define write_smt_entry(adapter, idx)
+
+static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+	struct adapter *adapter = dev->priv;
+	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+		data->phy_id = adapter->port[dev->if_port].phy->addr;
+		/* FALLTHRU */
+	case SIOCGMIIREG: {
+		struct cphy *phy = adapter->port[dev->if_port].phy;
+		u32 val;
+
+		if (!phy->mdio_read)
+			return -EOPNOTSUPP;
+		phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
+			       &val);
+		data->val_out = val;
+		break;
+	}
+	case SIOCSMIIREG: {
+		struct cphy *phy = adapter->port[dev->if_port].phy;
+
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		if (!phy->mdio_write)
+			return -EOPNOTSUPP;
+		phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
+			        data->val_in);
+		break;
+	}
+
+	default:
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
+static int t1_change_mtu(struct net_device *dev, int new_mtu)
+{
+	int ret;
+	struct adapter *adapter = dev->priv;
+	struct cmac *mac = adapter->port[dev->if_port].mac;
+
+	if (!mac->ops->set_mtu)
+		return -EOPNOTSUPP;
+	if (new_mtu < 68)
+		return -EINVAL;
+	if ((ret = mac->ops->set_mtu(mac, new_mtu)))
+		return ret;
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+static int t1_set_mac_addr(struct net_device *dev, void *p)
+{
+	struct adapter *adapter = dev->priv;
+	struct cmac *mac = adapter->port[dev->if_port].mac;
+	struct sockaddr *addr = p;
+
+	if (!mac->ops->macaddress_set)
+		return -EOPNOTSUPP;
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+	mac->ops->macaddress_set(mac, dev->dev_addr);
+	return 0;
+}
+
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+static void vlan_rx_register(struct net_device *dev,
+				   struct vlan_group *grp)
+{
+	struct adapter *adapter = dev->priv;
+
+	spin_lock_irq(&adapter->async_lock);
+	adapter->vlan_grp = grp;
+	t1_set_vlan_accel(adapter, grp != NULL);
+	spin_unlock_irq(&adapter->async_lock);
+}
+
+static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+	struct adapter *adapter = dev->priv;
+
+	spin_lock_irq(&adapter->async_lock);
+	if (adapter->vlan_grp)
+		adapter->vlan_grp->vlan_devices[vid] = NULL;
+	spin_unlock_irq(&adapter->async_lock);
+}
+#endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void t1_netpoll(struct net_device *dev)
+{
+	unsigned long flags;
+	struct adapter *adapter = dev->priv;
+
+	local_irq_save(flags);
+	t1_select_intr_handler(adapter)(adapter->pdev->irq, adapter, NULL);
+	local_irq_restore(flags);
+}
+#endif
+
+/*
+ * Periodic accumulation of MAC statistics.  This is used only if the MAC
+ * does not have any other way to prevent stats counter overflow.
+ */
+static void mac_stats_task(void *data)
+{
+	int i;
+	struct adapter *adapter = data;
+
+	for_each_port(adapter, i) {
+		struct port_info *p = &adapter->port[i];
+
+		if (netif_running(p->dev))
+			p->mac->ops->statistics_update(p->mac,
+						       MAC_STATS_UPDATE_FAST);
+	}
+
+	/* Schedule the next statistics update if any port is active. */
+	spin_lock(&adapter->work_lock);
+	if (adapter->open_device_map & PORT_MASK)
+		schedule_mac_stats_update(adapter,
+					  adapter->params.stats_update_period);
+	spin_unlock(&adapter->work_lock);
+}
+
+/*
+ * Processes elmer0 external interrupts in process context.
+ */
+static void ext_intr_task(void *data)
+{
+	struct adapter *adapter = data;
+
+	elmer0_ext_intr_handler(adapter);
+
+	/* Now reenable external interrupts */
+	spin_lock_irq(&adapter->async_lock);
+	adapter->slow_intr_mask |= F_PL_INTR_EXT;
+	writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
+	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
+                   adapter->regs + A_PL_ENABLE);
+	spin_unlock_irq(&adapter->async_lock);
+}
+
+/*
+ * Interrupt-context handler for elmer0 external interrupts.
+ */
+void t1_elmer0_ext_intr(struct adapter *adapter)
+{
+	/*
+	 * Schedule a task to handle external interrupts as we require
+	 * a process context.  We disable EXT interrupts in the interim
+	 * and let the task reenable them when it's done.
+	 */
+	adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
+	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
+                   adapter->regs + A_PL_ENABLE);
+	schedule_work(&adapter->ext_intr_handler_task);
+}
+
+void t1_fatal_err(struct adapter *adapter)
+{
+	if (adapter->flags & FULL_INIT_DONE) {
+		t1_sge_stop(adapter->sge);
+		t1_interrupts_disable(adapter);
+	}
+	CH_ALERT("%s: encountered fatal error, operation suspended\n",
+		 adapter->name);
+}
+
+static int __devinit init_one(struct pci_dev *pdev,
+			      const struct pci_device_id *ent)
+{
+	static int version_printed;
+
+	int i, err, pci_using_dac = 0;
+	unsigned long mmio_start, mmio_len;
+	const struct board_info *bi;
+	struct adapter *adapter = NULL;
+	struct port_info *pi;
+
+	if (!version_printed) {
+		printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
+		       DRV_VERSION);
+		++version_printed;
+	}
+
+	err = pci_enable_device(pdev);
+	if (err)
+		return err;
+
+	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+		CH_ERR("%s: cannot find PCI device memory base address\n",
+		       pci_name(pdev));
+		err = -ENODEV;
+		goto out_disable_pdev;
+	}
+
+	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+		pci_using_dac = 1;
+
+		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
+			CH_ERR("%s: unable to obtain 64-bit DMA for "
+			       "consistent allocations\n", pci_name(pdev));
+			err = -ENODEV;
+			goto out_disable_pdev;
+		}
+
+	} else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
+		CH_ERR("%s: no usable DMA configuration\n", pci_name(pdev));
+		goto out_disable_pdev;
+	}
+
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err) {
+		CH_ERR("%s: cannot obtain PCI resources\n", pci_name(pdev));
+		goto out_disable_pdev;
+	}
+
+	pci_set_master(pdev);
+
+	mmio_start = pci_resource_start(pdev, 0);
+	mmio_len = pci_resource_len(pdev, 0);
+	bi = t1_get_board_info(ent->driver_data);
+
+	for (i = 0; i < bi->port_number; ++i) {
+		struct net_device *netdev;
+
+		netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
+		if (!netdev) {
+			err = -ENOMEM;
+			goto out_free_dev;
+		}
+
+		SET_MODULE_OWNER(netdev);
+		SET_NETDEV_DEV(netdev, &pdev->dev);
+
+		if (!adapter) {
+			adapter = netdev->priv;
+			adapter->pdev = pdev;
+			adapter->port[0].dev = netdev;  /* so we don't leak it */
+
+			adapter->regs = ioremap(mmio_start, mmio_len);
+			if (!adapter->regs) {
+				CH_ERR("%s: cannot map device registers\n",
+				       pci_name(pdev));
+				err = -ENOMEM;
+				goto out_free_dev;
+			}
+
+			if (t1_get_board_rev(adapter, bi, &adapter->params)) {
+				err = -ENODEV;	  /* Can't handle this chip rev */
+				goto out_free_dev;
+			}
+
+			adapter->name = pci_name(pdev);
+			adapter->msg_enable = dflt_msg_enable;
+			adapter->mmio_len = mmio_len;
+
+			init_MUTEX(&adapter->mib_mutex);
+			spin_lock_init(&adapter->tpi_lock);
+			spin_lock_init(&adapter->work_lock);
+			spin_lock_init(&adapter->async_lock);
+
+			INIT_WORK(&adapter->ext_intr_handler_task,
+				  ext_intr_task, adapter);
+			INIT_WORK(&adapter->stats_update_task, mac_stats_task,
+				  adapter);
+#ifdef work_struct
+			init_timer(&adapter->stats_update_timer);
+			adapter->stats_update_timer.function = mac_stats_timer;
+			adapter->stats_update_timer.data =
+				(unsigned long)adapter;
+#endif
+
+			pci_set_drvdata(pdev, netdev);
+		}
+
+		pi = &adapter->port[i];
+		pi->dev = netdev;
+		netif_carrier_off(netdev);
+		netdev->irq = pdev->irq;
+		netdev->if_port = i;
+		netdev->mem_start = mmio_start;
+		netdev->mem_end = mmio_start + mmio_len - 1;
+		netdev->priv = adapter;
+		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+		netdev->features |= NETIF_F_LLTX;
+
+		adapter->flags |= RX_CSUM_ENABLED | TCP_CSUM_CAPABLE;
+		if (pci_using_dac)
+			netdev->features |= NETIF_F_HIGHDMA;
+		if (vlan_tso_capable(adapter)) {
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+			adapter->flags |= VLAN_ACCEL_CAPABLE;
+			netdev->features |=
+				NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+			netdev->vlan_rx_register = vlan_rx_register;
+			netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
+#endif
+			adapter->flags |= TSO_CAPABLE;
+			netdev->features |= NETIF_F_TSO;
+		}
+
+		netdev->open = cxgb_open;
+		netdev->stop = cxgb_close;
+		netdev->hard_start_xmit = t1_start_xmit;
+		netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
+			sizeof(struct cpl_tx_pkt_lso) :
+			sizeof(struct cpl_tx_pkt);
+		netdev->get_stats = t1_get_stats;
+		netdev->set_multicast_list = t1_set_rxmode;
+		netdev->do_ioctl = t1_ioctl;
+		netdev->change_mtu = t1_change_mtu;
+		netdev->set_mac_address = t1_set_mac_addr;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+		netdev->poll_controller = t1_netpoll;
+#endif
+		netdev->weight = 64;
+
+		SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
+	}
+
+	if (t1_init_sw_modules(adapter, bi) < 0) {
+		err = -ENODEV;
+		goto out_free_dev;
+	}
+
+	/*
+	 * The card is now ready to go.  If any errors occur during device
+	 * registration we do not fail the whole card but rather proceed only
+	 * with the ports we manage to register successfully.  However we must
+	 * register at least one net device.
+	 */
+	for (i = 0; i < bi->port_number; ++i) {
+		err = register_netdev(adapter->port[i].dev);
+		if (err)
+			CH_WARN("%s: cannot register net device %s, skipping\n",
+				pci_name(pdev), adapter->port[i].dev->name);
+		else {
+			/*
+			 * Change the name we use for messages to the name of
+			 * the first successfully registered interface.
+			 */
+			if (!adapter->registered_device_map)
+				adapter->name = adapter->port[i].dev->name;
+
+			__set_bit(i, &adapter->registered_device_map);
+		}
+	}
+	if (!adapter->registered_device_map) {
+		CH_ERR("%s: could not register any net devices\n",
+		       pci_name(pdev));
+		goto out_release_adapter_res;
+	}
+
+	printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
+	       bi->desc, adapter->params.chip_revision,
+	       adapter->params.pci.is_pcix ? "PCIX" : "PCI",
+	       adapter->params.pci.speed, adapter->params.pci.width);
+	return 0;
+
+ out_release_adapter_res:
+	t1_free_sw_modules(adapter);
+ out_free_dev:
+	if (adapter) {
+		if (adapter->regs)
+			iounmap(adapter->regs);
+		for (i = bi->port_number - 1; i >= 0; --i)
+			if (adapter->port[i].dev) {
+				cxgb_proc_cleanup(adapter, proc_root_driver);
+				kfree(adapter->port[i].dev);
+			}
+	}
+	pci_release_regions(pdev);
+ out_disable_pdev:
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+	return err;
+}
+
+static inline void t1_sw_reset(struct pci_dev *pdev)
+{
+	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
+	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
+}
+
+static void __devexit remove_one(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+
+	if (dev) {
+		int i;
+		struct adapter *adapter = dev->priv;
+
+		for_each_port(adapter, i)
+			if (test_bit(i, &adapter->registered_device_map))
+				unregister_netdev(adapter->port[i].dev);
+
+		t1_free_sw_modules(adapter);
+		iounmap(adapter->regs);
+		while (--i >= 0)
+			if (adapter->port[i].dev) {
+				cxgb_proc_cleanup(adapter, proc_root_driver);
+				kfree(adapter->port[i].dev);
+			}
+		pci_release_regions(pdev);
+		pci_disable_device(pdev);
+		pci_set_drvdata(pdev, NULL);
+		t1_sw_reset(pdev);
+	}
+}
+
+static struct pci_driver driver = {
+	.name     = DRV_NAME,
+	.id_table = t1_pci_tbl,
+	.probe    = init_one,
+	.remove   = __devexit_p(remove_one),
+};
+
+static int __init t1_init_module(void)
+{
+	return pci_module_init(&driver);
+}
+
+static void __exit t1_cleanup_module(void)
+{
+	pci_unregister_driver(&driver);
+}
+
+module_init(t1_init_module);
+module_exit(t1_cleanup_module);
diff --git a/drivers/net/chelsio/elmer0.h b/drivers/net/chelsio/elmer0.h
new file mode 100644
--- /dev/null
+++ b/drivers/net/chelsio/elmer0.h
@@ -0,0 +1,151 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: elmer0.h                                                            *
+ * $Revision: 1.6 $                                                          *
+ * $Date: 2005/06/21 22:49:43 $                                              *
+ * Description:                                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#ifndef _CXGB_ELMER0_H_
+#define _CXGB_ELMER0_H_
+
+/* ELMER0 registers */
+#define A_ELMER0_VERSION 0x100000
+#define A_ELMER0_PHY_CFG 0x100004
+#define A_ELMER0_INT_ENABLE 0x100008
+#define A_ELMER0_INT_CAUSE 0x10000c
+#define A_ELMER0_GPI_CFG 0x100010
+#define A_ELMER0_GPI_STAT 0x100014
+#define A_ELMER0_GPO 0x100018
+#define A_ELMER0_PORT0_MI1_CFG 0x400000
+
+#define S_MI1_MDI_ENABLE    0
+#define V_MI1_MDI_ENABLE(x) ((x) << S_MI1_MDI_ENABLE)
+#define F_MI1_MDI_ENABLE    V_MI1_MDI_ENABLE(1U)
+
+#define S_MI1_MDI_INVERT    1
+#define V_MI1_MDI_INVERT(x) ((x) << S_MI1_MDI_INVERT)
+#define F_MI1_MDI_INVERT    V_MI1_MDI_INVERT(1U)
+
+#define S_MI1_PREAMBLE_ENABLE    2
+#define V_MI1_PREAMBLE_ENABLE(x) ((x) << S_MI1_PREAMBLE_ENABLE)
+#define F_MI1_PREAMBLE_ENABLE    V_MI1_PREAMBLE_ENABLE(1U)
+
+#define S_MI1_SOF    3
+#define M_MI1_SOF    0x3
+#define V_MI1_SOF(x) ((x) << S_MI1_SOF)
+#define G_MI1_SOF(x) (((x) >> S_MI1_SOF) & M_MI1_SOF)
+
+#define S_MI1_CLK_DIV    5
+#define M_MI1_CLK_DIV    0xff
+#define V_MI1_CLK_DIV(x) ((x) << S_MI1_CLK_DIV)
+#define G_MI1_CLK_DIV(x) (((x) >> S_MI1_CLK_DIV) & M_MI1_CLK_DIV)
+
+#define A_ELMER0_PORT0_MI1_ADDR 0x400004
+
+#define S_MI1_REG_ADDR    0
+#define M_MI1_REG_ADDR    0x1f
+#define V_MI1_REG_ADDR(x) ((x) << S_MI1_REG_ADDR)
+#define G_MI1_REG_ADDR(x) (((x) >> S_MI1_REG_ADDR) & M_MI1_REG_ADDR)
+
+#define S_MI1_PHY_ADDR    5
+#define M_MI1_PHY_ADDR    0x1f
+#define V_MI1_PHY_ADDR(x) ((x) << S_MI1_PHY_ADDR)
+#define G_MI1_PHY_ADDR(x) (((x) >> S_MI1_PHY_ADDR) & M_MI1_PHY_ADDR)
+
+#define A_ELMER0_PORT0_MI1_DATA 0x400008
+
+#define S_MI1_DATA    0
+#define M_MI1_DATA    0xffff
+#define V_MI1_DATA(x) ((x) << S_MI1_DATA)
+#define G_MI1_DATA(x) (((x) >> S_MI1_DATA) & M_MI1_DATA)
+
+#define A_ELMER0_PORT0_MI1_OP 0x40000c
+
+#define S_MI1_OP    0
+#define M_MI1_OP    0x3
+#define V_MI1_OP(x) ((x) << S_MI1_OP)
+#define G_MI1_OP(x) (((x) >> S_MI1_OP) & M_MI1_OP)
+
+#define S_MI1_ADDR_AUTOINC    2
+#define V_MI1_ADDR_AUTOINC(x) ((x) << S_MI1_ADDR_AUTOINC)
+#define F_MI1_ADDR_AUTOINC    V_MI1_ADDR_AUTOINC(1U)
+
+#define S_MI1_OP_BUSY    31
+#define V_MI1_OP_BUSY(x) ((x) << S_MI1_OP_BUSY)
+#define F_MI1_OP_BUSY    V_MI1_OP_BUSY(1U)
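+
+/*
+ * The S_/M_/V_/G_/F_ macros above follow the usual shift/mask accessor
+ * pattern: S_ is the bit offset, M_ the field mask, V_ places a value,
+ * G_ extracts one, and F_ is a single-bit flag.  For illustration only,
+ * a caller could compose and decode an MI1 address word like this (phy
+ * and reg being the 5-bit PHY and register numbers):
+ *
+ *	u32 addr_word = V_MI1_PHY_ADDR(phy) | V_MI1_REG_ADDR(reg);
+ *	unsigned int reg_back = G_MI1_REG_ADDR(addr_word);  (reg & M_MI1_REG_ADDR)
+ */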
+
+#define A_ELMER0_PORT1_MI1_CFG 0x500000
+#define A_ELMER0_PORT1_MI1_ADDR 0x500004
+#define A_ELMER0_PORT1_MI1_DATA 0x500008
+#define A_ELMER0_PORT1_MI1_OP 0x50000c
+#define A_ELMER0_PORT2_MI1_CFG 0x600000
+#define A_ELMER0_PORT2_MI1_ADDR 0x600004
+#define A_ELMER0_PORT2_MI1_DATA 0x600008
+#define A_ELMER0_PORT2_MI1_OP 0x60000c
+#define A_ELMER0_PORT3_MI1_CFG 0x700000
+#define A_ELMER0_PORT3_MI1_ADDR 0x700004
+#define A_ELMER0_PORT3_MI1_DATA 0x700008
+#define A_ELMER0_PORT3_MI1_OP 0x70000c
+
+/* Simple bit definitions for the GPI and GPO registers. */
+#define     ELMER0_GP_BIT0              0x0001
+#define     ELMER0_GP_BIT1              0x0002
+#define     ELMER0_GP_BIT2              0x0004
+#define     ELMER0_GP_BIT3              0x0008
+#define     ELMER0_GP_BIT4              0x0010
+#define     ELMER0_GP_BIT5              0x0020
+#define     ELMER0_GP_BIT6              0x0040
+#define     ELMER0_GP_BIT7              0x0080
+#define     ELMER0_GP_BIT8              0x0100
+#define     ELMER0_GP_BIT9              0x0200
+#define     ELMER0_GP_BIT10             0x0400
+#define     ELMER0_GP_BIT11             0x0800
+#define     ELMER0_GP_BIT12             0x1000
+#define     ELMER0_GP_BIT13             0x2000
+#define     ELMER0_GP_BIT14             0x4000
+#define     ELMER0_GP_BIT15             0x8000
+#define     ELMER0_GP_BIT16             0x10000
+#define     ELMER0_GP_BIT17             0x20000
+#define     ELMER0_GP_BIT18             0x40000
+#define     ELMER0_GP_BIT19             0x80000
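+
+/*
+ * Each ELMER0_GP_BITn is simply (1 << n).  These bits select per-device
+ * GPIO/interrupt lines behind Elmer0; for example, the Marvell PHY code
+ * enables its interrupt with:
+ *
+ *	t1_tpi_read(adapter, A_ELMER0_INT_ENABLE, &elmer);
+ *	elmer |= ELMER0_GP_BIT6;
+ *	t1_tpi_write(adapter, A_ELMER0_INT_ENABLE, elmer);
+ */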
+
+#define MI1_OP_DIRECT_WRITE 1
+#define MI1_OP_DIRECT_READ  2
+
+#define MI1_OP_INDIRECT_ADDRESS  0
+#define MI1_OP_INDIRECT_WRITE    1
+#define MI1_OP_INDIRECT_READ_INC 2
+#define MI1_OP_INDIRECT_READ     3
+
+#endif /* _CXGB_ELMER0_H_ */
diff --git a/drivers/net/chelsio/espi.c b/drivers/net/chelsio/espi.c
new file mode 100644
--- /dev/null
+++ b/drivers/net/chelsio/espi.c
@@ -0,0 +1,346 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: espi.c                                                              *
+ * $Revision: 1.14 $                                                         *
+ * $Date: 2005/05/14 00:59:32 $                                              *
+ * Description:                                                              *
+ *  Ethernet SPI functionality.                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#include "common.h"
+#include "regs.h"
+#include "espi.h"
+
+struct peespi {
+	adapter_t *adapter;
+	struct espi_intr_counts intr_cnt;
+	u32 misc_ctrl;
+	spinlock_t lock;
+};
+
+#define ESPI_INTR_MASK (F_DIP4ERR | F_RXDROP | F_TXDROP | F_RXOVERFLOW | \
+			F_RAMPARITYERR | F_DIP2PARITYERR)
+#define MON_MASK  (V_MONITORED_PORT_NUM(3) | F_MONITORED_DIRECTION \
+		   | F_MONITORED_INTERFACE)
+
+#define TRICN_CNFG 14
+#define TRICN_CMD_READ  0x11
+#define TRICN_CMD_WRITE 0x21
+#define TRICN_CMD_ATTEMPTS 10
+
+static int tricn_write(adapter_t *adapter, int bundle_addr, int module_addr,
+		       int ch_addr, int reg_offset, u32 wr_data)
+{
+	int busy, attempts = TRICN_CMD_ATTEMPTS;
+
+	writel(V_WRITE_DATA(wr_data) |
+	       V_REGISTER_OFFSET(reg_offset) |
+	       V_CHANNEL_ADDR(ch_addr) | V_MODULE_ADDR(module_addr) |
+	       V_BUNDLE_ADDR(bundle_addr) |
+	       V_SPI4_COMMAND(TRICN_CMD_WRITE),
+	       adapter->regs + A_ESPI_CMD_ADDR);
+	writel(0, adapter->regs + A_ESPI_GOSTAT);
+
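+	/* Poll the command-busy flag until the write completes or we give up. */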
+	do {
+		busy = readl(adapter->regs + A_ESPI_GOSTAT) & F_ESPI_CMD_BUSY;
+	} while (busy && --attempts);
+
+	if (busy)
+		CH_ERR("%s: TRICN write timed out\n", adapter->name);
+
+	return busy;
+}
+
+/*
+ * 1. Deassert rx_reset_core.
+ * 2. Program TRICN_CNFG registers.
+ * 3. Deassert rx_reset_link.
+ */
+static int tricn_init(adapter_t *adapter)
+{
+	int     i               = 0;
+	int     sme             = 1;
+	int     stat            = 0;
+	int     timeout         = 0;
+	int     is_ready        = 0;
+	int     dynamic_deskew  = 0;
+
+	if (dynamic_deskew)
+		sme = 0;
+
+	/* 1 */
+	timeout = 1000;
+	do {
+		stat = readl(adapter->regs + A_ESPI_RX_RESET);
+		is_ready = stat & 0x4;
+		timeout--;
+		udelay(5);
+	} while (!is_ready && timeout);
+	writel(0x2, adapter->regs + A_ESPI_RX_RESET);
+	if (!is_ready) {
+		CH_ERR("ESPI: timeout waiting for RX reset in tricn_init()\n");
+		t1_fatal_err(adapter);
+	}
+
+	/* 2 */
+	if (sme) {
+		tricn_write(adapter, 0, 0, 0, TRICN_CNFG, 0x81);
+		tricn_write(adapter, 0, 1, 0, TRICN_CNFG, 0x81);
+		tricn_write(adapter, 0, 2, 0, TRICN_CNFG, 0x81);
+	}
+	for (i = 1; i <= 8; i++)
+		tricn_write(adapter, 0, 0, i, TRICN_CNFG, 0xf1);
+	for (i = 1; i <= 2; i++)
+		tricn_write(adapter, 0, 1, i, TRICN_CNFG, 0xf1);
+	for (i = 1; i <= 3; i++)
+		tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1);
+	tricn_write(adapter, 0, 2, 4, TRICN_CNFG, 0xf1);
+	tricn_write(adapter, 0, 2, 5, TRICN_CNFG, 0xe1);
+	tricn_write(adapter, 0, 2, 6, TRICN_CNFG, 0xf1);
+	tricn_write(adapter, 0, 2, 7, TRICN_CNFG, 0x80);
+	tricn_write(adapter, 0, 2, 8, TRICN_CNFG, 0xf1);
+
+	/* 3 */
+	writel(0x3, adapter->regs + A_ESPI_RX_RESET);
+
+	return 0;
+}
+
+void t1_espi_intr_enable(struct peespi *espi)
+{
+	u32 enable, pl_intr = readl(espi->adapter->regs + A_PL_ENABLE);
+
+	/*
+	 * Cannot enable ESPI interrupts on T1B because HW asserts the
+	 * interrupt incorrectly, namely the driver gets ESPI interrupts
+	 * but no data is actually dropped (can verify this reading the ESPI
+	 * drop registers).  Also, once the ESPI interrupt is asserted it
+	 * cannot be cleared (HW bug).
+	 */
+	enable = t1_is_T1B(espi->adapter) ? 0 : ESPI_INTR_MASK;
+	writel(enable, espi->adapter->regs + A_ESPI_INTR_ENABLE);
+	writel(pl_intr | F_PL_INTR_ESPI, espi->adapter->regs + A_PL_ENABLE);
+}
+
+void t1_espi_intr_clear(struct peespi *espi)
+{
+	writel(0xffffffff, espi->adapter->regs + A_ESPI_INTR_STATUS);
+	writel(F_PL_INTR_ESPI, espi->adapter->regs + A_PL_CAUSE);
+}
+
+void t1_espi_intr_disable(struct peespi *espi)
+{
+	u32 pl_intr = readl(espi->adapter->regs + A_PL_ENABLE);
+
+	writel(0, espi->adapter->regs + A_ESPI_INTR_ENABLE);
+	writel(pl_intr & ~F_PL_INTR_ESPI, espi->adapter->regs + A_PL_ENABLE);
+}
+
+int t1_espi_intr_handler(struct peespi *espi)
+{
+	u32 cnt;
+	u32 status = readl(espi->adapter->regs + A_ESPI_INTR_STATUS);
+
+	if (status & F_DIP4ERR)
+		espi->intr_cnt.DIP4_err++;
+	if (status & F_RXDROP)
+		espi->intr_cnt.rx_drops++;
+	if (status & F_TXDROP)
+		espi->intr_cnt.tx_drops++;
+	if (status & F_RXOVERFLOW)
+		espi->intr_cnt.rx_ovflw++;
+	if (status & F_RAMPARITYERR)
+		espi->intr_cnt.parity_err++;
+	if (status & F_DIP2PARITYERR) {
+		espi->intr_cnt.DIP2_parity_err++;
+
+		/*
+		 * Must read the error count to clear the interrupt
+		 * that it causes.
+		 */
+		cnt = readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT);
+	}
+
+	/*
+	 * For T1B we need to write 1 to clear ESPI interrupts.  For T2+ we
+	 * write the status as is.
+	 */
+	if (status && t1_is_T1B(espi->adapter))
+		status = 1;
+	writel(status, espi->adapter->regs + A_ESPI_INTR_STATUS);
+	return 0;
+}
+
+const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi)
+{
+	return &espi->intr_cnt;
+}
+
+static void espi_setup_for_pm3393(adapter_t *adapter)
+{
+	u32 wmark = t1_is_T1B(adapter) ? 0x4000 : 0x3200;
+
+	writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0);
+	writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN1);
+	writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2);
+	writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN3);
+	writel(0x100, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
+	writel(wmark, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
+	writel(3, adapter->regs + A_ESPI_CALENDAR_LENGTH);
+	writel(0x08000008, adapter->regs + A_ESPI_TRAIN);
+	writel(V_RX_NPORTS(1) | V_TX_NPORTS(1), adapter->regs + A_PORT_CONFIG);
+}
+
+/*
+ * T2 init part:
+ * 1. Set T_ESPI_MISCCTRL_ADDR.
+ * 2. Init ESPI registers.
+ * 3. Init TriCN hard macro.
+ */
+int t1_espi_init(struct peespi *espi, int mac_type, int nports)
+{
+	u32 cnt;
+	u32 status_enable_extra = 0;
+	adapter_t *adapter = espi->adapter;
+	u32 status, burstval = 0x800100;
+
+	/* Disable ESPI training.  MACs that can handle it enable it below. */
+	writel(0, adapter->regs + A_ESPI_TRAIN);
+
+	if (is_T2(adapter)) {
+		writel(V_OUT_OF_SYNC_COUNT(4) |
+		       V_DIP2_PARITY_ERR_THRES(3) |
+		       V_DIP4_THRES(1), adapter->regs + A_ESPI_MISC_CONTROL);
+		if (nports == 4) {
+			/* T204: maxburst1 = 0x40, maxburst2 = 0x20 */
+			burstval = 0x200040;
+		}
+	}
+	writel(burstval, adapter->regs + A_ESPI_MAXBURST1_MAXBURST2);
+
+	switch (mac_type) {
+	case CHBT_MAC_PM3393:
+		espi_setup_for_pm3393(adapter);
+		break;
+	default:
+		return -1;
+	}
+
+	/*
+	 * Make sure any pending interrupts from the SPI are
+	 * cleared before enabling the interrupt.
+	 */
+	writel(ESPI_INTR_MASK, espi->adapter->regs + A_ESPI_INTR_ENABLE);
+	status = readl(espi->adapter->regs + A_ESPI_INTR_STATUS);
+	if (status & F_DIP2PARITYERR)
+		cnt = readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT);
+
+	/*
+	 * For T1B we need to write 1 to clear ESPI interrupts.  For T2+ we
+	 * write the status as is.
+	 */
+	if (status && t1_is_T1B(espi->adapter))
+		status = 1;
+	writel(status, espi->adapter->regs + A_ESPI_INTR_STATUS);
+
+	writel(status_enable_extra | F_RXSTATUSENABLE,
+	       adapter->regs + A_ESPI_FIFO_STATUS_ENABLE);
+
+	if (is_T2(adapter)) {
+		tricn_init(adapter);
+		/*
+		 * Always position the control at the 1st port egress IN
+		 * (sop,eop) counter to reduce PIOs for the T/N210 workaround.
+		 */
+		espi->misc_ctrl = (readl(adapter->regs + A_ESPI_MISC_CONTROL)
+				   & ~MON_MASK) | (F_MONITORED_DIRECTION
+				   | F_MONITORED_INTERFACE);
+		writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
+		spin_lock_init(&espi->lock);
+	}
+
+	return 0;
+}
+
+void t1_espi_destroy(struct peespi *espi)
+{
+	kfree(espi);
+}
+
+struct peespi *t1_espi_create(adapter_t *adapter)
+{
+	struct peespi *espi = kmalloc(sizeof(*espi), GFP_KERNEL);
+
+	if (espi) {
+		memset(espi, 0, sizeof(*espi));
+		espi->adapter = adapter;
+	}
+	return espi;
+}
+
+void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val)
+{
+	struct peespi *espi = adapter->espi;
+
+	if (!is_T2(adapter))
+		return;
+	spin_lock(&espi->lock);
+	espi->misc_ctrl = (val & ~MON_MASK) |
+			  (espi->misc_ctrl & MON_MASK);
+	writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
+	spin_unlock(&espi->lock);
+}
+
+u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait)
+{
+	u32 sel;
+	struct peespi *espi = adapter->espi;
+
+	if (!is_T2(adapter))
+		return 0;
+	sel = V_MONITORED_PORT_NUM((addr & 0x3c) >> 2);
+	if (!wait) {
+		if (!spin_trylock(&espi->lock))
+			return 0;
+	} else
+		spin_lock(&espi->lock);
+
+	if (sel != (espi->misc_ctrl & MON_MASK)) {
+		writel((espi->misc_ctrl & ~MON_MASK) | sel,
+		       adapter->regs + A_ESPI_MISC_CONTROL);
+		sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
+		writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
+	} else
+		sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
+	spin_unlock(&espi->lock);
+	return sel;
+}
diff --git a/drivers/net/chelsio/espi.h b/drivers/net/chelsio/espi.h
new file mode 100644
--- /dev/null
+++ b/drivers/net/chelsio/espi.h
@@ -0,0 +1,68 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: espi.h                                                              *
+ * $Revision: 1.7 $                                                          *
+ * $Date: 2005/06/21 18:29:47 $                                              *
+ * Description:                                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#ifndef _CXGB_ESPI_H_
+#define _CXGB_ESPI_H_
+
+#include "common.h"
+
+struct espi_intr_counts {
+	unsigned int DIP4_err;
+	unsigned int rx_drops;
+	unsigned int tx_drops;
+	unsigned int rx_ovflw;
+	unsigned int parity_err;
+	unsigned int DIP2_parity_err;
+};
+
+struct peespi;
+
+struct peespi *t1_espi_create(adapter_t *adapter);
+void t1_espi_destroy(struct peespi *espi);
+int t1_espi_init(struct peespi *espi, int mac_type, int nports);
+
+void t1_espi_intr_enable(struct peespi *);
+void t1_espi_intr_clear(struct peespi *);
+void t1_espi_intr_disable(struct peespi *);
+int t1_espi_intr_handler(struct peespi *);
+const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi);
+
+void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val);
+u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait);
+
+#endif /* _CXGB_ESPI_H_ */
diff --git a/drivers/net/chelsio/gmac.h b/drivers/net/chelsio/gmac.h
new file mode 100644
--- /dev/null
+++ b/drivers/net/chelsio/gmac.h
@@ -0,0 +1,134 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: gmac.h                                                              *
+ * $Revision: 1.6 $                                                          *
+ * $Date: 2005/06/21 18:29:47 $                                              *
+ * Description:                                                              *
+ *  Generic MAC functionality.                                               *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#ifndef _CXGB_GMAC_H_
+#define _CXGB_GMAC_H_
+
+#include "common.h"
+
+enum { MAC_STATS_UPDATE_FAST, MAC_STATS_UPDATE_FULL };
+enum { MAC_DIRECTION_RX = 1, MAC_DIRECTION_TX = 2 };
+
+struct cmac_statistics {
+	/* Transmit */
+	u64 TxOctetsOK;
+	u64 TxOctetsBad;
+	u64 TxUnicastFramesOK;
+	u64 TxMulticastFramesOK;
+	u64 TxBroadcastFramesOK;
+	u64 TxPauseFrames;
+	u64 TxFramesWithDeferredXmissions;
+	u64 TxLateCollisions;
+	u64 TxTotalCollisions;
+	u64 TxFramesAbortedDueToXSCollisions;
+	u64 TxUnderrun;
+	u64 TxLengthErrors;
+	u64 TxInternalMACXmitError;
+	u64 TxFramesWithExcessiveDeferral;
+	u64 TxFCSErrors;
+
+	/* Receive */
+	u64 RxOctetsOK;
+	u64 RxOctetsBad;
+	u64 RxUnicastFramesOK;
+	u64 RxMulticastFramesOK;
+	u64 RxBroadcastFramesOK;
+	u64 RxPauseFrames;
+	u64 RxFCSErrors;
+	u64 RxAlignErrors;
+	u64 RxSymbolErrors;
+	u64 RxDataErrors;
+	u64 RxSequenceErrors;
+	u64 RxRuntErrors;
+	u64 RxJabberErrors;
+	u64 RxInternalMACRcvError;
+	u64 RxInRangeLengthErrors;
+	u64 RxOutOfRangeLengthField;
+	u64 RxFrameTooLongErrors;
+};
+
+struct cmac_ops {
+	void (*destroy)(struct cmac *);
+	int (*reset)(struct cmac *);
+	int (*interrupt_enable)(struct cmac *);
+	int (*interrupt_disable)(struct cmac *);
+	int (*interrupt_clear)(struct cmac *);
+	int (*interrupt_handler)(struct cmac *);
+
+	int (*enable)(struct cmac *, int);
+	int (*disable)(struct cmac *, int);
+
+	int (*loopback_enable)(struct cmac *);
+	int (*loopback_disable)(struct cmac *);
+
+	int (*set_mtu)(struct cmac *, int mtu);
+	int (*set_rx_mode)(struct cmac *, struct t1_rx_mode *rm);
+
+	int (*set_speed_duplex_fc)(struct cmac *, int speed, int duplex, int fc);
+	int (*get_speed_duplex_fc)(struct cmac *, int *speed, int *duplex,
+				   int *fc);
+
+	const struct cmac_statistics *(*statistics_update)(struct cmac *, int);
+
+	int (*macaddress_get)(struct cmac *, u8 mac_addr[6]);
+	int (*macaddress_set)(struct cmac *, u8 mac_addr[6]);
+};
+
+typedef struct _cmac_instance cmac_instance;
+
+struct cmac {
+	struct cmac_statistics stats;
+	adapter_t *adapter;
+	struct cmac_ops *ops;
+	cmac_instance *instance;
+};
+
+struct gmac {
+	unsigned int stats_update_period;
+	struct cmac *(*create)(adapter_t *adapter, int index);
+	int (*reset)(adapter_t *);
+};
+
+extern struct gmac t1_pm3393_ops;
+extern struct gmac t1_chelsio_mac_ops;
+extern struct gmac t1_vsc7321_ops;
+extern struct gmac t1_ixf1010_ops;
+extern struct gmac t1_dummy_mac_ops;
+
+#endif /* _CXGB_GMAC_H_ */
diff --git a/drivers/net/chelsio/mv88x201x.c b/drivers/net/chelsio/mv88x201x.c
new file mode 100644
--- /dev/null
+++ b/drivers/net/chelsio/mv88x201x.c
@@ -0,0 +1,252 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: mv88x201x.c                                                         *
+ * $Revision: 1.12 $                                                         *
+ * $Date: 2005/04/15 19:27:14 $                                              *
+ * Description:                                                              *
+ *  Marvell PHY (mv88x201x) functionality.                                   *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#include "cphy.h"
+#include "elmer0.h"
+
+/*
+ * The 88x2010 Rev C. requires some link status registers to be read
+ * twice in order to get the right values.  Future revisions will fix
+ * this problem and then this macro can disappear.
+ */
+#define MV88x2010_LINK_STATUS_BUGS    1
+
+static int led_init(struct cphy *cphy)
+{
+	/* Set up the LED registers so we can turn the LEDs on/off.
+	 * Writing these bits maps control to another
+	 * register. mmd(0x1) addr(0x7)
+	 */
+	mdio_write(cphy, 0x3, 0x8304, 0xdddd);
+	return 0;
+}
+
+static int led_link(struct cphy *cphy, u32 do_enable)
+{
+	u32 led = 0;
+#define LINK_ENABLE_BIT 0x1
+
+	mdio_read(cphy, 0x1, 0x7, &led);
+
+	if (do_enable & LINK_ENABLE_BIT) {
+		led |= LINK_ENABLE_BIT;
+		mdio_write(cphy, 0x1, 0x7, led);
+	} else {
+		led &= ~LINK_ENABLE_BIT;
+		mdio_write(cphy, 0x1, 0x7, led);
+	}
+	return 0;
+}
+
+/* Port Reset */
+static int mv88x201x_reset(struct cphy *cphy, int wait)
+{
+	/* This can be done through registers.  It is not required since
+	 * a full chip reset is used.
+	 */
+	return 0;
+}
+
+static int mv88x201x_interrupt_enable(struct cphy *cphy)
+{
+	u32 elmer;
+
+	/* Enable PHY LASI interrupts. */
+	mdio_write(cphy, 0x1, 0x9002, 0x1);
+
+	/* Enable Marvell interrupts through Elmer0. */
+	t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
+	elmer |= ELMER0_GP_BIT6;
+	t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
+	return 0;
+}
+
+static int mv88x201x_interrupt_disable(struct cphy *cphy)
+{
+	u32 elmer;
+
+	/* Disable PHY LASI interrupts. */
+	mdio_write(cphy, 0x1, 0x9002, 0x0);
+
+	/* Disable Marvell interrupts through Elmer0. */
+	t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
+	elmer &= ~ELMER0_GP_BIT6;
+	t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
+	return 0;
+}
+
+static int mv88x201x_interrupt_clear(struct cphy *cphy)
+{
+	u32 elmer;
+	u32 val;
+
+#ifdef MV88x2010_LINK_STATUS_BUGS
+	/* Required to read twice before the clear takes effect. */
+	mdio_read(cphy, 0x1, 0x9003, &val);
+	mdio_read(cphy, 0x1, 0x9004, &val);
+	mdio_read(cphy, 0x1, 0x9005, &val);
+
+	/* Read this register after the others above it, otherwise
+	 * it does not clear correctly.
+	 */
+	mdio_read(cphy, 0x1, 0x1, &val);
+#endif
+
+	/* Clear link status. */
+	mdio_read(cphy, 0x1, 0x1, &val);
+	/* Clear PHY LASI interrupts. */
+	mdio_read(cphy, 0x1, 0x9005, &val);
+
+#ifdef MV88x2010_LINK_STATUS_BUGS
+	/* Do it again. */
+	mdio_read(cphy, 0x1, 0x9003, &val);
+	mdio_read(cphy, 0x1, 0x9004, &val);
+#endif
+
+	/* Clear Marvell interrupts through Elmer0. */
+	t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
+	elmer |= ELMER0_GP_BIT6;
+	t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
+	return 0;
+}
+
+static int mv88x201x_interrupt_handler(struct cphy *cphy)
+{
+	/* Clear interrupts */
+	mv88x201x_interrupt_clear(cphy);
+
+	/* We have only enabled link change interrupts and so
+	 * cphy_cause must be a link change interrupt.
+	 */
+	return cphy_cause_link_change;
+}
+
+static int mv88x201x_set_loopback(struct cphy *cphy, int on)
+{
+	return 0;
+}
+
+static int mv88x201x_get_link_status(struct cphy *cphy, int *link_ok,
+				     int *speed, int *duplex, int *fc)
+{
+	u32 val = 0;
+#define LINK_STATUS_BIT 0x4
+
+	if (link_ok) {
+		/* Read link status. */
+		mdio_read(cphy, 0x1, 0x1, &val);
+		val &= LINK_STATUS_BIT;
+		*link_ok = (val == LINK_STATUS_BIT);
+		/* Turn on/off Link LED */
+		led_link(cphy, *link_ok);
+	}
+	if (speed)
+		*speed = SPEED_10000;
+	if (duplex)
+		*duplex = DUPLEX_FULL;
+	if (fc)
+		*fc = PAUSE_RX | PAUSE_TX;
+	return 0;
+}
+
+static void mv88x201x_destroy(struct cphy *cphy)
+{
+	kfree(cphy);
+}
+
+static struct cphy_ops mv88x201x_ops = {
+	.destroy           = mv88x201x_destroy,
+	.reset             = mv88x201x_reset,
+	.interrupt_enable  = mv88x201x_interrupt_enable,
+	.interrupt_disable = mv88x201x_interrupt_disable,
+	.interrupt_clear   = mv88x201x_interrupt_clear,
+	.interrupt_handler = mv88x201x_interrupt_handler,
+	.get_link_status   = mv88x201x_get_link_status,
+	.set_loopback      = mv88x201x_set_loopback,
+};
+
+static struct cphy *mv88x201x_phy_create(adapter_t *adapter, int phy_addr,
+					 struct mdio_ops *mdio_ops)
+{
+	u32 val;
+	struct cphy *cphy = kmalloc(sizeof(*cphy), GFP_KERNEL);
+
+	if (!cphy)
+		return NULL;
+	memset(cphy, 0, sizeof(*cphy));
+	cphy_init(cphy, adapter, phy_addr, &mv88x201x_ops, mdio_ops);
+
+	/* Command the PHY to enable the XFP's clock. */
+	mdio_read(cphy, 0x3, 0x8300, &val);
+	mdio_write(cphy, 0x3, 0x8300, val | 1);
+
+	/* Clear link status. Required because of a bug in the PHY.  */
+	mdio_read(cphy, 0x1, 0x8, &val);
+	mdio_read(cphy, 0x3, 0x8, &val);
+
+	/* Allow the Link/Ack LED to be turned on and off. */
+	led_init(cphy);
+	return cphy;
+}
+
+/* Chip Reset */
+static int mv88x201x_phy_reset(adapter_t *adapter)
+{
+	u32 val;
+
+	t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+	val &= ~4;
+	t1_tpi_write(adapter, A_ELMER0_GPO, val);
+	msleep(100);
+
+	t1_tpi_write(adapter, A_ELMER0_GPO, val | 4);
+	msleep(1000);
+
+	/* Now enable the laser, then delay 100us. */
+	t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+	val |= 0x8000;
+	t1_tpi_write(adapter, A_ELMER0_GPO, val);
+	udelay(100);
+	return 0;
+}
+
+struct gphy t1_mv88x201x_ops = {
+	mv88x201x_phy_create,
+	mv88x201x_phy_reset
+};
diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c
new file mode 100644
--- /dev/null
+++ b/drivers/net/chelsio/pm3393.c
@@ -0,0 +1,826 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: pm3393.c                                                            *
+ * $Revision: 1.16 $                                                         *
+ * $Date: 2005/05/14 00:59:32 $                                              *
+ * Description:                                                              *
+ *  PMC/SIERRA (pm3393) MAC-PHY functionality.                               *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#include "common.h"
+#include "regs.h"
+#include "gmac.h"
+#include "elmer0.h"
+#include "suni1x10gexp_regs.h"
+
+/* 802.3ae 10Gb/s MDIO Manageable Device (MMD) */
+enum {
+	MMD_RESERVED,
+	MMD_PMAPMD,
+	MMD_WIS,
+	MMD_PCS,
+	MMD_PHY_XGXS,	/* XGMII Extender Sublayer */
+	MMD_DTE_XGXS,
+};
+
+enum {
+	PHY_XGXS_CTRL_1,
+	PHY_XGXS_STATUS_1
+};
+
+#define OFFSET(REG_ADDR)    ((REG_ADDR) << 2)
+
+/* Max frame size PM3393 can handle. Includes Ethernet header and CRC. */
+#define MAX_FRAME_SIZE  9600
+
+#define IPG 12
+#define TXXG_CONF1_VAL ((IPG << SUNI1x10GEXP_BITOFF_TXXG_IPGT) | \
+	SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN | SUNI1x10GEXP_BITMSK_TXXG_CRCEN | \
+	SUNI1x10GEXP_BITMSK_TXXG_PADEN)
+#define RXXG_CONF1_VAL (SUNI1x10GEXP_BITMSK_RXXG_PUREP | 0x14 | \
+	SUNI1x10GEXP_BITMSK_RXXG_FLCHK | SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP)
+
+/* Update statistics every 15 minutes */
+#define STATS_TICK_SECS (15 * 60)
+
+enum {                     /* RMON registers */
+	RxOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW,
+	RxUnicastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW,
+	RxMulticastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW,
+	RxBroadcastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW,
+	RxPAUSEMACCtrlFramesReceived = SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW,
+	RxFrameCheckSequenceErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW,
+	RxFramesLostDueToInternalMACErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW,
+	RxSymbolErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW,
+	RxInRangeLengthErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW,
+	RxFramesTooLongErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW,
+	RxJabbers = SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW,
+	RxFragments = SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW,
+	RxUndersizedFrames =  SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW,
+
+	TxOctetsTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW,
+	TxFramesLostDueToInternalMACTransmissionError = SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW,
+	TxTransmitSystemError = SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW,
+	TxUnicastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW,
+	TxMulticastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW,
+	TxBroadcastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW,
+	TxPAUSEMACCtrlFramesTransmitted = SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW
+};
+
+struct _cmac_instance {
+	u8 enabled;
+	u8 fc;
+	u8 mac_addr[6];
+};
+
+static int pmread(struct cmac *cmac, u32 reg, u32 * data32)
+{
+	t1_tpi_read(cmac->adapter, OFFSET(reg), data32);
+	return 0;
+}
+
+static int pmwrite(struct cmac *cmac, u32 reg, u32 data32)
+{
+	t1_tpi_write(cmac->adapter, OFFSET(reg), data32);
+	return 0;
+}
+
+/* Port reset. */
+static int pm3393_reset(struct cmac *cmac)
+{
+	return 0;
+}
+
+/*
+ * Enable interrupts for the PM3393:
+ *
+ *	1. Enable PM3393 BLOCK interrupts.
+ *	2. Enable the PM3393 Master Interrupt bit (INTE).
+ *	3. Enable ELMER's PM3393 bit.
+ *	4. Enable the Terminator external interrupt.
+ */
+static int pm3393_interrupt_enable(struct cmac *cmac)
+{
+	u32 pl_intr;
+
+	/* PM3393 - Enabling all hardware block interrupts.
+	 */
+	pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0xffff);
+
+	/* Don't interrupt on statistics overflow, we are polling */
+	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0);
+
+	pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0xffff);
+
+	/* PM3393 - Global interrupt enable
+	 */
+	/* TBD XXX Disable for now until we figure out why error interrupts keep asserting. */
+	pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE,
+		0 /*SUNI1x10GEXP_BITMSK_TOP_INTE */ );
+
+	/* TERMINATOR - PL_INTERRUPTS_EXT */
+	pl_intr = readl(cmac->adapter->regs + A_PL_ENABLE);
+	pl_intr |= F_PL_INTR_EXT;
+	writel(pl_intr, cmac->adapter->regs + A_PL_ENABLE);
+	return 0;
+}
+
+static int pm3393_interrupt_disable(struct cmac *cmac)
+{
+	u32 elmer;
+
+	/* PM3393 - Disabling all hardware block interrupts. */
+	pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0);
+
+	/* PM3393 - Global interrupt enable */
+	pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE, 0);
+
+	/* ELMER - External chip interrupts. */
+	t1_tpi_read(cmac->adapter, A_ELMER0_INT_ENABLE, &elmer);
+	elmer &= ~ELMER0_GP_BIT1;
+	t1_tpi_write(cmac->adapter, A_ELMER0_INT_ENABLE, elmer);
+
+	/* TERMINATOR - PL_INTERRUPTS_EXT */
+	/* DO NOT DISABLE TERMINATOR's EXTERNAL INTERRUPTS. ANOTHER CHIP
+	 * COULD WANT THEM ENABLED. We disable PM3393 at the ELMER level.
+	 */
+
+	return 0;
+}
+
+static int pm3393_interrupt_clear(struct cmac *cmac)
+{
+	u32 elmer;
+	u32 pl_intr;
+	u32 val32;
+
+	/* PM3393 - Clearing HW interrupt blocks. Note, this assumes
+	 *          bit WCIMODE=0 for a clear-on-read.
+	 */
+	pmread(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS, &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS, &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS, &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS, &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT, &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS, &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT, &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS, &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_RXXG_INTERRUPT, &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_TXXG_INTERRUPT, &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT, &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION,
+	       &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS, &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE, &val32);
+
+	/* PM3393 - Global interrupt status
+	 */
+	pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS, &val32);
+
+	/* ELMER - External chip interrupts.
+	 */
+	t1_tpi_read(cmac->adapter, A_ELMER0_INT_CAUSE, &elmer);
+	elmer |= ELMER0_GP_BIT1;
+	t1_tpi_write(cmac->adapter, A_ELMER0_INT_CAUSE, elmer);
+
+	/* TERMINATOR - PL_INTERRUPTS_EXT */
+	pl_intr = readl(cmac->adapter->regs + A_PL_CAUSE);
+	pl_intr |= F_PL_INTR_EXT;
+	writel(pl_intr, cmac->adapter->regs + A_PL_CAUSE);
+
+	return 0;
+}
+
+/* Interrupt handler */
+static int pm3393_interrupt_handler(struct cmac *cmac)
+{
+	u32 master_intr_status;
+	/*
+	 * 1. Read the master interrupt register.
+	 * 2. Read the BLOCK's interrupt status registers.
+	 * 3. Handle the BLOCK interrupts.
+	 */
+	/* Read the master interrupt status register. */
+	pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS,
+	       &master_intr_status);
+
+	/* TBD XXX Let's just clear everything for now */
+	pm3393_interrupt_clear(cmac);
+
+	return 0;
+}
+
+static int pm3393_enable(struct cmac *cmac, int which)
+{
+	if (which & MAC_DIRECTION_RX)
+		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1,
+			(RXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_RXXG_RXEN));
+
+	if (which & MAC_DIRECTION_TX) {
+		u32 val = TXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_TXXG_TXEN0;
+
+		if (cmac->instance->fc & PAUSE_RX)
+			val |= SUNI1x10GEXP_BITMSK_TXXG_FCRX;
+		if (cmac->instance->fc & PAUSE_TX)
+			val |= SUNI1x10GEXP_BITMSK_TXXG_FCTX;
+		pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, val);
+	}
+
+	cmac->instance->enabled |= which;
+	return 0;
+}
+
+static int pm3393_enable_port(struct cmac *cmac, int which)
+{
+	/* Clear port statistics */
+	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
+		SUNI1x10GEXP_BITMSK_MSTAT_CLEAR);
+	udelay(2);
+	memset(&cmac->stats, 0, sizeof(struct cmac_statistics));
+
+	pm3393_enable(cmac, which);
+
+	/*
+	 * XXX This should be done by the PHY and preferably not at all.
+	 * The PHY doesn't give us link status indication on its own so have
+	 * the link management code query it instead.
+	 */
+	{
+		extern void link_changed(adapter_t *adapter, int port_id);
+
+		link_changed(cmac->adapter, 0);
+	}
+	return 0;
+}
+
+static int pm3393_disable(struct cmac *cmac, int which)
+{
+	if (which & MAC_DIRECTION_RX)
+		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1, RXXG_CONF1_VAL);
+	if (which & MAC_DIRECTION_TX)
+		pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, TXXG_CONF1_VAL);
+
+	/*
+	 * The disable is graceful. Give the PM3393 time.  Can't wait very
+	 * long here, we may be holding locks.
+	 */
+	udelay(20);
+
+	cmac->instance->enabled &= ~which;
+	return 0;
+}
+
+static int pm3393_loopback_enable(struct cmac *cmac)
+{
+	return 0;
+}
+
+static int pm3393_loopback_disable(struct cmac *cmac)
+{
+	return 0;
+}
+
+static int pm3393_set_mtu(struct cmac *cmac, int mtu)
+{
+	int enabled = cmac->instance->enabled;
+
+	/* MAX_FRAME_SIZE includes header + FCS, mtu doesn't */
+	mtu += 14 + 4;
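+	/*
+	 * For example, a standard 1500-byte MTU becomes a 1518-byte frame,
+	 * and MAX_FRAME_SIZE (9600) corresponds to an MTU of 9582.
+	 */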
+	if (mtu > MAX_FRAME_SIZE)
+		return -EINVAL;
+
+	/* Disable Rx/Tx MAC before configuring it. */
+	if (enabled)
+		pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
+
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH, mtu);
+	pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE, mtu);
+
+	if (enabled)
+		pm3393_enable(cmac, enabled);
+	return 0;
+}
+
+static u32 calc_crc(u8 *b, int len)
+{
+	int i;
+	u32 crc = (u32)~0;
+
+	/* calculate crc one bit at a time */
+	while (len--) {
+		crc ^= *b++;
+		for (i = 0; i < 8; i++) {
+			if (crc & 0x1)
+				crc = (crc >> 1) ^ 0xedb88320;
+			else
+				crc = (crc >> 1);
+		}
+	}
+
+	/* reverse bits */
+	crc = ((crc >> 4) & 0x0f0f0f0f) | ((crc << 4) & 0xf0f0f0f0);
+	crc = ((crc >> 2) & 0x33333333) | ((crc << 2) & 0xcccccccc);
+	crc = ((crc >> 1) & 0x55555555) | ((crc << 1) & 0xaaaaaaaa);
+	/* swap bytes */
+	crc = (crc >> 16) | (crc << 16);
+	crc = (crc >> 8 & 0x00ff00ff) | (crc << 8 & 0xff00ff00);
+
+	return crc;
+}
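+
+/*
+ * calc_crc() computes an Ethernet-style CRC-32 of the buffer one bit at
+ * a time (reflected polynomial 0xedb88320) and then bit-reverses the
+ * 32-bit result.  pm3393_set_rx_mode() below uses six bits of this CRC
+ * of the 6-byte MAC address as the index into the 64-entry multicast
+ * hash, i.e.:
+ *
+ *	int bit = (calc_crc(addr, ETH_ALEN) >> 23) & 0x3f;  (range 0..63)
+ */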
+
+static int pm3393_set_rx_mode(struct cmac *cmac, struct t1_rx_mode *rm)
+{
+	int enabled = cmac->instance->enabled & MAC_DIRECTION_RX;
+	u32 rx_mode;
+
+	/* Disable MAC RX before reconfiguring it */
+	if (enabled)
+		pm3393_disable(cmac, MAC_DIRECTION_RX);
+
+	pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, &rx_mode);
+	rx_mode &= ~(SUNI1x10GEXP_BITMSK_RXXG_PMODE |
+		     SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN);
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2,
+		(u16)rx_mode);
+
+	if (t1_rx_mode_promisc(rm)) {
+		/* Promiscuous mode. */
+		rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_PMODE;
+	}
+	if (t1_rx_mode_allmulti(rm)) {
+		/* Accept all multicast. */
+		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, 0xffff);
+		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, 0xffff);
+		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, 0xffff);
+		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, 0xffff);
+		rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
+	} else if (t1_rx_mode_mc_cnt(rm)) {
+		/* Accept one or more multicast(s). */
+		u8 *addr;
+		int bit;
+		u16 mc_filter[4] = { 0, };
+
+		while ((addr = t1_get_next_mcaddr(rm))) {
+			bit = (calc_crc(addr, ETH_ALEN) >> 23) & 0x3f;	/* bit[23:28] */
+			mc_filter[bit >> 4] |= 1 << (bit & 0xf);
+		}
+		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]);
+		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, mc_filter[1]);
+		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, mc_filter[2]);
+		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, mc_filter[3]);
+		rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
+	}
+
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, (u16)rx_mode);
+
+	if (enabled)
+		pm3393_enable(cmac, MAC_DIRECTION_RX);
+
+	return 0;
+}
+
+static int pm3393_get_speed_duplex_fc(struct cmac *cmac, int *speed,
+				      int *duplex, int *fc)
+{
+	if (speed)
+		*speed = SPEED_10000;
+	if (duplex)
+		*duplex = DUPLEX_FULL;
+	if (fc)
+		*fc = cmac->instance->fc;
+	return 0;
+}
+
+static int pm3393_set_speed_duplex_fc(struct cmac *cmac, int speed, int duplex,
+				      int fc)
+{
+	if (speed >= 0 && speed != SPEED_10000)
+		return -1;
+	if (duplex >= 0 && duplex != DUPLEX_FULL)
+		return -1;
+	if (fc & ~(PAUSE_TX | PAUSE_RX))
+		return -1;
+
+	if (fc != cmac->instance->fc) {
+		cmac->instance->fc = (u8) fc;
+		if (cmac->instance->enabled & MAC_DIRECTION_TX)
+			pm3393_enable(cmac, MAC_DIRECTION_TX);
+	}
+	return 0;
+}
+
+#define RMON_UPDATE(mac, name, stat_name) \
+	{ \
+		t1_tpi_read((mac)->adapter, OFFSET(name), &val0);	\
+		t1_tpi_read((mac)->adapter, OFFSET(((name)+1)), &val1); \
+		t1_tpi_read((mac)->adapter, OFFSET(((name)+2)), &val2); \
+		(mac)->stats.stat_name = ((u64)val0 & 0xffff) | \
+						(((u64)val1 & 0xffff) << 16) | \
+						(((u64)val2 & 0xff) << 32) | \
+						((mac)->stats.stat_name & \
+							(~(u64)0 << 40)); \
+		if (ro & ((u64)1 << \
+		    (((name) - SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2))) \
+			(mac)->stats.stat_name += ((u64)1 << 40); \
+	}
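+
+/*
+ * Each MSTAT counter appears to be 40 bits wide, read as three
+ * consecutive 16-bit TPI words of which only the low byte of the third
+ * is significant.  RMON_UPDATE reassembles the hardware value and keeps
+ * the software-extended top bits:
+ *
+ *	hw   = val0[15:0] | val1[15:0] << 16 | val2[7:0] << 32;
+ *	stat = hw | (old_stat & (~0ULL << 40));
+ *
+ * and adds 2^40 when the counter's rollover bit is set in 'ro'.
+ */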
+
+static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
+							      int flag)
+{
+	u64	ro;
+	u32	val0, val1, val2, val3;
+
+	/* Snap the counters */
+	pmwrite(mac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
+		SUNI1x10GEXP_BITMSK_MSTAT_SNAP);
+
+	/* Counter rollover, clear on read */
+	pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0, &val0);
+	pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1, &val1);
+	pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_2, &val2);
+	pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_3, &val3);
+	ro = ((u64)val0 & 0xffff) | (((u64)val1 & 0xffff) << 16) |
+		(((u64)val2 & 0xffff) << 32) | (((u64)val3 & 0xffff) << 48);
+
+	/* Rx stats */
+	RMON_UPDATE(mac, RxOctetsReceivedOK, RxOctetsOK);
+	RMON_UPDATE(mac, RxUnicastFramesReceivedOK, RxUnicastFramesOK);
+	RMON_UPDATE(mac, RxMulticastFramesReceivedOK, RxMulticastFramesOK);
+	RMON_UPDATE(mac, RxBroadcastFramesReceivedOK, RxBroadcastFramesOK);
+	RMON_UPDATE(mac, RxPAUSEMACCtrlFramesReceived, RxPauseFrames);
+	RMON_UPDATE(mac, RxFrameCheckSequenceErrors, RxFCSErrors);
+	RMON_UPDATE(mac, RxFramesLostDueToInternalMACErrors,
+				RxInternalMACRcvError);
+	RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors);
+	RMON_UPDATE(mac, RxInRangeLengthErrors, RxInRangeLengthErrors);
+	RMON_UPDATE(mac, RxFramesTooLongErrors , RxFrameTooLongErrors);
+	RMON_UPDATE(mac, RxJabbers, RxJabberErrors);
+	RMON_UPDATE(mac, RxFragments, RxRuntErrors);
+	RMON_UPDATE(mac, RxUndersizedFrames, RxRuntErrors);
+
+	/* Tx stats */
+	RMON_UPDATE(mac, TxOctetsTransmittedOK, TxOctetsOK);
+	RMON_UPDATE(mac, TxFramesLostDueToInternalMACTransmissionError,
+				TxInternalMACXmitError);
+	RMON_UPDATE(mac, TxTransmitSystemError, TxFCSErrors);
+	RMON_UPDATE(mac, TxUnicastFramesTransmittedOK, TxUnicastFramesOK);
+	RMON_UPDATE(mac, TxMulticastFramesTransmittedOK, TxMulticastFramesOK);
+	RMON_UPDATE(mac, TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK);
+	RMON_UPDATE(mac, TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames);
+
+	return &mac->stats;
+}
+
+static int pm3393_macaddress_get(struct cmac *cmac, u8 mac_addr[6])
+{
+	memcpy(mac_addr, cmac->instance->mac_addr, 6);
+	return 0;
+}
+
+static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6])
+{
+	u32 val, lo, mid, hi, enabled = cmac->instance->enabled;
+
+	/*
+	 * MAC addr: 00:07:43:00:13:09
+	 *
+	 * ma[5] = 0x09
+	 * ma[4] = 0x13
+	 * ma[3] = 0x00
+	 * ma[2] = 0x43
+	 * ma[1] = 0x07
+	 * ma[0] = 0x00
+	 *
+	 * The PM3393 requires byte swapping and reverse order entry
+	 * when programming MAC addresses:
+	 *
+	 * low_bits[15:0]    = ma[1]:ma[0]
+	 * mid_bits[31:16]   = ma[3]:ma[2]
+	 * high_bits[47:32]  = ma[5]:ma[4]
+	 */
+
+	/* Store local copy */
+	memcpy(cmac->instance->mac_addr, ma, 6);
+
+	lo = ((u32) ma[1] << 8) | (u32) ma[0];
+	mid = ((u32) ma[3] << 8) | (u32) ma[2];
+	hi = ((u32) ma[5] << 8) | (u32) ma[4];
+
+	/* Disable Rx/Tx MAC before configuring it. */
+	if (enabled)
+		pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
+
+	/* Set RXXG Station Address */
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_15_0, lo);
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_31_16, mid);
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_47_32, hi);
+
+	/* Set TXXG Station Address */
+	pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_15_0, lo);
+	pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_31_16, mid);
+	pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_47_32, hi);
+
+	/* Setup Exact Match Filter 1 with our MAC address
+	 *
+	 * Must disable exact match filter before configuring it.
+	 */
+	pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, &val);
+	val &= 0xff0f;
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val);
+
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW, lo);
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID, mid);
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH, hi);
+
+	val |= 0x0090;
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val);
+
+	if (enabled)
+		pm3393_enable(cmac, enabled);
+	return 0;
+}
+
+static void pm3393_destroy(struct cmac *cmac)
+{
+	kfree(cmac);
+}
+
+static struct cmac_ops pm3393_ops = {
+	.destroy                 = pm3393_destroy,
+	.reset                   = pm3393_reset,
+	.interrupt_enable        = pm3393_interrupt_enable,
+	.interrupt_disable       = pm3393_interrupt_disable,
+	.interrupt_clear         = pm3393_interrupt_clear,
+	.interrupt_handler       = pm3393_interrupt_handler,
+	.enable                  = pm3393_enable_port,
+	.disable                 = pm3393_disable,
+	.loopback_enable         = pm3393_loopback_enable,
+	.loopback_disable        = pm3393_loopback_disable,
+	.set_mtu                 = pm3393_set_mtu,
+	.set_rx_mode             = pm3393_set_rx_mode,
+	.get_speed_duplex_fc     = pm3393_get_speed_duplex_fc,
+	.set_speed_duplex_fc     = pm3393_set_speed_duplex_fc,
+	.statistics_update       = pm3393_update_statistics,
+	.macaddress_get          = pm3393_macaddress_get,
+	.macaddress_set          = pm3393_macaddress_set
+};
+
+static struct cmac *pm3393_mac_create(adapter_t *adapter, int index)
+{
+	struct cmac *cmac;
+
+	cmac = kmalloc(sizeof(*cmac) + sizeof(cmac_instance), GFP_KERNEL);
+	if (!cmac)
+		return NULL;
+	memset(cmac, 0, sizeof(*cmac));
+
+	cmac->ops = &pm3393_ops;
+	cmac->instance = (cmac_instance *) (cmac + 1);
+	cmac->adapter = adapter;
+	cmac->instance->fc = PAUSE_TX | PAUSE_RX;
+
+	t1_tpi_write(adapter, OFFSET(0x0001), 0x00008000);
+	t1_tpi_write(adapter, OFFSET(0x0001), 0x00000000);
+	t1_tpi_write(adapter, OFFSET(0x2308), 0x00009800);
+	t1_tpi_write(adapter, OFFSET(0x2305), 0x00001001);   /* PL4IO Enable */
+	t1_tpi_write(adapter, OFFSET(0x2320), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x2321), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x2322), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x2323), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x2324), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x2325), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x2326), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x2327), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x2328), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x2329), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x232a), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x232b), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x232c), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x232d), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x232e), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x232f), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x230d), 0x00009c00);
+	t1_tpi_write(adapter, OFFSET(0x2304), 0x00000202);	/* PL4IO Calendar Repetitions */
+
+	t1_tpi_write(adapter, OFFSET(0x3200), 0x00008080);	/* EFLX Enable */
+	t1_tpi_write(adapter, OFFSET(0x3210), 0x00000000);	/* EFLX Channel Deprovision */
+	t1_tpi_write(adapter, OFFSET(0x3203), 0x00000000);	/* EFLX Low Limit */
+	t1_tpi_write(adapter, OFFSET(0x3204), 0x00000040);	/* EFLX High Limit */
+	t1_tpi_write(adapter, OFFSET(0x3205), 0x000002cc);	/* EFLX Almost Full */
+	t1_tpi_write(adapter, OFFSET(0x3206), 0x00000199);	/* EFLX Almost Empty */
+	t1_tpi_write(adapter, OFFSET(0x3207), 0x00000240);	/* EFLX Cut Through Threshold */
+	t1_tpi_write(adapter, OFFSET(0x3202), 0x00000000);	/* EFLX Indirect Register Update */
+	t1_tpi_write(adapter, OFFSET(0x3210), 0x00000001);	/* EFLX Channel Provision */
+	t1_tpi_write(adapter, OFFSET(0x3208), 0x0000ffff);	/* EFLX Undocumented */
+	t1_tpi_write(adapter, OFFSET(0x320a), 0x0000ffff);	/* EFLX Undocumented */
+	t1_tpi_write(adapter, OFFSET(0x320c), 0x0000ffff);	/* EFLX enable overflow interrupt.  The other bits are undocumented */
+	t1_tpi_write(adapter, OFFSET(0x320e), 0x0000ffff);	/* EFLX Undocumented */
+
+	t1_tpi_write(adapter, OFFSET(0x2200), 0x0000c000);	/* IFLX Configuration - enable */
+	t1_tpi_write(adapter, OFFSET(0x2201), 0x00000000);	/* IFLX Channel Deprovision */
+	t1_tpi_write(adapter, OFFSET(0x220e), 0x00000000);	/* IFLX Low Limit */
+	t1_tpi_write(adapter, OFFSET(0x220f), 0x00000100);	/* IFLX High Limit */
+	t1_tpi_write(adapter, OFFSET(0x2210), 0x00000c00);	/* IFLX Almost Full Limit */
+	t1_tpi_write(adapter, OFFSET(0x2211), 0x00000599);	/* IFLX Almost Empty Limit */
+	t1_tpi_write(adapter, OFFSET(0x220d), 0x00000000);	/* IFLX Indirect Register Update */
+	t1_tpi_write(adapter, OFFSET(0x2201), 0x00000001);	/* IFLX Channel Provision */
+	t1_tpi_write(adapter, OFFSET(0x2203), 0x0000ffff);	/* IFLX Undocumented */
+	t1_tpi_write(adapter, OFFSET(0x2205), 0x0000ffff);	/* IFLX Undocumented */
+	t1_tpi_write(adapter, OFFSET(0x2209), 0x0000ffff);	/* IFLX Enable overflow interrupt.  The other bits are undocumented */
+
+	t1_tpi_write(adapter, OFFSET(0x2241), 0xfffffffe);	/* PL4MOS Undocumented */
+	t1_tpi_write(adapter, OFFSET(0x2242), 0x0000ffff);	/* PL4MOS Undocumented */
+	t1_tpi_write(adapter, OFFSET(0x2243), 0x00000008);	/* PL4MOS Starving Burst Size */
+	t1_tpi_write(adapter, OFFSET(0x2244), 0x00000008);	/* PL4MOS Hungry Burst Size */
+	t1_tpi_write(adapter, OFFSET(0x2245), 0x00000008);	/* PL4MOS Transfer Size */
+	t1_tpi_write(adapter, OFFSET(0x2240), 0x00000005);	/* PL4MOS Disable */
+
+	t1_tpi_write(adapter, OFFSET(0x2280), 0x00002103);	/* PL4ODP Training Repeat and SOP rule */
+	t1_tpi_write(adapter, OFFSET(0x2284), 0x00000000);	/* PL4ODP MAX_T setting */
+
+	t1_tpi_write(adapter, OFFSET(0x3280), 0x00000087);	/* PL4IDU Enable data forward, port state machine. Set ALLOW_NON_ZERO_OLB */
+	t1_tpi_write(adapter, OFFSET(0x3282), 0x0000001f);	/* PL4IDU Enable Dip4 check error interrupts */
+
+	t1_tpi_write(adapter, OFFSET(0x3040), 0x0c32);	/* # TXXG Config */
+	/* For T1 use timer based Mac flow control. */
+	t1_tpi_write(adapter, OFFSET(0x304d), 0x8000);
+	t1_tpi_write(adapter, OFFSET(0x2040), 0x059c);	/* # RXXG Config */
+	t1_tpi_write(adapter, OFFSET(0x2049), 0x0001);	/* # RXXG Cut Through */
+	t1_tpi_write(adapter, OFFSET(0x2070), 0x0000);	/* # Disable promiscuous mode */
+
+	/* Setup Exact Match Filter 0 to allow broadcast packets.
+	 */
+	t1_tpi_write(adapter, OFFSET(0x206e), 0x0000);	/* # Disable Match Enable bit */
+	t1_tpi_write(adapter, OFFSET(0x204a), 0xffff);	/* # low addr */
+	t1_tpi_write(adapter, OFFSET(0x204b), 0xffff);	/* # mid addr */
+	t1_tpi_write(adapter, OFFSET(0x204c), 0xffff);	/* # high addr */
+	t1_tpi_write(adapter, OFFSET(0x206e), 0x0009);	/* # Enable Match Enable bit */
+
+	t1_tpi_write(adapter, OFFSET(0x0003), 0x0000);	/* # NO SOP/ PAD_EN setup */
+	t1_tpi_write(adapter, OFFSET(0x0100), 0x0ff0);	/* # RXEQB disabled */
+	t1_tpi_write(adapter, OFFSET(0x0101), 0x0f0f);	/* # No Preemphasis */
+
+	return cmac;
+}
+
+static int pm3393_mac_reset(adapter_t * adapter)
+{
+	u32 val;
+	u32 x;
+	u32 is_pl4_reset_finished;
+	u32 is_pl4_outof_lock;
+	u32 is_xaui_mabc_pll_locked;
+	u32 successful_reset;
+	int i;
+
+	/* The following steps are required to properly reset
+	 * the PM3393. This information is provided in the
+	 * PM3393 datasheet (Issue 2: November 2002)
+	 * section 13.1 -- Device Reset.
+	 *
+	 * The PM3393 has three types of components that are
+	 * individually reset:
+	 *
+	 * DRESETB      - Digital circuitry
+	 * PL4_ARESETB  - PL4 analog circuitry
+	 * XAUI_ARESETB - XAUI bus analog circuitry
+	 *
+	 * Steps to reset the PM3393 using the RSTB pin:
+	 *
+	 * 1. Assert the RSTB pin low (write 0).
+	 * 2. Wait at least 1ms to initiate a complete initialization of the device.
+	 * 3. Wait until all external clocks and REFSEL are stable.
+	 * 4. Wait a minimum of 1ms (after the external clocks and REFSEL are stable).
+	 * 5. De-assert RSTB (write 1).
+	 * 6. Wait until the internal timers expire, after ~14ms.
+	 *    - Allows the analog clock synthesizer (PL4CSU) to stabilize to the
+	 *      selected reference frequency before allowing the digital
+	 *      portion of the device to operate.
+	 * 7. Wait at least 200us for the XAUI interface to stabilize.
+	 * 8. Verify that the PM3393 came out of reset successfully.
+	 *    Set the successful-reset flag if everything worked; otherwise try
+	 *    again a few more times.
+	 */
+
+	successful_reset = 0;
+	for (i = 0; i < 3 && !successful_reset; i++) {
+		/* 1 */
+		t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+		val &= ~1;
+		t1_tpi_write(adapter, A_ELMER0_GPO, val);
+
+		/* 2 */
+		msleep(1);
+
+		/* 3 */
+		msleep(1);
+
+		/* 4 */
+		msleep(2 /*1 extra ms for safety */ );
+
+		/* 5 */
+		val |= 1;
+		t1_tpi_write(adapter, A_ELMER0_GPO, val);
+
+		/* 6 */
+		msleep(15 /*1 extra ms for safety */ );
+
+		/* 7 */
+		msleep(1);
+
+		/* 8 */
+
+		/* Has PL4 analog block come out of reset correctly? */
+		t1_tpi_read(adapter, OFFSET(SUNI1x10GEXP_REG_DEVICE_STATUS), &val);
+		is_pl4_reset_finished = (val & SUNI1x10GEXP_BITMSK_TOP_EXPIRED);
+
+		/* TBD XXX SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL gets locked later
+		 *         in the init sequence; figure out why. */
+
+		/* Have all PL4 block clocks locked? */
+		x = (SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL
+		     /*| SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL */  |
+		     SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL |
+		     SUNI1x10GEXP_BITMSK_TOP_PL4_IS_ROOL |
+		     SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL);
+		is_pl4_outof_lock = (val & x);
+
+		/* ??? If this fails, we might be able to soft-reset the XAUI part
+		 *     and try to recover, saving us from doing another HW reset. */
+		/* Has the XAUI MABC PLL circuitry stabilized? */
+		is_xaui_mabc_pll_locked =
+		    (val & SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED);
+
+		successful_reset = (is_pl4_reset_finished && !is_pl4_outof_lock
+				    && is_xaui_mabc_pll_locked);
+	}
+	return successful_reset ? 0 : 1;
+}
+
+struct gmac t1_pm3393_ops = {
+	STATS_TICK_SECS,
+	pm3393_mac_create,
+	pm3393_mac_reset
+};
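
/*
 * Editor's note (illustrative sketch, not part of this patch): t1_pm3393_ops
 * is the hook through which the rest of the driver drives this MAC.  Assuming
 * the struct gmac members are named create/reset as used elsewhere in this
 * driver, and given a hypothetical 'adapter' and net_device 'dev', bringing a
 * port up would look roughly like:
 *
 *	struct cmac *mac;
 *
 *	t1_pm3393_ops.reset(adapter);
 *	mac = t1_pm3393_ops.create(adapter, 0);
 *	mac->ops->macaddress_set(mac, dev->dev_addr);
 *	mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
 */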
diff --git a/drivers/net/chelsio/regs.h b/drivers/net/chelsio/regs.h
new file mode 100644
--- /dev/null
+++ b/drivers/net/chelsio/regs.h
@@ -0,0 +1,468 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: regs.h                                                              *
+ * $Revision: 1.8 $                                                          *
+ * $Date: 2005/06/21 18:29:48 $                                              *
+ * Description:                                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#ifndef _CXGB_REGS_H_
+#define _CXGB_REGS_H_
+
+/* SGE registers */
+#define A_SG_CONTROL 0x0
+
+#define S_CMDQ0_ENABLE    0
+#define V_CMDQ0_ENABLE(x) ((x) << S_CMDQ0_ENABLE)
+#define F_CMDQ0_ENABLE    V_CMDQ0_ENABLE(1U)
+
+#define S_CMDQ1_ENABLE    1
+#define V_CMDQ1_ENABLE(x) ((x) << S_CMDQ1_ENABLE)
+#define F_CMDQ1_ENABLE    V_CMDQ1_ENABLE(1U)
+
+#define S_FL0_ENABLE    2
+#define V_FL0_ENABLE(x) ((x) << S_FL0_ENABLE)
+#define F_FL0_ENABLE    V_FL0_ENABLE(1U)
+
+#define S_FL1_ENABLE    3
+#define V_FL1_ENABLE(x) ((x) << S_FL1_ENABLE)
+#define F_FL1_ENABLE    V_FL1_ENABLE(1U)
+
+#define S_CPL_ENABLE    4
+#define V_CPL_ENABLE(x) ((x) << S_CPL_ENABLE)
+#define F_CPL_ENABLE    V_CPL_ENABLE(1U)
+
+#define S_RESPONSE_QUEUE_ENABLE    5
+#define V_RESPONSE_QUEUE_ENABLE(x) ((x) << S_RESPONSE_QUEUE_ENABLE)
+#define F_RESPONSE_QUEUE_ENABLE    V_RESPONSE_QUEUE_ENABLE(1U)
+
+#define S_CMDQ_PRIORITY    6
+#define M_CMDQ_PRIORITY    0x3
+#define V_CMDQ_PRIORITY(x) ((x) << S_CMDQ_PRIORITY)
+#define G_CMDQ_PRIORITY(x) (((x) >> S_CMDQ_PRIORITY) & M_CMDQ_PRIORITY)
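
/*
 * Editor's note (illustrative, not part of this patch): the register macros
 * in this header follow a single convention: S_FOO is the bit offset of
 * field FOO, M_FOO its mask (multi-bit fields only), V_FOO(x) places a value
 * at that offset, F_FOO is the one-bit flag form, and G_FOO(reg) extracts
 * the field again.  Assuming an 'adapter' pointer as used elsewhere in this
 * driver, a hypothetical A_SG_CONTROL access would look like:
 *
 *	u32 val = F_CMDQ0_ENABLE | F_FL0_ENABLE | V_CMDQ_PRIORITY(2);
 *
 *	writel(val, adapter->regs + A_SG_CONTROL);
 *	val = readl(adapter->regs + A_SG_CONTROL);
 *
 * after which G_CMDQ_PRIORITY(val) yields 2.
 */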
+
+#define S_DISABLE_CMDQ1_GTS    9
+#define V_DISABLE_CMDQ1_GTS(x) ((x) << S_DISABLE_CMDQ1_GTS)
+#define F_DISABLE_CMDQ1_GTS    V_DISABLE_CMDQ1_GTS(1U)
+
+#define S_DISABLE_FL0_GTS    10
+#define V_DISABLE_FL0_GTS(x) ((x) << S_DISABLE_FL0_GTS)
+#define F_DISABLE_FL0_GTS    V_DISABLE_FL0_GTS(1U)
+
+#define S_DISABLE_FL1_GTS    11
+#define V_DISABLE_FL1_GTS(x) ((x) << S_DISABLE_FL1_GTS)
+#define F_DISABLE_FL1_GTS    V_DISABLE_FL1_GTS(1U)
+
+#define S_ENABLE_BIG_ENDIAN    12
+#define V_ENABLE_BIG_ENDIAN(x) ((x) << S_ENABLE_BIG_ENDIAN)
+#define F_ENABLE_BIG_ENDIAN    V_ENABLE_BIG_ENDIAN(1U)
+
+#define S_ISCSI_COALESCE    14
+#define V_ISCSI_COALESCE(x) ((x) << S_ISCSI_COALESCE)
+#define F_ISCSI_COALESCE    V_ISCSI_COALESCE(1U)
+
+#define S_RX_PKT_OFFSET    15
+#define V_RX_PKT_OFFSET(x) ((x) << S_RX_PKT_OFFSET)
+
+#define S_VLAN_XTRACT    18
+#define V_VLAN_XTRACT(x) ((x) << S_VLAN_XTRACT)
+#define F_VLAN_XTRACT    V_VLAN_XTRACT(1U)
+
+#define A_SG_DOORBELL 0x4
+#define A_SG_CMD0BASELWR 0x8
+#define A_SG_CMD0BASEUPR 0xc
+#define A_SG_CMD1BASELWR 0x10
+#define A_SG_CMD1BASEUPR 0x14
+#define A_SG_FL0BASELWR 0x18
+#define A_SG_FL0BASEUPR 0x1c
+#define A_SG_FL1BASELWR 0x20
+#define A_SG_FL1BASEUPR 0x24
+#define A_SG_CMD0SIZE 0x28
+#define A_SG_FL0SIZE 0x2c
+#define A_SG_RSPSIZE 0x30
+#define A_SG_RSPBASELWR 0x34
+#define A_SG_RSPBASEUPR 0x38
+#define A_SG_FLTHRESHOLD 0x3c
+#define A_SG_RSPQUEUECREDIT 0x40
+#define A_SG_SLEEPING 0x48
+#define A_SG_INTRTIMER 0x4c
+#define A_SG_CMD1SIZE 0xb0
+#define A_SG_FL1SIZE 0xb4
+#define A_SG_INT_ENABLE 0xb8
+
+#define S_RESPQ_EXHAUSTED    0
+#define V_RESPQ_EXHAUSTED(x) ((x) << S_RESPQ_EXHAUSTED)
+#define F_RESPQ_EXHAUSTED    V_RESPQ_EXHAUSTED(1U)
+
+#define S_RESPQ_OVERFLOW    1
+#define V_RESPQ_OVERFLOW(x) ((x) << S_RESPQ_OVERFLOW)
+#define F_RESPQ_OVERFLOW    V_RESPQ_OVERFLOW(1U)
+
+#define S_FL_EXHAUSTED    2
+#define V_FL_EXHAUSTED(x) ((x) << S_FL_EXHAUSTED)
+#define F_FL_EXHAUSTED    V_FL_EXHAUSTED(1U)
+
+#define S_PACKET_TOO_BIG    3
+#define V_PACKET_TOO_BIG(x) ((x) << S_PACKET_TOO_BIG)
+#define F_PACKET_TOO_BIG    V_PACKET_TOO_BIG(1U)
+
+#define S_PACKET_MISMATCH    4
+#define V_PACKET_MISMATCH(x) ((x) << S_PACKET_MISMATCH)
+#define F_PACKET_MISMATCH    V_PACKET_MISMATCH(1U)
+
+#define A_SG_INT_CAUSE 0xbc
+#define A_SG_RESPACCUTIMER 0xc0
+
+/* MC3 registers */
+
+#define S_READY    1
+#define V_READY(x) ((x) << S_READY)
+#define F_READY    V_READY(1U)
+
+/* MC4 registers */
+
+#define A_MC4_CFG 0x180
+#define S_MC4_SLOW    25
+#define V_MC4_SLOW(x) ((x) << S_MC4_SLOW)
+#define F_MC4_SLOW    V_MC4_SLOW(1U)
+
+/* TPI registers */
+
+#define A_TPI_ADDR 0x280
+#define A_TPI_WR_DATA 0x284
+#define A_TPI_RD_DATA 0x288
+#define A_TPI_CSR 0x28c
+
+#define S_TPIWR    0
+#define V_TPIWR(x) ((x) << S_TPIWR)
+#define F_TPIWR    V_TPIWR(1U)
+
+#define S_TPIRDY    1
+#define V_TPIRDY(x) ((x) << S_TPIRDY)
+#define F_TPIRDY    V_TPIRDY(1U)
+
+#define A_TPI_PAR 0x29c
+
+#define S_TPIPAR    0
+#define M_TPIPAR    0x7f
+#define V_TPIPAR(x) ((x) << S_TPIPAR)
+#define G_TPIPAR(x) (((x) >> S_TPIPAR) & M_TPIPAR)
+
+/* TP registers */
+
+#define A_TP_IN_CONFIG 0x300
+
+#define S_TP_IN_CSPI_CPL    3
+#define V_TP_IN_CSPI_CPL(x) ((x) << S_TP_IN_CSPI_CPL)
+#define F_TP_IN_CSPI_CPL    V_TP_IN_CSPI_CPL(1U)
+
+#define S_TP_IN_CSPI_CHECK_IP_CSUM    5
+#define V_TP_IN_CSPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_IP_CSUM)
+#define F_TP_IN_CSPI_CHECK_IP_CSUM    V_TP_IN_CSPI_CHECK_IP_CSUM(1U)
+
+#define S_TP_IN_CSPI_CHECK_TCP_CSUM    6
+#define V_TP_IN_CSPI_CHECK_TCP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_TCP_CSUM)
+#define F_TP_IN_CSPI_CHECK_TCP_CSUM    V_TP_IN_CSPI_CHECK_TCP_CSUM(1U)
+
+#define S_TP_IN_ESPI_ETHERNET    8
+#define V_TP_IN_ESPI_ETHERNET(x) ((x) << S_TP_IN_ESPI_ETHERNET)
+#define F_TP_IN_ESPI_ETHERNET    V_TP_IN_ESPI_ETHERNET(1U)
+
+#define S_TP_IN_ESPI_CHECK_IP_CSUM    12
+#define V_TP_IN_ESPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_ESPI_CHECK_IP_CSUM)
+#define F_TP_IN_ESPI_CHECK_IP_CSUM    V_TP_IN_ESPI_CHECK_IP_CSUM(1U)
+
+#define S_TP_IN_ESPI_CHECK_TCP_CSUM    13
+#define V_TP_IN_ESPI_CHECK_TCP_CSUM(x) ((x) << S_TP_IN_ESPI_CHECK_TCP_CSUM)
+#define F_TP_IN_ESPI_CHECK_TCP_CSUM    V_TP_IN_ESPI_CHECK_TCP_CSUM(1U)
+
+#define S_OFFLOAD_DISABLE    14
+#define V_OFFLOAD_DISABLE(x) ((x) << S_OFFLOAD_DISABLE)
+#define F_OFFLOAD_DISABLE    V_OFFLOAD_DISABLE(1U)
+
+#define A_TP_OUT_CONFIG 0x304
+
+#define S_TP_OUT_CSPI_CPL    2
+#define V_TP_OUT_CSPI_CPL(x) ((x) << S_TP_OUT_CSPI_CPL)
+#define F_TP_OUT_CSPI_CPL    V_TP_OUT_CSPI_CPL(1U)
+
+#define S_TP_OUT_ESPI_ETHERNET    6
+#define V_TP_OUT_ESPI_ETHERNET(x) ((x) << S_TP_OUT_ESPI_ETHERNET)
+#define F_TP_OUT_ESPI_ETHERNET    V_TP_OUT_ESPI_ETHERNET(1U)
+
+#define S_TP_OUT_ESPI_GENERATE_IP_CSUM    10
+#define V_TP_OUT_ESPI_GENERATE_IP_CSUM(x) ((x) << S_TP_OUT_ESPI_GENERATE_IP_CSUM)
+#define F_TP_OUT_ESPI_GENERATE_IP_CSUM    V_TP_OUT_ESPI_GENERATE_IP_CSUM(1U)
+
+#define S_TP_OUT_ESPI_GENERATE_TCP_CSUM    11
+#define V_TP_OUT_ESPI_GENERATE_TCP_CSUM(x) ((x) << S_TP_OUT_ESPI_GENERATE_TCP_CSUM)
+#define F_TP_OUT_ESPI_GENERATE_TCP_CSUM    V_TP_OUT_ESPI_GENERATE_TCP_CSUM(1U)
+
+#define A_TP_GLOBAL_CONFIG 0x308
+
+#define S_IP_TTL    0
+#define M_IP_TTL    0xff
+#define V_IP_TTL(x) ((x) << S_IP_TTL)
+
+#define S_TCP_CSUM    11
+#define V_TCP_CSUM(x) ((x) << S_TCP_CSUM)
+#define F_TCP_CSUM    V_TCP_CSUM(1U)
+
+#define S_UDP_CSUM    12
+#define V_UDP_CSUM(x) ((x) << S_UDP_CSUM)
+#define F_UDP_CSUM    V_UDP_CSUM(1U)
+
+#define S_IP_CSUM    13
+#define V_IP_CSUM(x) ((x) << S_IP_CSUM)
+#define F_IP_CSUM    V_IP_CSUM(1U)
+
+#define S_PATH_MTU    15
+#define V_PATH_MTU(x) ((x) << S_PATH_MTU)
+#define F_PATH_MTU    V_PATH_MTU(1U)
+
+#define S_5TUPLE_LOOKUP    17
+#define V_5TUPLE_LOOKUP(x) ((x) << S_5TUPLE_LOOKUP)
+
+#define S_SYN_COOKIE_PARAMETER    26
+#define V_SYN_COOKIE_PARAMETER(x) ((x) << S_SYN_COOKIE_PARAMETER)
+
+#define A_TP_PC_CONFIG 0x348
+#define S_DIS_TX_FILL_WIN_PUSH    12
+#define V_DIS_TX_FILL_WIN_PUSH(x) ((x) << S_DIS_TX_FILL_WIN_PUSH)
+#define F_DIS_TX_FILL_WIN_PUSH    V_DIS_TX_FILL_WIN_PUSH(1U)
+
+#define S_TP_PC_REV    30
+#define M_TP_PC_REV    0x3
+#define G_TP_PC_REV(x) (((x) >> S_TP_PC_REV) & M_TP_PC_REV)
+#define A_TP_RESET 0x44c
+#define S_TP_RESET    0
+#define V_TP_RESET(x) ((x) << S_TP_RESET)
+#define F_TP_RESET    V_TP_RESET(1U)
+
+#define A_TP_INT_ENABLE 0x470
+#define A_TP_INT_CAUSE 0x474
+#define A_TP_TX_DROP_CONFIG 0x4b8
+
+#define S_ENABLE_TX_DROP    31
+#define V_ENABLE_TX_DROP(x) ((x) << S_ENABLE_TX_DROP)
+#define F_ENABLE_TX_DROP    V_ENABLE_TX_DROP(1U)
+
+#define S_ENABLE_TX_ERROR    30
+#define V_ENABLE_TX_ERROR(x) ((x) << S_ENABLE_TX_ERROR)
+#define F_ENABLE_TX_ERROR    V_ENABLE_TX_ERROR(1U)
+
+#define S_DROP_TICKS_CNT    4
+#define V_DROP_TICKS_CNT(x) ((x) << S_DROP_TICKS_CNT)
+
+#define S_NUM_PKTS_DROPPED    0
+#define V_NUM_PKTS_DROPPED(x) ((x) << S_NUM_PKTS_DROPPED)
+
+/* CSPI registers */
+
+#define S_DIP4ERR    0
+#define V_DIP4ERR(x) ((x) << S_DIP4ERR)
+#define F_DIP4ERR    V_DIP4ERR(1U)
+
+#define S_RXDROP    1
+#define V_RXDROP(x) ((x) << S_RXDROP)
+#define F_RXDROP    V_RXDROP(1U)
+
+#define S_TXDROP    2
+#define V_TXDROP(x) ((x) << S_TXDROP)
+#define F_TXDROP    V_TXDROP(1U)
+
+#define S_RXOVERFLOW    3
+#define V_RXOVERFLOW(x) ((x) << S_RXOVERFLOW)
+#define F_RXOVERFLOW    V_RXOVERFLOW(1U)
+
+#define S_RAMPARITYERR    4
+#define V_RAMPARITYERR(x) ((x) << S_RAMPARITYERR)
+#define F_RAMPARITYERR    V_RAMPARITYERR(1U)
+
+/* ESPI registers */
+
+#define A_ESPI_SCH_TOKEN0 0x880
+#define A_ESPI_SCH_TOKEN1 0x884
+#define A_ESPI_SCH_TOKEN2 0x888
+#define A_ESPI_SCH_TOKEN3 0x88c
+#define A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK 0x890
+#define A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK 0x894
+#define A_ESPI_CALENDAR_LENGTH 0x898
+#define A_PORT_CONFIG 0x89c
+
+#define S_RX_NPORTS    0
+#define V_RX_NPORTS(x) ((x) << S_RX_NPORTS)
+
+#define S_TX_NPORTS    8
+#define V_TX_NPORTS(x) ((x) << S_TX_NPORTS)
+
+#define A_ESPI_FIFO_STATUS_ENABLE 0x8a0
+
+#define S_RXSTATUSENABLE    0
+#define V_RXSTATUSENABLE(x) ((x) << S_RXSTATUSENABLE)
+#define F_RXSTATUSENABLE    V_RXSTATUSENABLE(1U)
+
+#define S_INTEL1010MODE    4
+#define V_INTEL1010MODE(x) ((x) << S_INTEL1010MODE)
+#define F_INTEL1010MODE    V_INTEL1010MODE(1U)
+
+#define A_ESPI_MAXBURST1_MAXBURST2 0x8a8
+#define A_ESPI_TRAIN 0x8ac
+#define A_ESPI_INTR_STATUS 0x8c8
+
+#define S_DIP2PARITYERR    5
+#define V_DIP2PARITYERR(x) ((x) << S_DIP2PARITYERR)
+#define F_DIP2PARITYERR    V_DIP2PARITYERR(1U)
+
+#define A_ESPI_INTR_ENABLE 0x8cc
+#define A_RX_DROP_THRESHOLD 0x8d0
+#define A_ESPI_RX_RESET 0x8ec
+#define A_ESPI_MISC_CONTROL 0x8f0
+
+#define S_OUT_OF_SYNC_COUNT    0
+#define V_OUT_OF_SYNC_COUNT(x) ((x) << S_OUT_OF_SYNC_COUNT)
+
+#define S_DIP2_PARITY_ERR_THRES    5
+#define V_DIP2_PARITY_ERR_THRES(x) ((x) << S_DIP2_PARITY_ERR_THRES)
+
+#define S_DIP4_THRES    9
+#define V_DIP4_THRES(x) ((x) << S_DIP4_THRES)
+
+#define S_MONITORED_PORT_NUM    25
+#define V_MONITORED_PORT_NUM(x) ((x) << S_MONITORED_PORT_NUM)
+
+#define S_MONITORED_DIRECTION    27
+#define V_MONITORED_DIRECTION(x) ((x) << S_MONITORED_DIRECTION)
+#define F_MONITORED_DIRECTION    V_MONITORED_DIRECTION(1U)
+
+#define S_MONITORED_INTERFACE    28
+#define V_MONITORED_INTERFACE(x) ((x) << S_MONITORED_INTERFACE)
+#define F_MONITORED_INTERFACE    V_MONITORED_INTERFACE(1U)
+
+#define A_ESPI_DIP2_ERR_COUNT 0x8f4
+#define A_ESPI_CMD_ADDR 0x8f8
+
+#define S_WRITE_DATA    0
+#define V_WRITE_DATA(x) ((x) << S_WRITE_DATA)
+
+#define S_REGISTER_OFFSET    8
+#define V_REGISTER_OFFSET(x) ((x) << S_REGISTER_OFFSET)
+
+#define S_CHANNEL_ADDR    12
+#define V_CHANNEL_ADDR(x) ((x) << S_CHANNEL_ADDR)
+
+#define S_MODULE_ADDR    16
+#define V_MODULE_ADDR(x) ((x) << S_MODULE_ADDR)
+
+#define S_BUNDLE_ADDR    20
+#define V_BUNDLE_ADDR(x) ((x) << S_BUNDLE_ADDR)
+
+#define S_SPI4_COMMAND    24
+#define V_SPI4_COMMAND(x) ((x) << S_SPI4_COMMAND)
+
+#define A_ESPI_GOSTAT 0x8fc
+#define S_ESPI_CMD_BUSY    8
+#define V_ESPI_CMD_BUSY(x) ((x) << S_ESPI_CMD_BUSY)
+#define F_ESPI_CMD_BUSY    V_ESPI_CMD_BUSY(1U)
+
+/* PL registers */
+
+#define A_PL_ENABLE 0xa00
+
+#define S_PL_INTR_SGE_ERR    0
+#define V_PL_INTR_SGE_ERR(x) ((x) << S_PL_INTR_SGE_ERR)
+#define F_PL_INTR_SGE_ERR    V_PL_INTR_SGE_ERR(1U)
+
+#define S_PL_INTR_SGE_DATA    1
+#define V_PL_INTR_SGE_DATA(x) ((x) << S_PL_INTR_SGE_DATA)
+#define F_PL_INTR_SGE_DATA    V_PL_INTR_SGE_DATA(1U)
+
+#define S_PL_INTR_TP    6
+#define V_PL_INTR_TP(x) ((x) << S_PL_INTR_TP)
+#define F_PL_INTR_TP    V_PL_INTR_TP(1U)
+
+#define S_PL_INTR_ESPI    8
+#define V_PL_INTR_ESPI(x) ((x) << S_PL_INTR_ESPI)
+#define F_PL_INTR_ESPI    V_PL_INTR_ESPI(1U)
+
+#define S_PL_INTR_PCIX    10
+#define V_PL_INTR_PCIX(x) ((x) << S_PL_INTR_PCIX)
+#define F_PL_INTR_PCIX    V_PL_INTR_PCIX(1U)
+
+#define S_PL_INTR_EXT    11
+#define V_PL_INTR_EXT(x) ((x) << S_PL_INTR_EXT)
+#define F_PL_INTR_EXT    V_PL_INTR_EXT(1U)
+
+#define A_PL_CAUSE 0xa04
+
+/* MC5 registers */
+
+#define A_MC5_CONFIG 0xc04
+
+#define S_TCAM_RESET    1
+#define V_TCAM_RESET(x) ((x) << S_TCAM_RESET)
+#define F_TCAM_RESET    V_TCAM_RESET(1U)
+
+#define S_M_BUS_ENABLE    5
+#define V_M_BUS_ENABLE(x) ((x) << S_M_BUS_ENABLE)
+#define F_M_BUS_ENABLE    V_M_BUS_ENABLE(1U)
+
+/* PCICFG registers */
+
+#define A_PCICFG_PM_CSR 0x44
+#define A_PCICFG_VPD_ADDR 0x4a
+
+#define S_VPD_OP_FLAG    15
+#define V_VPD_OP_FLAG(x) ((x) << S_VPD_OP_FLAG)
+#define F_VPD_OP_FLAG    V_VPD_OP_FLAG(1U)
+
+#define A_PCICFG_VPD_DATA 0x4c
+
+#define A_PCICFG_INTR_ENABLE 0xf4
+#define A_PCICFG_INTR_CAUSE 0xf8
+
+#define A_PCICFG_MODE 0xfc
+
+#define S_PCI_MODE_64BIT    0
+#define V_PCI_MODE_64BIT(x) ((x) << S_PCI_MODE_64BIT)
+#define F_PCI_MODE_64BIT    V_PCI_MODE_64BIT(1U)
+
+#define S_PCI_MODE_PCIX    5
+#define V_PCI_MODE_PCIX(x) ((x) << S_PCI_MODE_PCIX)
+#define F_PCI_MODE_PCIX    V_PCI_MODE_PCIX(1U)
+
+#define S_PCI_MODE_CLK    6
+#define M_PCI_MODE_CLK    0x3
+#define G_PCI_MODE_CLK(x) (((x) >> S_PCI_MODE_CLK) & M_PCI_MODE_CLK)
+
+#endif /* _CXGB_REGS_H_ */
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
new file mode 100644
--- /dev/null
+++ b/drivers/net/chelsio/sge.c
@@ -0,0 +1,1684 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: sge.c                                                               *
+ * $Revision: 1.26 $                                                         *
+ * $Date: 2005/06/21 18:29:48 $                                              *
+ * Description:                                                              *
+ *  DMA engine.                                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#include "common.h"
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/if_arp.h>
+
+#include "cpl5_cmd.h"
+#include "sge.h"
+#include "regs.h"
+#include "espi.h"
+
+
+#ifdef NETIF_F_TSO
+#include <linux/tcp.h>
+#endif
+
+#define SGE_CMDQ_N		2
+#define SGE_FREELQ_N		2
+#define SGE_CMDQ0_E_N		1024
+#define SGE_CMDQ1_E_N		128
+#define SGE_FREEL_SIZE		4096
+#define SGE_JUMBO_FREEL_SIZE	512
+#define SGE_FREEL_REFILL_THRESH	16
+#define SGE_RESPQ_E_N		1024
+#define SGE_INTRTIMER_NRES	1000
+#define SGE_RX_COPY_THRES	256
+#define SGE_RX_SM_BUF_SIZE	1536
+
+#define SGE_RX_DROP_THRES 2
+
+#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)
+
+/*
+ * Period of the TX buffer reclaim timer.  This timer does not need to run
+ * frequently as TX buffers are usually reclaimed by new TX packets.
+ */
+#define TX_RECLAIM_PERIOD (HZ / 4)
+
+#ifndef NET_IP_ALIGN
+# define NET_IP_ALIGN 2
+#endif
+
+#define M_CMD_LEN       0x7fffffff
+#define V_CMD_LEN(v)    (v)
+#define G_CMD_LEN(v)    ((v) & M_CMD_LEN)
+#define V_CMD_GEN1(v)   ((v) << 31)
+#define V_CMD_GEN2(v)   (v)
+#define F_CMD_DATAVALID (1 << 1)
+#define F_CMD_SOP       (1 << 2)
+#define V_CMD_EOP(v)    ((v) << 3)
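
/*
 * Editor's note (illustrative, not part of this patch): these macros pack the
 * buffer length, the generation bit and the SOP/EOP/valid flags into the
 * len_gen and flags words of a command descriptor.  Mirroring what
 * write_tx_descs() does later in this file, a single-buffer packet of length
 * 'len' written at generation 'gen' becomes:
 *
 *	e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen);
 *	e->flags   = F_CMD_DATAVALID | F_CMD_SOP | V_CMD_EOP(1) | V_CMD_GEN2(gen);
 */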
+
+/*
+ * Command queue, receive buffer list, and response queue descriptors.
+ */
+#if defined(__BIG_ENDIAN_BITFIELD)
+struct cmdQ_e {
+	u32 addr_lo;
+	u32 len_gen;
+	u32 flags;
+	u32 addr_hi;
+};
+
+struct freelQ_e {
+	u32 addr_lo;
+	u32 len_gen;
+	u32 gen2;
+	u32 addr_hi;
+};
+
+struct respQ_e {
+	u32 Qsleeping		: 4;
+	u32 Cmdq1CreditReturn	: 5;
+	u32 Cmdq1DmaComplete	: 5;
+	u32 Cmdq0CreditReturn	: 5;
+	u32 Cmdq0DmaComplete	: 5;
+	u32 FreelistQid		: 2;
+	u32 CreditValid		: 1;
+	u32 DataValid		: 1;
+	u32 Offload		: 1;
+	u32 Eop			: 1;
+	u32 Sop			: 1;
+	u32 GenerationBit	: 1;
+	u32 BufferLength;
+};
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+struct cmdQ_e {
+	u32 len_gen;
+	u32 addr_lo;
+	u32 addr_hi;
+	u32 flags;
+};
+
+struct freelQ_e {
+	u32 len_gen;
+	u32 addr_lo;
+	u32 addr_hi;
+	u32 gen2;
+};
+
+struct respQ_e {
+	u32 BufferLength;
+	u32 GenerationBit	: 1;
+	u32 Sop			: 1;
+	u32 Eop			: 1;
+	u32 Offload		: 1;
+	u32 DataValid		: 1;
+	u32 CreditValid		: 1;
+	u32 FreelistQid		: 2;
+	u32 Cmdq0DmaComplete	: 5;
+	u32 Cmdq0CreditReturn	: 5;
+	u32 Cmdq1DmaComplete	: 5;
+	u32 Cmdq1CreditReturn	: 5;
+	u32 Qsleeping		: 4;
+};
+#endif
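
/*
 * Editor's note (illustrative, not part of this patch): ring-entry ownership
 * is tracked with the generation ("gen") bits above instead of a valid flag
 * that would have to be cleared by the consumer.  The producer stamps every
 * entry with the ring's current generation and flips its genbit whenever the
 * producer index wraps; the consumer treats an entry as valid only while the
 * stored generation matches its own copy, as process_responses() does later
 * in this file:
 *
 *	while (e->GenerationBit == q->genbit) {
 *		... consume *e ...
 *		if (++q->cidx == q->size) {
 *			q->cidx = 0;
 *			q->genbit ^= 1;
 *		}
 *	}
 */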
+
+/*
+ * SW Context Command and Freelist Queue Descriptors
+ */
+struct cmdQ_ce {
+	struct sk_buff *skb;
+	DECLARE_PCI_UNMAP_ADDR(dma_addr);
+	DECLARE_PCI_UNMAP_LEN(dma_len);
+};
+
+struct freelQ_ce {
+	struct sk_buff *skb;
+	DECLARE_PCI_UNMAP_ADDR(dma_addr);
+	DECLARE_PCI_UNMAP_LEN(dma_len);
+};
+
+/*
+ * SW command, freelist and response rings
+ */
+struct cmdQ {
+	unsigned long   status;         /* HW DMA fetch status */
+	unsigned int    in_use;         /* # of in-use command descriptors */
+	unsigned int	size;	        /* # of descriptors */
+	unsigned int	processed;      /* total # of descs HW has processed */
+	unsigned int	cleaned;        /* total # of descs SW has reclaimed */
+	unsigned int	stop_thres;     /* SW TX queue suspend threshold */
+	u16		pidx;           /* producer index (SW) */
+	u16		cidx;           /* consumer index (HW) */
+	u8		genbit;         /* current generation (=valid) bit */
+	u8		sop;            /* is next entry start of packet? */
+	struct cmdQ_e  *entries;        /* HW command descriptor Q */
+	struct cmdQ_ce *centries;       /* SW command context descriptor Q */
+	spinlock_t	lock;           /* Lock to protect cmdQ enqueuing */
+	dma_addr_t	dma_addr;       /* DMA addr HW command descriptor Q */
+};
+
+struct freelQ {
+	unsigned int	credits;        /* # of available RX buffers */
+	unsigned int	size;	        /* free list capacity */
+	u16		pidx;           /* producer index (SW) */
+	u16		cidx;           /* consumer index (HW) */
+	u16		rx_buffer_size; /* Buffer size on this free list */
+	u16		dma_offset;     /* DMA offset to align IP headers */
+	u16		recycleq_idx;   /* skb recycle q to use */
+	u8		genbit;	        /* current generation (=valid) bit */
+	struct freelQ_e	*entries;       /* HW freelist descriptor Q */
+	struct freelQ_ce *centries;     /* SW freelist context descriptor Q */
+	dma_addr_t	dma_addr;       /* DMA addr HW freelist descriptor Q */
+};
+
+struct respQ {
+	unsigned int	credits;        /* credits to be returned to SGE */
+	unsigned int	size;	        /* # of response Q descriptors */
+	u16		cidx;	        /* consumer index (SW) */
+	u8		genbit;	        /* current generation(=valid) bit */
+	struct respQ_e *entries;        /* HW response descriptor Q */
+	dma_addr_t	dma_addr;       /* DMA addr HW response descriptor Q */
+};
+
+/* Bit flags for cmdQ.status */
+enum {
+	CMDQ_STAT_RUNNING = 1,          /* fetch engine is running */
+	CMDQ_STAT_LAST_PKT_DB = 2       /* last packet rung the doorbell */
+};
+
+/*
+ * Main SGE data structure
+ *
+ * Interrupts are handled by a single CPU and it is likely that on an MP system
+ * the application is migrated to another CPU. In that scenario, we try to
+ * separate the RX (in irq context) and TX state in order to decrease memory
+ * contention.
+ */
+struct sge {
+	struct adapter *adapter; 	/* adapter backpointer */
+	struct net_device *netdev;      /* netdevice backpointer */
+	struct freelQ 	freelQ[SGE_FREELQ_N]; /* buffer free lists */
+	struct respQ 	respQ;		/* response Q */
+	unsigned long   stopped_tx_queues; /* bitmap of suspended Tx queues */
+	unsigned int	rx_pkt_pad;     /* RX padding for L2 packets */
+	unsigned int	jumbo_fl;       /* jumbo freelist Q index */
+	unsigned int	intrtimer_nres;	/* no-resource interrupt timer */
+	unsigned int	fixed_intrtimer;/* non-adaptive interrupt timer */
+	struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
+	struct timer_list espibug_timer;
+	unsigned int	espibug_timeout;
+	struct sk_buff	*espibug_skb;
+	u32		sge_control;	/* shadow value of sge control reg */
+	struct sge_intr_counts stats;
+	struct sge_port_stats port_stats[MAX_NPORTS];
+	struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
+};
+
+/*
+ * PIO to indicate that memory mapped Q contains valid descriptor(s).
+ */
+static inline void doorbell_pio(struct adapter *adapter, u32 val)
+{
+	wmb();
+	writel(val, adapter->regs + A_SG_DOORBELL);
+}
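
/*
 * Editor's note (illustrative, not part of this patch): once new descriptors
 * have been written to a ring, the doorbell is rung with that queue's enable
 * bit so the SGE re-examines the ring; freelQs_empty() below does exactly
 * that after refilling both free lists:
 *
 *	doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
 */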
+
+/*
+ * Frees all RX buffers on the freelist Q. The caller must make sure that
+ * the SGE is turned off before calling this function.
+ */
+static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
+{
+	unsigned int cidx = q->cidx;
+
+	while (q->credits--) {
+		struct freelQ_ce *ce = &q->centries[cidx];
+
+		pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
+				 pci_unmap_len(ce, dma_len),
+				 PCI_DMA_FROMDEVICE);
+		dev_kfree_skb(ce->skb);
+		ce->skb = NULL;
+		if (++cidx == q->size)
+			cidx = 0;
+	}
+}
+
+/*
+ * Free RX free list and response queue resources.
+ */
+static void free_rx_resources(struct sge *sge)
+{
+	struct pci_dev *pdev = sge->adapter->pdev;
+	unsigned int size, i;
+
+	if (sge->respQ.entries) {
+		size = sizeof(struct respQ_e) * sge->respQ.size;
+		pci_free_consistent(pdev, size, sge->respQ.entries,
+				    sge->respQ.dma_addr);
+	}
+
+	for (i = 0; i < SGE_FREELQ_N; i++) {
+		struct freelQ *q = &sge->freelQ[i];
+
+		if (q->centries) {
+			free_freelQ_buffers(pdev, q);
+			kfree(q->centries);
+		}
+		if (q->entries) {
+			size = sizeof(struct freelQ_e) * q->size;
+			pci_free_consistent(pdev, size, q->entries,
+					    q->dma_addr);
+		}
+	}
+}
+
+/*
+ * Allocates basic RX resources, consisting of memory mapped freelist Qs and a
+ * response queue.
+ */
+static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
+{
+	struct pci_dev *pdev = sge->adapter->pdev;
+	unsigned int size, i;
+
+	for (i = 0; i < SGE_FREELQ_N; i++) {
+		struct freelQ *q = &sge->freelQ[i];
+
+		q->genbit = 1;
+		q->size = p->freelQ_size[i];
+		q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
+		size = sizeof(struct freelQ_e) * q->size;
+		q->entries = (struct freelQ_e *)
+			      pci_alloc_consistent(pdev, size, &q->dma_addr);
+		if (!q->entries)
+			goto err_no_mem;
+		memset(q->entries, 0, size);
+		size = sizeof(struct freelQ_ce) * q->size;
+		q->centries = kmalloc(size, GFP_KERNEL);
+		if (!q->centries)
+			goto err_no_mem;
+		memset(q->centries, 0, size);
+	}
+
+	/*
+	 * Calculate the buffer sizes for the two free lists.  FL0 accommodates
+	 * regular sized Ethernet frames, FL1 is sized not to exceed 16K,
+	 * including all the sk_buff overhead.
+	 *
+	 * Note: For T2 FL0 and FL1 are reversed.
+	 */
+	sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
+		sizeof(struct cpl_rx_data) +
+		sge->freelQ[!sge->jumbo_fl].dma_offset;
+	sge->freelQ[sge->jumbo_fl].rx_buffer_size = (16 * 1024) -
+		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+	/*
+	 * Setup which skb recycle Q should be used when recycling buffers from
+	 * each free list.
+	 */
+	sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
+	sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;
+
+	sge->respQ.genbit = 1;
+	sge->respQ.size = SGE_RESPQ_E_N;
+	sge->respQ.credits = 0;
+	size = sizeof(struct respQ_e) * sge->respQ.size;
+	sge->respQ.entries = (struct respQ_e *)
+		pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
+	if (!sge->respQ.entries)
+		goto err_no_mem;
+	memset(sge->respQ.entries, 0, size);
+	return 0;
+
+err_no_mem:
+	free_rx_resources(sge);
+	return -ENOMEM;
+}
+
+/*
+ * Reclaims n TX descriptors and frees the buffers associated with them.
+ */
+static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
+{
+	struct cmdQ_ce *ce;
+	struct pci_dev *pdev = sge->adapter->pdev;
+	unsigned int cidx = q->cidx;
+
+	q->in_use -= n;
+	ce = &q->centries[cidx];
+	while (n--) {
+		if (q->sop)
+			pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
+			 		 pci_unmap_len(ce, dma_len),
+					 PCI_DMA_TODEVICE);
+		else
+			pci_unmap_page(pdev, pci_unmap_addr(ce, dma_addr),
+			 	       pci_unmap_len(ce, dma_len),
+				       PCI_DMA_TODEVICE);
+		q->sop = 0;
+		if (ce->skb) {
+			dev_kfree_skb(ce->skb);
+			q->sop = 1;
+		}
+		ce++;
+		if (++cidx == q->size) {
+			cidx = 0;
+			ce = q->centries;
+		}
+	}
+	q->cidx = cidx;
+}
+
+/*
+ * Free TX resources.
+ *
+ * Assumes that SGE is stopped and all interrupts are disabled.
+ */
+static void free_tx_resources(struct sge *sge)
+{
+	struct pci_dev *pdev = sge->adapter->pdev;
+	unsigned int size, i;
+
+	for (i = 0; i < SGE_CMDQ_N; i++) {
+		struct cmdQ *q = &sge->cmdQ[i];
+
+		if (q->centries) {
+			if (q->in_use)
+				free_cmdQ_buffers(sge, q, q->in_use);
+			kfree(q->centries);
+		}
+		if (q->entries) {
+			size = sizeof(struct cmdQ_e) * q->size;
+			pci_free_consistent(pdev, size, q->entries,
+					    q->dma_addr);
+		}
+	}
+}
+
+/*
+ * Allocates basic TX resources, consisting of memory mapped command Qs.
+ */
+static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
+{
+	struct pci_dev *pdev = sge->adapter->pdev;
+	unsigned int size, i;
+
+	for (i = 0; i < SGE_CMDQ_N; i++) {
+		struct cmdQ *q = &sge->cmdQ[i];
+
+		q->genbit = 1;
+		q->sop = 1;
+		q->size = p->cmdQ_size[i];
+		q->in_use = 0;
+		q->status = 0;
+		q->processed = q->cleaned = 0;
+		q->stop_thres = 0;
+		spin_lock_init(&q->lock);
+		size = sizeof(struct cmdQ_e) * q->size;
+		q->entries = (struct cmdQ_e *)
+			      pci_alloc_consistent(pdev, size, &q->dma_addr);
+		if (!q->entries)
+			goto err_no_mem;
+		memset(q->entries, 0, size);
+		size = sizeof(struct cmdQ_ce) * q->size;
+		q->centries = kmalloc(size, GFP_KERNEL);
+		if (!q->centries)
+			goto err_no_mem;
+		memset(q->centries, 0, size);
+	}
+
+	/*
+	 * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE
+	 * only.  For queue 0 set the stop threshold so we can handle one more
+	 * packet from each port, plus reserve an additional 24 entries for
+	 * Ethernet packets only.  Queue 1 never suspends nor do we reserve
+	 * space for Ethernet packets.
+	 */
+	sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
+		(MAX_SKB_FRAGS + 1);
+	return 0;
+
+err_no_mem:
+	free_tx_resources(sge);
+	return -ENOMEM;
+}
+
+static inline void setup_ring_params(struct adapter *adapter, u64 addr,
+				     u32 size, int base_reg_lo,
+				     int base_reg_hi, int size_reg)
+{
+	writel((u32)addr, adapter->regs + base_reg_lo);
+	writel(addr >> 32, adapter->regs + base_reg_hi);
+	writel(size, adapter->regs + size_reg);
+}
+
+/*
+ * Enable/disable VLAN acceleration.
+ */
+void t1_set_vlan_accel(struct adapter *adapter, int on_off)
+{
+	struct sge *sge = adapter->sge;
+
+	sge->sge_control &= ~F_VLAN_XTRACT;
+	if (on_off)
+		sge->sge_control |= F_VLAN_XTRACT;
+	if (adapter->open_device_map) {
+		writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
+		readl(adapter->regs + A_SG_CONTROL); /* flush */
+	}
+}
+
+/*
+ * Programs the various SGE registers.  The engine is not yet enabled, but
+ * sge->sge_control is set up and ready to go.
+ */
+static void configure_sge(struct sge *sge, struct sge_params *p)
+{
+	struct adapter *ap = sge->adapter;
+	
+	writel(0, ap->regs + A_SG_CONTROL);
+	setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
+			  A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
+	setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
+			  A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
+	setup_ring_params(ap, sge->freelQ[0].dma_addr,
+			  sge->freelQ[0].size, A_SG_FL0BASELWR,
+			  A_SG_FL0BASEUPR, A_SG_FL0SIZE);
+	setup_ring_params(ap, sge->freelQ[1].dma_addr,
+			  sge->freelQ[1].size, A_SG_FL1BASELWR,
+			  A_SG_FL1BASEUPR, A_SG_FL1SIZE);
+
+	/* The threshold comparison uses <. */
+	writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);
+
+	setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
+			  A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
+	writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);
+
+	sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
+		F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
+		V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
+		F_DISABLE_FL0_GTS | F_DISABLE_FL1_GTS |
+		V_RX_PKT_OFFSET(sge->rx_pkt_pad);
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+	sge->sge_control |= F_ENABLE_BIG_ENDIAN;
+#endif
+
+	/* Initialize no-resource timer */
+	sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);
+
+	t1_sge_set_coalesce_params(sge, p);
+}
+
+/*
+ * Return the payload capacity of the jumbo free-list buffers.
+ */
+static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
+{
+	return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
+		sge->freelQ[sge->jumbo_fl].dma_offset -
+		sizeof(struct cpl_rx_data);
+}
+
+/*
+ * Frees all SGE related resources and the sge structure itself
+ */
+void t1_sge_destroy(struct sge *sge)
+{
+	if (sge->espibug_skb)
+		kfree_skb(sge->espibug_skb);
+
+	free_tx_resources(sge);
+	free_rx_resources(sge);
+	kfree(sge);
+}
+
+/*
+ * Allocates new RX buffers on the freelist Q (and tracks them on the freelist
+ * context Q) until the Q is full or alloc_skb fails.
+ *
+ * It is possible that the generation bits already match, indicating that the
+ * buffer is already valid and nothing needs to be done. This happens when we
+ * copied a received buffer into a new sk_buff during the interrupt processing.
+ *
+ * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
+ * we specify an RX_OFFSET in order to make sure that the IP header is 4B
+ * aligned.
+ */
+static void refill_free_list(struct sge *sge, struct freelQ *q)
+{
+	struct pci_dev *pdev = sge->adapter->pdev;
+	struct freelQ_ce *ce = &q->centries[q->pidx];
+	struct freelQ_e *e = &q->entries[q->pidx];
+	unsigned int dma_len = q->rx_buffer_size - q->dma_offset;
+
+
+	while (q->credits < q->size) {
+		struct sk_buff *skb;
+		dma_addr_t mapping;
+
+		skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC);
+		if (!skb)
+			break;
+
+		skb_reserve(skb, q->dma_offset);
+		mapping = pci_map_single(pdev, skb->data, dma_len,
+					 PCI_DMA_FROMDEVICE);
+		ce->skb = skb;
+		pci_unmap_addr_set(ce, dma_addr, mapping);
+		pci_unmap_len_set(ce, dma_len, dma_len);
+		e->addr_lo = (u32)mapping;
+		e->addr_hi = (u64)mapping >> 32;
+		e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
+		wmb();
+		e->gen2 = V_CMD_GEN2(q->genbit);
+
+		e++;
+		ce++;
+		if (++q->pidx == q->size) {
+			q->pidx = 0;
+			q->genbit ^= 1;
+			ce = q->centries;
+			e = q->entries;
+		}
+		q->credits++;
+	}
+
+}
+
+/*
+ * Calls refill_free_list for both free lists. If we cannot fill at least 1/4
+ * of both rings, we go into 'few interrupt mode' in order to give the system
+ * time to free up resources.
+ */
+static void freelQs_empty(struct sge *sge)
+{
+	struct adapter *adapter = sge->adapter;
+	u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
+	u32 irqholdoff_reg;
+
+	refill_free_list(sge, &sge->freelQ[0]);
+	refill_free_list(sge, &sge->freelQ[1]);
+
+	if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
+	    sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
+		irq_reg |= F_FL_EXHAUSTED;
+		irqholdoff_reg = sge->fixed_intrtimer;
+	} else {
+		/* Clear the F_FL_EXHAUSTED interrupts for now */
+		irq_reg &= ~F_FL_EXHAUSTED;
+		irqholdoff_reg = sge->intrtimer_nres;
+	}
+	writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
+	writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);
+
+	/* We reenable the Qs to force a freelist GTS interrupt later */
+	doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
+}
+
+#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
+#define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
+#define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \
+			F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
+
+/*
+ * Disable SGE Interrupts
+ */
+void t1_sge_intr_disable(struct sge *sge)
+{
+	u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
+
+	writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
+	writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
+}
+
+/*
+ * Enable SGE interrupts.
+ */
+void t1_sge_intr_enable(struct sge *sge)
+{
+	u32 en = SGE_INT_ENABLE;
+	u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
+
+	if (sge->adapter->flags & TSO_CAPABLE)
+		en &= ~F_PACKET_TOO_BIG;
+	writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
+	writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
+}
+
+/*
+ * Clear SGE interrupts.
+ */
+void t1_sge_intr_clear(struct sge *sge)
+{
+	writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
+	writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
+}
+
+/*
+ * SGE 'Error' interrupt handler
+ */
+int t1_sge_intr_error_handler(struct sge *sge)
+{
+	struct adapter *adapter = sge->adapter;
+	u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);
+
+	if (adapter->flags & TSO_CAPABLE)
+		cause &= ~F_PACKET_TOO_BIG;
+	if (cause & F_RESPQ_EXHAUSTED)
+		sge->stats.respQ_empty++;
+	if (cause & F_RESPQ_OVERFLOW) {
+		sge->stats.respQ_overflow++;
+		CH_ALERT("%s: SGE response queue overflow\n",
+			 adapter->name);
+	}
+	if (cause & F_FL_EXHAUSTED) {
+		sge->stats.freelistQ_empty++;
+		freelQs_empty(sge);
+	}
+	if (cause & F_PACKET_TOO_BIG) {
+		sge->stats.pkt_too_big++;
+		CH_ALERT("%s: SGE max packet size exceeded\n",
+			 adapter->name);
+	}
+	if (cause & F_PACKET_MISMATCH) {
+		sge->stats.pkt_mismatch++;
+		CH_ALERT("%s: SGE packet mismatch\n", adapter->name);
+	}
+	if (cause & SGE_INT_FATAL)
+		t1_fatal_err(adapter);
+
+	writel(cause, adapter->regs + A_SG_INT_CAUSE);
+	return 0;
+}
+
+const struct sge_intr_counts *t1_sge_get_intr_counts(struct sge *sge)
+{
+	return &sge->stats;
+}
+
+const struct sge_port_stats *t1_sge_get_port_stats(struct sge *sge, int port)
+{
+	return &sge->port_stats[port];
+}
+
+/**
+ *	recycle_fl_buf - recycle a free list buffer
+ *	@fl: the free list
+ *	@idx: index of buffer to recycle
+ *
+ *	Recycles the specified buffer on the given free list by adding it at
+ *	the next available slot on the list.
+ */
+static void recycle_fl_buf(struct freelQ *fl, int idx)
+{
+	struct freelQ_e *from = &fl->entries[idx];
+	struct freelQ_e *to = &fl->entries[fl->pidx];
+
+	fl->centries[fl->pidx] = fl->centries[idx];
+	to->addr_lo = from->addr_lo;
+	to->addr_hi = from->addr_hi;
+	to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
+	wmb();
+	to->gen2 = V_CMD_GEN2(fl->genbit);
+	fl->credits++;
+
+	if (++fl->pidx == fl->size) {
+		fl->pidx = 0;
+		fl->genbit ^= 1;
+	}
+}
+
+/**
+ *	get_packet - return the next ingress packet buffer
+ *	@pdev: the PCI device that received the packet
+ *	@fl: the SGE free list holding the packet
+ *	@len: the actual packet length, excluding any SGE padding
+ *	@dma_pad: padding at beginning of buffer left by SGE DMA
+ *	@skb_pad: padding to be used if the packet is copied
+ *	@copy_thres: length threshold under which a packet should be copied
+ *	@drop_thres: # of remaining buffers before we start dropping packets
+ *
+ *	Get the next packet from a free list and complete setup of the
+ *	sk_buff.  If the packet is small we make a copy and recycle the
+ *	original buffer, otherwise we use the original buffer itself.  If a
+ *	positive drop threshold is supplied packets are dropped and their
+ *	buffers recycled if (a) the number of remaining buffers is under the
+ *	threshold and the packet is too big to copy, or (b) the packet should
+ *	be copied but there is no memory for the copy.
+ */
+static inline struct sk_buff *get_packet(struct pci_dev *pdev,
+					 struct freelQ *fl, unsigned int len,
+					 int dma_pad, int skb_pad,
+					 unsigned int copy_thres,
+					 unsigned int drop_thres)
+{
+	struct sk_buff *skb;
+	struct freelQ_ce *ce = &fl->centries[fl->cidx];
+
+	if (len < copy_thres) {
+		skb = alloc_skb(len + skb_pad, GFP_ATOMIC);
+		if (likely(skb != NULL)) {
+			skb_reserve(skb, skb_pad);
+			skb_put(skb, len);
+			pci_dma_sync_single_for_cpu(pdev,
+					    pci_unmap_addr(ce, dma_addr),
+ 					    pci_unmap_len(ce, dma_len),
+					    PCI_DMA_FROMDEVICE);
+			memcpy(skb->data, ce->skb->data + dma_pad, len);
+			pci_dma_sync_single_for_device(pdev,
+					    pci_unmap_addr(ce, dma_addr),
+ 					    pci_unmap_len(ce, dma_len),
+					    PCI_DMA_FROMDEVICE);
+		} else if (!drop_thres)
+			goto use_orig_buf;
+
+		recycle_fl_buf(fl, fl->cidx);
+		return skb;
+	}
+
+	if (fl->credits < drop_thres) {
+		recycle_fl_buf(fl, fl->cidx);
+		return NULL;
+	}
+
+use_orig_buf:
+	pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
+			 pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+	skb = ce->skb;
+	skb_reserve(skb, dma_pad);
+	skb_put(skb, len);
+	return skb;
+}
+
+/**
+ *	unexpected_offload - handle an unexpected offload packet
+ *	@adapter: the adapter
+ *	@fl: the free list that received the packet
+ *
+ *	Called when we receive an unexpected offload packet (e.g., the TOE
+ *	function is disabled or the card is a NIC).  Prints a message and
+ *	recycles the buffer.
+ */
+static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
+{
+	struct freelQ_ce *ce = &fl->centries[fl->cidx];
+	struct sk_buff *skb = ce->skb;
+
+	pci_dma_sync_single_for_cpu(adapter->pdev, pci_unmap_addr(ce, dma_addr),
+			    pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+	CH_ERR("%s: unexpected offload packet, cmd %u\n",
+	       adapter->name, *skb->data);
+	recycle_fl_buf(fl, fl->cidx);
+}
+
+/*
+ * Write the command descriptors to transmit the given skb starting at
+ * descriptor pidx with the given generation.
+ */
+static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
+				  unsigned int pidx, unsigned int gen,
+				  struct cmdQ *q)
+{
+	dma_addr_t mapping;
+	struct cmdQ_e *e, *e1;
+	struct cmdQ_ce *ce;
+	unsigned int i, flags, nfrags = skb_shinfo(skb)->nr_frags;
+
+	mapping = pci_map_single(adapter->pdev, skb->data,
+				 skb->len - skb->data_len, PCI_DMA_TODEVICE);
+	ce = &q->centries[pidx];
+	ce->skb = NULL;
+	pci_unmap_addr_set(ce, dma_addr, mapping);
+	pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len);
+
+	flags = F_CMD_DATAVALID | F_CMD_SOP | V_CMD_EOP(nfrags == 0) |
+		V_CMD_GEN2(gen);
+	e = &q->entries[pidx];
+	e->addr_lo = (u32)mapping;
+	e->addr_hi = (u64)mapping >> 32;
+	e->len_gen = V_CMD_LEN(skb->len - skb->data_len) | V_CMD_GEN1(gen);
+	for (e1 = e, i = 0; nfrags--; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		ce++;
+		e1++;
+		if (++pidx == q->size) {
+			pidx = 0;
+			gen ^= 1;
+			ce = q->centries;
+			e1 = q->entries;
+		}
+
+		mapping = pci_map_page(adapter->pdev, frag->page,
+				       frag->page_offset, frag->size,
+				       PCI_DMA_TODEVICE);
+		ce->skb = NULL;
+		pci_unmap_addr_set(ce, dma_addr, mapping);
+		pci_unmap_len_set(ce, dma_len, frag->size);
+
+		e1->addr_lo = (u32)mapping;
+		e1->addr_hi = (u64)mapping >> 32;
+		e1->len_gen = V_CMD_LEN(frag->size) | V_CMD_GEN1(gen);
+		e1->flags = F_CMD_DATAVALID | V_CMD_EOP(nfrags == 0) |
+			    V_CMD_GEN2(gen);
+	}
+
+	ce->skb = skb;
+	wmb();
+	e->flags = flags;
+}
+
+/*
+ * Clean up completed Tx buffers.
+ */
+static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
+{
+	unsigned int reclaim = q->processed - q->cleaned;
+
+	if (reclaim) {
+		free_cmdQ_buffers(sge, q, reclaim);
+		q->cleaned += reclaim;
+	}
+}
+
+#ifndef SET_ETHTOOL_OPS
+# define __netif_rx_complete(dev) netif_rx_complete(dev)
+#endif
+
+/*
+ * We cannot use the standard netif_rx_schedule_prep() because we have multiple
+ * ports plus the TOE all multiplexing onto a single response queue, so
+ * accepting new responses cannot depend on the state of any particular port.
+ * Define our own equivalent that omits the netif_running() test.
+ */
+static inline int napi_schedule_prep(struct net_device *dev)
+{
+	return !test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state);
+}
+
+
+/**
+ *	sge_rx - process an ingress ethernet packet
+ *	@sge: the sge structure
+ *	@fl: the free list that contains the packet buffer
+ *	@len: the packet length
+ *
+ *	Process an ingress ethernet packet and deliver it to the stack.
+ */
+static int sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
+{
+	struct sk_buff *skb;
+	struct cpl_rx_pkt *p;
+	struct adapter *adapter = sge->adapter;
+
+	sge->stats.ethernet_pkts++;
+	skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad,
+			 sge->rx_pkt_pad, 2, SGE_RX_COPY_THRES,
+			 SGE_RX_DROP_THRES);
+	if (!skb) {
+		sge->port_stats[0].rx_drops++; /* charge only port 0 for now */
+		return 0;
+	}
+
+	p = (struct cpl_rx_pkt *)skb->data;
+	skb_pull(skb, sizeof(*p));
+	skb->dev = adapter->port[p->iff].dev;
+	skb->dev->last_rx = jiffies;
+	skb->protocol = eth_type_trans(skb, skb->dev);
+	if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
+	    skb->protocol == htons(ETH_P_IP) &&
+	    (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
+		sge->port_stats[p->iff].rx_cso_good++;
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	} else
+		skb->ip_summed = CHECKSUM_NONE;
+
+	if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
+		sge->port_stats[p->iff].vlan_xtract++;
+		if (adapter->params.sge.polling)
+			vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
+						 ntohs(p->vlan));
+		else
+			vlan_hwaccel_rx(skb, adapter->vlan_grp,
+					ntohs(p->vlan));
+	} else if (adapter->params.sge.polling)
+		netif_receive_skb(skb);
+	else
+		netif_rx(skb);
+	return 0;
+}
+
+/*
+ * Returns true if a command queue has enough available descriptors that
+ * we can resume Tx operation after temporarily disabling its packet queue.
+ */
+static inline int enough_free_Tx_descs(const struct cmdQ *q)
+{
+	unsigned int r = q->processed - q->cleaned;
+
+	return q->in_use - r < (q->size >> 1);
+}
+
+/*
+ * Called when sufficient space has become available in the SGE command queues
+ * after the Tx packet schedulers have been suspended to restart the Tx path.
+ */
+static void restart_tx_queues(struct sge *sge)
+{
+	struct adapter *adap = sge->adapter;
+
+	if (enough_free_Tx_descs(&sge->cmdQ[0])) {
+		int i;
+
+		for_each_port(adap, i) {
+			struct net_device *nd = adap->port[i].dev;
+
+			if (test_and_clear_bit(nd->if_port,
+					       &sge->stopped_tx_queues) &&
+			    netif_running(nd)) {
+				sge->stats.cmdQ_restarted[3]++;
+				netif_wake_queue(nd);
+			}
+		}
+	}
+}
+
+/*
+ * update_tx_info is called from the interrupt handler/NAPI to process the
+ * cmdQ0 credit-return information.
+ */
+static unsigned int update_tx_info(struct adapter *adapter, 
+					  unsigned int flags, 
+					  unsigned int pr0)
+{
+	struct sge *sge = adapter->sge;
+	struct cmdQ *cmdq = &sge->cmdQ[0];
+
+	cmdq->processed += pr0;
+
+	if (flags & F_CMDQ0_ENABLE) {
+		clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);
+	
+		if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
+		    !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
+			set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
+			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
+		}
+	 	flags &= ~F_CMDQ0_ENABLE;
+	}
+	
+	if (unlikely(sge->stopped_tx_queues != 0))
+		restart_tx_queues(sge);
+
+	return flags;
+}
+
+/*
+ * Process SGE responses, up to the supplied budget.  Returns the number of
+ * responses processed.  A negative budget is effectively unlimited.
+ */
+static int process_responses(struct adapter *adapter, int budget)
+{
+	struct sge *sge = adapter->sge;
+	struct respQ *q = &sge->respQ;
+	struct respQ_e *e = &q->entries[q->cidx];
+	int budget_left = budget;
+	unsigned int flags = 0;
+	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
+
+	while (likely(budget_left && e->GenerationBit == q->genbit)) {
+		flags |= e->Qsleeping;
+		
+		cmdq_processed[0] += e->Cmdq0CreditReturn;
+		cmdq_processed[1] += e->Cmdq1CreditReturn;
+		
+		/* We batch updates to the TX side to avoid cacheline
+		 * ping-pong of TX state information on MP where the sender
+		 * might run on a different CPU than this function...
+		 */
+		if (unlikely(flags & F_CMDQ0_ENABLE || cmdq_processed[0] > 64)) {
+			flags = update_tx_info(adapter, flags, cmdq_processed[0]);
+			cmdq_processed[0] = 0;
+		}
+		if (unlikely(cmdq_processed[1] > 16)) {
+			sge->cmdQ[1].processed += cmdq_processed[1];
+			cmdq_processed[1] = 0;
+		}
+		if (likely(e->DataValid)) {
+			struct freelQ *fl = &sge->freelQ[e->FreelistQid];
+
+			if (unlikely(!e->Sop || !e->Eop))
+				BUG();
+			if (unlikely(e->Offload))
+				unexpected_offload(adapter, fl);
+			else
+				sge_rx(sge, fl, e->BufferLength);
+
+			/*
+			 * Note: this depends on each packet consuming a
+			 * single free-list buffer; cf. the BUG above.
+			 */
+			if (++fl->cidx == fl->size)
+				fl->cidx = 0;
+			if (unlikely(--fl->credits <
+				     fl->size - SGE_FREEL_REFILL_THRESH))
+				refill_free_list(sge, fl);
+		} else
+			sge->stats.pure_rsps++;
+
+		e++;
+		if (unlikely(++q->cidx == q->size)) {
+			q->cidx = 0;
+			q->genbit ^= 1;
+			e = q->entries;
+		}
+		prefetch(e);
+
+		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
+			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
+			q->credits = 0;
+		}
+		--budget_left;
+	}
+
+	flags = update_tx_info(adapter, flags, cmdq_processed[0]); 
+	sge->cmdQ[1].processed += cmdq_processed[1];
+
+	budget -= budget_left;
+	return budget;
+}
+
+/*
+ * A simpler version of process_responses() that handles only pure (i.e.,
+ * non data-carrying) responses.  Such responses are too lightweight to justify
+ * calling a softirq when using NAPI, so we handle them specially in hard
+ * interrupt context.  The function is called with a pointer to a response,
+ * which the caller must ensure is a valid pure response.  Returns 1 if it
+ * encounters a valid data-carrying response, 0 otherwise.
+ */
+static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
+{
+	struct sge *sge = adapter->sge;
+	struct respQ *q = &sge->respQ;
+	unsigned int flags = 0;
+	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
+
+	do {
+		flags |= e->Qsleeping;
+
+		cmdq_processed[0] += e->Cmdq0CreditReturn;
+		cmdq_processed[1] += e->Cmdq1CreditReturn;
+		
+		e++;
+		if (unlikely(++q->cidx == q->size)) {
+			q->cidx = 0;
+			q->genbit ^= 1;
+			e = q->entries;
+		}
+		prefetch(e);
+
+		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
+			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
+			q->credits = 0;
+		}
+		sge->stats.pure_rsps++;
+	} while (e->GenerationBit == q->genbit && !e->DataValid);
+
+	flags = update_tx_info(adapter, flags, cmdq_processed[0]); 
+	sge->cmdQ[1].processed += cmdq_processed[1];
+
+	return e->GenerationBit == q->genbit;
+}
+
+/*
+ * Handler for new data events when using NAPI.  This does not need any locking
+ * or protection from interrupts as data interrupts are off at this point and
+ * other adapter interrupts do not interfere.
+ */
+static int t1_poll(struct net_device *dev, int *budget)
+{
+	struct adapter *adapter = dev->priv;
+	int effective_budget = min(*budget, dev->quota);
+
+	int work_done = process_responses(adapter, effective_budget);
+	*budget -= work_done;
+	dev->quota -= work_done;
+
+	if (work_done >= effective_budget)
+		return 1;
+
+	__netif_rx_complete(dev);
+
+	/*
+	 * Because we don't atomically flush the following write, it is
+	 * possible that in very rare cases it reaches the device in a way
+	 * that races with a new response being written plus an error
+	 * interrupt, causing the NAPI interrupt handler below to return
+	 * unhandled status to the OS.  Protecting against this would require
+	 * flushing the write and doing both the write and the flush with
+	 * interrupts off, which is far too expensive given the rarity of the
+	 * race.
+	 */
+	writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
+	return 0;
+}
+
+/*
+ * Returns true if the device is already scheduled for polling.
+ */
+static inline int napi_is_scheduled(struct net_device *dev)
+{
+	return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
+}
+
+/*
+ * NAPI version of the main interrupt handler.
+ */
+static irqreturn_t t1_interrupt_napi(int irq, void *data, struct pt_regs *regs)
+{
+	int handled;
+	struct adapter *adapter = data;
+	struct sge *sge = adapter->sge;
+	struct respQ *q = &adapter->sge->respQ;
+
+	/*
+	 * Clear the SGE_DATA interrupt first thing.  Normally the NAPI
+	 * handler has control of the response queue and the interrupt handler
+	 * can look at the queue reliably only once it knows NAPI is off.
+	 * We can't wait that long to clear the SGE_DATA interrupt because we
+	 * could race with t1_poll rearming the SGE interrupt, so we need to
+	 * clear the interrupt speculatively and really early on.
+	 */
+	writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
+
+	spin_lock(&adapter->async_lock);
+	if (!napi_is_scheduled(sge->netdev)) {
+		struct respQ_e *e = &q->entries[q->cidx];
+
+		if (e->GenerationBit == q->genbit) {
+			if (e->DataValid ||
+			    process_pure_responses(adapter, e)) {
+				if (likely(napi_schedule_prep(sge->netdev)))
+					__netif_rx_schedule(sge->netdev);
+				else
+					printk(KERN_CRIT
+					       "NAPI schedule failure!\n");
+			} else
+				writel(q->cidx, adapter->regs + A_SG_SLEEPING);
+			handled = 1;
+			goto unlock;
+		} else
+			writel(q->cidx, adapter->regs + A_SG_SLEEPING);
+	} else if (readl(adapter->regs + A_PL_CAUSE) & F_PL_INTR_SGE_DATA)
+		printk(KERN_ERR "data interrupt while NAPI running\n");
+	
+	handled = t1_slow_intr_handler(adapter);
+	if (!handled)
+		sge->stats.unhandled_irqs++;
+ unlock:
+	spin_unlock(&adapter->async_lock);
+	return IRQ_RETVAL(handled != 0);
+}
+
+/*
+ * Main interrupt handler, optimized assuming that we took a 'DATA'
+ * interrupt.
+ *
+ * 1. Clear the interrupt
+ * 2. Loop while we find valid descriptors and process them; accumulate
+ *      information that can be processed after the loop
+ * 3. Tell the SGE at which index we stopped processing descriptors
+ * 4. Bookkeeping; free TX buffers, ring doorbell if there are any
+ *      outstanding TX buffers waiting, replenish RX buffers, potentially
+ *      reenable upper layers if they were turned off due to lack of TX
+ *      resources which are available again.
+ * 5. If we took an interrupt but no valid respQ descriptors were found, we
+ *      let the slow_intr_handler run and do error handling.
+ */
+static irqreturn_t t1_interrupt(int irq, void *cookie, struct pt_regs *regs)
+{
+	int work_done;
+	struct respQ_e *e;
+	struct adapter *adapter = cookie;
+	struct respQ *Q = &adapter->sge->respQ;
+
+	spin_lock(&adapter->async_lock);
+	e = &Q->entries[Q->cidx];
+	prefetch(e);
+
+	writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
+
+	if (likely(e->GenerationBit == Q->genbit))
+		work_done = process_responses(adapter, -1);
+	else
+		work_done = t1_slow_intr_handler(adapter);
+
+	/*
+	 * The unconditional clearing of the PL_CAUSE above may have raced
+	 * with DMA completion and the corresponding generation of a response
+	 * to cause us to miss the resulting data interrupt.  The next write
+	 * is also unconditional to recover the missed interrupt and render
+	 * this race harmless.
+	 */
+	writel(Q->cidx, adapter->regs + A_SG_SLEEPING);
+
+	if (!work_done)
+		adapter->sge->stats.unhandled_irqs++;
+	spin_unlock(&adapter->async_lock);
+	return IRQ_RETVAL(work_done != 0);
+}
+
+intr_handler_t t1_select_intr_handler(adapter_t *adapter)
+{
+	return adapter->params.sge.polling ? t1_interrupt_napi : t1_interrupt;
+}
+
+/*
+ * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
+ *
+ * The code figures out how many entries the sk_buff will require in the
+ * cmdQ and updates the cmdQ data structure with the state once the enqueue
+ * has completed. Then it doesn't access the global structure anymore, but
+ * uses the corresponding fields on the stack. In conjunction with a spinlock
+ * around that code, we can make the function reentrant without holding the
+ * lock when we actually enqueue (which might be expensive, especially on
+ * architectures with IO MMUs).
+ *
+ * This runs with softirqs disabled.
+ */
+unsigned int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
+		       unsigned int qid, struct net_device *dev)
+{
+	struct sge *sge = adapter->sge;
+	struct cmdQ *q = &sge->cmdQ[qid];
+	unsigned int credits, pidx, genbit, count;
+
+	spin_lock(&q->lock);
+	reclaim_completed_tx(sge, q);
+
+	pidx = q->pidx;
+	credits = q->size - q->in_use;
+	count = 1 + skb_shinfo(skb)->nr_frags;
+
+	{	/* Ethernet packet */
+	 	if (unlikely(credits < count)) {
+			netif_stop_queue(dev);
+			set_bit(dev->if_port, &sge->stopped_tx_queues);
+			sge->stats.cmdQ_full[3]++;
+			spin_unlock(&q->lock);
+			CH_ERR("%s: Tx ring full while queue awake!\n",
+			       adapter->name);
+			return 1;
+		}
+		if (unlikely(credits - count < q->stop_thres)) {
+			sge->stats.cmdQ_full[3]++;
+			netif_stop_queue(dev);
+			set_bit(dev->if_port, &sge->stopped_tx_queues);
+		}
+	}
+	q->in_use += count;
+	genbit = q->genbit;
+	q->pidx += count;
+	if (q->pidx >= q->size) {
+		q->pidx -= q->size;
+		q->genbit ^= 1;
+	}
+	spin_unlock(&q->lock);
+
+	write_tx_descs(adapter, skb, pidx, genbit, q);
+
+	/*
+	 * We always ring the doorbell for cmdQ1.  For cmdQ0, we only ring
+ * the doorbell if the Q is asleep. There is a natural race where the
+ * hardware goes to sleep just after we check; in that case the interrupt
+ * handler will detect the outstanding TX packet and ring the doorbell
+ * for us.
+	 */
+	if (qid)
+		doorbell_pio(adapter, F_CMDQ1_ENABLE);
+	else {
+		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
+			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
+		}
+	}
+	return 0;
+}
+
+#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))
+
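The macro above packs the MSS into the low 14 bits and the Ethernet framing type into bits 15:14 of a 16-bit field. A brief illustration, using an assumed type value of 1 and an MSS of 1460 (the constants are hypothetical, only the bit layout is taken from the macro):

    /* (1460 & 0x3FFF) = 0x05B4,  (1 << 14) = 0x4000
     * MK_ETH_TYPE_MSS(1, 1460)   = 0x4000 | 0x05B4 = 0x45B4
     */
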
+/*
+ *	eth_hdr_len - return the length of an Ethernet header
+ *	@data: pointer to the start of the Ethernet header
+ *
+ *	Returns the length of an Ethernet header, including optional VLAN tag.
+ */
+static inline int eth_hdr_len(const void *data)
+{
+	const struct ethhdr *e = data;
+
+	return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN;
+}
+
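With the standard kernel constants ETH_HLEN = 14 and VLAN_ETH_HLEN = 18, the helper above evaluates as follows (sketch only):

    /* untagged frame:        eth_hdr_len(data) == ETH_HLEN      == 14 bytes
     * 802.1Q-tagged frame:   eth_hdr_len(data) == VLAN_ETH_HLEN == 18 bytes
     */
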
+/*
+ * Adds the CPL header to the sk_buff and passes it to t1_sge_tx.
+ */
+int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct adapter *adapter = dev->priv;
+	struct sge_port_stats *st = &adapter->sge->port_stats[dev->if_port];
+	struct sge *sge = adapter->sge;
+	struct cpl_tx_pkt *cpl;
+
+#ifdef NETIF_F_TSO
+	if (skb_shinfo(skb)->tso_size) {
+		int eth_type;
+		struct cpl_tx_pkt_lso *hdr;
+
+		st->tso++;
+
+		eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
+			CPL_ETH_II : CPL_ETH_II_VLAN;
+
+		hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
+		hdr->opcode = CPL_TX_PKT_LSO;
+		hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
+		hdr->ip_hdr_words = skb->nh.iph->ihl;
+		hdr->tcp_hdr_words = skb->h.th->doff;
+		hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
+						skb_shinfo(skb)->tso_size));
+		hdr->len = htonl(skb->len - sizeof(*hdr));
+		cpl = (struct cpl_tx_pkt *)hdr;
+		sge->stats.tx_lso_pkts++;
+	} else
+#endif
+	{
+		/*
+		 * Packets shorter than ETH_HLEN can break the MAC; drop them
+		 * early.  Also, we may get oversized packets because some
+		 * parts of the kernel don't handle our unusual hard_header_len
+		 * correctly; drop those too.
+		 */
+		if (unlikely(skb->len < ETH_HLEN ||
+			     skb->len > dev->mtu + eth_hdr_len(skb->data))) {
+			dev_kfree_skb_any(skb);
+			return NET_XMIT_SUCCESS;
+		}
+
+		/*
+		 * We are using a non-standard hard_header_len and some kernel
+		 * components, such as pktgen, do not handle it right.
+		 * Complain when this happens but try to fix things up.
+		 */
+		if (unlikely(skb_headroom(skb) <
+			     dev->hard_header_len - ETH_HLEN)) {
+			struct sk_buff *orig_skb = skb;
+
+			if (net_ratelimit())
+				printk(KERN_ERR "%s: inadequate headroom in "
+				       "Tx packet\n", dev->name);
+			skb = skb_realloc_headroom(skb, sizeof(*cpl));
+			dev_kfree_skb_any(orig_skb);
+			if (!skb)
+				return -ENOMEM;
+		}
+
+		if (!(adapter->flags & UDP_CSUM_CAPABLE) &&
+		    skb->ip_summed == CHECKSUM_HW &&
+		    skb->nh.iph->protocol == IPPROTO_UDP)
+			if (unlikely(skb_checksum_help(skb, 0))) {
+				dev_kfree_skb_any(skb);
+				return -ENOMEM;
+			}
+
+		/* Catch the gratuitous ARP; we will reuse it later to flush
+		 * out stuck ESPI packets.
+		 */
+		if (unlikely(!adapter->sge->espibug_skb)) {
+			if (skb->protocol == htons(ETH_P_ARP) &&
+			    skb->nh.arph->ar_op == htons(ARPOP_REQUEST)) {
+				adapter->sge->espibug_skb = skb;
+				/* We want to re-use this skb later. We
+				 * simply bump the reference count and it
+				 * will not be freed...
+				 */
+				skb = skb_get(skb);
+			}
+		}
+
+		cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
+		cpl->opcode = CPL_TX_PKT;
+		cpl->ip_csum_dis = 1;    /* SW calculates IP csum */
+		cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_HW ? 0 : 1;
+		/* the length field isn't used so don't bother setting it */
+
+		st->tx_cso += (skb->ip_summed == CHECKSUM_HW);
+		sge->stats.tx_do_cksum += (skb->ip_summed == CHECKSUM_HW);
+		sge->stats.tx_reg_pkts++;
+	}
+	cpl->iff = dev->if_port;
+
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
+		cpl->vlan_valid = 1;
+		cpl->vlan = htons(vlan_tx_tag_get(skb));
+		st->vlan_insert++;
+	} else
+#endif
+		cpl->vlan_valid = 0;
+
+	dev->trans_start = jiffies;
+	return t1_sge_tx(skb, adapter, 0, dev);
+}
+
+/*
+ * Callback for the Tx buffer reclaim timer.  Runs with softirqs disabled.
+ */
+static void sge_tx_reclaim_cb(unsigned long data)
+{
+	int i;
+	struct sge *sge = (struct sge *)data;
+
+	for (i = 0; i < SGE_CMDQ_N; ++i) {
+		struct cmdQ *q = &sge->cmdQ[i];
+
+		if (!spin_trylock(&q->lock))
+			continue;
+
+		reclaim_completed_tx(sge, q);
+		if (i == 0 && q->in_use)   /* flush pending credits */
+			writel(F_CMDQ0_ENABLE,
+				sge->adapter->regs + A_SG_DOORBELL);
+
+		spin_unlock(&q->lock);
+	}
+	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
+}
+
+/*
+ * Propagate changes of the SGE coalescing parameters to the HW.
+ */
+int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
+{
+	sge->netdev->poll = t1_poll;
+	sge->fixed_intrtimer = p->rx_coalesce_usecs *
+		core_ticks_per_usec(sge->adapter);
+	writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
+	return 0;
+}
+
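As a rough worked example, not additional configuration: with the default rx_coalesce_usecs of 50 set in t1_sge_create() below and a 125 MHz core clock (assuming core_ticks_per_usec() is simply the core clock divided by 10^6, i.e. 125 ticks per microsecond), the value programmed into A_SG_INTRTIMER would be:

    /* fixed_intrtimer = 50 * 125 = 6250 core ticks, roughly 50 us */
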
+/*
+ * Allocates both RX and TX resources and configures the SGE. However,
+ * the hardware is not enabled yet.
+ */
+int t1_sge_configure(struct sge *sge, struct sge_params *p)
+{
+	if (alloc_rx_resources(sge, p))
+		return -ENOMEM;
+	if (alloc_tx_resources(sge, p)) {
+		free_rx_resources(sge);
+		return -ENOMEM;
+	}
+	configure_sge(sge, p);
+
+	/*
+	 * Now that we have sized the free lists, calculate the payload
+	 * capacity of the large buffers.  Other parts of the driver use
+	 * this to set the max offload coalescing size so that RX packets
+	 * do not overflow our large buffers.
+	 */
+	p->large_buf_capacity = jumbo_payload_capacity(sge);
+	return 0;
+}
+
+/*
+ * Disables the DMA engine.
+ */
+void t1_sge_stop(struct sge *sge)
+{
+	writel(0, sge->adapter->regs + A_SG_CONTROL);
+	(void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
+	if (is_T2(sge->adapter))
+		del_timer_sync(&sge->espibug_timer);
+	del_timer_sync(&sge->tx_reclaim_timer);
+}
+
+/*
+ * Enables the DMA engine.
+ */
+void t1_sge_start(struct sge *sge)
+{
+	refill_free_list(sge, &sge->freelQ[0]);
+	refill_free_list(sge, &sge->freelQ[1]);
+
+	writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
+	doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
+	(void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
+
+	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
+
+	if (is_T2(sge->adapter)) 
+		mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
+}
+
+/*
+ * Callback for the T2 ESPI 'stuck packet feature' workaround.
+ */
+static void espibug_workaround(void *data)
+{
+	struct adapter *adapter = (struct adapter *)data;
+	struct sge *sge = adapter->sge;
+
+	if (netif_running(adapter->port[0].dev)) {
+		struct sk_buff *skb = sge->espibug_skb;
+
+		u32 seop = t1_espi_get_mon(adapter, 0x930, 0);
+
+		if ((seop & 0xfff0fff) == 0xfff && skb) {
+			if (!skb->cb[0]) {
+				u8 ch_mac_addr[ETH_ALEN] =
+				    {0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
+				memcpy(skb->data + sizeof(struct cpl_tx_pkt),
+				    ch_mac_addr, ETH_ALEN);
+				memcpy(skb->data + skb->len - 10, ch_mac_addr,
+				    ETH_ALEN);
+				skb->cb[0] = 0xff;
+			}
+
+			/* bump the reference count to avoid freeing of the
+			 * skb once the DMA has completed.
+			 */
+			skb = skb_get(skb);
+			t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
+		}
+	}
+	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
+}
+
+/*
+ * Creates a t1_sge structure and returns suggested resource parameters.
+ */
+struct sge * __devinit t1_sge_create(struct adapter *adapter,
+				     struct sge_params *p)
+{
+	struct sge *sge = kmalloc(sizeof(*sge), GFP_KERNEL);
+
+	if (!sge)
+		return NULL;
+	memset(sge, 0, sizeof(*sge));
+
+	sge->adapter = adapter;
+	sge->netdev = adapter->port[0].dev;
+	sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
+	sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
+
+	init_timer(&sge->tx_reclaim_timer);
+	sge->tx_reclaim_timer.data = (unsigned long)sge;
+	sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;
+
+	if (is_T2(sge->adapter)) {
+		init_timer(&sge->espibug_timer);
+		sge->espibug_timer.function = (void *)&espibug_workaround;
+		sge->espibug_timer.data = (unsigned long)sge->adapter;
+		sge->espibug_timeout = 1;
+	}
+
+	p->cmdQ_size[0] = SGE_CMDQ0_E_N;
+	p->cmdQ_size[1] = SGE_CMDQ1_E_N;
+	p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
+	p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
+	p->rx_coalesce_usecs =  50;
+	p->coalesce_enable = 0;
+	p->sample_interval_usecs = 0;
+	p->polling = 0;
+
+	return sge;
+}
diff --git a/drivers/net/chelsio/sge.h b/drivers/net/chelsio/sge.h
new file mode 100644
--- /dev/null
+++ b/drivers/net/chelsio/sge.h
@@ -0,0 +1,105 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: sge.h                                                               *
+ * $Revision: 1.11 $                                                          *
+ * $Date: 2005/06/21 22:10:55 $                                              *
+ * Description:                                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#ifndef _CXGB_SGE_H_
+#define _CXGB_SGE_H_
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <asm/byteorder.h>
+
+#ifndef IRQ_RETVAL
+#define IRQ_RETVAL(x)
+typedef void irqreturn_t;
+#endif
+
+typedef irqreturn_t (*intr_handler_t)(int, void *, struct pt_regs *);
+
+struct sge_intr_counts {
+	unsigned int respQ_empty;      /* # times respQ empty */
+	unsigned int respQ_overflow;   /* # respQ overflow (fatal) */
+	unsigned int freelistQ_empty;  /* # times freelist empty */
+	unsigned int pkt_too_big;      /* packet too large (fatal) */
+	unsigned int pkt_mismatch;
+	unsigned int cmdQ_full[3];     /* not HW IRQ, host cmdQ[] full */
+	unsigned int cmdQ_restarted[3];/* # of times cmdQ X was restarted */
+	unsigned int ethernet_pkts;    /* # of Ethernet packets received */
+	unsigned int offload_pkts;     /* # of offload packets received */
+	unsigned int offload_bundles;  /* # of offload pkt bundles delivered */
+	unsigned int pure_rsps;        /* # of non-payload responses */
+	unsigned int unhandled_irqs;   /* # of unhandled interrupts */
+	unsigned int tx_ipfrags;
+	unsigned int tx_reg_pkts;
+	unsigned int tx_lso_pkts;
+	unsigned int tx_do_cksum;
+};
+
+struct sge_port_stats {
+	unsigned long rx_cso_good;     /* # of successful RX csum offloads */
+	unsigned long tx_cso;          /* # of TX checksum offloads */
+	unsigned long vlan_xtract;     /* # of VLAN tag extractions */
+	unsigned long vlan_insert;     /* # of VLAN tag insertions */
+	unsigned long tso;             /* # of TSO requests */
+	unsigned long rx_drops;        /* # of packets dropped due to no mem */
+};
+
+struct sk_buff;
+struct net_device;
+struct adapter;
+struct sge_params;
+struct sge;
+
+struct sge *t1_sge_create(struct adapter *, struct sge_params *);
+int t1_sge_configure(struct sge *, struct sge_params *);
+int t1_sge_set_coalesce_params(struct sge *, struct sge_params *);
+void t1_sge_destroy(struct sge *);
+intr_handler_t t1_select_intr_handler(adapter_t *adapter);
+unsigned int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
+		       unsigned int qid, struct net_device *netdev);
+int t1_start_xmit(struct sk_buff *skb, struct net_device *dev);
+void t1_set_vlan_accel(struct adapter *adapter, int on_off);
+void t1_sge_start(struct sge *);
+void t1_sge_stop(struct sge *);
+int t1_sge_intr_error_handler(struct sge *);
+void t1_sge_intr_enable(struct sge *);
+void t1_sge_intr_disable(struct sge *);
+void t1_sge_intr_clear(struct sge *);
+const struct sge_intr_counts *t1_sge_get_intr_counts(struct sge *sge);
+const struct sge_port_stats *t1_sge_get_port_stats(struct sge *sge, int port);
+
+#endif /* _CXGB_SGE_H_ */
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
new file mode 100644
--- /dev/null
+++ b/drivers/net/chelsio/subr.c
@@ -0,0 +1,812 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: subr.c                                                              *
+ * $Revision: 1.27 $                                                         *
+ * $Date: 2005/06/22 01:08:36 $                                              *
+ * Description:                                                              *
+ *  Various subroutines (intr,pio,etc.) used by Chelsio 10G Ethernet driver. *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#include "common.h"
+#include "elmer0.h"
+#include "regs.h"
+#include "gmac.h"
+#include "cphy.h"
+#include "sge.h"
+#include "espi.h"
+
+/**
+ *	t1_wait_op_done - wait until an operation is completed
+ *	@adapter: the adapter performing the operation
+ *	@reg: the register to check for completion
+ *	@mask: a single-bit field within @reg that indicates completion
+ *	@polarity: the value of the field when the operation is completed
+ *	@attempts: number of check iterations
+ *	@delay: delay in usecs between iterations
+ *
+ *	Wait until an operation is completed by checking a bit in a register
+ *	up to @attempts times.  Returns %0 if the operation completes and %1
+ *	otherwise.
+ */
+static int t1_wait_op_done(adapter_t *adapter, int reg, u32 mask, int polarity,
+		    int attempts, int delay)
+{
+	while (1) {
+		u32 val = readl(adapter->regs + reg) & mask;
+
+		if (!!val == polarity)
+			return 0;
+		if (--attempts == 0)
+			return 1;
+		if (delay)
+			udelay(delay);
+	}
+}
+
+#define TPI_ATTEMPTS 50
+
+/*
+ * Write a register over the TPI interface (unlocked and locked versions).
+ */
+static int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value)
+{
+	int tpi_busy;
+
+	writel(addr, adapter->regs + A_TPI_ADDR);
+	writel(value, adapter->regs + A_TPI_WR_DATA);
+	writel(F_TPIWR, adapter->regs + A_TPI_CSR);
+
+	tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1,
+				   TPI_ATTEMPTS, 3);
+	if (tpi_busy)
+		CH_ALERT("%s: TPI write to 0x%x failed\n",
+			 adapter->name, addr);
+	return tpi_busy;
+}
+
+int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value)
+{
+	int ret;
+
+	spin_lock(&(adapter)->tpi_lock);
+	ret = __t1_tpi_write(adapter, addr, value);
+	spin_unlock(&(adapter)->tpi_lock);
+	return ret;
+}
+
+/*
+ * Read a register over the TPI interface (unlocked and locked versions).
+ */
+static int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
+{
+	int tpi_busy;
+
+	writel(addr, adapter->regs + A_TPI_ADDR);
+	writel(0, adapter->regs + A_TPI_CSR);
+
+	tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1,
+				   TPI_ATTEMPTS, 3);
+	if (tpi_busy)
+		CH_ALERT("%s: TPI read from 0x%x failed\n",
+			 adapter->name, addr);
+	else
+		*valp = readl(adapter->regs + A_TPI_RD_DATA);
+	return tpi_busy;
+}
+
+int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
+{
+	int ret;
+
+	spin_lock(&(adapter)->tpi_lock);
+	ret = __t1_tpi_read(adapter, addr, valp);
+	spin_unlock(&(adapter)->tpi_lock);
+	return ret;
+}
+
+/*
+ * Called when a port's link settings change to propagate the new values to the
+ * associated PHY and MAC.  After performing the common tasks it invokes an
+ * OS-specific handler.
+ */
+/* static */ void link_changed(adapter_t *adapter, int port_id)
+{
+	int link_ok, speed, duplex, fc;
+	struct cphy *phy = adapter->port[port_id].phy;
+	struct link_config *lc = &adapter->port[port_id].link_config;
+
+	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
+
+	lc->speed = speed < 0 ? SPEED_INVALID : speed;
+	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
+	if (!(lc->requested_fc & PAUSE_AUTONEG))
+		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+
+	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
+		/* Set MAC speed, duplex, and flow control to match PHY. */
+		struct cmac *mac = adapter->port[port_id].mac;
+
+		mac->ops->set_speed_duplex_fc(mac, speed, duplex, fc);
+		lc->fc = (unsigned char)fc;
+	}
+	t1_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
+}
+
+static int t1_pci_intr_handler(adapter_t *adapter)
+{
+	u32 pcix_cause;
+
+    	pci_read_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, &pcix_cause);
+
+	if (pcix_cause) {
+		pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE,
+					 pcix_cause);
+		t1_fatal_err(adapter);    /* PCI errors are fatal */
+	}
+	return 0;
+}
+
+
+/*
+ * Wait until Elmer's MI1 interface is ready for new operations.
+ */
+static int mi1_wait_until_ready(adapter_t *adapter, int mi1_reg)
+{
+	int attempts = 100, busy;
+
+	do {
+		u32 val;
+
+		__t1_tpi_read(adapter, mi1_reg, &val);
+		busy = val & F_MI1_OP_BUSY;
+		if (busy)
+			udelay(10);
+	} while (busy && --attempts);
+	if (busy)
+		CH_ALERT("%s: MDIO operation timed out\n",
+			 adapter->name);
+	return busy;
+}
+
+/*
+ * MI1 MDIO initialization.
+ */
+static void mi1_mdio_init(adapter_t *adapter, const struct board_info *bi)
+{
+	u32 clkdiv = bi->clock_elmer0 / (2 * bi->mdio_mdc) - 1;
+	u32 val = F_MI1_PREAMBLE_ENABLE | V_MI1_MDI_INVERT(bi->mdio_mdiinv) |
+		V_MI1_MDI_ENABLE(bi->mdio_mdien) | V_MI1_CLK_DIV(clkdiv);
+
+	if (!(bi->caps & SUPPORTED_10000baseT_Full))
+		val |= V_MI1_SOF(1);
+	t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_CFG, val);
+}
+
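Plugging in the N110/N210 board-table values defined later in this file (clock_elmer0 = 44, mdio_mdc = 1, both presumably in MHz), the divider computation above works out to (worked arithmetic only):

    /* clkdiv = 44 / (2 * 1) - 1 = 21 */
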
+static int mi1_mdio_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
+			     int reg_addr, unsigned int *valp)
+{
+	u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr);
+
+	spin_lock(&(adapter)->tpi_lock);
+
+	/* Write the address we want. */
+	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
+	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, reg_addr);
+	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP,
+		       MI1_OP_INDIRECT_ADDRESS);
+	mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
+
+	/* Write the operation we want. */
+	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_READ);
+	mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
+
+	/* Read the data. */
+	__t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, valp);
+	spin_unlock(&(adapter)->tpi_lock);
+	return 0;
+}
+
+static int mi1_mdio_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
+			      int reg_addr, unsigned int val)
+{
+	u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr);
+
+	spin_lock(&(adapter)->tpi_lock);
+
+	/* Write the address we want. */
+	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
+	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, reg_addr);
+	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP,
+		       MI1_OP_INDIRECT_ADDRESS);
+	mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
+
+	/* Write the data. */
+	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, val);
+	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_WRITE);
+	mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
+	spin_unlock(&(adapter)->tpi_lock);
+	return 0;
+}
+
+static struct mdio_ops mi1_mdio_ext_ops = {
+	mi1_mdio_init,
+	mi1_mdio_ext_read,
+	mi1_mdio_ext_write
+};
+
+enum {
+	CH_BRD_N110_1F,
+	CH_BRD_N210_1F,
+};
+
+static struct board_info t1_board[] = {
+
+{ CHBT_BOARD_N110, 1/*ports#*/,
+  SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE /*caps*/, CHBT_TERM_T1,
+  CHBT_MAC_PM3393, CHBT_PHY_88X2010,
+  125000000/*clk-core*/, 0/*clk-mc3*/, 0/*clk-mc4*/,
+  1/*espi-ports*/, 0/*clk-cspi*/, 44/*clk-elmer0*/, 0/*mdien*/,
+  0/*mdiinv*/, 1/*mdc*/, 0/*phybaseaddr*/, &t1_pm3393_ops,
+  &t1_mv88x201x_ops, &mi1_mdio_ext_ops,
+  "Chelsio N110 1x10GBaseX NIC" },
+
+{ CHBT_BOARD_N210, 1/*ports#*/,
+  SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE /*caps*/, CHBT_TERM_T2,
+  CHBT_MAC_PM3393, CHBT_PHY_88X2010,
+  125000000/*clk-core*/, 0/*clk-mc3*/, 0/*clk-mc4*/,
+  1/*espi-ports*/, 0/*clk-cspi*/, 44/*clk-elmer0*/, 0/*mdien*/,
+  0/*mdiinv*/, 1/*mdc*/, 0/*phybaseaddr*/, &t1_pm3393_ops,
+  &t1_mv88x201x_ops, &mi1_mdio_ext_ops,
+  "Chelsio N210 1x10GBaseX NIC" },
+
+};
+
+struct pci_device_id t1_pci_tbl[] = {
+	CH_DEVICE(7, 0, CH_BRD_N110_1F),
+	CH_DEVICE(10, 1, CH_BRD_N210_1F),
+	{ 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, t1_pci_tbl);
+
+/*
+ * Return the board_info structure with a given index.  Out-of-range indices
+ * return NULL.
+ */
+const struct board_info *t1_get_board_info(unsigned int board_id)
+{
+	return board_id < ARRAY_SIZE(t1_board) ? &t1_board[board_id] : NULL;
+}
+
+struct chelsio_vpd_t {
+	u32 format_version;
+	u8 serial_number[16];
+	u8 mac_base_address[6];
+	u8 pad[2];           /* make multiple-of-4 size requirement explicit */
+};
+
+#define EEPROMSIZE        (8 * 1024)
+#define EEPROM_MAX_POLL   4
+
+/*
+ * Read SEEPROM. A zero is written to the flag register when the address is
+ * written to the Control register. The hardware device will set the flag to
+ * one when 4 bytes have been transferred to the Data register.
+ */
+int t1_seeprom_read(adapter_t *adapter, u32 addr, u32 *data)
+{
+	int i = EEPROM_MAX_POLL;
+	u16 val;
+
+	if (addr >= EEPROMSIZE || (addr & 3))
+		return -EINVAL;
+
+	pci_write_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, (u16)addr);
+	do {
+		udelay(50);
+		pci_read_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, &val);
+	} while (!(val & F_VPD_OP_FLAG) && --i);
+
+	if (!(val & F_VPD_OP_FLAG)) {
+		CH_ERR("%s: reading EEPROM address 0x%x failed\n",
+		       adapter->name, addr);
+		return -EIO;
+	}
+	pci_read_config_dword(adapter->pdev, A_PCICFG_VPD_DATA, data);
+	*data = le32_to_cpu(*data);
+	return 0;
+}
+
+static int t1_eeprom_vpd_get(adapter_t *adapter, struct chelsio_vpd_t *vpd)
+{
+	int addr, ret = 0;
+
+	for (addr = 0; !ret && addr < sizeof(*vpd); addr += sizeof(u32))
+		ret = t1_seeprom_read(adapter, addr,
+				      (u32 *)((u8 *)vpd + addr));
+
+	return ret;
+}
+
+/*
+ * Read a port's MAC address from the VPD ROM.
+ */
+static int vpd_macaddress_get(adapter_t *adapter, int index, u8 mac_addr[])
+{
+	struct chelsio_vpd_t vpd;
+
+	if (t1_eeprom_vpd_get(adapter, &vpd))
+		return 1;
+	memcpy(mac_addr, vpd.mac_base_address, 5);
+	mac_addr[5] = vpd.mac_base_address[5] + index;
+	return 0;
+}
+
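The per-port address is formed by copying the first five bytes of the VPD base address and adding the port index to the last byte. A hypothetical example (the base address is made up):

    /* vpd.mac_base_address = 00:07:43:12:34:50, index = 1
     *          port MAC    = 00:07:43:12:34:51
     */
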
+/*
+ * Set up the MAC/PHY according to the requested link settings.
+ *
+ * If the PHY can auto-negotiate, first decide what to advertise, then
+ * enable/disable auto-negotiation as desired and reset.
+ *
+ * If the PHY does not auto-negotiate we just reset it.
+ *
+ * If auto-negotiation is off, set the MAC to the proper speed/duplex/FC,
+ * otherwise do it later based on the outcome of auto-negotiation.
+ */
+int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
+{
+	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+
+	if (lc->supported & SUPPORTED_Autoneg) {
+		lc->advertising &= ~(ADVERTISED_ASYM_PAUSE | ADVERTISED_PAUSE);
+		if (fc) {
+			lc->advertising |= ADVERTISED_ASYM_PAUSE;
+			if (fc == (PAUSE_RX | PAUSE_TX))
+				lc->advertising |= ADVERTISED_PAUSE;
+		}
+		phy->ops->advertise(phy, lc->advertising);
+
+		if (lc->autoneg == AUTONEG_DISABLE) {
+			lc->speed = lc->requested_speed;
+			lc->duplex = lc->requested_duplex;
+			lc->fc = (unsigned char)fc;
+			mac->ops->set_speed_duplex_fc(mac, lc->speed,
+						      lc->duplex, fc);
+			/* Also disables autoneg */
+			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
+			phy->ops->reset(phy, 0);
+		} else
+			phy->ops->autoneg_enable(phy); /* also resets PHY */
+	} else {
+		mac->ops->set_speed_duplex_fc(mac, -1, -1, fc);
+		lc->fc = (unsigned char)fc;
+		phy->ops->reset(phy, 0);
+	}
+	return 0;
+}
+
+/*
+ * External interrupt handler for boards using elmer0.
+ */
+int elmer0_ext_intr_handler(adapter_t *adapter)
+{
+    	struct cphy *phy;
+	int phy_cause;
+    	u32 cause;
+
+	t1_tpi_read(adapter, A_ELMER0_INT_CAUSE, &cause);
+
+	switch (board_info(adapter)->board) {
+	case CHBT_BOARD_N210:
+	case CHBT_BOARD_N110:
+		if (cause & ELMER0_GP_BIT6) { /* Marvell 88x2010 interrupt */
+			phy = adapter->port[0].phy;
+			phy_cause = phy->ops->interrupt_handler(phy);
+			if (phy_cause & cphy_cause_link_change)
+				link_changed(adapter, 0);
+		}
+		break;
+	}
+	t1_tpi_write(adapter, A_ELMER0_INT_CAUSE, cause);
+	return 0;
+}
+
+/* Enables all interrupts. */
+void t1_interrupts_enable(adapter_t *adapter)
+{
+	unsigned int i;
+	u32 pl_intr;
+
+	adapter->slow_intr_mask = F_PL_INTR_SGE_ERR;
+
+	t1_sge_intr_enable(adapter->sge);
+	if (adapter->espi) {
+		adapter->slow_intr_mask |= F_PL_INTR_ESPI;
+		t1_espi_intr_enable(adapter->espi);
+	}
+
+	/* Enable MAC/PHY interrupts for each port. */
+	for_each_port(adapter, i) {
+		adapter->port[i].mac->ops->interrupt_enable(adapter->port[i].mac);
+		adapter->port[i].phy->ops->interrupt_enable(adapter->port[i].phy);
+	}
+
+	/* Enable PCIX & external chip interrupts on ASIC boards. */
+	pl_intr = readl(adapter->regs + A_PL_ENABLE);
+
+	/* PCI-X interrupts */
+	pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE,
+			       0xffffffff);
+
+	adapter->slow_intr_mask |= F_PL_INTR_EXT | F_PL_INTR_PCIX;
+	pl_intr |= F_PL_INTR_EXT | F_PL_INTR_PCIX;
+	writel(pl_intr, adapter->regs + A_PL_ENABLE);
+}
+
+/* Disables all interrupts. */
+void t1_interrupts_disable(adapter_t* adapter)
+{
+	unsigned int i;
+
+	t1_sge_intr_disable(adapter->sge);
+	if (adapter->espi)
+		t1_espi_intr_disable(adapter->espi);
+
+	/* Disable MAC/PHY interrupts for each port. */
+	for_each_port(adapter, i) {
+		adapter->port[i].mac->ops->interrupt_disable(adapter->port[i].mac);
+		adapter->port[i].phy->ops->interrupt_disable(adapter->port[i].phy);
+	}
+
+	/* Disable PCIX & external chip interrupts. */
+	writel(0, adapter->regs + A_PL_ENABLE);
+
+	/* PCI-X interrupts */
+	pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, 0);
+
+	adapter->slow_intr_mask = 0;
+}
+
+/* Clears all interrupts */
+void t1_interrupts_clear(adapter_t* adapter)
+{
+	unsigned int i;
+	u32 pl_intr;
+
+	t1_sge_intr_clear(adapter->sge);
+	if (adapter->espi)
+		t1_espi_intr_clear(adapter->espi);
+
+	/* Clear MAC/PHY interrupts for each port. */
+	for_each_port(adapter, i) {
+		adapter->port[i].mac->ops->interrupt_clear(adapter->port[i].mac);
+		adapter->port[i].phy->ops->interrupt_clear(adapter->port[i].phy);
+	}
+
+	/* Enable interrupts for external devices. */
+    	pl_intr = readl(adapter->regs + A_PL_CAUSE);
+
+	writel(pl_intr | F_PL_INTR_EXT | F_PL_INTR_PCIX,
+	       adapter->regs + A_PL_CAUSE);
+
+	/* PCI-X interrupts */
+	pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, 0xffffffff);
+}
+
+/*
+ * Slow path interrupt handler for ASICs.
+ */
+int t1_slow_intr_handler(adapter_t *adapter)
+{
+	u32 cause = readl(adapter->regs + A_PL_CAUSE);
+
+	cause &= adapter->slow_intr_mask;
+	if (!cause)
+		return 0;
+	if (cause & F_PL_INTR_SGE_ERR)
+		t1_sge_intr_error_handler(adapter->sge);
+	if (cause & F_PL_INTR_ESPI)
+		t1_espi_intr_handler(adapter->espi);
+	if (cause & F_PL_INTR_PCIX)
+		t1_pci_intr_handler(adapter);
+	if (cause & F_PL_INTR_EXT)
+		t1_elmer0_ext_intr(adapter);
+
+	/* Clear the interrupts just processed. */
+	writel(cause, adapter->regs + A_PL_CAUSE);
+	(void)readl(adapter->regs + A_PL_CAUSE); /* flush writes */
+	return 1;
+}
+
+/* Pause deadlock avoidance parameters */
+#define DROP_MSEC 16
+#define DROP_PKTS_CNT  1
+
+static void set_csum_offload(adapter_t *adapter, u32 csum_bit, int enable)
+{
+	u32 val = readl(adapter->regs + A_TP_GLOBAL_CONFIG);
+
+	if (enable)
+		val |= csum_bit;
+	else
+		val &= ~csum_bit;
+	writel(val, adapter->regs + A_TP_GLOBAL_CONFIG);
+}
+
+void t1_tp_set_ip_checksum_offload(adapter_t *adapter, int enable)
+{
+	set_csum_offload(adapter, F_IP_CSUM, enable);
+}
+
+void t1_tp_set_udp_checksum_offload(adapter_t *adapter, int enable)
+{
+	set_csum_offload(adapter, F_UDP_CSUM, enable);
+}
+
+void t1_tp_set_tcp_checksum_offload(adapter_t *adapter, int enable)
+{
+	set_csum_offload(adapter, F_TCP_CSUM, enable);
+}
+
+static void t1_tp_reset(adapter_t *adapter, unsigned int tp_clk)
+{
+	u32 val;
+
+	val = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM |
+	      F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET;
+	val |= F_TP_IN_ESPI_CHECK_IP_CSUM |
+	       F_TP_IN_ESPI_CHECK_TCP_CSUM;
+	writel(val, adapter->regs + A_TP_IN_CONFIG);
+	writel(F_TP_OUT_CSPI_CPL |
+	       F_TP_OUT_ESPI_ETHERNET |
+	       F_TP_OUT_ESPI_GENERATE_IP_CSUM |
+	       F_TP_OUT_ESPI_GENERATE_TCP_CSUM,
+	       adapter->regs + A_TP_OUT_CONFIG);
+
+	val = readl(adapter->regs + A_TP_GLOBAL_CONFIG);
+	val &= ~(F_IP_CSUM | F_UDP_CSUM | F_TCP_CSUM);
+	writel(val, adapter->regs + A_TP_GLOBAL_CONFIG);
+
+	/*
+	 * Enable pause frame deadlock prevention.
+	 */
+	if (is_T2(adapter)) {
+		u32 drop_ticks = DROP_MSEC * (tp_clk / 1000);
+
+		writel(F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR |
+		       V_DROP_TICKS_CNT(drop_ticks) |
+		       V_NUM_PKTS_DROPPED(DROP_PKTS_CNT),
+		       adapter->regs + A_TP_TX_DROP_CONFIG);
+	}
+
+	writel(F_TP_RESET, adapter->regs + A_TP_RESET);
+}
+
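With the 125 MHz core clock from the board table (clock_core = 125000000) passed in as tp_clk, the T2 pause-deadlock-prevention parameters above work out to the following (worked example, not extra configuration):

    /* drop_ticks = DROP_MSEC * (tp_clk / 1000)
     *            = 16 * 125000 = 2000000 core ticks, roughly 16 ms
     */
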
+int __devinit t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
+			       struct adapter_params *p)
+{
+	p->chip_version = bi->chip_term;
+	if (p->chip_version == CHBT_TERM_T1 ||
+	    p->chip_version == CHBT_TERM_T2) {
+		u32 val = readl(adapter->regs + A_TP_PC_CONFIG);
+
+		val = G_TP_PC_REV(val);
+		if (val == 2)
+			p->chip_revision = TERM_T1B;
+		else if (val == 3)
+			p->chip_revision = TERM_T2;
+		else
+			return -1;
+	} else
+		return -1;
+	return 0;
+}
+
+/*
+ * Enable board components other than the Chelsio chip, such as external MAC
+ * and PHY.
+ */
+static int board_init(adapter_t *adapter, const struct board_info *bi)
+{
+	switch (bi->board) {
+	case CHBT_BOARD_N110:
+	case CHBT_BOARD_N210:
+		writel(V_TPIPAR(0xf), adapter->regs + A_TPI_PAR);
+    		t1_tpi_write(adapter, A_ELMER0_GPO, 0x800);
+		break;
+	}
+	return 0;
+}
+
+/*
+ * Initialize and configure the Terminator HW modules.  Note that external
+ * MAC and PHYs are initialized separately.
+ */
+int t1_init_hw_modules(adapter_t *adapter)
+{
+	int err = -EIO;
+	const struct board_info *bi = board_info(adapter);
+
+	if (!bi->clock_mc4) {
+		u32 val = readl(adapter->regs + A_MC4_CFG);
+
+		writel(val | F_READY | F_MC4_SLOW, adapter->regs + A_MC4_CFG);
+		writel(F_M_BUS_ENABLE | F_TCAM_RESET,
+		       adapter->regs + A_MC5_CONFIG);
+	}
+
+	if (adapter->espi && t1_espi_init(adapter->espi, bi->chip_mac,
+					  bi->espi_nports))
+		goto out_err;
+
+	t1_tp_reset(adapter, bi->clock_core);
+
+	err = t1_sge_configure(adapter->sge, &adapter->params.sge);
+	if (err)
+		goto out_err;
+
+	err = 0;
+ out_err:
+	return err;
+}
+
+/*
+ * Determine a card's PCI mode.
+ */
+static void __devinit get_pci_mode(adapter_t *adapter, struct chelsio_pci_params *p)
+{
+	static unsigned short speed_map[] = { 33, 66, 100, 133 };
+	u32 pci_mode;
+
+	pci_read_config_dword(adapter->pdev, A_PCICFG_MODE, &pci_mode);
+	p->speed = speed_map[G_PCI_MODE_CLK(pci_mode)];
+	p->width = (pci_mode & F_PCI_MODE_64BIT) ? 64 : 32;
+	p->is_pcix = (pci_mode & F_PCI_MODE_PCIX) != 0;
+}
+
+/*
+ * Release the structures holding the SW per-Terminator-HW-module state.
+ */
+void t1_free_sw_modules(adapter_t *adapter)
+{
+	unsigned int i;
+
+	for_each_port(adapter, i) {
+		struct cmac *mac = adapter->port[i].mac;
+		struct cphy *phy = adapter->port[i].phy;
+
+		if (mac)
+			mac->ops->destroy(mac);
+		if (phy)
+			phy->ops->destroy(phy);
+	}
+
+	if (adapter->sge)
+		t1_sge_destroy(adapter->sge);
+	if (adapter->espi)
+		t1_espi_destroy(adapter->espi);
+}
+
+static void __devinit init_link_config(struct link_config *lc,
+				       const struct board_info *bi)
+{
+	lc->supported = bi->caps;
+	lc->requested_speed = lc->speed = SPEED_INVALID;
+	lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
+	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
+	if (lc->supported & SUPPORTED_Autoneg) {
+		lc->advertising = lc->supported;
+		lc->autoneg = AUTONEG_ENABLE;
+		lc->requested_fc |= PAUSE_AUTONEG;
+	} else {
+		lc->advertising = 0;
+		lc->autoneg = AUTONEG_DISABLE;
+	}
+}
+
+
+/*
+ * Allocate and initialize the data structures that hold the SW state of
+ * the Terminator HW modules.
+ */
+int __devinit t1_init_sw_modules(adapter_t *adapter,
+				 const struct board_info *bi)
+{
+	unsigned int i;
+
+	adapter->params.brd_info = bi;
+	adapter->params.nports = bi->port_number;
+	adapter->params.stats_update_period = bi->gmac->stats_update_period;
+
+	adapter->sge = t1_sge_create(adapter, &adapter->params.sge);
+	if (!adapter->sge) {
+		CH_ERR("%s: SGE initialization failed\n",
+		       adapter->name);
+		goto error;
+	}
+
+	if (bi->espi_nports && !(adapter->espi = t1_espi_create(adapter))) {
+		CH_ERR("%s: ESPI initialization failed\n",
+		       adapter->name);
+		goto error;
+	}
+
+	board_init(adapter, bi);
+	bi->mdio_ops->init(adapter, bi);
+	if (bi->gphy->reset)
+		bi->gphy->reset(adapter);
+	if (bi->gmac->reset)
+		bi->gmac->reset(adapter);
+
+	for_each_port(adapter, i) {
+		u8 hw_addr[6];
+		struct cmac *mac;
+		int phy_addr = bi->mdio_phybaseaddr + i;
+
+		adapter->port[i].phy = bi->gphy->create(adapter, phy_addr,
+							bi->mdio_ops);
+		if (!adapter->port[i].phy) {
+			CH_ERR("%s: PHY %d initialization failed\n",
+			       adapter->name, i);
+			goto error;
+		}
+
+		adapter->port[i].mac = mac = bi->gmac->create(adapter, i);
+		if (!mac) {
+			CH_ERR("%s: MAC %d initialization failed\n",
+			       adapter->name, i);
+			goto error;
+		}
+
+		/*
+		 * Get the port's MAC address either from the EEPROM, if one
+		 * exists, or from the address hardcoded in the MAC.
+		 */
+		if (vpd_macaddress_get(adapter, i, hw_addr)) {
+			CH_ERR("%s: could not read MAC address from VPD ROM\n",
+			       adapter->port[i].dev->name);
+			goto error;
+		}
+		memcpy(adapter->port[i].dev->dev_addr, hw_addr, ETH_ALEN);
+		init_link_config(&adapter->port[i].link_config, bi);
+	}
+
+	get_pci_mode(adapter, &adapter->params.pci);
+	t1_interrupts_clear(adapter);
+	return 0;
+
+ error:
+	t1_free_sw_modules(adapter);
+	return -1;
+}
diff --git a/drivers/net/chelsio/suni1x10gexp_regs.h b/drivers/net/chelsio/suni1x10gexp_regs.h
new file mode 100644
--- /dev/null
+++ b/drivers/net/chelsio/suni1x10gexp_regs.h
@@ -0,0 +1,213 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: suni1x10gexp_regs.h                                                 *
+ * $Revision: 1.9 $                                                          *
+ * $Date: 2005/06/22 00:17:04 $                                              *
+ * Description:                                                              *
+ *  PMC/SIERRA (pm3393) MAC-PHY functionality.                               *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: PMC/SIERRA                                                       *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#ifndef _CXGB_SUNI1x10GEXP_REGS_H_
+#define _CXGB_SUNI1x10GEXP_REGS_H_
+
+/******************************************************************************/
+/** S/UNI-1x10GE-XP REGISTER ADDRESS MAP                                     **/
+/******************************************************************************/
+/* Refer to the Register Bit Masks below for the naming of each register and  */
+/* to the S/UNI-1x10GE-XP Data Sheet for the meaning of each bit              */
+/******************************************************************************/
+
+#define SUNI1x10GEXP_REG_DEVICE_STATUS                                   0x0004
+#define SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS                         0x000D
+#define SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE                         0x000E
+#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE                    0x0102
+#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS                    0x0104
+#define SUNI1x10GEXP_REG_RXXG_CONFIG_1                                   0x2040
+#define SUNI1x10GEXP_REG_RXXG_CONFIG_3                                   0x2042
+#define SUNI1x10GEXP_REG_RXXG_INTERRUPT                                  0x2043
+#define SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH                           0x2045
+#define SUNI1x10GEXP_REG_RXXG_SA_15_0                                    0x2046
+#define SUNI1x10GEXP_REG_RXXG_SA_31_16                                   0x2047
+#define SUNI1x10GEXP_REG_RXXG_SA_47_32                                   0x2048
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW                     0x204D
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID                     0x204E
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH                    0x204F
+#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW                         0x206A
+#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW                      0x206B
+#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH                     0x206C
+#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH                        0x206D
+#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0                   0x206E
+#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2                   0x2070
+#define SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE                            0x2088
+#define SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS                            0x2089
+#define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE                       0x208B
+#define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS                       0x208C
+#define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE                          0x20C7
+#define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS                          0x20C8
+#define SUNI1x10GEXP_REG_MSTAT_CONTROL                                   0x2100
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0                        0x2101
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1                        0x2102
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_2                        0x2103
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_3                        0x2104
+#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0                          0x2105
+#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1                          0x2106
+#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2                          0x2107
+#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3                          0x2108
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW                             0x2110
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW                             0x2114
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW                             0x2120
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW                             0x2124
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW                             0x2128
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW                             0x2130
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW                            0x2138
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW                            0x213C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW                            0x2140
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW                            0x2144
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW                            0x214C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW                            0x2150
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW                            0x2154
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW                            0x2158
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW                            0x2194
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW                            0x219C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW                            0x21A0
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW                            0x21A8
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW                            0x21B0
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW                            0x21B8
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW                            0x21BC
+#define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE                       0x2209
+#define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT                    0x220A
+#define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK                           0x2282
+#define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT                                0x2283
+#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS                        0x2300
+#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE                        0x2301
+#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK                          0x2302
+#define SUNI1x10GEXP_REG_TXXG_CONFIG_1                                   0x3040
+#define SUNI1x10GEXP_REG_TXXG_CONFIG_3                                   0x3042
+#define SUNI1x10GEXP_REG_TXXG_INTERRUPT                                  0x3043
+#define SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE                             0x3045
+#define SUNI1x10GEXP_REG_TXXG_SA_15_0                                    0x3047
+#define SUNI1x10GEXP_REG_TXXG_SA_31_16                                   0x3048
+#define SUNI1x10GEXP_REG_TXXG_SA_47_32                                   0x3049
+#define SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS                           0x3084
+#define SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE                           0x3085
+#define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE                          0x30C6
+#define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS                          0x30C7
+#define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE                 0x320C
+#define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION             0x320D
+#define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK                           0x3282
+#define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT                                0x3283
+
+/******************************************************************************/
+/*                 -- End register offset definitions --                      */
+/******************************************************************************/
+
+/******************************************************************************/
+/** SUNI-1x10GE-XP REGISTER BIT MASKS                                        **/
+/******************************************************************************/
+
+/*----------------------------------------------------------------------------
+ * Register 0x0004: S/UNI-1x10GE-XP Device Status
+ *    Bit 9 TOP_SXRA_EXPIRED
+ *    Bit 8 TOP_MDIO_BUSY
+ *    Bit 7 TOP_DTRB
+ *    Bit 6 TOP_EXPIRED
+ *    Bit 5 TOP_PAUSED
+ *    Bit 4 TOP_PL4_ID_DOOL
+ *    Bit 3 TOP_PL4_IS_DOOL
+ *    Bit 2 TOP_PL4_ID_ROOL
+ *    Bit 1 TOP_PL4_IS_ROOL
+ *    Bit 0 TOP_PL4_OUT_ROOL
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED  0x0200
+#define SUNI1x10GEXP_BITMSK_TOP_EXPIRED       0x0040
+#define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL   0x0010
+#define SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL   0x0008
+#define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL   0x0004
+#define SUNI1x10GEXP_BITMSK_TOP_PL4_IS_ROOL   0x0002
+#define SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL  0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x000E: PM3393 Global interrupt enable
+ *    Bit 15 TOP_INTE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TOP_INTE  0x8000
+
+/*----------------------------------------------------------------------------
+ * Register 0x2040: RXXG Configuration 1
+ *    Bit 15  RXXG_RXEN
+ *    Bit 14  RXXG_ROCF
+ *    Bit 13  RXXG_PAD_STRIP
+ *    Bit 10  RXXG_PUREP
+ *    Bit 9   RXXG_LONGP
+ *    Bit 8   RXXG_PARF
+ *    Bit 7   RXXG_FLCHK
+ *    Bit 5   RXXG_PASS_CTRL
+ *    Bit 3   RXXG_CRC_STRIP
+ *    Bit 2-0 RXXG_MIFG
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXXG_RXEN       0x8000
+#define SUNI1x10GEXP_BITMSK_RXXG_PUREP      0x0400
+#define SUNI1x10GEXP_BITMSK_RXXG_FLCHK      0x0080
+#define SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP  0x0008
+
+/*----------------------------------------------------------------------------
+ * Register 0x2070: RXXG Address Filter Control 2
+ *    Bit 1 RXXG_PMODE
+ *    Bit 0 RXXG_MHASH_EN
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXXG_PMODE     0x0002
+#define SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN  0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x2100: MSTAT Control
+ *    Bit 2 MSTAT_WRITE
+ *    Bit 1 MSTAT_CLEAR
+ *    Bit 0 MSTAT_SNAP
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_MSTAT_CLEAR  0x0002
+#define SUNI1x10GEXP_BITMSK_MSTAT_SNAP   0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x3040: TXXG Configuration Register 1
+ *    Bit 15   TXXG_TXEN0
+ *    Bit 13   TXXG_HOSTPAUSE
+ *    Bit 12-7 TXXG_IPGT
+ *    Bit 5    TXXG_32BIT_ALIGN
+ *    Bit 4    TXXG_CRCEN
+ *    Bit 3    TXXG_FCTX
+ *    Bit 2    TXXG_FCRX
+ *    Bit 1    TXXG_PADEN
+ *    Bit 0    TXXG_SPRE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXXG_TXEN0        0x8000
+#define SUNI1x10GEXP_BITOFF_TXXG_IPGT         7
+#define SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN  0x0020
+#define SUNI1x10GEXP_BITMSK_TXXG_CRCEN        0x0010
+#define SUNI1x10GEXP_BITMSK_TXXG_FCTX         0x0008
+#define SUNI1x10GEXP_BITMSK_TXXG_FCRX         0x0004
+#define SUNI1x10GEXP_BITMSK_TXXG_PADEN        0x0002
+
+#endif /* _CXGB_SUNI1x10GEXP_REGS_H_ */
+
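These offsets and bit masks are consumed when the cxgb MAC code programs the
PM3393/SUNI-1x10GE-XP through its register interface.  As an illustration only
(the suni_read()/suni_write() helpers below are hypothetical placeholders, not
the driver's actual accessors), enabling the receive and transmit MACs is a
read-modify-write of the two configuration registers:

    /* Sketch only: suni_read()/suni_write() stand in for the real accessor. */
    u16 val;

    val = suni_read(mac, SUNI1x10GEXP_REG_RXXG_CONFIG_1);
    suni_write(mac, SUNI1x10GEXP_REG_RXXG_CONFIG_1,
               val | SUNI1x10GEXP_BITMSK_RXXG_RXEN);

    val = suni_read(mac, SUNI1x10GEXP_REG_TXXG_CONFIG_1);
    suni_write(mac, SUNI1x10GEXP_REG_TXXG_CONFIG_1,
               val | SUNI1x10GEXP_BITMSK_TXXG_TXEN0);
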
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   
-  Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
   
   This program is free software; you can redistribute it and/or modify it 
   under the terms of the GNU General Public License as published by the Free 
@@ -156,7 +156,7 @@
 
 #define DRV_NAME		"e100"
 #define DRV_EXT		"-NAPI"
-#define DRV_VERSION		"3.4.8-k2"DRV_EXT
+#define DRV_VERSION		"3.4.10-k2"DRV_EXT
 #define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
 #define DRV_COPYRIGHT		"Copyright(c) 1999-2005 Intel Corporation"
 #define PFX			DRV_NAME ": "
@@ -902,8 +902,8 @@ static void mdio_write(struct net_device
 
 static void e100_get_defaults(struct nic *nic)
 {
-	struct param_range rfds = { .min = 16, .max = 256, .count = 64 };
-	struct param_range cbs  = { .min = 64, .max = 256, .count = 64 };
+	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
+	struct param_range cbs  = { .min = 64, .max = 256, .count = 128 };
 
 	pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id);
 	/* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
@@ -1006,25 +1006,209 @@ static void e100_configure(struct nic *n
 		c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
 }
 
+/********************************************************/
+/*  Micro code for 8086:1229 Rev 8                      */
+/********************************************************/
+
+/*  Parameter values for the D101M B-step  */
+#define D101M_CPUSAVER_TIMER_DWORD		78
+#define D101M_CPUSAVER_BUNDLE_DWORD		65
+#define D101M_CPUSAVER_MIN_SIZE_DWORD		126
+
+#define D101M_B_RCVBUNDLE_UCODE \
+{\
+0x00550215, 0xFFFF0437, 0xFFFFFFFF, 0x06A70789, 0xFFFFFFFF, 0x0558FFFF, \
+0x000C0001, 0x00101312, 0x000C0008, 0x00380216, \
+0x0010009C, 0x00204056, 0x002380CC, 0x00380056, \
+0x0010009C, 0x00244C0B, 0x00000800, 0x00124818, \
+0x00380438, 0x00000000, 0x00140000, 0x00380555, \
+0x00308000, 0x00100662, 0x00100561, 0x000E0408, \
+0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
+0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
+0x000C007E, 0x00222C21, 0x000C0002, 0x00103093, \
+0x00380C7A, 0x00080000, 0x00103090, 0x00380C7A, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x0010009C, 0x00244C2D, 0x00010004, 0x00041000, \
+0x003A0437, 0x00044010, 0x0038078A, 0x00000000, \
+0x00100099, 0x00206C7A, 0x0010009C, 0x00244C48, \
+0x00130824, 0x000C0001, 0x00101213, 0x00260C75, \
+0x00041000, 0x00010004, 0x00130826, 0x000C0006, \
+0x002206A8, 0x0013C926, 0x00101313, 0x003806A8, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
+0x00101210, 0x00380C34, 0x00000000, 0x00000000, \
+0x0021155B, 0x00100099, 0x00206559, 0x0010009C, \
+0x00244559, 0x00130836, 0x000C0000, 0x00220C62, \
+0x000C0001, 0x00101B13, 0x00229C0E, 0x00210C0E, \
+0x00226C0E, 0x00216C0E, 0x0022FC0E, 0x00215C0E, \
+0x00214C0E, 0x00380555, 0x00010004, 0x00041000, \
+0x00278C67, 0x00040800, 0x00018100, 0x003A0437, \
+0x00130826, 0x000C0001, 0x00220559, 0x00101313, \
+0x00380559, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00130831, 0x0010090B, 0x00124813, \
+0x000CFF80, 0x002606AB, 0x00041000, 0x00010004, \
+0x003806A8, 0x00000000, 0x00000000, 0x00000000, \
+}
+
+/********************************************************/
+/*  Micro code for 8086:1229 Rev 9                      */
+/********************************************************/
+
+/*  Parameter values for the D101S  */
+#define D101S_CPUSAVER_TIMER_DWORD		78
+#define D101S_CPUSAVER_BUNDLE_DWORD		67
+#define D101S_CPUSAVER_MIN_SIZE_DWORD		128
+
+#define D101S_RCVBUNDLE_UCODE \
+{\
+0x00550242, 0xFFFF047E, 0xFFFFFFFF, 0x06FF0818, 0xFFFFFFFF, 0x05A6FFFF, \
+0x000C0001, 0x00101312, 0x000C0008, 0x00380243, \
+0x0010009C, 0x00204056, 0x002380D0, 0x00380056, \
+0x0010009C, 0x00244F8B, 0x00000800, 0x00124818, \
+0x0038047F, 0x00000000, 0x00140000, 0x003805A3, \
+0x00308000, 0x00100610, 0x00100561, 0x000E0408, \
+0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
+0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
+0x000C007E, 0x00222FA1, 0x000C0002, 0x00103093, \
+0x00380F90, 0x00080000, 0x00103090, 0x00380F90, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x0010009C, 0x00244FAD, 0x00010004, 0x00041000, \
+0x003A047E, 0x00044010, 0x00380819, 0x00000000, \
+0x00100099, 0x00206FFD, 0x0010009A, 0x0020AFFD, \
+0x0010009C, 0x00244FC8, 0x00130824, 0x000C0001, \
+0x00101213, 0x00260FF7, 0x00041000, 0x00010004, \
+0x00130826, 0x000C0006, 0x00220700, 0x0013C926, \
+0x00101313, 0x00380700, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
+0x00101210, 0x00380FB6, 0x00000000, 0x00000000, \
+0x002115A9, 0x00100099, 0x002065A7, 0x0010009A, \
+0x0020A5A7, 0x0010009C, 0x002445A7, 0x00130836, \
+0x000C0000, 0x00220FE4, 0x000C0001, 0x00101B13, \
+0x00229F8E, 0x00210F8E, 0x00226F8E, 0x00216F8E, \
+0x0022FF8E, 0x00215F8E, 0x00214F8E, 0x003805A3, \
+0x00010004, 0x00041000, 0x00278FE9, 0x00040800, \
+0x00018100, 0x003A047E, 0x00130826, 0x000C0001, \
+0x002205A7, 0x00101313, 0x003805A7, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00130831, \
+0x0010090B, 0x00124813, 0x000CFF80, 0x00260703, \
+0x00041000, 0x00010004, 0x00380700  \
+}
+
+/********************************************************/
+/*  Micro code for the 8086:1229 Rev F/10               */
+/********************************************************/
+
+/*  Parameter values for the D102 E-step  */
+#define D102_E_CPUSAVER_TIMER_DWORD		42
+#define D102_E_CPUSAVER_BUNDLE_DWORD		54
+#define D102_E_CPUSAVER_MIN_SIZE_DWORD		46
+
+#define     D102_E_RCVBUNDLE_UCODE \
+{\
+0x007D028F, 0x0E4204F9, 0x14ED0C85, 0x14FA14E9, 0x0EF70E36, 0x1FFF1FFF, \
+0x00E014B9, 0x00000000, 0x00000000, 0x00000000, \
+0x00E014BD, 0x00000000, 0x00000000, 0x00000000, \
+0x00E014D5, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00E014C1, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00E014C8, 0x00000000, 0x00000000, 0x00000000, \
+0x00200600, 0x00E014EE, 0x00000000, 0x00000000, \
+0x0030FF80, 0x00940E46, 0x00038200, 0x00102000, \
+0x00E00E43, 0x00000000, 0x00000000, 0x00000000, \
+0x00300006, 0x00E014FB, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000, \
+0x00906EFD, 0x00900EFD, 0x00E00EF8, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+}
+
 static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 {
-	int i;
-	static const u32 ucode[UCODE_SIZE] = {
-		/* NFS packets are misinterpreted as TCO packets and
-		 * incorrectly routed to the BMC over SMBus.  This
-		 * microcode patch checks the fragmented IP bit in the
-		 * NFS/UDP header to distinguish between NFS and TCO. */
-		0x0EF70E36, 0x1FFF1FFF, 0x1FFF1FFF, 0x1FFF1FFF, 0x1FFF1FFF,
-		0x1FFF1FFF, 0x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000,
-		0x00906EFD, 0x00900EFD,	0x00E00EF8,
-	};
+/* *INDENT-OFF* */
+	static struct {
+		u32 ucode[UCODE_SIZE + 1];
+		u8 mac;
+		u8 timer_dword;
+		u8 bundle_dword;
+		u8 min_size_dword;
+	} ucode_opts[] = {
+		{ D101M_B_RCVBUNDLE_UCODE,
+		  mac_82559_D101M,
+		  D101M_CPUSAVER_TIMER_DWORD,
+		  D101M_CPUSAVER_BUNDLE_DWORD,
+		  D101M_CPUSAVER_MIN_SIZE_DWORD },
+		{ D101S_RCVBUNDLE_UCODE,
+		  mac_82559_D101S,
+		  D101S_CPUSAVER_TIMER_DWORD,
+		  D101S_CPUSAVER_BUNDLE_DWORD,
+		  D101S_CPUSAVER_MIN_SIZE_DWORD },
+		{ D102_E_RCVBUNDLE_UCODE,
+		  mac_82551_F,
+		  D102_E_CPUSAVER_TIMER_DWORD,
+		  D102_E_CPUSAVER_BUNDLE_DWORD,
+		  D102_E_CPUSAVER_MIN_SIZE_DWORD },
+		{ D102_E_RCVBUNDLE_UCODE,
+		  mac_82551_10,
+		  D102_E_CPUSAVER_TIMER_DWORD,
+		  D102_E_CPUSAVER_BUNDLE_DWORD,
+		  D102_E_CPUSAVER_MIN_SIZE_DWORD },
+		{ {0}, 0, 0, 0, 0}
+	}, *opts;
+/* *INDENT-ON* */
+
+#define BUNDLESMALL 1
+#define BUNDLEMAX 50
+#define INTDELAY 15000
+
+	opts = ucode_opts;
+
+	/* Search for ucode match against h/w rev_id */
+	while (opts->mac) {
+		if (nic->mac == opts->mac) {
+			int i;
+			u32 *ucode = opts->ucode;
+
+			/* Insert user-tunable settings */
+			ucode[opts->timer_dword] &= 0xFFFF0000;
+			ucode[opts->timer_dword] |=
+				(u16) INTDELAY;
+			ucode[opts->bundle_dword] &= 0xFFFF0000;
+			ucode[opts->bundle_dword] |= (u16) BUNDLEMAX;
+			ucode[opts->min_size_dword] &= 0xFFFF0000;
+			ucode[opts->min_size_dword] |=
+				(BUNDLESMALL) ?  0xFFFF : 0xFF80;
+
+			for(i = 0; i < UCODE_SIZE; i++)
+				cb->u.ucode[i] = cpu_to_le32(ucode[i]);
+			cb->command = cpu_to_le16(cb_ucode);
+			return;
+		}
+		opts++;
+	}
 
-	if(nic->mac == mac_82551_F || nic->mac == mac_82551_10) {
-		for(i = 0; i < UCODE_SIZE; i++)
-			cb->u.ucode[i] = cpu_to_le32(ucode[i]);
-		cb->command = cpu_to_le16(cb_ucode);
-	} else
-		cb->command = cpu_to_le16(cb_nop);
+	cb->command = cpu_to_le16(cb_nop);
 }
 
 static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
@@ -1706,6 +1890,7 @@ static int e100_poll(struct net_device *
 static void e100_netpoll(struct net_device *netdev)
 {
 	struct nic *nic = netdev_priv(netdev);
+
 	e100_disable_irq(nic);
 	e100_intr(nic->pdev->irq, netdev, NULL);
 	e100_tx_clean(nic);
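
For reference, the *_CPUSAVER_*_DWORD constants above are indices into the
microcode image where e100_load_ucode() patches in the interrupt-mitigation
tunables before handing the ucode to the controller.  A minimal sketch of the
same arithmetic, specialized to the D101M table and the default tunables:

    u32 ucode[UCODE_SIZE + 1] = D101M_B_RCVBUNDLE_UCODE;

    /* INTDELAY = 15000 = 0x3A98, BUNDLEMAX = 50 = 0x0032 */
    ucode[D101M_CPUSAVER_TIMER_DWORD] &= 0xFFFF0000;
    ucode[D101M_CPUSAVER_TIMER_DWORD] |= 0x3A98;
    ucode[D101M_CPUSAVER_BUNDLE_DWORD] &= 0xFFFF0000;
    ucode[D101M_CPUSAVER_BUNDLE_DWORD] |= 0x0032;
    /* BUNDLESMALL != 0 selects 0xFFFF, otherwise 0xFF80 */
    ucode[D101M_CPUSAVER_MIN_SIZE_DWORD] &= 0xFFFF0000;
    ucode[D101M_CPUSAVER_MIN_SIZE_DWORD] |= 0xFFFF;
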
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -85,6 +85,14 @@
  *	0.33: 16 May 2005: Support for MCP51 added.
  *	0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics.
  *	0.35: 26 Jun 2005: Support for MCP55 added.
+ *	0.36: 28 Jun 2005: Add jumbo frame support.
+ *	0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list
+ *	0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of
+ *			   per-packet flags.
+ *      0.39: 18 Jul 2005: Add 64bit descriptor support.
+ *      0.40: 19 Jul 2005: Add support for mac address change.
+ *      0.41: 30 Jul 2005: Write back original MAC in nv_close instead
+ *			   of nv_remove
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
@@ -96,7 +104,7 @@
  * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
  * superfluous timer interrupts from the nic.
  */
-#define FORCEDETH_VERSION		"0.35"
+#define FORCEDETH_VERSION		"0.41"
 #define DRV_NAME			"forcedeth"
 
 #include <linux/module.h>
@@ -131,11 +139,10 @@
  * Hardware access:
  */
 
-#define DEV_NEED_LASTPACKET1	0x0001	/* set LASTPACKET1 in tx flags */
-#define DEV_IRQMASK_1		0x0002  /* use NVREG_IRQMASK_WANTED_1 for irq mask */
-#define DEV_IRQMASK_2		0x0004  /* use NVREG_IRQMASK_WANTED_2 for irq mask */
-#define DEV_NEED_TIMERIRQ	0x0008  /* set the timer irq flag in the irq mask */
-#define DEV_NEED_LINKTIMER	0x0010	/* poll link settings. Relies on the timer irq */
+#define DEV_NEED_TIMERIRQ	0x0001  /* set the timer irq flag in the irq mask */
+#define DEV_NEED_LINKTIMER	0x0002	/* poll link settings. Relies on the timer irq */
+#define DEV_HAS_LARGEDESC	0x0004	/* device supports jumbo frames and needs packet format 2 */
+#define DEV_HAS_HIGH_DMA        0x0008  /* device supports 64bit dma */
 
 enum {
 	NvRegIrqStatus = 0x000,
@@ -146,13 +153,16 @@ enum {
 #define NVREG_IRQ_RX			0x0002
 #define NVREG_IRQ_RX_NOBUF		0x0004
 #define NVREG_IRQ_TX_ERR		0x0008
-#define NVREG_IRQ_TX2			0x0010
+#define NVREG_IRQ_TX_OK			0x0010
 #define NVREG_IRQ_TIMER			0x0020
 #define NVREG_IRQ_LINK			0x0040
+#define NVREG_IRQ_TX_ERROR		0x0080
 #define NVREG_IRQ_TX1			0x0100
-#define NVREG_IRQMASK_WANTED_1		0x005f
-#define NVREG_IRQMASK_WANTED_2		0x0147
-#define NVREG_IRQ_UNKNOWN		(~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR|NVREG_IRQ_TX2|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_TX1))
+#define NVREG_IRQMASK_WANTED		0x00df
+
+#define NVREG_IRQ_UNKNOWN	(~(NVREG_IRQ_RX_ERROR|NVREG_IRQ_RX|NVREG_IRQ_RX_NOBUF|NVREG_IRQ_TX_ERR| \
+					NVREG_IRQ_TX_OK|NVREG_IRQ_TIMER|NVREG_IRQ_LINK|NVREG_IRQ_TX_ERROR| \
+					NVREG_IRQ_TX1))
 
 	NvRegUnknownSetupReg6 = 0x008,
 #define NVREG_UNKSETUP6_VAL		3
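
For reference, the single NVREG_IRQMASK_WANTED value of 0x00df is the OR of the
individual status bits: RX_ERROR (0x0001), RX (0x0002), RX_NOBUF (0x0004),
TX_ERR (0x0008), TX_OK (0x0010), LINK (0x0040) and TX_ERROR (0x0080).  The
timer bit (0x0020) is deliberately left out because DEV_NEED_TIMERIRQ ORs it in
per device, and NVREG_IRQ_TX1 (0x0100) is no longer requested at all.
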
@@ -286,6 +296,18 @@ struct ring_desc {
 	u32 FlagLen;
 };
 
+struct ring_desc_ex {
+	u32 PacketBufferHigh;
+	u32 PacketBufferLow;
+	u32 Reserved;
+	u32 FlagLen;
+};
+
+typedef union _ring_type {
+	struct ring_desc* orig;
+	struct ring_desc_ex* ex;
+} ring_type;
+
 #define FLAG_MASK_V1 0xffff0000
 #define FLAG_MASK_V2 0xffffc000
 #define LEN_MASK_V1 (0xffffffff ^ FLAG_MASK_V1)
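
ring_desc_ex is the "packet format 3" layout used with DESC_VER_3: the buffer
address is carried in two 32-bit words instead of one.  A minimal sketch of the
split performed later in nv_alloc_rx()/nv_start_xmit() (the helper below is
illustrative only, not something this patch adds):

    /* Sketch: mirrors the DESC_VER_3 fill path further down. */
    static void fill_desc_ex(struct ring_desc_ex *desc, dma_addr_t dma, u32 flaglen)
    {
        desc->PacketBufferHigh = cpu_to_le64(dma) >> 32;
        desc->PacketBufferLow  = cpu_to_le64(dma) & 0x0FFFFFFFF;
        wmb();    /* publish the address before marking the descriptor valid */
        desc->FlagLen = cpu_to_le32(flaglen);
    }
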
@@ -293,7 +315,7 @@ struct ring_desc {
 
 #define NV_TX_LASTPACKET	(1<<16)
 #define NV_TX_RETRYERROR	(1<<19)
-#define NV_TX_LASTPACKET1	(1<<24)
+#define NV_TX_FORCED_INTERRUPT	(1<<24)
 #define NV_TX_DEFERRED		(1<<26)
 #define NV_TX_CARRIERLOST	(1<<27)
 #define NV_TX_LATECOLLISION	(1<<28)
@@ -303,7 +325,7 @@ struct ring_desc {
 
 #define NV_TX2_LASTPACKET	(1<<29)
 #define NV_TX2_RETRYERROR	(1<<18)
-#define NV_TX2_LASTPACKET1	(1<<23)
+#define NV_TX2_FORCED_INTERRUPT	(1<<30)
 #define NV_TX2_DEFERRED		(1<<25)
 #define NV_TX2_CARRIERLOST	(1<<26)
 #define NV_TX2_LATECOLLISION	(1<<27)
@@ -379,9 +401,13 @@ struct ring_desc {
 #define TX_LIMIT_START	62
 
 /* rx/tx mac addr + type + vlan + align + slack*/
-#define RX_NIC_BUFSIZE		(ETH_DATA_LEN + 64)
-/* even more slack */
-#define RX_ALLOC_BUFSIZE	(ETH_DATA_LEN + 128)
+#define NV_RX_HEADERS		(64)
+/* even more slack. */
+#define NV_RX_ALLOC_PAD		(64)
+
+/* maximum mtu size */
+#define NV_PKTLIMIT_1	ETH_DATA_LEN	/* hard limit not known */
+#define NV_PKTLIMIT_2	9100	/* Actual limit according to NVidia: 9202 */
 
 #define OOM_REFILL	(1+HZ/20)
 #define POLL_WAIT	(1+HZ/100)
@@ -396,6 +422,7 @@ struct ring_desc {
  */
 #define DESC_VER_1	0x0
 #define DESC_VER_2	(0x02100|NVREG_TXRXCTL_RXCHECK)
+#define DESC_VER_3      (0x02200|NVREG_TXRXCTL_RXCHECK)
 
 /* PHY defines */
 #define PHY_OUI_MARVELL	0x5043
@@ -468,11 +495,12 @@ struct fe_priv {
 	/* rx specific fields.
 	 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
 	 */
-	struct ring_desc *rx_ring;
+	ring_type rx_ring;
 	unsigned int cur_rx, refill_rx;
 	struct sk_buff *rx_skbuff[RX_RING];
 	dma_addr_t rx_dma[RX_RING];
 	unsigned int rx_buf_sz;
+	unsigned int pkt_limit;
 	struct timer_list oom_kick;
 	struct timer_list nic_poll;
 
@@ -484,7 +512,7 @@ struct fe_priv {
 	/*
 	 * tx specific fields.
 	 */
-	struct ring_desc *tx_ring;
+	ring_type tx_ring;
 	unsigned int next_tx, nic_tx;
 	struct sk_buff *tx_skbuff[TX_RING];
 	dma_addr_t tx_dma[TX_RING];
@@ -519,6 +547,11 @@ static inline u32 nv_descr_getlength(str
 		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
 }
 
+static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
+{
+	return le32_to_cpu(prd->FlagLen) & LEN_MASK_V2;
+}
+
 static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
 				int delay, int delaymax, const char *msg)
 {
@@ -792,7 +825,7 @@ static int nv_alloc_rx(struct net_device
 		nr = refill_rx % RX_RING;
 		if (np->rx_skbuff[nr] == NULL) {
 
-			skb = dev_alloc_skb(RX_ALLOC_BUFSIZE);
+			skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
 			if (!skb)
 				break;
 
@@ -803,9 +836,16 @@ static int nv_alloc_rx(struct net_device
 		}
 		np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data, skb->len,
 						PCI_DMA_FROMDEVICE);
-		np->rx_ring[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
-		wmb();
-		np->rx_ring[nr].FlagLen = cpu_to_le32(RX_NIC_BUFSIZE | NV_RX_AVAIL);
+		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+			np->rx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
+			wmb();
+			np->rx_ring.orig[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
+		} else {
+			np->rx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
+			np->rx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
+			wmb();
+			np->rx_ring.ex[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
+		}
 		dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
 					dev->name, refill_rx);
 		refill_rx++;
@@ -831,19 +871,37 @@ static void nv_do_rx_refill(unsigned lon
 	enable_irq(dev->irq);
 }
 
-static int nv_init_ring(struct net_device *dev)
+static void nv_init_rx(struct net_device *dev)
 {
 	struct fe_priv *np = get_nvpriv(dev);
 	int i;
 
-	np->next_tx = np->nic_tx = 0;
-	for (i = 0; i < TX_RING; i++)
-		np->tx_ring[i].FlagLen = 0;
-
 	np->cur_rx = RX_RING;
 	np->refill_rx = 0;
 	for (i = 0; i < RX_RING; i++)
-		np->rx_ring[i].FlagLen = 0;
+		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+			np->rx_ring.orig[i].FlagLen = 0;
+		else
+			np->rx_ring.ex[i].FlagLen = 0;
+}
+
+static void nv_init_tx(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	int i;
+
+	np->next_tx = np->nic_tx = 0;
+	for (i = 0; i < TX_RING; i++)
+		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+			np->tx_ring.orig[i].FlagLen = 0;
+		else
+			np->tx_ring.ex[i].FlagLen = 0;
+}
+
+static int nv_init_ring(struct net_device *dev)
+{
+	nv_init_tx(dev);
+	nv_init_rx(dev);
 	return nv_alloc_rx(dev);
 }
 
@@ -852,7 +910,10 @@ static void nv_drain_tx(struct net_devic
 	struct fe_priv *np = get_nvpriv(dev);
 	int i;
 	for (i = 0; i < TX_RING; i++) {
-		np->tx_ring[i].FlagLen = 0;
+		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+			np->tx_ring.orig[i].FlagLen = 0;
+		else
+			np->tx_ring.ex[i].FlagLen = 0;
 		if (np->tx_skbuff[i]) {
 			pci_unmap_single(np->pci_dev, np->tx_dma[i],
 						np->tx_skbuff[i]->len,
@@ -869,7 +930,10 @@ static void nv_drain_rx(struct net_devic
 	struct fe_priv *np = get_nvpriv(dev);
 	int i;
 	for (i = 0; i < RX_RING; i++) {
-		np->rx_ring[i].FlagLen = 0;
+		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+			np->rx_ring.orig[i].FlagLen = 0;
+		else
+			np->rx_ring.ex[i].FlagLen = 0;
 		wmb();
 		if (np->rx_skbuff[i]) {
 			pci_unmap_single(np->pci_dev, np->rx_dma[i],
@@ -900,11 +964,19 @@ static int nv_start_xmit(struct sk_buff 
 	np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data,skb->len,
 					PCI_DMA_TODEVICE);
 
-	np->tx_ring[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
+	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+		np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
+	else {
+		np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
+		np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
+	}
 
 	spin_lock_irq(&np->lock);
 	wmb();
-	np->tx_ring[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags );
+	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+		np->tx_ring.orig[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags );
+	else
+		np->tx_ring.ex[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags );
 	dprintk(KERN_DEBUG "%s: nv_start_xmit: packet packet %d queued for transmission.\n",
 				dev->name, np->next_tx);
 	{
@@ -942,7 +1014,10 @@ static void nv_tx_done(struct net_device
 	while (np->nic_tx != np->next_tx) {
 		i = np->nic_tx % TX_RING;
 
-		Flags = le32_to_cpu(np->tx_ring[i].FlagLen);
+		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+			Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen);
+		else
+			Flags = le32_to_cpu(np->tx_ring.ex[i].FlagLen);
 
 		dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n",
 					dev->name, np->nic_tx, Flags);
@@ -993,9 +1068,56 @@ static void nv_tx_timeout(struct net_dev
 	struct fe_priv *np = get_nvpriv(dev);
 	u8 __iomem *base = get_hwbase(dev);
 
-	dprintk(KERN_DEBUG "%s: Got tx_timeout. irq: %08x\n", dev->name,
+	printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name,
 			readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK);
 
+	{
+		int i;
+
+		printk(KERN_INFO "%s: Ring at %lx: next %d nic %d\n",
+				dev->name, (unsigned long)np->ring_addr,
+				np->next_tx, np->nic_tx);
+		printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
+		for (i=0;i<0x400;i+= 32) {
+			printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+					i,
+					readl(base + i + 0), readl(base + i + 4),
+					readl(base + i + 8), readl(base + i + 12),
+					readl(base + i + 16), readl(base + i + 20),
+					readl(base + i + 24), readl(base + i + 28));
+		}
+		printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
+		for (i=0;i<TX_RING;i+= 4) {
+			if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+				printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
+				       i, 
+				       le32_to_cpu(np->tx_ring.orig[i].PacketBuffer),
+				       le32_to_cpu(np->tx_ring.orig[i].FlagLen),
+				       le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer),
+				       le32_to_cpu(np->tx_ring.orig[i+1].FlagLen),
+				       le32_to_cpu(np->tx_ring.orig[i+2].PacketBuffer),
+				       le32_to_cpu(np->tx_ring.orig[i+2].FlagLen),
+				       le32_to_cpu(np->tx_ring.orig[i+3].PacketBuffer),
+				       le32_to_cpu(np->tx_ring.orig[i+3].FlagLen));
+			} else {
+				printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
+				       i, 
+				       le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh),
+				       le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow),
+				       le32_to_cpu(np->tx_ring.ex[i].FlagLen),
+				       le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferHigh),
+				       le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferLow),
+				       le32_to_cpu(np->tx_ring.ex[i+1].FlagLen),
+				       le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferHigh),
+				       le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferLow),
+				       le32_to_cpu(np->tx_ring.ex[i+2].FlagLen),
+				       le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferHigh),
+				       le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferLow),
+				       le32_to_cpu(np->tx_ring.ex[i+3].FlagLen));
+			}
+		}
+	}
+
 	spin_lock_irq(&np->lock);
 
 	/* 1) stop tx engine */
@@ -1009,7 +1131,10 @@ static void nv_tx_timeout(struct net_dev
 		printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
 		nv_drain_tx(dev);
 		np->next_tx = np->nic_tx = 0;
-		writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
+		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+			writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
+		else
+			writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
 		netif_wake_queue(dev);
 	}
 
@@ -1084,8 +1209,13 @@ static void nv_rx_process(struct net_dev
 			break;	/* we scanned the whole ring - do not continue */
 
 		i = np->cur_rx % RX_RING;
-		Flags = le32_to_cpu(np->rx_ring[i].FlagLen);
-		len = nv_descr_getlength(&np->rx_ring[i], np->desc_ver);
+		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+			Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen);
+			len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
+		} else {
+			Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen);
+			len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver);
+		}
 
 		dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n",
 					dev->name, np->cur_rx, Flags);
@@ -1207,15 +1337,133 @@ next_pkt:
 	}
 }
 
+static void set_bufsize(struct net_device *dev)
+{
+	struct fe_priv *np = netdev_priv(dev);
+
+	if (dev->mtu <= ETH_DATA_LEN)
+		np->rx_buf_sz = ETH_DATA_LEN + NV_RX_HEADERS;
+	else
+		np->rx_buf_sz = dev->mtu + NV_RX_HEADERS;
+}
+
 /*
  * nv_change_mtu: dev->change_mtu function
  * Called with dev_base_lock held for read.
  */
 static int nv_change_mtu(struct net_device *dev, int new_mtu)
 {
-	if (new_mtu > ETH_DATA_LEN)
+	struct fe_priv *np = get_nvpriv(dev);
+	int old_mtu;
+
+	if (new_mtu < 64 || new_mtu > np->pkt_limit)
 		return -EINVAL;
+
+	old_mtu = dev->mtu;
 	dev->mtu = new_mtu;
+
+	/* return early if the buffer sizes will not change */
+	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
+		return 0;
+	if (old_mtu == new_mtu)
+		return 0;
+
+	/* synchronized against open : rtnl_lock() held by caller */
+	if (netif_running(dev)) {
+		u8 __iomem *base = get_hwbase(dev);
+		/*
+		 * It seems that the nic preloads valid ring entries into an
+		 * internal buffer. The procedure for flushing everything is
+		 * guessed, there is probably a simpler approach.
+		 * Changing the MTU is a rare event, it shouldn't matter.
+		 */
+		disable_irq(dev->irq);
+		spin_lock_bh(&dev->xmit_lock);
+		spin_lock(&np->lock);
+		/* stop engines */
+		nv_stop_rx(dev);
+		nv_stop_tx(dev);
+		nv_txrx_reset(dev);
+		/* drain rx queue */
+		nv_drain_rx(dev);
+		nv_drain_tx(dev);
+		/* reinit driver view of the rx queue */
+		nv_init_rx(dev);
+		nv_init_tx(dev);
+		/* alloc new rx buffers */
+		set_bufsize(dev);
+		if (nv_alloc_rx(dev)) {
+			if (!np->in_shutdown)
+				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+		}
+		/* reinit nic view of the rx queue */
+		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
+		writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr);
+		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+			writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
+		else
+			writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
+		writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
+			base + NvRegRingSizes);
+		pci_push(base);
+		writel(NVREG_TXRXCTL_KICK|np->desc_ver, get_hwbase(dev) + NvRegTxRxControl);
+		pci_push(base);
+
+		/* restart rx engine */
+		nv_start_rx(dev);
+		nv_start_tx(dev);
+		spin_unlock(&np->lock);
+		spin_unlock_bh(&dev->xmit_lock);
+		enable_irq(dev->irq);
+	}
+	return 0;
+}
+
+static void nv_copy_mac_to_hw(struct net_device *dev)
+{
+	u8 __iomem *base = get_hwbase(dev);
+	u32 mac[2];
+
+	mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
+			(dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
+	mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
+
+	writel(mac[0], base + NvRegMacAddrA);
+	writel(mac[1], base + NvRegMacAddrB);
+}
+
+/*
+ * nv_set_mac_address: dev->set_mac_address function
+ * Called with rtnl_lock() held.
+ */
+static int nv_set_mac_address(struct net_device *dev, void *addr)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	struct sockaddr *macaddr = (struct sockaddr*)addr;
+
+	if(!is_valid_ether_addr(macaddr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	/* synchronized against open : rtnl_lock() held by caller */
+	memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
+
+	if (netif_running(dev)) {
+		spin_lock_bh(&dev->xmit_lock);
+		spin_lock_irq(&np->lock);
+
+		/* stop rx engine */
+		nv_stop_rx(dev);
+
+		/* set mac address */
+		nv_copy_mac_to_hw(dev);
+
+		/* restart rx engine */
+		nv_start_rx(dev);
+		spin_unlock_irq(&np->lock);
+		spin_unlock_bh(&dev->xmit_lock);
+	} else {
+		nv_copy_mac_to_hw(dev);
+	}
 	return 0;
 }
 
@@ -1470,7 +1718,7 @@ static irqreturn_t nv_nic_irq(int foo, v
 		if (!(events & np->irqmask))
 			break;
 
-		if (events & (NVREG_IRQ_TX1|NVREG_IRQ_TX2|NVREG_IRQ_TX_ERR)) {
+		if (events & (NVREG_IRQ_TX1|NVREG_IRQ_TX_OK|NVREG_IRQ_TX_ERROR|NVREG_IRQ_TX_ERR)) {
 			spin_lock(&np->lock);
 			nv_tx_done(dev);
 			spin_unlock(&np->lock);
@@ -1761,6 +2009,50 @@ static int nv_set_settings(struct net_de
 	return 0;
 }
 
+#define FORCEDETH_REGS_VER	1
+#define FORCEDETH_REGS_SIZE	0x400 /* 256 32-bit registers */
+
+static int nv_get_regs_len(struct net_device *dev)
+{
+	return FORCEDETH_REGS_SIZE;
+}
+
+static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+	u32 *rbuf = buf;
+	int i;
+
+	regs->version = FORCEDETH_REGS_VER;
+	spin_lock_irq(&np->lock);
+	for (i=0;i<FORCEDETH_REGS_SIZE/sizeof(u32);i++)
+		rbuf[i] = readl(base + i*sizeof(u32));
+	spin_unlock_irq(&np->lock);
+}
+
+static int nv_nway_reset(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	int ret;
+
+	spin_lock_irq(&np->lock);
+	if (np->autoneg) {
+		int bmcr;
+
+		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+
+		ret = 0;
+	} else {
+		ret = -EINVAL;
+	}
+	spin_unlock_irq(&np->lock);
+
+	return ret;
+}
+
 static struct ethtool_ops ops = {
 	.get_drvinfo = nv_get_drvinfo,
 	.get_link = ethtool_op_get_link,
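
With get_regs_len/get_regs and nway_reset wired into the ethtool_ops, userspace
can dump the first 0x400 bytes of register space with "ethtool -d <iface>" and
restart autonegotiation with "ethtool -r <iface>" (assuming an ethtool binary
that issues the corresponding ioctls).
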
@@ -1768,6 +2060,9 @@ static struct ethtool_ops ops = {
 	.set_wol = nv_set_wol,
 	.get_settings = nv_get_settings,
 	.set_settings = nv_set_settings,
+	.get_regs_len = nv_get_regs_len,
+	.get_regs = nv_get_regs,
+	.nway_reset = nv_nway_reset,
 };
 
 static int nv_open(struct net_device *dev)
@@ -1792,6 +2087,7 @@ static int nv_open(struct net_device *de
 	writel(0, base + NvRegAdapterControl);
 
 	/* 2) initialize descriptor rings */
+	set_bufsize(dev);
 	oom = nv_init_ring(dev);
 
 	writel(0, base + NvRegLinkSpeed);
@@ -1802,20 +2098,14 @@ static int nv_open(struct net_device *de
 	np->in_shutdown = 0;
 
 	/* 3) set mac address */
-	{
-		u32 mac[2];
-
-		mac[0] = (dev->dev_addr[0] << 0) + (dev->dev_addr[1] << 8) +
-				(dev->dev_addr[2] << 16) + (dev->dev_addr[3] << 24);
-		mac[1] = (dev->dev_addr[4] << 0) + (dev->dev_addr[5] << 8);
-
-		writel(mac[0], base + NvRegMacAddrA);
-		writel(mac[1], base + NvRegMacAddrB);
-	}
+	nv_copy_mac_to_hw(dev);
 
 	/* 4) give hw rings */
 	writel((u32) np->ring_addr, base + NvRegRxRingPhysAddr);
-	writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
+	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+		writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
+	else
+		writel((u32) (np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
 	writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
 		base + NvRegRingSizes);
 
@@ -1837,7 +2127,7 @@ static int nv_open(struct net_device *de
 	writel(NVREG_MISC1_FORCE | NVREG_MISC1_HD, base + NvRegMisc1);
 	writel(readl(base + NvRegTransmitterStatus), base + NvRegTransmitterStatus);
 	writel(NVREG_PFF_ALWAYS, base + NvRegPacketFilterFlags);
-	writel(NVREG_OFFLOAD_NORMAL, base + NvRegOffloadConfig);
+	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
 
 	writel(readl(base + NvRegReceiverStatus), base + NvRegReceiverStatus);
 	get_random_bytes(&i, sizeof(i));
@@ -1942,6 +2232,12 @@ static int nv_close(struct net_device *d
 	if (np->wolenabled)
 		nv_start_rx(dev);
 
+	/* special op: write back the misordered MAC address - otherwise
+	 * the next nv_probe would see a wrong address.
+	 */
+	writel(np->orig_mac[0], base + NvRegMacAddrA);
+	writel(np->orig_mac[1], base + NvRegMacAddrB);
+
 	/* FIXME: power down nic */
 
 	return 0;
@@ -2006,32 +2302,55 @@ static int __devinit nv_probe(struct pci
 	}
 
 	/* handle different descriptor versions */
-	if (pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_1 ||
-		pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_2 ||
-		pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_3 ||    
-		pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_12 ||
-		pci_dev->device == PCI_DEVICE_ID_NVIDIA_NVENET_13)
-		np->desc_ver = DESC_VER_1;
-	else
+	if (id->driver_data & DEV_HAS_HIGH_DMA) {
+		/* packet format 3: supports 40-bit addressing */
+		np->desc_ver = DESC_VER_3;
+		if (pci_set_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
+			printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
+					pci_name(pci_dev));
+		}
+	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
+		/* packet format 2: supports jumbo frames */
 		np->desc_ver = DESC_VER_2;
+	} else {
+		/* original packet format */
+		np->desc_ver = DESC_VER_1;
+	}
+
+	np->pkt_limit = NV_PKTLIMIT_1;
+	if (id->driver_data & DEV_HAS_LARGEDESC)
+		np->pkt_limit = NV_PKTLIMIT_2;
 
 	err = -ENOMEM;
 	np->base = ioremap(addr, NV_PCI_REGSZ);
 	if (!np->base)
 		goto out_relreg;
 	dev->base_addr = (unsigned long)np->base;
+
 	dev->irq = pci_dev->irq;
-	np->rx_ring = pci_alloc_consistent(pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
-						&np->ring_addr);
-	if (!np->rx_ring)
-		goto out_unmap;
-	np->tx_ring = &np->rx_ring[RX_RING];
+
+	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+		np->rx_ring.orig = pci_alloc_consistent(pci_dev,
+					sizeof(struct ring_desc) * (RX_RING + TX_RING),
+					&np->ring_addr);
+		if (!np->rx_ring.orig)
+			goto out_unmap;
+		np->tx_ring.orig = &np->rx_ring.orig[RX_RING];
+	} else {
+		np->rx_ring.ex = pci_alloc_consistent(pci_dev,
+					sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
+					&np->ring_addr);
+		if (!np->rx_ring.ex)
+			goto out_unmap;
+		np->tx_ring.ex = &np->rx_ring.ex[RX_RING];
+	}
 
 	dev->open = nv_open;
 	dev->stop = nv_close;
 	dev->hard_start_xmit = nv_start_xmit;
 	dev->get_stats = nv_get_stats;
 	dev->change_mtu = nv_change_mtu;
+	dev->set_mac_address = nv_set_mac_address;
 	dev->set_multicast_list = nv_set_multicast;
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	dev->poll_controller = nv_poll_controller;
@@ -2080,17 +2399,10 @@ static int __devinit nv_probe(struct pci
 
 	if (np->desc_ver == DESC_VER_1) {
 		np->tx_flags = NV_TX_LASTPACKET|NV_TX_VALID;
-		if (id->driver_data & DEV_NEED_LASTPACKET1)
-			np->tx_flags |= NV_TX_LASTPACKET1;
 	} else {
 		np->tx_flags = NV_TX2_LASTPACKET|NV_TX2_VALID;
-		if (id->driver_data & DEV_NEED_LASTPACKET1)
-			np->tx_flags |= NV_TX2_LASTPACKET1;
 	}
-	if (id->driver_data & DEV_IRQMASK_1)
-		np->irqmask = NVREG_IRQMASK_WANTED_1;
-	if (id->driver_data & DEV_IRQMASK_2)
-		np->irqmask = NVREG_IRQMASK_WANTED_2;
+	np->irqmask = NVREG_IRQMASK_WANTED;
 	if (id->driver_data & DEV_NEED_TIMERIRQ)
 		np->irqmask |= NVREG_IRQ_TIMER;
 	if (id->driver_data & DEV_NEED_LINKTIMER) {
@@ -2155,8 +2467,12 @@ static int __devinit nv_probe(struct pci
 	return 0;
 
 out_freering:
-	pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
-				np->rx_ring, np->ring_addr);
+	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+		pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
+				    np->rx_ring.orig, np->ring_addr);
+	else
+		pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
+				    np->rx_ring.ex, np->ring_addr);
 	pci_set_drvdata(pci_dev, NULL);
 out_unmap:
 	iounmap(get_hwbase(dev));
@@ -2174,18 +2490,14 @@ static void __devexit nv_remove(struct p
 {
 	struct net_device *dev = pci_get_drvdata(pci_dev);
 	struct fe_priv *np = get_nvpriv(dev);
-	u8 __iomem *base = get_hwbase(dev);
 
 	unregister_netdev(dev);
 
-	/* special op: write back the misordered MAC address - otherwise
-	 * the next nv_probe would see a wrong address.
-	 */
-	writel(np->orig_mac[0], base + NvRegMacAddrA);
-	writel(np->orig_mac[1], base + NvRegMacAddrB);
-
 	/* free all structures */
-	pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), np->rx_ring, np->ring_addr);
+	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
+		pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), np->rx_ring.orig, np->ring_addr);
+	else
+		pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING), np->rx_ring.ex, np->ring_addr);
 	iounmap(get_hwbase(dev));
 	pci_release_regions(pci_dev);
 	pci_disable_device(pci_dev);
@@ -2195,109 +2507,64 @@ static void __devexit nv_remove(struct p
 
 static struct pci_device_id pci_tbl[] = {
 	{	/* nForce Ethernet Controller */
-		.vendor = PCI_VENDOR_ID_NVIDIA,
-		.device = PCI_DEVICE_ID_NVIDIA_NVENET_1,
-		.subvendor = PCI_ANY_ID,
-		.subdevice = PCI_ANY_ID,
-		.driver_data = DEV_IRQMASK_1|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_1),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
 	},
 	{	/* nForce2 Ethernet Controller */
-		.vendor = PCI_VENDOR_ID_NVIDIA,
-		.device = PCI_DEVICE_ID_NVIDIA_NVENET_2,
-		.subvendor = PCI_ANY_ID,
-		.subdevice = PCI_ANY_ID,
-		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_2),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
 	},
 	{	/* nForce3 Ethernet Controller */
-		.vendor = PCI_VENDOR_ID_NVIDIA,
-		.device = PCI_DEVICE_ID_NVIDIA_NVENET_3,
-		.subvendor = PCI_ANY_ID,
-		.subdevice = PCI_ANY_ID,
-		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_3),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
 	},
 	{	/* nForce3 Ethernet Controller */
-		.vendor = PCI_VENDOR_ID_NVIDIA,
-		.device = PCI_DEVICE_ID_NVIDIA_NVENET_4,
-		.subvendor = PCI_ANY_ID,
-		.subdevice = PCI_ANY_ID,
-		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_4),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
 	},
 	{	/* nForce3 Ethernet Controller */
-		.vendor = PCI_VENDOR_ID_NVIDIA,
-		.device = PCI_DEVICE_ID_NVIDIA_NVENET_5,
-		.subvendor = PCI_ANY_ID,
-		.subdevice = PCI_ANY_ID,
-		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_5),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
 	},
 	{	/* nForce3 Ethernet Controller */
-		.vendor = PCI_VENDOR_ID_NVIDIA,
-		.device = PCI_DEVICE_ID_NVIDIA_NVENET_6,
-		.subvendor = PCI_ANY_ID,
-		.subdevice = PCI_ANY_ID,
-		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_6),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
 	},
 	{	/* nForce3 Ethernet Controller */
-		.vendor = PCI_VENDOR_ID_NVIDIA,
-		.device = PCI_DEVICE_ID_NVIDIA_NVENET_7,
-		.subvendor = PCI_ANY_ID,
-		.subdevice = PCI_ANY_ID,
-		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_7),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC,
 	},
 	{	/* CK804 Ethernet Controller */
-		.vendor = PCI_VENDOR_ID_NVIDIA,
-		.device = PCI_DEVICE_ID_NVIDIA_NVENET_8,
-		.subvendor = PCI_ANY_ID,
-		.subdevice = PCI_ANY_ID,
-		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_8),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
 	},
 	{	/* CK804 Ethernet Controller */
-		.vendor = PCI_VENDOR_ID_NVIDIA,
-		.device = PCI_DEVICE_ID_NVIDIA_NVENET_9,
-		.subvendor = PCI_ANY_ID,
-		.subdevice = PCI_ANY_ID,
-		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_9),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
 	},
 	{	/* MCP04 Ethernet Controller */
-		.vendor = PCI_VENDOR_ID_NVIDIA,
-		.device = PCI_DEVICE_ID_NVIDIA_NVENET_10,
-		.subvendor = PCI_ANY_ID,
-		.subdevice = PCI_ANY_ID,
-		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_10),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
 	},
 	{	/* MCP04 Ethernet Controller */
-		.vendor = PCI_VENDOR_ID_NVIDIA,
-		.device = PCI_DEVICE_ID_NVIDIA_NVENET_11,
-		.subvendor = PCI_ANY_ID,
-		.subdevice = PCI_ANY_ID,
-		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_11),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
 	},
 	{	/* MCP51 Ethernet Controller */
-		.vendor = PCI_VENDOR_ID_NVIDIA,
-		.device = PCI_DEVICE_ID_NVIDIA_NVENET_12,
-		.subvendor = PCI_ANY_ID,
-		.subdevice = PCI_ANY_ID,
-		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_12),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA,
 	},
 	{	/* MCP51 Ethernet Controller */
-		.vendor = PCI_VENDOR_ID_NVIDIA,
-		.device = PCI_DEVICE_ID_NVIDIA_NVENET_13,
-		.subvendor = PCI_ANY_ID,
-		.subdevice = PCI_ANY_ID,
-		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_13),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA,
 	},
 	{	/* MCP55 Ethernet Controller */
-		.vendor = PCI_VENDOR_ID_NVIDIA,
-		.device = PCI_DEVICE_ID_NVIDIA_NVENET_14,
-		.subvendor = PCI_ANY_ID,
-		.subdevice = PCI_ANY_ID,
-		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
 	},
 	{	/* MCP55 Ethernet Controller */
-		.vendor = PCI_VENDOR_ID_NVIDIA,
-		.device = PCI_DEVICE_ID_NVIDIA_NVENET_15,
-		.subvendor = PCI_ANY_ID,
-		.subdevice = PCI_ANY_ID,
-		.driver_data = DEV_NEED_LASTPACKET1|DEV_IRQMASK_2|DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA,
 	},
 	{0,},
 };
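
The table entries above shrink because PCI_DEVICE() fills in the wildcard
subvendor/subdevice fields itself; its definition in <linux/pci.h> is
essentially:

    #define PCI_DEVICE(vend, dev) \
        .vendor = (vend), .device = (dev), \
        .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
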
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -54,6 +54,7 @@
 #include <linux/kmod.h>
 #include <linux/hdlcdrv.h>
 #include <linux/baycom.h>
+#include <linux/jiffies.h>
 #if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
 /* prototypes for ax25_encapsulate and ax25_rebuild_header */
 #include <net/ax25.h> 
@@ -287,7 +288,7 @@ static inline void baycom_int_freq(struc
 	 * measure the interrupt frequency
 	 */
 	bc->debug_vals.cur_intcnt++;
-	if ((cur_jiffies - bc->debug_vals.last_jiffies) >= HZ) {
+	if (time_after_eq(cur_jiffies, bc->debug_vals.last_jiffies + HZ)) {
 		bc->debug_vals.last_jiffies = cur_jiffies;
 		bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt;
 		bc->debug_vals.cur_intcnt = 0;
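
This, and the matching conversions in the other baycom variants and mkiss
below, replace open-coded jiffies arithmetic with the helpers from
<linux/jiffies.h>, which keep the wraparound-safe comparison in one place.
Ignoring their typecheck() wrappers, the helpers boil down to:

    #define time_after(a, b)     ((long)(b) - (long)(a) < 0)
    #define time_after_eq(a, b)  ((long)(a) - (long)(b) >= 0)
    #define time_before(a, b)    time_after(b, a)
    #define time_before_eq(a, b) time_after_eq(b, a)
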
diff --git a/drivers/net/hamradio/baycom_par.c b/drivers/net/hamradio/baycom_par.c
--- a/drivers/net/hamradio/baycom_par.c
+++ b/drivers/net/hamradio/baycom_par.c
@@ -84,6 +84,7 @@
 #include <linux/baycom.h>
 #include <linux/parport.h>
 #include <linux/bitops.h>
+#include <linux/jiffies.h>
 
 #include <asm/bug.h>
 #include <asm/system.h>
@@ -165,7 +166,7 @@ static void __inline__ baycom_int_freq(s
 	 * measure the interrupt frequency
 	 */
 	bc->debug_vals.cur_intcnt++;
-	if ((cur_jiffies - bc->debug_vals.last_jiffies) >= HZ) {
+	if (time_after_eq(cur_jiffies, bc->debug_vals.last_jiffies + HZ)) {
 		bc->debug_vals.last_jiffies = cur_jiffies;
 		bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt;
 		bc->debug_vals.cur_intcnt = 0;
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c
--- a/drivers/net/hamradio/baycom_ser_fdx.c
+++ b/drivers/net/hamradio/baycom_ser_fdx.c
@@ -79,6 +79,7 @@
 #include <asm/io.h>
 #include <linux/hdlcdrv.h>
 #include <linux/baycom.h>
+#include <linux/jiffies.h>
 
 /* --------------------------------------------------------------------- */
 
@@ -159,7 +160,7 @@ static inline void baycom_int_freq(struc
 	 * measure the interrupt frequency
 	 */
 	bc->debug_vals.cur_intcnt++;
-	if ((cur_jiffies - bc->debug_vals.last_jiffies) >= HZ) {
+	if (time_after_eq(cur_jiffies, bc->debug_vals.last_jiffies + HZ)) {
 		bc->debug_vals.last_jiffies = cur_jiffies;
 		bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt;
 		bc->debug_vals.cur_intcnt = 0;
diff --git a/drivers/net/hamradio/baycom_ser_hdx.c b/drivers/net/hamradio/baycom_ser_hdx.c
--- a/drivers/net/hamradio/baycom_ser_hdx.c
+++ b/drivers/net/hamradio/baycom_ser_hdx.c
@@ -69,6 +69,7 @@
 #include <asm/io.h>
 #include <linux/hdlcdrv.h>
 #include <linux/baycom.h>
+#include <linux/jiffies.h>
 
 /* --------------------------------------------------------------------- */
 
@@ -150,7 +151,7 @@ static inline void baycom_int_freq(struc
 	 * measure the interrupt frequency
 	 */
 	bc->debug_vals.cur_intcnt++;
-	if ((cur_jiffies - bc->debug_vals.last_jiffies) >= HZ) {
+	if (time_after_eq(cur_jiffies, bc->debug_vals.last_jiffies + HZ)) {
 		bc->debug_vals.last_jiffies = cur_jiffies;
 		bc->debug_vals.last_intcnt = bc->debug_vals.cur_intcnt;
 		bc->debug_vals.cur_intcnt = 0;
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -46,6 +46,7 @@
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
 #include <linux/if_arp.h>
+#include <linux/jiffies.h>
 
 #include <net/ax25.h>
 
@@ -429,7 +430,7 @@ static int ax_xmit(struct sk_buff *skb, 
 		 * May be we must check transmitter timeout here ?
 		 *      14 Oct 1994 Dmitry Gorodchanin.
 		 */
-		if (jiffies - dev->trans_start  < 20 * HZ) {
+		if (time_before(jiffies, dev->trans_start + 20 * HZ)) {
 			/* 20 sec timeout not reached */
 			return 1;
 		}
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -119,7 +119,7 @@ struct ixgb_adapter;
  * so a DMA handle can be stored along with the buffer */
 struct ixgb_buffer {
 	struct sk_buff *skb;
-	uint64_t dma;
+	dma_addr_t dma;
 	unsigned long time_stamp;
 	uint16_t length;
 	uint16_t next_to_watch;
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
--- a/drivers/net/ixgb/ixgb_ee.c
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -565,24 +565,6 @@ ixgb_get_ee_mac_addr(struct ixgb_hw *hw,
 	}
 }
 
-/******************************************************************************
- * return the compatibility flags from EEPROM
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Returns:
- *          compatibility flags if EEPROM contents are valid, 0 otherwise
- ******************************************************************************/
-uint16_t
-ixgb_get_ee_compatibility(struct ixgb_hw *hw)
-{
-	struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
-
-	if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
-		return (le16_to_cpu(ee_map->compatibility));
-
-	return(0);
-}
 
 /******************************************************************************
  * return the Printed Board Assembly number from EEPROM
@@ -602,81 +584,6 @@ ixgb_get_ee_pba_number(struct ixgb_hw *h
 	return(0);
 }
 
-/******************************************************************************
- * return the Initialization Control Word 1 from EEPROM
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Returns:
- *          Initialization Control Word 1 if EEPROM contents are valid, 0 otherwise
- ******************************************************************************/
-uint16_t
-ixgb_get_ee_init_ctrl_reg_1(struct ixgb_hw *hw)
-{
-	struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
-
-	if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
-		return (le16_to_cpu(ee_map->init_ctrl_reg_1));
-
-	return(0);
-}
-
-/******************************************************************************
- * return the Initialization Control Word 2 from EEPROM
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Returns:
- *          Initialization Control Word 2 if EEPROM contents are valid, 0 otherwise
- ******************************************************************************/
-uint16_t
-ixgb_get_ee_init_ctrl_reg_2(struct ixgb_hw *hw)
-{
-	struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
-
-	if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
-		return (le16_to_cpu(ee_map->init_ctrl_reg_2));
-
-	return(0);
-}
-
-/******************************************************************************
- * return the Subsystem Id from EEPROM
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Returns:
- *          Subsystem Id if EEPROM contents are valid, 0 otherwise
- ******************************************************************************/
-uint16_t
-ixgb_get_ee_subsystem_id(struct ixgb_hw *hw)
-{
-	struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
-
-	if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
-		return (le16_to_cpu(ee_map->subsystem_id));
-
-	return(0);
-}
-
-/******************************************************************************
- * return the Sub Vendor Id from EEPROM
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Returns:
- *          Sub Vendor Id if EEPROM contents are valid, 0 otherwise
- ******************************************************************************/
-uint16_t
-ixgb_get_ee_subvendor_id(struct ixgb_hw *hw)
-{
-	struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
-
-	if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
-		return (le16_to_cpu(ee_map->subvendor_id));
-
-	return(0);
-}
 
 /******************************************************************************
  * return the Device Id from EEPROM
@@ -694,81 +601,6 @@ ixgb_get_ee_device_id(struct ixgb_hw *hw
 	if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
 		return (le16_to_cpu(ee_map->device_id));
 
-	return(0);
-}
-
-/******************************************************************************
- * return the Vendor Id from EEPROM
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Returns:
- *          Device Id if EEPROM contents are valid, 0 otherwise
- ******************************************************************************/
-uint16_t
-ixgb_get_ee_vendor_id(struct ixgb_hw *hw)
-{
-	struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
-
-	if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
-		return (le16_to_cpu(ee_map->vendor_id));
-
-	return(0);
-}
-
-/******************************************************************************
- * return the Software Defined Pins Register from EEPROM
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Returns:
- *          SDP Register if EEPROM contents are valid, 0 otherwise
- ******************************************************************************/
-uint16_t
-ixgb_get_ee_swdpins_reg(struct ixgb_hw *hw)
-{
-	struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
-
-	if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
-		return (le16_to_cpu(ee_map->swdpins_reg));
-
-	return(0);
+	return (0);
 }
 
-/******************************************************************************
- * return the D3 Power Management Bits from EEPROM
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Returns:
- *          D3 Power Management Bits if EEPROM contents are valid, 0 otherwise
- ******************************************************************************/
-uint8_t
-ixgb_get_ee_d3_power(struct ixgb_hw *hw)
-{
-	struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
-
-	if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
-		return (le16_to_cpu(ee_map->d3_power));
-
-	return(0);
-}
-
-/******************************************************************************
- * return the D0 Power Management Bits from EEPROM
- *
- * hw - Struct containing variables accessed by shared code
- *
- * Returns:
- *          D0 Power Management Bits if EEPROM contents are valid, 0 otherwise
- ******************************************************************************/
-uint8_t
-ixgb_get_ee_d0_power(struct ixgb_hw *hw)
-{
-	struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
-
-	if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
-		return (le16_to_cpu(ee_map->d0_power));
-
-	return(0);
-}
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -98,10 +98,10 @@ static struct ixgb_stats ixgb_gstrings_s
 static int
 ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 {
-	struct ixgb_adapter *adapter = netdev->priv;
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
 
 	ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
-	ecmd->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
+	ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
 	ecmd->port = PORT_FIBRE;
 	ecmd->transceiver = XCVR_EXTERNAL;
 
@@ -120,7 +120,7 @@ ixgb_get_settings(struct net_device *net
 static int
 ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 {
-	struct ixgb_adapter *adapter = netdev->priv;
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
 
 	if(ecmd->autoneg == AUTONEG_ENABLE ||
 	   ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)
@@ -130,6 +130,12 @@ ixgb_set_settings(struct net_device *net
 		ixgb_down(adapter, TRUE);
 		ixgb_reset(adapter);
 		ixgb_up(adapter);
+		/* be optimistic about our link, since we were up before */
+		adapter->link_speed = 10000;
+		adapter->link_duplex = FULL_DUPLEX;
+		netif_carrier_on(netdev);
+		netif_wake_queue(netdev);
+		
 	} else
 		ixgb_reset(adapter);
 
@@ -140,7 +146,7 @@ static void
 ixgb_get_pauseparam(struct net_device *netdev,
 			 struct ethtool_pauseparam *pause)
 {
-	struct ixgb_adapter *adapter = netdev->priv;
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
 	struct ixgb_hw *hw = &adapter->hw;
 	
 	pause->autoneg = AUTONEG_DISABLE;
@@ -159,7 +165,7 @@ static int
 ixgb_set_pauseparam(struct net_device *netdev,
 			 struct ethtool_pauseparam *pause)
 {
-	struct ixgb_adapter *adapter = netdev->priv;
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
 	struct ixgb_hw *hw = &adapter->hw;
 	
 	if(pause->autoneg == AUTONEG_ENABLE)
@@ -177,6 +183,11 @@ ixgb_set_pauseparam(struct net_device *n
 	if(netif_running(adapter->netdev)) {
 		ixgb_down(adapter, TRUE);
 		ixgb_up(adapter);
+		/* be optimistic about our link, since we were up before */
+		adapter->link_speed = 10000;
+		adapter->link_duplex = FULL_DUPLEX;
+		netif_carrier_on(netdev);
+		netif_wake_queue(netdev);
 	} else
 		ixgb_reset(adapter);
 		
@@ -186,19 +197,26 @@ ixgb_set_pauseparam(struct net_device *n
 static uint32_t
 ixgb_get_rx_csum(struct net_device *netdev)
 {
-	struct ixgb_adapter *adapter = netdev->priv;
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
+
 	return adapter->rx_csum;
 }
 
 static int
 ixgb_set_rx_csum(struct net_device *netdev, uint32_t data)
 {
-	struct ixgb_adapter *adapter = netdev->priv;
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
+
 	adapter->rx_csum = data;
 
 	if(netif_running(netdev)) {
 		ixgb_down(adapter,TRUE);
 		ixgb_up(adapter);
+		/* be optimistic about our link, since we were up before */
+		adapter->link_speed = 10000;
+		adapter->link_duplex = FULL_DUPLEX;
+		netif_carrier_on(netdev);
+		netif_wake_queue(netdev);
 	} else
 		ixgb_reset(adapter);
 	return 0;
@@ -246,14 +264,15 @@ static void
 ixgb_get_regs(struct net_device *netdev,
 		   struct ethtool_regs *regs, void *p)
 {
-	struct ixgb_adapter *adapter = netdev->priv;
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
 	struct ixgb_hw *hw = &adapter->hw;
 	uint32_t *reg = p;
 	uint32_t *reg_start = reg;
 	uint8_t i;
 
 	/* the 1 (one) below indicates an attempt at versioning, if the
-	 * interface in ethtool or the driver this 1 should be incremented */
+	 * interface in ethtool or the driver changes, this 1 should be
+	 * incremented */
 	regs->version = (1<<24) | hw->revision_id << 16 | hw->device_id;
 
 	/* General Registers */
@@ -283,7 +302,8 @@ ixgb_get_regs(struct net_device *netdev,
 	*reg++ = IXGB_READ_REG(hw, RAIDC);	/*  19 */
 	*reg++ = IXGB_READ_REG(hw, RXCSUM);	/*  20 */
 
-	for (i = 0; i < IXGB_RAR_ENTRIES; i++) {
+	/* there are 16 RAR entries in hardware, we only use 3 */
+	for(i = 0; i < 16; i++) {
 		*reg++ = IXGB_READ_REG_ARRAY(hw, RAL, (i << 1)); /*21,...,51 */
 		*reg++ = IXGB_READ_REG_ARRAY(hw, RAH, (i << 1)); /*22,...,52 */
 	}
@@ -391,7 +411,7 @@ static int
 ixgb_get_eeprom(struct net_device *netdev,
 		  struct ethtool_eeprom *eeprom, uint8_t *bytes)
 {
-	struct ixgb_adapter *adapter = netdev->priv;
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
 	struct ixgb_hw *hw = &adapter->hw;
 	uint16_t *eeprom_buff;
 	int i, max_len, first_word, last_word;
@@ -439,7 +459,7 @@ static int
 ixgb_set_eeprom(struct net_device *netdev,
 		  struct ethtool_eeprom *eeprom, uint8_t *bytes)
 {
-	struct ixgb_adapter *adapter = netdev->priv;
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
 	struct ixgb_hw *hw = &adapter->hw;
 	uint16_t *eeprom_buff;
 	void *ptr;
@@ -497,7 +517,7 @@ static void
 ixgb_get_drvinfo(struct net_device *netdev,
 		   struct ethtool_drvinfo *drvinfo)
 {
-	struct ixgb_adapter *adapter = netdev->priv;
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
 
 	strncpy(drvinfo->driver,  ixgb_driver_name, 32);
 	strncpy(drvinfo->version, ixgb_driver_version, 32);
@@ -512,7 +532,7 @@ static void
 ixgb_get_ringparam(struct net_device *netdev,
 		struct ethtool_ringparam *ring)
 {
-	struct ixgb_adapter *adapter = netdev->priv;
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
 	struct ixgb_desc_ring *txdr = &adapter->tx_ring;
 	struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
 
@@ -530,7 +550,7 @@ static int 
 ixgb_set_ringparam(struct net_device *netdev,
 		struct ethtool_ringparam *ring)
 {
-	struct ixgb_adapter *adapter = netdev->priv;
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
 	struct ixgb_desc_ring *txdr = &adapter->tx_ring;
 	struct ixgb_desc_ring *rxdr = &adapter->rx_ring;
 	struct ixgb_desc_ring tx_old, tx_new, rx_old, rx_new;
@@ -573,6 +593,11 @@ ixgb_set_ringparam(struct net_device *ne
 		adapter->tx_ring = tx_new;
 		if((err = ixgb_up(adapter)))
 			return err;
+		/* be optimistic about our link, since we were up before */
+		adapter->link_speed = 10000;
+		adapter->link_duplex = FULL_DUPLEX;
+		netif_carrier_on(netdev);
+		netif_wake_queue(netdev);
 	}
 
 	return 0;
@@ -607,7 +632,7 @@ ixgb_led_blink_callback(unsigned long da
 static int
 ixgb_phys_id(struct net_device *netdev, uint32_t data)
 {
-	struct ixgb_adapter *adapter = netdev->priv;
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
 
 	if(!data || data > (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ))
 		data = (uint32_t)(MAX_SCHEDULE_TIMEOUT / HZ);
@@ -643,7 +668,7 @@ static void 
 ixgb_get_ethtool_stats(struct net_device *netdev, 
 		struct ethtool_stats *stats, uint64_t *data)
 {
-	struct ixgb_adapter *adapter = netdev->priv;
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
 	int i;
 
 	ixgb_update_stats(adapter);
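
The netdev->priv to netdev_priv() conversions throughout this file rely on the
private area being allocated in the same block as the net_device, which is
what alloc_etherdev(sizeof(...)) arranges; netdev_priv() is the accessor for
that area.  A minimal sketch with hypothetical names:

	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>

	struct example_priv {
		int dummy;
	};

	/* Hypothetical allocation path: the private struct lives inside the
	 * net_device allocation, so netdev_priv() is used instead of
	 * dereferencing netdev->priv. */
	static struct net_device *example_alloc(void)
	{
		struct net_device *netdev;
		struct example_priv *priv;

		netdev = alloc_etherdev(sizeof(struct example_priv));
		if (!netdev)
			return NULL;

		priv = netdev_priv(netdev);
		priv->dummy = 0;
		return netdev;
	}
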
diff --git a/drivers/net/ixgb/ixgb_hw.h b/drivers/net/ixgb/ixgb_hw.h
--- a/drivers/net/ixgb/ixgb_hw.h
+++ b/drivers/net/ixgb/ixgb_hw.h
@@ -822,17 +822,8 @@ extern void ixgb_clear_vfta(struct ixgb_
 
 /* Access functions to eeprom data */
 void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, uint8_t *mac_addr);
-uint16_t ixgb_get_ee_compatibility(struct ixgb_hw *hw);
 uint32_t ixgb_get_ee_pba_number(struct ixgb_hw *hw);
-uint16_t ixgb_get_ee_init_ctrl_reg_1(struct ixgb_hw *hw);
-uint16_t ixgb_get_ee_init_ctrl_reg_2(struct ixgb_hw *hw);
-uint16_t ixgb_get_ee_subsystem_id(struct ixgb_hw *hw);
-uint16_t ixgb_get_ee_subvendor_id(struct ixgb_hw *hw);
 uint16_t ixgb_get_ee_device_id(struct ixgb_hw *hw);
-uint16_t ixgb_get_ee_vendor_id(struct ixgb_hw *hw);
-uint16_t ixgb_get_ee_swdpins_reg(struct ixgb_hw *hw);
-uint8_t ixgb_get_ee_d3_power(struct ixgb_hw *hw);
-uint8_t ixgb_get_ee_d0_power(struct ixgb_hw *hw);
 boolean_t ixgb_get_eeprom_data(struct ixgb_hw *hw);
 uint16_t ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index);
 
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -29,6 +29,11 @@
 #include "ixgb.h"
 
 /* Change Log
+ * 1.0.96 04/19/05
+ * - Make needlessly global code static -- bunk@stusta.de
+ * - ethtool cleanup -- shemminger@osdl.org
+ * - Support for MODULE_VERSION -- linville@tuxdriver.com
+ * - add skb_header_cloned check to the tso path -- herbert@apana.org.au
  * 1.0.88 01/05/05
  * - include fix to the condition that determines when to quit NAPI - Robert Olsson
  * - use netif_poll_{disable/enable} to synchronize between NAPI and i/f up/down
@@ -47,10 +52,9 @@ char ixgb_driver_string[] = "Intel(R) PR
 #else
 #define DRIVERNAPI "-NAPI"
 #endif
-
-#define DRV_VERSION "1.0.95-k2"DRIVERNAPI
+#define DRV_VERSION		"1.0.100-k2"DRIVERNAPI
 char ixgb_driver_version[] = DRV_VERSION;
-char ixgb_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
+static char ixgb_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
 
 /* ixgb_pci_tbl - PCI Device ID Table
  *
@@ -145,10 +149,12 @@ MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
 /* some defines for controlling descriptor fetches in h/w */
-#define RXDCTL_PTHRESH_DEFAULT 128	/* chip considers prefech below this */
-#define RXDCTL_HTHRESH_DEFAULT 16	/* chip will only prefetch if tail is 
-					   pushed this many descriptors from head */
 #define RXDCTL_WTHRESH_DEFAULT 16	/* chip writes back at this many or RXT0 */
+#define RXDCTL_PTHRESH_DEFAULT 0		/* chip considers prefetch below
+						 * this */
+#define RXDCTL_HTHRESH_DEFAULT 0		/* chip will only prefetch if tail
+						 * is pushed this many descriptors
+						 * from head */
 
 /**
  * ixgb_init_module - Driver Registration Routine
@@ -376,7 +382,7 @@ ixgb_probe(struct pci_dev *pdev,
 	SET_NETDEV_DEV(netdev, &pdev->dev);
 
 	pci_set_drvdata(pdev, netdev);
-	adapter = netdev->priv;
+	adapter = netdev_priv(netdev);
 	adapter->netdev = netdev;
 	adapter->pdev = pdev;
 	adapter->hw.back = adapter;
@@ -512,7 +518,7 @@ static void __devexit
 ixgb_remove(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
-	struct ixgb_adapter *adapter = netdev->priv;
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
 
 	unregister_netdev(netdev);
 
@@ -583,7 +589,7 @@ ixgb_sw_init(struct ixgb_adapter *adapte
 static int
 ixgb_open(struct net_device *netdev)
 {
-	struct ixgb_adapter *adapter = netdev->priv;
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
 	int err;
 
 	/* allocate transmit descriptors */
@@ -626,7 +632,7 @@ err_setup_tx:
 static int
 ixgb_close(struct net_device *netdev)
 {
-	struct ixgb_adapter *adapter = netdev->priv;
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
 
 	ixgb_down(adapter, TRUE);
 
@@ -1017,7 +1023,7 @@ ixgb_clean_rx_ring(struct ixgb_adapter *
 static int
 ixgb_set_mac(struct net_device *netdev, void *p)
 {
-	struct ixgb_adapter *adapter = netdev->priv;
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
 	struct sockaddr *addr = p;
 
 	if(!is_valid_ether_addr(addr->sa_data))
@@ -1043,7 +1049,7 @@ ixgb_set_mac(struct net_device *netdev, 
 static void
 ixgb_set_multi(struct net_device *netdev)
 {
-	struct ixgb_adapter *adapter = netdev->priv;
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
 	struct ixgb_hw *hw = &adapter->hw;
 	struct dev_mc_list *mc_ptr;
 	uint32_t rctl;
@@ -1371,7 +1377,7 @@ ixgb_tx_queue(struct ixgb_adapter *adapt
 static int
 ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
-	struct ixgb_adapter *adapter = netdev->priv;
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
 	unsigned int first;
 	unsigned int tx_flags = 0;
 	unsigned long flags;
@@ -1425,7 +1431,7 @@ ixgb_xmit_frame(struct sk_buff *skb, str
 static void
 ixgb_tx_timeout(struct net_device *netdev)
 {
-	struct ixgb_adapter *adapter = netdev->priv;
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
 
 	/* Do the reset outside of interrupt context */
 	schedule_work(&adapter->tx_timeout_task);
@@ -1434,7 +1440,7 @@ ixgb_tx_timeout(struct net_device *netde
 static void
 ixgb_tx_timeout_task(struct net_device *netdev)
 {
-	struct ixgb_adapter *adapter = netdev->priv;
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
 
 	ixgb_down(adapter, TRUE);
 	ixgb_up(adapter);
@@ -1451,7 +1457,7 @@ ixgb_tx_timeout_task(struct net_device *
 static struct net_device_stats *
 ixgb_get_stats(struct net_device *netdev)
 {
-	struct ixgb_adapter *adapter = netdev->priv;
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
 
 	return &adapter->net_stats;
 }
@@ -1467,7 +1473,7 @@ ixgb_get_stats(struct net_device *netdev
 static int
 ixgb_change_mtu(struct net_device *netdev, int new_mtu)
 {
-	struct ixgb_adapter *adapter = netdev->priv;
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
 	int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
 	int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
 
@@ -1522,7 +1528,8 @@ ixgb_update_stats(struct ixgb_adapter *a
 
 		multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
 		/* fix up multicast stats by removing broadcasts */
-		multi -= bcast;
+		if(multi >= bcast)
+			multi -= bcast;
 		
 		adapter->stats.mprcl += (multi & 0xFFFFFFFF);
 		adapter->stats.mprch += (multi >> 32);
@@ -1641,7 +1648,7 @@ static irqreturn_t
 ixgb_intr(int irq, void *data, struct pt_regs *regs)
 {
 	struct net_device *netdev = data;
-	struct ixgb_adapter *adapter = netdev->priv;
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
 	struct ixgb_hw *hw = &adapter->hw;
 	uint32_t icr = IXGB_READ_REG(hw, ICR);
 #ifndef CONFIG_IXGB_NAPI
@@ -1688,7 +1695,7 @@ ixgb_intr(int irq, void *data, struct pt
 static int
 ixgb_clean(struct net_device *netdev, int *budget)
 {
-	struct ixgb_adapter *adapter = netdev->priv;
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
 	int work_to_do = min(*budget, netdev->quota);
 	int tx_cleaned;
 	int work_done = 0;
@@ -2017,7 +2024,7 @@ ixgb_alloc_rx_buffers(struct ixgb_adapte
 static void
 ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 {
-	struct ixgb_adapter *adapter = netdev->priv;
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
 	uint32_t ctrl, rctl;
 
 	ixgb_irq_disable(adapter);
@@ -2055,7 +2062,7 @@ ixgb_vlan_rx_register(struct net_device 
 static void
 ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
 {
-	struct ixgb_adapter *adapter = netdev->priv;
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
 	uint32_t vfta, index;
 
 	/* add VID to filter table */
@@ -2069,7 +2076,7 @@ ixgb_vlan_rx_add_vid(struct net_device *
 static void
 ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
 {
-	struct ixgb_adapter *adapter = netdev->priv;
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
 	uint32_t vfta, index;
 
 	ixgb_irq_disable(adapter);
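
One of the ixgb_main.c fixes above guards the multicast/broadcast subtraction;
with unsigned 64-bit counters read from hardware, an unchecked "multi -= bcast"
would wrap to a huge value whenever broadcasts momentarily exceeded the
multicast count.  A tiny sketch of the guarded form, with hypothetical names:

	#include <linux/types.h>

	/* Hypothetical helper: subtract broadcasts from the multicast count
	 * only when the result cannot underflow. */
	static u64 example_multicast_minus_broadcast(u64 multi, u64 bcast)
	{
		if (multi >= bcast)
			multi -= bcast;
		return multi;
	}
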
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -68,6 +68,7 @@ static DEFINE_PER_CPU(struct net_device_
  * of largesending device modulo TCP checksum, which is ignored for loopback.
  */
 
+#ifdef LOOPBACK_TSO
 static void emulate_large_send_offload(struct sk_buff *skb)
 {
 	struct iphdr *iph = skb->nh.iph;
@@ -119,6 +120,7 @@ static void emulate_large_send_offload(s
 
 	dev_kfree_skb(skb);
 }
+#endif /* LOOPBACK_TSO */
 
 /*
  * The higher levels take care of making this non-reentrant (it's
@@ -130,12 +132,13 @@ static int loopback_xmit(struct sk_buff 
 
 	skb_orphan(skb);
 
-	skb->protocol=eth_type_trans(skb,dev);
-	skb->dev=dev;
+	skb->protocol = eth_type_trans(skb,dev);
+	skb->dev = dev;
 #ifndef LOOPBACK_MUST_CHECKSUM
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
 #endif
 
+#ifdef LOOPBACK_TSO
 	if (skb_shinfo(skb)->tso_size) {
 		BUG_ON(skb->protocol != htons(ETH_P_IP));
 		BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP);
@@ -143,14 +146,14 @@ static int loopback_xmit(struct sk_buff 
 		emulate_large_send_offload(skb);
 		return 0;
 	}
-
+#endif
 	dev->last_rx = jiffies;
 
 	lb_stats = &per_cpu(loopback_stats, get_cpu());
 	lb_stats->rx_bytes += skb->len;
-	lb_stats->tx_bytes += skb->len;
+	lb_stats->tx_bytes = lb_stats->rx_bytes;
 	lb_stats->rx_packets++;
-	lb_stats->tx_packets++;
+	lb_stats->tx_packets = lb_stats->rx_packets;
 	put_cpu();
 
 	netif_rx(skb);
@@ -208,9 +211,12 @@ struct net_device loopback_dev = {
 	.type			= ARPHRD_LOOPBACK,	/* 0x0001*/
 	.rebuild_header		= eth_rebuild_header,
 	.flags			= IFF_LOOPBACK,
-	.features 		= NETIF_F_SG|NETIF_F_FRAGLIST
-				  |NETIF_F_NO_CSUM|NETIF_F_HIGHDMA
-				  |NETIF_F_LLTX,
+	.features 		= NETIF_F_SG | NETIF_F_FRAGLIST
+#ifdef LOOPBACK_TSO
+				  | NETIF_F_TSO
+#endif
+				  | NETIF_F_NO_CSUM | NETIF_F_HIGHDMA
+				  | NETIF_F_LLTX,
 	.ethtool_ops		= &loopback_ethtool_ops,
 };
 
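
The loopback driver keeps its counters in per-CPU storage and updates them
between get_cpu() and put_cpu(), so the local CPU's copy can be modified
without a lock.  A minimal sketch of that pattern, using a hypothetical
counter:

	#include <linux/percpu.h>
	#include <linux/netdevice.h>

	static DEFINE_PER_CPU(struct net_device_stats, example_stats);

	/* Hypothetical accounting helper: get_cpu() disables preemption and
	 * returns the local CPU id, put_cpu() re-enables preemption. */
	static void example_count_rx(unsigned int len)
	{
		struct net_device_stats *stats;

		stats = &per_cpu(example_stats, get_cpu());
		stats->rx_bytes += len;
		stats->rx_packets++;
		put_cpu();
	}
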
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -486,9 +486,9 @@ struct netdrv_private {
 MODULE_AUTHOR ("Jeff Garzik <jgarzik@pobox.com>");
 MODULE_DESCRIPTION ("Skeleton for a PCI Fast Ethernet driver");
 MODULE_LICENSE("GPL");
-MODULE_PARM (multicast_filter_limit, "i");
-MODULE_PARM (max_interrupt_work, "i");
-MODULE_PARM (media, "1-" __MODULE_STRING(8) "i");
+module_param(multicast_filter_limit, int, 0);
+module_param(max_interrupt_work, int, 0);
+module_param_array(media, int, NULL, 0);
 MODULE_PARM_DESC (multicast_filter_limit, "pci-skeleton maximum number of filtered multicast addresses");
 MODULE_PARM_DESC (max_interrupt_work, "pci-skeleton maximum events handled per interrupt");
 MODULE_PARM_DESC (media, "pci-skeleton: Bits 0-3: media type, bit 17: full duplex");
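
The pci-skeleton change above moves from the old MODULE_PARM macros to
module_param()/module_param_array(), which take the variable, its type, the
element-count pointer for arrays (NULL when it is not needed) and the sysfs
permission bits (0 keeps the parameter out of sysfs).  A small sketch with
hypothetical parameter names:

	#include <linux/module.h>
	#include <linux/moduleparam.h>

	static int example_limit = 32;
	static int example_media[8];

	/* Hypothetical parameters in the style used above. */
	module_param(example_limit, int, 0);
	module_param_array(example_media, int, NULL, 0);
	MODULE_PARM_DESC(example_limit, "example: maximum number of entries");
	MODULE_PARM_DESC(example_media, "example: per-port media selection");
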
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
new file mode 100644
--- /dev/null
+++ b/drivers/net/phy/Kconfig
@@ -0,0 +1,49 @@
+#
+# PHY Layer Configuration
+#
+
+menu "PHY device support"
+
+config PHYLIB
+	tristate "PHY Device support and infrastructure"
+	depends on NET_ETHERNET
+	help
+	  Ethernet controllers are usually attached to PHY
+	  devices.  This option provides infrastructure for
+	  managing PHY devices.
+
+comment "MII PHY device drivers"
+	depends on PHYLIB
+
+config MARVELL_PHY
+	tristate "Drivers for Marvell PHYs"
+	depends on PHYLIB
+	---help---
+	  Currently has a driver for the 88E1011S
+	
+config DAVICOM_PHY
+	tristate "Drivers for Davicom PHYs"
+	depends on PHYLIB
+	---help---
+	  Currently supports dm9161e and dm9131
+
+config QSEMI_PHY
+	tristate "Drivers for Quality Semiconductor PHYs"
+	depends on PHYLIB
+	---help---
+	  Currently supports the qs6612
+
+config LXT_PHY
+	tristate "Drivers for the Intel LXT PHYs"
+	depends on PHYLIB
+	---help---
+	  Currently supports the lxt970, lxt971
+
+config CICADA_PHY
+	tristate "Drivers for the Cicada PHYs"
+	depends on PHYLIB
+	---help---
+	  Currently supports the cis8204
+
+endmenu
+
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
new file mode 100644
--- /dev/null
+++ b/drivers/net/phy/Makefile
@@ -0,0 +1,9 @@
+# Makefile for Linux PHY drivers
+
+libphy-objs			:= phy.o phy_device.o mdio_bus.o
+
+obj-$(CONFIG_MARVELL_PHY)	+= libphy.o marvell.o
+obj-$(CONFIG_DAVICOM_PHY)	+= libphy.o davicom.o
+obj-$(CONFIG_CICADA_PHY)	+= libphy.o cicada.o
+obj-$(CONFIG_LXT_PHY)		+= libphy.o lxt.o
+obj-$(CONFIG_QSEMI_PHY)		+= libphy.o qsemi.o
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
new file mode 100644
--- /dev/null
+++ b/drivers/net/phy/cicada.c
@@ -0,0 +1,134 @@
+/*
+ * drivers/net/phy/cicada.c
+ *
+ * Driver for Cicada PHYs
+ *
+ * Author: Andy Fleming
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+/* Cicada Extended Control Register 1 */
+#define MII_CIS8201_EXT_CON1           0x17
+#define MII_CIS8201_EXTCON1_INIT       0x0000
+
+/* Cicada Interrupt Mask Register */
+#define MII_CIS8201_IMASK		0x19
+#define MII_CIS8201_IMASK_IEN		0x8000
+#define MII_CIS8201_IMASK_SPEED	0x4000
+#define MII_CIS8201_IMASK_LINK		0x2000
+#define MII_CIS8201_IMASK_DUPLEX	0x1000
+#define MII_CIS8201_IMASK_MASK		0xf000
+
+/* Cicada Interrupt Status Register */
+#define MII_CIS8201_ISTAT		0x1a
+#define MII_CIS8201_ISTAT_STATUS	0x8000
+#define MII_CIS8201_ISTAT_SPEED	0x4000
+#define MII_CIS8201_ISTAT_LINK		0x2000
+#define MII_CIS8201_ISTAT_DUPLEX	0x1000
+
+/* Cicada Auxiliary Control/Status Register */
+#define MII_CIS8201_AUX_CONSTAT        0x1c
+#define MII_CIS8201_AUXCONSTAT_INIT    0x0004
+#define MII_CIS8201_AUXCONSTAT_DUPLEX  0x0020
+#define MII_CIS8201_AUXCONSTAT_SPEED   0x0018
+#define MII_CIS8201_AUXCONSTAT_GBIT    0x0010
+#define MII_CIS8201_AUXCONSTAT_100     0x0008
+
+MODULE_DESCRIPTION("Cicadia PHY driver");
+MODULE_AUTHOR("Andy Fleming");
+MODULE_LICENSE("GPL");
+
+static int cis820x_config_init(struct phy_device *phydev)
+{
+	int err;
+
+	err = phy_write(phydev, MII_CIS8201_AUX_CONSTAT,
+			MII_CIS8201_AUXCONSTAT_INIT);
+
+	if (err < 0)
+		return err;
+
+	err = phy_write(phydev, MII_CIS8201_EXT_CON1,
+			MII_CIS8201_EXTCON1_INIT);
+
+	return err;
+}
+
+static int cis820x_ack_interrupt(struct phy_device *phydev)
+{
+	int err = phy_read(phydev, MII_CIS8201_ISTAT);
+
+	return (err < 0) ? err : 0;
+}
+
+static int cis820x_config_intr(struct phy_device *phydev)
+{
+	int err;
+
+	if(phydev->interrupts == PHY_INTERRUPT_ENABLED)
+		err = phy_write(phydev, MII_CIS8201_IMASK, 
+				MII_CIS8201_IMASK_MASK);
+	else
+		err = phy_write(phydev, MII_CIS8201_IMASK, 0);
+
+	return err;
+}
+
+/* Cicada 820x */
+static struct phy_driver cis8204_driver = {
+	.phy_id		= 0x000fc440,
+	.name		= "Cicada Cis8204",
+	.phy_id_mask	= 0x000fffc0,
+	.features	= PHY_GBIT_FEATURES,
+	.flags		= PHY_HAS_INTERRUPT,
+	.config_init	= &cis820x_config_init,
+	.config_aneg	= &genphy_config_aneg,
+	.read_status	= &genphy_read_status,
+	.ack_interrupt	= &cis820x_ack_interrupt,
+	.config_intr	= &cis820x_config_intr,
+	.driver 	= { .owner = THIS_MODULE,},
+};
+
+static int __init cis8204_init(void)
+{
+	return phy_driver_register(&cis8204_driver);
+}
+
+static void __exit cis8204_exit(void)
+{
+	phy_driver_unregister(&cis8204_driver);
+}
+
+module_init(cis8204_init);
+module_exit(cis8204_exit);
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
new file mode 100644
--- /dev/null
+++ b/drivers/net/phy/davicom.c
@@ -0,0 +1,195 @@
+/*
+ * drivers/net/phy/davicom.c
+ *
+ * Driver for Davicom PHYs
+ *
+ * Author: Andy Fleming
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#define MII_DM9161_SCR		0x10
+#define MII_DM9161_SCR_INIT	0x0610
+
+/* DM9161 Interrupt Register */
+#define MII_DM9161_INTR	0x15
+#define MII_DM9161_INTR_PEND		0x8000
+#define MII_DM9161_INTR_DPLX_MASK	0x0800
+#define MII_DM9161_INTR_SPD_MASK	0x0400
+#define MII_DM9161_INTR_LINK_MASK	0x0200
+#define MII_DM9161_INTR_MASK		0x0100
+#define MII_DM9161_INTR_DPLX_CHANGE	0x0010
+#define MII_DM9161_INTR_SPD_CHANGE	0x0008
+#define MII_DM9161_INTR_LINK_CHANGE	0x0004
+#define MII_DM9161_INTR_INIT 		0x0000
+#define MII_DM9161_INTR_STOP	\
+(MII_DM9161_INTR_DPLX_MASK | MII_DM9161_INTR_SPD_MASK \
+ | MII_DM9161_INTR_LINK_MASK | MII_DM9161_INTR_MASK)
+
+/* DM9161 10BT Configuration/Status */
+#define MII_DM9161_10BTCSR	0x12
+#define MII_DM9161_10BTCSR_INIT	0x7800
+
+MODULE_DESCRIPTION("Davicom PHY driver");
+MODULE_AUTHOR("Andy Fleming");
+MODULE_LICENSE("GPL");
+
+
+#define DM9161_DELAY 1
+static int dm9161_config_intr(struct phy_device *phydev)
+{
+	int temp;
+
+	temp = phy_read(phydev, MII_DM9161_INTR);
+
+	if (temp < 0)
+		return temp;
+
+	if(PHY_INTERRUPT_ENABLED == phydev->interrupts )
+		temp &= ~(MII_DM9161_INTR_STOP);
+	else
+		temp |= MII_DM9161_INTR_STOP;
+
+	temp = phy_write(phydev, MII_DM9161_INTR, temp);
+
+	return temp;
+}
+
+static int dm9161_config_aneg(struct phy_device *phydev)
+{
+	int err;
+
+	/* Isolate the PHY */
+	err = phy_write(phydev, MII_BMCR, BMCR_ISOLATE);
+
+	if (err < 0)
+		return err;
+
+	/* Configure the new settings */
+	err = genphy_config_aneg(phydev);
+
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+
+static int dm9161_config_init(struct phy_device *phydev)
+{
+	int err;
+
+	/* Isolate the PHY */
+	err = phy_write(phydev, MII_BMCR, BMCR_ISOLATE);
+
+	if (err < 0)
+		return err;
+
+	/* Do not bypass the scrambler/descrambler */
+	err = phy_write(phydev, MII_DM9161_SCR, MII_DM9161_SCR_INIT);
+
+	if (err < 0)
+		return err;
+
+	/* Clear 10BTCSR to default */
+	err = phy_write(phydev, MII_DM9161_10BTCSR, MII_DM9161_10BTCSR_INIT);
+
+	if (err < 0)
+		return err;
+
+	/* Reconnect the PHY, and enable Autonegotiation */
+	err = phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
+
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+
+static int dm9161_ack_interrupt(struct phy_device *phydev)
+{
+	int err = phy_read(phydev, MII_DM9161_INTR);
+
+	return (err < 0) ? err : 0;
+}
+
+static struct phy_driver dm9161_driver = {
+	.phy_id		= 0x0181b880,
+	.name		= "Davicom DM9161E",
+	.phy_id_mask	= 0x0ffffff0,
+	.features	= PHY_BASIC_FEATURES,
+	.config_init	= dm9161_config_init,
+	.config_aneg	= dm9161_config_aneg,
+	.read_status	= genphy_read_status,
+	.driver 	= { .owner = THIS_MODULE,},
+};
+
+static struct phy_driver dm9131_driver = {
+	.phy_id		= 0x00181b80,
+	.name		= "Davicom DM9131",
+	.phy_id_mask	= 0x0ffffff0,
+	.features	= PHY_BASIC_FEATURES,
+	.flags		= PHY_HAS_INTERRUPT,
+	.config_aneg	= genphy_config_aneg,
+	.read_status	= genphy_read_status,
+	.ack_interrupt	= dm9161_ack_interrupt,
+	.config_intr	= dm9161_config_intr,
+	.driver 	= { .owner = THIS_MODULE,},
+};
+
+static int __init davicom_init(void)
+{
+	int ret;
+
+	ret = phy_driver_register(&dm9161_driver);
+	if (ret)
+		goto err1;
+
+	ret = phy_driver_register(&dm9131_driver);
+	if (ret)
+		goto err2;
+	return 0;
+
+ err2:	
+	phy_driver_unregister(&dm9161_driver);
+ err1:
+	return ret;
+}
+
+static void __exit davicom_exit(void)
+{
+	phy_driver_unregister(&dm9161_driver);
+	phy_driver_unregister(&dm9131_driver);
+}
+
+module_init(davicom_init);
+module_exit(davicom_exit);
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
new file mode 100644
--- /dev/null
+++ b/drivers/net/phy/lxt.c
@@ -0,0 +1,179 @@
+/*
+ * drivers/net/phy/lxt.c
+ *
+ * Driver for Intel LXT PHYs
+ *
+ * Author: Andy Fleming
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+/* The Level one LXT970 is used by many boards				     */
+
+#define MII_LXT970_IER       17  /* Interrupt Enable Register */
+
+#define MII_LXT970_IER_IEN	0x0002
+
+#define MII_LXT970_ISR       18  /* Interrupt Status Register */
+
+#define MII_LXT970_CONFIG    19  /* Configuration Register    */
+
+/* ------------------------------------------------------------------------- */
+/* The Level one LXT971 is used on some of my custom boards                  */
+
+/* register definitions for the 971 */
+#define MII_LXT971_IER		18  /* Interrupt Enable Register */
+#define MII_LXT971_IER_IEN	0x00f2
+
+#define MII_LXT971_ISR		19  /* Interrupt Status Register */
+
+
+MODULE_DESCRIPTION("Intel LXT PHY driver");
+MODULE_AUTHOR("Andy Fleming");
+MODULE_LICENSE("GPL");
+
+static int lxt970_ack_interrupt(struct phy_device *phydev)
+{
+	int err;
+
+	err = phy_read(phydev, MII_BMSR);
+
+	if (err < 0)
+		return err;
+
+	err = phy_read(phydev, MII_LXT970_ISR);
+
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+
+static int lxt970_config_intr(struct phy_device *phydev)
+{
+	int err;
+
+	if(phydev->interrupts == PHY_INTERRUPT_ENABLED)
+		err = phy_write(phydev, MII_LXT970_IER, MII_LXT970_IER_IEN);
+	else
+		err = phy_write(phydev, MII_LXT970_IER, 0);
+
+	return err;
+}
+
+static int lxt970_config_init(struct phy_device *phydev)
+{
+	int err;
+
+	err = phy_write(phydev, MII_LXT970_CONFIG, 0);
+
+	return err;
+}
+
+
+static int lxt971_ack_interrupt(struct phy_device *phydev)
+{
+	int err = phy_read(phydev, MII_LXT971_ISR);
+
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+
+static int lxt971_config_intr(struct phy_device *phydev)
+{
+	int err;
+
+	if(phydev->interrupts == PHY_INTERRUPT_ENABLED)
+		err = phy_write(phydev, MII_LXT971_IER, MII_LXT971_IER_IEN);
+	else
+		err = phy_write(phydev, MII_LXT971_IER, 0);
+
+	return err;
+}
+
+static struct phy_driver lxt970_driver = {
+	.phy_id		= 0x07810000,
+	.name		= "LXT970",
+	.phy_id_mask	= 0x0fffffff,
+	.features	= PHY_BASIC_FEATURES,
+	.flags		= PHY_HAS_INTERRUPT,
+	.config_init	= lxt970_config_init,
+	.config_aneg	= genphy_config_aneg,
+	.read_status	= genphy_read_status,
+	.ack_interrupt	= lxt970_ack_interrupt,
+	.config_intr	= lxt970_config_intr,
+	.driver 	= { .owner = THIS_MODULE,},
+};
+
+static struct phy_driver lxt971_driver = {
+	.phy_id		= 0x0001378e,
+	.name		= "LXT971",
+	.phy_id_mask	= 0x0fffffff,
+	.features	= PHY_BASIC_FEATURES,
+	.flags		= PHY_HAS_INTERRUPT,
+	.config_aneg	= genphy_config_aneg,
+	.read_status	= genphy_read_status,
+	.ack_interrupt	= lxt971_ack_interrupt,
+	.config_intr	= lxt971_config_intr,
+	.driver 	= { .owner = THIS_MODULE,},
+};
+
+static int __init lxt_init(void)
+{
+	int ret;
+
+	ret = phy_driver_register(&lxt970_driver);
+	if (ret)
+		goto err1;
+
+	ret = phy_driver_register(&lxt971_driver);
+	if (ret)
+		goto err2;
+	return 0;
+
+ err2:	
+	phy_driver_unregister(&lxt970_driver);
+ err1:
+	return ret;
+}
+
+static void __exit lxt_exit(void)
+{
+	phy_driver_unregister(&lxt970_driver);
+	phy_driver_unregister(&lxt971_driver);
+}
+
+module_init(lxt_init);
+module_exit(lxt_exit);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
new file mode 100644
--- /dev/null
+++ b/drivers/net/phy/marvell.c
@@ -0,0 +1,140 @@
+/*
+ * drivers/net/phy/marvell.c
+ *
+ * Driver for Marvell PHYs
+ *
+ * Author: Andy Fleming
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+#define MII_M1011_IEVENT		0x13
+#define MII_M1011_IEVENT_CLEAR		0x0000
+
+#define MII_M1011_IMASK			0x12
+#define MII_M1011_IMASK_INIT		0x6400
+#define MII_M1011_IMASK_CLEAR		0x0000
+
+MODULE_DESCRIPTION("Marvell PHY driver");
+MODULE_AUTHOR("Andy Fleming");
+MODULE_LICENSE("GPL");
+
+static int marvell_ack_interrupt(struct phy_device *phydev)
+{
+	int err;
+
+	/* Clear the interrupts by reading the reg */
+	err = phy_read(phydev, MII_M1011_IEVENT);
+
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+
+static int marvell_config_intr(struct phy_device *phydev)
+{
+	int err;
+
+	if(phydev->interrupts == PHY_INTERRUPT_ENABLED)
+		err = phy_write(phydev, MII_M1011_IMASK, MII_M1011_IMASK_INIT);
+	else
+		err = phy_write(phydev, MII_M1011_IMASK, MII_M1011_IMASK_CLEAR);
+
+	return err;
+}
+
+static int marvell_config_aneg(struct phy_device *phydev)
+{
+	int err;
+
+	/* The Marvell PHY has an errata which requires
+	 * that certain registers get written in order
+	 * to restart autonegotiation */
+	err = phy_write(phydev, MII_BMCR, BMCR_RESET);
+
+	if (err < 0)
+		return err;
+
+	err = phy_write(phydev, 0x1d, 0x1f);
+	if (err < 0)
+		return err;
+
+	err = phy_write(phydev, 0x1e, 0x200c);
+	if (err < 0)
+		return err;
+
+	err = phy_write(phydev, 0x1d, 0x5);
+	if (err < 0)
+		return err;
+
+	err = phy_write(phydev, 0x1e, 0);
+	if (err < 0)
+		return err;
+
+	err = phy_write(phydev, 0x1e, 0x100);
+	if (err < 0)
+		return err;
+
+
+	err = genphy_config_aneg(phydev);
+
+	return err;
+}
+
+
+static struct phy_driver m88e1101_driver = {
+	.phy_id		= 0x01410c00,
+	.phy_id_mask	= 0xffffff00,
+	.name		= "Marvell 88E1101",
+	.features	= PHY_GBIT_FEATURES,
+	.flags		= PHY_HAS_INTERRUPT,
+	.config_aneg	= &marvell_config_aneg,
+	.read_status	= &genphy_read_status,
+	.ack_interrupt	= &marvell_ack_interrupt,
+	.config_intr	= &marvell_config_intr,
+	.driver 	= { .owner = THIS_MODULE,},
+};
+
+static int __init marvell_init(void)
+{
+	return phy_driver_register(&m88e1101_driver);
+}
+
+static void __exit marvell_exit(void)
+{
+	phy_driver_unregister(&m88e1101_driver);
+}
+
+module_init(marvell_init);
+module_exit(marvell_exit);
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
new file mode 100644
--- /dev/null
+++ b/drivers/net/phy/mdio_bus.c
@@ -0,0 +1,99 @@
+/*
+ * drivers/net/phy/mdio_bus.c
+ *
+ * MDIO Bus interface
+ *
+ * Author: Andy Fleming
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+/* mdio_bus_match
+ *
+ * description: Given a PHY device, and a PHY driver, return 1 if
+ *   the driver supports the device.  Otherwise, return 0
+ */
+static int mdio_bus_match(struct device *dev, struct device_driver *drv)
+{
+	struct phy_device *phydev = to_phy_device(dev);
+	struct phy_driver *phydrv = to_phy_driver(drv);
+
+	return (phydrv->phy_id == (phydev->phy_id & phydrv->phy_id_mask));
+}
+
+/* Suspend and resume.  Copied from platform_suspend and
+ * platform_resume
+ */
+static int mdio_bus_suspend(struct device * dev, u32 state)
+{
+	int ret = 0;
+	struct device_driver *drv = dev->driver;
+
+	if (drv && drv->suspend) {
+		ret = drv->suspend(dev, state, SUSPEND_DISABLE);
+		if (ret == 0)
+			ret = drv->suspend(dev, state, SUSPEND_SAVE_STATE);
+		if (ret == 0)
+			ret = drv->suspend(dev, state, SUSPEND_POWER_DOWN);
+	}
+	return ret;
+}
+
+static int mdio_bus_resume(struct device * dev)
+{
+	int ret = 0;
+	struct device_driver *drv = dev->driver;
+
+	if (drv && drv->resume) {
+		ret = drv->resume(dev, RESUME_POWER_ON);
+		if (ret == 0)
+			ret = drv->resume(dev, RESUME_RESTORE_STATE);
+		if (ret == 0)
+			ret = drv->resume(dev, RESUME_ENABLE);
+	}
+	return ret;
+}
+
+struct bus_type mdio_bus_type = {
+	.name		= "mdio_bus",
+	.match		= mdio_bus_match,
+	.suspend	= mdio_bus_suspend,
+	.resume		= mdio_bus_resume,
+};
+
+int __init mdio_bus_init(void)
+{
+	return bus_register(&mdio_bus_type);
+}
+
+
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
new file mode 100644
--- /dev/null
+++ b/drivers/net/phy/phy.c
@@ -0,0 +1,690 @@
+/*
+ * drivers/net/phy/phy.c
+ *
+ * Framework for configuring and reading PHY devices
+ * Based on code in sungem_phy.c and gianfar_phy.c
+ *
+ * Author: Andy Fleming
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+static void phy_timer(unsigned long data);
+static int phy_disable_interrupts(struct phy_device *phydev);
+static void phy_sanitize_settings(struct phy_device *phydev);
+static int phy_stop_interrupts(struct phy_device *phydev);
+
+
+/* Convenience functions for reading/writing a given PHY
+ * register. They MUST NOT be called from interrupt context,
+ * because the bus read/write functions may wait for an interrupt
+ * to conclude the operation. */
+int phy_read(struct phy_device *phydev, u16 regnum)
+{
+	int retval;
+	struct mii_bus *bus = phydev->bus;
+
+	spin_lock_bh(&bus->mdio_lock);
+	retval = bus->read(bus, phydev->addr, regnum);
+	spin_unlock_bh(&bus->mdio_lock);
+
+	return retval;
+}
+EXPORT_SYMBOL(phy_read);
+
+int phy_write(struct phy_device *phydev, u16 regnum, u16 val)
+{
+	int err;
+	struct mii_bus *bus = phydev->bus;
+
+	spin_lock_bh(&bus->mdio_lock);
+	err = bus->write(bus, phydev->addr, regnum, val);
+	spin_unlock_bh(&bus->mdio_lock);
+
+	return err;
+}
+EXPORT_SYMBOL(phy_write);
+
+
+int phy_clear_interrupt(struct phy_device *phydev)
+{
+	int err = 0;
+
+	if (phydev->drv->ack_interrupt)
+		err = phydev->drv->ack_interrupt(phydev);
+
+	return err;
+}
+
+
+int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
+{
+	int err = 0;
+
+	phydev->interrupts = interrupts;
+	if (phydev->drv->config_intr)
+		err = phydev->drv->config_intr(phydev);
+
+	return err;
+}
+
+
+/* phy_aneg_done
+ *
+ * description: Reads the status register and returns 0 either if
+ *   auto-negotiation is incomplete, or if there was an error.
+ *   Returns BMSR_ANEGCOMPLETE if auto-negotiation is done.
+ */
+static inline int phy_aneg_done(struct phy_device *phydev)
+{
+	int retval;
+
+	retval = phy_read(phydev, MII_BMSR);
+
+	return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
+}
+
+/* phy_start_aneg
+ *
+ * description: Calls the PHY driver's config_aneg, and then
+ *   sets the PHY state to PHY_AN if auto-negotiation is enabled,
+ *   and to PHY_FORCING if auto-negotiation is disabled. Unless
+ *   the PHY is currently HALTED.
+ */
+static int phy_start_aneg(struct phy_device *phydev)
+{
+	int err;
+
+	spin_lock(&phydev->lock);
+
+	if (AUTONEG_DISABLE == phydev->autoneg)
+		phy_sanitize_settings(phydev);
+
+	err = phydev->drv->config_aneg(phydev);
+
+	if (err < 0)
+		goto out_unlock;
+
+	if (phydev->state != PHY_HALTED) {
+		if (AUTONEG_ENABLE == phydev->autoneg) {
+			phydev->state = PHY_AN;
+			phydev->link_timeout = PHY_AN_TIMEOUT;
+		} else {
+			phydev->state = PHY_FORCING;
+			phydev->link_timeout = PHY_FORCE_TIMEOUT;
+		}
+	}
+
+out_unlock:
+	spin_unlock(&phydev->lock);
+	return err;
+}
+
+/* A structure for mapping a particular speed and duplex
+ * combination to a particular SUPPORTED and ADVERTISED value */
+struct phy_setting {
+	int speed;
+	int duplex;
+	u32 setting;
+};
+
+/* A mapping of all SUPPORTED settings to speed/duplex */
+static struct phy_setting settings[] = {
+	{
+		.speed = 10000,
+		.duplex = DUPLEX_FULL,
+		.setting = SUPPORTED_10000baseT_Full,
+	},
+	{
+		.speed = SPEED_1000,
+		.duplex = DUPLEX_FULL,
+		.setting = SUPPORTED_1000baseT_Full,
+	},
+	{
+		.speed = SPEED_1000,
+		.duplex = DUPLEX_HALF,
+		.setting = SUPPORTED_1000baseT_Half,
+	},
+	{
+		.speed = SPEED_100,
+		.duplex = DUPLEX_FULL,
+		.setting = SUPPORTED_100baseT_Full,
+	},
+	{
+		.speed = SPEED_100,
+		.duplex = DUPLEX_HALF,
+		.setting = SUPPORTED_100baseT_Half,
+	},
+	{
+		.speed = SPEED_10,
+		.duplex = DUPLEX_FULL,
+		.setting = SUPPORTED_10baseT_Full,
+	},
+	{
+		.speed = SPEED_10,
+		.duplex = DUPLEX_HALF,
+		.setting = SUPPORTED_10baseT_Half,
+	},
+};
+
+#define MAX_NUM_SETTINGS (sizeof(settings)/sizeof(struct phy_setting))
+
+/* phy_find_setting
+ *
+ * description: Searches the settings array for the setting which
+ *   matches the desired speed and duplex, and returns the index
+ *   of that setting.  Returns the index of the last setting if
+ *   none of the others match.
+ */
+static inline int phy_find_setting(int speed, int duplex)
+{
+	int idx = 0;
+
+	while (idx < ARRAY_SIZE(settings) &&
+			(settings[idx].speed != speed ||
+			settings[idx].duplex != duplex))
+		idx++;
+
+	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
+}
+
+/* phy_find_valid
+ * idx: The first index in settings[] to search
+ * features: A mask of the valid settings
+ *
+ * description: Returns the index of the first valid setting less
+ *   than or equal to the one pointed to by idx, as determined by
+ *   the mask in features.  Returns the index of the last setting
+ *   if nothing else matches.
+ */
+static inline int phy_find_valid(int idx, u32 features)
+{
+	while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features))
+		idx++;
+
+	return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
+}
+
+/* phy_sanitize_settings
+ *
+ * description: Make sure the PHY is set to supported speeds and
+ *   duplexes.  Drop down by one in this order:  1000/FULL,
+ *   1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF
+ */
+static void phy_sanitize_settings(struct phy_device *phydev)
+{
+	u32 features = phydev->supported;
+	int idx;
+
+	/* Sanitize settings based on PHY capabilities */
+	if ((features & SUPPORTED_Autoneg) == 0)
+		phydev->autoneg = 0;
+
+	idx = phy_find_valid(phy_find_setting(phydev->speed, phydev->duplex),
+			features);
+
+	phydev->speed = settings[idx].speed;
+	phydev->duplex = settings[idx].duplex;
+}
+
+/* phy_force_reduction
+ *
+ * description: Reduces the speed/duplex settings by
+ *   one notch.  The order is so:
+ *   1000/FULL, 1000/HALF, 100/FULL, 100/HALF,
+ *   10/FULL, 10/HALF.  The function bottoms out at 10/HALF.
+ */
+static void phy_force_reduction(struct phy_device *phydev)
+{
+	int idx;
+
+	idx = phy_find_setting(phydev->speed, phydev->duplex);
+	
+	idx++;
+
+	idx = phy_find_valid(idx, phydev->supported);
+
+	phydev->speed = settings[idx].speed;
+	phydev->duplex = settings[idx].duplex;
+
+	pr_info("Trying %d/%s\n", phydev->speed,
+			DUPLEX_FULL == phydev->duplex ?
+			"FULL" : "HALF");
+}
+
+/* phy_ethtool_sset:
+ * A generic ethtool sset function.  Handles all the details
+ *
+ * A few notes about parameter checking:
+ * - We don't set port or transceiver, so we don't care what they
+ *   were set to.
+ * - phy_start_aneg() will make sure forced settings are sane, and
+ *   choose the next best ones from the ones selected, so we don't
+ *   care if ethtool tries to give us bad values
+ */
+int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
+{
+	if (cmd->phy_address != phydev->addr)
+		return -EINVAL;
+
+	/* We make sure that we don't pass unsupported
+	 * values in to the PHY */
+	cmd->advertising &= phydev->supported;
+
+	/* Verify the settings we care about. */
+	if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE)
+		return -EINVAL;
+
+	if (cmd->autoneg == AUTONEG_ENABLE && cmd->advertising == 0)
+		return -EINVAL;
+
+	if (cmd->autoneg == AUTONEG_DISABLE
+			&& ((cmd->speed != SPEED_1000
+					&& cmd->speed != SPEED_100
+					&& cmd->speed != SPEED_10)
+				|| (cmd->duplex != DUPLEX_HALF
+					&& cmd->duplex != DUPLEX_FULL)))
+		return -EINVAL;
+
+	phydev->autoneg = cmd->autoneg;
+
+	phydev->speed = cmd->speed;
+
+	phydev->advertising = cmd->advertising;
+
+	if (AUTONEG_ENABLE == cmd->autoneg)
+		phydev->advertising |= ADVERTISED_Autoneg;
+	else
+		phydev->advertising &= ~ADVERTISED_Autoneg;
+
+	phydev->duplex = cmd->duplex;
+
+	/* Restart the PHY */
+	phy_start_aneg(phydev);
+
+	return 0;
+}
+
+int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
+{
+	cmd->supported = phydev->supported;
+
+	cmd->advertising = phydev->advertising;
+
+	cmd->speed = phydev->speed;
+	cmd->duplex = phydev->duplex;
+	cmd->port = PORT_MII;
+	cmd->phy_address = phydev->addr;
+	cmd->transceiver = XCVR_EXTERNAL;
+	cmd->autoneg = phydev->autoneg;
+
+	return 0;
+}
+
+
+/* Note that this function is currently incompatible with the
+ * PHYCONTROL layer.  It changes registers without regard to
+ * current state.  Use at own risk
+ */
+int phy_mii_ioctl(struct phy_device *phydev,
+		struct mii_ioctl_data *mii_data, int cmd)
+{
+	u16 val = mii_data->val_in;
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+		mii_data->phy_id = phydev->addr;
+		break;
+	case SIOCGMIIREG:
+		mii_data->val_out = phy_read(phydev, mii_data->reg_num);
+		break;
+
+	case SIOCSMIIREG:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+
+		if (mii_data->phy_id == phydev->addr) {
+			switch(mii_data->reg_num) {
+			case MII_BMCR:
+				if (val & (BMCR_RESET|BMCR_ANENABLE))
+					phydev->autoneg = AUTONEG_DISABLE;
+				else
+					phydev->autoneg = AUTONEG_ENABLE;
+				if ((!phydev->autoneg) && (val & BMCR_FULLDPLX))
+					phydev->duplex = DUPLEX_FULL;
+				else
+					phydev->duplex = DUPLEX_HALF;
+				break;
+			case MII_ADVERTISE:
+				phydev->advertising = val;
+				break;
+			default:
+				/* do nothing */
+				break;
+			}
+		}
+
+		phy_write(phydev, mii_data->reg_num, val);
+		
+		if (mii_data->reg_num == MII_BMCR 
+				&& val & BMCR_RESET
+				&& phydev->drv->config_init)
+			phydev->drv->config_init(phydev);
+		break;
+	}
+
+	return 0;
+}
+
+/* phy_start_machine:
+ *
+ * description: The PHY infrastructure can run a state machine
+ *   which tracks whether the PHY is starting up, negotiating,
+ *   etc.  This function starts the timer which tracks the state
+ *   of the PHY.  If you want to be notified when the state
+ *   changes, pass in the callback, otherwise, pass NULL.  If you
+ *   want to maintain your own state machine, do not call this
+ *   function. */
+void phy_start_machine(struct phy_device *phydev,
+		void (*handler)(struct net_device *))
+{
+	phydev->adjust_state = handler;
+
+	init_timer(&phydev->phy_timer);
+	phydev->phy_timer.function = &phy_timer;
+	phydev->phy_timer.data = (unsigned long) phydev;
+	mod_timer(&phydev->phy_timer, jiffies + HZ);
+}
+
+/* phy_stop_machine
+ *
+ * description: Stops the state machine timer, sets the state to
+ *   UP (unless it wasn't up yet), and then frees the interrupt,
+ *   if it is in use. This function must be called BEFORE
+ *   phy_detach.
+ */
+void phy_stop_machine(struct phy_device *phydev)
+{
+	del_timer_sync(&phydev->phy_timer);
+
+	spin_lock(&phydev->lock);
+	if (phydev->state > PHY_UP)
+		phydev->state = PHY_UP;
+	spin_unlock(&phydev->lock);
+
+	if (phydev->irq != PHY_POLL)
+		phy_stop_interrupts(phydev);
+
+	phydev->adjust_state = NULL;
+}
+
+/* phy_error:
+ *
+ * Moves the PHY to the HALTED state in response to a read
+ * or write error, and tells the controller the link is down.
+ * Must not be called from interrupt context, or while the
+ * phydev->lock is held.
+ */
+void phy_error(struct phy_device *phydev)
+{
+	spin_lock(&phydev->lock);
+	phydev->state = PHY_HALTED;
+	spin_unlock(&phydev->lock);
+}
+
+static int phy_stop_interrupts(struct phy_device *phydev)
+{
+	int err;
+
+	err = phy_disable_interrupts(phydev);
+
+	if (err)
+		phy_error(phydev);
+
+	free_irq(phydev->irq, phydev);
+
+	return err;
+}
+
+/* Disable the PHY interrupts from the PHY side */
+static int phy_disable_interrupts(struct phy_device *phydev)
+{
+	int err;
+
+	/* Disable PHY interrupts */
+	err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
+
+	if (err)
+		goto phy_err;
+
+	/* Clear the interrupt */
+	err = phy_clear_interrupt(phydev);
+
+	if (err)
+		goto phy_err;
+
+	return 0;
+
+phy_err:
+	phy_error(phydev);
+
+	return err;
+}
+
+/* PHY timer which handles the state machine */
+static void phy_timer(unsigned long data)
+{
+	struct phy_device *phydev = (struct phy_device *)data;
+	int needs_aneg = 0;
+	int err = 0;
+
+	spin_lock(&phydev->lock);
+
+	if (phydev->adjust_state)
+		phydev->adjust_state(phydev->attached_dev);
+
+	switch(phydev->state) {
+		case PHY_DOWN:
+		case PHY_STARTING:
+		case PHY_READY:
+		case PHY_PENDING:
+			break;
+		case PHY_UP:
+			needs_aneg = 1;
+
+			phydev->link_timeout = PHY_AN_TIMEOUT;
+
+			break;
+		case PHY_AN:
+			/* Check if negotiation is done.  Break
+			 * if there's an error */
+			err = phy_aneg_done(phydev);
+			if (err < 0)
+				break;
+
+			/* If auto-negotiation is done, we change to
+			 * either RUNNING, or NOLINK */
+			if (err > 0) {
+				err = phy_read_status(phydev);
+
+				if (err)
+					break;
+
+				if (phydev->link) {
+					phydev->state = PHY_RUNNING;
+					netif_carrier_on(phydev->attached_dev);
+				} else {
+					phydev->state = PHY_NOLINK;
+					netif_carrier_off(phydev->attached_dev);
+				}
+
+				phydev->adjust_link(phydev->attached_dev);
+
+			} else if (0 == phydev->link_timeout--) {
+				/* The counter expired, so either we
+				 * switch to forced mode, or the
+				 * magic_aneg bit exists, and we try aneg
+				 * again */
+				if (!(phydev->drv->flags & PHY_HAS_MAGICANEG)) {
+					int idx;
+
+					/* We'll start from the
+					 * fastest speed, and work
+					 * our way down */
+					idx = phy_find_valid(0,
+							phydev->supported);
+
+					phydev->speed = settings[idx].speed;
+					phydev->duplex = settings[idx].duplex;
+					
+					phydev->autoneg = AUTONEG_DISABLE;
+					phydev->state = PHY_FORCING;
+					phydev->link_timeout =
+						PHY_FORCE_TIMEOUT;
+
+					pr_info("Trying %d/%s\n",
+							phydev->speed,
+							DUPLEX_FULL ==
+							phydev->duplex ?
+							"FULL" : "HALF");
+				}
+
+				needs_aneg = 1;
+			}
+			break;
+		case PHY_NOLINK:
+			err = phy_read_status(phydev);
+
+			if (err)
+				break;
+
+			if (phydev->link) {
+				phydev->state = PHY_RUNNING;
+				netif_carrier_on(phydev->attached_dev);
+				phydev->adjust_link(phydev->attached_dev);
+			}
+			break;
+		case PHY_FORCING:
+			err = phy_read_status(phydev);
+
+			if (err)
+				break;
+
+			if (phydev->link) {
+				phydev->state = PHY_RUNNING;
+				netif_carrier_on(phydev->attached_dev);
+			} else {
+				if (0 == phydev->link_timeout--) {
+					phy_force_reduction(phydev);
+					needs_aneg = 1;
+				}
+			}
+
+			phydev->adjust_link(phydev->attached_dev);
+			break;
+		case PHY_RUNNING:
+			/* Only register a CHANGE if we are
+			 * polling */
+			if (PHY_POLL == phydev->irq)
+				phydev->state = PHY_CHANGELINK;
+			break;
+		case PHY_CHANGELINK:
+			err = phy_read_status(phydev);
+
+			if (err)
+				break;
+
+			if (phydev->link) {
+				phydev->state = PHY_RUNNING;
+				netif_carrier_on(phydev->attached_dev);
+			} else {
+				phydev->state = PHY_NOLINK;
+				netif_carrier_off(phydev->attached_dev);
+			}
+
+			phydev->adjust_link(phydev->attached_dev);
+
+			if (PHY_POLL != phydev->irq)
+				err = phy_config_interrupt(phydev,
+						PHY_INTERRUPT_ENABLED);
+			break;
+		case PHY_HALTED:
+			if (phydev->link) {
+				phydev->link = 0;
+				netif_carrier_off(phydev->attached_dev);
+				phydev->adjust_link(phydev->attached_dev);
+			}
+			break;
+		case PHY_RESUMING:
+
+			err = phy_clear_interrupt(phydev);
+
+			if (err)
+				break;
+
+			err = phy_config_interrupt(phydev,
+					PHY_INTERRUPT_ENABLED);
+
+			if (err)
+				break;
+
+			if (AUTONEG_ENABLE == phydev->autoneg) {
+				err = phy_aneg_done(phydev);
+				if (err < 0)
+					break;
+
+				/* err > 0 if AN is done.
+				 * Otherwise, it's 0, and we're
+				 * still waiting for AN */
+				if (err > 0) {
+					phydev->state = PHY_RUNNING;
+				} else {
+					phydev->state = PHY_AN;
+					phydev->link_timeout = PHY_AN_TIMEOUT;
+				}
+			} else
+				phydev->state = PHY_RUNNING;
+			break;
+	}
+
+	spin_unlock(&phydev->lock);
+
+	if (needs_aneg)
+		err = phy_start_aneg(phydev);
+
+	if (err < 0)
+		phy_error(phydev);
+
+	mod_timer(&phydev->phy_timer, jiffies + PHY_STATE_TIME * HZ);
+}
+
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
new file mode 100644
--- /dev/null
+++ b/drivers/net/phy/phy_device.c
@@ -0,0 +1,572 @@
+/*
+ * drivers/net/phy/phy_device.c
+ *
+ * Framework for finding and configuring PHYs.
+ * Also contains generic PHY driver
+ *
+ * Author: Andy Fleming
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+static int genphy_config_init(struct phy_device *phydev);
+
+static struct phy_driver genphy_driver = {
+	.phy_id		= 0xffffffff,
+	.phy_id_mask	= 0xffffffff,
+	.name		= "Generic PHY",
+	.config_init	= genphy_config_init,
+	.features	= 0,
+	.config_aneg	= genphy_config_aneg,
+	.read_status	= genphy_read_status,
+	.driver	=	{.owner	= THIS_MODULE, },
+};
+
+/* get_phy_device
+ *
+ * description: Reads the ID registers of the PHY at addr on the
+ *   bus, then allocates and returns the phy_device to
+ *   represent it.
+ */
+struct phy_device * get_phy_device(struct mii_bus *bus, int addr)
+{
+	int phy_reg;
+	u32 phy_id;
+	struct phy_device *dev = NULL;
+
+	/* Grab the bits from PHYIR1, and put them
+	 * in the upper half */
+	phy_reg = bus->read(bus, addr, MII_PHYSID1);
+
+	if (phy_reg < 0)
+		return ERR_PTR(phy_reg);
+
+	phy_id = (phy_reg & 0xffff) << 16;
+
+	/* Grab the bits from PHYIR2, and put them in the lower half */
+	phy_reg = bus->read(bus, addr, MII_PHYSID2);
+
+	if (phy_reg < 0)
+		return ERR_PTR(phy_reg);
+
+	phy_id |= (phy_reg & 0xffff);
+
+	/* If the phy_id is all Fs, there is no device there */
+	if (0xffffffff == phy_id)
+		return NULL;
+
+	/* Otherwise, we allocate the device, and initialize the
+	 * default values */
+	dev = kcalloc(1, sizeof(*dev), GFP_KERNEL);
+
+	if (NULL == dev)
+		return ERR_PTR(-ENOMEM);
+
+	dev->speed = 0;
+	dev->duplex = -1;
+	dev->pause = dev->asym_pause = 0;
+	dev->link = 1;
+
+	dev->autoneg = AUTONEG_ENABLE;
+
+	dev->addr = addr;
+	dev->phy_id = phy_id;
+	dev->bus = bus;
+
+	dev->state = PHY_DOWN;
+
+	spin_lock_init(&dev->lock);
+
+	return dev;
+}
+
+/* phy_prepare_link:
+ *
+ * description: Tells the PHY infrastructure to handle the
+ *   gory details on monitoring link status (whether through
+ *   polling or an interrupt), and to call back to the
+ *   connected device driver when the link status changes.
+ *   If you want to monitor your own link state, don't call
+ *   this function */
+void phy_prepare_link(struct phy_device *phydev,
+		void (*handler)(struct net_device *))
+{
+	phydev->adjust_link = handler;
+}
+
+/* Generic PHY support and helper functions */
+
+/* genphy_config_advert
+ *
+ * description: Writes MII_ADVERTISE with the appropriate values,
+ *   after sanitizing the values to make sure we only advertise
+ *   what is supported
+ */
+static int genphy_config_advert(struct phy_device *phydev)
+{
+	u32 advertise;
+	int adv;
+	int err;
+
+	/* Only allow advertising what
+	 * this PHY supports */
+	phydev->advertising &= phydev->supported;
+	advertise = phydev->advertising;
+
+	/* Setup standard advertisement */
+	adv = phy_read(phydev, MII_ADVERTISE);
+
+	if (adv < 0)
+		return adv;
+
+	adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | 
+		 ADVERTISE_PAUSE_ASYM);
+	if (advertise & ADVERTISED_10baseT_Half)
+		adv |= ADVERTISE_10HALF;
+	if (advertise & ADVERTISED_10baseT_Full)
+		adv |= ADVERTISE_10FULL;
+	if (advertise & ADVERTISED_100baseT_Half)
+		adv |= ADVERTISE_100HALF;
+	if (advertise & ADVERTISED_100baseT_Full)
+		adv |= ADVERTISE_100FULL;
+	if (advertise & ADVERTISED_Pause)
+		adv |= ADVERTISE_PAUSE_CAP;
+	if (advertise & ADVERTISED_Asym_Pause)
+		adv |= ADVERTISE_PAUSE_ASYM;
+
+	err = phy_write(phydev, MII_ADVERTISE, adv);
+
+	if (err < 0)
+		return err;
+
+	/* Configure gigabit if it's supported */
+	if (phydev->supported & (SUPPORTED_1000baseT_Half |
+				SUPPORTED_1000baseT_Full)) {
+		adv = phy_read(phydev, MII_CTRL1000);
+
+		if (adv < 0)
+			return adv;
+
+		adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
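+		/* The SUPPORTED_* and ADVERTISED_* gigabit bits share the
+		 * same values, so testing 'advertise' against the
+		 * SUPPORTED_* masks below is equivalent to using the
+		 * ADVERTISED_* ones */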
+		if (advertise & SUPPORTED_1000baseT_Half)
+			adv |= ADVERTISE_1000HALF;
+		if (advertise & SUPPORTED_1000baseT_Full)
+			adv |= ADVERTISE_1000FULL;
+		err = phy_write(phydev, MII_CTRL1000, adv);
+
+		if (err < 0)
+			return err;
+	}
+
+	return adv;
+}
+
+/* genphy_setup_forced
+ *
+ * description: Configures MII_BMCR to force speed/duplex
+ *   to the values in phydev. Assumes that the values are valid.
+ *   Please see phy_sanitize_settings() */
+int genphy_setup_forced(struct phy_device *phydev)
+{
+	int ctl = BMCR_RESET;
+
+	phydev->pause = phydev->asym_pause = 0;
+
+	if (SPEED_1000 == phydev->speed)
+		ctl |= BMCR_SPEED1000;
+	else if (SPEED_100 == phydev->speed)
+		ctl |= BMCR_SPEED100;
+
+	if (DUPLEX_FULL == phydev->duplex)
+		ctl |= BMCR_FULLDPLX;
+	
+	ctl = phy_write(phydev, MII_BMCR, ctl);
+
+	if (ctl < 0)
+		return ctl;
+
+	/* We just reset the device, so we'd better configure any
+	 * settings the PHY requires to operate */
+	if (phydev->drv->config_init)
+		ctl = phydev->drv->config_init(phydev);
+
+	return ctl;
+}
+
+
+/* Enable and Restart Autonegotiation */
+int genphy_restart_aneg(struct phy_device *phydev)
+{
+	int ctl;
+
+	ctl = phy_read(phydev, MII_BMCR);
+
+	if (ctl < 0)
+		return ctl;
+
+	ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
+
+	/* Don't isolate the PHY if we're negotiating */
+	ctl &= ~(BMCR_ISOLATE);
+
+	ctl = phy_write(phydev, MII_BMCR, ctl);
+
+	return ctl;
+}
+
+
+/* genphy_config_aneg
+ *
+ * description: If auto-negotiation is enabled, we configure the
+ *   advertising, and then restart auto-negotiation.  If it is not
+ *   enabled, then we write the BMCR
+ */
+int genphy_config_aneg(struct phy_device *phydev)
+{
+	int err = 0;
+
+	if (AUTONEG_ENABLE == phydev->autoneg) {
+		err = genphy_config_advert(phydev);
+
+		if (err < 0)
+			return err;
+
+		err = genphy_restart_aneg(phydev);
+	} else
+		err = genphy_setup_forced(phydev);
+
+	return err;
+}
+EXPORT_SYMBOL(genphy_config_aneg);
+
+/* genphy_update_link
+ *
+ * description: Update the value in phydev->link to reflect the
+ *   current link value.  In order to do this, we need to read
+ *   the status register twice, keeping the second value
+ */
+int genphy_update_link(struct phy_device *phydev)
+{
+	int status;
+
+	/* Do a fake read first; the link bit in BMSR latches link-down
+	 * events, so only the second read reflects the current status */
+	status = phy_read(phydev, MII_BMSR);
+
+	if (status < 0)
+		return status;
+
+	/* Read link and autonegotiation status */
+	status = phy_read(phydev, MII_BMSR);
+
+	if (status < 0)
+		return status;
+
+	if ((status & BMSR_LSTATUS) == 0)
+		phydev->link = 0;
+	else
+		phydev->link = 1;
+
+	return 0;
+}
+
+/* genphy_read_status
+ *
+ * description: Check the link, then figure out the current state
+ *   by comparing what we advertise with what the link partner
+ *   advertises.  Start by checking the gigabit possibilities,
+ *   then move on to 10/100.
+ */
+int genphy_read_status(struct phy_device *phydev)
+{
+	int adv;
+	int err;
+	int lpa;
+	int lpagb = 0;
+
+	/* Update the link, but return if there
+	 * was an error */
+	err = genphy_update_link(phydev);
+	if (err)
+		return err;
+
+	if (AUTONEG_ENABLE == phydev->autoneg) {
+		if (phydev->supported & (SUPPORTED_1000baseT_Half
+					| SUPPORTED_1000baseT_Full)) {
+			lpagb = phy_read(phydev, MII_STAT1000);
+
+			if (lpagb < 0)
+				return lpagb;
+
+			adv = phy_read(phydev, MII_CTRL1000);
+
+			if (adv < 0)
+				return adv;
+
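+			/* The 1000BASE-T ability bits in MII_CTRL1000 sit two
+			 * positions below the matching link-partner bits in
+			 * MII_STAT1000, so shift before masking */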
+			lpagb &= adv << 2;
+		}
+
+		lpa = phy_read(phydev, MII_LPA);
+
+		if (lpa < 0)
+			return lpa;
+
+		adv = phy_read(phydev, MII_ADVERTISE);
+
+		if (adv < 0)
+			return adv;
+
+		lpa &= adv;
+
+		phydev->speed = SPEED_10;
+		phydev->duplex = DUPLEX_HALF;
+		phydev->pause = phydev->asym_pause = 0;
+
+		if (lpagb & (LPA_1000FULL | LPA_1000HALF)) {
+			phydev->speed = SPEED_1000;
+
+			if (lpagb & LPA_1000FULL)
+				phydev->duplex = DUPLEX_FULL;
+		} else if (lpa & (LPA_100FULL | LPA_100HALF)) {
+			phydev->speed = SPEED_100;
+			
+			if (lpa & LPA_100FULL)
+				phydev->duplex = DUPLEX_FULL;
+		} else
+			if (lpa & LPA_10FULL)
+				phydev->duplex = DUPLEX_FULL;
+
+		if (phydev->duplex == DUPLEX_FULL){
+			phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
+			phydev->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0;
+		}
+	} else {
+		int bmcr = phy_read(phydev, MII_BMCR);
+		if (bmcr < 0)
+			return bmcr;
+
+		if (bmcr & BMCR_FULLDPLX)
+			phydev->duplex = DUPLEX_FULL;
+		else
+			phydev->duplex = DUPLEX_HALF;
+
+		if (bmcr & BMCR_SPEED1000)
+			phydev->speed = SPEED_1000;
+		else if (bmcr & BMCR_SPEED100)
+			phydev->speed = SPEED_100;
+		else
+			phydev->speed = SPEED_10;
+
+		phydev->pause = phydev->asym_pause = 0;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(genphy_read_status);
+
+static int genphy_config_init(struct phy_device *phydev)
+{
+	u32 val;
+	u32 features;
+
+	/* For now, I'll claim that the generic driver supports
+	 * all possible port types */
+	features = (SUPPORTED_TP | SUPPORTED_MII
+			| SUPPORTED_AUI | SUPPORTED_FIBRE |
+			SUPPORTED_BNC);
+
+	/* Do we support autonegotiation? */
+	val = phy_read(phydev, MII_BMSR);
+
+	if (val < 0)
+		return val;
+
+	if (val & BMSR_ANEGCAPABLE)
+		features |= SUPPORTED_Autoneg;
+
+	if (val & BMSR_100FULL)
+		features |= SUPPORTED_100baseT_Full;
+	if (val & BMSR_100HALF)
+		features |= SUPPORTED_100baseT_Half;
+	if (val & BMSR_10FULL)
+		features |= SUPPORTED_10baseT_Full;
+	if (val & BMSR_10HALF)
+		features |= SUPPORTED_10baseT_Half;
+
+	if (val & BMSR_ESTATEN) {
+		val = phy_read(phydev, MII_ESTATUS);
+
+		if (val < 0)
+			return val;
+
+		if (val & ESTATUS_1000_TFULL)
+			features |= SUPPORTED_1000baseT_Full;
+		if (val & ESTATUS_1000_THALF)
+			features |= SUPPORTED_1000baseT_Half;
+	}
+
+	phydev->supported = features;
+	phydev->advertising = features;
+
+	return 0;
+}
+
+
+/* phy_probe
+ *
+ * description: Take care of setting up the phy_device structure,
+ *   set the state to READY (the driver's init function should
+ *   set it to STARTING if needed).
+ */
+static int phy_probe(struct device *dev)
+{
+	struct phy_device *phydev;
+	struct phy_driver *phydrv;
+	struct device_driver *drv;
+	int err = 0;
+
+	phydev = to_phy_device(dev);
+
+	/* Make sure the driver is held.
+	 * XXX -- Is this correct? */
+	drv = get_driver(phydev->dev.driver);
+	phydrv = to_phy_driver(drv);
+	phydev->drv = phydrv;
+
+	/* Disable the interrupt if the PHY doesn't support it */
+	if (!(phydrv->flags & PHY_HAS_INTERRUPT))
+		phydev->irq = PHY_POLL;
+
+	spin_lock(&phydev->lock);
+
+	/* Start out supporting everything. Eventually,
+	 * a controller will attach, and may modify one
+	 * or both of these values */
+	phydev->supported = phydrv->features;
+	phydev->advertising = phydrv->features;
+
+	/* Set the state to READY by default */
+	phydev->state = PHY_READY;
+
+	if (phydev->drv->probe)
+		err = phydev->drv->probe(phydev);
+
+	spin_unlock(&phydev->lock);
+
+	if (err < 0)
+		return err;
+
+	if (phydev->drv->config_init)
+		err = phydev->drv->config_init(phydev);
+
+	return err;
+}
+
+static int phy_remove(struct device *dev)
+{
+	struct phy_device *phydev;
+
+	phydev = to_phy_device(dev);
+
+	spin_lock(&phydev->lock);
+	phydev->state = PHY_DOWN;
+	spin_unlock(&phydev->lock);
+
+	if (phydev->drv->remove)
+		phydev->drv->remove(phydev);
+
+	put_driver(dev->driver);
+	phydev->drv = NULL;
+
+	return 0;
+}
+
+int phy_driver_register(struct phy_driver *new_driver)
+{
+	int retval;
+
+	memset(&new_driver->driver, 0, sizeof(new_driver->driver));
+	new_driver->driver.name = new_driver->name;
+	new_driver->driver.bus = &mdio_bus_type;
+	new_driver->driver.probe = phy_probe;
+	new_driver->driver.remove = phy_remove;
+
+	retval = driver_register(&new_driver->driver);
+
+	if (retval) {
+		printk(KERN_ERR "%s: Error %d in registering driver\n",
+				new_driver->name, retval);
+
+		return retval;
+	}
+
+	pr_info("%s: Registered new driver\n", new_driver->name);
+
+	return 0;
+}
+EXPORT_SYMBOL(phy_driver_register);
+
+void phy_driver_unregister(struct phy_driver *drv)
+{
+	driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL(phy_driver_unregister);
+
+
+static int __init phy_init(void)
+{
+	int rc;
+	extern int mdio_bus_init(void);
+
+	rc = phy_driver_register(&genphy_driver);
+	if (rc)
+		goto out;
+
+	rc = mdio_bus_init();
+	if (rc)
+		goto out_unreg;
+
+	return 0;
+
+out_unreg:
+	phy_driver_unregister(&genphy_driver);
+out:
+	return rc;
+}
+
+static void __exit phy_exit(void)
+{
+	phy_driver_unregister(&genphy_driver);
+}
+
+module_init(phy_init);
+module_exit(phy_exit);
diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
new file mode 100644
--- /dev/null
+++ b/drivers/net/phy/qsemi.c
@@ -0,0 +1,143 @@
+/*
+ * drivers/net/phy/qsemi.c
+ *
+ * Driver for Quality Semiconductor PHYs
+ *
+ * Author: Andy Fleming
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+/* ------------------------------------------------------------------------- */
+/* The Quality Semiconductor QS6612 is used on the RPX CLLF                  */
+
+/* register definitions */
+
+#define MII_QS6612_MCR		17  /* Mode Control Register      */
+#define MII_QS6612_FTR		27  /* Factory Test Register      */
+#define MII_QS6612_MCO		28  /* Misc. Control Register     */
+#define MII_QS6612_ISR		29  /* Interrupt Source Register  */
+#define MII_QS6612_IMR		30  /* Interrupt Mask Register    */
+#define MII_QS6612_IMR_INIT	0x003a
+#define MII_QS6612_PCR		31  /* 100BaseTx PHY Control Reg. */
+
+#define QS6612_PCR_AN_COMPLETE	0x1000
+#define QS6612_PCR_RLBEN	0x0200
+#define QS6612_PCR_DCREN	0x0100
+#define QS6612_PCR_4B5BEN	0x0040
+#define QS6612_PCR_TX_ISOLATE	0x0020
+#define QS6612_PCR_MLT3_DIS	0x0002
+#define QS6612_PCR_SCRM_DESCRM	0x0001
+
+MODULE_DESCRIPTION("Quality Semiconductor PHY driver");
+MODULE_AUTHOR("Andy Fleming");
+MODULE_LICENSE("GPL");
+
+/* Returns 0, unless there's a write error */
+static int qs6612_config_init(struct phy_device *phydev)
+{
+	/* The PHY powers up isolated on the RPX,
+	 * so send a command to allow operation.
+	 * XXX - My docs indicate this should be 0x0940
+	 * ...or something.  The current value sets three
+	 * reserved bits, bit 11, which specifies it should be
+	 * set to one, bit 10, which specifies it should be set
+	 * to 0, and bit 7, which doesn't specify.  However, my
+	 * docs are preliminary, and I will leave it like this
+	 * until someone more knowledgeable corrects me or it.
+	 * -- Andy Fleming
+	 */
+	return phy_write(phydev, MII_QS6612_PCR, 0x0dc0);
+}
+
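+/* Acknowledge a pending interrupt by reading the latched interrupt
+ * source and status registers, which clears them on this PHY */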
+static int qs6612_ack_interrupt(struct phy_device *phydev)
+{
+	int err;
+
+	err = phy_read(phydev, MII_QS6612_ISR);
+
+	if (err < 0)
+		return err;
+
+	err = phy_read(phydev, MII_BMSR);
+
+	if (err < 0)
+		return err;
+
+	err = phy_read(phydev, MII_EXPANSION);
+
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+
+static int qs6612_config_intr(struct phy_device *phydev)
+{
+	int err;
+	if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+		err = phy_write(phydev, MII_QS6612_IMR,
+				MII_QS6612_IMR_INIT);
+	else
+		err = phy_write(phydev, MII_QS6612_IMR, 0);
+
+	return err;
+
+}
+
+static struct phy_driver qs6612_driver = {
+	.phy_id		= 0x00181440,
+	.name		= "QS6612",
+	.phy_id_mask	= 0xfffffff0,
+	.features	= PHY_BASIC_FEATURES,
+	.flags		= PHY_HAS_INTERRUPT,
+	.config_init	= qs6612_config_init,
+	.config_aneg	= genphy_config_aneg,
+	.read_status	= genphy_read_status,
+	.ack_interrupt	= qs6612_ack_interrupt,
+	.config_intr	= qs6612_config_intr,
+	.driver 	= { .owner = THIS_MODULE,},
+};
+
+static int __init qs6612_init(void)
+{
+	return phy_driver_register(&qs6612_driver);
+}
+
+static void __exit qs6612_exit(void)
+{
+	phy_driver_unregister(&qs6612_driver);
+}
+
+module_init(qs6612_init);
+module_exit(qs6612_exit);
diff --git a/drivers/net/s2io-regs.h b/drivers/net/s2io-regs.h
--- a/drivers/net/s2io-regs.h
+++ b/drivers/net/s2io-regs.h
@@ -62,6 +62,7 @@ typedef struct _XENA_dev_config {
 #define ADAPTER_STATUS_RMAC_REMOTE_FAULT   BIT(6)
 #define ADAPTER_STATUS_RMAC_LOCAL_FAULT    BIT(7)
 #define ADAPTER_STATUS_RMAC_PCC_IDLE       vBIT(0xFF,8,8)
+#define ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE  vBIT(0x0F,8,8)
 #define ADAPTER_STATUS_RC_PRC_QUIESCENT    vBIT(0xFF,16,8)
 #define ADAPTER_STATUS_MC_DRAM_READY       BIT(24)
 #define ADAPTER_STATUS_MC_QUEUES_READY     BIT(25)
@@ -77,21 +78,34 @@ typedef struct _XENA_dev_config {
 #define ADAPTER_ECC_EN                     BIT(55)
 
 	u64 serr_source;
-#define SERR_SOURCE_PIC					BIT(0)
-#define SERR_SOURCE_TXDMA				BIT(1)
-#define SERR_SOURCE_RXDMA				BIT(2)
+#define SERR_SOURCE_PIC			BIT(0)
+#define SERR_SOURCE_TXDMA		BIT(1)
+#define SERR_SOURCE_RXDMA		BIT(2)
 #define SERR_SOURCE_MAC                 BIT(3)
 #define SERR_SOURCE_MC                  BIT(4)
 #define SERR_SOURCE_XGXS                BIT(5)
-#define	SERR_SOURCE_ANY					(SERR_SOURCE_PIC		| \
-										SERR_SOURCE_TXDMA	| \
-										SERR_SOURCE_RXDMA	| \
-										SERR_SOURCE_MAC		| \
-										SERR_SOURCE_MC      | \
-										SERR_SOURCE_XGXS)
+#define	SERR_SOURCE_ANY			(SERR_SOURCE_PIC	| \
+					SERR_SOURCE_TXDMA	| \
+					SERR_SOURCE_RXDMA	| \
+					SERR_SOURCE_MAC		| \
+					SERR_SOURCE_MC		| \
+					SERR_SOURCE_XGXS)
+
+	u64 pci_mode;
+#define	GET_PCI_MODE(val)		((val & vBIT(0xF, 0, 4)) >> 60)
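+/* The bus mode is reported in the top nibble of the pci_mode register */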
+#define	PCI_MODE_PCI_33			0
+#define	PCI_MODE_PCI_66			0x1
+#define	PCI_MODE_PCIX_M1_66		0x2
+#define	PCI_MODE_PCIX_M1_100		0x3
+#define	PCI_MODE_PCIX_M1_133		0x4
+#define	PCI_MODE_PCIX_M2_66		0x5
+#define	PCI_MODE_PCIX_M2_100		0x6
+#define	PCI_MODE_PCIX_M2_133		0x7
+#define	PCI_MODE_UNSUPPORTED		BIT(0)
+#define	PCI_MODE_32_BITS		BIT(8)
+#define	PCI_MODE_UNKNOWN_MODE		BIT(9)
 
-
-	u8 unused_0[0x800 - 0x120];
+	u8 unused_0[0x800 - 0x128];
 
 /* PCI-X Controller registers */
 	u64 pic_int_status;
@@ -153,7 +167,11 @@ typedef struct _XENA_dev_config {
 	u8 unused4[0x08];
 
 	u64 gpio_int_reg;
+#define GPIO_INT_REG_LINK_DOWN                 BIT(1)
+#define GPIO_INT_REG_LINK_UP                   BIT(2)
 	u64 gpio_int_mask;
+#define GPIO_INT_MASK_LINK_DOWN                BIT(1)
+#define GPIO_INT_MASK_LINK_UP                  BIT(2)
 	u64 gpio_alarms;
 
 	u8 unused5[0x38];
@@ -223,19 +241,16 @@ typedef struct _XENA_dev_config {
 	u64 xmsi_data;
 
 	u64 rx_mat;
+#define RX_MAT_SET(ring, msi)			vBIT(msi, (8 * ring), 8)
 
 	u8 unused6[0x8];
 
-	u64 tx_mat0_7;
-	u64 tx_mat8_15;
-	u64 tx_mat16_23;
-	u64 tx_mat24_31;
-	u64 tx_mat32_39;
-	u64 tx_mat40_47;
-	u64 tx_mat48_55;
-	u64 tx_mat56_63;
+	u64 tx_mat0_n[0x8];
+#define TX_MAT_SET(fifo, msi)			vBIT(msi, (8 * fifo), 8)
 
-	u8 unused_1[0x10];
+	u8 unused_1[0x8];
+	u64 stat_byte_cnt;
+#define STAT_BC(n)                              vBIT(n,4,12)
 
 	/* Automated statistics collection */
 	u64 stat_cfg;
@@ -246,6 +261,7 @@ typedef struct _XENA_dev_config {
 #define STAT_TRSF_PER(n)           TBD
 #define	PER_SEC					   0x208d5
 #define	SET_UPDT_PERIOD(n)		   vBIT((PER_SEC*n),32,32)
+#define	SET_UPDT_CLICKS(val)		   vBIT(val, 32, 32)
 
 	u64 stat_addr;
 
@@ -267,8 +283,15 @@ typedef struct _XENA_dev_config {
 
 	u64 gpio_control;
 #define GPIO_CTRL_GPIO_0		BIT(8)
+	u64 misc_control;
+#define MISC_LINK_STABILITY_PRD(val)   vBIT(val,29,3)
+
+	u8 unused7_1[0x240 - 0x208];
+
+	u64 wreq_split_mask;
+#define	WREQ_SPLIT_MASK_SET_MASK(val)	vBIT(val, 52, 12)
 
-	u8 unused7[0x600];
+	u8 unused7_2[0x800 - 0x248];
 
 /* TxDMA registers */
 	u64 txdma_int_status;
@@ -290,6 +313,7 @@ typedef struct _XENA_dev_config {
 
 	u64 pcc_err_reg;
 #define PCC_FB_ECC_DB_ERR		vBIT(0xFF, 16, 8)
+#define PCC_ENABLE_FOUR			vBIT(0x0F,0,8)
 
 	u64 pcc_err_mask;
 	u64 pcc_err_alarm;
@@ -468,6 +492,7 @@ typedef struct _XENA_dev_config {
 #define PRC_CTRL_NO_SNOOP                      (BIT(22)|BIT(23))
 #define PRC_CTRL_NO_SNOOP_DESC                 BIT(22)
 #define PRC_CTRL_NO_SNOOP_BUFF                 BIT(23)
+#define PRC_CTRL_BIMODAL_INTERRUPT             BIT(37)
 #define PRC_CTRL_RXD_BACKOFF_INTERVAL(val)     vBIT(val,40,24)
 
 	u64 prc_alarm_action;
@@ -691,6 +716,10 @@ typedef struct _XENA_dev_config {
 #define MC_ERR_REG_MIRI_CRI_ERR_0          BIT(22)
 #define MC_ERR_REG_MIRI_CRI_ERR_1          BIT(23)
 #define MC_ERR_REG_SM_ERR                  BIT(31)
+#define MC_ERR_REG_ECC_ALL_SNG		   (BIT(6) | \
+					BIT(7) | BIT(17) | BIT(19))
+#define MC_ERR_REG_ECC_ALL_DBL		   (BIT(14) | \
+					BIT(15) | BIT(18) | BIT(20))
 	u64 mc_err_mask;
 	u64 mc_err_alarm;
 
@@ -736,7 +765,19 @@ typedef struct _XENA_dev_config {
 	u64 mc_rldram_test_d1;
 	u8 unused24[0x300 - 0x288];
 	u64 mc_rldram_test_d2;
-	u8 unused25[0x700 - 0x308];
+
+	u8 unused24_1[0x360 - 0x308];
+	u64 mc_rldram_ctrl;
+#define	MC_RLDRAM_ENABLE_ODT		BIT(7)
+
+	u8 unused24_2[0x640 - 0x368];
+	u64 mc_rldram_ref_per_herc;
+#define	MC_RLDRAM_SET_REF_PERIOD(val)	vBIT(val, 0, 16)
+
+	u8 unused24_3[0x660 - 0x648];
+	u64 mc_rldram_mrs_herc;
+
+	u8 unused25[0x700 - 0x668];
 	u64 mc_debug_ctrl;
 
 	u8 unused26[0x3000 - 0x2f08];
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -11,29 +11,28 @@
  * See the file COPYING in this distribution for more information.
  *
  * Credits:
- * Jeff Garzik		: For pointing out the improper error condition 
- *			  check in the s2io_xmit routine and also some 
- * 			  issues in the Tx watch dog function. Also for
- *			  patiently answering all those innumerable 
+ * Jeff Garzik		: For pointing out the improper error condition
+ *			  check in the s2io_xmit routine and also some
+ *			  issues in the Tx watch dog function. Also for
+ *			  patiently answering all those innumerable
 *			  questions regarding the 2.6 porting issues.
  * Stephen Hemminger	: Providing proper 2.6 porting mechanism for some
  *			  macros available only in 2.6 Kernel.
- * Francois Romieu	: For pointing out all code part that were 
+ * Francois Romieu	: For pointing out all code part that were
  *			  deprecated and also styling related comments.
- * Grant Grundler	: For helping me get rid of some Architecture 
+ * Grant Grundler	: For helping me get rid of some Architecture
  *			  dependent code.
  * Christopher Hellwig	: Some more 2.6 specific issues in the driver.
- *			  	
+ *
  * The module loadable parameters that are supported by the driver and a brief
 * explanation of all the variables.
- * rx_ring_num : This can be used to program the number of receive rings used 
- * in the driver.  					
- * rx_ring_len: This defines the number of descriptors each ring can have. This 
+ * rx_ring_num : This can be used to program the number of receive rings used
+ * in the driver.
+ * rx_ring_len: This defines the number of descriptors each ring can have. This
  * is also an array of size 8.
 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
- * tx_fifo_len: This too is an array of 8. Each element defines the number of 
+ * tx_fifo_len: This too is an array of 8. Each element defines the number of
  * Tx descriptors that can be associated with each corresponding FIFO.
- * in PCI Configuration space.
  ************************************************************************/
 
 #include <linux/config.h>
@@ -56,27 +55,39 @@
 #include <linux/ethtool.h>
 #include <linux/version.h>
 #include <linux/workqueue.h>
+#include <linux/if_vlan.h>
 
-#include <asm/io.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
+#include <asm/io.h>
 
 /* local include */
 #include "s2io.h"
 #include "s2io-regs.h"
 
 /* S2io Driver name & version. */
-static char s2io_driver_name[] = "s2io";
-static char s2io_driver_version[] = "Version 1.7.7.1";
+static char s2io_driver_name[] = "Neterion";
+static char s2io_driver_version[] = "Version 2.0.2.1";
+
+static inline int RXD_IS_UP2DT(RxD_t *rxdp)
+{
+	int ret;
+
+	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
+		(GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
+
+	return ret;
+}
 
-/* 
+/*
  * Cards with following subsystem_id have a link state indication
  * problem, 600B, 600C, 600D, 640B, 640C and 640D.
  * macro below identifies these cards given the subsystem_id.
  */
-#define CARDS_WITH_FAULTY_LINK_INDICATORS(subid) \
-		(((subid >= 0x600B) && (subid <= 0x600D)) || \
-		 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0
+#define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
+	(dev_type == XFRAME_I_DEVICE) ?			\
+		((((subid >= 0x600B) && (subid <= 0x600D)) || \
+		 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
 
 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
 				      ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
@@ -86,9 +97,12 @@ static char s2io_driver_version[] = "Ver
 static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
 {
 	int level = 0;
-	if ((sp->pkt_cnt[ring] - rxb_size) > 16) {
+	mac_info_t *mac_control;
+
+	mac_control = &sp->mac_control;
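+	/* LOW: more than 16 buffers have been consumed and need refilling;
+	 * PANIC: no more than one block's worth of RxDs is still available */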
+	if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
 		level = LOW;
-		if ((sp->pkt_cnt[ring] - rxb_size) < MAX_RXDS_PER_BLOCK) {
+		if (rxb_size <= MAX_RXDS_PER_BLOCK) {
 			level = PANIC;
 		}
 	}
@@ -145,6 +159,9 @@ static char ethtool_stats_keys[][ETH_GST
 	{"rmac_pause_cnt"},
 	{"rmac_accepted_ip"},
 	{"rmac_err_tcp"},
+	{"\n DRIVER STATISTICS"},
+	{"single_bit_ecc_errs"},
+	{"double_bit_ecc_errs"},
 };
 
 #define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN
@@ -153,8 +170,37 @@ static char ethtool_stats_keys[][ETH_GST
 #define S2IO_TEST_LEN	sizeof(s2io_gstrings) / ETH_GSTRING_LEN
 #define S2IO_STRINGS_LEN	S2IO_TEST_LEN * ETH_GSTRING_LEN
 
+#define S2IO_TIMER_CONF(timer, handle, arg, exp)		\
+			init_timer(&timer);			\
+			timer.function = handle;		\
+			timer.data = (unsigned long) arg;	\
+			mod_timer(&timer, (jiffies + exp))	\
+
+/* Add the vlan */
+static void s2io_vlan_rx_register(struct net_device *dev,
+					struct vlan_group *grp)
+{
+	nic_t *nic = dev->priv;
+	unsigned long flags;
+
+	spin_lock_irqsave(&nic->tx_lock, flags);
+	nic->vlgrp = grp;
+	spin_unlock_irqrestore(&nic->tx_lock, flags);
+}
+
+/* Unregister the vlan */
+static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
+{
+	nic_t *nic = dev->priv;
+	unsigned long flags;
+
+	spin_lock_irqsave(&nic->tx_lock, flags);
+	if (nic->vlgrp)
+		nic->vlgrp->vlan_devices[vid] = NULL;
+	spin_unlock_irqrestore(&nic->tx_lock, flags);
+}
 
-/* 
+/*
  * Constants to be programmed into the Xena's registers, to configure
  * the XAUI.
  */
@@ -162,7 +208,24 @@ static char ethtool_stats_keys[][ETH_GST
 #define SWITCH_SIGN	0xA5A5A5A5A5A5A5A5ULL
 #define	END_SIGN	0x0
 
-static u64 default_mdio_cfg[] = {
+static u64 herc_act_dtx_cfg[] = {
+	/* Set address */
+	0x80000515BA750000ULL, 0x80000515BA7500E0ULL,
+	/* Write data */
+	0x80000515BA750004ULL, 0x80000515BA7500E4ULL,
+	/* Set address */
+	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
+	/* Write data */
+	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
+	/* Set address */
+	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
+	/* Write data */
+	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
+	/* Done */
+	END_SIGN
+};
+
+static u64 xena_mdio_cfg[] = {
 	/* Reset PMA PLL */
 	0xC001010000000000ULL, 0xC0010100000000E0ULL,
 	0xC0010100008000E4ULL,
@@ -172,7 +235,7 @@ static u64 default_mdio_cfg[] = {
 	END_SIGN
 };
 
-static u64 default_dtx_cfg[] = {
+static u64 xena_dtx_cfg[] = {
 	0x8000051500000000ULL, 0x80000515000000E0ULL,
 	0x80000515D93500E4ULL, 0x8001051500000000ULL,
 	0x80010515000000E0ULL, 0x80010515001E00E4ULL,
@@ -196,8 +259,7 @@ static u64 default_dtx_cfg[] = {
 	END_SIGN
 };
 
-
-/* 
+/*
  * Constants for Fixing the MacAddress problem seen mostly on
  * Alpha machines.
  */
@@ -226,20 +288,25 @@ static unsigned int tx_fifo_len[MAX_TX_F
 static unsigned int rx_ring_num = 1;
 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
     {[0 ...(MAX_RX_RINGS - 1)] = 0 };
-static unsigned int Stats_refresh_time = 4;
+static unsigned int rts_frm_len[MAX_RX_RINGS] =
+    {[0 ...(MAX_RX_RINGS - 1)] = 0 };
+static unsigned int use_continuous_tx_intrs = 1;
 static unsigned int rmac_pause_time = 65535;
 static unsigned int mc_pause_threshold_q0q3 = 187;
 static unsigned int mc_pause_threshold_q4q7 = 187;
 static unsigned int shared_splits;
 static unsigned int tmac_util_period = 5;
 static unsigned int rmac_util_period = 5;
+static unsigned int bimodal = 0;
 #ifndef CONFIG_S2IO_NAPI
 static unsigned int indicate_max_pkts;
 #endif
+/* Frequency of Rx desc syncs expressed as power of 2 */
+static unsigned int rxsync_frequency = 3;
 
-/* 
+/*
  * S2IO device table.
- * This table lists all the devices that this driver supports. 
+ * This table lists all the devices that this driver supports.
  */
 static struct pci_device_id s2io_tbl[] __devinitdata = {
 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
@@ -247,9 +314,9 @@ static struct pci_device_id s2io_tbl[] _
 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
 	 PCI_ANY_ID, PCI_ANY_ID},
 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
-	 PCI_ANY_ID, PCI_ANY_ID},
-	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
-	 PCI_ANY_ID, PCI_ANY_ID},
+         PCI_ANY_ID, PCI_ANY_ID},
+        {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
+         PCI_ANY_ID, PCI_ANY_ID},
 	{0,}
 };
 
@@ -268,8 +335,8 @@ static struct pci_driver s2io_driver = {
 /**
  * init_shared_mem - Allocation and Initialization of Memory
  * @nic: Device private variable.
- * Description: The function allocates all the memory areas shared 
- * between the NIC and the driver. This includes Tx descriptors, 
+ * Description: The function allocates all the memory areas shared
+ * between the NIC and the driver. This includes Tx descriptors,
  * Rx descriptors and the statistics block.
  */
 
@@ -279,11 +346,11 @@ static int init_shared_mem(struct s2io_n
 	void *tmp_v_addr, *tmp_v_addr_next;
 	dma_addr_t tmp_p_addr, tmp_p_addr_next;
 	RxD_block_t *pre_rxd_blk = NULL;
-	int i, j, blk_cnt;
+	int i, j, blk_cnt, rx_sz, tx_sz;
 	int lst_size, lst_per_page;
 	struct net_device *dev = nic->dev;
 #ifdef CONFIG_2BUFF_MODE
-	unsigned long tmp;
+	u64 tmp;
 	buffAdd_t *ba;
 #endif
 
@@ -300,36 +367,41 @@ static int init_shared_mem(struct s2io_n
 		size += config->tx_cfg[i].fifo_len;
 	}
 	if (size > MAX_AVAILABLE_TXDS) {
-		DBG_PRINT(ERR_DBG, "%s: Total number of Tx FIFOs ",
-			  dev->name);
-		DBG_PRINT(ERR_DBG, "exceeds the maximum value ");
-		DBG_PRINT(ERR_DBG, "that can be used\n");
+		DBG_PRINT(ERR_DBG, "%s: Requested TxDs too high, ",
+			  __FUNCTION__);
+		DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
 		return FAILURE;
 	}
 
 	lst_size = (sizeof(TxD_t) * config->max_txds);
+	tx_sz = lst_size * size;
 	lst_per_page = PAGE_SIZE / lst_size;
 
 	for (i = 0; i < config->tx_fifo_num; i++) {
 		int fifo_len = config->tx_cfg[i].fifo_len;
 		int list_holder_size = fifo_len * sizeof(list_info_hold_t);
-		nic->list_info[i] = kmalloc(list_holder_size, GFP_KERNEL);
-		if (!nic->list_info[i]) {
+		mac_control->fifos[i].list_info = kmalloc(list_holder_size,
+							  GFP_KERNEL);
+		if (!mac_control->fifos[i].list_info) {
 			DBG_PRINT(ERR_DBG,
 				  "Malloc failed for list_info\n");
 			return -ENOMEM;
 		}
-		memset(nic->list_info[i], 0, list_holder_size);
+		memset(mac_control->fifos[i].list_info, 0, list_holder_size);
 	}
 	for (i = 0; i < config->tx_fifo_num; i++) {
 		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
 						lst_per_page);
-		mac_control->tx_curr_put_info[i].offset = 0;
-		mac_control->tx_curr_put_info[i].fifo_len =
+		mac_control->fifos[i].tx_curr_put_info.offset = 0;
+		mac_control->fifos[i].tx_curr_put_info.fifo_len =
 		    config->tx_cfg[i].fifo_len - 1;
-		mac_control->tx_curr_get_info[i].offset = 0;
-		mac_control->tx_curr_get_info[i].fifo_len =
+		mac_control->fifos[i].tx_curr_get_info.offset = 0;
+		mac_control->fifos[i].tx_curr_get_info.fifo_len =
 		    config->tx_cfg[i].fifo_len - 1;
+		mac_control->fifos[i].fifo_no = i;
+		mac_control->fifos[i].nic = nic;
+		mac_control->fifos[i].max_txds = MAX_SKB_FRAGS;
+
 		for (j = 0; j < page_num; j++) {
 			int k = 0;
 			dma_addr_t tmp_p;
@@ -345,16 +417,15 @@ static int init_shared_mem(struct s2io_n
 			while (k < lst_per_page) {
 				int l = (j * lst_per_page) + k;
 				if (l == config->tx_cfg[i].fifo_len)
-					goto end_txd_alloc;
-				nic->list_info[i][l].list_virt_addr =
+					break;
+				mac_control->fifos[i].list_info[l].list_virt_addr =
 				    tmp_v + (k * lst_size);
-				nic->list_info[i][l].list_phy_addr =
+				mac_control->fifos[i].list_info[l].list_phy_addr =
 				    tmp_p + (k * lst_size);
 				k++;
 			}
 		}
 	}
-      end_txd_alloc:
 
 	/* Allocation and initialization of RXDs in Rings */
 	size = 0;
@@ -367,21 +438,26 @@ static int init_shared_mem(struct s2io_n
 			return FAILURE;
 		}
 		size += config->rx_cfg[i].num_rxd;
-		nic->block_count[i] =
+		mac_control->rings[i].block_count =
 		    config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
-		nic->pkt_cnt[i] =
-		    config->rx_cfg[i].num_rxd - nic->block_count[i];
+		mac_control->rings[i].pkt_cnt =
+		    config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
 	}
+	size = (size * (sizeof(RxD_t)));
+	rx_sz = size;
 
 	for (i = 0; i < config->rx_ring_num; i++) {
-		mac_control->rx_curr_get_info[i].block_index = 0;
-		mac_control->rx_curr_get_info[i].offset = 0;
-		mac_control->rx_curr_get_info[i].ring_len =
+		mac_control->rings[i].rx_curr_get_info.block_index = 0;
+		mac_control->rings[i].rx_curr_get_info.offset = 0;
+		mac_control->rings[i].rx_curr_get_info.ring_len =
 		    config->rx_cfg[i].num_rxd - 1;
-		mac_control->rx_curr_put_info[i].block_index = 0;
-		mac_control->rx_curr_put_info[i].offset = 0;
-		mac_control->rx_curr_put_info[i].ring_len =
+		mac_control->rings[i].rx_curr_put_info.block_index = 0;
+		mac_control->rings[i].rx_curr_put_info.offset = 0;
+		mac_control->rings[i].rx_curr_put_info.ring_len =
 		    config->rx_cfg[i].num_rxd - 1;
+		mac_control->rings[i].nic = nic;
+		mac_control->rings[i].ring_no = i;
+
 		blk_cnt =
 		    config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
 		/*  Allocating all the Rx blocks */
@@ -395,32 +471,36 @@ static int init_shared_mem(struct s2io_n
 							  &tmp_p_addr);
 			if (tmp_v_addr == NULL) {
 				/*
-				 * In case of failure, free_shared_mem() 
-				 * is called, which should free any 
-				 * memory that was alloced till the 
+				 * In case of failure, free_shared_mem()
+				 * is called, which should free any
+				 * memory that was alloced till the
 				 * failure happened.
 				 */
-				nic->rx_blocks[i][j].block_virt_addr =
+				mac_control->rings[i].rx_blocks[j].block_virt_addr =
 				    tmp_v_addr;
 				return -ENOMEM;
 			}
 			memset(tmp_v_addr, 0, size);
-			nic->rx_blocks[i][j].block_virt_addr = tmp_v_addr;
-			nic->rx_blocks[i][j].block_dma_addr = tmp_p_addr;
+			mac_control->rings[i].rx_blocks[j].block_virt_addr =
+				tmp_v_addr;
+			mac_control->rings[i].rx_blocks[j].block_dma_addr =
+				tmp_p_addr;
 		}
 		/* Interlinking all Rx Blocks */
 		for (j = 0; j < blk_cnt; j++) {
-			tmp_v_addr = nic->rx_blocks[i][j].block_virt_addr;
+			tmp_v_addr =
+				mac_control->rings[i].rx_blocks[j].block_virt_addr;
 			tmp_v_addr_next =
-			    nic->rx_blocks[i][(j + 1) %
+				mac_control->rings[i].rx_blocks[(j + 1) %
 					      blk_cnt].block_virt_addr;
-			tmp_p_addr = nic->rx_blocks[i][j].block_dma_addr;
+			tmp_p_addr =
+				mac_control->rings[i].rx_blocks[j].block_dma_addr;
 			tmp_p_addr_next =
-			    nic->rx_blocks[i][(j + 1) %
+				mac_control->rings[i].rx_blocks[(j + 1) %
 					      blk_cnt].block_dma_addr;
 
 			pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
-			pre_rxd_blk->reserved_1 = END_OF_BLOCK;	/* last RxD 
+			pre_rxd_blk->reserved_1 = END_OF_BLOCK;	/* last RxD
 								 * marker.
 								 */
 #ifndef	CONFIG_2BUFF_MODE
@@ -433,43 +513,43 @@ static int init_shared_mem(struct s2io_n
 	}
 
 #ifdef CONFIG_2BUFF_MODE
-	/* 
+	/*
 	 * Allocation of Storages for buffer addresses in 2BUFF mode
 	 * and the buffers as well.
 	 */
 	for (i = 0; i < config->rx_ring_num; i++) {
 		blk_cnt =
 		    config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
-		nic->ba[i] = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
+		mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
 				     GFP_KERNEL);
-		if (!nic->ba[i])
+		if (!mac_control->rings[i].ba)
 			return -ENOMEM;
 		for (j = 0; j < blk_cnt; j++) {
 			int k = 0;
-			nic->ba[i][j] = kmalloc((sizeof(buffAdd_t) *
+			mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
 						 (MAX_RXDS_PER_BLOCK + 1)),
 						GFP_KERNEL);
-			if (!nic->ba[i][j])
+			if (!mac_control->rings[i].ba[j])
 				return -ENOMEM;
 			while (k != MAX_RXDS_PER_BLOCK) {
-				ba = &nic->ba[i][j][k];
+				ba = &mac_control->rings[i].ba[j][k];
 
-				ba->ba_0_org = kmalloc
+				ba->ba_0_org = (void *) kmalloc
 				    (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
 				if (!ba->ba_0_org)
 					return -ENOMEM;
-				tmp = (unsigned long) ba->ba_0_org;
+				tmp = (u64) ba->ba_0_org;
 				tmp += ALIGN_SIZE;
-				tmp &= ~((unsigned long) ALIGN_SIZE);
+				tmp &= ~((u64) ALIGN_SIZE);
 				ba->ba_0 = (void *) tmp;
 
-				ba->ba_1_org = kmalloc
+				ba->ba_1_org = (void *) kmalloc
 				    (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
 				if (!ba->ba_1_org)
 					return -ENOMEM;
-				tmp = (unsigned long) ba->ba_1_org;
+				tmp = (u64) ba->ba_1_org;
 				tmp += ALIGN_SIZE;
-				tmp &= ~((unsigned long) ALIGN_SIZE);
+				tmp &= ~((u64) ALIGN_SIZE);
 				ba->ba_1 = (void *) tmp;
 				k++;
 			}
@@ -483,9 +563,9 @@ static int init_shared_mem(struct s2io_n
 	    (nic->pdev, size, &mac_control->stats_mem_phy);
 
 	if (!mac_control->stats_mem) {
-		/* 
-		 * In case of failure, free_shared_mem() is called, which 
-		 * should free any memory that was alloced till the 
+		/*
+		 * In case of failure, free_shared_mem() is called, which
+		 * should free any memory that was alloced till the
 		 * failure happened.
 		 */
 		return -ENOMEM;
@@ -495,15 +575,14 @@ static int init_shared_mem(struct s2io_n
 	tmp_v_addr = mac_control->stats_mem;
 	mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
 	memset(tmp_v_addr, 0, size);
-
 	DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
 		  (unsigned long long) tmp_p_addr);
 
 	return SUCCESS;
 }
 
-/**  
- * free_shared_mem - Free the allocated Memory 
+/**
+ * free_shared_mem - Free the allocated Memory
  * @nic:  Device private variable.
  * Description: This function is to free all memory locations allocated by
  * the init_shared_mem() function and return it to the kernel.
@@ -533,15 +612,19 @@ static void free_shared_mem(struct s2io_
 						lst_per_page);
 		for (j = 0; j < page_num; j++) {
 			int mem_blks = (j * lst_per_page);
-			if (!nic->list_info[i][mem_blks].list_virt_addr)
+			if ((!mac_control->fifos[i].list_info) ||
+				(!mac_control->fifos[i].list_info[mem_blks].
+				 list_virt_addr))
 				break;
 			pci_free_consistent(nic->pdev, PAGE_SIZE,
-					    nic->list_info[i][mem_blks].
+					    mac_control->fifos[i].
+					    list_info[mem_blks].
 					    list_virt_addr,
-					    nic->list_info[i][mem_blks].
+					    mac_control->fifos[i].
+					    list_info[mem_blks].
 					    list_phy_addr);
 		}
-		kfree(nic->list_info[i]);
+		kfree(mac_control->fifos[i].list_info);
 	}
 
 #ifndef CONFIG_2BUFF_MODE
@@ -550,10 +633,12 @@ static void free_shared_mem(struct s2io_
 	size = SIZE_OF_BLOCK;
 #endif
 	for (i = 0; i < config->rx_ring_num; i++) {
-		blk_cnt = nic->block_count[i];
+		blk_cnt = mac_control->rings[i].block_count;
 		for (j = 0; j < blk_cnt; j++) {
-			tmp_v_addr = nic->rx_blocks[i][j].block_virt_addr;
-			tmp_p_addr = nic->rx_blocks[i][j].block_dma_addr;
+			tmp_v_addr = mac_control->rings[i].rx_blocks[j].
+				block_virt_addr;
+			tmp_p_addr = mac_control->rings[i].rx_blocks[j].
+				block_dma_addr;
 			if (tmp_v_addr == NULL)
 				break;
 			pci_free_consistent(nic->pdev, size,
@@ -566,35 +651,21 @@ static void free_shared_mem(struct s2io_
 	for (i = 0; i < config->rx_ring_num; i++) {
 		blk_cnt =
 		    config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
-		if (!nic->ba[i])
-			goto end_free;
 		for (j = 0; j < blk_cnt; j++) {
 			int k = 0;
-			if (!nic->ba[i][j]) {
-				kfree(nic->ba[i]);
-				goto end_free;
-			}
+			if (!mac_control->rings[i].ba[j])
+				continue;
 			while (k != MAX_RXDS_PER_BLOCK) {
-				buffAdd_t *ba = &nic->ba[i][j][k];
-				if (!ba || !ba->ba_0_org || !ba->ba_1_org)
-				{
-					kfree(nic->ba[i]);
-					kfree(nic->ba[i][j]);
-					if(ba->ba_0_org)
-						kfree(ba->ba_0_org);
-					if(ba->ba_1_org)
-						kfree(ba->ba_1_org);
-					goto end_free;
-				}
+				buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
 				kfree(ba->ba_0_org);
 				kfree(ba->ba_1_org);
 				k++;
 			}
-			kfree(nic->ba[i][j]);
+			kfree(mac_control->rings[i].ba[j]);
 		}
-		kfree(nic->ba[i]);
+		if (mac_control->rings[i].ba)
+			kfree(mac_control->rings[i].ba);
 	}
-end_free:
 #endif
 
 	if (mac_control->stats_mem) {
@@ -605,12 +676,93 @@ end_free:
 	}
 }
 
-/**  
- *  init_nic - Initialization of hardware 
+/**
+ * s2io_verify_pci_mode -
+ */
+
+static int s2io_verify_pci_mode(nic_t *nic)
+{
+	XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
+	register u64 val64 = 0;
+	int     mode;
+
+	val64 = readq(&bar0->pci_mode);
+	mode = (u8)GET_PCI_MODE(val64);
+
+	if ( val64 & PCI_MODE_UNKNOWN_MODE)
+		return -1;      /* Unknown PCI mode */
+	return mode;
+}
+
+
+/**
+ * s2io_print_pci_mode -
+ */
+static int s2io_print_pci_mode(nic_t *nic)
+{
+	XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
+	register u64 val64 = 0;
+	int	mode;
+	struct config_param *config = &nic->config;
+
+	val64 = readq(&bar0->pci_mode);
+	mode = (u8)GET_PCI_MODE(val64);
+
+	if ( val64 & PCI_MODE_UNKNOWN_MODE)
+		return -1;	/* Unknown PCI mode */
+
+	if (val64 & PCI_MODE_32_BITS) {
+		DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
+	} else {
+		DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
+	}
+
+	switch(mode) {
+		case PCI_MODE_PCI_33:
+			DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
+			config->bus_speed = 33;
+			break;
+		case PCI_MODE_PCI_66:
+			DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
+			config->bus_speed = 133;
+			break;
+		case PCI_MODE_PCIX_M1_66:
+			DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
+			config->bus_speed = 133; /* Herc doubles the clock rate */
+			break;
+		case PCI_MODE_PCIX_M1_100:
+			DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
+			config->bus_speed = 200;
+			break;
+		case PCI_MODE_PCIX_M1_133:
+			DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
+			config->bus_speed = 266;
+			break;
+		case PCI_MODE_PCIX_M2_66:
+			DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
+			config->bus_speed = 133;
+			break;
+		case PCI_MODE_PCIX_M2_100:
+			DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
+			config->bus_speed = 200;
+			break;
+		case PCI_MODE_PCIX_M2_133:
+			DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
+			config->bus_speed = 266;
+			break;
+		default:
+			return -1;	/* Unsupported bus speed */
+	}
+
+	return mode;
+}
+
+/**
+ *  init_nic - Initialization of hardware
 *  @nic: device private variable
- *  Description: The function sequentially configures every block 
- *  of the H/W from their reset values. 
- *  Return Value:  SUCCESS on success and 
+ *  Description: The function sequentially configures every block
+ *  of the H/W from their reset values.
+ *  Return Value:  SUCCESS on success and
  *  '-1' on failure (endian settings incorrect).
  */
 
@@ -626,21 +778,32 @@ static int init_nic(struct s2io_nic *nic
 	struct config_param *config;
 	int mdio_cnt = 0, dtx_cnt = 0;
 	unsigned long long mem_share;
+	int mem_size;
 
 	mac_control = &nic->mac_control;
 	config = &nic->config;
 
-	/* Initialize swapper control register */
-	if (s2io_set_swapper(nic)) {
+	/* Set the swapper control on the card */
+	if(s2io_set_swapper(nic)) {
 		DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
 		return -1;
 	}
 
+	/*
+	 * Herc requires EOI to be removed from reset before XGXS,
+	 * so that is done first.
+	 */
+	if (nic->device_type & XFRAME_II_DEVICE) {
+		val64 = 0xA500000000ULL;
+		writeq(val64, &bar0->sw_reset);
+		msleep(500);
+		val64 = readq(&bar0->sw_reset);
+	}
+
 	/* Remove XGXS from reset state */
 	val64 = 0;
 	writeq(val64, &bar0->sw_reset);
-	val64 = readq(&bar0->sw_reset);
 	msleep(500);
+	val64 = readq(&bar0->sw_reset);
 
 	/*  Enable Receiving broadcasts */
 	add = &bar0->mac_cfg;
@@ -660,48 +823,58 @@ static int init_nic(struct s2io_nic *nic
 	val64 = dev->mtu;
 	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
 
-	/* 
-	 * Configuring the XAUI Interface of Xena. 
+	/*
+	 * Configuring the XAUI Interface of Xena.
 	 * ***************************************
-	 * To Configure the Xena's XAUI, one has to write a series 
-	 * of 64 bit values into two registers in a particular 
-	 * sequence. Hence a macro 'SWITCH_SIGN' has been defined 
-	 * which will be defined in the array of configuration values 
-	 * (default_dtx_cfg & default_mdio_cfg) at appropriate places 
-	 * to switch writing from one regsiter to another. We continue 
+	 * To configure the Xena's XAUI, one has to write a series
+	 * of 64 bit values into two registers in a particular
+	 * sequence. Hence a macro 'SWITCH_SIGN' has been defined,
+	 * which is placed in the array of configuration values
+	 * (xena_dtx_cfg & xena_mdio_cfg) at appropriate places
+	 * to switch writing from one register to another. We continue
 	 * writing these values until we encounter the 'END_SIGN' macro.
-	 * For example, After making a series of 21 writes into 
-	 * dtx_control register the 'SWITCH_SIGN' appears and hence we 
+	 * For example, after making a series of 21 writes into the
+	 * dtx_control register, the 'SWITCH_SIGN' appears and hence we
 	 * start writing into mdio_control until we encounter END_SIGN.
 	 */
-	while (1) {
-	      dtx_cfg:
-		while (default_dtx_cfg[dtx_cnt] != END_SIGN) {
-			if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
-				dtx_cnt++;
-				goto mdio_cfg;
-			}
-			SPECIAL_REG_WRITE(default_dtx_cfg[dtx_cnt],
+	if (nic->device_type & XFRAME_II_DEVICE) {
+		while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
+			SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
 					  &bar0->dtx_control, UF);
-			val64 = readq(&bar0->dtx_control);
+			if (dtx_cnt & 0x1)
+				msleep(1); /* Necessary!! */
 			dtx_cnt++;
 		}
-	      mdio_cfg:
-		while (default_mdio_cfg[mdio_cnt] != END_SIGN) {
-			if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
+	} else {
+		while (1) {
+		      dtx_cfg:
+			while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
+				if (xena_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
+					dtx_cnt++;
+					goto mdio_cfg;
+				}
+				SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
+						  &bar0->dtx_control, UF);
+				val64 = readq(&bar0->dtx_control);
+				dtx_cnt++;
+			}
+		      mdio_cfg:
+			while (xena_mdio_cfg[mdio_cnt] != END_SIGN) {
+				if (xena_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
+					mdio_cnt++;
+					goto dtx_cfg;
+				}
+				SPECIAL_REG_WRITE(xena_mdio_cfg[mdio_cnt],
+						  &bar0->mdio_control, UF);
+				val64 = readq(&bar0->mdio_control);
 				mdio_cnt++;
+			}
+			if ((xena_dtx_cfg[dtx_cnt] == END_SIGN) &&
+			    (xena_mdio_cfg[mdio_cnt] == END_SIGN)) {
+				break;
+			} else {
 				goto dtx_cfg;
 			}
-			SPECIAL_REG_WRITE(default_mdio_cfg[mdio_cnt],
-					  &bar0->mdio_control, UF);
-			val64 = readq(&bar0->mdio_control);
-			mdio_cnt++;
-		}
-		if ((default_dtx_cfg[dtx_cnt] == END_SIGN) &&
-		    (default_mdio_cfg[mdio_cnt] == END_SIGN)) {
-			break;
-		} else {
-			goto dtx_cfg;
 		}
 	}
 
@@ -748,12 +921,20 @@ static int init_nic(struct s2io_nic *nic
 	val64 |= BIT(0);	/* To enable the FIFO partition. */
 	writeq(val64, &bar0->tx_fifo_partition_0);
 
+	/*
+	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
+	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
+	 */
+	if ((nic->device_type == XFRAME_I_DEVICE) &&
+		(get_xena_rev_id(nic->pdev) < 4))
+		writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
+
 	val64 = readq(&bar0->tx_fifo_partition_0);
 	DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
 		  &bar0->tx_fifo_partition_0, (unsigned long long) val64);
 
-	/* 
-	 * Initialization of Tx_PA_CONFIG register to ignore packet 
+	/*
+	 * Initialization of Tx_PA_CONFIG register to ignore packet
 	 * integrity checking.
 	 */
 	val64 = readq(&bar0->tx_pa_cfg);
@@ -770,85 +951,304 @@ static int init_nic(struct s2io_nic *nic
 	}
 	writeq(val64, &bar0->rx_queue_priority);
 
-	/* 
-	 * Allocating equal share of memory to all the 
+	/*
+	 * Allocating equal share of memory to all the
 	 * configured Rings.
 	 */
 	val64 = 0;
+	if (nic->device_type & XFRAME_II_DEVICE)
+		mem_size = 32;
+	else
+		mem_size = 64;
+
 	for (i = 0; i < config->rx_ring_num; i++) {
 		switch (i) {
 		case 0:
-			mem_share = (64 / config->rx_ring_num +
-				     64 % config->rx_ring_num);
+			mem_share = (mem_size / config->rx_ring_num +
+				     mem_size % config->rx_ring_num);
 			val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
 			continue;
 		case 1:
-			mem_share = (64 / config->rx_ring_num);
+			mem_share = (mem_size / config->rx_ring_num);
 			val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
 			continue;
 		case 2:
-			mem_share = (64 / config->rx_ring_num);
+			mem_share = (mem_size / config->rx_ring_num);
 			val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
 			continue;
 		case 3:
-			mem_share = (64 / config->rx_ring_num);
+			mem_share = (mem_size / config->rx_ring_num);
 			val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
 			continue;
 		case 4:
-			mem_share = (64 / config->rx_ring_num);
+			mem_share = (mem_size / config->rx_ring_num);
 			val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
 			continue;
 		case 5:
-			mem_share = (64 / config->rx_ring_num);
+			mem_share = (mem_size / config->rx_ring_num);
 			val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
 			continue;
 		case 6:
-			mem_share = (64 / config->rx_ring_num);
+			mem_share = (mem_size / config->rx_ring_num);
 			val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
 			continue;
 		case 7:
-			mem_share = (64 / config->rx_ring_num);
+			mem_share = (mem_size / config->rx_ring_num);
 			val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
 			continue;
 		}
 	}
 	writeq(val64, &bar0->rx_queue_cfg);
 
-	/* 
-	 * Initializing the Tx round robin registers to 0.
-	 * Filling Tx and Rx round robin registers as per the 
-	 * number of FIFOs and Rings is still TODO.
-	 */
-	writeq(0, &bar0->tx_w_round_robin_0);
-	writeq(0, &bar0->tx_w_round_robin_1);
-	writeq(0, &bar0->tx_w_round_robin_2);
-	writeq(0, &bar0->tx_w_round_robin_3);
-	writeq(0, &bar0->tx_w_round_robin_4);
-
-	/* 
-	 * TODO
-	 * Disable Rx steering. Hard coding all packets be steered to
-	 * Queue 0 for now. 
+	/*
+	 * Filling Tx round robin registers
+	 * as per the number of FIFOs
 	 */
-	val64 = 0x8080808080808080ULL;
-	writeq(val64, &bar0->rts_qos_steering);
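+	/* Each byte of the 64-bit values written below selects the FIFO
+	 * serviced in one arbitration slot, spreading the slots roughly
+	 * evenly across the configured FIFOs */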
+	switch (config->tx_fifo_num) {
+	case 1:
+		val64 = 0x0000000000000000ULL;
+		writeq(val64, &bar0->tx_w_round_robin_0);
+		writeq(val64, &bar0->tx_w_round_robin_1);
+		writeq(val64, &bar0->tx_w_round_robin_2);
+		writeq(val64, &bar0->tx_w_round_robin_3);
+		writeq(val64, &bar0->tx_w_round_robin_4);
+		break;
+	case 2:
+		val64 = 0x0000010000010000ULL;
+		writeq(val64, &bar0->tx_w_round_robin_0);
+		val64 = 0x0100000100000100ULL;
+		writeq(val64, &bar0->tx_w_round_robin_1);
+		val64 = 0x0001000001000001ULL;
+		writeq(val64, &bar0->tx_w_round_robin_2);
+		val64 = 0x0000010000010000ULL;
+		writeq(val64, &bar0->tx_w_round_robin_3);
+		val64 = 0x0100000000000000ULL;
+		writeq(val64, &bar0->tx_w_round_robin_4);
+		break;
+	case 3:
+		val64 = 0x0001000102000001ULL;
+		writeq(val64, &bar0->tx_w_round_robin_0);
+		val64 = 0x0001020000010001ULL;
+		writeq(val64, &bar0->tx_w_round_robin_1);
+		val64 = 0x0200000100010200ULL;
+		writeq(val64, &bar0->tx_w_round_robin_2);
+		val64 = 0x0001000102000001ULL;
+		writeq(val64, &bar0->tx_w_round_robin_3);
+		val64 = 0x0001020000000000ULL;
+		writeq(val64, &bar0->tx_w_round_robin_4);
+		break;
+	case 4:
+		val64 = 0x0001020300010200ULL;
+		writeq(val64, &bar0->tx_w_round_robin_0);
+		val64 = 0x0100000102030001ULL;
+		writeq(val64, &bar0->tx_w_round_robin_1);
+		val64 = 0x0200010000010203ULL;
+		writeq(val64, &bar0->tx_w_round_robin_2);
+		val64 = 0x0001020001000001ULL;
+		writeq(val64, &bar0->tx_w_round_robin_3);
+		val64 = 0x0203000100000000ULL;
+		writeq(val64, &bar0->tx_w_round_robin_4);
+		break;
+	case 5:
+		val64 = 0x0001000203000102ULL;
+		writeq(val64, &bar0->tx_w_round_robin_0);
+		val64 = 0x0001020001030004ULL;
+		writeq(val64, &bar0->tx_w_round_robin_1);
+		val64 = 0x0001000203000102ULL;
+		writeq(val64, &bar0->tx_w_round_robin_2);
+		val64 = 0x0001020001030004ULL;
+		writeq(val64, &bar0->tx_w_round_robin_3);
+		val64 = 0x0001000000000000ULL;
+		writeq(val64, &bar0->tx_w_round_robin_4);
+		break;
+	case 6:
+		val64 = 0x0001020304000102ULL;
+		writeq(val64, &bar0->tx_w_round_robin_0);
+		val64 = 0x0304050001020001ULL;
+		writeq(val64, &bar0->tx_w_round_robin_1);
+		val64 = 0x0203000100000102ULL;
+		writeq(val64, &bar0->tx_w_round_robin_2);
+		val64 = 0x0304000102030405ULL;
+		writeq(val64, &bar0->tx_w_round_robin_3);
+		val64 = 0x0001000200000000ULL;
+		writeq(val64, &bar0->tx_w_round_robin_4);
+		break;
+	case 7:
+		val64 = 0x0001020001020300ULL;
+		writeq(val64, &bar0->tx_w_round_robin_0);
+		val64 = 0x0102030400010203ULL;
+		writeq(val64, &bar0->tx_w_round_robin_1);
+		val64 = 0x0405060001020001ULL;
+		writeq(val64, &bar0->tx_w_round_robin_2);
+		val64 = 0x0304050000010200ULL;
+		writeq(val64, &bar0->tx_w_round_robin_3);
+		val64 = 0x0102030000000000ULL;
+		writeq(val64, &bar0->tx_w_round_robin_4);
+		break;
+	case 8:
+		val64 = 0x0001020300040105ULL;
+		writeq(val64, &bar0->tx_w_round_robin_0);
+		val64 = 0x0200030106000204ULL;
+		writeq(val64, &bar0->tx_w_round_robin_1);
+		val64 = 0x0103000502010007ULL;
+		writeq(val64, &bar0->tx_w_round_robin_2);
+		val64 = 0x0304010002060500ULL;
+		writeq(val64, &bar0->tx_w_round_robin_3);
+		val64 = 0x0103020400000000ULL;
+		writeq(val64, &bar0->tx_w_round_robin_4);
+		break;
+	}
+
+	/* Filling the Rx round robin registers as per the
+	 * number of Rings and steering based on QoS.
+	 */
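+	/* As above, each byte of the rx_w_round_robin values names the
+	 * ring serviced in one slot, while rts_qos_steering maps the
+	 * eight QoS priorities onto the configured rings */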
+	switch (config->rx_ring_num) {
+	case 1:
+		val64 = 0x8080808080808080ULL;
+		writeq(val64, &bar0->rts_qos_steering);
+		break;
+	case 2:
+		val64 = 0x0000010000010000ULL;
+		writeq(val64, &bar0->rx_w_round_robin_0);
+		val64 = 0x0100000100000100ULL;
+		writeq(val64, &bar0->rx_w_round_robin_1);
+		val64 = 0x0001000001000001ULL;
+		writeq(val64, &bar0->rx_w_round_robin_2);
+		val64 = 0x0000010000010000ULL;
+		writeq(val64, &bar0->rx_w_round_robin_3);
+		val64 = 0x0100000000000000ULL;
+		writeq(val64, &bar0->rx_w_round_robin_4);
+
+		val64 = 0x8080808040404040ULL;
+		writeq(val64, &bar0->rts_qos_steering);
+		break;
+	case 3:
+		val64 = 0x0001000102000001ULL;
+		writeq(val64, &bar0->rx_w_round_robin_0);
+		val64 = 0x0001020000010001ULL;
+		writeq(val64, &bar0->rx_w_round_robin_1);
+		val64 = 0x0200000100010200ULL;
+		writeq(val64, &bar0->rx_w_round_robin_2);
+		val64 = 0x0001000102000001ULL;
+		writeq(val64, &bar0->rx_w_round_robin_3);
+		val64 = 0x0001020000000000ULL;
+		writeq(val64, &bar0->rx_w_round_robin_4);
+
+		val64 = 0x8080804040402020ULL;
+		writeq(val64, &bar0->rts_qos_steering);
+		break;
+	case 4:
+		val64 = 0x0001020300010200ULL;
+		writeq(val64, &bar0->rx_w_round_robin_0);
+		val64 = 0x0100000102030001ULL;
+		writeq(val64, &bar0->rx_w_round_robin_1);
+		val64 = 0x0200010000010203ULL;
+		writeq(val64, &bar0->rx_w_round_robin_2);
+		val64 = 0x0001020001000001ULL;
+		writeq(val64, &bar0->rx_w_round_robin_3);
+		val64 = 0x0203000100000000ULL;
+		writeq(val64, &bar0->rx_w_round_robin_4);
+
+		val64 = 0x8080404020201010ULL;
+		writeq(val64, &bar0->rts_qos_steering);
+		break;
+	case 5:
+		val64 = 0x0001000203000102ULL;
+		writeq(val64, &bar0->rx_w_round_robin_0);
+		val64 = 0x0001020001030004ULL;
+		writeq(val64, &bar0->rx_w_round_robin_1);
+		val64 = 0x0001000203000102ULL;
+		writeq(val64, &bar0->rx_w_round_robin_2);
+		val64 = 0x0001020001030004ULL;
+		writeq(val64, &bar0->rx_w_round_robin_3);
+		val64 = 0x0001000000000000ULL;
+		writeq(val64, &bar0->rx_w_round_robin_4);
+
+		val64 = 0x8080404020201008ULL;
+		writeq(val64, &bar0->rts_qos_steering);
+		break;
+	case 6:
+		val64 = 0x0001020304000102ULL;
+		writeq(val64, &bar0->rx_w_round_robin_0);
+		val64 = 0x0304050001020001ULL;
+		writeq(val64, &bar0->rx_w_round_robin_1);
+		val64 = 0x0203000100000102ULL;
+		writeq(val64, &bar0->rx_w_round_robin_2);
+		val64 = 0x0304000102030405ULL;
+		writeq(val64, &bar0->rx_w_round_robin_3);
+		val64 = 0x0001000200000000ULL;
+		writeq(val64, &bar0->rx_w_round_robin_4);
+
+		val64 = 0x8080404020100804ULL;
+		writeq(val64, &bar0->rts_qos_steering);
+		break;
+	case 7:
+		val64 = 0x0001020001020300ULL;
+		writeq(val64, &bar0->rx_w_round_robin_0);
+		val64 = 0x0102030400010203ULL;
+		writeq(val64, &bar0->rx_w_round_robin_1);
+		val64 = 0x0405060001020001ULL;
+		writeq(val64, &bar0->rx_w_round_robin_2);
+		val64 = 0x0304050000010200ULL;
+		writeq(val64, &bar0->rx_w_round_robin_3);
+		val64 = 0x0102030000000000ULL;
+		writeq(val64, &bar0->rx_w_round_robin_4);
+
+		val64 = 0x8080402010080402ULL;
+		writeq(val64, &bar0->rts_qos_steering);
+		break;
+	case 8:
+		val64 = 0x0001020300040105ULL;
+		writeq(val64, &bar0->rx_w_round_robin_0);
+		val64 = 0x0200030106000204ULL;
+		writeq(val64, &bar0->rx_w_round_robin_1);
+		val64 = 0x0103000502010007ULL;
+		writeq(val64, &bar0->rx_w_round_robin_2);
+		val64 = 0x0304010002060500ULL;
+		writeq(val64, &bar0->rx_w_round_robin_3);
+		val64 = 0x0103020400000000ULL;
+		writeq(val64, &bar0->rx_w_round_robin_4);
+
+		val64 = 0x8040201008040201ULL;
+		writeq(val64, &bar0->rts_qos_steering);
+		break;
+	}
 
 	/* UDP Fix */
 	val64 = 0;
-	for (i = 1; i < 8; i++)
+	for (i = 0; i < 8; i++)
+		writeq(val64, &bar0->rts_frm_len_n[i]);
+
+	/* Set the default rts frame length for the rings configured */
+	val64 = MAC_RTS_FRM_LEN_SET(dev->mtu + 22);
+	for (i = 0; i < config->rx_ring_num; i++)
 		writeq(val64, &bar0->rts_frm_len_n[i]);
 
-	/* Set rts_frm_len register for fifo 0 */
-	writeq(MAC_RTS_FRM_LEN_SET(dev->mtu + 22),
-	       &bar0->rts_frm_len_n[0]);
+	/* Set the frame length for the configured rings
+	 * desired by the user
+	 */
+	for (i = 0; i < config->rx_ring_num; i++) {
+		/* If rts_frm_len[i] == 0, it is assumed that the user has
+		 * not specified any frame length steering for this ring.
+		 * If the user does provide a frame length, program the
+		 * rts_frm_len register with that value; otherwise leave
+		 * it as it is.
+		 */
+		if (rts_frm_len[i] != 0) {
+			writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
+				&bar0->rts_frm_len_n[i]);
+		}
+	}
 
-	/* Enable statistics */
+	/* Program statistics memory */
 	writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
-	val64 = SET_UPDT_PERIOD(Stats_refresh_time) |
-	    STAT_CFG_STAT_RO | STAT_CFG_STAT_EN;
-	writeq(val64, &bar0->stat_cfg);
 
-	/* 
+	if (nic->device_type == XFRAME_II_DEVICE) {
+		val64 = STAT_BC(0x320);
+		writeq(val64, &bar0->stat_byte_cnt);
+	}
+
+	/*
 	 * Initializing the sampling rate for the device to calculate the
 	 * bandwidth utilization.
 	 */
@@ -857,30 +1257,38 @@ static int init_nic(struct s2io_nic *nic
 	writeq(val64, &bar0->mac_link_util);
 
 
-	/* 
-	 * Initializing the Transmit and Receive Traffic Interrupt 
+	/*
+	 * Initializing the Transmit and Receive Traffic Interrupt
 	 * Scheme.
 	 */
-	/* TTI Initialization. Default Tx timer gets us about
+	/*
+	 * TTI Initialization. Default Tx timer gets us about
 	 * 250 interrupts per sec. Continuous interrupts are enabled
 	 * by default.
 	 */
-	val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078) |
-	    TTI_DATA1_MEM_TX_URNG_A(0xA) |
+	if (nic->device_type == XFRAME_II_DEVICE) {
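+		/* The timer count is derived from the bus speed so that
+		 * Xframe II generates roughly the same ~250 Tx interrupts
+		 * per second as the fixed 0x2078 value used for Xframe I.
+		 */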
+		int count = (nic->config.bus_speed * 125)/2;
+		val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
+	} else {
+		val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
+	}
+	val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
 	    TTI_DATA1_MEM_TX_URNG_B(0x10) |
-	    TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN |
-		TTI_DATA1_MEM_TX_TIMER_CI_EN;
+	    TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
+	if (use_continuous_tx_intrs)
+		val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
 	writeq(val64, &bar0->tti_data1_mem);
 
 	val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
 	    TTI_DATA2_MEM_TX_UFC_B(0x20) |
-	    TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
+	    TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
 	writeq(val64, &bar0->tti_data2_mem);
 
 	val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
 	writeq(val64, &bar0->tti_command_mem);
 
-	/* 
+	/*
 	 * Once the operation completes, the Strobe bit of the command
 	 * register will be reset. We poll for this particular condition
 	 * We wait for a maximum of 500ms for the operation to complete,
@@ -901,52 +1309,97 @@ static int init_nic(struct s2io_nic *nic
 		time++;
 	}
 
-	/* RTI Initialization */
-	val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF) |
-	    RTI_DATA1_MEM_RX_URNG_A(0xA) |
-	    RTI_DATA1_MEM_RX_URNG_B(0x10) |
-	    RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
-
-	writeq(val64, &bar0->rti_data1_mem);
-
-	val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
-	    RTI_DATA2_MEM_RX_UFC_B(0x2) |
-	    RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
-	writeq(val64, &bar0->rti_data2_mem);
-
-	val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD;
-	writeq(val64, &bar0->rti_command_mem);
+	if (nic->config.bimodal) {
+		int k = 0;
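+		/* In bimodal mode a separate TTI entry is programmed for
+		 * every Rx ring, at command memory offsets 0x38 + ring.
+		 */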
+		for (k = 0; k < config->rx_ring_num; k++) {
+			val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
+			val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
+			writeq(val64, &bar0->tti_command_mem);
+
+			/*
+			 * Once the operation completes, the Strobe bit of
+			 * the command register will be reset. We poll for
+			 * this particular condition. We wait for a maximum
+			 * of 500ms for the operation to complete; if it's
+			 * not complete by then we return error.
+			 */
+			time = 0;
+			while (TRUE) {
+				val64 = readq(&bar0->tti_command_mem);
+				if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
+					break;
+				}
+				if (time > 10) {
+					DBG_PRINT(ERR_DBG,
+						"%s: TTI init Failed\n",
+					dev->name);
+					return -1;
+				}
+				time++;
+				msleep(50);
+			}
+		}
+	} else {
 
-	/* 
-	 * Once the operation completes, the Strobe bit of the command
-	 * register will be reset. We poll for this particular condition
-	 * We wait for a maximum of 500ms for the operation to complete,
-	 * if it's not complete by then we return error.
-	 */
-	time = 0;
-	while (TRUE) {
-		val64 = readq(&bar0->rti_command_mem);
-		if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
-			break;
+		/* RTI Initialization */
+		if (nic->device_type == XFRAME_II_DEVICE) {
+			/*
+			 * Programmed to generate approximately 500
+			 * interrupts per second.
+			 */
+			int count = (nic->config.bus_speed * 125)/4;
+			val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
+		} else {
+			val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
 		}
-		if (time > 10) {
-			DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
-				  dev->name);
-			return -1;
+		val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
+		    RTI_DATA1_MEM_RX_URNG_B(0x10) |
+		    RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
+
+		writeq(val64, &bar0->rti_data1_mem);
+
+		val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
+		    RTI_DATA2_MEM_RX_UFC_B(0x2) |
+		    RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
+		writeq(val64, &bar0->rti_data2_mem);
+
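+		/* Program one RTI command memory entry per configured Rx
+		 * ring and poll for each write to complete.
+		 */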
+		for (i = 0; i < config->rx_ring_num; i++) {
+			val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
+					| RTI_CMD_MEM_OFFSET(i);
+			writeq(val64, &bar0->rti_command_mem);
+
+			/*
+			 * Once the operation completes, the Strobe bit of the
+			 * command register will be reset. We poll for this
+			 * particular condition. We wait for a maximum of 500ms
+			 * for the operation to complete, if it's not complete
+			 * by then we return error.
+			 */
+			time = 0;
+			while (TRUE) {
+				val64 = readq(&bar0->rti_command_mem);
+				if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
+					break;
+				}
+				if (time > 10) {
+					DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
+						  dev->name);
+					return -1;
+				}
+				time++;
+				msleep(50);
+			}
 		}
-		time++;
-		msleep(50);
 	}
 
-	/* 
-	 * Initializing proper values as Pause threshold into all 
+	/*
+	 * Initializing proper values as Pause threshold into all
 	 * the 8 Queues on Rx side.
 	 */
 	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
 	writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
 
 	/* Disable RMAC PAD STRIPPING */
-	add = &bar0->mac_cfg;
+	add = (void *) &bar0->mac_cfg;
 	val64 = readq(&bar0->mac_cfg);
 	val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
 	writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
@@ -955,8 +1408,8 @@ static int init_nic(struct s2io_nic *nic
 	writel((u32) (val64 >> 32), (add + 4));
 	val64 = readq(&bar0->mac_cfg);
 
-	/* 
-	 * Set the time value to be inserted in the pause frame 
+	/*
+	 * Set the time value to be inserted in the pause frame
 	 * generated by xena.
 	 */
 	val64 = readq(&bar0->rmac_pause_cfg);
@@ -964,7 +1417,7 @@ static int init_nic(struct s2io_nic *nic
 	val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
 	writeq(val64, &bar0->rmac_pause_cfg);
 
-	/* 
+	/*
 	 * Set the Threshold Limit for Generating the pause frame
 	 * If the amount of data in any Queue exceeds ratio of
 	 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
@@ -988,25 +1441,54 @@ static int init_nic(struct s2io_nic *nic
 	}
 	writeq(val64, &bar0->mc_pause_thresh_q4q7);
 
-	/* 
-	 * TxDMA will stop Read request if the number of read split has 
+	/*
+	 * TxDMA will stop Read request if the number of read split has
 	 * exceeded the limit pointed by shared_splits
 	 */
 	val64 = readq(&bar0->pic_control);
 	val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
 	writeq(val64, &bar0->pic_control);
 
+	/*
+	 * Programming the Herc to split every write transaction
+	 * that does not start on an ADB to reduce disconnects.
+	 */
+	if (nic->device_type == XFRAME_II_DEVICE) {
+		val64 = WREQ_SPLIT_MASK_SET_MASK(255);
+		writeq(val64, &bar0->wreq_split_mask);
+	}
+
+	/* Setting Link stability period to 64 ms */
+	if (nic->device_type == XFRAME_II_DEVICE) {
+		val64 = MISC_LINK_STABILITY_PRD(3);
+		writeq(val64, &bar0->misc_control);
+	}
+
 	return SUCCESS;
 }
+#define LINK_UP_DOWN_INTERRUPT		1
+#define MAC_RMAC_ERR_TIMER		2
+
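+/*
+ * Link faults are reported either through the GPIO link up/down interrupt
+ * (Xframe II/Hercules) or through the RMAC error timer (Xframe I). When
+ * MSI or MSI-X is configured, the timer is always used.
+ */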
+#if defined(CONFIG_MSI_MODE) || defined(CONFIG_MSIX_MODE)
+#define s2io_link_fault_indication(x) MAC_RMAC_ERR_TIMER
+#else
+int s2io_link_fault_indication(nic_t *nic)
+{
+	if (nic->device_type == XFRAME_II_DEVICE)
+		return LINK_UP_DOWN_INTERRUPT;
+	else
+		return MAC_RMAC_ERR_TIMER;
+}
+#endif
 
-/**  
- *  en_dis_able_nic_intrs - Enable or Disable the interrupts 
+/**
+ *  en_dis_able_nic_intrs - Enable or Disable the interrupts
  *  @nic: device private variable,
  *  @mask: A mask indicating which Intr block must be modified and,
  *  @flag: A flag indicating whether to enable or disable the Intrs.
  *  Description: This function will either disable or enable the interrupts
- *  depending on the flag argument. The mask argument can be used to 
- *  enable/disable any Intr block. 
+ *  depending on the flag argument. The mask argument can be used to
+ *  enable/disable any Intr block.
  *  Return Value: NONE.
  */
 
@@ -1024,20 +1506,31 @@ static void en_dis_able_nic_intrs(struct
 			temp64 = readq(&bar0->general_int_mask);
 			temp64 &= ~((u64) val64);
 			writeq(temp64, &bar0->general_int_mask);
-			/*  
-			 * Disabled all PCIX, Flash, MDIO, IIC and GPIO
-			 * interrupts for now. 
-			 * TODO 
+			/*
+			 * If this is a Hercules adapter, enable the GPIO
+			 * (link) interrupt; otherwise disable all PCIX,
+			 * Flash, MDIO, IIC and GPIO interrupts for now.
+			 * TODO
 			 */
-			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
-			/* 
+			if (s2io_link_fault_indication(nic) ==
+					LINK_UP_DOWN_INTERRUPT) {
+				temp64 = readq(&bar0->pic_int_mask);
+				temp64 &= ~((u64) PIC_INT_GPIO);
+				writeq(temp64, &bar0->pic_int_mask);
+				temp64 = readq(&bar0->gpio_int_mask);
+				temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
+				writeq(temp64, &bar0->gpio_int_mask);
+			} else {
+				writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
+			}
+			/*
 			 * No MSI Support is available presently, so TTI and
 			 * RTI interrupts are also disabled.
 			 */
 		} else if (flag == DISABLE_INTRS) {
-			/*  
-			 * Disable PIC Intrs in the general 
-			 * intr mask register 
+			/*
+			 * Disable PIC Intrs in the general
+			 * intr mask register
 			 */
 			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
 			temp64 = readq(&bar0->general_int_mask);
@@ -1055,27 +1548,27 @@ static void en_dis_able_nic_intrs(struct
 			temp64 = readq(&bar0->general_int_mask);
 			temp64 &= ~((u64) val64);
 			writeq(temp64, &bar0->general_int_mask);
-			/* 
-			 * Keep all interrupts other than PFC interrupt 
+			/*
+			 * Keep all interrupts other than PFC interrupt
 			 * and PCC interrupt disabled in DMA level.
 			 */
 			val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
 						      TXDMA_PCC_INT_M);
 			writeq(val64, &bar0->txdma_int_mask);
-			/* 
-			 * Enable only the MISC error 1 interrupt in PFC block 
+			/*
+			 * Enable only the MISC error 1 interrupt in PFC block
 			 */
 			val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
 			writeq(val64, &bar0->pfc_err_mask);
-			/* 
-			 * Enable only the FB_ECC error interrupt in PCC block 
+			/*
+			 * Enable only the FB_ECC error interrupt in PCC block
 			 */
 			val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
 			writeq(val64, &bar0->pcc_err_mask);
 		} else if (flag == DISABLE_INTRS) {
-			/* 
-			 * Disable TxDMA Intrs in the general intr mask 
-			 * register 
+			/*
+			 * Disable TxDMA Intrs in the general intr mask
+			 * register
 			 */
 			writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
 			writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
@@ -1093,15 +1586,15 @@ static void en_dis_able_nic_intrs(struct
 			temp64 = readq(&bar0->general_int_mask);
 			temp64 &= ~((u64) val64);
 			writeq(temp64, &bar0->general_int_mask);
-			/* 
-			 * All RxDMA block interrupts are disabled for now 
-			 * TODO 
+			/*
+			 * All RxDMA block interrupts are disabled for now
+			 * TODO
 			 */
 			writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
 		} else if (flag == DISABLE_INTRS) {
-			/*  
-			 * Disable RxDMA Intrs in the general intr mask 
-			 * register 
+			/*
+			 * Disable RxDMA Intrs in the general intr mask
+			 * register
 			 */
 			writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
 			temp64 = readq(&bar0->general_int_mask);
@@ -1118,22 +1611,13 @@ static void en_dis_able_nic_intrs(struct
 			temp64 = readq(&bar0->general_int_mask);
 			temp64 &= ~((u64) val64);
 			writeq(temp64, &bar0->general_int_mask);
-			/* 
-			 * All MAC block error interrupts are disabled for now 
-			 * except the link status change interrupt.
+			/*
+			 * All MAC block error interrupts are disabled for now
 			 * TODO
 			 */
-			val64 = MAC_INT_STATUS_RMAC_INT;
-			temp64 = readq(&bar0->mac_int_mask);
-			temp64 &= ~((u64) val64);
-			writeq(temp64, &bar0->mac_int_mask);
-
-			val64 = readq(&bar0->mac_rmac_err_mask);
-			val64 &= ~((u64) RMAC_LINK_STATE_CHANGE_INT);
-			writeq(val64, &bar0->mac_rmac_err_mask);
 		} else if (flag == DISABLE_INTRS) {
-			/*  
-			 * Disable MAC Intrs in the general intr mask register 
+			/*
+			 * Disable MAC Intrs in the general intr mask register
 			 */
 			writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
 			writeq(DISABLE_ALL_INTRS,
@@ -1152,14 +1636,14 @@ static void en_dis_able_nic_intrs(struct
 			temp64 = readq(&bar0->general_int_mask);
 			temp64 &= ~((u64) val64);
 			writeq(temp64, &bar0->general_int_mask);
-			/* 
+			/*
 			 * All XGXS block error interrupts are disabled for now
-			 * TODO 
+			 * TODO
 			 */
 			writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
 		} else if (flag == DISABLE_INTRS) {
-			/*  
-			 * Disable MC Intrs in the general intr mask register 
+			/*
+			 * Disable MC Intrs in the general intr mask register
 			 */
 			writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
 			temp64 = readq(&bar0->general_int_mask);
@@ -1175,11 +1659,11 @@ static void en_dis_able_nic_intrs(struct
 			temp64 = readq(&bar0->general_int_mask);
 			temp64 &= ~((u64) val64);
 			writeq(temp64, &bar0->general_int_mask);
-			/* 
-			 * All MC block error interrupts are disabled for now
-			 * TODO 
+			/*
+			 * Enable all MC Intrs.
 			 */
-			writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
+			writeq(0x0, &bar0->mc_int_mask);
+			writeq(0x0, &bar0->mc_err_mask);
 		} else if (flag == DISABLE_INTRS) {
 			/*
 			 * Disable MC Intrs in the general intr mask register
@@ -1199,14 +1683,14 @@ static void en_dis_able_nic_intrs(struct
 			temp64 = readq(&bar0->general_int_mask);
 			temp64 &= ~((u64) val64);
 			writeq(temp64, &bar0->general_int_mask);
-			/* 
+			/*
 			 * Enable all the Tx side interrupts
-			 * writing 0 Enables all 64 TX interrupt levels 
+			 * writing 0 Enables all 64 TX interrupt levels
 			 */
 			writeq(0x0, &bar0->tx_traffic_mask);
 		} else if (flag == DISABLE_INTRS) {
-			/* 
-			 * Disable Tx Traffic Intrs in the general intr mask 
+			/*
+			 * Disable Tx Traffic Intrs in the general intr mask
 			 * register.
 			 */
 			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
@@ -1226,8 +1710,8 @@ static void en_dis_able_nic_intrs(struct
 			/* writing 0 Enables all 8 RX interrupt levels */
 			writeq(0x0, &bar0->rx_traffic_mask);
 		} else if (flag == DISABLE_INTRS) {
-			/*  
-			 * Disable Rx Traffic Intrs in the general intr mask 
+			/*
+			 * Disable Rx Traffic Intrs in the general intr mask
 			 * register.
 			 */
 			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
@@ -1238,24 +1722,66 @@ static void en_dis_able_nic_intrs(struct
 	}
 }
 
-/**  
- *  verify_xena_quiescence - Checks whether the H/W is ready 
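+/*
+ * Helper for verify_xena_quiescence(): checks the RMAC PCC idle and
+ * RC PRC quiescent bits of the adapter status. Which PCC idle bits
+ * apply depends on whether the adapter is a Herc or a Xena of
+ * revision 4 or later, and 'flag' indicates whether the adapter
+ * enable bit has been written before, which changes the expected
+ * quiescent state.
+ */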
+static int check_prc_pcc_state(u64 val64, int flag, int rev_id, int herc)
+{
+	int ret = 0;
+
+	if (flag == FALSE) {
+		if ((!herc && (rev_id >= 4)) || herc) {
+			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
+			    ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
+			     ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
+				ret = 1;
+			}
+		} else {
+			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
+			    ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
+			     ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
+				ret = 1;
+			}
+		}
+	} else {
+		if ((!herc && (rev_id >= 4)) || herc) {
+			if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
+			     ADAPTER_STATUS_RMAC_PCC_IDLE) &&
+			    (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
+			     ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
+			      ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
+				ret = 1;
+			}
+		} else {
+			if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
+			     ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
+			    (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
+			     ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
+			      ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
+				ret = 1;
+			}
+		}
+	}
+
+	return ret;
+}
+
+/**
+ *  verify_xena_quiescence - Checks whether the H/W is ready
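+ *  @sp : device private variable.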
  *  @val64 :  Value read from adapter status register.
  *  @flag : indicates if the adapter enable bit was ever written once
  *  before.
  *  Description: Returns whether the H/W is ready to go or not. Depending
- *  on whether adapter enable bit was written or not the comparison 
+ *  on whether adapter enable bit was written or not the comparison
  *  differs and the calling function passes the input argument flag to
  *  indicate this.
- *  Return: 1 If xena is quiescence 
+ *  Return: 1 If Xena is quiescent
  *          0 If Xena is not quiescence
  */
 
-static int verify_xena_quiescence(u64 val64, int flag)
+static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
 {
-	int ret = 0;
+	int ret = 0, herc;
 	u64 tmp64 = ~((u64) val64);
+	int rev_id = get_xena_rev_id(sp->pdev);
 
+	herc = (sp->device_type == XFRAME_II_DEVICE);
 	if (!
 	    (tmp64 &
 	     (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
@@ -1263,25 +1789,7 @@ static int verify_xena_quiescence(u64 va
 	      ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
 	      ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
 	      ADAPTER_STATUS_P_PLL_LOCK))) {
-		if (flag == FALSE) {
-			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
-			    ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
-			     ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
-
-				ret = 1;
-
-			}
-		} else {
-			if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
-			     ADAPTER_STATUS_RMAC_PCC_IDLE) &&
-			    (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
-			     ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
-			      ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
-
-				ret = 1;
-
-			}
-		}
+		ret = check_prc_pcc_state(val64, flag, rev_id, herc);
 	}
 
 	return ret;
@@ -1290,12 +1798,12 @@ static int verify_xena_quiescence(u64 va
 /**
  * fix_mac_address -  Fix for Mac addr problem on Alpha platforms
  * @sp: Pointer to device specifc structure
- * Description : 
+ * Description :
  * New procedure to clear mac address reading  problems on Alpha platforms
  *
  */
 
-static void fix_mac_address(nic_t * sp)
+void fix_mac_address(nic_t * sp)
 {
 	XENA_dev_config_t __iomem *bar0 = sp->bar0;
 	u64 val64;
@@ -1303,20 +1811,21 @@ static void fix_mac_address(nic_t * sp)
 
 	while (fix_mac[i] != END_SIGN) {
 		writeq(fix_mac[i++], &bar0->gpio_control);
+		udelay(10);
 		val64 = readq(&bar0->gpio_control);
 	}
 }
 
 /**
- *  start_nic - Turns the device on   
+ *  start_nic - Turns the device on
  *  @nic : device private variable.
- *  Description: 
- *  This function actually turns the device on. Before this  function is 
- *  called,all Registers are configured from their reset states 
- *  and shared memory is allocated but the NIC is still quiescent. On 
+ *  Description:
+ *  This function actually turns the device on. Before this  function is
+ *  called,all Registers are configured from their reset states
+ *  and shared memory is allocated but the NIC is still quiescent. On
  *  calling this function, the device interrupts are cleared and the NIC is
  *  literally switched on by writing into the adapter control register.
- *  Return Value: 
+ *  Return Value:
  *  SUCCESS on success and -1 on failure.
  */
 
@@ -1325,8 +1834,8 @@ static int start_nic(struct s2io_nic *ni
 	XENA_dev_config_t __iomem *bar0 = nic->bar0;
 	struct net_device *dev = nic->dev;
 	register u64 val64 = 0;
-	u16 interruptible, i;
-	u16 subid;
+	u16 interruptible;
+	u16 subid, i;
 	mac_info_t *mac_control;
 	struct config_param *config;
 
@@ -1335,10 +1844,12 @@ static int start_nic(struct s2io_nic *ni
 
 	/*  PRC Initialization and configuration */
 	for (i = 0; i < config->rx_ring_num; i++) {
-		writeq((u64) nic->rx_blocks[i][0].block_dma_addr,
+		writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
 		       &bar0->prc_rxd0_n[i]);
 
 		val64 = readq(&bar0->prc_ctrl_n[i]);
+		if (nic->config.bimodal)
+			val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
 #ifndef CONFIG_2BUFF_MODE
 		val64 |= PRC_CTRL_RC_ENABLED;
 #else
@@ -1354,7 +1865,7 @@ static int start_nic(struct s2io_nic *ni
 	writeq(val64, &bar0->rx_pa_cfg);
 #endif
 
-	/* 
+	/*
 	 * Enabling MC-RLDRAM. After enabling the device, we timeout
 	 * for around 100ms, which is approximately the time required
 	 * for the device to be ready for operation.
@@ -1364,27 +1875,27 @@ static int start_nic(struct s2io_nic *ni
 	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
 	val64 = readq(&bar0->mc_rldram_mrs);
 
-	msleep(100);			/* Delay by around 100 ms. */
+	msleep(100);	/* Delay by around 100 ms. */
 
 	/* Enabling ECC Protection. */
 	val64 = readq(&bar0->adapter_control);
 	val64 &= ~ADAPTER_ECC_EN;
 	writeq(val64, &bar0->adapter_control);
 
-	/* 
-	 * Clearing any possible Link state change interrupts that 
+	/*
+	 * Clearing any possible Link state change interrupts that
 	 * could have popped up just before Enabling the card.
 	 */
 	val64 = readq(&bar0->mac_rmac_err_reg);
 	if (val64)
 		writeq(val64, &bar0->mac_rmac_err_reg);
 
-	/* 
-	 * Verify if the device is ready to be enabled, if so enable 
+	/*
+	 * Verify if the device is ready to be enabled, if so enable
 	 * it.
 	 */
 	val64 = readq(&bar0->adapter_status);
-	if (!verify_xena_quiescence(val64, nic->device_enabled_once)) {
+	if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
 		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
 		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
 			  (unsigned long long) val64);
@@ -1392,16 +1903,18 @@ static int start_nic(struct s2io_nic *ni
 	}
 
 	/*  Enable select interrupts */
-	interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
-	    RX_MAC_INTR;
+	interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | MC_INTR;
+	interruptible |= TX_PIC_INTR | RX_PIC_INTR;
+	interruptible |= TX_MAC_INTR | RX_MAC_INTR;
+
 	en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
 
-	/* 
+	/*
 	 * With some switches, link might be already up at this point.
-	 * Because of this weird behavior, when we enable laser, 
-	 * we may not get link. We need to handle this. We cannot 
-	 * figure out which switch is misbehaving. So we are forced to 
-	 * make a global change. 
+	 * Because of this weird behavior, when we enable laser,
+	 * we may not get link. We need to handle this. We cannot
+	 * figure out which switch is misbehaving. So we are forced to
+	 * make a global change.
 	 */
 
 	/* Enabling Laser. */
@@ -1411,44 +1924,30 @@ static int start_nic(struct s2io_nic *ni
 
 	/* SXE-002: Initialize link and activity LED */
 	subid = nic->pdev->subsystem_device;
-	if ((subid & 0xFF) >= 0x07) {
+	if (((subid & 0xFF) >= 0x07) &&
+	    (nic->device_type == XFRAME_I_DEVICE)) {
 		val64 = readq(&bar0->gpio_control);
 		val64 |= 0x0000800000000000ULL;
 		writeq(val64, &bar0->gpio_control);
 		val64 = 0x0411040400000000ULL;
-		writeq(val64, (void __iomem *) bar0 + 0x2700);
+		writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
 	}
 
-	/* 
-	 * Don't see link state interrupts on certain switches, so 
+	/*
+	 * Don't see link state interrupts on certain switches, so
 	 * directly scheduling a link state task from here.
 	 */
 	schedule_work(&nic->set_link_task);
 
-	/* 
-	 * Here we are performing soft reset on XGXS to 
-	 * force link down. Since link is already up, we will get
-	 * link state change interrupt after this reset
-	 */
-	SPECIAL_REG_WRITE(0x80010515001E0000ULL, &bar0->dtx_control, UF);
-	val64 = readq(&bar0->dtx_control);
-	udelay(50);
-	SPECIAL_REG_WRITE(0x80010515001E00E0ULL, &bar0->dtx_control, UF);
-	val64 = readq(&bar0->dtx_control);
-	udelay(50);
-	SPECIAL_REG_WRITE(0x80070515001F00E4ULL, &bar0->dtx_control, UF);
-	val64 = readq(&bar0->dtx_control);
-	udelay(50);
-
 	return SUCCESS;
 }
 
-/** 
- *  free_tx_buffers - Free all queued Tx buffers 
+/**
+ *  free_tx_buffers - Free all queued Tx buffers
  *  @nic : device private variable.
- *  Description: 
+ *  Description:
  *  Free all queued Tx buffers.
- *  Return Value: void 
+ *  Return Value: void
 */
 
 static void free_tx_buffers(struct s2io_nic *nic)
@@ -1459,39 +1958,61 @@ static void free_tx_buffers(struct s2io_
 	int i, j;
 	mac_info_t *mac_control;
 	struct config_param *config;
-	int cnt = 0;
+	int cnt = 0, frg_cnt;
 
 	mac_control = &nic->mac_control;
 	config = &nic->config;
 
 	for (i = 0; i < config->tx_fifo_num; i++) {
 		for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
-			txdp = (TxD_t *) nic->list_info[i][j].
+			txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
 			    list_virt_addr;
 			skb =
 			    (struct sk_buff *) ((unsigned long) txdp->
 						Host_Control);
 			if (skb == NULL) {
-				memset(txdp, 0, sizeof(TxD_t));
+				memset(txdp, 0, sizeof(TxD_t) *
+				       config->max_txds);
 				continue;
 			}
+			frg_cnt = skb_shinfo(skb)->nr_frags;
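+			/* Unmap the linear part of the skb, then every
+			 * page fragment mapped in the TxDs that follow.
+			 */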
+			pci_unmap_single(nic->pdev, (dma_addr_t)
+					 txdp->Buffer_Pointer,
+					 skb->len - skb->data_len,
+					 PCI_DMA_TODEVICE);
+			if (frg_cnt) {
+				TxD_t *temp;
+				int k;	/* don't clobber the fifo index j */
+
+				temp = txdp;
+				txdp++;
+				for (k = 0; k < frg_cnt; k++, txdp++) {
+					skb_frag_t *frag =
+					    &skb_shinfo(skb)->frags[k];
+					pci_unmap_page(nic->pdev,
+						       (dma_addr_t)
+						       txdp->
+						       Buffer_Pointer,
+						       frag->size,
+						       PCI_DMA_TODEVICE);
+				}
+				txdp = temp;
+			}
 			dev_kfree_skb(skb);
-			memset(txdp, 0, sizeof(TxD_t));
+			memset(txdp, 0, sizeof(TxD_t) * config->max_txds);
 			cnt++;
 		}
 		DBG_PRINT(INTR_DBG,
 			  "%s:forcibly freeing %d skbs on FIFO%d\n",
 			  dev->name, cnt, i);
-		mac_control->tx_curr_get_info[i].offset = 0;
-		mac_control->tx_curr_put_info[i].offset = 0;
+		mac_control->fifos[i].tx_curr_get_info.offset = 0;
+		mac_control->fifos[i].tx_curr_put_info.offset = 0;
 	}
 }
 
-/**  
- *   stop_nic -  To stop the nic  
+/**
+ *   stop_nic -  To stop the nic
  *   @nic ; device private variable.
- *   Description: 
- *   This function does exactly the opposite of what the start_nic() 
+ *   Description:
+ *   This function does exactly the opposite of what the start_nic()
  *   function does. This function is called to stop the device.
  *   Return Value:
  *   void.
@@ -1509,8 +2030,9 @@ static void stop_nic(struct s2io_nic *ni
 	config = &nic->config;
 
 	/*  Disable all interrupts */
-	interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
-	    RX_MAC_INTR;
+	interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | MC_INTR;
+	interruptible |= TX_PIC_INTR | RX_PIC_INTR;
+	interruptible |= TX_MAC_INTR | RX_MAC_INTR;
 	en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
 
 	/*  Disable PRCs */
@@ -1521,11 +2043,11 @@ static void stop_nic(struct s2io_nic *ni
 	}
 }
 
-/**  
- *  fill_rx_buffers - Allocates the Rx side skbs 
+/**
+ *  fill_rx_buffers - Allocates the Rx side skbs
  *  @nic:  device private variable
- *  @ring_no: ring number 
- *  Description: 
+ *  @ring_no: ring number
+ *  Description:
  *  The function allocates Rx side skbs and puts the physical
  *  address of these buffers into the RxD buffer pointers, so that the NIC
  *  can DMA the received frame into these locations.
@@ -1533,8 +2055,8 @@ static void stop_nic(struct s2io_nic *ni
  *  1. single buffer,
  *  2. three buffer and
  *  3. Five buffer modes.
- *  Each mode defines how many fragments the received frame will be split 
- *  up into by the NIC. The frame is split into L3 header, L4 Header, 
+ *  Each mode defines how many fragments the received frame will be split
+ *  up into by the NIC. The frame is split into L3 header, L4 Header,
  *  L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
  *  is split into 3 fragments. As of now only single buffer mode is
  *  supported.
@@ -1542,7 +2064,7 @@ static void stop_nic(struct s2io_nic *ni
  *  SUCCESS on success or an appropriate -ve value on failure.
  */
 
-static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
+int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
 {
 	struct net_device *dev = nic->dev;
 	struct sk_buff *skb;
@@ -1550,34 +2072,35 @@ static int fill_rx_buffers(struct s2io_n
 	int off, off1, size, block_no, block_no1;
 	int offset, offset1;
 	u32 alloc_tab = 0;
-	u32 alloc_cnt = nic->pkt_cnt[ring_no] -
-	    atomic_read(&nic->rx_bufs_left[ring_no]);
+	u32 alloc_cnt;
 	mac_info_t *mac_control;
 	struct config_param *config;
 #ifdef CONFIG_2BUFF_MODE
 	RxD_t *rxdpnext;
 	int nextblk;
-	unsigned long tmp;
+	u64 tmp;
 	buffAdd_t *ba;
 	dma_addr_t rxdpphys;
 #endif
 #ifndef CONFIG_S2IO_NAPI
 	unsigned long flags;
 #endif
+	RxD_t *first_rxdp = NULL;
 
 	mac_control = &nic->mac_control;
 	config = &nic->config;
-
+	alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
+	    atomic_read(&nic->rx_bufs_left[ring_no]);
 	size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
 	    HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
 
 	while (alloc_tab < alloc_cnt) {
-		block_no = mac_control->rx_curr_put_info[ring_no].
+		block_no = mac_control->rings[ring_no].rx_curr_put_info.
 		    block_index;
-		block_no1 = mac_control->rx_curr_get_info[ring_no].
+		block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
 		    block_index;
-		off = mac_control->rx_curr_put_info[ring_no].offset;
-		off1 = mac_control->rx_curr_get_info[ring_no].offset;
+		off = mac_control->rings[ring_no].rx_curr_put_info.offset;
+		off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
 #ifndef CONFIG_2BUFF_MODE
 		offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
 		offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
@@ -1586,7 +2109,7 @@ static int fill_rx_buffers(struct s2io_n
 		offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
 #endif
 
-		rxdp = nic->rx_blocks[ring_no][block_no].
+		rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
 		    block_virt_addr + off;
 		if ((offset == offset1) && (rxdp->Host_Control)) {
 			DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
@@ -1595,15 +2118,15 @@ static int fill_rx_buffers(struct s2io_n
 		}
 #ifndef	CONFIG_2BUFF_MODE
 		if (rxdp->Control_1 == END_OF_BLOCK) {
-			mac_control->rx_curr_put_info[ring_no].
+			mac_control->rings[ring_no].rx_curr_put_info.
 			    block_index++;
-			mac_control->rx_curr_put_info[ring_no].
-			    block_index %= nic->block_count[ring_no];
-			block_no = mac_control->rx_curr_put_info
-			    [ring_no].block_index;
+			mac_control->rings[ring_no].rx_curr_put_info.
+			    block_index %= mac_control->rings[ring_no].block_count;
+			block_no = mac_control->rings[ring_no].rx_curr_put_info.
+				block_index;
 			off++;
 			off %= (MAX_RXDS_PER_BLOCK + 1);
-			mac_control->rx_curr_put_info[ring_no].offset =
+			mac_control->rings[ring_no].rx_curr_put_info.offset =
 			    off;
 			rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
 			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
@@ -1611,30 +2134,30 @@ static int fill_rx_buffers(struct s2io_n
 		}
 #ifndef CONFIG_S2IO_NAPI
 		spin_lock_irqsave(&nic->put_lock, flags);
-		nic->put_pos[ring_no] =
+		mac_control->rings[ring_no].put_pos =
 		    (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
 		spin_unlock_irqrestore(&nic->put_lock, flags);
 #endif
 #else
 		if (rxdp->Host_Control == END_OF_BLOCK) {
-			mac_control->rx_curr_put_info[ring_no].
+			mac_control->rings[ring_no].rx_curr_put_info.
 			    block_index++;
-			mac_control->rx_curr_put_info[ring_no].
-			    block_index %= nic->block_count[ring_no];
-			block_no = mac_control->rx_curr_put_info
-			    [ring_no].block_index;
+			mac_control->rings[ring_no].rx_curr_put_info.block_index
+			    %= mac_control->rings[ring_no].block_count;
+			block_no = mac_control->rings[ring_no].rx_curr_put_info
+			    .block_index;
 			off = 0;
 			DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
 				  dev->name, block_no,
 				  (unsigned long long) rxdp->Control_1);
-			mac_control->rx_curr_put_info[ring_no].offset =
+			mac_control->rings[ring_no].rx_curr_put_info.offset =
 			    off;
-			rxdp = nic->rx_blocks[ring_no][block_no].
+			rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
 			    block_virt_addr;
 		}
 #ifndef CONFIG_S2IO_NAPI
 		spin_lock_irqsave(&nic->put_lock, flags);
-		nic->put_pos[ring_no] = (block_no *
+		mac_control->rings[ring_no].put_pos = (block_no *
 					 (MAX_RXDS_PER_BLOCK + 1)) + off;
 		spin_unlock_irqrestore(&nic->put_lock, flags);
 #endif
@@ -1646,27 +2169,27 @@ static int fill_rx_buffers(struct s2io_n
 		if (rxdp->Control_2 & BIT(0))
 #endif
 		{
-			mac_control->rx_curr_put_info[ring_no].
+			mac_control->rings[ring_no].rx_curr_put_info.
 			    offset = off;
 			goto end;
 		}
 #ifdef	CONFIG_2BUFF_MODE
-		/* 
-		 * RxDs Spanning cache lines will be replenished only 
-		 * if the succeeding RxD is also owned by Host. It 
-		 * will always be the ((8*i)+3) and ((8*i)+6) 
-		 * descriptors for the 48 byte descriptor. The offending 
+		/*
+		 * RxDs Spanning cache lines will be replenished only
+		 * if the succeeding RxD is also owned by Host. It
+		 * will always be the ((8*i)+3) and ((8*i)+6)
+		 * descriptors for the 48 byte descriptor. The offending
 		 * descriptor is of course the 3rd descriptor.
 		 */
-		rxdpphys = nic->rx_blocks[ring_no][block_no].
+		rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no].
 		    block_dma_addr + (off * sizeof(RxD_t));
 		if (((u64) (rxdpphys)) % 128 > 80) {
-			rxdpnext = nic->rx_blocks[ring_no][block_no].
+			rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
 			    block_virt_addr + (off + 1);
 			if (rxdpnext->Host_Control == END_OF_BLOCK) {
 				nextblk = (block_no + 1) %
-				    (nic->block_count[ring_no]);
-				rxdpnext = nic->rx_blocks[ring_no]
+				    (mac_control->rings[ring_no].block_count);
+				rxdpnext = mac_control->rings[ring_no].rx_blocks
 				    [nextblk].block_virt_addr;
 			}
 			if (rxdpnext->Control_2 & BIT(0))
@@ -1682,6 +2205,10 @@ static int fill_rx_buffers(struct s2io_n
 		if (!skb) {
 			DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
 			DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
+			if (first_rxdp) {
+				wmb();
+				first_rxdp->Control_1 |= RXD_OWN_XENA;
+			}
 			return -ENOMEM;
 		}
 #ifndef	CONFIG_2BUFF_MODE
@@ -1692,12 +2219,13 @@ static int fill_rx_buffers(struct s2io_n
 		rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
 		rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
 		rxdp->Host_Control = (unsigned long) (skb);
-		rxdp->Control_1 |= RXD_OWN_XENA;
+		if (alloc_tab & ((1 << rxsync_frequency) - 1))
+			rxdp->Control_1 |= RXD_OWN_XENA;
 		off++;
 		off %= (MAX_RXDS_PER_BLOCK + 1);
-		mac_control->rx_curr_put_info[ring_no].offset = off;
+		mac_control->rings[ring_no].rx_curr_put_info.offset = off;
 #else
-		ba = &nic->ba[ring_no][block_no][off];
+		ba = &mac_control->rings[ring_no].ba[block_no][off];
 		skb_reserve(skb, BUF0_LEN);
 		tmp = ((unsigned long) skb->data & ALIGN_SIZE);
 		if (tmp)
@@ -1719,22 +2247,41 @@ static int fill_rx_buffers(struct s2io_n
 		rxdp->Control_2 |= SET_BUFFER1_SIZE(1);	/* dummy. */
 		rxdp->Control_2 |= BIT(0);	/* Set Buffer_Empty bit. */
 		rxdp->Host_Control = (u64) ((unsigned long) (skb));
-		rxdp->Control_1 |= RXD_OWN_XENA;
+		if (alloc_tab & ((1 << rxsync_frequency) - 1))
+			rxdp->Control_1 |= RXD_OWN_XENA;
 		off++;
-		mac_control->rx_curr_put_info[ring_no].offset = off;
+		mac_control->rings[ring_no].rx_curr_put_info.offset = off;
 #endif
+		rxdp->Control_2 |= SET_RXD_MARKER;
+
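+		/* RxDs are handed to the adapter in batches of
+		 * 2^rxsync_frequency descriptors; ownership of the first
+		 * RxD of each batch is deferred until the whole batch has
+		 * been set up (see the wmb() below).
+		 */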
+		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
+			if (first_rxdp) {
+				wmb();
+				first_rxdp->Control_1 |= RXD_OWN_XENA;
+			}
+			first_rxdp = rxdp;
+		}
 		atomic_inc(&nic->rx_bufs_left[ring_no]);
 		alloc_tab++;
 	}
 
       end:
+	/* Transfer ownership of first descriptor to adapter just before
+	 * exiting. Before that, use memory barrier so that ownership
+	 * and other fields are seen by adapter correctly.
+	 */
+	if (first_rxdp) {
+		wmb();
+		first_rxdp->Control_1 |= RXD_OWN_XENA;
+	}
+
 	return SUCCESS;
 }
 
 /**
- *  free_rx_buffers - Frees all Rx buffers   
+ *  free_rx_buffers - Frees all Rx buffers
  *  @sp: device private variable.
- *  Description: 
+ *  Description:
  *  This function will free all Rx buffers allocated by host.
  *  Return Value:
  *  NONE.
@@ -1758,7 +2305,8 @@ static void free_rx_buffers(struct s2io_
 	for (i = 0; i < config->rx_ring_num; i++) {
 		for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
 			off = j % (MAX_RXDS_PER_BLOCK + 1);
-			rxdp = sp->rx_blocks[i][blk].block_virt_addr + off;
+			rxdp = mac_control->rings[i].rx_blocks[blk].
+				block_virt_addr + off;
 
 #ifndef CONFIG_2BUFF_MODE
 			if (rxdp->Control_1 == END_OF_BLOCK) {
@@ -1793,7 +2341,7 @@ static void free_rx_buffers(struct s2io_
 						 HEADER_SNAP_SIZE,
 						 PCI_DMA_FROMDEVICE);
 #else
-				ba = &sp->ba[i][blk][off];
+				ba = &mac_control->rings[i].ba[blk][off];
 				pci_unmap_single(sp->pdev, (dma_addr_t)
 						 rxdp->Buffer0_ptr,
 						 BUF0_LEN,
@@ -1813,10 +2361,10 @@ static void free_rx_buffers(struct s2io_
 			}
 			memset(rxdp, 0, sizeof(RxD_t));
 		}
-		mac_control->rx_curr_put_info[i].block_index = 0;
-		mac_control->rx_curr_get_info[i].block_index = 0;
-		mac_control->rx_curr_put_info[i].offset = 0;
-		mac_control->rx_curr_get_info[i].offset = 0;
+		mac_control->rings[i].rx_curr_put_info.block_index = 0;
+		mac_control->rings[i].rx_curr_get_info.block_index = 0;
+		mac_control->rings[i].rx_curr_put_info.offset = 0;
+		mac_control->rings[i].rx_curr_get_info.offset = 0;
 		atomic_set(&sp->rx_bufs_left[i], 0);
 		DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
 			  dev->name, buf_cnt, i);
@@ -1826,7 +2374,7 @@ static void free_rx_buffers(struct s2io_
 /**
  * s2io_poll - Rx interrupt handler for NAPI support
  * @dev : pointer to the device structure.
- * @budget : The number of packets that were budgeted to be processed 
+ * @budget : The number of packets that were budgeted to be processed
 * during one pass through the 'Poll' function.
  * Description:
  * Comes into picture only if NAPI support has been incorporated. It does
@@ -1836,160 +2384,36 @@ static void free_rx_buffers(struct s2io_
  * 0 on success and 1 if there are No Rx packets to be processed.
  */
 
-#ifdef CONFIG_S2IO_NAPI
+#if defined(CONFIG_S2IO_NAPI)
 static int s2io_poll(struct net_device *dev, int *budget)
 {
 	nic_t *nic = dev->priv;
-	XENA_dev_config_t __iomem *bar0 = nic->bar0;
-	int pkts_to_process = *budget, pkt_cnt = 0;
-	register u64 val64 = 0;
-	rx_curr_get_info_t get_info, put_info;
-	int i, get_block, put_block, get_offset, put_offset, ring_bufs;
-#ifndef CONFIG_2BUFF_MODE
-	u16 val16, cksum;
-#endif
-	struct sk_buff *skb;
-	RxD_t *rxdp;
+	int pkt_cnt = 0, org_pkts_to_process;
 	mac_info_t *mac_control;
 	struct config_param *config;
-#ifdef CONFIG_2BUFF_MODE
-	buffAdd_t *ba;
-#endif
+	XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
+	u64 val64;
+	int i;
 
+	atomic_inc(&nic->isr_cnt);
 	mac_control = &nic->mac_control;
 	config = &nic->config;
 
-	if (pkts_to_process > dev->quota)
-		pkts_to_process = dev->quota;
+	nic->pkts_to_process = *budget;
+	if (nic->pkts_to_process > dev->quota)
+		nic->pkts_to_process = dev->quota;
+	org_pkts_to_process = nic->pkts_to_process;
 
 	val64 = readq(&bar0->rx_traffic_int);
 	writeq(val64, &bar0->rx_traffic_int);
 
 	for (i = 0; i < config->rx_ring_num; i++) {
-		get_info = mac_control->rx_curr_get_info[i];
-		get_block = get_info.block_index;
-		put_info = mac_control->rx_curr_put_info[i];
-		put_block = put_info.block_index;
-		ring_bufs = config->rx_cfg[i].num_rxd;
-		rxdp = nic->rx_blocks[i][get_block].block_virt_addr +
-		    get_info.offset;
-#ifndef	CONFIG_2BUFF_MODE
-		get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
-		    get_info.offset;
-		put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
-		    put_info.offset;
-		while ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
-		       (((get_offset + 1) % ring_bufs) != put_offset)) {
-			if (--pkts_to_process < 0) {
-				goto no_rx;
-			}
-			if (rxdp->Control_1 == END_OF_BLOCK) {
-				rxdp =
-				    (RxD_t *) ((unsigned long) rxdp->
-					       Control_2);
-				get_info.offset++;
-				get_info.offset %=
-				    (MAX_RXDS_PER_BLOCK + 1);
-				get_block++;
-				get_block %= nic->block_count[i];
-				mac_control->rx_curr_get_info[i].
-				    offset = get_info.offset;
-				mac_control->rx_curr_get_info[i].
-				    block_index = get_block;
-				continue;
-			}
-			get_offset =
-			    (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
-			    get_info.offset;
-			skb =
-			    (struct sk_buff *) ((unsigned long) rxdp->
-						Host_Control);
-			if (skb == NULL) {
-				DBG_PRINT(ERR_DBG, "%s: The skb is ",
-					  dev->name);
-				DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
-				goto no_rx;
-			}
-			val64 = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
-			val16 = (u16) (val64 >> 48);
-			cksum = RXD_GET_L4_CKSUM(rxdp->Control_1);
-			pci_unmap_single(nic->pdev, (dma_addr_t)
-					 rxdp->Buffer0_ptr,
-					 dev->mtu +
-					 HEADER_ETHERNET_II_802_3_SIZE +
-					 HEADER_802_2_SIZE +
-					 HEADER_SNAP_SIZE,
-					 PCI_DMA_FROMDEVICE);
-			rx_osm_handler(nic, val16, rxdp, i);
-			pkt_cnt++;
-			get_info.offset++;
-			get_info.offset %= (MAX_RXDS_PER_BLOCK + 1);
-			rxdp =
-			    nic->rx_blocks[i][get_block].block_virt_addr +
-			    get_info.offset;
-			mac_control->rx_curr_get_info[i].offset =
-			    get_info.offset;
+		rx_intr_handler(&mac_control->rings[i]);
+		pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
+		if (!nic->pkts_to_process) {
+			/* Quota for the current iteration has been met */
+			goto no_rx;
 		}
-#else
-		get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
-		    get_info.offset;
-		put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
-		    put_info.offset;
-		while (((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
-			!(rxdp->Control_2 & BIT(0))) &&
-		       (((get_offset + 1) % ring_bufs) != put_offset)) {
-			if (--pkts_to_process < 0) {
-				goto no_rx;
-			}
-			skb = (struct sk_buff *) ((unsigned long)
-						  rxdp->Host_Control);
-			if (skb == NULL) {
-				DBG_PRINT(ERR_DBG, "%s: The skb is ",
-					  dev->name);
-				DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
-				goto no_rx;
-			}
-
-			pci_unmap_single(nic->pdev, (dma_addr_t)
-					 rxdp->Buffer0_ptr,
-					 BUF0_LEN, PCI_DMA_FROMDEVICE);
-			pci_unmap_single(nic->pdev, (dma_addr_t)
-					 rxdp->Buffer1_ptr,
-					 BUF1_LEN, PCI_DMA_FROMDEVICE);
-			pci_unmap_single(nic->pdev, (dma_addr_t)
-					 rxdp->Buffer2_ptr,
-					 dev->mtu + BUF0_LEN + 4,
-					 PCI_DMA_FROMDEVICE);
-			ba = &nic->ba[i][get_block][get_info.offset];
-
-			rx_osm_handler(nic, rxdp, i, ba);
-
-			get_info.offset++;
-			mac_control->rx_curr_get_info[i].offset =
-			    get_info.offset;
-			rxdp =
-			    nic->rx_blocks[i][get_block].block_virt_addr +
-			    get_info.offset;
-
-			if (get_info.offset &&
-			    (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
-				get_info.offset = 0;
-				mac_control->rx_curr_get_info[i].
-				    offset = get_info.offset;
-				get_block++;
-				get_block %= nic->block_count[i];
-				mac_control->rx_curr_get_info[i].
-				    block_index = get_block;
-				rxdp =
-				    nic->rx_blocks[i][get_block].
-				    block_virt_addr;
-			}
-			get_offset =
-			    (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
-			    get_info.offset;
-			pkt_cnt++;
-		}
-#endif
 	}
 	if (!pkt_cnt)
 		pkt_cnt = 1;
@@ -2007,9 +2431,10 @@ static int s2io_poll(struct net_device *
 	}
 	/* Re enable the Rx interrupts. */
 	en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
+	atomic_dec(&nic->isr_cnt);
 	return 0;
 
-      no_rx:
+no_rx:
 	dev->quota -= pkt_cnt;
 	*budget -= pkt_cnt;
 
@@ -2020,279 +2445,204 @@ static int s2io_poll(struct net_device *
 			break;
 		}
 	}
+	atomic_dec(&nic->isr_cnt);
 	return 1;
 }
-#else
-/**  
+#endif
+
+/**
  *  rx_intr_handler - Rx interrupt handler
  *  @nic: device private variable.
- *  Description: 
- *  If the interrupt is because of a received frame or if the 
+ *  Description:
+ *  If the interrupt is because of a received frame or if the
  *  receive ring contains fresh as yet un-processed frames,this function is
- *  called. It picks out the RxD at which place the last Rx processing had 
- *  stopped and sends the skb to the OSM's Rx handler and then increments 
+ *  called. It picks out the RxD at which place the last Rx processing had
+ *  stopped and sends the skb to the OSM's Rx handler and then increments
  *  the offset.
  *  Return Value:
  *  NONE.
  */
-
-static void rx_intr_handler(struct s2io_nic *nic)
+static void rx_intr_handler(ring_info_t *ring_data)
 {
+	nic_t *nic = ring_data->nic;
 	struct net_device *dev = (struct net_device *) nic->dev;
-	XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
+	int get_block, get_offset, put_block, put_offset, ring_bufs;
 	rx_curr_get_info_t get_info, put_info;
 	RxD_t *rxdp;
 	struct sk_buff *skb;
-#ifndef CONFIG_2BUFF_MODE
-	u16 val16, cksum;
-#endif
-	register u64 val64 = 0;
-	int get_block, get_offset, put_block, put_offset, ring_bufs;
-	int i, pkt_cnt = 0;
-	mac_info_t *mac_control;
-	struct config_param *config;
-#ifdef CONFIG_2BUFF_MODE
-	buffAdd_t *ba;
+#ifndef CONFIG_S2IO_NAPI
+	int pkt_cnt = 0;
 #endif
+	spin_lock(&nic->rx_lock);
+	if (atomic_read(&nic->card_state) == CARD_DOWN) {
+		DBG_PRINT(ERR_DBG, "%s: %s going down for reset\n",
+			  __FUNCTION__, dev->name);
+		spin_unlock(&nic->rx_lock);
+		return;
+	}
 
-	mac_control = &nic->mac_control;
-	config = &nic->config;
-
-	/* 
-	 * rx_traffic_int reg is an R1 register, hence we read and write back 
-	 * the samevalue in the register to clear it.
-	 */
-	val64 = readq(&bar0->rx_traffic_int);
-	writeq(val64, &bar0->rx_traffic_int);
-
-	for (i = 0; i < config->rx_ring_num; i++) {
-		get_info = mac_control->rx_curr_get_info[i];
-		get_block = get_info.block_index;
-		put_info = mac_control->rx_curr_put_info[i];
-		put_block = put_info.block_index;
-		ring_bufs = config->rx_cfg[i].num_rxd;
-		rxdp = nic->rx_blocks[i][get_block].block_virt_addr +
-		    get_info.offset;
-#ifndef	CONFIG_2BUFF_MODE
-		get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
+	get_info = ring_data->rx_curr_get_info;
+	get_block = get_info.block_index;
+	put_info = ring_data->rx_curr_put_info;
+	put_block = put_info.block_index;
+	ring_bufs = get_info.ring_len + 1;
+	rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
 		    get_info.offset;
-		spin_lock(&nic->put_lock);
-		put_offset = nic->put_pos[i];
-		spin_unlock(&nic->put_lock);
-		while ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
-		       (((get_offset + 1) % ring_bufs) != put_offset)) {
-			if (rxdp->Control_1 == END_OF_BLOCK) {
-				rxdp = (RxD_t *) ((unsigned long)
-						  rxdp->Control_2);
-				get_info.offset++;
-				get_info.offset %=
-				    (MAX_RXDS_PER_BLOCK + 1);
-				get_block++;
-				get_block %= nic->block_count[i];
-				mac_control->rx_curr_get_info[i].
-				    offset = get_info.offset;
-				mac_control->rx_curr_get_info[i].
-				    block_index = get_block;
-				continue;
-			}
-			get_offset =
-			    (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
-			    get_info.offset;
-			skb = (struct sk_buff *) ((unsigned long)
-						  rxdp->Host_Control);
-			if (skb == NULL) {
-				DBG_PRINT(ERR_DBG, "%s: The skb is ",
-					  dev->name);
-				DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
-				return;
-			}
-			val64 = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
-			val16 = (u16) (val64 >> 48);
-			cksum = RXD_GET_L4_CKSUM(rxdp->Control_1);
-			pci_unmap_single(nic->pdev, (dma_addr_t)
-					 rxdp->Buffer0_ptr,
-					 dev->mtu +
-					 HEADER_ETHERNET_II_802_3_SIZE +
-					 HEADER_802_2_SIZE +
-					 HEADER_SNAP_SIZE,
-					 PCI_DMA_FROMDEVICE);
-			rx_osm_handler(nic, val16, rxdp, i);
-			get_info.offset++;
-			get_info.offset %= (MAX_RXDS_PER_BLOCK + 1);
-			rxdp =
-			    nic->rx_blocks[i][get_block].block_virt_addr +
-			    get_info.offset;
-			mac_control->rx_curr_get_info[i].offset =
-			    get_info.offset;
-			pkt_cnt++;
-			if ((indicate_max_pkts)
-			    && (pkt_cnt > indicate_max_pkts))
-				break;
+	get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
+		get_info.offset;
+#ifndef CONFIG_S2IO_NAPI
+	spin_lock(&nic->put_lock);
+	put_offset = ring_data->put_pos;
+	spin_unlock(&nic->put_lock);
+#else
+	put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
+		put_info.offset;
+#endif
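+	/* Walk the descriptors owned by the host until we catch up with
+	 * the put pointer or the packet quota is exhausted.
+	 */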
+	while (RXD_IS_UP2DT(rxdp) &&
+	       (((get_offset + 1) % ring_bufs) != put_offset)) {
+		skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
+		if (skb == NULL) {
+			DBG_PRINT(ERR_DBG, "%s: The skb is ",
+				  dev->name);
+			DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
+			spin_unlock(&nic->rx_lock);
+			return;
 		}
+#ifndef CONFIG_2BUFF_MODE
+		pci_unmap_single(nic->pdev, (dma_addr_t)
+				 rxdp->Buffer0_ptr,
+				 dev->mtu +
+				 HEADER_ETHERNET_II_802_3_SIZE +
+				 HEADER_802_2_SIZE +
+				 HEADER_SNAP_SIZE,
+				 PCI_DMA_FROMDEVICE);
 #else
-		get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
+		pci_unmap_single(nic->pdev, (dma_addr_t)
+				 rxdp->Buffer0_ptr,
+				 BUF0_LEN, PCI_DMA_FROMDEVICE);
+		pci_unmap_single(nic->pdev, (dma_addr_t)
+				 rxdp->Buffer1_ptr,
+				 BUF1_LEN, PCI_DMA_FROMDEVICE);
+		pci_unmap_single(nic->pdev, (dma_addr_t)
+				 rxdp->Buffer2_ptr,
+				 dev->mtu + BUF0_LEN + 4,
+				 PCI_DMA_FROMDEVICE);
+#endif
+		rx_osm_handler(ring_data, rxdp);
+		get_info.offset++;
+		ring_data->rx_curr_get_info.offset =
 		    get_info.offset;
-		spin_lock(&nic->put_lock);
-		put_offset = nic->put_pos[i];
-		spin_unlock(&nic->put_lock);
-		while (((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
-			!(rxdp->Control_2 & BIT(0))) &&
-		       (((get_offset + 1) % ring_bufs) != put_offset)) {
-			skb = (struct sk_buff *) ((unsigned long)
-						  rxdp->Host_Control);
-			if (skb == NULL) {
-				DBG_PRINT(ERR_DBG, "%s: The skb is ",
-					  dev->name);
-				DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
-				return;
-			}
-
-			pci_unmap_single(nic->pdev, (dma_addr_t)
-					 rxdp->Buffer0_ptr,
-					 BUF0_LEN, PCI_DMA_FROMDEVICE);
-			pci_unmap_single(nic->pdev, (dma_addr_t)
-					 rxdp->Buffer1_ptr,
-					 BUF1_LEN, PCI_DMA_FROMDEVICE);
-			pci_unmap_single(nic->pdev, (dma_addr_t)
-					 rxdp->Buffer2_ptr,
-					 dev->mtu + BUF0_LEN + 4,
-					 PCI_DMA_FROMDEVICE);
-			ba = &nic->ba[i][get_block][get_info.offset];
-
-			rx_osm_handler(nic, rxdp, i, ba);
-
-			get_info.offset++;
-			mac_control->rx_curr_get_info[i].offset =
-			    get_info.offset;
-			rxdp =
-			    nic->rx_blocks[i][get_block].block_virt_addr +
-			    get_info.offset;
+		rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
+		    get_info.offset;
+		if (get_info.offset &&
+		    (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
+			get_info.offset = 0;
+			ring_data->rx_curr_get_info.offset
+			    = get_info.offset;
+			get_block++;
+			get_block %= ring_data->block_count;
+			ring_data->rx_curr_get_info.block_index
+			    = get_block;
+			rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
+		}
 
-			if (get_info.offset &&
-			    (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
-				get_info.offset = 0;
-				mac_control->rx_curr_get_info[i].
-				    offset = get_info.offset;
-				get_block++;
-				get_block %= nic->block_count[i];
-				mac_control->rx_curr_get_info[i].
-				    block_index = get_block;
-				rxdp =
-				    nic->rx_blocks[i][get_block].
-				    block_virt_addr;
-			}
-			get_offset =
-			    (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
+		get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
 			    get_info.offset;
-			pkt_cnt++;
-			if ((indicate_max_pkts)
-			    && (pkt_cnt > indicate_max_pkts))
-				break;
-		}
-#endif
+#ifdef CONFIG_S2IO_NAPI
+		nic->pkts_to_process -= 1;
+		if (!nic->pkts_to_process)
+			break;
+#else
+		pkt_cnt++;
 		if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
 			break;
+#endif
 	}
+	spin_unlock(&nic->rx_lock);
 }
-#endif
-/**  
+
+/**
  *  tx_intr_handler - Transmit interrupt handler
  *  @nic : device private variable
- *  Description: 
- *  If an interrupt was raised to indicate DMA complete of the 
- *  Tx packet, this function is called. It identifies the last TxD 
- *  whose buffer was freed and frees all skbs whose data have already 
+ *  Description:
+ *  If an interrupt was raised to indicate DMA complete of the
+ *  Tx packet, this function is called. It identifies the last TxD
+ *  whose buffer was freed and frees all skbs whose data have already
  *  DMA'ed into the NICs internal memory.
  *  Return Value:
  *  NONE
  */
 
-static void tx_intr_handler(struct s2io_nic *nic)
+static void tx_intr_handler(fifo_info_t *fifo_data)
 {
-	XENA_dev_config_t __iomem *bar0 = nic->bar0;
+	nic_t *nic = fifo_data->nic;
 	struct net_device *dev = (struct net_device *) nic->dev;
 	tx_curr_get_info_t get_info, put_info;
 	struct sk_buff *skb;
 	TxD_t *txdlp;
-	register u64 val64 = 0;
-	int i;
 	u16 j, frg_cnt;
-	mac_info_t *mac_control;
-	struct config_param *config;
-
-	mac_control = &nic->mac_control;
-	config = &nic->config;
 
-	/* 
-	 * tx_traffic_int reg is an R1 register, hence we read and write 
-	 * back the samevalue in the register to clear it.
-	 */
-	val64 = readq(&bar0->tx_traffic_int);
-	writeq(val64, &bar0->tx_traffic_int);
+	get_info = fifo_data->tx_curr_get_info;
+	put_info = fifo_data->tx_curr_put_info;
+	txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
+	    list_virt_addr;
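+	/*
+	 * Reclaim completed TxDs: stop at the first descriptor the NIC
+	 * still owns (TXD_LIST_OWN_XENA set), when the get pointer catches
+	 * up with the put pointer, or when no skb is attached.
+	 */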
+	while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
+	       (get_info.offset != put_info.offset) &&
+	       (txdlp->Host_Control)) {
+		/* Check for TxD errors */
+		if (txdlp->Control_1 & TXD_T_CODE) {
+			unsigned long long err;
+			err = txdlp->Control_1 & TXD_T_CODE;
+			DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
+				  err);
+		}
+
+		skb = (struct sk_buff *) ((unsigned long)
+				txdlp->Host_Control);
+		if (skb == NULL) {
+			DBG_PRINT(ERR_DBG, "%s: Null skb ",
+			__FUNCTION__);
+			DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
+			return;
+		}
 
-	for (i = 0; i < config->tx_fifo_num; i++) {
-		get_info = mac_control->tx_curr_get_info[i];
-		put_info = mac_control->tx_curr_put_info[i];
-		txdlp = (TxD_t *) nic->list_info[i][get_info.offset].
-		    list_virt_addr;
-		while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
-		       (get_info.offset != put_info.offset) &&
-		       (txdlp->Host_Control)) {
-			/* Check for TxD errors */
-			if (txdlp->Control_1 & TXD_T_CODE) {
-				unsigned long long err;
-				err = txdlp->Control_1 & TXD_T_CODE;
-				DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
-					  err);
-			}
+		frg_cnt = skb_shinfo(skb)->nr_frags;
+		nic->tx_pkt_count++;
 
-			skb = (struct sk_buff *) ((unsigned long)
-						  txdlp->Host_Control);
-			if (skb == NULL) {
-				DBG_PRINT(ERR_DBG, "%s: Null skb ",
-					  dev->name);
-				DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
-				return;
+		pci_unmap_single(nic->pdev, (dma_addr_t)
+				 txdlp->Buffer_Pointer,
+				 skb->len - skb->data_len,
+				 PCI_DMA_TODEVICE);
+		if (frg_cnt) {
+			TxD_t *temp;
+			temp = txdlp;
+			txdlp++;
+			for (j = 0; j < frg_cnt; j++, txdlp++) {
+				skb_frag_t *frag =
+				    &skb_shinfo(skb)->frags[j];
+				if (!txdlp->Buffer_Pointer)
+					break;
+				pci_unmap_page(nic->pdev,
+					       (dma_addr_t)
+					       txdlp->
+					       Buffer_Pointer,
+					       frag->size,
+					       PCI_DMA_TODEVICE);
 			}
-			nic->tx_pkt_count++;
-
-			frg_cnt = skb_shinfo(skb)->nr_frags;
-
-			/*  For unfragmented skb */
-			pci_unmap_single(nic->pdev, (dma_addr_t)
-					 txdlp->Buffer_Pointer,
-					 skb->len - skb->data_len,
-					 PCI_DMA_TODEVICE);
-			if (frg_cnt) {
-				TxD_t *temp = txdlp;
-				txdlp++;
-				for (j = 0; j < frg_cnt; j++, txdlp++) {
-					skb_frag_t *frag =
-					    &skb_shinfo(skb)->frags[j];
-					pci_unmap_page(nic->pdev,
-						       (dma_addr_t)
-						       txdlp->
-						       Buffer_Pointer,
-						       frag->size,
-						       PCI_DMA_TODEVICE);
-				}
-				txdlp = temp;
-			}
-			memset(txdlp, 0,
-			       (sizeof(TxD_t) * config->max_txds));
-
-			/* Updating the statistics block */
-			nic->stats.tx_packets++;
-			nic->stats.tx_bytes += skb->len;
-			dev_kfree_skb_irq(skb);
-
-			get_info.offset++;
-			get_info.offset %= get_info.fifo_len + 1;
-			txdlp = (TxD_t *) nic->list_info[i]
-			    [get_info.offset].list_virt_addr;
-			mac_control->tx_curr_get_info[i].offset =
-			    get_info.offset;
+			txdlp = temp;
 		}
+		memset(txdlp, 0,
+		       (sizeof(TxD_t) * fifo_data->max_txds));
+
+		/* Updating the statistics block */
+		nic->stats.tx_bytes += skb->len;
+		dev_kfree_skb_irq(skb);
+
+		get_info.offset++;
+		get_info.offset %= get_info.fifo_len + 1;
+		txdlp = (TxD_t *) fifo_data->list_info
+		    [get_info.offset].list_virt_addr;
+		fifo_data->tx_curr_get_info.offset =
+		    get_info.offset;
 	}
 
 	spin_lock(&nic->tx_lock);
@@ -2301,13 +2651,13 @@ static void tx_intr_handler(struct s2io_
 	spin_unlock(&nic->tx_lock);
 }
 
-/**  
+/**
 *  alarm_intr_handler - Alarm Interrupt handler
  *  @nic: device private variable
- *  Description: If the interrupt was neither because of Rx packet or Tx 
+ *  Description: If the interrupt was neither due to an Rx packet nor a Tx
  *  complete, this function is called. If the interrupt was to indicate
- *  a loss of link, the OSM link status handler is invoked for any other 
- *  alarm interrupt the block that raised the interrupt is displayed 
+ *  a loss of link, the OSM link status handler is invoked; for any other
+ *  alarm interrupt, the block that raised the interrupt is displayed
  *  and a H/W reset is issued.
  *  Return Value:
  *  NONE
@@ -2320,10 +2670,30 @@ static void alarm_intr_handler(struct s2
 	register u64 val64 = 0, err_reg = 0;
 
 	/* Handling link status change error Intr */
-	err_reg = readq(&bar0->mac_rmac_err_reg);
-	writeq(err_reg, &bar0->mac_rmac_err_reg);
-	if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
-		schedule_work(&nic->set_link_task);
+	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
+		err_reg = readq(&bar0->mac_rmac_err_reg);
+		writeq(err_reg, &bar0->mac_rmac_err_reg);
+		if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
+			schedule_work(&nic->set_link_task);
+		}
+	}
+
+	/* Handling Ecc errors */
+	val64 = readq(&bar0->mc_err_reg);
+	writeq(val64, &bar0->mc_err_reg);
+	if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
+		if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
+			nic->mac_control.stats_info->sw_stat.
+				double_ecc_errs++;
+			DBG_PRINT(ERR_DBG, "%s: Device indicates ",
+				  dev->name);
+			DBG_PRINT(ERR_DBG, "double ECC error!!\n");
+			netif_stop_queue(dev);
+			schedule_work(&nic->rst_timer_task);
+		} else {
+			nic->mac_control.stats_info->sw_stat.
+				single_ecc_errs++;
+		}
 	}
 
 	/* In case of a serious error, the device will be Reset. */
@@ -2338,7 +2708,7 @@ static void alarm_intr_handler(struct s2
 	/*
 	 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
 	 * Error occurs, the adapter will be recycled by disabling the
-	 * adapter enable bit and enabling it again after the device 
+	 * adapter enable bit and enabling it again after the device
 	 * becomes Quiescent.
 	 */
 	val64 = readq(&bar0->pcc_err_reg);
@@ -2354,18 +2724,18 @@ static void alarm_intr_handler(struct s2
 	/* Other type of interrupts are not being handled now,  TODO */
 }
 
-/** 
+/**
  *  wait_for_cmd_complete - waits for a command to complete.
- *  @sp : private member of the device structure, which is a pointer to the 
+ *  @sp : private member of the device structure, which is a pointer to the
  *  s2io_nic structure.
- *  Description: Function that waits for a command to Write into RMAC 
- *  ADDR DATA registers to be completed and returns either success or 
- *  error depending on whether the command was complete or not. 
+ *  Description: Function that waits for a command to Write into RMAC
+ *  ADDR DATA registers to be completed and returns either success or
+ *  error depending on whether the command was complete or not.
  *  Return value:
  *   SUCCESS on success and FAILURE on failure.
  */
 
-static int wait_for_cmd_complete(nic_t * sp)
+int wait_for_cmd_complete(nic_t * sp)
 {
 	XENA_dev_config_t __iomem *bar0 = sp->bar0;
 	int ret = FAILURE, cnt = 0;
@@ -2385,29 +2755,33 @@ static int wait_for_cmd_complete(nic_t *
 	return ret;
 }
 
-/** 
- *  s2io_reset - Resets the card. 
+/**
+ *  s2io_reset - Resets the card.
  *  @sp : private member of the device structure.
  *  Description: Function to Reset the card. This function then also
- *  restores the previously saved PCI configuration space registers as 
+ *  restores the previously saved PCI configuration space registers as
  *  the card reset also resets the configuration space.
  *  Return value:
  *  void.
  */
 
-static void s2io_reset(nic_t * sp)
+void s2io_reset(nic_t * sp)
 {
 	XENA_dev_config_t __iomem *bar0 = sp->bar0;
 	u64 val64;
-	u16 subid;
+	u16 subid, pci_cmd;
+
+	/* Back up the PCI-X CMD reg, don't want to lose MMRBC, OST settings */
+	if (sp->device_type == XFRAME_I_DEVICE)
+		pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
 
 	val64 = SW_RESET_ALL;
 	writeq(val64, &bar0->sw_reset);
 
-	/* 
-	 * At this stage, if the PCI write is indeed completed, the 
-	 * card is reset and so is the PCI Config space of the device. 
-	 * So a read cannot be issued at this stage on any of the 
+	/*
+	 * At this stage, if the PCI write is indeed completed, the
+	 * card is reset and so is the PCI Config space of the device.
+	 * So a read cannot be issued at this stage on any of the
 	 * registers to ensure the write into "sw_reset" register
 	 * has gone through.
 	 * Question: Is there any system call that will explicitly force
@@ -2418,42 +2792,76 @@ static void s2io_reset(nic_t * sp)
 	 */
 	msleep(250);
 
-	/* Restore the PCI state saved during initializarion. */
-	pci_restore_state(sp->pdev);
-	s2io_init_pci(sp);
+	if (!(sp->device_type & XFRAME_II_DEVICE)) {
+		/* Restore the PCI state saved during initialization. */
+		pci_restore_state(sp->pdev);
+		pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
+				     pci_cmd);
+	} else {
+		pci_set_master(sp->pdev);
+	}
+	s2io_init_pci(sp);
+
+	msleep(250);
+
+	/* Set swapper to enable I/O register access */
+	s2io_set_swapper(sp);
+
+	/* Clear certain PCI/PCI-X fields after reset */
+	if (sp->device_type == XFRAME_II_DEVICE) {
+		/* Clear parity err detect bit */
+		pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
+
+		/* Clearing PCIX Ecc status register */
+		pci_write_config_dword(sp->pdev, 0x68, 0x7C);
 
-	msleep(250);
+		/* Clearing PCI_STATUS error reflected here */
+		writeq(BIT(62), &bar0->txpic_int_reg);
+	}
+
+	/* Reset device statistics maintained by OS */
+	memset(&sp->stats, 0, sizeof (struct net_device_stats));
 
 	/* SXE-002: Configure link and activity LED to turn it off */
 	subid = sp->pdev->subsystem_device;
-	if ((subid & 0xFF) >= 0x07) {
+	if (((subid & 0xFF) >= 0x07) &&
+	    (sp->device_type == XFRAME_I_DEVICE)) {
 		val64 = readq(&bar0->gpio_control);
 		val64 |= 0x0000800000000000ULL;
 		writeq(val64, &bar0->gpio_control);
 		val64 = 0x0411040400000000ULL;
-		writeq(val64, (void __iomem *) bar0 + 0x2700);
+		writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
+	}
+
+	/*
+	 * Clear spurious ECC interrupts that would have occurred on
+	 * XFRAME II cards after reset.
+	 */
+	if (sp->device_type == XFRAME_II_DEVICE) {
+		val64 = readq(&bar0->pcc_err_reg);
+		writeq(val64, &bar0->pcc_err_reg);
 	}
 
 	sp->device_enabled_once = FALSE;
 }
 
 /**
- *  s2io_set_swapper - to set the swapper controle on the card 
- *  @sp : private member of the device structure, 
+ *  s2io_set_swapper - to set the swapper control on the card
+ *  @sp : private member of the device structure,
  *  pointer to the s2io_nic structure.
- *  Description: Function to set the swapper control on the card 
+ *  Description: Function to set the swapper control on the card
  *  correctly depending on the 'endianness' of the system.
  *  Return value:
  *  SUCCESS on success and FAILURE on failure.
  */
 
-static int s2io_set_swapper(nic_t * sp)
+int s2io_set_swapper(nic_t * sp)
 {
 	struct net_device *dev = sp->dev;
 	XENA_dev_config_t __iomem *bar0 = sp->bar0;
 	u64 val64, valt, valr;
 
-	/* 
+	/*
 	 * Set proper endian settings and verify the same by reading
 	 * the PIF Feed-back register.
 	 */
@@ -2505,8 +2913,9 @@ static int s2io_set_swapper(nic_t * sp)
 			i++;
 		}
 		if(i == 4) {
+			unsigned long long x = val64;
 			DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
-			DBG_PRINT(ERR_DBG, "reads:0x%llx\n",val64);
+			DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
 			return FAILURE;
 		}
 	}
@@ -2514,8 +2923,8 @@ static int s2io_set_swapper(nic_t * sp)
 	val64 &= 0xFFFF000000000000ULL;
 
 #ifdef  __BIG_ENDIAN
-	/* 
-	 * The device by default set to a big endian format, so a 
+	/*
+	 * The device is set to a big endian format by default, so a
 	 * big endian driver need not set anything.
 	 */
 	val64 |= (SWAPPER_CTRL_TXP_FE |
@@ -2531,9 +2940,9 @@ static int s2io_set_swapper(nic_t * sp)
 		 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
 	writeq(val64, &bar0->swapper_ctrl);
 #else
-	/* 
+	/*
 	 * Initially we enable all bits to make it accessible by the
-	 * driver, then we selectively enable only those bits that 
+	 * driver, then we selectively enable only those bits that
 	 * we want to set.
 	 */
 	val64 |= (SWAPPER_CTRL_TXP_FE |
@@ -2555,8 +2964,8 @@ static int s2io_set_swapper(nic_t * sp)
 #endif
 	val64 = readq(&bar0->swapper_ctrl);
 
-	/* 
-	 * Verifying if endian settings are accurate by reading a 
+	/*
+	 * Verifying if endian settings are accurate by reading a
 	 * feedback register.
 	 */
 	val64 = readq(&bar0->pif_rd_swapper_fb);
@@ -2576,55 +2985,63 @@ static int s2io_set_swapper(nic_t * sp)
  * Functions defined below concern the OS part of the driver *
  * ********************************************************* */
 
-/**  
+/**
  *  s2io_open - open entry point of the driver
  *  @dev : pointer to the device structure.
  *  Description:
  *  This function is the open entry point of the driver. It mainly calls a
  *  function to allocate Rx buffers and inserts them into the buffer
- *  descriptors and then enables the Rx part of the NIC. 
+ *  descriptors and then enables the Rx part of the NIC.
  *  Return value:
  *  0 on success and an appropriate (-)ve integer as defined in errno.h
  *   file on failure.
  */
 
-static int s2io_open(struct net_device *dev)
+int s2io_open(struct net_device *dev)
 {
 	nic_t *sp = dev->priv;
 	int err = 0;
 
-	/* 
-	 * Make sure you have link off by default every time 
+	/*
+	 * Make sure you have link off by default every time
 	 * Nic is initialized
 	 */
 	netif_carrier_off(dev);
-	sp->last_link_state = LINK_DOWN;
+	sp->last_link_state = 0;
 
 	/* Initialize H/W and enable interrupts */
 	if (s2io_card_up(sp)) {
 		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
 			  dev->name);
-		return -ENODEV;
+		err = -ENODEV;
+		goto hw_init_failed;
 	}
 
 	/* After proper initialization of H/W, register ISR */
-	err = request_irq((int) sp->irq, s2io_isr, SA_SHIRQ,
+	err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
 			  sp->name, dev);
 	if (err) {
-		s2io_reset(sp);
 		DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
 			  dev->name);
-		return err;
+		goto isr_registration_failed;
 	}
 
 	if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
 		DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
-		s2io_reset(sp);
-		return -ENODEV;
+		err = -ENODEV;
+		goto setting_mac_address_failed;
 	}
 
 	netif_start_queue(dev);
 	return 0;
+
+setting_mac_address_failed:
+	free_irq(sp->pdev->irq, dev);
+isr_registration_failed:
+	del_timer_sync(&sp->alarm_timer);
+	s2io_reset(sp);
+hw_init_failed:
+	return err;
 }
 
 /**
@@ -2640,16 +3057,15 @@ static int s2io_open(struct net_device *
  *  file on failure.
  */
 
-static int s2io_close(struct net_device *dev)
+int s2io_close(struct net_device *dev)
 {
 	nic_t *sp = dev->priv;
-
 	flush_scheduled_work();
 	netif_stop_queue(dev);
 	/* Reset card, kill tasklet and free Tx and Rx buffers. */
 	s2io_card_down(sp);
 
-	free_irq(dev->irq, dev);
+	free_irq(sp->pdev->irq, dev);
 	sp->device_close_flag = TRUE;	/* Device is shut down. */
 	return 0;
 }
@@ -2667,7 +3083,7 @@ static int s2io_close(struct net_device 
  *  0 on success & 1 on failure.
  */
 
-static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
+int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	nic_t *sp = dev->priv;
 	u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
@@ -2678,29 +3094,39 @@ static int s2io_xmit(struct sk_buff *skb
 #ifdef NETIF_F_TSO
 	int mss;
 #endif
+	u16 vlan_tag = 0;
+	int vlan_priority = 0;
 	mac_info_t *mac_control;
 	struct config_param *config;
-	XENA_dev_config_t __iomem *bar0 = sp->bar0;
 
 	mac_control = &sp->mac_control;
 	config = &sp->config;
 
-	DBG_PRINT(TX_DBG, "%s: In S2IO Tx routine\n", dev->name);
+	DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
 	spin_lock_irqsave(&sp->tx_lock, flags);
-
 	if (atomic_read(&sp->card_state) == CARD_DOWN) {
-		DBG_PRINT(ERR_DBG, "%s: Card going down for reset\n",
+		DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
 			  dev->name);
 		spin_unlock_irqrestore(&sp->tx_lock, flags);
-		return 1;
+		dev_kfree_skb(skb);
+		return 0;
 	}
 
 	queue = 0;
-	put_off = (u16) mac_control->tx_curr_put_info[queue].offset;
-	get_off = (u16) mac_control->tx_curr_get_info[queue].offset;
-	txdp = (TxD_t *) sp->list_info[queue][put_off].list_virt_addr;
 
-	queue_len = mac_control->tx_curr_put_info[queue].fifo_len + 1;
+	/* Get Fifo number to Transmit based on vlan priority */
+	if (sp->vlgrp && vlan_tx_tag_present(skb)) {
+		vlan_tag = vlan_tx_tag_get(skb);
+		vlan_priority = vlan_tag >> 13;
+		queue = config->fifo_mapping[vlan_priority];
+	}
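+	/*
+	 * vlan_tx_tag_get() returns the 802.1Q TCI; its top three bits
+	 * (15:13) carry the priority (PCP), so "vlan_tag >> 13" yields a
+	 * value of 0-7 that indexes config->fifo_mapping[].
+	 */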
+
+	put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
+	get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
+	txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
+		list_virt_addr;
+
+	queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
 	/* Avoid "put" pointer going beyond "get" pointer */
 	if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
 		DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n");
@@ -2709,6 +3135,15 @@ static int s2io_xmit(struct sk_buff *skb
 		spin_unlock_irqrestore(&sp->tx_lock, flags);
 		return 0;
 	}
+
+	/* A buffer with no data will be dropped */
+	if (!skb->len) {
+		DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
+		dev_kfree_skb(skb);
+		spin_unlock_irqrestore(&sp->tx_lock, flags);
+		return 0;
+	}
+
 #ifdef NETIF_F_TSO
 	mss = skb_shinfo(skb)->tso_size;
 	if (mss) {
@@ -2720,9 +3155,9 @@ static int s2io_xmit(struct sk_buff *skb
 	frg_cnt = skb_shinfo(skb)->nr_frags;
 	frg_len = skb->len - skb->data_len;
 
-	txdp->Host_Control = (unsigned long) skb;
 	txdp->Buffer_Pointer = pci_map_single
 	    (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
+	txdp->Host_Control = (unsigned long) skb;
 	if (skb->ip_summed == CHECKSUM_HW) {
 		txdp->Control_2 |=
 		    (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
@@ -2731,6 +3166,11 @@ static int s2io_xmit(struct sk_buff *skb
 
 	txdp->Control_2 |= config->tx_intr_type;
 
+	if (sp->vlgrp && vlan_tx_tag_present(skb)) {
+		txdp->Control_2 |= TXD_VLAN_ENABLE;
+		txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
+	}
+
 	txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
 			    TXD_GATHER_CODE_FIRST);
 	txdp->Control_1 |= TXD_LIST_OWN_XENA;
@@ -2738,6 +3178,9 @@ static int s2io_xmit(struct sk_buff *skb
 	/* For fragmented SKB. */
 	for (i = 0; i < frg_cnt; i++) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		/* A '0' length fragment will be ignored */
+		if (!frag->size)
+			continue;
 		txdp++;
 		txdp->Buffer_Pointer = (u64) pci_map_page
 		    (sp->pdev, frag->page, frag->page_offset,
@@ -2747,23 +3190,23 @@ static int s2io_xmit(struct sk_buff *skb
 	txdp->Control_1 |= TXD_GATHER_CODE_LAST;
 
 	tx_fifo = mac_control->tx_FIFO_start[queue];
-	val64 = sp->list_info[queue][put_off].list_phy_addr;
+	val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
 	writeq(val64, &tx_fifo->TxDL_Pointer);
 
 	val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
 		 TX_FIFO_LAST_LIST);
+
 #ifdef NETIF_F_TSO
 	if (mss)
 		val64 |= TX_FIFO_SPECIAL_FUNC;
 #endif
 	writeq(val64, &tx_fifo->List_Control);
 
-	/* Perform a PCI read to flush previous writes */
-	val64 = readq(&bar0->general_int_status);
+	mmiowb();
 
 	put_off++;
-	put_off %= mac_control->tx_curr_put_info[queue].fifo_len + 1;
-	mac_control->tx_curr_put_info[queue].offset = put_off;
+	put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
+	mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
 
 	/* Avoid "put" pointer going beyond "get" pointer */
 	if (((put_off + 1) % queue_len) == get_off) {
@@ -2779,18 +3222,74 @@ static int s2io_xmit(struct sk_buff *skb
 	return 0;
 }
 
+static void
+s2io_alarm_handle(unsigned long data)
+{
+	nic_t *sp = (nic_t *)data;
+
+	alarm_intr_handler(sp);
+	mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
+}
+
+static void s2io_txpic_intr_handle(nic_t *sp)
+{
+	XENA_dev_config_t *bar0 = (XENA_dev_config_t *) sp->bar0;
+	u64 val64;
+
+	val64 = readq(&bar0->pic_int_status);
+	if (val64 & PIC_INT_GPIO) {
+		val64 = readq(&bar0->gpio_int_reg);
+		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
+		    (val64 & GPIO_INT_REG_LINK_UP)) {
+			val64 |=  GPIO_INT_REG_LINK_DOWN;
+			val64 |= GPIO_INT_REG_LINK_UP;
+			writeq(val64, &bar0->gpio_int_reg);
+			goto masking;
+		}
+
+		if (((sp->last_link_state == LINK_UP) &&
+			(val64 & GPIO_INT_REG_LINK_DOWN)) ||
+		((sp->last_link_state == LINK_DOWN) &&
+		(val64 & GPIO_INT_REG_LINK_UP))) {
+			val64 = readq(&bar0->gpio_int_mask);
+			val64 |=  GPIO_INT_MASK_LINK_DOWN;
+			val64 |= GPIO_INT_MASK_LINK_UP;
+			writeq(val64, &bar0->gpio_int_mask);
+			s2io_set_link((unsigned long)sp);
+		}
+masking:
+		if (sp->last_link_state == LINK_UP) {
+			/*enable down interrupt */
+			val64 = readq(&bar0->gpio_int_mask);
+			/* unmasks link down intr */
+			val64 &=  ~GPIO_INT_MASK_LINK_DOWN;
+			/* masks link up intr */
+			val64 |= GPIO_INT_MASK_LINK_UP;
+			writeq(val64, &bar0->gpio_int_mask);
+		} else {
+			/*enable UP Interrupt */
+			val64 = readq(&bar0->gpio_int_mask);
+			/* unmasks link up interrupt */
+			val64 &= ~GPIO_INT_MASK_LINK_UP;
+			/* masks link down interrupt */
+			val64 |=  GPIO_INT_MASK_LINK_DOWN;
+			writeq(val64, &bar0->gpio_int_mask);
+		}
+	}
+}
+
 /**
  *  s2io_isr - ISR handler of the device .
  *  @irq: the irq of the device.
  *  @dev_id: a void pointer to the dev structure of the NIC.
  *  @pt_regs: pointer to the registers pushed on the stack.
- *  Description:  This function is the ISR handler of the device. It 
- *  identifies the reason for the interrupt and calls the relevant 
- *  service routines. As a contongency measure, this ISR allocates the 
+ *  Description:  This function is the ISR handler of the device. It
+ *  identifies the reason for the interrupt and calls the relevant
+ *  service routines. As a contingency measure, this ISR allocates the
  *  recv buffers, if their numbers are below the panic value which is
  *  presently set to 25% of the original number of rcv buffers allocated.
  *  Return value:
- *   IRQ_HANDLED: will be returned if IRQ was handled by this routine 
+ *   IRQ_HANDLED: will be returned if IRQ was handled by this routine
  *   IRQ_NONE: will be returned if interrupt is not from our device
  */
 static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
@@ -2798,40 +3297,31 @@ static irqreturn_t s2io_isr(int irq, voi
 	struct net_device *dev = (struct net_device *) dev_id;
 	nic_t *sp = dev->priv;
 	XENA_dev_config_t __iomem *bar0 = sp->bar0;
-#ifndef CONFIG_S2IO_NAPI
-	int i, ret;
-#endif
-	u64 reason = 0;
+	int i;
+	u64 reason = 0, val64;
 	mac_info_t *mac_control;
 	struct config_param *config;
 
+	atomic_inc(&sp->isr_cnt);
 	mac_control = &sp->mac_control;
 	config = &sp->config;
 
-	/* 
+	/*
 	 * Identify the cause for interrupt and call the appropriate
 	 * interrupt handler. Causes for the interrupt could be;
 	 * 1. Rx of packet.
 	 * 2. Tx complete.
 	 * 3. Link down.
-	 * 4. Error in any functional blocks of the NIC. 
+	 * 4. Error in any functional blocks of the NIC.
 	 */
 	reason = readq(&bar0->general_int_status);
 
 	if (!reason) {
 		/* The interrupt was not raised by Xena. */
+		atomic_dec(&sp->isr_cnt);
 		return IRQ_NONE;
 	}
 
-	/* If Intr is because of Tx Traffic */
-	if (reason & GEN_INTR_TXTRAFFIC) {
-		tx_intr_handler(sp);
-	}
-
-	/* If Intr is because of an error */
-	if (reason & (GEN_ERROR_INTR))
-		alarm_intr_handler(sp);
-
 #ifdef CONFIG_S2IO_NAPI
 	if (reason & GEN_INTR_RXTRAFFIC) {
 		if (netif_rx_schedule_prep(dev)) {
@@ -2843,17 +3333,43 @@ static irqreturn_t s2io_isr(int irq, voi
 #else
 	/* If Intr is because of Rx Traffic */
 	if (reason & GEN_INTR_RXTRAFFIC) {
-		rx_intr_handler(sp);
+		/*
+		 * rx_traffic_int reg is an R1 register, writing all 1's
+		 * will ensure that the actual interrupt causing bit get's
+		 * cleared and hence a read can be avoided.
+		 */
+		val64 = 0xFFFFFFFFFFFFFFFFULL;
+		writeq(val64, &bar0->rx_traffic_int);
+		for (i = 0; i < config->rx_ring_num; i++) {
+			rx_intr_handler(&mac_control->rings[i]);
+		}
 	}
 #endif
 
-	/* 
-	 * If the Rx buffer count is below the panic threshold then 
-	 * reallocate the buffers from the interrupt handler itself, 
+	/* If Intr is because of Tx Traffic */
+	if (reason & GEN_INTR_TXTRAFFIC) {
+		/*
+		 * tx_traffic_int reg is an R1 register, writing all 1's
+		 * will ensure that the actual interrupt-causing bit gets
+		 * cleared and hence a read can be avoided.
+		 */
+		val64 = 0xFFFFFFFFFFFFFFFFULL;
+		writeq(val64, &bar0->tx_traffic_int);
+
+		for (i = 0; i < config->tx_fifo_num; i++)
+			tx_intr_handler(&mac_control->fifos[i]);
+	}
+
+	if (reason & GEN_INTR_TXPIC)
+		s2io_txpic_intr_handle(sp);
+	/*
+	 * If the Rx buffer count is below the panic threshold then
+	 * reallocate the buffers from the interrupt handler itself,
 	 * else schedule a tasklet to reallocate the buffers.
 	 */
 #ifndef CONFIG_S2IO_NAPI
 	for (i = 0; i < config->rx_ring_num; i++) {
+		int ret;
 		int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
 		int level = rx_buffer_level(sp, rxb_size, i);
 
@@ -2865,6 +3381,7 @@ static irqreturn_t s2io_isr(int irq, voi
 					  dev->name);
 				DBG_PRINT(ERR_DBG, " in ISR!!\n");
 				clear_bit(0, (&sp->tasklet_status));
+				atomic_dec(&sp->isr_cnt);
 				return IRQ_HANDLED;
 			}
 			clear_bit(0, (&sp->tasklet_status));
@@ -2874,33 +3391,69 @@ static irqreturn_t s2io_isr(int irq, voi
 	}
 #endif
 
+	atomic_dec(&sp->isr_cnt);
 	return IRQ_HANDLED;
 }
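+
+/*
+ * sp->isr_cnt is incremented on entry to s2io_isr() and decremented on
+ * every exit path; s2io_card_down() polls it so the card is reset only
+ * after no interrupt handler is still running.
+ */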
 
 /**
- *  s2io_get_stats - Updates the device statistics structure. 
+ * s2io_updt_stats - triggers a one-shot update of the device statistics block
+ */
+static void s2io_updt_stats(nic_t *sp)
+{
+	XENA_dev_config_t __iomem *bar0 = sp->bar0;
+	u64 val64;
+	int cnt = 0;
+
+	if (atomic_read(&sp->card_state) == CARD_UP) {
+		/* Approx. 30us on a 133 MHz bus */
+		val64 = SET_UPDT_CLICKS(10) |
+			STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
+		writeq(val64, &bar0->stat_cfg);
+		do {
+			udelay(100);
+			val64 = readq(&bar0->stat_cfg);
+			if (!(val64 & BIT(0)))
+				break;
+			cnt++;
+			if (cnt == 5)
+				break; /* Updt failed */
+		} while(1);
+	}
+}
+
+/**
+ *  s2io_get_stats - Updates the device statistics structure.
  *  @dev : pointer to the device structure.
  *  Description:
- *  This function updates the device statistics structure in the s2io_nic 
+ *  This function updates the device statistics structure in the s2io_nic
  *  structure and returns a pointer to the same.
  *  Return value:
  *  pointer to the updated net_device_stats structure.
  */
 
-static struct net_device_stats *s2io_get_stats(struct net_device *dev)
+struct net_device_stats *s2io_get_stats(struct net_device *dev)
 {
 	nic_t *sp = dev->priv;
 	mac_info_t *mac_control;
 	struct config_param *config;
 
+
 	mac_control = &sp->mac_control;
 	config = &sp->config;
 
-	sp->stats.tx_errors = mac_control->stats_info->tmac_any_err_frms;
-	sp->stats.rx_errors = mac_control->stats_info->rmac_drop_frms;
-	sp->stats.multicast = mac_control->stats_info->rmac_vld_mcst_frms;
+	/* Configure Stats for immediate updt */
+	s2io_updt_stats(sp);
+
+	sp->stats.tx_packets =
+		le32_to_cpu(mac_control->stats_info->tmac_frms);
+	sp->stats.tx_errors =
+		le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
+	sp->stats.rx_errors =
+		le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
+	sp->stats.multicast =
+		le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
 	sp->stats.rx_length_errors =
-	    mac_control->stats_info->rmac_long_frms;
+		le32_to_cpu(mac_control->stats_info->rmac_long_frms);
 
 	return (&sp->stats);
 }
@@ -2909,8 +3462,8 @@ static struct net_device_stats *s2io_get
  *  s2io_set_multicast - entry point for multicast address enable/disable.
  *  @dev : pointer to the device structure
  *  Description:
- *  This function is a driver entry point which gets called by the kernel 
- *  whenever multicast addresses must be enabled/disabled. This also gets 
+ *  This function is a driver entry point which gets called by the kernel
+ *  whenever multicast addresses must be enabled/disabled. This also gets
 *  called to set/reset promiscuous mode. Depending on the device flag, we
  *  determine, if multicast address must be enabled or if promiscuous mode
  *  is to be disabled etc.
@@ -2948,6 +3501,8 @@ static void s2io_set_multicast(struct ne
 		/*  Disable all Multicast addresses */
 		writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
 		       &bar0->rmac_addr_data0_mem);
+		writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
+		       &bar0->rmac_addr_data1_mem);
 		val64 = RMAC_ADDR_CMD_MEM_WE |
 		    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
 		    RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
@@ -3010,7 +3565,7 @@ static void s2io_set_multicast(struct ne
 			writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
 			       &bar0->rmac_addr_data0_mem);
 			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
-		       		&bar0->rmac_addr_data1_mem);
+				&bar0->rmac_addr_data1_mem);
 			val64 = RMAC_ADDR_CMD_MEM_WE |
 			    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
 			    RMAC_ADDR_CMD_MEM_OFFSET
@@ -3039,8 +3594,7 @@ static void s2io_set_multicast(struct ne
 			writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
 			       &bar0->rmac_addr_data0_mem);
 			writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
-		       		&bar0->rmac_addr_data1_mem);
-
+				&bar0->rmac_addr_data1_mem);
 			val64 = RMAC_ADDR_CMD_MEM_WE |
 			    RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
 			    RMAC_ADDR_CMD_MEM_OFFSET
@@ -3059,12 +3613,12 @@ static void s2io_set_multicast(struct ne
 }
 
 /**
- *  s2io_set_mac_addr - Programs the Xframe mac address 
+ *  s2io_set_mac_addr - Programs the Xframe mac address
  *  @dev : pointer to the device structure.
  *  @addr: a uchar pointer to the new mac address which is to be set.
- *  Description : This procedure will program the Xframe to receive 
+ *  Description : This procedure will program the Xframe to receive
  *  frames with new Mac Address
- *  Return value: SUCCESS on success and an appropriate (-)ve integer 
+ *  Return value: SUCCESS on success and an appropriate (-)ve integer
  *  as defined in errno.h file on failure.
  */
 
@@ -3075,10 +3629,10 @@ int s2io_set_mac_addr(struct net_device 
 	register u64 val64, mac_addr = 0;
 	int i;
 
-	/* 
+	/*
 	 * Set the new MAC address as the new unicast filter and reflect this
 	 * change on the device address registered with the OS. It will be
-	 * at offset 0. 
+	 * at offset 0.
 	 */
 	for (i = 0; i < ETH_ALEN; i++) {
 		mac_addr <<= 8;
@@ -3102,12 +3656,12 @@ int s2io_set_mac_addr(struct net_device 
 }
 
 /**
- * s2io_ethtool_sset - Sets different link parameters. 
+ * s2io_ethtool_sset - Sets different link parameters.
 * @sp : private member of the device structure, which is a pointer to the
 *      s2io_nic structure.
  * @info: pointer to the structure with parameters given by ethtool to set
  * link information.
  * Description:
- * The function sets different link parameters provided by the user onto 
+ * The function sets different link parameters provided by the user onto
  * the NIC.
  * Return value:
  * 0 on success.
@@ -3129,7 +3683,7 @@ static int s2io_ethtool_sset(struct net_
 }
 
 /**
- * s2io_ethtol_gset - Return link specific information. 
+ * s2io_ethtool_gset - Return link specific information.
  * @sp : private member of the device structure, pointer to the
  *      s2io_nic structure.
  * @info : pointer to the structure with parameters given by ethtool
@@ -3161,8 +3715,8 @@ static int s2io_ethtool_gset(struct net_
 }
 
 /**
- * s2io_ethtool_gdrvinfo - Returns driver specific information. 
- * @sp : private member of the device structure, which is a pointer to the 
+ * s2io_ethtool_gdrvinfo - Returns driver specific information.
+ * @sp : private member of the device structure, which is a pointer to the
  * s2io_nic structure.
  * @info : pointer to the structure with parameters given by ethtool to
  * return driver information.
@@ -3190,9 +3744,9 @@ static void s2io_ethtool_gdrvinfo(struct
 
 /**
 *  s2io_ethtool_gregs - dumps the entire space of Xframe into the buffer.
- *  @sp: private member of the device structure, which is a pointer to the 
+ *  @sp: private member of the device structure, which is a pointer to the
  *  s2io_nic structure.
- *  @regs : pointer to the structure with parameters given by ethtool for 
+ *  @regs : pointer to the structure with parameters given by ethtool for
  *  dumping the registers.
 *  @reg_space: The input argument into which all the registers are dumped.
  *  Description:
@@ -3221,11 +3775,11 @@ static void s2io_ethtool_gregs(struct ne
 
 /**
  *  s2io_phy_id  - timer function that alternates adapter LED.
- *  @data : address of the private member of the device structure, which 
+ *  @data : address of the private member of the device structure, which
  *  is a pointer to the s2io_nic structure, provided as an u32.
- * Description: This is actually the timer function that alternates the 
- * adapter LED bit of the adapter control bit to set/reset every time on 
- * invocation. The timer is set for 1/2 a second, hence tha NIC blinks 
+ * Description: This is actually the timer function that alternates the
+ * adapter LED bit of the adapter control bit to set/reset every time on
+ * invocation. The timer is set for 1/2 a second, hence the NIC blinks
  *  once every second.
 */
 static void s2io_phy_id(unsigned long data)
@@ -3236,7 +3790,8 @@ static void s2io_phy_id(unsigned long da
 	u16 subid;
 
 	subid = sp->pdev->subsystem_device;
-	if ((subid & 0xFF) >= 0x07) {
+	if ((sp->device_type == XFRAME_II_DEVICE) ||
+		   ((subid & 0xFF) >= 0x07)) {
 		val64 = readq(&bar0->gpio_control);
 		val64 ^= GPIO_CTRL_GPIO_0;
 		writeq(val64, &bar0->gpio_control);
@@ -3253,12 +3808,12 @@ static void s2io_phy_id(unsigned long da
  * s2io_ethtool_idnic - To physically identify the nic on the system.
  * @sp : private member of the device structure, which is a pointer to the
  * s2io_nic structure.
- * @id : pointer to the structure with identification parameters given by 
+ * @id : pointer to the structure with identification parameters given by
  * ethtool.
  * Description: Used to physically identify the NIC on the system.
- * The Link LED will blink for a time specified by the user for 
+ * The Link LED will blink for a time specified by the user for
  * identification.
- * NOTE: The Link has to be Up to be able to blink the LED. Hence 
+ * NOTE: The Link has to be Up to be able to blink the LED. Hence
 * identification is possible only if its link is up.
  * Return value:
  * int , returns 0 on success
@@ -3273,7 +3828,8 @@ static int s2io_ethtool_idnic(struct net
 
 	subid = sp->pdev->subsystem_device;
 	last_gpio_ctrl_val = readq(&bar0->gpio_control);
-	if ((subid & 0xFF) < 0x07) {
+	if ((sp->device_type == XFRAME_I_DEVICE) &&
+		((subid & 0xFF) < 0x07)) {
 		val64 = readq(&bar0->adapter_control);
 		if (!(val64 & ADAPTER_CNTL_EN)) {
 			printk(KERN_ERR
@@ -3288,12 +3844,12 @@ static int s2io_ethtool_idnic(struct net
 	}
 	mod_timer(&sp->id_timer, jiffies);
 	if (data)
-		msleep(data * 1000);
+		msleep_interruptible(data * HZ);
 	else
-		msleep(0xFFFFFFFF);
+		msleep_interruptible(MAX_FLICKER_TIME);
 	del_timer_sync(&sp->id_timer);
 
-	if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
+	if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
 		writeq(last_gpio_ctrl_val, &bar0->gpio_control);
 		last_gpio_ctrl_val = readq(&bar0->gpio_control);
 	}
@@ -3303,7 +3859,8 @@ static int s2io_ethtool_idnic(struct net
 
 /**
  * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
- * @sp : private member of the device structure, which is a pointer to the  * s2io_nic structure.
+ * @sp : private member of the device structure, which is a pointer to the
+ *	s2io_nic structure.
  * @ep : pointer to the structure with pause parameters given by ethtool.
  * Description:
  * Returns the Pause frame generation and reception capability of the NIC.
@@ -3327,7 +3884,7 @@ static void s2io_ethtool_getpause_data(s
 
 /**
  * s2io_ethtool_setpause_data -  set/reset pause frame generation.
- * @sp : private member of the device structure, which is a pointer to the 
+ * @sp : private member of the device structure, which is a pointer to the
  *      s2io_nic structure.
  * @ep : pointer to the structure with pause parameters given by ethtool.
  * Description:
@@ -3338,7 +3895,7 @@ static void s2io_ethtool_getpause_data(s
  */
 
 static int s2io_ethtool_setpause_data(struct net_device *dev,
-				      struct ethtool_pauseparam *ep)
+			       struct ethtool_pauseparam *ep)
 {
 	u64 val64;
 	nic_t *sp = dev->priv;
@@ -3359,13 +3916,13 @@ static int s2io_ethtool_setpause_data(st
 
 /**
  * read_eeprom - reads 4 bytes of data from user given offset.
- * @sp : private member of the device structure, which is a pointer to the 
+ * @sp : private member of the device structure, which is a pointer to the
  *      s2io_nic structure.
  * @off : offset at which the data must be written
  * @data : Its an output parameter where the data read at the given
- * 	offset is stored.
+ *	offset is stored.
  * Description:
- * Will read 4 bytes of data from the user given offset and return the 
+ * Will read 4 bytes of data from the user given offset and return the
  * read data.
  * NOTE: Will allow to read only part of the EEPROM visible through the
  *   I2C bus.
@@ -3406,7 +3963,7 @@ static int read_eeprom(nic_t * sp, int o
  *       s2io_nic structure.
  *  @off : offset at which the data must be written
  *  @data : The data that is to be written
- *  @cnt : Number of bytes of the data that are actually to be written into 
+ *  @cnt : Number of bytes of the data that are actually to be written into
  *  the Eeprom. (max of 3)
  * Description:
  *  Actually writes the relevant part of the data value into the Eeprom
@@ -3443,7 +4000,7 @@ static int write_eeprom(nic_t * sp, int 
 /**
  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
 *  @sp : private member of the device structure, which is a pointer to the
 *       s2io_nic structure.
- *  @eeprom : pointer to the user level structure provided by ethtool, 
+ *  @eeprom : pointer to the user level structure provided by ethtool,
  *  containing all relevant information.
  *  @data_buf : user defined value to be written into Eeprom.
  *  Description: Reads the values stored in the Eeprom at given offset
@@ -3454,7 +4011,7 @@ static int write_eeprom(nic_t * sp, int 
  */
 
 static int s2io_ethtool_geeprom(struct net_device *dev,
-				struct ethtool_eeprom *eeprom, u8 * data_buf)
+			 struct ethtool_eeprom *eeprom, u8 * data_buf)
 {
 	u32 data, i, valid;
 	nic_t *sp = dev->priv;
@@ -3479,7 +4036,7 @@ static int s2io_ethtool_geeprom(struct n
  *  s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
  *  @sp : private member of the device structure, which is a pointer to the
  *  s2io_nic structure.
- *  @eeprom : pointer to the user level structure provided by ethtool, 
+ *  @eeprom : pointer to the user level structure provided by ethtool,
  *  containing all relevant information.
  *  @data_buf ; user defined value to be written into Eeprom.
  *  Description:
@@ -3527,8 +4084,8 @@ static int s2io_ethtool_seeprom(struct n
 }
 
 /**
- * s2io_register_test - reads and writes into all clock domains. 
- * @sp : private member of the device structure, which is a pointer to the 
+ * s2io_register_test - reads and writes into all clock domains.
+ * @sp : private member of the device structure, which is a pointer to the
  * s2io_nic structure.
  * @data : variable that returns the result of each of the test conducted b
  * by the driver.
@@ -3545,8 +4102,8 @@ static int s2io_register_test(nic_t * sp
 	u64 val64 = 0;
 	int fail = 0;
 
-	val64 = readq(&bar0->pcc_enable);
-	if (val64 != 0xff00000000000000ULL) {
+	val64 = readq(&bar0->pif_rd_swapper_fb);
+	if (val64 != 0x123456789abcdefULL) {
 		fail = 1;
 		DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
 	}
@@ -3590,13 +4147,13 @@ static int s2io_register_test(nic_t * sp
 }
 
 /**
- * s2io_eeprom_test - to verify that EEprom in the xena can be programmed. 
+ * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
  * @sp : private member of the device structure, which is a pointer to the
  * s2io_nic structure.
  * @data:variable that returns the result of each of the test conducted by
  * the driver.
  * Description:
- * Verify that EEPROM in the xena can be programmed using I2C_CONTROL 
+ * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
  * register.
  * Return value:
  * 0 on success.
@@ -3661,14 +4218,14 @@ static int s2io_eeprom_test(nic_t * sp, 
 
 /**
  * s2io_bist_test - invokes the MemBist test of the card .
- * @sp : private member of the device structure, which is a pointer to the 
+ * @sp : private member of the device structure, which is a pointer to the
  * s2io_nic structure.
- * @data:variable that returns the result of each of the test conducted by 
+ * @data:variable that returns the result of each of the test conducted by
  * the driver.
  * Description:
  * This invokes the MemBist test of the card. We give around
  * 2 secs time for the Test to complete. If it's still not complete
- * within this peiod, we consider that the test failed. 
+ * within this period, we consider that the test failed.
  * Return value:
  * 0 on success and -1 on failure.
  */
@@ -3697,13 +4254,13 @@ static int s2io_bist_test(nic_t * sp, ui
 }
 
 /**
- * s2io-link_test - verifies the link state of the nic  
- * @sp ; private member of the device structure, which is a pointer to the 
+ * s2io_link_test - verifies the link state of the nic
+ * @sp : private member of the device structure, which is a pointer to the
  * s2io_nic structure.
  * @data: variable that returns the result of each of the test conducted by
  * the driver.
  * Description:
- * The function verifies the link state of the NIC and updates the input 
+ * The function verifies the link state of the NIC and updates the input
  * argument 'data' appropriately.
  * Return value:
  * 0 on success.
@@ -3722,13 +4279,13 @@ static int s2io_link_test(nic_t * sp, ui
 }
 
 /**
- * s2io_rldram_test - offline test for access to the RldRam chip on the NIC 
- * @sp - private member of the device structure, which is a pointer to the  
+ * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
+ * @sp - private member of the device structure, which is a pointer to the
  * s2io_nic structure.
- * @data - variable that returns the result of each of the test 
+ * @data - variable that returns the result of each of the test
  * conducted by the driver.
  * Description:
- *  This is one of the offline test that tests the read and write 
+ *  This is one of the offline tests that verifies the read and write
  *  access to the RldRam chip on the NIC.
  * Return value:
  *  0 on success.
@@ -3833,7 +4390,7 @@ static int s2io_rldram_test(nic_t * sp, 
  *  s2io_nic structure.
  *  @ethtest : pointer to a ethtool command specific structure that will be
  *  returned to the user.
- *  @data : variable that returns the result of each of the test 
+ *  @data : variable that returns the result of each of the test
  * conducted by the driver.
  * Description:
  *  This function conducts 6 tests ( 4 offline and 2 online) to determine
@@ -3851,23 +4408,18 @@ static void s2io_ethtool_test(struct net
 
 	if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
 		/* Offline Tests. */
-		if (orig_state) {
+		if (orig_state)
 			s2io_close(sp->dev);
-			s2io_set_swapper(sp);
-		} else
-			s2io_set_swapper(sp);
 
 		if (s2io_register_test(sp, &data[0]))
 			ethtest->flags |= ETH_TEST_FL_FAILED;
 
 		s2io_reset(sp);
-		s2io_set_swapper(sp);
 
 		if (s2io_rldram_test(sp, &data[3]))
 			ethtest->flags |= ETH_TEST_FL_FAILED;
 
 		s2io_reset(sp);
-		s2io_set_swapper(sp);
 
 		if (s2io_eeprom_test(sp, &data[1]))
 			ethtest->flags |= ETH_TEST_FL_FAILED;
@@ -3910,61 +4462,111 @@ static void s2io_get_ethtool_stats(struc
 	nic_t *sp = dev->priv;
 	StatInfo_t *stat_info = sp->mac_control.stats_info;
 
-	tmp_stats[i++] = le32_to_cpu(stat_info->tmac_frms);
-	tmp_stats[i++] = le32_to_cpu(stat_info->tmac_data_octets);
+	s2io_updt_stats(sp);
+	tmp_stats[i++] =
+		(u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32  |
+		le32_to_cpu(stat_info->tmac_frms);
+	tmp_stats[i++] =
+		(u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
+		le32_to_cpu(stat_info->tmac_data_octets);
 	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
-	tmp_stats[i++] = le32_to_cpu(stat_info->tmac_mcst_frms);
-	tmp_stats[i++] = le32_to_cpu(stat_info->tmac_bcst_frms);
+	tmp_stats[i++] =
+		(u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
+		le32_to_cpu(stat_info->tmac_mcst_frms);
+	tmp_stats[i++] =
+		(u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
+		le32_to_cpu(stat_info->tmac_bcst_frms);
 	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
-	tmp_stats[i++] = le32_to_cpu(stat_info->tmac_any_err_frms);
+	tmp_stats[i++] =
+		(u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
+		le32_to_cpu(stat_info->tmac_any_err_frms);
 	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
-	tmp_stats[i++] = le32_to_cpu(stat_info->tmac_vld_ip);
-	tmp_stats[i++] = le32_to_cpu(stat_info->tmac_drop_ip);
-	tmp_stats[i++] = le32_to_cpu(stat_info->tmac_icmp);
-	tmp_stats[i++] = le32_to_cpu(stat_info->tmac_rst_tcp);
+	tmp_stats[i++] =
+		(u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
+		le32_to_cpu(stat_info->tmac_vld_ip);
+	tmp_stats[i++] =
+		(u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
+		le32_to_cpu(stat_info->tmac_drop_ip);
+	tmp_stats[i++] =
+		(u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
+		le32_to_cpu(stat_info->tmac_icmp);
+	tmp_stats[i++] =
+		(u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
+		le32_to_cpu(stat_info->tmac_rst_tcp);
 	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
-	tmp_stats[i++] = le32_to_cpu(stat_info->tmac_udp);
-	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_frms);
-	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_data_octets);
+	tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
+		le32_to_cpu(stat_info->tmac_udp);
+	tmp_stats[i++] =
+		(u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
+		le32_to_cpu(stat_info->rmac_vld_frms);
+	tmp_stats[i++] =
+		(u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
+		le32_to_cpu(stat_info->rmac_data_octets);
 	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
 	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
-	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_mcst_frms);
-	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_bcst_frms);
+	tmp_stats[i++] =
+		(u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
+		le32_to_cpu(stat_info->rmac_vld_mcst_frms);
+	tmp_stats[i++] =
+		(u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
+		le32_to_cpu(stat_info->rmac_vld_bcst_frms);
 	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
 	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
 	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
-	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_discarded_frms);
-	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_usized_frms);
-	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_osized_frms);
-	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_frag_frms);
-	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_jabber_frms);
-	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ip);
+	tmp_stats[i++] =
+		(u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
+		le32_to_cpu(stat_info->rmac_discarded_frms);
+	tmp_stats[i++] =
+		(u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
+		le32_to_cpu(stat_info->rmac_usized_frms);
+	tmp_stats[i++] =
+		(u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
+		le32_to_cpu(stat_info->rmac_osized_frms);
+	tmp_stats[i++] =
+		(u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
+		le32_to_cpu(stat_info->rmac_frag_frms);
+	tmp_stats[i++] =
+		(u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
+		le32_to_cpu(stat_info->rmac_jabber_frms);
+	tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
+		le32_to_cpu(stat_info->rmac_ip);
 	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
 	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
-	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_drop_ip);
-	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_icmp);
+	tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
+		le32_to_cpu(stat_info->rmac_drop_ip);
+	tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
+		le32_to_cpu(stat_info->rmac_icmp);
 	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
-	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_udp);
-	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_drp_udp);
-	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pause_cnt);
-	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_accepted_ip);
+	tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
+		le32_to_cpu(stat_info->rmac_udp);
+	tmp_stats[i++] =
+		(u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
+		le32_to_cpu(stat_info->rmac_err_drp_udp);
+	tmp_stats[i++] =
+		(u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
+		le32_to_cpu(stat_info->rmac_pause_cnt);
+	tmp_stats[i++] =
+		(u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
+		le32_to_cpu(stat_info->rmac_accepted_ip);
 	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
+	tmp_stats[i++] = 0;
+	tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
+	tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
 }
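+
+/*
+ * Each 64-bit value above is assembled from a 32-bit overflow register
+ * and a 32-bit counter.  A minimal sketch of the idea (the helper name
+ * s2io_stat64() is illustrative only and is not defined in this patch):
+ *
+ *	static inline u64 s2io_stat64(u32 oflow, u32 cnt)
+ *	{
+ *		return ((u64)oflow << 32) | cnt;
+ *	}
+ *
+ * e.g.	tmp_stats[i++] = s2io_stat64(le32_to_cpu(stat_info->tmac_frms_oflow),
+ *				     le32_to_cpu(stat_info->tmac_frms));
+ */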
 
-static int s2io_ethtool_get_regs_len(struct net_device *dev)
+int s2io_ethtool_get_regs_len(struct net_device *dev)
 {
 	return (XENA_REG_SPACE);
 }
 
 
-static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
+u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
 {
 	nic_t *sp = dev->priv;
 
 	return (sp->rx_csum);
 }
-
-static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
+int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
 {
 	nic_t *sp = dev->priv;
 
@@ -3975,19 +4577,17 @@ static int s2io_ethtool_set_rx_csum(stru
 
 	return 0;
 }
-
-static int s2io_get_eeprom_len(struct net_device *dev)
+int s2io_get_eeprom_len(struct net_device *dev)
 {
 	return (XENA_EEPROM_SPACE);
 }
 
-static int s2io_ethtool_self_test_count(struct net_device *dev)
+int s2io_ethtool_self_test_count(struct net_device *dev)
 {
 	return (S2IO_TEST_LEN);
 }
-
-static void s2io_ethtool_get_strings(struct net_device *dev,
-				     u32 stringset, u8 * data)
+void s2io_ethtool_get_strings(struct net_device *dev,
+			      u32 stringset, u8 * data)
 {
 	switch (stringset) {
 	case ETH_SS_TEST:
@@ -3998,13 +4598,12 @@ static void s2io_ethtool_get_strings(str
 		       sizeof(ethtool_stats_keys));
 	}
 }
-
 static int s2io_ethtool_get_stats_count(struct net_device *dev)
 {
 	return (S2IO_STAT_LEN);
 }
 
-static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
+int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
 {
 	if (data)
 		dev->features |= NETIF_F_IP_CSUM;
@@ -4046,21 +4645,18 @@ static struct ethtool_ops netdev_ethtool
 };
 
 /**
- *  s2io_ioctl - Entry point for the Ioctl 
+ *  s2io_ioctl - Entry point for the Ioctl
  *  @dev :  Device pointer.
 *  @ifr :  An IOCTL specific structure that can contain a pointer to
  *  a proprietary structure used to pass information to the driver.
  *  @cmd :  This is used to distinguish between the different commands that
  *  can be passed to the IOCTL functions.
  *  Description:
- *  This function has support for ethtool, adding multiple MAC addresses on 
- *  the NIC and some DBG commands for the util tool.
- *  Return value:
- *  Currently the IOCTL supports no operations, hence by default this
- *  function returns OP NOT SUPPORTED value.
+ *  Currently there is no special functionality supported in IOCTL, hence
+ *  the function always returns -EOPNOTSUPP.
  */
 
-static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
 	return -EOPNOTSUPP;
 }
@@ -4076,17 +4672,9 @@ static int s2io_ioctl(struct net_device 
  *   file on failure.
  */
 
-static int s2io_change_mtu(struct net_device *dev, int new_mtu)
+int s2io_change_mtu(struct net_device *dev, int new_mtu)
 {
 	nic_t *sp = dev->priv;
-	XENA_dev_config_t __iomem *bar0 = sp->bar0;
-	register u64 val64;
-
-	if (netif_running(dev)) {
-		DBG_PRINT(ERR_DBG, "%s: Must be stopped to ", dev->name);
-		DBG_PRINT(ERR_DBG, "change its MTU \n");
-		return -EBUSY;
-	}
 
 	if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
 		DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
@@ -4094,11 +4682,22 @@ static int s2io_change_mtu(struct net_de
 		return -EPERM;
 	}
 
-	/* Set the new MTU into the PYLD register of the NIC */
-	val64 = new_mtu;
-	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
-
 	dev->mtu = new_mtu;
+	if (netif_running(dev)) {
+		s2io_card_down(sp);
+		netif_stop_queue(dev);
+		if (s2io_card_up(sp)) {
+			DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
+				  __FUNCTION__);
+		}
+		if (netif_queue_stopped(dev))
+			netif_wake_queue(dev);
+	} else { /* Device is down */
+		XENA_dev_config_t __iomem *bar0 = sp->bar0;
+		u64 val64 = new_mtu;
+
+		writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
+	}
 
 	return 0;
 }
@@ -4108,9 +4707,9 @@ static int s2io_change_mtu(struct net_de
  *  @dev_adr : address of the device structure in dma_addr_t format.
  *  Description:
  *  This is the tasklet or the bottom half of the ISR. This is
- *  an extension of the ISR which is scheduled by the scheduler to be run 
+ *  an extension of the ISR which is scheduled by the scheduler to be run
  *  when the load on the CPU is low. All low priority tasks of the ISR can
- *  be pushed into the tasklet. For now the tasklet is used only to 
+ *  be pushed into the tasklet. For now the tasklet is used only to
  *  replenish the Rx buffers in the Rx buffer descriptors.
  *  Return value:
  *  void.
@@ -4166,19 +4765,22 @@ static void s2io_set_link(unsigned long 
 	}
 
 	subid = nic->pdev->subsystem_device;
-	/* 
-	 * Allow a small delay for the NICs self initiated 
-	 * cleanup to complete.
-	 */
-	msleep(100);
+	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
+		/*
+		 * Allow a small delay for the NIC's self-initiated
+		 * cleanup to complete.
+		 */
+		msleep(100);
+	}
 
 	val64 = readq(&bar0->adapter_status);
-	if (verify_xena_quiescence(val64, nic->device_enabled_once)) {
+	if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
 		if (LINK_IS_UP(val64)) {
 			val64 = readq(&bar0->adapter_control);
 			val64 |= ADAPTER_CNTL_EN;
 			writeq(val64, &bar0->adapter_control);
-			if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
+			if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
+							     subid)) {
 				val64 = readq(&bar0->gpio_control);
 				val64 |= GPIO_CTRL_GPIO_0;
 				writeq(val64, &bar0->gpio_control);
@@ -4187,20 +4789,24 @@ static void s2io_set_link(unsigned long 
 				val64 |= ADAPTER_LED_ON;
 				writeq(val64, &bar0->adapter_control);
 			}
-			val64 = readq(&bar0->adapter_status);
-			if (!LINK_IS_UP(val64)) {
-				DBG_PRINT(ERR_DBG, "%s:", dev->name);
-				DBG_PRINT(ERR_DBG, " Link down");
-				DBG_PRINT(ERR_DBG, "after ");
-				DBG_PRINT(ERR_DBG, "enabling ");
-				DBG_PRINT(ERR_DBG, "device \n");
+			if (s2io_link_fault_indication(nic) ==
+						MAC_RMAC_ERR_TIMER) {
+				val64 = readq(&bar0->adapter_status);
+				if (!LINK_IS_UP(val64)) {
+					DBG_PRINT(ERR_DBG, "%s:", dev->name);
+					DBG_PRINT(ERR_DBG, " Link down");
+					DBG_PRINT(ERR_DBG, "after ");
+					DBG_PRINT(ERR_DBG, "enabling ");
+					DBG_PRINT(ERR_DBG, "device \n");
+				}
 			}
 			if (nic->device_enabled_once == FALSE) {
 				nic->device_enabled_once = TRUE;
 			}
 			s2io_link(nic, LINK_UP);
 		} else {
-			if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
+			if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
+							      subid)) {
 				val64 = readq(&bar0->gpio_control);
 				val64 &= ~GPIO_CTRL_GPIO_0;
 				writeq(val64, &bar0->gpio_control);
@@ -4223,9 +4829,11 @@ static void s2io_card_down(nic_t * sp)
 	unsigned long flags;
 	register u64 val64 = 0;
 
+	del_timer_sync(&sp->alarm_timer);
 	/* If s2io_set_link task is executing, wait till it completes. */
-	while (test_and_set_bit(0, &(sp->link_state)))
+	while (test_and_set_bit(0, &(sp->link_state))) {
 		msleep(50);
+	}
 	atomic_set(&sp->card_state, CARD_DOWN);
 
 	/* disable Tx and Rx traffic on the NIC */
@@ -4237,7 +4845,7 @@ static void s2io_card_down(nic_t * sp)
 	/* Check if the device is Quiescent and then Reset the NIC */
 	do {
 		val64 = readq(&bar0->adapter_status);
-		if (verify_xena_quiescence(val64, sp->device_enabled_once)) {
+		if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
 			break;
 		}
 
@@ -4251,14 +4859,27 @@ static void s2io_card_down(nic_t * sp)
 			break;
 		}
 	} while (1);
-	spin_lock_irqsave(&sp->tx_lock, flags);
 	s2io_reset(sp);
 
-	/* Free all unused Tx and Rx buffers */
+	/* Waiting till all Interrupt handlers are complete */
+	cnt = 0;
+	do {
+		msleep(10);
+		if (!atomic_read(&sp->isr_cnt))
+			break;
+		cnt++;
+	} while(cnt < 5);
+
+	spin_lock_irqsave(&sp->tx_lock, flags);
+	/* Free all Tx buffers */
 	free_tx_buffers(sp);
+	spin_unlock_irqrestore(&sp->tx_lock, flags);
+
+	/* Free all Rx buffers */
+	spin_lock_irqsave(&sp->rx_lock, flags);
 	free_rx_buffers(sp);
+	spin_unlock_irqrestore(&sp->rx_lock, flags);
 
-	spin_unlock_irqrestore(&sp->tx_lock, flags);
 	clear_bit(0, &(sp->link_state));
 }
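
Note that the ISR-drain loop added above only works if every interrupt handler brackets itself with updates to sp->isr_cnt; that bookkeeping is done in the interrupt handlers elsewhere in this patch. A sketch of the assumed pairing, for illustration only (not the driver's verbatim code):

	static irqreturn_t example_isr(int irq, void *dev_id, struct pt_regs *regs)
	{
		struct net_device *dev = (struct net_device *) dev_id;
		nic_t *sp = dev->priv;

		atomic_inc(&sp->isr_cnt);	/* s2io_card_down() polls this */

		/* interrupt servicing goes here */

		atomic_dec(&sp->isr_cnt);
		return IRQ_HANDLED;
	}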
 
@@ -4276,8 +4897,8 @@ static int s2io_card_up(nic_t * sp)
 		return -ENODEV;
 	}
 
-	/* 
-	 * Initializing the Rx buffers. For now we are considering only 1 
+	/*
+	 * Initializing the Rx buffers. For now we are considering only 1
 	 * Rx ring and initializing buffers into 30 Rx blocks
 	 */
 	mac_control = &sp->mac_control;
@@ -4311,16 +4932,18 @@ static int s2io_card_up(nic_t * sp)
 		return -ENODEV;
 	}
 
+	S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
+
 	atomic_set(&sp->card_state, CARD_UP);
 	return 0;
 }
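
S2IO_TIMER_CONF() is defined in the header and is not visible in this hunk; a plausible expansion, assuming it simply wraps the standard timer bootstrap, would be something like:

	/* Hypothetical expansion -- the real macro may differ. */
	#define S2IO_TIMER_CONF(timer, handle, arg, exp)		\
		do {							\
			init_timer(&timer);				\
			timer.function = handle;			\
			timer.data = (unsigned long) arg;		\
			timer.expires = jiffies + exp;			\
			add_timer(&timer);				\
		} while (0)

Under that reading, the call above arms sp->alarm_timer to run s2io_alarm_handle() half a second after the card comes up, and s2io_card_down() tears it down with del_timer_sync().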
 
-/** 
+/**
  * s2io_restart_nic - Resets the NIC.
  * @data : long pointer to the device private structure
  * Description:
  * This function is scheduled to be run by the s2io_tx_watchdog
- * function after 0.5 secs to reset the NIC. The idea is to reduce 
+ * function after 0.5 secs to reset the NIC. The idea is to reduce
  * the run time of the watch dog routine which is run holding a
  * spin lock.
  */
@@ -4338,10 +4961,11 @@ static void s2io_restart_nic(unsigned lo
 	netif_wake_queue(dev);
 	DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
 		  dev->name);
+
 }
 
-/** 
- *  s2io_tx_watchdog - Watchdog for transmit side. 
+/**
+ *  s2io_tx_watchdog - Watchdog for transmit side.
  *  @dev : Pointer to net device structure
  *  Description:
  *  This function is triggered if the Tx Queue is stopped
@@ -4369,7 +4993,7 @@ static void s2io_tx_watchdog(struct net_
  *   @len : length of the packet
  *   @cksum : FCS checksum of the frame.
  *   @ring_no : the ring from which this RxD was extracted.
- *   Description: 
+ *   Description:
 *   This function is called by the Rx interrupt service routine to perform
  *   some OS related operations on the SKB before passing it to the upper
  *   layers. It mainly checks if the checksum is OK, if so adds it to the
@@ -4379,35 +5003,68 @@ static void s2io_tx_watchdog(struct net_
  *   Return value:
  *   SUCCESS on success and -1 on failure.
  */
-#ifndef CONFIG_2BUFF_MODE
-static int rx_osm_handler(nic_t * sp, u16 len, RxD_t * rxdp, int ring_no)
-#else
-static int rx_osm_handler(nic_t * sp, RxD_t * rxdp, int ring_no,
-			  buffAdd_t * ba)
-#endif
+static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
 {
+	nic_t *sp = ring_data->nic;
 	struct net_device *dev = (struct net_device *) sp->dev;
-	struct sk_buff *skb =
-	    (struct sk_buff *) ((unsigned long) rxdp->Host_Control);
+	struct sk_buff *skb = (struct sk_buff *)
+		((unsigned long) rxdp->Host_Control);
+	int ring_no = ring_data->ring_no;
 	u16 l3_csum, l4_csum;
 #ifdef CONFIG_2BUFF_MODE
-	int buf0_len, buf2_len;
+	int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
+	int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
+	int get_block = ring_data->rx_curr_get_info.block_index;
+	int get_off = ring_data->rx_curr_get_info.offset;
+	buffAdd_t *ba = &ring_data->ba[get_block][get_off];
 	unsigned char *buff;
+#else
+	u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);
+#endif
+	skb->dev = dev;
+	if (rxdp->Control_1 & RXD_T_CODE) {
+		unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
+		DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
+			  dev->name, err);
+		dev_kfree_skb(skb);
+		sp->stats.rx_crc_errors++;
+		atomic_dec(&sp->rx_bufs_left[ring_no]);
+		rxdp->Host_Control = 0;
+		return 0;
+	}
+
+	/* Updating statistics */
+	rxdp->Host_Control = 0;
+	sp->rx_pkt_count++;
+	sp->stats.rx_packets++;
+#ifndef CONFIG_2BUFF_MODE
+	sp->stats.rx_bytes += len;
+#else
+	sp->stats.rx_bytes += buf0_len + buf2_len;
 #endif
 
-	l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
-	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && (sp->rx_csum)) {
+#ifndef CONFIG_2BUFF_MODE
+	skb_put(skb, len);
+#else
+	buff = skb_push(skb, buf0_len);
+	memcpy(buff, ba->ba_0, buf0_len);
+	skb_put(skb, buf2_len);
+#endif
+
+	if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
+	    (sp->rx_csum)) {
+		l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
 		l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
 		if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
-			/* 
+			/*
 			 * NIC verifies if the Checksum of the received
 			 * frame is Ok or not and accordingly returns
 			 * a flag in the RxD.
 			 */
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 		} else {
-			/* 
-			 * Packet with erroneous checksum, let the 
+			/*
+			 * Packet with erroneous checksum, let the
 			 * upper layers deal with it.
 			 */
 			skb->ip_summed = CHECKSUM_NONE;
@@ -4416,44 +5073,26 @@ static int rx_osm_handler(nic_t * sp, Rx
 		skb->ip_summed = CHECKSUM_NONE;
 	}
 
-	if (rxdp->Control_1 & RXD_T_CODE) {
-		unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
-		DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
-			  dev->name, err);
-	}
-#ifdef CONFIG_2BUFF_MODE
-	buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
-	buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
-#endif
-
-	skb->dev = dev;
-#ifndef CONFIG_2BUFF_MODE
-	skb_put(skb, len);
-	skb->protocol = eth_type_trans(skb, dev);
-#else
-	buff = skb_push(skb, buf0_len);
-	memcpy(buff, ba->ba_0, buf0_len);
-	skb_put(skb, buf2_len);
 	skb->protocol = eth_type_trans(skb, dev);
-#endif
-
 #ifdef CONFIG_S2IO_NAPI
-	netif_receive_skb(skb);
+	if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
+		/* Queueing the vlan frame to the upper layer */
+		vlan_hwaccel_receive_skb(skb, sp->vlgrp,
+			RXD_GET_VLAN_TAG(rxdp->Control_2));
+	} else {
+		netif_receive_skb(skb);
+	}
 #else
-	netif_rx(skb);
+	if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
+		/* Queueing the vlan frame to the upper layer */
+		vlan_hwaccel_rx(skb, sp->vlgrp,
+			RXD_GET_VLAN_TAG(rxdp->Control_2));
+	} else {
+		netif_rx(skb);
+	}
 #endif
-
 	dev->last_rx = jiffies;
-	sp->rx_pkt_count++;
-	sp->stats.rx_packets++;
-#ifndef CONFIG_2BUFF_MODE
-	sp->stats.rx_bytes += len;
-#else
-	sp->stats.rx_bytes += buf0_len + buf2_len;
-#endif
-
 	atomic_dec(&sp->rx_bufs_left[ring_no]);
-	rxdp->Host_Control = 0;
 	return SUCCESS;
 }
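
The VLAN-accelerated paths above rely on sp->vlgrp having been filled in by the dev->vlan_rx_register hook this patch installs in s2io_init_nic(). The hook body is not part of this excerpt; a minimal sketch of what such a handler conventionally does (an assumption, not the driver's verbatim code, and the lock shown is only illustrative):

	static void example_vlan_rx_register(struct net_device *dev,
					     struct vlan_group *grp)
	{
		nic_t *sp = dev->priv;
		unsigned long flags;

		spin_lock_irqsave(&sp->rx_lock, flags);
		sp->vlgrp = grp;	/* consumed by rx_osm_handler() */
		spin_unlock_irqrestore(&sp->rx_lock, flags);
	}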
 
@@ -4464,13 +5103,13 @@ static int rx_osm_handler(nic_t * sp, Rx
 *  @link : indicates whether link is UP/DOWN.
  *  Description:
  *  This function stops/starts the Tx queue depending on whether the link
- *  status of the NIC is is down or up. This is called by the Alarm 
- *  interrupt handler whenever a link change interrupt comes up. 
+ *  status of the NIC is down or up. This is called by the Alarm
+ *  interrupt handler whenever a link change interrupt comes up.
  *  Return value:
  *  void.
  */
 
-static void s2io_link(nic_t * sp, int link)
+void s2io_link(nic_t * sp, int link)
 {
 	struct net_device *dev = (struct net_device *) sp->dev;
 
@@ -4487,8 +5126,25 @@ static void s2io_link(nic_t * sp, int li
 }
 
 /**
- *  s2io_init_pci -Initialization of PCI and PCI-X configuration registers . 
- *  @sp : private member of the device structure, which is a pointer to the 
+ *  get_xena_rev_id - to identify revision ID of xena.
+ *  @pdev : PCI Dev structure
+ *  Description:
+ *  Function to identify the Revision ID of xena.
+ *  Return value:
+ *  returns the revision ID of the device.
+ */
+
+int get_xena_rev_id(struct pci_dev *pdev)
+{
+	u8 id = 0;
+
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &id);
+	return id;
+}
+
+/**
+ *  s2io_init_pci - Initialization of PCI and PCI-X configuration registers.
+ *  @sp : private member of the device structure, which is a pointer to the
  *  s2io_nic structure.
  *  Description:
  *  This function initializes a few of the PCI and PCI-X configuration registers
@@ -4499,15 +5155,15 @@ static void s2io_link(nic_t * sp, int li
 
 static void s2io_init_pci(nic_t * sp)
 {
-	u16 pci_cmd = 0;
+	u16 pci_cmd = 0, pcix_cmd = 0;
 
 	/* Enable Data Parity Error Recovery in PCI-X command register. */
 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
-			     &(sp->pcix_cmd));
+			     &(pcix_cmd));
 	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
-			      (sp->pcix_cmd | 1));
+			      (pcix_cmd | 1));
 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
-			     &(sp->pcix_cmd));
+			     &(pcix_cmd));
 
 	/* Set the PErr Response bit in PCI command register. */
 	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
@@ -4515,53 +5171,43 @@ static void s2io_init_pci(nic_t * sp)
 			      (pci_cmd | PCI_COMMAND_PARITY));
 	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
 
-	/* Set MMRB count to 1024 in PCI-X Command register. */
-	sp->pcix_cmd &= 0xFFF3;
-	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, (sp->pcix_cmd | (0x1 << 2)));	/* MMRBC 1K */
-	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
-			     &(sp->pcix_cmd));
-
-	/*  Setting Maximum outstanding splits based on system type. */
-	sp->pcix_cmd &= 0xFF8F;
-
-	sp->pcix_cmd |= XENA_MAX_OUTSTANDING_SPLITS(0x1);	/* 2 splits. */
-	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
-			      sp->pcix_cmd);
-	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
-			     &(sp->pcix_cmd));
 	/* Forcibly disabling relaxed ordering capability of the card. */
-	sp->pcix_cmd &= 0xfffd;
+	pcix_cmd &= 0xfffd;
 	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
-			      sp->pcix_cmd);
+			      pcix_cmd);
 	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
-			     &(sp->pcix_cmd));
+			     &(pcix_cmd));
 }
 
 MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
 MODULE_LICENSE("GPL");
 module_param(tx_fifo_num, int, 0);
-module_param_array(tx_fifo_len, int, NULL, 0);
 module_param(rx_ring_num, int, 0);
-module_param_array(rx_ring_sz, int, NULL, 0);
-module_param(Stats_refresh_time, int, 0);
+module_param_array(tx_fifo_len, uint, NULL, 0);
+module_param_array(rx_ring_sz, uint, NULL, 0);
+module_param_array(rts_frm_len, uint, NULL, 0);
+module_param(use_continuous_tx_intrs, int, 0);
 module_param(rmac_pause_time, int, 0);
 module_param(mc_pause_threshold_q0q3, int, 0);
 module_param(mc_pause_threshold_q4q7, int, 0);
 module_param(shared_splits, int, 0);
 module_param(tmac_util_period, int, 0);
 module_param(rmac_util_period, int, 0);
+module_param(bimodal, bool, 0);
 #ifndef CONFIG_S2IO_NAPI
 module_param(indicate_max_pkts, int, 0);
 #endif
+module_param(rxsync_frequency, int, 0);
+
 /**
- *  s2io_init_nic - Initialization of the adapter . 
+ *  s2io_init_nic - Initialization of the adapter.
  *  @pdev : structure containing the PCI related information of the device.
  *  @pre: List of PCI devices supported by the driver listed in s2io_tbl.
  *  Description:
 *  The function initializes an adapter identified by the pci_dev structure.
- *  All OS related initialization including memory and device structure and 
- *  initlaization of the device private variable is done. Also the swapper 
- *  control register is initialized to enable read and write into the I/O 
+ *  All OS related initialization including memory and device structure and
+ *  initialization of the device private variable is done. Also the swapper
+ *  control register is initialized to enable read and write into the I/O
  *  registers of the device.
  *  Return value:
  *  returns 0 on success and negative on failure.
@@ -4572,7 +5218,6 @@ s2io_init_nic(struct pci_dev *pdev, cons
 {
 	nic_t *sp;
 	struct net_device *dev;
-	char *dev_name = "S2IO 10GE NIC";
 	int i, j, ret;
 	int dma_flag = FALSE;
 	u32 mac_up, mac_down;
@@ -4581,10 +5226,11 @@ s2io_init_nic(struct pci_dev *pdev, cons
 	u16 subid;
 	mac_info_t *mac_control;
 	struct config_param *config;
+	int mode;
 
-
-	DBG_PRINT(ERR_DBG, "Loading S2IO driver with %s\n",
-		s2io_driver_version);
+#ifdef CONFIG_S2IO_NAPI
+	DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
+#endif
 
 	if ((ret = pci_enable_device(pdev))) {
 		DBG_PRINT(ERR_DBG,
@@ -4595,7 +5241,6 @@ s2io_init_nic(struct pci_dev *pdev, cons
 	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
 		DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
 		dma_flag = TRUE;
-
 		if (pci_set_consistent_dma_mask
 		    (pdev, DMA_64BIT_MASK)) {
 			DBG_PRINT(ERR_DBG,
@@ -4635,34 +5280,41 @@ s2io_init_nic(struct pci_dev *pdev, cons
 	memset(sp, 0, sizeof(nic_t));
 	sp->dev = dev;
 	sp->pdev = pdev;
-	sp->vendor_id = pdev->vendor;
-	sp->device_id = pdev->device;
 	sp->high_dma_flag = dma_flag;
-	sp->irq = pdev->irq;
 	sp->device_enabled_once = FALSE;
-	strcpy(sp->name, dev_name);
+
+	if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
+		(pdev->device == PCI_DEVICE_ID_HERC_UNI))
+		sp->device_type = XFRAME_II_DEVICE;
+	else
+		sp->device_type = XFRAME_I_DEVICE;
 
 	/* Initialize some PCI/PCI-X fields of the NIC. */
 	s2io_init_pci(sp);
 
-	/* 
+	/*
 	 * Setting the device configuration parameters.
-	 * Most of these parameters can be specified by the user during 
-	 * module insertion as they are module loadable parameters. If 
-	 * these parameters are not not specified during load time, they 
+	 * Most of these parameters can be specified by the user during
+	 * module insertion as they are module loadable parameters. If
+	 * these parameters are not not specified during load time, they
 	 * are initialized with default values.
 	 */
 	mac_control = &sp->mac_control;
 	config = &sp->config;
 
 	/* Tx side parameters. */
-	tx_fifo_len[0] = DEFAULT_FIFO_LEN;	/* Default value. */
+	if (tx_fifo_len[0] == 0)
+		tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */
 	config->tx_fifo_num = tx_fifo_num;
 	for (i = 0; i < MAX_TX_FIFOS; i++) {
 		config->tx_cfg[i].fifo_len = tx_fifo_len[i];
 		config->tx_cfg[i].fifo_priority = i;
 	}
 
+	/* mapping the QoS priority to the configured fifos */
+	for (i = 0; i < MAX_TX_FIFOS; i++)
+		config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
+
 	config->tx_intr_type = TXD_INT_TYPE_UTILZ;
 	for (i = 0; i < config->tx_fifo_num; i++) {
 		config->tx_cfg[i].f_no_snoop =
@@ -4675,7 +5327,8 @@ s2io_init_nic(struct pci_dev *pdev, cons
 	config->max_txds = MAX_SKB_FRAGS;
 
 	/* Rx side parameters. */
-	rx_ring_sz[0] = SMALL_BLK_CNT;	/* Default value. */
+	if (rx_ring_sz[0] == 0)
+		rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */
 	config->rx_ring_num = rx_ring_num;
 	for (i = 0; i < MAX_RX_RINGS; i++) {
 		config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
@@ -4699,10 +5352,13 @@ s2io_init_nic(struct pci_dev *pdev, cons
 	for (i = 0; i < config->rx_ring_num; i++)
 		atomic_set(&sp->rx_bufs_left[i], 0);
 
+	/* Initialize the number of ISRs currently running */
+	atomic_set(&sp->isr_cnt, 0);
+
 	/*  initialize the shared memory used by the NIC and the host */
 	if (init_shared_mem(sp)) {
 		DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
-			  dev->name);
+			  __FUNCTION__);
 		ret = -ENOMEM;
 		goto mem_alloc_failed;
 	}
@@ -4743,13 +5399,17 @@ s2io_init_nic(struct pci_dev *pdev, cons
 	dev->do_ioctl = &s2io_ioctl;
 	dev->change_mtu = &s2io_change_mtu;
 	SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+	dev->vlan_rx_register = s2io_vlan_rx_register;
+	dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
+
 	/*
 	 * will use eth_mac_addr() for  dev->set_mac_address
 	 * mac address will be set every time dev->open() is called
 	 */
-#ifdef CONFIG_S2IO_NAPI
+#if defined(CONFIG_S2IO_NAPI)
 	dev->poll = s2io_poll;
-	dev->weight = 90;
+	dev->weight = 32;
 #endif
 
 	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
@@ -4766,7 +5426,9 @@ s2io_init_nic(struct pci_dev *pdev, cons
 	INIT_WORK(&sp->set_link_task,
 		  (void (*)(void *)) s2io_set_link, sp);
 
-	pci_save_state(sp->pdev);
+	if (!(sp->device_type & XFRAME_II_DEVICE)) {
+		pci_save_state(sp->pdev);
+	}
 
 	/* Setting swapper control on the NIC, for proper reset operation */
 	if (s2io_set_swapper(sp)) {
@@ -4776,22 +5438,28 @@ s2io_init_nic(struct pci_dev *pdev, cons
 		goto set_swap_failed;
 	}
 
-	/* Fix for all "FFs" MAC address problems observed on Alpha platforms */
-	fix_mac_address(sp);
-	s2io_reset(sp);
+	/* Verify if the Herc works on the slot its placed into */
+	if (sp->device_type & XFRAME_II_DEVICE) {
+		mode = s2io_verify_pci_mode(sp);
+		if (mode < 0) {
+			DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
+			DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
+			ret = -EBADSLT;
+			goto set_swap_failed;
+		}
+	}
 
-	/*
-	 * Setting swapper control on the NIC, so the MAC address can be read.
-	 */
-	if (s2io_set_swapper(sp)) {
-		DBG_PRINT(ERR_DBG,
-			  "%s: S2IO: swapper settings are wrong\n",
-			  dev->name);
-		ret = -EAGAIN;
-		goto set_swap_failed;
+	/* Not needed for Herc */
+	if (sp->device_type & XFRAME_I_DEVICE) {
+		/*
+		 * Fix for all "FFs" MAC address problems observed on
+		 * Alpha platforms
+		 */
+		fix_mac_address(sp);
+		s2io_reset(sp);
 	}
 
-	/*  
+	/*
 	 * MAC address initialization.
 	 * For now only one mac address will be read and used.
 	 */
@@ -4814,37 +5482,28 @@ s2io_init_nic(struct pci_dev *pdev, cons
 	sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
 	sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
 
-	DBG_PRINT(INIT_DBG,
-		  "DEFAULT MAC ADDR:0x%02x-%02x-%02x-%02x-%02x-%02x\n",
-		  sp->def_mac_addr[0].mac_addr[0],
-		  sp->def_mac_addr[0].mac_addr[1],
-		  sp->def_mac_addr[0].mac_addr[2],
-		  sp->def_mac_addr[0].mac_addr[3],
-		  sp->def_mac_addr[0].mac_addr[4],
-		  sp->def_mac_addr[0].mac_addr[5]);
-
 	/*  Set the factory defined MAC address initially   */
 	dev->addr_len = ETH_ALEN;
 	memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
 
 	/*
-	 * Initialize the tasklet status and link state flags 
-	 * and the card statte parameter
+	 * Initialize the tasklet status and link state flags
+	 * and the card state parameter
 	 */
 	atomic_set(&(sp->card_state), 0);
 	sp->tasklet_status = 0;
 	sp->link_state = 0;
 
-
 	/* Initialize spinlocks */
 	spin_lock_init(&sp->tx_lock);
 #ifndef CONFIG_S2IO_NAPI
 	spin_lock_init(&sp->put_lock);
 #endif
+	spin_lock_init(&sp->rx_lock);
 
-	/* 
-	 * SXE-002: Configure link and activity LED to init state 
-	 * on driver load. 
+	/*
+	 * SXE-002: Configure link and activity LED to init state
+	 * on driver load.
 	 */
 	subid = sp->pdev->subsystem_device;
 	if ((subid & 0xFF) >= 0x07) {
@@ -4864,13 +5523,61 @@ s2io_init_nic(struct pci_dev *pdev, cons
 		goto register_failed;
 	}
 
-	/* 
-	 * Make Link state as off at this point, when the Link change 
-	 * interrupt comes the state will be automatically changed to 
+	if (sp->device_type & XFRAME_II_DEVICE) {
+		DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ",
+			  dev->name);
+		DBG_PRINT(ERR_DBG, "(rev %d), Driver %s\n",
+				get_xena_rev_id(sp->pdev),
+				s2io_driver_version);
+		DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
+			  sp->def_mac_addr[0].mac_addr[0],
+			  sp->def_mac_addr[0].mac_addr[1],
+			  sp->def_mac_addr[0].mac_addr[2],
+			  sp->def_mac_addr[0].mac_addr[3],
+			  sp->def_mac_addr[0].mac_addr[4],
+			  sp->def_mac_addr[0].mac_addr[5]);
+		mode = s2io_print_pci_mode(sp);
+		if (mode < 0) {
+			DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode ");
+			ret = -EBADSLT;
+			goto set_swap_failed;
+		}
+	} else {
+		DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ",
+			  dev->name);
+		DBG_PRINT(ERR_DBG, "(rev %d), Driver %s\n",
+					get_xena_rev_id(sp->pdev),
+					s2io_driver_version);
+		DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
+			  sp->def_mac_addr[0].mac_addr[0],
+			  sp->def_mac_addr[0].mac_addr[1],
+			  sp->def_mac_addr[0].mac_addr[2],
+			  sp->def_mac_addr[0].mac_addr[3],
+			  sp->def_mac_addr[0].mac_addr[4],
+			  sp->def_mac_addr[0].mac_addr[5]);
+	}
+
+	/* Initialize device name */
+	strcpy(sp->name, dev->name);
+	if (sp->device_type & XFRAME_II_DEVICE)
+		strcat(sp->name, ": Neterion Xframe II 10GbE adapter");
+	else
+		strcat(sp->name, ": Neterion Xframe I 10GbE adapter");
+
+	/* Initialize bimodal Interrupts */
+	sp->config.bimodal = bimodal;
+	if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
+		sp->config.bimodal = 0;
+		DBG_PRINT(ERR_DBG, "%s: Bimodal intr not supported by Xframe I\n",
+			dev->name);
+	}
+
+	/*
+	 * Make Link state as off at this point, when the Link change
+	 * interrupt comes the state will be automatically changed to
 	 * the right state.
 	 */
 	netif_carrier_off(dev);
-	sp->last_link_state = LINK_DOWN;
 
 	return 0;
 
@@ -4891,11 +5598,11 @@ s2io_init_nic(struct pci_dev *pdev, cons
 }
 
 /**
- * s2io_rem_nic - Free the PCI device 
+ * s2io_rem_nic - Free the PCI device
  * @pdev: structure containing the PCI related information of the device.
- * Description: This function is called by the Pci subsystem to release a 
+ * Description: This function is called by the Pci subsystem to release a
  * PCI device and free up all resource held up by the device. This could
- * be in response to a Hot plug event or when the driver is to be removed 
+ * be in response to a Hot plug event or when the driver is to be removed
  * from memory.
  */
 
@@ -4919,7 +5626,6 @@ static void __devexit s2io_rem_nic(struc
 	pci_disable_device(pdev);
 	pci_release_regions(pdev);
 	pci_set_drvdata(pdev, NULL);
-
 	free_netdev(dev);
 }
 
@@ -4935,11 +5641,11 @@ int __init s2io_starter(void)
 }
 
 /**
- * s2io_closer - Cleanup routine for the driver 
+ * s2io_closer - Cleanup routine for the driver
 * Description: This function is the cleanup routine for the driver. It
 * unregisters the driver.
  */
 
-static void s2io_closer(void)
+void s2io_closer(void)
 {
 	pci_unregister_driver(&s2io_driver);
 	DBG_PRINT(INIT_DBG, "cleanup done\n");
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -31,6 +31,9 @@
 #define SUCCESS 0
 #define FAILURE -1
 
+/* Maximum time to flicker LED when asked to identify NIC using ethtool */
+#define MAX_FLICKER_TIME	60000 /* 60 Secs */
+
 /* Maximum outstanding splits to be configured into xena. */
 typedef enum xena_max_outstanding_splits {
 	XENA_ONE_SPLIT_TRANSACTION = 0,
@@ -45,10 +48,10 @@ typedef enum xena_max_outstanding_splits
 #define XENA_MAX_OUTSTANDING_SPLITS(n) (n << 4)
 
 /*  OS concerned variables and constants */
-#define WATCH_DOG_TIMEOUT   	5*HZ
-#define EFILL       			0x1234
-#define ALIGN_SIZE  			127
-#define	PCIX_COMMAND_REGISTER	0x62
+#define WATCH_DOG_TIMEOUT		15*HZ
+#define EFILL				0x1234
+#define ALIGN_SIZE			127
+#define	PCIX_COMMAND_REGISTER		0x62
 
 /*
  * Debug related variables.
@@ -61,7 +64,7 @@ typedef enum xena_max_outstanding_splits
 #define	INTR_DBG	4
 
 /* Global variable that defines the present debug level of the driver. */
-static int debug_level = ERR_DBG;	/* Default level. */
+int debug_level = ERR_DBG;	/* Default level. */
 
 /* DEBUG message print. */
 #define DBG_PRINT(dbg_level, args...)  if(!(debug_level<dbg_level)) printk(args)
@@ -71,6 +74,12 @@ static int debug_level = ERR_DBG;	/* Def
 #define L4_CKSUM_OK 0xFFFF
 #define S2IO_JUMBO_SIZE 9600
 
+/* Driver statistics maintained by driver */
+typedef struct {
+	unsigned long long single_ecc_errs;
+	unsigned long long double_ecc_errs;
+} swStat_t;
+
 /* The statistics block of Xena */
 typedef struct stat_block {
 /* Tx MAC statistics counters. */
@@ -186,12 +195,90 @@ typedef struct stat_block {
 	u32 rxd_rd_cnt;
 	u32 rxf_wr_cnt;
 	u32 txf_rd_cnt;
+
+/* Tx MAC statistics overflow counters. */
+	u32 tmac_data_octets_oflow;
+	u32 tmac_frms_oflow;
+	u32 tmac_bcst_frms_oflow;
+	u32 tmac_mcst_frms_oflow;
+	u32 tmac_ucst_frms_oflow;
+	u32 tmac_ttl_octets_oflow;
+	u32 tmac_any_err_frms_oflow;
+	u32 tmac_nucst_frms_oflow;
+	u64 tmac_vlan_frms;
+	u32 tmac_drop_ip_oflow;
+	u32 tmac_vld_ip_oflow;
+	u32 tmac_rst_tcp_oflow;
+	u32 tmac_icmp_oflow;
+	u32 tpa_unknown_protocol;
+	u32 tmac_udp_oflow;
+	u32 reserved_10;
+	u32 tpa_parse_failure;
+
+/* Rx MAC Statistics overflow counters. */
+	u32 rmac_data_octets_oflow;
+	u32 rmac_vld_frms_oflow;
+	u32 rmac_vld_bcst_frms_oflow;
+	u32 rmac_vld_mcst_frms_oflow;
+	u32 rmac_accepted_ucst_frms_oflow;
+	u32 rmac_ttl_octets_oflow;
+	u32 rmac_discarded_frms_oflow;
+	u32 rmac_accepted_nucst_frms_oflow;
+	u32 rmac_usized_frms_oflow;
+	u32 rmac_drop_events_oflow;
+	u32 rmac_frag_frms_oflow;
+	u32 rmac_osized_frms_oflow;
+	u32 rmac_ip_oflow;
+	u32 rmac_jabber_frms_oflow;
+	u32 rmac_icmp_oflow;
+	u32 rmac_drop_ip_oflow;
+	u32 rmac_err_drp_udp_oflow;
+	u32 rmac_udp_oflow;
+	u32 reserved_11;
+	u32 rmac_pause_cnt_oflow;
+	u64 rmac_ttl_1519_4095_frms;
+	u64 rmac_ttl_4096_8191_frms;
+	u64 rmac_ttl_8192_max_frms;
+	u64 rmac_ttl_gt_max_frms;
+	u64 rmac_osized_alt_frms;
+	u64 rmac_jabber_alt_frms;
+	u64 rmac_gt_max_alt_frms;
+	u64 rmac_vlan_frms;
+	u32 rmac_len_discard;
+	u32 rmac_fcs_discard;
+	u32 rmac_pf_discard;
+	u32 rmac_da_discard;
+	u32 rmac_red_discard;
+	u32 rmac_rts_discard;
+	u32 reserved_12;
+	u32 rmac_ingm_full_discard;
+	u32 reserved_13;
+	u32 rmac_accepted_ip_oflow;
+	u32 reserved_14;
+	u32 link_fault_cnt;
+	swStat_t sw_stat;
 } StatInfo_t;
 
-/* Structures representing different init time configuration
+/*
+ * Structures representing different init time configuration
  * parameters of the NIC.
  */
 
+#define MAX_TX_FIFOS 8
+#define MAX_RX_RINGS 8
+
+/* FIFO mappings for all possible number of fifos configured */
+int fifo_map[][MAX_TX_FIFOS] = {
+	{0, 0, 0, 0, 0, 0, 0, 0},
+	{0, 0, 0, 0, 1, 1, 1, 1},
+	{0, 0, 0, 1, 1, 1, 2, 2},
+	{0, 0, 1, 1, 2, 2, 3, 3},
+	{0, 0, 1, 1, 2, 2, 3, 4},
+	{0, 0, 1, 1, 2, 3, 4, 5},
+	{0, 0, 1, 2, 3, 4, 5, 6},
+	{0, 1, 2, 3, 4, 5, 6, 7},
+};
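
Reading the table: each row spreads the eight 802.1p priorities over a growing number of FIFOs, e.g. the row {0, 0, 0, 1, 1, 1, 2, 2} uses three FIFOs, sending priorities 0-2 to FIFO 0, 3-5 to FIFO 1 and 6-7 to FIFO 2. Once s2io_init_nic() has copied the selected row into config->fifo_mapping, resolving the FIFO for a frame is a single lookup; a small illustrative helper (not part of the patch):

	static inline int example_fifo_for_priority(struct config_param *config,
						    u8 priority)
	{
		return config->fifo_mapping[priority & 0x7];
	}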
+
 /* Maintains Per FIFO related information. */
 typedef struct tx_fifo_config {
 #define	MAX_AVAILABLE_TXDS	8192
@@ -237,14 +324,14 @@ typedef struct rx_ring_config {
 #define NO_SNOOP_RXD_BUFFER         0x02
 } rx_ring_config_t;
 
-/* This structure provides contains values of the tunable parameters 
- * of the H/W 
+/* This structure contains values of the tunable parameters
+ * of the H/W
  */
 struct config_param {
 /* Tx Side */
 	u32 tx_fifo_num;	/*Number of Tx FIFOs */
-#define MAX_TX_FIFOS 8
 
+	u8 fifo_mapping[MAX_TX_FIFOS];
 	tx_fifo_config_t tx_cfg[MAX_TX_FIFOS];	/*Per-Tx FIFO config */
 	u32 max_txds;		/*Max no. of Tx buffer descriptor per TxDL */
 	u64 tx_intr_type;
@@ -252,10 +339,10 @@ struct config_param {
 
 /* Rx Side */
 	u32 rx_ring_num;	/*Number of receive rings */
-#define MAX_RX_RINGS 8
 #define MAX_RX_BLOCKS_PER_RING  150
 
 	rx_ring_config_t rx_cfg[MAX_RX_RINGS];	/*Per-Rx Ring config */
+	u8 bimodal;		/*Flag for setting bimodal interrupts*/
 
 #define HEADER_ETHERNET_II_802_3_SIZE 14
 #define HEADER_802_2_SIZE              3
@@ -269,6 +356,7 @@ struct config_param {
 #define MAX_PYLD_JUMBO              9600
 #define MAX_MTU_JUMBO               (MAX_PYLD_JUMBO+18)
 #define MAX_MTU_JUMBO_VLAN          (MAX_PYLD_JUMBO+22)
+	u16 bus_speed;
 };
 
 /* Structure representing MAC Addrs */
@@ -277,7 +365,7 @@ typedef struct mac_addr {
 } macaddr_t;
 
 /* Structure that represent every FIFO element in the BAR1
- * Address location. 
+ * Address location.
  */
 typedef struct _TxFIFO_element {
 	u64 TxDL_Pointer;
@@ -339,6 +427,7 @@ typedef struct _RxD_t {
 #define RXD_FRAME_PROTO         vBIT(0xFFFF,24,8)
 #define RXD_FRAME_PROTO_IPV4    BIT(27)
 #define RXD_FRAME_PROTO_IPV6    BIT(28)
+#define RXD_FRAME_IP_FRAG	BIT(29)
 #define RXD_FRAME_PROTO_TCP     BIT(30)
 #define RXD_FRAME_PROTO_UDP     BIT(31)
 #define TCP_OR_UDP_FRAME        (RXD_FRAME_PROTO_TCP | RXD_FRAME_PROTO_UDP)
@@ -346,11 +435,15 @@ typedef struct _RxD_t {
 #define RXD_GET_L4_CKSUM(val)   ((u16)(val) & 0xFFFF)
 
 	u64 Control_2;
+#define	THE_RXD_MARK		0x3
+#define	SET_RXD_MARKER		vBIT(THE_RXD_MARK, 0, 2)
+#define	GET_RXD_MARKER(ctrl)	((ctrl & SET_RXD_MARKER) >> 62)
+
 #ifndef CONFIG_2BUFF_MODE
-#define MASK_BUFFER0_SIZE       vBIT(0xFFFF,0,16)
-#define SET_BUFFER0_SIZE(val)   vBIT(val,0,16)
+#define MASK_BUFFER0_SIZE       vBIT(0x3FFF,2,14)
+#define SET_BUFFER0_SIZE(val)   vBIT(val,2,14)
 #else
-#define MASK_BUFFER0_SIZE       vBIT(0xFF,0,16)
+#define MASK_BUFFER0_SIZE       vBIT(0xFF,2,14)
 #define MASK_BUFFER1_SIZE       vBIT(0xFFFF,16,16)
 #define MASK_BUFFER2_SIZE       vBIT(0xFFFF,32,16)
 #define SET_BUFFER0_SIZE(val)   vBIT(val,8,8)
@@ -363,7 +456,7 @@ typedef struct _RxD_t {
 #define SET_NUM_TAG(val)       vBIT(val,16,32)
 
 #ifndef CONFIG_2BUFF_MODE
-#define RXD_GET_BUFFER0_SIZE(Control_2) (u64)((Control_2 & vBIT(0xFFFF,0,16)))
+#define RXD_GET_BUFFER0_SIZE(Control_2) (u64)((Control_2 & vBIT(0x3FFF,2,14)))
 #else
 #define RXD_GET_BUFFER0_SIZE(Control_2) (u8)((Control_2 & MASK_BUFFER0_SIZE) \
 							>> 48)
@@ -382,7 +475,7 @@ typedef struct _RxD_t {
 #endif
 } RxD_t;
 
-/* Structure that represents the Rx descriptor block which contains 
+/* Structure that represents the Rx descriptor block which contains
  * 128 Rx descriptors.
  */
 #ifndef CONFIG_2BUFF_MODE
@@ -392,11 +485,11 @@ typedef struct _RxD_block {
 
 	u64 reserved_0;
 #define END_OF_BLOCK    0xFEFFFFFFFFFFFFFFULL
-	u64 reserved_1;		/* 0xFEFFFFFFFFFFFFFF to mark last 
+	u64 reserved_1;		/* 0xFEFFFFFFFFFFFFFF to mark last
 				 * Rxd in this blk */
 	u64 reserved_2_pNext_RxD_block;	/* Logical ptr to next */
 	u64 pNext_RxD_Blk_physical;	/* Buff0_ptr.In a 32 bit arch
-					 * the upper 32 bits should 
+					 * the upper 32 bits should
 					 * be 0 */
 } RxD_block_t;
 #else
@@ -405,13 +498,13 @@ typedef struct _RxD_block {
 	RxD_t rxd[MAX_RXDS_PER_BLOCK];
 
 #define END_OF_BLOCK    0xFEFFFFFFFFFFFFFFULL
-	u64 reserved_1;		/* 0xFEFFFFFFFFFFFFFF to mark last Rxd 
+	u64 reserved_1;		/* 0xFEFFFFFFFFFFFFFF to mark last Rxd
 				 * in this blk */
 	u64 pNext_RxD_Blk_physical;	/* Phy pointer to next blk. */
 } RxD_block_t;
 #define SIZE_OF_BLOCK	4096
 
-/* Structure to hold virtual addresses of Buf0 and Buf1 in 
+/* Structure to hold virtual addresses of Buf0 and Buf1 in
  * 2buf mode. */
 typedef struct bufAdd {
 	void *ba_0_org;
@@ -423,8 +516,8 @@ typedef struct bufAdd {
 
 /* Structure which stores all the MAC control parameters */
 
-/* This structure stores the offset of the RxD in the ring 
- * from which the Rx Interrupt processor can start picking 
+/* This structure stores the offset of the RxD in the ring
+ * from which the Rx Interrupt processor can start picking
  * up the RxDs for processing.
  */
 typedef struct _rx_curr_get_info_t {
@@ -436,7 +529,7 @@ typedef struct _rx_curr_get_info_t {
 typedef rx_curr_get_info_t rx_curr_put_info_t;
 
 /* This structure stores the offset of the TxDl in the FIFO
- * from which the Tx Interrupt processor can start picking 
+ * from which the Tx Interrupt processor can start picking
  * up the TxDLs for send complete interrupt processing.
  */
 typedef struct {
@@ -446,32 +539,96 @@ typedef struct {
 
 typedef tx_curr_get_info_t tx_curr_put_info_t;
 
-/* Infomation related to the Tx and Rx FIFOs and Rings of Xena
- * is maintained in this structure.
- */
-typedef struct mac_info {
-/* rx side stuff */
-	/* Put pointer info which indictes which RxD has to be replenished 
+/* Structure that holds the Phy and virt addresses of the Blocks */
+typedef struct rx_block_info {
+	RxD_t *block_virt_addr;
+	dma_addr_t block_dma_addr;
+} rx_block_info_t;
+
+/* pre declaration of the nic structure */
+typedef struct s2io_nic nic_t;
+
+/* Ring specific structure */
+typedef struct ring_info {
+	/* The ring number */
+	int ring_no;
+
+	/*
+	 *  Place holders for the virtual and physical addresses of
+	 *  all the Rx Blocks
+	 */
+	rx_block_info_t rx_blocks[MAX_RX_BLOCKS_PER_RING];
+	int block_count;
+	int pkt_cnt;
+
+	/*
+	 * Put pointer info which indicates which RxD has to be replenished
 	 * with a new buffer.
 	 */
-	rx_curr_put_info_t rx_curr_put_info[MAX_RX_RINGS];
+	rx_curr_put_info_t rx_curr_put_info;
 
-	/* Get pointer info which indictes which is the last RxD that was 
+	/*
+	 * Get pointer info which indicates which is the last RxD that was
 	 * processed by the driver.
 	 */
-	rx_curr_get_info_t rx_curr_get_info[MAX_RX_RINGS];
+	rx_curr_get_info_t rx_curr_get_info;
 
-	u16 rmac_pause_time;
-	u16 mc_pause_threshold_q0q3;
-	u16 mc_pause_threshold_q4q7;
+#ifndef CONFIG_S2IO_NAPI
+	/* Index to the absolute position of the put pointer of Rx ring */
+	int put_pos;
+#endif
+
+#ifdef CONFIG_2BUFF_MODE
+	/* Buffer Address store. */
+	buffAdd_t **ba;
+#endif
+	nic_t *nic;
+} ring_info_t;
 
+/* Fifo specific structure */
+typedef struct fifo_info {
+	/* FIFO number */
+	int fifo_no;
+
+	/* Maximum TxDs per TxDL */
+	int max_txds;
+
+	/* Place holder of all the TX List's Phy and Virt addresses. */
+	list_info_hold_t *list_info;
+
+	/*
+	 * Current offset within the tx FIFO where driver would write
+	 * new Tx frame
+	 */
+	tx_curr_put_info_t tx_curr_put_info;
+
+	/*
+	 * Current offset within tx FIFO from where the driver would start freeing
+	 * the buffers
+	 */
+	tx_curr_get_info_t tx_curr_get_info;
+
+	nic_t *nic;
+} fifo_info_t;
+
+/* Information related to the Tx and Rx FIFOs and Rings of Xena
+ * is maintained in this structure.
+ */
+typedef struct mac_info {
 /* tx side stuff */
 	/* logical pointer of start of each Tx FIFO */
 	TxFIFO_element_t __iomem *tx_FIFO_start[MAX_TX_FIFOS];
 
-/* Current offset within tx_FIFO_start, where driver would write new Tx frame*/
-	tx_curr_put_info_t tx_curr_put_info[MAX_TX_FIFOS];
-	tx_curr_get_info_t tx_curr_get_info[MAX_TX_FIFOS];
+	/* Fifo specific structure */
+	fifo_info_t fifos[MAX_TX_FIFOS];
+
+/* rx side stuff */
+	/* Ring specific structure */
+	ring_info_t rings[MAX_RX_RINGS];
+
+	u16 rmac_pause_time;
+	u16 mc_pause_threshold_q0q3;
+	u16 mc_pause_threshold_q4q7;
 
 	void *stats_mem;	/* original pointer to allocated mem */
 	dma_addr_t stats_mem_phy;	/* Physical address of the stat block */
@@ -485,12 +642,6 @@ typedef struct {
 	int usage_cnt;
 } usr_addr_t;
 
-/* Structure that holds the Phy and virt addresses of the Blocks */
-typedef struct rx_block_info {
-	RxD_t *block_virt_addr;
-	dma_addr_t block_dma_addr;
-} rx_block_info_t;
-
 /* Default Tunable parameters of the NIC. */
 #define DEFAULT_FIFO_LEN 4096
 #define SMALL_RXD_CNT	30 * (MAX_RXDS_PER_BLOCK+1)
@@ -499,7 +650,20 @@ typedef struct rx_block_info {
 #define LARGE_BLK_CNT	100
 
 /* Structure representing one instance of the NIC */
-typedef struct s2io_nic {
+struct s2io_nic {
+#ifdef CONFIG_S2IO_NAPI
+	/*
+	 * Count of packets to be processed in a given iteration; it will be
+	 * indicated by the quota field of the device structure when NAPI is
+	 * enabled.
+	 */
+	int pkts_to_process;
+#endif
+	struct net_device *dev;
+	mac_info_t mac_control;
+	struct config_param config;
+	struct pci_dev *pdev;
+	void __iomem *bar0;
+	void __iomem *bar1;
 #define MAX_MAC_SUPPORTED   16
 #define MAX_SUPPORTED_MULTICASTS MAX_MAC_SUPPORTED
 
@@ -507,33 +671,20 @@ typedef struct s2io_nic {
 	macaddr_t pre_mac_addr[MAX_MAC_SUPPORTED];
 
 	struct net_device_stats stats;
-	void __iomem *bar0;
-	void __iomem *bar1;
-	struct config_param config;
-	mac_info_t mac_control;
 	int high_dma_flag;
 	int device_close_flag;
 	int device_enabled_once;
 
-	char name[32];
+	char name[50];
 	struct tasklet_struct task;
 	volatile unsigned long tasklet_status;
-	struct timer_list timer;
-	struct net_device *dev;
-	struct pci_dev *pdev;
 
-	u16 vendor_id;
-	u16 device_id;
-	u16 ccmd;
-	u32 cbar0_1;
-	u32 cbar0_2;
-	u32 cbar1_1;
-	u32 cbar1_2;
-	u32 cirq;
-	u8 cache_line;
-	u32 rom_expansion;
-	u16 pcix_cmd;
-	u32 irq;
+	/* Timer that handles I/O errors/exceptions */
+	struct timer_list alarm_timer;
+
+	/* Space to back up the PCI config space */
+	u32 config_space[256 / sizeof(u32)];
+
 	atomic_t rx_bufs_left[MAX_RX_RINGS];
 
 	spinlock_t tx_lock;
@@ -558,27 +709,11 @@ typedef struct s2io_nic {
 	u16 tx_err_count;
 	u16 rx_err_count;
 
-#ifndef CONFIG_S2IO_NAPI
-	/* Index to the absolute position of the put pointer of Rx ring. */
-	int put_pos[MAX_RX_RINGS];
-#endif
-
-	/*
-	 *  Place holders for the virtual and physical addresses of 
-	 *  all the Rx Blocks
-	 */
-	rx_block_info_t rx_blocks[MAX_RX_RINGS][MAX_RX_BLOCKS_PER_RING];
-	int block_count[MAX_RX_RINGS];
-	int pkt_cnt[MAX_RX_RINGS];
-
-	/* Place holder of all the TX List's Phy and Virt addresses. */
-	list_info_hold_t *list_info[MAX_TX_FIFOS];
-
 	/*  Id timer, used to blink NIC to physically identify NIC. */
 	struct timer_list id_timer;
 
 	/*  Restart timer, used to restart NIC if the device is stuck and
-	 *  a schedule task that will set the correct Link state once the 
+	 *  a schedule task that will set the correct Link state once the
 	 *  NIC's PHY has stabilized after a state change.
 	 */
 #ifdef INIT_TQUEUE
@@ -589,12 +724,12 @@ typedef struct s2io_nic {
 	struct work_struct set_link_task;
 #endif
 
-	/* Flag that can be used to turn on or turn off the Rx checksum 
+	/* Flag that can be used to turn on or turn off the Rx checksum
 	 * offload feature.
 	 */
 	int rx_csum;
 
-	/*  after blink, the adapter must be restored with original 
+	/*  after blink, the adapter must be restored with original
 	 *  values.
 	 */
 	u64 adapt_ctrl_org;
@@ -604,16 +739,19 @@ typedef struct s2io_nic {
 #define	LINK_DOWN	1
 #define	LINK_UP		2
 
-#ifdef CONFIG_2BUFF_MODE
-	/* Buffer Address store. */
-	buffAdd_t **ba[MAX_RX_RINGS];
-#endif
 	int task_flag;
 #define CARD_DOWN 1
 #define CARD_UP 2
 	atomic_t card_state;
 	volatile unsigned long link_state;
-} nic_t;
+	struct vlan_group *vlgrp;
+#define XFRAME_I_DEVICE		1
+#define XFRAME_II_DEVICE	2
+	u8 device_type;
+
+	spinlock_t	rx_lock;
+	atomic_t	isr_cnt;
+};
 
 #define RESET_ERROR 1;
 #define CMD_ERROR   2;
@@ -622,9 +760,10 @@ typedef struct s2io_nic {
 #ifndef readq
 static inline u64 readq(void __iomem *addr)
 {
-	u64 ret = readl(addr + 4);
-	ret <<= 32;
-	ret |= readl(addr);
+	u64 ret = 0;
+	ret = readl(addr + 4);
+	ret <<= 32;
+	ret |= readl(addr);
 
 	return ret;
 }
@@ -637,10 +776,10 @@ static inline void writeq(u64 val, void 
 	writel((u32) (val >> 32), (addr + 4));
 }
 
-/* In 32 bit modes, some registers have to be written in a 
+/* In 32 bit modes, some registers have to be written in a
  * particular order to expect correct hardware operation. The
- * macro SPECIAL_REG_WRITE is used to perform such ordered 
- * writes. Defines UF (Upper First) and LF (Lower First) will 
+ * macro SPECIAL_REG_WRITE is used to perform such ordered
+ * writes. Defines UF (Upper First) and LF (Lower First) will
  * be used to specify the required write order.
  */
 #define UF	1
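
SPECIAL_REG_WRITE() itself is defined immediately below this comment (outside the hunk shown here). The essence of the UF/LF distinction is simply which 32-bit half of the 64-bit value reaches the register first; a sketch for illustration only, not the driver's exact implementation (which may also add read-backs to flush posted writes):

	static inline void example_ordered_write(u64 val, void __iomem *addr,
						 int order)
	{
		if (order == LF) {			/* Lower half First */
			writel((u32) val, addr);
			writel((u32) (val >> 32), addr + 4);
		} else {				/* UF: Upper half First */
			writel((u32) (val >> 32), addr + 4);
			writel((u32) val, addr);
		}
	}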
@@ -716,6 +855,7 @@ static inline void SPECIAL_REG_WRITE(u64
 #define	PCC_FB_ECC_ERR	   vBIT(0xff, 16, 8)	/* Interrupt to indicate
 						   PCC_FB_ECC Error. */
 
+#define RXD_GET_VLAN_TAG(Control_2) (u16)(Control_2 & MASK_VLAN_TAG)
 /*
  * Prototype declaration.
  */
@@ -725,36 +865,30 @@ static void __devexit s2io_rem_nic(struc
 static int init_shared_mem(struct s2io_nic *sp);
 static void free_shared_mem(struct s2io_nic *sp);
 static int init_nic(struct s2io_nic *nic);
-#ifndef CONFIG_S2IO_NAPI
-static void rx_intr_handler(struct s2io_nic *sp);
-#endif
-static void tx_intr_handler(struct s2io_nic *sp);
+static void rx_intr_handler(ring_info_t *ring_data);
+static void tx_intr_handler(fifo_info_t *fifo_data);
 static void alarm_intr_handler(struct s2io_nic *sp);
 
 static int s2io_starter(void);
-static void s2io_closer(void);
+void s2io_closer(void);
 static void s2io_tx_watchdog(struct net_device *dev);
 static void s2io_tasklet(unsigned long dev_addr);
 static void s2io_set_multicast(struct net_device *dev);
-#ifndef CONFIG_2BUFF_MODE
-static int rx_osm_handler(nic_t * sp, u16 len, RxD_t * rxdp, int ring_no);
-#else
-static int rx_osm_handler(nic_t * sp, RxD_t * rxdp, int ring_no,
-			  buffAdd_t * ba);
-#endif
-static void s2io_link(nic_t * sp, int link);
-static void s2io_reset(nic_t * sp);
-#ifdef CONFIG_S2IO_NAPI
+static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp);
+void s2io_link(nic_t * sp, int link);
+void s2io_reset(nic_t * sp);
+#if defined(CONFIG_S2IO_NAPI)
 static int s2io_poll(struct net_device *dev, int *budget);
 #endif
 static void s2io_init_pci(nic_t * sp);
-static int s2io_set_mac_addr(struct net_device *dev, u8 * addr);
+int s2io_set_mac_addr(struct net_device *dev, u8 * addr);
+static void s2io_alarm_handle(unsigned long data);
 static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs);
-static int verify_xena_quiescence(u64 val64, int flag);
+static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag);
 static struct ethtool_ops netdev_ethtool_ops;
 static void s2io_set_link(unsigned long data);
-static int s2io_set_swapper(nic_t * sp);
-static void s2io_card_down(nic_t * nic);
-static int s2io_card_up(nic_t * nic);
-
+int s2io_set_swapper(nic_t * sp);
+static void s2io_card_down(nic_t *nic);
+static int s2io_card_up(nic_t *nic);
+int get_xena_rev_id(struct pci_dev *pdev);
 #endif				/* _S2IO_H */
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
new file mode 100644
--- /dev/null
+++ b/drivers/net/sis190.c
@@ -0,0 +1,1842 @@
+/*
+   sis190.c: Silicon Integrated Systems SiS190 ethernet driver
+
+   Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
+   Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
+   Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
+
+   Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
+   genuine driver.
+
+   This software may be used and distributed according to the terms of
+   the GNU General Public License (GPL), incorporated herein by reference.
+   Drivers based on or derived from this code fall under the GPL and must
+   retain the authorship, copyright and license notice.  This file is not
+   a complete program and may only be used when the entire operating
+   system is licensed under the GPL.
+
+   See the file COPYING in this distribution for more information.
+
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/pci.h>
+#include <linux/mii.h>
+#include <linux/delay.h>
+#include <linux/crc32.h>
+#include <linux/dma-mapping.h>
+#include <asm/irq.h>
+
+#define net_drv(p, arg...)	if (netif_msg_drv(p)) \
+					printk(arg)
+#define net_probe(p, arg...)	if (netif_msg_probe(p)) \
+					printk(arg)
+#define net_link(p, arg...)	if (netif_msg_link(p)) \
+					printk(arg)
+#define net_intr(p, arg...)	if (netif_msg_intr(p)) \
+					printk(arg)
+#define net_tx_err(p, arg...)	if (netif_msg_tx_err(p)) \
+					printk(arg)
+
+#define PHY_MAX_ADDR		32
+#define PHY_ID_ANY		0x1f
+#define MII_REG_ANY		0x1f
+
+#ifdef CONFIG_SIS190_NAPI
+#define NAPI_SUFFIX	"-NAPI"
+#else
+#define NAPI_SUFFIX	""
+#endif
+
+#define DRV_VERSION		"1.2" NAPI_SUFFIX
+#define DRV_NAME		"sis190"
+#define SIS190_DRIVER_NAME	DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
+#define PFX DRV_NAME ": "
+
+#ifdef CONFIG_SIS190_NAPI
+#define sis190_rx_skb			netif_receive_skb
+#define sis190_rx_quota(count, quota)	min(count, quota)
+#else
+#define sis190_rx_skb			netif_rx
+#define sis190_rx_quota(count, quota)	count
+#endif
+
+#define MAC_ADDR_LEN		6
+
+#define NUM_TX_DESC		64	/* [8..1024] */
+#define NUM_RX_DESC		64	/* [8..8192] */
+#define TX_RING_BYTES		(NUM_TX_DESC * sizeof(struct TxDesc))
+#define RX_RING_BYTES		(NUM_RX_DESC * sizeof(struct RxDesc))
+#define RX_BUF_SIZE		1536
+#define RX_BUF_MASK		0xfff8
+
+#define SIS190_REGS_SIZE	0x80
+#define SIS190_TX_TIMEOUT	(6*HZ)
+#define SIS190_PHY_TIMEOUT	(10*HZ)
+#define SIS190_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
+				 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
+				 NETIF_MSG_IFDOWN)
+
+/* Enhanced PHY access register bit definitions */
+#define EhnMIIread		0x0000
+#define EhnMIIwrite		0x0020
+#define EhnMIIdataShift		16
+#define EhnMIIpmdShift		6	/* 7016 only */
+#define EhnMIIregShift		11
+#define EhnMIIreq		0x0010
+#define EhnMIInotDone		0x0010
+
+/* Write/read MMIO register */
+#define SIS_W8(reg, val)	writeb ((val), ioaddr + (reg))
+#define SIS_W16(reg, val)	writew ((val), ioaddr + (reg))
+#define SIS_W32(reg, val)	writel ((val), ioaddr + (reg))
+#define SIS_R8(reg)		readb (ioaddr + (reg))
+#define SIS_R16(reg)		readw (ioaddr + (reg))
+#define SIS_R32(reg)		readl (ioaddr + (reg))
+
+#define SIS_PCI_COMMIT()	SIS_R32(IntrControl)
+
+enum sis190_registers {
+	TxControl		= 0x00,
+	TxDescStartAddr		= 0x04,
+	rsv0			= 0x08,	// reserved
+	TxSts			= 0x0c,	// unused (Control/Status)
+	RxControl		= 0x10,
+	RxDescStartAddr		= 0x14,
+	rsv1			= 0x18,	// reserved
+	RxSts			= 0x1c,	// unused
+	IntrStatus		= 0x20,
+	IntrMask		= 0x24,
+	IntrControl		= 0x28,
+	IntrTimer		= 0x2c,	// unused (Interrupt Timer)
+	PMControl		= 0x30,	// unused (Power Mgmt Control/Status)
+	rsv2			= 0x34,	// reserved
+	ROMControl		= 0x38,
+	ROMInterface		= 0x3c,
+	StationControl		= 0x40,
+	GMIIControl		= 0x44,
+	GIoCR			= 0x48, // unused (GMAC IO Compensation)
+	GIoCtrl			= 0x4c, // unused (GMAC IO Control)
+	TxMacControl		= 0x50,
+	TxLimit			= 0x54, // unused (Tx MAC Timer/TryLimit)
+	RGDelay			= 0x58, // unused (RGMII Tx Internal Delay)
+	rsv3			= 0x5c, // reserved
+	RxMacControl		= 0x60,
+	RxMacAddr		= 0x62,
+	RxHashTable		= 0x68,
+	// Undocumented		= 0x6c,
+	RxWolCtrl		= 0x70,
+	RxWolData		= 0x74, // unused (Rx WOL Data Access)
+	RxMPSControl		= 0x78,	// unused (Rx MPS Control)
+	rsv4			= 0x7c, // reserved
+};
+
+enum sis190_register_content {
+	/* IntrStatus */
+	SoftInt			= 0x40000000,	// unused
+	Timeup			= 0x20000000,	// unused
+	PauseFrame		= 0x00080000,	// unused
+	MagicPacket		= 0x00040000,	// unused
+	WakeupFrame		= 0x00020000,	// unused
+	LinkChange		= 0x00010000,
+	RxQEmpty		= 0x00000080,
+	RxQInt			= 0x00000040,
+	TxQ1Empty		= 0x00000020,	// unused
+	TxQ1Int			= 0x00000010,
+	TxQ0Empty		= 0x00000008,	// unused
+	TxQ0Int			= 0x00000004,
+	RxHalt			= 0x00000002,
+	TxHalt			= 0x00000001,
+
+	/* {Rx/Tx}CmdBits */
+	CmdReset		= 0x10,
+	CmdRxEnb		= 0x08,		// unused
+	CmdTxEnb		= 0x01,
+	RxBufEmpty		= 0x01,		// unused
+
+	/* Cfg9346Bits */
+	Cfg9346_Lock		= 0x00,		// unused
+	Cfg9346_Unlock		= 0xc0,		// unused
+
+	/* RxMacControl */
+	AcceptErr		= 0x20,		// unused
+	AcceptRunt		= 0x10,		// unused
+	AcceptBroadcast		= 0x0800,
+	AcceptMulticast		= 0x0400,
+	AcceptMyPhys		= 0x0200,
+	AcceptAllPhys		= 0x0100,
+
+	/* RxConfigBits */
+	RxCfgFIFOShift		= 13,
+	RxCfgDMAShift		= 8,		// 0x1a in RxControl ?
+
+	/* TxConfigBits */
+	TxInterFrameGapShift	= 24,
+	TxDMAShift		= 8, /* DMA burst value (0-7) is shifted by this many bits */
+
+	/* StationControl */
+	_1000bpsF		= 0x1c00,
+	_1000bpsH		= 0x0c00,
+	_100bpsF		= 0x1800,
+	_100bpsH		= 0x0800,
+	_10bpsF			= 0x1400,
+	_10bpsH			= 0x0400,
+
+	LinkStatus		= 0x02,		// unused
+	FullDup			= 0x01,		// unused
+
+	/* TBICSRBit */
+	TBILinkOK		= 0x02000000,	// unused
+};
+
+struct TxDesc {
+	__le32 PSize;
+	__le32 status;
+	__le32 addr;
+	__le32 size;
+};
+
+struct RxDesc {
+	__le32 PSize;
+	__le32 status;
+	__le32 addr;
+	__le32 size;
+};
+
+enum _DescStatusBit {
+	/* _Desc.status */
+	OWNbit		= 0x80000000, // RXOWN/TXOWN
+	INTbit		= 0x40000000, // RXINT/TXINT
+	CRCbit		= 0x00020000, // CRCOFF/CRCEN
+	PADbit		= 0x00010000, // PREADD/PADEN
+	/* _Desc.size */
+	RingEnd		= 0x80000000,
+	/* TxDesc.status */
+	LSEN		= 0x08000000, // TSO ? -- FR
+	IPCS		= 0x04000000,
+	TCPCS		= 0x02000000,
+	UDPCS		= 0x01000000,
+	BSTEN		= 0x00800000,
+	EXTEN		= 0x00400000,
+	DEFEN		= 0x00200000,
+	BKFEN		= 0x00100000,
+	CRSEN		= 0x00080000,
+	COLEN		= 0x00040000,
+	THOL3		= 0x30000000,
+	THOL2		= 0x20000000,
+	THOL1		= 0x10000000,
+	THOL0		= 0x00000000,
+	/* RxDesc.status */
+	IPON		= 0x20000000,
+	TCPON		= 0x10000000,
+	UDPON		= 0x08000000,
+	Wakup		= 0x00400000,
+	Magic		= 0x00200000,
+	Pause		= 0x00100000,
+	DEFbit		= 0x00200000,
+	BCAST		= 0x000c0000,
+	MCAST		= 0x00080000,
+	UCAST		= 0x00040000,
+	/* RxDesc.PSize */
+	TAGON		= 0x80000000,
+	RxDescCountMask	= 0x7f000000, // multi-desc pkt when > 1 ? -- FR
+	ABORT		= 0x00800000,
+	SHORT		= 0x00400000,
+	LIMIT		= 0x00200000,
+	MIIER		= 0x00100000,
+	OVRUN		= 0x00080000,
+	NIBON		= 0x00040000,
+	COLON		= 0x00020000,
+	CRCOK		= 0x00010000,
+	RxSizeMask	= 0x0000ffff
+	/*
+	 * The asic could apparently do vlan, TSO, jumbo (sis191 only) and
+	 * provide two (unused with Linux) Tx queues. No publicly
+	 * available documentation, alas.
+	 */
+};
+
+enum sis190_eeprom_access_register_bits {
+	EECS	= 0x00000001,	// unused
+	EECLK	= 0x00000002,	// unused
+	EEDO	= 0x00000008,	// unused
+	EEDI	= 0x00000004,	// unused
+	EEREQ	= 0x00000080,
+	EEROP	= 0x00000200,
+	EEWOP	= 0x00000100	// unused
+};
+
+/* EEPROM Addresses */
+enum sis190_eeprom_address {
+	EEPROMSignature	= 0x00,
+	EEPROMCLK	= 0x01,	// unused
+	EEPROMInfo	= 0x02,
+	EEPROMMACAddr	= 0x03
+};
+
+struct sis190_private {
+	void __iomem *mmio_addr;
+	struct pci_dev *pci_dev;
+	struct net_device_stats stats;
+	spinlock_t lock;
+	u32 rx_buf_sz;
+	u32 cur_rx;
+	u32 cur_tx;
+	u32 dirty_rx;
+	u32 dirty_tx;
+	dma_addr_t rx_dma;
+	dma_addr_t tx_dma;
+	struct RxDesc *RxDescRing;
+	struct TxDesc *TxDescRing;
+	struct sk_buff *Rx_skbuff[NUM_RX_DESC];
+	struct sk_buff *Tx_skbuff[NUM_TX_DESC];
+	struct work_struct phy_task;
+	struct timer_list timer;
+	u32 msg_enable;
+	struct mii_if_info mii_if;
+	struct list_head first_phy;
+};
+
+struct sis190_phy {
+	struct list_head list;
+	int phy_id;
+	u16 id[2];
+	u16 status;
+	u8  type;
+};
+
+enum sis190_phy_type {
+	UNKNOWN	= 0x00,
+	HOME	= 0x01,
+	LAN	= 0x02,
+	MIX	= 0x03
+};
+
+static struct mii_chip_info {
+        const char *name;
+        u16 id[2];
+        unsigned int type;
+} mii_chip_table[] = {
+	{ "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN },
+	{ "Agere PHY ET1101B",    { 0x0282, 0xf010 }, LAN },
+	{ "Marvell PHY 88E1111",  { 0x0141, 0x0cc0 }, LAN },
+	{ "Realtek PHY RTL8201",  { 0x0000, 0x8200 }, LAN },
+	{ NULL, }
+};
+
+static const struct {
+	const char *name;
+	u8 version;		/* depend on docs */
+	u32 RxConfigMask;	/* clear the bits supported by this chip */
+} sis_chip_info[] = {
+	{ DRV_NAME, 0x00, 0xff7e1880, },
+};
+
+static struct pci_device_id sis190_pci_tbl[] __devinitdata = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
+	{ 0, },
+};
+
+MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
+
+static int rx_copybreak = 200;
+
+static struct {
+	u32 msg_enable;
+} debug = { -1 };
+
+MODULE_DESCRIPTION("SiS sis190 Gigabit Ethernet driver");
+module_param(rx_copybreak, int, 0);
+MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
+module_param_named(debug, debug.msg_enable, int, 0);
+MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
+MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
+
+static const u32 sis190_intr_mask =
+	RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt;
+
+/*
+ * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
+ * The chips use a 64 element hash table based on the Ethernet CRC.
+ */
+static int multicast_filter_limit = 32;
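
The receive-filter code that consumes this limit appears further down in the file, outside this excerpt. For a 64-bin hash keyed on the Ethernet CRC, the bin is conventionally taken from the top 6 bits of the CRC; a hedged sketch of that computation (helper name and exact bit selection are assumptions, not taken from this driver):

	/* Illustrative only: set the filter bit for one multicast address. */
	static inline void example_hash_mc_addr(u32 hash[2], const u8 *addr)
	{
		int bit = ether_crc(ETH_ALEN, addr) >> 26;	/* top 6 bits */

		hash[bit / 32] |= 1 << (bit % 32);
	}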
+
+static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
+{
+	unsigned int i;
+
+	SIS_W32(GMIIControl, ctl);
+
+	msleep(1);
+
+	for (i = 0; i < 100; i++) {
+		if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
+			break;
+		msleep(1);
+	}
+
+	if (i > 99)
+		printk(KERN_ERR PFX "PHY command failed!\n");
+}
+
+static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
+{
+	__mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
+		(((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
+		(((u32) val) << EhnMIIdataShift));
+}
+
+static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
+{
+	__mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
+		(((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));
+
+	return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
+}
+
+static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+
+	mdio_write(tp->mmio_addr, phy_id, reg, val);
+}
+
+static int __mdio_read(struct net_device *dev, int phy_id, int reg)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+
+	return mdio_read(tp->mmio_addr, phy_id, reg);
+}
+
+static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
+{
+	mdio_read(ioaddr, phy_id, reg);
+	return mdio_read(ioaddr, phy_id, reg);
+}
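
mdio_read_latched() reads the register twice because several MII status bits (notably the BMSR link indication) are latched and only reflect the current state on the second read. A short usage sketch against the standard <linux/mii.h> register names, for illustration:

	/* Illustrative only: report the current link state of one PHY. */
	static int example_phy_link_ok(void __iomem *ioaddr, int phy_id)
	{
		u16 bmsr = mdio_read_latched(ioaddr, phy_id, MII_BMSR);

		return (bmsr & BMSR_LSTATUS) ? 1 : 0;
	}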
+
+static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
+{
+	u16 data = 0xffff;
+	unsigned int i;
+
+	if (!(SIS_R32(ROMControl) & 0x0002))
+		return 0;
+
+	SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));
+
+	for (i = 0; i < 200; i++) {
+		if (!(SIS_R32(ROMInterface) & EEREQ)) {
+			data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
+			break;
+		}
+		msleep(1);
+	}
+
+	return data;
+}
+
+static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
+{
+	SIS_W32(IntrMask, 0x00);
+	SIS_W32(IntrStatus, 0xffffffff);
+	SIS_PCI_COMMIT();
+}
+
+static void sis190_asic_down(void __iomem *ioaddr)
+{
+	/* Stop the chip's Tx and Rx DMA processes. */
+
+	SIS_W32(TxControl, 0x1a00);
+	SIS_W32(RxControl, 0x1a00);
+
+	sis190_irq_mask_and_ack(ioaddr);
+}
+
+static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
+{
+	desc->size |= cpu_to_le32(RingEnd);
+}
+
+static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
+{
+	u32 eor = le32_to_cpu(desc->size) & RingEnd;
+
+	desc->PSize = 0x0;
+	desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
+	wmb();
+	desc->status = cpu_to_le32(OWNbit | INTbit);
+}
+
+static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
+				      u32 rx_buf_sz)
+{
+	desc->addr = cpu_to_le32(mapping);
+	sis190_give_to_asic(desc, rx_buf_sz);
+}
+
+static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
+{
+	desc->PSize = 0x0;
+	desc->addr = 0xdeadbeef;
+	desc->size &= cpu_to_le32(RingEnd);
+	wmb();
+	desc->status = 0x0;
+}
+
+static int sis190_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
+			       struct RxDesc *desc, u32 rx_buf_sz)
+{
+	struct sk_buff *skb;
+	dma_addr_t mapping;
+	int ret = 0;
+
+	skb = dev_alloc_skb(rx_buf_sz);
+	if (!skb)
+		goto err_out;
+
+	*sk_buff = skb;
+
+	mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
+				 PCI_DMA_FROMDEVICE);
+
+	sis190_map_to_asic(desc, mapping, rx_buf_sz);
+out:
+	return ret;
+
+err_out:
+	ret = -ENOMEM;
+	sis190_make_unusable_by_asic(desc);
+	goto out;
+}
+
+static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
+			  u32 start, u32 end)
+{
+	u32 cur;
+
+	for (cur = start; cur < end; cur++) {
+		int ret, i = cur % NUM_RX_DESC;
+
+		if (tp->Rx_skbuff[i])
+			continue;
+
+		ret = sis190_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
+					  tp->RxDescRing + i, tp->rx_buf_sz);
+		if (ret < 0)
+			break;
+	}
+	return cur - start;
+}
+
+static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
+				     struct RxDesc *desc, int rx_buf_sz)
+{
+	int ret = -1;
+
+	if (pkt_size < rx_copybreak) {
+		struct sk_buff *skb;
+
+		skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
+		if (skb) {
+			skb_reserve(skb, NET_IP_ALIGN);
+			eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
+			*sk_buff = skb;
+			sis190_give_to_asic(desc, rx_buf_sz);
+			ret = 0;
+		}
+	}
+	return ret;
+}
+
+static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
+{
+#define ErrMask	(OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)
+
+	if ((status & CRCOK) && !(status & ErrMask))
+		return 0;
+
+	if (!(status & CRCOK))
+		stats->rx_crc_errors++;
+	else if (status & OVRUN)
+		stats->rx_over_errors++;
+	else if (status & (SHORT | LIMIT))
+		stats->rx_length_errors++;
+	else if (status & (MIIER | NIBON | COLON))
+		stats->rx_frame_errors++;
+
+	stats->rx_errors++;
+	return -1;
+}
+
+static int sis190_rx_interrupt(struct net_device *dev,
+			       struct sis190_private *tp, void __iomem *ioaddr)
+{
+	struct net_device_stats *stats = &tp->stats;
+	u32 rx_left, cur_rx = tp->cur_rx;
+	u32 delta, count;
+
+	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
+	rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);
+
+	for (; rx_left > 0; rx_left--, cur_rx++) {
+		unsigned int entry = cur_rx % NUM_RX_DESC;
+		struct RxDesc *desc = tp->RxDescRing + entry;
+		u32 status;
+
+		if (desc->status & OWNbit)
+			break;
+
+		status = le32_to_cpu(desc->PSize);
+
+		// net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
+		//	 status);
+
+		if (sis190_rx_pkt_err(status, stats) < 0)
+			sis190_give_to_asic(desc, tp->rx_buf_sz);
+		else {
+			struct sk_buff *skb = tp->Rx_skbuff[entry];
+			int pkt_size = (status & RxSizeMask) - 4;
+			void (*pci_action)(struct pci_dev *, dma_addr_t,
+				size_t, int) = pci_dma_sync_single_for_device;
+
+			if (unlikely(pkt_size > tp->rx_buf_sz)) {
+				net_intr(tp, KERN_INFO
+					 "%s: (frag) status = %08x.\n",
+					 dev->name, status);
+				stats->rx_dropped++;
+				stats->rx_length_errors++;
+				sis190_give_to_asic(desc, tp->rx_buf_sz);
+				continue;
+			}
+
+			pci_dma_sync_single_for_cpu(tp->pci_dev,
+				le32_to_cpu(desc->addr), tp->rx_buf_sz,
+				PCI_DMA_FROMDEVICE);
+
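+			/*
+			 * Copybreak: small packets are copied into a fresh skb
+			 * and the original buffer is handed back to the NIC;
+			 * larger packets are unmapped and passed up directly.
+			 */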
+			if (sis190_try_rx_copy(&skb, pkt_size, desc,
+					       tp->rx_buf_sz)) {
+				pci_action = pci_unmap_single;
+				tp->Rx_skbuff[entry] = NULL;
+				sis190_make_unusable_by_asic(desc);
+			}
+
+			pci_action(tp->pci_dev, le32_to_cpu(desc->addr),
+				   tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+
+			skb->dev = dev;
+			skb_put(skb, pkt_size);
+			skb->protocol = eth_type_trans(skb, dev);
+
+			sis190_rx_skb(skb);
+
+			dev->last_rx = jiffies;
+			stats->rx_packets++;
+			stats->rx_bytes += pkt_size;
+			if ((status & BCAST) == MCAST)
+				stats->multicast++;
+		}
+	}
+	count = cur_rx - tp->cur_rx;
+	tp->cur_rx = cur_rx;
+
+	delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
+	if (!delta && count && netif_msg_intr(tp))
+		printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
+	tp->dirty_rx += delta;
+
+	if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
+		printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);
+
+	return count;
+}
+
+static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
+				struct TxDesc *desc)
+{
+	unsigned int len;
+
+	len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
+
+	pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
+
+	memset(desc, 0x00, sizeof(*desc));
+}
+
+static void sis190_tx_interrupt(struct net_device *dev,
+				struct sis190_private *tp, void __iomem *ioaddr)
+{
+	u32 pending, dirty_tx = tp->dirty_tx;
+	/*
+	 * Needed because the queue must not be woken again too early
+	 * (hint: think preempt and SMP memory ordering).
+	 */
+	unsigned int queue_stopped;
+
+	smp_rmb();
+	pending = tp->cur_tx - dirty_tx;
+	queue_stopped = (pending == NUM_TX_DESC);
+
+	for (; pending; pending--, dirty_tx++) {
+		unsigned int entry = dirty_tx % NUM_TX_DESC;
+		struct TxDesc *txd = tp->TxDescRing + entry;
+		struct sk_buff *skb;
+
+		if (le32_to_cpu(txd->status) & OWNbit)
+			break;
+
+		skb = tp->Tx_skbuff[entry];
+
+		tp->stats.tx_packets++;
+		tp->stats.tx_bytes += skb->len;
+
+		sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
+		tp->Tx_skbuff[entry] = NULL;
+		dev_kfree_skb_irq(skb);
+	}
+
+	if (tp->dirty_tx != dirty_tx) {
+		tp->dirty_tx = dirty_tx;
+		smp_wmb();
+		if (queue_stopped)
+			netif_wake_queue(dev);
+	}
+}
+
+/*
+ * The interrupt handler does all of the Rx thread work and cleans up after
+ * the Tx thread.
+ */
+static irqreturn_t sis190_interrupt(int irq, void *__dev, struct pt_regs *regs)
+{
+	struct net_device *dev = __dev;
+	struct sis190_private *tp = netdev_priv(dev);
+	void __iomem *ioaddr = tp->mmio_addr;
+	unsigned int handled = 0;
+	u32 status;
+
+	status = SIS_R32(IntrStatus);
+
+	if ((status == 0xffffffff) || !status)
+		goto out;
+
+	handled = 1;
+
+	if (unlikely(!netif_running(dev))) {
+		sis190_asic_down(ioaddr);
+		goto out;
+	}
+
+	SIS_W32(IntrStatus, status);
+
+	// net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);
+
+	if (status & LinkChange) {
+		net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
+		schedule_work(&tp->phy_task);
+	}
+
+	if (status & RxQInt)
+		sis190_rx_interrupt(dev, tp, ioaddr);
+
+	if (status & TxQ0Int)
+		sis190_tx_interrupt(dev, tp, ioaddr);
+out:
+	return IRQ_RETVAL(handled);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void sis190_netpoll(struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+	struct pci_dev *pdev = tp->pci_dev;
+
+	disable_irq(pdev->irq);
+	sis190_interrupt(pdev->irq, dev, NULL);
+	enable_irq(pdev->irq);
+}
+#endif
+
+static void sis190_free_rx_skb(struct sis190_private *tp,
+			       struct sk_buff **sk_buff, struct RxDesc *desc)
+{
+	struct pci_dev *pdev = tp->pci_dev;
+
+	pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
+			 PCI_DMA_FROMDEVICE);
+	dev_kfree_skb(*sk_buff);
+	*sk_buff = NULL;
+	sis190_make_unusable_by_asic(desc);
+}
+
+static void sis190_rx_clear(struct sis190_private *tp)
+{
+	unsigned int i;
+
+	for (i = 0; i < NUM_RX_DESC; i++) {
+		if (!tp->Rx_skbuff[i])
+			continue;
+		sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
+	}
+}
+
+static void sis190_init_ring_indexes(struct sis190_private *tp)
+{
+	tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
+}
+
+static int sis190_init_ring(struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+
+	sis190_init_ring_indexes(tp);
+
+	memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
+	memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
+
+	if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
+		goto err_rx_clear;
+
+	sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
+
+	return 0;
+
+err_rx_clear:
+	sis190_rx_clear(tp);
+	return -ENOMEM;
+}
+
+static void sis190_set_rx_mode(struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+	void __iomem *ioaddr = tp->mmio_addr;
+	unsigned long flags;
+	u32 mc_filter[2];	/* Multicast hash filter */
+	u16 rx_mode;
+
+	if (dev->flags & IFF_PROMISC) {
+		/* Unconditionally log net taps. */
+		net_drv(tp, KERN_NOTICE "%s: Promiscuous mode enabled.\n",
+			dev->name);
+		rx_mode =
+			AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
+			AcceptAllPhys;
+		mc_filter[1] = mc_filter[0] = 0xffffffff;
+	} else if ((dev->mc_count > multicast_filter_limit) ||
+		   (dev->flags & IFF_ALLMULTI)) {
+		/* Too many to filter perfectly -- accept all multicasts. */
+		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+		mc_filter[1] = mc_filter[0] = 0xffffffff;
+	} else {
+		struct dev_mc_list *mclist;
+		unsigned int i;
+
+		rx_mode = AcceptBroadcast | AcceptMyPhys;
+		mc_filter[1] = mc_filter[0] = 0;
+		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+		     i++, mclist = mclist->next) {
+			int bit_nr =
+				ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+			rx_mode |= AcceptMulticast;
+		}
+	}
+
+	spin_lock_irqsave(&tp->lock, flags);
+
+	SIS_W16(RxMacControl, rx_mode | 0x2);
+	SIS_W32(RxHashTable, mc_filter[0]);
+	SIS_W32(RxHashTable + 4, mc_filter[1]);
+
+	spin_unlock_irqrestore(&tp->lock, flags);
+}
+
+static void sis190_soft_reset(void __iomem *ioaddr)
+{
+	SIS_W32(IntrControl, 0x8000);
+	SIS_PCI_COMMIT();
+	msleep(1);
+	SIS_W32(IntrControl, 0x0);
+	sis190_asic_down(ioaddr);
+	msleep(1);
+}
+
+static void sis190_hw_start(struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+	void __iomem *ioaddr = tp->mmio_addr;
+
+	sis190_soft_reset(ioaddr);
+
+	SIS_W32(TxDescStartAddr, tp->tx_dma);
+	SIS_W32(RxDescStartAddr, tp->rx_dma);
+
+	SIS_W32(IntrStatus, 0xffffffff);
+	SIS_W32(IntrMask, 0x0);
+	/*
+	 * Default is 100Mbps.
+	 * A bit strange: 100Mbps is 0x1801 elsewhere -- FR 2005/06/09
+	 */
+	SIS_W16(StationControl, 0x1901);
+	SIS_W32(GMIIControl, 0x0);
+	SIS_W32(TxMacControl, 0x60);
+	SIS_W16(RxMacControl, 0x02);
+	SIS_W32(RxHashTable, 0x0);
+	SIS_W32(0x6c, 0x0);
+	SIS_W32(RxWolCtrl, 0x0);
+	SIS_W32(RxWolData, 0x0);
+
+	SIS_PCI_COMMIT();
+
+	sis190_set_rx_mode(dev);
+
+	/* Enable all known interrupts by setting the interrupt mask. */
+	SIS_W32(IntrMask, sis190_intr_mask);
+
+	SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
+	SIS_W32(RxControl, 0x1a1d);
+
+	netif_start_queue(dev);
+}
+
+static void sis190_phy_task(void * data)
+{
+	struct net_device *dev = data;
+	struct sis190_private *tp = netdev_priv(dev);
+	void __iomem *ioaddr = tp->mmio_addr;
+	int phy_id = tp->mii_if.phy_id;
+	u16 val;
+
+	rtnl_lock();
+
+	val = mdio_read(ioaddr, phy_id, MII_BMCR);
+	if (val & BMCR_RESET) {
+		// FIXME: needlessly high ?  -- FR 02/07/2005
+		mod_timer(&tp->timer, jiffies + HZ/10);
+	} else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) &
+		     BMSR_ANEGCOMPLETE)) {
+		net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n",
+			 dev->name);
+		mdio_write(ioaddr, phy_id, MII_BMCR, val | BMCR_RESET);
+		mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
+	} else {
+		/* Rejoice ! */
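+		/*
+		 * Map the best mode common to the link partner (LPA) and our
+		 * advertised abilities onto the matching StationControl value.
+		 */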
+		struct {
+			int val;
+			const char *msg;
+			u16 ctl;
+		} reg31[] = {
+			{ LPA_1000XFULL | LPA_SLCT,
+				"1000 Mbps Full Duplex",
+				0x01 | _1000bpsF },
+			{ LPA_1000XHALF | LPA_SLCT,
+				"1000 Mbps Half Duplex",
+				0x01 | _1000bpsH },
+			{ LPA_100FULL,
+				"100 Mbps Full Duplex",
+				0x01 | _100bpsF },
+			{ LPA_100HALF,
+				"100 Mbps Half Duplex",
+				0x01 | _100bpsH },
+			{ LPA_10FULL,
+				"10 Mbps Full Duplex",
+				0x01 | _10bpsF },
+			{ LPA_10HALF,
+				"10 Mbps Half Duplex",
+				0x01 | _10bpsH },
+			{ 0, "unknown", 0x0000 }
+		}, *p;
+		u16 adv;
+
+		val = mdio_read(ioaddr, phy_id, 0x1f);
+		net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);
+
+		val = mdio_read(ioaddr, phy_id, MII_LPA);
+		adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
+		net_link(tp, KERN_INFO "%s: mii lpa = %04x adv = %04x.\n",
+			 dev->name, val, adv);
+
+		val &= adv;
+
+		for (p = reg31; p->ctl; p++) {
+			if ((val & p->val) == p->val)
+				break;
+		}
+		if (p->ctl)
+			SIS_W16(StationControl, p->ctl);
+		net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
+			 p->msg);
+		netif_carrier_on(dev);
+	}
+
+	rtnl_unlock();
+}
+
+static void sis190_phy_timer(unsigned long __opaque)
+{
+	struct net_device *dev = (struct net_device *)__opaque;
+	struct sis190_private *tp = netdev_priv(dev);
+
+	if (likely(netif_running(dev)))
+		schedule_work(&tp->phy_task);
+}
+
+static inline void sis190_delete_timer(struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+
+	del_timer_sync(&tp->timer);
+}
+
+static inline void sis190_request_timer(struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+	struct timer_list *timer = &tp->timer;
+
+	init_timer(timer);
+	timer->expires = jiffies + SIS190_PHY_TIMEOUT;
+	timer->data = (unsigned long)dev;
+	timer->function = sis190_phy_timer;
+	add_timer(timer);
+}
+
+static void sis190_set_rxbufsize(struct sis190_private *tp,
+				 struct net_device *dev)
+{
+	unsigned int mtu = dev->mtu;
+
+	tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
+	/* RxDesc->size has a licence to kill the lower bits */
+	if (tp->rx_buf_sz & 0x07) {
+		tp->rx_buf_sz += 8;
+		tp->rx_buf_sz &= RX_BUF_MASK;
+	}
+}
+
+static int sis190_open(struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+	struct pci_dev *pdev = tp->pci_dev;
+	int rc = -ENOMEM;
+
+	sis190_set_rxbufsize(tp, dev);
+
+	/*
+	 * Rx and Tx descriptors need 256 bytes alignment.
+	 * pci_alloc_consistent() guarantees a stronger alignment.
+	 */
+	tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
+	if (!tp->TxDescRing)
+		goto out;
+
+	tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
+	if (!tp->RxDescRing)
+		goto err_free_tx_0;
+
+	rc = sis190_init_ring(dev);
+	if (rc < 0)
+		goto err_free_rx_1;
+
+	INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
+
+	sis190_request_timer(dev);
+
+	rc = request_irq(dev->irq, sis190_interrupt, SA_SHIRQ, dev->name, dev);
+	if (rc < 0)
+		goto err_release_timer_2;
+
+	sis190_hw_start(dev);
+out:
+	return rc;
+
+err_release_timer_2:
+	sis190_delete_timer(dev);
+	sis190_rx_clear(tp);
+err_free_rx_1:
+	pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
+		tp->rx_dma);
+err_free_tx_0:
+	pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
+		tp->tx_dma);
+	goto out;
+}
+
+static void sis190_tx_clear(struct sis190_private *tp)
+{
+	unsigned int i;
+
+	for (i = 0; i < NUM_TX_DESC; i++) {
+		struct sk_buff *skb = tp->Tx_skbuff[i];
+
+		if (!skb)
+			continue;
+
+		sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
+		tp->Tx_skbuff[i] = NULL;
+		dev_kfree_skb(skb);
+
+		tp->stats.tx_dropped++;
+	}
+	tp->cur_tx = tp->dirty_tx = 0;
+}
+
+static void sis190_down(struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+	void __iomem *ioaddr = tp->mmio_addr;
+	unsigned int poll_locked = 0;
+
+	sis190_delete_timer(dev);
+
+	netif_stop_queue(dev);
+
+	flush_scheduled_work();
+
+	do {
+		spin_lock_irq(&tp->lock);
+
+		sis190_asic_down(ioaddr);
+
+		spin_unlock_irq(&tp->lock);
+
+		synchronize_irq(dev->irq);
+
+		if (!poll_locked) {
+			netif_poll_disable(dev);
+			poll_locked++;
+		}
+
+		synchronize_sched();
+
+	} while (SIS_R32(IntrMask));
+
+	sis190_tx_clear(tp);
+	sis190_rx_clear(tp);
+}
+
+static int sis190_close(struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+	struct pci_dev *pdev = tp->pci_dev;
+
+	sis190_down(dev);
+
+	free_irq(dev->irq, dev);
+
+	netif_poll_enable(dev);
+
+	pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
+	pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
+
+	tp->TxDescRing = NULL;
+	tp->RxDescRing = NULL;
+
+	return 0;
+}
+
+static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+	void __iomem *ioaddr = tp->mmio_addr;
+	u32 len, entry, dirty_tx;
+	struct TxDesc *desc;
+	dma_addr_t mapping;
+
+	if (unlikely(skb->len < ETH_ZLEN)) {
+		skb = skb_padto(skb, ETH_ZLEN);
+		if (!skb) {
+			tp->stats.tx_dropped++;
+			goto out;
+		}
+		len = ETH_ZLEN;
+	} else {
+		len = skb->len;
+	}
+
+	entry = tp->cur_tx % NUM_TX_DESC;
+	desc = tp->TxDescRing + entry;
+
+	if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
+		netif_stop_queue(dev);
+		net_tx_err(tp, KERN_ERR PFX
+			   "%s: BUG! Tx Ring full when queue awake!\n",
+			   dev->name);
+		return NETDEV_TX_BUSY;
+	}
+
+	mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
+
+	tp->Tx_skbuff[entry] = skb;
+
+	desc->PSize = cpu_to_le32(len);
+	desc->addr = cpu_to_le32(mapping);
+
+	desc->size = cpu_to_le32(len);
+	if (entry == (NUM_TX_DESC - 1))
+		desc->size |= cpu_to_le32(RingEnd);
+
+	wmb();
+
+	desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
+
+	tp->cur_tx++;
+
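+	/* Publish the updated cur_tx before sis190_tx_interrupt() reads it
+	 * (pairs with the smp_rmb() there).
+	 */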
+	smp_wmb();
+
+	SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);
+
+	dev->trans_start = jiffies;
+
+	dirty_tx = tp->dirty_tx;
+	if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
+		netif_stop_queue(dev);
+		smp_rmb();
+		if (dirty_tx != tp->dirty_tx)
+			netif_wake_queue(dev);
+	}
+out:
+	return NETDEV_TX_OK;
+}
+
+static struct net_device_stats *sis190_get_stats(struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+
+	return &tp->stats;
+}
+
+static void sis190_free_phy(struct list_head *first_phy)
+{
+	struct sis190_phy *cur, *next;
+
+	list_for_each_entry_safe(cur, next, first_phy, list) {
+		kfree(cur);
+	}
+}
+
+/**
+ *	sis190_default_phy - Select default PHY for sis190 mac.
+ *	@dev: the net device to probe for
+ *
+ *	Select the first detected PHY with a link as the default.
+ *	If none has a link, select the PHY whose type is HOME.
+ *	If no HOME PHY exists, select a LAN PHY.
+ */
+static u16 sis190_default_phy(struct net_device *dev)
+{
+	struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
+	struct sis190_private *tp = netdev_priv(dev);
+	struct mii_if_info *mii_if = &tp->mii_if;
+	void __iomem *ioaddr = tp->mmio_addr;
+	u16 status;
+
+	phy_home = phy_default = phy_lan = NULL;
+
+	list_for_each_entry(phy, &tp->first_phy, list) {
+		status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);
+
+		// Link up, no default PHY selected yet and not a ghost PHY.
+		if ((status & BMSR_LSTATUS) &&
+		    !phy_default &&
+		    (phy->type != UNKNOWN)) {
+			phy_default = phy;
+		} else {
+			status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
+			mdio_write(ioaddr, phy->phy_id, MII_BMCR,
+				   status | BMCR_ANENABLE | BMCR_ISOLATE);
+			if (phy->type == HOME)
+				phy_home = phy;
+			else if (phy->type == LAN)
+				phy_lan = phy;
+		}
+	}
+
+	if (!phy_default) {
+		if (phy_home)
+			phy_default = phy_home;
+		else if (phy_lan)
+			phy_default = phy_lan;
+		else
+			phy_default = list_entry(&tp->first_phy,
+						 struct sis190_phy, list);
+	}
+
+	if (mii_if->phy_id != phy_default->phy_id) {
+		mii_if->phy_id = phy_default->phy_id;
+		net_probe(tp, KERN_INFO
+		       "%s: Using transceiver at address %d as default.\n",
+		       dev->name, mii_if->phy_id);
+	}
+
+	status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
+	status &= (~BMCR_ISOLATE);
+
+	mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
+	status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);
+
+	return status;
+}
+
+static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
+			    struct sis190_phy *phy, unsigned int phy_id,
+			    u16 mii_status)
+{
+	void __iomem *ioaddr = tp->mmio_addr;
+	struct mii_chip_info *p;
+
+	INIT_LIST_HEAD(&phy->list);
+	phy->status = mii_status;
+	phy->phy_id = phy_id;
+
+	phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
+	phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);
+
+	for (p = mii_chip_table; p->type; p++) {
+		if ((p->id[0] == phy->id[0]) &&
+		    (p->id[1] == (phy->id[1] & 0xfff0))) {
+			break;
+		}
+	}
+
+	if (p->id[1]) {
+		phy->type = (p->type == MIX) ?
+			((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
+				LAN : HOME) : p->type;
+	} else
+		phy->type = UNKNOWN;
+
+	net_probe(tp, KERN_INFO "%s: %s transceiver at address %d.\n",
+		  dev->name, (phy->type == UNKNOWN) ? "Unknown PHY" : p->name,
+		  phy_id);
+}
+
+/**
+ *	sis190_mii_probe - Probe MII PHY for sis190
+ *	@dev: the net device to probe for
+ *
+ *	Search all 32 possible MII PHY addresses.
+ *	Identify and set the current PHY if one is found,
+ *	otherwise return an error.
+ */
+static int __devinit sis190_mii_probe(struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+	struct mii_if_info *mii_if = &tp->mii_if;
+	void __iomem *ioaddr = tp->mmio_addr;
+	int phy_id;
+	int rc = 0;
+
+	INIT_LIST_HEAD(&tp->first_phy);
+
+	for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
+		struct sis190_phy *phy;
+		u16 status;
+
+		status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
+
+		// Try next mii if the current one is not accessible.
+		if (status == 0xffff || status == 0x0000)
+			continue;
+
+		phy = kmalloc(sizeof(*phy), GFP_KERNEL);
+		if (!phy) {
+			sis190_free_phy(&tp->first_phy);
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		sis190_init_phy(dev, tp, phy, phy_id, status);
+
+		list_add(&tp->first_phy, &phy->list);
+	}
+
+	if (list_empty(&tp->first_phy)) {
+		net_probe(tp, KERN_INFO "%s: No MII transceivers found!\n",
+			  dev->name);
+		rc = -EIO;
+		goto out;
+	}
+
+	/* Select default PHY for mac */
+	sis190_default_phy(dev);
+
+	mii_if->dev = dev;
+	mii_if->mdio_read = __mdio_read;
+	mii_if->mdio_write = __mdio_write;
+	mii_if->phy_id_mask = PHY_ID_ANY;
+	mii_if->reg_num_mask = MII_REG_ANY;
+out:
+	return rc;
+}
+
+static void __devexit sis190_mii_remove(struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+
+	sis190_free_phy(&tp->first_phy);
+}
+
+static void sis190_release_board(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct sis190_private *tp = netdev_priv(dev);
+
+	iounmap(tp->mmio_addr);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	free_netdev(dev);
+}
+
+static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
+{
+	struct sis190_private *tp;
+	struct net_device *dev;
+	void __iomem *ioaddr;
+	int rc;
+
+	dev = alloc_etherdev(sizeof(*tp));
+	if (!dev) {
+		net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
+		rc = -ENOMEM;
+		goto err_out_0;
+	}
+
+	SET_MODULE_OWNER(dev);
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	tp = netdev_priv(dev);
+	tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
+
+	rc = pci_enable_device(pdev);
+	if (rc < 0) {
+		net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
+		goto err_free_dev_1;
+	}
+
+	rc = -ENODEV;
+
+	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+		net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n",
+			  pci_name(pdev));
+		goto err_pci_disable_2;
+	}
+	if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
+		net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
+			  pci_name(pdev));
+		goto err_pci_disable_2;
+	}
+
+	rc = pci_request_regions(pdev, DRV_NAME);
+	if (rc < 0) {
+		net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
+			  pci_name(pdev));
+		goto err_pci_disable_2;
+	}
+
+	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+	if (rc < 0) {
+		net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
+			  pci_name(pdev));
+		goto err_free_res_3;
+	}
+
+	pci_set_master(pdev);
+
+	ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
+	if (!ioaddr) {
+		net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
+			  pci_name(pdev));
+		rc = -EIO;
+		goto err_free_res_3;
+	}
+
+	tp->pci_dev = pdev;
+	tp->mmio_addr = ioaddr;
+
+	sis190_irq_mask_and_ack(ioaddr);
+
+	sis190_soft_reset(ioaddr);
+out:
+	return dev;
+
+err_free_res_3:
+	pci_release_regions(pdev);
+err_pci_disable_2:
+	pci_disable_device(pdev);
+err_free_dev_1:
+	free_netdev(dev);
+err_out_0:
+	dev = ERR_PTR(rc);
+	goto out;
+}
+
+static void sis190_tx_timeout(struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+	void __iomem *ioaddr = tp->mmio_addr;
+	u8 tmp8;
+
+	/* Disable Tx, if not already */
+	tmp8 = SIS_R8(TxControl);
+	if (tmp8 & CmdTxEnb)
+		SIS_W8(TxControl, tmp8 & ~CmdTxEnb);
+
+
+	net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n",
+		   dev->name, SIS_R32(TxControl), SIS_R32(TxSts));
+
+	/* Disable interrupts by clearing the interrupt mask. */
+	SIS_W32(IntrMask, 0x0000);
+
+	/* Stop a shared interrupt from scavenging while we clean up. */
+	spin_lock_irq(&tp->lock);
+	sis190_tx_clear(tp);
+	spin_unlock_irq(&tp->lock);
+
+	/* ...and finally, reset everything. */
+	sis190_hw_start(dev);
+
+	netif_wake_queue(dev);
+}
+
+static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
+						     struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+	void __iomem *ioaddr = tp->mmio_addr;
+	u16 sig;
+	int i;
+
+	net_probe(tp, KERN_INFO "%s: Read MAC address from EEPROM\n",
+		  pci_name(pdev));
+
+	/* Check to see if there is a sane EEPROM */
+	sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);
+
+	if ((sig == 0xffff) || (sig == 0x0000)) {
+		net_probe(tp, KERN_INFO "%s: Error EEPROM read %x.\n",
+			  pci_name(pdev), sig);
+		return -EIO;
+	}
+
+	/* Get MAC address from EEPROM */
+	for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
+		__le16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
+
+		((u16 *)dev->dev_addr)[i] = le16_to_cpu(w);
+	}
+
+	return 0;
+}
+
+/**
+ *	sis190_get_mac_addr_from_apc - Get MAC address for SiS965 model
+ *	@pdev: PCI device
+ *	@dev:  network device to get address for
+ *
+ *	The SiS965 model stores the MAC address in APC CMOS RAM, which is
+ *	accessed through the ISA bridge.
+ *	The MAC address is read into @dev->dev_addr.
+ */
+static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
+						  struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+	struct pci_dev *isa_bridge;
+	u8 reg, tmp8;
+	int i;
+
+	net_probe(tp, KERN_INFO "%s: Read MAC address from APC.\n",
+		  pci_name(pdev));
+
+	isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, 0x0965, NULL);
+	if (!isa_bridge) {
+		net_probe(tp, KERN_INFO "%s: Can not find ISA bridge.\n",
+			  pci_name(pdev));
+		return -EIO;
+	}
+
+	/* Enable port 78h & 79h to access APC Registers. */
+	pci_read_config_byte(isa_bridge, 0x48, &tmp8);
+	reg = (tmp8 & ~0x02);
+	pci_write_config_byte(isa_bridge, 0x48, reg);
+	udelay(50);
+	pci_read_config_byte(isa_bridge, 0x48, &reg);
+
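+	/* Port 0x78 selects the APC register index and port 0x79 returns its
+	 * data; the MAC address starts at APC register 0x09.
+	 */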
+	for (i = 0; i < MAC_ADDR_LEN; i++) {
+		outb(0x9 + i, 0x78);
+		dev->dev_addr[i] = inb(0x79);
+	}
+
+	outb(0x12, 0x78);
+	reg = inb(0x79);
+
+	/* Restore the value to ISA Bridge */
+	pci_write_config_byte(isa_bridge, 0x48, tmp8);
+	pci_dev_put(isa_bridge);
+
+	return 0;
+}
+
+/**
+ *      sis190_init_rxfilter - Initialize the Rx filter
+ *      @dev: network device to initialize
+ *
+ *      Set receive filter address to our MAC address
+ *      and enable packet filtering.
+ */
+static inline void sis190_init_rxfilter(struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+	void __iomem *ioaddr = tp->mmio_addr;
+	u16 ctl;
+	int i;
+
+	ctl = SIS_R16(RxMacControl);
+	/*
+	 * Disable packet filtering before setting filter.
+	 * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits
+	 * only and followed by RxMacAddr (6 bytes). Strange. -- FR
+	 */
+	SIS_W16(RxMacControl, ctl & ~0x0f00);
+
+	for (i = 0; i < MAC_ADDR_LEN; i++)
+		SIS_W8(RxMacAddr + i, dev->dev_addr[i]);
+
+	SIS_W16(RxMacControl, ctl);
+	SIS_PCI_COMMIT();
+}
+
+static int sis190_get_mac_addr(struct pci_dev *pdev, struct net_device *dev)
+{
+	u8 from;
+
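+	/* Bit 0 of PCI config register 0x73 indicates whether the MAC address
+	 * lives in the APC CMOS RAM (1) or in the EEPROM (0).
+	 */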
+	pci_read_config_byte(pdev, 0x73, &from);
+
+	return (from & 0x00000001) ?
+		sis190_get_mac_addr_from_apc(pdev, dev) :
+		sis190_get_mac_addr_from_eeprom(pdev, dev);
+}
+
+static void sis190_set_speed_auto(struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+	void __iomem *ioaddr = tp->mmio_addr;
+	int phy_id = tp->mii_if.phy_id;
+	int val;
+
+	net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);
+
+	val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
+
+	// Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
+	// unchanged.
+	mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
+		   ADVERTISE_100FULL | ADVERTISE_10FULL |
+		   ADVERTISE_100HALF | ADVERTISE_10HALF);
+
+	// Enable 1000 Full Mode.
+	mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);
+
+	// Enable auto-negotiation and restart auto-negotiation.
+	mdio_write(ioaddr, phy_id, MII_BMCR,
+		   BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
+}
+
+static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+
+	return mii_ethtool_gset(&tp->mii_if, cmd);
+}
+
+static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+
+	return mii_ethtool_sset(&tp->mii_if, cmd);
+}
+
+static void sis190_get_drvinfo(struct net_device *dev,
+			       struct ethtool_drvinfo *info)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+
+	strcpy(info->driver, DRV_NAME);
+	strcpy(info->version, DRV_VERSION);
+	strcpy(info->bus_info, pci_name(tp->pci_dev));
+}
+
+static int sis190_get_regs_len(struct net_device *dev)
+{
+	return SIS190_REGS_SIZE;
+}
+
+static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+			    void *p)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+	unsigned long flags;
+
+	if (regs->len > SIS190_REGS_SIZE)
+		regs->len = SIS190_REGS_SIZE;
+
+	spin_lock_irqsave(&tp->lock, flags);
+	memcpy_fromio(p, tp->mmio_addr, regs->len);
+	spin_unlock_irqrestore(&tp->lock, flags);
+}
+
+static int sis190_nway_reset(struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+
+	return mii_nway_restart(&tp->mii_if);
+}
+
+static u32 sis190_get_msglevel(struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+
+	return tp->msg_enable;
+}
+
+static void sis190_set_msglevel(struct net_device *dev, u32 value)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+
+	tp->msg_enable = value;
+}
+
+static struct ethtool_ops sis190_ethtool_ops = {
+	.get_settings	= sis190_get_settings,
+	.set_settings	= sis190_set_settings,
+	.get_drvinfo	= sis190_get_drvinfo,
+	.get_regs_len	= sis190_get_regs_len,
+	.get_regs	= sis190_get_regs,
+	.get_link	= ethtool_op_get_link,
+	.get_msglevel	= sis190_get_msglevel,
+	.set_msglevel	= sis190_set_msglevel,
+	.nway_reset	= sis190_nway_reset,
+};
+
+static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+
+	return !netif_running(dev) ? -EINVAL :
+		generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
+}
+
+static int __devinit sis190_init_one(struct pci_dev *pdev,
+				     const struct pci_device_id *ent)
+{
+	static int printed_version = 0;
+	struct sis190_private *tp;
+	struct net_device *dev;
+	void __iomem *ioaddr;
+	int rc;
+
+	if (!printed_version) {
+		net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
+		printed_version = 1;
+	}
+
+	dev = sis190_init_board(pdev);
+	if (IS_ERR(dev)) {
+		rc = PTR_ERR(dev);
+		goto out;
+	}
+
+	tp = netdev_priv(dev);
+	ioaddr = tp->mmio_addr;
+
+	rc = sis190_get_mac_addr(pdev, dev);
+	if (rc < 0)
+		goto err_release_board;
+
+	sis190_init_rxfilter(dev);
+
+	INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
+
+	dev->open = sis190_open;
+	dev->stop = sis190_close;
+	dev->do_ioctl = sis190_ioctl;
+	dev->get_stats = sis190_get_stats;
+	dev->tx_timeout = sis190_tx_timeout;
+	dev->watchdog_timeo = SIS190_TX_TIMEOUT;
+	dev->hard_start_xmit = sis190_start_xmit;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	dev->poll_controller = sis190_netpoll;
+#endif
+	dev->set_multicast_list = sis190_set_rx_mode;
+	SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
+	dev->irq = pdev->irq;
+	dev->base_addr = (unsigned long) 0xdead;
+
+	spin_lock_init(&tp->lock);
+	rc = register_netdev(dev);
+	if (rc < 0)
+		goto err_release_board;
+
+	pci_set_drvdata(pdev, dev);
+
+	rc = sis190_mii_probe(dev);
+	if (rc < 0)
+		goto err_unregister_dev;
+
+	net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), "
+	       "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
+	       pci_name(pdev), sis_chip_info[ent->driver_data].name,
+	       ioaddr, dev->irq,
+	       dev->dev_addr[0], dev->dev_addr[1],
+	       dev->dev_addr[2], dev->dev_addr[3],
+	       dev->dev_addr[4], dev->dev_addr[5]);
+
+	netif_carrier_off(dev);
+
+	sis190_set_speed_auto(dev);
+out:
+	return rc;
+
+err_unregister_dev:
+	unregister_netdev(dev);
+err_release_board:
+	sis190_release_board(pdev);
+	goto out;
+}
+
+static void __devexit sis190_remove_one(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+
+	sis190_mii_remove(dev);
+	unregister_netdev(dev);
+	sis190_release_board(pdev);
+	pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_driver sis190_pci_driver = {
+	.name		= DRV_NAME,
+	.id_table	= sis190_pci_tbl,
+	.probe		= sis190_init_one,
+	.remove		= __devexit_p(sis190_remove_one),
+};
+
+static int __init sis190_init_module(void)
+{
+	return pci_module_init(&sis190_pci_driver);
+}
+
+static void __exit sis190_cleanup_module(void)
+{
+	pci_unregister_driver(&sis190_pci_driver);
+}
+
+module_init(sis190_init_module);
+module_exit(sis190_cleanup_module);
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -42,7 +42,7 @@
 #include "skge.h"
 
 #define DRV_NAME		"skge"
-#define DRV_VERSION		"0.8"
+#define DRV_VERSION		"0.9"
 #define PFX			DRV_NAME " "
 
 #define DEFAULT_TX_RING_SIZE	128
@@ -189,7 +189,7 @@ static u32 skge_supported_modes(const st
 {
 	u32 supported;
 
-	if (iscopper(hw)) {
+	if (hw->copper) {
 		supported = SUPPORTED_10baseT_Half
 			| SUPPORTED_10baseT_Full
 			| SUPPORTED_100baseT_Half
@@ -222,7 +222,7 @@ static int skge_get_settings(struct net_
 	ecmd->transceiver = XCVR_INTERNAL;
 	ecmd->supported = skge_supported_modes(hw);
 
-	if (iscopper(hw)) {
+	if (hw->copper) {
 		ecmd->port = PORT_TP;
 		ecmd->phy_address = hw->phy_addr;
 	} else
@@ -876,6 +876,9 @@ static int skge_rx_fill(struct skge_port
 
 static void skge_link_up(struct skge_port *skge)
 {
+	skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), 
+		    LED_BLK_OFF|LED_SYNC_OFF|LED_ON);
+
 	netif_carrier_on(skge->netdev);
 	if (skge->tx_avail > MAX_SKB_FRAGS + 1)
 		netif_wake_queue(skge->netdev);
@@ -894,6 +897,7 @@ static void skge_link_up(struct skge_por
 
 static void skge_link_down(struct skge_port *skge)
 {
+	skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF);
 	netif_carrier_off(skge->netdev);
 	netif_stop_queue(skge->netdev);
 
@@ -1599,7 +1603,7 @@ static void yukon_init(struct skge_hw *h
 	adv = PHY_AN_CSMA;
 
 	if (skge->autoneg == AUTONEG_ENABLE) {
-		if (iscopper(hw)) {
+		if (hw->copper) {
 			if (skge->advertising & ADVERTISED_1000baseT_Full)
 				ct1000 |= PHY_M_1000C_AFD;
 			if (skge->advertising & ADVERTISED_1000baseT_Half)
@@ -1691,7 +1695,7 @@ static void yukon_mac_init(struct skge_h
 	/* Set hardware config mode */
 	reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP |
 		GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE;
-	reg |= iscopper(hw) ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB;
+	reg |= hw->copper ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB;
 
 	/* Clear GMC reset */
 	skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET);
@@ -1780,7 +1784,12 @@ static void yukon_mac_init(struct skge_h
 		reg &= ~GMF_RX_F_FL_ON;
 	skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
 	skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg);
-	skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF);
+	/*
+	 * Because Pause Packet Truncation in the GMAC is not working,
+	 * we have to increase the Flush Threshold to 64 bytes
+	 * in order to flush pause packets from the Rx FIFO on Yukon-1.
+	 */
+	skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1);
 
 	/* Configure Tx MAC FIFO */
 	skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
@@ -2670,18 +2679,6 @@ static void skge_error_irq(struct skge_h
 		/* Timestamp (unused) overflow */
 		if (hwstatus & IS_IRQ_TIST_OV)
 			skge_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
-
-		if (hwstatus & IS_IRQ_SENSOR) {
-			/* no sensors on 32-bit Yukon */
-			if (!(skge_read16(hw, B0_CTST) & CS_BUS_SLOT_SZ)) {
-				printk(KERN_ERR PFX "ignoring bogus sensor interrups\n");
-				skge_write32(hw, B0_HWE_IMSK,
-					     IS_ERR_MSK & ~IS_IRQ_SENSOR);
-			} else
-				printk(KERN_WARNING PFX "sensor interrupt\n");
-		}
-
-
 	}
 
 	if (hwstatus & IS_RAM_RD_PAR) {
@@ -2712,9 +2709,10 @@ static void skge_error_irq(struct skge_h
 
 		skge_pci_clear(hw);
 
+		/* if error still set then just ignore it */
 		hwstatus = skge_read32(hw, B0_HWE_ISRC);
 		if (hwstatus & IS_IRQ_STAT) {
-			printk(KERN_WARNING PFX "IRQ status %x: still set ignoring hardware errors\n",
+			pr_debug("IRQ status %x: still set ignoring hardware errors\n",
 			       hwstatus);
 			hw->intr_mask &= ~IS_HW_ERR;
 		}
@@ -2876,7 +2874,7 @@ static const char *skge_board_name(const
 static int skge_reset(struct skge_hw *hw)
 {
 	u16 ctst;
-	u8 t8, mac_cfg;
+	u8 t8, mac_cfg, pmd_type, phy_type;
 	int i;
 
 	ctst = skge_read16(hw, B0_CTST);
@@ -2895,18 +2893,19 @@ static int skge_reset(struct skge_hw *hw
 		     ctst & (CS_CLK_RUN_HOT|CS_CLK_RUN_RST|CS_CLK_RUN_ENA));
 
 	hw->chip_id = skge_read8(hw, B2_CHIP_ID);
-	hw->phy_type = skge_read8(hw, B2_E_1) & 0xf;
-	hw->pmd_type = skge_read8(hw, B2_PMD_TYP);
+	phy_type = skge_read8(hw, B2_E_1) & 0xf;
+	pmd_type = skge_read8(hw, B2_PMD_TYP);
+	hw->copper = (pmd_type == 'T' || pmd_type == '1');
 
 	switch (hw->chip_id) {
 	case CHIP_ID_GENESIS:
-		switch (hw->phy_type) {
+		switch (phy_type) {
 		case SK_PHY_BCOM:
 			hw->phy_addr = PHY_ADDR_BCOM;
 			break;
 		default:
 			printk(KERN_ERR PFX "%s: unsupported phy type 0x%x\n",
-			       pci_name(hw->pdev), hw->phy_type);
+			       pci_name(hw->pdev), phy_type);
 			return -EOPNOTSUPP;
 		}
 		break;
@@ -2914,13 +2913,10 @@ static int skge_reset(struct skge_hw *hw
 	case CHIP_ID_YUKON:
 	case CHIP_ID_YUKON_LITE:
 	case CHIP_ID_YUKON_LP:
-		if (hw->phy_type < SK_PHY_MARV_COPPER && hw->pmd_type != 'S')
-			hw->phy_type = SK_PHY_MARV_COPPER;
+		if (phy_type < SK_PHY_MARV_COPPER && pmd_type != 'S')
+			hw->copper = 1;
 
 		hw->phy_addr = PHY_ADDR_MARV;
-		if (!iscopper(hw))
-			hw->phy_type = SK_PHY_MARV_FIBER;
-
 		break;
 
 	default:
@@ -2948,12 +2944,20 @@ static int skge_reset(struct skge_hw *hw
 	else
 		hw->ram_size = t8 * 4096;
 
+	hw->intr_mask = IS_HW_ERR | IS_EXT_REG;
 	if (hw->chip_id == CHIP_ID_GENESIS)
 		genesis_init(hw);
 	else {
 		/* switch power to VCC (WA for VAUX problem) */
 		skge_write8(hw, B0_POWER_CTRL,
 			    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
+		/* avoid boards with stuck Hardware error bits */
+		if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) &&
+		    (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) {
+			printk(KERN_WARNING PFX "stuck hardware sensor bit\n");
+			hw->intr_mask &= ~IS_HW_ERR;
+		}
+
 		for (i = 0; i < hw->ports; i++) {
 			skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
 			skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
@@ -2994,7 +2998,6 @@ static int skge_reset(struct skge_hw *hw
 	skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100));
 	skge_write32(hw, B2_IRQM_CTRL, TIM_START);
 
-	hw->intr_mask = IS_HW_ERR | IS_EXT_REG;
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
 
 	if (hw->chip_id != CHIP_ID_GENESIS)
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -214,8 +214,6 @@ enum {
 
 /*	B2_IRQM_HWE_MSK	32 bit	IRQ Moderation HW Error Mask */
 enum {
-	IS_ERR_MSK	= 0x00003fff,/* 		All Error bits */
-
 	IS_IRQ_TIST_OV	= 1<<13, /* Time Stamp Timer Overflow (YUKON only) */
 	IS_IRQ_SENSOR	= 1<<12, /* IRQ from Sensor (YUKON only) */
 	IS_IRQ_MST_ERR	= 1<<11, /* IRQ master error detected */
@@ -230,6 +228,12 @@ enum {
 	IS_M2_PAR_ERR	= 1<<2,	/* MAC 2 Parity Error */
 	IS_R1_PAR_ERR	= 1<<1,	/* Queue R1 Parity Error */
 	IS_R2_PAR_ERR	= 1<<0,	/* Queue R2 Parity Error */
+
+	IS_ERR_MSK	= IS_IRQ_MST_ERR | IS_IRQ_STAT
+			| IS_NO_STAT_M1 | IS_NO_STAT_M2
+			| IS_RAM_RD_PAR | IS_RAM_WR_PAR
+			| IS_M1_PAR_ERR | IS_M2_PAR_ERR
+			| IS_R1_PAR_ERR | IS_R2_PAR_ERR,
 };
 
 /*	B2_TST_CTRL1	 8 bit	Test Control Register 1 */
@@ -2456,24 +2460,17 @@ struct skge_hw {
 
 	u8	     	     chip_id;
 	u8		     chip_rev;
-	u8		     phy_type;
-	u8		     pmd_type;
-	u16		     phy_addr;
+	u8		     copper;
 	u8		     ports;
 
 	u32	     	     ram_size;
 	u32	     	     ram_offset;
+	u16		     phy_addr;
 
 	struct tasklet_struct ext_tasklet;
 	spinlock_t	     phy_lock;
 };
 
-
-static inline int iscopper(const struct skge_hw *hw)
-{
-	return (hw->pmd_type == 'T');
-}
-
 enum {
 	FLOW_MODE_NONE 		= 0, /* No Flow-Control */
 	FLOW_MODE_LOC_SEND	= 1, /* Local station sends PAUSE */
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
new file mode 100644
--- /dev/null
+++ b/drivers/net/sky2.c
@@ -0,0 +1,2686 @@
+/*
+ * New driver for Marvell Yukon 2 chipset.
+ * Based on earlier sk98lin, and skge driver.
+ *
+ * This driver intentionally does not support all the features
+ * of the original driver such as link fail-over and link management because
+ * those should be done at higher levels.
+ *
+ * Copyright (C) 2005 Stephen Hemminger <shemminger@osdl.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * TODO
+ *	- coalescing setting?
+ *	- variable ring size?
+ *
+ * TOTEST
+ *	- speed setting
+ *	- power management
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/pci.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/in.h>
+#include <linux/delay.h>
+#include <linux/crc32.h>
+
+#include <asm/irq.h>
+
+#include "sky2.h"
+
+#define DRV_NAME		"sky2"
+#define DRV_VERSION		"0.2"
+#define PFX			DRV_NAME " "
+
+/*
+ * The Yukon II chipset takes 64 bit command blocks (called list elements)
+ * that are organized into three (receive, transmit, status) different rings
+ * similar to Tigon3. A transmit can require several elements;
+ * a receive requires one (or two if using 64 bit dma).
+ */
+
+#ifdef CONFIG_SKY2_EC_A1
+#define is_ec_a1(hw) \
+	((hw)->chip_id == CHIP_ID_YUKON_EC && \
+	 (hw)->chip_rev == CHIP_REV_YU_EC_A1)
+#else
+#define is_ec_a1(hw)	0
+#endif
+
+#define RX_LE_SIZE		256
+#define MIN_RX_BUFFERS		8
+#define MAX_RX_BUFFERS		124
+#define RX_LE_BYTES		(RX_LE_SIZE*sizeof(struct sky2_rx_le))
+
+#define TX_RING_SIZE		256	// min 64 max 4096
+#define STATUS_RING_SIZE	1024	// pow2 > (2*Rx + Tx)
+#define STATUS_LE_BYTES		(STATUS_RING_SIZE*sizeof(struct sky2_status_le))
+#define ETH_JUMBO_MTU		9000
+#define TX_WATCHDOG		(5 * HZ)
+#define NAPI_WEIGHT		64
+#define PHY_RETRIES		1000
+
+static const u32 default_msg =
+	NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
+	| NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
+	| NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
+
+static int debug = -1;	/* defaults above */
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+static const struct pci_device_id sky2_id_table[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4343) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4344) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4345) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4346) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4347) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4350) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4351) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4360) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) },
+	{ 0 }
+};
+MODULE_DEVICE_TABLE(pci, sky2_id_table);
+
+/* Avoid conditionals by using array */
+static const unsigned txqaddr[] = { Q_XA1, Q_XA2 };
+static const unsigned rxqaddr[] = { Q_R1, Q_R2 };
+
+static inline const char *chip_name(u8 chip_id)
+{
+	switch (chip_id) {
+	case CHIP_ID_GENESIS:
+		return "Genesis";
+	case CHIP_ID_YUKON:
+		return "Yukon";
+	case CHIP_ID_YUKON_LITE:
+		return "Yukon-Lite";
+	case CHIP_ID_YUKON_LP:
+		return  "Yukon-LP";
+	case CHIP_ID_YUKON_XL:
+		return "Yukon-XL";
+	case CHIP_ID_YUKON_EC:
+		return "Yukon-EC";
+	case CHIP_ID_YUKON_FE:
+		return "Yukon-FE";
+	default:
+		return "???";
+	}
+}
+
+static void gm_phy_write(struct sky2_hw *hw, unsigned port, u16 reg, u16 val)
+{
+	int i;
+
+	gma_write16(hw, port, GM_SMI_DATA, val);
+	gma_write16(hw, port, GM_SMI_CTRL,
+		    GM_SMI_CT_PHY_AD(PHY_ADDR_MARV) | GM_SMI_CT_REG_AD(reg));
+
+	for (i = 0; i < PHY_RETRIES; i++) {
+		udelay(1);
+
+		if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY))
+			break;
+	}
+}
+
+static u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
+{
+	int i;
+
+	gma_write16(hw, port, GM_SMI_CTRL,
+		    GM_SMI_CT_PHY_AD(PHY_ADDR_MARV)
+		    | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
+
+	for (i = 0; i < PHY_RETRIES; i++) {
+		udelay(1);
+		if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL)
+			goto ready;
+	}
+
+	printk(KERN_WARNING PFX "%s: phy read timeout\n",
+	       hw->dev[port]->name);
+ ready:
+	return gma_read16(hw, port, GM_SMI_DATA);
+}
+
+static void sky2_phy_reset(struct sky2_hw *hw, unsigned port)
+{
+	u16 reg;
+
+	/* disable all GMAC IRQ's */
+	sky2_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
+	/* disable PHY IRQs */
+	gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
+	gma_write16(hw, port, GM_MC_ADDR_H1, 0);	/* clear MC hash */
+	gma_write16(hw, port, GM_MC_ADDR_H2, 0);
+	gma_write16(hw, port, GM_MC_ADDR_H3, 0);
+	gma_write16(hw, port, GM_MC_ADDR_H4, 0);
+
+	reg = gma_read16(hw, port, GM_RX_CTRL);
+	reg |= GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA;
+	gma_write16(hw, port, GM_RX_CTRL, reg);
+}
+
+static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
+{
+	struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
+	u16 ctrl, ct1000, adv;
+	u16 ledctrl, ledover;
+
+	pr_debug("phy reset autoneg=%s advertising=0x%x pause rx=%s tx=%s\n",
+		 sky2->autoneg == AUTONEG_ENABLE ? "enable" : "disable",
+		 sky2->advertising,
+		 sky2->rx_pause ? "on" : "off",
+		 sky2->tx_pause ? "on" : "off");
+
+	if (sky2->autoneg == AUTONEG_ENABLE &&
+	    hw->chip_id != CHIP_ID_YUKON_XL) {
+		u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
+
+		ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
+			  PHY_M_EC_MAC_S_MSK);
+		ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);
+
+		if (hw->chip_id == CHIP_ID_YUKON_EC)
+			ectrl |= PHY_M_EC_DSC_2(2) | PHY_M_EC_DOWN_S_ENA;
+		else
+			ectrl |= PHY_M_EC_M_DSC(2) | PHY_M_EC_S_DSC(3);
+
+		gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
+	}
+
+	ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
+	if (hw->copper) {
+		if (hw->chip_id == CHIP_ID_YUKON_FE) {
+			/* enable automatic crossover */
+			ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO) >> 1;
+		} else {
+			/* disable energy detect */
+			ctrl &= ~PHY_M_PC_EN_DET_MSK;
+
+			/* enable automatic crossover */
+			ctrl |= PHY_M_PC_MDI_XMODE(PHY_M_PC_ENA_AUTO);
+
+			if (sky2->autoneg == AUTONEG_ENABLE &&
+			    hw->chip_id == CHIP_ID_YUKON_XL) {
+				ctrl &= ~PHY_M_PC_DSC_MSK;
+				ctrl |= PHY_M_PC_DSC(2) | PHY_M_PC_DOWN_S_ENA;
+			}
+		}
+		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
+	} else {
+		/* workaround for deviation #4.88 (CRC errors) */
+		/* disable Automatic Crossover */
+
+		ctrl &= ~PHY_M_PC_MDIX_MSK;
+		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
+
+		if (hw->chip_id == CHIP_ID_YUKON_XL) {
+			/* Fiber: select 1000BASE-X only mode MAC Specific Ctrl Reg. */
+			gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 2);
+			ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
+			ctrl &= ~PHY_M_MAC_MD_MSK;
+			ctrl |= PHY_M_MAC_MODE_SEL(PHY_M_MAC_MD_1000BX);
+			gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
+
+			/* select page 1 to access Fiber registers */
+			gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 1);
+		}
+
+		ctrl &= ~(PHY_M_PC_MDIX_MSK | PHY_M_MAC_MD_MSK);
+		ctrl |= PHY_M_MAC_MODE_SEL(PHY_M_MAC_MD_1000BX);
+	}
+
+	ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
+	if (sky2->autoneg == AUTONEG_DISABLE)
+		ctrl &= ~PHY_CT_ANE;
+	else
+		ctrl |= PHY_CT_ANE;
+
+	ctrl |= PHY_CT_RESET;
+	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
+
+	ctrl = 0;
+	ct1000 = 0;
+	adv = PHY_AN_CSMA;
+
+	if (sky2->autoneg == AUTONEG_ENABLE) {
+		if (hw->copper) {
+			if (sky2->advertising & ADVERTISED_1000baseT_Full)
+				ct1000 |= PHY_M_1000C_AFD;
+			if (sky2->advertising & ADVERTISED_1000baseT_Half)
+				ct1000 |= PHY_M_1000C_AHD;
+			if (sky2->advertising & ADVERTISED_100baseT_Full)
+				adv |= PHY_M_AN_100_FD;
+			if (sky2->advertising & ADVERTISED_100baseT_Half)
+				adv |= PHY_M_AN_100_HD;
+			if (sky2->advertising & ADVERTISED_10baseT_Full)
+				adv |= PHY_M_AN_10_FD;
+			if (sky2->advertising & ADVERTISED_10baseT_Half)
+				adv |= PHY_M_AN_10_HD;
+		} else	/* special defines for FIBER (88E1011S only) */
+			adv |= PHY_M_AN_1000X_AHD | PHY_M_AN_1000X_AFD;
+
+		/* Set Flow-control capabilities */
+		if (sky2->tx_pause && sky2->rx_pause)
+			adv |= PHY_AN_PAUSE_CAP;		/* symmetric */
+		else if (sky2->rx_pause && !sky2->tx_pause)
+			adv |= PHY_AN_PAUSE_ASYM|PHY_AN_PAUSE_CAP;
+		else if (!sky2->rx_pause && sky2->tx_pause)
+			adv |= PHY_AN_PAUSE_ASYM;	/* local */
+
+		/* Restart Auto-negotiation */
+		ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
+	} else {
+		/* forced speed/duplex settings */
+		ct1000 = PHY_M_1000C_MSE;
+
+		if (sky2->duplex == DUPLEX_FULL)
+			ctrl |= PHY_CT_DUP_MD;
+
+		switch (sky2->speed) {
+		case SPEED_1000:
+			ctrl |= PHY_CT_SP1000;
+			break;
+		case SPEED_100:
+			ctrl |= PHY_CT_SP100;
+			break;
+		}
+
+		ctrl |= PHY_CT_RESET;
+	}
+
+	if (hw->chip_id != CHIP_ID_YUKON_FE)
+		gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);
+
+	gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
+	gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
+
+	/* Setup Phy LED's */
+	ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS);
+	ledover = 0;
+
+	switch (hw->chip_id) {
+	case CHIP_ID_YUKON_FE:
+		/* on 88E3082 these bits are at 11..9 (shifted left) */
+		ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) << 1;
+
+		ctrl = gm_phy_read(hw, port, PHY_MARV_FE_LED_PAR);
+
+		/* delete ACT LED control bits */
+		ctrl &= ~PHY_M_FELP_LED1_MSK;
+		/* change ACT LED control to blink mode */
+		ctrl |= PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT_BL);
+		gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR, ctrl);
+		break;
+
+	case CHIP_ID_YUKON_XL:
+		ctrl = gm_phy_read(hw, port, PHY_MARV_EXT_ADR);
+
+		/* select page 3 to access LED control register */
+		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 3);
+
+		/* set LED Function Control register */
+		gm_phy_write(hw, port, PHY_MARV_PHY_CTRL,
+			     (PHY_M_LEDC_LOS_CTRL(1) |		/* LINK/ACT */
+			      PHY_M_LEDC_INIT_CTRL(7) |		/* 10 Mbps */
+			      PHY_M_LEDC_STA1_CTRL(7) |		/* 100 Mbps */
+			      PHY_M_LEDC_STA0_CTRL(7)));		/* 1000 Mbps */
+
+		/* set Polarity Control register */
+		gm_phy_write(hw, port, PHY_MARV_PHY_STAT,
+			     (PHY_M_POLC_LS1_P_MIX(4) | PHY_M_POLC_IS0_P_MIX(4) |
+			      PHY_M_POLC_LOS_CTRL(2) | PHY_M_POLC_INIT_CTRL(2) |
+			      PHY_M_POLC_STA1_CTRL(2) | PHY_M_POLC_STA0_CTRL(2)));
+
+		/* restore page register */
+		gm_phy_write(hw, port, PHY_MARV_EXT_ADR, ctrl);
+		break;
+
+	default:
+		/* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
+		ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL;
+		/* turn off the Rx LED (LED_RX) */
+		ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
+	}
+
+	gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
+
+	if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) {
+		/* turn on 100 Mbps LED (LED_LINK100) */
+		ledover |= PHY_M_LED_MO_100(MO_LED_ON);
+	}
+
+	if (ledover)
+		gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
+
+	/* Enable phy interrupt on autonegotiation complete (or link up) */
+	if (sky2->autoneg == AUTONEG_ENABLE)
+		gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL);
+	else
+		gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
+}
+
+static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
+{
+	struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
+	u16 reg;
+	int i;
+	const u8 *addr = hw->dev[port]->dev_addr;
+
+	sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
+	sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_CLR);
+
+	sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
+
+	if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0
+	    && port == 1) {
+		/* WA DEV_472 -- looks like crossed wires on port 2 */
+		/* clear GMAC 1 Control reset */
+		sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR);
+		do {
+			sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_SET);
+			sky2_write8(hw, SK_REG(1, GMAC_CTRL), GMC_RST_CLR);
+		} while (gm_phy_read(hw, 1, PHY_MARV_ID0) != PHY_MARV_ID0_VAL ||
+			 gm_phy_read(hw, 1, PHY_MARV_ID1) != PHY_MARV_ID1_Y2 ||
+			 gm_phy_read(hw, 1, PHY_MARV_INT_MASK) != 0);
+	}
+
+
+	if (sky2->autoneg == AUTONEG_DISABLE) {
+		reg = gma_read16(hw, port, GM_GP_CTRL);
+		reg |= GM_GPCR_AU_ALL_DIS;
+		gma_write16(hw, port, GM_GP_CTRL, reg);
+		gma_read16(hw, port, GM_GP_CTRL);
+
+
+		switch (sky2->speed) {
+		case SPEED_1000:
+			reg |= GM_GPCR_SPEED_1000;
+			/* fallthru */
+		case SPEED_100:
+			reg |= GM_GPCR_SPEED_100;
+		}
+
+		if (sky2->duplex == DUPLEX_FULL)
+			reg |= GM_GPCR_DUP_FULL;
+	} else
+		reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;
+
+	if (!sky2->tx_pause && !sky2->rx_pause) {
+		sky2_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
+		reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
+	} else if (sky2->tx_pause && !sky2->rx_pause) {
+		/* disable Rx flow-control */
+		reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
+	}
+
+	gma_write16(hw, port, GM_GP_CTRL, reg);
+
+	sky2_read16(hw, GMAC_IRQ_SRC);
+
+	spin_lock_bh(&hw->phy_lock);
+	sky2_phy_init(hw, port);
+	spin_unlock_bh(&hw->phy_lock);
+
+	/* MIB clear */
+	reg = gma_read16(hw, port, GM_PHY_ADDR);
+	gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);
+
+	for (i = 0; i < GM_MIB_CNT_SIZE; i++)
+		gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i);
+	gma_write16(hw, port, GM_PHY_ADDR, reg);
+
+	/* transmit control */
+	gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
+
+	/* receive control reg: unicast + multicast + no FCS  */
+	gma_write16(hw, port, GM_RX_CTRL,
+			 GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);
+
+	/* transmit flow control */
+	gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);
+
+	/* transmit parameter */
+	gma_write16(hw, port, GM_TX_PARAM,
+		    TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
+		    TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
+		    TX_IPG_JAM_DATA(TX_IPG_JAM_DEF) |
+		    TX_BACK_OFF_LIM(TX_BOF_LIM_DEF));
+
+	/* serial mode register */
+	reg = DATA_BLIND_VAL(DATA_BLIND_DEF) |
+		GM_SMOD_VLAN_ENA | IPG_DATA_VAL(IPG_DATA_DEF);
+
+	if (hw->dev[port]->mtu > 1500)
+		reg |= GM_SMOD_JUMBO_ENA;
+
+	gma_write16(hw, port, GM_SERIAL_MODE, reg);
+
+	/* physical address: used for pause frames */
+	gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
+	/* virtual address for data */
+	gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);
+
+	/* enable interrupt mask for counter overflows */
+	gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
+	gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
+	gma_write16(hw, port, GM_TR_IRQ_MSK, 0);
+
+	/* Configure Rx MAC FIFO */
+	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
+	sky2_write16(hw, SK_REG(port, RX_GMF_CTRL_T), 
+		     GMF_OPER_ON | GMF_RX_F_FL_ON);
+
+	reg = RX_FF_FL_DEF_MSK;
+	if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev <= 1)
+		reg = 0;	/* WA Dev #4115 */
+
+	sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), reg);
+	/* Set threshold to 0xa (64 bytes) 
+	 *  ASF disabled so no need to do WA dev #4.30 
+	 */
+	sky2_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF);
+
+	/* Configure Tx MAC FIFO */
+	sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
+	sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
+
+	/* Turn off Rx fifo flush (per sk98lin) */
+	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RX_F_FL_OFF);
+}
+
+static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, size_t len)
+{
+	u32 end;
+
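+	/* RAM buffer start/end/pointer registers are in units of 8 bytes. */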
+	start /= 8;
+	len /= 8;
+	end = start + len - 1;
+	pr_debug("ramset q=%d start=0x%x end=0x%x\n", q, start, end);
+
+	sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
+	sky2_write32(hw, RB_ADDR(q, RB_START), start);
+	sky2_write32(hw, RB_ADDR(q, RB_END), end);
+	sky2_write32(hw, RB_ADDR(q, RB_WP), start);
+	sky2_write32(hw, RB_ADDR(q, RB_RP), start);
+
+	if (q == Q_R1 || q == Q_R2) {
+		/* Set thresholds on receive queues */
+		sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP),
+			     start + (2*len)/3);
+		sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP),
+			     start + (len/3));
+	} else {
+		/* Enable store & forward on Tx queues because
+		 * Tx FIFO is only 1K on Yukon
+		 */
+		sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
+	}
+
+	sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
+}
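+
+/*
+ * Illustration only, not used by the driver: sky2_ramset() converts the
+ * byte offsets to 8-byte units, so a hypothetical 64 KB receive partition
+ * starting at byte offset 0 would be programmed as start = 0,
+ * end = 64*1024/8 - 1 = 8191, with the upper/lower pause thresholds set
+ * at 2/3 and 1/3 of the partition respectively.
+ */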
+
+
+/* Setup Bus Memory Interface */
+static void sky2_qset(struct sky2_hw *hw, u16 q, u32 wm)
+{
+	sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_RESET);
+	sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_OPER_INIT);
+	sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_FIFO_OP_ON);
+	sky2_write32(hw, Q_ADDR(q, Q_WM), wm);
+}
+
+
+/* Setup prefetch unit registers. This is the interface between
+ * hardware and driver list elements
+ */
+static inline void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
+				      u64 addr, u32 last)
+{
+	pr_debug("sky2 prefetch init q=%x addr=%llx last=%x\n",
+		 Y2_QADDR(qaddr, 0), addr, last);
+
+	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
+	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_CLR);
+	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_HI), addr >> 32);
+	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_ADDR_LO), (u32) addr);
+	sky2_write16(hw, Y2_QADDR(qaddr, PREF_UNIT_LAST_IDX), last);
+	sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_OP_ON);
+}
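+
+/*
+ * For example (address purely illustrative): a list element base of
+ * 0x0000000123400000 would be split into PREF_UNIT_ADDR_HI = 0x00000001
+ * and PREF_UNIT_ADDR_LO = 0x23400000, with PREF_UNIT_LAST_IDX pointing
+ * at the final ring slot.
+ */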
+
+
+/*
+ * This is workaround code taken from the SysKonnect sk98lin driver
+ * to deal with a chip bug in the wraparound case.
+ */
+static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q,
+				u16 idx, u16 *last, u16 size)
+{
+	BUG_ON(idx >= size);
+
+	wmb();
+	if (is_ec_a1(hw) && idx < *last) {
+		u16 hwget = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
+
+		if (hwget == 0) {
+			/* Start prefetching again */
+			sky2_write8(hw, Y2_QADDR(q, PREF_UNIT_FIFO_WM),
+				    0xe0);
+			goto setnew;
+		}
+
+		if (hwget == size-1) {
+			/* set watermark to one list element */
+			sky2_write8(hw, Y2_QADDR(q, PREF_UNIT_FIFO_WM), 8);
+
+			/* set put index to first list element */
+			sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), 0);
+		} else 	/* have hardware go to end of list */
+			sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), size-1);
+	} else {
+	setnew:
+		sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
+		*last = idx;
+	}
+}
+
+static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
+{
+	struct sky2_rx_le *le = sky2->rx_le + sky2->rx_put;
+	sky2->rx_put = (sky2->rx_put + 1) % RX_LE_SIZE;
+	return le;
+}
+
+static inline void sky2_rx_add(struct sky2_port *sky2, dma_addr_t map, u16 len)
+{
+	struct sky2_rx_le *le;
+
+	if (sizeof(map) > sizeof(u32)) {
+		le = sky2_next_rx(sky2);
+		le->rx.addr = cpu_to_le32((u64) map >> 32);
+		le->ctrl = 0;
+		le->opcode = OP_ADDR64 | HW_OWNER;
+	}
+	
+	le = sky2_next_rx(sky2);
+	le->rx.addr = cpu_to_le32((u32) map);
+	le->length = cpu_to_le16(len);
+	le->ctrl = 0;
+	le->opcode = OP_PACKET | HW_OWNER;
+}
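+
+/*
+ * Example (illustrative mapping address, assuming a 64-bit dma_addr_t):
+ * every receive buffer then takes two list elements, e.g. for
+ * map == 0x0000000123456000:
+ *   element 0: OP_ADDR64, addr = 0x00000001 (high 32 bits)
+ *   element 1: OP_PACKET, addr = 0x23456000 (low 32 bits), length = len
+ */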
+
+/* Tell the chip where to start the receive checksum.
+ * The hardware actually has two checksum start registers; set both the
+ * same to avoid possible byte-order problems.
+ */
+static void sky2_rx_set_offset(struct sky2_port *sky2)
+{
+	struct sky2_rx_le *le;
+
+	sky2_write32(sky2->hw, 
+		     Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+		     sky2->rx_csum ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
+
+	le = sky2_next_rx(sky2);
+	le->rx.csum.start1 = ETH_HLEN;
+	le->rx.csum.start2 = ETH_HLEN;
+	le->ctrl = 0;
+	le->opcode = OP_TCPSTART | HW_OWNER;
+	wmb();
+	sky2_write16(sky2->hw, 
+		     Y2_QADDR(rxqaddr[sky2->port], PREF_UNIT_PUT_IDX),
+		     sky2->rx_put);
+
+}
+
+/* Cleanout receive buffer area, assumes receiver hardware stopped */
+static void sky2_rx_clean(struct sky2_port *sky2)
+{
+	unsigned i;
+
+	memset(sky2->rx_le, 0, RX_LE_BYTES);
+	for (i = 0; i < sky2->rx_ring_size; i++) {
+		struct ring_info *re = sky2->rx_ring + i;
+
+		if (re->skb) {
+			pci_unmap_single(sky2->hw->pdev, 
+					 pci_unmap_addr(re, mapaddr),
+					 pci_unmap_len(re, maplen),
+					 PCI_DMA_FROMDEVICE);
+			kfree_skb(re->skb);
+			re->skb = NULL;
+		}
+	}
+}
+
+static inline struct sk_buff *sky2_rx_alloc_skb(struct sky2_port *sky2,
+						unsigned int size, int gfp_mask)
+{
+	struct sk_buff *skb;
+
+	skb = alloc_skb(size, gfp_mask);
+	if (likely(skb)) {
+		skb->dev = sky2->netdev;
+		skb_reserve(skb, NET_IP_ALIGN);
+	}
+	return skb;
+}
+
+/*
+ * Allocate and set up the receive buffer pool.
+ * With 64-bit DMA there are twice as many list elements available as
+ * ring entries, and one list element must be reserved so the ring does
+ * not wrap around.
+ */
+static int sky2_rx_fill(struct sky2_port *sky2)
+{
+	unsigned i;
+	unsigned int rx_buf_size = sky2->netdev->mtu + ETH_HLEN + 8;
+
+	pr_debug("sky2_rx_fill %d\n", sky2->rx_ring_size);
+	for (i = 0; i < sky2->rx_ring_size; i++) {
+		struct ring_info *re = sky2->rx_ring + i;
+		dma_addr_t paddr;
+
+		re->skb = sky2_rx_alloc_skb(sky2, rx_buf_size, GFP_KERNEL);
+		if (!re->skb)
+			goto nomem;
+
+		paddr = pci_map_single(sky2->hw->pdev, re->skb->data,
+				       rx_buf_size, PCI_DMA_FROMDEVICE);
+
+		pci_unmap_len_set(re, maplen, rx_buf_size);
+		pci_unmap_addr_set(re, mapaddr, paddr);
+		sky2_rx_add(sky2, paddr, rx_buf_size);
+	}
+
+	sky2_write16(sky2->hw, 
+		     Y2_QADDR(rxqaddr[sky2->port], PREF_UNIT_PUT_IDX),
+		     sky2->rx_put);
+
+	return 0;
+nomem:
+	sky2_rx_clean(sky2);
+	return -ENOMEM;
+}
+
+/* Bring up network interface. */
+static int sky2_up(struct net_device *dev)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	struct sky2_hw *hw = sky2->hw;
+	unsigned port = sky2->port;
+	u32 ramsize, rxspace;
+	int err = -ENOMEM;
+
+	if (netif_msg_ifup(sky2))
+		printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
+
+	/* must be power of 2 */
+	sky2->tx_le = pci_alloc_consistent(hw->pdev,
+					   TX_RING_SIZE * sizeof(struct sky2_tx_le),
+					   &sky2->tx_le_map);
+	if (!sky2->tx_le)
+		goto err_out;
+
+	sky2->tx_ring = kmalloc(TX_RING_SIZE * sizeof(struct ring_info),
+				GFP_KERNEL);
+	if (!sky2->tx_ring)
+		goto err_out;
+	sky2->tx_prod = sky2->tx_cons = 0;
+	memset(sky2->tx_ring, 0, TX_RING_SIZE * sizeof(struct ring_info));
+
+	sky2->rx_le = pci_alloc_consistent(hw->pdev, RX_LE_BYTES,
+					   &sky2->rx_le_map);
+	if (!sky2->rx_le)
+		goto err_out;
+	memset(sky2->rx_le, 0, RX_LE_BYTES);
+
+	sky2->rx_ring = kmalloc(sky2->rx_ring_size * sizeof(struct ring_info),
+				GFP_KERNEL);
+	if (!sky2->rx_ring)
+		goto err_out;
+
+	sky2_mac_init(hw, port);
+
+	/* Configure RAM buffers */
+	if (hw->chip_id == CHIP_ID_YUKON_FE ||
+	    (hw->chip_id == CHIP_ID_YUKON_EC && hw->chip_rev == 2))
+		ramsize = 4096;
+	else {
+		u8 e0  = sky2_read8(hw, B2_E_0);
+		ramsize = (e0 == 0) ? (128*1024) : (e0 * 4096);
+	}
+
+	/* 2/3 for Rx */
+	rxspace = (2 * ramsize) / 3;
+	sky2_ramset(hw, rxqaddr[port], 0, rxspace);
+	sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace);
+
+	sky2_qset(hw, rxqaddr[port], is_pciex(hw) ? 0x80 : 0x600);
+	sky2_qset(hw, txqaddr[port], 0x600);
+
+	sky2->rx_put = sky2->rx_next = 0;
+	sky2_prefetch_init(hw, rxqaddr[port], sky2->rx_le_map, RX_LE_SIZE-1);
+
+	sky2_rx_set_offset(sky2);
+
+	err = sky2_rx_fill(sky2);
+	if (err)
+		goto err_out;
+
+	sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
+			   TX_RING_SIZE - 1);
+
+	/* Enable interrupts from phy/mac for port */
+	hw->intr_mask |= (port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2;
+	sky2_write32(hw, B0_IMSK, hw->intr_mask);
+	return 0;
+
+err_out:
+	if (sky2->rx_le)
+		pci_free_consistent(hw->pdev, RX_LE_BYTES,
+				    sky2->rx_le, sky2->rx_le_map);
+	if (sky2->tx_le)
+		pci_free_consistent(hw->pdev,
+				    TX_RING_SIZE * sizeof(struct sky2_tx_le),
+				    sky2->tx_le, sky2->tx_le_map);
+	if (sky2->tx_ring)
+		kfree(sky2->tx_ring);
+	if (sky2->rx_ring)
+		kfree(sky2->rx_ring);
+
+	return err;
+}
+
+/*
+ * Worst case number of list elements is 36
+ *	TSO + CHKSUM + ADDR64 + BUFFER + (ADDR+BUFFER)*MAXFRAGS
+ */
+#define MAX_SKB_TX_LE	(4 + 2*MAX_SKB_FRAGS)
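+
+/*
+ * Worked count for the comment above (assuming MAX_SKB_FRAGS == 16):
+ * 1 TSO + 1 checksum + 1 ADDR64 + 1 buffer element for the linear data,
+ * plus up to (1 ADDR64 + 1 buffer) per fragment,
+ * i.e. 4 + 2*16 = 36 list elements in the worst case.
+ */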
+
+static inline int sky2_xmit_avail(const struct sky2_port *sky2)
+{
+	return (sky2->tx_cons > sky2->tx_prod ? 0 : TX_RING_SIZE)
+		+ sky2->tx_cons - sky2->tx_prod - 1;
+}
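+
+/*
+ * For illustration only: with a hypothetical 512-entry ring, tx_prod = 10
+ * and tx_cons = 5 gives 512 + 5 - 10 - 1 = 506 free slots; one slot is
+ * always held back so a completely full ring cannot be confused with an
+ * empty one.
+ */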
+
+static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
+{
+	struct sky2_tx_le *le = sky2->tx_le + sky2->tx_prod;
+	sky2->tx_prod = (sky2->tx_prod + 1) % TX_RING_SIZE;
+	return le;
+}
+
+/* Put one frame in ring for transmit. */
+static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	struct sky2_hw *hw = sky2->hw;
+	struct sky2_tx_le *le;
+	struct ring_info *re;
+	unsigned i, len;
+	dma_addr_t mapping;
+	u32 addr64;
+	u16 mss;
+	u8 ctrl;
+
+	skb = skb_padto(skb, ETH_ZLEN);
+	if (!skb)
+		return NETDEV_TX_OK;
+
+	if (!spin_trylock(&sky2->tx_lock))
+		return NETDEV_TX_LOCKED;
+
+	if (unlikely(sky2_xmit_avail(sky2) < MAX_SKB_TX_LE)) {
+		netif_stop_queue(dev);
+		spin_unlock(&sky2->tx_lock);
+
+		printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
+		       dev->name);
+		return NETDEV_TX_BUSY;
+	}
+
+	if (netif_msg_tx_queued(sky2))
+		printk(KERN_DEBUG "%s: tx queued, slot %u, len %d\n",
+		       dev->name, sky2->tx_prod, skb->len);
+
+
+	len = skb_headlen(skb);
+	mapping = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
+
+	/* Check for TCP Segmentation Offload */
+	mss = skb_shinfo(skb)->tso_size;
+	if (mss) {
+		/* just drop the packet if non-linear expansion fails */
+		if (skb_header_cloned(skb) &&
+		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
+			dev_kfree_skb(skb);
+			return NETDEV_TX_OK;
+		}
+
+		mss += ((skb->h.th->doff - 5) * 4);	/* TCP options */
+		mss += (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
+		mss += ETH_HLEN;
+
+		le = get_tx_le(sky2);
+		le->tx.tso.size = cpu_to_le16(mss);
+		le->ctrl = 0;
+		le->opcode = OP_LRGLEN | HW_OWNER;
+	}
+
+	/* Handle Hi DMA */
+	if (sizeof(mapping) > sizeof(u32)) {
+		addr64 = (u64)mapping >> 32;
+
+		le = get_tx_le(sky2);
+		le->tx.addr = cpu_to_le32(addr64);
+		le->ctrl = 0;
+		le->opcode = OP_ADDR64 | HW_OWNER;
+	}
+
+	/* Handle TCP checksum offload */
+	ctrl = 0;
+	if (skb->ip_summed == CHECKSUM_HW) {
+		ptrdiff_t hdr = skb->h.raw - skb->data;
+
+		ctrl = CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
+		if (skb->nh.iph->protocol == IPPROTO_UDP)
+			ctrl |= UDPTCP;
+
+		le = get_tx_le(sky2);
+		le->tx.csum.start = cpu_to_le16(hdr);
+		le->tx.csum.offset = cpu_to_le16(hdr + skb->csum);
+		le->length = 0;
+		le->ctrl = 1;	/* one packet */
+		le->opcode = OP_TCPLISW|HW_OWNER;
+	}
+
+	le = get_tx_le(sky2);
+	le->tx.addr = cpu_to_le32((u32) mapping);
+	le->length = cpu_to_le16(len);
+	le->ctrl = ctrl;
+	le->opcode = (mss ? OP_LARGESEND : OP_PACKET) |HW_OWNER;
+
+	re = &sky2->tx_ring[le - sky2->tx_le];
+	re->skb = skb;
+	pci_unmap_addr_set(re, mapaddr, mapping);
+	pci_unmap_len_set(re, maplen, len);
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset,
+				       frag->size, PCI_DMA_TODEVICE);
+
+		if (sizeof(mapping) > sizeof(u32)) {
+			u32 hi = (u64) mapping  >> 32;
+			if (hi != addr64) {
+				le = get_tx_le(sky2);
+				le->tx.addr = cpu_to_le32(hi);
+				le->ctrl = 0;
+				le->opcode = OP_ADDR64|HW_OWNER;
+				addr64 = hi;
+			}
+		}
+
+		le = get_tx_le(sky2);
+		le->tx.addr = cpu_to_le32((u32) mapping);
+		le->length = cpu_to_le16(frag->size);
+		le->ctrl = ctrl;
+		le->opcode = OP_BUFFER|HW_OWNER;
+
+		re = &sky2->tx_ring[le - sky2->tx_le];
+		pci_unmap_addr_set(re, mapaddr, mapping);
+		pci_unmap_len_set(re, maplen, frag->size);
+	}
+
+	le->ctrl |= EOP;
+
+	sky2_put_idx(sky2->hw, txqaddr[sky2->port], sky2->tx_prod,
+		     &sky2->tx_last_put, TX_RING_SIZE);
+
+	if (sky2_xmit_avail(sky2) < MAX_SKB_TX_LE) {
+		pr_debug("%s: transmit queue full\n", dev->name);
+		netif_stop_queue(dev);
+	}
+	spin_unlock(&sky2->tx_lock);
+
+	dev->trans_start = jiffies;
+	return NETDEV_TX_OK;
+}
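+
+/*
+ * Sketch of the list element layout produced above for a hypothetical TSO
+ * frame with one page fragment, hardware checksum requested and a 64-bit
+ * dma_addr_t:
+ *   OP_LRGLEN (MSS), OP_ADDR64, OP_TCPLISW (csum), OP_LARGESEND (head),
+ *   OP_ADDR64 (only if the fragment's high 32 address bits change),
+ *   OP_BUFFER (fragment), with EOP set on the final element.
+ */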
+
+
+/*
+ * Free ring elements starting at tx_cons until 'done'.
+ * This unwinds the elements based on the usage recorded by the
+ * xmit routine.
+ */
+static void sky2_tx_complete(struct net_device *dev, u16 done)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	unsigned idx = sky2->tx_cons;
+	struct sk_buff *skb = NULL;
+
+	BUG_ON(done >= TX_RING_SIZE);
+
+	spin_lock(&sky2->tx_lock);
+	while (idx != done) {
+		struct ring_info *re = sky2->tx_ring + idx;
+		struct sky2_tx_le *le = sky2->tx_le + idx;
+
+		BUG_ON(le->opcode == 0);
+
+		switch(le->opcode & ~HW_OWNER) {
+		case OP_LARGESEND:
+		case OP_PACKET:
+			if (skb)
+				dev_kfree_skb_any(skb);
+			skb = re->skb;
+			BUG_ON(!skb);
+			re->skb = NULL;
+
+			pci_unmap_single(sky2->hw->pdev, 
+					 pci_unmap_addr(re, mapaddr),
+					 pci_unmap_len(re, maplen),
+					 PCI_DMA_TODEVICE);
+			break;
+
+		case OP_BUFFER:
+			pci_unmap_page(sky2->hw->pdev,
+				       pci_unmap_addr(re, mapaddr),
+				       pci_unmap_len(re, maplen),
+				       PCI_DMA_TODEVICE);
+			break;
+		}
+
+		le->opcode = 0;
+		idx = (idx + 1) % TX_RING_SIZE;
+	}
+
+	if (skb)
+		dev_kfree_skb_any(skb);
+	sky2->tx_cons = idx;
+
+	if (sky2_xmit_avail(sky2) > MAX_SKB_TX_LE)
+		netif_wake_queue(dev);
+	spin_unlock(&sky2->tx_lock);
+}
+
+/* Cleanup all untransmitted buffers, assume transmitter not running */
+static inline void sky2_tx_clean(struct sky2_port *sky2)
+{
+	sky2_tx_complete(sky2->netdev, sky2->tx_prod);
+}
+
+/* Network shutdown */
+static int sky2_down(struct net_device *dev)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	struct sky2_hw *hw = sky2->hw;
+	unsigned port = sky2->port;
+	u16 ctrl;
+	int i;
+
+	if (netif_msg_ifdown(sky2))
+		printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
+
+	netif_stop_queue(dev);
+
+	/* Stop transmitter */
+	sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_STOP);
+	sky2_read32(hw, Q_ADDR(txqaddr[port], Q_CSR));
+
+	sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL),
+		     RB_RST_SET|RB_DIS_OP_MD);
+
+	ctrl = gma_read16(hw, port, GM_GP_CTRL);
+	ctrl &= ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA);
+	gma_write16(hw, port, GM_GP_CTRL, ctrl);
+
+	sky2_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
+
+	/* Workaround shared GMAC reset */
+	if (! (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0
+	       && port == 0 && hw->dev[1] && netif_running(hw->dev[1])))
+		sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
+
+	/* Disable Force Sync bit and Enable Alloc bit */
+	sky2_write8(hw, SK_REG(port, TXA_CTRL),
+		    TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
+
+	/* Stop Interval Timer and Limit Counter of Tx Arbiter */
+	sky2_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
+	sky2_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);
+
+	/* Reset the PCI FIFO of the async Tx queue */
+	sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_RST_SET | BMU_FIFO_RST);
+
+	/* Reset the Tx prefetch units */
+	sky2_write32(hw, Y2_QADDR(txqaddr[port], PREF_UNIT_CTRL),
+		     PREF_UNIT_RST_SET);
+
+	sky2_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET);
+
+	/*
+	 * The RX Stop command will not work for Yukon-2 if the BMU has not
+	 * reached the end of a packet, and since we can't make sure that we
+	 * have incoming data, we must reset the BMU while it is not doing a
+	 * DMA transfer. Since it is possible that the RX path is still
+	 * active, the RX RAM buffer is stopped first, so any possible
+	 * incoming data will not trigger a DMA. After the RAM buffer is
+	 * stopped, the BMU is polled until any DMA in progress has ended,
+	 * and only then is it reset.
+	 */
+
+	/* disable the RAM Buffer receive queue */
+	sky2_write8(hw, RB_ADDR(rxqaddr[port], RB_CTRL), RB_DIS_OP_MD);
+
+	for (i = 0; i < 0xffff; i++)
+		if (sky2_read8(hw, RB_ADDR(rxqaddr[port], Q_RSL))
+		    == sky2_read8(hw, RB_ADDR(rxqaddr[port], Q_RL)))
+			break;
+
+	sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR),
+		     BMU_RST_SET | BMU_FIFO_RST);
+	/* reset the Rx prefetch unit */
+	sky2_write32(hw, Y2_QADDR(rxqaddr[port], PREF_UNIT_CTRL),
+		     PREF_UNIT_RST_SET);
+
+	sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
+	sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
+
+	/* turn off LEDs */
+	sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
+
+	sky2_tx_clean(sky2);
+	sky2_rx_clean(sky2);
+
+	pci_free_consistent(hw->pdev, RX_LE_BYTES,
+			    sky2->rx_le, sky2->rx_le_map);
+	kfree(sky2->rx_ring);
+
+	pci_free_consistent(hw->pdev,
+			    TX_RING_SIZE * sizeof(struct sky2_tx_le),
+			    sky2->tx_le, sky2->tx_le_map);
+	kfree(sky2->tx_ring);
+
+	return 0;
+}
+
+static u16 sky2_phy_speed(const struct sky2_hw *hw, u16 aux)
+{
+	if (hw->chip_id == CHIP_ID_YUKON_FE)
+		return (aux & PHY_M_PS_SPEED_100) ? SPEED_100 : SPEED_10;
+
+	switch (aux & PHY_M_PS_SPEED_MSK) {
+	case PHY_M_PS_SPEED_1000:
+		return SPEED_1000;
+	case PHY_M_PS_SPEED_100:
+		return SPEED_100;
+	default:
+		return SPEED_10;
+	}
+}
+
+static void sky2_link_up(struct sky2_port *sky2)
+{
+	struct sky2_hw *hw = sky2->hw;
+	unsigned port = sky2->port;
+	u16 reg;
+
+	/* Enable Transmit FIFO Underrun */
+	sky2_write8(hw, GMAC_IRQ_MSK, GMAC_DEF_MSK);
+
+	reg = gma_read16(hw, port, GM_GP_CTRL);
+	if (sky2->duplex == DUPLEX_FULL || sky2->autoneg == AUTONEG_ENABLE)
+		reg |= GM_GPCR_DUP_FULL;
+
+	/* enable Rx/Tx */
+	reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
+	gma_write16(hw, port, GM_GP_CTRL, reg);
+	gma_read16(hw, port, GM_GP_CTRL);
+
+	gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
+
+	netif_carrier_on(sky2->netdev);
+	netif_wake_queue(sky2->netdev);
+
+	/* Turn on link LED */
+	sky2_write8(hw, SK_REG(port, LNK_LED_REG), 
+		    LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);
+
+	if (netif_msg_link(sky2))
+		printk(KERN_INFO PFX
+		       "%s: Link is up at %d Mbps, %s duplex, flowcontrol %s\n",
+		       sky2->netdev->name, sky2->speed,
+		       sky2->duplex == DUPLEX_FULL ? "full" : "half",
+		       (sky2->tx_pause && sky2->rx_pause) ? "both" :
+		       sky2->tx_pause ? "tx" :
+		       sky2->rx_pause ? "rx" : "none");
+}
+
+static void sky2_link_down(struct sky2_port *sky2)
+{
+	struct sky2_hw *hw = sky2->hw;
+	unsigned port = sky2->port;
+	u16 reg;
+
+	gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
+
+	reg = gma_read16(hw, port, GM_GP_CTRL);
+	reg &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
+	gma_write16(hw, port, GM_GP_CTRL, reg);
+	gma_read16(hw, port, GM_GP_CTRL);	/* PCI post */
+
+	if (sky2->rx_pause && !sky2->tx_pause) {
+		/* restore Asymmetric Pause bit */
+		gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
+				  gm_phy_read(hw, port,
+						   PHY_MARV_AUNE_ADV)
+				  | PHY_M_AN_ASP);
+	}
+
+	sky2_phy_reset(hw, port);
+
+	netif_carrier_off(sky2->netdev);
+	netif_stop_queue(sky2->netdev);
+
+	/* Turn on link LED */
+	sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
+
+	if (netif_msg_link(sky2))
+		printk(KERN_INFO PFX "%s: Link is down.\n", sky2->netdev->name);
+	sky2_phy_init(hw, port);
+}
+
+
+/*
+ * Interrupts from the PHY are handled in a tasklet (soft irq)
+ * because accessing PHY registers requires a spin wait, which might
+ * cause excess interrupt latency.
+ */
+static void sky2_phy_task(unsigned long data)
+{
+	struct sky2_port *sky2 = (struct sky2_port *) data;
+	struct sky2_hw *hw = sky2->hw;
+	unsigned port = sky2->port;
+	u16 istatus, phystat;
+
+	istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
+
+	phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
+
+	if (netif_msg_intr(sky2))
+		printk(KERN_INFO PFX "%s: phy interrupt status 0x%x 0x%x\n",
+		       sky2->netdev->name, istatus, phystat);
+
+	if (istatus & PHY_M_IS_AN_COMPL) {
+		u16 lpa = gm_phy_read(hw, port, PHY_MARV_AUNE_LP);
+
+		if (lpa & PHY_M_AN_RF) {
+			printk(KERN_ERR PFX "%s: remote fault\n",
+			       sky2->netdev->name);
+		}
+		else if (hw->chip_id != CHIP_ID_YUKON_FE
+			 && gm_phy_read(hw, port, PHY_MARV_1000T_STAT)
+			 & PHY_B_1000S_MSF) {
+			printk(KERN_ERR PFX "%s: master/slave fault\n",
+			       sky2->netdev->name);
+		}
+		else if (!(phystat & PHY_M_PS_SPDUP_RES)) {
+			printk(KERN_ERR PFX "%s: speed/duplex mismatch\n",
+			       sky2->netdev->name);
+		}
+		else {
+			sky2->duplex = (phystat & PHY_M_PS_FULL_DUP)
+				? DUPLEX_FULL : DUPLEX_HALF;
+
+			sky2->speed = sky2_phy_speed(hw, phystat);
+			
+			sky2->tx_pause = (phystat & PHY_M_PS_TX_P_EN) != 0;
+			sky2->rx_pause = (phystat & PHY_M_PS_RX_P_EN) != 0;
+
+			if ((!sky2->tx_pause && !sky2->rx_pause) ||
+			    (sky2->speed < SPEED_1000 && sky2->duplex == DUPLEX_HALF))
+				sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
+			else
+				sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
+			sky2_link_up(sky2);
+		}
+	} else {
+
+		if (istatus & PHY_M_IS_LSP_CHANGE)
+			sky2->speed = sky2_phy_speed(hw, phystat);
+
+		if (istatus & PHY_M_IS_DUP_CHANGE)
+			sky2->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF;
+		if (istatus & PHY_M_IS_LST_CHANGE) {
+			if (phystat & PHY_M_PS_LINK_UP)
+				sky2_link_up(sky2);
+			else
+				sky2_link_down(sky2);
+		}
+	}
+
+	local_irq_disable();
+	hw->intr_mask |= (port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2;
+	sky2_write32(hw, B0_IMSK, hw->intr_mask);
+	local_irq_enable();
+}
+
+static void sky2_tx_timeout(struct net_device *dev)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+
+	if (netif_msg_timer(sky2))
+		printk(KERN_ERR PFX "%s: tx timeout\n", dev->name);
+
+	sky2_write32(sky2->hw, Q_ADDR(txqaddr[sky2->port], Q_CSR), BMU_STOP);
+	sky2_read32(sky2->hw, Q_ADDR(txqaddr[sky2->port], Q_CSR));
+
+	sky2_tx_clean(sky2);
+}
+
+static int sky2_change_mtu(struct net_device *dev, int new_mtu)
+{
+	int err = 0;
+
+	if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
+		return -EINVAL;
+
+	if (netif_running(dev))
+		sky2_down(dev);
+
+	dev->mtu = new_mtu;
+
+	if (netif_running(dev))
+		err = sky2_up(dev);
+
+	return err;
+}
+
+/*
+ * Receive one packet.
+ * For small packets or errors, just reuse existing skb.
+ * For larger packets, get a new buffer.
+ */
+static struct sk_buff *sky2_receive(struct sky2_hw *hw, unsigned port,
+				    u16 length, u32 status)
+{
+	struct net_device *dev = hw->dev[port];
+	struct sky2_port *sky2 = netdev_priv(dev);
+	struct ring_info *re = sky2->rx_ring + sky2->rx_next;
+	struct sk_buff *skb = re->skb;
+	dma_addr_t mapping;
+	const unsigned int rx_buf_size = dev->mtu + ETH_HLEN + 8;
+
+	if (unlikely(netif_msg_rx_status(sky2)))
+		printk(KERN_DEBUG PFX "%s: rx slot %u status 0x%x len %d\n",
+		       dev->name, sky2->rx_next, status, length);
+
+	sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_ring_size;
+
+	pci_unmap_single(sky2->hw->pdev,
+			 pci_unmap_addr(re, mapaddr),
+			 pci_unmap_len(re, maplen),
+			 PCI_DMA_FROMDEVICE);
+	prefetch(skb->data);
+
+	if (!(status & GMR_FS_RX_OK) 
+	    || (status & GMR_FS_ANY_ERR) 
+	    || (length << 16) != (status & GMR_FS_LEN) 
+	    || length > rx_buf_size) 
+		goto error;
+
+	re->skb = sky2_rx_alloc_skb(sky2, rx_buf_size, GFP_ATOMIC);
+	if (!re->skb) 
+		goto reuse;
+
+submit:
+	mapping = pci_map_single(sky2->hw->pdev, re->skb->data,
+				 rx_buf_size, PCI_DMA_FROMDEVICE);
+
+	pci_unmap_len_set(re, maplen, rx_buf_size);
+	pci_unmap_addr_set(re, mapaddr, mapping);
+
+	sky2_rx_add(sky2, mapping, rx_buf_size);
+	sky2_put_idx(sky2->hw, rxqaddr[sky2->port],
+		     sky2->rx_put, &sky2->rx_last_put, RX_LE_SIZE);
+
+	return skb;
+
+error:
+	if (netif_msg_rx_err(sky2))
+		printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n",
+		       sky2->netdev->name, status, length);
+	
+	if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
+		sky2->net_stats.rx_length_errors++;
+	if (status & GMR_FS_FRAGMENT)
+		sky2->net_stats.rx_frame_errors++;
+	if (status & GMR_FS_CRC_ERR)
+		sky2->net_stats.rx_crc_errors++;
+reuse:
+	re->skb = skb;
+	skb = NULL;
+	goto submit;
+}
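+
+/*
+ * Note on the length check above: the GMAC repeats the frame length in
+ * the upper 16 bits of the receive status word, so for example a 64-byte
+ * frame is only accepted when (status & GMR_FS_LEN) == 64 << 16.
+ */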
+
+static u16 get_tx_index(u8 port, u32 status, u16 len)
+{
+	if (port == 0)
+		return status & 0xfff;
+	else
+		return ((status >> 24) & 0xff) | (len & 0xf) << 8;
+}
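+
+/*
+ * Sketch of the unpacking above: for port 1 the transmit index is split
+ * across the status and length fields of the status list element, e.g.
+ * status = 0x37000000 and len = 0x0005 would yield
+ * 0x37 | (0x5 << 8) = 0x537.
+ */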
+
+/*
+ * NAPI poll routine.
+ * Both ports share the same status interrupt, therefore there is only
+ * one poll routine.
+ *
+ */
+static int sky2_poll(struct net_device *dev, int *budget)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	struct sky2_hw *hw = sky2->hw;
+	unsigned int to_do = min(dev->quota, *budget);
+	unsigned int work_done = 0;
+	unsigned char summed[2] = { CHECKSUM_NONE, CHECKSUM_NONE };
+	unsigned int csum[2] = { 0 };
+	unsigned int rx_handled[2] = { 0, 0};
+	u16 last;
+
+	sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
+	last = sky2_read16(hw, STAT_PUT_IDX);
+
+	while (hw->st_idx != last && work_done < to_do) {
+		struct sky2_status_le *le = hw->st_le + hw->st_idx;
+		struct sk_buff *skb;
+		u8 port;
+		u32 status;
+		u16 length;
+
+		rmb();
+		status = le32_to_cpu(le->status);
+		length = le16_to_cpu(le->length);
+		port = le->link;
+
+		BUG_ON(port >= hw->ports);
+
+		switch(le->opcode & ~HW_OWNER) {
+		case OP_RXSTAT:
+			++rx_handled[port];
+			skb = sky2_receive(hw, port, length, status);
+			if (likely(skb)) {
+				__skb_put(skb, length);
+				skb->protocol = eth_type_trans(skb, dev);
+
+				/* Add hw checksum if available */
+				skb->ip_summed = summed[port];
+				skb->csum = csum[port];
+
+				/* Clear for next packet */
+				csum[port] = 0;
+				summed[port] = CHECKSUM_NONE;
+
+				netif_receive_skb(skb);
+
+				dev->last_rx = jiffies;
+				++work_done;
+			}
+			break;
+
+		case OP_RXCHKS:
+			/* Save computed checksum for next rx */
+			/* status has already been converted from LE above */
+			csum[port] = status & 0xffff;
+			summed[port] = CHECKSUM_HW;
+			break;
+
+		case OP_TXINDEXLE:
+			sky2_tx_complete(hw->dev[port],
+					 get_tx_index(port, status, length));
+			break;
+
+		case OP_RXTIMESTAMP:
+			break;
+
+		default:
+			if (net_ratelimit())
+				printk(KERN_WARNING PFX "unknown status opcode 0x%x\n",
+				       le->opcode);
+			break;
+		}
+
+		hw->st_idx = (hw->st_idx + 1) & (STATUS_RING_SIZE -1);
+	}
+
+	*budget -= work_done;
+	dev->quota -= work_done;
+	if (work_done < to_do) {
+		/*
+		 * Another chip workaround, need to restart TX timer if status
+		 * LE was handled. WA_DEV_43_418
+		 */
+		if (is_ec_a1(hw)) {
+			sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
+			sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
+		}
+
+		hw->intr_mask |= Y2_IS_STAT_BMU;
+		sky2_write32(hw, B0_IMSK, hw->intr_mask);
+		netif_rx_complete(dev);
+	}
+
+	return work_done >= to_do;
+
+}
+
+static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
+{
+	struct net_device *dev = hw->dev[port];
+
+	printk(KERN_INFO PFX "%s: hw error interrupt status 0x%x\n",
+	       dev->name, status);
+
+	if (status & Y2_IS_PAR_RD1) {
+		printk(KERN_ERR PFX "%s: ram data read parity error\n",
+		       dev->name);
+		/* Clear IRQ */
+		sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR);
+	}
+
+	if (status & Y2_IS_PAR_WR1) {
+		printk(KERN_ERR PFX "%s: ram data write parity error\n",
+		       dev->name);
+
+		sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR);
+	}
+
+	if (status & Y2_IS_PAR_MAC1) {
+		printk(KERN_ERR PFX "%s: MAC parity error\n", dev->name);
+		sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE);
+	}
+
+	if (status & Y2_IS_PAR_RX1) {
+		printk(KERN_ERR PFX "%s: RX parity error\n", dev->name);
+		sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR);
+	}
+
+	if (status & Y2_IS_TCP_TXA1) {
+		printk(KERN_ERR PFX "%s: TCP segmentation error\n", dev->name);
+		sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP);
+	}
+}
+
+static void sky2_hw_intr(struct sky2_hw *hw)
+{
+	u32 status = sky2_read32(hw, B0_HWE_ISRC);
+
+	if (status & Y2_IS_TIST_OV) {
+		pr_debug (PFX "%s: unused timer overflow??\n", 
+			  pci_name(hw->pdev));
+		sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
+	}
+
+	if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) {
+		u16 pci_err = sky2_read16(hw, PCI_C(PCI_STATUS));
+		printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n",
+		       pci_name(hw->pdev), pci_err);
+
+		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+		sky2_write16(hw, PCI_C(PCI_STATUS),
+			     pci_err | PCI_STATUS_ERROR_BITS);
+		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+	}
+
+	if (status & Y2_IS_PCI_EXP) {
+		/* PCI-Express uncorrectable Error occurred */
+		u32 pex_err =  sky2_read32(hw, PCI_C(PEX_UNC_ERR_STAT));
+
+		/*
+		 * On a PCI-Express bus, bridges are called root complexes.
+		 * PCI-Express errors are also recognized by the root complex,
+		 * which asks the system to handle the problem.  After such an
+		 * error it may no longer be possible to access the adapter.
+		 */
+		printk(KERN_ERR PFX "%s: pci express error (0x%x)\n",
+		       pci_name(hw->pdev), pex_err);
+
+		/* clear the interrupt */
+		sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+		sky2_write32(hw, PCI_C(PEX_UNC_ERR_STAT), 0xffffffffUL);
+		sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+
+		if (pex_err & PEX_FATAL_ERRORS) {
+			u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK);
+			hwmsk &= ~Y2_IS_PCI_EXP;
+			sky2_write32(hw, B0_HWE_IMSK, hwmsk);
+		}
+	}
+
+	if (status & Y2_HWE_L1_MASK)
+		sky2_hw_error(hw, 0, status);
+	status >>= 8;
+	if (status & Y2_HWE_L1_MASK)
+		sky2_hw_error(hw, 1, status);
+}
+
+static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
+{
+	struct net_device *dev = hw->dev[port];
+	struct sky2_port *sky2 = netdev_priv(dev);
+	u8 status = sky2_read8(hw, SK_REG(port, GMAC_IRQ_SRC));
+
+	if (netif_msg_intr(sky2))
+		printk(KERN_INFO PFX "%s: mac interrupt status 0x%x\n",
+		       dev->name, status);
+
+	if (status & GM_IS_RX_FF_OR) {
+		++sky2->net_stats.rx_fifo_errors;
+		sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO);
+	}
+
+	if (status & GM_IS_TX_FF_UR) {
+		++sky2->net_stats.tx_fifo_errors;
+		sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU);
+	}
+
+}
+
+static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
+{
+	struct net_device *dev = hw->dev[port];
+	struct sky2_port *sky2 = netdev_priv(dev);
+
+	hw->intr_mask &= ~(port == 0 ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2);
+	sky2_write32(hw, B0_IMSK, hw->intr_mask);
+	tasklet_schedule(&sky2->phy_task);
+}
+
+static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct sky2_hw *hw = dev_id;
+	u32 status;
+
+	status = sky2_read32(hw, B0_Y2_SP_ISRC2);
+	if (status == 0 || status == ~0) /* hotplug or shared irq */
+		return IRQ_NONE;
+
+	if (status & Y2_IS_HW_ERR)
+		sky2_hw_intr(hw);
+
+	if ((status & Y2_IS_STAT_BMU) && netif_rx_schedule_prep(hw->dev[0])) {
+		hw->intr_mask &= ~Y2_IS_STAT_BMU;
+		sky2_write32(hw, B0_IMSK, hw->intr_mask);
+		__netif_rx_schedule(hw->dev[0]);
+	}
+
+	if (status & Y2_IS_IRQ_PHY1) 
+		sky2_phy_intr(hw, 0);
+
+	if (status & Y2_IS_IRQ_PHY2)
+		sky2_phy_intr(hw, 1);
+
+	if (status & Y2_IS_IRQ_MAC1)
+		sky2_mac_intr(hw, 0);
+
+	if (status & Y2_IS_IRQ_MAC2)
+		sky2_mac_intr(hw, 1);
+
+
+	sky2_write32(hw, B0_Y2_SP_ICR, 2);
+	return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void sky2_netpoll(struct net_device *dev)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+
+	disable_irq(dev->irq);
+	sky2_intr(dev->irq, sky2->hw, NULL);
+	enable_irq(dev->irq);
+}
+#endif
+
+/* Chip internal frequency for clock calculations */
+static inline u32 sky2_khz(const struct sky2_hw *hw)
+{
+	switch (hw->chip_id) {
+	case CHIP_ID_YUKON_EC:
+		return 125000;	/* 125 MHz */
+	case CHIP_ID_YUKON_FE:
+		return 100000;	/* 100 MHz */
+	default: /* YUKON_XL */
+		return 156000;	/* 156 MHz */
+	}
+}
+
+static inline u32 sky2_ms2clk(const struct sky2_hw *hw, u32 ms)
+{
+	return sky2_khz(hw) * ms;
+}
+
+static inline u32 sky2_us2clk(const struct sky2_hw *hw, u32 us)
+{
+	return (sky2_khz(hw) * us) / 1000;
+}
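+
+/*
+ * Worked example (illustration only): with the 125000 kHz Yukon-EC core
+ * clock, sky2_ms2clk(hw, 10) = 125000 * 10 = 1250000 ticks and
+ * sky2_us2clk(hw, 75) = 125000 * 75 / 1000 = 9375 ticks.
+ */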
+
+static int sky2_reset(struct sky2_hw *hw)
+{
+	u32 ctst, power;
+	u16 status;
+	u8 t8, pmd_type;
+	int i;
+
+	ctst = sky2_read32(hw, B0_CTST);
+
+	sky2_write8(hw, B0_CTST, CS_RST_CLR);
+	hw->chip_id = sky2_read8(hw, B2_CHIP_ID);
+	if (hw->chip_id < CHIP_ID_YUKON_XL || hw->chip_id > CHIP_ID_YUKON_FE) {
+		printk(KERN_ERR PFX "%s: unsupported chip type 0x%x\n",
+		       pci_name(hw->pdev), hw->chip_id);
+		return -EOPNOTSUPP;
+	}
+
+	/* disable ASF */
+	if (hw->chip_id <= CHIP_ID_YUKON_EC) {
+		sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
+		sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);
+	}
+
+	/* do a SW reset */
+	sky2_write8(hw, B0_CTST, CS_RST_SET);
+	sky2_write8(hw, B0_CTST, CS_RST_CLR);
+
+	/* clear PCI errors, if any */
+	status = sky2_read16(hw, PCI_C(PCI_STATUS));
+	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+	sky2_write16(hw, PCI_C(PCI_STATUS),
+		     status | PCI_STATUS_ERROR_BITS);
+
+	sky2_write8(hw, B0_CTST, CS_MRST_CLR);
+
+	/* clear any PEX errors */
+	if (is_pciex(hw)) {
+		sky2_write32(hw, PCI_C(PEX_UNC_ERR_STAT), 0xffffffffUL);
+		sky2_read16(hw, PCI_C(PEX_LNK_STAT));
+	}
+
+	pmd_type = sky2_read8(hw, B2_PMD_TYP);
+	hw->copper = !(pmd_type == 'L' || pmd_type == 'S');
+
+	hw->ports = 1;
+	t8 = sky2_read8(hw, B2_Y2_HW_RES);
+	if ((t8 & CFG_DUAL_MAC_MSK) == CFG_DUAL_MAC_MSK) {
+		if (!(sky2_read8(hw, B2_Y2_CLK_GATE) & Y2_STATUS_LNK2_INAC))
+			++hw->ports;
+	}
+	hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;
+
+	/* switch power to VCC (WA for VAUX problem) */
+	sky2_write8(hw, B0_POWER_CTRL,
+		    PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
+
+	/* disable Core Clock Division */
+	sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
+
+	if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1)
+		/* enable bits are inverted */
+		sky2_write8(hw, B2_Y2_CLK_GATE,
+			    Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
+			    Y2_CLK_GAT_LNK1_DIS | Y2_PCI_CLK_LNK2_DIS |
+			    Y2_COR_CLK_LNK2_DIS | Y2_CLK_GAT_LNK2_DIS);
+	else
+		sky2_write8(hw, B2_Y2_CLK_GATE, 0);
+
+	/* Turn off phy power saving */
+	power = sky2_read32(hw, PCI_C(PCI_DEV_REG1));
+	power &= ~(PCI_Y2_PHY1_POWD|PCI_Y2_PHY2_POWD);
+
+	/* back asswards .. */
+	if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) {
+		power |= PCI_Y2_PHY1_COMA;
+		if (hw->ports > 1)
+			power |= PCI_Y2_PHY2_COMA;
+	}
+	sky2_write32(hw, PCI_C(PCI_DEV_REG1), power);
+
+	for (i = 0; i < hw->ports; i++) {
+		sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
+		sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
+	}
+
+	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+
+	sky2_write32(hw, B2_I2C_IRQ, 1); /* Clear I2C IRQ noise */
+
+	/* turn off hardware timer (unused) */
+	sky2_write8(hw, B2_TI_CTRL, TIM_STOP);
+	sky2_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ);
+	
+	sky2_write8(hw, B0_Y2LED, LED_STAT_ON);
+
+	/* Turn on descriptor polling  -- is this necessary? */
+	sky2_write32(hw, B28_DPT_INI, sky2_us2clk(hw, 75));
+	sky2_write8(hw, B28_DPT_CTRL, DPT_START);
+
+	/* Turn off receive timestamp */
+	sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_STOP);
+
+	/* enable the Tx Arbiters */
+	for (i = 0; i < hw->ports; i++)
+		sky2_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);
+
+	/* Initialize ram interface */
+	for (i = 0; i < hw->ports; i++) {
+		sky2_write16(hw, RAM_BUFFER(i, B3_RI_CTRL), RI_RST_CLR);
+
+		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R1), SK_RI_TO_53);
+		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA1), SK_RI_TO_53);
+		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS1), SK_RI_TO_53);
+		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R1), SK_RI_TO_53);
+		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA1), SK_RI_TO_53);
+		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS1), SK_RI_TO_53);
+		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_R2), SK_RI_TO_53);
+		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XA2), SK_RI_TO_53);
+		sky2_write8(hw, RAM_BUFFER(i, B3_RI_WTO_XS2), SK_RI_TO_53);
+		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_R2), SK_RI_TO_53);
+		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XA2), SK_RI_TO_53);
+		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53);
+	}
+
+	/* Optimize PCI Express access */
+	if (is_pciex(hw)) {
+		u16 ctrl = sky2_read32(hw, PCI_C(PEX_DEV_CTRL));
+		ctrl &= ~PEX_DC_MAX_RRS_MSK;
+		ctrl |= PEX_DC_MAX_RD_RQ_SIZE(4);
+		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+		sky2_write16(hw, PCI_C(PEX_DEV_CTRL), ctrl);
+		sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+	}
+
+	sky2_write32(hw, B0_HWE_IMSK, Y2_HWE_ALL_MASK);
+
+	hw->intr_mask = Y2_IS_BASE;
+	sky2_write32(hw, B0_IMSK, hw->intr_mask);
+
+	/* disable all GMAC IRQs */
+	sky2_write8(hw, GMAC_IRQ_MSK, 0);
+
+	spin_lock_bh(&hw->phy_lock);
+	for (i = 0; i < hw->ports; i++)
+		sky2_phy_reset(hw, i);
+	spin_unlock_bh(&hw->phy_lock);
+
+	/* Setup ring for status responses */
+	hw->st_le = pci_alloc_consistent(hw->pdev, STATUS_LE_BYTES,
+					 &hw->st_dma);
+	if (!hw->st_le)
+		return -ENOMEM;
+
+	memset(hw->st_le, 0, STATUS_LE_BYTES);
+	hw->st_idx = 0;
+
+	sky2_write32(hw, STAT_CTRL, SC_STAT_RST_SET);
+	sky2_write32(hw, STAT_CTRL, SC_STAT_RST_CLR);
+
+	sky2_write32(hw, STAT_LIST_ADDR_LO, hw->st_dma);
+	sky2_write32(hw, STAT_LIST_ADDR_HI, (u64)hw->st_dma >> 32);
+
+	/* Set the list last index */
+	sky2_write16(hw, STAT_LAST_IDX, STATUS_RING_SIZE-1);
+
+	if (is_ec_a1(hw)) {
+		/* WA for dev. #4.3 */
+		sky2_write16(hw, STAT_TX_IDX_TH, ST_TXTH_IDX_MASK);
+
+		/* set Status-FIFO watermark */
+		sky2_write8(hw, STAT_FIFO_WM, 0x21);	/* WA for dev. #4.18 */
+
+		/* set Status-FIFO ISR watermark */
+		sky2_write8(hw, STAT_FIFO_ISR_WM, 0x07);/* WA for dev. #4.18 */
+
+		/* WA for dev. #4.3 and #4.18 */
+		/* set Status-FIFO Tx timer init value */
+		sky2_write32(hw, STAT_TX_TIMER_INI, sky2_ms2clk(hw, 10));
+	} else {
+		/*
+		 * These settings should avoid temporary hangs of the
+		 * status BMU.
+		 * Maybe not all are required... still under investigation...
+		 */
+		sky2_write16(hw, STAT_TX_IDX_TH, 0x000a);
+
+		/* set Status-FIFO watermark */
+		sky2_write8(hw, STAT_FIFO_WM, 0x10);
+
+		/* set Status-FIFO ISR watermark */
+		if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0)
+			sky2_write8(hw, STAT_FIFO_ISR_WM, 0x10);
+
+		else	 /* WA 4109 */
+			sky2_write8(hw, STAT_FIFO_ISR_WM, 0x04);
+
+		sky2_write32(hw, STAT_ISR_TIMER_INI, 0x0190);
+	}
+
+	/* enable the prefetch unit */
+	/* operational bit not functional for Yukon-EC, but fixed in Yukon-2? */
+	sky2_write32(hw, STAT_CTRL, SC_STAT_OP_ON);
+
+	sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
+	sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START);
+	sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START);
+
+	return 0;
+}
+
+static inline u32 sky2_supported_modes(const struct sky2_hw *hw)
+{
+	u32 modes;
+	if (hw->copper) {
+		modes =  SUPPORTED_10baseT_Half
+			| SUPPORTED_10baseT_Full
+			| SUPPORTED_100baseT_Half
+			| SUPPORTED_100baseT_Full
+			| SUPPORTED_Autoneg | SUPPORTED_TP;
+
+		if (hw->chip_id != CHIP_ID_YUKON_FE)
+			modes |= SUPPORTED_1000baseT_Half
+				| SUPPORTED_1000baseT_Full;
+	} else
+		modes = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
+			| SUPPORTED_Autoneg;
+	return modes;
+}
+
+static int sky2_get_settings(struct net_device *dev,
+			     struct ethtool_cmd *ecmd)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	struct sky2_hw *hw = sky2->hw;
+
+	ecmd->transceiver = XCVR_INTERNAL;
+	ecmd->supported = sky2_supported_modes(hw);
+	ecmd->phy_address = PHY_ADDR_MARV;
+	if (hw->copper) {
+		ecmd->supported = SUPPORTED_10baseT_Half
+			| SUPPORTED_10baseT_Full
+			| SUPPORTED_100baseT_Half
+			| SUPPORTED_100baseT_Full
+			| SUPPORTED_1000baseT_Half
+			| SUPPORTED_1000baseT_Full
+			| SUPPORTED_Autoneg | SUPPORTED_TP;
+		ecmd->port = PORT_TP;
+	} else
+		ecmd->port = PORT_FIBRE;
+
+	ecmd->advertising = sky2->advertising;
+	ecmd->autoneg = sky2->autoneg;
+	ecmd->speed = sky2->speed;
+	ecmd->duplex = sky2->duplex;
+	return 0;
+}
+
+static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	const struct sky2_hw *hw = sky2->hw;
+	u32 supported = sky2_supported_modes(hw);
+
+	if (ecmd->autoneg == AUTONEG_ENABLE) {
+		ecmd->advertising = supported;
+		sky2->duplex = -1;
+		sky2->speed = -1;
+	} else {
+		u32 setting;
+
+		switch(ecmd->speed) {
+		case SPEED_1000:
+			if (ecmd->duplex == DUPLEX_FULL)
+				setting = SUPPORTED_1000baseT_Full;
+			else if (ecmd->duplex == DUPLEX_HALF)
+				setting = SUPPORTED_1000baseT_Half;
+			else
+				return -EINVAL;
+			break;
+		case SPEED_100:
+			if (ecmd->duplex == DUPLEX_FULL)
+				setting = SUPPORTED_100baseT_Full;
+			else if (ecmd->duplex == DUPLEX_HALF)
+				setting = SUPPORTED_100baseT_Half;
+			else
+				return -EINVAL;
+			break;
+
+		case SPEED_10:
+			if (ecmd->duplex == DUPLEX_FULL)
+				setting = SUPPORTED_10baseT_Full;
+			else if (ecmd->duplex == DUPLEX_HALF)
+				setting = SUPPORTED_10baseT_Half;
+			else
+				return -EINVAL;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		if ((setting & supported) == 0)
+			return -EINVAL;
+
+		sky2->speed = ecmd->speed;
+		sky2->duplex = ecmd->duplex;
+	}
+
+	sky2->autoneg = ecmd->autoneg;
+	sky2->advertising = ecmd->advertising;
+
+	if (netif_running(dev)) {
+		sky2_down(dev);
+		sky2_up(dev);
+	}
+
+	return 0;
+}
+
+static void sky2_get_drvinfo(struct net_device *dev,
+			     struct ethtool_drvinfo *info)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+
+	strcpy(info->driver, DRV_NAME);
+	strcpy(info->version, DRV_VERSION);
+	strcpy(info->fw_version, "N/A");
+	strcpy(info->bus_info, pci_name(sky2->hw->pdev));
+}
+
+static const struct sky2_stat {
+	char 	   name[ETH_GSTRING_LEN];
+	u16	   offset;
+} sky2_stats[] = {
+	{ "tx_bytes",	   GM_TXO_OK_HI },
+	{ "rx_bytes",	   GM_RXO_OK_HI },
+	{ "tx_broadcast",  GM_TXF_BC_OK },
+	{ "rx_broadcast",  GM_RXF_BC_OK },
+	{ "tx_multicast",  GM_TXF_MC_OK },
+	{ "rx_multicast",  GM_RXF_MC_OK },
+	{ "tx_unicast",    GM_TXF_UC_OK },
+	{ "rx_unicast",    GM_RXF_UC_OK },
+	{ "tx_mac_pause",  GM_TXF_MPAUSE },
+	{ "rx_mac_pause",  GM_RXF_MPAUSE },
+	{ "collisions",    GM_TXF_SNG_COL },
+	{ "late_collision", GM_TXF_LAT_COL },
+	{ "aborted", 	   GM_TXF_ABO_COL },
+	{ "multi_collisions", GM_TXF_MUL_COL },
+	{ "fifo_underrun", GM_TXE_FIFO_UR },
+	{ "fifo_overflow", GM_RXE_FIFO_OV },
+	{ "rx_toolong",    GM_RXF_LNG_ERR },
+	{ "rx_jabber",     GM_RXF_JAB_PKT },
+	{ "rx_runt", 	   GM_RXE_FRAG },
+	{ "rx_too_long",   GM_RXF_LNG_ERR },
+	{ "rx_fcs_error",   GM_RXF_FCS_ERR },
+};
+
+
+static u32 sky2_get_rx_csum(struct net_device *dev)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+
+	return sky2->rx_csum;
+}
+
+static int sky2_set_rx_csum(struct net_device *dev, u32 data)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+
+	sky2->rx_csum = data;
+	sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+		     data ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
+
+	return 0;
+}
+
+static u32 sky2_get_msglevel(struct net_device *netdev)
+{
+	struct sky2_port *sky2 = netdev_priv(netdev);
+	return sky2->msg_enable;
+}
+
+static void sky2_phy_stats(struct sky2_port *sky2, u64 *data)
+{
+	struct sky2_hw *hw = sky2->hw;
+	unsigned port = sky2->port;
+	int i;
+
+	data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32
+		| (u64) gma_read32(hw, port, GM_TXO_OK_LO);
+	data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32
+		| (u64) gma_read32(hw, port, GM_RXO_OK_LO);
+
+	for (i = 2; i < ARRAY_SIZE(sky2_stats); i++)
+		data[i] = (u64) gma_read32(hw, port, sky2_stats[i].offset);
+}
+
+
+static void sky2_set_msglevel(struct net_device *netdev, u32 value)
+{
+	struct sky2_port *sky2 = netdev_priv(netdev);
+	sky2->msg_enable = value;
+}
+
+static int sky2_get_stats_count(struct net_device *dev)
+{
+	return ARRAY_SIZE(sky2_stats);
+}
+
+static void sky2_get_ethtool_stats(struct net_device *dev,
+				   struct ethtool_stats *stats, u64 *data)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+
+	sky2_phy_stats(sky2, data);
+}
+
+static void sky2_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < ARRAY_SIZE(sky2_stats); i++)
+			memcpy(data + i * ETH_GSTRING_LEN,
+			       sky2_stats[i].name, ETH_GSTRING_LEN);
+		break;
+	}
+}
+
+/* Use hardware MIB variables for critical path statistics and
+ * transmit feedback not reported at interrupt.
+ * Other errors are accounted for in interrupt handler.
+ */
+static struct net_device_stats *sky2_get_stats(struct net_device *dev)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	u64 data[ARRAY_SIZE(sky2_stats)];
+
+	sky2_phy_stats(sky2, data);
+
+	sky2->net_stats.tx_bytes = data[0];
+	sky2->net_stats.rx_bytes = data[1];
+	sky2->net_stats.tx_packets = data[2] + data[4] + data[6];
+	sky2->net_stats.rx_packets = data[3] + data[5] + data[7];
+	sky2->net_stats.multicast = data[5] + data[7];
+	sky2->net_stats.collisions = data[10];
+	sky2->net_stats.tx_aborted_errors = data[12];
+
+	return &sky2->net_stats;
+}
+
+static int sky2_set_mac_address(struct net_device *dev, void *p)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	struct sockaddr *addr = p;
+	int err = 0;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	sky2_down(dev);
+	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+	memcpy_toio(sky2->hw->regs + B2_MAC_1 + sky2->port*8,
+		    dev->dev_addr, ETH_ALEN);
+	memcpy_toio(sky2->hw->regs + B2_MAC_2 + sky2->port*8,
+		    dev->dev_addr, ETH_ALEN);
+	if (dev->flags & IFF_UP)
+		err = sky2_up(dev);
+	return err;
+}
+
+static void sky2_set_multicast(struct net_device *dev)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	struct sky2_hw *hw = sky2->hw;
+	unsigned port = sky2->port;
+	struct dev_mc_list *list = dev->mc_list;
+	u16 reg;
+	u8 filter[8];
+
+	memset(filter, 0, sizeof(filter));
+
+	reg = gma_read16(hw, port, GM_RX_CTRL);
+	reg |= GM_RXCR_UCF_ENA;
+
+	if (dev->flags & IFF_PROMISC)		/* promiscuous */
+		reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
+	else if (dev->flags & IFF_ALLMULTI)	/* all multicast */
+		memset(filter, 0xff, sizeof(filter));
+	else if (dev->mc_count == 0)		/* no multicast */
+		reg &= ~GM_RXCR_MCF_ENA;
+	else {
+		int i;
+		reg |= GM_RXCR_MCF_ENA;
+
+		for (i = 0; list && i < dev->mc_count; i++, list = list->next) {
+			u32 bit = ether_crc(ETH_ALEN, list->dmi_addr) & 0x3f;
+			filter[bit/8] |= 1 << (bit%8);
+		}
+	}
+
+
+	gma_write16(hw, port, GM_MC_ADDR_H1,
+			 (u16)filter[0] | ((u16)filter[1] << 8));
+	gma_write16(hw, port, GM_MC_ADDR_H2,
+			 (u16)filter[2] | ((u16)filter[3] << 8));
+	gma_write16(hw, port, GM_MC_ADDR_H3,
+			 (u16)filter[4] | ((u16)filter[5] << 8));
+	gma_write16(hw, port, GM_MC_ADDR_H4,
+			 (u16)filter[6] | ((u16)filter[7] << 8));
+
+	gma_write16(hw, port, GM_RX_CTRL, reg);
+}
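+
+/*
+ * Example of the hash filter above (hash value illustrative):
+ * ether_crc() of a multicast address masked with 0x3f selects one of 64
+ * filter bits; a hash of 37 sets bit 5 of filter[4] (37 / 8 = 4,
+ * 37 % 8 = 5), which ends up in the low byte written to GM_MC_ADDR_H3.
+ */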
+
+/* Can have one global because blinking is controlled by
+ * ethtool and that is always under RTNL mutex
+ */
+static inline void sky2_led(struct sky2_hw *hw, unsigned port, int on)
+{
+	spin_lock_bh(&hw->phy_lock);
+	gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
+	if (on)
+		gm_phy_write(hw, port, PHY_MARV_LED_OVER,
+			     PHY_M_LED_MO_DUP(MO_LED_ON)  |
+			     PHY_M_LED_MO_10(MO_LED_ON)   |
+			     PHY_M_LED_MO_100(MO_LED_ON)  |
+			     PHY_M_LED_MO_1000(MO_LED_ON) |
+			     PHY_M_LED_MO_RX(MO_LED_ON));
+	else
+		gm_phy_write(hw, port, PHY_MARV_LED_OVER,
+			     PHY_M_LED_MO_DUP(MO_LED_OFF)  |
+			     PHY_M_LED_MO_10(MO_LED_OFF)   |
+			     PHY_M_LED_MO_100(MO_LED_OFF)  |
+			     PHY_M_LED_MO_1000(MO_LED_OFF) |
+			     PHY_M_LED_MO_RX(MO_LED_OFF));
+
+	spin_unlock_bh(&hw->phy_lock);
+}
+
+/* blink LEDs to identify the board */
+static int sky2_phys_id(struct net_device *dev, u32 data)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	struct sky2_hw *hw = sky2->hw;
+	unsigned port = sky2->port;
+	u16 ledctrl, ledover;
+	long ms;
+	int onoff = 1;
+
+	if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
+		ms = jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT);
+	else
+		ms = data * 1000;
+
+	/* save initial values */
+	spin_lock_bh(&hw->phy_lock);
+	ledctrl = gm_phy_read(hw, port, PHY_MARV_LED_CTRL);
+	ledover = gm_phy_read(hw, port, PHY_MARV_LED_OVER);
+	spin_unlock_bh(&hw->phy_lock);
+
+	while (ms > 0) {
+		sky2_led(hw, port, onoff);
+		onoff = !onoff;
+
+		if (msleep_interruptible(250))
+			break;	/* interrupted */
+		ms -= 250;
+	}
+
+	/* resume regularly scheduled programming */
+	spin_lock_bh(&hw->phy_lock);
+	gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
+	gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
+	spin_unlock_bh(&hw->phy_lock);
+
+	return 0;
+}
+
+static void sky2_get_pauseparam(struct net_device *dev,
+				struct ethtool_pauseparam *ecmd)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+
+	ecmd->tx_pause = sky2->tx_pause;
+	ecmd->rx_pause = sky2->rx_pause;
+	ecmd->autoneg = sky2->autoneg;
+}
+
+static int sky2_set_pauseparam(struct net_device *dev,
+			       struct ethtool_pauseparam *ecmd)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	int err = 0;
+
+	sky2->autoneg = ecmd->autoneg;
+	sky2->tx_pause = ecmd->tx_pause != 0;
+	sky2->rx_pause = ecmd->rx_pause != 0;
+
+	if (netif_running(dev)) {
+		sky2_down(dev);
+		err = sky2_up(dev);
+	}
+
+	return err;
+}
+
+#ifdef CONFIG_PM
+static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+
+	wol->supported = WAKE_MAGIC;
+	wol->wolopts = sky2->wol ? WAKE_MAGIC : 0;
+}
+
+static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct sky2_port *sky2 = netdev_priv(dev);
+	struct sky2_hw *hw = sky2->hw;
+
+	if (wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
+		return -EOPNOTSUPP;
+
+	sky2->wol = wol->wolopts == WAKE_MAGIC;
+
+	if (sky2->wol) {
+		memcpy_toio(hw->regs + WOL_MAC_ADDR, dev->dev_addr, ETH_ALEN);
+
+		sky2_write16(hw, WOL_CTRL_STAT,
+			     WOL_CTL_ENA_PME_ON_MAGIC_PKT |
+			     WOL_CTL_ENA_MAGIC_PKT_UNIT);
+	} else
+		sky2_write16(hw, WOL_CTRL_STAT, WOL_CTL_DEFAULT);
+
+	return 0;
+}
+#endif
+
+
+static struct ethtool_ops sky2_ethtool_ops = {
+	.get_settings	= sky2_get_settings,
+	.set_settings	= sky2_set_settings,
+	.get_drvinfo	= sky2_get_drvinfo,
+	.get_msglevel	= sky2_get_msglevel,
+	.set_msglevel	= sky2_set_msglevel,
+	.get_link	= ethtool_op_get_link,
+	.get_sg		= ethtool_op_get_sg,
+	.set_sg		= ethtool_op_set_sg,
+	.get_tx_csum	= ethtool_op_get_tx_csum,
+	.set_tx_csum	= ethtool_op_set_tx_csum,
+	.get_tso	= ethtool_op_get_tso,
+	.set_tso	= ethtool_op_set_tso,
+	.get_rx_csum	= sky2_get_rx_csum,
+	.set_rx_csum	= sky2_set_rx_csum,
+	.get_strings	= sky2_get_strings,
+	.get_pauseparam = sky2_get_pauseparam,
+	.set_pauseparam = sky2_set_pauseparam,
+#ifdef CONFIG_PM
+	.get_wol	= sky2_get_wol,
+	.set_wol	= sky2_set_wol,
+#endif
+	.phys_id	= sky2_phys_id,
+	.get_stats_count = sky2_get_stats_count,
+	.get_ethtool_stats = sky2_get_ethtool_stats,
+};
+
+/* Initialize network device */
+static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
+						     unsigned port, int highmem)
+{
+	struct sky2_port *sky2;
+	struct net_device *dev = alloc_etherdev(sizeof(*sky2));
+
+	if (!dev) {
+		printk(KERN_ERR "sky2 etherdev alloc failed\n");
+		return NULL;
+	}
+
+	SET_MODULE_OWNER(dev);
+	SET_NETDEV_DEV(dev, &hw->pdev->dev);
+	dev->open = sky2_up;
+	dev->stop = sky2_down;
+	dev->hard_start_xmit = sky2_xmit_frame;
+	dev->get_stats = sky2_get_stats;
+	dev->set_multicast_list = sky2_set_multicast;
+	dev->set_mac_address = sky2_set_mac_address;
+	dev->change_mtu = sky2_change_mtu;
+	SET_ETHTOOL_OPS(dev, &sky2_ethtool_ops);
+	dev->tx_timeout = sky2_tx_timeout;
+	dev->watchdog_timeo = TX_WATCHDOG;
+	if (port == 0)
+		dev->poll = sky2_poll;
+	dev->weight = NAPI_WEIGHT;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	dev->poll_controller = sky2_netpoll;
+#endif
+	dev->irq = hw->pdev->irq;
+
+	sky2 = netdev_priv(dev);
+	sky2->netdev = dev;
+	sky2->hw = hw;
+	sky2->msg_enable = netif_msg_init(debug, default_msg);
+
+	spin_lock_init(&sky2->tx_lock);
+	/* Auto speed and flow control */
+	sky2->autoneg = AUTONEG_ENABLE;
+	sky2->tx_pause = 0;
+	sky2->rx_pause = 1;
+	sky2->duplex = -1;
+	sky2->speed = -1;
+	sky2->advertising = sky2_supported_modes(hw);
+	sky2->rx_csum = 1;
+	sky2->rx_ring_size = is_ec_a1(hw) ? MIN_RX_BUFFERS : MAX_RX_BUFFERS;
+	tasklet_init(&sky2->phy_task, sky2_phy_task, (unsigned long) sky2);
+
+	hw->dev[port] = dev;
+
+	sky2->port = port;
+
+	dev->features |= NETIF_F_LLTX;
+	if (highmem)
+		dev->features |= NETIF_F_HIGHDMA;
+	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO;
+
+	/* read the mac address */
+	memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
+
+	/* device is off until link detection */
+	netif_carrier_off(dev);
+	netif_stop_queue(dev);
+
+	return dev;
+}
+
+static inline void sky2_show_addr(struct net_device *dev)
+{
+	const struct sky2_port *sky2 = netdev_priv(dev);
+
+	if (netif_msg_probe(sky2))
+		printk(KERN_INFO PFX "%s: addr %02x:%02x:%02x:%02x:%02x:%02x\n",
+		       dev->name,
+		       dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
+		       dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+}
+
+static int __devinit sky2_probe(struct pci_dev *pdev,
+				const struct pci_device_id *ent)
+{
+	struct net_device *dev, *dev1;
+	struct sky2_hw *hw;
+	int err, using_dac = 0;
+
+	if ((err = pci_enable_device(pdev))) {
+		printk(KERN_ERR PFX "%s cannot enable PCI device\n",
+		       pci_name(pdev));
+		goto err_out;
+	}
+
+	if ((err = pci_request_regions(pdev, DRV_NAME))) {
+		printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
+		       pci_name(pdev));
+		goto err_out_disable_pdev;
+	}
+
+	pci_set_master(pdev);
+
+	if (sizeof(dma_addr_t) > sizeof(u32)) {
+		err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
+		if (!err)
+			using_dac = 1;
+	}
+
+	if (!using_dac) {
+		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+		if (err) {
+			printk(KERN_ERR PFX "%s no usable DMA configuration\n",
+			       pci_name(pdev));
+			goto err_out_free_regions;
+		}
+	}
+
+#ifdef __BIG_ENDIAN
+	/* byte swap descriptors in hardware */
+	{
+		u32 reg;
+
+		pci_read_config_dword(pdev, PCI_DEV_REG2, &reg);
+		reg |= PCI_REV_DESC;
+		pci_write_config_dword(pdev, PCI_DEV_REG2, reg);
+	}
+#endif
+
+	err = -ENOMEM;
+	hw = kmalloc(sizeof(*hw), GFP_KERNEL);
+	if (!hw) {
+		printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n",
+		       pci_name(pdev));
+		goto err_out_free_regions;
+	}
+
+	memset(hw, 0, sizeof(*hw));
+	hw->pdev = pdev;
+	spin_lock_init(&hw->phy_lock);
+
+	hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
+	if (!hw->regs) {
+		printk(KERN_ERR PFX "%s: cannot map device registers\n",
+		       pci_name(pdev));
+		goto err_out_free_hw;
+	}
+
+	err = request_irq(pdev->irq, sky2_intr, SA_SHIRQ, DRV_NAME, hw);
+	if (err) {
+		printk(KERN_ERR PFX "%s: cannot assign irq %d\n",
+		       pci_name(pdev), pdev->irq);
+		goto err_out_iounmap;
+	}
+	pci_set_drvdata(pdev, hw);
+
+	err = sky2_reset(hw);
+	if (err)
+		goto err_out_free_irq;
+
+	printk(KERN_INFO PFX "addr 0x%lx irq %d chip 0x%x (%s) rev %d\n",
+	       pci_resource_start(pdev, 0), pdev->irq,
+	       hw->chip_id, chip_name(hw->chip_id), hw->chip_rev);
+
+	if ((dev = sky2_init_netdev(hw, 0, using_dac)) == NULL)
+		goto err_out_free_pci;
+
+	if ((err = register_netdev(dev))) {
+		printk(KERN_ERR PFX "%s: cannot register net device\n",
+		       pci_name(pdev));
+		goto err_out_free_netdev;
+	}
+
+	sky2_show_addr(dev);
+
+	if (hw->ports > 1 && (dev1 = sky2_init_netdev(hw, 1, using_dac))) {
+		if (register_netdev(dev1) == 0)
+			sky2_show_addr(dev1);
+		else {
+			/* Failure to register second port need not be fatal */
+			printk(KERN_WARNING PFX "register of second port failed\n");
+			hw->dev[1] = NULL;
+			free_netdev(dev1);
+		}
+	}
+
+	return 0;
+
+err_out_free_netdev:
+	free_netdev(dev);
+
+err_out_free_irq:
+	free_irq(pdev->irq, hw);
+err_out_free_pci:
+	pci_free_consistent(hw->pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
+err_out_iounmap:
+	iounmap(hw->regs);
+err_out_free_hw:
+	kfree(hw);
+err_out_free_regions:
+	pci_release_regions(pdev);
+err_out_disable_pdev:
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+err_out:
+	return err;
+}
+
+static void __devexit sky2_remove(struct pci_dev *pdev)
+{
+	struct sky2_hw *hw  = pci_get_drvdata(pdev);
+	struct net_device *dev0, *dev1;
+
+	if (!hw)
+		return;
+
+	if ((dev1 = hw->dev[1]))
+		unregister_netdev(dev1);
+	dev0 = hw->dev[0];
+	unregister_netdev(dev0);
+
+	sky2_write16(hw, B0_Y2LED, LED_STAT_OFF);
+
+	free_irq(pdev->irq, hw);
+	pci_free_consistent(pdev, STATUS_LE_BYTES,
+			    hw->st_le, hw->st_dma);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	if (dev1)
+		free_netdev(dev1);
+	free_netdev(dev0);
+	iounmap(hw->regs);
+	kfree(hw);
+	pci_set_drvdata(pdev, NULL);
+}
+
+#ifdef CONFIG_PM
+static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct sky2_hw *hw  = pci_get_drvdata(pdev);
+	int i, wol = 0;
+
+	for (i = 0; i < 2; i++) {
+		struct net_device *dev = hw->dev[i];
+
+		if (dev) {
+			struct sky2_port *sky2 = netdev_priv(dev);
+			if (netif_running(dev)) {
+				netif_carrier_off(dev);
+				sky2_down(dev);
+			}
+			netif_device_detach(dev);
+			wol |= sky2->wol;
+		}
+	}
+
+	pci_save_state(pdev);
+	pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+	return 0;
+}
+
+static int sky2_resume(struct pci_dev *pdev)
+{
+	struct sky2_hw *hw  = pci_get_drvdata(pdev);
+	int i;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	pci_enable_wake(pdev, PCI_D0, 0);
+
+	sky2_reset(hw);
+
+	for (i = 0; i < 2; i++) {
+		struct net_device *dev = hw->dev[i];
+		if (dev) {
+			netif_device_attach(dev);
+			if (netif_running(dev))
+				sky2_up(dev);
+		}
+	}
+	return 0;
+}
+#endif
+
+static struct pci_driver sky2_driver = {
+	.name =         DRV_NAME,
+	.id_table =     sky2_id_table,
+	.probe =        sky2_probe,
+	.remove =       __devexit_p(sky2_remove),
+#ifdef CONFIG_PM
+	.suspend =	sky2_suspend,
+	.resume = 	sky2_resume,
+#endif
+};
+
+static int __init sky2_init_module(void)
+{
+
+	return pci_module_init(&sky2_driver);
+}
+
+static void __exit sky2_cleanup_module(void)
+{
+	pci_unregister_driver(&sky2_driver);
+}
+
+module_init(sky2_init_module);
+module_exit(sky2_cleanup_module);
+
+MODULE_DESCRIPTION("Marvell Yukon 2 Gigabit Ethernet driver");
+MODULE_AUTHOR("Stephen Hemminger <shemminger@osdl.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
new file mode 100644
--- /dev/null
+++ b/drivers/net/sky2.h
@@ -0,0 +1,1935 @@
+/*
+ * Definitions for the new Marvell Yukon 2 driver.
+ */
+#ifndef _SKY2_H
+#define _SKY2_H
+
+/* PCI config registers */
+#define PCI_DEV_REG1	0x40
+#define PCI_DEV_REG2	0x44
+#define PCI_DEV_STATUS  0x7c
+#define PCI_OS_PCI_X    (1<<26)
+
+#define PEX_LNK_STAT	0xf2
+#define PEX_UNC_ERR_STAT 0x104
+#define PEX_DEV_CTRL	0xe8
+
+/* Yukon-2 */
+enum pci_dev_reg_1 {
+	PCI_Y2_PIG_ENA	 = 1<<31, /* Enable Plug-in-Go (YUKON-2) */
+	PCI_Y2_DLL_DIS	 = 1<<30, /* Disable PCI DLL (YUKON-2) */
+	PCI_Y2_PHY2_COMA = 1<<29, /* Set PHY 2 to Coma Mode (YUKON-2) */
+	PCI_Y2_PHY1_COMA = 1<<28, /* Set PHY 1 to Coma Mode (YUKON-2) */
+	PCI_Y2_PHY2_POWD = 1<<27, /* Set PHY 2 to Power Down (YUKON-2) */
+	PCI_Y2_PHY1_POWD = 1<<26, /* Set PHY 1 to Power Down (YUKON-2) */
+};
+
+enum pci_dev_reg_2 {
+	PCI_VPD_WR_THR	= 0xffL<<24,	/* Bit 31..24:	VPD Write Threshold */
+	PCI_DEV_SEL	= 0x7fL<<17,	/* Bit 23..17:	EEPROM Device Select */
+	PCI_VPD_ROM_SZ	= 7L<<14,	/* Bit 16..14:	VPD ROM Size	*/
+
+	PCI_PATCH_DIR	= 0xfL<<8,	/* Bit 11.. 8:	Ext Patches dir 3..0 */
+	PCI_EXT_PATCHS	= 0xfL<<4,	/* Bit	7.. 4:	Extended Patches 3..0 */
+	PCI_EN_DUMMY_RD	= 1<<3,		/* Enable Dummy Read */
+	PCI_REV_DESC	= 1<<2,		/* Reverse Desc. Bytes */
+
+	PCI_USEDATA64	= 1<<0,		/* Use 64Bit Data bus ext */
+};
+
+
+#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
+			       PCI_STATUS_SIG_SYSTEM_ERROR | \
+			       PCI_STATUS_REC_MASTER_ABORT | \
+			       PCI_STATUS_REC_TARGET_ABORT | \
+			       PCI_STATUS_PARITY)
+
+enum pex_dev_ctrl {
+	PEX_DC_MAX_RRS_MSK	= 7<<12, /* Bit 14..12:	Max. Read Request Size */
+	PEX_DC_EN_NO_SNOOP	= 1<<11,/* Enable No Snoop */
+	PEX_DC_EN_AUX_POW	= 1<<10,/* Enable AUX Power */
+	PEX_DC_EN_PHANTOM	= 1<<9,	/* Enable Phantom Functions */
+	PEX_DC_EN_EXT_TAG	= 1<<8,	/* Enable Extended Tag Field */
+	PEX_DC_MAX_PLS_MSK	= 7<<5,	/* Bit  7.. 5:	Max. Payload Size Mask */
+	PEX_DC_EN_REL_ORD	= 1<<4,	/* Enable Relaxed Ordering */
+	PEX_DC_EN_UNS_RQ_RP	= 1<<3,	/* Enable Unsupported Request Reporting */
+	PEX_DC_EN_FAT_ER_RP	= 1<<2,	/* Enable Fatal Error Reporting */
+	PEX_DC_EN_NFA_ER_RP	= 1<<1,	/* Enable Non-Fatal Error Reporting */
+	PEX_DC_EN_COR_ER_RP	= 1<<0,	/* Enable Correctable Error Reporting */
+};
+#define  PEX_DC_MAX_RD_RQ_SIZE(x) (((x)<<12) & PEX_DC_MAX_RRS_MSK)
+
+/* PEX_UNC_ERR_STAT	 PEX Uncorrectable Errors Status Register (Yukon-2) */
+enum pex_err {
+	PEX_UNSUP_REQ 	= 1<<20, /* Unsupported Request Error */
+
+	PEX_MALFOR_TLP	= 1<<18, /* Malformed TLP */
+
+	PEX_UNEXP_COMP	= 1<<16, /* Unexpected Completion */
+
+	PEX_COMP_TO	= 1<<14, /* Completion Timeout */
+	PEX_FLOW_CTRL_P	= 1<<13, /* Flow Control Protocol Error */
+	PEX_POIS_TLP	= 1<<12, /* Poisoned TLP */
+
+	PEX_DATA_LINK_P = 1<<4,	/* Data Link Protocol Error */
+	PEX_FATAL_ERRORS= (PEX_MALFOR_TLP | PEX_FLOW_CTRL_P | PEX_DATA_LINK_P),
+};
+
+
+enum csr_regs {
+	B0_RAP		= 0x0000,
+	B0_CTST		= 0x0004,
+	B0_Y2LED	= 0x0005,
+	B0_POWER_CTRL	= 0x0007,
+	B0_ISRC		= 0x0008,
+	B0_IMSK		= 0x000c,
+	B0_HWE_ISRC	= 0x0010,
+	B0_HWE_IMSK	= 0x0014,
+	B0_SP_ISRC	= 0x0018,
+	B0_XM1_IMSK	= 0x0020,
+	B0_XM1_ISRC	= 0x0028,
+	B0_XM1_PHY_ADDR	= 0x0030,
+	B0_XM1_PHY_DATA	= 0x0034,
+	B0_XM2_IMSK	= 0x0040,
+	B0_XM2_ISRC	= 0x0048,
+	B0_XM2_PHY_ADDR	= 0x0050,
+	B0_XM2_PHY_DATA	= 0x0054,
+	B0_R1_CSR	= 0x0060,
+	B0_R2_CSR	= 0x0064,
+	B0_XS1_CSR	= 0x0068,
+	B0_XA1_CSR	= 0x006c,
+	B0_XS2_CSR	= 0x0070,
+	B0_XA2_CSR	= 0x0074,
+
+	/* Special ISR registers (Yukon-2 only) */
+	B0_Y2_SP_ISRC2	= 0x001c,
+	B0_Y2_SP_ISRC3	= 0x0020,
+	B0_Y2_SP_EISR	= 0x0024,
+	B0_Y2_SP_LISR	= 0x0028,
+	B0_Y2_SP_ICR	= 0x002c,
+
+	B2_MAC_1	= 0x0100,
+	B2_MAC_2	= 0x0108,
+	B2_MAC_3	= 0x0110,
+	B2_CONN_TYP	= 0x0118,
+	B2_PMD_TYP	= 0x0119,
+	B2_MAC_CFG	= 0x011a,
+	B2_CHIP_ID	= 0x011b,
+	B2_E_0		= 0x011c,
+	B2_E_1		= 0x011d,
+	B2_E_2		= 0x011e,
+	B2_Y2_CLK_GATE  = 0x011d,
+	B2_Y2_HW_RES	= 0x011e,
+	B2_E_3		= 0x011f,
+	B2_Y2_CLK_CTRL	= 0x0120,
+	B2_LD_CTRL	= 0x0128,
+	B2_LD_TEST	= 0x0129,
+	B2_TI_INI	= 0x0130,
+	B2_TI_VAL	= 0x0134,
+	B2_TI_CTRL	= 0x0138,
+	B2_TI_TEST	= 0x0139,
+	B2_IRQM_INI	= 0x0140,
+	B2_IRQM_VAL	= 0x0144,
+	B2_IRQM_CTRL	= 0x0148,
+	B2_IRQM_TEST	= 0x0149,
+	B2_IRQM_MSK	= 0x014c,
+	B2_IRQM_HWE_MSK	= 0x0150,
+	B2_TST_CTRL1	= 0x0158,
+	B2_TST_CTRL2	= 0x0159,
+	B2_GP_IO	= 0x015c,
+	B2_I2C_CTRL	= 0x0160,
+	B2_I2C_DATA	= 0x0164,
+	B2_I2C_IRQ	= 0x0168,
+	B2_I2C_SW	= 0x016c,
+	B2_BSC_INI	= 0x0170,
+	B2_BSC_VAL	= 0x0174,
+	B2_BSC_CTRL	= 0x0178,
+	B2_BSC_STAT	= 0x0179,
+	B2_BSC_TST	= 0x017a,
+
+	B3_RAM_ADDR	= 0x0180,
+	B3_RAM_DATA_LO	= 0x0184,
+	B3_RAM_DATA_HI	= 0x0188,
+
+/* RAM Interface Registers */
+/* Yukon-2: use RAM_BUFFER() to access the RAM buffer */
+/*
+ * The HW spec calls these registers Timeout Value 0..11, but these names are
+ * not usable in SW. Note that these are NOT real timeouts; they are
+ * the number of qWords transferred continuously.
+ */
+#define RAM_BUFFER(port, reg)	(reg | (port <<6))
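+/*
+ * Example (illustrative): RAM_BUFFER(0, reg) is just 'reg'; port 1 sets
+ * bit 6, so RAM_BUFFER(1, B3_RI_WTO_R1) == B3_RI_WTO_R1 + 0x40.
+ */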
+
+	B3_RI_WTO_R1	= 0x0190,
+	B3_RI_WTO_XA1	= 0x0191,
+	B3_RI_WTO_XS1	= 0x0192,
+	B3_RI_RTO_R1	= 0x0193,
+	B3_RI_RTO_XA1	= 0x0194,
+	B3_RI_RTO_XS1	= 0x0195,
+	B3_RI_WTO_R2	= 0x0196,
+	B3_RI_WTO_XA2	= 0x0197,
+	B3_RI_WTO_XS2	= 0x0198,
+	B3_RI_RTO_R2	= 0x0199,
+	B3_RI_RTO_XA2	= 0x019a,
+	B3_RI_RTO_XS2	= 0x019b,
+	B3_RI_TO_VAL	= 0x019c,
+	B3_RI_CTRL	= 0x01a0,
+	B3_RI_TEST	= 0x01a2,
+	B3_MA_TOINI_RX1	= 0x01b0,
+	B3_MA_TOINI_RX2	= 0x01b1,
+	B3_MA_TOINI_TX1	= 0x01b2,
+	B3_MA_TOINI_TX2	= 0x01b3,
+	B3_MA_TOVAL_RX1	= 0x01b4,
+	B3_MA_TOVAL_RX2	= 0x01b5,
+	B3_MA_TOVAL_TX1	= 0x01b6,
+	B3_MA_TOVAL_TX2	= 0x01b7,
+	B3_MA_TO_CTRL	= 0x01b8,
+	B3_MA_TO_TEST	= 0x01ba,
+	B3_MA_RCINI_RX1	= 0x01c0,
+	B3_MA_RCINI_RX2	= 0x01c1,
+	B3_MA_RCINI_TX1	= 0x01c2,
+	B3_MA_RCINI_TX2	= 0x01c3,
+	B3_MA_RCVAL_RX1	= 0x01c4,
+	B3_MA_RCVAL_RX2	= 0x01c5,
+	B3_MA_RCVAL_TX1	= 0x01c6,
+	B3_MA_RCVAL_TX2	= 0x01c7,
+	B3_MA_RC_CTRL	= 0x01c8,
+	B3_MA_RC_TEST	= 0x01ca,
+	B3_PA_TOINI_RX1	= 0x01d0,
+	B3_PA_TOINI_RX2	= 0x01d4,
+	B3_PA_TOINI_TX1	= 0x01d8,
+	B3_PA_TOINI_TX2	= 0x01dc,
+	B3_PA_TOVAL_RX1	= 0x01e0,
+	B3_PA_TOVAL_RX2	= 0x01e4,
+	B3_PA_TOVAL_TX1	= 0x01e8,
+	B3_PA_TOVAL_TX2	= 0x01ec,
+	B3_PA_CTRL	= 0x01f0,
+	B3_PA_TEST	= 0x01f2,
+
+	Y2_CFG_SPC	= 0x1c00,
+};
+
+/* Access pci config through board I/O */
+#define PCI_C(x)	(Y2_CFG_SPC + (x))
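+/*
+ * Example (illustrative): PCI_C(PCI_DEV_REG2) == 0x1c44, i.e. PCI config
+ * register 0x44 mirrored into the control/status register space.
+ */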
+
+
+/*	B0_CTST			16 bit	Control/Status register */
+enum {
+	Y2_VMAIN_AVAIL	= 1<<17, /* VMAIN available (YUKON-2 only) */
+	Y2_VAUX_AVAIL	= 1<<16,/* VAUX available (YUKON-2 only) */
+	Y2_ASF_ENABLE	= 1<<13,/* ASF Unit Enable (YUKON-2 only) */
+	Y2_ASF_DISABLE	= 1<<12,/* ASF Unit Disable (YUKON-2 only) */
+	Y2_CLK_RUN_ENA	= 1<<11,/* CLK_RUN Enable  (YUKON-2 only) */
+	Y2_CLK_RUN_DIS	= 1<<10,/* CLK_RUN Disable (YUKON-2 only) */
+	Y2_LED_STAT_ON	= 1<<9, /* Status LED On  (YUKON-2 only) */
+	Y2_LED_STAT_OFF	= 1<<8, /* Status LED Off (YUKON-2 only) */
+
+	CS_BUS_CLOCK	= 1<<9,	/* Bus Clock 0/1 = 33/66 MHz */
+	CS_BUS_SLOT_SZ	= 1<<8,	/* Slot Size 0/1 = 32/64 bit slot */
+	CS_ST_SW_IRQ	= 1<<7,	/* Set IRQ SW Request */
+	CS_CL_SW_IRQ	= 1<<6,	/* Clear IRQ SW Request */
+	CS_STOP_DONE	= 1<<5,	/* Stop Master is finished */
+	CS_STOP_MAST	= 1<<4,	/* Command Bit to stop the master */
+	CS_MRST_CLR	= 1<<3,	/* Clear Master reset	*/
+	CS_MRST_SET	= 1<<2,	/* Set Master reset	*/
+	CS_RST_CLR	= 1<<1,	/* Clear Software reset	*/
+	CS_RST_SET	= 1,	/* Set   Software reset	*/
+
+/*	B0_LED			 8 Bit	LED register */
+/* Bit  7.. 2:	reserved */
+	LED_STAT_ON	= 1<<1,	/* Status LED on	*/
+	LED_STAT_OFF	= 1,		/* Status LED off	*/
+
+/*	B0_POWER_CTRL	 8 Bit	Power Control reg (YUKON only) */
+	PC_VAUX_ENA	= 1<<7,	/* Switch VAUX Enable  */
+	PC_VAUX_DIS	= 1<<6,	/* Switch VAUX Disable */
+	PC_VCC_ENA	= 1<<5,	/* Switch VCC Enable  */
+	PC_VCC_DIS	= 1<<4,	/* Switch VCC Disable */
+	PC_VAUX_ON	= 1<<3,	/* Switch VAUX On  */
+	PC_VAUX_OFF	= 1<<2,	/* Switch VAUX Off */
+	PC_VCC_ON	= 1<<1,	/* Switch VCC On  */
+	PC_VCC_OFF	= 1<<0,	/* Switch VCC Off */
+};
+
+/*	B2_IRQM_MSK 	32 bit	IRQ Moderation Mask */
+
+/*	B0_Y2_SP_ISRC2	32 bit	Special Interrupt Source Reg 2 */
+/*	B0_Y2_SP_ISRC3	32 bit	Special Interrupt Source Reg 3 */
+/*	B0_Y2_SP_EISR	32 bit	Enter ISR Reg */
+/*	B0_Y2_SP_LISR	32 bit	Leave ISR Reg */
+enum {
+	Y2_IS_HW_ERR	= 1<<31,	/* Interrupt HW Error */
+	Y2_IS_STAT_BMU	= 1<<30,	/* Status BMU Interrupt */
+	Y2_IS_ASF	= 1<<29,	/* ASF subsystem Interrupt */
+
+	Y2_IS_POLL_CHK	= 1<<27,	/* Check IRQ from polling unit */
+	Y2_IS_TWSI_RDY	= 1<<26,	/* IRQ on end of TWSI Tx */
+	Y2_IS_IRQ_SW	= 1<<25,	/* SW forced IRQ	*/
+	Y2_IS_TIMINT	= 1<<24,	/* IRQ from Timer	*/
+
+	Y2_IS_IRQ_PHY2	= 1<<12,	/* Interrupt from PHY 2 */
+	Y2_IS_IRQ_MAC2	= 1<<11,	/* Interrupt from MAC 2 */
+	Y2_IS_CHK_RX2	= 1<<10,	/* Descriptor error Rx 2 */
+	Y2_IS_CHK_TXS2	= 1<<9,		/* Descriptor error TXS 2 */
+	Y2_IS_CHK_TXA2	= 1<<8,		/* Descriptor error TXA 2 */
+
+	Y2_IS_IRQ_PHY1	= 1<<4,		/* Interrupt from PHY 1 */
+	Y2_IS_IRQ_MAC1	= 1<<3,		/* Interrupt from MAC 1 */
+	Y2_IS_CHK_RX1	= 1<<2,		/* Descriptor error Rx 1 */
+	Y2_IS_CHK_TXS1	= 1<<1,		/* Descriptor error TXS 1 */
+	Y2_IS_CHK_TXA1	= 1<<0,		/* Descriptor error TXA 1 */
+
+	Y2_IS_BASE	= Y2_IS_HW_ERR | Y2_IS_STAT_BMU |
+			  Y2_IS_POLL_CHK | Y2_IS_TWSI_RDY |
+			  Y2_IS_IRQ_SW | Y2_IS_TIMINT,
+	Y2_IS_PORT_1	= Y2_IS_IRQ_PHY1 | Y2_IS_IRQ_MAC1 |
+			  Y2_IS_CHK_RX1 | Y2_IS_CHK_TXA1 | Y2_IS_CHK_TXS1,
+	Y2_IS_PORT_2	= Y2_IS_IRQ_PHY2 | Y2_IS_IRQ_MAC2 |
+			  Y2_IS_CHK_RX2 | Y2_IS_CHK_TXA2 | Y2_IS_CHK_TXS2,
+};
+
+/*	B2_IRQM_HWE_MSK	32 bit	IRQ Moderation HW Error Mask */
+enum {
+	IS_ERR_MSK	= 0x00003fff,/* 		All Error bits */
+
+	IS_IRQ_TIST_OV	= 1<<13, /* Time Stamp Timer Overflow (YUKON only) */
+	IS_IRQ_SENSOR	= 1<<12, /* IRQ from Sensor (YUKON only) */
+	IS_IRQ_MST_ERR	= 1<<11, /* IRQ master error detected */
+	IS_IRQ_STAT	= 1<<10, /* IRQ status exception */
+	IS_NO_STAT_M1	= 1<<9,	/* No Rx Status from MAC 1 */
+	IS_NO_STAT_M2	= 1<<8,	/* No Rx Status from MAC 2 */
+	IS_NO_TIST_M1	= 1<<7,	/* No Time Stamp from MAC 1 */
+	IS_NO_TIST_M2	= 1<<6,	/* No Time Stamp from MAC 2 */
+	IS_RAM_RD_PAR	= 1<<5,	/* RAM Read  Parity Error */
+	IS_RAM_WR_PAR	= 1<<4,	/* RAM Write Parity Error */
+	IS_M1_PAR_ERR	= 1<<3,	/* MAC 1 Parity Error */
+	IS_M2_PAR_ERR	= 1<<2,	/* MAC 2 Parity Error */
+	IS_R1_PAR_ERR	= 1<<1,	/* Queue R1 Parity Error */
+	IS_R2_PAR_ERR	= 1<<0,	/* Queue R2 Parity Error */
+};
+
+/* Hardware error interrupt mask for Yukon 2 */
+enum {
+	Y2_IS_TIST_OV	= 1<<29,/* Time Stamp Timer overflow interrupt */
+	Y2_IS_SENSOR	= 1<<28, /* Sensor interrupt */
+	Y2_IS_MST_ERR	= 1<<27, /* Master error interrupt */
+	Y2_IS_IRQ_STAT	= 1<<26, /* Status exception interrupt */
+	Y2_IS_PCI_EXP	= 1<<25, /* PCI-Express interrupt */
+	Y2_IS_PCI_NEXP	= 1<<24, /* PCI-Express error similar to PCI error */
+						/* Link 2 */
+	Y2_IS_PAR_RD2	= 1<<13, /* Read RAM parity error interrupt */
+	Y2_IS_PAR_WR2	= 1<<12, /* Write RAM parity error interrupt */
+	Y2_IS_PAR_MAC2	= 1<<11, /* MAC hardware fault interrupt */
+	Y2_IS_PAR_RX2	= 1<<10, /* Parity Error Rx Queue 2 */
+	Y2_IS_TCP_TXS2	= 1<<9, /* TCP length mismatch sync Tx queue IRQ */
+	Y2_IS_TCP_TXA2	= 1<<8, /* TCP length mismatch async Tx queue IRQ */
+						/* Link 1 */
+	Y2_IS_PAR_RD1	= 1<<5, /* Read RAM parity error interrupt */
+	Y2_IS_PAR_WR1	= 1<<4, /* Write RAM parity error interrupt */
+	Y2_IS_PAR_MAC1	= 1<<3, /* MAC hardware fault interrupt */
+	Y2_IS_PAR_RX1	= 1<<2, /* Parity Error Rx Queue 1 */
+	Y2_IS_TCP_TXS1	= 1<<1, /* TCP length mismatch sync Tx queue IRQ */
+	Y2_IS_TCP_TXA1	= 1<<0, /* TCP length mismatch async Tx queue IRQ */
+
+	Y2_HWE_L1_MASK	= Y2_IS_PAR_RD1 | Y2_IS_PAR_WR1 | Y2_IS_PAR_MAC1 |
+			  Y2_IS_PAR_RX1 | Y2_IS_TCP_TXS1| Y2_IS_TCP_TXA1,
+	Y2_HWE_L2_MASK	= Y2_IS_PAR_RD2 | Y2_IS_PAR_WR2 | Y2_IS_PAR_MAC2 |
+			  Y2_IS_PAR_RX2 | Y2_IS_TCP_TXS2| Y2_IS_TCP_TXA2,
+
+	Y2_HWE_ALL_MASK	= Y2_IS_SENSOR | Y2_IS_MST_ERR | Y2_IS_IRQ_STAT |
+			  Y2_IS_PCI_EXP | Y2_IS_PCI_NEXP |
+			  Y2_HWE_L1_MASK | Y2_HWE_L2_MASK,
+};
+
+/*	B28_DPT_CTRL	 8 bit	Descriptor Poll Timer Ctrl Reg */
+enum {
+	DPT_START	= 1<<1,
+	DPT_STOP	= 1<<0,
+};
+
+/*	B2_TST_CTRL1	 8 bit	Test Control Register 1 */
+enum {
+	TST_FRC_DPERR_MR = 1<<7, /* force DATAPERR on MST RD */
+	TST_FRC_DPERR_MW = 1<<6, /* force DATAPERR on MST WR */
+	TST_FRC_DPERR_TR = 1<<5, /* force DATAPERR on TRG RD */
+	TST_FRC_DPERR_TW = 1<<4, /* force DATAPERR on TRG WR */
+	TST_FRC_APERR_M	 = 1<<3, /* force ADDRPERR on MST */
+	TST_FRC_APERR_T	 = 1<<2, /* force ADDRPERR on TRG */
+	TST_CFG_WRITE_ON = 1<<1, /* Enable  Config Reg WR */
+	TST_CFG_WRITE_OFF= 1<<0, /* Disable Config Reg WR */
+};
+
+/*	B2_MAC_CFG		 8 bit	MAC Configuration / Chip Revision */
+enum {
+	CFG_CHIP_R_MSK	  = 0xf<<4,	/* Bit 7.. 4: Chip Revision */
+					/* Bit 3.. 2:	reserved */
+	CFG_DIS_M2_CLK	  = 1<<1,	/* Disable Clock for 2nd MAC */
+	CFG_SNG_MAC	  = 1<<0,	/* MAC Config: 0=2 MACs / 1=1 MAC*/
+};
+
+/*	B2_CHIP_ID		 8 bit 	Chip Identification Number */
+enum {
+	CHIP_ID_GENESIS	   = 0x0a, /* Chip ID for GENESIS */
+	CHIP_ID_YUKON	   = 0xb0, /* Chip ID for YUKON */
+	CHIP_ID_YUKON_LITE = 0xb1, /* Chip ID for YUKON-Lite (Rev. A1-A3) */
+	CHIP_ID_YUKON_LP   = 0xb2, /* Chip ID for YUKON-LP */
+	CHIP_ID_YUKON_XL   = 0xb3, /* Chip ID for YUKON-2 XL */
+	CHIP_ID_YUKON_EC   = 0xb6, /* Chip ID for YUKON-2 EC */
+ 	CHIP_ID_YUKON_FE   = 0xb7, /* Chip ID for YUKON-2 FE */
+
+	CHIP_REV_YU_EC_A1    = 0,  /* Chip Rev. for Yukon-EC A1/A0 */
+	CHIP_REV_YU_EC_A2    = 1,  /* Chip Rev. for Yukon-EC A2 */
+	CHIP_REV_YU_EC_A3    = 2,  /* Chip Rev. for Yukon-EC A3 */
+};
+
+/*	B2_Y2_CLK_GATE	 8 bit	Clock Gating (Yukon-2 only) */
+enum {
+	Y2_STATUS_LNK2_INAC	= 1<<7, /* Status Link 2 inactive (0 = active) */
+	Y2_CLK_GAT_LNK2_DIS	= 1<<6, /* Disable clock gating Link 2 */
+	Y2_COR_CLK_LNK2_DIS	= 1<<5, /* Disable Core clock Link 2 */
+	Y2_PCI_CLK_LNK2_DIS	= 1<<4, /* Disable PCI clock Link 2 */
+	Y2_STATUS_LNK1_INAC	= 1<<3, /* Status Link 1 inactive (0 = active) */
+	Y2_CLK_GAT_LNK1_DIS	= 1<<2, /* Disable clock gating Link 1 */
+	Y2_COR_CLK_LNK1_DIS	= 1<<1, /* Disable Core clock Link 1 */
+	Y2_PCI_CLK_LNK1_DIS	= 1<<0, /* Disable PCI clock Link 1 */
+};
+
+/*	B2_Y2_HW_RES	8 bit	HW Resources (Yukon-2 only) */
+enum {
+	CFG_LED_MODE_MSK	= 7<<2,	/* Bit  4.. 2:	LED Mode Mask */
+	CFG_LINK_2_AVAIL	= 1<<1,	/* Link 2 available */
+	CFG_LINK_1_AVAIL	= 1<<0,	/* Link 1 available */
+};
+#define CFG_LED_MODE(x)		(((x) & CFG_LED_MODE_MSK) >> 2)
+#define CFG_DUAL_MAC_MSK	(CFG_LINK_2_AVAIL | CFG_LINK_1_AVAIL)
+
+
+/* B2_Y2_CLK_CTRL	32 bit	Clock Frequency Control Register (Yukon-2/EC) */
+enum {
+	Y2_CLK_DIV_VAL_MSK	= 0xff<<16,/* Bit 23..16: Clock Divisor Value */
+#define	Y2_CLK_DIV_VAL(x)	(((x)<<16) & Y2_CLK_DIV_VAL_MSK)
+	Y2_CLK_DIV_VAL2_MSK	= 7<<21,   /* Bit 23..21: Clock Divisor Value */
+	Y2_CLK_SELECT2_MSK	= 0x1f<<16,/* Bit 20..16: Clock Select */
+#define Y2_CLK_DIV_VAL_2(x)	(((x)<<21) & Y2_CLK_DIV_VAL2_MSK)
+#define Y2_CLK_SEL_VAL_2(x)	(((x)<<16) & Y2_CLK_SELECT2_MSK)
+	Y2_CLK_DIV_ENA		= 1<<1, /* Enable  Core Clock Division */
+	Y2_CLK_DIV_DIS		= 1<<0,	/* Disable Core Clock Division */
+};
+
+/*	B2_TI_CTRL		 8 bit	Timer control */
+/*	B2_IRQM_CTRL	 8 bit	IRQ Moderation Timer Control */
+enum {
+	TIM_START	= 1<<2,	/* Start Timer */
+	TIM_STOP	= 1<<1,	/* Stop  Timer */
+	TIM_CLR_IRQ	= 1<<0,	/* Clear Timer IRQ (!IRQM) */
+};
+
+/*	B2_TI_TEST		 8 Bit	Timer Test */
+/*	B2_IRQM_TEST	 8 bit	IRQ Moderation Timer Test */
+/*	B28_DPT_TST		 8 bit	Descriptor Poll Timer Test Reg */
+enum {
+	TIM_T_ON	= 1<<2,	/* Test mode on */
+	TIM_T_OFF	= 1<<1,	/* Test mode off */
+	TIM_T_STEP	= 1<<0,	/* Test step */
+};
+
+/*	B3_RAM_ADDR		32 bit	RAM Address, to read or write */
+					/* Bit 31..19:	reserved */
+#define RAM_ADR_RAN	0x0007ffffL	/* Bit 18.. 0:	RAM Address Range */
+/* RAM Interface Registers */
+
+/*	B3_RI_CTRL		16 bit	RAM Iface Control Register */
+enum {
+	RI_CLR_RD_PERR	= 1<<9,	/* Clear IRQ RAM Read Parity Err */
+	RI_CLR_WR_PERR	= 1<<8,	/* Clear IRQ RAM Write Parity Err*/
+
+	RI_RST_CLR	= 1<<1,	/* Clear RAM Interface Reset */
+	RI_RST_SET	= 1<<0,	/* Set   RAM Interface Reset */
+};
+
+#define SK_RI_TO_53	36		/* RAM interface timeout */
+
+
+/* Port related registers FIFO, and Arbiter */
+#define SK_REG(port,reg)	(((port)<<7)+(reg))
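+/*
+ * Example (illustrative): SK_REG(0, reg) is just 'reg', while port 1 adds
+ * 0x80 to the offset, e.g. SK_REG(1, TXA_CTRL) == 0x0290.
+ */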
+
+/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
+/*	TXA_ITI_INI		32 bit	Tx Arb Interval Timer Init Val */
+/*	TXA_ITI_VAL		32 bit	Tx Arb Interval Timer Value */
+/*	TXA_LIM_INI		32 bit	Tx Arb Limit Counter Init Val */
+/*	TXA_LIM_VAL		32 bit	Tx Arb Limit Counter Value */
+
+#define TXA_MAX_VAL	0x00ffffffUL	/* Bit 23.. 0:	Max TXA Timer/Cnt Val */
+
+/*	TXA_CTRL		 8 bit	Tx Arbiter Control Register */
+enum {
+	TXA_ENA_FSYNC	= 1<<7,	/* Enable  force of sync Tx queue */
+	TXA_DIS_FSYNC	= 1<<6,	/* Disable force of sync Tx queue */
+	TXA_ENA_ALLOC	= 1<<5,	/* Enable  alloc of free bandwidth */
+	TXA_DIS_ALLOC	= 1<<4,	/* Disable alloc of free bandwidth */
+	TXA_START_RC	= 1<<3,	/* Start sync Rate Control */
+	TXA_STOP_RC	= 1<<2,	/* Stop  sync Rate Control */
+	TXA_ENA_ARB	= 1<<1,	/* Enable  Tx Arbiter */
+	TXA_DIS_ARB	= 1<<0,	/* Disable Tx Arbiter */
+};
+
+/*
+ *	Bank 4 - 5
+ */
+/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
+enum {
+	TXA_ITI_INI	= 0x0200,/* 32 bit	Tx Arb Interval Timer Init Val*/
+	TXA_ITI_VAL	= 0x0204,/* 32 bit	Tx Arb Interval Timer Value */
+	TXA_LIM_INI	= 0x0208,/* 32 bit	Tx Arb Limit Counter Init Val */
+	TXA_LIM_VAL	= 0x020c,/* 32 bit	Tx Arb Limit Counter Value */
+	TXA_CTRL	= 0x0210,/*  8 bit	Tx Arbiter Control Register */
+	TXA_TEST	= 0x0211,/*  8 bit	Tx Arbiter Test Register */
+	TXA_STAT	= 0x0212,/*  8 bit	Tx Arbiter Status Register */
+};
+
+
+enum {
+	B6_EXT_REG	= 0x0300,/* External registers (GENESIS only) */
+	B7_CFG_SPC	= 0x0380,/* copy of the Configuration register */
+	B8_RQ1_REGS	= 0x0400,/* Receive Queue 1 */
+	B8_RQ2_REGS	= 0x0480,/* Receive Queue 2 */
+	B8_TS1_REGS	= 0x0600,/* Transmit sync queue 1 */
+	B8_TA1_REGS	= 0x0680,/* Transmit async queue 1 */
+	B8_TS2_REGS	= 0x0700,/* Transmit sync queue 2 */
+	B8_TA2_REGS	= 0x0780,/* Transmit async queue 2 */
+	B16_RAM_REGS	= 0x0800,/* RAM Buffer Registers */
+};
+
+/* Queue Register Offsets, use Q_ADDR() to access */
+enum {
+	B8_Q_REGS = 0x0400, /* base of Queue registers */
+	Q_D	= 0x00,	/* 8*32	bit	Current Descriptor */
+	Q_DA_L	= 0x20,	/* 32 bit	Current Descriptor Address Low dWord */
+	Q_DA_H	= 0x24,	/* 32 bit	Current Descriptor Address High dWord */
+	Q_AC_L	= 0x28,	/* 32 bit	Current Address Counter Low dWord */
+	Q_AC_H	= 0x2c,	/* 32 bit	Current Address Counter High dWord */
+	Q_BC	= 0x30,	/* 32 bit	Current Byte Counter */
+	Q_CSR	= 0x34,	/* 32 bit	BMU Control/Status Register */
+	Q_F	= 0x38,	/* 32 bit	Flag Register */
+	Q_T1	= 0x3c,	/* 32 bit	Test Register 1 */
+	Q_T1_TR	= 0x3c,	/*  8 bit	Test Register 1 Transfer SM */
+	Q_T1_WR	= 0x3d,	/*  8 bit	Test Register 1 Write Descriptor SM */
+	Q_T1_RD	= 0x3e,	/*  8 bit	Test Register 1 Read Descriptor SM */
+	Q_T1_SV	= 0x3f,	/*  8 bit	Test Register 1 Supervisor SM */
+	Q_T2	= 0x40,	/* 32 bit	Test Register 2	*/
+	Q_T3	= 0x44,	/* 32 bit	Test Register 3	*/
+
+/* Yukon-2 */
+	Q_DONE	= 0x24,	/* 16 bit	Done Index 		(Yukon-2 only) */
+	Q_WM	= 0x40,	/* 16 bit	FIFO Watermark */
+	Q_AL	= 0x42,	/*  8 bit	FIFO Alignment */
+	Q_RSP	= 0x44,	/* 16 bit	FIFO Read Shadow Pointer */
+	Q_RSL	= 0x46,	/*  8 bit	FIFO Read Shadow Level */
+	Q_RP	= 0x48,	/*  8 bit	FIFO Read Pointer */
+	Q_RL	= 0x4a,	/*  8 bit	FIFO Read Level */
+	Q_WP	= 0x4c,	/*  8 bit	FIFO Write Pointer */
+	Q_WSP	= 0x4d,	/*  8 bit	FIFO Write Shadow Pointer */
+	Q_WL	= 0x4e,	/*  8 bit	FIFO Write Level */
+	Q_WSL	= 0x4f,	/*  8 bit	FIFO Write Shadow Level */
+};
+#define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs))
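+/*
+ * Example (illustrative): Q_ADDR(Q_XA1, Q_CSR) == 0x06b4, the BMU
+ * Control/Status register of the first async Tx queue.
+ */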
+
+
+/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/
+enum {
+	Y2_B8_PREF_REGS		= 0x0450,
+
+	PREF_UNIT_CTRL		= 0x00,	/* 32 bit	Control register */
+	PREF_UNIT_LAST_IDX	= 0x04,	/* 16 bit	Last Index */
+	PREF_UNIT_ADDR_LO	= 0x08,	/* 32 bit	List start addr, low part */
+	PREF_UNIT_ADDR_HI	= 0x0c,	/* 32 bit	List start addr, high part*/
+	PREF_UNIT_GET_IDX	= 0x10,	/* 16 bit	Get Index */
+	PREF_UNIT_PUT_IDX	= 0x14,	/* 16 bit	Put Index */
+	PREF_UNIT_FIFO_WP	= 0x20,	/*  8 bit	FIFO write pointer */
+	PREF_UNIT_FIFO_RP	= 0x24,	/*  8 bit	FIFO read pointer */
+	PREF_UNIT_FIFO_WM	= 0x28,	/*  8 bit	FIFO watermark */
+	PREF_UNIT_FIFO_LEV	= 0x2c,	/*  8 bit	FIFO level */
+
+	PREF_UNIT_MASK_IDX	= 0x0fff,
+};
+#define Y2_QADDR(q,reg)		(Y2_B8_PREF_REGS + (q) + (reg))
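+/*
+ * Example (illustrative): Y2_QADDR(Q_R1, PREF_UNIT_PUT_IDX) == 0x0464, the
+ * Put Index of the prefetch unit serving receive queue 1.
+ */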
+
+/* RAM Buffer Register Offsets */
+enum {
+
+	RB_START	= 0x00,/* 32 bit	RAM Buffer Start Address */
+	RB_END	= 0x04,/* 32 bit	RAM Buffer End Address */
+	RB_WP	= 0x08,/* 32 bit	RAM Buffer Write Pointer */
+	RB_RP	= 0x0c,/* 32 bit	RAM Buffer Read Pointer */
+	RB_RX_UTPP	= 0x10,/* 32 bit	Rx Upper Threshold, Pause Packet */
+	RB_RX_LTPP	= 0x14,/* 32 bit	Rx Lower Threshold, Pause Packet */
+	RB_RX_UTHP	= 0x18,/* 32 bit	Rx Upper Threshold, High Prio */
+	RB_RX_LTHP	= 0x1c,/* 32 bit	Rx Lower Threshold, High Prio */
+	/* 0x10 - 0x1f:	reserved at Tx RAM Buffer Registers */
+	RB_PC	= 0x20,/* 32 bit	RAM Buffer Packet Counter */
+	RB_LEV	= 0x24,/* 32 bit	RAM Buffer Level Register */
+	RB_CTRL	= 0x28,/* 32 bit	RAM Buffer Control Register */
+	RB_TST1	= 0x29,/*  8 bit	RAM Buffer Test Register 1 */
+	RB_TST2	= 0x2a,/*  8 bit	RAM Buffer Test Register 2 */
+};
+
+/* Receive and Transmit Queues */
+enum {
+	Q_R1	= 0x0000,	/* Receive Queue 1 */
+	Q_R2	= 0x0080,	/* Receive Queue 2 */
+	Q_XS1	= 0x0200,	/* Synchronous Transmit Queue 1 */
+	Q_XA1	= 0x0280,	/* Asynchronous Transmit Queue 1 */
+	Q_XS2	= 0x0300,	/* Synchronous Transmit Queue 2 */
+	Q_XA2	= 0x0380,	/* Asynchronous Transmit Queue 2 */
+};
+
+/* Different PHY Types */
+enum {
+	PHY_ADDR_MARV	= 0,
+};
+
+#define RB_ADDR(offs, queue) (B16_RAM_REGS + (queue) + (offs))
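+/*
+ * Example (illustrative): RB_ADDR(RB_CTRL, Q_R1) == 0x0828, the control
+ * register of the RAM buffer assigned to receive queue 1.
+ */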
+
+
+enum {
+	LNK_SYNC_INI	= 0x0c30,/* 32 bit	Link Sync Cnt Init Value */
+	LNK_SYNC_VAL	= 0x0c34,/* 32 bit	Link Sync Cnt Current Value */
+	LNK_SYNC_CTRL	= 0x0c38,/*  8 bit	Link Sync Cnt Control Register */
+	LNK_SYNC_TST	= 0x0c39,/*  8 bit	Link Sync Cnt Test Register */
+
+	LNK_LED_REG	= 0x0c3c,/*  8 bit	Link LED Register */
+
+/* Receive GMAC FIFO (YUKON and Yukon-2) */
+
+	RX_GMF_EA	= 0x0c40,/* 32 bit	Rx GMAC FIFO End Address */
+	RX_GMF_AF_THR	= 0x0c44,/* 32 bit	Rx GMAC FIFO Almost Full Thresh. */
+	RX_GMF_CTRL_T	= 0x0c48,/* 32 bit	Rx GMAC FIFO Control/Test */
+	RX_GMF_FL_MSK	= 0x0c4c,/* 32 bit	Rx GMAC FIFO Flush Mask */
+	RX_GMF_FL_THR	= 0x0c50,/* 32 bit	Rx GMAC FIFO Flush Threshold */
+	RX_GMF_TR_THR	= 0x0c54,/* 32 bit	Rx Truncation Threshold (Yukon-2) */
+
+	RX_GMF_VLAN	= 0x0c5c,/* 32 bit	Rx VLAN Type Register (Yukon-2) */
+	RX_GMF_WP	= 0x0c60,/* 32 bit	Rx GMAC FIFO Write Pointer */
+
+	RX_GMF_WLEV	= 0x0c68,/* 32 bit	Rx GMAC FIFO Write Level */
+
+	RX_GMF_RP	= 0x0c70,/* 32 bit	Rx GMAC FIFO Read Pointer */
+
+	RX_GMF_RLEV	= 0x0c78,/* 32 bit	Rx GMAC FIFO Read Level */
+};
+
+
+/*	Q_BC			32 bit	Current Byte Counter */
+
+/* BMU Control Status Registers */
+/*	B0_R1_CSR		32 bit	BMU Ctrl/Stat Rx Queue 1 */
+/*	B0_R2_CSR		32 bit	BMU Ctrl/Stat Rx Queue 2 */
+/*	B0_XA1_CSR		32 bit	BMU Ctrl/Stat Sync Tx Queue 1 */
+/*	B0_XS1_CSR		32 bit	BMU Ctrl/Stat Async Tx Queue 1 */
+/*	B0_XA2_CSR		32 bit	BMU Ctrl/Stat Sync Tx Queue 2 */
+/*	B0_XS2_CSR		32 bit	BMU Ctrl/Stat Async Tx Queue 2 */
+/*	Q_CSR			32 bit	BMU Control/Status Register */
+
+/* Rx BMU Control / Status Registers (Yukon-2) */
+enum {
+	BMU_IDLE	= 1<<31, /* BMU Idle State */
+	BMU_RX_TCP_PKT	= 1<<30, /* Rx TCP Packet (when RSS Hash enabled) */
+	BMU_RX_IP_PKT	= 1<<29, /* Rx IP  Packet (when RSS Hash enabled) */
+
+	BMU_ENA_RX_RSS_HASH = 1<<15, /* Enable  Rx RSS Hash */
+	BMU_DIS_RX_RSS_HASH = 1<<14, /* Disable Rx RSS Hash */
+	BMU_ENA_RX_CHKSUM = 1<<13, /* Enable  Rx TCP/IP Checksum Check */
+	BMU_DIS_RX_CHKSUM = 1<<12, /* Disable Rx TCP/IP Checksum Check */
+	BMU_CLR_IRQ_PAR	= 1<<11, /* Clear IRQ on Parity errors (Rx) */
+	BMU_CLR_IRQ_TCP	= 1<<11, /* Clear IRQ on TCP segmen. error (Tx) */
+	BMU_CLR_IRQ_CHK	= 1<<10, /* Clear IRQ Check */
+	BMU_STOP	= 1<<9, /* Stop  Rx/Tx Queue */
+	BMU_START	= 1<<8, /* Start Rx/Tx Queue */
+	BMU_FIFO_OP_ON	= 1<<7, /* FIFO Operational On */
+	BMU_FIFO_OP_OFF	= 1<<6, /* FIFO Operational Off */
+	BMU_FIFO_ENA	= 1<<5, /* Enable FIFO */
+	BMU_FIFO_RST	= 1<<4, /* Reset  FIFO */
+	BMU_OP_ON	= 1<<3, /* BMU Operational On */
+	BMU_OP_OFF	= 1<<2, /* BMU Operational Off */
+	BMU_RST_CLR	= 1<<1, /* Clear BMU Reset (Enable) */
+	BMU_RST_SET	= 1<<0, /* Set   BMU Reset */
+
+	BMU_CLR_RESET	= BMU_FIFO_RST | BMU_OP_OFF | BMU_RST_CLR,
+	BMU_OPER_INIT	= BMU_CLR_IRQ_PAR | BMU_CLR_IRQ_CHK | BMU_START |
+			  BMU_FIFO_ENA | BMU_OP_ON,
+};
+
+/* Tx BMU Control / Status Registers (Yukon-2) */
+								/* Bit 31: same as for Rx */
+enum {
+	BMU_TX_IPIDINCR_ON	= 1<<13, /* Enable  IP ID Increment */
+	BMU_TX_IPIDINCR_OFF	= 1<<12, /* Disable IP ID Increment */
+	BMU_TX_CLR_IRQ_TCP	= 1<<11, /* Clear IRQ on TCP segm. length mism. */
+};
+
+/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/
+/* PREF_UNIT_CTRL	32 bit	Prefetch Control register */
+enum {
+	PREF_UNIT_OP_ON		= 1<<3,	/* prefetch unit operational */
+	PREF_UNIT_OP_OFF	= 1<<2,	/* prefetch unit not operational */
+	PREF_UNIT_RST_CLR	= 1<<1,	/* Clear Prefetch Unit Reset */
+	PREF_UNIT_RST_SET	= 1<<0,	/* Set   Prefetch Unit Reset */
+};
+
+/* RAM Buffer Register Offsets, use RB_ADDR(Queue, Offs) to access */
+/*	RB_START		32 bit	RAM Buffer Start Address */
+/*	RB_END			32 bit	RAM Buffer End Address */
+/*	RB_WP			32 bit	RAM Buffer Write Pointer */
+/*	RB_RP			32 bit	RAM Buffer Read Pointer */
+/*	RB_RX_UTPP		32 bit	Rx Upper Threshold, Pause Pack */
+/*	RB_RX_LTPP		32 bit	Rx Lower Threshold, Pause Pack */
+/*	RB_RX_UTHP		32 bit	Rx Upper Threshold, High Prio */
+/*	RB_RX_LTHP		32 bit	Rx Lower Threshold, High Prio */
+/*	RB_PC			32 bit	RAM Buffer Packet Counter */
+/*	RB_LEV			32 bit	RAM Buffer Level Register */
+
+#define RB_MSK	0x0007ffff	/* Bit 18.. 0:	RAM Buffer Pointer Bits */
+/*	RB_TST2			 8 bit	RAM Buffer Test Register 2 */
+/*	RB_TST1			 8 bit	RAM Buffer Test Register 1 */
+
+/*	RB_CTRL			 8 bit	RAM Buffer Control Register */
+enum {
+	RB_ENA_STFWD	= 1<<5,	/* Enable  Store & Forward */
+	RB_DIS_STFWD	= 1<<4,	/* Disable Store & Forward */
+	RB_ENA_OP_MD	= 1<<3,	/* Enable  Operation Mode */
+	RB_DIS_OP_MD	= 1<<2,	/* Disable Operation Mode */
+	RB_RST_CLR	= 1<<1,	/* Clear RAM Buf STM Reset */
+	RB_RST_SET	= 1<<0,	/* Set   RAM Buf STM Reset */
+};
+
+
+/* Transmit GMAC FIFO (YUKON only) */
+enum {
+	TX_GMF_EA	= 0x0d40,/* 32 bit	Tx GMAC FIFO End Address */
+	TX_GMF_AE_THR	= 0x0d44,/* 32 bit	Tx GMAC FIFO Almost Empty Thresh.*/
+	TX_GMF_CTRL_T	= 0x0d48,/* 32 bit	Tx GMAC FIFO Control/Test */
+
+	TX_GMF_WP	= 0x0d60,/* 32 bit 	Tx GMAC FIFO Write Pointer */
+	TX_GMF_WSP	= 0x0d64,/* 32 bit 	Tx GMAC FIFO Write Shadow Ptr. */
+	TX_GMF_WLEV	= 0x0d68,/* 32 bit 	Tx GMAC FIFO Write Level */
+
+	TX_GMF_RP	= 0x0d70,/* 32 bit 	Tx GMAC FIFO Read Pointer */
+	TX_GMF_RSTP	= 0x0d74,/* 32 bit 	Tx GMAC FIFO Restart Pointer */
+	TX_GMF_RLEV	= 0x0d78,/* 32 bit 	Tx GMAC FIFO Read Level */
+};
+
+/* Descriptor Poll Timer Registers */
+enum {
+	B28_DPT_INI	= 0x0e00,/* 24 bit	Descriptor Poll Timer Init Val */
+	B28_DPT_VAL	= 0x0e04,/* 24 bit	Descriptor Poll Timer Curr Val */
+	B28_DPT_CTRL	= 0x0e08,/*  8 bit	Descriptor Poll Timer Ctrl Reg */
+
+	B28_DPT_TST	= 0x0e0a,/*  8 bit	Descriptor Poll Timer Test Reg */
+};
+
+/* Time Stamp Timer Registers (YUKON only) */
+enum {
+	GMAC_TI_ST_VAL	= 0x0e14,/* 32 bit	Time Stamp Timer Curr Val */
+	GMAC_TI_ST_CTRL	= 0x0e18,/*  8 bit	Time Stamp Timer Ctrl Reg */
+	GMAC_TI_ST_TST	= 0x0e1a,/*  8 bit	Time Stamp Timer Test Reg */
+};
+
+/* Polling Unit Registers (Yukon-2 only) */
+enum {
+	POLL_CTRL	= 0x0e20, /* 32 bit	Polling Unit Control Reg */
+	POLL_LAST_IDX	= 0x0e24,/* 16 bit	Polling Unit List Last Index */
+
+	POLL_LIST_ADDR_LO= 0x0e28,/* 32 bit	Poll. List Start Addr (low) */
+	POLL_LIST_ADDR_HI= 0x0e2c,/* 32 bit	Poll. List Start Addr (high) */
+};
+
+/* ASF Subsystem Registers (Yukon-2 only) */
+enum {
+	B28_Y2_SMB_CONFIG  = 0x0e40,/* 32 bit	ASF SMBus Config Register */
+	B28_Y2_SMB_CSD_REG = 0x0e44,/* 32 bit	ASF SMB Control/Status/Data */
+	B28_Y2_ASF_IRQ_V_BASE=0x0e60,/* 32 bit	ASF IRQ Vector Base */
+
+	B28_Y2_ASF_STAT_CMD= 0x0e68,/* 32 bit	ASF Status and Command Reg */
+	B28_Y2_ASF_HOST_COM= 0x0e6c,/* 32 bit	ASF Host Communication Reg */
+	B28_Y2_DATA_REG_1  = 0x0e70,/* 32 bit	ASF/Host Data Register 1 */
+	B28_Y2_DATA_REG_2  = 0x0e74,/* 32 bit	ASF/Host Data Register 2 */
+	B28_Y2_DATA_REG_3  = 0x0e78,/* 32 bit	ASF/Host Data Register 3 */
+	B28_Y2_DATA_REG_4  = 0x0e7c,/* 32 bit	ASF/Host Data Register 4 */
+};
+
+/* Status BMU Registers (Yukon-2 only)*/
+enum {
+	STAT_CTRL	= 0x0e80,/* 32 bit	Status BMU Control Reg */
+	STAT_LAST_IDX	= 0x0e84,/* 16 bit	Status BMU Last Index */
+
+	STAT_LIST_ADDR_LO= 0x0e88,/* 32 bit	Status List Start Addr (low) */
+	STAT_LIST_ADDR_HI= 0x0e8c,/* 32 bit	Status List Start Addr (high) */
+	STAT_TXA1_RIDX	= 0x0e90,/* 16 bit	Status TxA1 Report Index Reg */
+	STAT_TXS1_RIDX	= 0x0e92,/* 16 bit	Status TxS1 Report Index Reg */
+	STAT_TXA2_RIDX	= 0x0e94,/* 16 bit	Status TxA2 Report Index Reg */
+	STAT_TXS2_RIDX	= 0x0e96,/* 16 bit	Status TxS2 Report Index Reg */
+	STAT_TX_IDX_TH	= 0x0e98,/* 16 bit	Status Tx Index Threshold Reg */
+	STAT_PUT_IDX	= 0x0e9c,/* 16 bit	Status Put Index Reg */
+
+/* FIFO Control/Status Registers (Yukon-2 only)*/
+	STAT_FIFO_WP	= 0x0ea0,/*  8 bit	Status FIFO Write Pointer Reg */
+	STAT_FIFO_RP	= 0x0ea4,/*  8 bit	Status FIFO Read Pointer Reg */
+	STAT_FIFO_RSP	= 0x0ea6,/*  8 bit	Status FIFO Read Shadow Ptr */
+	STAT_FIFO_LEVEL	= 0x0ea8,/*  8 bit	Status FIFO Level Reg */
+	STAT_FIFO_SHLVL	= 0x0eaa,/*  8 bit	Status FIFO Shadow Level Reg */
+	STAT_FIFO_WM	= 0x0eac,/*  8 bit	Status FIFO Watermark Reg */
+	STAT_FIFO_ISR_WM= 0x0ead,/*  8 bit	Status FIFO ISR Watermark Reg */
+
+/* Level and ISR Timer Registers (Yukon-2 only)*/
+	STAT_LEV_TIMER_INI= 0x0eb0,/* 32 bit	Level Timer Init. Value Reg */
+	STAT_LEV_TIMER_CNT= 0x0eb4,/* 32 bit	Level Timer Counter Reg */
+	STAT_LEV_TIMER_CTRL= 0x0eb8,/*  8 bit	Level Timer Control Reg */
+	STAT_LEV_TIMER_TEST= 0x0eb9,/*  8 bit	Level Timer Test Reg */
+	STAT_TX_TIMER_INI  = 0x0ec0,/* 32 bit	Tx Timer Init. Value Reg */
+	STAT_TX_TIMER_CNT  = 0x0ec4,/* 32 bit	Tx Timer Counter Reg */
+	STAT_TX_TIMER_CTRL = 0x0ec8,/*  8 bit	Tx Timer Control Reg */
+	STAT_TX_TIMER_TEST = 0x0ec9,/*  8 bit	Tx Timer Test Reg */
+	STAT_ISR_TIMER_INI = 0x0ed0,/* 32 bit	ISR Timer Init. Value Reg */
+	STAT_ISR_TIMER_CNT = 0x0ed4,/* 32 bit	ISR Timer Counter Reg */
+	STAT_ISR_TIMER_CTRL= 0x0ed8,/*  8 bit	ISR Timer Control Reg */
+	STAT_ISR_TIMER_TEST= 0x0ed9,/*  8 bit	ISR Timer Test Reg */
+
+	ST_LAST_IDX_MASK	= 0x007f,/* Last Index Mask */
+	ST_TXRP_IDX_MASK	= 0x0fff,/* Tx Report Index Mask */
+	ST_TXTH_IDX_MASK	= 0x0fff,/* Tx Threshold Index Mask */
+	ST_WM_IDX_MASK	= 0x3f,/* FIFO Watermark Index Mask */
+};
+
+enum {
+	LINKLED_OFF 	     = 0x01,
+	LINKLED_ON  	     = 0x02,
+	LINKLED_LINKSYNC_OFF = 0x04,
+	LINKLED_LINKSYNC_ON  = 0x08,
+	LINKLED_BLINK_OFF    = 0x10,
+	LINKLED_BLINK_ON     = 0x20,
+};
+
+/* GMAC and GPHY Control Registers (YUKON only) */
+enum {
+	GMAC_CTRL	= 0x0f00,/* 32 bit	GMAC Control Reg */
+	GPHY_CTRL	= 0x0f04,/* 32 bit	GPHY Control Reg */
+	GMAC_IRQ_SRC	= 0x0f08,/*  8 bit	GMAC Interrupt Source Reg */
+	GMAC_IRQ_MSK	= 0x0f0c,/*  8 bit	GMAC Interrupt Mask Reg */
+	GMAC_LINK_CTRL	= 0x0f10,/* 16 bit	Link Control Reg */
+
+/* Wake-up Frame Pattern Match Control Registers (YUKON only) */
+
+	WOL_REG_OFFS	= 0x20,/* HW-Bug: Address is + 0x20 against spec. */
+
+	WOL_CTRL_STAT	= 0x0f20,/* 16 bit	WOL Control/Status Reg */
+	WOL_MATCH_CTL	= 0x0f22,/*  8 bit	WOL Match Control Reg */
+	WOL_MATCH_RES	= 0x0f23,/*  8 bit	WOL Match Result Reg */
+	WOL_MAC_ADDR	= 0x0f24,/* 32 bit	WOL MAC Address */
+	WOL_PATT_PME	= 0x0f2a,/*  8 bit	WOL PME Match Enable (Yukon-2) */
+	WOL_PATT_ASFM	= 0x0f2b,/*  8 bit	WOL ASF Match Enable (Yukon-2) */
+	WOL_PATT_RPTR	= 0x0f2c,/*  8 bit	WOL Pattern Read Pointer */
+
+/* WOL Pattern Length Registers (YUKON only) */
+
+	WOL_PATT_LEN_LO	= 0x0f30,/* 32 bit	WOL Pattern Length 3..0 */
+	WOL_PATT_LEN_HI	= 0x0f34,/* 24 bit	WOL Pattern Length 6..4 */
+
+/* WOL Pattern Counter Registers (YUKON only) */
+
+	WOL_PATT_CNT_0	= 0x0f38,/* 32 bit	WOL Pattern Counter 3..0 */
+	WOL_PATT_CNT_4	= 0x0f3c,/* 24 bit	WOL Pattern Counter 6..4 */
+};
+
+enum {
+	WOL_PATT_RAM_1	= 0x1000,/*  WOL Pattern RAM Link 1 */
+	WOL_PATT_RAM_2	= 0x1400,/*  WOL Pattern RAM Link 2 */
+};
+
+enum {
+	BASE_GMAC_1	= 0x2800,/* GMAC 1 registers */
+	BASE_GMAC_2	= 0x3800,/* GMAC 2 registers */
+};
+
+/*
+ * Marvell PHY Registers, indirectly addressed over GMAC
+ */
+enum {
+	PHY_MARV_CTRL		= 0x00,/* 16 bit r/w	PHY Control Register */
+	PHY_MARV_STAT		= 0x01,/* 16 bit r/o	PHY Status Register */
+	PHY_MARV_ID0		= 0x02,/* 16 bit r/o	PHY ID0 Register */
+	PHY_MARV_ID1		= 0x03,/* 16 bit r/o	PHY ID1 Register */
+	PHY_MARV_AUNE_ADV	= 0x04,/* 16 bit r/w	Auto-Neg. Advertisement */
+	PHY_MARV_AUNE_LP	= 0x05,/* 16 bit r/o	Link Partner Ability Reg */
+	PHY_MARV_AUNE_EXP	= 0x06,/* 16 bit r/o	Auto-Neg. Expansion Reg */
+	PHY_MARV_NEPG		= 0x07,/* 16 bit r/w	Next Page Register */
+	PHY_MARV_NEPG_LP	= 0x08,/* 16 bit r/o	Next Page Link Partner */
+	/* Marvell-specific registers */
+	PHY_MARV_1000T_CTRL	= 0x09,/* 16 bit r/w	1000Base-T Control Reg */
+	PHY_MARV_1000T_STAT	= 0x0a,/* 16 bit r/o	1000Base-T Status Reg */
+	PHY_MARV_EXT_STAT	= 0x0f,/* 16 bit r/o	Extended Status Reg */
+	PHY_MARV_PHY_CTRL	= 0x10,/* 16 bit r/w	PHY Specific Ctrl Reg */
+	PHY_MARV_PHY_STAT	= 0x11,/* 16 bit r/o	PHY Specific Stat Reg */
+	PHY_MARV_INT_MASK	= 0x12,/* 16 bit r/w	Interrupt Mask Reg */
+	PHY_MARV_INT_STAT	= 0x13,/* 16 bit r/o	Interrupt Status Reg */
+	PHY_MARV_EXT_CTRL	= 0x14,/* 16 bit r/w	Ext. PHY Specific Ctrl */
+	PHY_MARV_RXE_CNT	= 0x15,/* 16 bit r/w	Receive Error Counter */
+	PHY_MARV_EXT_ADR	= 0x16,/* 16 bit r/w	Ext. Ad. for Cable Diag. */
+	PHY_MARV_PORT_IRQ	= 0x17,/* 16 bit r/o	Port 0 IRQ (88E1111 only) */
+	PHY_MARV_LED_CTRL	= 0x18,/* 16 bit r/w	LED Control Reg */
+	PHY_MARV_LED_OVER	= 0x19,/* 16 bit r/w	Manual LED Override Reg */
+	PHY_MARV_EXT_CTRL_2	= 0x1a,/* 16 bit r/w	Ext. PHY Specific Ctrl 2 */
+	PHY_MARV_EXT_P_STAT	= 0x1b,/* 16 bit r/w	Ext. PHY Spec. Stat Reg */
+	PHY_MARV_CABLE_DIAG	= 0x1c,/* 16 bit r/o	Cable Diagnostic Reg */
+	PHY_MARV_PAGE_ADDR	= 0x1d,/* 16 bit r/w	Extended Page Address Reg */
+	PHY_MARV_PAGE_DATA	= 0x1e,/* 16 bit r/w	Extended Page Data Reg */
+
+/* for 10/100 Fast Ethernet PHY (88E3082 only) */
+	PHY_MARV_FE_LED_PAR	= 0x16,/* 16 bit r/w	LED Parallel Select Reg. */
+	PHY_MARV_FE_LED_SER	= 0x17,/* 16 bit r/w	LED Stream Select S. LED */
+	PHY_MARV_FE_VCT_TX	= 0x1a,/* 16 bit r/w	VCT Reg. for TXP/N Pins */
+	PHY_MARV_FE_VCT_RX	= 0x1b,/* 16 bit r/o	VCT Reg. for RXP/N Pins */
+	PHY_MARV_FE_SPEC_2	= 0x1c,/* 16 bit r/w	Specific Control Reg. 2 */
+};
+
+enum {
+	PHY_CT_RESET	= 1<<15, /* Bit 15: (sc)	clear all PHY related regs */
+	PHY_CT_LOOP	= 1<<14, /* Bit 14:	enable Loopback over PHY */
+	PHY_CT_SPS_LSB	= 1<<13, /* Bit 13:	Speed select, lower bit */
+	PHY_CT_ANE	= 1<<12, /* Bit 12:	Auto-Negotiation Enabled */
+	PHY_CT_PDOWN	= 1<<11, /* Bit 11:	Power Down Mode */
+	PHY_CT_ISOL	= 1<<10, /* Bit 10:	Isolate Mode */
+	PHY_CT_RE_CFG	= 1<<9, /* Bit  9:	(sc) Restart Auto-Negotiation */
+	PHY_CT_DUP_MD	= 1<<8, /* Bit  8:	Duplex Mode */
+	PHY_CT_COL_TST	= 1<<7, /* Bit  7:	Collision Test enabled */
+	PHY_CT_SPS_MSB	= 1<<6, /* Bit  6:	Speed select, upper bit */
+};
+
+enum {
+	PHY_CT_SP1000	= PHY_CT_SPS_MSB, /* enable speed of 1000 Mbps */
+	PHY_CT_SP100	= PHY_CT_SPS_LSB, /* enable speed of  100 Mbps */
+	PHY_CT_SP10	= 0,		  /* enable speed of   10 Mbps */
+};
+
+enum {
+	PHY_ST_EXT_ST	= 1<<8, /* Bit  8:	Extended Status Present */
+
+	PHY_ST_PRE_SUP	= 1<<6, /* Bit  6:	Preamble Suppression */
+	PHY_ST_AN_OVER	= 1<<5, /* Bit  5:	Auto-Negotiation Over */
+	PHY_ST_REM_FLT	= 1<<4, /* Bit  4:	Remote Fault Condition Occurred */
+	PHY_ST_AN_CAP	= 1<<3, /* Bit  3:	Auto-Negotiation Capability */
+	PHY_ST_LSYNC	= 1<<2, /* Bit  2:	Link Synchronized */
+	PHY_ST_JAB_DET	= 1<<1, /* Bit  1:	Jabber Detected */
+	PHY_ST_EXT_REG	= 1<<0, /* Bit  0:	Extended Register available */
+};
+
+enum {
+	PHY_I1_OUI_MSK	= 0x3f<<10, /* Bit 15..10:	Organization Unique ID */
+	PHY_I1_MOD_NUM	= 0x3f<<4, /* Bit  9.. 4:	Model Number */
+	PHY_I1_REV_MSK	= 0xf, /* Bit  3.. 0:	Revision Number */
+};
+
+/* different Marvell PHY Ids */
+enum {
+	PHY_MARV_ID0_VAL= 0x0141, /* Marvell Unique Identifier */
+
+	PHY_BCOM_ID1_A1	= 0x6041,
+	PHY_BCOM_ID1_B2	= 0x6043,
+	PHY_BCOM_ID1_C0	= 0x6044,
+	PHY_BCOM_ID1_C5	= 0x6047,
+
+	PHY_MARV_ID1_B0	= 0x0C23, /* Yukon (PHY 88E1011) */
+	PHY_MARV_ID1_B2	= 0x0C25, /* Yukon-Plus (PHY 88E1011) */
+	PHY_MARV_ID1_C2	= 0x0CC2, /* Yukon-EC (PHY 88E1111) */
+	PHY_MARV_ID1_Y2	= 0x0C91, /* Yukon-2 (PHY 88E1112) */
+};
+
+/* Advertisement register bits */
+enum {
+	PHY_AN_NXT_PG	= 1<<15, /* Bit 15:	Request Next Page */
+	PHY_AN_ACK	= 1<<14, /* Bit 14:	(ro) Acknowledge Received */
+	PHY_AN_RF	= 1<<13, /* Bit 13:	Remote Fault Bits */
+
+	PHY_AN_PAUSE_ASYM = 1<<11,/* Bit 11:	Try for asymmetric */
+	PHY_AN_PAUSE_CAP = 1<<10, /* Bit 10:	Try for pause */
+	PHY_AN_100BASE4	= 1<<9, /* Bit 9:	Try for 100BASE-T4 */
+	PHY_AN_100FULL	= 1<<8, /* Bit 8:	Try for 100mbps full-duplex */
+	PHY_AN_100HALF	= 1<<7, /* Bit 7:	Try for 100mbps half-duplex */
+	PHY_AN_10FULL	= 1<<6, /* Bit 6:	Try for 10mbps full-duplex */
+	PHY_AN_10HALF	= 1<<5, /* Bit 5:	Try for 10mbps half-duplex */
+	PHY_AN_CSMA	= 1<<0, /* Bit 0:	Only selector supported */
+	PHY_AN_SEL	= 0x1f, /* Bit 4..0:	Selector Field, 00001=Ethernet*/
+	PHY_AN_FULL	= PHY_AN_100FULL | PHY_AN_10FULL | PHY_AN_CSMA,
+	PHY_AN_ALL	= PHY_AN_10HALF | PHY_AN_10FULL |
+		  	  PHY_AN_100HALF | PHY_AN_100FULL,
+};
+
+/*****  PHY_BCOM_1000T_STAT	16 bit r/o	1000Base-T Status Reg *****/
+/*****  PHY_MARV_1000T_STAT	16 bit r/o	1000Base-T Status Reg *****/
+enum {
+	PHY_B_1000S_MSF	= 1<<15, /* Bit 15:	Master/Slave Fault */
+	PHY_B_1000S_MSR	= 1<<14, /* Bit 14:	Master/Slave Result */
+	PHY_B_1000S_LRS	= 1<<13, /* Bit 13:	Local Receiver Status */
+	PHY_B_1000S_RRS	= 1<<12, /* Bit 12:	Remote Receiver Status */
+	PHY_B_1000S_LP_FD	= 1<<11, /* Bit 11:	Link Partner can FD */
+	PHY_B_1000S_LP_HD	= 1<<10, /* Bit 10:	Link Partner can HD */
+									/* Bit  9..8:	reserved */
+	PHY_B_1000S_IEC	= 0xff, /* Bit  7..0:	Idle Error Count */
+};
+
+/** Marvell-Specific */
+enum {
+	PHY_M_AN_NXT_PG	= 1<<15, /* Request Next Page */
+	PHY_M_AN_ACK	= 1<<14, /* (ro)	Acknowledge Received */
+	PHY_M_AN_RF	= 1<<13, /* Remote Fault */
+
+	PHY_M_AN_ASP	= 1<<11, /* Asymmetric Pause */
+	PHY_M_AN_PC	= 1<<10, /* MAC Pause implemented */
+	PHY_M_AN_100_T4	= 1<<9, /* Not cap. 100Base-T4 (always 0) */
+	PHY_M_AN_100_FD	= 1<<8, /* Advertise 100Base-TX Full Duplex */
+	PHY_M_AN_100_HD	= 1<<7, /* Advertise 100Base-TX Half Duplex */
+	PHY_M_AN_10_FD	= 1<<6, /* Advertise 10Base-TX Full Duplex */
+	PHY_M_AN_10_HD	= 1<<5, /* Advertise 10Base-TX Half Duplex */
+	PHY_M_AN_SEL_MSK =0x1f<<4,	/* Bit  4.. 0: Selector Field Mask */
+};
+
+/* special defines for FIBER (88E1011S only) */
+enum {
+	PHY_M_AN_ASP_X	= 1<<8, /* Asymmetric Pause */
+	PHY_M_AN_PC_X	= 1<<7, /* MAC Pause implemented */
+	PHY_M_AN_1000X_AHD	= 1<<6, /* Advertise 1000Base-X Half Duplex */
+	PHY_M_AN_1000X_AFD	= 1<<5, /* Advertise 1000Base-X Full Duplex */
+};
+
+/* Pause Bits (PHY_M_AN_ASP_X and PHY_M_AN_PC_X) encoding */
+enum {
+	PHY_M_P_NO_PAUSE_X	= 0<<7,/* Bit  8.. 7:	no Pause Mode */
+	PHY_M_P_SYM_MD_X	= 1<<7, /* Bit  8.. 7:	symmetric Pause Mode */
+	PHY_M_P_ASYM_MD_X	= 2<<7,/* Bit  8.. 7:	asymmetric Pause Mode */
+	PHY_M_P_BOTH_MD_X	= 3<<7,/* Bit  8.. 7:	both Pause Mode */
+};
+
+/*****  PHY_MARV_1000T_CTRL	16 bit r/w	1000Base-T Control Reg *****/
+enum {
+	PHY_M_1000C_TEST	= 7<<13,/* Bit 15..13:	Test Modes */
+	PHY_M_1000C_MSE	= 1<<12, /* Manual Master/Slave Enable */
+	PHY_M_1000C_MSC	= 1<<11, /* M/S Configuration (1=Master) */
+	PHY_M_1000C_MPD	= 1<<10, /* Multi-Port Device */
+	PHY_M_1000C_AFD	= 1<<9, /* Advertise Full Duplex */
+	PHY_M_1000C_AHD	= 1<<8, /* Advertise Half Duplex */
+};
+
+/*****  PHY_MARV_PHY_CTRL	16 bit r/w	PHY Specific Ctrl Reg *****/
+enum {
+	PHY_M_PC_TX_FFD_MSK	= 3<<14,/* Bit 15..14: Tx FIFO Depth Mask */
+	PHY_M_PC_RX_FFD_MSK	= 3<<12,/* Bit 13..12: Rx FIFO Depth Mask */
+	PHY_M_PC_ASS_CRS_TX	= 1<<11, /* Assert CRS on Transmit */
+	PHY_M_PC_FL_GOOD	= 1<<10, /* Force Link Good */
+	PHY_M_PC_EN_DET_MSK	= 3<<8,/* Bit  9.. 8: Energy Detect Mask */
+	PHY_M_PC_ENA_EXT_D	= 1<<7, /* Enable Ext. Distance (10BT) */
+	PHY_M_PC_MDIX_MSK	= 3<<5,/* Bit  6.. 5: MDI/MDIX Config. Mask */
+	PHY_M_PC_DIS_125CLK	= 1<<4, /* Disable 125 CLK */
+	PHY_M_PC_MAC_POW_UP	= 1<<3, /* MAC Power up */
+	PHY_M_PC_SQE_T_ENA	= 1<<2, /* SQE Test Enabled */
+	PHY_M_PC_POL_R_DIS	= 1<<1, /* Polarity Reversal Disabled */
+	PHY_M_PC_DIS_JABBER	= 1<<0, /* Disable Jabber */
+};
+
+enum {
+	PHY_M_PC_EN_DET		= 2<<8,	/* Energy Detect (Mode 1) */
+	PHY_M_PC_EN_DET_PLUS	= 3<<8, /* Energy Detect Plus (Mode 2) */
+};
+
+#define PHY_M_PC_MDI_XMODE(x)	(((x)<<5) & PHY_M_PC_MDIX_MSK)
+
+enum {
+	PHY_M_PC_MAN_MDI	= 0, /* 00 = Manual MDI configuration */
+	PHY_M_PC_MAN_MDIX	= 1, /* 01 = Manual MDIX configuration */
+	PHY_M_PC_ENA_AUTO	= 3, /* 11 = Enable Automatic Crossover */
+};
+
+/* for 10/100 Fast Ethernet PHY (88E3082 only) */
+enum {
+	PHY_M_PC_ENA_DTE_DT	= 1<<15, /* Enable Data Terminal Equ. (DTE) Detect */
+	PHY_M_PC_ENA_ENE_DT	= 1<<14, /* Enable Energy Detect (sense & pulse) */
+	PHY_M_PC_DIS_NLP_CK	= 1<<13, /* Disable Normal Link Pulse (NLP) Check */
+	PHY_M_PC_ENA_LIP_NP	= 1<<12, /* Enable Link Partner Next Page Reg. */
+	PHY_M_PC_DIS_NLP_GN	= 1<<11, /* Disable Normal Link Pulse Generation */
+
+	PHY_M_PC_DIS_SCRAMB	= 1<<9, /* Disable Scrambler */
+	PHY_M_PC_DIS_FEFI	= 1<<8, /* Disable Far End Fault Indic. (FEFI) */
+
+	PHY_M_PC_SH_TP_SEL	= 1<<6, /* Shielded Twisted Pair Select */
+	PHY_M_PC_RX_FD_MSK	= 3<<2,/* Bit  3.. 2: Rx FIFO Depth Mask */
+};
+
+/*****  PHY_MARV_PHY_STAT	16 bit r/o	PHY Specific Status Reg *****/
+enum {
+	PHY_M_PS_SPEED_MSK	= 3<<14, /* Bit 15..14: Speed Mask */
+	PHY_M_PS_SPEED_1000	= 1<<15, /*		10 = 1000 Mbps */
+	PHY_M_PS_SPEED_100	= 1<<14, /*		01 =  100 Mbps */
+	PHY_M_PS_SPEED_10	= 0,	 /*		00 =   10 Mbps */
+	PHY_M_PS_FULL_DUP	= 1<<13, /* Full Duplex */
+	PHY_M_PS_PAGE_REC	= 1<<12, /* Page Received */
+	PHY_M_PS_SPDUP_RES	= 1<<11, /* Speed & Duplex Resolved */
+	PHY_M_PS_LINK_UP	= 1<<10, /* Link Up */
+	PHY_M_PS_CABLE_MSK	= 7<<7,  /* Bit  9.. 7: Cable Length Mask */
+	PHY_M_PS_MDI_X_STAT	= 1<<6,  /* MDI Crossover Stat (1=MDIX) */
+	PHY_M_PS_DOWNS_STAT	= 1<<5,  /* Downshift Status (1=downsh.) */
+	PHY_M_PS_ENDET_STAT	= 1<<4,  /* Energy Detect Status (1=act) */
+	PHY_M_PS_TX_P_EN	= 1<<3,  /* Tx Pause Enabled */
+	PHY_M_PS_RX_P_EN	= 1<<2,  /* Rx Pause Enabled */
+	PHY_M_PS_POL_REV	= 1<<1,  /* Polarity Reversed */
+	PHY_M_PS_JABBER		= 1<<0,  /* Jabber */
+};
+
+#define PHY_M_PS_PAUSE_MSK	(PHY_M_PS_TX_P_EN | PHY_M_PS_RX_P_EN)
+
+/* for 10/100 Fast Ethernet PHY (88E3082 only) */
+enum {
+	PHY_M_PS_DTE_DETECT	= 1<<15, /* Data Terminal Equipment (DTE) Detected */
+	PHY_M_PS_RES_SPEED	= 1<<14, /* Resolved Speed (1=100 Mbps, 0=10 Mbps) */
+};
+
+enum {
+	PHY_M_IS_AN_ERROR	= 1<<15, /* Auto-Negotiation Error */
+	PHY_M_IS_LSP_CHANGE	= 1<<14, /* Link Speed Changed */
+	PHY_M_IS_DUP_CHANGE	= 1<<13, /* Duplex Mode Changed */
+	PHY_M_IS_AN_PR		= 1<<12, /* Page Received */
+	PHY_M_IS_AN_COMPL	= 1<<11, /* Auto-Negotiation Completed */
+	PHY_M_IS_LST_CHANGE	= 1<<10, /* Link Status Changed */
+	PHY_M_IS_SYMB_ERROR	= 1<<9, /* Symbol Error */
+	PHY_M_IS_FALSE_CARR	= 1<<8, /* False Carrier */
+	PHY_M_IS_FIFO_ERROR	= 1<<7, /* FIFO Overflow/Underrun Error */
+	PHY_M_IS_MDI_CHANGE	= 1<<6, /* MDI Crossover Changed */
+	PHY_M_IS_DOWNSH_DET	= 1<<5, /* Downshift Detected */
+	PHY_M_IS_END_CHANGE	= 1<<4, /* Energy Detect Changed */
+
+	PHY_M_IS_DTE_CHANGE	= 1<<2, /* DTE Power Det. Status Changed */
+	PHY_M_IS_POL_CHANGE	= 1<<1, /* Polarity Changed */
+	PHY_M_IS_JABBER		= 1<<0, /* Jabber */
+
+	PHY_M_DEF_MSK		= PHY_M_IS_LSP_CHANGE | PHY_M_IS_LST_CHANGE
+				 | PHY_M_IS_FIFO_ERROR,
+	PHY_M_AN_MSK	       = PHY_M_IS_AN_ERROR | PHY_M_IS_AN_COMPL,
+};
+
+
+/*****  PHY_MARV_EXT_CTRL	16 bit r/w	Ext. PHY Specific Ctrl *****/
+enum {
+	PHY_M_EC_ENA_BC_EXT = 1<<15, /* Enable Block Carr. Ext. (88E1111 only) */
+	PHY_M_EC_ENA_LIN_LB = 1<<14, /* Enable Line Loopback (88E1111 only) */
+
+	PHY_M_EC_DIS_LINK_P = 1<<12, /* Disable Link Pulses (88E1111 only) */
+	PHY_M_EC_M_DSC_MSK  = 3<<10, /* Bit 11..10:	Master Downshift Counter */
+					/* (88E1011 only) */
+	PHY_M_EC_S_DSC_MSK  = 3<<8,/* Bit  9.. 8:	Slave  Downshift Counter */
+				       /* (88E1011 only) */
+	PHY_M_EC_M_DSC_MSK2 = 7<<9,/* Bit 11.. 9:	Master Downshift Counter */
+					/* (88E1111 only) */
+	PHY_M_EC_DOWN_S_ENA = 1<<8, /* Downshift Enable (88E1111 only) */
+					/* !!! Errata in spec. (1 = disable) */
+	PHY_M_EC_RX_TIM_CT  = 1<<7, /* RGMII Rx Timing Control*/
+	PHY_M_EC_MAC_S_MSK  = 7<<4,/* Bit  6.. 4:	Def. MAC interface speed */
+	PHY_M_EC_FIB_AN_ENA = 1<<3, /* Fiber Auto-Neg. Enable (88E1011S only) */
+	PHY_M_EC_DTE_D_ENA  = 1<<2, /* DTE Detect Enable (88E1111 only) */
+	PHY_M_EC_TX_TIM_CT  = 1<<1, /* RGMII Tx Timing Control */
+	PHY_M_EC_TRANS_DIS  = 1<<0, /* Transmitter Disable (88E1111 only) */
+};
+
+#define PHY_M_EC_M_DSC(x)	((x)<<10 & PHY_M_EC_M_DSC_MSK)
+					/* 00=1x; 01=2x; 10=3x; 11=4x */
+#define PHY_M_EC_S_DSC(x)	((x)<<8 & PHY_M_EC_S_DSC_MSK)
+					/* 00=dis; 01=1x; 10=2x; 11=3x */
+#define PHY_M_EC_DSC_2(x)	((x)<<9 & PHY_M_EC_M_DSC_MSK2)
+					/* 000=1x; 001=2x; 010=3x; 011=4x */
+#define PHY_M_EC_MAC_S(x)	((x)<<4 & PHY_M_EC_MAC_S_MSK)
+					/* 01X=0; 110=2.5; 111=25 (MHz) */
+
+/* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */
+enum {
+	PHY_M_PC_DIS_LINK_Pa	= 1<<15,/* Disable Link Pulses */
+	PHY_M_PC_DSC_MSK	= 7<<12,/* Bit 14..12:	Downshift Counter */
+	PHY_M_PC_DOWN_S_ENA	= 1<<11,/* Downshift Enable */
+};
+/* !!! Errata in spec. (1 = disable) */
+
+#define PHY_M_PC_DSC(x)			(((x)<<12) & PHY_M_PC_DSC_MSK)
+											/* 100=5x; 101=6x; 110=7x; 111=8x */
+enum {
+	MAC_TX_CLK_0_MHZ	= 2,
+	MAC_TX_CLK_2_5_MHZ	= 6,
+	MAC_TX_CLK_25_MHZ 	= 7,
+};
+
+/*****  PHY_MARV_LED_CTRL	16 bit r/w	LED Control Reg *****/
+enum {
+	PHY_M_LEDC_DIS_LED	= 1<<15, /* Disable LED */
+	PHY_M_LEDC_PULS_MSK	= 7<<12,/* Bit 14..12: Pulse Stretch Mask */
+	PHY_M_LEDC_F_INT	= 1<<11, /* Force Interrupt */
+	PHY_M_LEDC_BL_R_MSK	= 7<<8,/* Bit 10.. 8: Blink Rate Mask */
+	PHY_M_LEDC_DP_C_LSB	= 1<<7, /* Duplex Control (LSB, 88E1111 only) */
+	PHY_M_LEDC_TX_C_LSB	= 1<<6, /* Tx Control (LSB, 88E1111 only) */
+	PHY_M_LEDC_LK_C_MSK	= 7<<3,/* Bit  5.. 3: Link Control Mask */
+					/* (88E1111 only) */
+};
+
+enum {
+	PHY_M_LEDC_LINK_MSK	= 3<<3,/* Bit  4.. 3: Link Control Mask */
+									/* (88E1011 only) */
+	PHY_M_LEDC_DP_CTRL	= 1<<2, /* Duplex Control */
+	PHY_M_LEDC_DP_C_MSB	= 1<<2, /* Duplex Control (MSB, 88E1111 only) */
+	PHY_M_LEDC_RX_CTRL	= 1<<1, /* Rx Activity / Link */
+	PHY_M_LEDC_TX_CTRL	= 1<<0, /* Tx Activity / Link */
+	PHY_M_LEDC_TX_C_MSB	= 1<<0, /* Tx Control (MSB, 88E1111 only) */
+};
+
+#define PHY_M_LED_PULS_DUR(x)	(((x)<<12) & PHY_M_LEDC_PULS_MSK)
+
+/*****  PHY_MARV_PHY_STAT (page 3)16 bit r/w	Polarity Control Reg. *****/
+enum {
+	PHY_M_POLC_LS1M_MSK	= 0xf<<12, /* Bit 15..12: LOS,STAT1 Mix % Mask */
+	PHY_M_POLC_IS0M_MSK	= 0xf<<8,  /* Bit 11.. 8: INIT,STAT0 Mix % Mask */
+	PHY_M_POLC_LOS_MSK	= 0x3<<6,  /* Bit  7.. 6: LOS Pol. Ctrl. Mask */
+	PHY_M_POLC_INIT_MSK	= 0x3<<4,  /* Bit  5.. 4: INIT Pol. Ctrl. Mask */
+	PHY_M_POLC_STA1_MSK	= 0x3<<2,  /* Bit  3.. 2: STAT1 Pol. Ctrl. Mask */
+	PHY_M_POLC_STA0_MSK	= 0x3,     /* Bit  1.. 0: STAT0 Pol. Ctrl. Mask */
+};
+
+#define PHY_M_POLC_LS1_P_MIX(x)	(((x)<<12) & PHY_M_POLC_LS1M_MSK)
+#define PHY_M_POLC_IS0_P_MIX(x)	(((x)<<8) & PHY_M_POLC_IS0M_MSK)
+#define PHY_M_POLC_LOS_CTRL(x)	(((x)<<6) & PHY_M_POLC_LOS_MSK)
+#define PHY_M_POLC_INIT_CTRL(x)	(((x)<<4) & PHY_M_POLC_INIT_MSK)
+#define PHY_M_POLC_STA1_CTRL(x)	(((x)<<2) & PHY_M_POLC_STA1_MSK)
+#define PHY_M_POLC_STA0_CTRL(x)	(((x)<<0) & PHY_M_POLC_STA0_MSK)
+
+enum {
+	PULS_NO_STR	= 0,/* no pulse stretching */
+	PULS_21MS	= 1,/* 21 ms to 42 ms */
+	PULS_42MS	= 2,/* 42 ms to 84 ms */
+	PULS_84MS	= 3,/* 84 ms to 170 ms */
+	PULS_170MS	= 4,/* 170 ms to 340 ms */
+	PULS_340MS	= 5,/* 340 ms to 670 ms */
+	PULS_670MS	= 6,/* 670 ms to 1.3 s */
+	PULS_1300MS	= 7,/* 1.3 s to 2.7 s */
+};
+
+#define PHY_M_LED_BLINK_RT(x)	(((x)<<8) & PHY_M_LEDC_BL_R_MSK)
+
+enum {
+	BLINK_42MS	= 0,/* 42 ms */
+	BLINK_84MS	= 1,/* 84 ms */
+	BLINK_170MS	= 2,/* 170 ms */
+	BLINK_340MS	= 3,/* 340 ms */
+	BLINK_670MS	= 4,/* 670 ms */
+};
+
+/*****  PHY_MARV_LED_OVER	16 bit r/w	Manual LED Override Reg *****/
+#define PHY_M_LED_MO_SGMII(x)	((x)<<14) /* Bit 15..14:  SGMII AN Timer */
+										/* Bit 13..12:	reserved */
+#define PHY_M_LED_MO_DUP(x)	((x)<<10) /* Bit 11..10:  Duplex */
+#define PHY_M_LED_MO_10(x)	((x)<<8) /* Bit  9.. 8:  Link 10 */
+#define PHY_M_LED_MO_100(x)	((x)<<6) /* Bit  7.. 6:  Link 100 */
+#define PHY_M_LED_MO_1000(x)	((x)<<4) /* Bit  5.. 4:  Link 1000 */
+#define PHY_M_LED_MO_RX(x)	((x)<<2) /* Bit  3.. 2:  Rx */
+#define PHY_M_LED_MO_TX(x)	((x)<<0) /* Bit  1.. 0:  Tx */
+
+enum {
+	MO_LED_NORM	= 0,
+	MO_LED_BLINK	= 1,
+	MO_LED_OFF	= 2,
+	MO_LED_ON	= 3,
+};
+
+/*****  PHY_MARV_EXT_CTRL_2	16 bit r/w	Ext. PHY Specific Ctrl 2 *****/
+enum {
+	PHY_M_EC2_FI_IMPED	= 1<<6, /* Fiber Input  Impedance */
+	PHY_M_EC2_FO_IMPED	= 1<<5, /* Fiber Output Impedance */
+	PHY_M_EC2_FO_M_CLK	= 1<<4, /* Fiber Mode Clock Enable */
+	PHY_M_EC2_FO_BOOST	= 1<<3, /* Fiber Output Boost */
+	PHY_M_EC2_FO_AM_MSK	= 7,/* Bit  2.. 0:	Fiber Output Amplitude */
+};
+
+/*****  PHY_MARV_EXT_P_STAT 16 bit r/w	Ext. PHY Specific Status *****/
+enum {
+	PHY_M_FC_AUTO_SEL	= 1<<15, /* Fiber/Copper Auto Sel. Dis. */
+	PHY_M_FC_AN_REG_ACC	= 1<<14, /* Fiber/Copper AN Reg. Access */
+	PHY_M_FC_RESOLUTION	= 1<<13, /* Fiber/Copper Resolution */
+	PHY_M_SER_IF_AN_BP	= 1<<12, /* Ser. IF AN Bypass Enable */
+	PHY_M_SER_IF_BP_ST	= 1<<11, /* Ser. IF AN Bypass Status */
+	PHY_M_IRQ_POLARITY	= 1<<10, /* IRQ polarity */
+	PHY_M_DIS_AUT_MED	= 1<<9, /* Disable Aut. Medium Reg. Selection */
+	/* (88E1111 only) */
+
+	PHY_M_UNDOC1		= 1<<7, /* undocumented bit !! */
+	PHY_M_DTE_POW_STAT	= 1<<4, /* DTE Power Status (88E1111 only) */
+	PHY_M_MODE_MASK	= 0xf, /* Bit  3.. 0: copy of HWCFG MODE[3:0] */
+};
+
+/* for 10/100 Fast Ethernet PHY (88E3082 only) */
+/*****  PHY_MARV_FE_LED_PAR		16 bit r/w	LED Parallel Select Reg. *****/
+									/* Bit 15..12: reserved (used internally) */
+enum {
+	PHY_M_FELP_LED2_MSK = 0xf<<8,	/* Bit 11.. 8: LED2 Mask (LINK) */
+	PHY_M_FELP_LED1_MSK = 0xf<<4,	/* Bit  7.. 4: LED1 Mask (ACT) */
+	PHY_M_FELP_LED0_MSK = 0xf, /* Bit  3.. 0: LED0 Mask (SPEED) */
+};
+
+#define PHY_M_FELP_LED2_CTRL(x)	(((x)<<8) & PHY_M_FELP_LED2_MSK)
+#define PHY_M_FELP_LED1_CTRL(x)	(((x)<<4) & PHY_M_FELP_LED1_MSK)
+#define PHY_M_FELP_LED0_CTRL(x)	(((x)<<0) & PHY_M_FELP_LED0_MSK)
+
+enum {
+	LED_PAR_CTRL_COLX	= 0x00,
+	LED_PAR_CTRL_ERROR	= 0x01,
+	LED_PAR_CTRL_DUPLEX	= 0x02,
+	LED_PAR_CTRL_DP_COL	= 0x03,
+	LED_PAR_CTRL_SPEED	= 0x04,
+	LED_PAR_CTRL_LINK	= 0x05,
+	LED_PAR_CTRL_TX		= 0x06,
+	LED_PAR_CTRL_RX		= 0x07,
+	LED_PAR_CTRL_ACT	= 0x08,
+	LED_PAR_CTRL_LNK_RX	= 0x09,
+	LED_PAR_CTRL_LNK_AC	= 0x0a,
+	LED_PAR_CTRL_ACT_BL	= 0x0b,
+	LED_PAR_CTRL_TX_BL	= 0x0c,
+	LED_PAR_CTRL_RX_BL	= 0x0d,
+	LED_PAR_CTRL_COL_BL	= 0x0e,
+	LED_PAR_CTRL_INACT	= 0x0f
+};
+
+/*****  PHY_MARV_FE_SPEC_2		16 bit r/w	Specific Control Reg. 2 *****/
+enum {
+	PHY_M_FESC_DIS_WAIT	= 1<<2, /* Disable TDR Waiting Period */
+	PHY_M_FESC_ENA_MCLK	= 1<<1, /* Enable MAC Rx Clock in sleep mode */
+	PHY_M_FESC_SEL_CL_A	= 1<<0, /* Select Class A driver (100B-TX) */
+};
+
+/* for Yukon-2 Gigabit Ethernet PHY (88E1112 only) */
+/*****  PHY_MARV_PHY_CTRL (page 2)		16 bit r/w	MAC Specific Ctrl *****/
+enum {
+	PHY_M_MAC_MD_MSK	= 7<<7, /* Bit  9.. 7: Mode Select Mask */
+	PHY_M_MAC_MD_AUTO	= 3,/* Auto Copper/1000Base-X */
+	PHY_M_MAC_MD_COPPER	= 5,/* Copper only */
+	PHY_M_MAC_MD_1000BX	= 7,/* 1000Base-X only */
+};
+#define PHY_M_MAC_MODE_SEL(x)	(((x)<<7) & PHY_M_MAC_MD_MSK)
+
+/*****  PHY_MARV_PHY_CTRL (page 3)		16 bit r/w	LED Control Reg. *****/
+enum {
+	PHY_M_LEDC_LOS_MSK	= 0xf<<12,/* Bit 15..12: LOS LED Ctrl. Mask */
+	PHY_M_LEDC_INIT_MSK	= 0xf<<8, /* Bit 11.. 8: INIT LED Ctrl. Mask */
+	PHY_M_LEDC_STA1_MSK	= 0xf<<4,/* Bit  7.. 4: STAT1 LED Ctrl. Mask */
+	PHY_M_LEDC_STA0_MSK	= 0xf, /* Bit  3.. 0: STAT0 LED Ctrl. Mask */
+};
+
+#define PHY_M_LEDC_LOS_CTRL(x)	(((x)<<12) & PHY_M_LEDC_LOS_MSK)
+#define PHY_M_LEDC_INIT_CTRL(x)	(((x)<<8) & PHY_M_LEDC_INIT_MSK)
+#define PHY_M_LEDC_STA1_CTRL(x)	(((x)<<4) & PHY_M_LEDC_STA1_MSK)
+#define PHY_M_LEDC_STA0_CTRL(x)	(((x)<<0) & PHY_M_LEDC_STA0_MSK)
+
+/* GMAC registers  */
+/* Port Registers */
+enum {
+	GM_GP_STAT	= 0x0000,	/* 16 bit r/o	General Purpose Status */
+	GM_GP_CTRL	= 0x0004,	/* 16 bit r/w	General Purpose Control */
+	GM_TX_CTRL	= 0x0008,	/* 16 bit r/w	Transmit Control Reg. */
+	GM_RX_CTRL	= 0x000c,	/* 16 bit r/w	Receive Control Reg. */
+	GM_TX_FLOW_CTRL	= 0x0010,	/* 16 bit r/w	Transmit Flow-Control */
+	GM_TX_PARAM	= 0x0014,	/* 16 bit r/w	Transmit Parameter Reg. */
+	GM_SERIAL_MODE	= 0x0018,	/* 16 bit r/w	Serial Mode Register */
+/* Source Address Registers */
+	GM_SRC_ADDR_1L	= 0x001c,	/* 16 bit r/w	Source Address 1 (low) */
+	GM_SRC_ADDR_1M	= 0x0020,	/* 16 bit r/w	Source Address 1 (middle) */
+	GM_SRC_ADDR_1H	= 0x0024,	/* 16 bit r/w	Source Address 1 (high) */
+	GM_SRC_ADDR_2L	= 0x0028,	/* 16 bit r/w	Source Address 2 (low) */
+	GM_SRC_ADDR_2M	= 0x002c,	/* 16 bit r/w	Source Address 2 (middle) */
+	GM_SRC_ADDR_2H	= 0x0030,	/* 16 bit r/w	Source Address 2 (high) */
+
+/* Multicast Address Hash Registers */
+	GM_MC_ADDR_H1	= 0x0034,	/* 16 bit r/w	Multicast Address Hash 1 */
+	GM_MC_ADDR_H2	= 0x0038,	/* 16 bit r/w	Multicast Address Hash 2 */
+	GM_MC_ADDR_H3	= 0x003c,	/* 16 bit r/w	Multicast Address Hash 3 */
+	GM_MC_ADDR_H4	= 0x0040,	/* 16 bit r/w	Multicast Address Hash 4 */
+
+/* Interrupt Source Registers */
+	GM_TX_IRQ_SRC	= 0x0044,	/* 16 bit r/o	Tx Overflow IRQ Source */
+	GM_RX_IRQ_SRC	= 0x0048,	/* 16 bit r/o	Rx Overflow IRQ Source */
+	GM_TR_IRQ_SRC	= 0x004c,	/* 16 bit r/o	Tx/Rx Over. IRQ Source */
+
+/* Interrupt Mask Registers */
+	GM_TX_IRQ_MSK	= 0x0050,	/* 16 bit r/w	Tx Overflow IRQ Mask */
+	GM_RX_IRQ_MSK	= 0x0054,	/* 16 bit r/w	Rx Overflow IRQ Mask */
+	GM_TR_IRQ_MSK	= 0x0058,	/* 16 bit r/w	Tx/Rx Over. IRQ Mask */
+
+/* Serial Management Interface (SMI) Registers */
+	GM_SMI_CTRL	= 0x0080,	/* 16 bit r/w	SMI Control Register */
+	GM_SMI_DATA	= 0x0084,	/* 16 bit r/w	SMI Data Register */
+	GM_PHY_ADDR	= 0x0088,	/* 16 bit r/w	GPHY Address Register */
+};
+
+/* MIB Counters */
+#define GM_MIB_CNT_BASE	0x0100		/* Base Address of MIB Counters */
+#define GM_MIB_CNT_SIZE	44		/* Number of MIB Counters */
+
+/*
+ * MIB Counters base address definitions (low word) -
+ * use offset 4 for access to high word	(32 bit r/o)
+ */
+enum {
+	GM_RXF_UC_OK  = GM_MIB_CNT_BASE + 0,	/* Unicast Frames Received OK */
+	GM_RXF_BC_OK	= GM_MIB_CNT_BASE + 8,	/* Broadcast Frames Received OK */
+	GM_RXF_MPAUSE	= GM_MIB_CNT_BASE + 16,	/* Pause MAC Ctrl Frames Received */
+	GM_RXF_MC_OK	= GM_MIB_CNT_BASE + 24,	/* Multicast Frames Received OK */
+	GM_RXF_FCS_ERR	= GM_MIB_CNT_BASE + 32,	/* Rx Frame Check Seq. Error */
+	/* GM_MIB_CNT_BASE + 40:	reserved */
+	GM_RXO_OK_LO	= GM_MIB_CNT_BASE + 48,	/* Octets Received OK Low */
+	GM_RXO_OK_HI	= GM_MIB_CNT_BASE + 56,	/* Octets Received OK High */
+	GM_RXO_ERR_LO	= GM_MIB_CNT_BASE + 64,	/* Octets Received Invalid Low */
+	GM_RXO_ERR_HI	= GM_MIB_CNT_BASE + 72,	/* Octets Received Invalid High */
+	GM_RXF_SHT	= GM_MIB_CNT_BASE + 80,	/* Frames <64 Byte Received OK */
+	GM_RXE_FRAG	= GM_MIB_CNT_BASE + 88,	/* Frames <64 Byte Received with FCS Err */
+	GM_RXF_64B	= GM_MIB_CNT_BASE + 96,	/* 64 Byte Rx Frame */
+	GM_RXF_127B	= GM_MIB_CNT_BASE + 104,	/* 65-127 Byte Rx Frame */
+	GM_RXF_255B	= GM_MIB_CNT_BASE + 112,	/* 128-255 Byte Rx Frame */
+	GM_RXF_511B	= GM_MIB_CNT_BASE + 120,	/* 256-511 Byte Rx Frame */
+	GM_RXF_1023B	= GM_MIB_CNT_BASE + 128,	/* 512-1023 Byte Rx Frame */
+	GM_RXF_1518B	= GM_MIB_CNT_BASE + 136,	/* 1024-1518 Byte Rx Frame */
+	GM_RXF_MAX_SZ	= GM_MIB_CNT_BASE + 144,	/* 1519-MaxSize Byte Rx Frame */
+	GM_RXF_LNG_ERR	= GM_MIB_CNT_BASE + 152,	/* Rx Frame too Long Error */
+	GM_RXF_JAB_PKT	= GM_MIB_CNT_BASE + 160,	/* Rx Jabber Packet Frame */
+	/* GM_MIB_CNT_BASE + 168:	reserved */
+	GM_RXE_FIFO_OV	= GM_MIB_CNT_BASE + 176,	/* Rx FIFO overflow Event */
+	/* GM_MIB_CNT_BASE + 184:	reserved */
+	GM_TXF_UC_OK	= GM_MIB_CNT_BASE + 192,	/* Unicast Frames Xmitted OK */
+	GM_TXF_BC_OK	= GM_MIB_CNT_BASE + 200,	/* Broadcast Frames Xmitted OK */
+	GM_TXF_MPAUSE	= GM_MIB_CNT_BASE + 208,	/* Pause MAC Ctrl Frames Xmitted */
+	GM_TXF_MC_OK	= GM_MIB_CNT_BASE + 216,	/* Multicast Frames Xmitted OK */
+	GM_TXO_OK_LO	= GM_MIB_CNT_BASE + 224,	/* Octets Transmitted OK Low */
+	GM_TXO_OK_HI	= GM_MIB_CNT_BASE + 232,	/* Octets Transmitted OK High */
+	GM_TXF_64B	= GM_MIB_CNT_BASE + 240,	/* 64 Byte Tx Frame */
+	GM_TXF_127B	= GM_MIB_CNT_BASE + 248,	/* 65-127 Byte Tx Frame */
+	GM_TXF_255B	= GM_MIB_CNT_BASE + 256,	/* 128-255 Byte Tx Frame */
+	GM_TXF_511B	= GM_MIB_CNT_BASE + 264,	/* 256-511 Byte Tx Frame */
+	GM_TXF_1023B	= GM_MIB_CNT_BASE + 272,	/* 512-1023 Byte Tx Frame */
+	GM_TXF_1518B	= GM_MIB_CNT_BASE + 280,	/* 1024-1518 Byte Tx Frame */
+	GM_TXF_MAX_SZ	= GM_MIB_CNT_BASE + 288,	/* 1519-MaxSize Byte Tx Frame */
+
+	GM_TXF_COL	= GM_MIB_CNT_BASE + 304,	/* Tx Collision */
+	GM_TXF_LAT_COL	= GM_MIB_CNT_BASE + 312,	/* Tx Late Collision */
+	GM_TXF_ABO_COL	= GM_MIB_CNT_BASE + 320,	/* Tx aborted due to Exces. Col. */
+	GM_TXF_MUL_COL	= GM_MIB_CNT_BASE + 328,	/* Tx Multiple Collision */
+	GM_TXF_SNG_COL	= GM_MIB_CNT_BASE + 336,	/* Tx Single Collision */
+	GM_TXE_FIFO_UR	= GM_MIB_CNT_BASE + 344,	/* Tx FIFO Underrun Event */
+};
+
+/* GMAC Bit Definitions */
+/*	GM_GP_STAT	16 bit r/o	General Purpose Status Register */
+enum {
+	GM_GPSR_SPEED		= 1<<15, /* Bit 15:	Port Speed (1 = 100 Mbps) */
+	GM_GPSR_DUPLEX		= 1<<14, /* Bit 14:	Duplex Mode (1 = Full) */
+	GM_GPSR_FC_TX_DIS	= 1<<13, /* Bit 13:	Tx Flow-Control Mode Disabled */
+	GM_GPSR_LINK_UP		= 1<<12, /* Bit 12:	Link Up Status */
+	GM_GPSR_PAUSE		= 1<<11, /* Bit 11:	Pause State */
+	GM_GPSR_TX_ACTIVE	= 1<<10, /* Bit 10:	Tx in Progress */
+	GM_GPSR_EXC_COL		= 1<<9,	/* Bit  9:	Excessive Collisions Occurred */
+	GM_GPSR_LAT_COL		= 1<<8,	/* Bit  8:	Late Collisions Occurred */
+
+	GM_GPSR_PHY_ST_CH	= 1<<5,	/* Bit  5:	PHY Status Change */
+	GM_GPSR_GIG_SPEED	= 1<<4,	/* Bit  4:	Gigabit Speed (1 = 1000 Mbps) */
+	GM_GPSR_PART_MODE	= 1<<3,	/* Bit  3:	Partition mode */
+	GM_GPSR_FC_RX_DIS	= 1<<2,	/* Bit  2:	Rx Flow-Control Mode Disabled */
+	GM_GPSR_PROM_EN		= 1<<1,	/* Bit  1:	Promiscuous Mode Enabled */
+};
+
+/*	GM_GP_CTRL	16 bit r/w	General Purpose Control Register */
+enum {
+	GM_GPCR_PROM_ENA	= 1<<14,	/* Bit 14:	Enable Promiscuous Mode */
+	GM_GPCR_FC_TX_DIS	= 1<<13, /* Bit 13:	Disable Tx Flow-Control Mode */
+	GM_GPCR_TX_ENA		= 1<<12, /* Bit 12:	Enable Transmit */
+	GM_GPCR_RX_ENA		= 1<<11, /* Bit 11:	Enable Receive */
+	GM_GPCR_BURST_ENA	= 1<<10, /* Bit 10:	Enable Burst Mode */
+	GM_GPCR_LOOP_ENA	= 1<<9,	/* Bit  9:	Enable MAC Loopback Mode */
+	GM_GPCR_PART_ENA	= 1<<8,	/* Bit  8:	Enable Partition Mode */
+	GM_GPCR_GIGS_ENA	= 1<<7,	/* Bit  7:	Gigabit Speed (1000 Mbps) */
+	GM_GPCR_FL_PASS		= 1<<6,	/* Bit  6:	Force Link Pass */
+	GM_GPCR_DUP_FULL	= 1<<5,	/* Bit  5:	Full Duplex Mode */
+	GM_GPCR_FC_RX_DIS	= 1<<4,	/* Bit  4:	Disable Rx Flow-Control Mode */
+	GM_GPCR_SPEED_100	= 1<<3,   /* Bit  3:	Port Speed 100 Mbps */
+	GM_GPCR_AU_DUP_DIS	= 1<<2,	/* Bit  2:	Disable Auto-Update Duplex */
+	GM_GPCR_AU_FCT_DIS	= 1<<1,	/* Bit  1:	Disable Auto-Update Flow-C. */
+	GM_GPCR_AU_SPD_DIS	= 1<<0,	/* Bit  0:	Disable Auto-Update Speed */
+};
+
+#define GM_GPCR_SPEED_1000	(GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100)
+#define GM_GPCR_AU_ALL_DIS	(GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS|GM_GPCR_AU_SPD_DIS)
+
+/*	GM_TX_CTRL			16 bit r/w	Transmit Control Register */
+enum {
+	GM_TXCR_FORCE_JAM	= 1<<15, /* Bit 15:	Force Jam / Flow-Control */
+	GM_TXCR_CRC_DIS		= 1<<14, /* Bit 14:	Disable insertion of CRC */
+	GM_TXCR_PAD_DIS		= 1<<13, /* Bit 13:	Disable padding of packets */
+	GM_TXCR_COL_THR_MSK	= 7<<10, /* Bit 12..10:	Collision Threshold */
+};
+
+#define TX_COL_THR(x)		(((x)<<10) & GM_TXCR_COL_THR_MSK)
+#define TX_COL_DEF		0x04
+
+/*	GM_RX_CTRL			16 bit r/w	Receive Control Register */
+enum {
+	GM_RXCR_UCF_ENA	= 1<<15, /* Bit 15:	Enable Unicast filtering */
+	GM_RXCR_MCF_ENA	= 1<<14, /* Bit 14:	Enable Multicast filtering */
+	GM_RXCR_CRC_DIS	= 1<<13, /* Bit 13:	Remove 4-byte CRC */
+	GM_RXCR_PASS_FC	= 1<<12, /* Bit 12:	Pass FC packets to FIFO */
+};
+
+/*	GM_TX_PARAM		16 bit r/w	Transmit Parameter Register */
+enum {
+	GM_TXPA_JAMLEN_MSK	= 0x03<<14,	/* Bit 15..14:	Jam Length */
+	GM_TXPA_JAMIPG_MSK	= 0x1f<<9,	/* Bit 13..9:	Jam IPG */
+	GM_TXPA_JAMDAT_MSK	= 0x1f<<4,	/* Bit  8..4:	IPG Jam to Data */
+	GM_TXPA_BO_LIM_MSK	= 0x0f,		/* Bit  3.. 0: Backoff Limit Mask */
+
+	TX_JAM_LEN_DEF		= 0x03,
+	TX_JAM_IPG_DEF		= 0x0b,
+	TX_IPG_JAM_DEF		= 0x1c,
+	TX_BOF_LIM_DEF		= 0x04,
+};
+
+#define TX_JAM_LEN_VAL(x)	(((x)<<14) & GM_TXPA_JAMLEN_MSK)
+#define TX_JAM_IPG_VAL(x)	(((x)<<9)  & GM_TXPA_JAMIPG_MSK)
+#define TX_IPG_JAM_DATA(x)	(((x)<<4)  & GM_TXPA_JAMDAT_MSK)
+#define TX_BACK_OFF_LIM(x)	((x) & GM_TXPA_BO_LIM_MSK)
+
+
+/*	GM_SERIAL_MODE			16 bit r/w	Serial Mode Register */
+enum {
+	GM_SMOD_DATABL_MSK	= 0x1f<<11, /* Bit 15..11:	Data Blinder (r/o) */
+	GM_SMOD_LIMIT_4		= 1<<10, /* Bit 10:	4 consecutive Tx trials */
+	GM_SMOD_VLAN_ENA	= 1<<9,	/* Bit  9:	Enable VLAN  (Max. Frame Len) */
+	GM_SMOD_JUMBO_ENA	= 1<<8,	/* Bit  8:	Enable Jumbo (Max. Frame Len) */
+	 GM_SMOD_IPG_MSK	= 0x1f	/* Bit 4..0:	Inter-Packet Gap (IPG) */
+};
+
+#define DATA_BLIND_VAL(x)	(((x)<<11) & GM_SMOD_DATABL_MSK)
+#define DATA_BLIND_DEF		0x04
+
+#define IPG_DATA_VAL(x)		(x & GM_SMOD_IPG_MSK)
+#define IPG_DATA_DEF		0x1e
+
+/*	GM_SMI_CTRL			16 bit r/w	SMI Control Register */
+enum {
+	GM_SMI_CT_PHY_A_MSK	= 0x1f<<11,/* Bit 15..11:	PHY Device Address */
+	GM_SMI_CT_REG_A_MSK	= 0x1f<<6,/* Bit 10.. 6:	PHY Register Address */
+	GM_SMI_CT_OP_RD		= 1<<5,	/* Bit  5:	OpCode Read (0=Write)*/
+	GM_SMI_CT_RD_VAL	= 1<<4,	/* Bit  4:	Read Valid (Read completed) */
+	GM_SMI_CT_BUSY		= 1<<3,	/* Bit  3:	Busy (Operation in progress) */
+};
+
+#define GM_SMI_CT_PHY_AD(x)	(((x)<<11) & GM_SMI_CT_PHY_A_MSK)
+#define GM_SMI_CT_REG_AD(x)	(((x)<<6) & GM_SMI_CT_REG_A_MSK)
+
+/*	GM_PHY_ADDR				16 bit r/w	GPHY Address Register */
+enum {
+	GM_PAR_MIB_CLR	= 1<<5,	/* Bit  5:	Set MIB Clear Counter Mode */
+	GM_PAR_MIB_TST	= 1<<4,	/* Bit  4:	MIB Load Counter (Test Mode) */
+};
+
+/* Receive Frame Status Encoding */
+enum {
+	GMR_FS_LEN	= 0xffff<<16, /* Bit 31..16:	Rx Frame Length */
+	GMR_FS_VLAN	= 1<<13, /* Bit 13:	VLAN Packet */
+	GMR_FS_JABBER	= 1<<12, /* Bit 12:	Jabber Packet */
+	GMR_FS_UN_SIZE	= 1<<11, /* Bit 11:	Undersize Packet */
+	GMR_FS_MC	= 1<<10, /* Bit 10:	Multicast Packet */
+	GMR_FS_BC	= 1<<9, /* Bit  9:	Broadcast Packet */
+	GMR_FS_RX_OK	= 1<<8, /* Bit  8:	Receive OK (Good Packet) */
+	GMR_FS_GOOD_FC	= 1<<7, /* Bit  7:	Good Flow-Control Packet */
+	GMR_FS_BAD_FC	= 1<<6, /* Bit  6:	Bad  Flow-Control Packet */
+	GMR_FS_MII_ERR	= 1<<5, /* Bit  5:	MII Error */
+	GMR_FS_LONG_ERR	= 1<<4, /* Bit  4:	Too Long Packet */
+	GMR_FS_FRAGMENT	= 1<<3, /* Bit  3:	Fragment */
+
+	GMR_FS_CRC_ERR	= 1<<1, /* Bit  1:	CRC Error */
+	GMR_FS_RX_FF_OV	= 1<<0, /* Bit  0:	Rx FIFO Overflow */
+
+/*
+ * GMR_FS_ANY_ERR (analogous to XMR_FS_ANY_ERR)
+ */
+	GMR_FS_ANY_ERR	= GMR_FS_RX_FF_OV | GMR_FS_CRC_ERR |
+			  GMR_FS_FRAGMENT | GMR_FS_LONG_ERR |
+		  	  GMR_FS_MII_ERR | GMR_FS_BAD_FC | GMR_FS_GOOD_FC |
+			  GMR_FS_UN_SIZE | GMR_FS_JABBER,
+/* Rx GMAC FIFO Flush Mask (default) */
+	RX_FF_FL_DEF_MSK = GMR_FS_ANY_ERR,
+};
+
+/*	RX_GMF_CTRL_T	32 bit	Rx GMAC FIFO Control/Test */
+enum {
+	GMF_WP_TST_ON	= 1<<14,	/* Write Pointer Test On */
+	GMF_WP_TST_OFF	= 1<<13,	/* Write Pointer Test Off */
+	GMF_WP_STEP	= 1<<12,	/* Write Pointer Step/Increment */
+
+	GMF_RP_TST_ON	= 1<<10,	/* Read Pointer Test On */
+	GMF_RP_TST_OFF	= 1<<9,		/* Read Pointer Test Off */
+	GMF_RP_STEP	= 1<<8,		/* Read Pointer Step/Increment */
+	GMF_RX_F_FL_ON	= 1<<7,		/* Rx FIFO Flush Mode On */
+	GMF_RX_F_FL_OFF	= 1<<6,		/* Rx FIFO Flush Mode Off */
+	GMF_CLI_RX_FO	= 1<<5,		/* Clear IRQ Rx FIFO Overrun */
+	GMF_CLI_RX_FC	= 1<<4,		/* Clear IRQ Rx Frame Complete */
+	GMF_OPER_ON	= 1<<3,		/* Operational Mode On */
+	GMF_OPER_OFF	= 1<<2,		/* Operational Mode Off */
+	GMF_RST_CLR	= 1<<1,		/* Clear GMAC FIFO Reset */
+	GMF_RST_SET	= 1<<0,		/* Set   GMAC FIFO Reset */
+
+	RX_GMF_FL_THR_DEF = 0xa,	/* flush threshold (default) */
+};
+
+
+/*	TX_GMF_CTRL_T	32 bit	Tx GMAC FIFO Control/Test */
+enum {
+	GMF_WSP_TST_ON	= 1<<18,/* Write Shadow Pointer Test On */
+	GMF_WSP_TST_OFF	= 1<<17,/* Write Shadow Pointer Test Off */
+	GMF_WSP_STEP	= 1<<16,/* Write Shadow Pointer Step/Increment */
+
+	GMF_CLI_TX_FU	= 1<<6,	/* Clear IRQ Tx FIFO Underrun */
+	GMF_CLI_TX_FC	= 1<<5,	/* Clear IRQ Tx Frame Complete */
+	GMF_CLI_TX_PE	= 1<<4,	/* Clear IRQ Tx Parity Error */
+};
+
+/*	GMAC_TI_ST_CTRL	 8 bit	Time Stamp Timer Ctrl Reg (YUKON only) */
+enum {
+	GMT_ST_START	= 1<<2,	/* Start Time Stamp Timer */
+	GMT_ST_STOP	= 1<<1,	/* Stop  Time Stamp Timer */
+	GMT_ST_CLR_IRQ	= 1<<0,	/* Clear Time Stamp Timer IRQ */
+};
+
+/* B28_Y2_ASF_STAT_CMD		32 bit	ASF Status and Command Reg */
+enum {
+	Y2_ASF_OS_PRES	= 1<<4,	/* ASF operating system present */
+	Y2_ASF_RESET	= 1<<3,	/* ASF system in reset state */
+	Y2_ASF_RUNNING	= 1<<2,	/* ASF system operational */
+	Y2_ASF_CLR_HSTI = 1<<1,	/* Clear ASF IRQ */
+	Y2_ASF_IRQ	= 1<<0,	/* Issue an IRQ to ASF system */
+
+	Y2_ASF_UC_STATE = 3<<2,	/* ASF uC State */
+	Y2_ASF_CLK_HALT	= 0,	/* ASF system clock stopped */
+};
+
+/* B28_Y2_ASF_HOST_COM	32 bit	ASF Host Communication Reg */
+enum {
+	Y2_ASF_CLR_ASFI = 1<<1,	/* Clear host IRQ */
+	Y2_ASF_HOST_IRQ = 1<<0,	/* Issue an IRQ to HOST system */
+};
+
+/*	STAT_CTRL		32 bit	Status BMU control register (Yukon-2 only) */
+enum {
+	SC_STAT_CLR_IRQ	= 1<<4,	/* Status Burst IRQ clear */
+	SC_STAT_OP_ON	= 1<<3,	/* Operational Mode On */
+	SC_STAT_OP_OFF	= 1<<2,	/* Operational Mode Off */
+	SC_STAT_RST_CLR	= 1<<1,	/* Clear Status Unit Reset (Enable) */
+	SC_STAT_RST_SET	= 1<<0,	/* Set   Status Unit Reset */
+};
+
+/*	GMAC_CTRL		32 bit	GMAC Control Reg (YUKON only) */
+enum {
+	GMC_H_BURST_ON	= 1<<7,	/* Half Duplex Burst Mode On */
+	GMC_H_BURST_OFF	= 1<<6,	/* Half Duplex Burst Mode Off */
+	GMC_F_LOOPB_ON	= 1<<5,	/* FIFO Loopback On */
+	GMC_F_LOOPB_OFF	= 1<<4,	/* FIFO Loopback Off */
+	GMC_PAUSE_ON	= 1<<3,	/* Pause On */
+	GMC_PAUSE_OFF	= 1<<2,	/* Pause Off */
+	GMC_RST_CLR	= 1<<1,	/* Clear GMAC Reset */
+	GMC_RST_SET	= 1<<0,	/* Set   GMAC Reset */
+};
+
+/*	GPHY_CTRL		32 bit	GPHY Control Reg (YUKON only) */
+enum {
+	GPC_SEL_BDT	= 1<<28, /* Select Bi-Dir. Transfer for MDC/MDIO */
+	GPC_INT_POL_HI	= 1<<27, /* IRQ Polarity is Active HIGH */
+	GPC_75_OHM	= 1<<26, /* Use 75 Ohm Termination instead of 50 */
+	GPC_DIS_FC	= 1<<25, /* Disable Automatic Fiber/Copper Detection */
+	GPC_DIS_SLEEP	= 1<<24, /* Disable Energy Detect */
+	GPC_HWCFG_M_3	= 1<<23, /* HWCFG_MODE[3] */
+	GPC_HWCFG_M_2	= 1<<22, /* HWCFG_MODE[2] */
+	GPC_HWCFG_M_1	= 1<<21, /* HWCFG_MODE[1] */
+	GPC_HWCFG_M_0	= 1<<20, /* HWCFG_MODE[0] */
+	GPC_ANEG_0	= 1<<19, /* ANEG[0] */
+	GPC_ENA_XC	= 1<<18, /* Enable MDI crossover */
+	GPC_DIS_125	= 1<<17, /* Disable 125 MHz clock */
+	GPC_ANEG_3	= 1<<16, /* ANEG[3] */
+	GPC_ANEG_2	= 1<<15, /* ANEG[2] */
+	GPC_ANEG_1	= 1<<14, /* ANEG[1] */
+	GPC_ENA_PAUSE	= 1<<13, /* Enable Pause (SYM_OR_REM) */
+	GPC_PHYADDR_4	= 1<<12, /* Bit 4 of Phy Addr */
+	GPC_PHYADDR_3	= 1<<11, /* Bit 3 of Phy Addr */
+	GPC_PHYADDR_2	= 1<<10, /* Bit 2 of Phy Addr */
+	GPC_PHYADDR_1	= 1<<9,	 /* Bit 1 of Phy Addr */
+	GPC_PHYADDR_0	= 1<<8,	 /* Bit 0 of Phy Addr */
+						/* Bits  7..2:	reserved */
+	GPC_RST_CLR	= 1<<1,	/* Clear GPHY Reset */
+	GPC_RST_SET	= 1<<0,	/* Set   GPHY Reset */
+};
+
+/*	GMAC_IRQ_SRC	 8 bit	GMAC Interrupt Source Reg (YUKON only) */
+/*	GMAC_IRQ_MSK	 8 bit	GMAC Interrupt Mask   Reg (YUKON only) */
+enum {
+	GM_IS_TX_CO_OV	= 1<<5,	/* Transmit Counter Overflow IRQ */
+	GM_IS_RX_CO_OV	= 1<<4,	/* Receive Counter Overflow IRQ */
+	GM_IS_TX_FF_UR	= 1<<3,	/* Transmit FIFO Underrun */
+	GM_IS_TX_COMPL	= 1<<2,	/* Frame Transmission Complete */
+	GM_IS_RX_FF_OR	= 1<<1,	/* Receive FIFO Overrun */
+	GM_IS_RX_COMPL	= 1<<0,	/* Frame Reception Complete */
+
+#define GMAC_DEF_MSK	(GM_IS_TX_CO_OV | GM_IS_RX_CO_OV |\
+			 GM_IS_TX_FF_UR | GM_IS_RX_FF_OR)
+
+/*	GMAC_LINK_CTRL	16 bit	GMAC Link Control Reg (YUKON only) */
+						/* Bits 15.. 2:	reserved */
+	GMLC_RST_CLR	= 1<<1,	/* Clear GMAC Link Reset */
+	GMLC_RST_SET	= 1<<0,	/* Set   GMAC Link Reset */
+
+
+/*	WOL_CTRL_STAT	16 bit	WOL Control/Status Reg */
+	WOL_CTL_LINK_CHG_OCC		= 1<<15,
+	WOL_CTL_MAGIC_PKT_OCC		= 1<<14,
+	WOL_CTL_PATTERN_OCC		= 1<<13,
+	WOL_CTL_CLEAR_RESULT		= 1<<12,
+	WOL_CTL_ENA_PME_ON_LINK_CHG	= 1<<11,
+	WOL_CTL_DIS_PME_ON_LINK_CHG	= 1<<10,
+	WOL_CTL_ENA_PME_ON_MAGIC_PKT	= 1<<9,
+	WOL_CTL_DIS_PME_ON_MAGIC_PKT	= 1<<8,
+	WOL_CTL_ENA_PME_ON_PATTERN	= 1<<7,
+	WOL_CTL_DIS_PME_ON_PATTERN	= 1<<6,
+	WOL_CTL_ENA_LINK_CHG_UNIT	= 1<<5,
+	WOL_CTL_DIS_LINK_CHG_UNIT	= 1<<4,
+	WOL_CTL_ENA_MAGIC_PKT_UNIT	= 1<<3,
+	WOL_CTL_DIS_MAGIC_PKT_UNIT	= 1<<2,
+	WOL_CTL_ENA_PATTERN_UNIT	= 1<<1,
+	WOL_CTL_DIS_PATTERN_UNIT	= 1<<0,
+};
+
+#define WOL_CTL_DEFAULT				\
+	(WOL_CTL_DIS_PME_ON_LINK_CHG |	\
+	WOL_CTL_DIS_PME_ON_PATTERN |	\
+	WOL_CTL_DIS_PME_ON_MAGIC_PKT |	\
+	WOL_CTL_DIS_LINK_CHG_UNIT |		\
+	WOL_CTL_DIS_PATTERN_UNIT |		\
+	WOL_CTL_DIS_MAGIC_PKT_UNIT)
+
+/*	WOL_MATCH_CTL	 8 bit	WOL Match Control Reg */
+#define WOL_CTL_PATT_ENA(x)	(1 << (x))
+
+
+/* Control flags */
+enum {
+	UDPTCP	= 1<<0,
+	CALSUM	= 1<<1,
+	WR_SUM	= 1<<2,
+	INIT_SUM= 1<<3,
+	LOCK_SUM= 1<<4,
+	INS_VLAN= 1<<5,
+	FRC_STAT= 1<<6,
+	EOP	= 1<<7,
+};
+
+enum {
+	HW_OWNER 	= 1<<7,
+	OP_TCPWRITE	= 0x11,
+	OP_TCPSTART	= 0x12,
+	OP_TCPINIT	= 0x14,
+	OP_TCPLCK	= 0x18,
+	OP_TCPCHKSUM	= OP_TCPSTART,
+	OP_TCPIS	= OP_TCPINIT | OP_TCPSTART,
+	OP_TCPLW	= OP_TCPLCK | OP_TCPWRITE,
+	OP_TCPLSW	= OP_TCPLCK | OP_TCPSTART | OP_TCPWRITE,
+	OP_TCPLISW	= OP_TCPLCK | OP_TCPINIT | OP_TCPSTART | OP_TCPWRITE,
+
+	OP_ADDR64	= 0x21,
+	OP_VLAN		= 0x22,
+	OP_ADDR64VLAN	= OP_ADDR64 | OP_VLAN,
+	OP_LRGLEN	= 0x24,
+	OP_LRGLENVLAN	= OP_LRGLEN | OP_VLAN,
+	OP_BUFFER	= 0x40,
+	OP_PACKET	= 0x41,
+	OP_LARGESEND	= 0x43,
+
+/* YUKON-2 STATUS opcodes defines */
+	OP_RXSTAT	= 0x60,
+	OP_RXTIMESTAMP	= 0x61,
+	OP_RXVLAN	= 0x62,
+	OP_RXCHKS	= 0x64,
+	OP_RXCHKSVLAN	= OP_RXCHKS | OP_RXVLAN,
+	OP_RXTIMEVLAN	= OP_RXTIMESTAMP | OP_RXVLAN,
+	OP_RSS_HASH	= 0x65,
+	OP_TXINDEXLE	= 0x68,
+
+/* YUKON-2 SPECIAL opcodes defines */
+	OP_PUTIDX	= 0x70,
+};
+
+/* Yukon 2 hardware interface
+ * Not tested on big endian
+ */
+struct sky2_tx_le {
+	union {
+		u32	addr;
+		struct {
+			u16	offset;
+			u16	start;
+		} csum;
+		struct {
+			u16	size;
+			u16	rsvd;
+		} tso;
+	} tx;
+	u16	length;	/* also vlan tag or checksum start */
+	u8	ctrl;
+	u8	opcode;
+};
+
+struct sky2_rx_le {
+	union {
+		u32	addr;
+		struct {
+			u16	start1;
+			u16	start2;
+		} csum;
+	} rx;
+	u16	length;
+	u8	ctrl;
+	u8	opcode;
+};
+
+struct sky2_status_le {
+	u32	status;	/* also checksum */
+	u16	length;	/* also vlan tag */
+	u8	link;
+	u8	opcode;
+};
+
+
+struct ring_info {
+	struct sk_buff	*skb;
+	DECLARE_PCI_UNMAP_ADDR(mapaddr);
+	DECLARE_PCI_UNMAP_LEN(maplen);
+};
+
+struct sky2_port {
+	struct sky2_hw	     *hw		____cacheline_aligned;
+	struct net_device    *netdev;
+	unsigned	     port;
+	u32		     msg_enable;
+
+	struct ring_info     *tx_ring		____cacheline_aligned;
+	struct sky2_tx_le    *tx_le;
+	spinlock_t	     tx_lock;
+	u16		     tx_cons;		/* next le to check */
+	u16		     tx_prod;		/* next le to use */
+	u16		     tx_last_put;
+
+	struct ring_info     *rx_ring		____cacheline_aligned;
+	struct sky2_rx_le    *rx_le;
+	u16		     rx_ring_size;
+	u16		     rx_next;		/* next re to check */
+	u16		     rx_put;		/* next le index to use */
+	u16		     rx_last_put;
+
+	dma_addr_t	     rx_le_map;
+	dma_addr_t	     tx_le_map;
+	u32		     advertising;	/* ADVERTISED_ bits */
+	u16		     speed;	/* SPEED_1000, SPEED_100, ... */
+	u8		     autoneg;	/* AUTONEG_ENABLE, AUTONEG_DISABLE */
+	u8		     duplex;	/* DUPLEX_HALF, DUPLEX_FULL */
+	u8		     rx_pause;
+	u8		     tx_pause;
+	u8		     rx_csum;
+	u8		     wol;
+
+	struct tasklet_struct phy_task;
+	struct net_device_stats net_stats;
+};
+
+struct sky2_hw {
+	void __iomem  	     *regs;
+	struct pci_dev	     *pdev;
+	u32		     intr_mask;
+	struct net_device    *dev[2];
+
+	u8	     	     chip_id;
+	u8		     chip_rev;
+	u8		     copper;
+	u8		     ports;
+
+	struct sky2_status_le *st_le;
+	u32		     st_idx;
+	dma_addr_t   	     st_dma;
+
+	spinlock_t	     phy_lock;
+};
+
+/* Register accessor for memory mapped device */
+static inline u32 sky2_read32(const struct sky2_hw *hw, unsigned reg)
+{
+	return readl(hw->regs + reg);
+}
+
+static inline u16 sky2_read16(const struct sky2_hw *hw, unsigned reg)
+{
+	return readw(hw->regs + reg);
+}
+
+static inline u8 sky2_read8(const struct sky2_hw *hw, unsigned reg)
+{
+	return readb(hw->regs + reg);
+}
+
+static inline int is_pciex(const struct sky2_hw *hw)
+{
+	return (sky2_read32(hw, PCI_C(PCI_DEV_STATUS)) & PCI_OS_PCI_X) == 0;
+}
+
+
+static inline void sky2_write32(const struct sky2_hw *hw, unsigned reg, u32 val)
+{
+	writel(val, hw->regs + reg);
+}
+
+static inline void sky2_write16(const struct sky2_hw *hw, unsigned reg, u16 val)
+{
+	writew(val, hw->regs + reg);
+}
+
+static inline void sky2_write8(const struct sky2_hw *hw, unsigned reg, u8 val)
+{
+	writeb(val, hw->regs + reg);
+}
+
+/* Yukon PHY related registers */
+#define SK_GMAC_REG(port,reg) \
+	(BASE_GMAC_1 + (port) * (BASE_GMAC_2-BASE_GMAC_1) + (reg))
+#define GM_PHY_RETRIES	100
+
+static inline u16 gma_read16(const struct sky2_hw *hw, unsigned port, unsigned reg)
+{
+	return sky2_read16(hw, SK_GMAC_REG(port,reg));
+}
+
+static inline u32 gma_read32(struct sky2_hw *hw, unsigned port, unsigned reg)
+{
+	unsigned base = SK_GMAC_REG(port, reg);
+	return (u32) sky2_read16(hw, base)
+		| (u32) sky2_read16(hw, base+4) << 16;
+}
+
+static inline void gma_write16(const struct sky2_hw *hw, unsigned port, int r, u16 v)
+{
+	sky2_write16(hw, SK_GMAC_REG(port,r), v);
+}
+
+static inline void gma_set_addr(struct sky2_hw *hw, unsigned port, unsigned reg,
+				    const u8 *addr)
+{
+	gma_write16(hw, port, reg,  (u16) addr[0] | ((u16) addr[1] << 8));
+	gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8));
+	gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8));
+}
+#endif
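
For illustration only (not part of the patch): a minimal sketch of how the memory-mapped and GMAC accessors above could be combined, assuming a probed struct sky2_hw whose BAR has already been ioremap()ed. It reads back the station address that gma_set_addr() programs into GM_SRC_ADDR_1L/1M/1H for one port; the helper name is made up for the example.

static void sky2_show_station_addr(const struct sky2_hw *hw, unsigned port)
{
	u8 addr[6];
	int i;

	/* the three 16-bit address registers are spaced 4 bytes apart */
	for (i = 0; i < 3; i++) {
		u16 w = gma_read16(hw, port, GM_SRC_ADDR_1L + 4 * i);

		addr[2 * i]     = w & 0xff;	/* gma_set_addr() stores the low byte first */
		addr[2 * i + 1] = w >> 8;
	}

	printk(KERN_DEBUG "sky2 port %u: %02x:%02x:%02x:%02x:%02x:%02x\n",
	       port, addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
}
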
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -1735,6 +1735,160 @@ static void smc_ethtool_setmsglevel(stru
 	lp->msg_enable = level;
 }
 
+/*
+ * Must be called with lp->lock held; the caller must re-select bank 2 afterwards.
+ */
+static int smc_read_eeprom_reg(void __iomem *ioaddr, unsigned int reg)
+{
+	unsigned int timeout;
+
+	SMC_SELECT_BANK(2);
+	SMC_SET_PTR(reg);
+
+	SMC_SELECT_BANK(1);
+	SMC_SET_CTL(SMC_GET_CTL() | CTL_EEPROM_SELECT | CTL_RELOAD);
+	timeout = 100;
+	while ((SMC_GET_CTL() & CTL_RELOAD) && --timeout)
+		udelay(100);
+	if (timeout == 0) {
+		printk(KERN_INFO "%s: timeout reading EEPROM register %02x\n",
+			CARDNAME, reg);
+		return -EIO;
+	}
+
+	return SMC_GET_GP();
+}
+
+/*
+ * Must be called with lp->lock held; the caller must re-select bank 2 afterwards.
+ */
+static int smc_write_eeprom_reg(void __iomem *ioaddr, unsigned int reg,
+				unsigned int val)
+{
+	unsigned int timeout;
+
+	SMC_SELECT_BANK(2);
+	SMC_SET_PTR(reg);
+
+	SMC_SELECT_BANK(1);
+	SMC_SET_GP(val);
+	SMC_SET_CTL(SMC_GET_CTL() | CTL_EEPROM_SELECT | CTL_STORE);
+	timeout = 100;
+	while ((SMC_GET_CTL() & CTL_STORE) && --timeout)
+		udelay(100);
+	if (timeout == 0) {
+		printk(KERN_INFO "%s: timeout writing EEPROM register %02x\n",
+			CARDNAME, reg);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int smc_ethtool_geteepromlen(struct net_device *dev)
+{
+	return SMC_EEPROM_SIZE;
+}
+
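+/*
+ * Layout note: the EEPROM is addressed in 16-bit words, while ethtool
+ * offsets and lengths are in bytes.  An odd byte offset therefore starts
+ * with the high byte of word (offset >> 1); after that each word is
+ * returned low byte first, which is also the order set_eeprom expects.
+ */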
+static int smc_ethtool_geteeprom(struct net_device *dev,
+				 struct ethtool_eeprom *eeprom, u8 *data)
+{
+
+	struct smc_local *lp = netdev_priv(dev);
+	void __iomem *ioaddr = lp->base;
+	unsigned reg;
+	int ret, len;
+
+	len = eeprom->len;
+	reg = eeprom->offset >> 1;
+	eeprom->len = 0;
+	eeprom->magic = SMC_EEPROM_MAGIC;
+
+	spin_lock_irq(&lp->lock);
+
+	if (eeprom->offset & 1) {
+		ret = smc_read_eeprom_reg(ioaddr, reg++);
+		if (ret < 0)
+			goto out;
+		*data++ = ret >> 8;
+	        eeprom->len++;
+		len--;
+	}
+	while (len) {
+		len -= 2;
+		ret = smc_read_eeprom_reg(ioaddr, reg++);
+		if (ret < 0)
+			goto out;
+		*data++ = ret & 0xff;
+		if (len < 0) {
+			eeprom->len++;
+			break;
+		}
+		*data++ = ret >> 8;
+		eeprom->len += 2;
+	}
+
+	ret = 0;
+out:
+	SMC_SELECT_BANK(2);
+	spin_unlock_irq(&lp->lock);
+
+	return ret;
+}
+
+static int smc_ethtool_seteeprom(struct net_device *dev,
+				 struct ethtool_eeprom *eeprom, u8 *data)
+{
+	struct smc_local *lp = netdev_priv(dev);
+	void __iomem *ioaddr = lp->base;
+	unsigned reg;
+	int ret, len;
+
+	if (eeprom->magic != SMC_EEPROM_MAGIC)
+		return -EINVAL;
+
+	reg = eeprom->offset >> 1;
+	len = eeprom->len;
+
+	spin_lock_irq(&lp->lock);
+
+	if (eeprom->offset & 1) {
+		ret = smc_read_eeprom_reg(ioaddr, reg);
+		if (ret < 0)
+			goto out;
+		ret = (ret & 0xff) | ((int)*data++ << 8);
+		ret = smc_write_eeprom_reg(ioaddr, reg++, ret);
+		if (ret < 0)
+			goto out;
+		len--;
+	}
+	while (len) {
+		len -= 2;
+		if (len < 0) {
+			ret = smc_read_eeprom_reg(ioaddr, reg);
+			if (ret < 0)
+				goto out;
+			ret = (ret & 0xff) | *data;
+			ret = smc_write_eeprom_reg(ioaddr, reg, ret);
+			if (ret < 0)
+				goto out;
+			break;
+		}
+		ret = *data++;
+		ret |= (int)*data++ << 8;
+		ret = smc_write_eeprom_reg(ioaddr, reg++, ret);
+		if (ret < 0)
+			goto out;
+	}
+
+	ret = 0;
+out:
+	SMC_SELECT_BANK(2);
+	spin_unlock_irq(&lp->lock);
+
+	return ret;
+}
+
 static struct ethtool_ops smc_ethtool_ops = {
 	.get_settings	= smc_ethtool_getsettings,
 	.set_settings	= smc_ethtool_setsettings,
@@ -1744,8 +1898,9 @@ static struct ethtool_ops smc_ethtool_op
 	.set_msglevel	= smc_ethtool_setmsglevel,
 	.nway_reset	= smc_ethtool_nwayreset,
 	.get_link	= ethtool_op_get_link,
-//	.get_eeprom	= smc_ethtool_geteeprom,
-//	.set_eeprom	= smc_ethtool_seteeprom,
+	.get_eeprom_len	= smc_ethtool_geteepromlen,
+	.get_eeprom	= smc_ethtool_geteeprom,
+	.set_eeprom	= smc_ethtool_seteeprom,
 };
 
 /*
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -417,6 +417,13 @@ smc_pxa_dma_irq(int dma, void *dummy, st
 #define SMC_DATA_EXTENT (4)
 
 /*
+ * 128 bytes serial EEPROM
+ */
+#define SMC_EEPROM_SIZE		128
+
+#define SMC_EEPROM_MAGIC	0x3A8EBEEF
+
+/*
  . Bank Select Register:
  .
  .		yyyy yyyy 0000 00xx
@@ -839,6 +846,8 @@ static const char * chip_ids[ 16 ] =  {
 #define SMC_GET_CONFIG()	SMC_inw( ioaddr, CONFIG_REG )
 #define SMC_SET_CONFIG(x)	SMC_outw( x, ioaddr, CONFIG_REG )
 #define SMC_GET_COUNTER()	SMC_inw( ioaddr, COUNTER_REG )
+#define SMC_GET_GP()		SMC_inw( ioaddr, GP_REG )
+#define SMC_SET_GP(x)		SMC_outw( x, ioaddr, GP_REG )
 #define SMC_GET_CTL()		SMC_inw( ioaddr, CTL_REG )
 #define SMC_SET_CTL(x)		SMC_outw( x, ioaddr, CTL_REG )
 #define SMC_GET_MII()		SMC_inw( ioaddr, MII_REG )
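
A rough usage note, not part of the patch: with the get_eeprom/set_eeprom hooks above in place, the EEPROM becomes reachable through the standard ethtool interface, so (assuming an interface named eth0 and a reasonably current ethtool binary) "ethtool -e eth0" should dump all 128 bytes, and "ethtool -E eth0 magic 0x3A8EBEEF offset 0 value 0x12" should rewrite a single byte; writes are only honoured when the supplied magic matches SMC_EEPROM_MAGIC.
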
diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
--- a/drivers/net/tokenring/abyss.c
+++ b/drivers/net/tokenring/abyss.c
@@ -139,7 +139,7 @@ static int __devinit abyss_attach(struct
 	 */
 	dev->base_addr += 0x10;
 		
-	ret = tmsdev_init(dev, PCI_MAX_ADDRESS, pdev);
+	ret = tmsdev_init(dev, PCI_MAX_ADDRESS, &pdev->dev);
 	if (ret) {
 		printk("%s: unable to get memory for dev->priv.\n", 
 		       dev->name);
diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
--- a/drivers/net/tokenring/proteon.c
+++ b/drivers/net/tokenring/proteon.c
@@ -62,8 +62,7 @@ static int dmalist[] __initdata = {
 };
 
 static char cardname[] = "Proteon 1392\0";
-
-struct net_device *proteon_probe(int unit);
+static u64 dma_mask = ISA_MAX_ADDRESS;
 static int proteon_open(struct net_device *dev);
 static void proteon_read_eeprom(struct net_device *dev);
 static unsigned short proteon_setnselout_pins(struct net_device *dev);
@@ -116,7 +115,7 @@ nodev:
 	return -ENODEV;
 }
 
-static int __init setup_card(struct net_device *dev)
+static int __init setup_card(struct net_device *dev, struct device *pdev)
 {
 	struct net_local *tp;
         static int versionprinted;
@@ -137,7 +136,7 @@ static int __init setup_card(struct net_
 		}
 	}
 	if (err)
-		goto out4;
+		goto out5;
 
 	/* At this point we have found a valid card. */
 
@@ -145,14 +144,15 @@ static int __init setup_card(struct net_
 		printk(KERN_DEBUG "%s", version);
 
 	err = -EIO;
-	if (tmsdev_init(dev, ISA_MAX_ADDRESS, NULL))
+	pdev->dma_mask = &dma_mask;
+	if (tmsdev_init(dev, ISA_MAX_ADDRESS, pdev))
 		goto out4;
 
 	dev->base_addr &= ~3; 
 		
 	proteon_read_eeprom(dev);
 
-	printk(KERN_DEBUG "%s:    Ring Station Address: ", dev->name);
+	printk(KERN_DEBUG "proteon.c:    Ring Station Address: ");
 	printk("%2.2x", dev->dev_addr[0]);
 	for (j = 1; j < 6; j++)
 		printk(":%2.2x", dev->dev_addr[j]);
@@ -185,7 +185,7 @@ static int __init setup_card(struct net_
 		
                 if(irqlist[j] == 0)
                 {
-                        printk(KERN_INFO "%s: AutoSelect no IRQ available\n", dev->name);
+                        printk(KERN_INFO "proteon.c: AutoSelect no IRQ available\n");
 			goto out3;
 		}
 	}
@@ -196,15 +196,15 @@ static int __init setup_card(struct net_
 				break;
 		if (irqlist[j] == 0)
 		{
-			printk(KERN_INFO "%s: Illegal IRQ %d specified\n",
-				dev->name, dev->irq);
+			printk(KERN_INFO "proteon.c: Illegal IRQ %d specified\n",
+				dev->irq);
 			goto out3;
 		}
 		if (request_irq(dev->irq, tms380tr_interrupt, 0, 
 			cardname, dev))
 		{
-                        printk(KERN_INFO "%s: Selected IRQ %d not available\n", 
-				dev->name, dev->irq);
+                        printk(KERN_INFO "proteon.c: Selected IRQ %d not available\n",
+				dev->irq);
 			goto out3;
 		}
 	}
@@ -220,7 +220,7 @@ static int __init setup_card(struct net_
 
 		if(dmalist[j] == 0)
 		{
-			printk(KERN_INFO "%s: AutoSelect no DMA available\n", dev->name);
+			printk(KERN_INFO "proteon.c: AutoSelect no DMA available\n");
 			goto out2;
 		}
 	}
@@ -231,25 +231,25 @@ static int __init setup_card(struct net_
 				break;
 		if (dmalist[j] == 0)
 		{
-                        printk(KERN_INFO "%s: Illegal DMA %d specified\n", 
-				dev->name, dev->dma);
+                        printk(KERN_INFO "proteon.c: Illegal DMA %d specified\n",
+				dev->dma);
 			goto out2;
 		}
 		if (request_dma(dev->dma, cardname))
 		{
-                        printk(KERN_INFO "%s: Selected DMA %d not available\n", 
-				dev->name, dev->dma);
+                        printk(KERN_INFO "proteon.c: Selected DMA %d not available\n",
+				dev->dma);
 			goto out2;
 		}
 	}
 
-	printk(KERN_DEBUG "%s:    IO: %#4lx  IRQ: %d  DMA: %d\n",
-	       dev->name, dev->base_addr, dev->irq, dev->dma);
-		
 	err = register_netdev(dev);
 	if (err)
 		goto out;
 
+	printk(KERN_DEBUG "%s:    IO: %#4lx  IRQ: %d  DMA: %d\n",
+	       dev->name, dev->base_addr, dev->irq, dev->dma);
+
 	return 0;
 out:
 	free_dma(dev->dma);
@@ -258,34 +258,11 @@ out2:
 out3:
 	tmsdev_term(dev);
 out4:
-	release_region(dev->base_addr, PROTEON_IO_EXTENT); 
+	release_region(dev->base_addr, PROTEON_IO_EXTENT);
+out5:
 	return err;
 }
 
-struct net_device * __init proteon_probe(int unit)
-{
-	struct net_device *dev = alloc_trdev(sizeof(struct net_local));
-	int err = 0;
-
-	if (!dev)
-		return ERR_PTR(-ENOMEM);
-
-	if (unit >= 0) {
-		sprintf(dev->name, "tr%d", unit);
-		netdev_boot_setup_check(dev);
-	}
-
-	err = setup_card(dev);
-	if (err)
-		goto out;
-
-	return dev;
-
-out:
-	free_netdev(dev);
-	return ERR_PTR(err);
-}
-
 /*
  * Reads MAC address from adapter RAM, which should've read it from
  * the onboard ROM.  
@@ -352,8 +329,6 @@ static int proteon_open(struct net_devic
 	return tms380tr_open(dev);
 }
 
-#ifdef MODULE
-
 #define ISATR_MAX_ADAPTERS 3
 
 static int io[ISATR_MAX_ADAPTERS];
@@ -366,13 +341,23 @@ module_param_array(io, int, NULL, 0);
 module_param_array(irq, int, NULL, 0);
 module_param_array(dma, int, NULL, 0);
 
-static struct net_device *proteon_dev[ISATR_MAX_ADAPTERS];
+static struct platform_device *proteon_dev[ISATR_MAX_ADAPTERS];
+
+static struct device_driver proteon_driver = {
+	.name		= "proteon",
+	.bus		= &platform_bus_type,
+};
 
-int init_module(void)
+static int __init proteon_init(void)
 {
 	struct net_device *dev;
+	struct platform_device *pdev;
 	int i, num = 0, err = 0;
 
+	err = driver_register(&proteon_driver);
+	if (err)
+		return err;
+
 	for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
 		dev = alloc_trdev(sizeof(struct net_local));
 		if (!dev)
@@ -381,11 +366,15 @@ int init_module(void)
 		dev->base_addr = io[i];
 		dev->irq = irq[i];
 		dev->dma = dma[i];
-		err = setup_card(dev);
+		pdev = platform_device_register_simple("proteon",
+			i, NULL, 0);
+		err = setup_card(dev, &pdev->dev);
 		if (!err) {
-			proteon_dev[i] = dev;
+			proteon_dev[i] = pdev;
+			dev_set_drvdata(&pdev->dev, dev);
 			++num;
 		} else {
+			platform_device_unregister(pdev);
 			free_netdev(dev);
 		}
 	}
@@ -399,23 +388,28 @@ int init_module(void)
 	return (0);
 }
 
-void cleanup_module(void)
+static void __exit proteon_cleanup(void)
 {
+	struct net_device *dev;
 	int i;
 
 	for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
-		struct net_device *dev = proteon_dev[i];
+		struct platform_device *pdev = proteon_dev[i];
 		
-		if (!dev) 
+		if (!pdev)
 			continue;
-		
+		dev = dev_get_drvdata(&pdev->dev);
 		unregister_netdev(dev);
 		release_region(dev->base_addr, PROTEON_IO_EXTENT);
 		free_irq(dev->irq, dev);
 		free_dma(dev->dma);
 		tmsdev_term(dev);
 		free_netdev(dev);
+		dev_set_drvdata(&pdev->dev, NULL);
+		platform_device_unregister(pdev);
 	}
+	driver_unregister(&proteon_driver);
 }
-#endif /* MODULE */
 
+module_init(proteon_init);
+module_exit(proteon_cleanup);
diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
--- a/drivers/net/tokenring/skisa.c
+++ b/drivers/net/tokenring/skisa.c
@@ -68,8 +68,7 @@ static int dmalist[] __initdata = {
 };
 
 static char isa_cardname[] = "SK NET TR 4/16 ISA\0";
-
-struct net_device *sk_isa_probe(int unit);
+static u64 dma_mask = ISA_MAX_ADDRESS;
 static int sk_isa_open(struct net_device *dev);
 static void sk_isa_read_eeprom(struct net_device *dev);
 static unsigned short sk_isa_setnselout_pins(struct net_device *dev);
@@ -133,7 +132,7 @@ static int __init sk_isa_probe1(struct n
 	return 0;
 }
 
-static int __init setup_card(struct net_device *dev)
+static int __init setup_card(struct net_device *dev, struct device *pdev)
 {
 	struct net_local *tp;
         static int versionprinted;
@@ -154,7 +153,7 @@ static int __init setup_card(struct net_
 		}
 	}
 	if (err)
-		goto out4;
+		goto out5;
 
 	/* At this point we have found a valid card. */
 
@@ -162,14 +161,15 @@ static int __init setup_card(struct net_
 		printk(KERN_DEBUG "%s", version);
 
 	err = -EIO;
-	if (tmsdev_init(dev, ISA_MAX_ADDRESS, NULL))
+	pdev->dma_mask = &dma_mask;
+	if (tmsdev_init(dev, ISA_MAX_ADDRESS, pdev))
 		goto out4;
 
 	dev->base_addr &= ~3; 
 		
 	sk_isa_read_eeprom(dev);
 
-	printk(KERN_DEBUG "%s:    Ring Station Address: ", dev->name);
+	printk(KERN_DEBUG "skisa.c:    Ring Station Address: ");
 	printk("%2.2x", dev->dev_addr[0]);
 	for (j = 1; j < 6; j++)
 		printk(":%2.2x", dev->dev_addr[j]);
@@ -202,7 +202,7 @@ static int __init setup_card(struct net_
 		
                 if(irqlist[j] == 0)
                 {
-                        printk(KERN_INFO "%s: AutoSelect no IRQ available\n", dev->name);
+                        printk(KERN_INFO "skisa.c: AutoSelect no IRQ available\n");
 			goto out3;
 		}
 	}
@@ -213,15 +213,15 @@ static int __init setup_card(struct net_
 				break;
 		if (irqlist[j] == 0)
 		{
-			printk(KERN_INFO "%s: Illegal IRQ %d specified\n",
-				dev->name, dev->irq);
+			printk(KERN_INFO "skisa.c: Illegal IRQ %d specified\n",
+				dev->irq);
 			goto out3;
 		}
 		if (request_irq(dev->irq, tms380tr_interrupt, 0, 
 			isa_cardname, dev))
 		{
-                        printk(KERN_INFO "%s: Selected IRQ %d not available\n", 
-				dev->name, dev->irq);
+                        printk(KERN_INFO "skisa.c: Selected IRQ %d not available\n",
+				dev->irq);
 			goto out3;
 		}
 	}
@@ -237,7 +237,7 @@ static int __init setup_card(struct net_
 
 		if(dmalist[j] == 0)
 		{
-			printk(KERN_INFO "%s: AutoSelect no DMA available\n", dev->name);
+			printk(KERN_INFO "skisa.c: AutoSelect no DMA available\n");
 			goto out2;
 		}
 	}
@@ -248,25 +248,25 @@ static int __init setup_card(struct net_
 				break;
 		if (dmalist[j] == 0)
 		{
-                        printk(KERN_INFO "%s: Illegal DMA %d specified\n", 
-				dev->name, dev->dma);
+                        printk(KERN_INFO "skisa.c: Illegal DMA %d specified\n",
+				dev->dma);
 			goto out2;
 		}
 		if (request_dma(dev->dma, isa_cardname))
 		{
-                        printk(KERN_INFO "%s: Selected DMA %d not available\n", 
-				dev->name, dev->dma);
+                        printk(KERN_INFO "skisa.c: Selected DMA %d not available\n",
+				dev->dma);
 			goto out2;
 		}
 	}
 
-	printk(KERN_DEBUG "%s:    IO: %#4lx  IRQ: %d  DMA: %d\n",
-	       dev->name, dev->base_addr, dev->irq, dev->dma);
-		
 	err = register_netdev(dev);
 	if (err)
 		goto out;
 
+	printk(KERN_DEBUG "%s:    IO: %#4lx  IRQ: %d  DMA: %d\n",
+	       dev->name, dev->base_addr, dev->irq, dev->dma);
+
 	return 0;
 out:
 	free_dma(dev->dma);
@@ -275,33 +275,11 @@ out2:
 out3:
 	tmsdev_term(dev);
 out4:
-	release_region(dev->base_addr, SK_ISA_IO_EXTENT); 
+	release_region(dev->base_addr, SK_ISA_IO_EXTENT);
+out5:
 	return err;
 }
 
-struct net_device * __init sk_isa_probe(int unit)
-{
-	struct net_device *dev = alloc_trdev(sizeof(struct net_local));
-	int err = 0;
-
-	if (!dev)
-		return ERR_PTR(-ENOMEM);
-
-	if (unit >= 0) {
-		sprintf(dev->name, "tr%d", unit);
-		netdev_boot_setup_check(dev);
-	}
-
-	err = setup_card(dev);
-	if (err)
-		goto out;
-
-	return dev;
-out:
-	free_netdev(dev);
-	return ERR_PTR(err);
-}
-
 /*
  * Reads MAC address from adapter RAM, which should've read it from
  * the onboard ROM.  
@@ -361,8 +339,6 @@ static int sk_isa_open(struct net_device
 	return tms380tr_open(dev);
 }
 
-#ifdef MODULE
-
 #define ISATR_MAX_ADAPTERS 3
 
 static int io[ISATR_MAX_ADAPTERS];
@@ -375,13 +351,23 @@ module_param_array(io, int, NULL, 0);
 module_param_array(irq, int, NULL, 0);
 module_param_array(dma, int, NULL, 0);
 
-static struct net_device *sk_isa_dev[ISATR_MAX_ADAPTERS];
+static struct platform_device *sk_isa_dev[ISATR_MAX_ADAPTERS];
 
-int init_module(void)
+static struct device_driver sk_isa_driver = {
+	.name		= "skisa",
+	.bus		= &platform_bus_type,
+};
+
+static int __init sk_isa_init(void)
 {
 	struct net_device *dev;
+	struct platform_device *pdev;
 	int i, num = 0, err = 0;
 
+	err = driver_register(&sk_isa_driver);
+	if (err)
+		return err;
+
 	for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
 		dev = alloc_trdev(sizeof(struct net_local));
 		if (!dev)
@@ -390,12 +376,15 @@ int init_module(void)
 		dev->base_addr = io[i];
 		dev->irq = irq[i];
 		dev->dma = dma[i];
-		err = setup_card(dev);
-
+		pdev = platform_device_register_simple("skisa",
+			i, NULL, 0);
+		err = setup_card(dev, &pdev->dev);
 		if (!err) {
-			sk_isa_dev[i] = dev;
+			sk_isa_dev[i] = pdev;
+			dev_set_drvdata(&sk_isa_dev[i]->dev, dev);
 			++num;
 		} else {
+			platform_device_unregister(pdev);
 			free_netdev(dev);
 		}
 	}
@@ -409,23 +398,28 @@ int init_module(void)
 	return (0);
 }
 
-void cleanup_module(void)
+static void __exit sk_isa_cleanup(void)
 {
+	struct net_device *dev;
 	int i;
 
 	for (i = 0; i < ISATR_MAX_ADAPTERS ; i++) {
-		struct net_device *dev = sk_isa_dev[i];
+		struct platform_device *pdev = sk_isa_dev[i];
 
-		if (!dev) 
+		if (!pdev)
 			continue;
-		
+		dev = dev_get_drvdata(&pdev->dev);
 		unregister_netdev(dev);
 		release_region(dev->base_addr, SK_ISA_IO_EXTENT);
 		free_irq(dev->irq, dev);
 		free_dma(dev->dma);
 		tmsdev_term(dev);
 		free_netdev(dev);
+		dev_set_drvdata(&pdev->dev, NULL);
+		platform_device_unregister(pdev);
 	}
+	driver_unregister(&sk_isa_driver);
 }
-#endif /* MODULE */
 
+module_init(sk_isa_init);
+module_exit(sk_isa_cleanup);
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -62,6 +62,7 @@
  *				normal operation.
  *	30-Dec-02	JF	Removed incorrect __init from 
  *				tms380tr_init_card.
+ *	22-Jul-05	JF	Converted to dma-mapping.
  *      			
  *  To do:
  *    1. Multi/Broadcast packet handling (this may have fixed itself)
@@ -89,7 +90,7 @@ static const char version[] = "tms380tr.
 #include <linux/time.h>
 #include <linux/errno.h>
 #include <linux/init.h>
-#include <linux/pci.h>
+#include <linux/dma-mapping.h>
 #include <linux/delay.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -114,8 +115,6 @@ static const char version[] = "tms380tr.
 #endif
 static unsigned int tms380tr_debug = TMS380TR_DEBUG;
 
-static struct device tms_device;
-
 /* Index to functions, as function prototypes.
  * Alphabetical by function name.
  */
@@ -434,7 +433,7 @@ static void tms380tr_init_net_local(stru
 			skb_put(tp->Rpl[i].Skb, tp->MaxPacketSize);
 
 			/* data unreachable for DMA ? then use local buffer */
-			dmabuf = pci_map_single(tp->pdev, tp->Rpl[i].Skb->data, tp->MaxPacketSize, PCI_DMA_FROMDEVICE);
+			dmabuf = dma_map_single(tp->pdev, tp->Rpl[i].Skb->data, tp->MaxPacketSize, DMA_FROM_DEVICE);
 			if(tp->dmalimit && (dmabuf + tp->MaxPacketSize > tp->dmalimit))
 			{
 				tp->Rpl[i].SkbStat = SKB_DATA_COPY;
@@ -638,10 +637,10 @@ static int tms380tr_hardware_send_packet
 	/* Is buffer reachable for Busmaster-DMA? */
 
 	length	= skb->len;
-	dmabuf = pci_map_single(tp->pdev, skb->data, length, PCI_DMA_TODEVICE);
+	dmabuf = dma_map_single(tp->pdev, skb->data, length, DMA_TO_DEVICE);
 	if(tp->dmalimit && (dmabuf + length > tp->dmalimit)) {
 		/* Copy frame to local buffer */
-		pci_unmap_single(tp->pdev, dmabuf, length, PCI_DMA_TODEVICE);
+		dma_unmap_single(tp->pdev, dmabuf, length, DMA_TO_DEVICE);
 		dmabuf  = 0;
 		i 	= tp->TplFree->TPLIndex;
 		buf 	= tp->LocalTxBuffers[i];
@@ -1284,9 +1283,7 @@ static int tms380tr_reset_adapter(struct
 	unsigned short count, c, count2;
 	const struct firmware *fw_entry = NULL;
 
-	strncpy(tms_device.bus_id,dev->name, BUS_ID_SIZE);
-
-	if (request_firmware(&fw_entry, "tms380tr.bin", &tms_device) != 0) {
+	if (request_firmware(&fw_entry, "tms380tr.bin", tp->pdev) != 0) {
 		printk(KERN_ALERT "%s: firmware %s is missing, cannot start.\n",
 			dev->name, "tms380tr.bin");
 		return (-1);
@@ -2021,7 +2018,7 @@ static void tms380tr_cancel_tx_queue(str
 
 		printk(KERN_INFO "Cancel tx (%08lXh).\n", (unsigned long)tpl);
 		if (tpl->DMABuff)
-			pci_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, PCI_DMA_TODEVICE);
+			dma_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, DMA_TO_DEVICE);
 		dev_kfree_skb_any(tpl->Skb);
 	}
 
@@ -2090,7 +2087,7 @@ static void tms380tr_tx_status_irq(struc
 
 		tp->MacStat.tx_packets++;
 		if (tpl->DMABuff)
-			pci_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, PCI_DMA_TODEVICE);
+			dma_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, DMA_TO_DEVICE);
 		dev_kfree_skb_irq(tpl->Skb);
 		tpl->BusyFlag = 0;	/* "free" TPL */
 	}
@@ -2209,7 +2206,7 @@ static void tms380tr_rcv_status_irq(stru
 				tp->MacStat.rx_errors++;
 		}
 		if (rpl->DMABuff)
-			pci_unmap_single(tp->pdev, rpl->DMABuff, tp->MaxPacketSize, PCI_DMA_TODEVICE);
+			dma_unmap_single(tp->pdev, rpl->DMABuff, tp->MaxPacketSize, DMA_TO_DEVICE);
 		rpl->DMABuff = 0;
 
 		/* Allocate new skb for rpl */
@@ -2227,7 +2224,7 @@ static void tms380tr_rcv_status_irq(stru
 			skb_put(rpl->Skb, tp->MaxPacketSize);
 
 			/* Data unreachable for DMA ? then use local buffer */
-			dmabuf = pci_map_single(tp->pdev, rpl->Skb->data, tp->MaxPacketSize, PCI_DMA_FROMDEVICE);
+			dmabuf = dma_map_single(tp->pdev, rpl->Skb->data, tp->MaxPacketSize, DMA_FROM_DEVICE);
 			if(tp->dmalimit && (dmabuf + tp->MaxPacketSize > tp->dmalimit))
 			{
 				rpl->SkbStat = SKB_DATA_COPY;
@@ -2332,12 +2329,12 @@ void tmsdev_term(struct net_device *dev)
 	struct net_local *tp;
 
 	tp = netdev_priv(dev);
-	pci_unmap_single(tp->pdev, tp->dmabuffer, sizeof(struct net_local),
-		PCI_DMA_BIDIRECTIONAL);
+	dma_unmap_single(tp->pdev, tp->dmabuffer, sizeof(struct net_local),
+		DMA_BIDIRECTIONAL);
 }
 
 int tmsdev_init(struct net_device *dev, unsigned long dmalimit, 
-		struct pci_dev *pdev)
+		struct device *pdev)
 {
 	struct net_local *tms_local;
 
@@ -2346,8 +2343,8 @@ int tmsdev_init(struct net_device *dev, 
 	init_waitqueue_head(&tms_local->wait_for_tok_int);
 	tms_local->dmalimit = dmalimit;
 	tms_local->pdev = pdev;
-	tms_local->dmabuffer = pci_map_single(pdev, (void *)tms_local,
-	    sizeof(struct net_local), PCI_DMA_BIDIRECTIONAL);
+	tms_local->dmabuffer = dma_map_single(pdev, (void *)tms_local,
+	    sizeof(struct net_local), DMA_BIDIRECTIONAL);
 	if (tms_local->dmabuffer + sizeof(struct net_local) > dmalimit)
 	{
 		printk(KERN_INFO "%s: Memory not accessible for DMA\n",
@@ -2370,8 +2367,6 @@ int tmsdev_init(struct net_device *dev, 
 	return 0;
 }
 
-#ifdef MODULE
-
 EXPORT_SYMBOL(tms380tr_open);
 EXPORT_SYMBOL(tms380tr_close);
 EXPORT_SYMBOL(tms380tr_interrupt);
@@ -2379,6 +2374,8 @@ EXPORT_SYMBOL(tmsdev_init);
 EXPORT_SYMBOL(tmsdev_term);
 EXPORT_SYMBOL(tms380tr_wait);
 
+#ifdef MODULE
+
 static struct module *TMS380_module = NULL;
 
 int init_module(void)
diff --git a/drivers/net/tokenring/tms380tr.h b/drivers/net/tokenring/tms380tr.h
--- a/drivers/net/tokenring/tms380tr.h
+++ b/drivers/net/tokenring/tms380tr.h
@@ -18,7 +18,7 @@ int tms380tr_open(struct net_device *dev
 int tms380tr_close(struct net_device *dev);
 irqreturn_t tms380tr_interrupt(int irq, void *dev_id, struct pt_regs *regs);
 int tmsdev_init(struct net_device *dev, unsigned long dmalimit,
-		struct pci_dev *pdev);
+		struct device *pdev);
 void tmsdev_term(struct net_device *dev);
 void tms380tr_wait(unsigned long time);
 
@@ -719,7 +719,7 @@ struct s_TPL {	/* Transmit Parameter Lis
 	struct sk_buff *Skb;
 	unsigned char TPLIndex;
 	volatile unsigned char BusyFlag;/* Flag: TPL busy? */
-	dma_addr_t DMABuff;		/* DMA IO bus address from pci_map */
+	dma_addr_t DMABuff;		/* DMA IO bus address from dma_map */
 };
 
 /* ---------------------Receive Functions-------------------------------*
@@ -1060,7 +1060,7 @@ struct s_RPL {	/* Receive Parameter List
 	struct sk_buff *Skb;
 	SKB_STAT SkbStat;
 	int RPLIndex;
-	dma_addr_t DMABuff;		/* DMA IO bus address from pci_map */
+	dma_addr_t DMABuff;		/* DMA IO bus address from dma_map */
 };
 
 /* Information that need to be kept for each board. */
@@ -1091,7 +1091,7 @@ typedef struct net_local {
 	RPL *RplTail;
 	unsigned char LocalRxBuffers[RPL_NUM][DEFAULT_PACKET_SIZE];
 
-	struct pci_dev *pdev;
+	struct device *pdev;
 	int DataRate;
 	unsigned char ScbInUse;
 	unsigned short CMDqueue;
diff --git a/drivers/net/tokenring/tmspci.c b/drivers/net/tokenring/tmspci.c
--- a/drivers/net/tokenring/tmspci.c
+++ b/drivers/net/tokenring/tmspci.c
@@ -100,7 +100,7 @@ static int __devinit tms_pci_attach(stru
 	unsigned int pci_irq_line;
 	unsigned long pci_ioaddr;
 	struct card_info *cardinfo = &card_info_table[ent->driver_data];
-		
+
 	if (versionprinted++ == 0)
 		printk("%s", version);
 
@@ -143,7 +143,7 @@ static int __devinit tms_pci_attach(stru
 		printk(":%2.2x", dev->dev_addr[i]);
 	printk("\n");
 		
-	ret = tmsdev_init(dev, PCI_MAX_ADDRESS, pdev);
+	ret = tmsdev_init(dev, PCI_MAX_ADDRESS, &pdev->dev);
 	if (ret) {
 		printk("%s: unable to get memory for dev->priv.\n", dev->name);
 		goto err_out_irq;
diff --git a/drivers/net/tulip/Kconfig b/drivers/net/tulip/Kconfig
--- a/drivers/net/tulip/Kconfig
+++ b/drivers/net/tulip/Kconfig
@@ -135,6 +135,18 @@ config DM9102
 	  <file:Documentation/networking/net-modules.txt>.  The module will
 	  be called dmfe.
 
+config ULI526X
+	tristate "ULi M526x controller support"
+	depends on NET_TULIP && PCI
+	select CRC32
+	---help---
+	  This driver is for ULi M5261/M5263 10/100M Ethernet Controller
+	  (<http://www.uli.com.tw/>).
+
+	  To compile this driver as a module, choose M here and read
+	  <file:Documentation/networking/net-modules.txt>.  The module will
+	  be called uli526x.
+	  
 config PCMCIA_XIRCOM
 	tristate "Xircom CardBus support (new driver)"
 	depends on NET_TULIP && CARDBUS
diff --git a/drivers/net/tulip/Makefile b/drivers/net/tulip/Makefile
--- a/drivers/net/tulip/Makefile
+++ b/drivers/net/tulip/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_WINBOND_840)	+= winbond-840
 obj-$(CONFIG_DE2104X)		+= de2104x.o
 obj-$(CONFIG_TULIP)		+= tulip.o
 obj-$(CONFIG_DE4X5)		+= de4x5.o
+obj-$(CONFIG_ULI526X)		+= uli526x.o
 
 # Declare multi-part drivers.
 
diff --git a/drivers/net/tulip/media.c b/drivers/net/tulip/media.c
--- a/drivers/net/tulip/media.c
+++ b/drivers/net/tulip/media.c
@@ -81,25 +81,6 @@ int tulip_mdio_read(struct net_device *d
 		return retval & 0xffff;
 	}
 
-	if(tp->chip_id == ULI526X && tp->revision >= 0x40) {
-		int value;
-		int i = 1000;
-		
-		value = ioread32(ioaddr + CSR9);
-		iowrite32(value & 0xFFEFFFFF, ioaddr + CSR9);
-		
-		value = (phy_id << 21) | (location << 16) | 0x08000000;
-		iowrite32(value, ioaddr + CSR10);
-		
-		while(--i > 0) {
-			mdio_delay();
-			if(ioread32(ioaddr + CSR10) & 0x10000000)
-				break;
-		}
-		retval = ioread32(ioaddr + CSR10);
-		spin_unlock_irqrestore(&tp->mii_lock, flags);
-		return retval & 0xFFFF;
-	}
 	/* Establish sync by sending at least 32 logic ones. */
 	for (i = 32; i >= 0; i--) {
 		iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
@@ -159,23 +140,6 @@ void tulip_mdio_write(struct net_device 
 		spin_unlock_irqrestore(&tp->mii_lock, flags);
 		return;
 	}
-	if (tp->chip_id == ULI526X && tp->revision >= 0x40) {
-		int value;
-		int i = 1000;
-		
-		value = ioread32(ioaddr + CSR9);
-		iowrite32(value & 0xFFEFFFFF, ioaddr + CSR9);
-		
-		value = (phy_id << 21) | (location << 16) | 0x04000000 | (val & 0xFFFF);
-		iowrite32(value, ioaddr + CSR10);
-		
-		while(--i > 0) {
-			if (ioread32(ioaddr + CSR10) & 0x10000000)
-				break;
-		}
-		spin_unlock_irqrestore(&tp->mii_lock, flags);
-		return;
-	}
 		
 	/* Establish sync by sending 32 logic ones. */
 	for (i = 32; i >= 0; i--) {
diff --git a/drivers/net/tulip/timer.c b/drivers/net/tulip/timer.c
--- a/drivers/net/tulip/timer.c
+++ b/drivers/net/tulip/timer.c
@@ -39,7 +39,6 @@ void tulip_timer(unsigned long data)
 	case MX98713:
 	case COMPEX9881:
 	case DM910X:
-	case ULI526X:
 	default: {
 		struct medialeaf *mleaf;
 		unsigned char *p;
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h
--- a/drivers/net/tulip/tulip.h
+++ b/drivers/net/tulip/tulip.h
@@ -88,7 +88,6 @@ enum chips {
 	I21145,
 	DM910X,
 	CONEXANT,
-	ULI526X
 };
 
 
@@ -482,11 +481,8 @@ static inline void tulip_stop_rxtx(struc
 
 static inline void tulip_restart_rxtx(struct tulip_private *tp)
 {
-	if(!(tp->chip_id == ULI526X && 
-		(tp->revision == 0x40 || tp->revision == 0x50))) {
-		tulip_stop_rxtx(tp);
-		udelay(5);
-	}
+	tulip_stop_rxtx(tp);
+	udelay(5);
 	tulip_start_rxtx(tp);
 }
 
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -199,9 +199,6 @@ struct tulip_chip_table tulip_tbl[] = {
   { "Conexant LANfinity", 256, 0x0001ebef,
 	HAS_MII | HAS_ACPI, tulip_timer },
 
-   /* ULi526X */
-   { "ULi M5261/M5263", 128, 0x0001ebef,
-        HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI, tulip_timer },
 };
 
 
@@ -239,8 +236,6 @@ static struct pci_device_id tulip_pci_tb
 	{ 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 	{ 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 	{ 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
- 	{ 0x10b9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ULI526X },	/* ALi 1563 integrated ethernet */
- 	{ 0x10b9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ULI526X },	/* ALi 1563 integrated ethernet */
 	{ 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
 	{ 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
 	{ } /* terminate list */
@@ -522,7 +517,7 @@ static void tulip_tx_timeout(struct net_
 				   dev->name);
 	} else if (tp->chip_id == DC21140 || tp->chip_id == DC21142
 			   || tp->chip_id == MX98713 || tp->chip_id == COMPEX9881
-			   || tp->chip_id == DM910X || tp->chip_id == ULI526X) {
+			   || tp->chip_id == DM910X) {
 		printk(KERN_WARNING "%s: 21140 transmit timed out, status %8.8x, "
 			   "SIA %8.8x %8.8x %8.8x %8.8x, resetting...\n",
 			   dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
@@ -1103,18 +1098,16 @@ static void set_rx_mode(struct net_devic
 			entry = tp->cur_tx++ % TX_RING_SIZE;
 
 			if (entry != 0) {
-				/* Avoid a chip errata by prefixing a dummy entry. Don't do
-				   this on the ULI526X as it triggers a different problem */
-				if (!(tp->chip_id == ULI526X && (tp->revision == 0x40 || tp->revision == 0x50))) {
-					tp->tx_buffers[entry].skb = NULL;
-					tp->tx_buffers[entry].mapping = 0;
-					tp->tx_ring[entry].length =
-						(entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
-					tp->tx_ring[entry].buffer1 = 0;
-					/* Must set DescOwned later to avoid race with chip */
-					dummy = entry;
-					entry = tp->cur_tx++ % TX_RING_SIZE;
-				}
+				/* Avoid a chip errata by prefixing a dummy entry. */
+				tp->tx_buffers[entry].skb = NULL;
+				tp->tx_buffers[entry].mapping = 0;
+				tp->tx_ring[entry].length =
+					(entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
+				tp->tx_ring[entry].buffer1 = 0;
+				/* Must set DescOwned later to avoid race with chip */
+				dummy = entry;
+				entry = tp->cur_tx++ % TX_RING_SIZE;
+
 			}
 
 			tp->tx_buffers[entry].skb = NULL;
@@ -1235,10 +1228,6 @@ static int tulip_uli_dm_quirk(struct pci
 {
 	if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
 		return 1;
-	if (pdev->vendor == 0x10b9 && pdev->device == 0x5261)
-		return 1;
-	if (pdev->vendor == 0x10b9 && pdev->device == 0x5263)
-		return 1;
 	return 0;
 }
 
@@ -1680,7 +1669,6 @@ static int __devinit tulip_init_one (str
 	switch (chip_idx) {
 	case DC21140:
 	case DM910X:
-	case ULI526X:
 	default:
 		if (tp->mtable)
 			iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
new file mode 100644
--- /dev/null
+++ b/drivers/net/tulip/uli526x.c
@@ -0,0 +1,1770 @@
+/*
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License
+    as published by the Free Software Foundation; either version 2
+    of the License, or (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    
+*/
+
+#define DRV_NAME	"uli526x"
+#define DRV_VERSION	"0.9.3"
+#define DRV_RELDATE	"2005-7-29"
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+
+#include <asm/processor.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/uaccess.h>
+
+
+/* Board/System/Debug information/definition ---------------- */
+#define PCI_ULI5261_ID  0x526110B9	/* ULi M5261 ID */
+#define PCI_ULI5263_ID  0x526310B9	/* ULi M5263 ID */
+
+#define ULI526X_IO_SIZE 0x100
+#define TX_DESC_CNT     0x20            /* Allocated Tx descriptors */
+#define RX_DESC_CNT     0x30            /* Allocated Rx descriptors */
+#define TX_FREE_DESC_CNT (TX_DESC_CNT - 2)	/* Max TX packet count */
+#define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3)	/* TX wakeup count */
+#define DESC_ALL_CNT    (TX_DESC_CNT + RX_DESC_CNT)
+#define TX_BUF_ALLOC    0x600
+#define RX_ALLOC_SIZE   0x620
+#define ULI526X_RESET    1
+#define CR0_DEFAULT     0
+#define CR6_DEFAULT     0x00080000      /* HD */
+#define CR6_DEFAULT_A     0x22240000
+#define CR7_DEFAULT     0x180c1
+#define CR15_DEFAULT    0x06            /* TxJabber RxWatchdog */
+#define TDES0_ERR_MASK  0x4302          /* TXJT, LC, EC, FUE */
+#define MAX_PACKET_SIZE 1514
+#define ULI5261_MAX_MULTICAST 14
+#define RX_COPY_SIZE	100
+#define MAX_CHECK_PACKET 0x8000
+
+#define ULI526X_10MHF      0
+#define ULI526X_100MHF     1
+#define ULI526X_10MFD      4
+#define ULI526X_100MFD     5
+#define ULI526X_AUTO       8
+
+#define ULI526X_TXTH_72	0x400000	/* TX TH 72 byte */
+#define ULI526X_TXTH_96	0x404000	/* TX TH 96 byte */
+#define ULI526X_TXTH_128	0x0000		/* TX TH 128 byte */
+#define ULI526X_TXTH_256	0x4000		/* TX TH 256 byte */
+#define ULI526X_TXTH_512	0x8000		/* TX TH 512 byte */
+#define ULI526X_TXTH_1K	0xC000		/* TX TH 1K  byte */
+
+#define ULI526X_TIMER_WUT  (jiffies + HZ * 1)/* timer wakeup time : 1 second */
+#define ULI526X_TX_TIMEOUT ((16*HZ)/2)	/* tx packet time-out time: 8 s */
+#define ULI526X_TX_KICK	(4*HZ/2)	/* tx packet kick-out time: 2 s */
+
+#define ULI526X_DBUG(dbug_now, msg, value) if (uli526x_debug || (dbug_now)) printk(KERN_ERR DRV_NAME ": %s %lx\n", (msg), (long) (value))
+
+#define SHOW_MEDIA_TYPE(mode) printk(KERN_ERR DRV_NAME ": Change Speed to %sMbps %s duplex\n",mode & 1 ?"100":"10", mode & 4 ? "full":"half");
+
+
+/* CR9 definition: SROM/MII */
+#define CR9_SROM_READ   0x4800
+#define CR9_SRCS        0x1
+#define CR9_SRCLK       0x2
+#define CR9_CRDOUT      0x8
+#define SROM_DATA_0     0x0
+#define SROM_DATA_1     0x4
+#define PHY_DATA_1      0x20000
+#define PHY_DATA_0      0x00000
+#define MDCLKH          0x10000
+
+#define PHY_POWER_DOWN	0x800
+
+#define SROM_V41_CODE   0x14
+
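+/*
+ * SROM_CLK_WRITE shifts one bit out to the serial ROM: it presents the data
+ * bit with chip select (CR9_SRCS) asserted, pulses CR9_SRCLK high, then
+ * drops the clock again, with a 5 us settling delay around each edge.
+ */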
+#define SROM_CLK_WRITE(data, ioaddr) outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr);udelay(5);outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr);udelay(5);outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr);udelay(5);
+
+/* Sten Check */
+#define DEVICE net_device
+
+/* Structure/enum declaration ------------------------------- */
+struct tx_desc {
+        u32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
+        char *tx_buf_ptr;               /* Data for us */
+        struct tx_desc *next_tx_desc;
+} __attribute__(( aligned(32) ));
+
+struct rx_desc {
+	u32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */
+	struct sk_buff *rx_skb_ptr;	/* Data for us */
+	struct rx_desc *next_rx_desc;
+} __attribute__(( aligned(32) ));
+
+struct uli526x_board_info {
+	u32 chip_id;			/* Chip vendor/Device ID */
+	struct DEVICE *next_dev;	/* next device */
+	struct pci_dev *pdev;		/* PCI device */
+	spinlock_t lock;
+
+	long ioaddr;			/* I/O base address */
+	u32 cr0_data;
+	u32 cr5_data;
+	u32 cr6_data;
+	u32 cr7_data;
+	u32 cr15_data;
+
+	/* pointer for memory physical address */
+	dma_addr_t buf_pool_dma_ptr;	/* Tx buffer pool memory */
+	dma_addr_t buf_pool_dma_start;	/* Tx buffer pool align dword */
+	dma_addr_t desc_pool_dma_ptr;	/* descriptor pool memory */
+	dma_addr_t first_tx_desc_dma;
+	dma_addr_t first_rx_desc_dma;
+
+	/* descriptor pointer */
+	unsigned char *buf_pool_ptr;	/* Tx buffer pool memory */
+	unsigned char *buf_pool_start;	/* Tx buffer pool align dword */
+	unsigned char *desc_pool_ptr;	/* descriptor pool memory */
+	struct tx_desc *first_tx_desc;
+	struct tx_desc *tx_insert_ptr;
+	struct tx_desc *tx_remove_ptr;
+	struct rx_desc *first_rx_desc;
+	struct rx_desc *rx_insert_ptr;
+	struct rx_desc *rx_ready_ptr;	/* incoming packet pointer */
+	unsigned long tx_packet_cnt;	/* transmitted packet count */
+	unsigned long rx_avail_cnt;	/* available rx descriptor count */
+	unsigned long interval_rx_cnt;	/* rx packets counted per timer interval */
+
+	u16 dbug_cnt;
+	u16 NIC_capability;		/* NIC media capability */
+	u16 PHY_reg4;			/* Saved Phyxcer register 4 value */
+
+	u8 media_mode;			/* user-specified media mode */
+	u8 op_mode;			/* actual working media mode */
+	u8 phy_addr;
+	u8 link_failed;			/* set when the link is down */
+	u8 wait_reset;			/* Hardware failed, need to reset */
+	struct timer_list timer;
+
+	/* System defined statistic counter */
+	struct net_device_stats stats;
+
+	/* Driver defined statistic counter */
+	unsigned long tx_fifo_underrun;
+	unsigned long tx_loss_carrier;
+	unsigned long tx_no_carrier;
+	unsigned long tx_late_collision;
+	unsigned long tx_excessive_collision;
+	unsigned long tx_jabber_timeout;
+	unsigned long reset_count;
+	unsigned long reset_cr8;
+	unsigned long reset_fatal;
+	unsigned long reset_TXtimeout;
+
+	/* NIC SROM data */
+	unsigned char srom[128];
+	u8 init;	
+};
+
+enum uli526x_offsets {
+	DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
+	DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
+	DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
+	DCR15 = 0x78
+};
+
+enum uli526x_CR6_bits {
+	CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
+	CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
+	CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
+};
+
+/* Global variable declaration ----------------------------- */
+static int __devinitdata printed_version;
+static char version[] __devinitdata =
+	KERN_INFO DRV_NAME ": ULi M5261/M5263 net driver, version "
+	DRV_VERSION " (" DRV_RELDATE ")\n";
+
+static int uli526x_debug;
+static unsigned char uli526x_media_mode = ULI526X_AUTO;
+static u32 uli526x_cr6_user_set;
+
+/* For module input parameter */
+static int debug;
+static u32 cr6set;
+static u32 m526x_id;
+static unsigned char mode = 8;
+
+/* function declaration ------------------------------------- */
+static int uli526x_open(struct DEVICE *);
+static int uli526x_start_xmit(struct sk_buff *, struct DEVICE *);
+static int uli526x_stop(struct DEVICE *);
+static struct net_device_stats * uli526x_get_stats(struct DEVICE *);
+static void uli526x_set_filter_mode(struct DEVICE *);
+static struct ethtool_ops netdev_ethtool_ops;
+static u16 read_srom_word(long ,int);
+static irqreturn_t uli526x_interrupt(int , void *, struct pt_regs *);
+static void uli526x_descriptor_init(struct uli526x_board_info *, unsigned long);
+static void allocate_rx_buffer(struct uli526x_board_info *);
+static void update_cr6(u32, unsigned long);
+static void send_filter_frame(struct DEVICE * ,int);
+static u16 phy_read(unsigned long, u8, u8, u32);
+static u16 phy_readby_cr10(unsigned long, u8, u8);
+static void phy_write(unsigned long, u8, u8, u16, u32);
+static void phy_writeby_cr10(unsigned long, u8, u8, u16);
+static void phy_write_1bit(unsigned long, u32, u32);
+static u16 phy_read_1bit(unsigned long, u32);
+static u8 uli526x_sense_speed(struct uli526x_board_info *);
+static void uli526x_process_mode(struct uli526x_board_info *);
+static void uli526x_timer(unsigned long);
+static void uli526x_rx_packet(struct DEVICE *, struct uli526x_board_info *);
+static void uli526x_free_tx_pkt(struct DEVICE *, struct uli526x_board_info *);
+static void uli526x_reuse_skb(struct uli526x_board_info *, struct sk_buff *);
+static void uli526x_dynamic_reset(struct DEVICE *);
+static void uli526x_free_rxbuffer(struct uli526x_board_info *);
+static void uli526x_init(struct DEVICE *);
+static void uli526x_set_phyxcer(struct uli526x_board_info *);
+
+/* ULI526X network board routine ---------------------------- */
+
+/*
+ *	Search for a ULI526X board, allocate space and register it
+ */
+
+static int __devinit uli526x_init_one (struct pci_dev *pdev,
+				    const struct pci_device_id *ent)
+{
+	struct uli526x_board_info *db;	/* board information structure */
+	struct net_device *dev;
+	int i, err;
+	u32 configval;
+
+	ULI526X_DBUG(0, "uli526x_init_one()", 0);
+
+	if (!printed_version++)
+		printk(version);
+
+	/* Init network device */
+	dev = alloc_etherdev(sizeof(*db));
+	if (dev == NULL)
+		return -ENOMEM;
+	SET_MODULE_OWNER(dev);
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	if (pci_set_dma_mask(pdev, 0xffffffff)) {
+		printk(KERN_WARNING DRV_NAME ": 32-bit PCI DMA not available.\n");
+		err = -ENODEV;
+		goto err_out_free;
+	}
+
+	/* Enable Master/IO access, Disable memory access */
+	err = pci_enable_device(pdev);
+	if (err)
+		goto err_out_free;
+
+	if (!pci_resource_start(pdev, 0)) {
+		printk(KERN_ERR DRV_NAME ": I/O base is zero\n");
+		err = -ENODEV;
+		goto err_out_disable;
+	}
+
+	if (pci_resource_len(pdev, 0) < (ULI526X_IO_SIZE) ) {
+		printk(KERN_ERR DRV_NAME ": Allocated I/O size too small\n");
+		err = -ENODEV;
+		goto err_out_disable;
+	}
+
+	if (pci_request_regions(pdev, DRV_NAME)) {
+		printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n");
+		err = -ENODEV;
+		goto err_out_disable;
+	}
+
+	//add by clearzhang 2004/7/8
+	pci_read_config_dword(pdev,0x0,&configval);
+	m526x_id = configval;
+	if(configval == 0x526310b9)
+	{
+		//printk("is m5263\n");
+		pci_read_config_dword(pdev,0x0c,&configval);
+		configval = ((configval & 0xffff00ff) | 0x8000);
+		pci_write_config_dword(pdev,0x0c,configval);
+	}
+	/* Init system & device */
+	db = netdev_priv(dev);
+
+	/* Allocate Tx/Rx descriptor memory */
+	db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
+	db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
+
+	db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
+	db->first_tx_desc_dma = db->desc_pool_dma_ptr;
+	db->buf_pool_start = db->buf_pool_ptr;
+	db->buf_pool_dma_start = db->buf_pool_dma_ptr;
+
+	db->chip_id = ent->driver_data;
+	db->ioaddr = pci_resource_start(pdev, 0);
+	
+	db->pdev = pdev;
+	db->init = 1;
+	
+	dev->base_addr = db->ioaddr;
+	dev->irq = pdev->irq;
+	pci_set_drvdata(pdev, dev);
+	
+	/* Register some necessary functions */
+	dev->open = &uli526x_open;
+	dev->hard_start_xmit = &uli526x_start_xmit;
+	dev->stop = &uli526x_stop;
+	dev->get_stats = &uli526x_get_stats;
+	dev->set_multicast_list = &uli526x_set_filter_mode;
+	dev->ethtool_ops = &netdev_ethtool_ops;
+	spin_lock_init(&db->lock);
+
+		
+	/* read 64 word srom data */
+	for (i = 0; i < 64; i++)
+		((u16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i));
+
+	/* Set Node address */
+	if(((u16 *) db->srom)[0] == 0xffff)		/* SROM absent, so read MAC address from ID Table */
+	{
+		outl(0x10000, db->ioaddr + DCR0);	//Diagnosis mode
+	outl(0x1c0, db->ioaddr + DCR13);	//Reset diagnostic pointer port
+		outl(0, db->ioaddr + DCR14);		//Clear reset port
+		outl(0x10, db->ioaddr + DCR14);		//Reset ID Table pointer
+		outl(0, db->ioaddr + DCR14);		//Clear reset port
+		outl(0, db->ioaddr + DCR13);		//Clear CR13
+		outl(0x1b0, db->ioaddr + DCR13);	//Select ID Table access port
+		//Read MAC address from CR14
+		for (i = 0; i < 6; i++)
+			dev->dev_addr[i] = inl(db->ioaddr + DCR14);
+		//Read end
+		outl(0, db->ioaddr + DCR13);	//Clear CR13
+		outl(0, db->ioaddr + DCR0);		//Clear CR0
+		udelay(10);
+	}
+	else		/* SROM exists */
+	{
+		for (i = 0; i < 6; i++)
+			dev->dev_addr[i] = db->srom[20 + i];
+	}
+	err = register_netdev (dev);
+	if (err)
+		goto err_out_res;
+
+	printk(KERN_INFO "%s: ULi M%04lx at pci%s,",dev->name,ent->driver_data >> 16,pci_name(pdev));
+	
+	for (i = 0; i < 6; i++)
+		printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]);
+	printk(", irq %d.\n", dev->irq);
+
+	pci_set_master(pdev);
+
+	return 0;
+
+err_out_res:
+	pci_release_regions(pdev);
+err_out_disable:
+	pci_disable_device(pdev);
+err_out_free:
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(dev);
+
+	return err;
+}
+
+
+static void __devexit uli526x_remove_one (struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct uli526x_board_info *db = netdev_priv(dev);
+
+	ULI526X_DBUG(0, "uli526x_remove_one()", 0);
+
+ 	if (dev) {
+		pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
+					DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
+ 					db->desc_pool_dma_ptr);
+		pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
+					db->buf_pool_ptr, db->buf_pool_dma_ptr);
+		unregister_netdev(dev);
+		pci_release_regions(pdev);
+		free_netdev(dev);	/* free board information */
+		pci_set_drvdata(pdev, NULL);
+	}
+
+	ULI526X_DBUG(0, "uli526x_remove_one() exit", 0);
+}
+
+
+/*
+ *	Open the interface.
+ *	The interface is opened whenever "ifconfig" activates it.
+ */
+
+static int uli526x_open(struct DEVICE *dev)
+{
+	int ret;
+	struct uli526x_board_info *db = netdev_priv(dev);
+
+	ULI526X_DBUG(0, "uli526x_open", 0);
+
+	ret = request_irq(dev->irq, &uli526x_interrupt, SA_SHIRQ, dev->name, dev);
+	if (ret)
+		return ret;
+
+	/* system variable init */
+	db->cr6_data = CR6_DEFAULT | uli526x_cr6_user_set;
+	if(m526x_id == 0x526310b9)
+	{  
+		//printk("is 5263\n");
+		db->cr6_data = CR6_DEFAULT_A | uli526x_cr6_user_set;
+	}
+	db->tx_packet_cnt = 0;
+	db->rx_avail_cnt = 0;
+	db->link_failed = 1;
+	netif_carrier_off(dev);
+	db->wait_reset = 0;
+
+	db->NIC_capability = 0xf;	/* All capability*/
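+	/* 0x1e0 in MII register 4 advertises 10/100M, half and full duplex
+	 * (the same bits uli526x_set_phyxcer() sets individually in force mode) */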
+	db->PHY_reg4 = 0x1e0;
+
+	/* CR6 operation mode decision */
+	db->cr6_data |= ULI526X_TXTH_256;
+	db->cr0_data = CR0_DEFAULT;
+	
+	/* Initialize ULI526X board */
+	uli526x_init(dev);
+
+	/* Activate the system interface */
+	netif_wake_queue(dev);
+
+	/* set and activate a timer process */
+	init_timer(&db->timer);
+	db->timer.expires = ULI526X_TIMER_WUT + HZ * 2;
+	db->timer.data = (unsigned long)dev;
+	db->timer.function = &uli526x_timer;
+	add_timer(&db->timer);
+
+	return 0;
+}
+
+
+/*	Initialize ULI526X board
+ *	Reset ULI526X board
+ *	Initialize Tx/Rx descriptor chain structure
+ *	Send the set-up frame
+ *	Enable Tx/Rx machine
+ */
+
+static void uli526x_init(struct DEVICE *dev)
+{
+	struct uli526x_board_info *db = netdev_priv(dev);
+	unsigned long ioaddr = db->ioaddr;
+	u8	phy_tmp;
+	u16	phy_value;
+	u16 phy_reg_reset;
+
+	ULI526X_DBUG(0, "uli526x_init()", 0);
+
+	/* Reset M526x MAC controller */
+	outl(ULI526X_RESET, ioaddr + DCR0);	/* RESET MAC */
+	udelay(100);
+	outl(db->cr0_data, ioaddr + DCR0);
+	udelay(5);
+
+	/* PHY address: on some boards the M5261/M5263 PHY address != 1 */
+	db->phy_addr = 1;
+	for(phy_tmp=0;phy_tmp<32;phy_tmp++)
+	{
+		phy_value=phy_read(db->ioaddr,phy_tmp,3,db->chip_id);//peer add
+		if(phy_value != 0xffff&&phy_value!=0)
+		{
+			db->phy_addr = phy_tmp;
+			break;
+		}
+	}
+	if(phy_tmp == 32)
+		printk(KERN_WARNING "Cannot find the PHY address!\n");
+	/* Parse SROM and media mode */
+	db->media_mode = uli526x_media_mode;
+
+	//add by clearzhang 2004/7/8
+	/* RESET Phyxcer Chip by GPR port bit 7 */
+	//outl(0x180, ioaddr + DCR12);		/* Let bit 7 output port */
+	//outl(0x0, ioaddr + DCR12);	/* Clear RESET signal */
+
+	/* Phyxcer capability setting */
+	phy_reg_reset = phy_read(db->ioaddr, db->phy_addr, 0, db->chip_id);
+	phy_reg_reset = (phy_reg_reset | 0x8000);
+	phy_write(db->ioaddr, db->phy_addr, 0, phy_reg_reset, db->chip_id);
+	udelay(500);
+
+	/* Process Phyxcer Media Mode */
+	uli526x_set_phyxcer(db);
+
+	/* Media Mode Process */
+	if ( !(db->media_mode & ULI526X_AUTO) )
+		db->op_mode = db->media_mode; 	/* Force Mode */
+
+	/* Initialize Transmit/Receive descriptors and CR3/4 */
+	uli526x_descriptor_init(db, ioaddr);
+
+	/* Init CR6 to program M526X operation */
+	update_cr6(db->cr6_data, ioaddr);
+
+	/* Send setup frame */
+	send_filter_frame(dev, dev->mc_count);	/* M5261/M5263 */
+
+	/* Init CR7, interrupt active bit */
+	db->cr7_data = CR7_DEFAULT;
+	outl(db->cr7_data, ioaddr + DCR7);
+
+	/* Init CR15, Tx jabber and Rx watchdog timer */
+	outl(db->cr15_data, ioaddr + DCR15);
+
+	/* Enable ULI526X Tx/Rx function */
+	db->cr6_data |= CR6_RXSC | CR6_TXSC;
+	update_cr6(db->cr6_data, ioaddr);
+}
+
+
+/*
+ *	Hardware start transmission.
+ *	Send a packet to media from the upper layer.
+ */
+
+static int uli526x_start_xmit(struct sk_buff *skb, struct DEVICE *dev)
+{
+	struct uli526x_board_info *db = netdev_priv(dev);
+	struct tx_desc *txptr;
+	unsigned long flags;
+
+	ULI526X_DBUG(0, "uli526x_start_xmit", 0);
+
+	/* Resource flag check */
+	netif_stop_queue(dev);
+
+	/* Too large packet check */
+	if (skb->len > MAX_PACKET_SIZE) {
+		printk(KERN_ERR DRV_NAME ": big packet = %d\n", (u16)skb->len);
+		dev_kfree_skb(skb);
+		return 0;
+	}
+
+	spin_lock_irqsave(&db->lock, flags);
+
+	/* No Tx resource check, it never happens normally */
+	if (db->tx_packet_cnt >= TX_FREE_DESC_CNT) {
+		spin_unlock_irqrestore(&db->lock, flags);
+		printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n", db->tx_packet_cnt);
+		return 1;
+	}
+
+	/* Disable NIC interrupt */
+	outl(0, dev->base_addr + DCR7);
+
+	/* transmit this packet */
+	txptr = db->tx_insert_ptr;
+	memcpy(txptr->tx_buf_ptr, skb->data, skb->len);
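+	/* TDES1: interrupt-on-completion and chained-descriptor bits (as set in
+	 * the ring init) plus first/last-segment flags and the frame length */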
+	txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);
+
+	/* Point to next transmit free descriptor */
+	db->tx_insert_ptr = txptr->next_tx_desc;
+
+	/* Transmit Packet Process */
+	if ( (db->tx_packet_cnt < TX_DESC_CNT) ) {
+		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
+		db->tx_packet_cnt++;			/* Ready to send */
+		outl(0x1, dev->base_addr + DCR1);	/* Issue Tx polling */
+		dev->trans_start = jiffies;		/* saved time stamp */
+	}
+
+	/* Tx resource check */
+	if ( db->tx_packet_cnt < TX_FREE_DESC_CNT )
+		netif_wake_queue(dev);
+
+	/* Restore CR7 to enable interrupt */
+	spin_unlock_irqrestore(&db->lock, flags);
+	outl(db->cr7_data, dev->base_addr + DCR7);
+	
+	/* free this SKB */
+	dev_kfree_skb(skb);
+
+	return 0;
+}
+
+
+/*
+ *	Stop the interface.
+ *	The interface is stopped when it is brought down.
+ */
+
+static int uli526x_stop(struct DEVICE *dev)
+{
+	struct uli526x_board_info *db = netdev_priv(dev);
+	unsigned long ioaddr = dev->base_addr;
+
+	ULI526X_DBUG(0, "uli526x_stop", 0);
+
+	/* disable system */
+	netif_stop_queue(dev);
+
+	/* deleted timer */
+	del_timer_sync(&db->timer);
+
+	/* Reset & stop ULI526X board */
+	outl(ULI526X_RESET, ioaddr + DCR0);
+	udelay(5);
+	phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
+
+	/* free interrupt */
+	free_irq(dev->irq, dev);
+
+	/* free allocated rx buffer */
+	uli526x_free_rxbuffer(db);
+
+#if 0
+	/* show statistic counter */
+	printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
+		db->tx_fifo_underrun, db->tx_excessive_collision,
+		db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
+		db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
+		db->reset_fatal, db->reset_TXtimeout);
+#endif
+
+	return 0;
+}
+
+
+/*
+ *	M5261/M5263 interrupt handler
+ *	pass received packets to the upper layer, free transmitted packets
+ */
+
+static irqreturn_t uli526x_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct DEVICE *dev = dev_id;
+	struct uli526x_board_info *db = netdev_priv(dev);
+	unsigned long ioaddr = dev->base_addr;
+	unsigned long flags;
+
+       //ULI526X_DBUG(0, "uli526x_interrupt()", 0);
+
+	if (!dev) {
+		ULI526X_DBUG(1, "uli526x_interrupt() without DEVICE arg", 0);
+		return IRQ_NONE;
+	}
+
+	//outl(0, ioaddr + DCR7);
+	spin_lock_irqsave(&db->lock, flags);
+	outl(0, ioaddr + DCR7);
+
+	/* Got ULI526X status */
+	db->cr5_data = inl(ioaddr + DCR5);
+	outl(db->cr5_data, ioaddr + DCR5);
+	if ( !(db->cr5_data & 0x180c1) ) {
+		spin_unlock_irqrestore(&db->lock, flags);
+		outl(db->cr7_data, ioaddr + DCR7);
+		return IRQ_HANDLED;
+	}
+
+	/* Disable all interrupt in CR7 to solve the interrupt edge problem */
+	//outl(0, ioaddr + DCR7);
+
+	/* Check system status */
+	if (db->cr5_data & 0x2000) {
+		/* system bus error happen */
+		ULI526X_DBUG(1, "System bus error happen. CR5=", db->cr5_data);
+		db->reset_fatal++;
+		db->wait_reset = 1;	/* Need to RESET */
+		spin_unlock_irqrestore(&db->lock, flags);
+		return IRQ_HANDLED;
+	}
+
+	/* Receive the incoming packet */
+	if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
+		uli526x_rx_packet(dev, db);
+
+	/* reallocate rx descriptor buffer */
+	if (db->rx_avail_cnt<RX_DESC_CNT)
+		allocate_rx_buffer(db);
+
+	/* Free the transmitted descriptor */
+	if ( db->cr5_data & 0x01)
+		uli526x_free_tx_pkt(dev, db);
+
+	/* Restore CR7 to enable interrupt mask */
+	outl(db->cr7_data, ioaddr + DCR7);
+
+	spin_unlock_irqrestore(&db->lock, flags);
+	return IRQ_HANDLED;
+}
+
+
+/*
+ *	Free TX resource after TX complete
+ */
+
+static void uli526x_free_tx_pkt(struct DEVICE *dev, struct uli526x_board_info * db)
+{
+	struct tx_desc *txptr;
+//	unsigned long ioaddr = dev->base_addr;
+	u32 tdes0;
+
+	txptr = db->tx_remove_ptr;
+	while(db->tx_packet_cnt) {
+		tdes0 = le32_to_cpu(txptr->tdes0);
+		/* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
+		if (tdes0 & 0x80000000)
+			break;
+
+		/* A packet sent completed */
+		db->tx_packet_cnt--;
+		db->stats.tx_packets++;
+
+		/* Transmit statistic counter */
+		if ( tdes0 != 0x7fffffff ) {
+			/* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
+			db->stats.collisions += (tdes0 >> 3) & 0xf;
+			db->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
+			if (tdes0 & TDES0_ERR_MASK) {
+				db->stats.tx_errors++;
+				if (tdes0 & 0x0002) {	/* UnderRun */
+					db->tx_fifo_underrun++;
+					if ( !(db->cr6_data & CR6_SFT) ) {
+						db->cr6_data = db->cr6_data | CR6_SFT;
+						update_cr6(db->cr6_data, db->ioaddr);
+					}
+				}
+				if (tdes0 & 0x0100)
+					db->tx_excessive_collision++;
+				if (tdes0 & 0x0200)
+					db->tx_late_collision++;
+				if (tdes0 & 0x0400)
+					db->tx_no_carrier++;
+				if (tdes0 & 0x0800)
+					db->tx_loss_carrier++;
+				if (tdes0 & 0x4000)
+					db->tx_jabber_timeout++;
+			}
+		}
+
+    		txptr = txptr->next_tx_desc;
+	}/* End of while */
+
+	/* Update TX remove pointer to next */
+	db->tx_remove_ptr = txptr;
+
+	/* Resource available check */
+	if ( db->tx_packet_cnt < TX_WAKE_DESC_CNT )
+		netif_wake_queue(dev);	/* Active upper layer, send again */
+}
+
+
+/*
+ *	Receive the incoming packet and pass it to the upper layer
+ */
+
+static void uli526x_rx_packet(struct DEVICE *dev, struct uli526x_board_info * db)
+{
+	struct rx_desc *rxptr;
+	struct sk_buff *skb;
+	int rxlen;
+	u32 rdes0;
+	
+	rxptr = db->rx_ready_ptr;
+
+	while(db->rx_avail_cnt) {
+		rdes0 = le32_to_cpu(rxptr->rdes0);
+		if (rdes0 & 0x80000000)	/* packet owner check */
+		{
+			break;
+		}
+
+		db->rx_avail_cnt--;
+		db->interval_rx_cnt++;
+
+		pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2), RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
+		if ( (rdes0 & 0x300) != 0x300) {
+			/* A packet without First/Last flag */
+			/* reuse this SKB */
+			ULI526X_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
+			uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
+		} else {
+			/* A packet with First/Last flag */
+			rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;
+
+			/* error summary bit check */
+			if (rdes0 & 0x8000) {
+				/* This is an error packet */
+				//printk(DRV_NAME ": rdes0: %lx\n", rdes0);
+				db->stats.rx_errors++;
+				if (rdes0 & 1)
+					db->stats.rx_fifo_errors++;
+				if (rdes0 & 2)
+					db->stats.rx_crc_errors++;
+				if (rdes0 & 0x80)
+					db->stats.rx_length_errors++;
+			}
+
+			if ( !(rdes0 & 0x8000) ||
+				((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
+				skb = rxptr->rx_skb_ptr;
+		
+				/* Good packet, send to upper layer */
+				/* Short packet copied into a new SKB */
+				if ( (rxlen < RX_COPY_SIZE) &&
+					( (skb = dev_alloc_skb(rxlen + 2) )
+					!= NULL) ) {
+					/* size less than COPY_SIZE, allocate a rxlen SKB */
+					skb->dev = dev;
+					skb_reserve(skb, 2); /* 16byte align */
+					memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->tail, rxlen);
+					uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
+				} else {
+					skb->dev = dev;
+					skb_put(skb, rxlen);
+				}
+				skb->protocol = eth_type_trans(skb, dev);
+				netif_rx(skb);
+				dev->last_rx = jiffies;
+				db->stats.rx_packets++;
+				db->stats.rx_bytes += rxlen;
+				
+			} else {
+				/* Reuse the SKB buffer when the packet has an error */
+				ULI526X_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
+				uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
+			}
+		}
+
+		rxptr = rxptr->next_rx_desc;
+	}
+
+	db->rx_ready_ptr = rxptr;
+}
+
+
+/*
+ *	Get statistics from driver.
+ */
+
+static struct net_device_stats * uli526x_get_stats(struct DEVICE *dev)
+{
+	struct uli526x_board_info *db = netdev_priv(dev);
+
+	ULI526X_DBUG(0, "uli526x_get_stats", 0);
+	return &db->stats;
+}
+
+
+/*
+ * Set ULI526X multicast address
+ */
+
+static void uli526x_set_filter_mode(struct DEVICE * dev)
+{
+	struct uli526x_board_info *db = netdev_priv(dev);
+	unsigned long flags;
+
+	ULI526X_DBUG(0, "uli526x_set_filter_mode()", 0);
+	spin_lock_irqsave(&db->lock, flags);
+
+	if (dev->flags & IFF_PROMISC) {
+		ULI526X_DBUG(0, "Enable PROM Mode", 0);
+		db->cr6_data |= CR6_PM | CR6_PBF;
+		update_cr6(db->cr6_data, db->ioaddr);
+		spin_unlock_irqrestore(&db->lock, flags);
+		return;
+	}
+
+	if (dev->flags & IFF_ALLMULTI || dev->mc_count > ULI5261_MAX_MULTICAST) {
+		ULI526X_DBUG(0, "Pass all multicast address", dev->mc_count);
+		db->cr6_data &= ~(CR6_PM | CR6_PBF);
+		db->cr6_data |= CR6_PAM;
+		spin_unlock_irqrestore(&db->lock, flags);
+		return;
+	}
+
+	ULI526X_DBUG(0, "Set multicast address", dev->mc_count);
+	send_filter_frame(dev, dev->mc_count); 	/* M5261/M5263 */
+	spin_unlock_irqrestore(&db->lock, flags);
+}
+
+static void
+ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd)
+{
+	//struct e1000_hw *hw = &adapter->hw;
+
+	{
+
+		ecmd->supported = (SUPPORTED_10baseT_Half |
+		                   SUPPORTED_10baseT_Full |
+		                   SUPPORTED_100baseT_Half |
+		                   SUPPORTED_100baseT_Full |
+		                   SUPPORTED_Autoneg |
+		                   SUPPORTED_MII);
+		
+		ecmd->advertising = (ADVERTISED_10baseT_Half |
+		                   ADVERTISED_10baseT_Full |
+		                   ADVERTISED_100baseT_Half |
+		                   ADVERTISED_100baseT_Full |
+		                   ADVERTISED_Autoneg |
+		                   ADVERTISED_MII);
+
+
+		ecmd->port = PORT_MII;
+		ecmd->phy_address = db->phy_addr;
+
+		ecmd->transceiver = XCVR_EXTERNAL;
+		
+
+	}
+
+	
+	ecmd->speed = 10;
+	ecmd->duplex = DUPLEX_HALF;
+	
+	if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD)
+	{
+		ecmd->speed = 100;               
+	}
+	if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD)
+	{
+		ecmd->duplex = DUPLEX_FULL;
+	}
+	if(db->link_failed)
+	{
+		ecmd->speed = -1;
+		ecmd->duplex = -1;	
+	}
+	
+	if (db->media_mode & ULI526X_AUTO)
+	{	
+		ecmd->autoneg = AUTONEG_ENABLE;
+	}
+	
+	
+}
+
+static void netdev_get_drvinfo(struct net_device *dev,
+			       struct ethtool_drvinfo *info)
+{
+	struct uli526x_board_info *np = netdev_priv(dev);
+
+	strcpy(info->driver, DRV_NAME);
+	strcpy(info->version, DRV_VERSION);
+	if (np->pdev)
+		strcpy(info->bus_info, pci_name(np->pdev));
+	else
+		sprintf(info->bus_info, "EISA 0x%lx %d",
+			dev->base_addr, dev->irq);
+}
+
+static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
+	struct uli526x_board_info *np = netdev_priv(dev);
+	
+	ULi_ethtool_gset(np, cmd);
+	
+	return 0;
+}
+
+static u32 netdev_get_link(struct net_device *dev) {
+	struct uli526x_board_info *np = netdev_priv(dev);
+		
+	if(np->link_failed)
+		return 0;
+	else
+		return 1;
+}
+
+static void uli526x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	wol->supported = WAKE_PHY | WAKE_MAGIC;
+	wol->wolopts = 0;
+}
+
+static struct ethtool_ops netdev_ethtool_ops = {
+	.get_drvinfo		= netdev_get_drvinfo,
+	.get_settings		= netdev_get_settings,
+	.get_link		= netdev_get_link,
+	.get_wol		= uli526x_get_wol,
+};
+
+/*
+ *	A periodic timer routine
+ *	Dynamic media sense, allocate Rx buffer...
+ */
+
+static void uli526x_timer(unsigned long data)
+{
+	u32 tmp_cr8;
+	unsigned char tmp_cr12=0;
+	struct DEVICE *dev = (struct DEVICE *) data;
+	struct uli526x_board_info *db = netdev_priv(dev);
+ 	unsigned long flags;
+	u8 TmpSpeed=10;
+	
+	//ULI526X_DBUG(0, "uli526x_timer()", 0);
+	spin_lock_irqsave(&db->lock, flags);
+
+	
+	/* Dynamic reset ULI526X : system error or transmit time-out */
+	tmp_cr8 = inl(db->ioaddr + DCR8);
+	if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
+		db->reset_cr8++;
+		db->wait_reset = 1;
+	}
+	db->interval_rx_cnt = 0;
+
+	/* TX polling kick monitor */
+	if ( db->tx_packet_cnt &&
+	     time_after(jiffies, dev->trans_start + ULI526X_TX_KICK) ) {
+		outl(0x1, dev->base_addr + DCR1);   // Tx polling again 
+
+		// TX Timeout 
+		if ( time_after(jiffies, dev->trans_start + ULI526X_TX_TIMEOUT) ) {
+			db->reset_TXtimeout++;
+			db->wait_reset = 1;
+			printk( "%s: Tx timeout - resetting\n",
+			       dev->name);
+		}
+	}
+
+	if (db->wait_reset) {
+		ULI526X_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
+		db->reset_count++;
+		uli526x_dynamic_reset(dev);
+		db->timer.expires = ULI526X_TIMER_WUT;
+		add_timer(&db->timer);
+		spin_unlock_irqrestore(&db->lock, flags);
+		return;
+	}
+
+	/* Link status check, Dynamic media type change */
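+	/* Any 10/100 ability bit (0x01e0) in PHY register 5 (the link partner
+	 * ability register) is taken to mean the link is up */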
+	if((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)!=0)
+		tmp_cr12 = 3;
+
+	if ( !(tmp_cr12 & 0x3) && !db->link_failed ) {
+		/* Link Failed */
+		ULI526X_DBUG(0, "Link Failed", tmp_cr12);
+		netif_carrier_off(dev);
+		printk(KERN_INFO "uli526x: %s NIC Link is Down\n",dev->name);
+		db->link_failed = 1;
+
+		/* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
+		/* not needed in AUTO mode */
+		if ( !(db->media_mode & 0x8) )
+			phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
+
+		/* AUTO mode, if INT phyxcer link failed, select EXT device */
+		if (db->media_mode & ULI526X_AUTO) {
+			db->cr6_data&=~0x00000200;	/* bit9=0, HD mode */
+			update_cr6(db->cr6_data, db->ioaddr);
+		}
+	} else
+		if ((tmp_cr12 & 0x3) && db->link_failed) {
+			ULI526X_DBUG(0, "Link OK", tmp_cr12);
+			db->link_failed = 0;
+
+			/* Auto Sense Speed */
+			if ( (db->media_mode & ULI526X_AUTO) &&
+				uli526x_sense_speed(db) )
+				db->link_failed = 1;
+			uli526x_process_mode(db);
+			
+			if(db->link_failed==0)
+			{
+				if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD)
+				{
+					TmpSpeed = 100;
+				}
+				if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD)
+				{
+					printk(KERN_INFO "uli526x: %s NIC Link is Up %d Mbps Full duplex\n",dev->name,TmpSpeed);
+				}
+				else
+				{
+					printk(KERN_INFO "uli526x: %s NIC Link is Up %d Mbps Half duplex\n",dev->name,TmpSpeed);
+				}
+				netif_carrier_on(dev);
+			}
+			/* SHOW_MEDIA_TYPE(db->op_mode); */
+		}
+		else if(!(tmp_cr12 & 0x3) && db->link_failed)
+		{
+			if(db->init==1)
+			{
+				printk(KERN_INFO "uli526x: %s NIC Link is Down\n",dev->name);
+				netif_carrier_off(dev);
+			}
+		}
+		db->init=0;
+
+	/* Timer active again */
+	db->timer.expires = ULI526X_TIMER_WUT;
+	add_timer(&db->timer);
+	spin_unlock_irqrestore(&db->lock, flags);
+}
+
+
+/*
+ *	Dynamic reset the ULI526X board
+ *	Stop ULI526X board
+ *	Free Tx/Rx allocated memory
+ *	Reset ULI526X board
+ *	Re-initialize ULI526X board
+ */
+
+static void uli526x_dynamic_reset(struct DEVICE *dev)
+{
+	struct uli526x_board_info *db = netdev_priv(dev);
+
+	ULI526X_DBUG(0, "uli526x_dynamic_reset()", 0);
+
+	/* Stop MAC controller */
+	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);	/* Disable Tx/Rx */
+	update_cr6(db->cr6_data, dev->base_addr);
+	outl(0, dev->base_addr + DCR7);		/* Disable Interrupt */
+	outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5);
+
+	/* Disable upper layer interface */
+	netif_stop_queue(dev);
+
+	/* Free Rx Allocate buffer */
+	uli526x_free_rxbuffer(db);
+
+	/* system variable init */
+	db->tx_packet_cnt = 0;
+	db->rx_avail_cnt = 0;
+	db->link_failed = 1;
+	db->init=1;
+	db->wait_reset = 0;
+
+	/* Re-initialize ULI526X board */
+	uli526x_init(dev);
+
+	/* Restart upper layer interface */
+	netif_wake_queue(dev);
+}
+
+
+/*
+ *	Free all allocated rx buffers
+ */
+
+static void uli526x_free_rxbuffer(struct uli526x_board_info * db)
+{
+	ULI526X_DBUG(0, "uli526x_free_rxbuffer()", 0);
+
+	/* free allocated rx buffer */
+	while (db->rx_avail_cnt) {
+		dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
+		db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
+		db->rx_avail_cnt--;
+	}
+}
+
+
+/*
+ *	Reuse the SK buffer
+ */
+
+static void uli526x_reuse_skb(struct uli526x_board_info *db, struct sk_buff * skb)
+{
+	struct rx_desc *rxptr = db->rx_insert_ptr;
+
+	if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
+		rxptr->rx_skb_ptr = skb;
+		rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
+		wmb();
+		rxptr->rdes0 = cpu_to_le32(0x80000000);
+		db->rx_avail_cnt++;
+		db->rx_insert_ptr = rxptr->next_rx_desc;
+	} else
+		ULI526X_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
+}
+
+
+/*
+ *	Initialize Transmit/Receive descriptors
+ *	using a chain structure, and allocate Tx/Rx buffers
+ */
+
+static void uli526x_descriptor_init(struct uli526x_board_info *db, unsigned long ioaddr)
+{
+	struct tx_desc *tmp_tx;
+	struct rx_desc *tmp_rx;
+	unsigned char *tmp_buf;
+	dma_addr_t tmp_tx_dma, tmp_rx_dma;
+	dma_addr_t tmp_buf_dma;
+	int i;
+
+	ULI526X_DBUG(0, "uli526x_descriptor_init()", 0);
+
+	/* tx descriptor start pointer */
+	db->tx_insert_ptr = db->first_tx_desc;
+	db->tx_remove_ptr = db->first_tx_desc;
+	outl(db->first_tx_desc_dma, ioaddr + DCR4);     /* TX DESC address */
+
+	/* rx descriptor start pointer */
+	db->first_rx_desc = (void *)db->first_tx_desc + sizeof(struct tx_desc) * TX_DESC_CNT;
+	db->first_rx_desc_dma =  db->first_tx_desc_dma + sizeof(struct tx_desc) * TX_DESC_CNT;
+	db->rx_insert_ptr = db->first_rx_desc;
+	db->rx_ready_ptr = db->first_rx_desc;
+	outl(db->first_rx_desc_dma, ioaddr + DCR3);	/* RX DESC address */
+
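+	/* The descriptor pool holds the Tx ring followed by the Rx ring; each
+	 * descriptor's fourth word chains to the next descriptor's DMA address
+	 * and the last entry wraps back to the first. */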
+	/* Init Transmit chain */
+	tmp_buf = db->buf_pool_start;
+	tmp_buf_dma = db->buf_pool_dma_start;
+	tmp_tx_dma = db->first_tx_desc_dma;
+	for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
+		tmp_tx->tx_buf_ptr = tmp_buf;
+		tmp_tx->tdes0 = cpu_to_le32(0);
+		tmp_tx->tdes1 = cpu_to_le32(0x81000000);	/* IC, chain */
+		tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
+		tmp_tx_dma += sizeof(struct tx_desc);
+		tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
+		tmp_tx->next_tx_desc = tmp_tx + 1;
+		tmp_buf = tmp_buf + TX_BUF_ALLOC;
+		tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
+	}
+	(--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
+	tmp_tx->next_tx_desc = db->first_tx_desc;
+
+	 /* Init Receive descriptor chain */
+	tmp_rx_dma=db->first_rx_desc_dma;
+	for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
+		tmp_rx->rdes0 = cpu_to_le32(0);
+		tmp_rx->rdes1 = cpu_to_le32(0x01000600);
+		tmp_rx_dma += sizeof(struct rx_desc);
+		tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
+		tmp_rx->next_rx_desc = tmp_rx + 1;
+	}
+	(--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
+	tmp_rx->next_rx_desc = db->first_rx_desc;
+
+	/* pre-allocate Rx buffer */
+	allocate_rx_buffer(db);
+}
+
+
+/*
+ *	Update CR6 value
+ *	Write the new value to CR6 and let the chip latch it
+ */
+
+static void update_cr6(u32 cr6_data, unsigned long ioaddr)
+{
+
+	outl(cr6_data, ioaddr + DCR6);
+	udelay(5);
+}
+
+
+/*
+ *	Send a setup frame for M5261/M5263
+ *	This setup frame initializes the ULI526X address filter mode
+ */
+
+static void send_filter_frame(struct DEVICE *dev, int mc_cnt)
+{
+	struct uli526x_board_info *db = netdev_priv(dev);
+	struct dev_mc_list *mcptr;
+	struct tx_desc *txptr;
+	u16 * addrptr;
+	u32 * suptr;
+	int i;
+
+	ULI526X_DBUG(0, "send_filter_frame()", 0);
+
+	txptr = db->tx_insert_ptr;
+	suptr = (u32 *) txptr->tx_buf_ptr;
+
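+	/* The setup frame is 16 perfect-filter entries of three 32-bit words
+	 * each (192 bytes, matching the 0xc0 length in tdes1 below): the node
+	 * address, the broadcast address, the multicast list, then broadcast
+	 * padding for the unused slots. */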
+	/* Node address */
+	addrptr = (u16 *) dev->dev_addr;
+	*suptr++ = addrptr[0];
+	*suptr++ = addrptr[1];
+	*suptr++ = addrptr[2];
+
+	/* broadcast address */
+	*suptr++ = 0xffff;
+	*suptr++ = 0xffff;
+	*suptr++ = 0xffff;
+
+	/* fit the multicast address */
+	for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
+		addrptr = (u16 *) mcptr->dmi_addr;
+		*suptr++ = addrptr[0];
+		*suptr++ = addrptr[1];
+		*suptr++ = addrptr[2];
+	}
+
+	for (; i<14; i++) {
+		*suptr++ = 0xffff;
+		*suptr++ = 0xffff;
+		*suptr++ = 0xffff;
+	}
+
+	/* prepare the setup frame */
+	db->tx_insert_ptr = txptr->next_tx_desc;
+	txptr->tdes1 = cpu_to_le32(0x890000c0);
+
+	/* Resource Check and Send the setup packet */
+	if (db->tx_packet_cnt < TX_DESC_CNT) {
+		/* Tx descriptor available */
+		db->tx_packet_cnt++;
+		txptr->tdes0 = cpu_to_le32(0x80000000);
+		update_cr6(db->cr6_data | 0x2000, dev->base_addr);
+		outl(0x1, dev->base_addr + DCR1);	/* Issue Tx polling */
+		update_cr6(db->cr6_data, dev->base_addr);
+		dev->trans_start = jiffies;
+	} else
+		printk(KERN_ERR DRV_NAME ": No Tx resource - Send_filter_frame!\n");
+}
+
+
+/*
+ *	Allocate rx buffers,
+ *	filling as many Rx descriptors as possible
+ */
+
+static void allocate_rx_buffer(struct uli526x_board_info *db)
+{
+	struct rx_desc *rxptr;
+	struct sk_buff *skb;
+
+	rxptr = db->rx_insert_ptr;
+
+	while(db->rx_avail_cnt < RX_DESC_CNT) {
+		if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
+			break;
+		rxptr->rx_skb_ptr = skb; /* FIXME (?) */
+		rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
+		wmb();
+		rxptr->rdes0 = cpu_to_le32(0x80000000);
+		rxptr = rxptr->next_rx_desc;
+		db->rx_avail_cnt++;
+	}
+
+	db->rx_insert_ptr = rxptr;
+}
+
+
+/*
+ *	Read one word of data from the serial ROM
+ */
+
+static u16 read_srom_word(long ioaddr, int offset)
+{
+	int i;
+	u16 srom_data = 0;
+	long cr9_ioaddr = ioaddr + DCR9;
+
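+	/* Bit-bang a read from the serial EEPROM (93C46-style) through CR9:
+	 * assert chip select, clock out the read opcode and the 6-bit word
+	 * address, then clock in 16 data bits, MSB first. */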
+	outl(CR9_SROM_READ, cr9_ioaddr);
+	outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
+
+	/* Send the Read Command 110b */
+	SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
+	SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
+	SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr);
+
+	/* Send the offset */
+	for (i = 5; i >= 0; i--) {
+		srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
+		SROM_CLK_WRITE(srom_data, cr9_ioaddr);
+	}
+
+	outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
+
+	for (i = 16; i > 0; i--) {
+		outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
+		udelay(5);
+		srom_data = (srom_data << 1) | ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
+		outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
+		udelay(5);
+	}
+
+	outl(CR9_SROM_READ, cr9_ioaddr);
+	return srom_data;
+}
+
+
+/*
+ *	Auto sense the media mode
+ */
+
+static u8 uli526x_sense_speed(struct uli526x_board_info * db)
+{
+	u8 ErrFlag = 0;
+	u16 phy_mode;
+
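+	/* Register 1 (BMSR) is read twice because its link-status bit is
+	 * latched-low; 0x24 = auto-negotiation complete + link up */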
+	phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
+	phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
+
+	if ( (phy_mode & 0x24) == 0x24 ) {
+		
+		phy_mode = ((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)<<7);
+		if(phy_mode&0x8000)
+			phy_mode = 0x8000;
+		else if(phy_mode&0x4000)
+			phy_mode = 0x4000;
+		else if(phy_mode&0x2000)
+			phy_mode = 0x2000;
+		else
+			phy_mode = 0x1000;
+		
+		/* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */
+		switch (phy_mode) {
+		case 0x1000: db->op_mode = ULI526X_10MHF; break;
+		case 0x2000: db->op_mode = ULI526X_10MFD; break;
+		case 0x4000: db->op_mode = ULI526X_100MHF; break;
+		case 0x8000: db->op_mode = ULI526X_100MFD; break;
+		default: db->op_mode = ULI526X_10MHF; ErrFlag = 1; break;
+		}
+	} else {
+		db->op_mode = ULI526X_10MHF;
+		ULI526X_DBUG(0, "Link Failed :", phy_mode);
+		ErrFlag = 1;
+	}
+
+	return ErrFlag;
+}
+
+
+/*
+ *	Set 10/100 phyxcer capability
+ *	AUTO mode : phyxcer register4 is NIC capability
+ *	Force mode: phyxcer register4 is the force media
+ */
+
+static void uli526x_set_phyxcer(struct uli526x_board_info *db)
+{
+	u16 phy_reg;
+	
+	/* Phyxcer capability setting */
+	phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;
+
+	if (db->media_mode & ULI526X_AUTO) {
+		/* AUTO Mode */
+		phy_reg |= db->PHY_reg4;
+	} else {
+		/* Force Mode */
+		switch(db->media_mode) {
+		case ULI526X_10MHF: phy_reg |= 0x20; break;
+		case ULI526X_10MFD: phy_reg |= 0x40; break;
+		case ULI526X_100MHF: phy_reg |= 0x80; break;
+		case ULI526X_100MFD: phy_reg |= 0x100; break;
+		}
+		
+	}
+
+  	/* Write new capability to Phyxcer Reg4 */
+	if ( !(phy_reg & 0x01e0)) {
+		phy_reg|=db->PHY_reg4;
+		db->media_mode|=ULI526X_AUTO;
+	}
+	phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);
+
+ 	/* Restart Auto-Negotiation */
+	phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
+	udelay(50);
+}
+
+
+/*
+ *	Process op-mode
+ *	AUTO mode : PHY controller in Auto-negotiation Mode
+ *	Force mode: PHY controller in force mode with HUB
+ *			N-way force capability with SWITCH
+ */
+
+static void uli526x_process_mode(struct uli526x_board_info *db)
+{
+	u16 phy_reg;
+
+	/* Full Duplex Mode Check */
+	if (db->op_mode & 0x4)
+		db->cr6_data |= CR6_FDM;	/* Set Full Duplex Bit */
+	else
+		db->cr6_data &= ~CR6_FDM;	/* Clear Full Duplex Bit */
+
+	update_cr6(db->cr6_data, db->ioaddr);
+
+	/* Force mode is needed for the 10/100M phyxcer */
+	if ( !(db->media_mode & 0x8)) {
+		/* Force Mode */
+		phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
+		if ( !(phy_reg & 0x1) ) {
+			/* partner without N-Way capability */
+			phy_reg = 0x0;
+			switch(db->op_mode) {
+			case ULI526X_10MHF: phy_reg = 0x0; break;
+			case ULI526X_10MFD: phy_reg = 0x100; break;
+			case ULI526X_100MHF: phy_reg = 0x2000; break;
+			case ULI526X_100MFD: phy_reg = 0x2100; break;
+			}
+			phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id);
+       			phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id);
+		}
+	}
+}
+
+
+/*
+ *	Write a word to Phy register
+ */
+
+static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data, u32 chip_id)
+{
+	u16 i;
+	unsigned long ioaddr;
+
+	if(chip_id == PCI_ULI5263_ID)
+	{
+		phy_writeby_cr10(iobase, phy_addr, offset, phy_data);
+		return;
+	}
+	/* M5261/M5263 Chip */
+	ioaddr = iobase + DCR9;
+
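+	/* Bit-bang a standard MDIO write frame: preamble, start (01), write
+	 * opcode (01), 5-bit PHY address, 5-bit register address, turnaround,
+	 * then 16 data bits, MSB first. */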
+	/* Send 35 synchronization clocks to the PHY controller */
+	for (i = 0; i < 35; i++)
+		phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+
+	/* Send start command(01) to Phy */
+	phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
+	phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+
+	/* Send write command(01) to Phy */
+	phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
+	phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+
+	/* Send Phy address */
+	for (i = 0x10; i > 0; i = i >> 1)
+		phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
+
+	/* Send register address */
+	for (i = 0x10; i > 0; i = i >> 1)
+		phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
+
+	/* turnaround bits */
+	phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+	phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
+
+	/* Write a word data to PHY controller */
+	for ( i = 0x8000; i > 0; i >>= 1)
+		phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
+	
+}
+
+
+/*
+ *	Read a word of data from a PHY register
+ */
+
+static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
+{
+	int i;
+	u16 phy_data;
+	unsigned long ioaddr;
+
+	if(chip_id == PCI_ULI5263_ID)
+		return phy_readby_cr10(iobase, phy_addr, offset);
+	/* M5261/M5263 Chip */
+	ioaddr = iobase + DCR9;
+	
+	/* Send 35 synchronization clocks to the PHY controller */
+	for (i = 0; i < 35; i++)
+		phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+
+	/* Send start command(01) to Phy */
+	phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
+	phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+
+	/* Send read command(10) to Phy */
+	phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+	phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
+
+	/* Send Phy address */
+	for (i = 0x10; i > 0; i = i >> 1)
+		phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
+
+	/* Send register address */
+	for (i = 0x10; i > 0; i = i >> 1)
+		phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
+
+	/* Skip transition state */
+	phy_read_1bit(ioaddr, chip_id);
+
+	/* read 16bit data */
+	for (phy_data = 0, i = 0; i < 16; i++) {
+		phy_data <<= 1;
+		phy_data |= phy_read_1bit(ioaddr, chip_id);
+	}
+
+	return phy_data;
+}
+
+static u16 phy_readby_cr10(unsigned long iobase, u8 phy_addr, u8 offset)
+{
+	unsigned long ioaddr,cr10_value;
+	
+	ioaddr = iobase + DCR10;
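+	/* CR10 MII access: PHY address in bits 25:21, register in bits 20:16,
+	 * bit 27 = read command; poll bit 28 for completion and take the data
+	 * from the low 16 bits. */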
+	cr10_value = phy_addr;
+	cr10_value = (cr10_value<<5) + offset;
+	cr10_value = (cr10_value<<16) + 0x08000000;
+	outl(cr10_value,ioaddr);
+	udelay(1);
+	while(1)
+	{
+		cr10_value = inl(ioaddr);
+		if(cr10_value&0x10000000)
+			break;
+	}
+	return (cr10_value&0x0ffff);
+}
+
+static void phy_writeby_cr10(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data)
+{
+	unsigned long ioaddr,cr10_value;
+	
+	ioaddr = iobase + DCR10;
+	cr10_value = phy_addr;
+	cr10_value = (cr10_value<<5) + offset;
+	cr10_value = (cr10_value<<16) + 0x04000000 + phy_data;
+	outl(cr10_value,ioaddr);
+	udelay(1);
+}
+/*
+ *	Write one bit of data to the PHY controller
+ */
+
+static void phy_write_1bit(unsigned long ioaddr, u32 phy_data, u32 chip_id)
+{
+	outl(phy_data , ioaddr);			/* MII Clock Low */
+	udelay(1);
+	outl(phy_data  | MDCLKH, ioaddr);	/* MII Clock High */
+	udelay(1);
+	outl(phy_data , ioaddr);			/* MII Clock Low */
+	udelay(1);
+}
+
+
+/*
+ *	Read one bit of PHY data from the PHY controller
+ */
+
+static u16 phy_read_1bit(unsigned long ioaddr, u32 chip_id)
+{
+	u16 phy_data;
+	
+	outl(0x50000 , ioaddr);
+	udelay(1);
+	phy_data = ( inl(ioaddr) >> 19 ) & 0x1;
+	outl(0x40000 , ioaddr);
+	udelay(1);
+
+	return phy_data;
+}
+
+
+static struct pci_device_id uli526x_pci_tbl[] = {
+	{ 0x10B9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5261_ID },
+	{ 0x10B9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5263_ID },
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, uli526x_pci_tbl);
+
+
+static struct pci_driver uli526x_driver = {
+	.name		= "uli526x",
+	.id_table	= uli526x_pci_tbl,
+	.probe		= uli526x_init_one,
+	.remove		= __devexit_p(uli526x_remove_one),
+};
+
+MODULE_AUTHOR("Peer Chen, peer.chen@uli.com.tw");
+MODULE_DESCRIPTION("ULi M5261/M5263 fast ethernet driver");
+MODULE_LICENSE("GPL");
+
+MODULE_PARM(debug, "i");
+MODULE_PARM(mode, "i");
+MODULE_PARM(cr6set, "i");
+MODULE_PARM_DESC(debug, "ULi M5261/M5263 enable debugging (0-1)");
+MODULE_PARM_DESC(mode, "ULi M5261/M5263: Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
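+/* mode: 0 = 10M half, 1 = 100M half, 4 = 10M full, 5 = 100M full duplex
+ * (the ULI526X_* values above); any other value selects autonegotiation,
+ * which is the default. */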
+
+/*	Description:
+ *	when the user loads the module with insmod, the system invokes
+ *	init_module() to initialize and register the driver.
+ */
+
+static int __init uli526x_init_module(void)
+{
+	int rc;
+
+	printk(version);
+	printed_version = 1;
+
+	ULI526X_DBUG(0, "init_module() ", debug);
+
+	if (debug)
+		uli526x_debug = debug;	/* set debug flag */
+	if (cr6set)
+		uli526x_cr6_user_set = cr6set;
+
+ 	switch(mode) {
+   	case ULI526X_10MHF:
+	case ULI526X_100MHF:
+	case ULI526X_10MFD:
+	case ULI526X_100MFD:
+		uli526x_media_mode = mode;
+		break;
+	default: uli526x_media_mode = ULI526X_AUTO;
+		break;
+	}
+
+	rc = pci_module_init(&uli526x_driver);
+	if (rc < 0)
+		return rc;
+
+	return 0;
+}
+
+
+/*
+ *	Description:
+ *	when the user removes the module with rmmod, the system invokes
+ *	the module exit routine to unregister all registered services.
+ */
+
+static void __exit uli526x_cleanup_module(void)
+{
+	ULI526X_DBUG(0, "uli526x_cleanup_module() ", debug);
+	pci_unregister_driver(&uli526x_driver);
+}
+
+module_init(uli526x_init_module);
+module_exit(uli526x_cleanup_module);
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -137,6 +137,110 @@ config PCMCIA_RAYCS
 comment "Wireless 802.11b ISA/PCI cards support"
 	depends on NET_RADIO && (ISA || PCI || PPC_PMAC || PCMCIA)
 
+config IPW2100
+	tristate "Intel PRO/Wireless 2100 Network Connection"
+	depends on NET_RADIO && PCI && IEEE80211
+	select FW_LOADER
+	---help---
+          A driver for the Intel PRO/Wireless 2100 Network 
+	  Connection 802.11b wireless network adapter.
+
+          See <file:Documentation/networking/README.ipw2100> for information on
+          the capabilities currently enabled in this driver and for tips
+          for debugging issues and problems.
+
+	  In order to use this driver, you will need a firmware image for it.
+          You can obtain the firmware from
+	  <http://ipw2100.sf.net/>.  Once you have the firmware image, you 
+	  will need to place it in /etc/firmware.
+
+          You will also very likely need the Wireless Tools in order to
+          configure your card:
+
+          <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
+ 
+          If you want to compile the driver as a module ( = code which can be
+          inserted in and removed from the running kernel whenever you want),
+          say M here and read <file:Documentation/modules.txt>.  The module
+          will be called ipw2100.ko.
+	
+config IPW2100_PROMISC
+        bool "Enable promiscuous mode"
+        depends on IPW2100
+        ---help---
+	  Enables promiscuous/monitor mode support for the ipw2100 driver.
+	  With this feature compiled into the driver, you can switch to 
+	  promiscuous mode via the Wireless Tools' Monitor mode.  While in this
+	  mode, no packets can be sent.
+
+config IPW_DEBUG
+	bool "Enable full debugging output in IPW2100 module."
+	depends on IPW2100
+	---help---
+	  This option will enable debug tracing output for the IPW2100.  
+
+	  This will result in the kernel module being ~60k larger.  You can 
+	  control which debug output is sent to the kernel log by setting the 
+	  value in 
+
+	  /sys/bus/pci/drivers/ipw2100/debug_level
+
+	  This entry will only exist if this option is enabled.
+
+	  If you are not trying to debug or develop the IPW2100 driver, you 
+	  most likely want to say N here.
+
+config IPW2200
+	tristate "Intel PRO/Wireless 2200BG and 2915ABG Network Connection"
+	depends on IEEE80211 && PCI
+	select FW_LOADER
+	---help---
+          A driver for the Intel PRO/Wireless 2200BG and 2915ABG Network
+	  Connection adapters. 
+
+          See <file:Documentation/networking/README.ipw2200> for 
+	  information on the capabilities currently enabled in this 
+	  driver and for tips for debugging issues and problems.
+
+	  In order to use this driver, you will need a firmware image for it.
+          You can obtain the firmware from
+	  <http://ipw2200.sf.net/>.  See the above referenced README.ipw2200 
+	  for information on where to install the firmware images.
+
+          You will also very likely need the Wireless Tools in order to
+          configure your card:
+
+          <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
+ 
+          If you want to compile the driver as a module ( = code which can be
+          inserted in and removed from the running kernel whenever you want),
+          say M here and read <file:Documentation/modules.txt>.  The module
+          will be called ipw2200.ko.
+
+config IPW_DEBUG
+	bool "Enable full debugging output in IPW2200 module."
+	depends on IPW2200
+	---help---
+	  This option will enable debug tracing output for the IPW2200.  
+
+	  This will result in the kernel module being ~100k larger.  You can 
+	  control which debug output is sent to the kernel log by setting the 
+	  value in 
+
+	  /sys/bus/pci/drivers/ipw2200/debug_level
+
+	  This entry will only exist if this option is enabled.
+
+	  To set a value, simply echo an 8-digit hex value to the same file:
+
+	  % echo 0x00000FF0 > /sys/bus/pci/drivers/ipw2200/debug_level
+
+	  You can find the list of debug mask values in 
+	  drivers/net/wireless/ipw2200.h
+
+	  If you are not trying to debug or develop the IPW2200 driver, you 
+	  most likely want to say N here.
+
 config AIRO
 	tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards"
 	depends on NET_RADIO && ISA && (PCI || BROKEN)
@@ -355,6 +459,8 @@ config PRISM54
 	  say M here and read <file:Documentation/modules.txt>.  The module
 	  will be called prism54.ko.
 
+source "drivers/net/wireless/hostap/Kconfig"
+
 # yes, this works even when no drivers are selected
 config NET_WIRELESS
 	bool
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -2,6 +2,10 @@
 # Makefile for the Linux Wireless network device drivers.
 #
 
+obj-$(CONFIG_IPW2100) += ipw2100.o
+
+obj-$(CONFIG_IPW2200) += ipw2200.o
+
 obj-$(CONFIG_STRIP) += strip.o
 obj-$(CONFIG_ARLAN) += arlan.o 
 
@@ -28,6 +32,8 @@ obj-$(CONFIG_PCMCIA_ATMEL)      += atmel
 
 obj-$(CONFIG_PRISM54)		+= prism54/
 
+obj-$(CONFIG_HOSTAP)		+= hostap/
+
 # 16-bit wireless PCMCIA client drivers
 obj-$(CONFIG_PCMCIA_RAYCS)	+= ray_cs.o
 obj-$(CONFIG_PCMCIA_WL3501)	+= wl3501_cs.o
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -1040,7 +1040,7 @@ typedef struct {
 	u16 status;
 } WifiCtlHdr;
 
-WifiCtlHdr wifictlhdr8023 = {
+static WifiCtlHdr wifictlhdr8023 = {
 	.ctlhdr = {
 		.ctl	= HOST_DONT_RLSE,
 	}
@@ -1111,13 +1111,13 @@ static int airo_thread(void *data);
 static void timer_func( struct net_device *dev );
 static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 #ifdef WIRELESS_EXT
-struct iw_statistics *airo_get_wireless_stats (struct net_device *dev);
+static struct iw_statistics *airo_get_wireless_stats (struct net_device *dev);
 static void airo_read_wireless_stats (struct airo_info *local);
 #endif /* WIRELESS_EXT */
 #ifdef CISCO_EXT
 static int readrids(struct net_device *dev, aironet_ioctl *comp);
 static int writerids(struct net_device *dev, aironet_ioctl *comp);
-int flashcard(struct net_device *dev, aironet_ioctl *comp);
+static int flashcard(struct net_device *dev, aironet_ioctl *comp);
 #endif /* CISCO_EXT */
 #ifdef MICSUPPORT
 static void micinit(struct airo_info *ai);
@@ -1226,6 +1226,12 @@ static int setup_proc_entry( struct net_
 static int takedown_proc_entry( struct net_device *dev,
 				struct airo_info *apriv );
 
+static int cmdreset(struct airo_info *ai);
+static int setflashmode (struct airo_info *ai);
+static int flashgchar(struct airo_info *ai,int matchbyte,int dwelltime);
+static int flashputbuf(struct airo_info *ai);
+static int flashrestart(struct airo_info *ai,struct net_device *dev);
+
 #ifdef MICSUPPORT
 /***********************************************************************
  *                              MIC ROUTINES                           *
@@ -1234,10 +1240,11 @@ static int takedown_proc_entry( struct n
 
 static int RxSeqValid (struct airo_info *ai,miccntx *context,int mcast,u32 micSeq);
 static void MoveWindow(miccntx *context, u32 micSeq);
-void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct crypto_tfm *);
-void emmh32_init(emmh32_context *context);
-void emmh32_update(emmh32_context *context, u8 *pOctets, int len);
-void emmh32_final(emmh32_context *context, u8 digest[4]);
+static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct crypto_tfm *);
+static void emmh32_init(emmh32_context *context);
+static void emmh32_update(emmh32_context *context, u8 *pOctets, int len);
+static void emmh32_final(emmh32_context *context, u8 digest[4]);
+static int flashpchar(struct airo_info *ai,int byte,int dwelltime);
 
 /* micinit - Initialize mic seed */
 
@@ -1315,7 +1322,7 @@ static int micsetup(struct airo_info *ai
 	return SUCCESS;
 }
 
-char micsnap[]= {0xAA,0xAA,0x03,0x00,0x40,0x96,0x00,0x02};
+static char micsnap[] = {0xAA,0xAA,0x03,0x00,0x40,0x96,0x00,0x02};
 
 /*===========================================================================
  * Description: Mic a packet
@@ -1570,7 +1577,7 @@ static void MoveWindow(miccntx *context,
 static unsigned char aes_counter[16];
 
 /* expand the key to fill the MMH coefficient array */
-void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct crypto_tfm *tfm)
+static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen, struct crypto_tfm *tfm)
 {
   /* take the keying material, expand if necessary, truncate at 16-bytes */
   /* run through AES counter mode to generate context->coeff[] */
@@ -1602,7 +1609,7 @@ void emmh32_setseed(emmh32_context *cont
 }
 
 /* prepare for calculation of a new mic */
-void emmh32_init(emmh32_context *context)
+static void emmh32_init(emmh32_context *context)
 {
 	/* prepare for new mic calculation */
 	context->accum = 0;
@@ -1610,7 +1617,7 @@ void emmh32_init(emmh32_context *context
 }
 
 /* add some bytes to the mic calculation */
-void emmh32_update(emmh32_context *context, u8 *pOctets, int len)
+static void emmh32_update(emmh32_context *context, u8 *pOctets, int len)
 {
 	int	coeff_position, byte_position;
   
@@ -1652,7 +1659,7 @@ void emmh32_update(emmh32_context *conte
 static u32 mask32[4] = { 0x00000000L, 0xFF000000L, 0xFFFF0000L, 0xFFFFFF00L };
 
 /* calculate the mic */
-void emmh32_final(emmh32_context *context, u8 digest[4])
+static void emmh32_final(emmh32_context *context, u8 digest[4])
 {
 	int	coeff_position, byte_position;
 	u32	val;
@@ -2255,7 +2262,7 @@ static void airo_read_stats(struct airo_
 	ai->stats.rx_fifo_errors = vals[0];
 }
 
-struct net_device_stats *airo_get_stats(struct net_device *dev)
+static struct net_device_stats *airo_get_stats(struct net_device *dev)
 {
 	struct airo_info *local =  dev->priv;
 
@@ -2414,7 +2421,7 @@ EXPORT_SYMBOL(stop_airo_card);
 
 static int add_airo_dev( struct net_device *dev );
 
-int wll_header_parse(struct sk_buff *skb, unsigned char *haddr)
+static int wll_header_parse(struct sk_buff *skb, unsigned char *haddr)
 {
 	memcpy(haddr, skb->mac.raw + 10, ETH_ALEN);
 	return ETH_ALEN;
@@ -2681,7 +2688,7 @@ static struct net_device *init_wifidev(s
 	return dev;
 }
 
-int reset_card( struct net_device *dev , int lock) {
+static int reset_card( struct net_device *dev , int lock) {
 	struct airo_info *ai = dev->priv;
 
 	if (lock && down_interruptible(&ai->sem))
@@ -2696,9 +2703,9 @@ int reset_card( struct net_device *dev ,
 	return 0;
 }
 
-struct net_device *_init_airo_card( unsigned short irq, int port,
-				    int is_pcmcia, struct pci_dev *pci,
-				    struct device *dmdev )
+static struct net_device *_init_airo_card( unsigned short irq, int port,
+					   int is_pcmcia, struct pci_dev *pci,
+					   struct device *dmdev )
 {
 	struct net_device *dev;
 	struct airo_info *ai;
@@ -7235,7 +7242,7 @@ static void airo_read_wireless_stats(str
 	local->wstats.miss.beacon = vals[34];
 }
 
-struct iw_statistics *airo_get_wireless_stats(struct net_device *dev)
+static struct iw_statistics *airo_get_wireless_stats(struct net_device *dev)
 {
 	struct airo_info *local =  dev->priv;
 
@@ -7450,14 +7457,8 @@ static int writerids(struct net_device *
  * Flash command switch table
  */
 
-int flashcard(struct net_device *dev, aironet_ioctl *comp) {
+static int flashcard(struct net_device *dev, aironet_ioctl *comp) {
 	int z;
-	int cmdreset(struct airo_info *);
-	int setflashmode(struct airo_info *);
-	int flashgchar(struct airo_info *,int,int);
-	int flashpchar(struct airo_info *,int,int);
-	int flashputbuf(struct airo_info *);
-	int flashrestart(struct airo_info *,struct net_device *);
 
 	/* Only super-user can modify flash */
 	if (!capable(CAP_NET_ADMIN))
@@ -7515,7 +7516,7 @@ int flashcard(struct net_device *dev, ai
  * card.
  */
 
-int cmdreset(struct airo_info *ai) {
+static int cmdreset(struct airo_info *ai) {
 	disable_MAC(ai, 1);
 
 	if(!waitbusy (ai)){
@@ -7539,7 +7540,7 @@ int cmdreset(struct airo_info *ai) {
  * mode
  */
 
-int setflashmode (struct airo_info *ai) {
+static int setflashmode (struct airo_info *ai) {
 	set_bit (FLAG_FLASHING, &ai->flags);
 
 	OUT4500(ai, SWS0, FLASH_COMMAND);
@@ -7566,7 +7567,7 @@ int setflashmode (struct airo_info *ai) 
  * x 50us for  echo .
  */
 
-int flashpchar(struct airo_info *ai,int byte,int dwelltime) {
+static int flashpchar(struct airo_info *ai,int byte,int dwelltime) {
 	int echo;
 	int waittime;
 
@@ -7606,7 +7607,7 @@ int flashpchar(struct airo_info *ai,int 
  * Get a character from the card matching matchbyte
  * Step 3)
  */
-int flashgchar(struct airo_info *ai,int matchbyte,int dwelltime){
+static int flashgchar(struct airo_info *ai,int matchbyte,int dwelltime){
 	int           rchar;
 	unsigned char rbyte=0;
 
@@ -7637,7 +7638,7 @@ int flashgchar(struct airo_info *ai,int 
  * send to the card
  */
 
-int flashputbuf(struct airo_info *ai){
+static int flashputbuf(struct airo_info *ai){
 	int            nwords;
 
 	/* Write stuff */
@@ -7659,7 +7660,7 @@ int flashputbuf(struct airo_info *ai){
 /*
  *
  */
-int flashrestart(struct airo_info *ai,struct net_device *dev){
+static int flashrestart(struct airo_info *ai,struct net_device *dev){
 	int    i,status;
 
 	ssleep(1);			/* Added 12/7/00 */
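
The airo.c hunks above all apply one cleanup: helpers that are only used within the file become static, and the forward declarations that flashcard() carried inside its body are dropped, with the definitions themselves (cmdreset(), setflashmode(), and friends) made static further down. A minimal standalone illustration of the pattern (names are made up, not taken from the driver):

static int helper(int x);		/* file-scope declaration, no symbol exported */

static int caller(int x)
{
	return helper(x) + 1;		/* no in-function forward declaration needed */
}

static int helper(int x)
{
	return x * 2;
}
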
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -68,7 +68,7 @@
 #include <linux/device.h>
 #include <linux/moduleparam.h>
 #include <linux/firmware.h>
-#include "ieee802_11.h"
+#include <net/ieee80211.h>
 #include "atmel.h"
 
 #define DRIVER_MAJOR 0
@@ -618,12 +618,12 @@ static int atmel_lock_mac(struct atmel_p
 static void atmel_wmem32(struct atmel_private *priv, u16 pos, u32 data);
 static void atmel_command_irq(struct atmel_private *priv);
 static int atmel_validate_channel(struct atmel_private *priv, int channel);
-static void atmel_management_frame(struct atmel_private *priv, struct ieee802_11_hdr *header, 
+static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_hdr *header, 
 				   u16 frame_len, u8 rssi);
 static void atmel_management_timer(u_long a);
 static void atmel_send_command(struct atmel_private *priv, int command, void *cmd, int cmd_size);
 static int atmel_send_command_wait(struct atmel_private *priv, int command, void *cmd, int cmd_size);
-static void atmel_transmit_management_frame(struct atmel_private *priv, struct ieee802_11_hdr *header,
+static void atmel_transmit_management_frame(struct atmel_private *priv, struct ieee80211_hdr *header,
 					    u8 *body, int body_len);
 
 static u8 atmel_get_mib8(struct atmel_private *priv, u8 type, u8 index);
@@ -827,7 +827,7 @@ static void tx_update_descriptor(struct 
 static int start_tx (struct sk_buff *skb, struct net_device *dev)
 {
 	struct atmel_private *priv = netdev_priv(dev);
-	struct ieee802_11_hdr header;
+	struct ieee80211_hdr header;
 	unsigned long flags;
 	u16 buff, frame_ctl, len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;
 	u8 SNAP_RFC1024[6] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
@@ -863,17 +863,17 @@ static int start_tx (struct sk_buff *skb
 		return 1;
 	}
 	
-	frame_ctl = IEEE802_11_FTYPE_DATA;
+	frame_ctl = IEEE80211_FTYPE_DATA;
 	header.duration_id = 0;
 	header.seq_ctl = 0;
 	if (priv->wep_is_on)
-		frame_ctl |= IEEE802_11_FCTL_WEP;
+		frame_ctl |= IEEE80211_FCTL_WEP;
 	if (priv->operating_mode == IW_MODE_ADHOC) {
 		memcpy(&header.addr1, skb->data, 6);
 		memcpy(&header.addr2, dev->dev_addr, 6);
 		memcpy(&header.addr3, priv->BSSID, 6);
 	} else {
-		frame_ctl |= IEEE802_11_FCTL_TODS;
+		frame_ctl |= IEEE80211_FCTL_TODS;
 		memcpy(&header.addr1, priv->CurrentBSSID, 6);
 		memcpy(&header.addr2, dev->dev_addr, 6);
 		memcpy(&header.addr3, skb->data, 6);
@@ -902,7 +902,7 @@ static int start_tx (struct sk_buff *skb
 }
 
 static void atmel_transmit_management_frame(struct atmel_private *priv, 
-					    struct ieee802_11_hdr *header,
+					    struct ieee80211_hdr *header,
 					    u8 *body, int body_len)
 {
 	u16 buff;
@@ -917,7 +917,7 @@ static void atmel_transmit_management_fr
 	tx_update_descriptor(priv, header->addr1[0] & 0x01, len, buff, TX_PACKET_TYPE_MGMT);
 }
 	
-static void fast_rx_path(struct atmel_private *priv, struct ieee802_11_hdr *header, 
+static void fast_rx_path(struct atmel_private *priv, struct ieee80211_hdr *header, 
 			 u16 msdu_size, u16 rx_packet_loc, u32 crc)
 {
 	/* fast path: unfragmented packet copy directly into skbuf */
@@ -955,7 +955,7 @@ static void fast_rx_path(struct atmel_pr
 	}
 	
 	memcpy(skbp, header->addr1, 6); /* destination address */
-	if (le16_to_cpu(header->frame_ctl) & IEEE802_11_FCTL_FROMDS) 
+	if (le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_FROMDS) 
 		memcpy(&skbp[6], header->addr3, 6);
 	else
 		memcpy(&skbp[6], header->addr2, 6); /* source address */
@@ -990,14 +990,14 @@ static int probe_crc(struct atmel_privat
 	return (crc ^ 0xffffffff) == netcrc;
 }
 
-static void frag_rx_path(struct atmel_private *priv, struct ieee802_11_hdr *header, 
+static void frag_rx_path(struct atmel_private *priv, struct ieee80211_hdr *header, 
 			 u16 msdu_size, u16 rx_packet_loc, u32 crc, u16 seq_no, u8 frag_no, int more_frags)
 {
 	u8 mac4[6]; 
 	u8 source[6];
 	struct sk_buff *skb;
 
-	if (le16_to_cpu(header->frame_ctl) & IEEE802_11_FCTL_FROMDS) 
+	if (le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_FROMDS) 
 		memcpy(source, header->addr3, 6);
 	else
 		memcpy(source, header->addr2, 6); 
@@ -1082,7 +1082,7 @@ static void frag_rx_path(struct atmel_pr
 static void rx_done_irq(struct atmel_private *priv)
 {
 	int i;
-	struct ieee802_11_hdr header;
+	struct ieee80211_hdr header;
 	
 	for (i = 0; 
 	     atmel_rmem8(priv, atmel_rx(priv, RX_DESC_FLAGS_OFFSET, priv->rx_desc_head)) == RX_DESC_FLAG_VALID &&
@@ -1117,7 +1117,7 @@ static void rx_done_irq(struct atmel_pri
 		/* probe for CRC use here if needed  once five packets have arrived with
 		   the same crc status, we assume we know what's happening and stop probing */
 		if (priv->probe_crc) {
-			if (!priv->wep_is_on || !(frame_ctl & IEEE802_11_FCTL_WEP)) {
+			if (!priv->wep_is_on || !(frame_ctl & IEEE80211_FCTL_WEP)) {
 				priv->do_rx_crc = probe_crc(priv, rx_packet_loc, msdu_size);
 			} else {
 				priv->do_rx_crc = probe_crc(priv, rx_packet_loc + 24, msdu_size - 24);
@@ -1132,16 +1132,16 @@ static void rx_done_irq(struct atmel_pri
 		}
 		    
 		/* don't CRC header when WEP in use */
-		if (priv->do_rx_crc && (!priv->wep_is_on || !(frame_ctl & IEEE802_11_FCTL_WEP))) {
+		if (priv->do_rx_crc && (!priv->wep_is_on || !(frame_ctl & IEEE80211_FCTL_WEP))) {
 			crc = crc32_le(0xffffffff, (unsigned char *)&header, 24);
 		}
 		msdu_size -= 24; /* header */
 
-		if ((frame_ctl & IEEE802_11_FCTL_FTYPE) == IEEE802_11_FTYPE_DATA) { 
+		if ((frame_ctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) { 
 			
-			int more_fragments = frame_ctl & IEEE802_11_FCTL_MOREFRAGS;
-			u8 packet_fragment_no = seq_control & IEEE802_11_SCTL_FRAG;
-			u16 packet_sequence_no = (seq_control & IEEE802_11_SCTL_SEQ) >> 4;
+			int more_fragments = frame_ctl & IEEE80211_FCTL_MOREFRAGS;
+			u8 packet_fragment_no = seq_control & IEEE80211_SCTL_FRAG;
+			u16 packet_sequence_no = (seq_control & IEEE80211_SCTL_SEQ) >> 4;
 			
 			if (!more_fragments && packet_fragment_no == 0 ) {
 				fast_rx_path(priv, &header, msdu_size, rx_packet_loc, crc);
@@ -1151,7 +1151,7 @@ static void rx_done_irq(struct atmel_pri
 			}
 		}
 		
-		if ((frame_ctl & IEEE802_11_FCTL_FTYPE) == IEEE802_11_FTYPE_MGMT) {
+		if ((frame_ctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) {
 			/* copy rest of packet into buffer */
 			atmel_copy_to_host(priv->dev, (unsigned char *)&priv->rx_buf, rx_packet_loc + 24, msdu_size);
 			
@@ -2663,10 +2663,10 @@ static void handle_beacon_probe(struct a
  
 static void send_authentication_request(struct atmel_private *priv, u8 *challenge, int challenge_len)
 {
-	struct ieee802_11_hdr header;
+	struct ieee80211_hdr header;
 	struct auth_body auth;
 	
-	header.frame_ctl = cpu_to_le16(IEEE802_11_FTYPE_MGMT | IEEE802_11_STYPE_AUTH); 
+	header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH); 
 	header.duration_id	= cpu_to_le16(0x8000);	
 	header.seq_ctl = 0;
 	memcpy(header.addr1, priv->CurrentBSSID, 6);
@@ -2677,7 +2677,7 @@ static void send_authentication_request(
 		auth.alg = cpu_to_le16(C80211_MGMT_AAN_SHAREDKEY); 
 		/* no WEP for authentication frames with TrSeqNo 1 */
 		if (priv->CurrentAuthentTransactionSeqNum != 1)
-			header.frame_ctl |=  cpu_to_le16(IEEE802_11_FCTL_WEP); 
+			header.frame_ctl |=  cpu_to_le16(IEEE80211_FCTL_WEP); 
 	} else {
 		auth.alg = cpu_to_le16(C80211_MGMT_AAN_OPENSYSTEM);
 	}
@@ -2701,7 +2701,7 @@ static void send_association_request(str
 {
 	u8 *ssid_el_p;
 	int bodysize;
-	struct ieee802_11_hdr header;
+	struct ieee80211_hdr header;
 	struct ass_req_format {
 		u16 capability;
 		u16 listen_interval; 
@@ -2714,8 +2714,8 @@ static void send_association_request(str
 		u8 rates[4];
 	} body;
 		
-	header.frame_ctl = cpu_to_le16(IEEE802_11_FTYPE_MGMT | 
-		(is_reassoc ? IEEE802_11_STYPE_REASSOC_REQ : IEEE802_11_STYPE_ASSOC_REQ));
+	header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT | 
+		(is_reassoc ? IEEE80211_STYPE_REASSOC_REQ : IEEE80211_STYPE_ASSOC_REQ));
 	header.duration_id = cpu_to_le16(0x8000);
 	header.seq_ctl = 0;
 
@@ -2751,9 +2751,9 @@ static void send_association_request(str
 	atmel_transmit_management_frame(priv, &header, (void *)&body, bodysize);
 }
 
-static int is_frame_from_current_bss(struct atmel_private *priv, struct ieee802_11_hdr *header)
+static int is_frame_from_current_bss(struct atmel_private *priv, struct ieee80211_hdr *header)
 {
-	if (le16_to_cpu(header->frame_ctl) & IEEE802_11_FCTL_FROMDS)
+	if (le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_FROMDS)
 		return memcmp(header->addr3, priv->CurrentBSSID, 6) == 0;
 	else
 		return memcmp(header->addr2, priv->CurrentBSSID, 6) == 0;
@@ -2801,7 +2801,7 @@ static int retrieve_bss(struct atmel_pri
 }
 
 
-static void store_bss_info(struct atmel_private *priv, struct ieee802_11_hdr *header,
+static void store_bss_info(struct atmel_private *priv, struct ieee80211_hdr *header,
 			   u16 capability, u16 beacon_period, u8 channel, u8 rssi, 
 			   u8 ssid_len, u8 *ssid, int is_beacon)
 {
@@ -3085,12 +3085,12 @@ static void atmel_smooth_qual(struct atm
 }
 
 /* deals with incoming management frames. */
-static void atmel_management_frame(struct atmel_private *priv, struct ieee802_11_hdr *header, 
+static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_hdr *header, 
 		      u16 frame_len, u8 rssi)
 {
 	u16 subtype;
 	
-	switch (subtype = le16_to_cpu(header->frame_ctl) & IEEE802_11_FCTL_STYPE) {
+	switch (subtype = le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_STYPE) {
 	case C80211_SUBTYPE_MGMT_BEACON :
 	case C80211_SUBTYPE_MGMT_ProbeResponse:
 		
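
The atmel.c changes are mechanical: the driver's private ieee802_11.h header is replaced by <net/ieee80211.h>, and each IEEE802_11_* identifier becomes its IEEE80211_* counterpart, with struct ieee802_11_hdr becoming struct ieee80211_hdr. As a quick reference, a sketch (illustrative, not part of the patch) of how the renamed constants are used to build and examine a frame control word, mirroring start_tx() and rx_done_irq() above:

#include <net/ieee80211.h>

static void frame_ctl_example(struct ieee80211_hdr *hdr, int wep_is_on,
			      u16 seq_control)
{
	u16 frame_ctl = IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS;

	if (wep_is_on)
		frame_ctl |= IEEE80211_FCTL_WEP;	/* TX: compose the field */
	hdr->frame_ctl = cpu_to_le16(frame_ctl);

	/* RX: classify the frame and pull out fragmentation info */
	frame_ctl = le16_to_cpu(hdr->frame_ctl);
	if ((frame_ctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) {
		u8  frag_no = seq_control & IEEE80211_SCTL_FRAG;
		u16 seq_no  = (seq_control & IEEE80211_SCTL_SEQ) >> 4;
		/* ...dispatch to the fast or fragmented RX path... */
		(void)frag_no; (void)seq_no;
	}
}
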
diff --git a/drivers/net/wireless/hostap/Kconfig b/drivers/net/wireless/hostap/Kconfig
new file mode 100644
--- /dev/null
+++ b/drivers/net/wireless/hostap/Kconfig
@@ -0,0 +1,71 @@
+config HOSTAP
+	tristate "IEEE 802.11 for Host AP (Prism2/2.5/3 and WEP/TKIP/CCMP)"
+	depends on NET_RADIO
+	---help---
+	Shared driver code for IEEE 802.11b wireless cards based on the
+	Intersil Prism2/2.5/3 chipset. This driver supports the so-called
+	Host AP mode, which allows the card to act as an IEEE 802.11
+	access point.
+
+	See <http://hostap.epitest.fi/> for more information about
+	Host AP driver configuration and tools. The site also provides
+	the tools (hostapd and wpa_supplicant) needed for WPA/WPA2
+	support.
+
+	This option includes the base Host AP driver code that is shared by
+	the different hardware models. To actually use the driver you will
+	also need to enable the PLX, PCI, or CS version of it.
+
+	The driver can be compiled as a module; it will be called
+	"hostap.ko".
+
+config HOSTAP_FIRMWARE
+	bool "Support downloading firmware images with Host AP driver"
+	depends on HOSTAP
+	---help---
+	Configure the Host AP driver to include support for firmware image
+	download. The current version supports downloading only to volatile
+	(RAM) memory; flash upgrade is not yet supported.
+
+	Firmware image downloading requires a user-space tool, prism2_srec,
+	which is available from http://hostap.epitest.fi/.
+
+config HOSTAP_PLX
+	tristate "Host AP driver for Prism2/2.5/3 in PLX9052 PCI adaptors"
+	depends on PCI && HOSTAP
+	---help---
+	Host AP driver version for Prism2/2.5/3 PC Cards in PLX9052-based
+	PCI adaptors.
+
+	"Host AP support for Prism2/2.5/3 IEEE 802.11b" is required for this
+	driver; its help text includes more information about the Host AP
+	driver.
+
+	The driver can be compiled as a module and will be named
+	"hostap_plx.ko".
+
+config HOSTAP_PCI
+	tristate "Host AP driver for Prism2.5 PCI adaptors"
+	depends on PCI && HOSTAP
+	---help---
+	Host AP driver version for Prism2.5 PCI adaptors.
+
+	"Host AP support for Prism2/2.5/3 IEEE 802.11b" is required for this
+	driver; its help text includes more information about the Host AP
+	driver.
+
+	The driver can be compiled as a module and will be named
+	"hostap_pci.ko".
+
+config HOSTAP_CS
+	tristate "Host AP driver for Prism2/2.5/3 PC Cards"
+	depends on PCMCIA!=n && HOSTAP
+	---help---
+	Host AP driver version for Prism2/2.5/3 PC Cards.
+
+	"Host AP support for Prism2/2.5/3 IEEE 802.11b" is required for this
+	driver; its help text includes more information about the Host AP
+	driver.
+
+	The driver can be compiled as a module and will be named
+	"hostap_cs.ko".
diff --git a/drivers/net/wireless/hostap/Makefile b/drivers/net/wireless/hostap/Makefile
new file mode 100644
--- /dev/null
+++ b/drivers/net/wireless/hostap/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_HOSTAP) += hostap.o
+
+obj-$(CONFIG_HOSTAP_CS) += hostap_cs.o
+obj-$(CONFIG_HOSTAP_PLX) += hostap_plx.o
+obj-$(CONFIG_HOSTAP_PCI) += hostap_pci.o
diff --git a/drivers/net/wireless/hostap/hostap.c b/drivers/net/wireless/hostap/hostap.c
new file mode 100644
--- /dev/null
+++ b/drivers/net/wireless/hostap/hostap.c
@@ -0,0 +1,1198 @@
+/*
+ * Host AP (software wireless LAN access point) driver for
+ * Intersil Prism2/2.5/3 - hostap.o module, common routines
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2005, Jouni Malinen <jkmaline@cc.hut.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation. See README and COPYING for
+ * more details.
+ */
+
+#include <linux/config.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/proc_fs.h>
+#include <linux/if_arp.h>
+#include <linux/delay.h>
+#include <linux/random.h>
+#include <linux/workqueue.h>
+#include <linux/kmod.h>
+#include <linux/rtnetlink.h>
+#include <linux/wireless.h>
+#include <net/iw_handler.h>
+#include <net/ieee80211.h>
+#include <net/ieee80211_crypt.h>
+#include <asm/uaccess.h>
+
+#include "hostap_wlan.h"
+#include "hostap_80211.h"
+#include "hostap_ap.h"
+#include "hostap.h"
+
+MODULE_AUTHOR("Jouni Malinen");
+MODULE_DESCRIPTION("Host AP common routines");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(PRISM2_VERSION);
+
+#define TX_TIMEOUT (2 * HZ)
+
+#define PRISM2_MAX_FRAME_SIZE 2304
+#define PRISM2_MIN_MTU 256
+/* FIX: */
+#define PRISM2_MAX_MTU (PRISM2_MAX_FRAME_SIZE - (6 /* LLC */ + 8 /* WEP */))
+
+
+/* hostap.c */
+static int prism2_wds_add(local_info_t *local, u8 *remote_addr,
+			  int rtnl_locked);
+static int prism2_wds_del(local_info_t *local, u8 *remote_addr,
+			  int rtnl_locked, int do_not_remove);
+
+/* hostap_ap.c */
+static int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
+				  struct iw_quality qual[], int buf_size,
+				  int aplist);
+static int prism2_ap_translate_scan(struct net_device *dev, char *buffer);
+static int prism2_hostapd(struct ap_data *ap,
+			  struct prism2_hostapd_param *param);
+static void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
+				struct ieee80211_crypt_data ***crypt);
+static void ap_control_kickall(struct ap_data *ap);
+#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
+static int ap_control_add_mac(struct mac_restrictions *mac_restrictions,
+			      u8 *mac);
+static int ap_control_del_mac(struct mac_restrictions *mac_restrictions,
+			      u8 *mac);
+static void ap_control_flush_macs(struct mac_restrictions *mac_restrictions);
+static int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev,
+			       u8 *mac);
+#endif /* !PRISM2_NO_KERNEL_IEEE80211_MGMT */
+
+
+static const long freq_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
+				  2447, 2452, 2457, 2462, 2467, 2472, 2484 };
+#define FREQ_COUNT (sizeof(freq_list) / sizeof(freq_list[0]))
+
+
+/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
+/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
+static unsigned char rfc1042_header[] =
+{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
+/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
+static unsigned char bridge_tunnel_header[] =
+{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
+/* No encapsulation header if EtherType < 0x600 (=length) */
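
The block above spells out the 802.1H/RFC 1042 rules that the RX and TX paths apply when translating between Ethernet and 802.11 framing. A sketch (illustrative, not from the patch) of how an outgoing EtherType selects one of the two headers, assuming <linux/if_ether.h> is available for the ETH_P_* constants:

static const unsigned char *snap_header_for(unsigned short ethertype,
					    size_t *hdrlen)
{
	if (ethertype < 0x600) {		/* a length, not a type: no SNAP header */
		*hdrlen = 0;
		return NULL;
	}
	if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) {
		*hdrlen = sizeof(bridge_tunnel_header);	/* bridge-tunnel encapsulation */
		return bridge_tunnel_header;
	}
	*hdrlen = sizeof(rfc1042_header);		/* RFC 1042 for everything else */
	return rfc1042_header;
}
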
+
+
+/* FIX: these could be compiled separately and linked together to hostap.o */
+#include "hostap_ap.c"
+#include "hostap_info.c"
+#include "hostap_ioctl.c"
+#include "hostap_proc.c"
+#include "hostap_80211_rx.c"
+#include "hostap_80211_tx.c"
+
+
+struct net_device * hostap_add_interface(struct local_info *local,
+					 int type, int rtnl_locked,
+					 const char *prefix,
+					 const char *name)
+{
+	struct net_device *dev, *mdev;
+	struct hostap_interface *iface;
+	int ret;
+
+	dev = alloc_etherdev(sizeof(struct hostap_interface));
+	if (dev == NULL)
+		return NULL;
+
+	iface = netdev_priv(dev);
+	iface->dev = dev;
+	iface->local = local;
+	iface->type = type;
+	list_add(&iface->list, &local->hostap_interfaces);
+
+	mdev = local->dev;
+	memcpy(dev->dev_addr, mdev->dev_addr, ETH_ALEN);
+	dev->base_addr = mdev->base_addr;
+	dev->irq = mdev->irq;
+	dev->mem_start = mdev->mem_start;
+	dev->mem_end = mdev->mem_end;
+
+	hostap_setup_dev(dev, local, 0);
+	dev->destructor = free_netdev;
+
+	sprintf(dev->name, "%s%s", prefix, name);
+	if (!rtnl_locked)
+		rtnl_lock();
+
+	ret = 0;
+	if (strchr(dev->name, '%'))
+		ret = dev_alloc_name(dev, dev->name);
+
+	SET_NETDEV_DEV(dev, mdev->class_dev.dev);
+	if (ret >= 0)
+		ret = register_netdevice(dev);
+
+	if (!rtnl_locked)
+		rtnl_unlock();
+
+	if (ret < 0) {
+		printk(KERN_WARNING "%s: failed to add new netdevice!\n",
+		       dev->name);
+		free_netdev(dev);
+		return NULL;
+	}
+
+	printk(KERN_DEBUG "%s: registered netdevice %s\n",
+	       mdev->name, dev->name);
+
+	return dev;
+}
+
+
+void hostap_remove_interface(struct net_device *dev, int rtnl_locked,
+			     int remove_from_list)
+{
+	struct hostap_interface *iface;
+
+	if (!dev)
+		return;
+
+	iface = netdev_priv(dev);
+
+	if (remove_from_list) {
+		list_del(&iface->list);
+	}
+
+	if (dev == iface->local->ddev)
+		iface->local->ddev = NULL;
+	else if (dev == iface->local->apdev)
+		iface->local->apdev = NULL;
+	else if (dev == iface->local->stadev)
+		iface->local->stadev = NULL;
+
+	if (rtnl_locked)
+		unregister_netdevice(dev);
+	else
+		unregister_netdev(dev);
+
+	/* dev->destructor = free_netdev() will free the device data, including
+	 * private data, when removing the device */
+}
+
+
+static inline int prism2_wds_special_addr(u8 *addr)
+{
+	if (addr[0] || addr[1] || addr[2] || addr[3] || addr[4] || addr[5])
+		return 0;
+
+	return 1;
+}
+
+
+static int prism2_wds_add(local_info_t *local, u8 *remote_addr,
+			  int rtnl_locked)
+{
+	struct net_device *dev;
+	struct list_head *ptr;
+	struct hostap_interface *iface, *empty, *match;
+
+	empty = match = NULL;
+	read_lock_bh(&local->iface_lock);
+	list_for_each(ptr, &local->hostap_interfaces) {
+		iface = list_entry(ptr, struct hostap_interface, list);
+		if (iface->type != HOSTAP_INTERFACE_WDS)
+			continue;
+
+		if (prism2_wds_special_addr(iface->u.wds.remote_addr))
+			empty = iface;
+		else if (memcmp(iface->u.wds.remote_addr, remote_addr,
+				ETH_ALEN) == 0) {
+			match = iface;
+			break;
+		}
+	}
+	if (!match && empty && !prism2_wds_special_addr(remote_addr)) {
+		/* take pre-allocated entry into use */
+		memcpy(empty->u.wds.remote_addr, remote_addr, ETH_ALEN);
+		read_unlock_bh(&local->iface_lock);
+		printk(KERN_DEBUG "%s: using pre-allocated WDS netdevice %s\n",
+		       local->dev->name, empty->dev->name);
+		return 0;
+	}
+	read_unlock_bh(&local->iface_lock);
+
+	if (!prism2_wds_special_addr(remote_addr)) {
+		if (match)
+			return -EEXIST;
+		hostap_add_sta(local->ap, remote_addr);
+	}
+
+	if (local->wds_connections >= local->wds_max_connections)
+		return -ENOBUFS;
+
+	/* verify that there is room for wds# postfix in the interface name */
+	if (strlen(local->dev->name) > IFNAMSIZ - 5) {
+		printk(KERN_DEBUG "'%s' too long base device name\n",
+		       local->dev->name);
+		return -EINVAL;
+	}
+
+	dev = hostap_add_interface(local, HOSTAP_INTERFACE_WDS, rtnl_locked,
+				   local->ddev->name, "wds%d");
+	if (dev == NULL)
+		return -ENOMEM;
+
+	iface = netdev_priv(dev);
+	memcpy(iface->u.wds.remote_addr, remote_addr, ETH_ALEN);
+
+	local->wds_connections++;
+
+	return 0;
+}
+
+
+static int prism2_wds_del(local_info_t *local, u8 *remote_addr,
+			  int rtnl_locked, int do_not_remove)
+{
+	unsigned long flags;
+	struct list_head *ptr;
+	struct hostap_interface *iface, *selected = NULL;
+
+	write_lock_irqsave(&local->iface_lock, flags);
+	list_for_each(ptr, &local->hostap_interfaces) {
+		iface = list_entry(ptr, struct hostap_interface, list);
+		if (iface->type != HOSTAP_INTERFACE_WDS)
+			continue;
+
+		if (memcmp(iface->u.wds.remote_addr, remote_addr,
+			   ETH_ALEN) == 0) {
+			selected = iface;
+			break;
+		}
+	}
+	if (selected && !do_not_remove)
+		list_del(&selected->list);
+	write_unlock_irqrestore(&local->iface_lock, flags);
+
+	if (selected) {
+		if (do_not_remove)
+			memset(selected->u.wds.remote_addr, 0, ETH_ALEN);
+		else {
+			hostap_remove_interface(selected->dev, rtnl_locked, 0);
+			local->wds_connections--;
+		}
+	}
+
+	return selected ? 0 : -ENODEV;
+}
+
+
+u16 hostap_tx_callback_register(local_info_t *local,
+				void (*func)(struct sk_buff *, int ok, void *),
+				void *data)
+{
+	unsigned long flags;
+	struct hostap_tx_callback_info *entry;
+
+	entry = (struct hostap_tx_callback_info *) kmalloc(sizeof(*entry),
+							   GFP_ATOMIC);
+	if (entry == NULL)
+		return 0;
+
+	entry->func = func;
+	entry->data = data;
+
+	spin_lock_irqsave(&local->lock, flags);
+	entry->idx = local->tx_callback ? local->tx_callback->idx + 1 : 1;
+	entry->next = local->tx_callback;
+	local->tx_callback = entry;
+	spin_unlock_irqrestore(&local->lock, flags);
+
+	return entry->idx;
+}
+
+
+int hostap_tx_callback_unregister(local_info_t *local, u16 idx)
+{
+	unsigned long flags;
+	struct hostap_tx_callback_info *cb, *prev = NULL;
+
+	spin_lock_irqsave(&local->lock, flags);
+	cb = local->tx_callback;
+	while (cb != NULL && cb->idx != idx) {
+		prev = cb;
+		cb = cb->next;
+	}
+	if (cb) {
+		if (prev == NULL)
+			local->tx_callback = cb->next;
+		else
+			prev->next = cb->next;
+		kfree(cb);
+	}
+	spin_unlock_irqrestore(&local->lock, flags);
+
+	return cb ? 0 : -1;
+}
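
hostap_tx_callback_register() keeps the callbacks on a simple singly linked list protected by local->lock and hands out increasing non-zero indices; 0 is the failure value, and hostap_tx_callback_unregister() returns -1 if the index is unknown. A sketch of a hypothetical user (my_tx_done() and the context pointer are made-up names):

static void my_tx_done(struct sk_buff *skb, int ok, void *data)
{
	/* inspect 'ok' and 'data', then dispose of the frame */
	dev_kfree_skb_any(skb);
}

static u16 my_idx;

static int my_setup(local_info_t *local, void *ctx)
{
	my_idx = hostap_tx_callback_register(local, my_tx_done, ctx);
	return my_idx ? 0 : -ENOMEM;	/* 0 means registration failed */
}

static void my_teardown(local_info_t *local)
{
	if (hostap_tx_callback_unregister(local, my_idx) < 0)
		printk(KERN_DEBUG "tx callback %u was not registered\n", my_idx);
}
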
+
+
+/* val is in host byte order */
+int hostap_set_word(struct net_device *dev, int rid, u16 val)
+{
+	struct hostap_interface *iface;
+	u16 tmp = cpu_to_le16(val);
+	iface = netdev_priv(dev);
+	return iface->local->func->set_rid(dev, rid, &tmp, 2);
+}
+
+
+int hostap_set_string(struct net_device *dev, int rid, const char *val)
+{
+	struct hostap_interface *iface;
+	char buf[MAX_SSID_LEN + 2];
+	int len;
+
+	iface = netdev_priv(dev);
+	len = strlen(val);
+	if (len > MAX_SSID_LEN)
+		return -1;
+	memset(buf, 0, sizeof(buf));
+	buf[0] = len; /* little endian 16 bit word */
+	memcpy(buf + 2, val, len);
+
+	return iface->local->func->set_rid(dev, rid, &buf, MAX_SSID_LEN + 2);
+}
+
+
+u16 hostap_get_porttype(local_info_t *local)
+{
+	if (local->iw_mode == IW_MODE_ADHOC && local->pseudo_adhoc)
+		return HFA384X_PORTTYPE_PSEUDO_IBSS;
+	if (local->iw_mode == IW_MODE_ADHOC)
+		return HFA384X_PORTTYPE_IBSS;
+	if (local->iw_mode == IW_MODE_INFRA)
+		return HFA384X_PORTTYPE_BSS;
+	if (local->iw_mode == IW_MODE_REPEAT)
+		return HFA384X_PORTTYPE_WDS;
+	if (local->iw_mode == IW_MODE_MONITOR)
+		return HFA384X_PORTTYPE_PSEUDO_IBSS;
+	return HFA384X_PORTTYPE_HOSTAP;
+}
+
+
+int hostap_set_encryption(local_info_t *local)
+{
+	u16 val, old_val;
+	int i, keylen, len, idx;
+	char keybuf[WEP_KEY_LEN + 1];
+	enum { NONE, WEP, OTHER } encrypt_type;
+
+	idx = local->tx_keyidx;
+	if (local->crypt[idx] == NULL || local->crypt[idx]->ops == NULL)
+		encrypt_type = NONE;
+	else if (strcmp(local->crypt[idx]->ops->name, "WEP") == 0)
+		encrypt_type = WEP;
+	else
+		encrypt_type = OTHER;
+
+	if (local->func->get_rid(local->dev, HFA384X_RID_CNFWEPFLAGS, &val, 2,
+				 1) < 0) {
+		printk(KERN_DEBUG "Could not read current WEP flags.\n");
+		goto fail;
+	}
+	le16_to_cpus(&val);
+	old_val = val;
+
+	if (encrypt_type != NONE || local->privacy_invoked)
+		val |= HFA384X_WEPFLAGS_PRIVACYINVOKED;
+	else
+		val &= ~HFA384X_WEPFLAGS_PRIVACYINVOKED;
+
+	if (local->open_wep || encrypt_type == NONE ||
+	    ((local->ieee_802_1x || local->wpa) && local->host_decrypt))
+		val &= ~HFA384X_WEPFLAGS_EXCLUDEUNENCRYPTED;
+	else
+		val |= HFA384X_WEPFLAGS_EXCLUDEUNENCRYPTED;
+
+	if ((encrypt_type != NONE || local->privacy_invoked) &&
+	    (encrypt_type == OTHER || local->host_encrypt))
+		val |= HFA384X_WEPFLAGS_HOSTENCRYPT;
+	else
+		val &= ~HFA384X_WEPFLAGS_HOSTENCRYPT;
+	if ((encrypt_type != NONE || local->privacy_invoked) &&
+	    (encrypt_type == OTHER || local->host_decrypt))
+		val |= HFA384X_WEPFLAGS_HOSTDECRYPT;
+	else
+		val &= ~HFA384X_WEPFLAGS_HOSTDECRYPT;
+
+	if (val != old_val &&
+	    hostap_set_word(local->dev, HFA384X_RID_CNFWEPFLAGS, val)) {
+		printk(KERN_DEBUG "Could not write new WEP flags (0x%x)\n",
+		       val);
+		goto fail;
+	}
+
+	if (encrypt_type != WEP)
+		return 0;
+
+	/* 104-bit support seems to require that all the keys are set to the
+	 * same keylen */
+	keylen = 6; /* first 5 octets */
+	len = local->crypt[idx]->ops->get_key(keybuf, sizeof(keybuf),
+					      NULL, local->crypt[idx]->priv);
+	if (idx >= 0 && idx < WEP_KEYS && len > 5)
+		keylen = WEP_KEY_LEN + 1; /* first 13 octets */
+
+	for (i = 0; i < WEP_KEYS; i++) {
+		memset(keybuf, 0, sizeof(keybuf));
+		if (local->crypt[i]) {
+			(void) local->crypt[i]->ops->get_key(
+				keybuf, sizeof(keybuf),
+				NULL, local->crypt[i]->priv);
+		}
+		if (local->func->set_rid(local->dev,
+					 HFA384X_RID_CNFDEFAULTKEY0 + i,
+					 keybuf, keylen)) {
+			printk(KERN_DEBUG "Could not set key %d (len=%d)\n",
+			       i, keylen);
+			goto fail;
+		}
+	}
+	if (hostap_set_word(local->dev, HFA384X_RID_CNFWEPDEFAULTKEYID, idx)) {
+		printk(KERN_DEBUG "Could not set default keyid %d\n", idx);
+		goto fail;
+	}
+
+	return 0;
+
+ fail:
+	printk(KERN_DEBUG "%s: encryption setup failed\n", local->dev->name);
+	return -1;
+}
+
+
+int hostap_set_antsel(local_info_t *local)
+{
+	u16 val;
+	int ret = 0;
+
+	if (local->antsel_tx != HOSTAP_ANTSEL_DO_NOT_TOUCH &&
+	    local->func->cmd(local->dev, HFA384X_CMDCODE_READMIF,
+			     HFA386X_CR_TX_CONFIGURE,
+			     NULL, &val) == 0) {
+		val &= ~(BIT(2) | BIT(1));
+		switch (local->antsel_tx) {
+		case HOSTAP_ANTSEL_DIVERSITY:
+			val |= BIT(1);
+			break;
+		case HOSTAP_ANTSEL_LOW:
+			break;
+		case HOSTAP_ANTSEL_HIGH:
+			val |= BIT(2);
+			break;
+		}
+
+		if (local->func->cmd(local->dev, HFA384X_CMDCODE_WRITEMIF,
+				     HFA386X_CR_TX_CONFIGURE, &val, NULL)) {
+			printk(KERN_INFO "%s: setting TX AntSel failed\n",
+			       local->dev->name);
+			ret = -1;
+		}
+	}
+
+	if (local->antsel_rx != HOSTAP_ANTSEL_DO_NOT_TOUCH &&
+	    local->func->cmd(local->dev, HFA384X_CMDCODE_READMIF,
+			     HFA386X_CR_RX_CONFIGURE,
+			     NULL, &val) == 0) {
+		val &= ~(BIT(1) | BIT(0));
+		switch (local->antsel_rx) {
+		case HOSTAP_ANTSEL_DIVERSITY:
+			break;
+		case HOSTAP_ANTSEL_LOW:
+			val |= BIT(0);
+			break;
+		case HOSTAP_ANTSEL_HIGH:
+			val |= BIT(0) | BIT(1);
+			break;
+		}
+
+		if (local->func->cmd(local->dev, HFA384X_CMDCODE_WRITEMIF,
+				     HFA386X_CR_RX_CONFIGURE, &val, NULL)) {
+			printk(KERN_INFO "%s: setting RX AntSel failed\n",
+			       local->dev->name);
+			ret = -1;
+		}
+	}
+
+	return ret;
+}
+
+
+int hostap_set_roaming(local_info_t *local)
+{
+	u16 val;
+
+	switch (local->host_roaming) {
+	case 1:
+		val = HFA384X_ROAMING_HOST;
+		break;
+	case 2:
+		val = HFA384X_ROAMING_DISABLED;
+		break;
+	case 0:
+	default:
+		val = HFA384X_ROAMING_FIRMWARE;
+		break;
+	}
+
+	return hostap_set_word(local->dev, HFA384X_RID_CNFROAMINGMODE, val);
+}
+
+
+int hostap_set_auth_algs(local_info_t *local)
+{
+	int val = local->auth_algs;
+	/* At least STA f/w v0.6.2 seems to have issues with cnfAuthentication
+	 * set to include both Open and Shared Key flags. It tries to use
+	 * Shared Key authentication in that case even if WEP keys are not
+	 * configured. STA f/w v0.7.6 is able to handle such a configuration,
+	 * but it is unknown when this was fixed between 0.6.2 and 0.7.6. */
+	if (local->sta_fw_ver < PRISM2_FW_VER(0,7,0) &&
+	    val != PRISM2_AUTH_OPEN && val != PRISM2_AUTH_SHARED_KEY)
+		val = PRISM2_AUTH_OPEN;
+
+	if (hostap_set_word(local->dev, HFA384X_RID_CNFAUTHENTICATION, val)) {
+		printk(KERN_INFO "%s: cnfAuthentication setting to 0x%x "
+		       "failed\n", local->dev->name, local->auth_algs);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+
+void hostap_dump_rx_header(const char *name, const struct hfa384x_rx_frame *rx)
+{
+	u16 status, fc;
+
+	status = __le16_to_cpu(rx->status);
+
+	printk(KERN_DEBUG "%s: RX status=0x%04x (port=%d, type=%d, "
+	       "fcserr=%d) silence=%d signal=%d rate=%d rxflow=%d; "
+	       "jiffies=%ld\n",
+	       name, status, (status >> 8) & 0x07, status >> 13, status & 1,
+	       rx->silence, rx->signal, rx->rate, rx->rxflow, jiffies);
+
+	fc = __le16_to_cpu(rx->frame_control);
+	printk(KERN_DEBUG "   FC=0x%04x (type=%d:%d) dur=0x%04x seq=0x%04x "
+	       "data_len=%d%s%s\n",
+	       fc, WLAN_FC_GET_TYPE(fc) >> 2, WLAN_FC_GET_STYPE(fc) >> 4,
+	       __le16_to_cpu(rx->duration_id), __le16_to_cpu(rx->seq_ctrl),
+	       __le16_to_cpu(rx->data_len),
+	       fc & IEEE80211_FCTL_TODS ? " [ToDS]" : "",
+	       fc & IEEE80211_FCTL_FROMDS ? " [FromDS]" : "");
+
+	printk(KERN_DEBUG "   A1=" MACSTR " A2=" MACSTR " A3=" MACSTR " A4="
+	       MACSTR "\n",
+	       MAC2STR(rx->addr1), MAC2STR(rx->addr2), MAC2STR(rx->addr3),
+	       MAC2STR(rx->addr4));
+
+	printk(KERN_DEBUG "   dst=" MACSTR " src=" MACSTR " len=%d\n",
+	       MAC2STR(rx->dst_addr), MAC2STR(rx->src_addr),
+	       __be16_to_cpu(rx->len));
+}
+
+
+void hostap_dump_tx_header(const char *name, const struct hfa384x_tx_frame *tx)
+{
+	u16 fc;
+
+	printk(KERN_DEBUG "%s: TX status=0x%04x retry_count=%d tx_rate=%d "
+	       "tx_control=0x%04x; jiffies=%ld\n",
+	       name, __le16_to_cpu(tx->status), tx->retry_count, tx->tx_rate,
+	       __le16_to_cpu(tx->tx_control), jiffies);
+
+	fc = __le16_to_cpu(tx->frame_control);
+	printk(KERN_DEBUG "   FC=0x%04x (type=%d:%d) dur=0x%04x seq=0x%04x "
+	       "data_len=%d%s%s\n",
+	       fc, WLAN_FC_GET_TYPE(fc) >> 2, WLAN_FC_GET_STYPE(fc) >> 4,
+	       __le16_to_cpu(tx->duration_id), __le16_to_cpu(tx->seq_ctrl),
+	       __le16_to_cpu(tx->data_len),
+	       fc & IEEE80211_FCTL_TODS ? " [ToDS]" : "",
+	       fc & IEEE80211_FCTL_FROMDS ? " [FromDS]" : "");
+
+	printk(KERN_DEBUG "   A1=" MACSTR " A2=" MACSTR " A3=" MACSTR " A4="
+	       MACSTR "\n",
+	       MAC2STR(tx->addr1), MAC2STR(tx->addr2), MAC2STR(tx->addr3),
+	       MAC2STR(tx->addr4));
+
+	printk(KERN_DEBUG "   dst=" MACSTR " src=" MACSTR " len=%d\n",
+	       MAC2STR(tx->dst_addr), MAC2STR(tx->src_addr),
+	       __be16_to_cpu(tx->len));
+}
+
+
+int hostap_80211_header_parse(struct sk_buff *skb, unsigned char *haddr)
+{
+	memcpy(haddr, skb->mac.raw + 10, ETH_ALEN); /* addr2 */
+	return ETH_ALEN;
+}
+
+
+int hostap_80211_prism_header_parse(struct sk_buff *skb, unsigned char *haddr)
+{
+	if (*(u32 *)skb->mac.raw == LWNG_CAP_DID_BASE) {
+		memcpy(haddr, skb->mac.raw +
+		       sizeof(struct linux_wlan_ng_prism_hdr) + 10,
+		       ETH_ALEN); /* addr2 */
+	} else { /* (*(u32 *)skb->mac.raw == htonl(LWNG_CAPHDR_VERSION)) */
+		memcpy(haddr, skb->mac.raw +
+		       sizeof(struct linux_wlan_ng_cap_hdr) + 10,
+		       ETH_ALEN); /* addr2 */
+	}
+	return ETH_ALEN;
+}
+
+
+int hostap_80211_get_hdrlen(u16 fc)
+{
+	int hdrlen = 24;
+
+	switch (WLAN_FC_GET_TYPE(fc)) {
+	case IEEE80211_FTYPE_DATA:
+		if ((fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS))
+			hdrlen = 30; /* Addr4 */
+		break;
+	case IEEE80211_FTYPE_CTL:
+		switch (WLAN_FC_GET_STYPE(fc)) {
+		case IEEE80211_STYPE_CTS:
+		case IEEE80211_STYPE_ACK:
+			hdrlen = 10;
+			break;
+		default:
+			hdrlen = 16;
+			break;
+		}
+		break;
+	}
+
+	return hdrlen;
+}
+
+
+struct net_device_stats *hostap_get_stats(struct net_device *dev)
+{
+	struct hostap_interface *iface;
+	iface = netdev_priv(dev);
+	return &iface->stats;
+}
+
+
+static int prism2_close(struct net_device *dev)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+
+	PDEBUG(DEBUG_FLOW, "%s: prism2_close\n", dev->name);
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	if (dev == local->ddev) {
+		prism2_sta_deauth(local, WLAN_REASON_DEAUTH_LEAVING);
+	}
+#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
+	if (!local->hostapd && dev == local->dev &&
+	    (!local->func->card_present || local->func->card_present(local)) &&
+	    local->hw_ready && local->ap && local->iw_mode == IW_MODE_MASTER)
+		hostap_deauth_all_stas(dev, local->ap, 1);
+#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
+
+	if (local->func->dev_close && local->func->dev_close(local))
+		return 0;
+
+	if (dev == local->dev) {
+		local->func->hw_shutdown(dev, HOSTAP_HW_ENABLE_CMDCOMPL);
+	}
+
+	if (netif_running(dev)) {
+		netif_stop_queue(dev);
+		netif_device_detach(dev);
+	}
+
+	flush_scheduled_work();
+
+	module_put(local->hw_module);
+
+	local->num_dev_open--;
+
+	if (dev != local->dev && local->dev->flags & IFF_UP &&
+	    local->master_dev_auto_open && local->num_dev_open == 1) {
+		/* Close master radio interface automatically if it was also
+		 * opened automatically and we are now closing the last
+		 * remaining non-master device. */
+		dev_close(local->dev);
+	}
+
+	return 0;
+}
+
+
+static int prism2_open(struct net_device *dev)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+
+	PDEBUG(DEBUG_FLOW, "%s: prism2_open\n", dev->name);
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	if (local->no_pri) {
+		printk(KERN_DEBUG "%s: could not set interface UP - no PRI "
+		       "f/w\n", dev->name);
+		return 1;
+	}
+
+	if ((local->func->card_present && !local->func->card_present(local)) ||
+	    local->hw_downloading)
+		return -ENODEV;
+
+	if (local->func->dev_open && local->func->dev_open(local))
+		return 1;
+
+	if (!try_module_get(local->hw_module))
+		return -ENODEV;
+	local->num_dev_open++;
+
+	if (!local->dev_enabled && local->func->hw_enable(dev, 1)) {
+		printk(KERN_WARNING "%s: could not enable MAC port\n",
+		       dev->name);
+		prism2_close(dev);
+		return 1;
+	}
+	if (!local->dev_enabled)
+		prism2_callback(local, PRISM2_CALLBACK_ENABLE);
+	local->dev_enabled = 1;
+
+	if (dev != local->dev && !(local->dev->flags & IFF_UP)) {
+		/* Master radio interface is needed for all operation, so open
+		 * it automatically when any virtual net_device is opened. */
+		local->master_dev_auto_open = 1;
+		dev_open(local->dev);
+	}
+
+	netif_device_attach(dev);
+	netif_start_queue(dev);
+
+	return 0;
+}
+
+
+static int prism2_set_mac_address(struct net_device *dev, void *p)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	struct list_head *ptr;
+	struct sockaddr *addr = p;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	if (local->func->set_rid(dev, HFA384X_RID_CNFOWNMACADDR, addr->sa_data,
+				 ETH_ALEN) < 0 || local->func->reset_port(dev))
+		return -EINVAL;
+
+	read_lock_bh(&local->iface_lock);
+	list_for_each(ptr, &local->hostap_interfaces) {
+		iface = list_entry(ptr, struct hostap_interface, list);
+		memcpy(iface->dev->dev_addr, addr->sa_data, ETH_ALEN);
+	}
+	memcpy(local->dev->dev_addr, addr->sa_data, ETH_ALEN);
+	read_unlock_bh(&local->iface_lock);
+
+	return 0;
+}
+
+
+/* TODO: to be further implemented as soon as Prism2 fully supports
+ *       GroupAddresses and correct documentation is available */
+void hostap_set_multicast_list_queue(void *data)
+{
+	struct net_device *dev = (struct net_device *) data;
+	struct hostap_interface *iface;
+	local_info_t *local;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+	if (hostap_set_word(dev, HFA384X_RID_PROMISCUOUSMODE,
+			    local->is_promisc)) {
+		printk(KERN_INFO "%s: %sabling promiscuous mode failed\n",
+		       dev->name, local->is_promisc ? "en" : "dis");
+	}
+}
+
+
+static void hostap_set_multicast_list(struct net_device *dev)
+{
+#if 0
+	/* FIX: promiscuous mode seems to be causing a lot of problems with
+	 * some station firmware versions (FCSErr frames, invalid MACPort, etc.
+	 * corrupted incoming frames). This code is now commented out while the
+	 * problems are investigated. */
+	struct hostap_interface *iface;
+	local_info_t *local;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+	if ((dev->flags & IFF_ALLMULTI) || (dev->flags & IFF_PROMISC)) {
+		local->is_promisc = 1;
+	} else {
+		local->is_promisc = 0;
+	}
+
+	schedule_work(&local->set_multicast_list_queue);
+#endif
+}
+
+
+static int prism2_change_mtu(struct net_device *dev, int new_mtu)
+{
+	if (new_mtu < PRISM2_MIN_MTU || new_mtu > PRISM2_MAX_MTU)
+		return -EINVAL;
+
+	dev->mtu = new_mtu;
+	return 0;
+}
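
With the PRISM2_MIN_MTU and PRISM2_MAX_MTU definitions near the top of the file, this accepts MTUs from 256 up to 2290 octets (2304 minus 6 octets of LLC and 8 octets of WEP overhead).
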
+
+
+static void prism2_tx_timeout(struct net_device *dev)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	struct hfa384x_regs regs;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	printk(KERN_WARNING "%s Tx timed out! Resetting card\n", dev->name);
+	netif_stop_queue(local->dev);
+
+	local->func->read_regs(dev, &regs);
+	printk(KERN_DEBUG "%s: CMD=%04x EVSTAT=%04x "
+	       "OFFSET0=%04x OFFSET1=%04x SWSUPPORT0=%04x\n",
+	       dev->name, regs.cmd, regs.evstat, regs.offset0, regs.offset1,
+	       regs.swsupport0);
+
+	local->func->schedule_reset(local);
+}
+
+
+void hostap_setup_dev(struct net_device *dev, local_info_t *local,
+		      int main_dev)
+{
+	struct hostap_interface *iface;
+
+	iface = netdev_priv(dev);
+	ether_setup(dev);
+
+	/* kernel callbacks */
+	dev->get_stats = hostap_get_stats;
+	if (iface) {
+		/* Currently, we point to the proper spy_data only on
+		 * the main_dev. This could be fixed. Jean II */
+		iface->wireless_data.spy_data = &iface->spy_data;
+		dev->wireless_data = &iface->wireless_data;
+	}
+	dev->wireless_handlers =
+		(struct iw_handler_def *) &hostap_iw_handler_def;
+	dev->do_ioctl = hostap_ioctl;
+	dev->open = prism2_open;
+	dev->stop = prism2_close;
+	dev->hard_start_xmit = hostap_data_start_xmit;
+	dev->set_mac_address = prism2_set_mac_address;
+	dev->set_multicast_list = hostap_set_multicast_list;
+	dev->change_mtu = prism2_change_mtu;
+	dev->tx_timeout = prism2_tx_timeout;
+	dev->watchdog_timeo = TX_TIMEOUT;
+
+	dev->mtu = local->mtu;
+	if (!main_dev) {
+		/* use main radio device queue */
+		dev->tx_queue_len = 0;
+	}
+
+	SET_ETHTOOL_OPS(dev, &prism2_ethtool_ops);
+
+	netif_stop_queue(dev);
+}
+
+
+static int hostap_enable_hostapd(local_info_t *local, int rtnl_locked)
+{
+	struct net_device *dev = local->dev;
+
+	if (local->apdev)
+		return -EEXIST;
+
+	printk(KERN_DEBUG "%s: enabling hostapd mode\n", dev->name);
+
+	local->apdev = hostap_add_interface(local, HOSTAP_INTERFACE_AP,
+					    rtnl_locked, local->ddev->name,
+					    "ap");
+	if (local->apdev == NULL)
+		return -ENOMEM;
+
+	local->apdev->hard_start_xmit = hostap_mgmt_start_xmit;
+	local->apdev->type = ARPHRD_IEEE80211;
+	local->apdev->hard_header_parse = hostap_80211_header_parse;
+
+	return 0;
+}
+
+
+static int hostap_disable_hostapd(local_info_t *local, int rtnl_locked)
+{
+	struct net_device *dev = local->dev;
+
+	printk(KERN_DEBUG "%s: disabling hostapd mode\n", dev->name);
+
+	hostap_remove_interface(local->apdev, rtnl_locked, 1);
+	local->apdev = NULL;
+
+	return 0;
+}
+
+
+static int hostap_enable_hostapd_sta(local_info_t *local, int rtnl_locked)
+{
+	struct net_device *dev = local->dev;
+
+	if (local->stadev)
+		return -EEXIST;
+
+	printk(KERN_DEBUG "%s: enabling hostapd STA mode\n", dev->name);
+
+	local->stadev = hostap_add_interface(local, HOSTAP_INTERFACE_STA,
+					     rtnl_locked, local->ddev->name,
+					     "sta");
+	if (local->stadev == NULL)
+		return -ENOMEM;
+
+	return 0;
+}
+
+
+static int hostap_disable_hostapd_sta(local_info_t *local, int rtnl_locked)
+{
+	struct net_device *dev = local->dev;
+
+	printk(KERN_DEBUG "%s: disabling hostapd STA mode\n", dev->name);
+
+	hostap_remove_interface(local->stadev, rtnl_locked, 1);
+	local->stadev = NULL;
+
+	return 0;
+}
+
+
+int hostap_set_hostapd(local_info_t *local, int val, int rtnl_locked)
+{
+	int ret;
+
+	if (val < 0 || val > 1)
+		return -EINVAL;
+
+	if (local->hostapd == val)
+		return 0;
+
+	if (val) {
+		ret = hostap_enable_hostapd(local, rtnl_locked);
+		if (ret == 0)
+			local->hostapd = 1;
+	} else {
+		local->hostapd = 0;
+		ret = hostap_disable_hostapd(local, rtnl_locked);
+		if (ret != 0)
+			local->hostapd = 1;
+	}
+
+	return ret;
+}
+
+
+int hostap_set_hostapd_sta(local_info_t *local, int val, int rtnl_locked)
+{
+	int ret;
+
+	if (val < 0 || val > 1)
+		return -EINVAL;
+
+	if (local->hostapd_sta == val)
+		return 0;
+
+	if (val) {
+		ret = hostap_enable_hostapd_sta(local, rtnl_locked);
+		if (ret == 0)
+			local->hostapd_sta = 1;
+	} else {
+		local->hostapd_sta = 0;
+		ret = hostap_disable_hostapd_sta(local, rtnl_locked);
+		if (ret != 0)
+			local->hostapd_sta = 1;
+	}
+
+
+	return ret;
+}
+
+
+int prism2_update_comms_qual(struct net_device *dev)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	int ret = 0;
+	struct hfa384x_comms_quality sq;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+	if (!local->sta_fw_ver)
+		ret = -1;
+	else if (local->sta_fw_ver >= PRISM2_FW_VER(1,3,1)) {
+		if (local->func->get_rid(local->dev,
+					 HFA384X_RID_DBMCOMMSQUALITY,
+					 &sq, sizeof(sq), 1) >= 0) {
+			local->comms_qual = (s16) le16_to_cpu(sq.comm_qual);
+			local->avg_signal = (s16) le16_to_cpu(sq.signal_level);
+			local->avg_noise = (s16) le16_to_cpu(sq.noise_level);
+			local->last_comms_qual_update = jiffies;
+		} else
+			ret = -1;
+	} else {
+		if (local->func->get_rid(local->dev, HFA384X_RID_COMMSQUALITY,
+					 &sq, sizeof(sq), 1) >= 0) {
+			local->comms_qual = le16_to_cpu(sq.comm_qual);
+			local->avg_signal = HFA384X_LEVEL_TO_dBm(
+				le16_to_cpu(sq.signal_level));
+			local->avg_noise = HFA384X_LEVEL_TO_dBm(
+				le16_to_cpu(sq.noise_level));
+			local->last_comms_qual_update = jiffies;
+		} else
+			ret = -1;
+	}
+
+	return ret;
+}
+
+
+int prism2_sta_send_mgmt(local_info_t *local, u8 *dst, u16 stype,
+			 u8 *body, size_t bodylen)
+{
+	struct sk_buff *skb;
+	struct hostap_ieee80211_mgmt *mgmt;
+	struct hostap_skb_tx_data *meta;
+	struct net_device *dev = local->dev;
+
+	skb = dev_alloc_skb(IEEE80211_MGMT_HDR_LEN + bodylen);
+	if (skb == NULL)
+		return -ENOMEM;
+
+	mgmt = (struct hostap_ieee80211_mgmt *)
+		skb_put(skb, IEEE80211_MGMT_HDR_LEN);
+	memset(mgmt, 0, IEEE80211_MGMT_HDR_LEN);
+	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype);
+	memcpy(mgmt->da, dst, ETH_ALEN);
+	memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN);
+	memcpy(mgmt->bssid, dst, ETH_ALEN);
+	if (body)
+		memcpy(skb_put(skb, bodylen), body, bodylen);
+
+	meta = (struct hostap_skb_tx_data *) skb->cb;
+	memset(meta, 0, sizeof(*meta));
+	meta->magic = HOSTAP_SKB_TX_DATA_MAGIC;
+	meta->iface = netdev_priv(dev);
+
+	skb->dev = dev;
+	skb->mac.raw = skb->nh.raw = skb->data;
+	dev_queue_xmit(skb);
+
+	return 0;
+}
+
+
+int prism2_sta_deauth(local_info_t *local, u16 reason)
+{
+	union iwreq_data wrqu;
+	int ret;
+
+	if (local->iw_mode != IW_MODE_INFRA ||
+	    memcmp(local->bssid, "\x00\x00\x00\x00\x00\x00", ETH_ALEN) == 0 ||
+	    memcmp(local->bssid, "\x44\x44\x44\x44\x44\x44", ETH_ALEN) == 0)
+		return 0;
+
+	reason = cpu_to_le16(reason);
+	ret = prism2_sta_send_mgmt(local, local->bssid, IEEE80211_STYPE_DEAUTH,
+				   (u8 *) &reason, 2);
+	memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
+	wireless_send_event(local->dev, SIOCGIWAP, &wrqu, NULL);
+	return ret;
+}
+
+
+struct proc_dir_entry *hostap_proc;
+
+static int __init hostap_init(void)
+{
+	if (proc_net != NULL) {
+		hostap_proc = proc_mkdir("hostap", proc_net);
+		if (!hostap_proc)
+			printk(KERN_WARNING "Failed to mkdir "
+			       "/proc/net/hostap\n");
+	} else
+		hostap_proc = NULL;
+
+	return 0;
+}
+
+
+static void __exit hostap_exit(void)
+{
+	if (hostap_proc != NULL) {
+		hostap_proc = NULL;
+		remove_proc_entry("hostap", proc_net);
+	}
+}
+
+
+EXPORT_SYMBOL(hostap_set_word);
+EXPORT_SYMBOL(hostap_set_string);
+EXPORT_SYMBOL(hostap_get_porttype);
+EXPORT_SYMBOL(hostap_set_encryption);
+EXPORT_SYMBOL(hostap_set_antsel);
+EXPORT_SYMBOL(hostap_set_roaming);
+EXPORT_SYMBOL(hostap_set_auth_algs);
+EXPORT_SYMBOL(hostap_dump_rx_header);
+EXPORT_SYMBOL(hostap_dump_tx_header);
+EXPORT_SYMBOL(hostap_80211_header_parse);
+EXPORT_SYMBOL(hostap_80211_prism_header_parse);
+EXPORT_SYMBOL(hostap_80211_get_hdrlen);
+EXPORT_SYMBOL(hostap_get_stats);
+EXPORT_SYMBOL(hostap_setup_dev);
+EXPORT_SYMBOL(hostap_proc);
+EXPORT_SYMBOL(hostap_set_multicast_list_queue);
+EXPORT_SYMBOL(hostap_set_hostapd);
+EXPORT_SYMBOL(hostap_set_hostapd_sta);
+EXPORT_SYMBOL(hostap_add_interface);
+EXPORT_SYMBOL(hostap_remove_interface);
+EXPORT_SYMBOL(prism2_update_comms_qual);
+
+module_init(hostap_init);
+module_exit(hostap_exit);
diff --git a/drivers/net/wireless/hostap/hostap.h b/drivers/net/wireless/hostap/hostap.h
new file mode 100644
--- /dev/null
+++ b/drivers/net/wireless/hostap/hostap.h
@@ -0,0 +1,57 @@
+#ifndef HOSTAP_H
+#define HOSTAP_H
+
+/* hostap.c */
+
+extern struct proc_dir_entry *hostap_proc;
+
+u16 hostap_tx_callback_register(local_info_t *local,
+				void (*func)(struct sk_buff *, int ok, void *),
+				void *data);
+int hostap_tx_callback_unregister(local_info_t *local, u16 idx);
+int hostap_set_word(struct net_device *dev, int rid, u16 val);
+int hostap_set_string(struct net_device *dev, int rid, const char *val);
+u16 hostap_get_porttype(local_info_t *local);
+int hostap_set_encryption(local_info_t *local);
+int hostap_set_antsel(local_info_t *local);
+int hostap_set_roaming(local_info_t *local);
+int hostap_set_auth_algs(local_info_t *local);
+void hostap_dump_rx_header(const char *name,
+			   const struct hfa384x_rx_frame *rx);
+void hostap_dump_tx_header(const char *name,
+			   const struct hfa384x_tx_frame *tx);
+int hostap_80211_header_parse(struct sk_buff *skb, unsigned char *haddr);
+int hostap_80211_prism_header_parse(struct sk_buff *skb, unsigned char *haddr);
+int hostap_80211_get_hdrlen(u16 fc);
+struct net_device_stats *hostap_get_stats(struct net_device *dev);
+void hostap_setup_dev(struct net_device *dev, local_info_t *local,
+		      int main_dev);
+void hostap_set_multicast_list_queue(void *data);
+int hostap_set_hostapd(local_info_t *local, int val, int rtnl_locked);
+int hostap_set_hostapd_sta(local_info_t *local, int val, int rtnl_locked);
+void hostap_cleanup(local_info_t *local);
+void hostap_cleanup_handler(void *data);
+struct net_device * hostap_add_interface(struct local_info *local,
+					 int type, int rtnl_locked,
+					 const char *prefix, const char *name);
+void hostap_remove_interface(struct net_device *dev, int rtnl_locked,
+			     int remove_from_list);
+int prism2_update_comms_qual(struct net_device *dev);
+int prism2_sta_send_mgmt(local_info_t *local, u8 *dst, u16 stype,
+			 u8 *body, size_t bodylen);
+int prism2_sta_deauth(local_info_t *local, u16 reason);
+
+
+/* hostap_proc.c */
+
+void hostap_init_proc(local_info_t *local);
+void hostap_remove_proc(local_info_t *local);
+
+
+/* hostap_info.c */
+
+void hostap_info_init(local_info_t *local);
+void hostap_info_process(local_info_t *local, struct sk_buff *skb);
+
+
+#endif /* HOSTAP_H */
diff --git a/drivers/net/wireless/hostap/hostap_80211.h b/drivers/net/wireless/hostap/hostap_80211.h
new file mode 100644
--- /dev/null
+++ b/drivers/net/wireless/hostap/hostap_80211.h
@@ -0,0 +1,96 @@
+#ifndef HOSTAP_80211_H
+#define HOSTAP_80211_H
+
+struct hostap_ieee80211_mgmt {
+	u16 frame_control;
+	u16 duration;
+	u8 da[6];
+	u8 sa[6];
+	u8 bssid[6];
+	u16 seq_ctrl;
+	union {
+		struct {
+			u16 auth_alg;
+			u16 auth_transaction;
+			u16 status_code;
+			/* possibly followed by Challenge text */
+			u8 variable[0];
+		} __attribute__ ((packed)) auth;
+		struct {
+			u16 reason_code;
+		} __attribute__ ((packed)) deauth;
+		struct {
+			u16 capab_info;
+			u16 listen_interval;
+			/* followed by SSID and Supported rates */
+			u8 variable[0];
+		} __attribute__ ((packed)) assoc_req;
+		struct {
+			u16 capab_info;
+			u16 status_code;
+			u16 aid;
+			/* followed by Supported rates */
+			u8 variable[0];
+		} __attribute__ ((packed)) assoc_resp, reassoc_resp;
+		struct {
+			u16 capab_info;
+			u16 listen_interval;
+			u8 current_ap[6];
+			/* followed by SSID and Supported rates */
+			u8 variable[0];
+		} __attribute__ ((packed)) reassoc_req;
+		struct {
+			u16 reason_code;
+		} __attribute__ ((packed)) disassoc;
+		struct {
+		} __attribute__ ((packed)) probe_req;
+		struct {
+			u8 timestamp[8];
+			u16 beacon_int;
+			u16 capab_info;
+			/* followed by some of SSID, Supported rates,
+			 * FH Params, DS Params, CF Params, IBSS Params, TIM */
+			u8 variable[0];
+		} __attribute__ ((packed)) beacon, probe_resp;
+	} u;
+} __attribute__ ((packed));
+
+
+#define IEEE80211_MGMT_HDR_LEN 24
+#define IEEE80211_DATA_HDR3_LEN 24
+#define IEEE80211_DATA_HDR4_LEN 30
+
+
+struct hostap_80211_rx_status {
+	u32 mac_time;
+	u8 signal;
+	u8 noise;
+	u16 rate; /* in 100 kbps */
+};
+
+
+void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
+		     struct hostap_80211_rx_status *rx_stats);
+
+
+/* prism2_rx_80211 'type' argument */
+enum {
+	PRISM2_RX_MONITOR, PRISM2_RX_MGMT, PRISM2_RX_NON_ASSOC,
+	PRISM2_RX_NULLFUNC_ACK
+};
+
+int prism2_rx_80211(struct net_device *dev, struct sk_buff *skb,
+		    struct hostap_80211_rx_status *rx_stats, int type);
+void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
+		     struct hostap_80211_rx_status *rx_stats);
+void hostap_dump_rx_80211(const char *name, struct sk_buff *skb,
+			  struct hostap_80211_rx_status *rx_stats);
+
+void hostap_dump_tx_80211(const char *name, struct sk_buff *skb);
+int hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev);
+int hostap_mgmt_start_xmit(struct sk_buff *skb, struct net_device *dev);
+struct sk_buff * hostap_tx_encrypt(struct sk_buff *skb,
+				   struct ieee80211_crypt_data *crypt);
+int hostap_master_start_xmit(struct sk_buff *skb, struct net_device *dev);
+
+#endif /* HOSTAP_80211_H */
diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c
new file mode 100644
--- /dev/null
+++ b/drivers/net/wireless/hostap/hostap_80211_rx.c
@@ -0,0 +1,1091 @@
+#include <linux/etherdevice.h>
+
+#include "hostap_80211.h"
+#include "hostap.h"
+
+void hostap_dump_rx_80211(const char *name, struct sk_buff *skb,
+			  struct hostap_80211_rx_status *rx_stats)
+{
+	struct ieee80211_hdr *hdr;
+	u16 fc;
+
+	hdr = (struct ieee80211_hdr *) skb->data;
+
+	printk(KERN_DEBUG "%s: RX signal=%d noise=%d rate=%d len=%d "
+	       "jiffies=%ld\n",
+	       name, rx_stats->signal, rx_stats->noise, rx_stats->rate,
+	       skb->len, jiffies);
+
+	if (skb->len < 2)
+		return;
+
+	fc = le16_to_cpu(hdr->frame_ctl);
+	printk(KERN_DEBUG "   FC=0x%04x (type=%d:%d)%s%s",
+	       fc, WLAN_FC_GET_TYPE(fc) >> 2, WLAN_FC_GET_STYPE(fc) >> 4,
+	       fc & IEEE80211_FCTL_TODS ? " [ToDS]" : "",
+	       fc & IEEE80211_FCTL_FROMDS ? " [FromDS]" : "");
+
+	if (skb->len < IEEE80211_DATA_HDR3_LEN) {
+		printk("\n");
+		return;
+	}
+
+	printk(" dur=0x%04x seq=0x%04x\n", le16_to_cpu(hdr->duration_id),
+	       le16_to_cpu(hdr->seq_ctl));
+
+	printk(KERN_DEBUG "   A1=" MACSTR " A2=" MACSTR " A3=" MACSTR,
+	       MAC2STR(hdr->addr1), MAC2STR(hdr->addr2), MAC2STR(hdr->addr3));
+	if (skb->len >= 30)
+		printk(" A4=" MACSTR, MAC2STR(hdr->addr4));
+	printk("\n");
+}
+
+
+/* Send RX frame to netif with 802.11 (and possibly a prism) header.
+ * Called from hardware or software IRQ context. */
+int prism2_rx_80211(struct net_device *dev, struct sk_buff *skb,
+		    struct hostap_80211_rx_status *rx_stats, int type)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	int hdrlen, phdrlen, head_need, tail_need;
+	u16 fc;
+	int prism_header, ret;
+	struct ieee80211_hdr *hdr;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+	dev->last_rx = jiffies;
+
+	if (dev->type == ARPHRD_IEEE80211_PRISM) {
+		if (local->monitor_type == PRISM2_MONITOR_PRISM) {
+			prism_header = 1;
+			phdrlen = sizeof(struct linux_wlan_ng_prism_hdr);
+		} else { /* local->monitor_type == PRISM2_MONITOR_CAPHDR */
+			prism_header = 2;
+			phdrlen = sizeof(struct linux_wlan_ng_cap_hdr);
+		}
+	} else {
+		prism_header = 0;
+		phdrlen = 0;
+	}
+
+	hdr = (struct ieee80211_hdr *) skb->data;
+	fc = le16_to_cpu(hdr->frame_ctl);
+
+	if (type == PRISM2_RX_MGMT && (fc & IEEE80211_FCTL_VERS)) {
+		printk(KERN_DEBUG "%s: dropped management frame with header "
+		       "version %d\n", dev->name, fc & IEEE80211_FCTL_VERS);
+		dev_kfree_skb_any(skb);
+		return 0;
+	}
+
+	hdrlen = hostap_80211_get_hdrlen(fc);
+
+	/* check if there is enough room for extra data; if not, expand skb
+	 * buffer to be large enough for the changes */
+	head_need = phdrlen;
+	tail_need = 0;
+#ifdef PRISM2_ADD_BOGUS_CRC
+	tail_need += 4;
+#endif /* PRISM2_ADD_BOGUS_CRC */
+
+	head_need -= skb_headroom(skb);
+	tail_need -= skb_tailroom(skb);
+
+	if (head_need > 0 || tail_need > 0) {
+		if (pskb_expand_head(skb, head_need > 0 ? head_need : 0,
+				     tail_need > 0 ? tail_need : 0,
+				     GFP_ATOMIC)) {
+			printk(KERN_DEBUG "%s: prism2_rx_80211 failed to "
+			       "reallocate skb buffer\n", dev->name);
+			dev_kfree_skb_any(skb);
+			return 0;
+		}
+	}
+
+	/* We now have an skb with enough head and tail room, so just insert
+	 * the extra data */
+
+#ifdef PRISM2_ADD_BOGUS_CRC
+	memset(skb_put(skb, 4), 0xff, 4); /* Prism2 strips CRC */
+#endif /* PRISM2_ADD_BOGUS_CRC */
+
+	if (prism_header == 1) {
+		struct linux_wlan_ng_prism_hdr *hdr;
+		hdr = (struct linux_wlan_ng_prism_hdr *)
+			skb_push(skb, phdrlen);
+		memset(hdr, 0, phdrlen);
+		hdr->msgcode = LWNG_CAP_DID_BASE;
+		hdr->msglen = sizeof(*hdr);
+		memcpy(hdr->devname, dev->name, sizeof(hdr->devname));
+#define LWNG_SETVAL(f,i,s,l,d) \
+hdr->f.did = LWNG_CAP_DID_BASE | (i << 12); \
+hdr->f.status = s; hdr->f.len = l; hdr->f.data = d
+		LWNG_SETVAL(hosttime, 1, 0, 4, jiffies);
+		LWNG_SETVAL(mactime, 2, 0, 4, rx_stats->mac_time);
+		LWNG_SETVAL(channel, 3, 1 /* no value */, 4, 0);
+		LWNG_SETVAL(rssi, 4, 1 /* no value */, 4, 0);
+		LWNG_SETVAL(sq, 5, 1 /* no value */, 4, 0);
+		LWNG_SETVAL(signal, 6, 0, 4, rx_stats->signal);
+		LWNG_SETVAL(noise, 7, 0, 4, rx_stats->noise);
+		LWNG_SETVAL(rate, 8, 0, 4, rx_stats->rate / 5);
+		LWNG_SETVAL(istx, 9, 0, 4, 0);
+		LWNG_SETVAL(frmlen, 10, 0, 4, skb->len - phdrlen);
+#undef LWNG_SETVAL
+	} else if (prism_header == 2) {
+		struct linux_wlan_ng_cap_hdr *hdr;
+		hdr = (struct linux_wlan_ng_cap_hdr *)
+			skb_push(skb, phdrlen);
+		memset(hdr, 0, phdrlen);
+		hdr->version    = htonl(LWNG_CAPHDR_VERSION);
+		hdr->length     = htonl(phdrlen);
+		hdr->mactime    = __cpu_to_be64(rx_stats->mac_time);
+		hdr->hosttime   = __cpu_to_be64(jiffies);
+		hdr->phytype    = htonl(4); /* dss_dot11_b */
+		hdr->channel    = htonl(local->channel);
+		hdr->datarate   = htonl(rx_stats->rate);
+		hdr->antenna    = htonl(0); /* unknown */
+		hdr->priority   = htonl(0); /* unknown */
+		hdr->ssi_type   = htonl(3); /* raw */
+		hdr->ssi_signal = htonl(rx_stats->signal);
+		hdr->ssi_noise  = htonl(rx_stats->noise);
+		hdr->preamble   = htonl(0); /* unknown */
+		hdr->encoding   = htonl(1); /* cck */
+	}
+
+	ret = skb->len - phdrlen;
+	skb->dev = dev;
+	skb->mac.raw = skb->data;
+	skb_pull(skb, hdrlen);
+	if (prism_header)
+		skb_pull(skb, phdrlen);
+	skb->pkt_type = PACKET_OTHERHOST;
+	skb->protocol = __constant_htons(ETH_P_802_2);
+	memset(skb->cb, 0, sizeof(skb->cb));
+	netif_rx(skb);
+
+	return ret;
+}
+
+
+/* Called only as a tasklet (software IRQ) */
+static void monitor_rx(struct net_device *dev, struct sk_buff *skb,
+		       struct hostap_80211_rx_status *rx_stats)
+{
+	struct net_device_stats *stats;
+	int len;
+
+	len = prism2_rx_80211(dev, skb, rx_stats, PRISM2_RX_MONITOR);
+	stats = hostap_get_stats(dev);
+	stats->rx_packets++;
+	stats->rx_bytes += len;
+}
+
+
+/* Called only as a tasklet (software IRQ) */
+static struct prism2_frag_entry *
+prism2_frag_cache_find(local_info_t *local, unsigned int seq,
+		       unsigned int frag, u8 *src, u8 *dst)
+{
+	struct prism2_frag_entry *entry;
+	int i;
+
+	for (i = 0; i < PRISM2_FRAG_CACHE_LEN; i++) {
+		entry = &local->frag_cache[i];
+		if (entry->skb != NULL &&
+		    time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
+			printk(KERN_DEBUG "%s: expiring fragment cache entry "
+			       "seq=%u last_frag=%u\n",
+			       local->dev->name, entry->seq, entry->last_frag);
+			dev_kfree_skb(entry->skb);
+			entry->skb = NULL;
+		}
+
+		if (entry->skb != NULL && entry->seq == seq &&
+		    (entry->last_frag + 1 == frag || frag == -1) &&
+		    memcmp(entry->src_addr, src, ETH_ALEN) == 0 &&
+		    memcmp(entry->dst_addr, dst, ETH_ALEN) == 0)
+			return entry;
+	}
+
+	return NULL;
+}
+
+
+/* Called only as a tasklet (software IRQ) */
+static struct sk_buff *
+prism2_frag_cache_get(local_info_t *local, struct ieee80211_hdr *hdr)
+{
+	struct sk_buff *skb = NULL;
+	u16 sc;
+	unsigned int frag, seq;
+	struct prism2_frag_entry *entry;
+
+	sc = le16_to_cpu(hdr->seq_ctl);
+	frag = WLAN_GET_SEQ_FRAG(sc);
+	seq = WLAN_GET_SEQ_SEQ(sc) >> 4;
+
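+	/* For the first fragment, allocate a buffer large enough for the whole
+	 * frame and start a new cache entry; later fragments are matched to an
+	 * existing entry and appended to its skb by the caller. */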
+	if (frag == 0) {
+		/* Reserve enough space to fit maximum frame length */
+		skb = dev_alloc_skb(local->dev->mtu +
+				    sizeof(struct ieee80211_hdr) +
+				    8 /* LLC */ +
+				    2 /* alignment */ +
+				    8 /* WEP */ + ETH_ALEN /* WDS */);
+		if (skb == NULL)
+			return NULL;
+
+		entry = &local->frag_cache[local->frag_next_idx];
+		local->frag_next_idx++;
+		if (local->frag_next_idx >= PRISM2_FRAG_CACHE_LEN)
+			local->frag_next_idx = 0;
+
+		if (entry->skb != NULL)
+			dev_kfree_skb(entry->skb);
+
+		entry->first_frag_time = jiffies;
+		entry->seq = seq;
+		entry->last_frag = frag;
+		entry->skb = skb;
+		memcpy(entry->src_addr, hdr->addr2, ETH_ALEN);
+		memcpy(entry->dst_addr, hdr->addr1, ETH_ALEN);
+	} else {
+		/* received a fragment of a frame for which the head fragment
+		 * should have already been received */
+		entry = prism2_frag_cache_find(local, seq, frag, hdr->addr2,
+					       hdr->addr1);
+		if (entry != NULL) {
+			entry->last_frag = frag;
+			skb = entry->skb;
+		}
+	}
+
+	return skb;
+}
+
+
+/* Called only as a tasklet (software IRQ) */
+static int prism2_frag_cache_invalidate(local_info_t *local,
+					struct ieee80211_hdr *hdr)
+{
+	u16 sc;
+	unsigned int seq;
+	struct prism2_frag_entry *entry;
+
+	sc = le16_to_cpu(hdr->seq_ctl);
+	seq = WLAN_GET_SEQ_SEQ(sc) >> 4;
+
+	entry = prism2_frag_cache_find(local, seq, -1, hdr->addr2, hdr->addr1);
+
+	if (entry == NULL) {
+		printk(KERN_DEBUG "%s: could not invalidate fragment cache "
+		       "entry (seq=%u)\n",
+		       local->dev->name, seq);
+		return -1;
+	}
+
+	entry->skb = NULL;
+	return 0;
+}
+
+
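+/* Find a cached BSS entry matching the given BSSID (and SSID, if non-NULL).
+ * A hit is moved to the head of bss_list so the list stays in most recently
+ * used order; __hostap_add_bss() recycles the oldest entry from the tail. */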
+static struct hostap_bss_info *__hostap_get_bss(local_info_t *local, u8 *bssid,
+						u8 *ssid, size_t ssid_len)
+{
+	struct list_head *ptr;
+	struct hostap_bss_info *bss;
+
+	list_for_each(ptr, &local->bss_list) {
+		bss = list_entry(ptr, struct hostap_bss_info, list);
+		if (memcmp(bss->bssid, bssid, ETH_ALEN) == 0 &&
+		    (ssid == NULL ||
+		     (ssid_len == bss->ssid_len &&
+		      memcmp(ssid, bss->ssid, ssid_len) == 0))) {
+			list_move(&bss->list, &local->bss_list);
+			return bss;
+		}
+	}
+
+	return NULL;
+}
+
+
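+/* Add a new BSS entry; if HOSTAP_MAX_BSS_COUNT entries are already in use,
+ * reuse the least recently updated entry from the tail of bss_list. */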
+static struct hostap_bss_info *__hostap_add_bss(local_info_t *local, u8 *bssid,
+						u8 *ssid, size_t ssid_len)
+{
+	struct hostap_bss_info *bss;
+
+	if (local->num_bss_info >= HOSTAP_MAX_BSS_COUNT) {
+		bss = list_entry(local->bss_list.prev,
+				 struct hostap_bss_info, list);
+		list_del(&bss->list);
+		local->num_bss_info--;
+	} else {
+		bss = (struct hostap_bss_info *)
+			kmalloc(sizeof(*bss), GFP_ATOMIC);
+		if (bss == NULL)
+			return NULL;
+	}
+
+	memset(bss, 0, sizeof(*bss));
+	memcpy(bss->bssid, bssid, ETH_ALEN);
+	memcpy(bss->ssid, ssid, ssid_len);
+	bss->ssid_len = ssid_len;
+	local->num_bss_info++;
+	list_add(&bss->list, &local->bss_list);
+	return bss;
+}
+
+
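+/* Drop BSS entries that have not been updated within the last 60 seconds;
+ * the caller holds local->lock. */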
+static void __hostap_expire_bss(local_info_t *local)
+{
+	struct hostap_bss_info *bss;
+
+	while (local->num_bss_info > 0) {
+		bss = list_entry(local->bss_list.prev,
+				 struct hostap_bss_info, list);
+		if (!time_after(jiffies, bss->last_update + 60 * HZ))
+			break;
+
+		list_del(&bss->list);
+		local->num_bss_info--;
+		kfree(bss);
+	}
+}
+
+
+/* Both IEEE 802.11 Beacon and Probe Response frames have similar structure, so
+ * the same routine can be used to parse both of them. */
+static void hostap_rx_sta_beacon(local_info_t *local, struct sk_buff *skb,
+				 int stype)
+{
+	struct hostap_ieee80211_mgmt *mgmt;
+	int left, chan = 0;
+	u8 *pos;
+	u8 *ssid = NULL, *wpa = NULL, *rsn = NULL;
+	size_t ssid_len = 0, wpa_len = 0, rsn_len = 0;
+	struct hostap_bss_info *bss;
+
+	if (skb->len < IEEE80211_MGMT_HDR_LEN + sizeof(mgmt->u.beacon))
+		return;
+
+	mgmt = (struct hostap_ieee80211_mgmt *) skb->data;
+	pos = mgmt->u.beacon.variable;
+	left = skb->len - (pos - skb->data);
+
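+	/* Walk the information elements (id, length, data) and record the SSID,
+	 * WPA and RSN IEs, and the DS parameter set (channel). */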
+	while (left >= 2) {
+		if (2 + pos[1] > left)
+			return; /* parse failed */
+		switch (*pos) {
+		case WLAN_EID_SSID:
+			ssid = pos + 2;
+			ssid_len = pos[1];
+			break;
+		case WLAN_EID_GENERIC:
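+			/* vendor specific IE; OUI 00:50:f2 type 1 is the WPA IE */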
+			if (pos[1] >= 4 &&
+			    pos[2] == 0x00 && pos[3] == 0x50 &&
+			    pos[4] == 0xf2 && pos[5] == 1) {
+				wpa = pos;
+				wpa_len = pos[1] + 2;
+			}
+			break;
+		case WLAN_EID_RSN:
+			rsn = pos;
+			rsn_len = pos[1] + 2;
+			break;
+		case WLAN_EID_DS_PARAMS:
+			if (pos[1] >= 1)
+				chan = pos[2];
+			break;
+		}
+		left -= 2 + pos[1];
+		pos += 2 + pos[1];
+	}
+
+	if (wpa_len > MAX_WPA_IE_LEN)
+		wpa_len = MAX_WPA_IE_LEN;
+	if (rsn_len > MAX_WPA_IE_LEN)
+		rsn_len = MAX_WPA_IE_LEN;
+	if (ssid_len > sizeof(bss->ssid))
+		ssid_len = sizeof(bss->ssid);
+
+	spin_lock(&local->lock);
+	bss = __hostap_get_bss(local, mgmt->bssid, ssid, ssid_len);
+	if (bss == NULL)
+		bss = __hostap_add_bss(local, mgmt->bssid, ssid, ssid_len);
+	if (bss) {
+		bss->last_update = jiffies;
+		bss->count++;
+		bss->capab_info = le16_to_cpu(mgmt->u.beacon.capab_info);
+		if (wpa) {
+			memcpy(bss->wpa_ie, wpa, wpa_len);
+			bss->wpa_ie_len = wpa_len;
+		} else
+			bss->wpa_ie_len = 0;
+		if (rsn) {
+			memcpy(bss->rsn_ie, rsn, rsn_len);
+			bss->rsn_ie_len = rsn_len;
+		} else
+			bss->rsn_ie_len = 0;
+		bss->chan = chan;
+	}
+	__hostap_expire_bss(local);
+	spin_unlock(&local->lock);
+}
+
+
+static inline int
+hostap_rx_frame_mgmt(local_info_t *local, struct sk_buff *skb,
+		     struct hostap_80211_rx_status *rx_stats, u16 type,
+		     u16 stype)
+{
+	if (local->iw_mode == IW_MODE_MASTER) {
+		hostap_update_sta_ps(local, (struct ieee80211_hdr *)
+				     skb->data);
+	}
+
+	if (local->hostapd && type == IEEE80211_FTYPE_MGMT) {
+		if (stype == IEEE80211_STYPE_BEACON &&
+		    local->iw_mode == IW_MODE_MASTER) {
+			struct sk_buff *skb2;
+			/* Process beacon frames also in kernel driver to
+			 * update STA(AP) table statistics */
+			skb2 = skb_clone(skb, GFP_ATOMIC);
+			if (skb2)
+				hostap_rx(skb2->dev, skb2, rx_stats);
+		}
+
+		/* send management frames to the user space daemon for
+		 * processing */
+		local->apdevstats.rx_packets++;
+		local->apdevstats.rx_bytes += skb->len;
+		if (local->apdev == NULL)
+			return -1;
+		prism2_rx_80211(local->apdev, skb, rx_stats, PRISM2_RX_MGMT);
+		return 0;
+	}
+
+	if (local->iw_mode == IW_MODE_MASTER) {
+		if (type != IEEE80211_FTYPE_MGMT &&
+		    type != IEEE80211_FTYPE_CTL) {
+			printk(KERN_DEBUG "%s: unknown management frame "
+			       "(type=0x%02x, stype=0x%02x) dropped\n",
+			       skb->dev->name, type >> 2, stype >> 4);
+			return -1;
+		}
+
+		hostap_rx(skb->dev, skb, rx_stats);
+		return 0;
+	} else if (type == IEEE80211_FTYPE_MGMT &&
+		   (stype == IEEE80211_STYPE_BEACON ||
+		    stype == IEEE80211_STYPE_PROBE_RESP)) {
+		hostap_rx_sta_beacon(local, skb, stype);
+		return -1;
+	} else if (type == IEEE80211_FTYPE_MGMT &&
+		   (stype == IEEE80211_STYPE_ASSOC_RESP ||
+		    stype == IEEE80211_STYPE_REASSOC_RESP)) {
+		/* Ignore (Re)AssocResp silently since these are not currently
+		 * needed but are still received when WPA/RSN mode is enabled.
+		 */
+		return -1;
+	} else {
+		printk(KERN_DEBUG "%s: hostap_rx_frame_mgmt: dropped unhandled"
+		       " management frame in non-Host AP mode (type=%d:%d)\n",
+		       skb->dev->name, type >> 2, stype >> 4);
+		return -1;
+	}
+}
+
+
+/* Called only as a tasklet (software IRQ) */
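+/* Map a transmitter address to a registered WDS interface, if any, and
+ * return its net_device (or NULL when no WDS link matches). */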
+static inline struct net_device *prism2_rx_get_wds(local_info_t *local,
+						   u8 *addr)
+{
+	struct hostap_interface *iface = NULL;
+	struct list_head *ptr;
+
+	read_lock_bh(&local->iface_lock);
+	list_for_each(ptr, &local->hostap_interfaces) {
+		iface = list_entry(ptr, struct hostap_interface, list);
+		if (iface->type == HOSTAP_INTERFACE_WDS &&
+		    memcmp(iface->u.wds.remote_addr, addr, ETH_ALEN) == 0)
+			break;
+		iface = NULL;
+	}
+	read_unlock_bh(&local->iface_lock);
+
+	return iface ? iface->dev : NULL;
+}
+
+
+static inline int
+hostap_rx_frame_wds(local_info_t *local, struct ieee80211_hdr *hdr,
+		    u16 fc, struct net_device **wds)
+{
+	/* FIX: is this really supposed to accept WDS frames only in Master
+	 * mode? What about Repeater or Managed with WDS frames? */
+	if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) !=
+	    (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS) &&
+	    (local->iw_mode != IW_MODE_MASTER || !(fc & IEEE80211_FCTL_TODS)))
+		return 0; /* not a WDS frame */
+
+	/* Possible WDS frame: either IEEE 802.11 compliant (if FromDS)
+	 * or our own non-standard frame with the 4th address after the payload */
+	if (memcmp(hdr->addr1, local->dev->dev_addr, ETH_ALEN) != 0 &&
+	    (hdr->addr1[0] != 0xff || hdr->addr1[1] != 0xff ||
+	     hdr->addr1[2] != 0xff || hdr->addr1[3] != 0xff ||
+	     hdr->addr1[4] != 0xff || hdr->addr1[5] != 0xff)) {
+		/* RA (or BSSID) is not ours - drop */
+		PDEBUG(DEBUG_EXTRA, "%s: received WDS frame with "
+		       "not own or broadcast %s=" MACSTR "\n",
+		       local->dev->name,
+		       fc & IEEE80211_FCTL_FROMDS ? "RA" : "BSSID",
+		       MAC2STR(hdr->addr1));
+		return -1;
+	}
+
+	/* check if the frame came from a registered WDS connection */
+	*wds = prism2_rx_get_wds(local, hdr->addr2);
+	if (*wds == NULL && fc & IEEE80211_FCTL_FROMDS &&
+	    (local->iw_mode != IW_MODE_INFRA ||
+	     !(local->wds_type & HOSTAP_WDS_AP_CLIENT) ||
+	     memcmp(hdr->addr2, local->bssid, ETH_ALEN) != 0)) {
+		/* require that WDS link has been registered with TA or the
+		 * frame is from current AP when using 'AP client mode' */
+		PDEBUG(DEBUG_EXTRA, "%s: received WDS[4 addr] frame "
+		       "from unknown TA=" MACSTR "\n",
+		       local->dev->name, MAC2STR(hdr->addr2));
+		if (local->ap && local->ap->autom_ap_wds)
+			hostap_wds_link_oper(local, hdr->addr2, WDS_ADD);
+		return -1;
+	}
+
+	if (*wds && !(fc & IEEE80211_FCTL_FROMDS) && local->ap &&
+	    hostap_is_sta_assoc(local->ap, hdr->addr2)) {
+		/* STA is actually associated with us even though it has a
+		 * registered WDS link. Assume it is in 'AP client' mode.
+		 * Since this is a 3-addr frame, assume it is not a (bogus) WDS
+		 * frame and process it like any normal ToDS frame from an
+		 * associated STA. */
+		*wds = NULL;
+	}
+
+	return 0;
+}
+
+
+static int hostap_is_eapol_frame(local_info_t *local, struct sk_buff *skb)
+{
+	struct net_device *dev = local->dev;
+	u16 fc, ethertype;
+	struct ieee80211_hdr *hdr;
+	u8 *pos;
+
+	if (skb->len < 24)
+		return 0;
+
+	hdr = (struct ieee80211_hdr *) skb->data;
+	fc = le16_to_cpu(hdr->frame_ctl);
+
+	/* check that the frame is unicast frame to us */
+	if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
+	    IEEE80211_FCTL_TODS &&
+	    memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0 &&
+	    memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN) == 0) {
+		/* ToDS frame with own addr BSSID and DA */
+	} else if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
+		   IEEE80211_FCTL_FROMDS &&
+		   memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0) {
+		/* FromDS frame with own addr as DA */
+	} else
+		return 0;
+
+	if (skb->len < 24 + 8)
+		return 0;
+
+	/* check for port access entity Ethernet type */
+	pos = skb->data + 24;
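+	/* pos[0..5] is the LLC/SNAP header, pos[6..7] the EtherType */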
+	ethertype = (pos[6] << 8) | pos[7];
+	if (ethertype == ETH_P_PAE)
+		return 1;
+
+	return 0;
+}
+
+
+/* Called only as a tasklet (software IRQ) */
+static inline int
+hostap_rx_frame_decrypt(local_info_t *local, struct sk_buff *skb,
+			struct ieee80211_crypt_data *crypt)
+{
+	struct ieee80211_hdr *hdr;
+	int res, hdrlen;
+
+	if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL)
+		return 0;
+
+	hdr = (struct ieee80211_hdr *) skb->data;
+	hdrlen = hostap_80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
+
+	if (local->tkip_countermeasures &&
+	    strcmp(crypt->ops->name, "TKIP") == 0) {
+		if (net_ratelimit()) {
+			printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
+			       "received packet from " MACSTR "\n",
+			       local->dev->name, MAC2STR(hdr->addr2));
+		}
+		return -1;
+	}
+
+	atomic_inc(&crypt->refcnt);
+	res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv);
+	atomic_dec(&crypt->refcnt);
+	if (res < 0) {
+		printk(KERN_DEBUG "%s: decryption failed (SA=" MACSTR
+		       ") res=%d\n",
+		       local->dev->name, MAC2STR(hdr->addr2), res);
+		local->comm_tallies.rx_discards_wep_undecryptable++;
+		return -1;
+	}
+
+	return res;
+}
+
+
+/* Called only as a tasklet (software IRQ) */
+static inline int
+hostap_rx_frame_decrypt_msdu(local_info_t *local, struct sk_buff *skb,
+			     int keyidx, struct ieee80211_crypt_data *crypt)
+{
+	struct ieee80211_hdr *hdr;
+	int res, hdrlen;
+
+	if (crypt == NULL || crypt->ops->decrypt_msdu == NULL)
+		return 0;
+
+	hdr = (struct ieee80211_hdr *) skb->data;
+	hdrlen = hostap_80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
+
+	atomic_inc(&crypt->refcnt);
+	res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv);
+	atomic_dec(&crypt->refcnt);
+	if (res < 0) {
+		printk(KERN_DEBUG "%s: MSDU decryption/MIC verification failed"
+		       " (SA=" MACSTR " keyidx=%d)\n",
+		       local->dev->name, MAC2STR(hdr->addr2), keyidx);
+		return -1;
+	}
+
+	return 0;
+}
+
+
+/* All received frames are sent to this function. @skb contains the frame in
+ * IEEE 802.11 format, i.e., in the format it was sent over air.
+ * This function is called only as a tasklet (software IRQ). */
+void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb,
+		     struct hostap_80211_rx_status *rx_stats)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	struct ieee80211_hdr *hdr;
+	size_t hdrlen;
+	u16 fc, type, stype, sc;
+	struct net_device *wds = NULL;
+	struct net_device_stats *stats;
+	unsigned int frag;
+	u8 *payload;
+	struct sk_buff *skb2 = NULL;
+	u16 ethertype;
+	int frame_authorized = 0;
+	int from_assoc_ap = 0;
+	u8 dst[ETH_ALEN];
+	u8 src[ETH_ALEN];
+	struct ieee80211_crypt_data *crypt = NULL;
+	void *sta = NULL;
+	int keyidx = 0;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+	iface->stats.rx_packets++;
+	iface->stats.rx_bytes += skb->len;
+
+	/* dev is the master radio device; change this to be the default
+	 * virtual interface (this may be changed to WDS device below) */
+	dev = local->ddev;
+	iface = netdev_priv(dev);
+
+	hdr = (struct ieee80211_hdr *) skb->data;
+	stats = hostap_get_stats(dev);
+
+	if (skb->len < 10)
+		goto rx_dropped;
+
+	fc = le16_to_cpu(hdr->frame_ctl);
+	type = WLAN_FC_GET_TYPE(fc);
+	stype = WLAN_FC_GET_STYPE(fc);
+	sc = le16_to_cpu(hdr->seq_ctl);
+	frag = WLAN_GET_SEQ_FRAG(sc);
+	hdrlen = hostap_80211_get_hdrlen(fc);
+
+	/* Put this code here so that we avoid duplicating it in all
+	 * Rx paths. - Jean II */
+#ifdef IW_WIRELESS_SPY		/* defined in iw_handler.h */
+	/* If spy monitoring on */
+	if (iface->spy_data.spy_number > 0) {
+		struct iw_quality wstats;
+		wstats.level = rx_stats->signal;
+		wstats.noise = rx_stats->noise;
+		wstats.updated = 6;	/* No qual value */
+		/* Update spy records */
+		wireless_spy_update(dev, hdr->addr2, &wstats);
+	}
+#endif /* IW_WIRELESS_SPY */
+	hostap_update_rx_stats(local->ap, hdr, rx_stats);
+
+	if (local->iw_mode == IW_MODE_MONITOR) {
+		monitor_rx(dev, skb, rx_stats);
+		return;
+	}
+
+	if (local->host_decrypt) {
+		int idx = 0;
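+		/* the key index is in the two most significant bits of the
+		 * fourth IV byte */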
+		if (skb->len >= hdrlen + 3)
+			idx = skb->data[hdrlen + 3] >> 6;
+		crypt = local->crypt[idx];
+		sta = NULL;
+
+		/* Use station specific key to override default keys if the
+		 * receiver address is a unicast address ("individual RA"). If
+		 * bcrx_sta_key parameter is set, station specific key is used
+		 * even with broad/multicast targets (this is against IEEE
+		 * 802.11, but makes it easier to use different keys with
+		 * stations that do not support WEP key mapping). */
+
+		if (!(hdr->addr1[0] & 0x01) || local->bcrx_sta_key)
+			(void) hostap_handle_sta_crypto(local, hdr, &crypt,
+							&sta);
+
+		/* allow NULL decrypt to indicate a station-specific override
+		 * for default encryption */
+		if (crypt && (crypt->ops == NULL ||
+			      crypt->ops->decrypt_mpdu == NULL))
+			crypt = NULL;
+
+		if (!crypt && (fc & IEEE80211_FCTL_WEP)) {
+#if 0
+			/* This seems to be triggered by some (multicast?)
+			 * frames from other than current BSS, so just drop the
+			 * frames silently instead of filling system log with
+			 * these reports. */
+			printk(KERN_DEBUG "%s: WEP decryption failed (not set)"
+			       " (SA=" MACSTR ")\n",
+			       local->dev->name, MAC2STR(hdr->addr2));
+#endif
+			local->comm_tallies.rx_discards_wep_undecryptable++;
+			goto rx_dropped;
+		}
+	}
+
+	if (type != IEEE80211_FTYPE_DATA) {
+		if (type == IEEE80211_FTYPE_MGMT &&
+		    stype == IEEE80211_STYPE_AUTH &&
+		    fc & IEEE80211_FCTL_WEP && local->host_decrypt &&
+		    (keyidx = hostap_rx_frame_decrypt(local, skb, crypt)) < 0)
+		{
+			printk(KERN_DEBUG "%s: failed to decrypt mgmt::auth "
+			       "from " MACSTR "\n", dev->name,
+			       MAC2STR(hdr->addr2));
+			/* TODO: could inform hostapd about this so that it
+			 * could send auth failure report */
+			goto rx_dropped;
+		}
+
+		if (hostap_rx_frame_mgmt(local, skb, rx_stats, type, stype))
+			goto rx_dropped;
+		else
+			goto rx_exit;
+	}
+
+	/* Data frame - extract src/dst addresses */
+	if (skb->len < IEEE80211_DATA_HDR3_LEN)
+		goto rx_dropped;
+
+	switch (fc & (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
+	case IEEE80211_FCTL_FROMDS:
+		memcpy(dst, hdr->addr1, ETH_ALEN);
+		memcpy(src, hdr->addr3, ETH_ALEN);
+		break;
+	case IEEE80211_FCTL_TODS:
+		memcpy(dst, hdr->addr3, ETH_ALEN);
+		memcpy(src, hdr->addr2, ETH_ALEN);
+		break;
+	case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS:
+		if (skb->len < IEEE80211_DATA_HDR4_LEN)
+			goto rx_dropped;
+		memcpy(dst, hdr->addr3, ETH_ALEN);
+		memcpy(src, hdr->addr4, ETH_ALEN);
+		break;
+	case 0:
+		memcpy(dst, hdr->addr1, ETH_ALEN);
+		memcpy(src, hdr->addr2, ETH_ALEN);
+		break;
+	}
+
+	if (hostap_rx_frame_wds(local, hdr, fc, &wds))
+		goto rx_dropped;
+	if (wds) {
+		skb->dev = dev = wds;
+		stats = hostap_get_stats(dev);
+	}
+
+	if (local->iw_mode == IW_MODE_MASTER && !wds &&
+	    (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
+	    IEEE80211_FCTL_FROMDS &&
+	    local->stadev &&
+	    memcmp(hdr->addr2, local->assoc_ap_addr, ETH_ALEN) == 0) {
+		/* Frame from BSSID of the AP for which we are a client */
+		skb->dev = dev = local->stadev;
+		stats = hostap_get_stats(dev);
+		from_assoc_ap = 1;
+	}
+
+	dev->last_rx = jiffies;
+
+	if ((local->iw_mode == IW_MODE_MASTER ||
+	     local->iw_mode == IW_MODE_REPEAT) &&
+	    !from_assoc_ap) {
+		switch (hostap_handle_sta_rx(local, dev, skb, rx_stats,
+					     wds != NULL)) {
+		case AP_RX_CONTINUE_NOT_AUTHORIZED:
+			frame_authorized = 0;
+			break;
+		case AP_RX_CONTINUE:
+			frame_authorized = 1;
+			break;
+		case AP_RX_DROP:
+			goto rx_dropped;
+		case AP_RX_EXIT:
+			goto rx_exit;
+		}
+	}
+
+	/* Nullfunc frames may have PS-bit set, so they must be passed to
+	 * hostap_handle_sta_rx() before being dropped here. */
+	if (stype != IEEE80211_STYPE_DATA &&
+	    stype != IEEE80211_STYPE_DATA_CFACK &&
+	    stype != IEEE80211_STYPE_DATA_CFPOLL &&
+	    stype != IEEE80211_STYPE_DATA_CFACKPOLL) {
+		if (stype != IEEE80211_STYPE_NULLFUNC)
+			printk(KERN_DEBUG "%s: RX: dropped data frame "
+			       "with no data (type=0x%02x, subtype=0x%02x)\n",
+			       dev->name, type >> 2, stype >> 4);
+		goto rx_dropped;
+	}
+
+	/* skb: hdr + (possibly fragmented, possibly encrypted) payload */
+
+	if (local->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
+	    (keyidx = hostap_rx_frame_decrypt(local, skb, crypt)) < 0)
+		goto rx_dropped;
+	hdr = (struct ieee80211_hdr *) skb->data;
+
+	/* skb: hdr + (possibly fragmented) plaintext payload */
+
+	if (local->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
+	    (frag != 0 || (fc & IEEE80211_FCTL_MOREFRAGS))) {
+		int flen;
+		struct sk_buff *frag_skb =
+			prism2_frag_cache_get(local, hdr);
+		if (!frag_skb) {
+			printk(KERN_DEBUG "%s: Rx cannot get skb from "
+			       "fragment cache (morefrag=%d seq=%u frag=%u)\n",
+			       dev->name, (fc & IEEE80211_FCTL_MOREFRAGS) != 0,
+			       WLAN_GET_SEQ_SEQ(sc) >> 4, frag);
+			goto rx_dropped;
+		}
+
+		flen = skb->len;
+		if (frag != 0)
+			flen -= hdrlen;
+
+		if (frag_skb->tail + flen > frag_skb->end) {
+			printk(KERN_WARNING "%s: host decrypted and "
+			       "reassembled frame did not fit skb\n",
+			       dev->name);
+			prism2_frag_cache_invalidate(local, hdr);
+			goto rx_dropped;
+		}
+
+		if (frag == 0) {
+			/* copy first fragment (including full headers) into
+			 * beginning of the fragment cache skb */
+			memcpy(skb_put(frag_skb, flen), skb->data, flen);
+		} else {
+			/* append frame payload to the end of the fragment
+			 * cache skb */
+			memcpy(skb_put(frag_skb, flen), skb->data + hdrlen,
+			       flen);
+		}
+		dev_kfree_skb(skb);
+		skb = NULL;
+
+		if (fc & IEEE80211_FCTL_MOREFRAGS) {
+			/* more fragments expected - leave the skb in fragment
+			 * cache for now; it will be delivered to upper layers
+			 * after all fragments have been received */
+			goto rx_exit;
+		}
+
+		/* this was the last fragment and the frame will be
+		 * delivered, so remove skb from fragment cache */
+		skb = frag_skb;
+		hdr = (struct ieee80211_hdr *) skb->data;
+		prism2_frag_cache_invalidate(local, hdr);
+	}
+
+	/* skb: hdr + (possibly reassembled) full MSDU payload; possibly still
+	 * encrypted/authenticated */
+
+	if (local->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
+	    hostap_rx_frame_decrypt_msdu(local, skb, keyidx, crypt))
+		goto rx_dropped;
+
+	hdr = (struct ieee80211_hdr *) skb->data;
+	if (crypt && !(fc & IEEE80211_FCTL_WEP) && !local->open_wep) {
+		if (local->ieee_802_1x &&
+		    hostap_is_eapol_frame(local, skb)) {
+			/* pass unencrypted EAPOL frames even if encryption is
+			 * configured */
+			PDEBUG(DEBUG_EXTRA2, "%s: RX: IEEE 802.1X - passing "
+			       "unencrypted EAPOL frame\n", local->dev->name);
+		} else {
+			printk(KERN_DEBUG "%s: encryption configured, but RX "
+			       "frame not encrypted (SA=" MACSTR ")\n",
+			       local->dev->name, MAC2STR(hdr->addr2));
+			goto rx_dropped;
+		}
+	}
+
+	if (local->drop_unencrypted && !(fc & IEEE80211_FCTL_WEP) &&
+	    !hostap_is_eapol_frame(local, skb)) {
+		if (net_ratelimit()) {
+			printk(KERN_DEBUG "%s: dropped unencrypted RX data "
+			       "frame from " MACSTR " (drop_unencrypted=1)\n",
+			       dev->name, MAC2STR(hdr->addr2));
+		}
+		goto rx_dropped;
+	}
+
+	/* skb: hdr + (possibly reassembled) full plaintext payload */
+
+	payload = skb->data + hdrlen;
+	ethertype = (payload[6] << 8) | payload[7];
+
+	/* If IEEE 802.1X is used, check whether the port is authorized to send
+	 * the received frame. */
+	if (local->ieee_802_1x && local->iw_mode == IW_MODE_MASTER) {
+		if (ethertype == ETH_P_PAE) {
+			PDEBUG(DEBUG_EXTRA2, "%s: RX: IEEE 802.1X frame\n",
+			       dev->name);
+			if (local->hostapd && local->apdev) {
+				/* Send IEEE 802.1X frames to the user
+				 * space daemon for processing */
+				prism2_rx_80211(local->apdev, skb, rx_stats,
+						PRISM2_RX_MGMT);
+				local->apdevstats.rx_packets++;
+				local->apdevstats.rx_bytes += skb->len;
+				goto rx_exit;
+			}
+		} else if (!frame_authorized) {
+			printk(KERN_DEBUG "%s: dropped frame from "
+			       "unauthorized port (IEEE 802.1X): "
+			       "ethertype=0x%04x\n",
+			       dev->name, ethertype);
+			goto rx_dropped;
+		}
+	}
+
+	/* convert hdr + possible LLC headers into Ethernet header */
+	if (skb->len - hdrlen >= 8 &&
+	    ((memcmp(payload, rfc1042_header, 6) == 0 &&
+	      ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
+	     memcmp(payload, bridge_tunnel_header, 6) == 0)) {
+		/* remove RFC1042 or Bridge-Tunnel encapsulation and
+		 * replace EtherType */
+		skb_pull(skb, hdrlen + 6);
+		memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
+		memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
+	} else {
+		u16 len;
+		/* Leave Ethernet header part of hdr and full payload */
+		skb_pull(skb, hdrlen);
+		len = htons(skb->len);
+		memcpy(skb_push(skb, 2), &len, 2);
+		memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
+		memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
+	}
+
+	if (wds && ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
+		    IEEE80211_FCTL_TODS) &&
+	    skb->len >= ETH_HLEN + ETH_ALEN) {
+		/* Non-standard frame: get addr4 from its bogus location after
+		 * the payload */
+		memcpy(skb->data + ETH_ALEN,
+		       skb->data + skb->len - ETH_ALEN, ETH_ALEN);
+		skb_trim(skb, skb->len - ETH_ALEN);
+	}
+
+	stats->rx_packets++;
+	stats->rx_bytes += skb->len;
+
+	if (local->iw_mode == IW_MODE_MASTER && !wds &&
+	    local->ap->bridge_packets) {
+		if (dst[0] & 0x01) {
+			/* copy multicast frame both to the higher layers and
+			 * to the wireless media */
+			local->ap->bridged_multicast++;
+			skb2 = skb_clone(skb, GFP_ATOMIC);
+			if (skb2 == NULL)
+				printk(KERN_DEBUG "%s: skb_clone failed for "
+				       "multicast frame\n", dev->name);
+		} else if (hostap_is_sta_authorized(local->ap, dst)) {
+			/* send frame directly to the associated STA using
+			 * wireless media and not passing to higher layers */
+			local->ap->bridged_unicast++;
+			skb2 = skb;
+			skb = NULL;
+		}
+	}
+
+	if (skb2 != NULL) {
+		/* send to wireless media */
+		skb2->protocol = __constant_htons(ETH_P_802_3);
+		skb2->mac.raw = skb2->nh.raw = skb2->data;
+		/* skb2->nh.raw = skb2->data + ETH_HLEN; */
+		skb2->dev = dev;
+		dev_queue_xmit(skb2);
+	}
+
+	if (skb) {
+		skb->protocol = eth_type_trans(skb, dev);
+		memset(skb->cb, 0, sizeof(skb->cb));
+		skb->dev = dev;
+		netif_rx(skb);
+	}
+
+ rx_exit:
+	if (sta)
+		hostap_handle_sta_release(sta);
+	return;
+
+ rx_dropped:
+	dev_kfree_skb(skb);
+
+	stats->rx_dropped++;
+	goto rx_exit;
+}
+
+
+EXPORT_SYMBOL(hostap_80211_rx);
diff --git a/drivers/net/wireless/hostap/hostap_80211_tx.c b/drivers/net/wireless/hostap/hostap_80211_tx.c
new file mode 100644
--- /dev/null
+++ b/drivers/net/wireless/hostap/hostap_80211_tx.c
@@ -0,0 +1,524 @@
+void hostap_dump_tx_80211(const char *name, struct sk_buff *skb)
+{
+	struct ieee80211_hdr *hdr;
+	u16 fc;
+
+	hdr = (struct ieee80211_hdr *) skb->data;
+
+	printk(KERN_DEBUG "%s: TX len=%d jiffies=%ld\n",
+	       name, skb->len, jiffies);
+
+	if (skb->len < 2)
+		return;
+
+	fc = le16_to_cpu(hdr->frame_ctl);
+	printk(KERN_DEBUG "   FC=0x%04x (type=%d:%d)%s%s",
+	       fc, WLAN_FC_GET_TYPE(fc) >> 2, WLAN_FC_GET_STYPE(fc) >> 4,
+	       fc & IEEE80211_FCTL_TODS ? " [ToDS]" : "",
+	       fc & IEEE80211_FCTL_FROMDS ? " [FromDS]" : "");
+
+	if (skb->len < IEEE80211_DATA_HDR3_LEN) {
+		printk("\n");
+		return;
+	}
+
+	printk(" dur=0x%04x seq=0x%04x\n", le16_to_cpu(hdr->duration_id),
+	       le16_to_cpu(hdr->seq_ctl));
+
+	printk(KERN_DEBUG "   A1=" MACSTR " A2=" MACSTR " A3=" MACSTR,
+	       MAC2STR(hdr->addr1), MAC2STR(hdr->addr2), MAC2STR(hdr->addr3));
+	if (skb->len >= 30)
+		printk(" A4=" MACSTR, MAC2STR(hdr->addr4));
+	printk("\n");
+}
+
+
+/* hard_start_xmit function for data interfaces (wlan#, wlan#wds#, wlan#sta)
+ * Convert Ethernet header into a suitable IEEE 802.11 header depending on
+ * device configuration. */
+int hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	int need_headroom, need_tailroom = 0;
+	struct ieee80211_hdr hdr;
+	u16 fc, ethertype = 0;
+	enum {
+		WDS_NO = 0, WDS_OWN_FRAME, WDS_COMPLIANT_FRAME
+	} use_wds = WDS_NO;
+	u8 *encaps_data;
+	int hdr_len, encaps_len, skip_header_bytes;
+	int to_assoc_ap = 0;
+	struct hostap_skb_tx_data *meta;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	if (skb->len < ETH_HLEN) {
+		printk(KERN_DEBUG "%s: hostap_data_start_xmit: short skb "
+		       "(len=%d)\n", dev->name, skb->len);
+		kfree_skb(skb);
+		return 0;
+	}
+
+	if (local->ddev != dev) {
+		use_wds = (local->iw_mode == IW_MODE_MASTER &&
+			   !(local->wds_type & HOSTAP_WDS_STANDARD_FRAME)) ?
+			WDS_OWN_FRAME : WDS_COMPLIANT_FRAME;
+		if (dev == local->stadev) {
+			to_assoc_ap = 1;
+			use_wds = WDS_NO;
+		} else if (dev == local->apdev) {
+			printk(KERN_DEBUG "%s: prism2_tx: trying to use "
+			       "AP device with Ethernet net dev\n", dev->name);
+			kfree_skb(skb);
+			return 0;
+		}
+	} else {
+		if (local->iw_mode == IW_MODE_REPEAT) {
+			printk(KERN_DEBUG "%s: prism2_tx: trying to use "
+			       "non-WDS link in Repeater mode\n", dev->name);
+			kfree_skb(skb);
+			return 0;
+		} else if (local->iw_mode == IW_MODE_INFRA &&
+			   (local->wds_type & HOSTAP_WDS_AP_CLIENT) &&
+			   memcmp(skb->data + ETH_ALEN, dev->dev_addr,
+				  ETH_ALEN) != 0) {
+			/* AP client mode: send frames with foreign src addr
+			 * using 4-addr WDS frames */
+			use_wds = WDS_COMPLIANT_FRAME;
+		}
+	}
+
+	/* Incoming skb->data: dst_addr[6], src_addr[6], proto[2], payload
+	 * ==>
+	 * Prism2 TX frame with 802.11 header:
+	 * txdesc (address order depending on used mode; includes dst_addr and
+	 * src_addr), possible encapsulation (RFC1042/Bridge-Tunnel),
+	 * proto[2], payload {, possible addr4[6]} */
+
+	ethertype = (skb->data[12] << 8) | skb->data[13];
+
+	memset(&hdr, 0, sizeof(hdr));
+
+	/* Length of data after IEEE 802.11 header */
+	encaps_data = NULL;
+	encaps_len = 0;
+	skip_header_bytes = ETH_HLEN;
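+	/* AppleTalk AARP and IPX are sent with the Bridge-Tunnel SNAP header;
+	 * other frames with an EtherType (>= 0x600) use RFC 1042 encapsulation;
+	 * frames with an 802.3 length field are left unencapsulated. */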
+	if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) {
+		encaps_data = bridge_tunnel_header;
+		encaps_len = sizeof(bridge_tunnel_header);
+		skip_header_bytes -= 2;
+	} else if (ethertype >= 0x600) {
+		encaps_data = rfc1042_header;
+		encaps_len = sizeof(rfc1042_header);
+		skip_header_bytes -= 2;
+	}
+
+	fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA;
+	hdr_len = IEEE80211_DATA_HDR3_LEN;
+
+	if (use_wds != WDS_NO) {
+		/* Note! Prism2 station firmware has problems with sending real
+		 * 802.11 frames with four addresses; until these problems can
+		 * be fixed or worked around, 4-addr frames needed for WDS are
+		 * using incompatible format: FromDS flag is not set and the
+		 * fourth address is added after the frame payload; it is
+	 * assumed that the receiving station knows how to handle this
+		 * frame format */
+
+		if (use_wds == WDS_COMPLIANT_FRAME) {
+			fc |= IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS;
+			/* From&To DS: Addr1 = RA, Addr2 = TA, Addr3 = DA,
+			 * Addr4 = SA */
+			memcpy(&hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
+			hdr_len += ETH_ALEN;
+		} else {
+			/* bogus 4-addr format to workaround Prism2 station
+			 * f/w bug */
+			fc |= IEEE80211_FCTL_TODS;
+			/* From DS: Addr1 = DA (used as RA),
+			 * Addr2 = BSSID (used as TA), Addr3 = SA (used as DA),
+			 */
+
+			/* SA from skb->data + ETH_ALEN will be added after
+			 * frame payload; use hdr.addr4 as a temporary buffer
+			 */
+			memcpy(&hdr.addr4, skb->data + ETH_ALEN, ETH_ALEN);
+			need_tailroom += ETH_ALEN;
+		}
+
+		/* send broadcast and multicast frames to broadcast RA, if
+		 * configured; otherwise, use unicast RA of the WDS link */
+		if ((local->wds_type & HOSTAP_WDS_BROADCAST_RA) &&
+		    skb->data[0] & 0x01)
+			memset(&hdr.addr1, 0xff, ETH_ALEN);
+		else if (iface->type == HOSTAP_INTERFACE_WDS)
+			memcpy(&hdr.addr1, iface->u.wds.remote_addr,
+			       ETH_ALEN);
+		else
+			memcpy(&hdr.addr1, local->bssid, ETH_ALEN);
+		memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN);
+		memcpy(&hdr.addr3, skb->data, ETH_ALEN);
+	} else if (local->iw_mode == IW_MODE_MASTER && !to_assoc_ap) {
+		fc |= IEEE80211_FCTL_FROMDS;
+		/* From DS: Addr1 = DA, Addr2 = BSSID, Addr3 = SA */
+		memcpy(&hdr.addr1, skb->data, ETH_ALEN);
+		memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN);
+		memcpy(&hdr.addr3, skb->data + ETH_ALEN, ETH_ALEN);
+	} else if (local->iw_mode == IW_MODE_INFRA || to_assoc_ap) {
+		fc |= IEEE80211_FCTL_TODS;
+		/* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */
+		memcpy(&hdr.addr1, to_assoc_ap ?
+		       local->assoc_ap_addr : local->bssid, ETH_ALEN);
+		memcpy(&hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
+		memcpy(&hdr.addr3, skb->data, ETH_ALEN);
+	} else if (local->iw_mode == IW_MODE_ADHOC) {
+		/* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */
+		memcpy(&hdr.addr1, skb->data, ETH_ALEN);
+		memcpy(&hdr.addr2, skb->data + ETH_ALEN, ETH_ALEN);
+		memcpy(&hdr.addr3, local->bssid, ETH_ALEN);
+	}
+
+	hdr.frame_ctl = cpu_to_le16(fc);
+
+	skb_pull(skb, skip_header_bytes);
+	need_headroom = local->func->need_tx_headroom + hdr_len + encaps_len;
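+	/* Make sure there is room for the 802.11 header and encapsulation in
+	 * front of the payload and, for the non-standard WDS format, for the
+	 * trailing addr4; reallocate or copy the skb if needed. */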
+	if (skb_tailroom(skb) < need_tailroom) {
+		skb = skb_unshare(skb, GFP_ATOMIC);
+		if (skb == NULL) {
+			iface->stats.tx_dropped++;
+			return 0;
+		}
+		if (pskb_expand_head(skb, need_headroom, need_tailroom,
+				     GFP_ATOMIC)) {
+			kfree_skb(skb);
+			iface->stats.tx_dropped++;
+			return 0;
+		}
+	} else if (skb_headroom(skb) < need_headroom) {
+		struct sk_buff *tmp = skb;
+		skb = skb_realloc_headroom(skb, need_headroom);
+		kfree_skb(tmp);
+		if (skb == NULL) {
+			iface->stats.tx_dropped++;
+			return 0;
+		}
+	} else {
+		skb = skb_unshare(skb, GFP_ATOMIC);
+		if (skb == NULL) {
+			iface->stats.tx_dropped++;
+			return 0;
+		}
+	}
+
+	if (encaps_data)
+		memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len);
+	memcpy(skb_push(skb, hdr_len), &hdr, hdr_len);
+	if (use_wds == WDS_OWN_FRAME) {
+		memcpy(skb_put(skb, ETH_ALEN), &hdr.addr4, ETH_ALEN);
+	}
+
+	iface->stats.tx_packets++;
+	iface->stats.tx_bytes += skb->len;
+
+	skb->mac.raw = skb->data;
+	meta = (struct hostap_skb_tx_data *) skb->cb;
+	memset(meta, 0, sizeof(*meta));
+	meta->magic = HOSTAP_SKB_TX_DATA_MAGIC;
+	if (use_wds)
+		meta->flags |= HOSTAP_TX_FLAGS_WDS;
+	meta->ethertype = ethertype;
+	meta->iface = iface;
+
+	/* Send IEEE 802.11 encapsulated frame using the master radio device */
+	skb->dev = local->dev;
+	dev_queue_xmit(skb);
+	return 0;
+}
+
+
+/* hard_start_xmit function for hostapd wlan#ap interfaces */
+int hostap_mgmt_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	struct hostap_skb_tx_data *meta;
+	struct ieee80211_hdr *hdr;
+	u16 fc;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	if (skb->len < 10) {
+		printk(KERN_DEBUG "%s: hostap_mgmt_start_xmit: short skb "
+		       "(len=%d)\n", dev->name, skb->len);
+		kfree_skb(skb);
+		return 0;
+	}
+
+	iface->stats.tx_packets++;
+	iface->stats.tx_bytes += skb->len;
+
+	meta = (struct hostap_skb_tx_data *) skb->cb;
+	memset(meta, 0, sizeof(*meta));
+	meta->magic = HOSTAP_SKB_TX_DATA_MAGIC;
+	meta->iface = iface;
+
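+	/* If hostapd sent a data frame with an RFC 1042 header, record its
+	 * EtherType so that e.g. EAPOL frames can be recognized later in the
+	 * TX path. */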
+	if (skb->len >= IEEE80211_DATA_HDR3_LEN + sizeof(rfc1042_header) + 2) {
+		hdr = (struct ieee80211_hdr *) skb->data;
+		fc = le16_to_cpu(hdr->frame_ctl);
+		if (WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA &&
+		    WLAN_FC_GET_STYPE(fc) == IEEE80211_STYPE_DATA) {
+			u8 *pos = &skb->data[IEEE80211_DATA_HDR3_LEN +
+					     sizeof(rfc1042_header)];
+			meta->ethertype = (pos[0] << 8) | pos[1];
+		}
+	}
+
+	/* Send IEEE 802.11 encapsulated frame using the master radio device */
+	skb->dev = local->dev;
+	dev_queue_xmit(skb);
+	return 0;
+}
+
+
+/* Called only from software IRQ */
+struct sk_buff * hostap_tx_encrypt(struct sk_buff *skb,
+				   struct ieee80211_crypt_data *crypt)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	struct ieee80211_hdr *hdr;
+	u16 fc;
+	int hdr_len, res;
+
+	iface = netdev_priv(skb->dev);
+	local = iface->local;
+
+	if (skb->len < IEEE80211_DATA_HDR3_LEN) {
+		kfree_skb(skb);
+		return NULL;
+	}
+
+	if (local->tkip_countermeasures &&
+	    crypt && crypt->ops && strcmp(crypt->ops->name, "TKIP") == 0) {
+		hdr = (struct ieee80211_hdr *) skb->data;
+		if (net_ratelimit()) {
+			printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
+			       "TX packet to " MACSTR "\n",
+			       local->dev->name, MAC2STR(hdr->addr1));
+		}
+		kfree_skb(skb);
+		return NULL;
+	}
+
+	skb = skb_unshare(skb, GFP_ATOMIC);
+	if (skb == NULL)
+		return NULL;
+
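+	/* make room for the algorithm specific prefix (e.g., IV) and postfix
+	 * (e.g., ICV/MIC) added during encryption */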
+	if ((skb_headroom(skb) < crypt->ops->extra_prefix_len ||
+	     skb_tailroom(skb) < crypt->ops->extra_postfix_len) &&
+	    pskb_expand_head(skb, crypt->ops->extra_prefix_len,
+			     crypt->ops->extra_postfix_len, GFP_ATOMIC)) {
+		kfree_skb(skb);
+		return NULL;
+	}
+
+	hdr = (struct ieee80211_hdr *) skb->data;
+	fc = le16_to_cpu(hdr->frame_ctl);
+	hdr_len = hostap_80211_get_hdrlen(fc);
+
+	/* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
+	 * call both MSDU and MPDU encryption functions from here. */
+	atomic_inc(&crypt->refcnt);
+	res = 0;
+	if (crypt->ops->encrypt_msdu)
+		res = crypt->ops->encrypt_msdu(skb, hdr_len, crypt->priv);
+	if (res == 0 && crypt->ops->encrypt_mpdu)
+		res = crypt->ops->encrypt_mpdu(skb, hdr_len, crypt->priv);
+	atomic_dec(&crypt->refcnt);
+	if (res < 0) {
+		kfree_skb(skb);
+		return NULL;
+	}
+
+	return skb;
+}
+
+
+/* hard_start_xmit function for master radio interface wifi#.
+ * AP processing (TX rate control, power save buffering, etc.).
+ * Use hardware TX function to send the frame. */
+int hostap_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	int ret = 1;
+	u16 fc;
+	struct hostap_tx_data tx;
+	ap_tx_ret tx_ret;
+	struct hostap_skb_tx_data *meta;
+	int no_encrypt = 0;
+	struct ieee80211_hdr *hdr;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	tx.skb = skb;
+	tx.sta_ptr = NULL;
+
+	meta = (struct hostap_skb_tx_data *) skb->cb;
+	if (meta->magic != HOSTAP_SKB_TX_DATA_MAGIC) {
+		printk(KERN_DEBUG "%s: invalid skb->cb magic (0x%08x, "
+		       "expected 0x%08x)\n",
+		       dev->name, meta->magic, HOSTAP_SKB_TX_DATA_MAGIC);
+		ret = 0;
+		iface->stats.tx_dropped++;
+		goto fail;
+	}
+
+	if (local->host_encrypt) {
+		/* Set crypt to default algorithm and key; will be replaced in
+		 * AP code if STA has own alg/key */
+		tx.crypt = local->crypt[local->tx_keyidx];
+		tx.host_encrypt = 1;
+	} else {
+		tx.crypt = NULL;
+		tx.host_encrypt = 0;
+	}
+
+	if (skb->len < 24) {
+		printk(KERN_DEBUG "%s: hostap_master_start_xmit: short skb "
+		       "(len=%d)\n", dev->name, skb->len);
+		ret = 0;
+		iface->stats.tx_dropped++;
+		goto fail;
+	}
+
+	/* FIX (?):
+	 * Wi-Fi 802.11b test plan suggests that AP should ignore power save
+	 * bit in authentication and (re)association frames and assume that the
+	 * STA remains awake for the response. */
+	tx_ret = hostap_handle_sta_tx(local, &tx);
+	skb = tx.skb;
+	meta = (struct hostap_skb_tx_data *) skb->cb;
+	hdr = (struct ieee80211_hdr *) skb->data;
+	fc = le16_to_cpu(hdr->frame_ctl);
+	switch (tx_ret) {
+	case AP_TX_CONTINUE:
+		break;
+	case AP_TX_CONTINUE_NOT_AUTHORIZED:
+		if (local->ieee_802_1x &&
+		    WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA &&
+		    meta->ethertype != ETH_P_PAE &&
+		    !(meta->flags & HOSTAP_TX_FLAGS_WDS)) {
+			printk(KERN_DEBUG "%s: dropped frame to unauthorized "
+			       "port (IEEE 802.1X): ethertype=0x%04x\n",
+			       dev->name, meta->ethertype);
+			hostap_dump_tx_80211(dev->name, skb);
+
+			ret = 0; /* drop packet */
+			iface->stats.tx_dropped++;
+			goto fail;
+		}
+		break;
+	case AP_TX_DROP:
+		ret = 0; /* drop packet */
+		iface->stats.tx_dropped++;
+		goto fail;
+	case AP_TX_RETRY:
+		goto fail;
+	case AP_TX_BUFFERED:
+		/* do not free skb here, it will be freed when the
+		 * buffered frame is sent/timed out */
+		ret = 0;
+		goto tx_exit;
+	}
+
+	/* Request TX callback if protocol version is 2 in 802.11 header;
+	 * this version 2 is a special case used between hostapd and kernel
+	 * driver */
+	if (((fc & IEEE80211_FCTL_VERS) == BIT(1)) &&
+	    local->ap && local->ap->tx_callback_idx && meta->tx_cb_idx == 0) {
+		meta->tx_cb_idx = local->ap->tx_callback_idx;
+
+		/* remove special version from the frame header */
+		fc &= ~IEEE80211_FCTL_VERS;
+		hdr->frame_ctl = cpu_to_le16(fc);
+	}
+
+	if (WLAN_FC_GET_TYPE(fc) != IEEE80211_FTYPE_DATA) {
+		no_encrypt = 1;
+		tx.crypt = NULL;
+	}
+
+	if (local->ieee_802_1x && meta->ethertype == ETH_P_PAE && tx.crypt &&
+	    !(fc & IEEE80211_FCTL_VERS)) {
+		no_encrypt = 1;
+		PDEBUG(DEBUG_EXTRA2, "%s: TX: IEEE 802.1X - passing "
+		       "unencrypted EAPOL frame\n", dev->name);
+		tx.crypt = NULL; /* no encryption for IEEE 802.1X frames */
+	}
+
+	if (tx.crypt && (!tx.crypt->ops || !tx.crypt->ops->encrypt_mpdu))
+		tx.crypt = NULL;
+	else if ((tx.crypt || local->crypt[local->tx_keyidx]) && !no_encrypt) {
+		/* Add ISWEP flag both for firmware and host based encryption
+		 */
+		fc |= IEEE80211_FCTL_WEP;
+		hdr->frame_ctl = cpu_to_le16(fc);
+	} else if (local->drop_unencrypted &&
+		   WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA &&
+		   meta->ethertype != ETH_P_PAE) {
+		if (net_ratelimit()) {
+			printk(KERN_DEBUG "%s: dropped unencrypted TX data "
+			       "frame (drop_unencrypted=1)\n", dev->name);
+		}
+		iface->stats.tx_dropped++;
+		ret = 0;
+		goto fail;
+	}
+
+	if (tx.crypt) {
+		skb = hostap_tx_encrypt(skb, tx.crypt);
+		if (skb == NULL) {
+			printk(KERN_DEBUG "%s: TX - encryption failed\n",
+			       dev->name);
+			ret = 0;
+			goto fail;
+		}
+		meta = (struct hostap_skb_tx_data *) skb->cb;
+		if (meta->magic != HOSTAP_SKB_TX_DATA_MAGIC) {
+			printk(KERN_DEBUG "%s: invalid skb->cb magic (0x%08x, "
+			       "expected 0x%08x) after hostap_tx_encrypt\n",
+			       dev->name, meta->magic,
+			       HOSTAP_SKB_TX_DATA_MAGIC);
+			ret = 0;
+			iface->stats.tx_dropped++;
+			goto fail;
+		}
+	}
+
+	if (local->func->tx == NULL || local->func->tx(skb, dev)) {
+		ret = 0;
+		iface->stats.tx_dropped++;
+	} else {
+		ret = 0;
+		iface->stats.tx_packets++;
+		iface->stats.tx_bytes += skb->len;
+	}
+
+ fail:
+	if (!ret && skb)
+		dev_kfree_skb(skb);
+ tx_exit:
+	if (tx.sta_ptr)
+		hostap_handle_sta_release(tx.sta_ptr);
+	return ret;
+}
+
+
+EXPORT_SYMBOL(hostap_dump_tx_80211);
+EXPORT_SYMBOL(hostap_tx_encrypt);
+EXPORT_SYMBOL(hostap_master_start_xmit);
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
new file mode 100644
--- /dev/null
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -0,0 +1,3288 @@
+/*
+ * Intersil Prism2 driver with Host AP (software access point) support
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2005, Jouni Malinen <jkmaline@cc.hut.fi>
+ *
+ * This file is to be included into hostap.c when S/W AP functionality is
+ * compiled.
+ *
+ * AP:  FIX:
+ * - if unicast Class 2 (assoc,reassoc,disassoc) frame received from
+ *   unauthenticated STA, send deauth. frame (8802.11: 5.5)
+ * - if unicast Class 3 (data with to/from DS,deauth,pspoll) frame received
+ *   from authenticated, but unassoc STA, send disassoc frame (8802.11: 5.5)
+ * - if unicast Class 3 received from unauthenticated STA, send deauth. frame
+ *   (8802.11: 5.5)
+ */
+
+static int other_ap_policy[MAX_PARM_DEVICES] = { AP_OTHER_AP_SKIP_ALL,
+						 DEF_INTS };
+module_param_array(other_ap_policy, int, NULL, 0444);
+MODULE_PARM_DESC(other_ap_policy, "Other AP beacon monitoring policy (0-3)");
+
+static int ap_max_inactivity[MAX_PARM_DEVICES] = { AP_MAX_INACTIVITY_SEC,
+						   DEF_INTS };
+module_param_array(ap_max_inactivity, int, NULL, 0444);
+MODULE_PARM_DESC(ap_max_inactivity, "AP timeout (in seconds) for station "
+		 "inactivity");
+
+static int ap_bridge_packets[MAX_PARM_DEVICES] = { 1, DEF_INTS };
+module_param_array(ap_bridge_packets, int, NULL, 0444);
+MODULE_PARM_DESC(ap_bridge_packets, "Bridge packets directly between "
+		 "stations");
+
+static int autom_ap_wds[MAX_PARM_DEVICES] = { 0, DEF_INTS };
+module_param_array(autom_ap_wds, int, NULL, 0444);
+MODULE_PARM_DESC(autom_ap_wds, "Add WDS connections to other APs "
+		 "automatically");
+
+
+static struct sta_info* ap_get_sta(struct ap_data *ap, u8 *sta);
+static void hostap_event_expired_sta(struct net_device *dev,
+				     struct sta_info *sta);
+static void handle_add_proc_queue(void *data);
+
+#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
+static void handle_wds_oper_queue(void *data);
+static void prism2_send_mgmt(struct net_device *dev,
+			     u16 type_subtype, char *body,
+			     int body_len, u8 *addr, u16 tx_cb_idx);
+#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
+
+
+#ifndef PRISM2_NO_PROCFS_DEBUG
+static int ap_debug_proc_read(char *page, char **start, off_t off,
+			      int count, int *eof, void *data)
+{
+	char *p = page;
+	struct ap_data *ap = (struct ap_data *) data;
+
+	if (off != 0) {
+		*eof = 1;
+		return 0;
+	}
+
+	p += sprintf(p, "BridgedUnicastFrames=%u\n", ap->bridged_unicast);
+	p += sprintf(p, "BridgedMulticastFrames=%u\n", ap->bridged_multicast);
+	p += sprintf(p, "max_inactivity=%u\n", ap->max_inactivity / HZ);
+	p += sprintf(p, "bridge_packets=%u\n", ap->bridge_packets);
+	p += sprintf(p, "nullfunc_ack=%u\n", ap->nullfunc_ack);
+	p += sprintf(p, "autom_ap_wds=%u\n", ap->autom_ap_wds);
+	p += sprintf(p, "auth_algs=%u\n", ap->local->auth_algs);
+	p += sprintf(p, "tx_drop_nonassoc=%u\n", ap->tx_drop_nonassoc);
+
+	return (p - page);
+}
+#endif /* PRISM2_NO_PROCFS_DEBUG */
+
+
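+/* STA entries are kept both on ap->sta_list and in a chained hash table
+ * indexed by STA_HASH(addr) for fast lookup by MAC address. */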
+static void ap_sta_hash_add(struct ap_data *ap, struct sta_info *sta)
+{
+	sta->hnext = ap->sta_hash[STA_HASH(sta->addr)];
+	ap->sta_hash[STA_HASH(sta->addr)] = sta;
+}
+
+static void ap_sta_hash_del(struct ap_data *ap, struct sta_info *sta)
+{
+	struct sta_info *s;
+
+	s = ap->sta_hash[STA_HASH(sta->addr)];
+	if (s == NULL) return;
+	if (memcmp(s->addr, sta->addr, ETH_ALEN) == 0) {
+		ap->sta_hash[STA_HASH(sta->addr)] = s->hnext;
+		return;
+	}
+
+	while (s->hnext != NULL && memcmp(s->hnext->addr, sta->addr, ETH_ALEN)
+	       != 0)
+		s = s->hnext;
+	if (s->hnext != NULL)
+		s->hnext = s->hnext->hnext;
+	else
+		printk("AP: could not remove STA " MACSTR " from hash table\n",
+		       MAC2STR(sta->addr));
+}
+
+static void ap_free_sta(struct ap_data *ap, struct sta_info *sta)
+{
+	if (sta->ap && sta->local)
+		hostap_event_expired_sta(sta->local->dev, sta);
+
+	if (ap->proc != NULL) {
+		char name[20];
+		sprintf(name, MACSTR, MAC2STR(sta->addr));
+		remove_proc_entry(name, ap->proc);
+	}
+
+	if (sta->crypt) {
+		sta->crypt->ops->deinit(sta->crypt->priv);
+		kfree(sta->crypt);
+		sta->crypt = NULL;
+	}
+
+	skb_queue_purge(&sta->tx_buf);
+
+	ap->num_sta--;
+#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
+	if (sta->aid > 0)
+		ap->sta_aid[sta->aid - 1] = NULL;
+
+	if (!sta->ap && sta->u.sta.challenge)
+		kfree(sta->u.sta.challenge);
+	del_timer(&sta->timer);
+#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
+
+	kfree(sta);
+}
+
+
+static void hostap_set_tim(local_info_t *local, int aid, int set)
+{
+	if (local->func->set_tim)
+		local->func->set_tim(local->dev, aid, set);
+}
+
+
+static void hostap_event_new_sta(struct net_device *dev, struct sta_info *sta)
+{
+	union iwreq_data wrqu;
+	memset(&wrqu, 0, sizeof(wrqu));
+	memcpy(wrqu.addr.sa_data, sta->addr, ETH_ALEN);
+	wrqu.addr.sa_family = ARPHRD_ETHER;
+	wireless_send_event(dev, IWEVREGISTERED, &wrqu, NULL);
+}
+
+
+static void hostap_event_expired_sta(struct net_device *dev,
+				     struct sta_info *sta)
+{
+	union iwreq_data wrqu;
+	memset(&wrqu, 0, sizeof(wrqu));
+	memcpy(wrqu.addr.sa_data, sta->addr, ETH_ALEN);
+	wrqu.addr.sa_family = ARPHRD_ETHER;
+	wireless_send_event(dev, IWEVEXPIRED, &wrqu, NULL);
+}
+
+
+#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
+
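+/* Per-STA inactivity timer: an idle station is first polled with an empty
+ * data frame (STA_NULLFUNC), then disassociated (STA_DISASSOC), and finally
+ * deauthenticated and removed (STA_DEAUTH) if it remains silent. */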
+static void ap_handle_timer(unsigned long data)
+{
+	struct sta_info *sta = (struct sta_info *) data;
+	local_info_t *local;
+	struct ap_data *ap;
+	unsigned long next_time = 0;
+	int was_assoc;
+
+	if (sta == NULL || sta->local == NULL || sta->local->ap == NULL) {
+		PDEBUG(DEBUG_AP, "ap_handle_timer() called with NULL data\n");
+		return;
+	}
+
+	local = sta->local;
+	ap = local->ap;
+	was_assoc = sta->flags & WLAN_STA_ASSOC;
+
+	if (atomic_read(&sta->users) != 0)
+		next_time = jiffies + HZ;
+	else if ((sta->flags & WLAN_STA_PERM) && !(sta->flags & WLAN_STA_AUTH))
+		next_time = jiffies + ap->max_inactivity;
+
+	if (time_before(jiffies, sta->last_rx + ap->max_inactivity)) {
+		/* station activity detected; reset timeout state */
+		sta->timeout_next = STA_NULLFUNC;
+		next_time = sta->last_rx + ap->max_inactivity;
+	} else if (sta->timeout_next == STA_DISASSOC &&
+		   !(sta->flags & WLAN_STA_PENDING_POLL)) {
+		/* STA ACKed data nullfunc frame poll */
+		sta->timeout_next = STA_NULLFUNC;
+		next_time = jiffies + ap->max_inactivity;
+	}
+
+	if (next_time) {
+		sta->timer.expires = next_time;
+		add_timer(&sta->timer);
+		return;
+	}
+
+	if (sta->ap)
+		sta->timeout_next = STA_DEAUTH;
+
+	if (sta->timeout_next == STA_DEAUTH && !(sta->flags & WLAN_STA_PERM)) {
+		spin_lock(&ap->sta_table_lock);
+		ap_sta_hash_del(ap, sta);
+		list_del(&sta->list);
+		spin_unlock(&ap->sta_table_lock);
+		sta->flags &= ~(WLAN_STA_AUTH | WLAN_STA_ASSOC);
+	} else if (sta->timeout_next == STA_DISASSOC)
+		sta->flags &= ~WLAN_STA_ASSOC;
+
+	if (was_assoc && !(sta->flags & WLAN_STA_ASSOC) && !sta->ap)
+		hostap_event_expired_sta(local->dev, sta);
+
+	if (sta->timeout_next == STA_DEAUTH && sta->aid > 0 &&
+	    !skb_queue_empty(&sta->tx_buf)) {
+		hostap_set_tim(local, sta->aid, 0);
+		sta->flags &= ~WLAN_STA_TIM;
+	}
+
+	if (sta->ap) {
+		if (ap->autom_ap_wds) {
+			PDEBUG(DEBUG_AP, "%s: removing automatic WDS "
+			       "connection to AP " MACSTR "\n",
+			       local->dev->name, MAC2STR(sta->addr));
+			hostap_wds_link_oper(local, sta->addr, WDS_DEL);
+		}
+	} else if (sta->timeout_next == STA_NULLFUNC) {
+		/* send data frame to poll STA and check whether this frame
+		 * is ACKed */
+		/* FIX: IEEE80211_STYPE_NULLFUNC would be more appropriate, but
+		 * it is apparently not retried so TX Exc events are not
+		 * received for it */
+		sta->flags |= WLAN_STA_PENDING_POLL;
+		prism2_send_mgmt(local->dev, IEEE80211_FTYPE_DATA |
+				 IEEE80211_STYPE_DATA, NULL, 0,
+				 sta->addr, ap->tx_callback_poll);
+	} else {
+		int deauth = sta->timeout_next == STA_DEAUTH;
+		u16 resp;
+		PDEBUG(DEBUG_AP, "%s: sending %s info to STA " MACSTR
+		       "(last=%lu, jiffies=%lu)\n",
+		       local->dev->name,
+		       deauth ? "deauthentication" : "disassociation",
+		       MAC2STR(sta->addr), sta->last_rx, jiffies);
+
+		resp = cpu_to_le16(deauth ? WLAN_REASON_PREV_AUTH_NOT_VALID :
+				   WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
+		prism2_send_mgmt(local->dev, IEEE80211_FTYPE_MGMT |
+				 (deauth ? IEEE80211_STYPE_DEAUTH :
+				  IEEE80211_STYPE_DISASSOC),
+				 (char *) &resp, 2, sta->addr, 0);
+	}
+
+	if (sta->timeout_next == STA_DEAUTH) {
+		if (sta->flags & WLAN_STA_PERM) {
+			PDEBUG(DEBUG_AP, "%s: STA " MACSTR " would have been "
+			       "removed, but it has 'perm' flag\n",
+			       local->dev->name, MAC2STR(sta->addr));
+		} else
+			ap_free_sta(ap, sta);
+		return;
+	}
+
+	if (sta->timeout_next == STA_NULLFUNC) {
+		sta->timeout_next = STA_DISASSOC;
+		sta->timer.expires = jiffies + AP_DISASSOC_DELAY;
+	} else {
+		sta->timeout_next = STA_DEAUTH;
+		sta->timer.expires = jiffies + AP_DEAUTH_DELAY;
+	}
+
+	add_timer(&sta->timer);
+}
+
+
+void hostap_deauth_all_stas(struct net_device *dev, struct ap_data *ap,
+			    int resend)
+{
+	u8 addr[ETH_ALEN];
+	u16 resp;
+	int i;
+
+	PDEBUG(DEBUG_AP, "%s: Deauthenticate all stations\n", dev->name);
+	memset(addr, 0xff, ETH_ALEN);
+
+	resp = __constant_cpu_to_le16(WLAN_REASON_PREV_AUTH_NOT_VALID);
+
+	/* deauth message sent; try to resend it a few times; the message is
+	 * broadcast, so it may be delayed until next DTIM; there is not much
+	 * else we can do at this point since the driver is going to be shut
+	 * down */
+	for (i = 0; i < 5; i++) {
+		prism2_send_mgmt(dev, IEEE80211_FTYPE_MGMT |
+				 IEEE80211_STYPE_DEAUTH,
+				 (char *) &resp, 2, addr, 0);
+
+		if (!resend || ap->num_sta <= 0)
+			return;
+
+		mdelay(50);
+	}
+}
+
+
+static int ap_control_proc_read(char *page, char **start, off_t off,
+				int count, int *eof, void *data)
+{
+	char *p = page;
+	struct ap_data *ap = (struct ap_data *) data;
+	char *policy_txt;
+	struct list_head *ptr;
+	struct mac_entry *entry;
+
+	if (off != 0) {
+		*eof = 1;
+		return 0;
+	}
+
+	switch (ap->mac_restrictions.policy) {
+	case MAC_POLICY_OPEN:
+		policy_txt = "open";
+		break;
+	case MAC_POLICY_ALLOW:
+		policy_txt = "allow";
+		break;
+	case MAC_POLICY_DENY:
+		policy_txt = "deny";
+		break;
+	default:
+		policy_txt = "unknown";
+		break;
+	}
+	p += sprintf(p, "MAC policy: %s\n", policy_txt);
+	p += sprintf(p, "MAC entries: %u\n", ap->mac_restrictions.entries);
+	p += sprintf(p, "MAC list:\n");
+	spin_lock_bh(&ap->mac_restrictions.lock);
+	for (ptr = ap->mac_restrictions.mac_list.next;
+	     ptr != &ap->mac_restrictions.mac_list; ptr = ptr->next) {
+		if (p - page > PAGE_SIZE - 80) {
+			p += sprintf(p, "All entries did not fit one page.\n");
+			break;
+		}
+
+		entry = list_entry(ptr, struct mac_entry, list);
+		p += sprintf(p, MACSTR "\n", MAC2STR(entry->addr));
+	}
+	spin_unlock_bh(&ap->mac_restrictions.lock);
+
+	return (p - page);
+}
+
+
+static int ap_control_add_mac(struct mac_restrictions *mac_restrictions,
+			      u8 *mac)
+{
+	struct mac_entry *entry;
+
+	entry = kmalloc(sizeof(struct mac_entry), GFP_KERNEL);
+	if (entry == NULL)
+		return -1;
+
+	memcpy(entry->addr, mac, ETH_ALEN);
+
+	spin_lock_bh(&mac_restrictions->lock);
+	list_add_tail(&entry->list, &mac_restrictions->mac_list);
+	mac_restrictions->entries++;
+	spin_unlock_bh(&mac_restrictions->lock);
+
+	return 0;
+}
+
+
+static int ap_control_del_mac(struct mac_restrictions *mac_restrictions,
+			      u8 *mac)
+{
+	struct list_head *ptr;
+	struct mac_entry *entry;
+
+	spin_lock_bh(&mac_restrictions->lock);
+	for (ptr = mac_restrictions->mac_list.next;
+	     ptr != &mac_restrictions->mac_list; ptr = ptr->next) {
+		entry = list_entry(ptr, struct mac_entry, list);
+
+		if (memcmp(entry->addr, mac, ETH_ALEN) == 0) {
+			list_del(ptr);
+			kfree(entry);
+			mac_restrictions->entries--;
+			spin_unlock_bh(&mac_restrictions->lock);
+			return 0;
+		}
+	}
+	spin_unlock_bh(&mac_restrictions->lock);
+	return -1;
+}
+
+
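+/* Return 1 if frames from the given MAC address must be rejected by the
+ * current MAC access control policy (allow/deny list), 0 otherwise. */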
+static int ap_control_mac_deny(struct mac_restrictions *mac_restrictions,
+			       u8 *mac)
+{
+	struct list_head *ptr;
+	struct mac_entry *entry;
+	int found = 0;
+
+	if (mac_restrictions->policy == MAC_POLICY_OPEN)
+		return 0;
+
+	spin_lock_bh(&mac_restrictions->lock);
+	for (ptr = mac_restrictions->mac_list.next;
+	     ptr != &mac_restrictions->mac_list; ptr = ptr->next) {
+		entry = list_entry(ptr, struct mac_entry, list);
+
+		if (memcmp(entry->addr, mac, ETH_ALEN) == 0) {
+			found = 1;
+			break;
+		}
+	}
+	spin_unlock_bh(&mac_restrictions->lock);
+
+	if (mac_restrictions->policy == MAC_POLICY_ALLOW)
+		return !found;
+	else
+		return found;
+}
+
+
+static void ap_control_flush_macs(struct mac_restrictions *mac_restrictions)
+{
+	struct list_head *ptr, *n;
+	struct mac_entry *entry;
+
+	if (mac_restrictions->entries == 0)
+		return;
+
+	spin_lock_bh(&mac_restrictions->lock);
+	for (ptr = mac_restrictions->mac_list.next, n = ptr->next;
+	     ptr != &mac_restrictions->mac_list;
+	     ptr = n, n = ptr->next) {
+		entry = list_entry(ptr, struct mac_entry, list);
+		list_del(ptr);
+		kfree(entry);
+	}
+	mac_restrictions->entries = 0;
+	spin_unlock_bh(&mac_restrictions->lock);
+}
+
+
+static int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev,
+			       u8 *mac)
+{
+	struct sta_info *sta;
+	u16 resp;
+
+	spin_lock_bh(&ap->sta_table_lock);
+	sta = ap_get_sta(ap, mac);
+	if (sta) {
+		ap_sta_hash_del(ap, sta);
+		list_del(&sta->list);
+	}
+	spin_unlock_bh(&ap->sta_table_lock);
+
+	if (!sta)
+		return -EINVAL;
+
+	resp = cpu_to_le16(WLAN_REASON_PREV_AUTH_NOT_VALID);
+	prism2_send_mgmt(dev, IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DEAUTH,
+			 (char *) &resp, 2, sta->addr, 0);
+
+	if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap)
+		hostap_event_expired_sta(dev, sta);
+
+	ap_free_sta(ap, sta);
+
+	return 0;
+}
+
+#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
+
+
+static void ap_control_kickall(struct ap_data *ap)
+{
+	struct list_head *ptr, *n;
+	struct sta_info *sta;
+
+	spin_lock_bh(&ap->sta_table_lock);
+	for (ptr = ap->sta_list.next, n = ptr->next; ptr != &ap->sta_list;
+	     ptr = n, n = ptr->next) {
+		sta = list_entry(ptr, struct sta_info, list);
+		ap_sta_hash_del(ap, sta);
+		list_del(&sta->list);
+		if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap && sta->local)
+			hostap_event_expired_sta(sta->local->dev, sta);
+		ap_free_sta(ap, sta);
+	}
+	spin_unlock_bh(&ap->sta_table_lock);
+}
+
+
+#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
+
+#define PROC_LIMIT (PAGE_SIZE - 80)
+
+static int prism2_ap_proc_read(char *page, char **start, off_t off,
+			       int count, int *eof, void *data)
+{
+	char *p = page;
+	struct ap_data *ap = (struct ap_data *) data;
+	struct list_head *ptr;
+	int i;
+
+	if (off > PROC_LIMIT) {
+		*eof = 1;
+		return 0;
+	}
+
+	p += sprintf(p, "# BSSID CHAN SIGNAL NOISE RATE SSID FLAGS\n");
+	spin_lock_bh(&ap->sta_table_lock);
+	for (ptr = ap->sta_list.next; ptr != &ap->sta_list; ptr = ptr->next) {
+		struct sta_info *sta = (struct sta_info *) ptr;
+
+		if (!sta->ap)
+			continue;
+
+		p += sprintf(p, MACSTR " %d %d %d %d '", MAC2STR(sta->addr),
+			     sta->u.ap.channel, sta->last_rx_signal,
+			     sta->last_rx_silence, sta->last_rx_rate);
+		for (i = 0; i < sta->u.ap.ssid_len; i++)
+			p += sprintf(p, ((sta->u.ap.ssid[i] >= 32 &&
+					  sta->u.ap.ssid[i] < 127) ?
+					 "%c" : "<%02x>"),
+				     sta->u.ap.ssid[i]);
+		p += sprintf(p, "'");
+		if (sta->capability & WLAN_CAPABILITY_ESS)
+			p += sprintf(p, " [ESS]");
+		if (sta->capability & WLAN_CAPABILITY_IBSS)
+			p += sprintf(p, " [IBSS]");
+		if (sta->capability & WLAN_CAPABILITY_PRIVACY)
+			p += sprintf(p, " [WEP]");
+		p += sprintf(p, "\n");
+
+		if ((p - page) > PROC_LIMIT) {
+			printk(KERN_DEBUG "hostap: ap proc did not fit\n");
+			break;
+		}
+	}
+	spin_unlock_bh(&ap->sta_table_lock);
+
+	if ((p - page) <= off) {
+		*eof = 1;
+		return 0;
+	}
+
+	*start = page + off;
+
+	return (p - page - off);
+}
+#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
+
+
+void hostap_check_sta_fw_version(struct ap_data *ap, int sta_fw_ver)
+{
+	if (!ap)
+		return;
+
+	if (sta_fw_ver == PRISM2_FW_VER(0,8,0)) {
+		PDEBUG(DEBUG_AP, "Using data::nullfunc ACK workaround - "
+		       "firmware upgrade recommended\n");
+		ap->nullfunc_ack = 1;
+	} else
+		ap->nullfunc_ack = 0;
+
+	if (sta_fw_ver == PRISM2_FW_VER(1,4,2)) {
+		printk(KERN_WARNING "%s: Warning: secondary station firmware "
+		       "version 1.4.2 does not seem to work in Host AP mode\n",
+		       ap->local->dev->name);
+	}
+}
+
+
+/* Called only as a tasklet (software IRQ) */
+static void hostap_ap_tx_cb(struct sk_buff *skb, int ok, void *data)
+{
+	struct ap_data *ap = data;
+	u16 fc;
+	struct ieee80211_hdr *hdr;
+
+	if (!ap->local->hostapd || !ap->local->apdev) {
+		dev_kfree_skb(skb);
+		return;
+	}
+
+	hdr = (struct ieee80211_hdr *) skb->data;
+	fc = le16_to_cpu(hdr->frame_ctl);
+
+	/* Pass the TX callback frame to hostapd; use 802.11 header version
+	 * 1 to indicate failure (no ACK) and 2 to indicate success (ACKed) */
+
+	fc &= ~IEEE80211_FCTL_VERS;
+	fc |= ok ? BIT(1) : BIT(0);
+	hdr->frame_ctl = cpu_to_le16(fc);
+
+	skb->dev = ap->local->apdev;
+	skb_pull(skb, hostap_80211_get_hdrlen(fc));
+	skb->pkt_type = PACKET_OTHERHOST;
+	skb->protocol = __constant_htons(ETH_P_802_2);
+	memset(skb->cb, 0, sizeof(skb->cb));
+	netif_rx(skb);
+}
+
+
+#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
+/* Called only as a tasklet (software IRQ) */
+static void hostap_ap_tx_cb_auth(struct sk_buff *skb, int ok, void *data)
+{
+	struct ap_data *ap = data;
+	struct net_device *dev = ap->local->dev;
+	struct ieee80211_hdr *hdr;
+	u16 fc, *pos, auth_alg, auth_transaction, status;
+	struct sta_info *sta = NULL;
+	char *txt = NULL;
+
+	if (ap->local->hostapd) {
+		dev_kfree_skb(skb);
+		return;
+	}
+
+	hdr = (struct ieee80211_hdr *) skb->data;
+	fc = le16_to_cpu(hdr->frame_ctl);
+	if (WLAN_FC_GET_TYPE(fc) != IEEE80211_FTYPE_MGMT ||
+	    WLAN_FC_GET_STYPE(fc) != IEEE80211_STYPE_AUTH ||
+	    skb->len < IEEE80211_MGMT_HDR_LEN + 6) {
+		printk(KERN_DEBUG "%s: hostap_ap_tx_cb_auth received invalid "
+		       "frame\n", dev->name);
+		dev_kfree_skb(skb);
+		return;
+	}
+
+	pos = (u16 *) (skb->data + IEEE80211_MGMT_HDR_LEN);
+	auth_alg = le16_to_cpu(*pos++);
+	auth_transaction = le16_to_cpu(*pos++);
+	status = le16_to_cpu(*pos++);
+
+	if (!ok) {
+		txt = "frame was not ACKed";
+		goto done;
+	}
+
+	spin_lock(&ap->sta_table_lock);
+	sta = ap_get_sta(ap, hdr->addr1);
+	if (sta)
+		atomic_inc(&sta->users);
+	spin_unlock(&ap->sta_table_lock);
+
+	if (!sta) {
+		txt = "STA not found";
+		goto done;
+	}
+
+	if (status == WLAN_STATUS_SUCCESS &&
+	    ((auth_alg == WLAN_AUTH_OPEN && auth_transaction == 2) ||
+	     (auth_alg == WLAN_AUTH_SHARED_KEY && auth_transaction == 4))) {
+		txt = "STA authenticated";
+		sta->flags |= WLAN_STA_AUTH;
+		sta->last_auth = jiffies;
+	} else if (status != WLAN_STATUS_SUCCESS)
+		txt = "authentication failed";
+
+ done:
+	if (sta)
+		atomic_dec(&sta->users);
+	if (txt) {
+		PDEBUG(DEBUG_AP, "%s: " MACSTR " auth_cb - alg=%d trans#=%d "
+		       "status=%d - %s\n",
+		       dev->name, MAC2STR(hdr->addr1), auth_alg,
+		       auth_transaction, status, txt);
+	}
+	dev_kfree_skb(skb);
+}
+
+
+/* Called only as a tasklet (software IRQ) */
+static void hostap_ap_tx_cb_assoc(struct sk_buff *skb, int ok, void *data)
+{
+	struct ap_data *ap = data;
+	struct net_device *dev = ap->local->dev;
+	struct ieee80211_hdr *hdr;
+	u16 fc, *pos, status;
+	struct sta_info *sta = NULL;
+	char *txt = NULL;
+
+	if (ap->local->hostapd) {
+		dev_kfree_skb(skb);
+		return;
+	}
+
+	hdr = (struct ieee80211_hdr *) skb->data;
+	fc = le16_to_cpu(hdr->frame_ctl);
+	if (WLAN_FC_GET_TYPE(fc) != IEEE80211_FTYPE_MGMT ||
+	    (WLAN_FC_GET_STYPE(fc) != IEEE80211_STYPE_ASSOC_RESP &&
+	     WLAN_FC_GET_STYPE(fc) != IEEE80211_STYPE_REASSOC_RESP) ||
+	    skb->len < IEEE80211_MGMT_HDR_LEN + 4) {
+		printk(KERN_DEBUG "%s: hostap_ap_tx_cb_assoc received invalid "
+		       "frame\n", dev->name);
+		dev_kfree_skb(skb);
+		return;
+	}
+
+	if (!ok) {
+		txt = "frame was not ACKed";
+		goto done;
+	}
+
+	spin_lock(&ap->sta_table_lock);
+	sta = ap_get_sta(ap, hdr->addr1);
+	if (sta)
+		atomic_inc(&sta->users);
+	spin_unlock(&ap->sta_table_lock);
+
+	if (!sta) {
+		txt = "STA not found";
+		goto done;
+	}
+
+	pos = (u16 *) (skb->data + IEEE80211_MGMT_HDR_LEN);
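+	/* skip the capability information field; the status code follows it */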
+	pos++;
+	status = le16_to_cpu(*pos++);
+	if (status == WLAN_STATUS_SUCCESS) {
+		if (!(sta->flags & WLAN_STA_ASSOC))
+			hostap_event_new_sta(dev, sta);
+		txt = "STA associated";
+		sta->flags |= WLAN_STA_ASSOC;
+		sta->last_assoc = jiffies;
+	} else
+		txt = "association failed";
+
+ done:
+	if (sta)
+		atomic_dec(&sta->users);
+	if (txt) {
+		PDEBUG(DEBUG_AP, "%s: " MACSTR " assoc_cb - %s\n",
+		       dev->name, MAC2STR(hdr->addr1), txt);
+	}
+	dev_kfree_skb(skb);
+}
+
+/* Called only as a tasklet (software IRQ); TX callback for poll frames used
+ * in verifying whether the STA is still present. */
+static void hostap_ap_tx_cb_poll(struct sk_buff *skb, int ok, void *data)
+{
+	struct ap_data *ap = data;
+	struct ieee80211_hdr *hdr;
+	struct sta_info *sta;
+
+	if (skb->len < 24)
+		goto fail;
+	hdr = (struct ieee80211_hdr *) skb->data;
+	if (ok) {
+		spin_lock(&ap->sta_table_lock);
+		sta = ap_get_sta(ap, hdr->addr1);
+		if (sta)
+			sta->flags &= ~WLAN_STA_PENDING_POLL;
+		spin_unlock(&ap->sta_table_lock);
+	} else {
+		PDEBUG(DEBUG_AP, "%s: STA " MACSTR " did not ACK activity "
+		       "poll frame\n", ap->local->dev->name,
+		       MAC2STR(hdr->addr1));
+	}
+
+ fail:
+	dev_kfree_skb(skb);
+}
+#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
+
+
+void hostap_init_data(local_info_t *local)
+{
+	struct ap_data *ap = local->ap;
+
+	if (ap == NULL) {
+		printk(KERN_WARNING "hostap_init_data: ap == NULL\n");
+		return;
+	}
+	memset(ap, 0, sizeof(struct ap_data));
+	ap->local = local;
+
+	ap->ap_policy = GET_INT_PARM(other_ap_policy, local->card_idx);
+	ap->bridge_packets = GET_INT_PARM(ap_bridge_packets, local->card_idx);
+	ap->max_inactivity =
+		GET_INT_PARM(ap_max_inactivity, local->card_idx) * HZ;
+	ap->autom_ap_wds = GET_INT_PARM(autom_ap_wds, local->card_idx);
+
+	spin_lock_init(&ap->sta_table_lock);
+	INIT_LIST_HEAD(&ap->sta_list);
+
+	/* Initialize task queue structure for AP management */
+	INIT_WORK(&local->ap->add_sta_proc_queue, handle_add_proc_queue, ap);
+
+	ap->tx_callback_idx =
+		hostap_tx_callback_register(local, hostap_ap_tx_cb, ap);
+	if (ap->tx_callback_idx == 0)
+		printk(KERN_WARNING "%s: failed to register TX callback for "
+		       "AP\n", local->dev->name);
+#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
+	INIT_WORK(&local->ap->wds_oper_queue, handle_wds_oper_queue, local);
+
+	ap->tx_callback_auth =
+		hostap_tx_callback_register(local, hostap_ap_tx_cb_auth, ap);
+	ap->tx_callback_assoc =
+		hostap_tx_callback_register(local, hostap_ap_tx_cb_assoc, ap);
+	ap->tx_callback_poll =
+		hostap_tx_callback_register(local, hostap_ap_tx_cb_poll, ap);
+	if (ap->tx_callback_auth == 0 || ap->tx_callback_assoc == 0 ||
+		ap->tx_callback_poll == 0)
+		printk(KERN_WARNING "%s: failed to register TX callback for "
+		       "AP\n", local->dev->name);
+
+	spin_lock_init(&ap->mac_restrictions.lock);
+	INIT_LIST_HEAD(&ap->mac_restrictions.mac_list);
+#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
+
+	ap->initialized = 1;
+}
+
+
+void hostap_init_ap_proc(local_info_t *local)
+{
+	struct ap_data *ap = local->ap;
+
+	ap->proc = local->proc;
+	if (ap->proc == NULL)
+		return;
+
+#ifndef PRISM2_NO_PROCFS_DEBUG
+	create_proc_read_entry("ap_debug", 0, ap->proc,
+			       ap_debug_proc_read, ap);
+#endif /* PRISM2_NO_PROCFS_DEBUG */
+
+#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
+	create_proc_read_entry("ap_control", 0, ap->proc,
+			       ap_control_proc_read, ap);
+	create_proc_read_entry("ap", 0, ap->proc,
+			       prism2_ap_proc_read, ap);
+#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
+
+}
+
+
+void hostap_free_data(struct ap_data *ap)
+{
+	struct list_head *n, *ptr;
+
+	if (ap == NULL || !ap->initialized) {
+		printk(KERN_DEBUG "hostap_free_data: ap has not yet been "
+		       "initialized - skip resource freeing\n");
+		return;
+	}
+
+#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
+	if (ap->crypt)
+		ap->crypt->deinit(ap->crypt_priv);
+	ap->crypt = ap->crypt_priv = NULL;
+#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
+
+	list_for_each_safe(ptr, n, &ap->sta_list) {
+		struct sta_info *sta = list_entry(ptr, struct sta_info, list);
+		ap_sta_hash_del(ap, sta);
+		list_del(&sta->list);
+		if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap && sta->local)
+			hostap_event_expired_sta(sta->local->dev, sta);
+		ap_free_sta(ap, sta);
+	}
+
+#ifndef PRISM2_NO_PROCFS_DEBUG
+	if (ap->proc != NULL) {
+		remove_proc_entry("ap_debug", ap->proc);
+	}
+#endif /* PRISM2_NO_PROCFS_DEBUG */
+
+#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
+	if (ap->proc != NULL) {
+		remove_proc_entry("ap", ap->proc);
+		remove_proc_entry("ap_control", ap->proc);
+	}
+	ap_control_flush_macs(&ap->mac_restrictions);
+#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
+
+	ap->initialized = 0;
+}
+
+
+/* caller should have mutex for AP STA list handling */
+static struct sta_info* ap_get_sta(struct ap_data *ap, u8 *sta)
+{
+	struct sta_info *s;
+
+	s = ap->sta_hash[STA_HASH(sta)];
+	while (s != NULL && memcmp(s->addr, sta, ETH_ALEN) != 0)
+		s = s->hnext;
+	return s;
+}
+
+
+#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
+
+/* Called from timer handler and from scheduled AP queue handlers */
+static void prism2_send_mgmt(struct net_device *dev,
+			     u16 type_subtype, char *body,
+			     int body_len, u8 *addr, u16 tx_cb_idx)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	struct ieee80211_hdr *hdr;
+	u16 fc;
+	struct sk_buff *skb;
+	struct hostap_skb_tx_data *meta;
+	int hdrlen;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+	dev = local->dev; /* always use master radio device */
+	iface = netdev_priv(dev);
+
+	if (!(dev->flags & IFF_UP)) {
+		PDEBUG(DEBUG_AP, "%s: prism2_send_mgmt - device is not UP - "
+		       "cannot send frame\n", dev->name);
+		return;
+	}
+
+	skb = dev_alloc_skb(sizeof(*hdr) + body_len);
+	if (skb == NULL) {
+		PDEBUG(DEBUG_AP, "%s: prism2_send_mgmt failed to allocate "
+		       "skb\n", dev->name);
+		return;
+	}
+
+	fc = type_subtype;
+	hdrlen = hostap_80211_get_hdrlen(fc);
+	hdr = (struct ieee80211_hdr *) skb_put(skb, hdrlen);
+	if (body)
+		memcpy(skb_put(skb, body_len), body, body_len);
+
+	memset(hdr, 0, hdrlen);
+
+	/* FIX: ctrl::ack sending used special HFA384X_TX_CTRL_802_11
+	 * tx_control instead of using local->tx_control */
+
+
+	memcpy(hdr->addr1, addr, ETH_ALEN); /* DA / RA */
+	if (WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA) {
+		fc |= IEEE80211_FCTL_FROMDS;
+		memcpy(hdr->addr2, dev->dev_addr, ETH_ALEN); /* BSSID */
+		memcpy(hdr->addr3, dev->dev_addr, ETH_ALEN); /* SA */
+	} else if (WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_CTL) {
+		/* control:ACK does not have addr2 or addr3 */
+		memset(hdr->addr2, 0, ETH_ALEN);
+		memset(hdr->addr3, 0, ETH_ALEN);
+	} else {
+		memcpy(hdr->addr2, dev->dev_addr, ETH_ALEN); /* SA */
+		memcpy(hdr->addr3, dev->dev_addr, ETH_ALEN); /* BSSID */
+	}
+
+	hdr->frame_ctl = cpu_to_le16(fc);
+
+	meta = (struct hostap_skb_tx_data *) skb->cb;
+	memset(meta, 0, sizeof(*meta));
+	meta->magic = HOSTAP_SKB_TX_DATA_MAGIC;
+	meta->iface = iface;
+	meta->tx_cb_idx = tx_cb_idx;
+
+	skb->dev = dev;
+	skb->mac.raw = skb->nh.raw = skb->data;
+	dev_queue_xmit(skb);
+}
+#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
+
+
+static int prism2_sta_proc_read(char *page, char **start, off_t off,
+				int count, int *eof, void *data)
+{
+	char *p = page;
+	struct sta_info *sta = (struct sta_info *) data;
+	int i;
+
+	/* FIX: possible race condition.. the STA data could have just expired,
+	 * but proc entry was still here so that the read could have started;
+	 * some locking should be done here.. */
+
+	if (off != 0) {
+		*eof = 1;
+		return 0;
+	}
+
+	p += sprintf(p, "%s=" MACSTR "\nusers=%d\naid=%d\n"
+		     "flags=0x%04x%s%s%s%s%s%s%s\n"
+		     "capability=0x%02x\nlisten_interval=%d\nsupported_rates=",
+		     sta->ap ? "AP" : "STA",
+		     MAC2STR(sta->addr), atomic_read(&sta->users), sta->aid,
+		     sta->flags,
+		     sta->flags & WLAN_STA_AUTH ? " AUTH" : "",
+		     sta->flags & WLAN_STA_ASSOC ? " ASSOC" : "",
+		     sta->flags & WLAN_STA_PS ? " PS" : "",
+		     sta->flags & WLAN_STA_TIM ? " TIM" : "",
+		     sta->flags & WLAN_STA_PERM ? " PERM" : "",
+		     sta->flags & WLAN_STA_AUTHORIZED ? " AUTHORIZED" : "",
+		     sta->flags & WLAN_STA_PENDING_POLL ? " POLL" : "",
+		     sta->capability, sta->listen_interval);
+	/* supported_rates: 500 kbit/s units with msb ignored */
+	for (i = 0; i < sizeof(sta->supported_rates); i++)
+		if (sta->supported_rates[i] != 0)
+			p += sprintf(p, "%d%sMbps ",
+				     (sta->supported_rates[i] & 0x7f) / 2,
+				     sta->supported_rates[i] & 1 ? ".5" : "");
+	p += sprintf(p, "\njiffies=%lu\nlast_auth=%lu\nlast_assoc=%lu\n"
+		     "last_rx=%lu\nlast_tx=%lu\nrx_packets=%lu\n"
+		     "tx_packets=%lu\n"
+		     "rx_bytes=%lu\ntx_bytes=%lu\nbuffer_count=%d\n"
+		     "last_rx: silence=%d dBm signal=%d dBm rate=%d%s Mbps\n"
+		     "tx_rate=%d\ntx[1M]=%d\ntx[2M]=%d\ntx[5.5M]=%d\n"
+		     "tx[11M]=%d\n"
+		     "rx[1M]=%d\nrx[2M]=%d\nrx[5.5M]=%d\nrx[11M]=%d\n",
+		     jiffies, sta->last_auth, sta->last_assoc, sta->last_rx,
+		     sta->last_tx,
+		     sta->rx_packets, sta->tx_packets, sta->rx_bytes,
+		     sta->tx_bytes, skb_queue_len(&sta->tx_buf),
+		     sta->last_rx_silence,
+		     sta->last_rx_signal, sta->last_rx_rate / 10,
+		     sta->last_rx_rate % 10 ? ".5" : "",
+		     sta->tx_rate, sta->tx_count[0], sta->tx_count[1],
+		     sta->tx_count[2], sta->tx_count[3],  sta->rx_count[0],
+		     sta->rx_count[1], sta->rx_count[2], sta->rx_count[3]);
+	if (sta->crypt && sta->crypt->ops && sta->crypt->ops->print_stats)
+		p = sta->crypt->ops->print_stats(p, sta->crypt->priv);
+#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
+	if (sta->ap) {
+		if (sta->u.ap.channel >= 0)
+			p += sprintf(p, "channel=%d\n", sta->u.ap.channel);
+		p += sprintf(p, "ssid=");
+		for (i = 0; i < sta->u.ap.ssid_len; i++)
+			p += sprintf(p, ((sta->u.ap.ssid[i] >= 32 &&
+					  sta->u.ap.ssid[i] < 127) ?
+					 "%c" : "<%02x>"),
+				     sta->u.ap.ssid[i]);
+		p += sprintf(p, "\n");
+	}
+#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
+
+	return (p - page);
+}
+
+
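+/* Scheduled task: create procfs entries for STAs that were added from
+ * atomic context (procfs entry creation needs GFP_KERNEL allocations) */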
+static void handle_add_proc_queue(void *data)
+{
+	struct ap_data *ap = (struct ap_data *) data;
+	struct sta_info *sta;
+	char name[20];
+	struct add_sta_proc_data *entry, *prev;
+
+	entry = ap->add_sta_proc_entries;
+	ap->add_sta_proc_entries = NULL;
+
+	while (entry) {
+		spin_lock_bh(&ap->sta_table_lock);
+		sta = ap_get_sta(ap, entry->addr);
+		if (sta)
+			atomic_inc(&sta->users);
+		spin_unlock_bh(&ap->sta_table_lock);
+
+		if (sta) {
+			sprintf(name, MACSTR, MAC2STR(sta->addr));
+			sta->proc = create_proc_read_entry(
+				name, 0, ap->proc,
+				prism2_sta_proc_read, sta);
+
+			atomic_dec(&sta->users);
+		}
+
+		prev = entry;
+		entry = entry->next;
+		kfree(prev);
+	}
+}
+
+
+static struct sta_info * ap_add_sta(struct ap_data *ap, u8 *addr)
+{
+	struct sta_info *sta;
+
+	sta = (struct sta_info *)
+		kmalloc(sizeof(struct sta_info), GFP_ATOMIC);
+	if (sta == NULL) {
+		PDEBUG(DEBUG_AP, "AP: kmalloc failed\n");
+		return NULL;
+	}
+
+	/* initialize STA info data */
+	memset(sta, 0, sizeof(struct sta_info));
+	sta->local = ap->local;
+	skb_queue_head_init(&sta->tx_buf);
+	memcpy(sta->addr, addr, ETH_ALEN);
+
+	atomic_inc(&sta->users);
+	spin_lock_bh(&ap->sta_table_lock);
+	list_add(&sta->list, &ap->sta_list);
+	ap->num_sta++;
+	ap_sta_hash_add(ap, sta);
+	spin_unlock_bh(&ap->sta_table_lock);
+
+	if (ap->proc) {
+		struct add_sta_proc_data *entry;
+		/* schedule a non-interrupt context process to add a procfs
+		 * entry for the STA since procfs code uses GFP_KERNEL */
+		entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+		if (entry) {
+			memcpy(entry->addr, sta->addr, ETH_ALEN);
+			entry->next = ap->add_sta_proc_entries;
+			ap->add_sta_proc_entries = entry;
+			schedule_work(&ap->add_sta_proc_queue);
+		} else
+			printk(KERN_DEBUG "Failed to add STA proc data\n");
+	}
+
+#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
+	init_timer(&sta->timer);
+	sta->timer.expires = jiffies + ap->max_inactivity;
+	sta->timer.data = (unsigned long) sta;
+	sta->timer.function = ap_handle_timer;
+	if (!ap->local->hostapd)
+		add_timer(&sta->timer);
+#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
+
+	return sta;
+}
+
+
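+/* Return non-zero if the given rate index is allowed both by the STA's
+ * supported rate set and by the local TX rate control mask */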
+static int ap_tx_rate_ok(int rateidx, struct sta_info *sta,
+			 local_info_t *local)
+{
+	if (rateidx > sta->tx_max_rate ||
+	    !(sta->tx_supp_rates & (1 << rateidx)))
+		return 0;
+
+	if (local->tx_rate_control != 0 &&
+	    !(local->tx_rate_control & (1 << rateidx)))
+		return 0;
+
+	return 1;
+}
+
+
+static void prism2_check_tx_rates(struct sta_info *sta)
+{
+	int i;
+
+	sta->tx_supp_rates = 0;
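+	/* supported_rates entries are in 500 kbit/s units: 2=1M, 4=2M,
+	 * 11=5.5M, 22=11M; the basic-rate MSB is masked off with 0x7f */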
+	for (i = 0; i < sizeof(sta->supported_rates); i++) {
+		if ((sta->supported_rates[i] & 0x7f) == 2)
+			sta->tx_supp_rates |= WLAN_RATE_1M;
+		if ((sta->supported_rates[i] & 0x7f) == 4)
+			sta->tx_supp_rates |= WLAN_RATE_2M;
+		if ((sta->supported_rates[i] & 0x7f) == 11)
+			sta->tx_supp_rates |= WLAN_RATE_5M5;
+		if ((sta->supported_rates[i] & 0x7f) == 22)
+			sta->tx_supp_rates |= WLAN_RATE_11M;
+	}
+	sta->tx_max_rate = sta->tx_rate = sta->tx_rate_idx = 0;
+	if (sta->tx_supp_rates & WLAN_RATE_1M) {
+		sta->tx_max_rate = 0;
+		if (ap_tx_rate_ok(0, sta, sta->local)) {
+			sta->tx_rate = 10;
+			sta->tx_rate_idx = 0;
+		}
+	}
+	if (sta->tx_supp_rates & WLAN_RATE_2M) {
+		sta->tx_max_rate = 1;
+		if (ap_tx_rate_ok(1, sta, sta->local)) {
+			sta->tx_rate = 20;
+			sta->tx_rate_idx = 1;
+		}
+	}
+	if (sta->tx_supp_rates & WLAN_RATE_5M5) {
+		sta->tx_max_rate = 2;
+		if (ap_tx_rate_ok(2, sta, sta->local)) {
+			sta->tx_rate = 55;
+			sta->tx_rate_idx = 2;
+		}
+	}
+	if (sta->tx_supp_rates & WLAN_RATE_11M) {
+		sta->tx_max_rate = 3;
+		if (ap_tx_rate_ok(3, sta, sta->local)) {
+			sta->tx_rate = 110;
+			sta->tx_rate_idx = 3;
+		}
+	}
+}
+
+
+#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
+
+static void ap_crypt_init(struct ap_data *ap)
+{
+	ap->crypt = ieee80211_get_crypto_ops("WEP");
+
+	if (ap->crypt) {
+		if (ap->crypt->init) {
+			ap->crypt_priv = ap->crypt->init(0);
+			if (ap->crypt_priv == NULL)
+				ap->crypt = NULL;
+			else {
+				u8 key[WEP_KEY_LEN];
+				get_random_bytes(key, WEP_KEY_LEN);
+				ap->crypt->set_key(key, WEP_KEY_LEN, NULL,
+						   ap->crypt_priv);
+			}
+		}
+	}
+
+	if (ap->crypt == NULL) {
+		printk(KERN_WARNING "AP could not initialize WEP: load module "
+		       "ieee80211_crypt_wep.ko\n");
+	}
+}
+
+
+/* Generate challenge data for shared key authentication. IEEE 802.11 specifies
+ * that the WEP algorithm is used for generating the challenge. This should be
+ * unique, but otherwise there is no real need for randomness etc. Initialize
+ * WEP with a pseudo-random key and then use an increasing IV to get unique
+ * challenge streams.
+ *
+ * Called only as a scheduled task for pending AP frames.
+ */
+static char * ap_auth_make_challenge(struct ap_data *ap)
+{
+	char *tmpbuf;
+	struct sk_buff *skb;
+
+	if (ap->crypt == NULL) {
+		ap_crypt_init(ap);
+		if (ap->crypt == NULL)
+			return NULL;
+	}
+
+	tmpbuf = (char *) kmalloc(WLAN_AUTH_CHALLENGE_LEN, GFP_ATOMIC);
+	if (tmpbuf == NULL) {
+		PDEBUG(DEBUG_AP, "AP: kmalloc failed for challenge\n");
+		return NULL;
+	}
+
+	skb = dev_alloc_skb(WLAN_AUTH_CHALLENGE_LEN +
+			    ap->crypt->extra_prefix_len +
+			    ap->crypt->extra_postfix_len);
+	if (skb == NULL) {
+		kfree(tmpbuf);
+		return NULL;
+	}
+
+	skb_reserve(skb, ap->crypt->extra_prefix_len);
+	memset(skb_put(skb, WLAN_AUTH_CHALLENGE_LEN), 0,
+	       WLAN_AUTH_CHALLENGE_LEN);
+	if (ap->crypt->encrypt_mpdu(skb, 0, ap->crypt_priv)) {
+		dev_kfree_skb(skb);
+		kfree(tmpbuf);
+		return NULL;
+	}
+
+	memcpy(tmpbuf, skb->data + ap->crypt->extra_prefix_len,
+	       WLAN_AUTH_CHALLENGE_LEN);
+	dev_kfree_skb(skb);
+
+	return tmpbuf;
+}
+
+
+/* Called only as a scheduled task for pending AP frames. */
+static void handle_authen(local_info_t *local, struct sk_buff *skb,
+			  struct hostap_80211_rx_status *rx_stats)
+{
+	struct net_device *dev = local->dev;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+	size_t hdrlen;
+	struct ap_data *ap = local->ap;
+	char body[8 + WLAN_AUTH_CHALLENGE_LEN], *challenge = NULL;
+	int len, olen;
+	u16 auth_alg, auth_transaction, status_code, *pos;
+	u16 resp = WLAN_STATUS_SUCCESS, fc;
+	struct sta_info *sta = NULL;
+	struct ieee80211_crypt_data *crypt;
+	char *txt = "";
+
+	len = skb->len - IEEE80211_MGMT_HDR_LEN;
+
+	fc = le16_to_cpu(hdr->frame_ctl);
+	hdrlen = hostap_80211_get_hdrlen(fc);
+
+	if (len < 6) {
+		PDEBUG(DEBUG_AP, "%s: handle_authen - too short payload "
+		       "(len=%d) from " MACSTR "\n", dev->name, len,
+		       MAC2STR(hdr->addr2));
+		return;
+	}
+
+	spin_lock_bh(&local->ap->sta_table_lock);
+	sta = ap_get_sta(local->ap, hdr->addr2);
+	if (sta)
+		atomic_inc(&sta->users);
+	spin_unlock_bh(&local->ap->sta_table_lock);
+
+	if (sta && sta->crypt)
+		crypt = sta->crypt;
+	else {
+		int idx = 0;
+		if (skb->len >= hdrlen + 3)
+			idx = skb->data[hdrlen + 3] >> 6;
+		crypt = local->crypt[idx];
+	}
+
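+	/* parse the fixed authentication fields: algorithm, transaction
+	 * sequence number and status code */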
+	pos = (u16 *) (skb->data + IEEE80211_MGMT_HDR_LEN);
+	auth_alg = __le16_to_cpu(*pos);
+	pos++;
+	auth_transaction = __le16_to_cpu(*pos);
+	pos++;
+	status_code = __le16_to_cpu(*pos);
+	pos++;
+
+	if (memcmp(dev->dev_addr, hdr->addr2, ETH_ALEN) == 0 ||
+	    ap_control_mac_deny(&ap->mac_restrictions, hdr->addr2)) {
+		txt = "authentication denied";
+		resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
+		goto fail;
+	}
+
+	if (((local->auth_algs & PRISM2_AUTH_OPEN) &&
+	     auth_alg == WLAN_AUTH_OPEN) ||
+	    ((local->auth_algs & PRISM2_AUTH_SHARED_KEY) &&
+	     crypt && auth_alg == WLAN_AUTH_SHARED_KEY)) {
+	} else {
+		txt = "unsupported algorithm";
+		resp = WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG;
+		goto fail;
+	}
+
+	if (len >= 8) {
+		u8 *u = (u8 *) pos;
+		if (*u == WLAN_EID_CHALLENGE) {
+			if (*(u + 1) != WLAN_AUTH_CHALLENGE_LEN) {
+				txt = "invalid challenge len";
+				resp = WLAN_STATUS_CHALLENGE_FAIL;
+				goto fail;
+			}
+			if (len - 8 < WLAN_AUTH_CHALLENGE_LEN) {
+				txt = "challenge underflow";
+				resp = WLAN_STATUS_CHALLENGE_FAIL;
+				goto fail;
+			}
+			challenge = (char *) (u + 2);
+		}
+	}
+
+	if (sta && sta->ap) {
+		if (time_after(jiffies, sta->u.ap.last_beacon +
+			       (10 * sta->listen_interval * HZ) / 1024)) {
+			PDEBUG(DEBUG_AP, "%s: no beacons received for a while,"
+			       " assuming AP " MACSTR " is now STA\n",
+			       dev->name, MAC2STR(sta->addr));
+			sta->ap = 0;
+			sta->flags = 0;
+			sta->u.sta.challenge = NULL;
+		} else {
+			txt = "AP trying to authenticate?";
+			resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
+			goto fail;
+		}
+	}
+
+	if ((auth_alg == WLAN_AUTH_OPEN && auth_transaction == 1) ||
+	    (auth_alg == WLAN_AUTH_SHARED_KEY &&
+	     (auth_transaction == 1 ||
+	      (auth_transaction == 3 && sta != NULL &&
+	       sta->u.sta.challenge != NULL)))) {
+	} else {
+		txt = "unknown authentication transaction number";
+		resp = WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION;
+		goto fail;
+	}
+
+	if (sta == NULL) {
+		txt = "new STA";
+
+		if (local->ap->num_sta >= MAX_STA_COUNT) {
+			/* FIX: might try to remove some old STAs first? */
+			txt = "no more room for new STAs";
+			resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
+			goto fail;
+		}
+
+		sta = ap_add_sta(local->ap, hdr->addr2);
+		if (sta == NULL) {
+			txt = "ap_add_sta failed";
+			resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
+			goto fail;
+		}
+	}
+
+	switch (auth_alg) {
+	case WLAN_AUTH_OPEN:
+		txt = "authOK";
+		/* IEEE 802.11 standard is not completely clear about
+		 * whether STA is considered authenticated after
+		 * authentication OK frame has been sent or after it
+		 * has been ACKed. In order to reduce interoperability
+		 * issues, mark the STA authenticated before ACK. */
+		sta->flags |= WLAN_STA_AUTH;
+		break;
+
+	case WLAN_AUTH_SHARED_KEY:
+		if (auth_transaction == 1) {
+			if (sta->u.sta.challenge == NULL) {
+				sta->u.sta.challenge =
+					ap_auth_make_challenge(local->ap);
+				if (sta->u.sta.challenge == NULL) {
+					resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
+					goto fail;
+				}
+			}
+		} else {
+			if (sta->u.sta.challenge == NULL ||
+			    challenge == NULL ||
+			    memcmp(sta->u.sta.challenge, challenge,
+				   WLAN_AUTH_CHALLENGE_LEN) != 0 ||
+			    !(fc & IEEE80211_FCTL_WEP)) {
+				txt = "challenge response incorrect";
+				resp = WLAN_STATUS_CHALLENGE_FAIL;
+				goto fail;
+			}
+
+			txt = "challenge OK - authOK";
+			/* IEEE 802.11 standard is not completely clear about
+			 * whether STA is considered authenticated after
+			 * authentication OK frame has been sent or after it
+			 * has been ACKed. In order to reduce interoperability
+			 * issues, mark the STA authenticated before ACK. */
+			sta->flags |= WLAN_STA_AUTH;
+			kfree(sta->u.sta.challenge);
+			sta->u.sta.challenge = NULL;
+		}
+		break;
+	}
+
+ fail:
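+	/* build the authentication response body: algorithm, next transaction
+	 * number and the resulting status code */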
+	pos = (u16 *) body;
+	*pos = cpu_to_le16(auth_alg);
+	pos++;
+	*pos = cpu_to_le16(auth_transaction + 1);
+	pos++;
+	*pos = cpu_to_le16(resp); /* status_code */
+	pos++;
+	olen = 6;
+
+	if (resp == WLAN_STATUS_SUCCESS && sta != NULL &&
+	    sta->u.sta.challenge != NULL &&
+	    auth_alg == WLAN_AUTH_SHARED_KEY && auth_transaction == 1) {
+		u8 *tmp = (u8 *) pos;
+		*tmp++ = WLAN_EID_CHALLENGE;
+		*tmp++ = WLAN_AUTH_CHALLENGE_LEN;
+		pos++;
+		memcpy(pos, sta->u.sta.challenge, WLAN_AUTH_CHALLENGE_LEN);
+		olen += 2 + WLAN_AUTH_CHALLENGE_LEN;
+	}
+
+	prism2_send_mgmt(dev, IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH,
+			 body, olen, hdr->addr2, ap->tx_callback_auth);
+
+	if (sta) {
+		sta->last_rx = jiffies;
+		atomic_dec(&sta->users);
+	}
+
+	if (resp) {
+		PDEBUG(DEBUG_AP, "%s: " MACSTR " auth (alg=%d trans#=%d "
+		       "stat=%d len=%d fc=%04x) ==> %d (%s)\n",
+		       dev->name, MAC2STR(hdr->addr2), auth_alg,
+		       auth_transaction, status_code, len, fc, resp, txt);
+	}
+}
+
+
+/* Called only as a scheduled task for pending AP frames. */
+static void handle_assoc(local_info_t *local, struct sk_buff *skb,
+			 struct hostap_80211_rx_status *rx_stats, int reassoc)
+{
+	struct net_device *dev = local->dev;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+	char body[12], *p, *lpos;
+	int len, left;
+	u16 *pos;
+	u16 resp = WLAN_STATUS_SUCCESS;
+	struct sta_info *sta = NULL;
+	int send_deauth = 0;
+	char *txt = "";
+	u8 prev_ap[ETH_ALEN];
+
+	left = len = skb->len - IEEE80211_MGMT_HDR_LEN;
+
+	if (len < (reassoc ? 10 : 4)) {
+		PDEBUG(DEBUG_AP, "%s: handle_assoc - too short payload "
+		       "(len=%d, reassoc=%d) from " MACSTR "\n",
+		       dev->name, len, reassoc, MAC2STR(hdr->addr2));
+		return;
+	}
+
+	spin_lock_bh(&local->ap->sta_table_lock);
+	sta = ap_get_sta(local->ap, hdr->addr2);
+	if (sta == NULL || (sta->flags & WLAN_STA_AUTH) == 0) {
+		spin_unlock_bh(&local->ap->sta_table_lock);
+		txt = "trying to associate before authentication";
+		send_deauth = 1;
+		resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
+		sta = NULL; /* do not decrement sta->users */
+		goto fail;
+	}
+	atomic_inc(&sta->users);
+	spin_unlock_bh(&local->ap->sta_table_lock);
+
+	pos = (u16 *) (skb->data + IEEE80211_MGMT_HDR_LEN);
+	sta->capability = __le16_to_cpu(*pos);
+	pos++; left -= 2;
+	sta->listen_interval = __le16_to_cpu(*pos);
+	pos++; left -= 2;
+
+	if (reassoc) {
+		memcpy(prev_ap, pos, ETH_ALEN);
+		pos++; pos++; pos++; left -= 6;
+	} else
+		memset(prev_ap, 0, ETH_ALEN);
+
+	if (left >= 2) {
+		unsigned int ileft;
+		unsigned char *u = (unsigned char *) pos;
+
+		if (*u == WLAN_EID_SSID) {
+			u++; left--;
+			ileft = *u;
+			u++; left--;
+
+			if (ileft > left || ileft > MAX_SSID_LEN) {
+				txt = "SSID overflow";
+				resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
+				goto fail;
+			}
+
+			if (ileft != strlen(local->essid) ||
+			    memcmp(local->essid, u, ileft) != 0) {
+				txt = "not our SSID";
+				resp = WLAN_STATUS_ASSOC_DENIED_UNSPEC;
+				goto fail;
+			}
+
+			u += ileft;
+			left -= ileft;
+		}
+
+		if (left >= 2 && *u == WLAN_EID_SUPP_RATES) {
+			u++; left--;
+			ileft = *u;
+			u++; left--;
+
+			if (ileft > left || ileft == 0 ||
+			    ileft > WLAN_SUPP_RATES_MAX) {
+				txt = "SUPP_RATES len error";
+				resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
+				goto fail;
+			}
+
+			memset(sta->supported_rates, 0,
+			       sizeof(sta->supported_rates));
+			memcpy(sta->supported_rates, u, ileft);
+			prism2_check_tx_rates(sta);
+
+			u += ileft;
+			left -= ileft;
+		}
+
+		if (left > 0) {
+			PDEBUG(DEBUG_AP, "%s: assoc from " MACSTR " with extra"
+			       " data (%d bytes) [",
+			       dev->name, MAC2STR(hdr->addr2), left);
+			while (left > 0) {
+				PDEBUG2(DEBUG_AP, "<%02x>", *u);
+				u++; left--;
+			}
+			PDEBUG2(DEBUG_AP, "]\n");
+		}
+	} else {
+		txt = "frame underflow";
+		resp = WLAN_STATUS_UNSPECIFIED_FAILURE;
+		goto fail;
+	}
+
+	/* get a unique AID */
+	if (sta->aid > 0)
+		txt = "OK, old AID";
+	else {
+		spin_lock_bh(&local->ap->sta_table_lock);
+		for (sta->aid = 1; sta->aid <= MAX_AID_TABLE_SIZE; sta->aid++)
+			if (local->ap->sta_aid[sta->aid - 1] == NULL)
+				break;
+		if (sta->aid > MAX_AID_TABLE_SIZE) {
+			sta->aid = 0;
+			spin_unlock_bh(&local->ap->sta_table_lock);
+			resp = WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA;
+			txt = "no room for more AIDs";
+		} else {
+			local->ap->sta_aid[sta->aid - 1] = sta;
+			spin_unlock_bh(&local->ap->sta_table_lock);
+			txt = "OK, new AID";
+		}
+	}
+
+ fail:
+	pos = (u16 *) body;
+
+	if (send_deauth) {
+		*pos = __constant_cpu_to_le16(
+			WLAN_REASON_STA_REQ_ASSOC_WITHOUT_AUTH);
+		pos++;
+	} else {
+		/* FIX: CF-Pollable and CF-PollReq should be set to match the
+		 * values in beacons/probe responses */
+		/* FIX: how about privacy and WEP? */
+		/* capability */
+		*pos = __constant_cpu_to_le16(WLAN_CAPABILITY_ESS);
+		pos++;
+
+		/* status_code */
+		*pos = __cpu_to_le16(resp);
+		pos++;
+
+		*pos = __cpu_to_le16((sta && sta->aid > 0 ? sta->aid : 0) |
+				     BIT(14) | BIT(15)); /* AID */
+		pos++;
+
+		/* Supported rates (Information element) */
+		p = (char *) pos;
+		*p++ = WLAN_EID_SUPP_RATES;
+		lpos = p;
+		*p++ = 0; /* len */
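+		/* rate octets are in 500 kbit/s units; the MSB (0x80) marks
+		 * a basic rate */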
+		if (local->tx_rate_control & WLAN_RATE_1M) {
+			*p++ = local->basic_rates & WLAN_RATE_1M ? 0x82 : 0x02;
+			(*lpos)++;
+		}
+		if (local->tx_rate_control & WLAN_RATE_2M) {
+			*p++ = local->basic_rates & WLAN_RATE_2M ? 0x84 : 0x04;
+			(*lpos)++;
+		}
+		if (local->tx_rate_control & WLAN_RATE_5M5) {
+			*p++ = local->basic_rates & WLAN_RATE_5M5 ?
+				0x8b : 0x0b;
+			(*lpos)++;
+		}
+		if (local->tx_rate_control & WLAN_RATE_11M) {
+			*p++ = local->basic_rates & WLAN_RATE_11M ?
+				0x96 : 0x16;
+			(*lpos)++;
+		}
+		pos = (u16 *) p;
+	}
+
+	prism2_send_mgmt(dev, IEEE80211_FTYPE_MGMT |
+			 (send_deauth ? IEEE80211_STYPE_DEAUTH :
+			  (reassoc ? IEEE80211_STYPE_REASSOC_RESP :
+			   IEEE80211_STYPE_ASSOC_RESP)),
+			 body, (u8 *) pos - (u8 *) body,
+			 hdr->addr2,
+			 send_deauth ? 0 : local->ap->tx_callback_assoc);
+
+	if (sta) {
+		if (resp == WLAN_STATUS_SUCCESS) {
+			sta->last_rx = jiffies;
+			/* STA will be marked associated from TX callback, if
+			 * AssocResp is ACKed */
+		}
+		atomic_dec(&sta->users);
+	}
+
+#if 0
+	PDEBUG(DEBUG_AP, "%s: " MACSTR " %sassoc (len=%d prev_ap=" MACSTR
+	       ") => %d(%d) (%s)\n",
+	       dev->name, MAC2STR(hdr->addr2), reassoc ? "re" : "", len,
+	       MAC2STR(prev_ap), resp, send_deauth, txt);
+#endif
+}
+
+
+/* Called only as a scheduled task for pending AP frames. */
+static void handle_deauth(local_info_t *local, struct sk_buff *skb,
+			  struct hostap_80211_rx_status *rx_stats)
+{
+	struct net_device *dev = local->dev;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+	char *body = (char *) (skb->data + IEEE80211_MGMT_HDR_LEN);
+	int len;
+	u16 reason_code, *pos;
+	struct sta_info *sta = NULL;
+
+	len = skb->len - IEEE80211_MGMT_HDR_LEN;
+
+	if (len < 2) {
+		printk("handle_deauth - too short payload (len=%d)\n", len);
+		return;
+	}
+
+	pos = (u16 *) body;
+	reason_code = __le16_to_cpu(*pos);
+
+	PDEBUG(DEBUG_AP, "%s: deauthentication: " MACSTR " len=%d, "
+	       "reason_code=%d\n", dev->name, MAC2STR(hdr->addr2), len,
+	       reason_code);
+
+	spin_lock_bh(&local->ap->sta_table_lock);
+	sta = ap_get_sta(local->ap, hdr->addr2);
+	if (sta != NULL) {
+		if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap)
+			hostap_event_expired_sta(local->dev, sta);
+		sta->flags &= ~(WLAN_STA_AUTH | WLAN_STA_ASSOC);
+	}
+	spin_unlock_bh(&local->ap->sta_table_lock);
+	if (sta == NULL) {
+		printk("%s: deauthentication from " MACSTR ", "
+		       "reason_code=%d, but STA not authenticated\n", dev->name,
+		       MAC2STR(hdr->addr2), reason_code);
+	}
+}
+
+
+/* Called only as a scheduled task for pending AP frames. */
+static void handle_disassoc(local_info_t *local, struct sk_buff *skb,
+			    struct hostap_80211_rx_status *rx_stats)
+{
+	struct net_device *dev = local->dev;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+	char *body = skb->data + IEEE80211_MGMT_HDR_LEN;
+	int len;
+	u16 reason_code, *pos;
+	struct sta_info *sta = NULL;
+
+	len = skb->len - IEEE80211_MGMT_HDR_LEN;
+
+	if (len < 2) {
+		printk("handle_disassoc - too short payload (len=%d)\n", len);
+		return;
+	}
+
+	pos = (u16 *) body;
+	reason_code = __le16_to_cpu(*pos);
+
+	PDEBUG(DEBUG_AP, "%s: disassociation: " MACSTR " len=%d, "
+	       "reason_code=%d\n", dev->name, MAC2STR(hdr->addr2), len,
+	       reason_code);
+
+	spin_lock_bh(&local->ap->sta_table_lock);
+	sta = ap_get_sta(local->ap, hdr->addr2);
+	if (sta != NULL) {
+		if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap)
+			hostap_event_expired_sta(local->dev, sta);
+		sta->flags &= ~WLAN_STA_ASSOC;
+	}
+	spin_unlock_bh(&local->ap->sta_table_lock);
+	if (sta == NULL) {
+		printk("%s: disassociation from " MACSTR ", "
+		       "reason_code=%d, but STA not authenticated\n",
+		       dev->name, MAC2STR(hdr->addr2), reason_code);
+	}
+}
+
+
+/* Called only as a scheduled task for pending AP frames. */
+static void ap_handle_data_nullfunc(local_info_t *local,
+				    struct ieee80211_hdr *hdr)
+{
+	struct net_device *dev = local->dev;
+
+	/* Some STA firmware versions seem to require a control::ACK frame for
+	 * data::nullfunc, but at least Prism2 station f/w version 0.8.0 does
+	 * not send it, so send the control::ACK for the data::nullfunc here. */
+
+	printk(KERN_DEBUG "Sending control::ACK for data::nullfunc\n");
+	prism2_send_mgmt(dev, IEEE80211_FTYPE_CTL | IEEE80211_STYPE_ACK,
+			 NULL, 0, hdr->addr2, 0);
+}
+
+
+/* Called only as a scheduled task for pending AP frames. */
+static void ap_handle_dropped_data(local_info_t *local,
+				   struct ieee80211_hdr *hdr)
+{
+	struct net_device *dev = local->dev;
+	struct sta_info *sta;
+	u16 reason;
+
+	spin_lock_bh(&local->ap->sta_table_lock);
+	sta = ap_get_sta(local->ap, hdr->addr2);
+	if (sta)
+		atomic_inc(&sta->users);
+	spin_unlock_bh(&local->ap->sta_table_lock);
+
+	if (sta != NULL && (sta->flags & WLAN_STA_ASSOC)) {
+		PDEBUG(DEBUG_AP, "ap_handle_dropped_data: STA is now okay?\n");
+		atomic_dec(&sta->users);
+		return;
+	}
+
+	reason = __constant_cpu_to_le16(
+		WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
+	prism2_send_mgmt(dev, IEEE80211_FTYPE_MGMT |
+			 ((sta == NULL || !(sta->flags & WLAN_STA_ASSOC)) ?
+			  IEEE80211_STYPE_DEAUTH : IEEE80211_STYPE_DISASSOC),
+			 (char *) &reason, sizeof(reason), hdr->addr2, 0);
+
+	if (sta)
+		atomic_dec(&sta->users);
+}
+
+#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
+
+
+/* Called only as a scheduled task for pending AP frames. */
+static void pspoll_send_buffered(local_info_t *local, struct sta_info *sta,
+				 struct sk_buff *skb)
+{
+	struct hostap_skb_tx_data *meta;
+
+	if (!(sta->flags & WLAN_STA_PS)) {
+		/* Station has moved to non-PS mode, so send all buffered
+		 * frames using normal device queue. */
+		dev_queue_xmit(skb);
+		return;
+	}
+
+	/* add a flag for hostap_handle_sta_tx() to know that this skb should
+	 * be passed through even though STA is using PS */
+	meta = (struct hostap_skb_tx_data *) skb->cb;
+	meta->flags |= HOSTAP_TX_FLAGS_BUFFERED_FRAME;
+	if (!skb_queue_empty(&sta->tx_buf)) {
+		/* indicate to STA that more frames follow */
+		meta->flags |= HOSTAP_TX_FLAGS_ADD_MOREDATA;
+	}
+	dev_queue_xmit(skb);
+}
+
+
+/* Called only as a scheduled task for pending AP frames. */
+static void handle_pspoll(local_info_t *local,
+			  struct ieee80211_hdr *hdr,
+			  struct hostap_80211_rx_status *rx_stats)
+{
+	struct net_device *dev = local->dev;
+	struct sta_info *sta;
+	u16 aid;
+	struct sk_buff *skb;
+
+	PDEBUG(DEBUG_PS2, "handle_pspoll: BSSID=" MACSTR ", TA=" MACSTR
+	       " PWRMGT=%d\n",
+	       MAC2STR(hdr->addr1), MAC2STR(hdr->addr2),
+	       !!(le16_to_cpu(hdr->frame_ctl) & IEEE80211_FCTL_PM));
+
+	if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN)) {
+		PDEBUG(DEBUG_AP, "handle_pspoll - addr1(BSSID)=" MACSTR
+		       " not own MAC\n", MAC2STR(hdr->addr1));
+		return;
+	}
+
+	aid = __le16_to_cpu(hdr->duration_id);
+	if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14))) {
+		PDEBUG(DEBUG_PS, "   PSPOLL and AID[15:14] not set\n");
+		return;
+	}
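+	/* mask off the two most significant bits to recover the AID */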
+	aid &= ~BIT(15) & ~BIT(14);
+	if (aid == 0 || aid > MAX_AID_TABLE_SIZE) {
+		PDEBUG(DEBUG_PS, "   invalid aid=%d\n", aid);
+		return;
+	}
+	PDEBUG(DEBUG_PS2, "   aid=%d\n", aid);
+
+	spin_lock_bh(&local->ap->sta_table_lock);
+	sta = ap_get_sta(local->ap, hdr->addr2);
+	if (sta)
+		atomic_inc(&sta->users);
+	spin_unlock_bh(&local->ap->sta_table_lock);
+
+	if (sta == NULL) {
+		PDEBUG(DEBUG_PS, "   STA not found\n");
+		return;
+	}
+	if (sta->aid != aid) {
+		PDEBUG(DEBUG_PS, "   received aid=%i does not match with "
+		       "assoc.aid=%d\n", aid, sta->aid);
+		return;
+	}
+
+	/* FIX: todo:
+	 * - add timeout for buffering (clear aid in TIM vector if buffer times
+	 *   out; expiry time must be longer than ListenInterval for
+	 *   the corresponding STA; "8802-11: 11.2.1.9 AP aging function")
+	 * - what to do, if buffered, pspolled, and sent frame is not ACKed by
+	 *   sta; store buffer for later use and leave TIM aid bit set? use
+	 *   TX event to check whether frame was ACKed?
+	 */
+
+	while ((skb = skb_dequeue(&sta->tx_buf)) != NULL) {
+		/* send buffered frame .. */
+		PDEBUG(DEBUG_PS2, "Sending buffered frame to STA after PS POLL"
+		       " (buffer_count=%d)\n", skb_queue_len(&sta->tx_buf));
+
+		pspoll_send_buffered(local, sta, skb);
+
+		if (sta->flags & WLAN_STA_PS) {
+			/* send only one buffered packet per PS Poll */
+			/* FIX: should ignore further PS Polls until the
+			 * buffered packet that was just sent is acknowledged
+			 * (Tx or TxExc event) */
+			break;
+		}
+	}
+
+	if (skb_queue_empty(&sta->tx_buf)) {
+		/* try to clear aid from TIM */
+		if (!(sta->flags & WLAN_STA_TIM))
+			PDEBUG(DEBUG_PS2,  "Re-unsetting TIM for aid %d\n",
+			       aid);
+		hostap_set_tim(local, aid, 0);
+		sta->flags &= ~WLAN_STA_TIM;
+	}
+
+	atomic_dec(&sta->users);
+}
+
+
+#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
+
+static void handle_wds_oper_queue(void *data)
+{
+	local_info_t *local = data;
+	struct wds_oper_data *entry, *prev;
+
+	spin_lock_bh(&local->lock);
+	entry = local->ap->wds_oper_entries;
+	local->ap->wds_oper_entries = NULL;
+	spin_unlock_bh(&local->lock);
+
+	while (entry) {
+		PDEBUG(DEBUG_AP, "%s: %s automatic WDS connection "
+		       "to AP " MACSTR "\n",
+		       local->dev->name,
+		       entry->type == WDS_ADD ? "adding" : "removing",
+		       MAC2STR(entry->addr));
+		if (entry->type == WDS_ADD)
+			prism2_wds_add(local, entry->addr, 0);
+		else if (entry->type == WDS_DEL)
+			prism2_wds_del(local, entry->addr, 0, 1);
+
+		prev = entry;
+		entry = entry->next;
+		kfree(prev);
+	}
+}
+
+
+/* Called only as a scheduled task for pending AP frames. */
+static void handle_beacon(local_info_t *local, struct sk_buff *skb,
+			  struct hostap_80211_rx_status *rx_stats)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+	char *body = skb->data + IEEE80211_MGMT_HDR_LEN;
+	int len, left;
+	u16 *pos, beacon_int, capability;
+	char *ssid = NULL;
+	unsigned char *supp_rates = NULL;
+	int ssid_len = 0, supp_rates_len = 0;
+	struct sta_info *sta = NULL;
+	int new_sta = 0, channel = -1;
+
+	len = skb->len - IEEE80211_MGMT_HDR_LEN;
+
+	if (len < 8 + 2 + 2) {
+		printk(KERN_DEBUG "handle_beacon - too short payload "
+		       "(len=%d)\n", len);
+		return;
+	}
+
+	pos = (u16 *) body;
+	left = len;
+
+	/* Timestamp (8 octets) */
+	pos += 4; left -= 8;
+	/* Beacon interval (2 octets) */
+	beacon_int = __le16_to_cpu(*pos);
+	pos++; left -= 2;
+	/* Capability information (2 octets) */
+	capability = __le16_to_cpu(*pos);
+	pos++; left -= 2;
+
+	if (local->ap->ap_policy != AP_OTHER_AP_EVEN_IBSS &&
+	    capability & WLAN_CAPABILITY_IBSS)
+		return;
+
+	if (left >= 2) {
+		unsigned int ileft;
+		unsigned char *u = (unsigned char *) pos;
+
+		if (*u == WLAN_EID_SSID) {
+			u++; left--;
+			ileft = *u;
+			u++; left--;
+
+			if (ileft > left || ileft > MAX_SSID_LEN) {
+				PDEBUG(DEBUG_AP, "SSID: overflow\n");
+				return;
+			}
+
+			if (local->ap->ap_policy == AP_OTHER_AP_SAME_SSID &&
+			    (ileft != strlen(local->essid) ||
+			     memcmp(local->essid, u, ileft) != 0)) {
+				/* not our SSID */
+				return;
+			}
+
+			ssid = u;
+			ssid_len = ileft;
+
+			u += ileft;
+			left -= ileft;
+		}
+
+		if (*u == WLAN_EID_SUPP_RATES) {
+			u++; left--;
+			ileft = *u;
+			u++; left--;
+
+			if (ileft > left || ileft == 0 || ileft > 8) {
+				PDEBUG(DEBUG_AP, " - SUPP_RATES len error\n");
+				return;
+			}
+
+			supp_rates = u;
+			supp_rates_len = ileft;
+
+			u += ileft;
+			left -= ileft;
+		}
+
+		if (*u == WLAN_EID_DS_PARAMS) {
+			u++; left--;
+			ileft = *u;
+			u++; left--;
+
+			if (ileft > left || ileft != 1) {
+				PDEBUG(DEBUG_AP, " - DS_PARAMS len error\n");
+				return;
+			}
+
+			channel = *u;
+
+			u += ileft;
+			left -= ileft;
+		}
+	}
+
+	spin_lock_bh(&local->ap->sta_table_lock);
+	sta = ap_get_sta(local->ap, hdr->addr2);
+	if (sta != NULL)
+		atomic_inc(&sta->users);
+	spin_unlock_bh(&local->ap->sta_table_lock);
+
+	if (sta == NULL) {
+		/* add new AP */
+		new_sta = 1;
+		sta = ap_add_sta(local->ap, hdr->addr2);
+		if (sta == NULL) {
+			printk(KERN_INFO "prism2: kmalloc failed for AP "
+			       "data structure\n");
+			return;
+		}
+		hostap_event_new_sta(local->dev, sta);
+
+		/* mark the AP as authenticated and associated for pseudo
+		 * ad-hoc style communication */
+		sta->flags = WLAN_STA_AUTH | WLAN_STA_ASSOC;
+
+		if (local->ap->autom_ap_wds) {
+			hostap_wds_link_oper(local, sta->addr, WDS_ADD);
+		}
+	}
+
+	sta->ap = 1;
+	if (ssid) {
+		sta->u.ap.ssid_len = ssid_len;
+		memcpy(sta->u.ap.ssid, ssid, ssid_len);
+		sta->u.ap.ssid[ssid_len] = '\0';
+	} else {
+		sta->u.ap.ssid_len = 0;
+		sta->u.ap.ssid[0] = '\0';
+	}
+	sta->u.ap.channel = channel;
+	sta->rx_packets++;
+	sta->rx_bytes += len;
+	sta->u.ap.last_beacon = sta->last_rx = jiffies;
+	sta->capability = capability;
+	sta->listen_interval = beacon_int;
+
+	atomic_dec(&sta->users);
+
+	if (new_sta) {
+		memset(sta->supported_rates, 0, sizeof(sta->supported_rates));
+		memcpy(sta->supported_rates, supp_rates, supp_rates_len);
+		prism2_check_tx_rates(sta);
+	}
+}
+
+#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
+
+
+/* Called only as a tasklet. */
+static void handle_ap_item(local_info_t *local, struct sk_buff *skb,
+			   struct hostap_80211_rx_status *rx_stats)
+{
+#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
+	struct net_device *dev = local->dev;
+#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
+	u16 fc, type, stype;
+	struct ieee80211_hdr *hdr;
+
+	/* FIX: should give skb->len to handler functions and check that the
+	 * buffer is long enough */
+	hdr = (struct ieee80211_hdr *) skb->data;
+	fc = le16_to_cpu(hdr->frame_ctl);
+	type = WLAN_FC_GET_TYPE(fc);
+	stype = WLAN_FC_GET_STYPE(fc);
+
+#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
+	if (!local->hostapd && type == IEEE80211_FTYPE_DATA) {
+		PDEBUG(DEBUG_AP, "handle_ap_item - data frame\n");
+
+		if (!(fc & IEEE80211_FCTL_TODS) ||
+		    (fc & IEEE80211_FCTL_FROMDS)) {
+			if (stype == IEEE80211_STYPE_NULLFUNC) {
+				/* a nullfunc frame without ToDS seems to be
+				 * used to check AP association, so send a
+				 * reject message to speed up re-association */
+				ap_handle_dropped_data(local, hdr);
+				goto done;
+			}
+			PDEBUG(DEBUG_AP, "   not ToDS frame (fc=0x%04x)\n",
+			       fc);
+			goto done;
+		}
+
+		if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN)) {
+			PDEBUG(DEBUG_AP, "handle_ap_item - addr1(BSSID)="
+			       MACSTR " not own MAC\n",
+			       MAC2STR(hdr->addr1));
+			goto done;
+		}
+
+		if (local->ap->nullfunc_ack &&
+		    stype == IEEE80211_STYPE_NULLFUNC)
+			ap_handle_data_nullfunc(local, hdr);
+		else
+			ap_handle_dropped_data(local, hdr);
+		goto done;
+	}
+
+	if (type == IEEE80211_FTYPE_MGMT && stype == IEEE80211_STYPE_BEACON) {
+		handle_beacon(local, skb, rx_stats);
+		goto done;
+	}
+#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
+
+	if (type == IEEE80211_FTYPE_CTL && stype == IEEE80211_STYPE_PSPOLL) {
+		handle_pspoll(local, hdr, rx_stats);
+		goto done;
+	}
+
+	if (local->hostapd) {
+		PDEBUG(DEBUG_AP, "Unknown frame in AP queue: type=0x%02x "
+		       "subtype=0x%02x\n", type, stype);
+		goto done;
+	}
+
+#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
+	if (type != IEEE80211_FTYPE_MGMT) {
+		PDEBUG(DEBUG_AP, "handle_ap_item - not a management frame?\n");
+		goto done;
+	}
+
+	if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN)) {
+		PDEBUG(DEBUG_AP, "handle_ap_item - addr1(DA)=" MACSTR
+		       " not own MAC\n", MAC2STR(hdr->addr1));
+		goto done;
+	}
+
+	if (memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN)) {
+		PDEBUG(DEBUG_AP, "handle_ap_item - addr3(BSSID)=" MACSTR
+		       " not own MAC\n", MAC2STR(hdr->addr3));
+		goto done;
+	}
+
+	switch (stype) {
+	case IEEE80211_STYPE_ASSOC_REQ:
+		handle_assoc(local, skb, rx_stats, 0);
+		break;
+	case IEEE80211_STYPE_ASSOC_RESP:
+		PDEBUG(DEBUG_AP, "==> ASSOC RESP (ignored)\n");
+		break;
+	case IEEE80211_STYPE_REASSOC_REQ:
+		handle_assoc(local, skb, rx_stats, 1);
+		break;
+	case IEEE80211_STYPE_REASSOC_RESP:
+		PDEBUG(DEBUG_AP, "==> REASSOC RESP (ignored)\n");
+		break;
+	case IEEE80211_STYPE_ATIM:
+		PDEBUG(DEBUG_AP, "==> ATIM (ignored)\n");
+		break;
+	case IEEE80211_STYPE_DISASSOC:
+		handle_disassoc(local, skb, rx_stats);
+		break;
+	case IEEE80211_STYPE_AUTH:
+		handle_authen(local, skb, rx_stats);
+		break;
+	case IEEE80211_STYPE_DEAUTH:
+		handle_deauth(local, skb, rx_stats);
+		break;
+	default:
+		PDEBUG(DEBUG_AP, "Unknown mgmt frame subtype 0x%02x\n",
+		       stype >> 4);
+		break;
+	}
+#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
+
+ done:
+	dev_kfree_skb(skb);
+}
+
+
+/* Called only as a tasklet (software IRQ) */
+void hostap_rx(struct net_device *dev, struct sk_buff *skb,
+	       struct hostap_80211_rx_status *rx_stats)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	u16 fc;
+	struct ieee80211_hdr *hdr;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	if (skb->len < 16)
+		goto drop;
+
+	local->stats.rx_packets++;
+
+	hdr = (struct ieee80211_hdr *) skb->data;
+	fc = le16_to_cpu(hdr->frame_ctl);
+
+	if (local->ap->ap_policy == AP_OTHER_AP_SKIP_ALL &&
+	    WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_MGMT &&
+	    WLAN_FC_GET_STYPE(fc) == IEEE80211_STYPE_BEACON)
+		goto drop;
+
+	skb->protocol = __constant_htons(ETH_P_HOSTAP);
+	handle_ap_item(local, skb, rx_stats);
+	return;
+
+ drop:
+	dev_kfree_skb(skb);
+}
+
+
+/* Called only as a tasklet (software IRQ) */
+static void schedule_packet_send(local_info_t *local, struct sta_info *sta)
+{
+	struct sk_buff *skb;
+	struct ieee80211_hdr *hdr;
+	struct hostap_80211_rx_status rx_stats;
+
+	if (skb_queue_empty(&sta->tx_buf))
+		return;
+
+	skb = dev_alloc_skb(16);
+	if (skb == NULL) {
+		printk(KERN_DEBUG "%s: schedule_packet_send: skb alloc "
+		       "failed\n", local->dev->name);
+		return;
+	}
+
+	hdr = (struct ieee80211_hdr *) skb_put(skb, 16);
+
+	/* Generate a fake pspoll frame to start packet delivery */
+	hdr->frame_ctl = __constant_cpu_to_le16(
+		IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL);
+	memcpy(hdr->addr1, local->dev->dev_addr, ETH_ALEN);
+	memcpy(hdr->addr2, sta->addr, ETH_ALEN);
+	hdr->duration_id = cpu_to_le16(sta->aid | BIT(15) | BIT(14));
+
+	PDEBUG(DEBUG_PS2, "%s: Scheduling buffered packet delivery for "
+	       "STA " MACSTR "\n", local->dev->name, MAC2STR(sta->addr));
+
+	skb->dev = local->dev;
+
+	memset(&rx_stats, 0, sizeof(rx_stats));
+	hostap_rx(local->dev, skb, &rx_stats);
+}
+
+
+static int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
+				  struct iw_quality qual[], int buf_size,
+				  int aplist)
+{
+	struct ap_data *ap = local->ap;
+	struct list_head *ptr;
+	int count = 0;
+
+	spin_lock_bh(&ap->sta_table_lock);
+
+	for (ptr = ap->sta_list.next; ptr != NULL && ptr != &ap->sta_list;
+	     ptr = ptr->next) {
+		struct sta_info *sta = (struct sta_info *) ptr;
+
+		if (aplist && !sta->ap)
+			continue;
+		addr[count].sa_family = ARPHRD_ETHER;
+		memcpy(addr[count].sa_data, sta->addr, ETH_ALEN);
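+		/* derive a rough link quality: scale the raw signal if no
+		 * silence (noise) sample is available, otherwise use the
+		 * signal-to-silence margin */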
+		if (sta->last_rx_silence == 0)
+			qual[count].qual = sta->last_rx_signal < 27 ?
+				0 : (sta->last_rx_signal - 27) * 92 / 127;
+		else
+			qual[count].qual = sta->last_rx_signal -
+				sta->last_rx_silence - 35;
+		qual[count].level = HFA384X_LEVEL_TO_dBm(sta->last_rx_signal);
+		qual[count].noise = HFA384X_LEVEL_TO_dBm(sta->last_rx_silence);
+		qual[count].updated = sta->last_rx_updated;
+
+		sta->last_rx_updated = 0;
+
+		count++;
+		if (count >= buf_size)
+			break;
+	}
+	spin_unlock_bh(&ap->sta_table_lock);
+
+	return count;
+}
+
+
+/* Translate our list of Access Points & Stations to a card-independent
+ * format that the Wireless Tools will understand - Jean II */
+static int prism2_ap_translate_scan(struct net_device *dev, char *buffer)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	struct ap_data *ap;
+	struct list_head *ptr;
+	struct iw_event iwe;
+	char *current_ev = buffer;
+	char *end_buf = buffer + IW_SCAN_MAX_DATA;
+#if !defined(PRISM2_NO_KERNEL_IEEE80211_MGMT)
+	char buf[64];
+#endif
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+	ap = local->ap;
+
+	spin_lock_bh(&ap->sta_table_lock);
+
+	for (ptr = ap->sta_list.next; ptr != NULL && ptr != &ap->sta_list;
+	     ptr = ptr->next) {
+		struct sta_info *sta = (struct sta_info *) ptr;
+
+		/* First entry *MUST* be the AP MAC address */
+		memset(&iwe, 0, sizeof(iwe));
+		iwe.cmd = SIOCGIWAP;
+		iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+		memcpy(iwe.u.ap_addr.sa_data, sta->addr, ETH_ALEN);
+		iwe.len = IW_EV_ADDR_LEN;
+		current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
+						  IW_EV_ADDR_LEN);
+
+		/* Use the mode to indicate if it's a station or
+		 * an Access Point */
+		memset(&iwe, 0, sizeof(iwe));
+		iwe.cmd = SIOCGIWMODE;
+		if (sta->ap)
+			iwe.u.mode = IW_MODE_MASTER;
+		else
+			iwe.u.mode = IW_MODE_INFRA;
+		iwe.len = IW_EV_UINT_LEN;
+		current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
+						  IW_EV_UINT_LEN);
+
+		/* Some quality */
+		memset(&iwe, 0, sizeof(iwe));
+		iwe.cmd = IWEVQUAL;
+		if (sta->last_rx_silence == 0)
+			iwe.u.qual.qual = sta->last_rx_signal < 27 ?
+				0 : (sta->last_rx_signal - 27) * 92 / 127;
+		else
+			iwe.u.qual.qual = sta->last_rx_signal -
+				sta->last_rx_silence - 35;
+		iwe.u.qual.level = HFA384X_LEVEL_TO_dBm(sta->last_rx_signal);
+		iwe.u.qual.noise = HFA384X_LEVEL_TO_dBm(sta->last_rx_silence);
+		iwe.u.qual.updated = sta->last_rx_updated;
+		iwe.len = IW_EV_QUAL_LEN;
+		current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
+						  IW_EV_QUAL_LEN);
+
+#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
+		if (sta->ap) {
+			memset(&iwe, 0, sizeof(iwe));
+			iwe.cmd = SIOCGIWESSID;
+			iwe.u.data.length = sta->u.ap.ssid_len;
+			iwe.u.data.flags = 1;
+			current_ev = iwe_stream_add_point(current_ev, end_buf,
+							  &iwe,
+							  sta->u.ap.ssid);
+
+			memset(&iwe, 0, sizeof(iwe));
+			iwe.cmd = SIOCGIWENCODE;
+			if (sta->capability & WLAN_CAPABILITY_PRIVACY)
+				iwe.u.data.flags =
+					IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+			else
+				iwe.u.data.flags = IW_ENCODE_DISABLED;
+			current_ev = iwe_stream_add_point(current_ev, end_buf,
+							  &iwe,
+							  sta->u.ap.ssid
+							  /* 0 byte memcpy */);
+
+			if (sta->u.ap.channel > 0 &&
+			    sta->u.ap.channel <= FREQ_COUNT) {
+				memset(&iwe, 0, sizeof(iwe));
+				iwe.cmd = SIOCGIWFREQ;
+				iwe.u.freq.m = freq_list[sta->u.ap.channel - 1]
+					* 100000;
+				iwe.u.freq.e = 1;
+				current_ev = iwe_stream_add_event(
+					current_ev, end_buf, &iwe,
+					IW_EV_FREQ_LEN);
+			}
+
+			memset(&iwe, 0, sizeof(iwe));
+			iwe.cmd = IWEVCUSTOM;
+			sprintf(buf, "beacon_interval=%d",
+				sta->listen_interval);
+			iwe.u.data.length = strlen(buf);
+			current_ev = iwe_stream_add_point(current_ev, end_buf,
+							  &iwe, buf);
+		}
+#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
+
+		sta->last_rx_updated = 0;
+
+		/* To be continued, we should make good use of IWEVCUSTOM */
+	}
+
+	spin_unlock_bh(&ap->sta_table_lock);
+
+	return current_ev - buffer;
+}
+
+
+static int prism2_hostapd_add_sta(struct ap_data *ap,
+				  struct prism2_hostapd_param *param)
+{
+	struct sta_info *sta;
+
+	spin_lock_bh(&ap->sta_table_lock);
+	sta = ap_get_sta(ap, param->sta_addr);
+	if (sta)
+		atomic_inc(&sta->users);
+	spin_unlock_bh(&ap->sta_table_lock);
+
+	if (sta == NULL) {
+		sta = ap_add_sta(ap, param->sta_addr);
+		if (sta == NULL)
+			return -1;
+	}
+
+	if (!(sta->flags & WLAN_STA_ASSOC) && !sta->ap && sta->local)
+		hostap_event_new_sta(sta->local->dev, sta);
+
+	sta->flags |= WLAN_STA_AUTH | WLAN_STA_ASSOC;
+	sta->last_rx = jiffies;
+	sta->aid = param->u.add_sta.aid;
+	sta->capability = param->u.add_sta.capability;
+	sta->tx_supp_rates = param->u.add_sta.tx_supp_rates;
+	if (sta->tx_supp_rates & WLAN_RATE_1M)
+		sta->supported_rates[0] = 2;
+	if (sta->tx_supp_rates & WLAN_RATE_2M)
+		sta->supported_rates[1] = 4;
+	if (sta->tx_supp_rates & WLAN_RATE_5M5)
+		sta->supported_rates[2] = 11;
+	if (sta->tx_supp_rates & WLAN_RATE_11M)
+		sta->supported_rates[3] = 22;
+	prism2_check_tx_rates(sta);
+	atomic_dec(&sta->users);
+	return 0;
+}
+
+
+static int prism2_hostapd_remove_sta(struct ap_data *ap,
+				     struct prism2_hostapd_param *param)
+{
+	struct sta_info *sta;
+
+	spin_lock_bh(&ap->sta_table_lock);
+	sta = ap_get_sta(ap, param->sta_addr);
+	if (sta) {
+		ap_sta_hash_del(ap, sta);
+		list_del(&sta->list);
+	}
+	spin_unlock_bh(&ap->sta_table_lock);
+
+	if (!sta)
+		return -ENOENT;
+
+	if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap && sta->local)
+		hostap_event_expired_sta(sta->local->dev, sta);
+	ap_free_sta(ap, sta);
+
+	return 0;
+}
+
+
+static int prism2_hostapd_get_info_sta(struct ap_data *ap,
+				       struct prism2_hostapd_param *param)
+{
+	struct sta_info *sta;
+
+	spin_lock_bh(&ap->sta_table_lock);
+	sta = ap_get_sta(ap, param->sta_addr);
+	if (sta)
+		atomic_inc(&sta->users);
+	spin_unlock_bh(&ap->sta_table_lock);
+
+	if (!sta)
+		return -ENOENT;
+
+	param->u.get_info_sta.inactive_sec = (jiffies - sta->last_rx) / HZ;
+
+	atomic_dec(&sta->users);
+
+	return 1;
+}
+
+
+static int prism2_hostapd_set_flags_sta(struct ap_data *ap,
+					struct prism2_hostapd_param *param)
+{
+	struct sta_info *sta;
+
+	spin_lock_bh(&ap->sta_table_lock);
+	sta = ap_get_sta(ap, param->sta_addr);
+	if (sta) {
+		sta->flags |= param->u.set_flags_sta.flags_or;
+		sta->flags &= param->u.set_flags_sta.flags_and;
+	}
+	spin_unlock_bh(&ap->sta_table_lock);
+
+	if (!sta)
+		return -ENOENT;
+
+	return 0;
+}
+
+
+static int prism2_hostapd_sta_clear_stats(struct ap_data *ap,
+					  struct prism2_hostapd_param *param)
+{
+	struct sta_info *sta;
+	int rate;
+
+	spin_lock_bh(&ap->sta_table_lock);
+	sta = ap_get_sta(ap, param->sta_addr);
+	if (sta) {
+		sta->rx_packets = sta->tx_packets = 0;
+		sta->rx_bytes = sta->tx_bytes = 0;
+		for (rate = 0; rate < WLAN_RATE_COUNT; rate++) {
+			sta->tx_count[rate] = 0;
+			sta->rx_count[rate] = 0;
+		}
+	}
+	spin_unlock_bh(&ap->sta_table_lock);
+
+	if (!sta)
+		return -ENOENT;
+
+	return 0;
+}
+
+
+static int prism2_hostapd(struct ap_data *ap,
+			  struct prism2_hostapd_param *param)
+{
+	switch (param->cmd) {
+	case PRISM2_HOSTAPD_FLUSH:
+		ap_control_kickall(ap);
+		return 0;
+	case PRISM2_HOSTAPD_ADD_STA:
+		return prism2_hostapd_add_sta(ap, param);
+	case PRISM2_HOSTAPD_REMOVE_STA:
+		return prism2_hostapd_remove_sta(ap, param);
+	case PRISM2_HOSTAPD_GET_INFO_STA:
+		return prism2_hostapd_get_info_sta(ap, param);
+	case PRISM2_HOSTAPD_SET_FLAGS_STA:
+		return prism2_hostapd_set_flags_sta(ap, param);
+	case PRISM2_HOSTAPD_STA_CLEAR_STATS:
+		return prism2_hostapd_sta_clear_stats(ap, param);
+	default:
+		printk(KERN_WARNING "prism2_hostapd: unknown cmd=%d\n",
+		       param->cmd);
+		return -EOPNOTSUPP;
+	}
+}
+
+
+/* Update station info for host-based TX rate control and return current
+ * TX rate */
+static int ap_update_sta_tx_rate(struct sta_info *sta, struct net_device *dev)
+{
+	int ret = sta->tx_rate;
+	struct hostap_interface *iface;
+	local_info_t *local;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	sta->tx_count[sta->tx_rate_idx]++;
+	sta->tx_since_last_failure++;
+	sta->tx_consecutive_exc = 0;
+	if (sta->tx_since_last_failure >= WLAN_RATE_UPDATE_COUNT &&
+	    sta->tx_rate_idx < sta->tx_max_rate) {
+		/* use next higher rate */
+		int old_rate, new_rate;
+		old_rate = new_rate = sta->tx_rate_idx;
+		while (new_rate < sta->tx_max_rate) {
+			new_rate++;
+			if (ap_tx_rate_ok(new_rate, sta, local)) {
+				sta->tx_rate_idx = new_rate;
+				break;
+			}
+		}
+		if (old_rate != sta->tx_rate_idx) {
+			switch (sta->tx_rate_idx) {
+			case 0: sta->tx_rate = 10; break;
+			case 1: sta->tx_rate = 20; break;
+			case 2: sta->tx_rate = 55; break;
+			case 3: sta->tx_rate = 110; break;
+			default: sta->tx_rate = 0; break;
+			}
+			PDEBUG(DEBUG_AP, "%s: STA " MACSTR " TX rate raised to"
+			       " %d\n", dev->name, MAC2STR(sta->addr),
+			       sta->tx_rate);
+		}
+		sta->tx_since_last_failure = 0;
+	}
+
+	return ret;
+}
+
+
+/* Called only from software IRQ. Called for each TX frame prior to possible
+ * encryption and transmission. */
+ap_tx_ret hostap_handle_sta_tx(local_info_t *local, struct hostap_tx_data *tx)
+{
+	struct sta_info *sta = NULL;
+	struct sk_buff *skb = tx->skb;
+	int set_tim, ret;
+	struct ieee80211_hdr *hdr;
+	struct hostap_skb_tx_data *meta;
+
+	meta = (struct hostap_skb_tx_data *) skb->cb;
+	ret = AP_TX_CONTINUE;
+	if (local->ap == NULL || skb->len < 10 ||
+	    meta->iface->type == HOSTAP_INTERFACE_STA)
+		goto out;
+
+	hdr = (struct ieee80211_hdr *) skb->data;
+
+	if (hdr->addr1[0] & 0x01) {
+		/* broadcast/multicast frame - no AP related processing */
+		goto out;
+	}
+
+	/* unicast packet - check whether destination STA is associated */
+	spin_lock(&local->ap->sta_table_lock);
+	sta = ap_get_sta(local->ap, hdr->addr1);
+	if (sta)
+		atomic_inc(&sta->users);
+	spin_unlock(&local->ap->sta_table_lock);
+
+	if (local->iw_mode == IW_MODE_MASTER && sta == NULL &&
+	    !(meta->flags & HOSTAP_TX_FLAGS_WDS) &&
+	    meta->iface->type != HOSTAP_INTERFACE_MASTER &&
+	    meta->iface->type != HOSTAP_INTERFACE_AP) {
+#if 0
+		/* This can happen, e.g., when wlan0 is added to a bridge and
+		 * the bridging code does not know which port is the correct
+		 * target for a unicast frame. In this case, the packet is
+		 * sent to all ports of the bridge. Since this is a valid
+		 * scenario, do not print out any errors here. */
+		if (net_ratelimit()) {
+			printk(KERN_DEBUG "AP: drop packet to non-associated "
+			       "STA " MACSTR "\n", MAC2STR(hdr->addr1));
+		}
+#endif
+		local->ap->tx_drop_nonassoc++;
+		ret = AP_TX_DROP;
+		goto out;
+	}
+
+	if (sta == NULL)
+		goto out;
+
+	if (!(sta->flags & WLAN_STA_AUTHORIZED))
+		ret = AP_TX_CONTINUE_NOT_AUTHORIZED;
+
+	/* Set tx_rate if using host-based TX rate control */
+	if (!local->fw_tx_rate_control)
+		local->ap->last_tx_rate = meta->rate =
+			ap_update_sta_tx_rate(sta, local->dev);
+
+	if (local->iw_mode != IW_MODE_MASTER)
+		goto out;
+
+	if (!(sta->flags & WLAN_STA_PS))
+		goto out;
+
+	if (meta->flags & HOSTAP_TX_FLAGS_ADD_MOREDATA) {
+		/* indicate to STA that more frames follow */
+		hdr->frame_ctl |=
+			__constant_cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+	}
+
+	if (meta->flags & HOSTAP_TX_FLAGS_BUFFERED_FRAME) {
+		/* packet was already buffered and is now being sent due to
+		 * a PS poll, so do not rebuffer it */
+		goto out;
+	}
+
+	if (skb_queue_len(&sta->tx_buf) >= STA_MAX_TX_BUFFER) {
+		PDEBUG(DEBUG_PS, "%s: No more space in STA (" MACSTR ")'s PS "
+		       "mode buffer\n", local->dev->name, MAC2STR(sta->addr));
+		/* Make sure that TIM is set for the station (it might not be
+		 * after AP wlan hw reset). */
+		/* FIX: should fix hw reset to restore bits based on STA
+		 * buffer state.. */
+		hostap_set_tim(local, sta->aid, 1);
+		sta->flags |= WLAN_STA_TIM;
+		ret = AP_TX_DROP;
+		goto out;
+	}
+
+	/* STA in PS mode, buffer frame for later delivery */
+	set_tim = skb_queue_empty(&sta->tx_buf);
+	skb_queue_tail(&sta->tx_buf, skb);
+	/* FIX: could save RX time to skb and expire buffered frames after
+	 * some time if STA does not poll for them */
+
+	if (set_tim) {
+		if (sta->flags & WLAN_STA_TIM)
+			PDEBUG(DEBUG_PS2, "Re-setting TIM for aid %d\n",
+			       sta->aid);
+		hostap_set_tim(local, sta->aid, 1);
+		sta->flags |= WLAN_STA_TIM;
+	}
+
+	ret = AP_TX_BUFFERED;
+
+ out:
+	if (sta != NULL) {
+		if (ret == AP_TX_CONTINUE ||
+		    ret == AP_TX_CONTINUE_NOT_AUTHORIZED) {
+			sta->tx_packets++;
+			sta->tx_bytes += skb->len;
+			sta->last_tx = jiffies;
+		}
+
+		if ((ret == AP_TX_CONTINUE ||
+		     ret == AP_TX_CONTINUE_NOT_AUTHORIZED) &&
+		    sta->crypt && tx->host_encrypt) {
+			tx->crypt = sta->crypt;
+			tx->sta_ptr = sta; /* hostap_handle_sta_release() will
+					    * be called to release sta info
+					    * later */
+		} else
+			atomic_dec(&sta->users);
+	}
+
+	return ret;
+}
+
+
+void hostap_handle_sta_release(void *ptr)
+{
+	struct sta_info *sta = ptr;
+	atomic_dec(&sta->users);
+}
+
+
+/* Called only as a tasklet (software IRQ) */
+void hostap_handle_sta_tx_exc(local_info_t *local, struct sk_buff *skb)
+{
+	struct sta_info *sta;
+	struct ieee80211_hdr *hdr;
+	struct hostap_skb_tx_data *meta;
+
+	hdr = (struct ieee80211_hdr *) skb->data;
+	meta = (struct hostap_skb_tx_data *) skb->cb;
+
+	spin_lock(&local->ap->sta_table_lock);
+	sta = ap_get_sta(local->ap, hdr->addr1);
+	if (!sta) {
+		spin_unlock(&local->ap->sta_table_lock);
+		PDEBUG(DEBUG_AP, "%s: Could not find STA " MACSTR " for this "
+		       "TX error (@%lu)\n",
+		       local->dev->name, MAC2STR(hdr->addr1), jiffies);
+		return;
+	}
+
+	sta->tx_since_last_failure = 0;
+	sta->tx_consecutive_exc++;
+
+	if (sta->tx_consecutive_exc >= WLAN_RATE_DECREASE_THRESHOLD &&
+	    sta->tx_rate_idx > 0 && meta->rate <= sta->tx_rate) {
+		/* use next lower rate */
+		int old, rate;
+		old = rate = sta->tx_rate_idx;
+		while (rate > 0) {
+			rate--;
+			if (ap_tx_rate_ok(rate, sta, local)) {
+				sta->tx_rate_idx = rate;
+				break;
+			}
+		}
+		if (old != sta->tx_rate_idx) {
+			switch (sta->tx_rate_idx) {
+			case 0: sta->tx_rate = 10; break;
+			case 1: sta->tx_rate = 20; break;
+			case 2: sta->tx_rate = 55; break;
+			case 3: sta->tx_rate = 110; break;
+			default: sta->tx_rate = 0; break;
+			}
+			PDEBUG(DEBUG_AP, "%s: STA " MACSTR " TX rate lowered "
+			       "to %d\n", local->dev->name, MAC2STR(sta->addr),
+			       sta->tx_rate);
+		}
+		sta->tx_consecutive_exc = 0;
+	}
+	spin_unlock(&local->ap->sta_table_lock);
+}
+
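+#if 0
+/* Illustrative sketch (not part of the driver): the host-based TX rate
+ * control above maps tx_rate_idx 0..3 to the 802.11b rates in 0.1 Mbps
+ * units, raising the rate after WLAN_RATE_UPDATE_COUNT consecutive
+ * successes (ap_update_sta_tx_rate) and lowering it after
+ * WLAN_RATE_DECREASE_THRESHOLD consecutive TX exceptions
+ * (hostap_handle_sta_tx_exc). A hypothetical helper equivalent to the
+ * switch statements used in both functions could look like this: */
+static u8 example_rate_idx_to_tx_rate(int idx)
+{
+	static const u8 rates[] = { 10, 20, 55, 110 }; /* 1, 2, 5.5, 11 Mbps */
+	return (idx >= 0 && idx < WLAN_RATE_COUNT) ? rates[idx] : 0;
+}
+#endif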
+
+static void hostap_update_sta_ps2(local_info_t *local, struct sta_info *sta,
+				  int pwrmgt, int type, int stype)
+{
+	if (pwrmgt && !(sta->flags & WLAN_STA_PS)) {
+		sta->flags |= WLAN_STA_PS;
+		PDEBUG(DEBUG_PS2, "STA " MACSTR " changed to use PS "
+		       "mode (type=0x%02X, stype=0x%02X)\n",
+		       MAC2STR(sta->addr), type >> 2, stype >> 4);
+	} else if (!pwrmgt && (sta->flags & WLAN_STA_PS)) {
+		sta->flags &= ~WLAN_STA_PS;
+		PDEBUG(DEBUG_PS2, "STA " MACSTR " changed to not use "
+		       "PS mode (type=0x%02X, stype=0x%02X)\n",
+		       MAC2STR(sta->addr), type >> 2, stype >> 4);
+		if (type != IEEE80211_FTYPE_CTL ||
+		    stype != IEEE80211_STYPE_PSPOLL)
+			schedule_packet_send(local, sta);
+	}
+}
+
+
+/* Called only as a tasklet (software IRQ). Called for each RX frame to update
+ * STA power saving state. The PS state is read from the PwrMgt flag of the
+ * 802.11 frame_ctl field. */
+int hostap_update_sta_ps(local_info_t *local, struct ieee80211_hdr *hdr)
+{
+	struct sta_info *sta;
+	u16 fc;
+
+	spin_lock(&local->ap->sta_table_lock);
+	sta = ap_get_sta(local->ap, hdr->addr2);
+	if (sta)
+		atomic_inc(&sta->users);
+	spin_unlock(&local->ap->sta_table_lock);
+
+	if (!sta)
+		return -1;
+
+	fc = le16_to_cpu(hdr->frame_ctl);
+	hostap_update_sta_ps2(local, sta, fc & IEEE80211_FCTL_PM,
+			      WLAN_FC_GET_TYPE(fc), WLAN_FC_GET_STYPE(fc));
+
+	atomic_dec(&sta->users);
+	return 0;
+}
+
+
+/* Called only as a tasklet (software IRQ). Called for each RX frame after
+ * getting RX header and payload from hardware. */
+ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev,
+			       struct sk_buff *skb,
+			       struct hostap_80211_rx_status *rx_stats,
+			       int wds)
+{
+	int ret;
+	struct sta_info *sta;
+	u16 fc, type, stype;
+	struct ieee80211_hdr *hdr;
+
+	if (local->ap == NULL)
+		return AP_RX_CONTINUE;
+
+	hdr = (struct ieee80211_hdr *) skb->data;
+
+	fc = le16_to_cpu(hdr->frame_ctl);
+	type = WLAN_FC_GET_TYPE(fc);
+	stype = WLAN_FC_GET_STYPE(fc);
+
+	spin_lock(&local->ap->sta_table_lock);
+	sta = ap_get_sta(local->ap, hdr->addr2);
+	if (sta)
+		atomic_inc(&sta->users);
+	spin_unlock(&local->ap->sta_table_lock);
+
+	if (sta && !(sta->flags & WLAN_STA_AUTHORIZED))
+		ret = AP_RX_CONTINUE_NOT_AUTHORIZED;
+	else
+		ret = AP_RX_CONTINUE;
+
+
+	if (fc & IEEE80211_FCTL_TODS) {
+		if (!wds && (sta == NULL || !(sta->flags & WLAN_STA_ASSOC))) {
+			if (local->hostapd) {
+				prism2_rx_80211(local->apdev, skb, rx_stats,
+						PRISM2_RX_NON_ASSOC);
+#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
+			} else {
+				printk(KERN_DEBUG "%s: dropped received packet"
+				       " from non-associated STA " MACSTR
+				       " (type=0x%02x, subtype=0x%02x)\n",
+				       dev->name, MAC2STR(hdr->addr2),
+				       type >> 2, stype >> 4);
+				hostap_rx(dev, skb, rx_stats);
+#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
+			}
+			ret = AP_RX_EXIT;
+			goto out;
+		}
+	} else if (fc & IEEE80211_FCTL_FROMDS) {
+		if (!wds) {
+			/* FromDS frame - not for us; probably
+			 * broadcast/multicast in another BSS - drop */
+			if (memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0) {
+				printk(KERN_DEBUG "Odd.. FromDS packet "
+				       "received with own BSSID\n");
+				hostap_dump_rx_80211(dev->name, skb, rx_stats);
+			}
+			ret = AP_RX_DROP;
+			goto out;
+		}
+	} else if (stype == IEEE80211_STYPE_NULLFUNC && sta == NULL &&
+		   memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0) {
+
+		if (local->hostapd) {
+			prism2_rx_80211(local->apdev, skb, rx_stats,
+					PRISM2_RX_NON_ASSOC);
+#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
+		} else {
+			/* At least Lucent f/w seems to send data::nullfunc
+			 * frames with no ToDS flag when the current AP returns
+			 * after being unavailable for some time. Speed up
+			 * re-association by informing the station about it not
+			 * being associated. */
+			printk(KERN_DEBUG "%s: rejected received nullfunc "
+			       "frame without ToDS from not associated STA "
+			       MACSTR "\n",
+			       dev->name, MAC2STR(hdr->addr2));
+			hostap_rx(dev, skb, rx_stats);
+#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
+		}
+		ret = AP_RX_EXIT;
+		goto out;
+	} else if (stype == IEEE80211_STYPE_NULLFUNC) {
+		/* At least Lucent cards seem to send periodic nullfunc
+		 * frames with ToDS. Let these through to update SQ
+		 * stats and PS state. Nullfunc frames do not contain
+		 * any data and they will be dropped below. */
+	} else {
+		/* If BSSID (Addr3) is foreign, this frame is a normal
+		 * broadcast frame from an IBSS network. Drop it silently.
+		 * If BSSID is own, report the dropping of this frame. */
+		if (memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN) == 0) {
+			printk(KERN_DEBUG "%s: dropped received packet from "
+			       MACSTR " with no ToDS flag (type=0x%02x, "
+			       "subtype=0x%02x)\n", dev->name,
+			       MAC2STR(hdr->addr2), type >> 2, stype >> 4);
+			hostap_dump_rx_80211(dev->name, skb, rx_stats);
+		}
+		ret = AP_RX_DROP;
+		goto out;
+	}
+
+	if (sta) {
+		hostap_update_sta_ps2(local, sta, fc & IEEE80211_FCTL_PM,
+				      type, stype);
+
+		sta->rx_packets++;
+		sta->rx_bytes += skb->len;
+		sta->last_rx = jiffies;
+	}
+
+	if (local->ap->nullfunc_ack && stype == IEEE80211_STYPE_NULLFUNC &&
+	    fc & IEEE80211_FCTL_TODS) {
+		if (local->hostapd) {
+			prism2_rx_80211(local->apdev, skb, rx_stats,
+					PRISM2_RX_NULLFUNC_ACK);
+#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
+		} else {
+			/* some STA f/w's seem to require control::ACK frame
+			 * for data::nullfunc, but Prism2 f/w 0.8.0 (at least
+			 * from Compaq) does not send this.. Try to generate
+			 * ACK for these frames from the host driver to make
+			 * power saving work with, e.g., Lucent WaveLAN f/w */
+			hostap_rx(dev, skb, rx_stats);
+#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
+		}
+		ret = AP_RX_EXIT;
+		goto out;
+	}
+
+ out:
+	if (sta)
+		atomic_dec(&sta->users);
+
+	return ret;
+}
+
+
+/* Called only as a tasklet (software IRQ) */
+int hostap_handle_sta_crypto(local_info_t *local,
+			     struct ieee80211_hdr *hdr,
+			     struct ieee80211_crypt_data **crypt,
+			     void **sta_ptr)
+{
+	struct sta_info *sta;
+
+	spin_lock(&local->ap->sta_table_lock);
+	sta = ap_get_sta(local->ap, hdr->addr2);
+	if (sta)
+		atomic_inc(&sta->users);
+	spin_unlock(&local->ap->sta_table_lock);
+
+	if (!sta)
+		return -1;
+
+	if (sta->crypt) {
+		*crypt = sta->crypt;
+		*sta_ptr = sta;
+		/* hostap_handle_sta_release() will be called to release STA
+		 * info */
+	} else
+		atomic_dec(&sta->users);
+
+	return 0;
+}
+
+
+/* Called only as a tasklet (software IRQ) */
+int hostap_is_sta_assoc(struct ap_data *ap, u8 *sta_addr)
+{
+	struct sta_info *sta;
+	int ret = 0;
+
+	spin_lock(&ap->sta_table_lock);
+	sta = ap_get_sta(ap, sta_addr);
+	if (sta != NULL && (sta->flags & WLAN_STA_ASSOC) && !sta->ap)
+		ret = 1;
+	spin_unlock(&ap->sta_table_lock);
+
+	return ret;
+}
+
+
+/* Called only as a tasklet (software IRQ) */
+int hostap_is_sta_authorized(struct ap_data *ap, u8 *sta_addr)
+{
+	struct sta_info *sta;
+	int ret = 0;
+
+	spin_lock(&ap->sta_table_lock);
+	sta = ap_get_sta(ap, sta_addr);
+	if (sta != NULL && (sta->flags & WLAN_STA_ASSOC) && !sta->ap &&
+	    ((sta->flags & WLAN_STA_AUTHORIZED) ||
+	     ap->local->ieee_802_1x == 0))
+		ret = 1;
+	spin_unlock(&ap->sta_table_lock);
+
+	return ret;
+}
+
+
+/* Called only as a tasklet (software IRQ) */
+int hostap_add_sta(struct ap_data *ap, u8 *sta_addr)
+{
+	struct sta_info *sta;
+	int ret = 1;
+
+	if (!ap)
+		return -1;
+
+	spin_lock(&ap->sta_table_lock);
+	sta = ap_get_sta(ap, sta_addr);
+	if (sta)
+		ret = 0;
+	spin_unlock(&ap->sta_table_lock);
+
+	if (ret == 1) {
+		sta = ap_add_sta(ap, sta_addr);
+		if (!sta)
+			return -1;
+		sta->flags = WLAN_STA_AUTH | WLAN_STA_ASSOC;
+		sta->ap = 1;
+		memset(sta->supported_rates, 0, sizeof(sta->supported_rates));
+		/* No way of knowing which rates are supported since we did
+		 * not get a supported rates element from a beacon/assoc req.
+		 * Assume that the remote end supports all 802.11b rates. */
+		sta->supported_rates[0] = 0x82;
+		sta->supported_rates[1] = 0x84;
+		sta->supported_rates[2] = 0x0b;
+		sta->supported_rates[3] = 0x16;
+		sta->tx_supp_rates = WLAN_RATE_1M | WLAN_RATE_2M |
+			WLAN_RATE_5M5 | WLAN_RATE_11M;
+		sta->tx_rate = 110;
+		sta->tx_max_rate = sta->tx_rate_idx = 3;
+	}
+
+	return ret;
+}
+
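+#if 0
+/* Illustrative sketch (not part of the driver): the supported_rates values
+ * hard-coded in hostap_add_sta() use the IEEE 802.11 Supported Rates element
+ * encoding, i.e. the rate in 500 kbps units with BIT(7) marking a basic
+ * rate. A hypothetical helper building the same four entries would be: */
+static void example_fill_80211b_rates(u8 *rates)
+{
+	rates[0] = 0x80 | 2;	/* 1 Mbps, basic (0x82) */
+	rates[1] = 0x80 | 4;	/* 2 Mbps, basic (0x84) */
+	rates[2] = 11;		/* 5.5 Mbps (0x0b) */
+	rates[3] = 22;		/* 11 Mbps (0x16) */
+}
+#endif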
+
+/* Called only as a tasklet (software IRQ) */
+int hostap_update_rx_stats(struct ap_data *ap,
+			   struct ieee80211_hdr *hdr,
+			   struct hostap_80211_rx_status *rx_stats)
+{
+	struct sta_info *sta;
+
+	if (!ap)
+		return -1;
+
+	spin_lock(&ap->sta_table_lock);
+	sta = ap_get_sta(ap, hdr->addr2);
+	if (sta) {
+		sta->last_rx_silence = rx_stats->noise;
+		sta->last_rx_signal = rx_stats->signal;
+		sta->last_rx_rate = rx_stats->rate;
+		sta->last_rx_updated = 7;
+		if (rx_stats->rate == 10)
+			sta->rx_count[0]++;
+		else if (rx_stats->rate == 20)
+			sta->rx_count[1]++;
+		else if (rx_stats->rate == 55)
+			sta->rx_count[2]++;
+		else if (rx_stats->rate == 110)
+			sta->rx_count[3]++;
+	}
+	spin_unlock(&ap->sta_table_lock);
+
+	return sta ? 0 : -1;
+}
+
+
+void hostap_update_rates(local_info_t *local)
+{
+	struct list_head *ptr;
+	struct ap_data *ap = local->ap;
+
+	if (!ap)
+		return;
+
+	spin_lock_bh(&ap->sta_table_lock);
+	list_for_each(ptr, &ap->sta_list) {
+		struct sta_info *sta = list_entry(ptr, struct sta_info, list);
+		prism2_check_tx_rates(sta);
+	}
+	spin_unlock_bh(&ap->sta_table_lock);
+}
+
+
+static void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
+				struct ieee80211_crypt_data ***crypt)
+{
+	struct sta_info *sta;
+
+	spin_lock_bh(&ap->sta_table_lock);
+	sta = ap_get_sta(ap, addr);
+	if (sta)
+		atomic_inc(&sta->users);
+	spin_unlock_bh(&ap->sta_table_lock);
+
+	if (!sta && permanent)
+		sta = ap_add_sta(ap, addr);
+
+	if (!sta)
+		return NULL;
+
+	if (permanent)
+		sta->flags |= WLAN_STA_PERM;
+
+	*crypt = &sta->crypt;
+
+	return sta;
+}
+
+
+void hostap_add_wds_links(local_info_t *local)
+{
+	struct ap_data *ap = local->ap;
+	struct list_head *ptr;
+
+	spin_lock_bh(&ap->sta_table_lock);
+	list_for_each(ptr, &ap->sta_list) {
+		struct sta_info *sta = list_entry(ptr, struct sta_info, list);
+		if (sta->ap)
+			hostap_wds_link_oper(local, sta->addr, WDS_ADD);
+	}
+	spin_unlock_bh(&ap->sta_table_lock);
+
+	schedule_work(&local->ap->wds_oper_queue);
+}
+
+
+void hostap_wds_link_oper(local_info_t *local, u8 *addr, wds_oper_type type)
+{
+	struct wds_oper_data *entry;
+
+	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+	if (!entry)
+		return;
+	memcpy(entry->addr, addr, ETH_ALEN);
+	entry->type = type;
+	spin_lock_bh(&local->lock);
+	entry->next = local->ap->wds_oper_entries;
+	local->ap->wds_oper_entries = entry;
+	spin_unlock_bh(&local->lock);
+
+	schedule_work(&local->ap->wds_oper_queue);
+}
+
+
+EXPORT_SYMBOL(hostap_init_data);
+EXPORT_SYMBOL(hostap_init_ap_proc);
+EXPORT_SYMBOL(hostap_free_data);
+EXPORT_SYMBOL(hostap_check_sta_fw_version);
+EXPORT_SYMBOL(hostap_handle_sta_tx);
+EXPORT_SYMBOL(hostap_handle_sta_release);
+EXPORT_SYMBOL(hostap_handle_sta_tx_exc);
+EXPORT_SYMBOL(hostap_update_sta_ps);
+EXPORT_SYMBOL(hostap_handle_sta_rx);
+EXPORT_SYMBOL(hostap_is_sta_assoc);
+EXPORT_SYMBOL(hostap_is_sta_authorized);
+EXPORT_SYMBOL(hostap_add_sta);
+EXPORT_SYMBOL(hostap_update_rates);
+EXPORT_SYMBOL(hostap_add_wds_links);
+EXPORT_SYMBOL(hostap_wds_link_oper);
+#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
+EXPORT_SYMBOL(hostap_deauth_all_stas);
+#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
diff --git a/drivers/net/wireless/hostap/hostap_ap.h b/drivers/net/wireless/hostap/hostap_ap.h
new file mode 100644
--- /dev/null
+++ b/drivers/net/wireless/hostap/hostap_ap.h
@@ -0,0 +1,261 @@
+#ifndef HOSTAP_AP_H
+#define HOSTAP_AP_H
+
+/* AP data structures for STAs */
+
+/* maximum number of frames to buffer per STA */
+#define STA_MAX_TX_BUFFER 32
+
+/* STA flags */
+#define WLAN_STA_AUTH BIT(0)
+#define WLAN_STA_ASSOC BIT(1)
+#define WLAN_STA_PS BIT(2)
+#define WLAN_STA_TIM BIT(3) /* TIM bit is on for PS stations */
+#define WLAN_STA_PERM BIT(4) /* permanent; do not remove entry on expiration */
+#define WLAN_STA_AUTHORIZED BIT(5) /* If 802.1X is used, this flag is
+				    * controlling whether STA is authorized to
+				    * send and receive non-IEEE 802.1X frames
+				    */
+#define WLAN_STA_PENDING_POLL BIT(6) /* pending activity poll not ACKed */
+
+#define WLAN_RATE_1M BIT(0)
+#define WLAN_RATE_2M BIT(1)
+#define WLAN_RATE_5M5 BIT(2)
+#define WLAN_RATE_11M BIT(3)
+#define WLAN_RATE_COUNT 4
+
+/* Maximum size of Supported Rates info element. IEEE 802.11 has a limit of 8,
+ * but some pre-standard IEEE 802.11g products use longer elements. */
+#define WLAN_SUPP_RATES_MAX 32
+
+/* Try to increase TX rate after # successfully sent consecutive packets */
+#define WLAN_RATE_UPDATE_COUNT 50
+
+/* Decrease TX rate after # consecutive dropped packets */
+#define WLAN_RATE_DECREASE_THRESHOLD 2
+
+struct sta_info {
+	struct list_head list;
+	struct sta_info *hnext; /* next entry in hash table list */
+	atomic_t users; /* number of users (do not remove if > 0) */
+	struct proc_dir_entry *proc;
+
+	u8 addr[6];
+	u16 aid; /* STA's unique AID (1 .. 2007) or 0 if not yet assigned */
+	u32 flags;
+	u16 capability;
+	u16 listen_interval; /* or beacon_int for APs */
+	u8 supported_rates[WLAN_SUPP_RATES_MAX];
+
+	unsigned long last_auth;
+	unsigned long last_assoc;
+	unsigned long last_rx;
+	unsigned long last_tx;
+	unsigned long rx_packets, tx_packets;
+	unsigned long rx_bytes, tx_bytes;
+	struct sk_buff_head tx_buf;
+	/* FIX: timeout buffers with an expiry time somehow derived from
+	 * listen_interval */
+
+	s8 last_rx_silence; /* Noise in dBm */
+	s8 last_rx_signal; /* Signal strength in dBm */
+	u8 last_rx_rate; /* rate of last received frame (in 0.1 Mbps) */
+	u8 last_rx_updated; /* IWSPY's struct iw_quality::updated */
+
+	u8 tx_supp_rates; /* bit field of supported TX rates */
+	u8 tx_rate; /* current TX rate (in 0.1 Mbps) */
+	u8 tx_rate_idx; /* current TX rate (WLAN_RATE_*) */
+	u8 tx_max_rate; /* max TX rate (WLAN_RATE_*) */
+	u32 tx_count[WLAN_RATE_COUNT]; /* number of frames sent (per rate) */
+	u32 rx_count[WLAN_RATE_COUNT]; /* number of frames received (per rate)
+					*/
+	u32 tx_since_last_failure;
+	u32 tx_consecutive_exc;
+
+	struct ieee80211_crypt_data *crypt;
+
+	int ap; /* whether this station is an AP */
+
+	local_info_t *local;
+
+#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
+	union {
+		struct {
+			char *challenge; /* shared key authentication
+					  * challenge */
+		} sta;
+		struct {
+			int ssid_len;
+			unsigned char ssid[MAX_SSID_LEN + 1]; /* AP's ssid */
+			int channel;
+			unsigned long last_beacon; /* last RX beacon time */
+		} ap;
+	} u;
+
+	struct timer_list timer;
+	enum { STA_NULLFUNC = 0, STA_DISASSOC, STA_DEAUTH } timeout_next;
+#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
+};
+
+
+#define MAX_STA_COUNT 1024
+
+/* Maximum number of AIDs to use for STAs; must be 2007 or lower
+ * (8802.11 limitation) */
+#define MAX_AID_TABLE_SIZE 128
+
+#define STA_HASH_SIZE 256
+#define STA_HASH(sta) (sta[5])
+
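+/* Note (illustrative): STA_HASH() uses the last octet of the MAC address as
+ * the bucket index, so with STA_HASH_SIZE == 256 every value maps to a valid
+ * slot; e.g. address 00:04:e2:12:34:56 hashes to bucket 0x56 of sta_hash[].
+ */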
+
+/* Default value for maximum station inactivity. After AP_MAX_INACTIVITY_SEC
+ * has passed since last received frame from the station, a nullfunc data
+ * frame is sent to the station. If this frame is not acknowledged and no other
+ * frames have been received, the station will be disassociated after
+ * AP_DISASSOC_DELAY. Similarly, the station will be deauthenticated after
+ * AP_DEAUTH_DELAY. AP_TIMEOUT_RESOLUTION is the resolution that is used with
+ * max inactivity timer. */
+#define AP_MAX_INACTIVITY_SEC (5 * 60)
+#define AP_DISASSOC_DELAY (HZ)
+#define AP_DEAUTH_DELAY (HZ)
+
+/* ap_policy: whether to accept frames to/from other APs/IBSS */
+typedef enum {
+	AP_OTHER_AP_SKIP_ALL = 0,
+	AP_OTHER_AP_SAME_SSID = 1,
+	AP_OTHER_AP_ALL = 2,
+	AP_OTHER_AP_EVEN_IBSS = 3
+} ap_policy_enum;
+
+#define PRISM2_AUTH_OPEN BIT(0)
+#define PRISM2_AUTH_SHARED_KEY BIT(1)
+
+
+/* MAC address-based restrictions */
+struct mac_entry {
+	struct list_head list;
+	u8 addr[6];
+};
+
+struct mac_restrictions {
+	enum { MAC_POLICY_OPEN = 0, MAC_POLICY_ALLOW, MAC_POLICY_DENY } policy;
+	unsigned int entries;
+	struct list_head mac_list;
+	spinlock_t lock;
+};
+
+
+struct add_sta_proc_data {
+	u8 addr[ETH_ALEN];
+	struct add_sta_proc_data *next;
+};
+
+
+typedef enum { WDS_ADD, WDS_DEL } wds_oper_type;
+struct wds_oper_data {
+	wds_oper_type type;
+	u8 addr[ETH_ALEN];
+	struct wds_oper_data *next;
+};
+
+
+struct ap_data {
+	int initialized; /* whether ap_data has been initialized */
+	local_info_t *local;
+	int bridge_packets; /* send packets to associated STAs directly over
+			     * the wireless medium instead of passing them to
+			     * the higher layers in the kernel */
+	unsigned int bridged_unicast; /* number of unicast frames bridged on
+				       * wireless media */
+	unsigned int bridged_multicast; /* number of non-unicast frames
+					 * bridged on wireless media */
+	unsigned int tx_drop_nonassoc; /* number of unicast TX packets dropped
+					* because the destination STA was not
+					* associated */
+	int nullfunc_ack; /* use workaround for nullfunc frame ACKs */
+
+	spinlock_t sta_table_lock;
+	int num_sta; /* number of entries in sta_list */
+	struct list_head sta_list; /* STA info list head */
+	struct sta_info *sta_hash[STA_HASH_SIZE];
+
+	struct proc_dir_entry *proc;
+
+	ap_policy_enum ap_policy;
+	unsigned int max_inactivity;
+	int autom_ap_wds;
+
+	struct mac_restrictions mac_restrictions; /* MAC-based auth */
+	int last_tx_rate;
+
+	struct work_struct add_sta_proc_queue;
+	struct add_sta_proc_data *add_sta_proc_entries;
+
+	struct work_struct wds_oper_queue;
+	struct wds_oper_data *wds_oper_entries;
+
+	u16 tx_callback_idx;
+
+#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
+	/* pointers to STA info, indexed by allocated AID, or NULL if the AID
+	 * is free. AIDs are in the range 1-2007, so sta_aid[0] corresponds to
+	 * AID 1, and so on.
+	 */
+	struct sta_info *sta_aid[MAX_AID_TABLE_SIZE];
+
+	u16 tx_callback_auth, tx_callback_assoc, tx_callback_poll;
+
+	/* WEP operations for generating challenges to be used with shared key
+	 * authentication */
+	struct ieee80211_crypto_ops *crypt;
+	void *crypt_priv;
+#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
+};
+
+
+void hostap_rx(struct net_device *dev, struct sk_buff *skb,
+	       struct hostap_80211_rx_status *rx_stats);
+void hostap_init_data(local_info_t *local);
+void hostap_init_ap_proc(local_info_t *local);
+void hostap_free_data(struct ap_data *ap);
+void hostap_check_sta_fw_version(struct ap_data *ap, int sta_fw_ver);
+
+typedef enum {
+	AP_TX_CONTINUE, AP_TX_DROP, AP_TX_RETRY, AP_TX_BUFFERED,
+	AP_TX_CONTINUE_NOT_AUTHORIZED
+} ap_tx_ret;
+struct hostap_tx_data {
+	struct sk_buff *skb;
+	int host_encrypt;
+	struct ieee80211_crypt_data *crypt;
+	void *sta_ptr;
+};
+ap_tx_ret hostap_handle_sta_tx(local_info_t *local, struct hostap_tx_data *tx);
+void hostap_handle_sta_release(void *ptr);
+void hostap_handle_sta_tx_exc(local_info_t *local, struct sk_buff *skb);
+int hostap_update_sta_ps(local_info_t *local, struct ieee80211_hdr *hdr);
+typedef enum {
+	AP_RX_CONTINUE, AP_RX_DROP, AP_RX_EXIT, AP_RX_CONTINUE_NOT_AUTHORIZED
+} ap_rx_ret;
+ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev,
+			       struct sk_buff *skb,
+			       struct hostap_80211_rx_status *rx_stats,
+			       int wds);
+int hostap_handle_sta_crypto(local_info_t *local, struct ieee80211_hdr *hdr,
+			     struct ieee80211_crypt_data **crypt,
+			     void **sta_ptr);
+int hostap_is_sta_assoc(struct ap_data *ap, u8 *sta_addr);
+int hostap_is_sta_authorized(struct ap_data *ap, u8 *sta_addr);
+int hostap_add_sta(struct ap_data *ap, u8 *sta_addr);
+int hostap_update_rx_stats(struct ap_data *ap, struct ieee80211_hdr *hdr,
+			   struct hostap_80211_rx_status *rx_stats);
+void hostap_update_rates(local_info_t *local);
+void hostap_add_wds_links(local_info_t *local);
+void hostap_wds_link_oper(local_info_t *local, u8 *addr, wds_oper_type type);
+
+#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
+void hostap_deauth_all_stas(struct net_device *dev, struct ap_data *ap,
+			    int resend);
+#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
+
+#endif /* HOSTAP_AP_H */
diff --git a/drivers/net/wireless/hostap/hostap_common.h b/drivers/net/wireless/hostap/hostap_common.h
new file mode 100644
--- /dev/null
+++ b/drivers/net/wireless/hostap/hostap_common.h
@@ -0,0 +1,435 @@
+#ifndef HOSTAP_COMMON_H
+#define HOSTAP_COMMON_H
+
+#define BIT(x) (1 << (x))
+
+#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
+#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x"
+
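+/* Illustrative usage (not part of the original code): these helpers expand a
+ * MAC address into a printk() format string and its six byte arguments, e.g.
+ *
+ *	printk(KERN_DEBUG "STA " MACSTR " associated\n", MAC2STR(sta->addr));
+ */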
+
+/* IEEE 802.11 defines */
+
+/* Information Element IDs */
+#define WLAN_EID_SSID 0
+#define WLAN_EID_SUPP_RATES 1
+#define WLAN_EID_FH_PARAMS 2
+#define WLAN_EID_DS_PARAMS 3
+#define WLAN_EID_CF_PARAMS 4
+#define WLAN_EID_TIM 5
+#define WLAN_EID_IBSS_PARAMS 6
+#define WLAN_EID_CHALLENGE 16
+#define WLAN_EID_RSN 48
+#define WLAN_EID_GENERIC 221
+
+
+/* HFA384X Configuration RIDs */
+#define HFA384X_RID_CNFPORTTYPE 0xFC00
+#define HFA384X_RID_CNFOWNMACADDR 0xFC01
+#define HFA384X_RID_CNFDESIREDSSID 0xFC02
+#define HFA384X_RID_CNFOWNCHANNEL 0xFC03
+#define HFA384X_RID_CNFOWNSSID 0xFC04
+#define HFA384X_RID_CNFOWNATIMWINDOW 0xFC05
+#define HFA384X_RID_CNFSYSTEMSCALE 0xFC06
+#define HFA384X_RID_CNFMAXDATALEN 0xFC07
+#define HFA384X_RID_CNFWDSADDRESS 0xFC08
+#define HFA384X_RID_CNFPMENABLED 0xFC09
+#define HFA384X_RID_CNFPMEPS 0xFC0A
+#define HFA384X_RID_CNFMULTICASTRECEIVE 0xFC0B
+#define HFA384X_RID_CNFMAXSLEEPDURATION 0xFC0C
+#define HFA384X_RID_CNFPMHOLDOVERDURATION 0xFC0D
+#define HFA384X_RID_CNFOWNNAME 0xFC0E
+#define HFA384X_RID_CNFOWNDTIMPERIOD 0xFC10
+#define HFA384X_RID_CNFWDSADDRESS1 0xFC11 /* AP f/w only */
+#define HFA384X_RID_CNFWDSADDRESS2 0xFC12 /* AP f/w only */
+#define HFA384X_RID_CNFWDSADDRESS3 0xFC13 /* AP f/w only */
+#define HFA384X_RID_CNFWDSADDRESS4 0xFC14 /* AP f/w only */
+#define HFA384X_RID_CNFWDSADDRESS5 0xFC15 /* AP f/w only */
+#define HFA384X_RID_CNFWDSADDRESS6 0xFC16 /* AP f/w only */
+#define HFA384X_RID_CNFMULTICASTPMBUFFERING 0xFC17 /* AP f/w only */
+#define HFA384X_RID_UNKNOWN1 0xFC20
+#define HFA384X_RID_UNKNOWN2 0xFC21
+#define HFA384X_RID_CNFWEPDEFAULTKEYID 0xFC23
+#define HFA384X_RID_CNFDEFAULTKEY0 0xFC24
+#define HFA384X_RID_CNFDEFAULTKEY1 0xFC25
+#define HFA384X_RID_CNFDEFAULTKEY2 0xFC26
+#define HFA384X_RID_CNFDEFAULTKEY3 0xFC27
+#define HFA384X_RID_CNFWEPFLAGS 0xFC28
+#define HFA384X_RID_CNFWEPKEYMAPPINGTABLE 0xFC29
+#define HFA384X_RID_CNFAUTHENTICATION 0xFC2A
+#define HFA384X_RID_CNFMAXASSOCSTA 0xFC2B /* AP f/w only */
+#define HFA384X_RID_CNFTXCONTROL 0xFC2C
+#define HFA384X_RID_CNFROAMINGMODE 0xFC2D
+#define HFA384X_RID_CNFHOSTAUTHENTICATION 0xFC2E /* AP f/w only */
+#define HFA384X_RID_CNFRCVCRCERROR 0xFC30
+#define HFA384X_RID_CNFMMLIFE 0xFC31
+#define HFA384X_RID_CNFALTRETRYCOUNT 0xFC32
+#define HFA384X_RID_CNFBEACONINT 0xFC33
+#define HFA384X_RID_CNFAPPCFINFO 0xFC34 /* AP f/w only */
+#define HFA384X_RID_CNFSTAPCFINFO 0xFC35
+#define HFA384X_RID_CNFPRIORITYQUSAGE 0xFC37
+#define HFA384X_RID_CNFTIMCTRL 0xFC40
+#define HFA384X_RID_UNKNOWN3 0xFC41 /* added in STA f/w 0.7.x */
+#define HFA384X_RID_CNFTHIRTY2TALLY 0xFC42 /* added in STA f/w 0.8.0 */
+#define HFA384X_RID_CNFENHSECURITY 0xFC43 /* AP f/w or STA f/w >= 1.6.3 */
+#define HFA384X_RID_CNFDBMADJUST 0xFC46 /* added in STA f/w 1.3.1 */
+#define HFA384X_RID_GENERICELEMENT 0xFC48 /* added in STA f/w 1.7.0;
+					   * write only */
+#define HFA384X_RID_PROPAGATIONDELAY 0xFC49 /* added in STA f/w 1.7.6 */
+#define HFA384X_RID_GROUPADDRESSES 0xFC80
+#define HFA384X_RID_CREATEIBSS 0xFC81
+#define HFA384X_RID_FRAGMENTATIONTHRESHOLD 0xFC82
+#define HFA384X_RID_RTSTHRESHOLD 0xFC83
+#define HFA384X_RID_TXRATECONTROL 0xFC84
+#define HFA384X_RID_PROMISCUOUSMODE 0xFC85
+#define HFA384X_RID_FRAGMENTATIONTHRESHOLD0 0xFC90 /* AP f/w only */
+#define HFA384X_RID_FRAGMENTATIONTHRESHOLD1 0xFC91 /* AP f/w only */
+#define HFA384X_RID_FRAGMENTATIONTHRESHOLD2 0xFC92 /* AP f/w only */
+#define HFA384X_RID_FRAGMENTATIONTHRESHOLD3 0xFC93 /* AP f/w only */
+#define HFA384X_RID_FRAGMENTATIONTHRESHOLD4 0xFC94 /* AP f/w only */
+#define HFA384X_RID_FRAGMENTATIONTHRESHOLD5 0xFC95 /* AP f/w only */
+#define HFA384X_RID_FRAGMENTATIONTHRESHOLD6 0xFC96 /* AP f/w only */
+#define HFA384X_RID_RTSTHRESHOLD0 0xFC97 /* AP f/w only */
+#define HFA384X_RID_RTSTHRESHOLD1 0xFC98 /* AP f/w only */
+#define HFA384X_RID_RTSTHRESHOLD2 0xFC99 /* AP f/w only */
+#define HFA384X_RID_RTSTHRESHOLD3 0xFC9A /* AP f/w only */
+#define HFA384X_RID_RTSTHRESHOLD4 0xFC9B /* AP f/w only */
+#define HFA384X_RID_RTSTHRESHOLD5 0xFC9C /* AP f/w only */
+#define HFA384X_RID_RTSTHRESHOLD6 0xFC9D /* AP f/w only */
+#define HFA384X_RID_TXRATECONTROL0 0xFC9E /* AP f/w only */
+#define HFA384X_RID_TXRATECONTROL1 0xFC9F /* AP f/w only */
+#define HFA384X_RID_TXRATECONTROL2 0xFCA0 /* AP f/w only */
+#define HFA384X_RID_TXRATECONTROL3 0xFCA1 /* AP f/w only */
+#define HFA384X_RID_TXRATECONTROL4 0xFCA2 /* AP f/w only */
+#define HFA384X_RID_TXRATECONTROL5 0xFCA3 /* AP f/w only */
+#define HFA384X_RID_TXRATECONTROL6 0xFCA4 /* AP f/w only */
+#define HFA384X_RID_CNFSHORTPREAMBLE 0xFCB0
+#define HFA384X_RID_CNFEXCLUDELONGPREAMBLE 0xFCB1
+#define HFA384X_RID_CNFAUTHENTICATIONRSPTO 0xFCB2
+#define HFA384X_RID_CNFBASICRATES 0xFCB3
+#define HFA384X_RID_CNFSUPPORTEDRATES 0xFCB4
+#define HFA384X_RID_CNFFALLBACKCTRL 0xFCB5 /* added in STA f/w 1.3.1 */
+#define HFA384X_RID_WEPKEYDISABLE 0xFCB6 /* added in STA f/w 1.3.1 */
+#define HFA384X_RID_WEPKEYMAPINDEX 0xFCB7 /* ? */
+#define HFA384X_RID_BROADCASTKEYID 0xFCB8 /* ? */
+#define HFA384X_RID_ENTSECFLAGEYID 0xFCB9 /* ? */
+#define HFA384X_RID_CNFPASSIVESCANCTRL 0xFCBA /* added in STA f/w 1.5.0 */
+#define HFA384X_RID_SSNHANDLINGMODE 0xFCBB /* added in STA f/w 1.7.0 */
+#define HFA384X_RID_MDCCONTROL 0xFCBC /* added in STA f/w 1.7.0 */
+#define HFA384X_RID_MDCCOUNTRY 0xFCBD /* added in STA f/w 1.7.0 */
+#define HFA384X_RID_TXPOWERMAX 0xFCBE /* added in STA f/w 1.7.0 */
+#define HFA384X_RID_CNFLFOENABLED 0xFCBF /* added in STA f/w 1.6.3 */
+#define HFA384X_RID_CAPINFO 0xFCC0 /* added in STA f/w 1.7.0 */
+#define HFA384X_RID_LISTENINTERVAL 0xFCC1 /* added in STA f/w 1.7.0 */
+#define HFA384X_RID_SW_ANT_DIV 0xFCC2 /* added in STA f/w 1.7.0; Prism3 */
+#define HFA384X_RID_LED_CTRL 0xFCC4 /* added in STA f/w 1.7.6 */
+#define HFA384X_RID_HFODELAY 0xFCC5 /* added in STA f/w 1.7.6 */
+#define HFA384X_RID_DISALLOWEDBSSID 0xFCC6 /* added in STA f/w 1.8.0 */
+#define HFA384X_RID_TICKTIME 0xFCE0
+#define HFA384X_RID_SCANREQUEST 0xFCE1
+#define HFA384X_RID_JOINREQUEST 0xFCE2
+#define HFA384X_RID_AUTHENTICATESTATION 0xFCE3 /* AP f/w only */
+#define HFA384X_RID_CHANNELINFOREQUEST 0xFCE4 /* AP f/w only */
+#define HFA384X_RID_HOSTSCAN 0xFCE5 /* added in STA f/w 1.3.1 */
+
+/* HFA384X Information RIDs */
+#define HFA384X_RID_MAXLOADTIME 0xFD00
+#define HFA384X_RID_DOWNLOADBUFFER 0xFD01
+#define HFA384X_RID_PRIID 0xFD02
+#define HFA384X_RID_PRISUPRANGE 0xFD03
+#define HFA384X_RID_CFIACTRANGES 0xFD04
+#define HFA384X_RID_NICSERNUM 0xFD0A
+#define HFA384X_RID_NICID 0xFD0B
+#define HFA384X_RID_MFISUPRANGE 0xFD0C
+#define HFA384X_RID_CFISUPRANGE 0xFD0D
+#define HFA384X_RID_CHANNELLIST 0xFD10
+#define HFA384X_RID_REGULATORYDOMAINS 0xFD11
+#define HFA384X_RID_TEMPTYPE 0xFD12
+#define HFA384X_RID_CIS 0xFD13
+#define HFA384X_RID_STAID 0xFD20
+#define HFA384X_RID_STASUPRANGE 0xFD21
+#define HFA384X_RID_MFIACTRANGES 0xFD22
+#define HFA384X_RID_CFIACTRANGES2 0xFD23
+#define HFA384X_RID_PRODUCTNAME 0xFD24 /* added in STA f/w 1.3.1;
+					* only Prism2.5(?) */
+#define HFA384X_RID_PORTSTATUS 0xFD40
+#define HFA384X_RID_CURRENTSSID 0xFD41
+#define HFA384X_RID_CURRENTBSSID 0xFD42
+#define HFA384X_RID_COMMSQUALITY 0xFD43
+#define HFA384X_RID_CURRENTTXRATE 0xFD44
+#define HFA384X_RID_CURRENTBEACONINTERVAL 0xFD45
+#define HFA384X_RID_CURRENTSCALETHRESHOLDS 0xFD46
+#define HFA384X_RID_PROTOCOLRSPTIME 0xFD47
+#define HFA384X_RID_SHORTRETRYLIMIT 0xFD48
+#define HFA384X_RID_LONGRETRYLIMIT 0xFD49
+#define HFA384X_RID_MAXTRANSMITLIFETIME 0xFD4A
+#define HFA384X_RID_MAXRECEIVELIFETIME 0xFD4B
+#define HFA384X_RID_CFPOLLABLE 0xFD4C
+#define HFA384X_RID_AUTHENTICATIONALGORITHMS 0xFD4D
+#define HFA384X_RID_PRIVACYOPTIONIMPLEMENTED 0xFD4F
+#define HFA384X_RID_DBMCOMMSQUALITY 0xFD51 /* added in STA f/w 1.3.1 */
+#define HFA384X_RID_CURRENTTXRATE1 0xFD80 /* AP f/w only */
+#define HFA384X_RID_CURRENTTXRATE2 0xFD81 /* AP f/w only */
+#define HFA384X_RID_CURRENTTXRATE3 0xFD82 /* AP f/w only */
+#define HFA384X_RID_CURRENTTXRATE4 0xFD83 /* AP f/w only */
+#define HFA384X_RID_CURRENTTXRATE5 0xFD84 /* AP f/w only */
+#define HFA384X_RID_CURRENTTXRATE6 0xFD85 /* AP f/w only */
+#define HFA384X_RID_OWNMACADDR 0xFD86 /* AP f/w only */
+#define HFA384X_RID_SCANRESULTSTABLE 0xFD88 /* added in STA f/w 0.8.3 */
+#define HFA384X_RID_HOSTSCANRESULTS 0xFD89 /* added in STA f/w 1.3.1 */
+#define HFA384X_RID_AUTHENTICATIONUSED 0xFD8A /* added in STA f/w 1.3.4 */
+#define HFA384X_RID_CNFFAASWITCHCTRL 0xFD8B /* added in STA f/w 1.6.3 */
+#define HFA384X_RID_ASSOCIATIONFAILURE 0xFD8D /* added in STA f/w 1.8.0 */
+#define HFA384X_RID_PHYTYPE 0xFDC0
+#define HFA384X_RID_CURRENTCHANNEL 0xFDC1
+#define HFA384X_RID_CURRENTPOWERSTATE 0xFDC2
+#define HFA384X_RID_CCAMODE 0xFDC3
+#define HFA384X_RID_SUPPORTEDDATARATES 0xFDC6
+#define HFA384X_RID_LFO_VOLT_REG_TEST_RES 0xFDC7 /* added in STA f/w 1.7.1 */
+#define HFA384X_RID_BUILDSEQ 0xFFFE
+#define HFA384X_RID_FWID 0xFFFF
+
+
+struct hfa384x_comp_ident
+{
+	u16 id;
+	u16 variant;
+	u16 major;
+	u16 minor;
+} __attribute__ ((packed));
+
+#define HFA384X_COMP_ID_PRI 0x15
+#define HFA384X_COMP_ID_STA 0x1f
+#define HFA384X_COMP_ID_FW_AP 0x14b
+
+struct hfa384x_sup_range
+{
+	u16 role;
+	u16 id;
+	u16 variant;
+	u16 bottom;
+	u16 top;
+} __attribute__ ((packed));
+
+
+struct hfa384x_build_id
+{
+	u16 pri_seq;
+	u16 sec_seq;
+} __attribute__ ((packed));
+
+/* FD01 - Download Buffer */
+struct hfa384x_rid_download_buffer
+{
+	u16 page;
+	u16 offset;
+	u16 length;
+} __attribute__ ((packed));
+
+/* BSS connection quality (RID FD43 range, RID FD51 dBm-normalized) */
+struct hfa384x_comms_quality {
+	u16 comm_qual; /* 0 .. 92 */
+	u16 signal_level; /* 27 .. 154 */
+	u16 noise_level; /* 27 .. 154 */
+} __attribute__ ((packed));
+
+
+/* netdevice private ioctls (used, e.g., with iwpriv from user space) */
+
+/* New wireless extensions API - SET/GET convention (even ioctl numbers are
+ * root only)
+ */
+#define PRISM2_IOCTL_PRISM2_PARAM (SIOCIWFIRSTPRIV + 0)
+#define PRISM2_IOCTL_GET_PRISM2_PARAM (SIOCIWFIRSTPRIV + 1)
+#define PRISM2_IOCTL_WRITEMIF (SIOCIWFIRSTPRIV + 2)
+#define PRISM2_IOCTL_READMIF (SIOCIWFIRSTPRIV + 3)
+#define PRISM2_IOCTL_MONITOR (SIOCIWFIRSTPRIV + 4)
+#define PRISM2_IOCTL_RESET (SIOCIWFIRSTPRIV + 6)
+#define PRISM2_IOCTL_INQUIRE (SIOCIWFIRSTPRIV + 8)
+#define PRISM2_IOCTL_WDS_ADD (SIOCIWFIRSTPRIV + 10)
+#define PRISM2_IOCTL_WDS_DEL (SIOCIWFIRSTPRIV + 12)
+#define PRISM2_IOCTL_SET_RID_WORD (SIOCIWFIRSTPRIV + 14)
+#define PRISM2_IOCTL_MACCMD (SIOCIWFIRSTPRIV + 16)
+#define PRISM2_IOCTL_ADDMAC (SIOCIWFIRSTPRIV + 18)
+#define PRISM2_IOCTL_DELMAC (SIOCIWFIRSTPRIV + 20)
+#define PRISM2_IOCTL_KICKMAC (SIOCIWFIRSTPRIV + 22)
+
+/* following are not in SIOCGIWPRIV list; check permission in the driver code
+ */
+#define PRISM2_IOCTL_DOWNLOAD (SIOCDEVPRIVATE + 13)
+#define PRISM2_IOCTL_HOSTAPD (SIOCDEVPRIVATE + 14)
+
+
+/* PRISM2_IOCTL_PRISM2_PARAM ioctl() subtypes: */
+enum {
+	/* PRISM2_PARAM_PTYPE = 1, */ /* REMOVED 2003-10-22 */
+	PRISM2_PARAM_TXRATECTRL = 2,
+	PRISM2_PARAM_BEACON_INT = 3,
+	PRISM2_PARAM_PSEUDO_IBSS = 4,
+	PRISM2_PARAM_ALC = 5,
+	/* PRISM2_PARAM_TXPOWER = 6, */ /* REMOVED 2003-10-22 */
+	PRISM2_PARAM_DUMP = 7,
+	PRISM2_PARAM_OTHER_AP_POLICY = 8,
+	PRISM2_PARAM_AP_MAX_INACTIVITY = 9,
+	PRISM2_PARAM_AP_BRIDGE_PACKETS = 10,
+	PRISM2_PARAM_DTIM_PERIOD = 11,
+	PRISM2_PARAM_AP_NULLFUNC_ACK = 12,
+	PRISM2_PARAM_MAX_WDS = 13,
+	PRISM2_PARAM_AP_AUTOM_AP_WDS = 14,
+	PRISM2_PARAM_AP_AUTH_ALGS = 15,
+	PRISM2_PARAM_MONITOR_ALLOW_FCSERR = 16,
+	PRISM2_PARAM_HOST_ENCRYPT = 17,
+	PRISM2_PARAM_HOST_DECRYPT = 18,
+	/* PRISM2_PARAM_BUS_MASTER_THRESHOLD_RX = 19, REMOVED 2005-08-14 */
+	/* PRISM2_PARAM_BUS_MASTER_THRESHOLD_TX = 20, REMOVED 2005-08-14 */
+	PRISM2_PARAM_HOST_ROAMING = 21,
+	PRISM2_PARAM_BCRX_STA_KEY = 22,
+	PRISM2_PARAM_IEEE_802_1X = 23,
+	PRISM2_PARAM_ANTSEL_TX = 24,
+	PRISM2_PARAM_ANTSEL_RX = 25,
+	PRISM2_PARAM_MONITOR_TYPE = 26,
+	PRISM2_PARAM_WDS_TYPE = 27,
+	PRISM2_PARAM_HOSTSCAN = 28,
+	PRISM2_PARAM_AP_SCAN = 29,
+	PRISM2_PARAM_ENH_SEC = 30,
+	PRISM2_PARAM_IO_DEBUG = 31,
+	PRISM2_PARAM_BASIC_RATES = 32,
+	PRISM2_PARAM_OPER_RATES = 33,
+	PRISM2_PARAM_HOSTAPD = 34,
+	PRISM2_PARAM_HOSTAPD_STA = 35,
+	PRISM2_PARAM_WPA = 36,
+	PRISM2_PARAM_PRIVACY_INVOKED = 37,
+	PRISM2_PARAM_TKIP_COUNTERMEASURES = 38,
+	PRISM2_PARAM_DROP_UNENCRYPTED = 39,
+	PRISM2_PARAM_SCAN_CHANNEL_MASK = 40,
+};
+
+enum { HOSTAP_ANTSEL_DO_NOT_TOUCH = 0, HOSTAP_ANTSEL_DIVERSITY = 1,
+       HOSTAP_ANTSEL_LOW = 2, HOSTAP_ANTSEL_HIGH = 3 };
+
+
+/* PRISM2_IOCTL_MACCMD ioctl() subcommands: */
+enum { AP_MAC_CMD_POLICY_OPEN = 0, AP_MAC_CMD_POLICY_ALLOW = 1,
+       AP_MAC_CMD_POLICY_DENY = 2, AP_MAC_CMD_FLUSH = 3,
+       AP_MAC_CMD_KICKALL = 4 };
+
+
+/* PRISM2_IOCTL_DOWNLOAD ioctl() dl_cmd: */
+enum {
+	PRISM2_DOWNLOAD_VOLATILE = 1 /* RAM */,
+	/* Note! Old versions of prism2_srec have a fatal error in CRC-16
+	 * calculation, which will corrupt all non-volatile downloads.
+	 * PRISM2_DOWNLOAD_NON_VOLATILE used to be 2, but it is now 3 to
+	 * prevent use of old versions of prism2_srec for non-volatile
+	 * download. */
+	PRISM2_DOWNLOAD_NON_VOLATILE = 3 /* FLASH */,
+	PRISM2_DOWNLOAD_VOLATILE_GENESIS = 4 /* RAM in Genesis mode */,
+	/* Persistent versions of volatile download commands (keep firmware
+	 * data in memory and automatically re-download after hw_reset) */
+	PRISM2_DOWNLOAD_VOLATILE_PERSISTENT = 5,
+	PRISM2_DOWNLOAD_VOLATILE_GENESIS_PERSISTENT = 6,
+};
+
+struct prism2_download_param {
+	u32 dl_cmd;
+	u32 start_addr;
+	u32 num_areas;
+	struct prism2_download_area {
+		u32 addr; /* wlan card address */
+		u32 len;
+		void __user *ptr; /* pointer to data in user space */
+	} data[0];
+};
+
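+/* Illustrative note (not part of the original code): data[0] above is a
+ * variable-length array, so the caller allocates the whole request as one
+ * block, e.g.
+ *
+ *	len = sizeof(struct prism2_download_param) +
+ *	      num_areas * sizeof(struct prism2_download_area);
+ */
+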
+#define PRISM2_MAX_DOWNLOAD_AREA_LEN 131072
+#define PRISM2_MAX_DOWNLOAD_LEN 262144
+
+
+/* PRISM2_IOCTL_HOSTAPD ioctl() cmd: */
+enum {
+	PRISM2_HOSTAPD_FLUSH = 1,
+	PRISM2_HOSTAPD_ADD_STA = 2,
+	PRISM2_HOSTAPD_REMOVE_STA = 3,
+	PRISM2_HOSTAPD_GET_INFO_STA = 4,
+	/* REMOVED: PRISM2_HOSTAPD_RESET_TXEXC_STA = 5, */
+	PRISM2_SET_ENCRYPTION = 6,
+	PRISM2_GET_ENCRYPTION = 7,
+	PRISM2_HOSTAPD_SET_FLAGS_STA = 8,
+	PRISM2_HOSTAPD_GET_RID = 9,
+	PRISM2_HOSTAPD_SET_RID = 10,
+	PRISM2_HOSTAPD_SET_ASSOC_AP_ADDR = 11,
+	PRISM2_HOSTAPD_SET_GENERIC_ELEMENT = 12,
+	PRISM2_HOSTAPD_MLME = 13,
+	PRISM2_HOSTAPD_SCAN_REQ = 14,
+	PRISM2_HOSTAPD_STA_CLEAR_STATS = 15,
+};
+
+#define PRISM2_HOSTAPD_MAX_BUF_SIZE 1024
+#define PRISM2_HOSTAPD_RID_HDR_LEN \
+((int) (&((struct prism2_hostapd_param *) 0)->u.rid.data))
+#define PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN \
+((int) (&((struct prism2_hostapd_param *) 0)->u.generic_elem.data))
+
+/* Maximum length of algorithm names used in ioctl(); includes space for the
+ * terminating nul, so the usable name length is one character less.
+ */
+#define HOSTAP_CRYPT_ALG_NAME_LEN 16
+
+
+struct prism2_hostapd_param {
+	u32 cmd;
+	u8 sta_addr[ETH_ALEN];
+	union {
+		struct {
+			u16 aid;
+			u16 capability;
+			u8 tx_supp_rates;
+		} add_sta;
+		struct {
+			u32 inactive_sec;
+		} get_info_sta;
+		struct {
+			u8 alg[HOSTAP_CRYPT_ALG_NAME_LEN];
+			u32 flags;
+			u32 err;
+			u8 idx;
+			u8 seq[8]; /* sequence counter (set: RX, get: TX) */
+			u16 key_len;
+			u8 key[0];
+		} crypt;
+		struct {
+			u32 flags_and;
+			u32 flags_or;
+		} set_flags_sta;
+		struct {
+			u16 rid;
+			u16 len;
+			u8 data[0];
+		} rid;
+		struct {
+			u8 len;
+			u8 data[0];
+		} generic_elem;
+		struct {
+#define MLME_STA_DEAUTH 0
+#define MLME_STA_DISASSOC 1
+			u16 cmd;
+			u16 reason_code;
+		} mlme;
+		struct {
+			u8 ssid_len;
+			u8 ssid[32];
+		} scan_req;
+	} u;
+};
+
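+/* Illustrative sketch (not part of the original code): a user space caller
+ * such as hostapd would typically fill this structure and hand it to the
+ * driver through the PRISM2_IOCTL_HOSTAPD private ioctl (the exact wrapping
+ * through struct iwreq depends on the user space code), e.g.
+ *
+ *	struct prism2_hostapd_param param;
+ *	memset(&param, 0, sizeof(param));
+ *	param.cmd = PRISM2_HOSTAPD_GET_INFO_STA;
+ *	memcpy(param.sta_addr, addr, ETH_ALEN);
+ *	(pass &param and sizeof(param) to the ioctl; on success
+ *	 param.u.get_info_sta.inactive_sec holds the station idle time)
+ */
+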
+#define HOSTAP_CRYPT_FLAG_SET_TX_KEY BIT(0)
+#define HOSTAP_CRYPT_FLAG_PERMANENT BIT(1)
+
+#define HOSTAP_CRYPT_ERR_UNKNOWN_ALG 2
+#define HOSTAP_CRYPT_ERR_UNKNOWN_ADDR 3
+#define HOSTAP_CRYPT_ERR_CRYPT_INIT_FAILED 4
+#define HOSTAP_CRYPT_ERR_KEY_SET_FAILED 5
+#define HOSTAP_CRYPT_ERR_TX_KEY_SET_FAILED 6
+#define HOSTAP_CRYPT_ERR_CARD_CONF_FAILED 7
+
+
+#endif /* HOSTAP_COMMON_H */
diff --git a/drivers/net/wireless/hostap/hostap_config.h b/drivers/net/wireless/hostap/hostap_config.h
new file mode 100644
--- /dev/null
+++ b/drivers/net/wireless/hostap/hostap_config.h
@@ -0,0 +1,55 @@
+#ifndef HOSTAP_CONFIG_H
+#define HOSTAP_CONFIG_H
+
+#define PRISM2_VERSION "0.4.1-kernel"
+
+/* In previous versions of the Host AP driver, support for the user space
+ * version of IEEE 802.11 management (hostapd) used to be disabled in the
+ * default configuration. From now on, support for hostapd is always included
+ * and it is possible to disable the kernel driver version of IEEE 802.11
+ * management with a separate define, PRISM2_NO_KERNEL_IEEE80211_MGMT. */
+/* #define PRISM2_NO_KERNEL_IEEE80211_MGMT */
+
+/* Maximum number of events handled per interrupt */
+#define PRISM2_MAX_INTERRUPT_EVENTS 20
+
+/* Include code for downloading firmware images into volatile RAM. */
+#define PRISM2_DOWNLOAD_SUPPORT
+
+/* Allow kernel configuration to enable download support. */
+#if !defined(PRISM2_DOWNLOAD_SUPPORT) && defined(CONFIG_HOSTAP_FIRMWARE)
+#define PRISM2_DOWNLOAD_SUPPORT
+#endif
+
+#ifdef PRISM2_DOWNLOAD_SUPPORT
+/* Allow writing firmware images into flash, i.e., to non-volatile storage.
+ * Before you enable this option, you should make absolutely sure that you are
+ * using the prism2_srec utility that comes with THIS version of the driver!
+ * In addition, please note that it is possible to kill your card with a
+ * non-volatile download if you are using an incorrect image. This feature has
+ * not been fully tested, so please be careful with it. */
+/* #define PRISM2_NON_VOLATILE_DOWNLOAD */
+#endif /* PRISM2_DOWNLOAD_SUPPORT */
+
+/* Save low-level I/O for debugging. This should not be enabled in normal use.
+ */
+/* #define PRISM2_IO_DEBUG */
+
+/* The following defines can be used to remove unneeded parts of the driver,
+ * e.g., to limit the size of the kernel module. Definitions can be added here
+ * in hostap_config.h or they can be passed to the make command with
+ * EXTRA_CFLAGS, e.g.,
+ * 'make pccard EXTRA_CFLAGS="-DPRISM2_NO_DEBUG -DPRISM2_NO_PROCFS_DEBUG"'
+ */
+
+/* Do not include debug messages into the driver */
+/* #define PRISM2_NO_DEBUG */
+
+/* Do not include /proc/net/prism2/wlan#/{registers,debug} */
+/* #define PRISM2_NO_PROCFS_DEBUG */
+
+/* Do not include station functionality (i.e., allow only Master (Host AP)
+ * mode)
+ */
+/* #define PRISM2_NO_STATION_MODES */
+
+#endif /* HOSTAP_CONFIG_H */
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
new file mode 100644
--- /dev/null
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -0,0 +1,1030 @@
+#define PRISM2_PCCARD
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/if.h>
+#include <linux/wait.h>
+#include <linux/timer.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <linux/wireless.h>
+#include <net/iw_handler.h>
+
+#include <pcmcia/cs_types.h>
+#include <pcmcia/cs.h>
+#include <pcmcia/cistpl.h>
+#include <pcmcia/cisreg.h>
+#include <pcmcia/ds.h>
+
+#include <asm/io.h>
+
+#include "hostap_wlan.h"
+
+
+static char *version = PRISM2_VERSION " (Jouni Malinen <jkmaline@cc.hut.fi>)";
+static dev_info_t dev_info = "hostap_cs";
+static dev_link_t *dev_list = NULL;
+
+MODULE_AUTHOR("Jouni Malinen");
+MODULE_DESCRIPTION("Support for Intersil Prism2-based 802.11 wireless LAN "
+		   "cards (PC Card).");
+MODULE_SUPPORTED_DEVICE("Intersil Prism2-based WLAN cards (PC Card)");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(PRISM2_VERSION);
+
+
+static int ignore_cis_vcc;
+module_param(ignore_cis_vcc, int, 0444);
+MODULE_PARM_DESC(ignore_cis_vcc, "Ignore broken CIS VCC entry");
+
+
+/* struct local_info::hw_priv */
+struct hostap_cs_priv {
+	dev_node_t node;
+	dev_link_t *link;
+	int sandisk_connectplus;
+};
+
+
+#ifdef PRISM2_IO_DEBUG
+
+static inline void hfa384x_outb_debug(struct net_device *dev, int a, u8 v)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	unsigned long flags;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+	spin_lock_irqsave(&local->lock, flags);
+	prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTB, a, v);
+	outb(v, dev->base_addr + a);
+	spin_unlock_irqrestore(&local->lock, flags);
+}
+
+static inline u8 hfa384x_inb_debug(struct net_device *dev, int a)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	unsigned long flags;
+	u8 v;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+	spin_lock_irqsave(&local->lock, flags);
+	v = inb(dev->base_addr + a);
+	prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INB, a, v);
+	spin_unlock_irqrestore(&local->lock, flags);
+	return v;
+}
+
+static inline void hfa384x_outw_debug(struct net_device *dev, int a, u16 v)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	unsigned long flags;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+	spin_lock_irqsave(&local->lock, flags);
+	prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTW, a, v);
+	outw(v, dev->base_addr + a);
+	spin_unlock_irqrestore(&local->lock, flags);
+}
+
+static inline u16 hfa384x_inw_debug(struct net_device *dev, int a)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	unsigned long flags;
+	u16 v;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+	spin_lock_irqsave(&local->lock, flags);
+	v = inw(dev->base_addr + a);
+	prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INW, a, v);
+	spin_unlock_irqrestore(&local->lock, flags);
+	return v;
+}
+
+static inline void hfa384x_outsw_debug(struct net_device *dev, int a,
+				       u8 *buf, int wc)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	unsigned long flags;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+	spin_lock_irqsave(&local->lock, flags);
+	prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTSW, a, wc);
+	outsw(dev->base_addr + a, buf, wc);
+	spin_unlock_irqrestore(&local->lock, flags);
+}
+
+static inline void hfa384x_insw_debug(struct net_device *dev, int a,
+				      u8 *buf, int wc)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	unsigned long flags;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+	spin_lock_irqsave(&local->lock, flags);
+	prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INSW, a, wc);
+	insw(dev->base_addr + a, buf, wc);
+	spin_unlock_irqrestore(&local->lock, flags);
+}
+
+#define HFA384X_OUTB(v,a) hfa384x_outb_debug(dev, (a), (v))
+#define HFA384X_INB(a) hfa384x_inb_debug(dev, (a))
+#define HFA384X_OUTW(v,a) hfa384x_outw_debug(dev, (a), (v))
+#define HFA384X_INW(a) hfa384x_inw_debug(dev, (a))
+#define HFA384X_OUTSW(a, buf, wc) hfa384x_outsw_debug(dev, (a), (buf), (wc))
+#define HFA384X_INSW(a, buf, wc) hfa384x_insw_debug(dev, (a), (buf), (wc))
+
+#else /* PRISM2_IO_DEBUG */
+
+#define HFA384X_OUTB(v,a) outb((v), dev->base_addr + (a))
+#define HFA384X_INB(a) inb(dev->base_addr + (a))
+#define HFA384X_OUTW(v,a) outw((v), dev->base_addr + (a))
+#define HFA384X_INW(a) inw(dev->base_addr + (a))
+#define HFA384X_INSW(a, buf, wc) insw(dev->base_addr + (a), buf, wc)
+#define HFA384X_OUTSW(a, buf, wc) outsw(dev->base_addr + (a), buf, wc)
+
+#endif /* PRISM2_IO_DEBUG */
+
+
+static int hfa384x_from_bap(struct net_device *dev, u16 bap, void *buf,
+			    int len)
+{
+	u16 d_off;
+	u16 *pos;
+
+	d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF;
+	pos = (u16 *) buf;
+
+	if (len / 2)
+		HFA384X_INSW(d_off, buf, len / 2);
+	pos += len / 2;
+
+	if (len & 1)
+		*((char *) pos) = HFA384X_INB(d_off);
+
+	return 0;
+}
+
+
+static int hfa384x_to_bap(struct net_device *dev, u16 bap, void *buf, int len)
+{
+	u16 d_off;
+	u16 *pos;
+
+	d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF;
+	pos = (u16 *) buf;
+
+	if (len / 2)
+		HFA384X_OUTSW(d_off, buf, len / 2);
+	pos += len / 2;
+
+	if (len & 1)
+		HFA384X_OUTB(*((char *) pos), d_off);
+
+	return 0;
+}
+
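+/* Illustrative note (not part of the original code): the BAP copy helpers
+ * above transfer the buffer as 16-bit words and handle a possible trailing
+ * byte separately; e.g. for len == 7 they issue three 16-bit word transfers
+ * (6 bytes) followed by a single 8-bit transfer for the final byte. */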
+
+/* FIX: This might change at some point.. */
+#include "hostap_hw.c"
+
+
+
+static void prism2_detach(dev_link_t *link);
+static void prism2_release(u_long arg);
+static int prism2_event(event_t event, int priority,
+			event_callback_args_t *args);
+
+
+static int prism2_pccard_card_present(local_info_t *local)
+{
+	struct hostap_cs_priv *hw_priv = local->hw_priv;
+	if (hw_priv->link != NULL &&
+	    ((hw_priv->link->state & (DEV_PRESENT | DEV_CONFIG)) ==
+	     (DEV_PRESENT | DEV_CONFIG)))
+		return 1;
+	return 0;
+}
+
+
+/*
+ * SanDisk CompactFlash WLAN Flashcard - Product Manual v1.0
+ * Document No. 20-10-00058, January 2004
+ * http://www.sandisk.com/pdf/industrial/ProdManualCFWLANv1.0.pdf
+ */
+#define SANDISK_WLAN_ACTIVATION_OFF 0x40
+#define SANDISK_HCR_OFF 0x42
+
+
+static void sandisk_set_iobase(local_info_t *local)
+{
+	int res;
+	conf_reg_t reg;
+	struct hostap_cs_priv *hw_priv = local->hw_priv;
+
+	reg.Function = 0;
+	reg.Action = CS_WRITE;
+	reg.Offset = 0x10; /* 0x3f0 IO base 1 */
+	reg.Value = hw_priv->link->io.BasePort1 & 0x00ff;
+	res = pcmcia_access_configuration_register(hw_priv->link->handle,
+						   &reg);
+	if (res != CS_SUCCESS) {
+		printk(KERN_DEBUG "Prism3 SanDisk - failed to set I/O base 0 -"
+		       " res=%d\n", res);
+	}
+	udelay(10);
+
+	reg.Function = 0;
+	reg.Action = CS_WRITE;
+	reg.Offset = 0x12; /* 0x3f2 IO base 2 */
+	reg.Value = (hw_priv->link->io.BasePort1 & 0xff00) >> 8;
+	res = pcmcia_access_configuration_register(hw_priv->link->handle,
+						   &reg);
+	if (res != CS_SUCCESS) {
+		printk(KERN_DEBUG "Prism3 SanDisk - failed to set I/O base 1 -"
+		       " res=%d\n", res);
+	}
+}
+
+
+static void sandisk_write_hcr(local_info_t *local, int hcr)
+{
+	struct net_device *dev = local->dev;
+	int i;
+
+	HFA384X_OUTB(0x80, SANDISK_WLAN_ACTIVATION_OFF);
+	udelay(50);
+	for (i = 0; i < 10; i++) {
+		HFA384X_OUTB(hcr, SANDISK_HCR_OFF);
+	}
+	udelay(55);
+	HFA384X_OUTB(0x45, SANDISK_WLAN_ACTIVATION_OFF);
+}
+
+
+static int sandisk_enable_wireless(struct net_device *dev)
+{
+	int res, ret = 0;
+	conf_reg_t reg;
+	struct hostap_interface *iface = dev->priv;
+	local_info_t *local = iface->local;
+	tuple_t tuple;
+	cisparse_t *parse = NULL;
+	u_char buf[64];
+	struct hostap_cs_priv *hw_priv = local->hw_priv;
+
+	if (hw_priv->link->io.NumPorts1 < 0x42) {
+		/* Not enough ports to be SanDisk multi-function card */
+		ret = -ENODEV;
+		goto done;
+	}
+
+	parse = kmalloc(sizeof(cisparse_t), GFP_KERNEL);
+	if (parse == NULL) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	tuple.DesiredTuple = CISTPL_MANFID;
+	tuple.Attributes = TUPLE_RETURN_COMMON;
+	tuple.TupleData = buf;
+	tuple.TupleDataMax = sizeof(buf);
+	tuple.TupleOffset = 0;
+	if (pcmcia_get_first_tuple(hw_priv->link->handle, &tuple) ||
+	    pcmcia_get_tuple_data(hw_priv->link->handle, &tuple) ||
+	    pcmcia_parse_tuple(hw_priv->link->handle, &tuple, parse) ||
+	    parse->manfid.manf != 0xd601 || parse->manfid.card != 0x0101) {
+		/* No SanDisk manfid found */
+		ret = -ENODEV;
+		goto done;
+	}
+
+	tuple.DesiredTuple = CISTPL_LONGLINK_MFC;
+	if (pcmcia_get_first_tuple(hw_priv->link->handle, &tuple) ||
+	    pcmcia_get_tuple_data(hw_priv->link->handle, &tuple) ||
+	    pcmcia_parse_tuple(hw_priv->link->handle, &tuple, parse) ||
+		parse->longlink_mfc.nfn < 2) {
+		/* No multi-function links found */
+		ret = -ENODEV;
+		goto done;
+	}
+
+	printk(KERN_DEBUG "%s: Multi-function SanDisk ConnectPlus detected"
+	       " - using vendor-specific initialization\n", dev->name);
+	hw_priv->sandisk_connectplus = 1;
+
+	reg.Function = 0;
+	reg.Action = CS_WRITE;
+	reg.Offset = CISREG_COR;
+	reg.Value = COR_SOFT_RESET;
+	res = pcmcia_access_configuration_register(hw_priv->link->handle,
+						   &reg);
+	if (res != CS_SUCCESS) {
+		printk(KERN_DEBUG "%s: SanDisk - COR sreset failed (%d)\n",
+		       dev->name, res);
+		goto done;
+	}
+	mdelay(5);
+
+	reg.Function = 0;
+	reg.Action = CS_WRITE;
+	reg.Offset = CISREG_COR;
+	/*
+	 * Do not enable interrupts here to avoid some bogus events. Interrupts
+	 * will be enabled during the first cor_sreset call.
+	 */
+	reg.Value = COR_LEVEL_REQ | 0x8 | COR_ADDR_DECODE | COR_FUNC_ENA;
+	res = pcmcia_access_configuration_register(hw_priv->link->handle,
+						   &reg);
+	if (res != CS_SUCCESS) {
+		printk(KERN_DEBUG "%s: SanDisk - COR sreset failed (%d)\n",
+		       dev->name, res);
+		goto done;
+	}
+	mdelay(5);
+
+	sandisk_set_iobase(local);
+
+	HFA384X_OUTB(0xc5, SANDISK_WLAN_ACTIVATION_OFF);
+	udelay(10);
+	HFA384X_OUTB(0x4b, SANDISK_WLAN_ACTIVATION_OFF);
+	udelay(10);
+
+done:
+	kfree(parse);
+	return ret;
+}
+
+
+static void prism2_pccard_cor_sreset(local_info_t *local)
+{
+	int res;
+	conf_reg_t reg;
+	struct hostap_cs_priv *hw_priv = local->hw_priv;
+
+	if (!prism2_pccard_card_present(local))
+	       return;
+
+	reg.Function = 0;
+	reg.Action = CS_READ;
+	reg.Offset = CISREG_COR;
+	reg.Value = 0;
+	res = pcmcia_access_configuration_register(hw_priv->link->handle,
+						   &reg);
+	if (res != CS_SUCCESS) {
+		printk(KERN_DEBUG "prism2_pccard_cor_sreset failed 1 (%d)\n",
+		       res);
+		return;
+	}
+	printk(KERN_DEBUG "prism2_pccard_cor_sreset: original COR %02x\n",
+	       reg.Value);
+
+	reg.Action = CS_WRITE;
+	reg.Value |= COR_SOFT_RESET;
+	res = pcmcia_access_configuration_register(hw_priv->link->handle,
+						   &reg);
+	if (res != CS_SUCCESS) {
+		printk(KERN_DEBUG "prism2_pccard_cor_sreset failed 2 (%d)\n",
+		       res);
+		return;
+	}
+
+	mdelay(hw_priv->sandisk_connectplus ? 5 : 2);
+
+	reg.Value &= ~COR_SOFT_RESET;
+	if (hw_priv->sandisk_connectplus)
+		reg.Value |= COR_IREQ_ENA;
+	res = pcmcia_access_configuration_register(hw_priv->link->handle,
+						   &reg);
+	if (res != CS_SUCCESS) {
+		printk(KERN_DEBUG "prism2_pccard_cor_sreset failed 3 (%d)\n",
+		       res);
+		return;
+	}
+
+	mdelay(hw_priv->sandisk_connectplus ? 5 : 2);
+
+	if (hw_priv->sandisk_connectplus)
+		sandisk_set_iobase(local);
+}
+
+
+static void prism2_pccard_genesis_reset(local_info_t *local, int hcr)
+{
+	int res;
+	conf_reg_t reg;
+	int old_cor;
+	struct hostap_cs_priv *hw_priv = local->hw_priv;
+
+	if (!prism2_pccard_card_present(local))
+	       return;
+
+	if (hw_priv->sandisk_connectplus) {
+		sandisk_write_hcr(local, hcr);
+		return;
+	}
+
+	reg.Function = 0;
+	reg.Action = CS_READ;
+	reg.Offset = CISREG_COR;
+	reg.Value = 0;
+	res = pcmcia_access_configuration_register(hw_priv->link->handle,
+						   &reg);
+	if (res != CS_SUCCESS) {
+		printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 1 "
+		       "(%d)\n", res);
+		return;
+	}
+	printk(KERN_DEBUG "prism2_pccard_genesis_sreset: original COR %02x\n",
+	       reg.Value);
+	old_cor = reg.Value;
+
+	reg.Action = CS_WRITE;
+	reg.Value |= COR_SOFT_RESET;
+	res = pcmcia_access_configuration_register(hw_priv->link->handle,
+						   &reg);
+	if (res != CS_SUCCESS) {
+		printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 2 "
+		       "(%d)\n", res);
+		return;
+	}
+
+	mdelay(10);
+
+	/* Setup Genesis mode */
+	reg.Action = CS_WRITE;
+	reg.Value = hcr;
+	reg.Offset = CISREG_CCSR;
+	res = pcmcia_access_configuration_register(hw_priv->link->handle,
+						   &reg);
+	if (res != CS_SUCCESS) {
+		printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 3 "
+		       "(%d)\n", res);
+		return;
+	}
+	mdelay(10);
+
+	reg.Action = CS_WRITE;
+	reg.Offset = CISREG_COR;
+	reg.Value = old_cor & ~COR_SOFT_RESET;
+	res = pcmcia_access_configuration_register(hw_priv->link->handle,
+						   &reg);
+	if (res != CS_SUCCESS) {
+		printk(KERN_DEBUG "prism2_pccard_genesis_sreset failed 4 "
+		       "(%d)\n", res);
+		return;
+	}
+
+	mdelay(10);
+}
+
+
+static int prism2_pccard_dev_open(local_info_t *local)
+{
+	struct hostap_cs_priv *hw_priv = local->hw_priv;
+	hw_priv->link->open++;
+	return 0;
+}
+
+
+static int prism2_pccard_dev_close(local_info_t *local)
+{
+	struct hostap_cs_priv *hw_priv;
+
+	if (local == NULL || local->hw_priv == NULL)
+		return 1;
+	hw_priv = local->hw_priv;
+	if (hw_priv->link == NULL)
+		return 1;
+
+	if (!hw_priv->link->open) {
+		printk(KERN_WARNING "%s: prism2_pccard_dev_close(): "
+		       "link not open?!\n", local->dev->name);
+		return 1;
+	}
+
+	hw_priv->link->open--;
+
+	return 0;
+}
+
+
+static struct prism2_helper_functions prism2_pccard_funcs =
+{
+	.card_present	= prism2_pccard_card_present,
+	.cor_sreset	= prism2_pccard_cor_sreset,
+	.dev_open	= prism2_pccard_dev_open,
+	.dev_close	= prism2_pccard_dev_close,
+	.genesis_reset	= prism2_pccard_genesis_reset,
+	.hw_type	= HOSTAP_HW_PCCARD,
+};
+
+
+/* allocate local data and register with CardServices
+ * initialize dev_link structure, but do not configure the card yet */
+static dev_link_t *prism2_attach(void)
+{
+	dev_link_t *link;
+	client_reg_t client_reg;
+	int ret;
+
+	link = kmalloc(sizeof(dev_link_t), GFP_KERNEL);
+	if (link == NULL)
+		return NULL;
+
+	memset(link, 0, sizeof(dev_link_t));
+
+	PDEBUG(DEBUG_HW, "%s: setting Vcc=33 (constant)\n", dev_info);
+	link->conf.Vcc = 33;
+	link->conf.IntType = INT_MEMORY_AND_IO;
+
+	/* register with CardServices */
+	link->next = dev_list;
+	dev_list = link;
+	client_reg.dev_info = &dev_info;
+	client_reg.Version = 0x0210;
+	client_reg.event_callback_args.client_data = link;
+	ret = pcmcia_register_client(&link->handle, &client_reg);
+	if (ret != CS_SUCCESS) {
+		cs_error(link->handle, RegisterClient, ret);
+		prism2_detach(link);
+		return NULL;
+	}
+	return link;
+}
+
+
+static void prism2_detach(dev_link_t *link)
+{
+	dev_link_t **linkp;
+
+	PDEBUG(DEBUG_FLOW, "prism2_detach\n");
+
+	for (linkp = &dev_list; *linkp; linkp = &(*linkp)->next)
+		if (*linkp == link)
+			break;
+	if (*linkp == NULL) {
+		printk(KERN_WARNING "%s: Attempt to detach non-existing "
+		       "PCMCIA client\n", dev_info);
+		return;
+	}
+
+	if (link->state & DEV_CONFIG) {
+		prism2_release((u_long)link);
+	}
+
+	if (link->handle) {
+		int res = pcmcia_deregister_client(link->handle);
+		if (res) {
+			printk("CardService(DeregisterClient) => %d\n", res);
+			cs_error(link->handle, DeregisterClient, res);
+		}
+	}
+
+	*linkp = link->next;
+	/* release net devices */
+	if (link->priv) {
+		struct net_device *dev;
+		struct hostap_interface *iface;
+		dev = link->priv;
+		iface = netdev_priv(dev);
+		kfree(iface->local->hw_priv);
+		iface->local->hw_priv = NULL;
+		prism2_free_local_data(dev);
+	}
+	kfree(link);
+}
+
+
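+/*
+ * Error handling helpers for the configuration code below: CS_CHECK() records
+ * the failing Card Services call and jumps to cs_failed on error, while
+ * CFG_CHECK2() only logs the error and continues with the next CIS
+ * configuration table entry.
+ */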
+#define CS_CHECK(fn, ret) \
+do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
+
+#define CFG_CHECK2(fn, retf) \
+do { int ret = (retf); \
+if (ret != 0) { \
+	PDEBUG(DEBUG_EXTRA, "CardServices(" #fn ") returned %d\n", ret); \
+	cs_error(link->handle, fn, ret); \
+	goto next_entry; \
+} \
+} while (0)
+
+
+/* run after a CARD_INSERTION event is received to configure the PCMCIA
+ * socket and make the device available to the system */
+static int prism2_config(dev_link_t *link)
+{
+	struct net_device *dev;
+	struct hostap_interface *iface;
+	local_info_t *local;
+	int ret = 1;
+	tuple_t tuple;
+	cisparse_t *parse;
+	int last_fn, last_ret;
+	u_char buf[64];
+	config_info_t conf;
+	cistpl_cftable_entry_t dflt = { 0 };
+	struct hostap_cs_priv *hw_priv;
+
+	PDEBUG(DEBUG_FLOW, "prism2_config()\n");
+
+	parse = kmalloc(sizeof(cisparse_t), GFP_KERNEL);
+	hw_priv = kmalloc(sizeof(*hw_priv), GFP_KERNEL);
+	if (parse == NULL || hw_priv == NULL) {
+		ret = -ENOMEM;
+		goto failed;
+	}
+	memset(hw_priv, 0, sizeof(*hw_priv));
+
+	tuple.DesiredTuple = CISTPL_CONFIG;
+	tuple.Attributes = 0;
+	tuple.TupleData = buf;
+	tuple.TupleDataMax = sizeof(buf);
+	tuple.TupleOffset = 0;
+	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link->handle, &tuple));
+	CS_CHECK(GetTupleData, pcmcia_get_tuple_data(link->handle, &tuple));
+	CS_CHECK(ParseTuple, pcmcia_parse_tuple(link->handle, &tuple, parse));
+	link->conf.ConfigBase = parse->config.base;
+	link->conf.Present = parse->config.rmask[0];
+
+	CS_CHECK(GetConfigurationInfo,
+		 pcmcia_get_configuration_info(link->handle, &conf));
+	PDEBUG(DEBUG_HW, "%s: %s Vcc=%d (from config)\n", dev_info,
+	       ignore_cis_vcc ? "ignoring" : "setting", conf.Vcc);
+	link->conf.Vcc = conf.Vcc;
+
+	/* Look for an appropriate configuration table entry in the CIS */
+	tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
+	CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link->handle, &tuple));
+	for (;;) {
+		cistpl_cftable_entry_t *cfg = &(parse->cftable_entry);
+		CFG_CHECK2(GetTupleData,
+			   pcmcia_get_tuple_data(link->handle, &tuple));
+		CFG_CHECK2(ParseTuple,
+			   pcmcia_parse_tuple(link->handle, &tuple, parse));
+
+		if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
+			dflt = *cfg;
+		if (cfg->index == 0)
+			goto next_entry;
+		link->conf.ConfigIndex = cfg->index;
+		PDEBUG(DEBUG_EXTRA, "Checking CFTABLE_ENTRY 0x%02X "
+		       "(default 0x%02X)\n", cfg->index, dflt.index);
+
+		/* Does this card need audio output? */
+		if (cfg->flags & CISTPL_CFTABLE_AUDIO) {
+			link->conf.Attributes |= CONF_ENABLE_SPKR;
+			link->conf.Status = CCSR_AUDIO_ENA;
+		}
+
+		/* Use power settings for Vcc and Vpp if present */
+		/*  Note that the CIS values need to be rescaled */
+		if (cfg->vcc.present & (1 << CISTPL_POWER_VNOM)) {
+			if (conf.Vcc != cfg->vcc.param[CISTPL_POWER_VNOM] /
+			    10000 && !ignore_cis_vcc) {
+				PDEBUG(DEBUG_EXTRA, "  Vcc mismatch - skipping"
+				       " this entry\n");
+				goto next_entry;
+			}
+		} else if (dflt.vcc.present & (1 << CISTPL_POWER_VNOM)) {
+			if (conf.Vcc != dflt.vcc.param[CISTPL_POWER_VNOM] /
+			    10000 && !ignore_cis_vcc) {
+				PDEBUG(DEBUG_EXTRA, "  Vcc (default) mismatch "
+				       "- skipping this entry\n");
+				goto next_entry;
+			}
+		}
+
+		if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM))
+			link->conf.Vpp1 = link->conf.Vpp2 =
+				cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000;
+		else if (dflt.vpp1.present & (1 << CISTPL_POWER_VNOM))
+			link->conf.Vpp1 = link->conf.Vpp2 =
+				dflt.vpp1.param[CISTPL_POWER_VNOM] / 10000;
+
+		/* Do we need to allocate an interrupt? */
+		if (cfg->irq.IRQInfo1 || dflt.irq.IRQInfo1)
+			link->conf.Attributes |= CONF_ENABLE_IRQ;
+		else if (!(link->conf.Attributes & CONF_ENABLE_IRQ)) {
+			/* At least Compaq WL200 does not have IRQInfo1 set,
+			 * but it does not work without interrupts.. */
+			printk("Config has no IRQ info, but trying to enable "
+			       "IRQ anyway..\n");
+			link->conf.Attributes |= CONF_ENABLE_IRQ;
+		}
+
+		/* IO window settings */
+		PDEBUG(DEBUG_EXTRA, "IO window settings: cfg->io.nwin=%d "
+		       "dflt.io.nwin=%d\n",
+		       cfg->io.nwin, dflt.io.nwin);
+		link->io.NumPorts1 = link->io.NumPorts2 = 0;
+		if ((cfg->io.nwin > 0) || (dflt.io.nwin > 0)) {
+			cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &dflt.io;
+			link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
+			PDEBUG(DEBUG_EXTRA, "io->flags = 0x%04X, "
+			       "io.base=0x%04x, len=%d\n", io->flags,
+			       io->win[0].base, io->win[0].len);
+			if (!(io->flags & CISTPL_IO_8BIT))
+				link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
+			if (!(io->flags & CISTPL_IO_16BIT))
+				link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
+			link->io.IOAddrLines = io->flags &
+				CISTPL_IO_LINES_MASK;
+			link->io.BasePort1 = io->win[0].base;
+			link->io.NumPorts1 = io->win[0].len;
+			if (io->nwin > 1) {
+				link->io.Attributes2 = link->io.Attributes1;
+				link->io.BasePort2 = io->win[1].base;
+				link->io.NumPorts2 = io->win[1].len;
+			}
+		}
+
+		/* This reserves IO space but doesn't actually enable it */
+		CFG_CHECK2(RequestIO,
+			   pcmcia_request_io(link->handle, &link->io));
+
+		/* This configuration table entry is OK */
+		break;
+
+	next_entry:
+		CS_CHECK(GetNextTuple,
+			 pcmcia_get_next_tuple(link->handle, &tuple));
+	}
+
+	/* Need to allocate net_device before requesting IRQ handler */
+	dev = prism2_init_local_data(&prism2_pccard_funcs, 0,
+				     &handle_to_dev(link->handle));
+	if (dev == NULL)
+		goto failed;
+	link->priv = dev;
+
+	/*
+	 * Allocate an interrupt line.  Note that this does not assign a
+	 * handler to the interrupt, unless the 'Handler' member of the
+	 * irq structure is initialized.
+	 */
+	if (link->conf.Attributes & CONF_ENABLE_IRQ) {
+		link->irq.Attributes = IRQ_TYPE_EXCLUSIVE | IRQ_HANDLE_PRESENT;
+		link->irq.IRQInfo1 = IRQ_LEVEL_ID;
+		link->irq.Handler = prism2_interrupt;
+		link->irq.Instance = dev;
+		CS_CHECK(RequestIRQ,
+			 pcmcia_request_irq(link->handle, &link->irq));
+	}
+
+	/*
+	 * This actually configures the PCMCIA socket -- setting up
+	 * the I/O windows and the interrupt mapping, and putting the
+	 * card and host interface into "Memory and IO" mode.
+	 */
+	CS_CHECK(RequestConfiguration,
+		 pcmcia_request_configuration(link->handle, &link->conf));
+
+	dev->irq = link->irq.AssignedIRQ;
+	dev->base_addr = link->io.BasePort1;
+
+	/* Finally, report what we've done */
+	printk(KERN_INFO "%s: index 0x%02x: Vcc %d.%d",
+	       dev_info, link->conf.ConfigIndex,
+	       link->conf.Vcc / 10, link->conf.Vcc % 10);
+	if (link->conf.Vpp1)
+		printk(", Vpp %d.%d", link->conf.Vpp1 / 10,
+		       link->conf.Vpp1 % 10);
+	if (link->conf.Attributes & CONF_ENABLE_IRQ)
+		printk(", irq %d", link->irq.AssignedIRQ);
+	if (link->io.NumPorts1)
+		printk(", io 0x%04x-0x%04x", link->io.BasePort1,
+		       link->io.BasePort1+link->io.NumPorts1-1);
+	if (link->io.NumPorts2)
+		printk(" & 0x%04x-0x%04x", link->io.BasePort2,
+		       link->io.BasePort2+link->io.NumPorts2-1);
+	printk("\n");
+
+	link->state |= DEV_CONFIG;
+	link->state &= ~DEV_CONFIG_PENDING;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+	local->hw_priv = hw_priv;
+	hw_priv->link = link;
+	strcpy(hw_priv->node.dev_name, dev->name);
+	link->dev = &hw_priv->node;
+
+	local->shutdown = 0;
+
+	sandisk_enable_wireless(dev);
+
+	ret = prism2_hw_config(dev, 1);
+	if (!ret) {
+		ret = hostap_hw_ready(dev);
+		if (ret == 0 && local->ddev)
+			strcpy(hw_priv->node.dev_name, local->ddev->name);
+	}
+	kfree(parse);
+	return ret;
+
+ cs_failed:
+	cs_error(link->handle, last_fn, last_ret);
+
+ failed:
+	kfree(parse);
+	kfree(hw_priv);
+	prism2_release((u_long)link);
+	return ret;
+}
+
+
+static void prism2_release(u_long arg)
+{
+	dev_link_t *link = (dev_link_t *)arg;
+
+	PDEBUG(DEBUG_FLOW, "prism2_release\n");
+
+	if (link->priv) {
+		struct net_device *dev = link->priv;
+		struct hostap_interface *iface;
+
+		iface = netdev_priv(dev);
+		if (link->state & DEV_CONFIG)
+			prism2_hw_shutdown(dev, 0);
+		iface->local->shutdown = 1;
+	}
+
+	if (link->win)
+		pcmcia_release_window(link->win);
+	pcmcia_release_configuration(link->handle);
+	if (link->io.NumPorts1)
+		pcmcia_release_io(link->handle, &link->io);
+	if (link->irq.AssignedIRQ)
+		pcmcia_release_irq(link->handle, &link->irq);
+
+	link->state &= ~DEV_CONFIG;
+
+	PDEBUG(DEBUG_FLOW, "release - done\n");
+}
+
+
+static int prism2_event(event_t event, int priority,
+			event_callback_args_t *args)
+{
+	dev_link_t *link = args->client_data;
+	struct net_device *dev = (struct net_device *) link->priv;
+
+	switch (event) {
+	case CS_EVENT_CARD_INSERTION:
+		PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_CARD_INSERTION\n", dev_info);
+		link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
+		if (prism2_config(link)) {
+			PDEBUG(DEBUG_EXTRA, "prism2_config() failed\n");
+		}
+		break;
+
+	case CS_EVENT_CARD_REMOVAL:
+		PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_CARD_REMOVAL\n", dev_info);
+		link->state &= ~DEV_PRESENT;
+		if (link->state & DEV_CONFIG) {
+			netif_stop_queue(dev);
+			netif_device_detach(dev);
+			prism2_release((u_long) link);
+		}
+		break;
+
+	case CS_EVENT_PM_SUSPEND:
+		PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_PM_SUSPEND\n", dev_info);
+		link->state |= DEV_SUSPEND;
+		/* fall through */
+
+	case CS_EVENT_RESET_PHYSICAL:
+		PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_RESET_PHYSICAL\n", dev_info);
+		if (link->state & DEV_CONFIG) {
+			if (link->open) {
+				netif_stop_queue(dev);
+				netif_device_detach(dev);
+			}
+			prism2_suspend(dev);
+			pcmcia_release_configuration(link->handle);
+		}
+		break;
+
+	case CS_EVENT_PM_RESUME:
+		PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_PM_RESUME\n", dev_info);
+		link->state &= ~DEV_SUSPEND;
+		/* fall through */
+
+	case CS_EVENT_CARD_RESET:
+		PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_CARD_RESET\n", dev_info);
+		if (link->state & DEV_CONFIG) {
+			pcmcia_request_configuration(link->handle,
+						     &link->conf);
+			prism2_hw_shutdown(dev, 1);
+			prism2_hw_config(dev, link->open ? 0 : 1);
+			if (link->open) {
+				netif_device_attach(dev);
+				netif_start_queue(dev);
+			}
+		}
+		break;
+
+	default:
+		PDEBUG(DEBUG_EXTRA, "%s: prism2_event() - unknown event %d\n",
+		       dev_info, event);
+		break;
+	}
+	return 0;
+}
+
+
+static struct pcmcia_device_id hostap_cs_ids[] = {
+	PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7100),
+	PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7300),
+	PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777),
+	PCMCIA_DEVICE_MANF_CARD(0x0126, 0x8000),
+	PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002),
+	PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0002),
+	PCMCIA_DEVICE_MANF_CARD(0x0250, 0x0002),
+	PCMCIA_DEVICE_MANF_CARD(0x026f, 0x030b),
+	PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1612),
+	PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1613),
+	PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0002),
+	PCMCIA_DEVICE_MANF_CARD(0x02aa, 0x0002),
+	PCMCIA_DEVICE_MANF_CARD(0x02d2, 0x0001),
+	PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x0001),
+	PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300),
+	PCMCIA_DEVICE_MANF_CARD(0xc00f, 0x0000),
+	PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002),
+	PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005),
+	PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0010),
+	PCMCIA_MFC_DEVICE_PROD_ID12(0, "SanDisk", "ConnectPlus",
+				    0x7a954bd9, 0x74be00c6),
+	PCMCIA_DEVICE_PROD_ID1234(
+		"Intersil", "PRISM 2_5 PCMCIA ADAPTER",	"ISL37300P",
+		"Eval-RevA",
+		0x4b801a17, 0x6345a0bf, 0xc9049a39, 0xc23adc0e),
+	PCMCIA_DEVICE_PROD_ID123(
+		"Addtron", "AWP-100 Wireless PCMCIA", "Version 01.02",
+		0xe6ec52ce, 0x08649af2, 0x4b74baa0),
+	PCMCIA_DEVICE_PROD_ID123(
+		"D", "Link DWL-650 11Mbps WLAN Card", "Version 01.02",
+		0x71b18589, 0xb6f1b0ab, 0x4b74baa0),
+	PCMCIA_DEVICE_PROD_ID123(
+		"Instant Wireless ", " Network PC CARD", "Version 01.02",
+		0x11d901af, 0x6e9bd926, 0x4b74baa0),
+	PCMCIA_DEVICE_PROD_ID123(
+		"SMC", "SMC2632W", "Version 01.02",
+		0xc4f8b18b, 0x474a1f2a, 0x4b74baa0),
+	PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-CF-S11G", 
+				0x1b01a57b, 0xefd5102a),
+	PCMCIA_DEVICE_PROD_ID12("Compaq", "WL200_11Mbps_Wireless_PCI_Card",
+				0x54f7c49c, 0x15a75e5b),
+	PCMCIA_DEVICE_PROD_ID12("INTERSIL", "HFA384x/IEEE",
+				0x74c5e40d, 0xdb472a18),
+	PCMCIA_DEVICE_PROD_ID12("Linksys", "Wireless CompactFlash Card",
+				0x0733cc81, 0x0c52f395),
+	PCMCIA_DEVICE_PROD_ID12(
+		"ZoomAir 11Mbps High", "Rate wireless Networking",
+		0x273fe3db, 0x32a1eaee),
+	PCMCIA_DEVICE_NULL
+};
+MODULE_DEVICE_TABLE(pcmcia, hostap_cs_ids);
+
+
+static struct pcmcia_driver hostap_driver = {
+	.drv		= {
+		.name	= "hostap_cs",
+	},
+	.attach		= prism2_attach,
+	.detach		= prism2_detach,
+	.owner		= THIS_MODULE,
+	.event		= prism2_event,
+	.id_table	= hostap_cs_ids,
+};
+
+static int __init init_prism2_pccard(void)
+{
+	printk(KERN_INFO "%s: %s\n", dev_info, version);
+	return pcmcia_register_driver(&hostap_driver);
+}
+
+static void __exit exit_prism2_pccard(void)
+{
+	pcmcia_unregister_driver(&hostap_driver);
+	printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
+}
+
+
+module_init(init_prism2_pccard);
+module_exit(exit_prism2_pccard);
diff --git a/drivers/net/wireless/hostap/hostap_download.c b/drivers/net/wireless/hostap/hostap_download.c
new file mode 100644
--- /dev/null
+++ b/drivers/net/wireless/hostap/hostap_download.c
@@ -0,0 +1,766 @@
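+/*
+ * The Aux port gives the host direct access to the card's internal memory.
+ * It is used below for reading the PDA and for the volatile, Genesis and
+ * (optionally) non-volatile firmware download paths. Enabling it requires
+ * writing the HFA384X_AUX_MAGIC* values to the Param registers.
+ */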
+static int prism2_enable_aux_port(struct net_device *dev, int enable)
+{
+	u16 val, reg;
+	int i, tries;
+	unsigned long flags;
+	struct hostap_interface *iface;
+	local_info_t *local;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	if (local->no_pri) {
+		if (enable) {
+			PDEBUG(DEBUG_EXTRA2, "%s: no PRI f/w - assuming Aux "
+			       "port is already enabled\n", dev->name);
+		}
+		return 0;
+	}
+
+	spin_lock_irqsave(&local->cmdlock, flags);
+
+	/* wait until busy bit is clear */
+	tries = HFA384X_CMD_BUSY_TIMEOUT;
+	while (HFA384X_INW(HFA384X_CMD_OFF) & HFA384X_CMD_BUSY && tries > 0) {
+		tries--;
+		udelay(1);
+	}
+	if (tries == 0) {
+		reg = HFA384X_INW(HFA384X_CMD_OFF);
+		spin_unlock_irqrestore(&local->cmdlock, flags);
+		printk("%s: prism2_enable_aux_port - timeout - reg=0x%04x\n",
+		       dev->name, reg);
+		return -ETIMEDOUT;
+	}
+
+	val = HFA384X_INW(HFA384X_CONTROL_OFF);
+
+	if (enable) {
+		HFA384X_OUTW(HFA384X_AUX_MAGIC0, HFA384X_PARAM0_OFF);
+		HFA384X_OUTW(HFA384X_AUX_MAGIC1, HFA384X_PARAM1_OFF);
+		HFA384X_OUTW(HFA384X_AUX_MAGIC2, HFA384X_PARAM2_OFF);
+
+		if ((val & HFA384X_AUX_PORT_MASK) != HFA384X_AUX_PORT_DISABLED)
+			printk("prism2_enable_aux_port: was not disabled!?\n");
+		val &= ~HFA384X_AUX_PORT_MASK;
+		val |= HFA384X_AUX_PORT_ENABLE;
+	} else {
+		HFA384X_OUTW(0, HFA384X_PARAM0_OFF);
+		HFA384X_OUTW(0, HFA384X_PARAM1_OFF);
+		HFA384X_OUTW(0, HFA384X_PARAM2_OFF);
+
+		if ((val & HFA384X_AUX_PORT_MASK) != HFA384X_AUX_PORT_ENABLED)
+			printk("prism2_enable_aux_port: was not enabled!?\n");
+		val &= ~HFA384X_AUX_PORT_MASK;
+		val |= HFA384X_AUX_PORT_DISABLE;
+	}
+	HFA384X_OUTW(val, HFA384X_CONTROL_OFF);
+
+	udelay(5);
+
+	i = 10000;
+	while (i > 0) {
+		val = HFA384X_INW(HFA384X_CONTROL_OFF);
+		val &= HFA384X_AUX_PORT_MASK;
+
+		if ((enable && val == HFA384X_AUX_PORT_ENABLED) ||
+		    (!enable && val == HFA384X_AUX_PORT_DISABLED))
+			break;
+
+		udelay(10);
+		i--;
+	}
+
+	spin_unlock_irqrestore(&local->cmdlock, flags);
+
+	if (i == 0) {
+		printk("prism2_enable_aux_port(%d) timed out\n",
+		       enable);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+
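+/*
+ * hfa384x_from_aux()/hfa384x_to_aux() move data between host memory and card
+ * memory through the Aux port: the address is split into an AUXPAGE/AUXOFFSET
+ * pair and the payload is then streamed through the AUXDATA register (word by
+ * word on PCI, string I/O otherwise). Addresses and lengths must be even.
+ */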
+static int hfa384x_from_aux(struct net_device *dev, unsigned int addr, int len,
+			    void *buf)
+{
+	u16 page, offset;
+	if (addr & 1 || len & 1)
+		return -1;
+
+	page = addr >> 7;
+	offset = addr & 0x7f;
+
+	HFA384X_OUTW(page, HFA384X_AUXPAGE_OFF);
+	HFA384X_OUTW(offset, HFA384X_AUXOFFSET_OFF);
+
+	udelay(5);
+
+#ifdef PRISM2_PCI
+	{
+		u16 *pos = (u16 *) buf;
+		while (len > 0) {
+			*pos++ = HFA384X_INW_DATA(HFA384X_AUXDATA_OFF);
+			len -= 2;
+		}
+	}
+#else /* PRISM2_PCI */
+	HFA384X_INSW(HFA384X_AUXDATA_OFF, buf, len / 2);
+#endif /* PRISM2_PCI */
+
+	return 0;
+}
+
+
+static int hfa384x_to_aux(struct net_device *dev, unsigned int addr, int len,
+			  void *buf)
+{
+	u16 page, offset;
+	if (addr & 1 || len & 1)
+		return -1;
+
+	page = addr >> 7;
+	offset = addr & 0x7f;
+
+	HFA384X_OUTW(page, HFA384X_AUXPAGE_OFF);
+	HFA384X_OUTW(offset, HFA384X_AUXOFFSET_OFF);
+
+	udelay(5);
+
+#ifdef PRISM2_PCI
+	{
+		u16 *pos = (u16 *) buf;
+		while (len > 0) {
+			HFA384X_OUTW_DATA(*pos++, HFA384X_AUXDATA_OFF);
+			len -= 2;
+		}
+	}
+#else /* PRISM2_PCI */
+	HFA384X_OUTSW(HFA384X_AUXDATA_OFF, buf, len / 2);
+#endif /* PRISM2_PCI */
+
+	return 0;
+}
+
+
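+/*
+ * Sanity check a candidate PDA image: a 0xff 0x00 0xff 0x00 header means no
+ * PDA was found at this address. Otherwise walk the little-endian
+ * (length, PDR code) records and require a terminating PDR 0x0000 record of
+ * length 2 within PRISM2_PDA_SIZE.
+ */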
+static int prism2_pda_ok(u8 *buf)
+{
+	u16 *pda = (u16 *) buf;
+	int pos;
+	u16 len, pdr;
+
+	if (buf[0] == 0xff && buf[1] == 0x00 && buf[2] == 0xff &&
+	    buf[3] == 0x00)
+		return 0;
+
+	pos = 0;
+	while (pos + 1 < PRISM2_PDA_SIZE / 2) {
+		len = le16_to_cpu(pda[pos]);
+		pdr = le16_to_cpu(pda[pos + 1]);
+		if (len == 0 || pos + len > PRISM2_PDA_SIZE / 2)
+			return 0;
+
+		if (pdr == 0x0000 && len == 2) {
+			/* PDA end found */
+			return 1;
+		}
+
+		pos += len + 1;
+	}
+
+	return 0;
+}
+
+
+static int prism2_download_aux_dump(struct net_device *dev,
+				     unsigned int addr, int len, u8 *buf)
+{
+	int res;
+
+	prism2_enable_aux_port(dev, 1);
+	res = hfa384x_from_aux(dev, addr, len, buf);
+	prism2_enable_aux_port(dev, 0);
+	if (res)
+		return -1;
+
+	return 0;
+}
+
+
+static u8 * prism2_read_pda(struct net_device *dev)
+{
+	u8 *buf;
+	int res, i, found = 0;
+#define NUM_PDA_ADDRS 4
+	unsigned int pda_addr[NUM_PDA_ADDRS] = {
+		0x7f0000 /* others than HFA3841 */,
+		0x3f0000 /* HFA3841 */,
+		0x390000 /* apparently used in older cards */,
+		0x7f0002 /* Intel PRO/Wireless 2011B (PCI) */,
+	};
+
+	buf = (u8 *) kmalloc(PRISM2_PDA_SIZE, GFP_KERNEL);
+	if (buf == NULL)
+		return NULL;
+
+	/* Note: wlan card should be in initial state (just after init cmd)
+	 * and no other operations should be performed concurrently. */
+
+	prism2_enable_aux_port(dev, 1);
+
+	for (i = 0; i < NUM_PDA_ADDRS; i++) {
+		PDEBUG(DEBUG_EXTRA2, "%s: trying to read PDA from 0x%08x",
+		       dev->name, pda_addr[i]);
+		res = hfa384x_from_aux(dev, pda_addr[i], PRISM2_PDA_SIZE, buf);
+		if (res)
+			continue;
+		if (res == 0 && prism2_pda_ok(buf)) {
+			PDEBUG2(DEBUG_EXTRA2, ": OK\n");
+			found = 1;
+			break;
+		} else {
+			PDEBUG2(DEBUG_EXTRA2, ": failed\n");
+		}
+	}
+
+	prism2_enable_aux_port(dev, 0);
+
+	if (!found) {
+		printk(KERN_DEBUG "%s: valid PDA not found\n", dev->name);
+		kfree(buf);
+		buf = NULL;
+	}
+
+	return buf;
+}
+
+
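+/*
+ * Volatile (RAM) download: put the card into programming mode, write each
+ * data area into card RAM through the Aux port and then disable programming
+ * mode, which restarts the firmware from param->start_addr.
+ */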
+static int prism2_download_volatile(local_info_t *local,
+				    struct prism2_download_data *param)
+{
+	struct net_device *dev = local->dev;
+	int ret = 0, i;
+	u16 param0, param1;
+
+	if (local->hw_downloading) {
+		printk(KERN_WARNING "%s: Already downloading - aborting new "
+		       "request\n", dev->name);
+		return -1;
+	}
+
+	local->hw_downloading = 1;
+	if (local->pri_only) {
+		hfa384x_disable_interrupts(dev);
+	} else {
+		prism2_hw_shutdown(dev, 0);
+
+		if (prism2_hw_init(dev, 0)) {
+			printk(KERN_WARNING "%s: Could not initialize card for"
+			       " download\n", dev->name);
+			ret = -1;
+			goto out;
+		}
+	}
+
+	if (prism2_enable_aux_port(dev, 1)) {
+		printk(KERN_WARNING "%s: Could not enable AUX port\n",
+		       dev->name);
+		ret = -1;
+		goto out;
+	}
+
+	param0 = param->start_addr & 0xffff;
+	param1 = param->start_addr >> 16;
+
+	HFA384X_OUTW(0, HFA384X_PARAM2_OFF);
+	HFA384X_OUTW(param1, HFA384X_PARAM1_OFF);
+	if (hfa384x_cmd_wait(dev, HFA384X_CMDCODE_DOWNLOAD |
+			     (HFA384X_PROGMODE_ENABLE_VOLATILE << 8),
+			     param0)) {
+		printk(KERN_WARNING "%s: Download command execution failed\n",
+		       dev->name);
+		ret = -1;
+		goto out;
+	}
+
+	for (i = 0; i < param->num_areas; i++) {
+		PDEBUG(DEBUG_EXTRA2, "%s: Writing %d bytes at 0x%08x\n",
+		       dev->name, param->data[i].len, param->data[i].addr);
+		if (hfa384x_to_aux(dev, param->data[i].addr,
+				   param->data[i].len, param->data[i].data)) {
+			printk(KERN_WARNING "%s: RAM download at 0x%08x "
+			       "(len=%d) failed\n", dev->name,
+			       param->data[i].addr, param->data[i].len);
+			ret = -1;
+			goto out;
+		}
+	}
+
+	HFA384X_OUTW(param1, HFA384X_PARAM1_OFF);
+	HFA384X_OUTW(0, HFA384X_PARAM2_OFF);
+	if (hfa384x_cmd_no_wait(dev, HFA384X_CMDCODE_DOWNLOAD |
+				(HFA384X_PROGMODE_DISABLE << 8), param0)) {
+		printk(KERN_WARNING "%s: Download command execution failed\n",
+		       dev->name);
+		ret = -1;
+		goto out;
+	}
+	/* ProgMode disable causes the hardware to restart itself from the
+	 * given starting address. Give hw some time and ACK command just in
+	 * case restart did not happen. */
+	mdelay(5);
+	HFA384X_OUTW(HFA384X_EV_CMD, HFA384X_EVACK_OFF);
+
+	if (prism2_enable_aux_port(dev, 0)) {
+		printk(KERN_DEBUG "%s: Disabling AUX port failed\n",
+		       dev->name);
+		/* continue anyway.. restart should have taken care of this */
+	}
+
+	mdelay(5);
+	local->hw_downloading = 0;
+	if (prism2_hw_config(dev, 2)) {
+		printk(KERN_WARNING "%s: Card configuration after RAM "
+		       "download failed\n", dev->name);
+		ret = -1;
+		goto out;
+	}
+
+ out:
+	local->hw_downloading = 0;
+	return ret;
+}
+
+
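+/*
+ * Try to bring the card up in Genesis (bootstrap) mode with the given HCR
+ * value: soft-reset, write a known pattern into card RAM through the Aux
+ * port, reset into Genesis mode and verify that the pattern reads back.
+ * Returns 0 on a successful readback.
+ */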
+static int prism2_enable_genesis(local_info_t *local, int hcr)
+{
+	struct net_device *dev = local->dev;
+	u8 initseq[4] = { 0x00, 0xe1, 0xa1, 0xff };
+	u8 readbuf[4];
+
+	printk(KERN_DEBUG "%s: test Genesis mode with HCR 0x%02x\n",
+	       dev->name, hcr);
+	local->func->cor_sreset(local);
+	hfa384x_to_aux(dev, 0x7e0038, sizeof(initseq), initseq);
+	local->func->genesis_reset(local, hcr);
+
+	/* Readback test */
+	hfa384x_from_aux(dev, 0x7e0038, sizeof(readbuf), readbuf);
+	hfa384x_to_aux(dev, 0x7e0038, sizeof(initseq), initseq);
+	hfa384x_from_aux(dev, 0x7e0038, sizeof(readbuf), readbuf);
+
+	if (memcmp(initseq, readbuf, sizeof(initseq)) == 0) {
+		printk(KERN_DEBUG "Readback test succeeded, HCR 0x%02x\n",
+		       hcr);
+		return 0;
+	} else {
+		printk(KERN_DEBUG "Readback test failed, HCR 0x%02x "
+		       "write %02x %02x %02x %02x read %02x %02x %02x %02x\n",
+		       hcr, initseq[0], initseq[1], initseq[2], initseq[3],
+		       readbuf[0], readbuf[1], readbuf[2], readbuf[3]);
+		return 1;
+	}
+}
+
+
+static int prism2_get_ram_size(local_info_t *local)
+{
+	int ret;
+
+	/* Try to enable genesis mode; 0x1F for x8 SRAM or 0x0F for x16 SRAM */
+	if (prism2_enable_genesis(local, 0x1f) == 0)
+		ret = 8;
+	else if (prism2_enable_genesis(local, 0x0f) == 0)
+		ret = 16;
+	else
+		ret = -1;
+
+	/* Disable genesis mode */
+	local->func->genesis_reset(local, ret == 16 ? 0x07 : 0x17);
+
+	return ret;
+}
+
+
+static int prism2_download_genesis(local_info_t *local,
+				   struct prism2_download_data *param)
+{
+	struct net_device *dev = local->dev;
+	int ram16 = 0, i;
+	int ret = 0;
+
+	if (local->hw_downloading) {
+		printk(KERN_WARNING "%s: Already downloading - aborting new "
+		       "request\n", dev->name);
+		return -EBUSY;
+	}
+
+	if (!local->func->genesis_reset || !local->func->cor_sreset) {
+		printk(KERN_INFO "%s: Genesis mode downloading not supported "
+		       "with this hwmodel\n", dev->name);
+		return -EOPNOTSUPP;
+	}
+
+	local->hw_downloading = 1;
+
+	if (prism2_enable_aux_port(dev, 1)) {
+		printk(KERN_DEBUG "%s: failed to enable AUX port\n",
+		       dev->name);
+		ret = -EIO;
+		goto out;
+	}
+
+	if (local->sram_type == -1) {
+		/* 0x1F for x8 SRAM or 0x0F for x16 SRAM */
+		if (prism2_enable_genesis(local, 0x1f) == 0) {
+			ram16 = 0;
+			PDEBUG(DEBUG_EXTRA2, "%s: Genesis mode OK using x8 "
+			       "SRAM\n", dev->name);
+		} else if (prism2_enable_genesis(local, 0x0f) == 0) {
+			ram16 = 1;
+			PDEBUG(DEBUG_EXTRA2, "%s: Genesis mode OK using x16 "
+			       "SRAM\n", dev->name);
+		} else {
+			printk(KERN_DEBUG "%s: Could not initiate genesis "
+			       "mode\n", dev->name);
+			ret = -EIO;
+			goto out;
+		}
+	} else {
+		if (prism2_enable_genesis(local, local->sram_type == 8 ?
+					  0x1f : 0x0f)) {
+			printk(KERN_DEBUG "%s: Failed to set Genesis "
+			       "mode (sram_type=%d)\n", dev->name,
+			       local->sram_type);
+			ret = -EIO;
+			goto out;
+		}
+		ram16 = local->sram_type != 8;
+	}
+
+	for (i = 0; i < param->num_areas; i++) {
+		PDEBUG(DEBUG_EXTRA2, "%s: Writing %d bytes at 0x%08x\n",
+		       dev->name, param->data[i].len, param->data[i].addr);
+		if (hfa384x_to_aux(dev, param->data[i].addr,
+				   param->data[i].len, param->data[i].data)) {
+			printk(KERN_WARNING "%s: RAM download at 0x%08x "
+			       "(len=%d) failed\n", dev->name,
+			       param->data[i].addr, param->data[i].len);
+			ret = -EIO;
+			goto out;
+		}
+	}
+
+	PDEBUG(DEBUG_EXTRA2, "Disable genesis mode\n");
+	local->func->genesis_reset(local, ram16 ? 0x07 : 0x17);
+	if (prism2_enable_aux_port(dev, 0)) {
+		printk(KERN_DEBUG "%s: Failed to disable AUX port\n",
+		       dev->name);
+	}
+
+	mdelay(5);
+	local->hw_downloading = 0;
+
+	PDEBUG(DEBUG_EXTRA2, "Trying to initialize card\n");
+	/*
+	 * Make sure the INIT command does not generate a command completion
+	 * event by disabling interrupts.
+	 */
+	hfa384x_disable_interrupts(dev);
+	if (prism2_hw_init(dev, 1)) {
+		printk(KERN_DEBUG "%s: Initialization after genesis mode "
+		       "download failed\n", dev->name);
+		ret = -EIO;
+		goto out;
+	}
+
+	PDEBUG(DEBUG_EXTRA2, "Card initialized - running PRI only\n");
+	if (prism2_hw_init2(dev, 1)) {
+		printk(KERN_DEBUG "%s: Initialization(2) after genesis mode "
+		       "download failed\n", dev->name);
+		ret = -EIO;
+		goto out;
+	}
+
+ out:
+	local->hw_downloading = 0;
+	return ret;
+}
+
+
+#ifdef PRISM2_NON_VOLATILE_DOWNLOAD
+/* Note! Non-volatile downloading functionality has not yet been tested
+ * thoroughly and it may corrupt flash image and effectively kill the card that
+ * is being updated. You have been warned. */
+
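+/*
+ * Program one flash block (at most 4096 bytes): enable non-volatile download
+ * mode for the target address, copy the data into the card's download buffer
+ * through the Aux port and issue the program command. Returns the number of
+ * bytes consumed or -1 on failure.
+ */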
+static inline int prism2_download_block(struct net_device *dev,
+					u32 addr, u8 *data,
+					u32 bufaddr, int rest_len)
+{
+	u16 param0, param1;
+	int block_len;
+
+	block_len = rest_len < 4096 ? rest_len : 4096;
+
+	param0 = addr & 0xffff;
+	param1 = addr >> 16;
+
+	HFA384X_OUTW(block_len, HFA384X_PARAM2_OFF);
+	HFA384X_OUTW(param1, HFA384X_PARAM1_OFF);
+
+	if (hfa384x_cmd_wait(dev, HFA384X_CMDCODE_DOWNLOAD |
+			     (HFA384X_PROGMODE_ENABLE_NON_VOLATILE << 8),
+			     param0)) {
+		printk(KERN_WARNING "%s: Flash download command execution "
+		       "failed\n", dev->name);
+		return -1;
+	}
+
+	if (hfa384x_to_aux(dev, bufaddr, block_len, data)) {
+		printk(KERN_WARNING "%s: flash download at 0x%08x "
+		       "(len=%d) failed\n", dev->name, addr, block_len);
+		return -1;
+	}
+
+	HFA384X_OUTW(0, HFA384X_PARAM2_OFF);
+	HFA384X_OUTW(0, HFA384X_PARAM1_OFF);
+	if (hfa384x_cmd_wait(dev, HFA384X_CMDCODE_DOWNLOAD |
+			     (HFA384X_PROGMODE_PROGRAM_NON_VOLATILE << 8),
+			     0)) {
+		printk(KERN_WARNING "%s: Flash write command execution "
+		       "failed\n", dev->name);
+		return -1;
+	}
+
+	return block_len;
+}
+
+
+static int prism2_download_nonvolatile(local_info_t *local,
+				       struct prism2_download_data *dl)
+{
+	struct net_device *dev = local->dev;
+	int ret = 0, i;
+	struct {
+		u16 page;
+		u16 offset;
+		u16 len;
+	} dlbuffer;
+	u32 bufaddr;
+
+	if (local->hw_downloading) {
+		printk(KERN_WARNING "%s: Already downloading - aborting new "
+		       "request\n", dev->name);
+		return -1;
+	}
+
+	ret = local->func->get_rid(dev, HFA384X_RID_DOWNLOADBUFFER,
+				   &dlbuffer, 6, 0);
+
+	if (ret < 0) {
+		printk(KERN_WARNING "%s: Could not read download buffer "
+		       "parameters\n", dev->name);
+		goto out;
+	}
+
+	dlbuffer.page = le16_to_cpu(dlbuffer.page);
+	dlbuffer.offset = le16_to_cpu(dlbuffer.offset);
+	dlbuffer.len = le16_to_cpu(dlbuffer.len);
+
+	printk(KERN_DEBUG "Download buffer: %d bytes at 0x%04x:0x%04x\n",
+	       dlbuffer.len, dlbuffer.page, dlbuffer.offset);
+
+	bufaddr = (dlbuffer.page << 7) + dlbuffer.offset;
+
+	local->hw_downloading = 1;
+
+	if (!local->pri_only) {
+		prism2_hw_shutdown(dev, 0);
+
+		if (prism2_hw_init(dev, 0)) {
+			printk(KERN_WARNING "%s: Could not initialize card for"
+			       " download\n", dev->name);
+			ret = -1;
+			goto out;
+		}
+	}
+
+	hfa384x_disable_interrupts(dev);
+
+	if (prism2_enable_aux_port(dev, 1)) {
+		printk(KERN_WARNING "%s: Could not enable AUX port\n",
+		       dev->name);
+		ret = -1;
+		goto out;
+	}
+
+	printk(KERN_DEBUG "%s: starting flash download\n", dev->name);
+	for (i = 0; i < dl->num_areas; i++) {
+		int rest_len = dl->data[i].len;
+		int data_off = 0;
+
+		while (rest_len > 0) {
+			int block_len;
+
+			block_len = prism2_download_block(
+				dev, dl->data[i].addr + data_off,
+				dl->data[i].data + data_off, bufaddr,
+				rest_len);
+
+			if (block_len < 0) {
+				ret = -1;
+				goto out;
+			}
+
+			rest_len -= block_len;
+			data_off += block_len;
+		}
+	}
+
+	HFA384X_OUTW(0, HFA384X_PARAM1_OFF);
+	HFA384X_OUTW(0, HFA384X_PARAM2_OFF);
+	if (hfa384x_cmd_wait(dev, HFA384X_CMDCODE_DOWNLOAD |
+				(HFA384X_PROGMODE_DISABLE << 8), 0)) {
+		printk(KERN_WARNING "%s: Download command execution failed\n",
+		       dev->name);
+		ret = -1;
+		goto out;
+	}
+
+	if (prism2_enable_aux_port(dev, 0)) {
+		printk(KERN_DEBUG "%s: Disabling AUX port failed\n",
+		       dev->name);
+		/* continue anyway.. restart should have taken care of this */
+	}
+
+	mdelay(5);
+
+	local->func->hw_reset(dev);
+	local->hw_downloading = 0;
+	if (prism2_hw_config(dev, 2)) {
+		printk(KERN_WARNING "%s: Card configuration after flash "
+		       "download failed\n", dev->name);
+		ret = -1;
+	} else {
+		printk(KERN_INFO "%s: Card initialized successfully after "
+		       "flash download\n", dev->name);
+	}
+
+ out:
+	local->hw_downloading = 0;
+	return ret;
+}
+#endif /* PRISM2_NON_VOLATILE_DOWNLOAD */
+
+
+static void prism2_download_free_data(struct prism2_download_data *dl)
+{
+	int i;
+
+	if (dl == NULL)
+		return;
+
+	for (i = 0; i < dl->num_areas; i++)
+		kfree(dl->data[i].data);
+	kfree(dl);
+}
+
+
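+/*
+ * Copy a user-space download request into kernel memory and dispatch it to
+ * the volatile, Genesis or non-volatile download routine. For the
+ * *_PERSISTENT variants the copied data is stored in local->dl_pri/dl_sec
+ * instead of being freed.
+ */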
+static int prism2_download(local_info_t *local,
+			   struct prism2_download_param *param)
+{
+	int ret = 0;
+	int i;
+	u32 total_len = 0;
+	struct prism2_download_data *dl = NULL;
+
+	printk(KERN_DEBUG "prism2_download: dl_cmd=%d start_addr=0x%08x "
+	       "num_areas=%d\n",
+	       param->dl_cmd, param->start_addr, param->num_areas);
+
+	if (param->num_areas > 100) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	dl = kmalloc(sizeof(*dl) + param->num_areas *
+		     sizeof(struct prism2_download_data_area), GFP_KERNEL);
+	if (dl == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	memset(dl, 0, sizeof(*dl) + param->num_areas *
+	       sizeof(struct prism2_download_data_area));
+	dl->dl_cmd = param->dl_cmd;
+	dl->start_addr = param->start_addr;
+	dl->num_areas = param->num_areas;
+	for (i = 0; i < param->num_areas; i++) {
+		PDEBUG(DEBUG_EXTRA2,
+		       "  area %d: addr=0x%08x len=%d ptr=0x%p\n",
+		       i, param->data[i].addr, param->data[i].len,
+		       param->data[i].ptr);
+
+		dl->data[i].addr = param->data[i].addr;
+		dl->data[i].len = param->data[i].len;
+
+		total_len += param->data[i].len;
+		if (param->data[i].len > PRISM2_MAX_DOWNLOAD_AREA_LEN ||
+		    total_len > PRISM2_MAX_DOWNLOAD_LEN) {
+			ret = -E2BIG;
+			goto out;
+		}
+
+		dl->data[i].data = kmalloc(dl->data[i].len, GFP_KERNEL);
+		if (dl->data[i].data == NULL) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		if (copy_from_user(dl->data[i].data, param->data[i].ptr,
+				   param->data[i].len)) {
+			ret = -EFAULT;
+			goto out;
+		}
+	}
+
+	switch (param->dl_cmd) {
+	case PRISM2_DOWNLOAD_VOLATILE:
+	case PRISM2_DOWNLOAD_VOLATILE_PERSISTENT:
+		ret = prism2_download_volatile(local, dl);
+		break;
+	case PRISM2_DOWNLOAD_VOLATILE_GENESIS:
+	case PRISM2_DOWNLOAD_VOLATILE_GENESIS_PERSISTENT:
+		ret = prism2_download_genesis(local, dl);
+		break;
+	case PRISM2_DOWNLOAD_NON_VOLATILE:
+#ifdef PRISM2_NON_VOLATILE_DOWNLOAD
+		ret = prism2_download_nonvolatile(local, dl);
+#else /* PRISM2_NON_VOLATILE_DOWNLOAD */
+		printk(KERN_INFO "%s: non-volatile downloading not enabled\n",
+		       local->dev->name);
+		ret = -EOPNOTSUPP;
+#endif /* PRISM2_NON_VOLATILE_DOWNLOAD */
+		break;
+	default:
+		printk(KERN_DEBUG "%s: unsupported download command %d\n",
+		       local->dev->name, param->dl_cmd);
+		ret = -EINVAL;
+		break;
+	}
+
+ out:
+	if (ret == 0 && dl &&
+	    param->dl_cmd == PRISM2_DOWNLOAD_VOLATILE_GENESIS_PERSISTENT) {
+		prism2_download_free_data(local->dl_pri);
+		local->dl_pri = dl;
+	} else if (ret == 0 && dl &&
+		   param->dl_cmd == PRISM2_DOWNLOAD_VOLATILE_PERSISTENT) {
+		prism2_download_free_data(local->dl_sec);
+		local->dl_sec = dl;
+	} else
+		prism2_download_free_data(dl);
+
+	return ret;
+}
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
new file mode 100644
--- /dev/null
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -0,0 +1,3445 @@
+/*
+ * Host AP (software wireless LAN access point) driver for
+ * Intersil Prism2/2.5/3.
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2005, Jouni Malinen <jkmaline@cc.hut.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation. See README and COPYING for
+ * more details.
+ *
+ * FIX:
+ * - there is currently no way of associating TX packets to correct wds device
+ *   when TX Exc/OK event occurs, so all tx_packets and some
+ *   tx_errors/tx_dropped are added to the main netdevice; using sw_support
+ *   field in txdesc might be used to fix this (using Alloc event to increment
+ *   tx_packets would need some further info in txfid table)
+ *
+ * Buffer Access Path (BAP) usage:
+ *   Prism2 cards have two separate BAPs for accessing the card memory. These
+ *   should allow concurrent access to two different frames and the driver
+ *   previously used BAP0 for sending data and BAP1 for receiving data.
+ *   However, there seem to be a number of issues with concurrent access and at
+ *   least one known hardware bug when using BAP0 and BAP1 concurrently with PCI
+ *   Prism2.5. Therefore, the driver now only uses BAP0 for moving data between
+ *   host and card memories. BAP0 accesses are protected with local->baplock
+ *   (spin_lock_bh) to prevent concurrent use.
+ */
+
+
+#include <linux/config.h>
+#include <linux/version.h>
+
+#include <asm/delay.h>
+#include <asm/uaccess.h>
+
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/proc_fs.h>
+#include <linux/if_arp.h>
+#include <linux/delay.h>
+#include <linux/random.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/rtnetlink.h>
+#include <linux/wireless.h>
+#include <net/iw_handler.h>
+#include <net/ieee80211.h>
+#include <net/ieee80211_crypt.h>
+#include <asm/irq.h>
+
+#include "hostap_80211.h"
+#include "hostap.h"
+#include "hostap_ap.h"
+
+
+/* #define final_version */
+
+static int mtu = 1500;
+module_param(mtu, int, 0444);
+MODULE_PARM_DESC(mtu, "Maximum transfer unit");
+
+static int channel[MAX_PARM_DEVICES] = { 3, DEF_INTS };
+module_param_array(channel, int, NULL, 0444);
+MODULE_PARM_DESC(channel, "Initial channel");
+
+static char essid[33] = "test";
+module_param_string(essid, essid, sizeof(essid), 0444);
+MODULE_PARM_DESC(essid, "Host AP's ESSID");
+
+static int iw_mode[MAX_PARM_DEVICES] = { IW_MODE_MASTER, DEF_INTS };
+module_param_array(iw_mode, int, NULL, 0444);
+MODULE_PARM_DESC(iw_mode, "Initial operation mode");
+
+static int beacon_int[MAX_PARM_DEVICES] = { 100, DEF_INTS };
+module_param_array(beacon_int, int, NULL, 0444);
+MODULE_PARM_DESC(beacon_int, "Beacon interval (1 = 1024 usec)");
+
+static int dtim_period[MAX_PARM_DEVICES] = { 1, DEF_INTS };
+module_param_array(dtim_period, int, NULL, 0444);
+MODULE_PARM_DESC(dtim_period, "DTIM period");
+
+static char dev_template[16] = "wlan%d";
+module_param_string(dev_template, dev_template, sizeof(dev_template), 0444);
+MODULE_PARM_DESC(dev_template, "Prefix for network device name (default: "
+		 "wlan%d)");
+
+#ifdef final_version
+#define EXTRA_EVENTS_WTERR 0
+#else
+/* check WTERR events (Wait Time-out) in development versions */
+#define EXTRA_EVENTS_WTERR HFA384X_EV_WTERR
+#endif
+
+/* Events that will be using BAP0 */
+#define HFA384X_BAP0_EVENTS \
+	(HFA384X_EV_TXEXC | HFA384X_EV_RX | HFA384X_EV_INFO | HFA384X_EV_TX)
+
+/* event mask, i.e., events that will result in an interrupt */
+#define HFA384X_EVENT_MASK \
+	(HFA384X_BAP0_EVENTS | HFA384X_EV_ALLOC | HFA384X_EV_INFDROP | \
+	HFA384X_EV_CMD | HFA384X_EV_TICK | \
+	EXTRA_EVENTS_WTERR)
+
+/* Default TX control flags: use 802.11 headers and request an interrupt for
+ * failed transmits. Frames that request an ACK callback will also add the
+ * _TX_OK flag, and the _ALT_RTRY flag may be used to select a different retry
+ * policy.
+ */
+#define HFA384X_TX_CTRL_FLAGS \
+	(HFA384X_TX_CTRL_802_11 | HFA384X_TX_CTRL_TX_EX)
+
+
+/* ca. 1 usec per polling iteration */
+#define HFA384X_CMD_BUSY_TIMEOUT 5000
+#define HFA384X_BAP_BUSY_TIMEOUT 50000
+
+/* ca. 10 usec per polling iteration */
+#define HFA384X_CMD_COMPL_TIMEOUT 20000
+#define HFA384X_DL_COMPL_TIMEOUT 1000000
+
+/* Wait times for initialization; yield to other processes to avoid busy
+ * waiting for a long time. */
+#define HFA384X_INIT_TIMEOUT (HZ / 2) /* 500 ms */
+#define HFA384X_ALLOC_COMPL_TIMEOUT (HZ / 20) /* 50 ms */
+
+
+static void prism2_hw_reset(struct net_device *dev);
+static void prism2_check_sta_fw_version(local_info_t *local);
+
+#ifdef PRISM2_DOWNLOAD_SUPPORT
+/* hostap_download.c */
+static int prism2_download_aux_dump(struct net_device *dev,
+				    unsigned int addr, int len, u8 *buf);
+static u8 * prism2_read_pda(struct net_device *dev);
+static int prism2_download(local_info_t *local,
+			   struct prism2_download_param *param);
+static void prism2_download_free_data(struct prism2_download_data *dl);
+static int prism2_download_volatile(local_info_t *local,
+				    struct prism2_download_data *param);
+static int prism2_download_genesis(local_info_t *local,
+				   struct prism2_download_data *param);
+static int prism2_get_ram_size(local_info_t *local);
+#endif /* PRISM2_DOWNLOAD_SUPPORT */
+
+
+
+
+#ifndef final_version
+/* magic value written to SWSUPPORT0 reg. for detecting whether card is still
+ * present */
+#define HFA384X_MAGIC 0x8A32
+#endif
+
+
+static u16 hfa384x_read_reg(struct net_device *dev, u16 reg)
+{
+	return HFA384X_INW(reg);
+}
+
+
+static void hfa384x_read_regs(struct net_device *dev,
+			      struct hfa384x_regs *regs)
+{
+	regs->cmd = HFA384X_INW(HFA384X_CMD_OFF);
+	regs->evstat = HFA384X_INW(HFA384X_EVSTAT_OFF);
+	regs->offset0 = HFA384X_INW(HFA384X_OFFSET0_OFF);
+	regs->offset1 = HFA384X_INW(HFA384X_OFFSET1_OFF);
+	regs->swsupport0 = HFA384X_INW(HFA384X_SWSUPPORT0_OFF);
+}
+
+
+/**
+ * __hostap_cmd_queue_free - Free Prism2 command queue entry (private)
+ * @local: pointer to private Host AP driver data
+ * @entry: Prism2 command queue entry to be freed
+ * @del_req: request the entry to be removed
+ *
+ * Internal helper function for freeing Prism2 command queue entries.
+ * Caller must have acquired local->cmdlock before calling this function.
+ */
+static inline void __hostap_cmd_queue_free(local_info_t *local,
+					   struct hostap_cmd_queue *entry,
+					   int del_req)
+{
+	if (del_req) {
+		entry->del_req = 1;
+		if (!list_empty(&entry->list)) {
+			list_del_init(&entry->list);
+			local->cmd_queue_len--;
+		}
+	}
+
+	if (atomic_dec_and_test(&entry->usecnt) && entry->del_req)
+		kfree(entry);
+}
+
+
+/**
+ * hostap_cmd_queue_free - Free Prism2 command queue entry
+ * @local: pointer to private Host AP driver data
+ * @entry: Prism2 command queue entry to be freed
+ * @del_req: request the entry to be removed
+ *
+ * Free a Prism2 command queue entry.
+ */
+static inline void hostap_cmd_queue_free(local_info_t *local,
+					 struct hostap_cmd_queue *entry,
+					 int del_req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&local->cmdlock, flags);
+	__hostap_cmd_queue_free(local, entry, del_req);
+	spin_unlock_irqrestore(&local->cmdlock, flags);
+}
+
+
+/**
+ * prism2_clear_cmd_queue - Free all pending Prism2 command queue entries
+ * @local: pointer to private Host AP driver data
+ */
+static void prism2_clear_cmd_queue(local_info_t *local)
+{
+	struct list_head *ptr, *n;
+	unsigned long flags;
+	struct hostap_cmd_queue *entry;
+
+	spin_lock_irqsave(&local->cmdlock, flags);
+	list_for_each_safe(ptr, n, &local->cmd_queue) {
+		entry = list_entry(ptr, struct hostap_cmd_queue, list);
+		atomic_inc(&entry->usecnt);
+		printk(KERN_DEBUG "%s: removed pending cmd_queue entry "
+		       "(type=%d, cmd=0x%04x, param0=0x%04x)\n",
+		       local->dev->name, entry->type, entry->cmd,
+		       entry->param0);
+		__hostap_cmd_queue_free(local, entry, 1);
+	}
+	if (local->cmd_queue_len) {
+		/* This should not happen; print debug message and clear
+		 * queue length. */
+		printk(KERN_DEBUG "%s: cmd_queue_len (%d) not zero after "
+		       "flush\n", local->dev->name, local->cmd_queue_len);
+		local->cmd_queue_len = 0;
+	}
+	spin_unlock_irqrestore(&local->cmdlock, flags);
+}
+
+
+/**
+ * hfa384x_cmd_issue - Issue a Prism2 command to the hardware
+ * @dev: pointer to net_device
+ * @entry: Prism2 command queue entry to be issued
+ */
+static inline int hfa384x_cmd_issue(struct net_device *dev,
+				    struct hostap_cmd_queue *entry)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	int tries;
+	u16 reg;
+	unsigned long flags;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	if (local->func->card_present && !local->func->card_present(local))
+		return -ENODEV;
+
+	if (entry->issued) {
+		printk(KERN_DEBUG "%s: driver bug - re-issuing command @%p\n",
+		       dev->name, entry);
+	}
+
+	/* wait until busy bit is clear; this should always be clear since the
+	 * commands are serialized */
+	tries = HFA384X_CMD_BUSY_TIMEOUT;
+	while (HFA384X_INW(HFA384X_CMD_OFF) & HFA384X_CMD_BUSY && tries > 0) {
+		tries--;
+		udelay(1);
+	}
+#ifndef final_version
+	if (tries != HFA384X_CMD_BUSY_TIMEOUT) {
+		prism2_io_debug_error(dev, 1);
+		printk(KERN_DEBUG "%s: hfa384x_cmd_issue: cmd reg was busy "
+		       "for %d usec\n", dev->name,
+		       HFA384X_CMD_BUSY_TIMEOUT - tries);
+	}
+#endif
+	if (tries == 0) {
+		reg = HFA384X_INW(HFA384X_CMD_OFF);
+		prism2_io_debug_error(dev, 2);
+		printk(KERN_DEBUG "%s: hfa384x_cmd_issue - timeout - "
+		       "reg=0x%04x\n", dev->name, reg);
+		return -ETIMEDOUT;
+	}
+
+	/* write command */
+	spin_lock_irqsave(&local->cmdlock, flags);
+	HFA384X_OUTW(entry->param0, HFA384X_PARAM0_OFF);
+	HFA384X_OUTW(entry->param1, HFA384X_PARAM1_OFF);
+	HFA384X_OUTW(entry->cmd, HFA384X_CMD_OFF);
+	entry->issued = 1;
+	spin_unlock_irqrestore(&local->cmdlock, flags);
+
+	return 0;
+}
+
+
+/**
+ * hfa384x_cmd - Issue a Prism2 command and wait (sleep) for completion
+ * @dev: pointer to net_device
+ * @cmd: Prism2 command code (HFA384X_CMD_CODE_*)
+ * @param0: value for Param0 register
+ * @param1: value for Param1 register (pointer; %NULL if not used)
+ * @resp0: pointer for Resp0 data or %NULL if Resp0 is not needed
+ *
+ * Issue given command (possibly after waiting in command queue) and sleep
+ * until the command is completed (or timed out or interrupted). This can be
+ * called only from user process context.
+ */
+static int hfa384x_cmd(struct net_device *dev, u16 cmd, u16 param0,
+		       u16 *param1, u16 *resp0)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	int err, res, issue, issued = 0;
+	unsigned long flags;
+	struct hostap_cmd_queue *entry;
+	DECLARE_WAITQUEUE(wait, current);
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	if (in_interrupt()) {
+		printk(KERN_DEBUG "%s: hfa384x_cmd called from interrupt "
+		       "context\n", dev->name);
+		return -1;
+	}
+
+	if (local->cmd_queue_len >= HOSTAP_CMD_QUEUE_MAX_LEN) {
+		printk(KERN_DEBUG "%s: hfa384x_cmd: cmd_queue full\n",
+		       dev->name);
+		return -1;
+	}
+
+	if (signal_pending(current))
+		return -EINTR;
+
+	entry = (struct hostap_cmd_queue *)
+		kmalloc(sizeof(*entry), GFP_ATOMIC);
+	if (entry == NULL) {
+		printk(KERN_DEBUG "%s: hfa384x_cmd - kmalloc failed\n",
+		       dev->name);
+		return -ENOMEM;
+	}
+	memset(entry, 0, sizeof(*entry));
+	atomic_set(&entry->usecnt, 1);
+	entry->type = CMD_SLEEP;
+	entry->cmd = cmd;
+	entry->param0 = param0;
+	if (param1)
+		entry->param1 = *param1;
+	init_waitqueue_head(&entry->compl);
+
+	/* prepare to wait for command completion event, but do not sleep yet
+	 */
+	add_wait_queue(&entry->compl, &wait);
+	set_current_state(TASK_INTERRUPTIBLE);
+
+	spin_lock_irqsave(&local->cmdlock, flags);
+	issue = list_empty(&local->cmd_queue);
+	if (issue)
+		entry->issuing = 1;
+	list_add_tail(&entry->list, &local->cmd_queue);
+	local->cmd_queue_len++;
+	spin_unlock_irqrestore(&local->cmdlock, flags);
+
+	err = 0;
+	if (!issue)
+		goto wait_completion;
+
+	if (signal_pending(current))
+		err = -EINTR;
+
+	if (!err) {
+		if (hfa384x_cmd_issue(dev, entry))
+			err = -ETIMEDOUT;
+		else
+			issued = 1;
+	}
+
+ wait_completion:
+	if (!err && entry->type != CMD_COMPLETED) {
+		/* sleep until command is completed or timed out */
+		res = schedule_timeout(2 * HZ);
+	} else
+		res = -1;
+
+	if (!err && signal_pending(current))
+		err = -EINTR;
+
+	if (err && issued) {
+		/* the command was issued, so a CmdCompl event should occur
+		 * soon; however, there's a pending signal and
+		 * schedule_timeout() would be interrupted; wait a short period
+		 * of time to avoid removing entry from the list before
+		 * CmdCompl event */
+		udelay(300);
+	}
+
+	set_current_state(TASK_RUNNING);
+	remove_wait_queue(&entry->compl, &wait);
+
+	/* If entry->list is still in the list, it must be removed
+	 * first and in this case prism2_cmd_ev() does not yet have
+	 * local reference to it, and the data can be kfree()'d
+	 * here. If the command completion event is still generated,
+	 * it will be assigned to next (possibly) pending command, but
+	 * the driver will reset the card anyway due to timeout
+	 *
+	 * If the entry is not in the list prism2_cmd_ev() has a local
+	 * reference to it, but keeps cmdlock as long as the data is
+	 * needed, so the data can be kfree()'d here. */
+
+	/* FIX: if the entry->list is in the list, it has not been completed
+	 * yet, so removing it here is somewhat wrong.. this could cause
+	 * references to freed memory and next list_del() causing NULL pointer
+	 * dereference.. it would probably be better to leave the entry in the
+	 * list and the list should be emptied during hw reset */
+
+	spin_lock_irqsave(&local->cmdlock, flags);
+	if (!list_empty(&entry->list)) {
+		printk(KERN_DEBUG "%s: hfa384x_cmd: entry still in list? "
+		       "(entry=%p, type=%d, res=%d)\n", dev->name, entry,
+		       entry->type, res);
+		list_del_init(&entry->list);
+		local->cmd_queue_len--;
+	}
+	spin_unlock_irqrestore(&local->cmdlock, flags);
+
+	if (err) {
+		printk(KERN_DEBUG "%s: hfa384x_cmd: interrupted; err=%d\n",
+		       dev->name, err);
+		res = err;
+		goto done;
+	}
+
+	if (entry->type != CMD_COMPLETED) {
+		u16 reg = HFA384X_INW(HFA384X_EVSTAT_OFF);
+		printk(KERN_DEBUG "%s: hfa384x_cmd: command was not "
+		       "completed (res=%d, entry=%p, type=%d, cmd=0x%04x, "
+		       "param0=0x%04x, EVSTAT=%04x INTEN=%04x)\n", dev->name,
+		       res, entry, entry->type, entry->cmd, entry->param0, reg,
+		       HFA384X_INW(HFA384X_INTEN_OFF));
+		if (reg & HFA384X_EV_CMD) {
+			/* Command completion event is pending, but the
+			 * interrupt was not delivered - probably an issue
+			 * with pcmcia-cs configuration. */
+			printk(KERN_WARNING "%s: interrupt delivery does not "
+			       "seem to work\n", dev->name);
+		}
+		prism2_io_debug_error(dev, 3);
+		res = -ETIMEDOUT;
+		goto done;
+	}
+
+	if (resp0 != NULL)
+		*resp0 = entry->resp0;
+#ifndef final_version
+	if (entry->res) {
+		printk(KERN_DEBUG "%s: CMD=0x%04x => res=0x%02x, "
+		       "resp0=0x%04x\n",
+		       dev->name, cmd, entry->res, entry->resp0);
+	}
+#endif /* final_version */
+
+	res = entry->res;
+ done:
+	hostap_cmd_queue_free(local, entry, 1);
+	return res;
+}
+
+
+/**
+ * hfa384x_cmd_callback - Issue a Prism2 command; callback when completed
+ * @dev: pointer to net_device
+ * @cmd: Prism2 command code (HFA384X_CMD_CODE_*)
+ * @param0: value for Param0 register
+ * @callback: command completion callback function (%NULL = no callback)
+ * @context: context data to be given to the callback function
+ *
+ * Issue given command (possibly after waiting in command queue) and use
+ * callback function to indicate command completion. This can be called both
+ * from user and interrupt context. The callback function will be called in
+ * hardware IRQ context. It can be %NULL, when no function is called when
+ * command is completed.
+ */
+static int hfa384x_cmd_callback(struct net_device *dev, u16 cmd, u16 param0,
+				void (*callback)(struct net_device *dev,
+						 long context, u16 resp0,
+						 u16 status),
+				long context)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	int issue, ret;
+	unsigned long flags;
+	struct hostap_cmd_queue *entry;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	if (local->cmd_queue_len >= HOSTAP_CMD_QUEUE_MAX_LEN + 2) {
+		printk(KERN_DEBUG "%s: hfa384x_cmd: cmd_queue full\n",
+		       dev->name);
+		return -1;
+	}
+
+	entry = (struct hostap_cmd_queue *)
+		kmalloc(sizeof(*entry), GFP_ATOMIC);
+	if (entry == NULL) {
+		printk(KERN_DEBUG "%s: hfa384x_cmd_callback - kmalloc "
+		       "failed\n", dev->name);
+		return -ENOMEM;
+	}
+	memset(entry, 0, sizeof(*entry));
+	atomic_set(&entry->usecnt, 1);
+	entry->type = CMD_CALLBACK;
+	entry->cmd = cmd;
+	entry->param0 = param0;
+	entry->callback = callback;
+	entry->context = context;
+
+	spin_lock_irqsave(&local->cmdlock, flags);
+	issue = list_empty(&local->cmd_queue);
+	if (issue)
+		entry->issuing = 1;
+	list_add_tail(&entry->list, &local->cmd_queue);
+	local->cmd_queue_len++;
+	spin_unlock_irqrestore(&local->cmdlock, flags);
+
+	if (issue && hfa384x_cmd_issue(dev, entry))
+		ret = -ETIMEDOUT;
+	else
+		ret = 0;
+
+	hostap_cmd_queue_free(local, entry, ret);
+
+	return ret;
+}
+
+
+/**
+ * __hfa384x_cmd_no_wait - Issue a Prism2 command (private)
+ * @dev: pointer to net_device
+ * @cmd: Prism2 command code (HFA384X_CMD_CODE_*)
+ * @param0: value for Param0 register
+ * @io_debug_num: I/O debug error number
+ *
+ * Shared helper function for hfa384x_cmd_wait() and hfa384x_cmd_no_wait().
+ */
+static int __hfa384x_cmd_no_wait(struct net_device *dev, u16 cmd, u16 param0,
+				 int io_debug_num)
+{
+	int tries;
+	u16 reg;
+
+	/* wait until busy bit is clear; this should always be clear since the
+	 * commands are serialized */
+	tries = HFA384X_CMD_BUSY_TIMEOUT;
+	while (HFA384X_INW(HFA384X_CMD_OFF) & HFA384X_CMD_BUSY && tries > 0) {
+		tries--;
+		udelay(1);
+	}
+	if (tries == 0) {
+		reg = HFA384X_INW(HFA384X_CMD_OFF);
+		prism2_io_debug_error(dev, io_debug_num);
+		printk(KERN_DEBUG "%s: __hfa384x_cmd_no_wait(%d) - timeout - "
+		       "reg=0x%04x\n", dev->name, io_debug_num, reg);
+		return -ETIMEDOUT;
+	}
+
+	/* write command */
+	HFA384X_OUTW(param0, HFA384X_PARAM0_OFF);
+	HFA384X_OUTW(cmd, HFA384X_CMD_OFF);
+
+	return 0;
+}
+
+
+/**
+ * hfa384x_cmd_wait - Issue a Prism2 command and busy wait for completion
+ * @dev: pointer to net_device
+ * @cmd: Prism2 command code (HFA384X_CMD_CODE_*)
+ * @param0: value for Param0 register
+ */
+static int hfa384x_cmd_wait(struct net_device *dev, u16 cmd, u16 param0)
+{
+	int res, tries;
+	u16 reg;
+
+	res = __hfa384x_cmd_no_wait(dev, cmd, param0, 4);
+	if (res)
+		return res;
+
+	/* wait for command completion */
+	if ((cmd & HFA384X_CMDCODE_MASK) == HFA384X_CMDCODE_DOWNLOAD)
+		tries = HFA384X_DL_COMPL_TIMEOUT;
+	else
+		tries = HFA384X_CMD_COMPL_TIMEOUT;
+
+	while (!(HFA384X_INW(HFA384X_EVSTAT_OFF) & HFA384X_EV_CMD) &&
+	       tries > 0) {
+		tries--;
+		udelay(10);
+	}
+	if (tries == 0) {
+		reg = HFA384X_INW(HFA384X_EVSTAT_OFF);
+		prism2_io_debug_error(dev, 5);
+		printk(KERN_DEBUG "%s: hfa384x_cmd_wait - timeout2 - "
+		       "reg=0x%04x\n", dev->name, reg);
+		return -ETIMEDOUT;
+	}
+
+	res = (HFA384X_INW(HFA384X_STATUS_OFF) &
+	       (BIT(14) | BIT(13) | BIT(12) | BIT(11) | BIT(10) | BIT(9) |
+	        BIT(8))) >> 8;
+#ifndef final_version
+	if (res) {
+		printk(KERN_DEBUG "%s: CMD=0x%04x => res=0x%02x\n",
+		       dev->name, cmd, res);
+	}
+#endif
+
+	HFA384X_OUTW(HFA384X_EV_CMD, HFA384X_EVACK_OFF);
+
+	return res;
+}
+
+
+/**
+ * hfa384x_cmd_no_wait - Issue a Prism2 command; do not wait for completion
+ * @dev: pointer to net_device
+ * @cmd: Prism2 command code (HFA384X_CMD_CODE_*)
+ * @param0: value for Param0 register
+ */
+static inline int hfa384x_cmd_no_wait(struct net_device *dev, u16 cmd,
+				      u16 param0)
+{
+	return __hfa384x_cmd_no_wait(dev, cmd, param0, 6);
+}
+
+
+/**
+ * prism2_cmd_ev - Prism2 command completion event handler
+ * @dev: pointer to net_device
+ *
+ * Interrupt handler for command completion events. Called by the main
+ * interrupt handler in hardware IRQ context. Read Resp0 and status registers
+ * from the hardware and ACK the event. Depending on the issued command type
+ * either wake up the sleeping process that is waiting for command completion
+ * or call the callback function. Issue the next command, if one is pending.
+ */
+static void prism2_cmd_ev(struct net_device *dev)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	struct hostap_cmd_queue *entry = NULL;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	spin_lock(&local->cmdlock);
+	if (!list_empty(&local->cmd_queue)) {
+		entry = list_entry(local->cmd_queue.next,
+				   struct hostap_cmd_queue, list);
+		atomic_inc(&entry->usecnt);
+		list_del_init(&entry->list);
+		local->cmd_queue_len--;
+
+		if (!entry->issued) {
+			printk(KERN_DEBUG "%s: Command completion event, but "
+			       "cmd not issued\n", dev->name);
+			__hostap_cmd_queue_free(local, entry, 1);
+			entry = NULL;
+		}
+	}
+	spin_unlock(&local->cmdlock);
+
+	if (!entry) {
+		HFA384X_OUTW(HFA384X_EV_CMD, HFA384X_EVACK_OFF);
+		printk(KERN_DEBUG "%s: Command completion event, but no "
+		       "pending commands\n", dev->name);
+		return;
+	}
+
+	entry->resp0 = HFA384X_INW(HFA384X_RESP0_OFF);
+	entry->res = (HFA384X_INW(HFA384X_STATUS_OFF) &
+		      (BIT(14) | BIT(13) | BIT(12) | BIT(11) | BIT(10) |
+		       BIT(9) | BIT(8))) >> 8;
+	HFA384X_OUTW(HFA384X_EV_CMD, HFA384X_EVACK_OFF);
+
+	/* TODO: rest of the CmdEv handling could be moved to tasklet */
+	if (entry->type == CMD_SLEEP) {
+		entry->type = CMD_COMPLETED;
+		wake_up_interruptible(&entry->compl);
+	} else if (entry->type == CMD_CALLBACK) {
+		if (entry->callback)
+			entry->callback(dev, entry->context, entry->resp0,
+					entry->res);
+	} else {
+		printk(KERN_DEBUG "%s: Invalid command completion type %d\n",
+		       dev->name, entry->type);
+	}
+	hostap_cmd_queue_free(local, entry, 1);
+
+	/* issue next command, if pending */
+	entry = NULL;
+	spin_lock(&local->cmdlock);
+	if (!list_empty(&local->cmd_queue)) {
+		entry = list_entry(local->cmd_queue.next,
+				   struct hostap_cmd_queue, list);
+		if (entry->issuing) {
+			/* hfa384x_cmd() has already started issuing this
+			 * command, so do not start here */
+			entry = NULL;
+		}
+		if (entry)
+			atomic_inc(&entry->usecnt);
+	}
+	spin_unlock(&local->cmdlock);
+
+	if (entry) {
+		/* issue next command; if command issuing fails, remove the
+		 * entry from cmd_queue */
+		int res = hfa384x_cmd_issue(dev, entry);
+		spin_lock(&local->cmdlock);
+		__hostap_cmd_queue_free(local, entry, res);
+		spin_unlock(&local->cmdlock);
+	}
+}
+
+
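+/* Busy wait until the BAP offset register's busy bit clears; returns 0 when
+ * the BAP is ready and the (non-zero) busy bit if the wait timed out. */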
+static inline int hfa384x_wait_offset(struct net_device *dev, u16 o_off)
+{
+	int tries = HFA384X_BAP_BUSY_TIMEOUT;
+	int res = HFA384X_INW(o_off) & HFA384X_OFFSET_BUSY;
+
+	while (res && tries > 0) {
+		tries--;
+		udelay(1);
+		res = HFA384X_INW(o_off) & HFA384X_OFFSET_BUSY;
+	}
+	return res;
+}
+
+
+/* Offset must be even */
+static int hfa384x_setup_bap(struct net_device *dev, u16 bap, u16 id,
+			     int offset)
+{
+	u16 o_off, s_off;
+	int ret = 0;
+
+	if (offset % 2 || bap > 1)
+		return -EINVAL;
+
+	if (bap == BAP1) {
+		o_off = HFA384X_OFFSET1_OFF;
+		s_off = HFA384X_SELECT1_OFF;
+	} else {
+		o_off = HFA384X_OFFSET0_OFF;
+		s_off = HFA384X_SELECT0_OFF;
+	}
+
+	if (hfa384x_wait_offset(dev, o_off)) {
+		prism2_io_debug_error(dev, 7);
+		printk(KERN_DEBUG "%s: hfa384x_setup_bap - timeout before\n",
+		       dev->name);
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	HFA384X_OUTW(id, s_off);
+	HFA384X_OUTW(offset, o_off);
+
+	if (hfa384x_wait_offset(dev, o_off)) {
+		prism2_io_debug_error(dev, 8);
+		printk(KERN_DEBUG "%s: hfa384x_setup_bap - timeout after\n",
+		       dev->name);
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+#ifndef final_version
+	if (HFA384X_INW(o_off) & HFA384X_OFFSET_ERR) {
+		prism2_io_debug_error(dev, 9);
+		printk(KERN_DEBUG "%s: hfa384x_setup_bap - offset error "
+		       "(%d,0x%04x,%d); reg=0x%04x\n",
+		       dev->name, bap, id, offset, HFA384X_INW(o_off));
+		ret = -EINVAL;
+	}
+#endif
+
+ out:
+	return ret;
+}
+
+
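+/* Read a RID: issue the Access command and copy the RID header and data out
+ * through BAP0. Returns the number of data bytes read on success or a
+ * negative error code on failure. */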
+static int hfa384x_get_rid(struct net_device *dev, u16 rid, void *buf, int len,
+			   int exact_len)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	int res, rlen = 0;
+	struct hfa384x_rid_hdr rec;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	if (local->no_pri) {
+		printk(KERN_DEBUG "%s: cannot get RID %04x (len=%d) - no PRI "
+		       "f/w\n", dev->name, rid, len);
+		return -ENOTTY; /* Well.. not really correct, but return
+				 * something unique enough.. */
+	}
+
+	if ((local->func->card_present && !local->func->card_present(local)) ||
+	    local->hw_downloading)
+		return -ENODEV;
+
+	res = down_interruptible(&local->rid_bap_sem);
+	if (res)
+		return res;
+
+	res = hfa384x_cmd(dev, HFA384X_CMDCODE_ACCESS, rid, NULL, NULL);
+	if (res) {
+		printk(KERN_DEBUG "%s: hfa384x_get_rid: CMDCODE_ACCESS failed "
+		       "(res=%d, rid=%04x, len=%d)\n",
+		       dev->name, res, rid, len);
+		up(&local->rid_bap_sem);
+		return res;
+	}
+
+	spin_lock_bh(&local->baplock);
+
+	res = hfa384x_setup_bap(dev, BAP0, rid, 0);
+	if (!res)
+		res = hfa384x_from_bap(dev, BAP0, &rec, sizeof(rec));
+
+	if (!res && le16_to_cpu(rec.len) == 0) {
+		/* RID not available */
+		res = -ENODATA;
+	}
+
+	rlen = (le16_to_cpu(rec.len) - 1) * 2;
+	if (!res && exact_len && rlen != len) {
+		printk(KERN_DEBUG "%s: hfa384x_get_rid - RID len mismatch: "
+		       "rid=0x%04x, len=%d (expected %d)\n",
+		       dev->name, rid, rlen, len);
+		res = -ENODATA;
+	}
+
+	if (!res)
+		res = hfa384x_from_bap(dev, BAP0, buf, len);
+
+	spin_unlock_bh(&local->baplock);
+	up(&local->rid_bap_sem);
+
+	if (res) {
+		if (res != -ENODATA)
+			printk(KERN_DEBUG "%s: hfa384x_get_rid (rid=%04x, "
+			       "len=%d) - failed - res=%d\n", dev->name, rid,
+			       len, res);
+		if (res == -ETIMEDOUT)
+			prism2_hw_reset(dev);
+		return res;
+	}
+
+	return rlen;
+}
+
+
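+/* Write a RID: copy the RID header and data in through BAP0 and commit the
+ * change with the Access/Write command. Returns 0 on success or a negative
+ * error code on failure. */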
+static int hfa384x_set_rid(struct net_device *dev, u16 rid, void *buf, int len)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	struct hfa384x_rid_hdr rec;
+	int res;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	if (local->no_pri) {
+		printk(KERN_DEBUG "%s: cannot set RID %04x (len=%d) - no PRI "
+		       "f/w\n", dev->name, rid, len);
+		return -ENOTTY; /* Well.. not really correct, but return
+				 * something unique enough.. */
+	}
+
+	if ((local->func->card_present && !local->func->card_present(local)) ||
+	    local->hw_downloading)
+		return -ENODEV;
+
+	rec.rid = cpu_to_le16(rid);
+	/* RID len in words and +1 for rec.rid */
+	rec.len = cpu_to_le16(len / 2 + len % 2 + 1);
+
+	res = down_interruptible(&local->rid_bap_sem);
+	if (res)
+		return res;
+
+	spin_lock_bh(&local->baplock);
+	res = hfa384x_setup_bap(dev, BAP0, rid, 0);
+	if (!res)
+		res = hfa384x_to_bap(dev, BAP0, &rec, sizeof(rec));
+	if (!res)
+		res = hfa384x_to_bap(dev, BAP0, buf, len);
+	spin_unlock_bh(&local->baplock);
+
+	if (res) {
+		printk(KERN_DEBUG "%s: hfa384x_set_rid (rid=%04x, len=%d) - "
+		       "failed - res=%d\n", dev->name, rid, len, res);
+		up(&local->rid_bap_sem);
+		return res;
+	}
+
+	res = hfa384x_cmd(dev, HFA384X_CMDCODE_ACCESS_WRITE, rid, NULL, NULL);
+	up(&local->rid_bap_sem);
+	if (res) {
+		printk(KERN_DEBUG "%s: hfa384x_set_rid: CMDCODE_ACCESS_WRITE "
+		       "failed (res=%d, rid=%04x, len=%d)\n",
+		       dev->name, res, rid, len);
+		/* reset the card if the write command timed out */
+		if (res == -ETIMEDOUT)
+			prism2_hw_reset(dev);
+		return res;
+	}
+
+	return res;
+}
+
+
+static void hfa384x_disable_interrupts(struct net_device *dev)
+{
+	/* disable interrupts and clear event status */
+	HFA384X_OUTW(0, HFA384X_INTEN_OFF);
+	HFA384X_OUTW(0xffff, HFA384X_EVACK_OFF);
+}
+
+
+static void hfa384x_enable_interrupts(struct net_device *dev)
+{
+	/* ack pending events and enable interrupts from selected events */
+	HFA384X_OUTW(0xffff, HFA384X_EVACK_OFF);
+	HFA384X_OUTW(HFA384X_EVENT_MASK, HFA384X_INTEN_OFF);
+}
+
+
+static void hfa384x_events_no_bap0(struct net_device *dev)
+{
+	HFA384X_OUTW(HFA384X_EVENT_MASK & ~HFA384X_BAP0_EVENTS,
+		     HFA384X_INTEN_OFF);
+}
+
+
+static void hfa384x_events_all(struct net_device *dev)
+{
+	HFA384X_OUTW(HFA384X_EVENT_MASK, HFA384X_INTEN_OFF);
+}
+
+
+static void hfa384x_events_only_cmd(struct net_device *dev)
+{
+	HFA384X_OUTW(HFA384X_EV_CMD, HFA384X_INTEN_OFF);
+}
+
+
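+/* Allocate a FID of the given length with the Allocate command and busy wait
+ * for the Alloc event. Returns the new FID, or 0xffff on failure. */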
+static u16 hfa384x_allocate_fid(struct net_device *dev, int len)
+{
+	u16 fid;
+	unsigned long delay;
+
+	/* FIX: this could be replaced with hfa384x_cmd() if the Alloc event
+	 * below were handled like the CmdCompl event (sleep here, wake up from
+	 * the interrupt handler) */
+	if (hfa384x_cmd_wait(dev, HFA384X_CMDCODE_ALLOC, len)) {
+		printk(KERN_DEBUG "%s: cannot allocate fid, len=%d\n",
+		       dev->name, len);
+		return 0xffff;
+	}
+
+	delay = jiffies + HFA384X_ALLOC_COMPL_TIMEOUT;
+	while (!(HFA384X_INW(HFA384X_EVSTAT_OFF) & HFA384X_EV_ALLOC) &&
+	       time_before(jiffies, delay))
+		yield();
+	if (!(HFA384X_INW(HFA384X_EVSTAT_OFF) & HFA384X_EV_ALLOC)) {
+		printk("%s: fid allocate, len=%d - timeout\n", dev->name, len);
+		return 0xffff;
+	}
+
+	fid = HFA384X_INW(HFA384X_ALLOCFID_OFF);
+	HFA384X_OUTW(HFA384X_EV_ALLOC, HFA384X_EVACK_OFF);
+
+	return fid;
+}
+
+
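+/* Disable and re-enable MAC port 0 so that configuration changes take effect.
+ * Also restores a non-default fragmentation threshold, since some STA
+ * firmware versions reset it during the enable command. */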
+static int prism2_reset_port(struct net_device *dev)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	int res;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	if (!local->dev_enabled)
+		return 0;
+
+	res = hfa384x_cmd(dev, HFA384X_CMDCODE_DISABLE, 0,
+			  NULL, NULL);
+	if (res)
+		printk(KERN_DEBUG "%s: reset port failed to disable port\n",
+		       dev->name);
+	else {
+		res = hfa384x_cmd(dev, HFA384X_CMDCODE_ENABLE, 0,
+				  NULL, NULL);
+		if (res)
+			printk(KERN_DEBUG "%s: reset port failed to enable "
+			       "port\n", dev->name);
+	}
+
+	/* It looks like at least some STA firmware versions reset
+	 * fragmentation threshold back to 2346 after enable command. Restore
+	 * the configured value, if it differs from this default. */
+	if (local->fragm_threshold != 2346 &&
+	    hostap_set_word(dev, HFA384X_RID_FRAGMENTATIONTHRESHOLD,
+			    local->fragm_threshold)) {
+		printk(KERN_DEBUG "%s: failed to restore fragmentation "
+		       "threshold (%d) after Port0 enable\n",
+		       dev->name, local->fragm_threshold);
+	}
+
+	return res;
+}
+
+
+static int prism2_get_version_info(struct net_device *dev, u16 rid,
+				   const char *txt)
+{
+	struct hfa384x_comp_ident comp;
+	struct hostap_interface *iface;
+	local_info_t *local;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	if (local->no_pri) {
+		/* PRI f/w not yet available - cannot read RIDs */
+		return -1;
+	}
+	if (hfa384x_get_rid(dev, rid, &comp, sizeof(comp), 1) < 0) {
+		printk(KERN_DEBUG "Could not get RID for component %s\n", txt);
+		return -1;
+	}
+
+	printk(KERN_INFO "%s: %s: id=0x%02x v%d.%d.%d\n", dev->name, txt,
+	       __le16_to_cpu(comp.id), __le16_to_cpu(comp.major),
+	       __le16_to_cpu(comp.minor), __le16_to_cpu(comp.variant));
+	return 0;
+}
+
+
+static int prism2_setup_rids(struct net_device *dev)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	u16 tmp;
+	int ret = 0;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	hostap_set_word(dev, HFA384X_RID_TICKTIME, 2000);
+
+	if (!local->fw_ap) {
+		tmp = hostap_get_porttype(local);
+		ret = hostap_set_word(dev, HFA384X_RID_CNFPORTTYPE, tmp);
+		if (ret) {
+			printk("%s: Port type setting to %d failed\n",
+			       dev->name, tmp);
+			goto fail;
+		}
+	}
+
+	/* Setting SSID to empty string seems to kill the card in Host AP mode
+	 */
+	if (local->iw_mode != IW_MODE_MASTER || local->essid[0] != '\0') {
+		ret = hostap_set_string(dev, HFA384X_RID_CNFOWNSSID,
+					local->essid);
+		if (ret) {
+			printk("%s: AP own SSID setting failed\n", dev->name);
+			goto fail;
+		}
+	}
+
+	ret = hostap_set_word(dev, HFA384X_RID_CNFMAXDATALEN,
+			      PRISM2_DATA_MAXLEN);
+	if (ret) {
+		printk("%s: MAC data length setting to %d failed\n",
+		       dev->name, PRISM2_DATA_MAXLEN);
+		goto fail;
+	}
+
+	if (hfa384x_get_rid(dev, HFA384X_RID_CHANNELLIST, &tmp, 2, 1) < 0) {
+		printk("%s: Channel list read failed\n", dev->name);
+		ret = -EINVAL;
+		goto fail;
+	}
+	local->channel_mask = __le16_to_cpu(tmp);
+
+	if (local->channel < 1 || local->channel > 14 ||
+	    !(local->channel_mask & (1 << (local->channel - 1)))) {
+		printk(KERN_WARNING "%s: Channel setting out of range "
+		       "(%d)!\n", dev->name, local->channel);
+		ret = -EBUSY;
+		goto fail;
+	}
+
+	ret = hostap_set_word(dev, HFA384X_RID_CNFOWNCHANNEL, local->channel);
+	if (ret) {
+		printk("%s: Channel setting to %d failed\n",
+		       dev->name, local->channel);
+		goto fail;
+	}
+
+	ret = hostap_set_word(dev, HFA384X_RID_CNFBEACONINT,
+			      local->beacon_int);
+	if (ret) {
+		printk("%s: Beacon interval setting to %d failed\n",
+		       dev->name, local->beacon_int);
+		/* this may fail with Symbol/Lucent firmware */
+		if (ret == -ETIMEDOUT)
+			goto fail;
+	}
+
+	ret = hostap_set_word(dev, HFA384X_RID_CNFOWNDTIMPERIOD,
+			      local->dtim_period);
+	if (ret) {
+		printk("%s: DTIM period setting to %d failed\n",
+		       dev->name, local->dtim_period);
+		/* this may fail with Symbol/Lucent firmware */
+		if (ret == -ETIMEDOUT)
+			goto fail;
+	}
+
+	ret = hostap_set_word(dev, HFA384X_RID_PROMISCUOUSMODE,
+			      local->is_promisc);
+	if (ret)
+		printk(KERN_INFO "%s: Setting promiscuous mode (%d) failed\n",
+		       dev->name, local->is_promisc);
+
+	if (!local->fw_ap) {
+		ret = hostap_set_string(dev, HFA384X_RID_CNFDESIREDSSID,
+					local->essid);
+		if (ret) {
+			printk("%s: Desired SSID setting failed\n", dev->name);
+			goto fail;
+		}
+	}
+
+	/* Setup TXRateControl, defaults to allow use of 1, 2, 5.5, and
+	 * 11 Mbps in automatic TX rate fallback and 1 and 2 Mbps as basic
+	 * rates */
+	if (local->tx_rate_control == 0) {
+		local->tx_rate_control =
+			HFA384X_RATES_1MBPS |
+			HFA384X_RATES_2MBPS |
+			HFA384X_RATES_5MBPS |
+			HFA384X_RATES_11MBPS;
+	}
+	if (local->basic_rates == 0)
+		local->basic_rates = HFA384X_RATES_1MBPS | HFA384X_RATES_2MBPS;
+
+	if (!local->fw_ap) {
+		ret = hostap_set_word(dev, HFA384X_RID_TXRATECONTROL,
+				      local->tx_rate_control);
+		if (ret) {
+			printk("%s: TXRateControl setting to %d failed\n",
+			       dev->name, local->tx_rate_control);
+			goto fail;
+		}
+
+		ret = hostap_set_word(dev, HFA384X_RID_CNFSUPPORTEDRATES,
+				      local->tx_rate_control);
+		if (ret) {
+			printk("%s: cnfSupportedRates setting to %d failed\n",
+			       dev->name, local->tx_rate_control);
+		}
+
+		ret = hostap_set_word(dev, HFA384X_RID_CNFBASICRATES,
+				      local->basic_rates);
+		if (ret) {
+			printk("%s: cnfBasicRates setting to %d failed\n",
+			       dev->name, local->basic_rates);
+		}
+
+		ret = hostap_set_word(dev, HFA384X_RID_CREATEIBSS, 1);
+		if (ret) {
+			printk("%s: Create IBSS setting to 1 failed\n",
+			       dev->name);
+		}
+	}
+
+	if (local->name_set)
+		(void) hostap_set_string(dev, HFA384X_RID_CNFOWNNAME,
+					 local->name);
+
+	if (hostap_set_encryption(local)) {
+		printk(KERN_INFO "%s: could not configure encryption\n",
+		       dev->name);
+	}
+
+	(void) hostap_set_antsel(local);
+
+	if (hostap_set_roaming(local)) {
+		printk(KERN_INFO "%s: could not set host roaming\n",
+		       dev->name);
+	}
+
+	if (local->sta_fw_ver >= PRISM2_FW_VER(1,6,3) &&
+	    hostap_set_word(dev, HFA384X_RID_CNFENHSECURITY, local->enh_sec))
+		printk(KERN_INFO "%s: cnfEnhSecurity setting to 0x%x failed\n",
+		       dev->name, local->enh_sec);
+
+	/* 32-bit tallies were added in STA f/w 0.8.0, but they were apparently
+	 * not working correctly (last seven counters report bogus values).
+	 * This has been fixed in 0.8.2, so enable 32-bit tallies only
+	 * beginning with that firmware version. Another bug fix for 32-bit
+	 * tallies in 1.4.0; should 16-bit tallies be used for some other
+	 * versions, too? */
+	if (local->sta_fw_ver >= PRISM2_FW_VER(0,8,2)) {
+		if (hostap_set_word(dev, HFA384X_RID_CNFTHIRTY2TALLY, 1)) {
+			printk(KERN_INFO "%s: cnfThirty2Tally setting "
+			       "failed\n", dev->name);
+			local->tallies32 = 0;
+		} else
+			local->tallies32 = 1;
+	} else
+		local->tallies32 = 0;
+
+	hostap_set_auth_algs(local);
+
+	if (hostap_set_word(dev, HFA384X_RID_FRAGMENTATIONTHRESHOLD,
+			    local->fragm_threshold)) {
+		printk(KERN_INFO "%s: setting FragmentationThreshold to %d "
+		       "failed\n", dev->name, local->fragm_threshold);
+	}
+
+	if (hostap_set_word(dev, HFA384X_RID_RTSTHRESHOLD,
+			    local->rts_threshold)) {
+		printk(KERN_INFO "%s: setting RTSThreshold to %d failed\n",
+		       dev->name, local->rts_threshold);
+	}
+
+	if (local->manual_retry_count >= 0 &&
+	    hostap_set_word(dev, HFA384X_RID_CNFALTRETRYCOUNT,
+			    local->manual_retry_count)) {
+		printk(KERN_INFO "%s: setting cnfAltRetryCount to %d failed\n",
+		       dev->name, local->manual_retry_count);
+	}
+
+	if (local->sta_fw_ver >= PRISM2_FW_VER(1,3,1) &&
+	    hfa384x_get_rid(dev, HFA384X_RID_CNFDBMADJUST, &tmp, 2, 1) == 2) {
+		local->rssi_to_dBm = le16_to_cpu(tmp);
+	}
+
+	if (local->sta_fw_ver >= PRISM2_FW_VER(1,7,0) && local->wpa &&
+	    hostap_set_word(dev, HFA384X_RID_SSNHANDLINGMODE, 1)) {
+		printk(KERN_INFO "%s: setting ssnHandlingMode to 1 failed\n",
+		       dev->name);
+	}
+
+	if (local->sta_fw_ver >= PRISM2_FW_VER(1,7,0) && local->generic_elem &&
+	    hfa384x_set_rid(dev, HFA384X_RID_GENERICELEMENT,
+			    local->generic_elem, local->generic_elem_len)) {
+		printk(KERN_INFO "%s: setting genericElement failed\n",
+		       dev->name);
+	}
+
+ fail:
+	return ret;
+}
+
+
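+/* First stage of card initialization: issue the Initialize command and wait
+ * for its completion event. Returns 0 on success, or 1 if no primary firmware
+ * seems to be present (local->no_pri is set in that case). */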
+static int prism2_hw_init(struct net_device *dev, int initial)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	int ret, first = 1;
+	unsigned long start, delay;
+
+	PDEBUG(DEBUG_FLOW, "prism2_hw_init()\n");
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	clear_bit(HOSTAP_BITS_TRANSMIT, &local->bits);
+
+ init:
+	/* initialize HFA 384x */
+	ret = hfa384x_cmd_no_wait(dev, HFA384X_CMDCODE_INIT, 0);
+	if (ret) {
+		printk(KERN_INFO "%s: first command failed - assuming card "
+		       "does not have primary firmware\n", dev_info);
+	}
+
+	if (first && (HFA384X_INW(HFA384X_EVSTAT_OFF) & HFA384X_EV_CMD)) {
+		/* EvStat has Cmd bit set in some cases, so retry once if no
+		 * wait was needed */
+		HFA384X_OUTW(HFA384X_EV_CMD, HFA384X_EVACK_OFF);
+		printk(KERN_DEBUG "%s: init command completed too quickly - "
+		       "retrying\n", dev->name);
+		first = 0;
+		goto init;
+	}
+
+	start = jiffies;
+	delay = jiffies + HFA384X_INIT_TIMEOUT;
+	while (!(HFA384X_INW(HFA384X_EVSTAT_OFF) & HFA384X_EV_CMD) &&
+	       time_before(jiffies, delay))
+		yield();
+	if (!(HFA384X_INW(HFA384X_EVSTAT_OFF) & HFA384X_EV_CMD)) {
+		printk(KERN_DEBUG "%s: assuming no Primary image in "
+		       "flash - card initialization not completed\n",
+		       dev_info);
+		local->no_pri = 1;
+#ifdef PRISM2_DOWNLOAD_SUPPORT
+		if (local->sram_type == -1)
+			local->sram_type = prism2_get_ram_size(local);
+#endif /* PRISM2_DOWNLOAD_SUPPORT */
+		return 1;
+	}
+	local->no_pri = 0;
+	printk(KERN_DEBUG "prism2_hw_init: initialized in %lu ms\n",
+	       (jiffies - start) * 1000 / HZ);
+	HFA384X_OUTW(HFA384X_EV_CMD, HFA384X_EVACK_OFF);
+	return 0;
+}
+
+
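+/* Second stage of card initialization: verify register access, read version
+ * RIDs, allocate TX FIDs, and configure operational RIDs. Returns 0 on
+ * success, 1 on failure. */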
+static int prism2_hw_init2(struct net_device *dev, int initial)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	int i;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+#ifdef PRISM2_DOWNLOAD_SUPPORT
+	kfree(local->pda);
+	if (local->no_pri)
+		local->pda = NULL;
+	else
+		local->pda = prism2_read_pda(dev);
+#endif /* PRISM2_DOWNLOAD_SUPPORT */
+
+	hfa384x_disable_interrupts(dev);
+
+#ifndef final_version
+	HFA384X_OUTW(HFA384X_MAGIC, HFA384X_SWSUPPORT0_OFF);
+	if (HFA384X_INW(HFA384X_SWSUPPORT0_OFF) != HFA384X_MAGIC) {
+		printk("SWSUPPORT0 write/read failed: %04X != %04X\n",
+		       HFA384X_INW(HFA384X_SWSUPPORT0_OFF), HFA384X_MAGIC);
+		goto failed;
+	}
+#endif
+
+	if (initial || local->pri_only) {
+		hfa384x_events_only_cmd(dev);
+		/* get card version information */
+		if (prism2_get_version_info(dev, HFA384X_RID_NICID, "NIC") ||
+		    prism2_get_version_info(dev, HFA384X_RID_PRIID, "PRI")) {
+			hfa384x_disable_interrupts(dev);
+			goto failed;
+		}
+
+		if (prism2_get_version_info(dev, HFA384X_RID_STAID, "STA")) {
+			printk(KERN_DEBUG "%s: Failed to read STA f/w version "
+			       "- only Primary f/w present\n", dev->name);
+			local->pri_only = 1;
+			return 0;
+		}
+		local->pri_only = 0;
+		hfa384x_disable_interrupts(dev);
+	}
+
+	/* FIX: could convert allocate_fid to use sleeping CmdCompl wait and
+	 * enable interrupts before this. This would also require some sort of
+	 * sleeping AllocEv waiting */
+
+	/* allocate TX FIDs */
+	local->txfid_len = PRISM2_TXFID_LEN;
+	for (i = 0; i < PRISM2_TXFID_COUNT; i++) {
+		local->txfid[i] = hfa384x_allocate_fid(dev, local->txfid_len);
+		if (local->txfid[i] == 0xffff && local->txfid_len > 1600) {
+			local->txfid[i] = hfa384x_allocate_fid(dev, 1600);
+			if (local->txfid[i] != 0xffff) {
+				printk(KERN_DEBUG "%s: Using shorter TX FID "
+				       "(1600 bytes)\n", dev->name);
+				local->txfid_len = 1600;
+			}
+		}
+		if (local->txfid[i] == 0xffff)
+			goto failed;
+		local->intransmitfid[i] = PRISM2_TXFID_EMPTY;
+	}
+
+	hfa384x_events_only_cmd(dev);
+
+	if (initial) {
+		struct list_head *ptr;
+		prism2_check_sta_fw_version(local);
+
+		if (hfa384x_get_rid(dev, HFA384X_RID_CNFOWNMACADDR,
+				    &dev->dev_addr, 6, 1) < 0) {
+			printk("%s: could not get own MAC address\n",
+			       dev->name);
+		}
+		list_for_each(ptr, &local->hostap_interfaces) {
+			iface = list_entry(ptr, struct hostap_interface, list);
+			memcpy(iface->dev->dev_addr, dev->dev_addr, ETH_ALEN);
+		}
+	} else if (local->fw_ap)
+		prism2_check_sta_fw_version(local);
+
+	prism2_setup_rids(dev);
+
+	/* MAC is now configured, but port 0 is not yet enabled */
+	return 0;
+
+ failed:
+	if (!local->no_pri)
+		printk(KERN_WARNING "%s: Initialization failed\n", dev_info);
+	return 1;
+}
+
+
+static int prism2_hw_enable(struct net_device *dev, int initial)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	int was_resetting;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+	was_resetting = local->hw_resetting;
+
+	if (hfa384x_cmd(dev, HFA384X_CMDCODE_ENABLE, 0, NULL, NULL)) {
+		printk("%s: MAC port 0 enabling failed\n", dev->name);
+		return 1;
+	}
+
+	local->hw_ready = 1;
+	local->hw_reset_tries = 0;
+	local->hw_resetting = 0;
+	hfa384x_enable_interrupts(dev);
+
+	/* at least D-Link DWL-650 seems to require additional port reset
+	 * before it starts acting as an AP, so reset port automatically
+	 * here just in case */
+	if (initial && prism2_reset_port(dev)) {
+		printk("%s: MAC port 0 resetting failed\n", dev->name);
+		return 1;
+	}
+
+	if (was_resetting && netif_queue_stopped(dev)) {
+		/* If hw_reset() was called during a pending transmit, the
+		 * netif queue was stopped. Wake it up now since the wlan card
+		 * has been reset. */
+		netif_wake_queue(dev);
+	}
+
+	return 0;
+}
+
+
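+/* Run both initialization stages and, when a secondary (station) firmware
+ * image is loaded and at least one netdevice is up, enable MAC port 0. */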
+static int prism2_hw_config(struct net_device *dev, int initial)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	if (local->hw_downloading)
+		return 1;
+
+	if (prism2_hw_init(dev, initial)) {
+		return local->no_pri ? 0 : 1;
+	}
+
+	if (prism2_hw_init2(dev, initial))
+		return 1;
+
+	/* Enable firmware if secondary image is loaded and at least one of the
+	 * netdevices is up. */
+	if (!local->pri_only &&
+	    (initial == 0 || (initial == 2 && local->num_dev_open > 0))) {
+		if (!local->dev_enabled)
+			prism2_callback(local, PRISM2_CALLBACK_ENABLE);
+		local->dev_enabled = 1;
+		return prism2_hw_enable(dev, initial);
+	}
+
+	return 0;
+}
+
+
+static void prism2_hw_shutdown(struct net_device *dev, int no_disable)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	/* Allow only command completion events during disable */
+	hfa384x_events_only_cmd(dev);
+
+	local->hw_ready = 0;
+	if (local->dev_enabled)
+		prism2_callback(local, PRISM2_CALLBACK_DISABLE);
+	local->dev_enabled = 0;
+
+	if (local->func->card_present && !local->func->card_present(local)) {
+		printk(KERN_DEBUG "%s: card already removed or not configured "
+		       "during shutdown\n", dev->name);
+		return;
+	}
+
+	if ((no_disable & HOSTAP_HW_NO_DISABLE) == 0 &&
+	    hfa384x_cmd(dev, HFA384X_CMDCODE_DISABLE, 0, NULL, NULL))
+		printk(KERN_WARNING "%s: Shutdown failed\n", dev_info);
+
+	hfa384x_disable_interrupts(dev);
+
+	if (no_disable & HOSTAP_HW_ENABLE_CMDCOMPL)
+		hfa384x_events_only_cmd(dev);
+	else
+		prism2_clear_cmd_queue(local);
+}
+
+
+static void prism2_hw_reset(struct net_device *dev)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+
+#if 0
+	static long last_reset = 0;
+
+	/* do not reset card more than once per second to avoid ending up in a
+	 * busy loop resetting the card */
+	if (time_before_eq(jiffies, last_reset + HZ))
+		return;
+	last_reset = jiffies;
+#endif
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	if (in_interrupt()) {
+		printk(KERN_DEBUG "%s: driver bug - prism2_hw_reset() called "
+		       "in interrupt context\n", dev->name);
+		return;
+	}
+
+	if (local->hw_downloading)
+		return;
+
+	if (local->hw_resetting) {
+		printk(KERN_WARNING "%s: %s: already resetting card - "
+		       "ignoring reset request\n", dev_info, dev->name);
+		return;
+	}
+
+	local->hw_reset_tries++;
+	if (local->hw_reset_tries > 10) {
+		printk(KERN_WARNING "%s: too many reset tries, skipping\n",
+		       dev->name);
+		return;
+	}
+
+	printk(KERN_WARNING "%s: %s: resetting card\n", dev_info, dev->name);
+	hfa384x_disable_interrupts(dev);
+	local->hw_resetting = 1;
+	if (local->func->cor_sreset) {
+		/* Host system seems to hang in some cases with high traffic
+		 * load or shared interrupts during COR sreset. Disable shared
+		 * interrupts during reset to avoid these crashes. COR sreset
+		 * takes quite a long time, so it is unfortunate that this
+		 * seems to be needed. Anyway, I do not know of any better way
+		 * of avoiding the crash. */
+		disable_irq(dev->irq);
+		local->func->cor_sreset(local);
+		enable_irq(dev->irq);
+	}
+	prism2_hw_shutdown(dev, 1);
+	prism2_hw_config(dev, 0);
+	local->hw_resetting = 0;
+
+#ifdef PRISM2_DOWNLOAD_SUPPORT
+	if (local->dl_pri) {
+		printk(KERN_DEBUG "%s: persistent download of primary "
+		       "firmware\n", dev->name);
+		if (prism2_download_genesis(local, local->dl_pri) < 0)
+			printk(KERN_WARNING "%s: download (PRI) failed\n",
+			       dev->name);
+	}
+
+	if (local->dl_sec) {
+		printk(KERN_DEBUG "%s: persistent download of secondary "
+		       "firmware\n", dev->name);
+		if (prism2_download_volatile(local, local->dl_sec) < 0)
+			printk(KERN_WARNING "%s: download (SEC) failed\n",
+			       dev->name);
+	}
+#endif /* PRISM2_DOWNLOAD_SUPPORT */
+
+	/* TODO: restore beacon TIM bits for STAs that have buffered frames */
+}
+
+
+static void prism2_schedule_reset(local_info_t *local)
+{
+	schedule_work(&local->reset_queue);
+}
+
+
+/* Called only as scheduled task after noticing card timeout in interrupt
+ * context */
+static void handle_reset_queue(void *data)
+{
+	local_info_t *local = (local_info_t *) data;
+
+	printk(KERN_DEBUG "%s: scheduled card reset\n", local->dev->name);
+	prism2_hw_reset(local->dev);
+
+	if (netif_queue_stopped(local->dev)) {
+		int i;
+
+		for (i = 0; i < PRISM2_TXFID_COUNT; i++)
+			if (local->intransmitfid[i] == PRISM2_TXFID_EMPTY) {
+				PDEBUG(DEBUG_EXTRA, "prism2_tx_timeout: "
+				       "wake up queue\n");
+				netif_wake_queue(local->dev);
+				break;
+			}
+	}
+}
+
+
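+/* Find a free TX FID with a round-robin scan starting from next_txfid and
+ * reserve it. Returns the index, or -1 (and counts the frame as dropped) if
+ * all TX FIDs are in use. */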
+static int prism2_get_txfid_idx(local_info_t *local)
+{
+	int idx, end;
+	unsigned long flags;
+
+	spin_lock_irqsave(&local->txfidlock, flags);
+	end = idx = local->next_txfid;
+	do {
+		if (local->intransmitfid[idx] == PRISM2_TXFID_EMPTY) {
+			local->intransmitfid[idx] = PRISM2_TXFID_RESERVED;
+			spin_unlock_irqrestore(&local->txfidlock, flags);
+			return idx;
+		}
+		idx++;
+		if (idx >= PRISM2_TXFID_COUNT)
+			idx = 0;
+	} while (idx != end);
+	spin_unlock_irqrestore(&local->txfidlock, flags);
+
+	PDEBUG(DEBUG_EXTRA2, "prism2_get_txfid_idx: no room in txfid buf: "
+	       "packet dropped\n");
+	local->stats.tx_dropped++;
+
+	return -1;
+}
+
+
+/* Called only from hardware IRQ */
+static void prism2_transmit_cb(struct net_device *dev, long context,
+			       u16 resp0, u16 res)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	int idx = (int) context;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	if (res) {
+		printk(KERN_DEBUG "%s: prism2_transmit_cb - res=0x%02x\n",
+		       dev->name, res);
+		return;
+	}
+
+	if (idx < 0 || idx >= PRISM2_TXFID_COUNT) {
+		printk(KERN_DEBUG "%s: prism2_transmit_cb called with invalid "
+		       "idx=%d\n", dev->name, idx);
+		return;
+	}
+
+	if (!test_and_clear_bit(HOSTAP_BITS_TRANSMIT, &local->bits)) {
+		printk(KERN_DEBUG "%s: driver bug: prism2_transmit_cb called "
+		       "with no pending transmit\n", dev->name);
+	}
+
+	if (netif_queue_stopped(dev)) {
+		/* ready for next TX, so wake up queue that was stopped in
+		 * prism2_transmit() */
+		netif_wake_queue(dev);
+	}
+
+	spin_lock(&local->txfidlock);
+
+	/* With reclaim, Resp0 contains new txfid for transmit; the old txfid
+	 * will be automatically allocated for the next TX frame */
+	local->intransmitfid[idx] = resp0;
+
+	PDEBUG(DEBUG_FID, "%s: prism2_transmit_cb: txfid[%d]=0x%04x, "
+	       "resp0=0x%04x, transmit_txfid=0x%04x\n",
+	       dev->name, idx, local->txfid[idx],
+	       resp0, local->intransmitfid[local->next_txfid]);
+
+	idx++;
+	if (idx >= PRISM2_TXFID_COUNT)
+		idx = 0;
+	local->next_txfid = idx;
+
+	/* check if all TX buffers are occupied */
+	do {
+		if (local->intransmitfid[idx] == PRISM2_TXFID_EMPTY) {
+			spin_unlock(&local->txfidlock);
+			return;
+		}
+		idx++;
+		if (idx >= PRISM2_TXFID_COUNT)
+			idx = 0;
+	} while (idx != local->next_txfid);
+	spin_unlock(&local->txfidlock);
+
+	/* no empty TX buffers, stop queue */
+	netif_stop_queue(dev);
+}
+
+
+/* Called only from software IRQ if PCI bus master is not used (with bus master
+ * this can be called both from software and hardware IRQ) */
+static int prism2_transmit(struct net_device *dev, int idx)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	int res;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	/* The driver tries to stop netif queue so that there would not be
+	 * more than one attempt to transmit frames going on; check that this
+	 * is really the case */
+
+	if (test_and_set_bit(HOSTAP_BITS_TRANSMIT, &local->bits)) {
+		printk(KERN_DEBUG "%s: driver bug - prism2_transmit() called "
+		       "when previous TX was pending\n", dev->name);
+		return -1;
+	}
+
+	/* stop the queue for the time that transmit is pending */
+	netif_stop_queue(dev);
+
+	/* transmit packet */
+	res = hfa384x_cmd_callback(
+		dev,
+		HFA384X_CMDCODE_TRANSMIT | HFA384X_CMD_TX_RECLAIM,
+		local->txfid[idx],
+		prism2_transmit_cb, (long) idx);
+
+	if (res) {
+		struct net_device_stats *stats;
+		printk(KERN_DEBUG "%s: prism2_transmit: CMDCODE_TRANSMIT "
+		       "failed (res=%d)\n", dev->name, res);
+		stats = hostap_get_stats(dev);
+		stats->tx_dropped++;
+		netif_wake_queue(dev);
+		return -1;
+	}
+	dev->trans_start = jiffies;
+
+	/* Since we did not wait for command completion, the card continues
+	 * processing in the background and we will finish handling when the
+	 * command completion event is handled (prism2_cmd_ev() function) */
+
+	return 0;
+}
+
+
+/* Send IEEE 802.11 frame (convert the header into Prism2 TX descriptor and
+ * send the payload with this descriptor) */
+/* Called only from software IRQ */
+static int prism2_tx_80211(struct sk_buff *skb, struct net_device *dev)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	struct hfa384x_tx_frame txdesc;
+	struct hostap_skb_tx_data *meta;
+	int hdr_len, data_len, idx, res, ret = -1;
+	u16 tx_control, fc;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	meta = (struct hostap_skb_tx_data *) skb->cb;
+
+	prism2_callback(local, PRISM2_CALLBACK_TX_START);
+
+	if ((local->func->card_present && !local->func->card_present(local)) ||
+	    !local->hw_ready || local->hw_downloading || local->pri_only) {
+		if (net_ratelimit()) {
+			printk(KERN_DEBUG "%s: prism2_tx_80211: hw not ready -"
+			       " skipping\n", dev->name);
+		}
+		goto fail;
+	}
+
+	memset(&txdesc, 0, sizeof(txdesc));
+
+	/* skb->data starts with txdesc->frame_control */
+	hdr_len = 24;
+	memcpy(&txdesc.frame_control, skb->data, hdr_len);
+	fc = le16_to_cpu(txdesc.frame_control);
+	if (WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA &&
+	    (fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS) &&
+	    skb->len >= 30) {
+		/* Addr4 */
+		memcpy(txdesc.addr4, skb->data + hdr_len, ETH_ALEN);
+		hdr_len += ETH_ALEN;
+	}
+
+	tx_control = local->tx_control;
+	if (meta->tx_cb_idx) {
+		tx_control |= HFA384X_TX_CTRL_TX_OK;
+		txdesc.sw_support = cpu_to_le16(meta->tx_cb_idx);
+	}
+	txdesc.tx_control = cpu_to_le16(tx_control);
+	txdesc.tx_rate = meta->rate;
+
+	data_len = skb->len - hdr_len;
+	txdesc.data_len = cpu_to_le16(data_len);
+	txdesc.len = cpu_to_be16(data_len);
+
+	idx = prism2_get_txfid_idx(local);
+	if (idx < 0)
+		goto fail;
+
+	if (local->frame_dump & PRISM2_DUMP_TX_HDR)
+		hostap_dump_tx_header(dev->name, &txdesc);
+
+	spin_lock(&local->baplock);
+	res = hfa384x_setup_bap(dev, BAP0, local->txfid[idx], 0);
+
+	if (!res)
+		res = hfa384x_to_bap(dev, BAP0, &txdesc, sizeof(txdesc));
+	if (!res)
+		res = hfa384x_to_bap(dev, BAP0, skb->data + hdr_len,
+				     skb->len - hdr_len);
+	spin_unlock(&local->baplock);
+
+	if (!res)
+		res = prism2_transmit(dev, idx);
+	if (res) {
+		printk(KERN_DEBUG "%s: prism2_tx_80211 - to BAP0 failed\n",
+		       dev->name);
+		local->intransmitfid[idx] = PRISM2_TXFID_EMPTY;
+		schedule_work(&local->reset_queue);
+		goto fail;
+	}
+
+	ret = 0;
+
+fail:
+	prism2_callback(local, PRISM2_CALLBACK_TX_END);
+	return ret;
+}
+
+
+/* Some SMP systems have reported a number of odd errors with hostap_pci: the
+ * fid register has changed values between consecutive reads for an unknown
+ * reason. This should really not happen, so more debugging is needed. This
+ * test version is a bit slower, but it will detect most such register changes
+ * and will try to get the correct fid eventually. */
+#define EXTRA_FID_READ_TESTS
+
+static inline u16 prism2_read_fid_reg(struct net_device *dev, u16 reg)
+{
+#ifdef EXTRA_FID_READ_TESTS
+	u16 val, val2, val3;
+	int i;
+
+	for (i = 0; i < 10; i++) {
+		val = HFA384X_INW(reg);
+		val2 = HFA384X_INW(reg);
+		val3 = HFA384X_INW(reg);
+
+		if (val == val2 && val == val3)
+			return val;
+
+		printk(KERN_DEBUG "%s: detected fid change (try=%d, reg=%04x):"
+		       " %04x %04x %04x\n",
+		       dev->name, i, reg, val, val2, val3);
+		if ((val == val2 || val == val3) && val != 0)
+			return val;
+		if (val2 == val3 && val2 != 0)
+			return val2;
+	}
+	printk(KERN_WARNING "%s: Uhhuh.. could not read good fid from reg "
+	       "%04x (%04x %04x %04x)\n", dev->name, reg, val, val2, val3);
+	return val;
+#else /* EXTRA_FID_READ_TESTS */
+	return HFA384X_INW(reg);
+#endif /* EXTRA_FID_READ_TESTS */
+}
+
+
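+/* Receive path: read the RX descriptor and frame data from the RX FID through
+ * BAP0, queue the resulting skb to rx_list, and let the RX tasklet finish the
+ * processing. */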
+/* Called only as a tasklet (software IRQ) */
+static void prism2_rx(local_info_t *local)
+{
+	struct net_device *dev = local->dev;
+	int res, rx_pending = 0;
+	u16 len, hdr_len, rxfid, status, macport;
+	struct net_device_stats *stats;
+	struct hfa384x_rx_frame rxdesc;
+	struct sk_buff *skb = NULL;
+
+	prism2_callback(local, PRISM2_CALLBACK_RX_START);
+	stats = hostap_get_stats(dev);
+
+	rxfid = prism2_read_fid_reg(dev, HFA384X_RXFID_OFF);
+#ifndef final_version
+	if (rxfid == 0) {
+		rxfid = HFA384X_INW(HFA384X_RXFID_OFF);
+		printk(KERN_DEBUG "prism2_rx: rxfid=0 (next 0x%04x)\n",
+		       rxfid);
+		if (rxfid == 0) {
+			schedule_work(&local->reset_queue);
+			goto rx_dropped;
+		}
+		/* try to continue with the new rxfid value */
+	}
+#endif
+
+	spin_lock(&local->baplock);
+	res = hfa384x_setup_bap(dev, BAP0, rxfid, 0);
+	if (!res)
+		res = hfa384x_from_bap(dev, BAP0, &rxdesc, sizeof(rxdesc));
+
+	if (res) {
+		spin_unlock(&local->baplock);
+		printk(KERN_DEBUG "%s: copy from BAP0 failed %d\n", dev->name,
+		       res);
+		if (res == -ETIMEDOUT) {
+			schedule_work(&local->reset_queue);
+		}
+		goto rx_dropped;
+	}
+
+	len = le16_to_cpu(rxdesc.data_len);
+	hdr_len = sizeof(rxdesc);
+	status = le16_to_cpu(rxdesc.status);
+	macport = (status >> 8) & 0x07;
+
+	/* Drop frames with too large reported payload length. Monitor mode
+	 * seems to sometimes pass frames (e.g., ctrl::ack) with a length that
+	 * is negative when interpreted as signed, so also allow values
+	 * 65522 .. 65534 (-14 .. -2) for macport 7 */
+	if (len > PRISM2_DATA_MAXLEN + 8 /* WEP */) {
+		if (macport == 7 && local->iw_mode == IW_MODE_MONITOR) {
+			if (len >= (u16) -14) {
+				hdr_len -= 65535 - len;
+				hdr_len--;
+			}
+			len = 0;
+		} else {
+			spin_unlock(&local->baplock);
+			printk(KERN_DEBUG "%s: Received frame with invalid "
+			       "length 0x%04x\n", dev->name, len);
+			hostap_dump_rx_header(dev->name, &rxdesc);
+			goto rx_dropped;
+		}
+	}
+
+	skb = dev_alloc_skb(len + hdr_len);
+	if (!skb) {
+		spin_unlock(&local->baplock);
+		printk(KERN_DEBUG "%s: RX failed to allocate skb\n",
+		       dev->name);
+		goto rx_dropped;
+	}
+	skb->dev = dev;
+	memcpy(skb_put(skb, hdr_len), &rxdesc, hdr_len);
+
+	if (len > 0)
+		res = hfa384x_from_bap(dev, BAP0, skb_put(skb, len), len);
+	spin_unlock(&local->baplock);
+	if (res) {
+		printk(KERN_DEBUG "%s: RX failed to read "
+		       "frame data\n", dev->name);
+		goto rx_dropped;
+	}
+
+	skb_queue_tail(&local->rx_list, skb);
+	tasklet_schedule(&local->rx_tasklet);
+
+ rx_exit:
+	prism2_callback(local, PRISM2_CALLBACK_RX_END);
+	if (!rx_pending) {
+		HFA384X_OUTW(HFA384X_EV_RX, HFA384X_EVACK_OFF);
+	}
+
+	return;
+
+ rx_dropped:
+	stats->rx_dropped++;
+	if (skb)
+		dev_kfree_skb(skb);
+	goto rx_exit;
+}
+
+
+/* Called only as a tasklet (software IRQ) */
+static void hostap_rx_skb(local_info_t *local, struct sk_buff *skb)
+{
+	struct hfa384x_rx_frame *rxdesc;
+	struct net_device *dev = skb->dev;
+	struct hostap_80211_rx_status stats;
+	int hdrlen, rx_hdrlen;
+
+	rx_hdrlen = sizeof(*rxdesc);
+	if (skb->len < sizeof(*rxdesc)) {
+		/* Allow monitor mode to receive shorter frames */
+		if (local->iw_mode == IW_MODE_MONITOR &&
+		    skb->len >= sizeof(*rxdesc) - 30) {
+			rx_hdrlen = skb->len;
+		} else {
+			dev_kfree_skb(skb);
+			return;
+		}
+	}
+
+	rxdesc = (struct hfa384x_rx_frame *) skb->data;
+
+	if (local->frame_dump & PRISM2_DUMP_RX_HDR &&
+	    skb->len >= sizeof(*rxdesc))
+		hostap_dump_rx_header(dev->name, rxdesc);
+
+	if (le16_to_cpu(rxdesc->status) & HFA384X_RX_STATUS_FCSERR &&
+	    (!local->monitor_allow_fcserr ||
+	     local->iw_mode != IW_MODE_MONITOR))
+		goto drop;
+
+	if (skb->len > PRISM2_DATA_MAXLEN) {
+		printk(KERN_DEBUG "%s: RX: len(%d) > MAX(%d)\n",
+		       dev->name, skb->len, PRISM2_DATA_MAXLEN);
+		goto drop;
+	}
+
+	stats.mac_time = le32_to_cpu(rxdesc->time);
+	stats.signal = rxdesc->signal - local->rssi_to_dBm;
+	stats.noise = rxdesc->silence - local->rssi_to_dBm;
+	stats.rate = rxdesc->rate;
+
+	/* Convert Prism2 RX structure into IEEE 802.11 header */
+	hdrlen = hostap_80211_get_hdrlen(le16_to_cpu(rxdesc->frame_control));
+	if (hdrlen > rx_hdrlen)
+		hdrlen = rx_hdrlen;
+
+	memmove(skb_pull(skb, rx_hdrlen - hdrlen),
+		&rxdesc->frame_control, hdrlen);
+
+	hostap_80211_rx(dev, skb, &stats);
+	return;
+
+ drop:
+	dev_kfree_skb(skb);
+}
+
+
+/* Called only as a tasklet (software IRQ) */
+static void hostap_rx_tasklet(unsigned long data)
+{
+	local_info_t *local = (local_info_t *) data;
+	struct sk_buff *skb;
+
+	while ((skb = skb_dequeue(&local->rx_list)) != NULL)
+		hostap_rx_skb(local, skb);
+}
+
+
+/* Called only from hardware IRQ */
+static void prism2_alloc_ev(struct net_device *dev)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	int idx;
+	u16 fid;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	fid = prism2_read_fid_reg(dev, HFA384X_ALLOCFID_OFF);
+
+	PDEBUG(DEBUG_FID, "FID: interrupt: ALLOC - fid=0x%04x\n", fid);
+
+	spin_lock(&local->txfidlock);
+	idx = local->next_alloc;
+
+	do {
+		if (local->txfid[idx] == fid) {
+			PDEBUG(DEBUG_FID, "FID: found matching txfid[%d]\n",
+			       idx);
+
+#ifndef final_version
+			if (local->intransmitfid[idx] == PRISM2_TXFID_EMPTY)
+				printk("Already released txfid found at idx "
+				       "%d\n", idx);
+			if (local->intransmitfid[idx] == PRISM2_TXFID_RESERVED)
+				printk("Already reserved txfid found at idx "
+				       "%d\n", idx);
+#endif
+			local->intransmitfid[idx] = PRISM2_TXFID_EMPTY;
+			idx++;
+			local->next_alloc = idx >= PRISM2_TXFID_COUNT ? 0 :
+				idx;
+
+			if (!test_bit(HOSTAP_BITS_TRANSMIT, &local->bits) &&
+			    netif_queue_stopped(dev))
+				netif_wake_queue(dev);
+
+			spin_unlock(&local->txfidlock);
+			return;
+		}
+
+		idx++;
+		if (idx >= PRISM2_TXFID_COUNT)
+			idx = 0;
+	} while (idx != local->next_alloc);
+
+	printk(KERN_WARNING "%s: could not find matching txfid (0x%04x, new "
+	       "read 0x%04x) for alloc event\n", dev->name, fid,
+	       HFA384X_INW(HFA384X_ALLOCFID_OFF));
+	printk(KERN_DEBUG "TXFIDs:");
+	for (idx = 0; idx < PRISM2_TXFID_COUNT; idx++)
+		printk(" %04x[%04x]", local->txfid[idx],
+		       local->intransmitfid[idx]);
+	printk("\n");
+	spin_unlock(&local->txfidlock);
+
+	/* FIX: should probably schedule reset; reference to one txfid was lost
+	 * completely. Bad things will happen if we run out of txfids.
+	 * Actually, this will cause the netdev watchdog to notice a TX timeout
+	 * and then a card reset after all txfids have been leaked. */
+}
+
+
+/* Called only as a tasklet (software IRQ) */
+static void hostap_tx_callback(local_info_t *local,
+			       struct hfa384x_tx_frame *txdesc, int ok,
+			       char *payload)
+{
+	u16 sw_support, hdrlen, len;
+	struct sk_buff *skb;
+	struct hostap_tx_callback_info *cb;
+
+	/* Make sure that frame was from us. */
+	if (memcmp(txdesc->addr2, local->dev->dev_addr, ETH_ALEN)) {
+		printk(KERN_DEBUG "%s: TX callback - foreign frame\n",
+		       local->dev->name);
+		return;
+	}
+
+	sw_support = le16_to_cpu(txdesc->sw_support);
+
+	spin_lock(&local->lock);
+	cb = local->tx_callback;
+	while (cb != NULL && cb->idx != sw_support)
+		cb = cb->next;
+	spin_unlock(&local->lock);
+
+	if (cb == NULL) {
+		printk(KERN_DEBUG "%s: could not find TX callback (idx %d)\n",
+		       local->dev->name, sw_support);
+		return;
+	}
+
+	hdrlen = hostap_80211_get_hdrlen(le16_to_cpu(txdesc->frame_control));
+	len = le16_to_cpu(txdesc->data_len);
+	skb = dev_alloc_skb(hdrlen + len);
+	if (skb == NULL) {
+		printk(KERN_DEBUG "%s: hostap_tx_callback failed to allocate "
+		       "skb\n", local->dev->name);
+		return;
+	}
+
+	memcpy(skb_put(skb, hdrlen), (void *) &txdesc->frame_control, hdrlen);
+	if (payload)
+		memcpy(skb_put(skb, len), payload, len);
+
+	skb->dev = local->dev;
+	skb->mac.raw = skb->data;
+
+	cb->func(skb, ok, cb->data);
+}
+
+
+/* Called only as a tasklet (software IRQ) */
+static int hostap_tx_compl_read(local_info_t *local, int error,
+				struct hfa384x_tx_frame *txdesc,
+				char **payload)
+{
+	u16 fid, len;
+	int res, ret = 0;
+	struct net_device *dev = local->dev;
+
+	fid = prism2_read_fid_reg(dev, HFA384X_TXCOMPLFID_OFF);
+
+	PDEBUG(DEBUG_FID, "interrupt: TX (err=%d) - fid=0x%04x\n", error, fid);
+
+	spin_lock(&local->baplock);
+	res = hfa384x_setup_bap(dev, BAP0, fid, 0);
+	if (!res)
+		res = hfa384x_from_bap(dev, BAP0, txdesc, sizeof(*txdesc));
+	if (res) {
+		PDEBUG(DEBUG_EXTRA, "%s: TX (err=%d) - fid=0x%04x - could not "
+		       "read txdesc\n", dev->name, error, fid);
+		if (res == -ETIMEDOUT) {
+			schedule_work(&local->reset_queue);
+		}
+		ret = -1;
+		goto fail;
+	}
+	if (txdesc->sw_support) {
+		len = le16_to_cpu(txdesc->data_len);
+		if (len < PRISM2_DATA_MAXLEN) {
+			*payload = (char *) kmalloc(len, GFP_ATOMIC);
+			if (*payload == NULL ||
+			    hfa384x_from_bap(dev, BAP0, *payload, len)) {
+				PDEBUG(DEBUG_EXTRA, "%s: could not read TX "
+				       "frame payload\n", dev->name);
+				kfree(*payload);
+				*payload = NULL;
+				ret = -1;
+				goto fail;
+			}
+		}
+	}
+
+ fail:
+	spin_unlock(&local->baplock);
+
+	return ret;
+}
+
+
+/* Called only as a tasklet (software IRQ) */
+static void prism2_tx_ev(local_info_t *local)
+{
+	struct net_device *dev = local->dev;
+	char *payload = NULL;
+	struct hfa384x_tx_frame txdesc;
+
+	if (hostap_tx_compl_read(local, 0, &txdesc, &payload))
+		goto fail;
+
+	if (local->frame_dump & PRISM2_DUMP_TX_HDR) {
+		PDEBUG(DEBUG_EXTRA, "%s: TX - status=0x%04x "
+		       "retry_count=%d tx_rate=%d seq_ctrl=%d "
+		       "duration_id=%d\n",
+		       dev->name, le16_to_cpu(txdesc.status),
+		       txdesc.retry_count, txdesc.tx_rate,
+		       le16_to_cpu(txdesc.seq_ctrl),
+		       le16_to_cpu(txdesc.duration_id));
+	}
+
+	if (txdesc.sw_support)
+		hostap_tx_callback(local, &txdesc, 1, payload);
+	kfree(payload);
+
+ fail:
+	HFA384X_OUTW(HFA384X_EV_TX, HFA384X_EVACK_OFF);
+}
+
+
+/* Called only as a tasklet (software IRQ) */
+static void hostap_sta_tx_exc_tasklet(unsigned long data)
+{
+	local_info_t *local = (local_info_t *) data;
+	struct sk_buff *skb;
+
+	while ((skb = skb_dequeue(&local->sta_tx_exc_list)) != NULL) {
+		struct hfa384x_tx_frame *txdesc =
+			(struct hfa384x_tx_frame *) skb->data;
+
+		if (skb->len >= sizeof(*txdesc)) {
+			/* Convert Prism2 RX structure into IEEE 802.11 header
+			 */
+			u16 fc = le16_to_cpu(txdesc->frame_control);
+			int hdrlen = hostap_80211_get_hdrlen(fc);
+			memmove(skb_pull(skb, sizeof(*txdesc) - hdrlen),
+				&txdesc->frame_control, hdrlen);
+
+			hostap_handle_sta_tx_exc(local, skb);
+		}
+		dev_kfree_skb(skb);
+	}
+}
+
+
+/* Called only as a tasklet (software IRQ) */
+static void prism2_txexc(local_info_t *local)
+{
+	struct net_device *dev = local->dev;
+	u16 status, fc;
+	int show_dump, res;
+	char *payload = NULL;
+	struct hfa384x_tx_frame txdesc;
+
+	show_dump = local->frame_dump & PRISM2_DUMP_TXEXC_HDR;
+	local->stats.tx_errors++;
+
+	res = hostap_tx_compl_read(local, 1, &txdesc, &payload);
+	HFA384X_OUTW(HFA384X_EV_TXEXC, HFA384X_EVACK_OFF);
+	if (res)
+		return;
+
+	status = le16_to_cpu(txdesc.status);
+
+	/* We produce a TXDROP event only for retry or lifetime
+	 * exceeded, because that's the only status that really means
+	 * that this particular node went away.
+	 * Other errors mean that *we* screwed up. - Jean II */
+	if (status & (HFA384X_TX_STATUS_RETRYERR | HFA384X_TX_STATUS_AGEDERR))
+	{
+		union iwreq_data wrqu;
+
+		/* Copy 802.11 dest address. */
+		memcpy(wrqu.addr.sa_data, txdesc.addr1, ETH_ALEN);
+		wrqu.addr.sa_family = ARPHRD_ETHER;
+		wireless_send_event(dev, IWEVTXDROP, &wrqu, NULL);
+	} else
+		show_dump = 1;
+
+	if (local->iw_mode == IW_MODE_MASTER ||
+	    local->iw_mode == IW_MODE_REPEAT ||
+	    local->wds_type & HOSTAP_WDS_AP_CLIENT) {
+		struct sk_buff *skb;
+		skb = dev_alloc_skb(sizeof(txdesc));
+		if (skb) {
+			memcpy(skb_put(skb, sizeof(txdesc)), &txdesc,
+			       sizeof(txdesc));
+			skb_queue_tail(&local->sta_tx_exc_list, skb);
+			tasklet_schedule(&local->sta_tx_exc_tasklet);
+		}
+	}
+
+	if (txdesc.sw_support)
+		hostap_tx_callback(local, &txdesc, 0, payload);
+	kfree(payload);
+
+	if (!show_dump)
+		return;
+
+	PDEBUG(DEBUG_EXTRA, "%s: TXEXC - status=0x%04x (%s%s%s%s)"
+	       " tx_control=%04x\n",
+	       dev->name, status,
+	       status & HFA384X_TX_STATUS_RETRYERR ? "[RetryErr]" : "",
+	       status & HFA384X_TX_STATUS_AGEDERR ? "[AgedErr]" : "",
+	       status & HFA384X_TX_STATUS_DISCON ? "[Discon]" : "",
+	       status & HFA384X_TX_STATUS_FORMERR ? "[FormErr]" : "",
+	       le16_to_cpu(txdesc.tx_control));
+
+	fc = le16_to_cpu(txdesc.frame_control);
+	PDEBUG(DEBUG_EXTRA, "   retry_count=%d tx_rate=%d fc=0x%04x "
+	       "(%s%s%s::%d%s%s)\n",
+	       txdesc.retry_count, txdesc.tx_rate, fc,
+	       WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_MGMT ? "Mgmt" : "",
+	       WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_CTL ? "Ctrl" : "",
+	       WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA ? "Data" : "",
+	       WLAN_FC_GET_STYPE(fc) >> 4,
+	       fc & IEEE80211_FCTL_TODS ? " ToDS" : "",
+	       fc & IEEE80211_FCTL_FROMDS ? " FromDS" : "");
+	PDEBUG(DEBUG_EXTRA, "   A1=" MACSTR " A2=" MACSTR " A3="
+	       MACSTR " A4=" MACSTR "\n",
+	       MAC2STR(txdesc.addr1), MAC2STR(txdesc.addr2),
+	       MAC2STR(txdesc.addr3), MAC2STR(txdesc.addr4));
+}
+
+
+/* Called only as a tasklet (software IRQ) */
+static void hostap_info_tasklet(unsigned long data)
+{
+	local_info_t *local = (local_info_t *) data;
+	struct sk_buff *skb;
+
+	while ((skb = skb_dequeue(&local->info_list)) != NULL) {
+		hostap_info_process(local, skb);
+		dev_kfree_skb(skb);
+	}
+}
+
+
+/* Called only as a tasklet (software IRQ) */
+static void prism2_info(local_info_t *local)
+{
+	struct net_device *dev = local->dev;
+	u16 fid;
+	int res, left;
+	struct hfa384x_info_frame info;
+	struct sk_buff *skb;
+
+	fid = HFA384X_INW(HFA384X_INFOFID_OFF);
+
+	spin_lock(&local->baplock);
+	res = hfa384x_setup_bap(dev, BAP0, fid, 0);
+	if (!res)
+		res = hfa384x_from_bap(dev, BAP0, &info, sizeof(info));
+	if (res) {
+		spin_unlock(&local->baplock);
+		printk(KERN_DEBUG "Could not get info frame (fid=0x%04x)\n",
+		       fid);
+		if (res == -ETIMEDOUT) {
+			schedule_work(&local->reset_queue);
+		}
+		goto out;
+	}
+
+	le16_to_cpus(&info.len);
+	le16_to_cpus(&info.type);
+	left = (info.len - 1) * 2;
+
+	if (info.len & 0x8000 || info.len == 0 || left > 2060) {
+		/* data register seems to give 0x8000 in some error cases even
+		 * though busy bit is not set in offset register;
+		 * in addition, length must be at least 1 due to type field */
+		spin_unlock(&local->baplock);
+		printk(KERN_DEBUG "%s: Received info frame with invalid "
+		       "length 0x%04x (type 0x%04x)\n", dev->name, info.len,
+		       info.type);
+		goto out;
+	}
+
+	skb = dev_alloc_skb(sizeof(info) + left);
+	if (skb == NULL) {
+		spin_unlock(&local->baplock);
+		printk(KERN_DEBUG "%s: Could not allocate skb for info "
+		       "frame\n", dev->name);
+		goto out;
+	}
+
+	memcpy(skb_put(skb, sizeof(info)), &info, sizeof(info));
+	if (left > 0 && hfa384x_from_bap(dev, BAP0, skb_put(skb, left), left))
+	{
+		spin_unlock(&local->baplock);
+		printk(KERN_WARNING "%s: Info frame read failed (fid=0x%04x, "
+		       "len=0x%04x, type=0x%04x)\n",
+		       dev->name, fid, info.len, info.type);
+		dev_kfree_skb(skb);
+		goto out;
+	}
+	spin_unlock(&local->baplock);
+
+	skb_queue_tail(&local->info_list, skb);
+	tasklet_schedule(&local->info_tasklet);
+
+ out:
+	HFA384X_OUTW(HFA384X_EV_INFO, HFA384X_EVACK_OFF);
+}
+
+
+/* Called only as a tasklet (software IRQ) */
+static void hostap_bap_tasklet(unsigned long data)
+{
+	local_info_t *local = (local_info_t *) data;
+	struct net_device *dev = local->dev;
+	u16 ev;
+	int frames = 30;
+
+	if (local->func->card_present && !local->func->card_present(local))
+		return;
+
+	set_bit(HOSTAP_BITS_BAP_TASKLET, &local->bits);
+
+	/* Process all pending BAP events without generating new interrupts
+	 * for them */
+	while (frames-- > 0) {
+		ev = HFA384X_INW(HFA384X_EVSTAT_OFF);
+		if (ev == 0xffff || !(ev & HFA384X_BAP0_EVENTS))
+			break;
+		if (ev & HFA384X_EV_RX)
+			prism2_rx(local);
+		if (ev & HFA384X_EV_INFO)
+			prism2_info(local);
+		if (ev & HFA384X_EV_TX)
+			prism2_tx_ev(local);
+		if (ev & HFA384X_EV_TXEXC)
+			prism2_txexc(local);
+	}
+
+	set_bit(HOSTAP_BITS_BAP_TASKLET2, &local->bits);
+	clear_bit(HOSTAP_BITS_BAP_TASKLET, &local->bits);
+
+	/* Enable interrupts for new BAP events */
+	hfa384x_events_all(dev);
+	clear_bit(HOSTAP_BITS_BAP_TASKLET2, &local->bits);
+}
+
+
+/* Called only from hardware IRQ */
+static void prism2_infdrop(struct net_device *dev)
+{
+	static unsigned long last_inquire = 0;
+
+	PDEBUG(DEBUG_EXTRA, "%s: INFDROP event\n", dev->name);
+
+	/* some firmware versions seem to get stuck with
+	 * full CommTallies in high traffic load cases; every
+	 * packet will then cause INFDROP event and CommTallies
+	 * info frame will not be sent automatically. Try to
+	 * get out of this state by inquiring CommTallies. */
+	if (!last_inquire || time_after(jiffies, last_inquire + HZ)) {
+		hfa384x_cmd_callback(dev, HFA384X_CMDCODE_INQUIRE,
+				     HFA384X_INFO_COMMTALLIES, NULL, 0);
+		last_inquire = jiffies;
+	}
+}
+
+
+/* Called only from hardware IRQ */
+static void prism2_ev_tick(struct net_device *dev)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	u16 evstat, inten;
+	static int prev_stuck = 0;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	if (time_after(jiffies, local->last_tick_timer + 5 * HZ) &&
+	    local->last_tick_timer) {
+		evstat = HFA384X_INW(HFA384X_EVSTAT_OFF);
+		inten = HFA384X_INW(HFA384X_INTEN_OFF);
+		if (!prev_stuck) {
+			printk(KERN_INFO "%s: SW TICK stuck? "
+			       "bits=0x%lx EvStat=%04x IntEn=%04x\n",
+			       dev->name, local->bits, evstat, inten);
+		}
+		local->sw_tick_stuck++;
+		if ((evstat & HFA384X_BAP0_EVENTS) &&
+		    (inten & HFA384X_BAP0_EVENTS)) {
+			printk(KERN_INFO "%s: trying to recover from IRQ "
+			       "hang\n", dev->name);
+			hfa384x_events_no_bap0(dev);
+		}
+		prev_stuck = 1;
+	} else
+		prev_stuck = 0;
+}
+
+
+/* Called only from hardware IRQ */
+static inline void prism2_check_magic(local_info_t *local)
+{
+	/* At least PCI Prism2.5 with bus mastering seems to sometimes
+	 * return 0x0000 in SWSUPPORT0 for an unknown reason, but re-reading
+	 * the register once or twice seems to get the correct value. PCI
+	 * cards cannot be removed during normal operation anyway, so there is
+	 * not really any need for this verification with them. */
+
+#ifndef PRISM2_PCI
+#ifndef final_version
+	static unsigned long last_magic_err = 0;
+	struct net_device *dev = local->dev;
+
+	if (HFA384X_INW(HFA384X_SWSUPPORT0_OFF) != HFA384X_MAGIC) {
+		if (!local->hw_ready)
+			return;
+		HFA384X_OUTW(0xffff, HFA384X_EVACK_OFF);
+		if (time_after(jiffies, last_magic_err + 10 * HZ)) {
+			printk("%s: Interrupt, but SWSUPPORT0 does not match: "
+			       "%04X != %04X - card removed?\n", dev->name,
+			       HFA384X_INW(HFA384X_SWSUPPORT0_OFF),
+			       HFA384X_MAGIC);
+			last_magic_err = jiffies;
+		} else if (net_ratelimit()) {
+			printk(KERN_DEBUG "%s: interrupt - SWSUPPORT0=%04x "
+			       "MAGIC=%04x\n", dev->name,
+			       HFA384X_INW(HFA384X_SWSUPPORT0_OFF),
+			       HFA384X_MAGIC);
+		}
+		if (HFA384X_INW(HFA384X_SWSUPPORT0_OFF) != 0xffff)
+			schedule_work(&local->reset_queue);
+		return;
+	}
+#endif /* final_version */
+#endif /* !PRISM2_PCI */
+}
+
+
+/* Called only from hardware IRQ */
+static irqreturn_t prism2_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct net_device *dev = (struct net_device *) dev_id;
+	struct hostap_interface *iface;
+	local_info_t *local;
+	int events = 0;
+	u16 ev;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INTERRUPT, 0, 0);
+
+	if (local->func->card_present && !local->func->card_present(local)) {
+		if (net_ratelimit()) {
+			printk(KERN_DEBUG "%s: Interrupt, but dev not OK\n",
+			       dev->name);
+		}
+		return IRQ_HANDLED;
+	}
+
+	prism2_check_magic(local);
+
+	for (;;) {
+		ev = HFA384X_INW(HFA384X_EVSTAT_OFF);
+		if (ev == 0xffff) {
+			if (local->shutdown)
+				return IRQ_HANDLED;
+			HFA384X_OUTW(0xffff, HFA384X_EVACK_OFF);
+			printk(KERN_DEBUG "%s: prism2_interrupt: ev=0xffff\n",
+			       dev->name);
+			return IRQ_HANDLED;
+		}
+
+		ev &= HFA384X_INW(HFA384X_INTEN_OFF);
+		if (ev == 0)
+			break;
+
+		if (ev & HFA384X_EV_CMD) {
+			prism2_cmd_ev(dev);
+		}
+
+		/* Above events are needed even before hw is ready, but other
+		 * events should be skipped during initialization. This may
+		 * change for AllocEv if allocate_fid is implemented without
+		 * busy waiting. */
+		if (!local->hw_ready || local->hw_resetting ||
+		    !local->dev_enabled) {
+			ev = HFA384X_INW(HFA384X_EVSTAT_OFF);
+			if (ev & HFA384X_EV_CMD)
+				goto next_event;
+			if ((ev & HFA384X_EVENT_MASK) == 0)
+				return IRQ_HANDLED;
+			if (local->dev_enabled && (ev & ~HFA384X_EV_TICK) &&
+			    net_ratelimit()) {
+				printk(KERN_DEBUG "%s: prism2_interrupt: hw "
+				       "not ready; skipping events 0x%04x "
+				       "(IntEn=0x%04x)%s%s%s\n",
+				       dev->name, ev,
+				       HFA384X_INW(HFA384X_INTEN_OFF),
+				       !local->hw_ready ? " (!hw_ready)" : "",
+				       local->hw_resetting ?
+				       " (hw_resetting)" : "",
+				       !local->dev_enabled ?
+				       " (!dev_enabled)" : "");
+			}
+			HFA384X_OUTW(ev, HFA384X_EVACK_OFF);
+			return IRQ_HANDLED;
+		}
+
+		if (ev & HFA384X_EV_TICK) {
+			prism2_ev_tick(dev);
+			HFA384X_OUTW(HFA384X_EV_TICK, HFA384X_EVACK_OFF);
+		}
+
+		if (ev & HFA384X_EV_ALLOC) {
+			prism2_alloc_ev(dev);
+			HFA384X_OUTW(HFA384X_EV_ALLOC, HFA384X_EVACK_OFF);
+		}
+
+		/* Reading data from the card is quite time consuming, so do it
+		 * in tasklets. TX, TXEXC, RX, and INFO events will be ACKed
+		 * and unmasked after needed data has been read completely. */
+		if (ev & HFA384X_BAP0_EVENTS) {
+			hfa384x_events_no_bap0(dev);
+			tasklet_schedule(&local->bap_tasklet);
+		}
+
+#ifndef final_version
+		if (ev & HFA384X_EV_WTERR) {
+			PDEBUG(DEBUG_EXTRA, "%s: WTERR event\n", dev->name);
+			HFA384X_OUTW(HFA384X_EV_WTERR, HFA384X_EVACK_OFF);
+		}
+#endif /* final_version */
+
+		if (ev & HFA384X_EV_INFDROP) {
+			prism2_infdrop(dev);
+			HFA384X_OUTW(HFA384X_EV_INFDROP, HFA384X_EVACK_OFF);
+		}
+
+	next_event:
+		events++;
+		if (events >= PRISM2_MAX_INTERRUPT_EVENTS) {
+			PDEBUG(DEBUG_EXTRA, "prism2_interrupt: >%d events "
+			       "(EvStat=0x%04x)\n",
+			       PRISM2_MAX_INTERRUPT_EVENTS,
+			       HFA384X_INW(HFA384X_EVSTAT_OFF));
+			break;
+		}
+	}
+	prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INTERRUPT, 0, 1);
+	return IRQ_RETVAL(events);
+}
+
+
+static void prism2_check_sta_fw_version(local_info_t *local)
+{
+	struct hfa384x_comp_ident comp;
+	int id, variant, major, minor;
+
+	if (hfa384x_get_rid(local->dev, HFA384X_RID_STAID,
+			    &comp, sizeof(comp), 1) < 0)
+		return;
+
+	local->fw_ap = 0;
+	id = le16_to_cpu(comp.id);
+	if (id != HFA384X_COMP_ID_STA) {
+		if (id == HFA384X_COMP_ID_FW_AP)
+			local->fw_ap = 1;
+		return;
+	}
+
+	major = __le16_to_cpu(comp.major);
+	minor = __le16_to_cpu(comp.minor);
+	variant = __le16_to_cpu(comp.variant);
+	local->sta_fw_ver = PRISM2_FW_VER(major, minor, variant);
+
+	/* Station firmware versions before 1.4.x seem to have a bug in
+	 * firmware-based WEP encryption when using Host AP mode, so use
+	 * host_encrypt as a default for them. Firmware version 1.4.9 is the
+	 * first one that has been seen to produce correct encryption, but the
+	 * bug might be fixed before that (although, at least 1.4.2 is broken).
+	 */
+	local->fw_encrypt_ok = local->sta_fw_ver >= PRISM2_FW_VER(1,4,9);
+
+	if (local->iw_mode == IW_MODE_MASTER && !local->host_encrypt &&
+	    !local->fw_encrypt_ok) {
+		printk(KERN_DEBUG "%s: defaulting to host-based encryption as "
+		       "a workaround for firmware bug in Host AP mode WEP\n",
+		       local->dev->name);
+		local->host_encrypt = 1;
+	}
+
+	/* IEEE 802.11 standard compliant WDS frames (4 addresses) were broken
+	 * in station firmware versions before 1.5.x. With these versions, the
+	 * driver uses a workaround with bogus frame format (4th address after
+	 * the payload). This is not compatible with other AP devices. Since
+	 * the firmware bug is fixed in the latest station firmware versions,
+	 * automatically enable standard compliant mode for cards using station
+	 * firmware version 1.5.0 or newer. */
+	if (local->sta_fw_ver >= PRISM2_FW_VER(1,5,0))
+		local->wds_type |= HOSTAP_WDS_STANDARD_FRAME;
+	else {
+		printk(KERN_DEBUG "%s: defaulting to bogus WDS frame as a "
+		       "workaround for firmware bug in Host AP mode WDS\n",
+		       local->dev->name);
+	}
+
+	hostap_check_sta_fw_version(local->ap, local->sta_fw_ver);
+}
+
+
+static void prism2_crypt_deinit_entries(local_info_t *local, int force)
+{
+	struct list_head *ptr, *n;
+	struct ieee80211_crypt_data *entry;
+
+	for (ptr = local->crypt_deinit_list.next, n = ptr->next;
+	     ptr != &local->crypt_deinit_list; ptr = n, n = ptr->next) {
+		entry = list_entry(ptr, struct ieee80211_crypt_data, list);
+
+		if (atomic_read(&entry->refcnt) != 0 && !force)
+			continue;
+
+		list_del(ptr);
+
+		if (entry->ops)
+			entry->ops->deinit(entry->priv);
+		kfree(entry);
+	}
+}
+
+
+static void prism2_crypt_deinit_handler(unsigned long data)
+{
+	local_info_t *local = (local_info_t *) data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&local->lock, flags);
+	prism2_crypt_deinit_entries(local, 0);
+	if (!list_empty(&local->crypt_deinit_list)) {
+		printk(KERN_DEBUG "%s: entries remaining in delayed crypt "
+		       "deletion list\n", local->dev->name);
+		local->crypt_deinit_timer.expires = jiffies + HZ;
+		add_timer(&local->crypt_deinit_timer);
+	}
+	spin_unlock_irqrestore(&local->lock, flags);
+
+}
+
+
+static void hostap_passive_scan(unsigned long data)
+{
+	local_info_t *local = (local_info_t *) data;
+	struct net_device *dev = local->dev;
+	u16 channel;
+
+	if (local->passive_scan_interval <= 0)
+		return;
+
+	if (local->passive_scan_state == PASSIVE_SCAN_LISTEN) {
+		int max_tries = 16;
+
+		/* Even though the host system does not really know when the WLAN
+		 * MAC is sending frames, try to avoid changing channels for
+		 * passive scanning when a host-generated frame is being
+		 * transmitted */
+		if (test_bit(HOSTAP_BITS_TRANSMIT, &local->bits)) {
+			printk(KERN_DEBUG "%s: passive scan detected pending "
+			       "TX - delaying\n", dev->name);
+			local->passive_scan_timer.expires = jiffies + HZ / 10;
+			add_timer(&local->passive_scan_timer);
+			return;
+		}
+
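+		/* advance to the next channel allowed by channel_mask,
+		 * wrapping from channel 14 back to 1 */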
+		do {
+			local->passive_scan_channel++;
+			if (local->passive_scan_channel > 14)
+				local->passive_scan_channel = 1;
+			max_tries--;
+		} while (!(local->channel_mask &
+			   (1 << (local->passive_scan_channel - 1))) &&
+			 max_tries > 0);
+
+		if (max_tries == 0) {
+			printk(KERN_INFO "%s: no allowed passive scan channels"
+			       " found\n", dev->name);
+			return;
+		}
+
+		printk(KERN_DEBUG "%s: passive scan channel %d\n",
+		       dev->name, local->passive_scan_channel);
+		channel = local->passive_scan_channel;
+		local->passive_scan_state = PASSIVE_SCAN_WAIT;
+		local->passive_scan_timer.expires = jiffies + HZ / 10;
+	} else {
+		channel = local->channel;
+		local->passive_scan_state = PASSIVE_SCAN_LISTEN;
+		local->passive_scan_timer.expires = jiffies +
+			local->passive_scan_interval * HZ;
+	}
+
+	if (hfa384x_cmd_callback(dev, HFA384X_CMDCODE_TEST |
+				 (HFA384X_TEST_CHANGE_CHANNEL << 8),
+				 channel, NULL, 0))
+		printk(KERN_ERR "%s: passive scan channel set %d "
+		       "failed\n", dev->name, channel);
+
+	add_timer(&local->passive_scan_timer);
+}
+
+
+/* Called only as a scheduled task when communications quality values should
+ * be updated. */
+static void handle_comms_qual_update(void *data)
+{
+	local_info_t *local = data;
+	prism2_update_comms_qual(local->dev);
+}
+
+
+/* Software watchdog - called as a timer. Hardware interrupt (Tick event) is
+ * used to monitor that local->last_tick_timer is being updated. If not,
+ * an interrupt busy-loop is assumed and the driver tries to recover by
+ * masking out some events. */
+static void hostap_tick_timer(unsigned long data)
+{
+	static unsigned long last_inquire = 0;
+	local_info_t *local = (local_info_t *) data;
+	local->last_tick_timer = jiffies;
+
+	/* Inquire CommTallies every 10 seconds to keep the statistics updated
+	 * more often during low load and when using 32-bit tallies. */
+	if ((!last_inquire || time_after(jiffies, last_inquire + 10 * HZ)) &&
+	    !local->hw_downloading && local->hw_ready &&
+	    !local->hw_resetting && local->dev_enabled) {
+		hfa384x_cmd_callback(local->dev, HFA384X_CMDCODE_INQUIRE,
+				     HFA384X_INFO_COMMTALLIES, NULL, 0);
+		last_inquire = jiffies;
+	}
+
+	if ((local->last_comms_qual_update == 0 ||
+	     time_after(jiffies, local->last_comms_qual_update + 10 * HZ)) &&
+	    (local->iw_mode == IW_MODE_INFRA ||
+	     local->iw_mode == IW_MODE_ADHOC)) {
+		schedule_work(&local->comms_qual_update);
+	}
+
+	local->tick_timer.expires = jiffies + 2 * HZ;
+	add_timer(&local->tick_timer);
+}
+
+
+#ifndef PRISM2_NO_PROCFS_DEBUG
+static int prism2_registers_proc_read(char *page, char **start, off_t off,
+				      int count, int *eof, void *data)
+{
+	char *p = page;
+	local_info_t *local = (local_info_t *) data;
+
+	if (off != 0) {
+		*eof = 1;
+		return 0;
+	}
+
+#define SHOW_REG(n) \
+p += sprintf(p, #n "=%04x\n", hfa384x_read_reg(local->dev, HFA384X_##n##_OFF))
+
+	SHOW_REG(CMD);
+	SHOW_REG(PARAM0);
+	SHOW_REG(PARAM1);
+	SHOW_REG(PARAM2);
+	SHOW_REG(STATUS);
+	SHOW_REG(RESP0);
+	SHOW_REG(RESP1);
+	SHOW_REG(RESP2);
+	SHOW_REG(INFOFID);
+	SHOW_REG(CONTROL);
+	SHOW_REG(SELECT0);
+	SHOW_REG(SELECT1);
+	SHOW_REG(OFFSET0);
+	SHOW_REG(OFFSET1);
+	SHOW_REG(RXFID);
+	SHOW_REG(ALLOCFID);
+	SHOW_REG(TXCOMPLFID);
+	SHOW_REG(SWSUPPORT0);
+	SHOW_REG(SWSUPPORT1);
+	SHOW_REG(SWSUPPORT2);
+	SHOW_REG(EVSTAT);
+	SHOW_REG(INTEN);
+	SHOW_REG(EVACK);
+	/* Do not read data registers, because they change the state of the
+	 * MAC (offset += 2) */
+	/* SHOW_REG(DATA0); */
+	/* SHOW_REG(DATA1); */
+	SHOW_REG(AUXPAGE);
+	SHOW_REG(AUXOFFSET);
+	/* SHOW_REG(AUXDATA); */
+#ifdef PRISM2_PCI
+	SHOW_REG(PCICOR);
+	SHOW_REG(PCIHCR);
+	SHOW_REG(PCI_M0_ADDRH);
+	SHOW_REG(PCI_M0_ADDRL);
+	SHOW_REG(PCI_M0_LEN);
+	SHOW_REG(PCI_M0_CTL);
+	SHOW_REG(PCI_STATUS);
+	SHOW_REG(PCI_M1_ADDRH);
+	SHOW_REG(PCI_M1_ADDRL);
+	SHOW_REG(PCI_M1_LEN);
+	SHOW_REG(PCI_M1_CTL);
+#endif /* PRISM2_PCI */
+
+	return (p - page);
+}
+#endif /* PRISM2_NO_PROCFS_DEBUG */
+
+
+struct set_tim_data {
+	struct list_head list;
+	int aid;
+	int set;
+};
+
+static int prism2_set_tim(struct net_device *dev, int aid, int set)
+{
+	struct list_head *ptr;
+	struct set_tim_data *new_entry;
+	struct hostap_interface *iface;
+	local_info_t *local;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	new_entry = (struct set_tim_data *)
+		kmalloc(sizeof(*new_entry), GFP_ATOMIC);
+	if (new_entry == NULL) {
+		printk(KERN_DEBUG "%s: prism2_set_tim: kmalloc failed\n",
+		       local->dev->name);
+		return -ENOMEM;
+	}
+	memset(new_entry, 0, sizeof(*new_entry));
+	new_entry->aid = aid;
+	new_entry->set = set;
+
+	spin_lock_bh(&local->set_tim_lock);
+	list_for_each(ptr, &local->set_tim_list) {
+		struct set_tim_data *entry =
+			list_entry(ptr, struct set_tim_data, list);
+		if (entry->aid == aid) {
+			PDEBUG(DEBUG_PS2, "%s: prism2_set_tim: aid=%d "
+			       "set=%d ==> %d\n",
+			       local->dev->name, aid, entry->set, set);
+			entry->set = set;
+			kfree(new_entry);
+			new_entry = NULL;
+			break;
+		}
+	}
+	if (new_entry)
+		list_add_tail(&new_entry->list, &local->set_tim_list);
+	spin_unlock_bh(&local->set_tim_lock);
+
+	schedule_work(&local->set_tim_queue);
+
+	return 0;
+}
+
+
+static void handle_set_tim_queue(void *data)
+{
+	local_info_t *local = (local_info_t *) data;
+	struct set_tim_data *entry;
+	u16 val;
+
+	for (;;) {
+		entry = NULL;
+		spin_lock_bh(&local->set_tim_lock);
+		if (!list_empty(&local->set_tim_list)) {
+			entry = list_entry(local->set_tim_list.next,
+					   struct set_tim_data, list);
+			list_del(&entry->list);
+		}
+		spin_unlock_bh(&local->set_tim_lock);
+		if (!entry)
+			break;
+
+		PDEBUG(DEBUG_PS2, "%s: handle_set_tim_queue: aid=%d set=%d\n",
+		       local->dev->name, entry->aid, entry->set);
+
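+		/* cnfTIMCtrl value: AID in the low bits, MSB set to raise the
+		 * TIM bit for that station, cleared to remove it */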
+		val = entry->aid;
+		if (entry->set)
+			val |= 0x8000;
+		if (hostap_set_word(local->dev, HFA384X_RID_CNFTIMCTRL, val)) {
+			printk(KERN_DEBUG "%s: set_tim failed (aid=%d "
+			       "set=%d)\n",
+			       local->dev->name, entry->aid, entry->set);
+		}
+
+		kfree(entry);
+	}
+}
+
+
+static void prism2_clear_set_tim_queue(local_info_t *local)
+{
+	struct list_head *ptr, *n;
+
+	list_for_each_safe(ptr, n, &local->set_tim_list) {
+		struct set_tim_data *entry;
+		entry = list_entry(ptr, struct set_tim_data, list);
+		list_del(&entry->list);
+		kfree(entry);
+	}
+}
+
+
+static struct net_device *
+prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx,
+		       struct device *sdev)
+{
+	struct net_device *dev;
+	struct hostap_interface *iface;
+	struct local_info *local;
+	int len, i, ret;
+
+	if (funcs == NULL)
+		return NULL;
+
+	len = strlen(dev_template);
+	if (len >= IFNAMSIZ || strstr(dev_template, "%d") == NULL) {
+		printk(KERN_WARNING "hostap: Invalid dev_template='%s'\n",
+		       dev_template);
+		return NULL;
+	}
+
+	len = sizeof(struct hostap_interface) +
+		3 + sizeof(struct local_info) +
+		3 + sizeof(struct ap_data);
+
+	dev = alloc_etherdev(len);
+	if (dev == NULL)
+		return NULL;
+
+	iface = netdev_priv(dev);
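+	/* local_info and ap_data are carved out of the same netdev private
+	 * area; round each start address up to a 4-byte boundary */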
+	local = (struct local_info *) ((((long) (iface + 1)) + 3) & ~3);
+	local->ap = (struct ap_data *) ((((long) (local + 1)) + 3) & ~3);
+	local->dev = iface->dev = dev;
+	iface->local = local;
+	iface->type = HOSTAP_INTERFACE_MASTER;
+	INIT_LIST_HEAD(&local->hostap_interfaces);
+
+	local->hw_module = THIS_MODULE;
+
+#ifdef PRISM2_IO_DEBUG
+	local->io_debug_enabled = 1;
+#endif /* PRISM2_IO_DEBUG */
+
+	local->func = funcs;
+	local->func->cmd = hfa384x_cmd;
+	local->func->read_regs = hfa384x_read_regs;
+	local->func->get_rid = hfa384x_get_rid;
+	local->func->set_rid = hfa384x_set_rid;
+	local->func->hw_enable = prism2_hw_enable;
+	local->func->hw_config = prism2_hw_config;
+	local->func->hw_reset = prism2_hw_reset;
+	local->func->hw_shutdown = prism2_hw_shutdown;
+	local->func->reset_port = prism2_reset_port;
+	local->func->schedule_reset = prism2_schedule_reset;
+#ifdef PRISM2_DOWNLOAD_SUPPORT
+	local->func->read_aux = prism2_download_aux_dump;
+	local->func->download = prism2_download;
+#endif /* PRISM2_DOWNLOAD_SUPPORT */
+	local->func->tx = prism2_tx_80211;
+	local->func->set_tim = prism2_set_tim;
+	local->func->need_tx_headroom = 0; /* no need to add txdesc in
+					    * skb->data (FIX: maybe needed for
+					    * DMA bus mastering?) */
+
+	local->mtu = mtu;
+
+	rwlock_init(&local->iface_lock);
+	spin_lock_init(&local->txfidlock);
+	spin_lock_init(&local->cmdlock);
+	spin_lock_init(&local->baplock);
+	spin_lock_init(&local->lock);
+	init_MUTEX(&local->rid_bap_sem);
+
+	if (card_idx < 0 || card_idx >= MAX_PARM_DEVICES)
+		card_idx = 0;
+	local->card_idx = card_idx;
+
+	len = strlen(essid);
+	memcpy(local->essid, essid,
+	       len > MAX_SSID_LEN ? MAX_SSID_LEN : len);
+	local->essid[MAX_SSID_LEN] = '\0';
+	i = GET_INT_PARM(iw_mode, card_idx);
+	if ((i >= IW_MODE_ADHOC && i <= IW_MODE_REPEAT) ||
+	    i == IW_MODE_MONITOR) {
+		local->iw_mode = i;
+	} else {
+		printk(KERN_WARNING "prism2: Unknown iw_mode %d; using "
+		       "IW_MODE_MASTER\n", i);
+		local->iw_mode = IW_MODE_MASTER;
+	}
+	local->channel = GET_INT_PARM(channel, card_idx);
+	local->beacon_int = GET_INT_PARM(beacon_int, card_idx);
+	local->dtim_period = GET_INT_PARM(dtim_period, card_idx);
+	local->wds_max_connections = 16;
+	local->tx_control = HFA384X_TX_CTRL_FLAGS;
+	local->manual_retry_count = -1;
+	local->rts_threshold = 2347;
+	local->fragm_threshold = 2346;
+	local->rssi_to_dBm = 100; /* default; to be overridden by
+				   * cnfDbmAdjust, if available */
+	local->auth_algs = PRISM2_AUTH_OPEN | PRISM2_AUTH_SHARED_KEY;
+	local->sram_type = -1;
+	local->scan_channel_mask = 0xffff;
+
+	/* Initialize task queue structures */
+	INIT_WORK(&local->reset_queue, handle_reset_queue, local);
+	INIT_WORK(&local->set_multicast_list_queue,
+		  hostap_set_multicast_list_queue, local->dev);
+
+	INIT_WORK(&local->set_tim_queue, handle_set_tim_queue, local);
+	INIT_LIST_HEAD(&local->set_tim_list);
+	spin_lock_init(&local->set_tim_lock);
+
+	INIT_WORK(&local->comms_qual_update, handle_comms_qual_update, local);
+
+	/* Initialize tasklets for handling hardware IRQ related operations
+	 * outside hw IRQ handler */
+#define HOSTAP_TASKLET_INIT(q, f, d) \
+do { memset((q), 0, sizeof(*(q))); (q)->func = (f); (q)->data = (d); } \
+while (0)
+	HOSTAP_TASKLET_INIT(&local->bap_tasklet, hostap_bap_tasklet,
+			    (unsigned long) local);
+
+	HOSTAP_TASKLET_INIT(&local->info_tasklet, hostap_info_tasklet,
+			    (unsigned long) local);
+	hostap_info_init(local);
+
+	HOSTAP_TASKLET_INIT(&local->rx_tasklet,
+			    hostap_rx_tasklet, (unsigned long) local);
+	skb_queue_head_init(&local->rx_list);
+
+	HOSTAP_TASKLET_INIT(&local->sta_tx_exc_tasklet,
+			    hostap_sta_tx_exc_tasklet, (unsigned long) local);
+	skb_queue_head_init(&local->sta_tx_exc_list);
+
+	INIT_LIST_HEAD(&local->cmd_queue);
+	init_waitqueue_head(&local->hostscan_wq);
+	INIT_LIST_HEAD(&local->crypt_deinit_list);
+	init_timer(&local->crypt_deinit_timer);
+	local->crypt_deinit_timer.data = (unsigned long) local;
+	local->crypt_deinit_timer.function = prism2_crypt_deinit_handler;
+
+	init_timer(&local->passive_scan_timer);
+	local->passive_scan_timer.data = (unsigned long) local;
+	local->passive_scan_timer.function = hostap_passive_scan;
+
+	init_timer(&local->tick_timer);
+	local->tick_timer.data = (unsigned long) local;
+	local->tick_timer.function = hostap_tick_timer;
+	local->tick_timer.expires = jiffies + 2 * HZ;
+	add_timer(&local->tick_timer);
+
+	INIT_LIST_HEAD(&local->bss_list);
+
+	hostap_setup_dev(dev, local, 1);
+	local->saved_eth_header_parse = dev->hard_header_parse;
+
+	dev->hard_start_xmit = hostap_master_start_xmit;
+	dev->type = ARPHRD_IEEE80211;
+	dev->hard_header_parse = hostap_80211_header_parse;
+
+	rtnl_lock();
+	ret = dev_alloc_name(dev, "wifi%d");
+	SET_NETDEV_DEV(dev, sdev);
+	if (ret >= 0)
+		ret = register_netdevice(dev);
+	rtnl_unlock();
+	if (ret < 0) {
+		printk(KERN_WARNING "%s: register netdevice failed!\n",
+		       dev_info);
+		goto fail;
+	}
+	printk(KERN_INFO "%s: Registered netdevice %s\n", dev_info, dev->name);
+
+#ifndef PRISM2_NO_PROCFS_DEBUG
+	create_proc_read_entry("registers", 0, local->proc,
+			       prism2_registers_proc_read, local);
+#endif /* PRISM2_NO_PROCFS_DEBUG */
+
+	hostap_init_data(local);
+	return dev;
+
+ fail:
+	free_netdev(dev);
+	return NULL;
+}
+
+
+static int hostap_hw_ready(struct net_device *dev)
+{
+	struct hostap_interface *iface;
+	struct local_info *local;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+	local->ddev = hostap_add_interface(local, HOSTAP_INTERFACE_MAIN, 0,
+					   "", dev_template);
+
+	if (local->ddev) {
+		if (local->iw_mode == IW_MODE_INFRA ||
+		    local->iw_mode == IW_MODE_ADHOC) {
+			netif_carrier_off(local->dev);
+			netif_carrier_off(local->ddev);
+		}
+		hostap_init_proc(local);
+		hostap_init_ap_proc(local);
+		return 0;
+	}
+
+	return -1;
+}
+
+
+static void prism2_free_local_data(struct net_device *dev)
+{
+	struct hostap_tx_callback_info *tx_cb, *tx_cb_prev;
+	int i;
+	struct hostap_interface *iface;
+	struct local_info *local;
+	struct list_head *ptr, *n;
+
+	if (dev == NULL)
+		return;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	flush_scheduled_work();
+
+	if (timer_pending(&local->crypt_deinit_timer))
+		del_timer(&local->crypt_deinit_timer);
+	prism2_crypt_deinit_entries(local, 1);
+
+	if (timer_pending(&local->passive_scan_timer))
+		del_timer(&local->passive_scan_timer);
+
+	if (timer_pending(&local->tick_timer))
+		del_timer(&local->tick_timer);
+
+	prism2_clear_cmd_queue(local);
+
+	skb_queue_purge(&local->info_list);
+	skb_queue_purge(&local->rx_list);
+	skb_queue_purge(&local->sta_tx_exc_list);
+
+	if (local->dev_enabled)
+		prism2_callback(local, PRISM2_CALLBACK_DISABLE);
+
+	for (i = 0; i < WEP_KEYS; i++) {
+		struct ieee80211_crypt_data *crypt = local->crypt[i];
+		if (crypt) {
+			if (crypt->ops)
+				crypt->ops->deinit(crypt->priv);
+			kfree(crypt);
+			local->crypt[i] = NULL;
+		}
+	}
+
+	if (local->ap != NULL)
+		hostap_free_data(local->ap);
+
+#ifndef PRISM2_NO_PROCFS_DEBUG
+	if (local->proc != NULL)
+		remove_proc_entry("registers", local->proc);
+#endif /* PRISM2_NO_PROCFS_DEBUG */
+	hostap_remove_proc(local);
+
+	tx_cb = local->tx_callback;
+	while (tx_cb != NULL) {
+		tx_cb_prev = tx_cb;
+		tx_cb = tx_cb->next;
+		kfree(tx_cb_prev);
+	}
+
+	hostap_set_hostapd(local, 0, 0);
+	hostap_set_hostapd_sta(local, 0, 0);
+
+	for (i = 0; i < PRISM2_FRAG_CACHE_LEN; i++) {
+		if (local->frag_cache[i].skb != NULL)
+			dev_kfree_skb(local->frag_cache[i].skb);
+	}
+
+#ifdef PRISM2_DOWNLOAD_SUPPORT
+	prism2_download_free_data(local->dl_pri);
+	prism2_download_free_data(local->dl_sec);
+#endif /* PRISM2_DOWNLOAD_SUPPORT */
+
+	list_for_each_safe(ptr, n, &local->hostap_interfaces) {
+		iface = list_entry(ptr, struct hostap_interface, list);
+		if (iface->type == HOSTAP_INTERFACE_MASTER) {
+			/* special handling for this interface below */
+			continue;
+		}
+		hostap_remove_interface(iface->dev, 0, 1);
+	}
+
+	prism2_clear_set_tim_queue(local);
+
+	list_for_each_safe(ptr, n, &local->bss_list) {
+		struct hostap_bss_info *bss =
+			list_entry(ptr, struct hostap_bss_info, list);
+		kfree(bss);
+	}
+
+	kfree(local->pda);
+	kfree(local->last_scan_results);
+	kfree(local->generic_elem);
+
+	unregister_netdev(local->dev);
+	free_netdev(local->dev);
+}
+
+
+#ifndef PRISM2_PLX
+static void prism2_suspend(struct net_device *dev)
+{
+	struct hostap_interface *iface;
+	struct local_info *local;
+	union iwreq_data wrqu;
+
+	iface = dev->priv;
+	local = iface->local;
+
+	/* Send disconnect event, e.g., to trigger reassociation after resume
+	 * if wpa_supplicant is used. */
+	memset(&wrqu, 0, sizeof(wrqu));
+	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+	wireless_send_event(local->dev, SIOCGIWAP, &wrqu, NULL);
+
+	/* Disable hardware and firmware */
+	prism2_hw_shutdown(dev, 0);
+}
+#endif /* PRISM2_PLX */
+
+
+/* These might at some point be compiled separately and used as separate
+ * kernel modules or linked into one */
+#ifdef PRISM2_DOWNLOAD_SUPPORT
+#include "hostap_download.c"
+#endif /* PRISM2_DOWNLOAD_SUPPORT */
+
+#ifdef PRISM2_CALLBACK
+/* An external hostap_callback.c file can be used to, e.g., blink an activity
+ * LED. This can use platform-specific code and must define the
+ * prism2_callback() function (if PRISM2_CALLBACK is not defined, these
+ * function calls are not used). */
+#include "hostap_callback.c"
+#endif /* PRISM2_CALLBACK */
diff --git a/drivers/net/wireless/hostap/hostap_info.c b/drivers/net/wireless/hostap/hostap_info.c
new file mode 100644
--- /dev/null
+++ b/drivers/net/wireless/hostap/hostap_info.c
@@ -0,0 +1,499 @@
+/* Host AP driver Info Frame processing (part of hostap.o module) */
+
+
+/* Called only as a tasklet (software IRQ) */
+static void prism2_info_commtallies16(local_info_t *local, unsigned char *buf,
+				      int left)
+{
+	struct hfa384x_comm_tallies *tallies;
+
+	if (left < sizeof(struct hfa384x_comm_tallies)) {
+		printk(KERN_DEBUG "%s: too short (len=%d) commtallies "
+		       "info frame\n", local->dev->name, left);
+		return;
+	}
+
+	tallies = (struct hfa384x_comm_tallies *) buf;
+#define ADD_COMM_TALLIES(name) \
+local->comm_tallies.name += le16_to_cpu(tallies->name)
+	ADD_COMM_TALLIES(tx_unicast_frames);
+	ADD_COMM_TALLIES(tx_multicast_frames);
+	ADD_COMM_TALLIES(tx_fragments);
+	ADD_COMM_TALLIES(tx_unicast_octets);
+	ADD_COMM_TALLIES(tx_multicast_octets);
+	ADD_COMM_TALLIES(tx_deferred_transmissions);
+	ADD_COMM_TALLIES(tx_single_retry_frames);
+	ADD_COMM_TALLIES(tx_multiple_retry_frames);
+	ADD_COMM_TALLIES(tx_retry_limit_exceeded);
+	ADD_COMM_TALLIES(tx_discards);
+	ADD_COMM_TALLIES(rx_unicast_frames);
+	ADD_COMM_TALLIES(rx_multicast_frames);
+	ADD_COMM_TALLIES(rx_fragments);
+	ADD_COMM_TALLIES(rx_unicast_octets);
+	ADD_COMM_TALLIES(rx_multicast_octets);
+	ADD_COMM_TALLIES(rx_fcs_errors);
+	ADD_COMM_TALLIES(rx_discards_no_buffer);
+	ADD_COMM_TALLIES(tx_discards_wrong_sa);
+	ADD_COMM_TALLIES(rx_discards_wep_undecryptable);
+	ADD_COMM_TALLIES(rx_message_in_msg_fragments);
+	ADD_COMM_TALLIES(rx_message_in_bad_msg_fragments);
+#undef ADD_COMM_TALLIES
+}
+
+
+/* Called only as a tasklet (software IRQ) */
+static void prism2_info_commtallies32(local_info_t *local, unsigned char *buf,
+				      int left)
+{
+	struct hfa384x_comm_tallies32 *tallies;
+
+	if (left < sizeof(struct hfa384x_comm_tallies32)) {
+		printk(KERN_DEBUG "%s: too short (len=%d) commtallies32 "
+		       "info frame\n", local->dev->name, left);
+		return;
+	}
+
+	tallies = (struct hfa384x_comm_tallies32 *) buf;
+#define ADD_COMM_TALLIES(name) \
+local->comm_tallies.name += le32_to_cpu(tallies->name)
+	ADD_COMM_TALLIES(tx_unicast_frames);
+	ADD_COMM_TALLIES(tx_multicast_frames);
+	ADD_COMM_TALLIES(tx_fragments);
+	ADD_COMM_TALLIES(tx_unicast_octets);
+	ADD_COMM_TALLIES(tx_multicast_octets);
+	ADD_COMM_TALLIES(tx_deferred_transmissions);
+	ADD_COMM_TALLIES(tx_single_retry_frames);
+	ADD_COMM_TALLIES(tx_multiple_retry_frames);
+	ADD_COMM_TALLIES(tx_retry_limit_exceeded);
+	ADD_COMM_TALLIES(tx_discards);
+	ADD_COMM_TALLIES(rx_unicast_frames);
+	ADD_COMM_TALLIES(rx_multicast_frames);
+	ADD_COMM_TALLIES(rx_fragments);
+	ADD_COMM_TALLIES(rx_unicast_octets);
+	ADD_COMM_TALLIES(rx_multicast_octets);
+	ADD_COMM_TALLIES(rx_fcs_errors);
+	ADD_COMM_TALLIES(rx_discards_no_buffer);
+	ADD_COMM_TALLIES(tx_discards_wrong_sa);
+	ADD_COMM_TALLIES(rx_discards_wep_undecryptable);
+	ADD_COMM_TALLIES(rx_message_in_msg_fragments);
+	ADD_COMM_TALLIES(rx_message_in_bad_msg_fragments);
+#undef ADD_COMM_TALLIES
+}
+
+
+/* Called only as a tasklet (software IRQ) */
+static void prism2_info_commtallies(local_info_t *local, unsigned char *buf,
+				    int left)
+{
+	if (local->tallies32)
+		prism2_info_commtallies32(local, buf, left);
+	else
+		prism2_info_commtallies16(local, buf, left);
+}
+
+
+#ifndef PRISM2_NO_STATION_MODES
+#ifndef PRISM2_NO_DEBUG
+static const char* hfa384x_linkstatus_str(u16 linkstatus)
+{
+	switch (linkstatus) {
+	case HFA384X_LINKSTATUS_CONNECTED:
+		return "Connected";
+	case HFA384X_LINKSTATUS_DISCONNECTED:
+		return "Disconnected";
+	case HFA384X_LINKSTATUS_AP_CHANGE:
+		return "Access point change";
+	case HFA384X_LINKSTATUS_AP_OUT_OF_RANGE:
+		return "Access point out of range";
+	case HFA384X_LINKSTATUS_AP_IN_RANGE:
+		return "Access point in range";
+	case HFA384X_LINKSTATUS_ASSOC_FAILED:
+		return "Association failed";
+	default:
+		return "Unknown";
+	}
+}
+#endif /* PRISM2_NO_DEBUG */
+
+
+/* Called only as a tasklet (software IRQ) */
+static void prism2_info_linkstatus(local_info_t *local, unsigned char *buf,
+				    int left)
+{
+	u16 val;
+	int non_sta_mode;
+
+	/* Allow new JoinRequests to be issued since the LinkStatus for the
+	 * previous one has been received */
+	local->last_join_time = 0;
+
+	if (left != 2) {
+		printk(KERN_DEBUG "%s: invalid linkstatus info frame "
+		       "length %d\n", local->dev->name, left);
+		return;
+	}
+
+	non_sta_mode = local->iw_mode == IW_MODE_MASTER ||
+		local->iw_mode == IW_MODE_REPEAT ||
+		local->iw_mode == IW_MODE_MONITOR;
+
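+	/* link status is a little-endian 16-bit value in the info frame body */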
+	val = buf[0] | (buf[1] << 8);
+	if (!non_sta_mode || val != HFA384X_LINKSTATUS_DISCONNECTED) {
+		PDEBUG(DEBUG_EXTRA, "%s: LinkStatus=%d (%s)\n",
+		       local->dev->name, val, hfa384x_linkstatus_str(val));
+	}
+
+	if (non_sta_mode) {
+		netif_carrier_on(local->dev);
+		netif_carrier_on(local->ddev);
+		return;
+	}
+
+	/* Get current BSSID later in scheduled task */
+	set_bit(PRISM2_INFO_PENDING_LINKSTATUS, &local->pending_info);
+	local->prev_link_status = val;
+	schedule_work(&local->info_queue);
+}
+
+
+static void prism2_host_roaming(local_info_t *local)
+{
+	struct hfa384x_join_request req;
+	struct net_device *dev = local->dev;
+	struct hfa384x_hostscan_result *selected, *entry;
+	int i;
+	unsigned long flags;
+
+	if (local->last_join_time &&
+	    time_before(jiffies, local->last_join_time + 10 * HZ)) {
+		PDEBUG(DEBUG_EXTRA, "%s: last join request has not yet been "
+		       "completed - waiting for it before issuing new one\n",
+		       dev->name);
+		return;
+	}
+
+	/* ScanResults are sorted: first ESS results in decreasing signal
+	 * quality then IBSS results in similar order.
+	 * Trivial roaming policy: just select the first entry.
+	 * This could probably be improved by adding hysteresis to limit
+	 * number of handoffs, etc.
+	 *
+	 * Could do periodic RID_SCANREQUEST or Inquire F101 to get new
+	 * ScanResults */
+	spin_lock_irqsave(&local->lock, flags);
+	if (local->last_scan_results == NULL ||
+	    local->last_scan_results_count == 0) {
+		spin_unlock_irqrestore(&local->lock, flags);
+		PDEBUG(DEBUG_EXTRA, "%s: no scan results for host roaming\n",
+		       dev->name);
+		return;
+	}
+
+	selected = &local->last_scan_results[0];
+
+	if (local->preferred_ap[0] || local->preferred_ap[1] ||
+	    local->preferred_ap[2] || local->preferred_ap[3] ||
+	    local->preferred_ap[4] || local->preferred_ap[5]) {
+		/* Try to find preferred AP */
+		PDEBUG(DEBUG_EXTRA, "%s: Preferred AP BSSID " MACSTR "\n",
+		       dev->name, MAC2STR(local->preferred_ap));
+		for (i = 0; i < local->last_scan_results_count; i++) {
+			entry = &local->last_scan_results[i];
+			if (memcmp(local->preferred_ap, entry->bssid, 6) == 0)
+			{
+				PDEBUG(DEBUG_EXTRA, "%s: using preferred AP "
+				       "selection\n", dev->name);
+				selected = entry;
+				break;
+			}
+		}
+	}
+
+	memcpy(req.bssid, selected->bssid, 6);
+	req.channel = selected->chid;
+	spin_unlock_irqrestore(&local->lock, flags);
+
+	PDEBUG(DEBUG_EXTRA, "%s: JoinRequest: BSSID=" MACSTR " channel=%d\n",
+	       dev->name, MAC2STR(req.bssid), le16_to_cpu(req.channel));
+	if (local->func->set_rid(dev, HFA384X_RID_JOINREQUEST, &req,
+				 sizeof(req))) {
+		printk(KERN_DEBUG "%s: JoinRequest failed\n", dev->name);
+	}
+	local->last_join_time = jiffies;
+}
+
+
+static void hostap_report_scan_complete(local_info_t *local)
+{
+	union iwreq_data wrqu;
+
+	/* Inform user space about new scan results (just an empty event;
+	 * SIOCGIWSCAN can be used to fetch the data) */
+	wrqu.data.length = 0;
+	wrqu.data.flags = 0;
+	wireless_send_event(local->dev, SIOCGIWSCAN, &wrqu, NULL);
+
+	/* Allow SIOCGIWSCAN handling to occur since we have received
+	 * scanning result */
+	local->scan_timestamp = 0;
+}
+
+
+/* Called only as a tasklet (software IRQ) */
+static void prism2_info_scanresults(local_info_t *local, unsigned char *buf,
+				    int left)
+{
+	u16 *pos;
+	int new_count, i;
+	unsigned long flags;
+	struct hfa384x_scan_result *res;
+	struct hfa384x_hostscan_result *results, *prev;
+
+	if (left < 4) {
+		printk(KERN_DEBUG "%s: invalid scanresult info frame "
+		       "length %d\n", local->dev->name, left);
+		return;
+	}
+
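+	/* skip the two 16-bit words at the start of the info frame body
+	 * before the scan result entries */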
+	pos = (u16 *) buf;
+	pos++;
+	pos++;
+	left -= 4;
+
+	new_count = left / sizeof(struct hfa384x_scan_result);
+	results = kmalloc(new_count * sizeof(struct hfa384x_hostscan_result),
+			  GFP_ATOMIC);
+	if (results == NULL)
+		return;
+
+	/* Convert to hostscan result format. */
+	res = (struct hfa384x_scan_result *) pos;
+	for (i = 0; i < new_count; i++) {
+		memcpy(&results[i], &res[i],
+		       sizeof(struct hfa384x_scan_result));
+		results[i].atim = 0;
+	}
+
+	spin_lock_irqsave(&local->lock, flags);
+	local->last_scan_type = PRISM2_SCAN;
+	prev = local->last_scan_results;
+	local->last_scan_results = results;
+	local->last_scan_results_count = new_count;
+	spin_unlock_irqrestore(&local->lock, flags);
+	kfree(prev);
+
+	hostap_report_scan_complete(local);
+
+	/* Perform rest of ScanResults handling later in scheduled task */
+	set_bit(PRISM2_INFO_PENDING_SCANRESULTS, &local->pending_info);
+	schedule_work(&local->info_queue);
+}
+
+
+/* Called only as a tasklet (software IRQ) */
+static void prism2_info_hostscanresults(local_info_t *local,
+					unsigned char *buf, int left)
+{
+	int i, result_size, copy_len, new_count;
+	struct hfa384x_hostscan_result *results, *prev;
+	unsigned long flags;
+	u16 *pos;
+	u8 *ptr;
+
+	wake_up_interruptible(&local->hostscan_wq);
+
+	if (left < 4) {
+		printk(KERN_DEBUG "%s: invalid hostscanresult info frame "
+		       "length %d\n", local->dev->name, left);
+		return;
+	}
+
+	pos = (u16 *) buf;
+	copy_len = result_size = le16_to_cpu(*pos);
+	if (result_size == 0) {
+		printk(KERN_DEBUG "%s: invalid result_size (0) in "
+		       "hostscanresults\n", local->dev->name);
+		return;
+	}
+	if (copy_len > sizeof(struct hfa384x_hostscan_result))
+		copy_len = sizeof(struct hfa384x_hostscan_result);
+
+	pos++;
+	pos++;
+	left -= 4;
+	ptr = (u8 *) pos;
+
+	new_count = left / result_size;
+	results = kmalloc(new_count * sizeof(struct hfa384x_hostscan_result),
+			  GFP_ATOMIC);
+	if (results == NULL)
+		return;
+	memset(results, 0, new_count * sizeof(struct hfa384x_hostscan_result));
+
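+	/* copy at most sizeof(struct hfa384x_hostscan_result) per entry; the
+	 * entries in the info frame advance by the firmware-reported
+	 * result_size, which may differ from the structure size */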
+	for (i = 0; i < new_count; i++) {
+		memcpy(&results[i], ptr, copy_len);
+		ptr += result_size;
+		left -= result_size;
+	}
+
+	if (left) {
+		printk(KERN_DEBUG "%s: short HostScan result entry (%d/%d)\n",
+		       local->dev->name, left, result_size);
+	}
+
+	spin_lock_irqsave(&local->lock, flags);
+	local->last_scan_type = PRISM2_HOSTSCAN;
+	prev = local->last_scan_results;
+	local->last_scan_results = results;
+	local->last_scan_results_count = new_count;
+	spin_unlock_irqrestore(&local->lock, flags);
+	kfree(prev);
+
+	hostap_report_scan_complete(local);
+}
+#endif /* PRISM2_NO_STATION_MODES */
+
+
+/* Called only as a tasklet (software IRQ) */
+void hostap_info_process(local_info_t *local, struct sk_buff *skb)
+{
+	struct hfa384x_info_frame *info;
+	unsigned char *buf;
+	int left;
+#ifndef PRISM2_NO_DEBUG
+	int i;
+#endif /* PRISM2_NO_DEBUG */
+
+	info = (struct hfa384x_info_frame *) skb->data;
+	buf = skb->data + sizeof(*info);
+	left = skb->len - sizeof(*info);
+
+	switch (info->type) {
+	case HFA384X_INFO_COMMTALLIES:
+		prism2_info_commtallies(local, buf, left);
+		break;
+
+#ifndef PRISM2_NO_STATION_MODES
+	case HFA384X_INFO_LINKSTATUS:
+		prism2_info_linkstatus(local, buf, left);
+		break;
+
+	case HFA384X_INFO_SCANRESULTS:
+		prism2_info_scanresults(local, buf, left);
+		break;
+
+	case HFA384X_INFO_HOSTSCANRESULTS:
+		prism2_info_hostscanresults(local, buf, left);
+		break;
+#endif /* PRISM2_NO_STATION_MODES */
+
+#ifndef PRISM2_NO_DEBUG
+	default:
+		PDEBUG(DEBUG_EXTRA, "%s: INFO - len=%d type=0x%04x\n",
+		       local->dev->name, info->len, info->type);
+		PDEBUG(DEBUG_EXTRA, "Unknown info frame:");
+		for (i = 0; i < (left < 100 ? left : 100); i++)
+			PDEBUG2(DEBUG_EXTRA, " %02x", buf[i]);
+		PDEBUG2(DEBUG_EXTRA, "\n");
+		break;
+#endif /* PRISM2_NO_DEBUG */
+	}
+}
+
+
+#ifndef PRISM2_NO_STATION_MODES
+static void handle_info_queue_linkstatus(local_info_t *local)
+{
+	int val = local->prev_link_status;
+	int connected;
+	union iwreq_data wrqu;
+
+	connected =
+		val == HFA384X_LINKSTATUS_CONNECTED ||
+		val == HFA384X_LINKSTATUS_AP_CHANGE ||
+		val == HFA384X_LINKSTATUS_AP_IN_RANGE;
+
+	if (local->func->get_rid(local->dev, HFA384X_RID_CURRENTBSSID,
+				 local->bssid, ETH_ALEN, 1) < 0) {
+		printk(KERN_DEBUG "%s: could not read CURRENTBSSID after "
+		       "LinkStatus event\n", local->dev->name);
+	} else {
+		PDEBUG(DEBUG_EXTRA, "%s: LinkStatus: BSSID=" MACSTR "\n",
+		       local->dev->name,
+		       MAC2STR((unsigned char *) local->bssid));
+		if (local->wds_type & HOSTAP_WDS_AP_CLIENT)
+			hostap_add_sta(local->ap, local->bssid);
+	}
+
+	/* Get BSSID if we have a valid AP address */
+	if (connected) {
+		netif_carrier_on(local->dev);
+		netif_carrier_on(local->ddev);
+		memcpy(wrqu.ap_addr.sa_data, local->bssid, ETH_ALEN);
+	} else {
+		netif_carrier_off(local->dev);
+		netif_carrier_off(local->ddev);
+		memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
+	}
+	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+
+	/*
+	 * Filter out sequential disconnect events in order not to cause a
+	 * flood of SIOCGIWAP events that have a race condition with EAPOL
+	 * frames and can confuse wpa_supplicant about the current association
+	 * status.
+	 */
+	if (connected || local->prev_linkstatus_connected)
+		wireless_send_event(local->dev, SIOCGIWAP, &wrqu, NULL);
+	local->prev_linkstatus_connected = connected;
+}
+
+
+static void handle_info_queue_scanresults(local_info_t *local)
+{
+	if (local->host_roaming == 1 && local->iw_mode == IW_MODE_INFRA)
+		prism2_host_roaming(local);
+
+	if (local->host_roaming == 2 && local->iw_mode == IW_MODE_INFRA &&
+	    memcmp(local->preferred_ap, "\x00\x00\x00\x00\x00\x00",
+		   ETH_ALEN) != 0) {
+		/*
+		 * Firmware seems to be getting into odd state in host_roaming
+		 * mode 2 when hostscan is used without join command, so try
+		 * to fix this by re-joining the current AP. This does not
+		 * actually trigger a new association if the current AP is
+		 * still in the scan results.
+		 */
+		prism2_host_roaming(local);
+	}
+}
+
+
+/* Called only as a scheduled task after receiving info frames (used to avoid
+ * spending too much time in the HW IRQ handler). */
+static void handle_info_queue(void *data)
+{
+	local_info_t *local = (local_info_t *) data;
+
+	if (test_and_clear_bit(PRISM2_INFO_PENDING_LINKSTATUS,
+			       &local->pending_info))
+		handle_info_queue_linkstatus(local);
+
+	if (test_and_clear_bit(PRISM2_INFO_PENDING_SCANRESULTS,
+			       &local->pending_info))
+		handle_info_queue_scanresults(local);
+}
+#endif /* PRISM2_NO_STATION_MODES */
+
+
+void hostap_info_init(local_info_t *local)
+{
+	skb_queue_head_init(&local->info_list);
+#ifndef PRISM2_NO_STATION_MODES
+	INIT_WORK(&local->info_queue, handle_info_queue, local);
+#endif /* PRISM2_NO_STATION_MODES */
+}
+
+
+EXPORT_SYMBOL(hostap_info_init);
+EXPORT_SYMBOL(hostap_info_process);
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
new file mode 100644
--- /dev/null
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -0,0 +1,4102 @@
+/* ioctl() (mostly Linux Wireless Extensions) routines for Host AP driver */
+
+#ifdef in_atomic
+/* Get kernel_locked() for in_atomic() */
+#include <linux/smp_lock.h>
+#endif
+#include <linux/ethtool.h>
+
+
+static struct iw_statistics *hostap_get_wireless_stats(struct net_device *dev)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	struct iw_statistics *wstats;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	/* Why are we doing that ? Jean II */
+	if (iface->type != HOSTAP_INTERFACE_MAIN)
+		return NULL;
+
+	wstats = &local->wstats;
+
+	wstats->status = 0;
+	wstats->discard.code =
+		local->comm_tallies.rx_discards_wep_undecryptable;
+	wstats->discard.misc =
+		local->comm_tallies.rx_fcs_errors +
+		local->comm_tallies.rx_discards_no_buffer +
+		local->comm_tallies.tx_discards_wrong_sa;
+
+	wstats->discard.retries =
+		local->comm_tallies.tx_retry_limit_exceeded;
+	wstats->discard.fragment =
+		local->comm_tallies.rx_message_in_bad_msg_fragments;
+
+	if (local->iw_mode != IW_MODE_MASTER &&
+	    local->iw_mode != IW_MODE_REPEAT) {
+		int update = 1;
+#ifdef in_atomic
+		/* RID reading might sleep and it must not be called in
+		 * interrupt context or while atomic. However, this
+		 * function seems to be called while atomic (at least in Linux
+		 * 2.5.59). Update signal quality values only if in suitable
+		 * context. Otherwise, previous values read from tick timer
+		 * will be used. */
+		if (in_atomic())
+			update = 0;
+#endif /* in_atomic */
+
+		if (update && prism2_update_comms_qual(dev) == 0)
+			wstats->qual.updated = 7;
+
+		wstats->qual.qual = local->comms_qual;
+		wstats->qual.level = local->avg_signal;
+		wstats->qual.noise = local->avg_noise;
+	} else {
+		wstats->qual.qual = 0;
+		wstats->qual.level = 0;
+		wstats->qual.noise = 0;
+		wstats->qual.updated = 0;
+	}
+
+	return wstats;
+}
+
+
+static int prism2_get_datarates(struct net_device *dev, u8 *rates)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	u8 buf[12];
+	int len;
+	u16 val;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	len = local->func->get_rid(dev, HFA384X_RID_SUPPORTEDDATARATES, buf,
+				   sizeof(buf), 0);
+	if (len < 2)
+		return 0;
+
+	val = le16_to_cpu(*(u16 *) buf); /* string length */
+
+	if (len - 2 < val || val > 10)
+		return 0;
+
+	memcpy(rates, buf + 2, val);
+	return val;
+}
+
+
+static int prism2_get_name(struct net_device *dev,
+			   struct iw_request_info *info,
+			   char *name, char *extra)
+{
+	u8 rates[10];
+	int len, i, over2 = 0;
+
+	len = prism2_get_datarates(dev, rates);
+
+	for (i = 0; i < len; i++) {
+		if (rates[i] == 0x0b || rates[i] == 0x16) {
+			over2 = 1;
+			break;
+		}
+	}
+
+	strcpy(name, over2 ? "IEEE 802.11b" : "IEEE 802.11-DS");
+
+	return 0;
+}
+
+
+static void prism2_crypt_delayed_deinit(local_info_t *local,
+					struct ieee80211_crypt_data **crypt)
+{
+	struct ieee80211_crypt_data *tmp;
+	unsigned long flags;
+
+	tmp = *crypt;
+	*crypt = NULL;
+
+	if (tmp == NULL)
+		return;
+
+	/* must not run ops->deinit() while there may be pending encrypt or
+	 * decrypt operations. Use a list of delayed deinits to avoid needing
+	 * locking. */
+
+	spin_lock_irqsave(&local->lock, flags);
+	list_add(&tmp->list, &local->crypt_deinit_list);
+	if (!timer_pending(&local->crypt_deinit_timer)) {
+		local->crypt_deinit_timer.expires = jiffies + HZ;
+		add_timer(&local->crypt_deinit_timer);
+	}
+	spin_unlock_irqrestore(&local->lock, flags);
+}
+
+
+static int prism2_ioctl_siwencode(struct net_device *dev,
+				  struct iw_request_info *info,
+				  struct iw_point *erq, char *keybuf)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	int i;
+	struct ieee80211_crypt_data **crypt;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	i = erq->flags & IW_ENCODE_INDEX;
+	if (i < 1 || i > 4)
+		i = local->tx_keyidx;
+	else
+		i--;
+	if (i < 0 || i >= WEP_KEYS)
+		return -EINVAL;
+
+	crypt = &local->crypt[i];
+
+	if (erq->flags & IW_ENCODE_DISABLED) {
+		if (*crypt)
+			prism2_crypt_delayed_deinit(local, crypt);
+		goto done;
+	}
+
+	if (*crypt != NULL && (*crypt)->ops != NULL &&
+	    strcmp((*crypt)->ops->name, "WEP") != 0) {
+		/* changing to use WEP; deinit previously used algorithm */
+		prism2_crypt_delayed_deinit(local, crypt);
+	}
+
+	if (*crypt == NULL) {
+		struct ieee80211_crypt_data *new_crypt;
+
+		/* take WEP into use */
+		new_crypt = (struct ieee80211_crypt_data *)
+			kmalloc(sizeof(struct ieee80211_crypt_data),
+				GFP_KERNEL);
+		if (new_crypt == NULL)
+			return -ENOMEM;
+		memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
+		new_crypt->ops = ieee80211_get_crypto_ops("WEP");
+		if (!new_crypt->ops) {
+			request_module("ieee80211_crypt_wep");
+			new_crypt->ops = ieee80211_get_crypto_ops("WEP");
+		}
+		if (new_crypt->ops)
+			new_crypt->priv = new_crypt->ops->init(i);
+		if (!new_crypt->ops || !new_crypt->priv) {
+			kfree(new_crypt);
+			new_crypt = NULL;
+
+			printk(KERN_WARNING "%s: could not initialize WEP: "
+			       "load module ieee80211_crypt_wep\n",
+			       dev->name);
+			return -EOPNOTSUPP;
+		}
+		*crypt = new_crypt;
+	}
+
+	if (erq->length > 0) {
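+		/* WEP keys are 5 bytes (40-bit) or 13 bytes (104-bit); shorter
+		 * user-supplied keys are zero-padded below */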
+		int len = erq->length <= 5 ? 5 : 13;
+		int first = 1, j;
+		if (len > erq->length)
+			memset(keybuf + erq->length, 0, len - erq->length);
+		(*crypt)->ops->set_key(keybuf, len, NULL, (*crypt)->priv);
+		for (j = 0; j < WEP_KEYS; j++) {
+			if (j != i && local->crypt[j]) {
+				first = 0;
+				break;
+			}
+		}
+		if (first)
+			local->tx_keyidx = i;
+	} else {
+		/* No key data - just set the default TX key index */
+		local->tx_keyidx = i;
+	}
+
+ done:
+	local->open_wep = erq->flags & IW_ENCODE_OPEN;
+
+	if (hostap_set_encryption(local)) {
+		printk(KERN_DEBUG "%s: set_encryption failed\n", dev->name);
+		return -EINVAL;
+	}
+
+	/* Do not reset port0 if the card is in Managed mode, since resetting
+	 * would generate a new IEEE 802.11 authentication which may end up
+	 * looping with IEEE 802.1X. Prism2 documentation seems to require a
+	 * port reset after WEP configuration; however, the keys are apparently
+	 * changed even without it, at least in Managed mode. */
+	if (local->iw_mode != IW_MODE_INFRA && local->func->reset_port(dev)) {
+		printk(KERN_DEBUG "%s: reset_port failed\n", dev->name);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+
+static int prism2_ioctl_giwencode(struct net_device *dev,
+				  struct iw_request_info *info,
+				  struct iw_point *erq, char *key)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	int i, len;
+	u16 val;
+	struct ieee80211_crypt_data *crypt;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	i = erq->flags & IW_ENCODE_INDEX;
+	if (i < 1 || i > 4)
+		i = local->tx_keyidx;
+	else
+		i--;
+	if (i < 0 || i >= WEP_KEYS)
+		return -EINVAL;
+
+	crypt = local->crypt[i];
+	erq->flags = i + 1;
+
+	if (crypt == NULL || crypt->ops == NULL) {
+		erq->length = 0;
+		erq->flags |= IW_ENCODE_DISABLED;
+		return 0;
+	}
+
+	if (strcmp(crypt->ops->name, "WEP") != 0) {
+		/* only WEP is supported with wireless extensions, so just
+		 * report that encryption is used */
+		erq->length = 0;
+		erq->flags |= IW_ENCODE_ENABLED;
+		return 0;
+	}
+
+	/* Reads from HFA384X_RID_CNFDEFAULTKEY* return bogus values, so show
+	 * the keys from driver buffer */
+	len = crypt->ops->get_key(key, WEP_KEY_LEN, NULL, crypt->priv);
+	erq->length = (len >= 0 ? len : 0);
+
+	if (local->func->get_rid(dev, HFA384X_RID_CNFWEPFLAGS, &val, 2, 1) < 0)
+	{
+		printk(KERN_DEBUG "%s: CNFWEPFLAGS reading failed\n",
+		       dev->name);
+		return -EOPNOTSUPP;
+	}
+	le16_to_cpus(&val);
+	if (val & HFA384X_WEPFLAGS_PRIVACYINVOKED)
+		erq->flags |= IW_ENCODE_ENABLED;
+	else
+		erq->flags |= IW_ENCODE_DISABLED;
+	if (val & HFA384X_WEPFLAGS_EXCLUDEUNENCRYPTED)
+		erq->flags |= IW_ENCODE_RESTRICTED;
+	else
+		erq->flags |= IW_ENCODE_OPEN;
+
+	return 0;
+}
+
+
+static int hostap_set_rate(struct net_device *dev)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	int ret, basic_rates;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	basic_rates = local->basic_rates & local->tx_rate_control;
+	if (!basic_rates || basic_rates != local->basic_rates) {
+		printk(KERN_INFO "%s: updating basic rate set automatically "
+		       "to match with the new supported rate set\n",
+		       dev->name);
+		if (!basic_rates)
+			basic_rates = local->tx_rate_control;
+
+		local->basic_rates = basic_rates;
+		if (hostap_set_word(dev, HFA384X_RID_CNFBASICRATES,
+				    basic_rates))
+			printk(KERN_WARNING "%s: failed to set "
+			       "cnfBasicRates\n", dev->name);
+	}
+
+	ret = (hostap_set_word(dev, HFA384X_RID_TXRATECONTROL,
+			       local->tx_rate_control) ||
+	       hostap_set_word(dev, HFA384X_RID_CNFSUPPORTEDRATES,
+			       local->tx_rate_control) ||
+	       local->func->reset_port(dev));
+
+	if (ret) {
+		printk(KERN_WARNING "%s: TXRateControl/cnfSupportedRates "
+		       "setting to 0x%x failed\n",
+		       dev->name, local->tx_rate_control);
+	}
+
+	/* Update TX rate configuration for all STAs based on new operational
+	 * rate set. */
+	hostap_update_rates(local);
+
+	return ret;
+}
+
+
+static int prism2_ioctl_siwrate(struct net_device *dev,
+				struct iw_request_info *info,
+				struct iw_param *rrq, char *extra)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	if (rrq->fixed) {
+		switch (rrq->value) {
+		case 11000000:
+			local->tx_rate_control = HFA384X_RATES_11MBPS;
+			break;
+		case 5500000:
+			local->tx_rate_control = HFA384X_RATES_5MBPS;
+			break;
+		case 2000000:
+			local->tx_rate_control = HFA384X_RATES_2MBPS;
+			break;
+		case 1000000:
+			local->tx_rate_control = HFA384X_RATES_1MBPS;
+			break;
+		default:
+			local->tx_rate_control = HFA384X_RATES_1MBPS |
+				HFA384X_RATES_2MBPS | HFA384X_RATES_5MBPS |
+				HFA384X_RATES_11MBPS;
+			break;
+		}
+	} else {
+		switch (rrq->value) {
+		case 11000000:
+			local->tx_rate_control = HFA384X_RATES_1MBPS |
+				HFA384X_RATES_2MBPS | HFA384X_RATES_5MBPS |
+				HFA384X_RATES_11MBPS;
+			break;
+		case 5500000:
+			local->tx_rate_control = HFA384X_RATES_1MBPS |
+				HFA384X_RATES_2MBPS | HFA384X_RATES_5MBPS;
+			break;
+		case 2000000:
+			local->tx_rate_control = HFA384X_RATES_1MBPS |
+				HFA384X_RATES_2MBPS;
+			break;
+		case 1000000:
+			local->tx_rate_control = HFA384X_RATES_1MBPS;
+			break;
+		default:
+			local->tx_rate_control = HFA384X_RATES_1MBPS |
+				HFA384X_RATES_2MBPS | HFA384X_RATES_5MBPS |
+				HFA384X_RATES_11MBPS;
+			break;
+		}
+	}
+
+	return hostap_set_rate(dev);
+}
+
+
+static int prism2_ioctl_giwrate(struct net_device *dev,
+				struct iw_request_info *info,
+				struct iw_param *rrq, char *extra)
+{
+	u16 val;
+	struct hostap_interface *iface;
+	local_info_t *local;
+	int ret = 0;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	if (local->func->get_rid(dev, HFA384X_RID_TXRATECONTROL, &val, 2, 1) <
+	    0)
+		return -EINVAL;
+
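+	/* report automatic rate selection if the 1 Mbps bit is set together
+	 * with any higher rate bit */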
+	if ((val & 0x1) && (val > 1))
+		rrq->fixed = 0;
+	else
+		rrq->fixed = 1;
+
+	if (local->iw_mode == IW_MODE_MASTER && local->ap != NULL &&
+	    !local->fw_tx_rate_control) {
+		/* HFA384X_RID_CURRENTTXRATE seems to always be 2 Mbps in
+		 * Host AP mode, so use the recorded TX rate of the last sent
+		 * frame */
+		rrq->value = local->ap->last_tx_rate > 0 ?
+			local->ap->last_tx_rate * 100000 : 11000000;
+		return 0;
+	}
+
+	if (local->func->get_rid(dev, HFA384X_RID_CURRENTTXRATE, &val, 2, 1) <
+	    0)
+		return -EINVAL;
+
+	switch (val) {
+	case HFA384X_RATES_1MBPS:
+		rrq->value = 1000000;
+		break;
+	case HFA384X_RATES_2MBPS:
+		rrq->value = 2000000;
+		break;
+	case HFA384X_RATES_5MBPS:
+		rrq->value = 5500000;
+		break;
+	case HFA384X_RATES_11MBPS:
+		rrq->value = 11000000;
+		break;
+	default:
+		/* should not happen */
+		rrq->value = 11000000;
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+
+static int prism2_ioctl_siwsens(struct net_device *dev,
+				struct iw_request_info *info,
+				struct iw_param *sens, char *extra)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	/* Set the desired AP density */
+	if (sens->value < 1 || sens->value > 3)
+		return -EINVAL;
+
+	if (hostap_set_word(dev, HFA384X_RID_CNFSYSTEMSCALE, sens->value) ||
+	    local->func->reset_port(dev))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int prism2_ioctl_giwsens(struct net_device *dev,
+				struct iw_request_info *info,
+				struct iw_param *sens, char *extra)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	u16 val;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	/* Get the current AP density */
+	if (local->func->get_rid(dev, HFA384X_RID_CNFSYSTEMSCALE, &val, 2, 1) <
+	    0)
+		return -EINVAL;
+
+	sens->value = __le16_to_cpu(val);
+	sens->fixed = 1;
+
+	return 0;
+}
+
+
+/* Deprecated in new wireless extension API */
+static int prism2_ioctl_giwaplist(struct net_device *dev,
+				  struct iw_request_info *info,
+				  struct iw_point *data, char *extra)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	struct sockaddr *addr;
+	struct iw_quality *qual;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	if (local->iw_mode != IW_MODE_MASTER) {
+		printk(KERN_DEBUG "SIOCGIWAPLIST is currently only supported "
+		       "in Host AP mode\n");
+		data->length = 0;
+		return -EOPNOTSUPP;
+	}
+
+	addr = kmalloc(sizeof(struct sockaddr) * IW_MAX_AP, GFP_KERNEL);
+	qual = kmalloc(sizeof(struct iw_quality) * IW_MAX_AP, GFP_KERNEL);
+	if (addr == NULL || qual == NULL) {
+		kfree(addr);
+		kfree(qual);
+		data->length = 0;
+		return -ENOMEM;
+	}
+
+	data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1);
+
+	memcpy(extra, addr, sizeof(struct sockaddr) * data->length);
+	data->flags = 1; /* has quality information */
+	memcpy(extra + sizeof(struct sockaddr) * data->length, qual,
+	       sizeof(struct iw_quality) * data->length);
+
+	kfree(addr);
+	kfree(qual);
+
+	return 0;
+}
+
+
+static int prism2_ioctl_siwrts(struct net_device *dev,
+			       struct iw_request_info *info,
+			       struct iw_param *rts, char *extra)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	u16 val;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	if (rts->disabled)
+		val = __constant_cpu_to_le16(2347);
+	else if (rts->value < 0 || rts->value > 2347)
+		return -EINVAL;
+	else
+		val = __cpu_to_le16(rts->value);
+
+	if (local->func->set_rid(dev, HFA384X_RID_RTSTHRESHOLD, &val, 2) ||
+	    local->func->reset_port(dev))
+		return -EINVAL;
+
+	local->rts_threshold = rts->value;
+
+	return 0;
+}
+
+static int prism2_ioctl_giwrts(struct net_device *dev,
+			       struct iw_request_info *info,
+			       struct iw_param *rts, char *extra)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	u16 val;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	if (local->func->get_rid(dev, HFA384X_RID_RTSTHRESHOLD, &val, 2, 1) <
+	    0)
+		return -EINVAL;
+
+	rts->value = __le16_to_cpu(val);
+	rts->disabled = (rts->value == 2347);
+	rts->fixed = 1;
+
+	return 0;
+}
+
+
+static int prism2_ioctl_siwfrag(struct net_device *dev,
+				struct iw_request_info *info,
+				struct iw_param *rts, char *extra)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	u16 val;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	if (rts->disabled)
+		val = __constant_cpu_to_le16(2346);
+	else if (rts->value < 256 || rts->value > 2346)
+		return -EINVAL;
+	else
+		val = __cpu_to_le16(rts->value & ~0x1); /* even numbers only */
+
+	local->fragm_threshold = rts->value & ~0x1;
+	if (local->func->set_rid(dev, HFA384X_RID_FRAGMENTATIONTHRESHOLD, &val,
+				 2)
+	    || local->func->reset_port(dev))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int prism2_ioctl_giwfrag(struct net_device *dev,
+				struct iw_request_info *info,
+				struct iw_param *rts, char *extra)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	u16 val;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	if (local->func->get_rid(dev, HFA384X_RID_FRAGMENTATIONTHRESHOLD,
+				 &val, 2, 1) < 0)
+		return -EINVAL;
+
+	rts->value = __le16_to_cpu(val);
+	rts->disabled = (rts->value == 2346);
+	rts->fixed = 1;
+
+	return 0;
+}
+
+
+#ifndef PRISM2_NO_STATION_MODES
+static int hostap_join_ap(struct net_device *dev)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	struct hfa384x_join_request req;
+	unsigned long flags;
+	int i;
+	struct hfa384x_hostscan_result *entry;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	memcpy(req.bssid, local->preferred_ap, ETH_ALEN);
+	req.channel = 0;
+
+	spin_lock_irqsave(&local->lock, flags);
+	for (i = 0; i < local->last_scan_results_count; i++) {
+		if (!local->last_scan_results)
+			break;
+		entry = &local->last_scan_results[i];
+		if (memcmp(local->preferred_ap, entry->bssid, ETH_ALEN) == 0) {
+			req.channel = entry->chid;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&local->lock, flags);
+
+	if (local->func->set_rid(dev, HFA384X_RID_JOINREQUEST, &req,
+				 sizeof(req))) {
+		printk(KERN_DEBUG "%s: JoinRequest " MACSTR
+		       " failed\n",
+		       dev->name, MAC2STR(local->preferred_ap));
+		return -1;
+	}
+
+	printk(KERN_DEBUG "%s: Trying to join BSSID " MACSTR "\n",
+	       dev->name, MAC2STR(local->preferred_ap));
+
+	return 0;
+}
+#endif /* PRISM2_NO_STATION_MODES */
+
+
+static int prism2_ioctl_siwap(struct net_device *dev,
+			      struct iw_request_info *info,
+			      struct sockaddr *ap_addr, char *extra)
+{
+#ifdef PRISM2_NO_STATION_MODES
+	return -EOPNOTSUPP;
+#else /* PRISM2_NO_STATION_MODES */
+	struct hostap_interface *iface;
+	local_info_t *local;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	memcpy(local->preferred_ap, &ap_addr->sa_data, ETH_ALEN);
+
+	if (local->host_roaming == 1 && local->iw_mode == IW_MODE_INFRA) {
+		struct hfa384x_scan_request scan_req;
+		memset(&scan_req, 0, sizeof(scan_req));
+		scan_req.channel_list = __constant_cpu_to_le16(0x3fff);
+		scan_req.txrate = __constant_cpu_to_le16(HFA384X_RATES_1MBPS);
+		if (local->func->set_rid(dev, HFA384X_RID_SCANREQUEST,
+					 &scan_req, sizeof(scan_req))) {
+			printk(KERN_DEBUG "%s: ScanResults request failed - "
+			       "preferred AP delayed to next unsolicited "
+			       "scan\n", dev->name);
+		}
+	} else if (local->host_roaming == 2 &&
+		   local->iw_mode == IW_MODE_INFRA) {
+		if (hostap_join_ap(dev))
+			return -EINVAL;
+	} else {
+		printk(KERN_DEBUG "%s: Preferred AP (SIOCSIWAP) is used only "
+		       "in Managed mode when host_roaming is enabled\n",
+		       dev->name);
+	}
+
+	return 0;
+#endif /* PRISM2_NO_STATION_MODES */
+}
+
+static int prism2_ioctl_giwap(struct net_device *dev,
+			      struct iw_request_info *info,
+			      struct sockaddr *ap_addr, char *extra)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	ap_addr->sa_family = ARPHRD_ETHER;
+	switch (iface->type) {
+	case HOSTAP_INTERFACE_AP:
+		memcpy(&ap_addr->sa_data, dev->dev_addr, ETH_ALEN);
+		break;
+	case HOSTAP_INTERFACE_STA:
+		memcpy(&ap_addr->sa_data, local->assoc_ap_addr, ETH_ALEN);
+		break;
+	case HOSTAP_INTERFACE_WDS:
+		memcpy(&ap_addr->sa_data, iface->u.wds.remote_addr, ETH_ALEN);
+		break;
+	default:
+		if (local->func->get_rid(dev, HFA384X_RID_CURRENTBSSID,
+					 &ap_addr->sa_data, ETH_ALEN, 1) < 0)
+			return -EOPNOTSUPP;
+
+		/* local->bssid is also updated in LinkStatus handler when in
+		 * station mode */
+		memcpy(local->bssid, &ap_addr->sa_data, ETH_ALEN);
+		break;
+	}
+
+	return 0;
+}
+
+
+static int prism2_ioctl_siwnickn(struct net_device *dev,
+				 struct iw_request_info *info,
+				 struct iw_point *data, char *nickname)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	memset(local->name, 0, sizeof(local->name));
+	memcpy(local->name, nickname, data->length);
+	local->name_set = 1;
+
+	if (hostap_set_string(dev, HFA384X_RID_CNFOWNNAME, local->name) ||
+	    local->func->reset_port(dev))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int prism2_ioctl_giwnickn(struct net_device *dev,
+				 struct iw_request_info *info,
+				 struct iw_point *data, char *nickname)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	int len;
+	char name[MAX_NAME_LEN + 3];
+	u16 val;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
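+	/* The RID data starts with a 16-bit little-endian length field
+	 * followed by the name itself; validate the length before copying. */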
+	len = local->func->get_rid(dev, HFA384X_RID_CNFOWNNAME,
+				   &name, MAX_NAME_LEN + 2, 0);
+	val = __le16_to_cpu(*(u16 *) name);
+	if (len > MAX_NAME_LEN + 2 || len < 0 || val > MAX_NAME_LEN)
+		return -EOPNOTSUPP;
+
+	name[val + 2] = '\0';
+	data->length = val + 1;
+	memcpy(nickname, name + 2, val + 1);
+
+	return 0;
+}
+
+
+static int prism2_ioctl_siwfreq(struct net_device *dev,
+				struct iw_request_info *info,
+				struct iw_freq *freq, char *extra)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	/* freq => chan: convert a frequency setting into a channel number */
+	if (freq->e == 1 &&
+	    freq->m / 100000 >= freq_list[0] &&
+	    freq->m / 100000 <= freq_list[FREQ_COUNT - 1]) {
+		int ch;
+		int fr = freq->m / 100000;
+		for (ch = 0; ch < FREQ_COUNT; ch++) {
+			if (fr == freq_list[ch]) {
+				freq->e = 0;
+				freq->m = ch + 1;
+				break;
+			}
+		}
+	}
+
+	if (freq->e != 0 || freq->m < 1 || freq->m > FREQ_COUNT ||
+	    !(local->channel_mask & (1 << (freq->m - 1))))
+		return -EINVAL;
+
+	local->channel = freq->m; /* channel is used in prism2_setup_rids() */
+	if (hostap_set_word(dev, HFA384X_RID_CNFOWNCHANNEL, local->channel) ||
+	    local->func->reset_port(dev))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int prism2_ioctl_giwfreq(struct net_device *dev,
+				struct iw_request_info *info,
+				struct iw_freq *freq, char *extra)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	u16 val;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	if (local->func->get_rid(dev, HFA384X_RID_CURRENTCHANNEL, &val, 2, 1) <
+	    0)
+		return -EINVAL;
+
+	le16_to_cpus(&val);
+	if (val < 1 || val > FREQ_COUNT)
+		return -EINVAL;
+
+	freq->m = freq_list[val - 1] * 100000;
+	freq->e = 1;
+
+	return 0;
+}
+
+
+static void hostap_monitor_set_type(local_info_t *local)
+{
+	struct net_device *dev = local->ddev;
+
+	if (dev == NULL)
+		return;
+
+	if (local->monitor_type == PRISM2_MONITOR_PRISM ||
+	    local->monitor_type == PRISM2_MONITOR_CAPHDR) {
+		dev->type = ARPHRD_IEEE80211_PRISM;
+		dev->hard_header_parse =
+			hostap_80211_prism_header_parse;
+	} else {
+		dev->type = ARPHRD_IEEE80211;
+		dev->hard_header_parse = hostap_80211_header_parse;
+	}
+}
+
+
+static int prism2_ioctl_siwessid(struct net_device *dev,
+				 struct iw_request_info *info,
+				 struct iw_point *data, char *ssid)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	if (iface->type == HOSTAP_INTERFACE_WDS)
+		return -EOPNOTSUPP;
+
+	if (data->flags == 0)
+		ssid[0] = '\0'; /* ANY */
+
+	if (local->iw_mode == IW_MODE_MASTER && ssid[0] == '\0') {
+		/* Setting SSID to empty string seems to kill the card in
+		 * Host AP mode */
+		printk(KERN_DEBUG "%s: Host AP mode does not support "
+		       "'Any' essid\n", dev->name);
+		return -EINVAL;
+	}
+
+	memcpy(local->essid, ssid, data->length);
+	local->essid[data->length] = '\0';
+
+	if ((!local->fw_ap &&
+	     hostap_set_string(dev, HFA384X_RID_CNFDESIREDSSID, local->essid))
+	    || hostap_set_string(dev, HFA384X_RID_CNFOWNSSID, local->essid) ||
+	    local->func->reset_port(dev))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int prism2_ioctl_giwessid(struct net_device *dev,
+				 struct iw_request_info *info,
+				 struct iw_point *data, char *essid)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	u16 val;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	if (iface->type == HOSTAP_INTERFACE_WDS)
+		return -EOPNOTSUPP;
+
+	data->flags = 1; /* active */
+	if (local->iw_mode == IW_MODE_MASTER) {
+		data->length = strlen(local->essid);
+		memcpy(essid, local->essid, IW_ESSID_MAX_SIZE);
+	} else {
+		int len;
+		char ssid[MAX_SSID_LEN + 2];
+		memset(ssid, 0, sizeof(ssid));
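+		/* As with the nickname RID, the data is a 16-bit little-endian
+		 * length followed by the SSID itself. */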
+		len = local->func->get_rid(dev, HFA384X_RID_CURRENTSSID,
+					   &ssid, MAX_SSID_LEN + 2, 0);
+		val = __le16_to_cpu(*(u16 *) ssid);
+		if (len > MAX_SSID_LEN + 2 || len < 0 || val > MAX_SSID_LEN) {
+			return -EOPNOTSUPP;
+		}
+		data->length = val;
+		memcpy(essid, ssid + 2, IW_ESSID_MAX_SIZE);
+	}
+
+	return 0;
+}
+
+
+static int prism2_ioctl_giwrange(struct net_device *dev,
+				 struct iw_request_info *info,
+				 struct iw_point *data, char *extra)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	struct iw_range *range = (struct iw_range *) extra;
+	u8 rates[10];
+	u16 val;
+	int i, len, over2;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	data->length = sizeof(struct iw_range);
+	memset(range, 0, sizeof(struct iw_range));
+
+	/* TODO: could fill num_txpower and txpower array with
+	 * something; however, there are 128 different values.. */
+
+	range->txpower_capa = IW_TXPOW_DBM;
+
+	if (local->iw_mode == IW_MODE_INFRA || local->iw_mode == IW_MODE_ADHOC)
+	{
+		range->min_pmp = 1 * 1024;
+		range->max_pmp = 65535 * 1024;
+		range->min_pmt = 1 * 1024;
+		range->max_pmt = 1000 * 1024;
+		range->pmp_flags = IW_POWER_PERIOD;
+		range->pmt_flags = IW_POWER_TIMEOUT;
+		range->pm_capa = IW_POWER_PERIOD | IW_POWER_TIMEOUT |
+			IW_POWER_UNICAST_R | IW_POWER_ALL_R;
+	}
+
+	range->we_version_compiled = WIRELESS_EXT;
+	range->we_version_source = 18;
+
+	range->retry_capa = IW_RETRY_LIMIT;
+	range->retry_flags = IW_RETRY_LIMIT;
+	range->min_retry = 0;
+	range->max_retry = 255;
+
+	range->num_channels = FREQ_COUNT;
+
+	val = 0;
+	for (i = 0; i < FREQ_COUNT; i++) {
+		if (local->channel_mask & (1 << i)) {
+			range->freq[val].i = i + 1;
+			range->freq[val].m = freq_list[i] * 100000;
+			range->freq[val].e = 1;
+			val++;
+		}
+		if (val == IW_MAX_FREQUENCIES)
+			break;
+	}
+	range->num_frequency = val;
+
+	if (local->sta_fw_ver >= PRISM2_FW_VER(1,3,1)) {
+		range->max_qual.qual = 70; /* what is correct max? This was not
+					    * documented exactly. At least
+					    * 69 has been observed. */
+		range->max_qual.level = 0; /* dB */
+		range->max_qual.noise = 0; /* dB */
+
+		/* What would be suitable values for "average/typical" qual? */
+		range->avg_qual.qual = 20;
+		range->avg_qual.level = -60;
+		range->avg_qual.noise = -95;
+	} else {
+		range->max_qual.qual = 92; /* 0 .. 92 */
+		range->max_qual.level = 154; /* 27 .. 154 */
+		range->max_qual.noise = 154; /* 27 .. 154 */
+	}
+	range->sensitivity = 3;
+
+	range->max_encoding_tokens = WEP_KEYS;
+	range->num_encoding_sizes = 2;
+	range->encoding_size[0] = 5;
+	range->encoding_size[1] = 13;
+
+	over2 = 0;
+	len = prism2_get_datarates(dev, rates);
+	range->num_bitrates = 0;
+	for (i = 0; i < len; i++) {
+		if (range->num_bitrates < IW_MAX_BITRATES) {
+			range->bitrate[range->num_bitrates] =
+				rates[i] * 500000;
+			range->num_bitrates++;
+		}
+		if (rates[i] == 0x0b || rates[i] == 0x16)
+			over2 = 1;
+	}
+	/* estimated maximum TCP throughput values (bps) */
+	range->throughput = over2 ? 5500000 : 1500000;
+
+	range->min_rts = 0;
+	range->max_rts = 2347;
+	range->min_frag = 256;
+	range->max_frag = 2346;
+
+	/* Event capability (kernel + driver) */
+	range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
+				IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
+				IW_EVENT_CAPA_MASK(SIOCGIWAP) |
+				IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
+	range->event_capa[1] = IW_EVENT_CAPA_K_1;
+	range->event_capa[4] = (IW_EVENT_CAPA_MASK(IWEVTXDROP) |
+				IW_EVENT_CAPA_MASK(IWEVCUSTOM) |
+				IW_EVENT_CAPA_MASK(IWEVREGISTERED) |
+				IW_EVENT_CAPA_MASK(IWEVEXPIRED));
+
+	range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
+		IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
+
+	return 0;
+}
+
+
+static int hostap_monitor_mode_enable(local_info_t *local)
+{
+	struct net_device *dev = local->dev;
+
+	printk(KERN_DEBUG "Enabling monitor mode\n");
+	hostap_monitor_set_type(local);
+
+	if (hostap_set_word(dev, HFA384X_RID_CNFPORTTYPE,
+			    HFA384X_PORTTYPE_PSEUDO_IBSS)) {
+		printk(KERN_DEBUG "Port type setting for monitor mode "
+		       "failed\n");
+		return -EOPNOTSUPP;
+	}
+
+	/* Host decrypt is needed to get the IV and ICV fields;
+	 * however, monitor mode seems to remove WEP flag from frame
+	 * control field */
+	if (hostap_set_word(dev, HFA384X_RID_CNFWEPFLAGS,
+			    HFA384X_WEPFLAGS_HOSTENCRYPT |
+			    HFA384X_WEPFLAGS_HOSTDECRYPT)) {
+		printk(KERN_DEBUG "WEP flags setting failed\n");
+		return -EOPNOTSUPP;
+	}
+
+	if (local->func->reset_port(dev) ||
+	    local->func->cmd(dev, HFA384X_CMDCODE_TEST |
+			     (HFA384X_TEST_MONITOR << 8),
+			     0, NULL, NULL)) {
+		printk(KERN_DEBUG "Setting monitor mode failed\n");
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+
+static int hostap_monitor_mode_disable(local_info_t *local)
+{
+	struct net_device *dev = local->ddev;
+
+	if (dev == NULL)
+		return -1;
+
+	printk(KERN_DEBUG "%s: Disabling monitor mode\n", dev->name);
+	dev->type = ARPHRD_ETHER;
+	dev->hard_header_parse = local->saved_eth_header_parse;
+	if (local->func->cmd(dev, HFA384X_CMDCODE_TEST |
+			     (HFA384X_TEST_STOP << 8),
+			     0, NULL, NULL))
+		return -1;
+	return hostap_set_encryption(local);
+}
+
+
+static int prism2_ioctl_siwmode(struct net_device *dev,
+				struct iw_request_info *info,
+				__u32 *mode, char *extra)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	int double_reset = 0;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	if (*mode != IW_MODE_ADHOC && *mode != IW_MODE_INFRA &&
+	    *mode != IW_MODE_MASTER && *mode != IW_MODE_REPEAT &&
+	    *mode != IW_MODE_MONITOR)
+		return -EOPNOTSUPP;
+
+#ifdef PRISM2_NO_STATION_MODES
+	if (*mode == IW_MODE_ADHOC || *mode == IW_MODE_INFRA)
+		return -EOPNOTSUPP;
+#endif /* PRISM2_NO_STATION_MODES */
+
+	if (*mode == local->iw_mode)
+		return 0;
+
+	if (*mode == IW_MODE_MASTER && local->essid[0] == '\0') {
+		printk(KERN_WARNING "%s: empty SSID not allowed in Master "
+		       "mode\n", dev->name);
+		return -EINVAL;
+	}
+
+	if (local->iw_mode == IW_MODE_MONITOR)
+		hostap_monitor_mode_disable(local);
+
+	if ((local->iw_mode == IW_MODE_ADHOC ||
+	     local->iw_mode == IW_MODE_MONITOR) && *mode == IW_MODE_MASTER) {
+		/* There seems to be a firmware bug in at least STA f/w v1.5.6
+		 * that leaves beacon frames to use IBSS type when moving from
+		 * IBSS to Host AP mode. Doing double Port0 reset seems to be
+		 * enough to work around this. */
+		double_reset = 1;
+	}
+
+	printk(KERN_DEBUG "prism2: %s: operating mode changed "
+	       "%d -> %d\n", dev->name, local->iw_mode, *mode);
+	local->iw_mode = *mode;
+
+	if (local->iw_mode == IW_MODE_MONITOR)
+		hostap_monitor_mode_enable(local);
+	else if (local->iw_mode == IW_MODE_MASTER && !local->host_encrypt &&
+		 !local->fw_encrypt_ok) {
+		printk(KERN_DEBUG "%s: defaulting to host-based encryption as "
+		       "a workaround for firmware bug in Host AP mode WEP\n",
+		       dev->name);
+		local->host_encrypt = 1;
+	}
+
+	if (hostap_set_word(dev, HFA384X_RID_CNFPORTTYPE,
+			    hostap_get_porttype(local)))
+		return -EOPNOTSUPP;
+
+	if (local->func->reset_port(dev))
+		return -EINVAL;
+	if (double_reset && local->func->reset_port(dev))
+		return -EINVAL;
+
+	if (local->iw_mode != IW_MODE_INFRA && local->iw_mode != IW_MODE_ADHOC)
+	{
+		/* netif_carrier is used only in client modes for now, so make
+		 * sure carrier is on when moving to non-client modes. */
+		netif_carrier_on(local->dev);
+		netif_carrier_on(local->ddev);
+	}
+	return 0;
+}
+
+
+static int prism2_ioctl_giwmode(struct net_device *dev,
+				struct iw_request_info *info,
+				__u32 *mode, char *extra)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	switch (iface->type) {
+	case HOSTAP_INTERFACE_STA:
+		*mode = IW_MODE_INFRA;
+		break;
+	case HOSTAP_INTERFACE_WDS:
+		*mode = IW_MODE_REPEAT;
+		break;
+	default:
+		*mode = local->iw_mode;
+		break;
+	}
+	return 0;
+}
+
+
+static int prism2_ioctl_siwpower(struct net_device *dev,
+				 struct iw_request_info *info,
+				 struct iw_param *wrq, char *extra)
+{
+#ifdef PRISM2_NO_STATION_MODES
+	return -EOPNOTSUPP;
+#else /* PRISM2_NO_STATION_MODES */
+	int ret = 0;
+
+	if (wrq->disabled)
+		return hostap_set_word(dev, HFA384X_RID_CNFPMENABLED, 0);
+
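+	/* Map the requested receive mode to the firmware multicast receive
+	 * setting and make sure power management is enabled. */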
+	switch (wrq->flags & IW_POWER_MODE) {
+	case IW_POWER_UNICAST_R:
+		ret = hostap_set_word(dev, HFA384X_RID_CNFMULTICASTRECEIVE, 0);
+		if (ret)
+			return ret;
+		ret = hostap_set_word(dev, HFA384X_RID_CNFPMENABLED, 1);
+		if (ret)
+			return ret;
+		break;
+	case IW_POWER_ALL_R:
+		ret = hostap_set_word(dev, HFA384X_RID_CNFMULTICASTRECEIVE, 1);
+		if (ret)
+			return ret;
+		ret = hostap_set_word(dev, HFA384X_RID_CNFPMENABLED, 1);
+		if (ret)
+			return ret;
+		break;
+	case IW_POWER_ON:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (wrq->flags & IW_POWER_TIMEOUT) {
+		ret = hostap_set_word(dev, HFA384X_RID_CNFPMENABLED, 1);
+		if (ret)
+			return ret;
+		ret = hostap_set_word(dev, HFA384X_RID_CNFPMHOLDOVERDURATION,
+				      wrq->value / 1024);
+		if (ret)
+			return ret;
+	}
+	if (wrq->flags & IW_POWER_PERIOD) {
+		ret = hostap_set_word(dev, HFA384X_RID_CNFPMENABLED, 1);
+		if (ret)
+			return ret;
+		ret = hostap_set_word(dev, HFA384X_RID_CNFMAXSLEEPDURATION,
+				      wrq->value / 1024);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+#endif /* PRISM2_NO_STATION_MODES */
+}
+
+
+static int prism2_ioctl_giwpower(struct net_device *dev,
+				 struct iw_request_info *info,
+				 struct iw_param *rrq, char *extra)
+{
+#ifdef PRISM2_NO_STATION_MODES
+	return -EOPNOTSUPP;
+#else /* PRISM2_NO_STATION_MODES */
+	struct hostap_interface *iface;
+	local_info_t *local;
+	u16 enable, mcast;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	if (local->func->get_rid(dev, HFA384X_RID_CNFPMENABLED, &enable, 2, 1)
+	    < 0)
+		return -EINVAL;
+
+	if (!__le16_to_cpu(enable)) {
+		rrq->disabled = 1;
+		return 0;
+	}
+
+	rrq->disabled = 0;
+
+	if ((rrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
+		u16 timeout;
+		if (local->func->get_rid(dev,
+					 HFA384X_RID_CNFPMHOLDOVERDURATION,
+					 &timeout, 2, 1) < 0)
+			return -EINVAL;
+
+		rrq->flags = IW_POWER_TIMEOUT;
+		rrq->value = __le16_to_cpu(timeout) * 1024;
+	} else {
+		u16 period;
+		if (local->func->get_rid(dev, HFA384X_RID_CNFMAXSLEEPDURATION,
+					 &period, 2, 1) < 0)
+			return -EINVAL;
+
+		rrq->flags = IW_POWER_PERIOD;
+		rrq->value = __le16_to_cpu(period) * 1024;
+	}
+
+	if (local->func->get_rid(dev, HFA384X_RID_CNFMULTICASTRECEIVE, &mcast,
+				 2, 1) < 0)
+		return -EINVAL;
+
+	if (__le16_to_cpu(mcast))
+		rrq->flags |= IW_POWER_ALL_R;
+	else
+		rrq->flags |= IW_POWER_UNICAST_R;
+
+	return 0;
+#endif /* PRISM2_NO_STATION_MODES */
+}
+
+
+static int prism2_ioctl_siwretry(struct net_device *dev,
+				 struct iw_request_info *info,
+				 struct iw_param *rrq, char *extra)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	if (rrq->disabled)
+		return -EINVAL;
+
+	/* setting retry limits is not supported with the current station
+	 * firmware code; simulate this with alternative retry count for now */
+	if (rrq->flags == IW_RETRY_LIMIT) {
+		if (rrq->value < 0) {
+			/* disable manual retry count setting and use firmware
+			 * defaults */
+			local->manual_retry_count = -1;
+			local->tx_control &= ~HFA384X_TX_CTRL_ALT_RTRY;
+		} else {
+			if (hostap_set_word(dev, HFA384X_RID_CNFALTRETRYCOUNT,
+					    rrq->value)) {
+				printk(KERN_DEBUG "%s: Alternate retry count "
+				       "setting to %d failed\n",
+				       dev->name, rrq->value);
+				return -EOPNOTSUPP;
+			}
+
+			local->manual_retry_count = rrq->value;
+			local->tx_control |= HFA384X_TX_CTRL_ALT_RTRY;
+		}
+		return 0;
+	}
+
+	return -EOPNOTSUPP;
+
+#if 0
+	/* what could be done if the firmware supported this: */
+
+	if (rrq->flags & IW_RETRY_LIMIT) {
+		if (rrq->flags & IW_RETRY_MAX)
+			HFA384X_RID_LONGRETRYLIMIT = rrq->value;
+		else if (rrq->flags & IW_RETRY_MIN)
+			HFA384X_RID_SHORTRETRYLIMIT = rrq->value;
+		else {
+			HFA384X_RID_LONGRETRYLIMIT = rrq->value;
+			HFA384X_RID_SHORTRETRYLIMIT = rrq->value;
+		}
+
+	}
+
+	if (rrq->flags & IW_RETRY_LIFETIME) {
+		HFA384X_RID_MAXTRANSMITLIFETIME = rrq->value / 1024;
+	}
+
+	return 0;
+#endif /* 0 */
+}
+
+static int prism2_ioctl_giwretry(struct net_device *dev,
+				 struct iw_request_info *info,
+				 struct iw_param *rrq, char *extra)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	u16 shortretry, longretry, lifetime, altretry;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	if (local->func->get_rid(dev, HFA384X_RID_SHORTRETRYLIMIT, &shortretry,
+				 2, 1) < 0 ||
+	    local->func->get_rid(dev, HFA384X_RID_LONGRETRYLIMIT, &longretry,
+				 2, 1) < 0 ||
+	    local->func->get_rid(dev, HFA384X_RID_MAXTRANSMITLIFETIME,
+				 &lifetime, 2, 1) < 0)
+		return -EINVAL;
+
+	le16_to_cpus(&shortretry);
+	le16_to_cpus(&longretry);
+	le16_to_cpus(&lifetime);
+
+	rrq->disabled = 0;
+
+	if ((rrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
+		rrq->flags = IW_RETRY_LIFETIME;
+		rrq->value = lifetime * 1024;
+	} else {
+		if (local->manual_retry_count >= 0) {
+			rrq->flags = IW_RETRY_LIMIT;
+			if (local->func->get_rid(dev,
+						 HFA384X_RID_CNFALTRETRYCOUNT,
+						 &altretry, 2, 1) >= 0)
+				rrq->value = le16_to_cpu(altretry);
+			else
+				rrq->value = local->manual_retry_count;
+		} else if ((rrq->flags & IW_RETRY_MAX)) {
+			rrq->flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
+			rrq->value = longretry;
+		} else {
+			rrq->flags = IW_RETRY_LIMIT;
+			rrq->value = shortretry;
+			if (shortretry != longretry)
+				rrq->flags |= IW_RETRY_MIN;
+		}
+	}
+	return 0;
+}
+
+
+/* Note! This TX power control is experimental and should not be used in
+ * production. It just sets the raw power register and does not use any kind
+ * of feedback information from the measured TX power (CR58). This is now
+ * commented out to make sure that it is not used by accident. TX power
+ * configuration will be enabled again after a proper algorithm using feedback
+ * has been implemented. */
+
+#ifdef RAW_TXPOWER_SETTING
+/* Map HFA386x's CR31 to and from dBm with a rough ad hoc mapping.
+ * This version assumes the following mapping:
+ * CR31 is a 7-bit value with a -64 to +63 range.
+ * -64 is mapped to +20 dBm and +63 to -43 dBm.
+ * This is certainly not an exact mapping for every card, but at least an
+ * increasing dBm value should correspond to increasing TX power.
+ */
+
+static int prism2_txpower_hfa386x_to_dBm(u16 val)
+{
+	signed char tmp;
+
+	if (val > 255)
+		val = 255;
+
+	tmp = val;
+	tmp >>= 2;
+
+	return -12 - tmp;
+}
+
+static u16 prism2_txpower_dBm_to_hfa386x(int val)
+{
+	signed char tmp;
+
+	if (val > 20)
+		return 128;
+	else if (val < -43)
+		return 127;
+
+	tmp = val;
+	tmp = -12 - tmp;
+	tmp <<= 2;
+
+	return (unsigned char) tmp;
+}
+#endif /* RAW_TXPOWER_SETTING */
+
+
+static int prism2_ioctl_siwtxpow(struct net_device *dev,
+				 struct iw_request_info *info,
+				 struct iw_param *rrq, char *extra)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+#ifdef RAW_TXPOWER_SETTING
+	char *tmp;
+#endif
+	u16 val;
+	int ret = 0;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	if (rrq->disabled) {
+		if (local->txpower_type != PRISM2_TXPOWER_OFF) {
+			val = 0xff; /* use all standby and sleep modes */
+			ret = local->func->cmd(dev, HFA384X_CMDCODE_WRITEMIF,
+					       HFA386X_CR_A_D_TEST_MODES2,
+					       &val, NULL);
+			printk(KERN_DEBUG "%s: Turning radio off: %s\n",
+			       dev->name, ret ? "failed" : "OK");
+			local->txpower_type = PRISM2_TXPOWER_OFF;
+		}
+		return (ret ? -EOPNOTSUPP : 0);
+	}
+
+	if (local->txpower_type == PRISM2_TXPOWER_OFF) {
+		val = 0; /* disable all standby and sleep modes */
+		ret = local->func->cmd(dev, HFA384X_CMDCODE_WRITEMIF,
+				       HFA386X_CR_A_D_TEST_MODES2, &val, NULL);
+		printk(KERN_DEBUG "%s: Turning radio on: %s\n",
+		       dev->name, ret ? "failed" : "OK");
+		local->txpower_type = PRISM2_TXPOWER_UNKNOWN;
+	}
+
+#ifdef RAW_TXPOWER_SETTING
+	if (!rrq->fixed && local->txpower_type != PRISM2_TXPOWER_AUTO) {
+		printk(KERN_DEBUG "Setting ALC on\n");
+		val = HFA384X_TEST_CFG_BIT_ALC;
+		local->func->cmd(dev, HFA384X_CMDCODE_TEST |
+				 (HFA384X_TEST_CFG_BITS << 8), 1, &val, NULL);
+		local->txpower_type = PRISM2_TXPOWER_AUTO;
+		return 0;
+	}
+
+	if (local->txpower_type != PRISM2_TXPOWER_FIXED) {
+		printk(KERN_DEBUG "Setting ALC off\n");
+		val = HFA384X_TEST_CFG_BIT_ALC;
+		local->func->cmd(dev, HFA384X_CMDCODE_TEST |
+				 (HFA384X_TEST_CFG_BITS << 8), 0, &val, NULL);
+		local->txpower_type = PRISM2_TXPOWER_FIXED;
+	}
+
+	if (rrq->flags == IW_TXPOW_DBM)
+		tmp = "dBm";
+	else if (rrq->flags == IW_TXPOW_MWATT)
+		tmp = "mW";
+	else
+		tmp = "UNKNOWN";
+	printk(KERN_DEBUG "Setting TX power to %d %s\n", rrq->value, tmp);
+
+	if (rrq->flags != IW_TXPOW_DBM) {
+		printk(KERN_DEBUG "SIOCSIWTXPOW with mW is not supported; "
+		       "use dBm\n");
+		return -EOPNOTSUPP;
+	}
+
+	local->txpower = rrq->value;
+	val = prism2_txpower_dBm_to_hfa386x(local->txpower);
+	if (local->func->cmd(dev, HFA384X_CMDCODE_WRITEMIF,
+			     HFA386X_CR_MANUAL_TX_POWER, &val, NULL))
+		ret = -EOPNOTSUPP;
+#else /* RAW_TXPOWER_SETTING */
+	if (rrq->fixed)
+		ret = -EOPNOTSUPP;
+#endif /* RAW_TXPOWER_SETTING */
+
+	return ret;
+}
+
+static int prism2_ioctl_giwtxpow(struct net_device *dev,
+				 struct iw_request_info *info,
+				 struct iw_param *rrq, char *extra)
+{
+#ifdef RAW_TXPOWER_SETTING
+	struct hostap_interface *iface;
+	local_info_t *local;
+	u16 resp0;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	rrq->flags = IW_TXPOW_DBM;
+	rrq->disabled = 0;
+	rrq->fixed = 0;
+
+	if (local->txpower_type == PRISM2_TXPOWER_AUTO) {
+		if (local->func->cmd(dev, HFA384X_CMDCODE_READMIF,
+				     HFA386X_CR_MANUAL_TX_POWER,
+				     NULL, &resp0) == 0) {
+			rrq->value = prism2_txpower_hfa386x_to_dBm(resp0);
+		} else {
+			/* Could not get real txpower; guess 15 dBm */
+			rrq->value = 15;
+		}
+	} else if (local->txpower_type == PRISM2_TXPOWER_OFF) {
+		rrq->value = 0;
+		rrq->disabled = 1;
+	} else if (local->txpower_type == PRISM2_TXPOWER_FIXED) {
+		rrq->value = local->txpower;
+		rrq->fixed = 1;
+	} else {
+		printk(KERN_DEBUG "SIOCGIWTXPOW - unknown txpower_type=%d\n",
+		       local->txpower_type);
+	}
+	return 0;
+#else /* RAW_TXPOWER_SETTING */
+	return -EOPNOTSUPP;
+#endif /* RAW_TXPOWER_SETTING */
+}
+
+
+#ifndef PRISM2_NO_STATION_MODES
+
+/* HostScan request works with and without host_roaming mode. In addition, it
+ * does not break the current association. However, it requires a newer
+ * station firmware version (>= 1.3.1) than the plain scan request. */
+static int prism2_request_hostscan(struct net_device *dev,
+				   u8 *ssid, u8 ssid_len)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	struct hfa384x_hostscan_request scan_req;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
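+	/* Limit the scan to channels present in both the allowed channel mask
+	 * and the user-configurable scan channel mask. */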
+	memset(&scan_req, 0, sizeof(scan_req));
+	scan_req.channel_list = cpu_to_le16(local->channel_mask &
+					    local->scan_channel_mask);
+	scan_req.txrate = __constant_cpu_to_le16(HFA384X_RATES_1MBPS);
+	if (ssid) {
+		if (ssid_len > 32)
+			return -EINVAL;
+		scan_req.target_ssid_len = cpu_to_le16(ssid_len);
+		memcpy(scan_req.target_ssid, ssid, ssid_len);
+	}
+
+	if (local->func->set_rid(dev, HFA384X_RID_HOSTSCAN, &scan_req,
+				 sizeof(scan_req))) {
+		printk(KERN_DEBUG "%s: HOSTSCAN failed\n", dev->name);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+
+static int prism2_request_scan(struct net_device *dev)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	struct hfa384x_scan_request scan_req;
+	int ret = 0;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	memset(&scan_req, 0, sizeof(scan_req));
+	scan_req.channel_list = cpu_to_le16(local->channel_mask &
+					    local->scan_channel_mask);
+	scan_req.txrate = __constant_cpu_to_le16(HFA384X_RATES_1MBPS);
+
+	/* FIX:
+	 * It seems to be enough to set the roaming mode to host-based for a
+	 * short moment, set up the scan request data, and then return the
+	 * mode to firmware-based.
+	 *
+	 * Master mode would need to drop to Managed mode for a short while
+	 * to make scanning work, or to sweep through the different channels
+	 * and use passive scanning based on beacons. */
+
+	if (!local->host_roaming)
+		hostap_set_word(dev, HFA384X_RID_CNFROAMINGMODE,
+				HFA384X_ROAMING_HOST);
+
+	if (local->func->set_rid(dev, HFA384X_RID_SCANREQUEST, &scan_req,
+				 sizeof(scan_req))) {
+		printk(KERN_DEBUG "SCANREQUEST failed\n");
+		ret = -EINVAL;
+	}
+
+	if (!local->host_roaming)
+		hostap_set_word(dev, HFA384X_RID_CNFROAMINGMODE,
+				HFA384X_ROAMING_FIRMWARE);
+
+	return ret;
+}
+
+#else /* !PRISM2_NO_STATION_MODES */
+
+static inline int prism2_request_hostscan(struct net_device *dev,
+					  u8 *ssid, u8 ssid_len)
+{
+	return -EOPNOTSUPP;
+}
+
+
+static inline int prism2_request_scan(struct net_device *dev)
+{
+	return -EOPNOTSUPP;
+}
+
+#endif /* !PRISM2_NO_STATION_MODES */
+
+
+static int prism2_ioctl_siwscan(struct net_device *dev,
+				struct iw_request_info *info,
+				struct iw_point *data, char *extra)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	int ret;
+	u8 *ssid = NULL, ssid_len = 0;
+	struct iw_scan_req *req = (struct iw_scan_req *) extra;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
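+	/* If user space did not pass a full struct iw_scan_req, fall back to
+	 * a plain scan without a target SSID. */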
+	if (data->length < sizeof(struct iw_scan_req))
+		req = NULL;
+
+	if (local->iw_mode == IW_MODE_MASTER) {
+		/* In master mode, we just return the results of our local
+		 * tables, so we don't need to start anything...
+		 * Jean II */
+		data->length = 0;
+		return 0;
+	}
+
+	if (!local->dev_enabled)
+		return -ENETDOWN;
+
+	if (req && data->flags & IW_SCAN_THIS_ESSID) {
+		ssid = req->essid;
+		ssid_len = req->essid_len;
+
+		if (ssid_len &&
+		    ((local->iw_mode != IW_MODE_INFRA &&
+		      local->iw_mode != IW_MODE_ADHOC) ||
+		     (local->sta_fw_ver < PRISM2_FW_VER(1,3,1))))
+			return -EOPNOTSUPP;
+	}
+
+	if (local->sta_fw_ver >= PRISM2_FW_VER(1,3,1))
+		ret = prism2_request_hostscan(dev, ssid, ssid_len);
+	else
+		ret = prism2_request_scan(dev);
+
+	if (ret == 0)
+		local->scan_timestamp = jiffies;
+
+	/* Could inquire F101, F103 or wait for SIOCGIWSCAN and read RID */
+
+	return ret;
+}
+
+
+#ifndef PRISM2_NO_STATION_MODES
+static char * __prism2_translate_scan(local_info_t *local,
+				      struct hfa384x_hostscan_result *scan,
+				      struct hostap_bss_info *bss,
+				      char *current_ev, char *end_buf)
+{
+	int i, chan;
+	struct iw_event iwe;
+	char *current_val;
+	u16 capabilities;
+	u8 *pos;
+	u8 *ssid, *bssid;
+	size_t ssid_len;
+	char *buf;
+
+	if (bss) {
+		ssid = bss->ssid;
+		ssid_len = bss->ssid_len;
+		bssid = bss->bssid;
+	} else {
+		ssid = scan->ssid;
+		ssid_len = le16_to_cpu(scan->ssid_len);
+		bssid = scan->bssid;
+	}
+	if (ssid_len > 32)
+		ssid_len = 32;
+
+	/* First entry *MUST* be the AP MAC address */
+	memset(&iwe, 0, sizeof(iwe));
+	iwe.cmd = SIOCGIWAP;
+	iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+	memcpy(iwe.u.ap_addr.sa_data, bssid, ETH_ALEN);
+	/* FIX:
+	 * I do not know how this is possible, but iwe_stream_add_event
+	 * seems to re-order memcpy execution so that len is set only
+	 * after copying.. Pre-setting len here "fixes" this, but real
+	 * problems should be solved (after which these iwe.len
+	 * settings could be removed from this function). */
+	iwe.len = IW_EV_ADDR_LEN;
+	current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
+					  IW_EV_ADDR_LEN);
+
+	/* Other entries will be displayed in the order we give them */
+
+	memset(&iwe, 0, sizeof(iwe));
+	iwe.cmd = SIOCGIWESSID;
+	iwe.u.data.length = ssid_len;
+	iwe.u.data.flags = 1;
+	iwe.len = IW_EV_POINT_LEN + iwe.u.data.length;
+	current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, ssid);
+
+	memset(&iwe, 0, sizeof(iwe));
+	iwe.cmd = SIOCGIWMODE;
+	if (bss) {
+		capabilities = bss->capab_info;
+	} else {
+		capabilities = le16_to_cpu(scan->capability);
+	}
+	if (capabilities & (WLAN_CAPABILITY_ESS |
+			    WLAN_CAPABILITY_IBSS)) {
+		if (capabilities & WLAN_CAPABILITY_ESS)
+			iwe.u.mode = IW_MODE_MASTER;
+		else
+			iwe.u.mode = IW_MODE_ADHOC;
+		iwe.len = IW_EV_UINT_LEN;
+		current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
+						  IW_EV_UINT_LEN);
+	}
+
+	memset(&iwe, 0, sizeof(iwe));
+	iwe.cmd = SIOCGIWFREQ;
+	if (scan) {
+		chan = le16_to_cpu(scan->chid);
+	} else if (bss) {
+		chan = bss->chan;
+	} else {
+		chan = 0;
+	}
+
+	if (chan > 0) {
+		iwe.u.freq.m = freq_list[chan - 1] * 100000;
+		iwe.u.freq.e = 1;
+		iwe.len = IW_EV_FREQ_LEN;
+		current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
+						  IW_EV_FREQ_LEN);
+	}
+
+	if (scan) {
+		memset(&iwe, 0, sizeof(iwe));
+		iwe.cmd = IWEVQUAL;
+		if (local->last_scan_type == PRISM2_HOSTSCAN) {
+			iwe.u.qual.level = le16_to_cpu(scan->sl);
+			iwe.u.qual.noise = le16_to_cpu(scan->anl);
+		} else {
+			iwe.u.qual.level =
+				HFA384X_LEVEL_TO_dBm(le16_to_cpu(scan->sl));
+			iwe.u.qual.noise =
+				HFA384X_LEVEL_TO_dBm(le16_to_cpu(scan->anl));
+		}
+		iwe.len = IW_EV_QUAL_LEN;
+		current_ev = iwe_stream_add_event(current_ev, end_buf, &iwe,
+						  IW_EV_QUAL_LEN);
+	}
+
+	memset(&iwe, 0, sizeof(iwe));
+	iwe.cmd = SIOCGIWENCODE;
+	if (capabilities & WLAN_CAPABILITY_PRIVACY)
+		iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+	else
+		iwe.u.data.flags = IW_ENCODE_DISABLED;
+	iwe.u.data.length = 0;
+	iwe.len = IW_EV_POINT_LEN + iwe.u.data.length;
+	current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe, "");
+
+	/* TODO: add SuppRates into BSS table */
+	if (scan) {
+		memset(&iwe, 0, sizeof(iwe));
+		iwe.cmd = SIOCGIWRATE;
+		current_val = current_ev + IW_EV_LCP_LEN;
+		pos = scan->sup_rates;
+		for (i = 0; i < sizeof(scan->sup_rates); i++) {
+			if (pos[i] == 0)
+				break;
+			/* Bit rate given in 500 kb/s units (+ 0x80) */
+			iwe.u.bitrate.value = ((pos[i] & 0x7f) * 500000);
+			current_val = iwe_stream_add_value(
+				current_ev, current_val, end_buf, &iwe,
+				IW_EV_PARAM_LEN);
+		}
+		/* Check if we added any event */
+		if ((current_val - current_ev) > IW_EV_LCP_LEN)
+			current_ev = current_val;
+	}
+
+	/* TODO: add BeaconInt, resp_rate and atim into the BSS table */
+	buf = kmalloc(MAX_WPA_IE_LEN * 2 + 30, GFP_KERNEL);
+	if (buf && scan) {
+		memset(&iwe, 0, sizeof(iwe));
+		iwe.cmd = IWEVCUSTOM;
+		sprintf(buf, "bcn_int=%d", le16_to_cpu(scan->beacon_interval));
+		iwe.u.data.length = strlen(buf);
+		current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe,
+						  buf);
+
+		memset(&iwe, 0, sizeof(iwe));
+		iwe.cmd = IWEVCUSTOM;
+		sprintf(buf, "resp_rate=%d", le16_to_cpu(scan->rate));
+		iwe.u.data.length = strlen(buf);
+		current_ev = iwe_stream_add_point(current_ev, end_buf, &iwe,
+						  buf);
+
+		if (local->last_scan_type == PRISM2_HOSTSCAN &&
+		    (capabilities & WLAN_CAPABILITY_IBSS)) {
+			memset(&iwe, 0, sizeof(iwe));
+			iwe.cmd = IWEVCUSTOM;
+			sprintf(buf, "atim=%d", le16_to_cpu(scan->atim));
+			iwe.u.data.length = strlen(buf);
+			current_ev = iwe_stream_add_point(current_ev, end_buf,
+							  &iwe, buf);
+		}
+	}
+	kfree(buf);
+
+	if (bss && bss->wpa_ie_len > 0 && bss->wpa_ie_len <= MAX_WPA_IE_LEN) {
+		memset(&iwe, 0, sizeof(iwe));
+		iwe.cmd = IWEVGENIE;
+		iwe.u.data.length = bss->wpa_ie_len;
+		current_ev = iwe_stream_add_point(
+			current_ev, end_buf, &iwe, bss->wpa_ie);
+	}
+
+	if (bss && bss->rsn_ie_len > 0 && bss->rsn_ie_len <= MAX_WPA_IE_LEN) {
+		memset(&iwe, 0, sizeof(iwe));
+		iwe.cmd = IWEVGENIE;
+		iwe.u.data.length = bss->rsn_ie_len;
+		current_ev = iwe_stream_add_point(
+			current_ev, end_buf, &iwe, bss->rsn_ie);
+	}
+
+	return current_ev;
+}
+
+
+/* Translate scan data returned from the card to a card independent
+ * format that the Wireless Tools will understand - Jean II */
+static inline int prism2_translate_scan(local_info_t *local,
+					char *buffer, int buflen)
+{
+	struct hfa384x_hostscan_result *scan;
+	int entry, hostscan;
+	char *current_ev = buffer;
+	char *end_buf = buffer + buflen;
+	struct list_head *ptr;
+
+	spin_lock_bh(&local->lock);
+
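+	/* Clear the 'included' markers so that BSS table entries which do not
+	 * show up in the firmware scan results can be appended afterwards. */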
+	list_for_each(ptr, &local->bss_list) {
+		struct hostap_bss_info *bss;
+		bss = list_entry(ptr, struct hostap_bss_info, list);
+		bss->included = 0;
+	}
+
+	hostscan = local->last_scan_type == PRISM2_HOSTSCAN;
+	for (entry = 0; entry < local->last_scan_results_count; entry++) {
+		int found = 0;
+		scan = &local->last_scan_results[entry];
+
+		/* Report every SSID if the AP is using multiple SSIDs. If no
+		 * BSS record is found (e.g., when WPA mode is disabled),
+		 * report the AP once. */
+		list_for_each(ptr, &local->bss_list) {
+			struct hostap_bss_info *bss;
+			bss = list_entry(ptr, struct hostap_bss_info, list);
+			if (memcmp(bss->bssid, scan->bssid, ETH_ALEN) == 0) {
+				bss->included = 1;
+				current_ev = __prism2_translate_scan(
+					local, scan, bss, current_ev, end_buf);
+				found++;
+			}
+		}
+		if (!found) {
+			current_ev = __prism2_translate_scan(
+				local, scan, NULL, current_ev, end_buf);
+		}
+		/* Check if there is space for one more entry */
+		if ((end_buf - current_ev) <= IW_EV_ADDR_LEN) {
+			/* Ask user space to try again with a bigger buffer */
+			spin_unlock_bh(&local->lock);
+			return -E2BIG;
+		}
+	}
+
+	/* Prism2 firmware limits the number of BSSes in scan results (to 32
+	 * in at least some versions). Extend this limit by using the local
+	 * BSS list. */
+	list_for_each(ptr, &local->bss_list) {
+		struct hostap_bss_info *bss;
+		bss = list_entry(ptr, struct hostap_bss_info, list);
+		if (bss->included)
+			continue;
+		current_ev = __prism2_translate_scan(local, NULL, bss,
+						     current_ev, end_buf);
+		/* Check if there is space for one more entry */
+		if ((end_buf - current_ev) <= IW_EV_ADDR_LEN) {
+			/* Ask user space to try again with a bigger buffer */
+			spin_unlock_bh(&local->lock);
+			return -E2BIG;
+		}
+	}
+
+	spin_unlock_bh(&local->lock);
+
+	return current_ev - buffer;
+}
+#endif /* PRISM2_NO_STATION_MODES */
+
+
+static inline int prism2_ioctl_giwscan_sta(struct net_device *dev,
+					   struct iw_request_info *info,
+					   struct iw_point *data, char *extra)
+{
+#ifdef PRISM2_NO_STATION_MODES
+	return -EOPNOTSUPP;
+#else /* PRISM2_NO_STATION_MODES */
+	struct hostap_interface *iface;
+	local_info_t *local;
+	int res;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	/* Wait until the scan is finished. We can probably do better
+	 * than that - Jean II */
+	if (local->scan_timestamp &&
+	    time_before(jiffies, local->scan_timestamp + 3 * HZ)) {
+		/* Important note : we don't want to block the caller
+		 * until results are ready for various reasons.
+		 * First, managing wait queues is complex and racy
+		 * (there may be multiple simultaneous callers).
+		 * Second, we grab some rtnetlink lock before coming
+		 * here (in dev_ioctl()).
+		 * Third, the caller can wait on the Wireless Event
+		 * - Jean II */
+		return -EAGAIN;
+	}
+	local->scan_timestamp = 0;
+
+	res = prism2_translate_scan(local, extra, data->length);
+
+	if (res >= 0) {
+		data->length = res;
+		return 0;
+	} else {
+		data->length = 0;
+		return res;
+	}
+#endif /* PRISM2_NO_STATION_MODES */
+}
+
+
+static int prism2_ioctl_giwscan(struct net_device *dev,
+				struct iw_request_info *info,
+				struct iw_point *data, char *extra)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	int res;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	if (local->iw_mode == IW_MODE_MASTER) {
+		/* In MASTER mode, it doesn't make sense to go around
+		 * scanning the frequencies and make the stations we serve
+		 * wait when what the user is really interested in is the
+		 * list of stations and access points we are talking to.
+		 * So, just extract results from our cache...
+		 * Jean II */
+
+		/* Translate to WE format */
+		res = prism2_ap_translate_scan(dev, extra);
+		if (res >= 0) {
+			printk(KERN_DEBUG "Scan result translation succeeded "
+			       "(length=%d)\n", res);
+			data->length = res;
+			return 0;
+		} else {
+			printk(KERN_DEBUG
+			       "Scan result translation failed (res=%d)\n",
+			       res);
+			data->length = 0;
+			return res;
+		}
+	} else {
+		/* Station mode */
+		return prism2_ioctl_giwscan_sta(dev, info, data, extra);
+	}
+}
+
+
+static const struct iw_priv_args prism2_priv[] = {
+	{ PRISM2_IOCTL_MONITOR,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "monitor" },
+	{ PRISM2_IOCTL_READMIF,
+	  IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1,
+	  IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, "readmif" },
+	{ PRISM2_IOCTL_WRITEMIF,
+	  IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 2, 0, "writemif" },
+	{ PRISM2_IOCTL_RESET,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "reset" },
+	{ PRISM2_IOCTL_INQUIRE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "inquire" },
+	{ PRISM2_IOCTL_SET_RID_WORD,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "set_rid_word" },
+	{ PRISM2_IOCTL_MACCMD,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "maccmd" },
+	{ PRISM2_IOCTL_WDS_ADD,
+	  IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, "wds_add" },
+	{ PRISM2_IOCTL_WDS_DEL,
+	  IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, "wds_del" },
+	{ PRISM2_IOCTL_ADDMAC,
+	  IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, "addmac" },
+	{ PRISM2_IOCTL_DELMAC,
+	  IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, "delmac" },
+	{ PRISM2_IOCTL_KICKMAC,
+	  IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, "kickmac" },
+	/* --- raw access to sub-ioctls --- */
+	{ PRISM2_IOCTL_PRISM2_PARAM,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "prism2_param" },
+	{ PRISM2_IOCTL_GET_PRISM2_PARAM,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getprism2_param" },
+	/* --- sub-ioctls handlers --- */
+	{ PRISM2_IOCTL_PRISM2_PARAM,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "" },
+	{ PRISM2_IOCTL_GET_PRISM2_PARAM,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "" },
+	/* --- sub-ioctls definitions --- */
+	{ PRISM2_PARAM_TXRATECTRL,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "txratectrl" },
+	{ PRISM2_PARAM_TXRATECTRL,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gettxratectrl" },
+	{ PRISM2_PARAM_BEACON_INT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "beacon_int" },
+	{ PRISM2_PARAM_BEACON_INT,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getbeacon_int" },
+#ifndef PRISM2_NO_STATION_MODES
+	{ PRISM2_PARAM_PSEUDO_IBSS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "pseudo_ibss" },
+	{ PRISM2_PARAM_PSEUDO_IBSS,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getpseudo_ibss" },
+#endif /* PRISM2_NO_STATION_MODES */
+	{ PRISM2_PARAM_ALC,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "alc" },
+	{ PRISM2_PARAM_ALC,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getalc" },
+	{ PRISM2_PARAM_DUMP,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "dump" },
+	{ PRISM2_PARAM_DUMP,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getdump" },
+	{ PRISM2_PARAM_OTHER_AP_POLICY,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "other_ap_policy" },
+	{ PRISM2_PARAM_OTHER_AP_POLICY,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getother_ap_pol" },
+	{ PRISM2_PARAM_AP_MAX_INACTIVITY,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "max_inactivity" },
+	{ PRISM2_PARAM_AP_MAX_INACTIVITY,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getmax_inactivi" },
+	{ PRISM2_PARAM_AP_BRIDGE_PACKETS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bridge_packets" },
+	{ PRISM2_PARAM_AP_BRIDGE_PACKETS,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getbridge_packe" },
+	{ PRISM2_PARAM_DTIM_PERIOD,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "dtim_period" },
+	{ PRISM2_PARAM_DTIM_PERIOD,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getdtim_period" },
+	{ PRISM2_PARAM_AP_NULLFUNC_ACK,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "nullfunc_ack" },
+	{ PRISM2_PARAM_AP_NULLFUNC_ACK,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getnullfunc_ack" },
+	{ PRISM2_PARAM_MAX_WDS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "max_wds" },
+	{ PRISM2_PARAM_MAX_WDS,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getmax_wds" },
+	{ PRISM2_PARAM_AP_AUTOM_AP_WDS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "autom_ap_wds" },
+	{ PRISM2_PARAM_AP_AUTOM_AP_WDS,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getautom_ap_wds" },
+	{ PRISM2_PARAM_AP_AUTH_ALGS,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ap_auth_algs" },
+	{ PRISM2_PARAM_AP_AUTH_ALGS,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getap_auth_algs" },
+	{ PRISM2_PARAM_MONITOR_ALLOW_FCSERR,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "allow_fcserr" },
+	{ PRISM2_PARAM_MONITOR_ALLOW_FCSERR,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getallow_fcserr" },
+	{ PRISM2_PARAM_HOST_ENCRYPT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "host_encrypt" },
+	{ PRISM2_PARAM_HOST_ENCRYPT,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gethost_encrypt" },
+	{ PRISM2_PARAM_HOST_DECRYPT,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "host_decrypt" },
+	{ PRISM2_PARAM_HOST_DECRYPT,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gethost_decrypt" },
+#ifndef PRISM2_NO_STATION_MODES
+	{ PRISM2_PARAM_HOST_ROAMING,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "host_roaming" },
+	{ PRISM2_PARAM_HOST_ROAMING,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gethost_roaming" },
+#endif /* PRISM2_NO_STATION_MODES */
+	{ PRISM2_PARAM_BCRX_STA_KEY,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bcrx_sta_key" },
+	{ PRISM2_PARAM_BCRX_STA_KEY,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getbcrx_sta_key" },
+	{ PRISM2_PARAM_IEEE_802_1X,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ieee_802_1x" },
+	{ PRISM2_PARAM_IEEE_802_1X,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getieee_802_1x" },
+	{ PRISM2_PARAM_ANTSEL_TX,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "antsel_tx" },
+	{ PRISM2_PARAM_ANTSEL_TX,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getantsel_tx" },
+	{ PRISM2_PARAM_ANTSEL_RX,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "antsel_rx" },
+	{ PRISM2_PARAM_ANTSEL_RX,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getantsel_rx" },
+	{ PRISM2_PARAM_MONITOR_TYPE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "monitor_type" },
+	{ PRISM2_PARAM_MONITOR_TYPE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getmonitor_type" },
+	{ PRISM2_PARAM_WDS_TYPE,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wds_type" },
+	{ PRISM2_PARAM_WDS_TYPE,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getwds_type" },
+	{ PRISM2_PARAM_HOSTSCAN,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "hostscan" },
+	{ PRISM2_PARAM_HOSTSCAN,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gethostscan" },
+	{ PRISM2_PARAM_AP_SCAN,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ap_scan" },
+	{ PRISM2_PARAM_AP_SCAN,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getap_scan" },
+	{ PRISM2_PARAM_ENH_SEC,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "enh_sec" },
+	{ PRISM2_PARAM_ENH_SEC,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getenh_sec" },
+#ifdef PRISM2_IO_DEBUG
+	{ PRISM2_PARAM_IO_DEBUG,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "io_debug" },
+	{ PRISM2_PARAM_IO_DEBUG,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getio_debug" },
+#endif /* PRISM2_IO_DEBUG */
+	{ PRISM2_PARAM_BASIC_RATES,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "basic_rates" },
+	{ PRISM2_PARAM_BASIC_RATES,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getbasic_rates" },
+	{ PRISM2_PARAM_OPER_RATES,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "oper_rates" },
+	{ PRISM2_PARAM_OPER_RATES,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getoper_rates" },
+	{ PRISM2_PARAM_HOSTAPD,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "hostapd" },
+	{ PRISM2_PARAM_HOSTAPD,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gethostapd" },
+	{ PRISM2_PARAM_HOSTAPD_STA,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "hostapd_sta" },
+	{ PRISM2_PARAM_HOSTAPD_STA,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gethostapd_sta" },
+	{ PRISM2_PARAM_WPA,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wpa" },
+	{ PRISM2_PARAM_WPA,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getwpa" },
+	{ PRISM2_PARAM_PRIVACY_INVOKED,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "privacy_invoked" },
+	{ PRISM2_PARAM_PRIVACY_INVOKED,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getprivacy_invo" },
+	{ PRISM2_PARAM_TKIP_COUNTERMEASURES,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "tkip_countermea" },
+	{ PRISM2_PARAM_TKIP_COUNTERMEASURES,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gettkip_counter" },
+	{ PRISM2_PARAM_DROP_UNENCRYPTED,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "drop_unencrypte" },
+	{ PRISM2_PARAM_DROP_UNENCRYPTED,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getdrop_unencry" },
+	{ PRISM2_PARAM_SCAN_CHANNEL_MASK,
+	  IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "scan_channels" },
+	{ PRISM2_PARAM_SCAN_CHANNEL_MASK,
+	  0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getscan_channel" },
+};
+
+
+static int prism2_ioctl_priv_inquire(struct net_device *dev, int *i)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	if (local->func->cmd(dev, HFA384X_CMDCODE_INQUIRE, *i, NULL, NULL))
+		return -EOPNOTSUPP;
+
+	return 0;
+}
+
+
+static int prism2_ioctl_priv_prism2_param(struct net_device *dev,
+					  struct iw_request_info *info,
+					  void *wrqu, char *extra)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	int *i = (int *) extra;
+	int param = *i;
+	int value = *(i + 1);
+	int ret = 0;
+	u16 val;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	switch (param) {
+	case PRISM2_PARAM_TXRATECTRL:
+		local->fw_tx_rate_control = value;
+		break;
+
+	case PRISM2_PARAM_BEACON_INT:
+		if (hostap_set_word(dev, HFA384X_RID_CNFBEACONINT, value) ||
+		    local->func->reset_port(dev))
+			ret = -EINVAL;
+		else
+			local->beacon_int = value;
+		break;
+
+#ifndef PRISM2_NO_STATION_MODES
+	case PRISM2_PARAM_PSEUDO_IBSS:
+		if (value == local->pseudo_adhoc)
+			break;
+
+		if (value != 0 && value != 1) {
+			ret = -EINVAL;
+			break;
+		}
+
+		printk(KERN_DEBUG "prism2: %s: pseudo IBSS change %d -> %d\n",
+		       dev->name, local->pseudo_adhoc, value);
+		local->pseudo_adhoc = value;
+		if (local->iw_mode != IW_MODE_ADHOC)
+			break;
+
+		if (hostap_set_word(dev, HFA384X_RID_CNFPORTTYPE,
+				    hostap_get_porttype(local))) {
+			ret = -EOPNOTSUPP;
+			break;
+		}
+
+		if (local->func->reset_port(dev))
+			ret = -EINVAL;
+		break;
+#endif /* PRISM2_NO_STATION_MODES */
+
+	case PRISM2_PARAM_ALC:
+		printk(KERN_DEBUG "%s: %s ALC\n", dev->name,
+		       value == 0 ? "Disabling" : "Enabling");
+		val = HFA384X_TEST_CFG_BIT_ALC;
+		local->func->cmd(dev, HFA384X_CMDCODE_TEST |
+				 (HFA384X_TEST_CFG_BITS << 8),
+				 value == 0 ? 0 : 1, &val, NULL);
+		break;
+
+	case PRISM2_PARAM_DUMP:
+		local->frame_dump = value;
+		break;
+
+	case PRISM2_PARAM_OTHER_AP_POLICY:
+		if (value < 0 || value > 3) {
+			ret = -EINVAL;
+			break;
+		}
+		if (local->ap != NULL)
+			local->ap->ap_policy = value;
+		break;
+
+	case PRISM2_PARAM_AP_MAX_INACTIVITY:
+		if (value < 0 || value > 7 * 24 * 60 * 60) {
+			ret = -EINVAL;
+			break;
+		}
+		if (local->ap != NULL)
+			local->ap->max_inactivity = value * HZ;
+		break;
+
+	case PRISM2_PARAM_AP_BRIDGE_PACKETS:
+		if (local->ap != NULL)
+			local->ap->bridge_packets = value;
+		break;
+
+	case PRISM2_PARAM_DTIM_PERIOD:
+		if (value < 0 || value > 65535) {
+			ret = -EINVAL;
+			break;
+		}
+		if (hostap_set_word(dev, HFA384X_RID_CNFOWNDTIMPERIOD, value)
+		    || local->func->reset_port(dev))
+			ret = -EINVAL;
+		else
+			local->dtim_period = value;
+		break;
+
+	case PRISM2_PARAM_AP_NULLFUNC_ACK:
+		if (local->ap != NULL)
+			local->ap->nullfunc_ack = value;
+		break;
+
+	case PRISM2_PARAM_MAX_WDS:
+		local->wds_max_connections = value;
+		break;
+
+	case PRISM2_PARAM_AP_AUTOM_AP_WDS:
+		if (local->ap != NULL) {
+			if (!local->ap->autom_ap_wds && value) {
+				/* add WDS link to all APs in STA table */
+				hostap_add_wds_links(local);
+			}
+			local->ap->autom_ap_wds = value;
+		}
+		break;
+
+	case PRISM2_PARAM_AP_AUTH_ALGS:
+		local->auth_algs = value;
+		if (hostap_set_auth_algs(local))
+			ret = -EINVAL;
+		break;
+
+	case PRISM2_PARAM_MONITOR_ALLOW_FCSERR:
+		local->monitor_allow_fcserr = value;
+		break;
+
+	case PRISM2_PARAM_HOST_ENCRYPT:
+		local->host_encrypt = value;
+		if (hostap_set_encryption(local) ||
+		    local->func->reset_port(dev))
+			ret = -EINVAL;
+		break;
+
+	case PRISM2_PARAM_HOST_DECRYPT:
+		local->host_decrypt = value;
+		if (hostap_set_encryption(local) ||
+		    local->func->reset_port(dev))
+			ret = -EINVAL;
+		break;
+
+#ifndef PRISM2_NO_STATION_MODES
+	case PRISM2_PARAM_HOST_ROAMING:
+		if (value < 0 || value > 2) {
+			ret = -EINVAL;
+			break;
+		}
+		local->host_roaming = value;
+		if (hostap_set_roaming(local) || local->func->reset_port(dev))
+			ret = -EINVAL;
+		break;
+#endif /* PRISM2_NO_STATION_MODES */
+
+	case PRISM2_PARAM_BCRX_STA_KEY:
+		local->bcrx_sta_key = value;
+		break;
+
+	case PRISM2_PARAM_IEEE_802_1X:
+		local->ieee_802_1x = value;
+		break;
+
+	case PRISM2_PARAM_ANTSEL_TX:
+		if (value < 0 || value > HOSTAP_ANTSEL_HIGH) {
+			ret = -EINVAL;
+			break;
+		}
+		local->antsel_tx = value;
+		hostap_set_antsel(local);
+		break;
+
+	case PRISM2_PARAM_ANTSEL_RX:
+		if (value < 0 || value > HOSTAP_ANTSEL_HIGH) {
+			ret = -EINVAL;
+			break;
+		}
+		local->antsel_rx = value;
+		hostap_set_antsel(local);
+		break;
+
+	case PRISM2_PARAM_MONITOR_TYPE:
+		if (value != PRISM2_MONITOR_80211 &&
+		    value != PRISM2_MONITOR_CAPHDR &&
+		    value != PRISM2_MONITOR_PRISM) {
+			ret = -EINVAL;
+			break;
+		}
+		local->monitor_type = value;
+		if (local->iw_mode == IW_MODE_MONITOR)
+			hostap_monitor_set_type(local);
+		break;
+
+	case PRISM2_PARAM_WDS_TYPE:
+		local->wds_type = value;
+		break;
+
+	case PRISM2_PARAM_HOSTSCAN:
+	{
+		struct hfa384x_hostscan_request scan_req;
+		u16 rate;
+
+		memset(&scan_req, 0, sizeof(scan_req));
+		scan_req.channel_list = __constant_cpu_to_le16(0x3fff);
+		switch (value) {
+		case 1: rate = HFA384X_RATES_1MBPS; break;
+		case 2: rate = HFA384X_RATES_2MBPS; break;
+		case 3: rate = HFA384X_RATES_5MBPS; break;
+		case 4: rate = HFA384X_RATES_11MBPS; break;
+		default: rate = HFA384X_RATES_1MBPS; break;
+		}
+		scan_req.txrate = cpu_to_le16(rate);
+		/* leave SSID empty to accept all SSIDs */
+
+		if (local->iw_mode == IW_MODE_MASTER) {
+			if (hostap_set_word(dev, HFA384X_RID_CNFPORTTYPE,
+					    HFA384X_PORTTYPE_BSS) ||
+			    local->func->reset_port(dev))
+				printk(KERN_DEBUG "Leaving Host AP mode "
+				       "for HostScan failed\n");
+		}
+
+		if (local->func->set_rid(dev, HFA384X_RID_HOSTSCAN, &scan_req,
+					 sizeof(scan_req))) {
+			printk(KERN_DEBUG "HOSTSCAN failed\n");
+			ret = -EINVAL;
+		}
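+		/* In Host AP mode the port was temporarily switched to BSS
+		 * mode for the scan; wait up to one second for the scan
+		 * results before restoring Host AP operation. */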
+		if (local->iw_mode == IW_MODE_MASTER) {
+			wait_queue_t __wait;
+			init_waitqueue_entry(&__wait, current);
+			add_wait_queue(&local->hostscan_wq, &__wait);
+			set_current_state(TASK_INTERRUPTIBLE);
+			schedule_timeout(HZ);
+			if (signal_pending(current))
+				ret = -EINTR;
+			set_current_state(TASK_RUNNING);
+			remove_wait_queue(&local->hostscan_wq, &__wait);
+
+			if (hostap_set_word(dev, HFA384X_RID_CNFPORTTYPE,
+					    HFA384X_PORTTYPE_HOSTAP) ||
+			    local->func->reset_port(dev))
+				printk(KERN_DEBUG "Returning to Host AP mode "
+				       "after HostScan failed\n");
+		}
+		break;
+	}
+
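+	/* A positive value (re)arms the periodic passive scan timer with the
+	 * given interval in seconds; zero or a negative value disables
+	 * passive scanning. */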
+	case PRISM2_PARAM_AP_SCAN:
+		local->passive_scan_interval = value;
+		if (timer_pending(&local->passive_scan_timer))
+			del_timer(&local->passive_scan_timer);
+		if (value > 0) {
+			local->passive_scan_timer.expires = jiffies +
+				local->passive_scan_interval * HZ;
+			add_timer(&local->passive_scan_timer);
+		}
+		break;
+
+	case PRISM2_PARAM_ENH_SEC:
+		if (value < 0 || value > 3) {
+			ret = -EINVAL;
+			break;
+		}
+		local->enh_sec = value;
+		if (hostap_set_word(dev, HFA384X_RID_CNFENHSECURITY,
+				    local->enh_sec) ||
+		    local->func->reset_port(dev)) {
+			printk(KERN_INFO "%s: cnfEnhSecurity requires STA f/w "
+			       "1.6.3 or newer\n", dev->name);
+			ret = -EOPNOTSUPP;
+		}
+		break;
+
+#ifdef PRISM2_IO_DEBUG
+	case PRISM2_PARAM_IO_DEBUG:
+		local->io_debug_enabled = value;
+		break;
+#endif /* PRISM2_IO_DEBUG */
+
+	case PRISM2_PARAM_BASIC_RATES:
+		if ((value & local->tx_rate_control) != value || value == 0) {
+			printk(KERN_INFO "%s: invalid basic rate set - basic "
+			       "rates must be in supported rate set\n",
+			       dev->name);
+			ret = -EINVAL;
+			break;
+		}
+		local->basic_rates = value;
+		if (hostap_set_word(dev, HFA384X_RID_CNFBASICRATES,
+				    local->basic_rates) ||
+		    local->func->reset_port(dev))
+			ret = -EINVAL;
+		break;
+
+	case PRISM2_PARAM_OPER_RATES:
+		local->tx_rate_control = value;
+		if (hostap_set_rate(dev))
+			ret = -EINVAL;
+		break;
+
+	case PRISM2_PARAM_HOSTAPD:
+		ret = hostap_set_hostapd(local, value, 1);
+		break;
+
+	case PRISM2_PARAM_HOSTAPD_STA:
+		ret = hostap_set_hostapd_sta(local, value, 1);
+		break;
+
+	case PRISM2_PARAM_WPA:
+		local->wpa = value;
+		if (local->sta_fw_ver < PRISM2_FW_VER(1,7,0))
+			ret = -EOPNOTSUPP;
+		else if (hostap_set_word(dev, HFA384X_RID_SSNHANDLINGMODE,
+					 value ? 1 : 0))
+			ret = -EINVAL;
+		break;
+
+	case PRISM2_PARAM_PRIVACY_INVOKED:
+		local->privacy_invoked = value;
+		if (hostap_set_encryption(local) ||
+		    local->func->reset_port(dev))
+			ret = -EINVAL;
+		break;
+
+	case PRISM2_PARAM_TKIP_COUNTERMEASURES:
+		local->tkip_countermeasures = value;
+		break;
+
+	case PRISM2_PARAM_DROP_UNENCRYPTED:
+		local->drop_unencrypted = value;
+		break;
+
+	case PRISM2_PARAM_SCAN_CHANNEL_MASK:
+		local->scan_channel_mask = value;
+		break;
+
+	default:
+		printk(KERN_DEBUG "%s: prism2_param: unknown param %d\n",
+		       dev->name, param);
+		ret = -EOPNOTSUPP;
+		break;
+	}
+
+	return ret;
+}
+
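+/*
+ * Illustrative only: these PRISM2_PARAM_* sub-ioctls are normally driven
+ * from user space through iwpriv, with the actual command names coming
+ * from the prism2_priv[] argument table defined elsewhere in this file.
+ * A typical invocation looks roughly like:
+ *
+ *	iwpriv wlan0 prism2_param <param> <value>
+ *	iwpriv wlan0 get_prism2_param <param>
+ *
+ * so the names above should be treated as an example, not a guarantee.
+ */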
+
+static int prism2_ioctl_priv_get_prism2_param(struct net_device *dev,
+					      struct iw_request_info *info,
+					      void *wrqu, char *extra)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	int *param = (int *) extra;
+	int ret = 0;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	switch (*param) {
+	case PRISM2_PARAM_TXRATECTRL:
+		*param = local->fw_tx_rate_control;
+		break;
+
+	case PRISM2_PARAM_BEACON_INT:
+		*param = local->beacon_int;
+		break;
+
+	case PRISM2_PARAM_PSEUDO_IBSS:
+		*param = local->pseudo_adhoc;
+		break;
+
+	case PRISM2_PARAM_ALC:
+		ret = -EOPNOTSUPP; /* FIX */
+		break;
+
+	case PRISM2_PARAM_DUMP:
+		*param = local->frame_dump;
+		break;
+
+	case PRISM2_PARAM_OTHER_AP_POLICY:
+		if (local->ap != NULL)
+			*param = local->ap->ap_policy;
+		else
+			ret = -EOPNOTSUPP;
+		break;
+
+	case PRISM2_PARAM_AP_MAX_INACTIVITY:
+		if (local->ap != NULL)
+			*param = local->ap->max_inactivity / HZ;
+		else
+			ret = -EOPNOTSUPP;
+		break;
+
+	case PRISM2_PARAM_AP_BRIDGE_PACKETS:
+		if (local->ap != NULL)
+			*param = local->ap->bridge_packets;
+		else
+			ret = -EOPNOTSUPP;
+		break;
+
+	case PRISM2_PARAM_DTIM_PERIOD:
+		*param = local->dtim_period;
+		break;
+
+	case PRISM2_PARAM_AP_NULLFUNC_ACK:
+		if (local->ap != NULL)
+			*param = local->ap->nullfunc_ack;
+		else
+			ret = -EOPNOTSUPP;
+		break;
+
+	case PRISM2_PARAM_MAX_WDS:
+		*param = local->wds_max_connections;
+		break;
+
+	case PRISM2_PARAM_AP_AUTOM_AP_WDS:
+		if (local->ap != NULL)
+			*param = local->ap->autom_ap_wds;
+		else
+			ret = -EOPNOTSUPP;
+		break;
+
+	case PRISM2_PARAM_AP_AUTH_ALGS:
+		*param = local->auth_algs;
+		break;
+
+	case PRISM2_PARAM_MONITOR_ALLOW_FCSERR:
+		*param = local->monitor_allow_fcserr;
+		break;
+
+	case PRISM2_PARAM_HOST_ENCRYPT:
+		*param = local->host_encrypt;
+		break;
+
+	case PRISM2_PARAM_HOST_DECRYPT:
+		*param = local->host_decrypt;
+		break;
+
+	case PRISM2_PARAM_HOST_ROAMING:
+		*param = local->host_roaming;
+		break;
+
+	case PRISM2_PARAM_BCRX_STA_KEY:
+		*param = local->bcrx_sta_key;
+		break;
+
+	case PRISM2_PARAM_IEEE_802_1X:
+		*param = local->ieee_802_1x;
+		break;
+
+	case PRISM2_PARAM_ANTSEL_TX:
+		*param = local->antsel_tx;
+		break;
+
+	case PRISM2_PARAM_ANTSEL_RX:
+		*param = local->antsel_rx;
+		break;
+
+	case PRISM2_PARAM_MONITOR_TYPE:
+		*param = local->monitor_type;
+		break;
+
+	case PRISM2_PARAM_WDS_TYPE:
+		*param = local->wds_type;
+		break;
+
+	case PRISM2_PARAM_HOSTSCAN:
+		ret = -EOPNOTSUPP;
+		break;
+
+	case PRISM2_PARAM_AP_SCAN:
+		*param = local->passive_scan_interval;
+		break;
+
+	case PRISM2_PARAM_ENH_SEC:
+		*param = local->enh_sec;
+		break;
+
+#ifdef PRISM2_IO_DEBUG
+	case PRISM2_PARAM_IO_DEBUG:
+		*param = local->io_debug_enabled;
+		break;
+#endif /* PRISM2_IO_DEBUG */
+
+	case PRISM2_PARAM_BASIC_RATES:
+		*param = local->basic_rates;
+		break;
+
+	case PRISM2_PARAM_OPER_RATES:
+		*param = local->tx_rate_control;
+		break;
+
+	case PRISM2_PARAM_HOSTAPD:
+		*param = local->hostapd;
+		break;
+
+	case PRISM2_PARAM_HOSTAPD_STA:
+		*param = local->hostapd_sta;
+		break;
+
+	case PRISM2_PARAM_WPA:
+		if (local->sta_fw_ver < PRISM2_FW_VER(1,7,0))
+			ret = -EOPNOTSUPP;
+		*param = local->wpa;
+		break;
+
+	case PRISM2_PARAM_PRIVACY_INVOKED:
+		*param = local->privacy_invoked;
+		break;
+
+	case PRISM2_PARAM_TKIP_COUNTERMEASURES:
+		*param = local->tkip_countermeasures;
+		break;
+
+	case PRISM2_PARAM_DROP_UNENCRYPTED:
+		*param = local->drop_unencrypted;
+		break;
+
+	case PRISM2_PARAM_SCAN_CHANNEL_MASK:
+		*param = local->scan_channel_mask;
+		break;
+
+	default:
+		printk(KERN_DEBUG "%s: get_prism2_param: unknown param %d\n",
+		       dev->name, *param);
+		ret = -EOPNOTSUPP;
+		break;
+	}
+
+	return ret;
+}
+
+
+static int prism2_ioctl_priv_readmif(struct net_device *dev,
+				     struct iw_request_info *info,
+				     void *wrqu, char *extra)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	u16 resp0;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	if (local->func->cmd(dev, HFA384X_CMDCODE_READMIF, *extra, NULL,
+			     &resp0))
+		return -EOPNOTSUPP;
+	else
+		*extra = resp0;
+
+	return 0;
+}
+
+
+static int prism2_ioctl_priv_writemif(struct net_device *dev,
+				      struct iw_request_info *info,
+				      void *wrqu, char *extra)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	u16 cr, val;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	cr = *extra;
+	val = *(extra + 1);
+	if (local->func->cmd(dev, HFA384X_CMDCODE_WRITEMIF, cr, &val, NULL))
+		return -EOPNOTSUPP;
+
+	return 0;
+}
+
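+/*
+ * The readmif/writemif helpers above wrap the firmware's READMIF/WRITEMIF
+ * commands for accessing baseband (MIF) registers; they are exported as
+ * private sub-ioctls SIOCIWFIRSTPRIV + 3 and + 2 respectively (see
+ * prism2_private_handler[] later in this file).  The register address is
+ * passed in the first byte of the argument buffer and, for writemif, the
+ * value to write in the second byte.
+ */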
+
+static int prism2_ioctl_priv_monitor(struct net_device *dev, int *i)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	int ret = 0;
+	u32 mode;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	printk(KERN_DEBUG "%s: process %d (%s) used deprecated iwpriv monitor "
+	       "- update software to use iwconfig mode monitor\n",
+	       dev->name, current->pid, current->comm);
+
+	/* Backward compatibility code - this can be removed at some point */
+
+	if (*i == 0) {
+		/* Disable monitor mode - old mode was not saved, so go to
+		 * Master mode */
+		mode = IW_MODE_MASTER;
+		ret = prism2_ioctl_siwmode(dev, NULL, &mode, NULL);
+	} else if (*i == 1) {
+		/* netlink socket mode is no longer supported since it did
+		 * not separate different devices from each other and was
+		 * not the best method for delivering large amounts of
+		 * packets to user space */
+		ret = -EOPNOTSUPP;
+	} else if (*i == 2 || *i == 3) {
+		switch (*i) {
+		case 2:
+			local->monitor_type = PRISM2_MONITOR_80211;
+			break;
+		case 3:
+			local->monitor_type = PRISM2_MONITOR_PRISM;
+			break;
+		}
+		mode = IW_MODE_MONITOR;
+		ret = prism2_ioctl_siwmode(dev, NULL, &mode, NULL);
+		hostap_monitor_mode_enable(local);
+	} else
+		ret = -EINVAL;
+
+	return ret;
+}
+
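+/*
+ * The deprecated monitor sub-ioctl above maps to the standard wireless
+ * extensions interface: value 0 corresponds to "iwconfig <dev> mode master"
+ * and values 2/3 to "iwconfig <dev> mode monitor" with either plain IEEE
+ * 802.11 headers (PRISM2_MONITOR_80211) or Prism2-specific headers
+ * (PRISM2_MONITOR_PRISM) passed to user space.
+ */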
+
+static int prism2_ioctl_priv_reset(struct net_device *dev, int *i)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	printk(KERN_DEBUG "%s: manual reset request(%d)\n", dev->name, *i);
+	switch (*i) {
+	case 0:
+		/* Disable and enable card */
+		local->func->hw_shutdown(dev, 1);
+		local->func->hw_config(dev, 0);
+		break;
+
+	case 1:
+		/* COR sreset */
+		local->func->hw_reset(dev);
+		break;
+
+	case 2:
+		/* Disable and enable port 0 */
+		local->func->reset_port(dev);
+		break;
+
+	case 3:
+		prism2_sta_deauth(local, WLAN_REASON_DEAUTH_LEAVING);
+		if (local->func->cmd(dev, HFA384X_CMDCODE_DISABLE, 0, NULL,
+				     NULL))
+			return -EINVAL;
+		break;
+
+	case 4:
+		if (local->func->cmd(dev, HFA384X_CMDCODE_ENABLE, 0, NULL,
+				     NULL))
+			return -EINVAL;
+		break;
+
+	default:
+		printk(KERN_DEBUG "Unknown reset request %d\n", *i);
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+
+static int prism2_ioctl_priv_set_rid_word(struct net_device *dev, int *i)
+{
+	int rid = *i;
+	int value = *(i + 1);
+
+	printk(KERN_DEBUG "%s: Set RID[0x%X] = %d\n", dev->name, rid, value);
+
+	if (hostap_set_word(dev, rid, value))
+		return -EINVAL;
+
+	return 0;
+}
+
+
+#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
+static int ap_mac_cmd_ioctl(local_info_t *local, int *cmd)
+{
+	int ret = 0;
+
+	switch (*cmd) {
+	case AP_MAC_CMD_POLICY_OPEN:
+		local->ap->mac_restrictions.policy = MAC_POLICY_OPEN;
+		break;
+	case AP_MAC_CMD_POLICY_ALLOW:
+		local->ap->mac_restrictions.policy = MAC_POLICY_ALLOW;
+		break;
+	case AP_MAC_CMD_POLICY_DENY:
+		local->ap->mac_restrictions.policy = MAC_POLICY_DENY;
+		break;
+	case AP_MAC_CMD_FLUSH:
+		ap_control_flush_macs(&local->ap->mac_restrictions);
+		break;
+	case AP_MAC_CMD_KICKALL:
+		ap_control_kickall(local->ap);
+		hostap_deauth_all_stas(local->dev, local->ap, 0);
+		break;
+	default:
+		ret = -EOPNOTSUPP;
+		break;
+	}
+
+	return ret;
+}
+#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
+
+
+#ifdef PRISM2_DOWNLOAD_SUPPORT
+static int prism2_ioctl_priv_download(local_info_t *local, struct iw_point *p)
+{
+	struct prism2_download_param *param;
+	int ret = 0;
+
+	if (p->length < sizeof(struct prism2_download_param) ||
+	    p->length > 1024 || !p->pointer)
+		return -EINVAL;
+
+	param = (struct prism2_download_param *)
+		kmalloc(p->length, GFP_KERNEL);
+	if (param == NULL)
+		return -ENOMEM;
+
+	if (copy_from_user(param, p->pointer, p->length)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	if (p->length < sizeof(struct prism2_download_param) +
+	    param->num_areas * sizeof(struct prism2_download_area)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = local->func->download(local, param);
+
+ out:
+	if (param != NULL)
+		kfree(param);
+
+	return ret;
+}
+#endif /* PRISM2_DOWNLOAD_SUPPORT */
+
+
+static int prism2_set_genericelement(struct net_device *dev, u8 *elem,
+				     size_t len)
+{
+	struct hostap_interface *iface = dev->priv;
+	local_info_t *local = iface->local;
+	u8 *buf;
+
+	/*
+	 * Add 16-bit length in the beginning of the buffer because Prism2 RID
+	 * includes it.
+	 */
+	buf = kmalloc(len + 2, GFP_KERNEL);
+	if (buf == NULL)
+		return -ENOMEM;
+
+	*((u16 *) buf) = cpu_to_le16(len);
+	memcpy(buf + 2, elem, len);
+
+	kfree(local->generic_elem);
+	local->generic_elem = buf;
+	local->generic_elem_len = len + 2;
+
+	return local->func->set_rid(local->dev, HFA384X_RID_GENERICELEMENT,
+				    buf, len + 2);
+}
+
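+/*
+ * Layout of the buffer written to HFA384X_RID_GENERICELEMENT above:
+ *
+ *	buf[0..1]  little-endian length of the information element data
+ *	buf[2.. ]  raw IE octets (e.g. a WPA IE supplied by user space)
+ *
+ * generic_elem_len therefore includes the two length octets, which is why
+ * the SIOCGIWGENIE handler below skips them when copying data back out.
+ */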
+
+static int prism2_ioctl_siwauth(struct net_device *dev,
+				struct iw_request_info *info,
+				struct iw_param *data, char *extra)
+{
+	struct hostap_interface *iface = dev->priv;
+	local_info_t *local = iface->local;
+
+	switch (data->flags & IW_AUTH_INDEX) {
+	case IW_AUTH_WPA_VERSION:
+	case IW_AUTH_CIPHER_PAIRWISE:
+	case IW_AUTH_CIPHER_GROUP:
+	case IW_AUTH_KEY_MGMT:
+		/*
+		 * Host AP driver does not use these parameters and allows
+		 * wpa_supplicant to control them internally.
+		 */
+		break;
+	case IW_AUTH_TKIP_COUNTERMEASURES:
+		local->tkip_countermeasures = data->value;
+		break;
+	case IW_AUTH_DROP_UNENCRYPTED:
+		local->drop_unencrypted = data->value;
+		break;
+	case IW_AUTH_80211_AUTH_ALG:
+		local->auth_algs = data->value;
+		break;
+	case IW_AUTH_WPA_ENABLED:
+		if (data->value == 0) {
+			local->wpa = 0;
+			if (local->sta_fw_ver < PRISM2_FW_VER(1,7,0))
+				break;
+			prism2_set_genericelement(dev, "", 0);
+			local->host_roaming = 0;
+			local->privacy_invoked = 0;
+			if (hostap_set_word(dev, HFA384X_RID_SSNHANDLINGMODE,
+					    0) ||
+			    hostap_set_roaming(local) ||
+			    hostap_set_encryption(local) ||
+			    local->func->reset_port(dev))
+				return -EINVAL;
+			break;
+		}
+		if (local->sta_fw_ver < PRISM2_FW_VER(1,7,0))
+			return -EOPNOTSUPP;
+		local->host_roaming = 2;
+		local->privacy_invoked = 1;
+		local->wpa = 1;
+		if (hostap_set_word(dev, HFA384X_RID_SSNHANDLINGMODE, 1) ||
+		    hostap_set_roaming(local) ||
+		    hostap_set_encryption(local) ||
+		    local->func->reset_port(dev))
+			return -EINVAL;
+		break;
+	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+		local->ieee_802_1x = data->value;
+		break;
+	case IW_AUTH_PRIVACY_INVOKED:
+		local->privacy_invoked = data->value;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
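+/*
+ * The SIOCSIWAUTH handler above is typically exercised by wpa_supplicant
+ * through its generic wireless extensions backend: enabling
+ * IW_AUTH_WPA_ENABLED switches the card to host-based roaming and
+ * encryption and sets the ssnHandlingMode RID, while cipher and key
+ * management selections are deliberately left to user space.
+ */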
+
+static int prism2_ioctl_giwauth(struct net_device *dev,
+				struct iw_request_info *info,
+				struct iw_param *data, char *extra)
+{
+	struct hostap_interface *iface = dev->priv;
+	local_info_t *local = iface->local;
+
+	switch (data->flags & IW_AUTH_INDEX) {
+	case IW_AUTH_WPA_VERSION:
+	case IW_AUTH_CIPHER_PAIRWISE:
+	case IW_AUTH_CIPHER_GROUP:
+	case IW_AUTH_KEY_MGMT:
+		/*
+		 * Host AP driver does not use these parameters and allows
+		 * wpa_supplicant to control them internally.
+		 */
+		return -EOPNOTSUPP;
+	case IW_AUTH_TKIP_COUNTERMEASURES:
+		data->value = local->tkip_countermeasures;
+		break;
+	case IW_AUTH_DROP_UNENCRYPTED:
+		data->value = local->drop_unencrypted;
+		break;
+	case IW_AUTH_80211_AUTH_ALG:
+		data->value = local->auth_algs;
+		break;
+	case IW_AUTH_WPA_ENABLED:
+		data->value = local->wpa;
+		break;
+	case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+		data->value = local->ieee_802_1x;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
+
+static int prism2_ioctl_siwencodeext(struct net_device *dev,
+				     struct iw_request_info *info,
+				     struct iw_point *erq, char *extra)
+{
+	struct hostap_interface *iface = dev->priv;
+	local_info_t *local = iface->local;
+	struct iw_encode_ext *ext = (struct iw_encode_ext *) extra;
+	int i, ret = 0;
+	struct ieee80211_crypto_ops *ops;
+	struct ieee80211_crypt_data **crypt;
+	void *sta_ptr;
+	u8 *addr;
+	const char *alg, *module;
+
+	i = erq->flags & IW_ENCODE_INDEX;
+	if (i > WEP_KEYS)
+		return -EINVAL;
+	if (i < 1 || i > WEP_KEYS)
+		i = local->tx_keyidx;
+	else
+		i--;
+	if (i < 0 || i >= WEP_KEYS)
+		return -EINVAL;
+
+	addr = ext->addr.sa_data;
+	if (addr[0] == 0xff && addr[1] == 0xff && addr[2] == 0xff &&
+	    addr[3] == 0xff && addr[4] == 0xff && addr[5] == 0xff) {
+		sta_ptr = NULL;
+		crypt = &local->crypt[i];
+	} else {
+		if (i != 0)
+			return -EINVAL;
+		sta_ptr = ap_crypt_get_ptrs(local->ap, addr, 0, &crypt);
+		if (sta_ptr == NULL) {
+			if (local->iw_mode == IW_MODE_INFRA) {
+				/*
+				 * TODO: add STA entry for the current AP so
+				 * that unicast key can be used. For now, this
+				 * is emulated by using default key idx 0.
+				 */
+				i = 0;
+				crypt = &local->crypt[i];
+			} else
+				return -EINVAL;
+		}
+	}
+
+	if ((erq->flags & IW_ENCODE_DISABLED) ||
+	    ext->alg == IW_ENCODE_ALG_NONE) {
+		if (*crypt)
+			prism2_crypt_delayed_deinit(local, crypt);
+		goto done;
+	}
+
+	switch (ext->alg) {
+	case IW_ENCODE_ALG_WEP:
+		alg = "WEP";
+		module = "ieee80211_crypt_wep";
+		break;
+	case IW_ENCODE_ALG_TKIP:
+		alg = "TKIP";
+		module = "ieee80211_crypt_tkip";
+		break;
+	case IW_ENCODE_ALG_CCMP:
+		alg = "CCMP";
+		module = "ieee80211_crypt_ccmp";
+		break;
+	default:
+		printk(KERN_DEBUG "%s: unsupported algorithm %d\n",
+		       local->dev->name, ext->alg);
+		ret = -EOPNOTSUPP;
+		goto done;
+	}
+
+	ops = ieee80211_get_crypto_ops(alg);
+	if (ops == NULL) {
+		request_module(module);
+		ops = ieee80211_get_crypto_ops(alg);
+	}
+	if (ops == NULL) {
+		printk(KERN_DEBUG "%s: unknown crypto alg '%s'\n",
+		       local->dev->name, alg);
+		ret = -EOPNOTSUPP;
+		goto done;
+	}
+
+	if (sta_ptr || ext->alg != IW_ENCODE_ALG_WEP) {
+		/*
+		 * Per-station encryption and algorithms other than WEP
+		 * require host-based encryption, so force it on
+		 * automatically.
+		 */
+		local->host_decrypt = local->host_encrypt = 1;
+	}
+
+	if (*crypt == NULL || (*crypt)->ops != ops) {
+		struct ieee80211_crypt_data *new_crypt;
+
+		prism2_crypt_delayed_deinit(local, crypt);
+
+		new_crypt = (struct ieee80211_crypt_data *)
+			kmalloc(sizeof(struct ieee80211_crypt_data),
+				GFP_KERNEL);
+		if (new_crypt == NULL) {
+			ret = -ENOMEM;
+			goto done;
+		}
+		memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
+		new_crypt->ops = ops;
+		new_crypt->priv = new_crypt->ops->init(i);
+		if (new_crypt->priv == NULL) {
+			kfree(new_crypt);
+			ret = -EINVAL;
+			goto done;
+		}
+
+		*crypt = new_crypt;
+	}
+
+	/*
+	 * TODO: if ext_flags does not have IW_ENCODE_EXT_RX_SEQ_VALID, the
+	 * existing seq# should not be changed.
+	 * TODO: if ext_flags has IW_ENCODE_EXT_TX_SEQ_VALID, next TX seq#
+	 * should be changed to something else than zero.
+	 */
+	if ((!(ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) || ext->key_len > 0)
+	    && (*crypt)->ops->set_key &&
+	    (*crypt)->ops->set_key(ext->key, ext->key_len, ext->rx_seq,
+				   (*crypt)->priv) < 0) {
+		printk(KERN_DEBUG "%s: key setting failed\n",
+		       local->dev->name);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
+		if (!sta_ptr)
+			local->tx_keyidx = i;
+		else if (i) {
+			ret = -EINVAL;
+			goto done;
+		}
+	}
+
+
+	if (sta_ptr == NULL && ext->key_len > 0) {
+		int first = 1, j;
+		for (j = 0; j < WEP_KEYS; j++) {
+			if (j != i && local->crypt[j]) {
+				first = 0;
+				break;
+			}
+		}
+		if (first)
+			local->tx_keyidx = i;
+	}
+
+ done:
+	if (sta_ptr)
+		hostap_handle_sta_release(sta_ptr);
+
+	local->open_wep = erq->flags & IW_ENCODE_OPEN;
+
+	/*
+	 * Do not reset port0 if the card is in Managed mode, since resetting
+	 * triggers a new IEEE 802.11 authentication which may end up looping
+	 * with IEEE 802.1X. Prism2 documentation seems to require a port
+	 * reset after WEP configuration, but the keys do appear to take
+	 * effect without one, at least in Managed mode.
+	 */
+	if (ret == 0 &&
+	    (hostap_set_encryption(local) ||
+	     (local->iw_mode != IW_MODE_INFRA &&
+	      local->func->reset_port(local->dev))))
+		ret = -EINVAL;
+
+	return ret;
+}
+
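+/*
+ * Address convention shared by the SIOCSIWENCODEEXT handler above and the
+ * hostapd PRISM2_SET_ENCRYPTION ioctl later in this file: the broadcast
+ * address ff:ff:ff:ff:ff:ff selects one of the WEP_KEYS default/group key
+ * slots, while a unicast address selects a per-station key (index 0 only).
+ */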
+
+static int prism2_ioctl_giwencodeext(struct net_device *dev,
+				     struct iw_request_info *info,
+				     struct iw_point *erq, char *extra)
+{
+	struct hostap_interface *iface = dev->priv;
+	local_info_t *local = iface->local;
+	struct ieee80211_crypt_data **crypt;
+	void *sta_ptr;
+	int max_key_len, i;
+	struct iw_encode_ext *ext = (struct iw_encode_ext *) extra;
+	u8 *addr;
+
+	max_key_len = erq->length - sizeof(*ext);
+	if (max_key_len < 0)
+		return -EINVAL;
+
+	i = erq->flags & IW_ENCODE_INDEX;
+	if (i < 1 || i > WEP_KEYS)
+		i = local->tx_keyidx;
+	else
+		i--;
+
+	addr = ext->addr.sa_data;
+	if (addr[0] == 0xff && addr[1] == 0xff && addr[2] == 0xff &&
+	    addr[3] == 0xff && addr[4] == 0xff && addr[5] == 0xff) {
+		sta_ptr = NULL;
+		crypt = &local->crypt[i];
+	} else {
+		i = 0;
+		sta_ptr = ap_crypt_get_ptrs(local->ap, addr, 0, &crypt);
+		if (sta_ptr == NULL)
+			return -EINVAL;
+	}
+	erq->flags = i + 1;
+	memset(ext, 0, sizeof(*ext));
+
+	if (*crypt == NULL || (*crypt)->ops == NULL) {
+		ext->alg = IW_ENCODE_ALG_NONE;
+		ext->key_len = 0;
+		erq->flags |= IW_ENCODE_DISABLED;
+	} else {
+		if (strcmp((*crypt)->ops->name, "WEP") == 0)
+			ext->alg = IW_ENCODE_ALG_WEP;
+		else if (strcmp((*crypt)->ops->name, "TKIP") == 0)
+			ext->alg = IW_ENCODE_ALG_TKIP;
+		else if (strcmp((*crypt)->ops->name, "CCMP") == 0)
+			ext->alg = IW_ENCODE_ALG_CCMP;
+		else
+			return -EINVAL;
+
+		if ((*crypt)->ops->get_key) {
+			ext->key_len =
+				(*crypt)->ops->get_key(ext->key,
+						       max_key_len,
+						       ext->tx_seq,
+						       (*crypt)->priv);
+			if (ext->key_len &&
+			    (ext->alg == IW_ENCODE_ALG_TKIP ||
+			     ext->alg == IW_ENCODE_ALG_CCMP))
+				ext->ext_flags |= IW_ENCODE_EXT_TX_SEQ_VALID;
+		}
+	}
+
+	if (sta_ptr)
+		hostap_handle_sta_release(sta_ptr);
+
+	return 0;
+}
+
+
+static int prism2_ioctl_set_encryption(local_info_t *local,
+				       struct prism2_hostapd_param *param,
+				       int param_len)
+{
+	int ret = 0;
+	struct ieee80211_crypto_ops *ops;
+	struct ieee80211_crypt_data **crypt;
+	void *sta_ptr;
+
+	param->u.crypt.err = 0;
+	param->u.crypt.alg[HOSTAP_CRYPT_ALG_NAME_LEN - 1] = '\0';
+
+	if (param_len !=
+	    (int) ((char *) param->u.crypt.key - (char *) param) +
+	    param->u.crypt.key_len)
+		return -EINVAL;
+
+	if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
+	    param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
+	    param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
+		if (param->u.crypt.idx >= WEP_KEYS)
+			return -EINVAL;
+		sta_ptr = NULL;
+		crypt = &local->crypt[param->u.crypt.idx];
+	} else {
+		if (param->u.crypt.idx)
+			return -EINVAL;
+		sta_ptr = ap_crypt_get_ptrs(
+			local->ap, param->sta_addr,
+			(param->u.crypt.flags & HOSTAP_CRYPT_FLAG_PERMANENT),
+			&crypt);
+
+		if (sta_ptr == NULL) {
+			param->u.crypt.err = HOSTAP_CRYPT_ERR_UNKNOWN_ADDR;
+			return -EINVAL;
+		}
+	}
+
+	if (strcmp(param->u.crypt.alg, "none") == 0) {
+		if (crypt)
+			prism2_crypt_delayed_deinit(local, crypt);
+		goto done;
+	}
+
+	ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
+	if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) {
+		request_module("ieee80211_crypt_wep");
+		ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
+	} else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) {
+		request_module("ieee80211_crypt_tkip");
+		ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
+	} else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) {
+		request_module("ieee80211_crypt_ccmp");
+		ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
+	}
+	if (ops == NULL) {
+		printk(KERN_DEBUG "%s: unknown crypto alg '%s'\n",
+		       local->dev->name, param->u.crypt.alg);
+		param->u.crypt.err = HOSTAP_CRYPT_ERR_UNKNOWN_ALG;
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/* Per-station encryption and algorithms other than WEP require
+	 * host-based encryption, so force it on automatically */
+	local->host_decrypt = local->host_encrypt = 1;
+
+	if (*crypt == NULL || (*crypt)->ops != ops) {
+		struct ieee80211_crypt_data *new_crypt;
+
+		prism2_crypt_delayed_deinit(local, crypt);
+
+		new_crypt = (struct ieee80211_crypt_data *)
+			kmalloc(sizeof(struct ieee80211_crypt_data),
+				GFP_KERNEL);
+		if (new_crypt == NULL) {
+			ret = -ENOMEM;
+			goto done;
+		}
+		memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
+		new_crypt->ops = ops;
+		new_crypt->priv = new_crypt->ops->init(param->u.crypt.idx);
+		if (new_crypt->priv == NULL) {
+			kfree(new_crypt);
+			param->u.crypt.err =
+				HOSTAP_CRYPT_ERR_CRYPT_INIT_FAILED;
+			ret = -EINVAL;
+			goto done;
+		}
+
+		*crypt = new_crypt;
+	}
+
+	if ((!(param->u.crypt.flags & HOSTAP_CRYPT_FLAG_SET_TX_KEY) ||
+	     param->u.crypt.key_len > 0) && (*crypt)->ops->set_key &&
+	    (*crypt)->ops->set_key(param->u.crypt.key,
+				   param->u.crypt.key_len, param->u.crypt.seq,
+				   (*crypt)->priv) < 0) {
+		printk(KERN_DEBUG "%s: key setting failed\n",
+		       local->dev->name);
+		param->u.crypt.err = HOSTAP_CRYPT_ERR_KEY_SET_FAILED;
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (param->u.crypt.flags & HOSTAP_CRYPT_FLAG_SET_TX_KEY) {
+		if (!sta_ptr)
+			local->tx_keyidx = param->u.crypt.idx;
+		else if (param->u.crypt.idx) {
+			printk(KERN_DEBUG "%s: TX key idx setting failed\n",
+			       local->dev->name);
+			param->u.crypt.err =
+				HOSTAP_CRYPT_ERR_TX_KEY_SET_FAILED;
+			ret = -EINVAL;
+			goto done;
+		}
+	}
+
+ done:
+	if (sta_ptr)
+		hostap_handle_sta_release(sta_ptr);
+
+	/* Do not reset port0 if the card is in Managed mode, since resetting
+	 * triggers a new IEEE 802.11 authentication which may end up looping
+	 * with IEEE 802.1X. Prism2 documentation seems to require a port
+	 * reset after WEP configuration, but the keys do appear to take
+	 * effect without one, at least in Managed mode. */
+	if (ret == 0 &&
+	    (hostap_set_encryption(local) ||
+	     (local->iw_mode != IW_MODE_INFRA &&
+	      local->func->reset_port(local->dev)))) {
+		param->u.crypt.err = HOSTAP_CRYPT_ERR_CARD_CONF_FAILED;
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
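+/*
+ * For PRISM2_SET_ENCRYPTION the caller is expected to size the request so
+ * that param_len equals the offset of u.crypt.key within struct
+ * prism2_hostapd_param plus u.crypt.key_len, i.e. the variable-length key
+ * material directly follows the fixed header; the length check at the top
+ * of the function above enforces exactly this.
+ */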
+
+static int prism2_ioctl_get_encryption(local_info_t *local,
+				       struct prism2_hostapd_param *param,
+				       int param_len)
+{
+	struct ieee80211_crypt_data **crypt;
+	void *sta_ptr;
+	int max_key_len;
+
+	param->u.crypt.err = 0;
+
+	max_key_len = param_len -
+		(int) ((char *) param->u.crypt.key - (char *) param);
+	if (max_key_len < 0)
+		return -EINVAL;
+
+	if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
+	    param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
+	    param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
+		sta_ptr = NULL;
+		if (param->u.crypt.idx >= WEP_KEYS)
+			param->u.crypt.idx = local->tx_keyidx;
+		crypt = &local->crypt[param->u.crypt.idx];
+	} else {
+		param->u.crypt.idx = 0;
+		sta_ptr = ap_crypt_get_ptrs(local->ap, param->sta_addr, 0,
+					    &crypt);
+
+		if (sta_ptr == NULL) {
+			param->u.crypt.err = HOSTAP_CRYPT_ERR_UNKNOWN_ADDR;
+			return -EINVAL;
+		}
+	}
+
+	if (*crypt == NULL || (*crypt)->ops == NULL) {
+		memcpy(param->u.crypt.alg, "none", 5);
+		param->u.crypt.key_len = 0;
+		param->u.crypt.idx = 0xff;
+	} else {
+		strncpy(param->u.crypt.alg, (*crypt)->ops->name,
+			HOSTAP_CRYPT_ALG_NAME_LEN);
+		param->u.crypt.key_len = 0;
+
+		memset(param->u.crypt.seq, 0, 8);
+		if ((*crypt)->ops->get_key) {
+			param->u.crypt.key_len =
+				(*crypt)->ops->get_key(param->u.crypt.key,
+						       max_key_len,
+						       param->u.crypt.seq,
+						       (*crypt)->priv);
+		}
+	}
+
+	if (sta_ptr)
+		hostap_handle_sta_release(sta_ptr);
+
+	return 0;
+}
+
+
+static int prism2_ioctl_get_rid(local_info_t *local,
+				struct prism2_hostapd_param *param,
+				int param_len)
+{
+	int max_len, res;
+
+	max_len = param_len - PRISM2_HOSTAPD_RID_HDR_LEN;
+	if (max_len < 0)
+		return -EINVAL;
+
+	res = local->func->get_rid(local->dev, param->u.rid.rid,
+				   param->u.rid.data, param->u.rid.len, 0);
+	if (res >= 0) {
+		param->u.rid.len = res;
+		return 0;
+	}
+
+	return res;
+}
+
+
+static int prism2_ioctl_set_rid(local_info_t *local,
+				struct prism2_hostapd_param *param,
+				int param_len)
+{
+	int max_len;
+
+	max_len = param_len - PRISM2_HOSTAPD_RID_HDR_LEN;
+	if (max_len < 0 || max_len < param->u.rid.len)
+		return -EINVAL;
+
+	return local->func->set_rid(local->dev, param->u.rid.rid,
+				    param->u.rid.data, param->u.rid.len);
+}
+
+
+static int prism2_ioctl_set_assoc_ap_addr(local_info_t *local,
+					  struct prism2_hostapd_param *param,
+					  int param_len)
+{
+	printk(KERN_DEBUG "%ssta: associated as client with AP " MACSTR "\n",
+	       local->dev->name, MAC2STR(param->sta_addr));
+	memcpy(local->assoc_ap_addr, param->sta_addr, ETH_ALEN);
+	return 0;
+}
+
+
+static int prism2_ioctl_siwgenie(struct net_device *dev,
+				 struct iw_request_info *info,
+				 struct iw_point *data, char *extra)
+{
+	return prism2_set_genericelement(dev, extra, data->length);
+}
+
+
+static int prism2_ioctl_giwgenie(struct net_device *dev,
+				 struct iw_request_info *info,
+				 struct iw_point *data, char *extra)
+{
+	struct hostap_interface *iface = dev->priv;
+	local_info_t *local = iface->local;
+	int len = local->generic_elem_len - 2;
+
+	if (len <= 0 || local->generic_elem == NULL) {
+		data->length = 0;
+		return 0;
+	}
+
+	if (data->length < len)
+		return -E2BIG;
+
+	data->length = len;
+	memcpy(extra, local->generic_elem + 2, len);
+
+	return 0;
+}
+
+
+static int prism2_ioctl_set_generic_element(local_info_t *local,
+					    struct prism2_hostapd_param *param,
+					    int param_len)
+{
+	int max_len, len;
+
+	len = param->u.generic_elem.len;
+	max_len = param_len - PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN;
+	if (max_len < 0 || max_len < len)
+		return -EINVAL;
+
+	return prism2_set_genericelement(local->dev,
+					 param->u.generic_elem.data, len);
+}
+
+
+static int prism2_ioctl_siwmlme(struct net_device *dev,
+				struct iw_request_info *info,
+				struct iw_point *data, char *extra)
+{
+	struct hostap_interface *iface = dev->priv;
+	local_info_t *local = iface->local;
+	struct iw_mlme *mlme = (struct iw_mlme *) extra;
+	u16 reason;
+
+	reason = cpu_to_le16(mlme->reason_code);
+
+	switch (mlme->cmd) {
+	case IW_MLME_DEAUTH:
+		return prism2_sta_send_mgmt(local, mlme->addr.sa_data,
+					    IEEE80211_STYPE_DEAUTH,
+					    (u8 *) &reason, 2);
+	case IW_MLME_DISASSOC:
+		return prism2_sta_send_mgmt(local, mlme->addr.sa_data,
+					    IEEE80211_STYPE_DISASSOC,
+					    (u8 *) &reason, 2);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+
+static int prism2_ioctl_mlme(local_info_t *local,
+			     struct prism2_hostapd_param *param)
+{
+	u16 reason;
+
+	reason = cpu_to_le16(param->u.mlme.reason_code);
+	switch (param->u.mlme.cmd) {
+	case MLME_STA_DEAUTH:
+		return prism2_sta_send_mgmt(local, param->sta_addr,
+					    IEEE80211_STYPE_DEAUTH,
+					    (u8 *) &reason, 2);
+	case MLME_STA_DISASSOC:
+		return prism2_sta_send_mgmt(local, param->sta_addr,
+					    IEEE80211_STYPE_DISASSOC,
+					    (u8 *) &reason, 2);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+
+static int prism2_ioctl_scan_req(local_info_t *local,
+				 struct prism2_hostapd_param *param)
+{
+#ifndef PRISM2_NO_STATION_MODES
+	if ((local->iw_mode != IW_MODE_INFRA &&
+	     local->iw_mode != IW_MODE_ADHOC) ||
+	    (local->sta_fw_ver < PRISM2_FW_VER(1,3,1)))
+		return -EOPNOTSUPP;
+
+	if (!local->dev_enabled)
+		return -ENETDOWN;
+
+	return prism2_request_hostscan(local->dev, param->u.scan_req.ssid,
+				       param->u.scan_req.ssid_len);
+#else /* PRISM2_NO_STATION_MODES */
+	return -EOPNOTSUPP;
+#endif /* PRISM2_NO_STATION_MODES */
+}
+
+
+static int prism2_ioctl_priv_hostapd(local_info_t *local, struct iw_point *p)
+{
+	struct prism2_hostapd_param *param;
+	int ret = 0;
+	int ap_ioctl = 0;
+
+	if (p->length < sizeof(struct prism2_hostapd_param) ||
+	    p->length > PRISM2_HOSTAPD_MAX_BUF_SIZE || !p->pointer)
+		return -EINVAL;
+
+	param = (struct prism2_hostapd_param *) kmalloc(p->length, GFP_KERNEL);
+	if (param == NULL)
+		return -ENOMEM;
+
+	if (copy_from_user(param, p->pointer, p->length)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	switch (param->cmd) {
+	case PRISM2_SET_ENCRYPTION:
+		ret = prism2_ioctl_set_encryption(local, param, p->length);
+		break;
+	case PRISM2_GET_ENCRYPTION:
+		ret = prism2_ioctl_get_encryption(local, param, p->length);
+		break;
+	case PRISM2_HOSTAPD_GET_RID:
+		ret = prism2_ioctl_get_rid(local, param, p->length);
+		break;
+	case PRISM2_HOSTAPD_SET_RID:
+		ret = prism2_ioctl_set_rid(local, param, p->length);
+		break;
+	case PRISM2_HOSTAPD_SET_ASSOC_AP_ADDR:
+		ret = prism2_ioctl_set_assoc_ap_addr(local, param, p->length);
+		break;
+	case PRISM2_HOSTAPD_SET_GENERIC_ELEMENT:
+		ret = prism2_ioctl_set_generic_element(local, param,
+						       p->length);
+		break;
+	case PRISM2_HOSTAPD_MLME:
+		ret = prism2_ioctl_mlme(local, param);
+		break;
+	case PRISM2_HOSTAPD_SCAN_REQ:
+		ret = prism2_ioctl_scan_req(local, param);
+		break;
+	default:
+		ret = prism2_hostapd(local->ap, param);
+		ap_ioctl = 1;
+		break;
+	}
+
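+	/*
+	 * Commands handled directly above always copy the (possibly
+	 * modified) param buffer back to user space; for commands passed
+	 * through to prism2_hostapd() (ap_ioctl == 1), a return value of 1
+	 * means "data to return" and is converted to 0 after the copy.
+	 */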
+	if (ret == 1 || !ap_ioctl) {
+		if (copy_to_user(p->pointer, param, p->length)) {
+			ret = -EFAULT;
+			goto out;
+		} else if (ap_ioctl)
+			ret = 0;
+	}
+
+ out:
+	if (param != NULL)
+		kfree(param);
+
+	return ret;
+}
+
+
+static void prism2_get_drvinfo(struct net_device *dev,
+			       struct ethtool_drvinfo *info)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	strncpy(info->driver, "hostap", sizeof(info->driver) - 1);
+	strncpy(info->version, PRISM2_VERSION,
+		sizeof(info->version) - 1);
+	snprintf(info->fw_version, sizeof(info->fw_version) - 1,
+		 "%d.%d.%d", (local->sta_fw_ver >> 16) & 0xff,
+		 (local->sta_fw_ver >> 8) & 0xff,
+		 local->sta_fw_ver & 0xff);
+}
+
+static struct ethtool_ops prism2_ethtool_ops = {
+	.get_drvinfo = prism2_get_drvinfo
+};
+
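+/*
+ * Assuming the ethtool_ops above are attached to the net_device elsewhere
+ * in the driver, the information can be queried from user space with e.g.
+ * "ethtool -i wlan0", which reports the driver name, PRISM2_VERSION and
+ * the station firmware version decoded from local->sta_fw_ver.
+ */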
+
+/* Structures to export the Wireless Handlers */
+
+static const iw_handler prism2_handler[] =
+{
+	(iw_handler) NULL,				/* SIOCSIWCOMMIT */
+	(iw_handler) prism2_get_name,			/* SIOCGIWNAME */
+	(iw_handler) NULL,				/* SIOCSIWNWID */
+	(iw_handler) NULL,				/* SIOCGIWNWID */
+	(iw_handler) prism2_ioctl_siwfreq,		/* SIOCSIWFREQ */
+	(iw_handler) prism2_ioctl_giwfreq,		/* SIOCGIWFREQ */
+	(iw_handler) prism2_ioctl_siwmode,		/* SIOCSIWMODE */
+	(iw_handler) prism2_ioctl_giwmode,		/* SIOCGIWMODE */
+	(iw_handler) prism2_ioctl_siwsens,		/* SIOCSIWSENS */
+	(iw_handler) prism2_ioctl_giwsens,		/* SIOCGIWSENS */
+	(iw_handler) NULL /* not used */,		/* SIOCSIWRANGE */
+	(iw_handler) prism2_ioctl_giwrange,		/* SIOCGIWRANGE */
+	(iw_handler) NULL /* not used */,		/* SIOCSIWPRIV */
+	(iw_handler) NULL /* kernel code */,		/* SIOCGIWPRIV */
+	(iw_handler) NULL /* not used */,		/* SIOCSIWSTATS */
+	(iw_handler) NULL /* kernel code */,		/* SIOCGIWSTATS */
+	iw_handler_set_spy,				/* SIOCSIWSPY */
+	iw_handler_get_spy,				/* SIOCGIWSPY */
+	iw_handler_set_thrspy,				/* SIOCSIWTHRSPY */
+	iw_handler_get_thrspy,				/* SIOCGIWTHRSPY */
+	(iw_handler) prism2_ioctl_siwap,		/* SIOCSIWAP */
+	(iw_handler) prism2_ioctl_giwap,		/* SIOCGIWAP */
+	(iw_handler) prism2_ioctl_siwmlme,		/* SIOCSIWMLME */
+	(iw_handler) prism2_ioctl_giwaplist,		/* SIOCGIWAPLIST */
+	(iw_handler) prism2_ioctl_siwscan,		/* SIOCSIWSCAN */
+	(iw_handler) prism2_ioctl_giwscan,		/* SIOCGIWSCAN */
+	(iw_handler) prism2_ioctl_siwessid,		/* SIOCSIWESSID */
+	(iw_handler) prism2_ioctl_giwessid,		/* SIOCGIWESSID */
+	(iw_handler) prism2_ioctl_siwnickn,		/* SIOCSIWNICKN */
+	(iw_handler) prism2_ioctl_giwnickn,		/* SIOCGIWNICKN */
+	(iw_handler) NULL,				/* -- hole -- */
+	(iw_handler) NULL,				/* -- hole -- */
+	(iw_handler) prism2_ioctl_siwrate,		/* SIOCSIWRATE */
+	(iw_handler) prism2_ioctl_giwrate,		/* SIOCGIWRATE */
+	(iw_handler) prism2_ioctl_siwrts,		/* SIOCSIWRTS */
+	(iw_handler) prism2_ioctl_giwrts,		/* SIOCGIWRTS */
+	(iw_handler) prism2_ioctl_siwfrag,		/* SIOCSIWFRAG */
+	(iw_handler) prism2_ioctl_giwfrag,		/* SIOCGIWFRAG */
+	(iw_handler) prism2_ioctl_siwtxpow,		/* SIOCSIWTXPOW */
+	(iw_handler) prism2_ioctl_giwtxpow,		/* SIOCGIWTXPOW */
+	(iw_handler) prism2_ioctl_siwretry,		/* SIOCSIWRETRY */
+	(iw_handler) prism2_ioctl_giwretry,		/* SIOCGIWRETRY */
+	(iw_handler) prism2_ioctl_siwencode,		/* SIOCSIWENCODE */
+	(iw_handler) prism2_ioctl_giwencode,		/* SIOCGIWENCODE */
+	(iw_handler) prism2_ioctl_siwpower,		/* SIOCSIWPOWER */
+	(iw_handler) prism2_ioctl_giwpower,		/* SIOCGIWPOWER */
+	(iw_handler) NULL,				/* -- hole -- */
+	(iw_handler) NULL,				/* -- hole -- */
+	(iw_handler) prism2_ioctl_siwgenie,		/* SIOCSIWGENIE */
+	(iw_handler) prism2_ioctl_giwgenie,		/* SIOCGIWGENIE */
+	(iw_handler) prism2_ioctl_siwauth,		/* SIOCSIWAUTH */
+	(iw_handler) prism2_ioctl_giwauth,		/* SIOCGIWAUTH */
+	(iw_handler) prism2_ioctl_siwencodeext,		/* SIOCSIWENCODEEXT */
+	(iw_handler) prism2_ioctl_giwencodeext,		/* SIOCGIWENCODEEXT */
+	(iw_handler) NULL,				/* SIOCSIWPMKSA */
+	(iw_handler) NULL,				/* -- hole -- */
+};
+
+static const iw_handler prism2_private_handler[] =
+{							/* SIOCIWFIRSTPRIV + */
+	(iw_handler) prism2_ioctl_priv_prism2_param,	/* 0 */
+	(iw_handler) prism2_ioctl_priv_get_prism2_param, /* 1 */
+	(iw_handler) prism2_ioctl_priv_writemif,	/* 2 */
+	(iw_handler) prism2_ioctl_priv_readmif,		/* 3 */
+};
+
+static const struct iw_handler_def hostap_iw_handler_def =
+{
+	.num_standard	= sizeof(prism2_handler) / sizeof(iw_handler),
+	.num_private	= sizeof(prism2_private_handler) / sizeof(iw_handler),
+	.num_private_args = sizeof(prism2_priv) / sizeof(struct iw_priv_args),
+	.standard	= (iw_handler *) prism2_handler,
+	.private	= (iw_handler *) prism2_private_handler,
+	.private_args	= (struct iw_priv_args *) prism2_priv,
+	.get_wireless_stats = hostap_get_wireless_stats,
+};
+
+
+int hostap_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct iwreq *wrq = (struct iwreq *) ifr;
+	struct hostap_interface *iface;
+	local_info_t *local;
+	int ret = 0;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	switch (cmd) {
+		/* Private ioctls (iwpriv) that have not yet been converted
+		 * into new wireless extensions API */
+
+	case PRISM2_IOCTL_INQUIRE:
+		if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
+		else ret = prism2_ioctl_priv_inquire(dev, (int *) wrq->u.name);
+		break;
+
+	case PRISM2_IOCTL_MONITOR:
+		if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
+		else ret = prism2_ioctl_priv_monitor(dev, (int *) wrq->u.name);
+		break;
+
+	case PRISM2_IOCTL_RESET:
+		if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
+		else ret = prism2_ioctl_priv_reset(dev, (int *) wrq->u.name);
+		break;
+
+	case PRISM2_IOCTL_WDS_ADD:
+		if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
+		else ret = prism2_wds_add(local, wrq->u.ap_addr.sa_data, 1);
+		break;
+
+	case PRISM2_IOCTL_WDS_DEL:
+		if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
+		else ret = prism2_wds_del(local, wrq->u.ap_addr.sa_data, 1, 0);
+		break;
+
+	case PRISM2_IOCTL_SET_RID_WORD:
+		if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
+		else ret = prism2_ioctl_priv_set_rid_word(dev,
+							  (int *) wrq->u.name);
+		break;
+
+#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
+	case PRISM2_IOCTL_MACCMD:
+		if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
+		else ret = ap_mac_cmd_ioctl(local, (int *) wrq->u.name);
+		break;
+
+	case PRISM2_IOCTL_ADDMAC:
+		if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
+		else ret = ap_control_add_mac(&local->ap->mac_restrictions,
+					      wrq->u.ap_addr.sa_data);
+		break;
+	case PRISM2_IOCTL_DELMAC:
+		if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
+		else ret = ap_control_del_mac(&local->ap->mac_restrictions,
+					      wrq->u.ap_addr.sa_data);
+		break;
+	case PRISM2_IOCTL_KICKMAC:
+		if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
+		else ret = ap_control_kick_mac(local->ap, local->dev,
+					       wrq->u.ap_addr.sa_data);
+		break;
+#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
+
+
+		/* Private ioctls that are not used with iwpriv;
+		 * in SIOCDEVPRIVATE range */
+
+#ifdef PRISM2_DOWNLOAD_SUPPORT
+	case PRISM2_IOCTL_DOWNLOAD:
+		if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
+		else ret = prism2_ioctl_priv_download(local, &wrq->u.data);
+		break;
+#endif /* PRISM2_DOWNLOAD_SUPPORT */
+
+	case PRISM2_IOCTL_HOSTAPD:
+		if (!capable(CAP_NET_ADMIN)) ret = -EPERM;
+		else ret = prism2_ioctl_priv_hostapd(local, &wrq->u.data);
+		break;
+
+	default:
+		ret = -EOPNOTSUPP;
+		break;
+	}
+
+	return ret;
+}
diff --git a/drivers/net/wireless/hostap/hostap_pci.c b/drivers/net/wireless/hostap/hostap_pci.c
new file mode 100644
--- /dev/null
+++ b/drivers/net/wireless/hostap/hostap_pci.c
@@ -0,0 +1,473 @@
+#define PRISM2_PCI
+
+/* Host AP driver's support for Intersil Prism2.5 PCI cards is based on
+ * driver patches from Reyk Floeter <reyk@vantronix.net> and
+ * Andy Warner <andyw@pobox.com> */
+
+#include <linux/config.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/if.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <linux/wireless.h>
+#include <net/iw_handler.h>
+
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+
+#include "hostap_wlan.h"
+
+
+static char *version = PRISM2_VERSION " (Jouni Malinen <jkmaline@cc.hut.fi>)";
+static char *dev_info = "hostap_pci";
+
+
+MODULE_AUTHOR("Jouni Malinen");
+MODULE_DESCRIPTION("Support for Intersil Prism2.5-based 802.11 wireless LAN "
+		   "PCI cards.");
+MODULE_SUPPORTED_DEVICE("Intersil Prism2.5-based WLAN PCI cards");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(PRISM2_VERSION);
+
+
+/* struct local_info::hw_priv */
+struct hostap_pci_priv {
+	void __iomem *mem_start;
+};
+
+
+/* FIX: do we need mb/wmb/rmb with memory operations? */
+
+
+static struct pci_device_id prism2_pci_id_table[] __devinitdata = {
+	/* Intersil Prism3 ISL3872 11Mb/s WLAN Controller */
+	{ 0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID },
+	/* Intersil Prism2.5 ISL3874 11Mb/s WLAN Controller */
+	{ 0x1260, 0x3873, PCI_ANY_ID, PCI_ANY_ID },
+	/* Samsung MagicLAN SWL-2210P */
+	{ 0x167d, 0xa000, PCI_ANY_ID, PCI_ANY_ID },
+	{ 0 }
+};
+
+
+#ifdef PRISM2_IO_DEBUG
+
+static inline void hfa384x_outb_debug(struct net_device *dev, int a, u8 v)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	struct hostap_pci_priv *hw_priv;
+	unsigned long flags;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+	hw_priv = local->hw_priv;
+
+	spin_lock_irqsave(&local->lock, flags);
+	prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTB, a, v);
+	writeb(v, hw_priv->mem_start + a);
+	spin_unlock_irqrestore(&local->lock, flags);
+}
+
+static inline u8 hfa384x_inb_debug(struct net_device *dev, int a)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	struct hostap_pci_priv *hw_priv;
+	unsigned long flags;
+	u8 v;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+	hw_priv = local->hw_priv;
+
+	spin_lock_irqsave(&local->lock, flags);
+	v = readb(hw_priv->mem_start + a);
+	prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INB, a, v);
+	spin_unlock_irqrestore(&local->lock, flags);
+	return v;
+}
+
+static inline void hfa384x_outw_debug(struct net_device *dev, int a, u16 v)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	struct hostap_pci_priv *hw_priv;
+	unsigned long flags;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+	hw_priv = local->hw_priv;
+
+	spin_lock_irqsave(&local->lock, flags);
+	prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTW, a, v);
+	writew(v, hw_priv->mem_start + a);
+	spin_unlock_irqrestore(&local->lock, flags);
+}
+
+static inline u16 hfa384x_inw_debug(struct net_device *dev, int a)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	struct hostap_pci_priv *hw_priv;
+	unsigned long flags;
+	u16 v;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+	hw_priv = local->hw_priv;
+
+	spin_lock_irqsave(&local->lock, flags);
+	v = readw(hw_priv->mem_start + a);
+	prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INW, a, v);
+	spin_unlock_irqrestore(&local->lock, flags);
+	return v;
+}
+
+#define HFA384X_OUTB(v,a) hfa384x_outb_debug(dev, (a), (v))
+#define HFA384X_INB(a) hfa384x_inb_debug(dev, (a))
+#define HFA384X_OUTW(v,a) hfa384x_outw_debug(dev, (a), (v))
+#define HFA384X_INW(a) hfa384x_inw_debug(dev, (a))
+#define HFA384X_OUTW_DATA(v,a) hfa384x_outw_debug(dev, (a), cpu_to_le16((v)))
+#define HFA384X_INW_DATA(a) (u16) le16_to_cpu(hfa384x_inw_debug(dev, (a)))
+
+#else /* PRISM2_IO_DEBUG */
+
+static inline void hfa384x_outb(struct net_device *dev, int a, u8 v)
+{
+	struct hostap_interface *iface;
+	struct hostap_pci_priv *hw_priv;
+	iface = netdev_priv(dev);
+	hw_priv = iface->local->hw_priv;
+	writeb(v, hw_priv->mem_start + a);
+}
+
+static inline u8 hfa384x_inb(struct net_device *dev, int a)
+{
+	struct hostap_interface *iface;
+	struct hostap_pci_priv *hw_priv;
+	iface = netdev_priv(dev);
+	hw_priv = iface->local->hw_priv;
+	return readb(hw_priv->mem_start + a);
+}
+
+static inline void hfa384x_outw(struct net_device *dev, int a, u16 v)
+{
+	struct hostap_interface *iface;
+	struct hostap_pci_priv *hw_priv;
+	iface = netdev_priv(dev);
+	hw_priv = iface->local->hw_priv;
+	writew(v, hw_priv->mem_start + a);
+}
+
+static inline u16 hfa384x_inw(struct net_device *dev, int a)
+{
+	struct hostap_interface *iface;
+	struct hostap_pci_priv *hw_priv;
+	iface = netdev_priv(dev);
+	hw_priv = iface->local->hw_priv;
+	return readw(hw_priv->mem_start + a);
+}
+
+#define HFA384X_OUTB(v,a) hfa384x_outb(dev, (a), (v))
+#define HFA384X_INB(a) hfa384x_inb(dev, (a))
+#define HFA384X_OUTW(v,a) hfa384x_outw(dev, (a), (v))
+#define HFA384X_INW(a) hfa384x_inw(dev, (a))
+#define HFA384X_OUTW_DATA(v,a) hfa384x_outw(dev, (a), cpu_to_le16((v)))
+#define HFA384X_INW_DATA(a) (u16) le16_to_cpu(hfa384x_inw(dev, (a)))
+
+#endif /* PRISM2_IO_DEBUG */
+
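+/*
+ * Note: the HFA384X_*() accessors above assume a variable 'dev'
+ * (struct net_device *) in the calling scope and treat the register
+ * offsets as byte offsets into the memory-mapped PCI BAR 0
+ * (hw_priv->mem_start).  The *_DATA variants additionally convert
+ * between CPU and little-endian byte order for 16-bit data words.
+ */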
+
+static int hfa384x_from_bap(struct net_device *dev, u16 bap, void *buf,
+			    int len)
+{
+	u16 d_off;
+	u16 *pos;
+
+	d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF;
+	pos = (u16 *) buf;
+
+	for ( ; len > 1; len -= 2)
+		*pos++ = HFA384X_INW_DATA(d_off);
+
+	if (len & 1)
+		*((char *) pos) = HFA384X_INB(d_off);
+
+	return 0;
+}
+
+
+static int hfa384x_to_bap(struct net_device *dev, u16 bap, void *buf, int len)
+{
+	u16 d_off;
+	u16 *pos;
+
+	d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF;
+	pos = (u16 *) buf;
+
+	for ( ; len > 1; len -= 2)
+		HFA384X_OUTW_DATA(*pos++, d_off);
+
+	if (len & 1)
+		HFA384X_OUTB(*((char *) pos), d_off);
+
+	return 0;
+}
+
+
+/* FIX: This might change at some point.. */
+#include "hostap_hw.c"
+
+static void prism2_pci_cor_sreset(local_info_t *local)
+{
+	struct net_device *dev = local->dev;
+	u16 reg;
+
+	reg = HFA384X_INB(HFA384X_PCICOR_OFF);
+	printk(KERN_DEBUG "%s: Original COR value: 0x%0x\n", dev->name, reg);
+
+	/* linux-wlan-ng uses extremely long hold and settle times for
+	 * COR sreset. A comment in the driver code mentions that the long
+	 * delays appear to be necessary. However, at least IBM 22P6901 seems
+	 * to work fine with shorter delays.
+	 *
+	 * Longer delays can be configured by uncommenting the following line: */
+/* #define PRISM2_PCI_USE_LONG_DELAYS */
+
+#ifdef PRISM2_PCI_USE_LONG_DELAYS
+	int i;
+
+	HFA384X_OUTW(reg | 0x0080, HFA384X_PCICOR_OFF);
+	mdelay(250);
+
+	HFA384X_OUTW(reg & ~0x0080, HFA384X_PCICOR_OFF);
+	mdelay(500);
+
+	/* Wait for f/w to complete initialization (CMD:BUSY == 0) */
+	i = 2000000 / 10;
+	while ((HFA384X_INW(HFA384X_CMD_OFF) & HFA384X_CMD_BUSY) && --i)
+		udelay(10);
+
+#else /* PRISM2_PCI_USE_LONG_DELAYS */
+
+	HFA384X_OUTW(reg | 0x0080, HFA384X_PCICOR_OFF);
+	mdelay(2);
+	HFA384X_OUTW(reg & ~0x0080, HFA384X_PCICOR_OFF);
+	mdelay(2);
+
+#endif /* PRISM2_PCI_USE_LONG_DELAYS */
+
+	if (HFA384X_INW(HFA384X_CMD_OFF) & HFA384X_CMD_BUSY) {
+		printk(KERN_DEBUG "%s: COR sreset timeout\n", dev->name);
+	}
+}
+
+
+static void prism2_pci_genesis_reset(local_info_t *local, int hcr)
+{
+	struct net_device *dev = local->dev;
+
+	HFA384X_OUTW(0x00C5, HFA384X_PCICOR_OFF);
+	mdelay(10);
+	HFA384X_OUTW(hcr, HFA384X_PCIHCR_OFF);
+	mdelay(10);
+	HFA384X_OUTW(0x0045, HFA384X_PCICOR_OFF);
+	mdelay(10);
+}
+
+
+static struct prism2_helper_functions prism2_pci_funcs =
+{
+	.card_present	= NULL,
+	.cor_sreset	= prism2_pci_cor_sreset,
+	.dev_open	= NULL,
+	.dev_close	= NULL,
+	.genesis_reset	= prism2_pci_genesis_reset,
+	.hw_type	= HOSTAP_HW_PCI,
+};
+
+
+static int prism2_pci_probe(struct pci_dev *pdev,
+			    const struct pci_device_id *id)
+{
+	unsigned long phymem;
+	void __iomem *mem = NULL;
+	local_info_t *local = NULL;
+	struct net_device *dev = NULL;
+	static int cards_found /* = 0 */;
+	int irq_registered = 0;
+	struct hostap_interface *iface;
+	struct hostap_pci_priv *hw_priv;
+
+	hw_priv = kmalloc(sizeof(*hw_priv), GFP_KERNEL);
+	if (hw_priv == NULL)
+		return -ENOMEM;
+	memset(hw_priv, 0, sizeof(*hw_priv));
+
+	if (pci_enable_device(pdev)) {
+		kfree(hw_priv);
+		return -EIO;
+	}
+
+	phymem = pci_resource_start(pdev, 0);
+
+	if (!request_mem_region(phymem, pci_resource_len(pdev, 0), "Prism2")) {
+		printk(KERN_ERR "prism2: Cannot reserve PCI memory region\n");
+		goto err_out_disable;
+	}
+
+	mem = ioremap(phymem, pci_resource_len(pdev, 0));
+	if (mem == NULL) {
+		printk(KERN_ERR "prism2: Cannot remap PCI memory region\n");
+		goto fail;
+	}
+
+	dev = prism2_init_local_data(&prism2_pci_funcs, cards_found,
+				     &pdev->dev);
+	if (dev == NULL)
+		goto fail;
+	iface = netdev_priv(dev);
+	local = iface->local;
+	local->hw_priv = hw_priv;
+	cards_found++;
+
+	dev->irq = pdev->irq;
+	hw_priv->mem_start = mem;
+
+	prism2_pci_cor_sreset(local);
+
+	pci_set_drvdata(pdev, dev);
+
+	if (request_irq(dev->irq, prism2_interrupt, SA_SHIRQ, dev->name,
+			dev)) {
+		printk(KERN_WARNING "%s: request_irq failed\n", dev->name);
+		goto fail;
+	} else
+		irq_registered = 1;
+
+	if (!local->pri_only && prism2_hw_config(dev, 1)) {
+		printk(KERN_DEBUG "%s: hardware initialization failed\n",
+		       dev_info);
+		goto fail;
+	}
+
+	printk(KERN_INFO "%s: Intersil Prism2.5 PCI: "
+	       "mem=0x%lx, irq=%d\n", dev->name, phymem, dev->irq);
+
+	return hostap_hw_ready(dev);
+
+ fail:
+	if (irq_registered && dev)
+		free_irq(dev->irq, dev);
+
+	if (mem)
+		iounmap(mem);
+
+	release_mem_region(phymem, pci_resource_len(pdev, 0));
+
+ err_out_disable:
+	pci_disable_device(pdev);
+	kfree(hw_priv);
+	if (local)
+		local->hw_priv = NULL;
+	prism2_free_local_data(dev);
+
+	return -ENODEV;
+}
+
+
+static void prism2_pci_remove(struct pci_dev *pdev)
+{
+	struct net_device *dev;
+	struct hostap_interface *iface;
+	void __iomem *mem_start;
+	struct hostap_pci_priv *hw_priv;
+
+	dev = pci_get_drvdata(pdev);
+	iface = netdev_priv(dev);
+	hw_priv = iface->local->hw_priv;
+
+	/* Reset the hardware, and ensure interrupts are disabled. */
+	prism2_pci_cor_sreset(iface->local);
+	hfa384x_disable_interrupts(dev);
+
+	if (dev->irq)
+		free_irq(dev->irq, dev);
+
+	mem_start = hw_priv->mem_start;
+	kfree(hw_priv);
+	iface->local->hw_priv = NULL;
+	prism2_free_local_data(dev);
+
+	iounmap(mem_start);
+
+	release_mem_region(pci_resource_start(pdev, 0),
+			   pci_resource_len(pdev, 0));
+	pci_disable_device(pdev);
+}
+
+
+#ifdef CONFIG_PM
+static int prism2_pci_suspend(struct pci_dev *pdev, u32 state)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+
+	if (netif_running(dev)) {
+		netif_stop_queue(dev);
+		netif_device_detach(dev);
+	}
+	prism2_suspend(dev);
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, 3);
+
+	return 0;
+}
+
+static int prism2_pci_resume(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+
+	pci_enable_device(pdev);
+	pci_restore_state(pdev);
+	prism2_hw_config(dev, 0);
+	if (netif_running(dev)) {
+		netif_device_attach(dev);
+		netif_start_queue(dev);
+	}
+
+	return 0;
+}
+#endif /* CONFIG_PM */
+
+
+MODULE_DEVICE_TABLE(pci, prism2_pci_id_table);
+
+static struct pci_driver prism2_pci_drv_id = {
+	.name		= "prism2_pci",
+	.id_table	= prism2_pci_id_table,
+	.probe		= prism2_pci_probe,
+	.remove		= prism2_pci_remove,
+#ifdef CONFIG_PM
+	.suspend	= prism2_pci_suspend,
+	.resume		= prism2_pci_resume,
+#endif /* CONFIG_PM */
+	/* Linux 2.4.6 added save_state and enable_wake that are not used here
+	 */
+};
+
+
+static int __init init_prism2_pci(void)
+{
+	printk(KERN_INFO "%s: %s\n", dev_info, version);
+
+	return pci_register_driver(&prism2_pci_drv_id);
+}
+
+
+static void __exit exit_prism2_pci(void)
+{
+	pci_unregister_driver(&prism2_pci_drv_id);
+	printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
+}
+
+
+module_init(init_prism2_pci);
+module_exit(exit_prism2_pci);
diff --git a/drivers/net/wireless/hostap/hostap_plx.c b/drivers/net/wireless/hostap/hostap_plx.c
new file mode 100644
--- /dev/null
+++ b/drivers/net/wireless/hostap/hostap_plx.c
@@ -0,0 +1,645 @@
+#define PRISM2_PLX
+
+/* Host AP driver's support for PC Cards on PCI adapters using PLX9052 is
+ * based on:
+ * - Host AP driver patch from james@madingley.org
+ * - linux-wlan-ng driver, Copyright (C) AbsoluteValue Systems, Inc.
+ */
+
+
+#include <linux/config.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/if.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <linux/wireless.h>
+#include <net/iw_handler.h>
+
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+
+#include "hostap_wlan.h"
+
+
+static char *version = PRISM2_VERSION " (Jouni Malinen <jkmaline@cc.hut.fi>)";
+static char *dev_info = "hostap_plx";
+
+
+MODULE_AUTHOR("Jouni Malinen");
+MODULE_DESCRIPTION("Support for Intersil Prism2-based 802.11 wireless LAN "
+		   "cards (PLX).");
+MODULE_SUPPORTED_DEVICE("Intersil Prism2-based WLAN cards (PLX)");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(PRISM2_VERSION);
+
+
+static int ignore_cis;
+module_param(ignore_cis, int, 0444);
+MODULE_PARM_DESC(ignore_cis, "Do not verify manfid information in CIS");
+
+
+/* struct local_info::hw_priv */
+struct hostap_plx_priv {
+	void __iomem *attr_mem;
+	unsigned int cor_offset;
+};
+
+
+#define PLX_MIN_ATTR_LEN 512	/* at least 2 x 256 is needed for CIS */
+#define COR_SRESET       0x80
+#define COR_LEVLREQ      0x40
+#define COR_ENABLE_FUNC  0x01
+/* PCI Configuration Registers */
+#define PLX_PCIIPR       0x3d   /* PCI Interrupt Pin */
+/* Local Configuration Registers */
+#define PLX_INTCSR       0x4c   /* Interrupt Control/Status Register */
+#define PLX_INTCSR_PCI_INTEN BIT(6) /* PCI Interrupt Enable */
+#define PLX_CNTRL        0x50
+#define PLX_CNTRL_SERIAL_EEPROM_PRESENT BIT(28)
+
+
+#define PLXDEV(vendor,dev,str) { vendor, dev, PCI_ANY_ID, PCI_ANY_ID }
+
+static struct pci_device_id prism2_plx_id_table[] __devinitdata = {
+	PLXDEV(0x10b7, 0x7770, "3Com AirConnect PCI 777A"),
+	PLXDEV(0x111a, 0x1023, "Siemens SpeedStream SS1023"),
+	PLXDEV(0x126c, 0x8030, "Nortel emobility"),
+	PLXDEV(0x1385, 0x4100, "Netgear MA301"),
+	PLXDEV(0x15e8, 0x0130, "National Datacomm NCP130 (PLX9052)"),
+	PLXDEV(0x15e8, 0x0131, "National Datacomm NCP130 (TMD7160)"),
+	PLXDEV(0x1638, 0x1100, "Eumitcom WL11000"),
+	PLXDEV(0x16ab, 0x1101, "Global Sun Tech GL24110P (?)"),
+	PLXDEV(0x16ab, 0x1102, "Linksys WPC11 with WDT11"),
+	PLXDEV(0x16ab, 0x1103, "Longshine 8031"),
+	PLXDEV(0x16ec, 0x3685, "US Robotics USR2415"),
+	PLXDEV(0xec80, 0xec00, "Belkin F5D6000"),
+	{ 0 }
+};
+
+
+/* Array of known Prism2/2.5 PC Card manufacturer ids (manfids). If your
+ * card's manfid is not listed here, you will need to add it to get the
+ * driver initialized. */
+static struct prism2_plx_manfid {
+	u16 manfid1, manfid2;
+} prism2_plx_known_manfids[] = {
+	{ 0x000b, 0x7110 } /* D-Link DWL-650 Rev. P1 */,
+	{ 0x000b, 0x7300 } /* Philips 802.11b WLAN PCMCIA */,
+	{ 0x0101, 0x0777 } /* 3Com AirConnect PCI 777A */,
+	{ 0x0126, 0x8000 } /* Proxim RangeLAN */,
+	{ 0x0138, 0x0002 } /* Compaq WL100 */,
+	{ 0x0156, 0x0002 } /* Intersil Prism II Ref. Design (and others) */,
+	{ 0x026f, 0x030b } /* Buffalo WLI-CF-S11G */,
+	{ 0x0274, 0x1612 } /* Linksys WPC11 Ver 2.5 */,
+	{ 0x0274, 0x1613 } /* Linksys WPC11 Ver 3 */,
+	{ 0x028a, 0x0002 } /* D-Link DRC-650 */,
+	{ 0x0250, 0x0002 } /* Samsung SWL2000-N */,
+	{ 0xc250, 0x0002 } /* EMTAC A2424i */,
+	{ 0xd601, 0x0002 } /* Z-Com XI300 */,
+	{ 0xd601, 0x0005 } /* Zcomax XI-325H 200mW */,
+	{ 0, 0}
+};
+
+
+#ifdef PRISM2_IO_DEBUG
+
+static inline void hfa384x_outb_debug(struct net_device *dev, int a, u8 v)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	unsigned long flags;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	spin_lock_irqsave(&local->lock, flags);
+	prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTB, a, v);
+	outb(v, dev->base_addr + a);
+	spin_unlock_irqrestore(&local->lock, flags);
+}
+
+static inline u8 hfa384x_inb_debug(struct net_device *dev, int a)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	unsigned long flags;
+	u8 v;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	spin_lock_irqsave(&local->lock, flags);
+	v = inb(dev->base_addr + a);
+	prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INB, a, v);
+	spin_unlock_irqrestore(&local->lock, flags);
+	return v;
+}
+
+static inline void hfa384x_outw_debug(struct net_device *dev, int a, u16 v)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	unsigned long flags;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	spin_lock_irqsave(&local->lock, flags);
+	prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTW, a, v);
+	outw(v, dev->base_addr + a);
+	spin_unlock_irqrestore(&local->lock, flags);
+}
+
+static inline u16 hfa384x_inw_debug(struct net_device *dev, int a)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	unsigned long flags;
+	u16 v;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	spin_lock_irqsave(&local->lock, flags);
+	v = inw(dev->base_addr + a);
+	prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INW, a, v);
+	spin_unlock_irqrestore(&local->lock, flags);
+	return v;
+}
+
+static inline void hfa384x_outsw_debug(struct net_device *dev, int a,
+				       u8 *buf, int wc)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	unsigned long flags;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	spin_lock_irqsave(&local->lock, flags);
+	prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTSW, a, wc);
+	outsw(dev->base_addr + a, buf, wc);
+	spin_unlock_irqrestore(&local->lock, flags);
+}
+
+static inline void hfa384x_insw_debug(struct net_device *dev, int a,
+				      u8 *buf, int wc)
+{
+	struct hostap_interface *iface;
+	local_info_t *local;
+	unsigned long flags;
+
+	iface = netdev_priv(dev);
+	local = iface->local;
+
+	spin_lock_irqsave(&local->lock, flags);
+	prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INSW, a, wc);
+	insw(dev->base_addr + a, buf, wc);
+	spin_unlock_irqrestore(&local->lock, flags);
+}
+
+#define HFA384X_OUTB(v,a) hfa384x_outb_debug(dev, (a), (v))
+#define HFA384X_INB(a) hfa384x_inb_debug(dev, (a))
+#define HFA384X_OUTW(v,a) hfa384x_outw_debug(dev, (a), (v))
+#define HFA384X_INW(a) hfa384x_inw_debug(dev, (a))
+#define HFA384X_OUTSW(a, buf, wc) hfa384x_outsw_debug(dev, (a), (buf), (wc))
+#define HFA384X_INSW(a, buf, wc) hfa384x_insw_debug(dev, (a), (buf), (wc))
+
+#else /* PRISM2_IO_DEBUG */
+
+#define HFA384X_OUTB(v,a) outb((v), dev->base_addr + (a))
+#define HFA384X_INB(a) inb(dev->base_addr + (a))
+#define HFA384X_OUTW(v,a) outw((v), dev->base_addr + (a))
+#define HFA384X_INW(a) inw(dev->base_addr + (a))
+#define HFA384X_INSW(a, buf, wc) insw(dev->base_addr + (a), buf, wc)
+#define HFA384X_OUTSW(a, buf, wc) outsw(dev->base_addr + (a), buf, wc)
+
+#endif /* PRISM2_IO_DEBUG */
+
+
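+/* Copy 'len' bytes from the selected BAP data register into 'buf'; data is
+ * read 16 bits at a time, with a final 8-bit read for an odd length. */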
+static int hfa384x_from_bap(struct net_device *dev, u16 bap, void *buf,
+			    int len)
+{
+	u16 d_off;
+	u16 *pos;
+
+	d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF;
+	pos = (u16 *) buf;
+
+	if (len / 2)
+		HFA384X_INSW(d_off, buf, len / 2);
+	pos += len / 2;
+
+	if (len & 1)
+		*((char *) pos) = HFA384X_INB(d_off);
+
+	return 0;
+}
+
+
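+/* Copy 'len' bytes from 'buf' to the selected BAP data register; data is
+ * written 16 bits at a time, with a final 8-bit write for an odd length. */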
+static int hfa384x_to_bap(struct net_device *dev, u16 bap, void *buf, int len)
+{
+	u16 d_off;
+	u16 *pos;
+
+	d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF;
+	pos = (u16 *) buf;
+
+	if (len / 2)
+		HFA384X_OUTSW(d_off, buf, len / 2);
+	pos += len / 2;
+
+	if (len & 1)
+		HFA384X_OUTB(*((char *) pos), d_off);
+
+	return 0;
+}
+
+
+/* FIX: This might change at some point.. */
+#include "hostap_hw.c"
+
+
+static void prism2_plx_cor_sreset(local_info_t *local)
+{
+	unsigned char corsave;
+	struct hostap_plx_priv *hw_priv = local->hw_priv;
+
+	printk(KERN_DEBUG "%s: Doing reset via direct COR access.\n",
+	       dev_info);
+
+	/* Set sreset bit of COR and clear it after hold time */
+
+	if (hw_priv->attr_mem == NULL) {
+		/* TMD7160 - COR at card's first I/O addr */
+		corsave = inb(hw_priv->cor_offset);
+		outb(corsave | COR_SRESET, hw_priv->cor_offset);
+		mdelay(2);
+		outb(corsave & ~COR_SRESET, hw_priv->cor_offset);
+		mdelay(2);
+	} else {
+		/* PLX9052 */
+		corsave = readb(hw_priv->attr_mem + hw_priv->cor_offset);
+		writeb(corsave | COR_SRESET,
+		       hw_priv->attr_mem + hw_priv->cor_offset);
+		mdelay(2);
+		writeb(corsave & ~COR_SRESET,
+		       hw_priv->attr_mem + hw_priv->cor_offset);
+		mdelay(2);
+	}
+}
+
+
+static void prism2_plx_genesis_reset(local_info_t *local, int hcr)
+{
+	unsigned char corsave;
+	struct hostap_plx_priv *hw_priv = local->hw_priv;
+
+	if (hw_priv->attr_mem == NULL) {
+		/* TMD7160 - COR at card's first I/O addr */
+		corsave = inb(hw_priv->cor_offset);
+		outb(corsave | COR_SRESET, hw_priv->cor_offset);
+		mdelay(10);
+		outb(hcr, hw_priv->cor_offset + 2);
+		mdelay(10);
+		outb(corsave & ~COR_SRESET, hw_priv->cor_offset);
+		mdelay(10);
+	} else {
+		/* PLX9052 */
+		corsave = readb(hw_priv->attr_mem + hw_priv->cor_offset);
+		writeb(corsave | COR_SRESET,
+		       hw_priv->attr_mem + hw_priv->cor_offset);
+		mdelay(10);
+		writeb(hcr, hw_priv->attr_mem + hw_priv->cor_offset + 2);
+		mdelay(10);
+		writeb(corsave & ~COR_SRESET,
+		       hw_priv->attr_mem + hw_priv->cor_offset);
+		mdelay(10);
+	}
+}
+
+
+static struct prism2_helper_functions prism2_plx_funcs =
+{
+	.card_present	= NULL,
+	.cor_sreset	= prism2_plx_cor_sreset,
+	.dev_open	= NULL,
+	.dev_close	= NULL,
+	.genesis_reset	= prism2_plx_genesis_reset,
+	.hw_type	= HOSTAP_HW_PLX,
+};
+
+
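+/* Parse the PC Card CIS from attribute memory to locate the Configuration
+ * Option Register (COR) offset and index and to verify the manufacturer ID
+ * against the list of known Prism2-based cards. Returns 0 on success. */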
+static int prism2_plx_check_cis(void __iomem *attr_mem, int attr_len,
+				unsigned int *cor_offset,
+				unsigned int *cor_index)
+{
+#define CISTPL_CONFIG 0x1A
+#define CISTPL_MANFID 0x20
+#define CISTPL_END 0xFF
+#define CIS_MAX_LEN 256
+	u8 *cis;
+	int i, pos;
+	unsigned int rmsz, rasz, manfid1, manfid2;
+	struct prism2_plx_manfid *manfid;
+
+	cis = kmalloc(CIS_MAX_LEN, GFP_KERNEL);
+	if (cis == NULL)
+		return -ENOMEM;
+
+	/* read CIS; it is stored at even offsets at the start of attr_mem */
+	for (i = 0; i < CIS_MAX_LEN; i++)
+		cis[i] = readb(attr_mem + 2 * i);
+	printk(KERN_DEBUG "%s: CIS: %02x %02x %02x %02x %02x %02x ...\n",
+	       dev_info, cis[0], cis[1], cis[2], cis[3], cis[4], cis[5]);
+
+	/* set reasonable defaults for Prism2 cards just in case CIS parsing
+	 * fails */
+	*cor_offset = 0x3e0;
+	*cor_index = 0x01;
+	manfid1 = manfid2 = 0;
+
+	pos = 0;
+	while (pos < CIS_MAX_LEN - 1 && cis[pos] != CISTPL_END) {
+		if (pos + cis[pos + 1] >= CIS_MAX_LEN)
+			goto cis_error;
+
+		switch (cis[pos]) {
+		case CISTPL_CONFIG:
+			if (cis[pos + 1] < 1)
+				goto cis_error;
+			rmsz = (cis[pos + 2] & 0x3c) >> 2;
+			rasz = cis[pos + 2] & 0x03;
+			if (4 + rasz + rmsz > cis[pos + 1])
+				goto cis_error;
+			*cor_index = cis[pos + 3] & 0x3F;
+			*cor_offset = 0;
+			for (i = 0; i <= rasz; i++)
+				*cor_offset += cis[pos + 4 + i] << (8 * i);
+			printk(KERN_DEBUG "%s: cor_index=0x%x "
+			       "cor_offset=0x%x\n", dev_info,
+			       *cor_index, *cor_offset);
+			if (*cor_offset > attr_len) {
+				printk(KERN_ERR "%s: COR offset not within "
+				       "attr_mem\n", dev_info);
+				kfree(cis);
+				return -1;
+			}
+			break;
+
+		case CISTPL_MANFID:
+			if (cis[pos + 1] < 4)
+				goto cis_error;
+			manfid1 = cis[pos + 2] + (cis[pos + 3] << 8);
+			manfid2 = cis[pos + 4] + (cis[pos + 5] << 8);
+			printk(KERN_DEBUG "%s: manfid=0x%04x, 0x%04x\n",
+			       dev_info, manfid1, manfid2);
+			break;
+		}
+
+		pos += cis[pos + 1] + 2;
+	}
+
+	if (pos >= CIS_MAX_LEN || cis[pos] != CISTPL_END)
+		goto cis_error;
+
+	for (manfid = prism2_plx_known_manfids; manfid->manfid1 != 0; manfid++)
+		if (manfid1 == manfid->manfid1 && manfid2 == manfid->manfid2) {
+			kfree(cis);
+			return 0;
+		}
+
+	printk(KERN_INFO "%s: unknown manfid 0x%04x, 0x%04x - assuming this is"
+	       " not a supported card\n", dev_info, manfid1, manfid2);
+	goto fail;
+
+ cis_error:
+	printk(KERN_WARNING "%s: invalid CIS data\n", dev_info);
+
+ fail:
+	kfree(cis);
+	if (ignore_cis) {
+		printk(KERN_INFO "%s: ignore_cis parameter set - ignoring "
+		       "errors during CIS verification\n", dev_info);
+		return 0;
+	}
+	return -1;
+}
+
+
+static int prism2_plx_probe(struct pci_dev *pdev,
+			    const struct pci_device_id *id)
+{
+	unsigned int pccard_ioaddr, plx_ioaddr;
+	unsigned long pccard_attr_mem;
+	unsigned int pccard_attr_len;
+	void __iomem *attr_mem = NULL;
+	unsigned int cor_offset, cor_index;
+	u32 reg;
+	local_info_t *local = NULL;
+	struct net_device *dev = NULL;
+	struct hostap_interface *iface;
+	static int cards_found /* = 0 */;
+	int irq_registered = 0;
+	int tmd7160;
+	struct hostap_plx_priv *hw_priv;
+
+	hw_priv = kmalloc(sizeof(*hw_priv), GFP_KERNEL);
+	if (hw_priv == NULL)
+		return -ENOMEM;
+	memset(hw_priv, 0, sizeof(*hw_priv));
+
+	if (pci_enable_device(pdev)) {
+		kfree(hw_priv);
+		return -EIO;
+	}
+
+	/* The National Datacomm NCP130 is based on TMD7160, not PLX9052. */
+	tmd7160 = (pdev->vendor == 0x15e8) && (pdev->device == 0x0131);
+
+	plx_ioaddr = pci_resource_start(pdev, 1);
+	pccard_ioaddr = pci_resource_start(pdev, tmd7160 ? 2 : 3);
+
+	if (tmd7160) {
+		/* TMD7160 */
+		attr_mem = NULL; /* no access to PC Card attribute memory */
+
+		printk(KERN_INFO "TMD7160 PCI/PCMCIA adapter: io=0x%x, "
+		       "irq=%d, pccard_io=0x%x\n",
+		       plx_ioaddr, pdev->irq, pccard_ioaddr);
+
+		cor_offset = plx_ioaddr;
+		cor_index = 0x04;
+
+		outb(cor_index | COR_LEVLREQ | COR_ENABLE_FUNC, plx_ioaddr);
+		mdelay(1);
+		reg = inb(plx_ioaddr);
+		if (reg != (cor_index | COR_LEVLREQ | COR_ENABLE_FUNC)) {
+			printk(KERN_ERR "%s: Error setting COR (expected="
+			       "0x%02x, was=0x%02x)\n", dev_info,
+			       cor_index | COR_LEVLREQ | COR_ENABLE_FUNC, reg);
+			goto fail;
+		}
+	} else {
+		/* PLX9052 */
+		pccard_attr_mem = pci_resource_start(pdev, 2);
+		pccard_attr_len = pci_resource_len(pdev, 2);
+		if (pccard_attr_len < PLX_MIN_ATTR_LEN)
+			goto fail;
+
+
+		attr_mem = ioremap(pccard_attr_mem, pccard_attr_len);
+		if (attr_mem == NULL) {
+			printk(KERN_ERR "%s: cannot remap attr_mem\n",
+			       dev_info);
+			goto fail;
+		}
+
+		printk(KERN_INFO "PLX9052 PCI/PCMCIA adapter: "
+		       "mem=0x%lx, plx_io=0x%x, irq=%d, pccard_io=0x%x\n",
+		       pccard_attr_mem, plx_ioaddr, pdev->irq, pccard_ioaddr);
+
+		if (prism2_plx_check_cis(attr_mem, pccard_attr_len,
+					 &cor_offset, &cor_index)) {
+			printk(KERN_INFO "Unknown PC Card CIS - not a "
+			       "Prism2/2.5 card?\n");
+			goto fail;
+		}
+
+		printk(KERN_DEBUG "Prism2/2.5 PC Card detected in PLX9052 "
+		       "adapter\n");
+
+		/* Write COR to enable PC Card */
+		writeb(cor_index | COR_LEVLREQ | COR_ENABLE_FUNC,
+		       attr_mem + cor_offset);
+
+		/* Enable PCI interrupts if they are not already enabled */
+		reg = inl(plx_ioaddr + PLX_INTCSR);
+		printk(KERN_DEBUG "PLX_INTCSR=0x%x\n", reg);
+		if (!(reg & PLX_INTCSR_PCI_INTEN)) {
+			outl(reg | PLX_INTCSR_PCI_INTEN,
+			     plx_ioaddr + PLX_INTCSR);
+			if (!(inl(plx_ioaddr + PLX_INTCSR) &
+			      PLX_INTCSR_PCI_INTEN)) {
+				printk(KERN_WARNING "%s: Could not enable "
+				       "Local Interrupts\n", dev_info);
+				goto fail;
+			}
+		}
+
+		reg = inl(plx_ioaddr + PLX_CNTRL);
+		printk(KERN_DEBUG "PLX_CNTRL=0x%x (Serial EEPROM "
+		       "present=%d)\n",
+		       reg, (reg & PLX_CNTRL_SERIAL_EEPROM_PRESENT) != 0);
+		/* should set PLX_PCIIPR to 0x01 (INTA#) if Serial EEPROM is
+		 * not present; but are there really such cards in use(?) */
+	}
+
+	dev = prism2_init_local_data(&prism2_plx_funcs, cards_found,
+				     &pdev->dev);
+	if (dev == NULL)
+		goto fail;
+	iface = netdev_priv(dev);
+	local = iface->local;
+	local->hw_priv = hw_priv;
+	cards_found++;
+
+	dev->irq = pdev->irq;
+	dev->base_addr = pccard_ioaddr;
+	hw_priv->attr_mem = attr_mem;
+	hw_priv->cor_offset = cor_offset;
+
+	pci_set_drvdata(pdev, dev);
+
+	if (request_irq(dev->irq, prism2_interrupt, SA_SHIRQ, dev->name,
+			dev)) {
+		printk(KERN_WARNING "%s: request_irq failed\n", dev->name);
+		goto fail;
+	} else
+		irq_registered = 1;
+
+	if (prism2_hw_config(dev, 1)) {
+		printk(KERN_DEBUG "%s: hardware initialization failed\n",
+		       dev_info);
+		goto fail;
+	}
+
+	return hostap_hw_ready(dev);
+
+ fail:
+	kfree(hw_priv);
+	if (local)
+		local->hw_priv = NULL;
+	prism2_free_local_data(dev);
+
+	if (irq_registered && dev)
+		free_irq(dev->irq, dev);
+
+	if (attr_mem)
+		iounmap(attr_mem);
+
+	pci_disable_device(pdev);
+
+	return -ENODEV;
+}
+
+
+static void prism2_plx_remove(struct pci_dev *pdev)
+{
+	struct net_device *dev;
+	struct hostap_interface *iface;
+	struct hostap_plx_priv *hw_priv;
+
+	dev = pci_get_drvdata(pdev);
+	iface = netdev_priv(dev);
+	hw_priv = iface->local->hw_priv;
+
+	/* Reset the hardware, and ensure interrupts are disabled. */
+	prism2_plx_cor_sreset(iface->local);
+	hfa384x_disable_interrupts(dev);
+
+	if (hw_priv->attr_mem)
+		iounmap(hw_priv->attr_mem);
+	if (dev->irq)
+		free_irq(dev->irq, dev);
+
+	kfree(iface->local->hw_priv);
+	iface->local->hw_priv = NULL;
+	prism2_free_local_data(dev);
+	pci_disable_device(pdev);
+}
+
+
+MODULE_DEVICE_TABLE(pci, prism2_plx_id_table);
+
+static struct pci_driver prism2_plx_drv_id = {
+	.name		= "prism2_plx",
+	.id_table	= prism2_plx_id_table,
+	.probe		= prism2_plx_probe,
+	.remove		= prism2_plx_remove,
+	.suspend	= NULL,
+	.resume		= NULL,
+	.enable_wake	= NULL
+};
+
+
+static int __init init_prism2_plx(void)
+{
+	printk(KERN_INFO "%s: %s\n", dev_info, version);
+
+	return pci_register_driver(&prism2_plx_drv_id);
+}
+
+
+static void __exit exit_prism2_plx(void)
+{
+	pci_unregister_driver(&prism2_plx_drv_id);
+	printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
+}
+
+
+module_init(init_prism2_plx);
+module_exit(exit_prism2_plx);
diff --git a/drivers/net/wireless/hostap/hostap_proc.c b/drivers/net/wireless/hostap/hostap_proc.c
new file mode 100644
--- /dev/null
+++ b/drivers/net/wireless/hostap/hostap_proc.c
@@ -0,0 +1,448 @@
+/* /proc routines for Host AP driver */
+
+#define PROC_LIMIT (PAGE_SIZE - 80)
+
+
+#ifndef PRISM2_NO_PROCFS_DEBUG
+static int prism2_debug_proc_read(char *page, char **start, off_t off,
+				  int count, int *eof, void *data)
+{
+	char *p = page;
+	local_info_t *local = (local_info_t *) data;
+	int i;
+
+	if (off != 0) {
+		*eof = 1;
+		return 0;
+	}
+
+	p += sprintf(p, "next_txfid=%d next_alloc=%d\n",
+		     local->next_txfid, local->next_alloc);
+	for (i = 0; i < PRISM2_TXFID_COUNT; i++)
+		p += sprintf(p, "FID: tx=%04X intransmit=%04X\n",
+			     local->txfid[i], local->intransmitfid[i]);
+	p += sprintf(p, "FW TX rate control: %d\n", local->fw_tx_rate_control);
+	p += sprintf(p, "beacon_int=%d\n", local->beacon_int);
+	p += sprintf(p, "dtim_period=%d\n", local->dtim_period);
+	p += sprintf(p, "wds_max_connections=%d\n",
+		     local->wds_max_connections);
+	p += sprintf(p, "dev_enabled=%d\n", local->dev_enabled);
+	p += sprintf(p, "sw_tick_stuck=%d\n", local->sw_tick_stuck);
+	for (i = 0; i < WEP_KEYS; i++) {
+		if (local->crypt[i] && local->crypt[i]->ops) {
+			p += sprintf(p, "crypt[%d]=%s\n",
+				     i, local->crypt[i]->ops->name);
+		}
+	}
+	p += sprintf(p, "pri_only=%d\n", local->pri_only);
+	p += sprintf(p, "pci=%d\n", local->func->hw_type == HOSTAP_HW_PCI);
+	p += sprintf(p, "sram_type=%d\n", local->sram_type);
+	p += sprintf(p, "no_pri=%d\n", local->no_pri);
+
+	return (p - page);
+}
+#endif /* PRISM2_NO_PROCFS_DEBUG */
+
+
+static int prism2_stats_proc_read(char *page, char **start, off_t off,
+				  int count, int *eof, void *data)
+{
+	char *p = page;
+	local_info_t *local = (local_info_t *) data;
+	struct comm_tallies_sums *sums = (struct comm_tallies_sums *)
+		&local->comm_tallies;
+
+	if (off != 0) {
+		*eof = 1;
+		return 0;
+	}
+
+	p += sprintf(p, "TxUnicastFrames=%u\n", sums->tx_unicast_frames);
+	p += sprintf(p, "TxMulticastframes=%u\n", sums->tx_multicast_frames);
+	p += sprintf(p, "TxFragments=%u\n", sums->tx_fragments);
+	p += sprintf(p, "TxUnicastOctets=%u\n", sums->tx_unicast_octets);
+	p += sprintf(p, "TxMulticastOctets=%u\n", sums->tx_multicast_octets);
+	p += sprintf(p, "TxDeferredTransmissions=%u\n",
+		     sums->tx_deferred_transmissions);
+	p += sprintf(p, "TxSingleRetryFrames=%u\n",
+		     sums->tx_single_retry_frames);
+	p += sprintf(p, "TxMultipleRetryFrames=%u\n",
+		     sums->tx_multiple_retry_frames);
+	p += sprintf(p, "TxRetryLimitExceeded=%u\n",
+		     sums->tx_retry_limit_exceeded);
+	p += sprintf(p, "TxDiscards=%u\n", sums->tx_discards);
+	p += sprintf(p, "RxUnicastFrames=%u\n", sums->rx_unicast_frames);
+	p += sprintf(p, "RxMulticastFrames=%u\n", sums->rx_multicast_frames);
+	p += sprintf(p, "RxFragments=%u\n", sums->rx_fragments);
+	p += sprintf(p, "RxUnicastOctets=%u\n", sums->rx_unicast_octets);
+	p += sprintf(p, "RxMulticastOctets=%u\n", sums->rx_multicast_octets);
+	p += sprintf(p, "RxFCSErrors=%u\n", sums->rx_fcs_errors);
+	p += sprintf(p, "RxDiscardsNoBuffer=%u\n",
+		     sums->rx_discards_no_buffer);
+	p += sprintf(p, "TxDiscardsWrongSA=%u\n", sums->tx_discards_wrong_sa);
+	p += sprintf(p, "RxDiscardsWEPUndecryptable=%u\n",
+		     sums->rx_discards_wep_undecryptable);
+	p += sprintf(p, "RxMessageInMsgFragments=%u\n",
+		     sums->rx_message_in_msg_fragments);
+	p += sprintf(p, "RxMessageInBadMsgFragments=%u\n",
+		     sums->rx_message_in_bad_msg_fragments);
+	/* FIX: this may grow too long for one page(?) */
+
+	return (p - page);
+}
+
+
+static int prism2_wds_proc_read(char *page, char **start, off_t off,
+				int count, int *eof, void *data)
+{
+	char *p = page;
+	local_info_t *local = (local_info_t *) data;
+	struct list_head *ptr;
+	struct hostap_interface *iface;
+
+	if (off > PROC_LIMIT) {
+		*eof = 1;
+		return 0;
+	}
+
+	read_lock_bh(&local->iface_lock);
+	list_for_each(ptr, &local->hostap_interfaces) {
+		iface = list_entry(ptr, struct hostap_interface, list);
+		if (iface->type != HOSTAP_INTERFACE_WDS)
+			continue;
+		p += sprintf(p, "%s\t" MACSTR "\n",
+			     iface->dev->name,
+			     MAC2STR(iface->u.wds.remote_addr));
+		if ((p - page) > PROC_LIMIT) {
+			printk(KERN_DEBUG "%s: wds proc did not fit\n",
+			       local->dev->name);
+			break;
+		}
+	}
+	read_unlock_bh(&local->iface_lock);
+
+	if ((p - page) <= off) {
+		*eof = 1;
+		return 0;
+	}
+
+	*start = page + off;
+
+	return (p - page - off);
+}
+
+
+static int prism2_bss_list_proc_read(char *page, char **start, off_t off,
+				     int count, int *eof, void *data)
+{
+	char *p = page;
+	local_info_t *local = (local_info_t *) data;
+	struct list_head *ptr;
+	struct hostap_bss_info *bss;
+	int i;
+
+	if (off > PROC_LIMIT) {
+		*eof = 1;
+		return 0;
+	}
+
+	p += sprintf(p, "#BSSID\tlast_update\tcount\tcapab_info\tSSID(txt)\t"
+		     "SSID(hex)\tWPA IE\n");
+	spin_lock_bh(&local->lock);
+	list_for_each(ptr, &local->bss_list) {
+		bss = list_entry(ptr, struct hostap_bss_info, list);
+		p += sprintf(p, MACSTR "\t%lu\t%u\t0x%x\t",
+			     MAC2STR(bss->bssid), bss->last_update,
+			     bss->count, bss->capab_info);
+		for (i = 0; i < bss->ssid_len; i++) {
+			p += sprintf(p, "%c",
+				     bss->ssid[i] >= 32 && bss->ssid[i] < 127 ?
+				     bss->ssid[i] : '_');
+		}
+		p += sprintf(p, "\t");
+		for (i = 0; i < bss->ssid_len; i++) {
+			p += sprintf(p, "%02x", bss->ssid[i]);
+		}
+		p += sprintf(p, "\t");
+		for (i = 0; i < bss->wpa_ie_len; i++) {
+			p += sprintf(p, "%02x", bss->wpa_ie[i]);
+		}
+		p += sprintf(p, "\n");
+		if ((p - page) > PROC_LIMIT) {
+			printk(KERN_DEBUG "%s: BSS proc did not fit\n",
+			       local->dev->name);
+			break;
+		}
+	}
+	spin_unlock_bh(&local->lock);
+
+	if ((p - page) <= off) {
+		*eof = 1;
+		return 0;
+	}
+
+	*start = page + off;
+
+	return (p - page - off);
+}
+
+
+static int prism2_crypt_proc_read(char *page, char **start, off_t off,
+				  int count, int *eof, void *data)
+{
+	char *p = page;
+	local_info_t *local = (local_info_t *) data;
+	int i;
+
+	if (off > PROC_LIMIT) {
+		*eof = 1;
+		return 0;
+	}
+
+	p += sprintf(p, "tx_keyidx=%d\n", local->tx_keyidx);
+	for (i = 0; i < WEP_KEYS; i++) {
+		if (local->crypt[i] && local->crypt[i]->ops &&
+		    local->crypt[i]->ops->print_stats) {
+			p = local->crypt[i]->ops->print_stats(
+				p, local->crypt[i]->priv);
+		}
+	}
+
+	if ((p - page) <= off) {
+		*eof = 1;
+		return 0;
+	}
+
+	*start = page + off;
+
+	return (p - page - off);
+}
+
+
+static int prism2_pda_proc_read(char *page, char **start, off_t off,
+				int count, int *eof, void *data)
+{
+	local_info_t *local = (local_info_t *) data;
+
+	if (local->pda == NULL || off >= PRISM2_PDA_SIZE) {
+		*eof = 1;
+		return 0;
+	}
+
+	if (off + count > PRISM2_PDA_SIZE)
+		count = PRISM2_PDA_SIZE - off;
+
+	memcpy(page, local->pda + off, count);
+	return count;
+}
+
+
+static int prism2_aux_dump_proc_read(char *page, char **start, off_t off,
+				     int count, int *eof, void *data)
+{
+	local_info_t *local = (local_info_t *) data;
+
+	if (local->func->read_aux == NULL) {
+		*eof = 1;
+		return 0;
+	}
+
+	if (local->func->read_aux(local->dev, off, count, page)) {
+		*eof = 1;
+		return 0;
+	}
+	*start = page;
+
+	return count;
+}
+
+
+#ifdef PRISM2_IO_DEBUG
+static int prism2_io_debug_proc_read(char *page, char **start, off_t off,
+				     int count, int *eof, void *data)
+{
+	local_info_t *local = (local_info_t *) data;
+	int head = local->io_debug_head;
+	int start_bytes, left, copy, copied;
+
+	if (off + count > PRISM2_IO_DEBUG_SIZE * 4) {
+		*eof = 1;
+		if (off >= PRISM2_IO_DEBUG_SIZE * 4)
+			return 0;
+		count = PRISM2_IO_DEBUG_SIZE * 4 - off;
+	}
+
+	copied = 0;
+	start_bytes = (PRISM2_IO_DEBUG_SIZE - head) * 4;
+	left = count;
+
+	if (off < start_bytes) {
+		copy = start_bytes - off;
+		if (copy > count)
+			copy = count;
+		memcpy(page, ((u8 *) &local->io_debug[head]) + off, copy);
+		left -= copy;
+		if (left > 0)
+			memcpy(&page[copy], local->io_debug, left);
+	} else {
+		memcpy(page, ((u8 *) local->io_debug) + (off - start_bytes),
+		       left);
+	}
+
+	*start = page;
+
+	return count;
+}
+#endif /* PRISM2_IO_DEBUG */
+
+
+#ifndef PRISM2_NO_STATION_MODES
+static int prism2_scan_results_proc_read(char *page, char **start, off_t off,
+					 int count, int *eof, void *data)
+{
+	char *p = page;
+	local_info_t *local = (local_info_t *) data;
+	int entry, i, len, total = 0;
+	struct hfa384x_hostscan_result *scanres;
+	u8 *pos;
+
+	p += sprintf(p, "CHID ANL SL BcnInt Capab Rate BSSID ATIM SupRates "
+		     "SSID\n");
+
+	spin_lock_bh(&local->lock);
+	for (entry = 0; entry < local->last_scan_results_count; entry++) {
+		scanres = &local->last_scan_results[entry];
+
+		if (total + (p - page) <= off) {
+			total += p - page;
+			p = page;
+		}
+		if (total + (p - page) > off + count)
+			break;
+		if ((p - page) > (PAGE_SIZE - 200))
+			break;
+
+		p += sprintf(p, "%d %d %d %d 0x%02x %d " MACSTR " %d ",
+			     le16_to_cpu(scanres->chid),
+			     (s16) le16_to_cpu(scanres->anl),
+			     (s16) le16_to_cpu(scanres->sl),
+			     le16_to_cpu(scanres->beacon_interval),
+			     le16_to_cpu(scanres->capability),
+			     le16_to_cpu(scanres->rate),
+			     MAC2STR(scanres->bssid),
+			     le16_to_cpu(scanres->atim));
+
+		pos = scanres->sup_rates;
+		for (i = 0; i < sizeof(scanres->sup_rates); i++) {
+			if (pos[i] == 0)
+				break;
+			p += sprintf(p, "<%02x>", pos[i]);
+		}
+		p += sprintf(p, " ");
+
+		pos = scanres->ssid;
+		len = le16_to_cpu(scanres->ssid_len);
+		if (len > 32)
+			len = 32;
+		for (i = 0; i < len; i++) {
+			unsigned char c = pos[i];
+			if (c >= 32 && c < 127)
+				p += sprintf(p, "%c", c);
+			else
+				p += sprintf(p, "<%02x>", c);
+		}
+		p += sprintf(p, "\n");
+	}
+	spin_unlock_bh(&local->lock);
+
+	total += (p - page);
+	if (total >= off + count)
+		*eof = 1;
+
+	if (total < off) {
+		*eof = 1;
+		return 0;
+	}
+
+	len = total - off;
+	if (len > (p - page))
+		len = p - page;
+	*start = p - len;
+	if (len > count)
+		len = count;
+
+	return len;
+}
+#endif /* PRISM2_NO_STATION_MODES */
+
+
+void hostap_init_proc(local_info_t *local)
+{
+	local->proc = NULL;
+
+	if (hostap_proc == NULL) {
+		printk(KERN_WARNING "%s: hostap proc directory not created\n",
+		       local->dev->name);
+		return;
+	}
+
+	local->proc = proc_mkdir(local->ddev->name, hostap_proc);
+	if (local->proc == NULL) {
+		printk(KERN_INFO "/proc/net/hostap/%s creation failed\n",
+		       local->ddev->name);
+		return;
+	}
+
+#ifndef PRISM2_NO_PROCFS_DEBUG
+	create_proc_read_entry("debug", 0, local->proc,
+			       prism2_debug_proc_read, local);
+#endif /* PRISM2_NO_PROCFS_DEBUG */
+	create_proc_read_entry("stats", 0, local->proc,
+			       prism2_stats_proc_read, local);
+	create_proc_read_entry("wds", 0, local->proc,
+			       prism2_wds_proc_read, local);
+	create_proc_read_entry("pda", 0, local->proc,
+			       prism2_pda_proc_read, local);
+	create_proc_read_entry("aux_dump", 0, local->proc,
+			       prism2_aux_dump_proc_read, local);
+	create_proc_read_entry("bss_list", 0, local->proc,
+			       prism2_bss_list_proc_read, local);
+	create_proc_read_entry("crypt", 0, local->proc,
+			       prism2_crypt_proc_read, local);
+#ifdef PRISM2_IO_DEBUG
+	create_proc_read_entry("io_debug", 0, local->proc,
+			       prism2_io_debug_proc_read, local);
+#endif /* PRISM2_IO_DEBUG */
+#ifndef PRISM2_NO_STATION_MODES
+	create_proc_read_entry("scan_results", 0, local->proc,
+			       prism2_scan_results_proc_read, local);
+#endif /* PRISM2_NO_STATION_MODES */
+}
+
+
+void hostap_remove_proc(local_info_t *local)
+{
+	if (local->proc != NULL) {
+#ifndef PRISM2_NO_STATION_MODES
+		remove_proc_entry("scan_results", local->proc);
+#endif /* PRISM2_NO_STATION_MODES */
+#ifdef PRISM2_IO_DEBUG
+		remove_proc_entry("io_debug", local->proc);
+#endif /* PRISM2_IO_DEBUG */
+		remove_proc_entry("pda", local->proc);
+		remove_proc_entry("aux_dump", local->proc);
+		remove_proc_entry("wds", local->proc);
+		remove_proc_entry("stats", local->proc);
+		remove_proc_entry("bss_list", local->proc);
+		remove_proc_entry("crypt", local->proc);
+#ifndef PRISM2_NO_PROCFS_DEBUG
+		remove_proc_entry("debug", local->proc);
+#endif /* PRISM2_NO_PROCFS_DEBUG */
+		if (hostap_proc != NULL)
+			remove_proc_entry(local->proc->name, hostap_proc);
+	}
+}
+
+
+EXPORT_SYMBOL(hostap_init_proc);
+EXPORT_SYMBOL(hostap_remove_proc);
diff --git a/drivers/net/wireless/hostap/hostap_wlan.h b/drivers/net/wireless/hostap/hostap_wlan.h
new file mode 100644
--- /dev/null
+++ b/drivers/net/wireless/hostap/hostap_wlan.h
@@ -0,0 +1,1033 @@
+#ifndef HOSTAP_WLAN_H
+#define HOSTAP_WLAN_H
+
+#include "hostap_config.h"
+#include "hostap_common.h"
+
+#define MAX_PARM_DEVICES 8
+#define PARM_MIN_MAX "1-" __MODULE_STRING(MAX_PARM_DEVICES)
+#define DEF_INTS -1, -1, -1, -1, -1, -1, -1
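+/* Per-card module parameter lookup: use var[idx] unless that entry is unset
+ * (negative), in which case fall back to the default in var[0]. */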
+#define GET_INT_PARM(var,idx) var[var[idx] < 0 ? 0 : idx]
+
+
+/* Specific skb->protocol value that indicates that the packet already contains
+ * a txdesc header.
+ * FIX: This might need its own value, allocated especially for Prism2
+ * txdesc; ETH_P_CONTROL is commented as "Card specific control frames".
+ * However, these skbs take only a minimal path through the kernel since
+ * prism2_send_mgmt() sends these with dev_queue_xmit() to prism2_tx(). */
+#define ETH_P_HOSTAP ETH_P_CONTROL
+
+/* ARPHRD_IEEE80211_PRISM uses a bloated version of Prism2 RX frame header
+ * (from linux-wlan-ng) */
+struct linux_wlan_ng_val {
+	u32 did;
+	u16 status, len;
+	u32 data;
+} __attribute__ ((packed));
+
+struct linux_wlan_ng_prism_hdr {
+	u32 msgcode, msglen;
+	char devname[16];
+	struct linux_wlan_ng_val hosttime, mactime, channel, rssi, sq, signal,
+		noise, rate, istx, frmlen;
+} __attribute__ ((packed));
+
+struct linux_wlan_ng_cap_hdr {
+	u32 version;
+	u32 length;
+	u64 mactime;
+	u64 hosttime;
+	u32 phytype;
+	u32 channel;
+	u32 datarate;
+	u32 antenna;
+	u32 priority;
+	u32 ssi_type;
+	s32 ssi_signal;
+	s32 ssi_noise;
+	u32 preamble;
+	u32 encoding;
+} __attribute__ ((packed));
+
+#define LWNG_CAP_DID_BASE   (4 | (1 << 6)) /* section 4, group 1 */
+#define LWNG_CAPHDR_VERSION 0x80211001
+
+struct hfa384x_rx_frame {
+	/* HFA384X RX frame descriptor */
+	u16 status; /* HFA384X_RX_STATUS_ flags */
+	u32 time; /* timestamp, 1 microsecond resolution */
+	u8 silence; /* 27 .. 154; seems to be 0 */
+	u8 signal; /* 27 .. 154 */
+	u8 rate; /* 10, 20, 55, or 110 */
+	u8 rxflow;
+	u32 reserved;
+
+	/* 802.11 */
+	u16 frame_control;
+	u16 duration_id;
+	u8 addr1[6];
+	u8 addr2[6];
+	u8 addr3[6];
+	u16 seq_ctrl;
+	u8 addr4[6];
+	u16 data_len;
+
+	/* 802.3 */
+	u8 dst_addr[6];
+	u8 src_addr[6];
+	u16 len;
+
+	/* followed by frame data; max 2304 bytes */
+} __attribute__ ((packed));
+
+
+struct hfa384x_tx_frame {
+	/* HFA384X TX frame descriptor */
+	u16 status; /* HFA384X_TX_STATUS_ flags */
+	u16 reserved1;
+	u16 reserved2;
+	u32 sw_support;
+	u8 retry_count; /* not yet implemented */
+	u8 tx_rate; /* Host AP only; 0 = firmware, or 10, 20, 55, 110 */
+	u16 tx_control; /* HFA384X_TX_CTRL_ flags */
+
+	/* 802.11 */
+	u16 frame_control; /* parts not used */
+	u16 duration_id;
+	u8 addr1[6];
+	u8 addr2[6]; /* filled by firmware */
+	u8 addr3[6];
+	u16 seq_ctrl; /* filled by firmware */
+	u8 addr4[6];
+	u16 data_len;
+
+	/* 802.3 */
+	u8 dst_addr[6];
+	u8 src_addr[6];
+	u16 len;
+
+	/* followed by frame data; max 2304 bytes */
+} __attribute__ ((packed));
+
+
+struct hfa384x_rid_hdr
+{
+	u16 len;
+	u16 rid;
+} __attribute__ ((packed));
+
+
+/* Macros for converting signal levels (range 27 .. 154) to an approximate
+ * wireless extensions dBm value */
+#define HFA384X_LEVEL_TO_dBm(v) 0x100 + (v) * 100 / 255 - 100
+
+#define HFA384X_LEVEL_TO_dBm_sign(v) (v) * 100 / 255 - 100
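+
+/* For example (integer arithmetic): v = 27 gives 27 * 100 / 255 - 100 = -90
+ * and v = 154 gives 154 * 100 / 255 - 100 = -40; the non-_sign variant adds
+ * a 0x100 offset, so the same inputs yield 166 and 216, respectively. */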
+
+struct hfa384x_scan_request {
+	u16 channel_list;
+	u16 txrate; /* HFA384X_RATES_* */
+} __attribute__ ((packed));
+
+struct hfa384x_hostscan_request {
+	u16 channel_list;
+	u16 txrate;
+	u16 target_ssid_len;
+	u8 target_ssid[32];
+} __attribute__ ((packed));
+
+struct hfa384x_join_request {
+	u8 bssid[6];
+	u16 channel;
+} __attribute__ ((packed));
+
+struct hfa384x_info_frame {
+	u16 len;
+	u16 type;
+} __attribute__ ((packed));
+
+struct hfa384x_comm_tallies {
+	u16 tx_unicast_frames;
+	u16 tx_multicast_frames;
+	u16 tx_fragments;
+	u16 tx_unicast_octets;
+	u16 tx_multicast_octets;
+	u16 tx_deferred_transmissions;
+	u16 tx_single_retry_frames;
+	u16 tx_multiple_retry_frames;
+	u16 tx_retry_limit_exceeded;
+	u16 tx_discards;
+	u16 rx_unicast_frames;
+	u16 rx_multicast_frames;
+	u16 rx_fragments;
+	u16 rx_unicast_octets;
+	u16 rx_multicast_octets;
+	u16 rx_fcs_errors;
+	u16 rx_discards_no_buffer;
+	u16 tx_discards_wrong_sa;
+	u16 rx_discards_wep_undecryptable;
+	u16 rx_message_in_msg_fragments;
+	u16 rx_message_in_bad_msg_fragments;
+} __attribute__ ((packed));
+
+struct hfa384x_comm_tallies32 {
+	u32 tx_unicast_frames;
+	u32 tx_multicast_frames;
+	u32 tx_fragments;
+	u32 tx_unicast_octets;
+	u32 tx_multicast_octets;
+	u32 tx_deferred_transmissions;
+	u32 tx_single_retry_frames;
+	u32 tx_multiple_retry_frames;
+	u32 tx_retry_limit_exceeded;
+	u32 tx_discards;
+	u32 rx_unicast_frames;
+	u32 rx_multicast_frames;
+	u32 rx_fragments;
+	u32 rx_unicast_octets;
+	u32 rx_multicast_octets;
+	u32 rx_fcs_errors;
+	u32 rx_discards_no_buffer;
+	u32 tx_discards_wrong_sa;
+	u32 rx_discards_wep_undecryptable;
+	u32 rx_message_in_msg_fragments;
+	u32 rx_message_in_bad_msg_fragments;
+} __attribute__ ((packed));
+
+struct hfa384x_scan_result_hdr {
+	u16 reserved;
+	u16 scan_reason;
+#define HFA384X_SCAN_IN_PROGRESS 0 /* no results available yet */
+#define HFA384X_SCAN_HOST_INITIATED 1
+#define HFA384X_SCAN_FIRMWARE_INITIATED 2
+#define HFA384X_SCAN_INQUIRY_FROM_HOST 3
+} __attribute__ ((packed));
+
+#define HFA384X_SCAN_MAX_RESULTS 32
+
+struct hfa384x_scan_result {
+	u16 chid;
+	u16 anl;
+	u16 sl;
+	u8 bssid[6];
+	u16 beacon_interval;
+	u16 capability;
+	u16 ssid_len;
+	u8 ssid[32];
+	u8 sup_rates[10];
+	u16 rate;
+} __attribute__ ((packed));
+
+struct hfa384x_hostscan_result {
+	u16 chid;
+	u16 anl;
+	u16 sl;
+	u8 bssid[6];
+	u16 beacon_interval;
+	u16 capability;
+	u16 ssid_len;
+	u8 ssid[32];
+	u8 sup_rates[10];
+	u16 rate;
+	u16 atim;
+} __attribute__ ((packed));
+
+struct comm_tallies_sums {
+	unsigned int tx_unicast_frames;
+	unsigned int tx_multicast_frames;
+	unsigned int tx_fragments;
+	unsigned int tx_unicast_octets;
+	unsigned int tx_multicast_octets;
+	unsigned int tx_deferred_transmissions;
+	unsigned int tx_single_retry_frames;
+	unsigned int tx_multiple_retry_frames;
+	unsigned int tx_retry_limit_exceeded;
+	unsigned int tx_discards;
+	unsigned int rx_unicast_frames;
+	unsigned int rx_multicast_frames;
+	unsigned int rx_fragments;
+	unsigned int rx_unicast_octets;
+	unsigned int rx_multicast_octets;
+	unsigned int rx_fcs_errors;
+	unsigned int rx_discards_no_buffer;
+	unsigned int tx_discards_wrong_sa;
+	unsigned int rx_discards_wep_undecryptable;
+	unsigned int rx_message_in_msg_fragments;
+	unsigned int rx_message_in_bad_msg_fragments;
+};
+
+
+struct hfa384x_regs {
+	u16 cmd;
+	u16 evstat;
+	u16 offset0;
+	u16 offset1;
+	u16 swsupport0;
+};
+
+
+#if defined(PRISM2_PCCARD) || defined(PRISM2_PLX)
+/* I/O ports for HFA384X Controller access */
+#define HFA384X_CMD_OFF 0x00
+#define HFA384X_PARAM0_OFF 0x02
+#define HFA384X_PARAM1_OFF 0x04
+#define HFA384X_PARAM2_OFF 0x06
+#define HFA384X_STATUS_OFF 0x08
+#define HFA384X_RESP0_OFF 0x0A
+#define HFA384X_RESP1_OFF 0x0C
+#define HFA384X_RESP2_OFF 0x0E
+#define HFA384X_INFOFID_OFF 0x10
+#define HFA384X_CONTROL_OFF 0x14
+#define HFA384X_SELECT0_OFF 0x18
+#define HFA384X_SELECT1_OFF 0x1A
+#define HFA384X_OFFSET0_OFF 0x1C
+#define HFA384X_OFFSET1_OFF 0x1E
+#define HFA384X_RXFID_OFF 0x20
+#define HFA384X_ALLOCFID_OFF 0x22
+#define HFA384X_TXCOMPLFID_OFF 0x24
+#define HFA384X_SWSUPPORT0_OFF 0x28
+#define HFA384X_SWSUPPORT1_OFF 0x2A
+#define HFA384X_SWSUPPORT2_OFF 0x2C
+#define HFA384X_EVSTAT_OFF 0x30
+#define HFA384X_INTEN_OFF 0x32
+#define HFA384X_EVACK_OFF 0x34
+#define HFA384X_DATA0_OFF 0x36
+#define HFA384X_DATA1_OFF 0x38
+#define HFA384X_AUXPAGE_OFF 0x3A
+#define HFA384X_AUXOFFSET_OFF 0x3C
+#define HFA384X_AUXDATA_OFF 0x3E
+#endif /* PRISM2_PCCARD || PRISM2_PLX */
+
+#ifdef PRISM2_PCI
+/* Memory addresses for ISL3874 controller access */
+#define HFA384X_CMD_OFF 0x00
+#define HFA384X_PARAM0_OFF 0x04
+#define HFA384X_PARAM1_OFF 0x08
+#define HFA384X_PARAM2_OFF 0x0C
+#define HFA384X_STATUS_OFF 0x10
+#define HFA384X_RESP0_OFF 0x14
+#define HFA384X_RESP1_OFF 0x18
+#define HFA384X_RESP2_OFF 0x1C
+#define HFA384X_INFOFID_OFF 0x20
+#define HFA384X_CONTROL_OFF 0x28
+#define HFA384X_SELECT0_OFF 0x30
+#define HFA384X_SELECT1_OFF 0x34
+#define HFA384X_OFFSET0_OFF 0x38
+#define HFA384X_OFFSET1_OFF 0x3C
+#define HFA384X_RXFID_OFF 0x40
+#define HFA384X_ALLOCFID_OFF 0x44
+#define HFA384X_TXCOMPLFID_OFF 0x48
+#define HFA384X_PCICOR_OFF 0x4C
+#define HFA384X_SWSUPPORT0_OFF 0x50
+#define HFA384X_SWSUPPORT1_OFF 0x54
+#define HFA384X_SWSUPPORT2_OFF 0x58
+#define HFA384X_PCIHCR_OFF 0x5C
+#define HFA384X_EVSTAT_OFF 0x60
+#define HFA384X_INTEN_OFF 0x64
+#define HFA384X_EVACK_OFF 0x68
+#define HFA384X_DATA0_OFF 0x6C
+#define HFA384X_DATA1_OFF 0x70
+#define HFA384X_AUXPAGE_OFF 0x74
+#define HFA384X_AUXOFFSET_OFF 0x78
+#define HFA384X_AUXDATA_OFF 0x7C
+#define HFA384X_PCI_M0_ADDRH_OFF 0x80
+#define HFA384X_PCI_M0_ADDRL_OFF 0x84
+#define HFA384X_PCI_M0_LEN_OFF 0x88
+#define HFA384X_PCI_M0_CTL_OFF 0x8C
+#define HFA384X_PCI_STATUS_OFF 0x98
+#define HFA384X_PCI_M1_ADDRH_OFF 0xA0
+#define HFA384X_PCI_M1_ADDRL_OFF 0xA4
+#define HFA384X_PCI_M1_LEN_OFF 0xA8
+#define HFA384X_PCI_M1_CTL_OFF 0xAC
+
+/* PCI bus master control bits (these are undocumented; based on guessing and
+ * experimenting..) */
+#define HFA384X_PCI_CTL_FROM_BAP (BIT(5) | BIT(1) | BIT(0))
+#define HFA384X_PCI_CTL_TO_BAP (BIT(5) | BIT(0))
+
+#endif /* PRISM2_PCI */
+
+
+/* Command codes for CMD reg. */
+#define HFA384X_CMDCODE_INIT 0x00
+#define HFA384X_CMDCODE_ENABLE 0x01
+#define HFA384X_CMDCODE_DISABLE 0x02
+#define HFA384X_CMDCODE_ALLOC 0x0A
+#define HFA384X_CMDCODE_TRANSMIT 0x0B
+#define HFA384X_CMDCODE_INQUIRE 0x11
+#define HFA384X_CMDCODE_ACCESS 0x21
+#define HFA384X_CMDCODE_ACCESS_WRITE (0x21 | BIT(8))
+#define HFA384X_CMDCODE_DOWNLOAD 0x22
+#define HFA384X_CMDCODE_READMIF 0x30
+#define HFA384X_CMDCODE_WRITEMIF 0x31
+#define HFA384X_CMDCODE_TEST 0x38
+
+#define HFA384X_CMDCODE_MASK 0x3F
+
+/* Test mode operations */
+#define HFA384X_TEST_CHANGE_CHANNEL 0x08
+#define HFA384X_TEST_MONITOR 0x0B
+#define HFA384X_TEST_STOP 0x0F
+#define HFA384X_TEST_CFG_BITS 0x15
+#define HFA384X_TEST_CFG_BIT_ALC BIT(3)
+
+#define HFA384X_CMD_BUSY BIT(15)
+
+#define HFA384X_CMD_TX_RECLAIM BIT(8)
+
+#define HFA384X_OFFSET_ERR BIT(14)
+#define HFA384X_OFFSET_BUSY BIT(15)
+
+
+/* ProgMode for download command */
+#define HFA384X_PROGMODE_DISABLE 0
+#define HFA384X_PROGMODE_ENABLE_VOLATILE 1
+#define HFA384X_PROGMODE_ENABLE_NON_VOLATILE 2
+#define HFA384X_PROGMODE_PROGRAM_NON_VOLATILE 3
+
+#define HFA384X_AUX_MAGIC0 0xfe01
+#define HFA384X_AUX_MAGIC1 0xdc23
+#define HFA384X_AUX_MAGIC2 0xba45
+
+#define HFA384X_AUX_PORT_DISABLED 0
+#define HFA384X_AUX_PORT_DISABLE BIT(14)
+#define HFA384X_AUX_PORT_ENABLE BIT(15)
+#define HFA384X_AUX_PORT_ENABLED (BIT(14) | BIT(15))
+#define HFA384X_AUX_PORT_MASK (BIT(14) | BIT(15))
+
+#define PRISM2_PDA_SIZE 1024
+
+
+/* Events; EvStat, Interrupt mask (IntEn), and acknowledge bits (EvAck) */
+#define HFA384X_EV_TICK BIT(15)
+#define HFA384X_EV_WTERR BIT(14)
+#define HFA384X_EV_INFDROP BIT(13)
+#ifdef PRISM2_PCI
+#define HFA384X_EV_PCI_M1 BIT(9)
+#define HFA384X_EV_PCI_M0 BIT(8)
+#endif /* PRISM2_PCI */
+#define HFA384X_EV_INFO BIT(7)
+#define HFA384X_EV_DTIM BIT(5)
+#define HFA384X_EV_CMD BIT(4)
+#define HFA384X_EV_ALLOC BIT(3)
+#define HFA384X_EV_TXEXC BIT(2)
+#define HFA384X_EV_TX BIT(1)
+#define HFA384X_EV_RX BIT(0)
+
+
+/* HFA384X Information frames */
+#define HFA384X_INFO_HANDOVERADDR 0xF000 /* AP f/w ? */
+#define HFA384X_INFO_HANDOVERDEAUTHADDR 0xF001 /* AP f/w 1.3.7 */
+#define HFA384X_INFO_COMMTALLIES 0xF100
+#define HFA384X_INFO_SCANRESULTS 0xF101
+#define HFA384X_INFO_CHANNELINFORESULTS 0xF102 /* AP f/w only */
+#define HFA384X_INFO_HOSTSCANRESULTS 0xF103
+#define HFA384X_INFO_LINKSTATUS 0xF200
+#define HFA384X_INFO_ASSOCSTATUS 0xF201 /* ? */
+#define HFA384X_INFO_AUTHREQ 0xF202 /* ? */
+#define HFA384X_INFO_PSUSERCNT 0xF203 /* ? */
+#define HFA384X_INFO_KEYIDCHANGED 0xF204 /* ? */
+
+enum { HFA384X_LINKSTATUS_CONNECTED = 1,
+       HFA384X_LINKSTATUS_DISCONNECTED = 2,
+       HFA384X_LINKSTATUS_AP_CHANGE = 3,
+       HFA384X_LINKSTATUS_AP_OUT_OF_RANGE = 4,
+       HFA384X_LINKSTATUS_AP_IN_RANGE = 5,
+       HFA384X_LINKSTATUS_ASSOC_FAILED = 6 };
+
+enum { HFA384X_PORTTYPE_BSS = 1, HFA384X_PORTTYPE_WDS = 2,
+       HFA384X_PORTTYPE_PSEUDO_IBSS = 3, HFA384X_PORTTYPE_IBSS = 0,
+       HFA384X_PORTTYPE_HOSTAP = 6 };
+
+#define HFA384X_RATES_1MBPS BIT(0)
+#define HFA384X_RATES_2MBPS BIT(1)
+#define HFA384X_RATES_5MBPS BIT(2)
+#define HFA384X_RATES_11MBPS BIT(3)
+
+#define HFA384X_ROAMING_FIRMWARE 1
+#define HFA384X_ROAMING_HOST 2
+#define HFA384X_ROAMING_DISABLED 3
+
+#define HFA384X_WEPFLAGS_PRIVACYINVOKED BIT(0)
+#define HFA384X_WEPFLAGS_EXCLUDEUNENCRYPTED BIT(1)
+#define HFA384X_WEPFLAGS_HOSTENCRYPT BIT(4)
+#define HFA384X_WEPFLAGS_HOSTDECRYPT BIT(7)
+
+#define HFA384X_RX_STATUS_MSGTYPE (BIT(15) | BIT(14) | BIT(13))
+#define HFA384X_RX_STATUS_PCF BIT(12)
+#define HFA384X_RX_STATUS_MACPORT (BIT(10) | BIT(9) | BIT(8))
+#define HFA384X_RX_STATUS_UNDECR BIT(1)
+#define HFA384X_RX_STATUS_FCSERR BIT(0)
+
+#define HFA384X_RX_STATUS_GET_MSGTYPE(s) \
+(((s) & HFA384X_RX_STATUS_MSGTYPE) >> 13)
+#define HFA384X_RX_STATUS_GET_MACPORT(s) \
+(((s) & HFA384X_RX_STATUS_MACPORT) >> 8)
+
+enum { HFA384X_RX_MSGTYPE_NORMAL = 0, HFA384X_RX_MSGTYPE_RFC1042 = 1,
+       HFA384X_RX_MSGTYPE_BRIDGETUNNEL = 2, HFA384X_RX_MSGTYPE_MGMT = 4 };
+
+
+#define HFA384X_TX_CTRL_ALT_RTRY BIT(5)
+#define HFA384X_TX_CTRL_802_11 BIT(3)
+#define HFA384X_TX_CTRL_802_3 0
+#define HFA384X_TX_CTRL_TX_EX BIT(2)
+#define HFA384X_TX_CTRL_TX_OK BIT(1)
+
+#define HFA384X_TX_STATUS_RETRYERR BIT(0)
+#define HFA384X_TX_STATUS_AGEDERR BIT(1)
+#define HFA384X_TX_STATUS_DISCON BIT(2)
+#define HFA384X_TX_STATUS_FORMERR BIT(3)
+
+/* HFA3861/3863 (BBP) Control Registers */
+#define HFA386X_CR_TX_CONFIGURE 0x12 /* CR9 */
+#define HFA386X_CR_RX_CONFIGURE 0x14 /* CR10 */
+#define HFA386X_CR_A_D_TEST_MODES2 0x1A /* CR13 */
+#define HFA386X_CR_MANUAL_TX_POWER 0x3E /* CR31 */
+#define HFA386X_CR_MEASURED_TX_POWER 0x74 /* CR58 */
+
+
+#ifdef __KERNEL__
+
+#define PRISM2_TXFID_COUNT 8
+#define PRISM2_DATA_MAXLEN 2304
+#define PRISM2_TXFID_LEN (PRISM2_DATA_MAXLEN + sizeof(struct hfa384x_tx_frame))
+#define PRISM2_TXFID_EMPTY 0xffff
+#define PRISM2_TXFID_RESERVED 0xfffe
+#define PRISM2_DUMMY_FID 0xffff
+#define MAX_SSID_LEN 32
+#define MAX_NAME_LEN 32 /* this is assumed to be equal to MAX_SSID_LEN */
+
+#define PRISM2_DUMP_RX_HDR BIT(0)
+#define PRISM2_DUMP_TX_HDR BIT(1)
+#define PRISM2_DUMP_TXEXC_HDR BIT(2)
+
+struct hostap_tx_callback_info {
+	u16 idx;
+	void (*func)(struct sk_buff *, int ok, void *);
+	void *data;
+	struct hostap_tx_callback_info *next;
+};
+
+
+/* IEEE 802.11 requires that STA supports concurrent reception of at least
+ * three fragmented frames. This define can be increased to support more
+ * concurrent frames, but it should be noted that each entry can consume about
+ * 2 kB of RAM and increasing cache size will slow down frame reassembly. */
+#define PRISM2_FRAG_CACHE_LEN 4
+
+struct prism2_frag_entry {
+	unsigned long first_frag_time;
+	unsigned int seq;
+	unsigned int last_frag;
+	struct sk_buff *skb;
+	u8 src_addr[ETH_ALEN];
+	u8 dst_addr[ETH_ALEN];
+};
+
+
+struct hostap_cmd_queue {
+	struct list_head list;
+	wait_queue_head_t compl;
+	volatile enum { CMD_SLEEP, CMD_CALLBACK, CMD_COMPLETED } type;
+	void (*callback)(struct net_device *dev, long context, u16 resp0,
+			 u16 res);
+	long context;
+	u16 cmd, param0, param1;
+	u16 resp0, res;
+	volatile int issued, issuing;
+
+	atomic_t usecnt;
+	int del_req;
+};
+
+/* options for hw_shutdown */
+#define HOSTAP_HW_NO_DISABLE BIT(0)
+#define HOSTAP_HW_ENABLE_CMDCOMPL BIT(1)
+
+typedef struct local_info local_info_t;
+
+struct prism2_helper_functions {
+	/* these functions are defined in hardware model specific files
+	 * (hostap_{cs,plx,pci}.c) */
+	int (*card_present)(local_info_t *local);
+	void (*cor_sreset)(local_info_t *local);
+	int (*dev_open)(local_info_t *local);
+	int (*dev_close)(local_info_t *local);
+	void (*genesis_reset)(local_info_t *local, int hcr);
+
+	/* the following functions are from hostap_hw.c, but they may have some
+	 * hardware model specific code */
+
+	/* FIX: low-level commands like cmd might disappear at some point to
+	 * make it easier to change them if needed (e.g., cmd would be replaced
+	 * with write_mif/read_mif/testcmd/inquire); at least get_rid and
+	 * set_rid might move to hostap_{cs,plx,pci}.c */
+	int (*cmd)(struct net_device *dev, u16 cmd, u16 param0, u16 *param1,
+		   u16 *resp0);
+	void (*read_regs)(struct net_device *dev, struct hfa384x_regs *regs);
+	int (*get_rid)(struct net_device *dev, u16 rid, void *buf, int len,
+		       int exact_len);
+	int (*set_rid)(struct net_device *dev, u16 rid, void *buf, int len);
+	int (*hw_enable)(struct net_device *dev, int initial);
+	int (*hw_config)(struct net_device *dev, int initial);
+	void (*hw_reset)(struct net_device *dev);
+	void (*hw_shutdown)(struct net_device *dev, int no_disable);
+	int (*reset_port)(struct net_device *dev);
+	void (*schedule_reset)(local_info_t *local);
+	int (*download)(local_info_t *local,
+			struct prism2_download_param *param);
+	int (*tx)(struct sk_buff *skb, struct net_device *dev);
+	int (*set_tim)(struct net_device *dev, int aid, int set);
+	int (*read_aux)(struct net_device *dev, unsigned addr, int len,
+			u8 *buf);
+
+	int need_tx_headroom; /* number of bytes of headroom needed before
+			       * IEEE 802.11 header */
+	enum { HOSTAP_HW_PCCARD, HOSTAP_HW_PLX, HOSTAP_HW_PCI } hw_type;
+};
+
+
+struct prism2_download_data {
+	u32 dl_cmd;
+	u32 start_addr;
+	u32 num_areas;
+	struct prism2_download_data_area {
+		u32 addr; /* wlan card address */
+		u32 len;
+		u8 *data; /* allocated data */
+	} data[0];
+};
+
+
+#define HOSTAP_MAX_BSS_COUNT 64
+#define MAX_WPA_IE_LEN 64
+
+struct hostap_bss_info {
+	struct list_head list;
+	unsigned long last_update;
+	unsigned int count;
+	u8 bssid[ETH_ALEN];
+	u16 capab_info;
+	u8 ssid[32];
+	size_t ssid_len;
+	u8 wpa_ie[MAX_WPA_IE_LEN];
+	size_t wpa_ie_len;
+	u8 rsn_ie[MAX_WPA_IE_LEN];
+	size_t rsn_ie_len;
+	int chan;
+	int included;
+};
+
+
+/* Per radio private Host AP data - shared by all net device interfaces used
+ * by each radio (wlan#, wlan#ap, wlan#sta, WDS).
+ * ((struct hostap_interface *) netdev_priv(dev))->local points to this
+ * structure. */
+struct local_info {
+	struct module *hw_module;
+	int card_idx;
+	int dev_enabled;
+	int master_dev_auto_open; /* was master device opened automatically */
+	int num_dev_open; /* number of open devices */
+	struct net_device *dev; /* master radio device */
+	struct net_device *ddev; /* main data device */
+	struct list_head hostap_interfaces; /* Host AP interface list (contains
+					     * struct hostap_interface entries)
+					     */
+	rwlock_t iface_lock; /* hostap_interfaces read lock; use write lock
+			      * when removing entries from the list.
+			      * TX and RX paths can use read lock. */
+	spinlock_t cmdlock, baplock, lock;
+	struct semaphore rid_bap_sem;
+	u16 infofid; /* MAC buffer id for info frame */
+	/* txfid, intransmitfid, next_txfid, and next_alloc are protected by
+	 * txfidlock */
+	spinlock_t txfidlock;
+	int txfid_len; /* length of allocated TX buffers */
+	u16 txfid[PRISM2_TXFID_COUNT]; /* buffer IDs for TX frames */
+	/* buffer IDs for intransmit frames or PRISM2_TXFID_EMPTY if
+	 * corresponding txfid is free for next TX frame */
+	u16 intransmitfid[PRISM2_TXFID_COUNT];
+	int next_txfid; /* index to the next txfid to be checked for
+			 * availability */
+	int next_alloc; /* index to the next intransmitfid to be checked for
+			 * allocation events */
+
+	/* bitfield for atomic bitops */
+#define HOSTAP_BITS_TRANSMIT 0
+#define HOSTAP_BITS_BAP_TASKLET 1
+#define HOSTAP_BITS_BAP_TASKLET2 2
+	long bits;
+
+	struct ap_data *ap;
+
+	char essid[MAX_SSID_LEN + 1];
+	char name[MAX_NAME_LEN + 1];
+	int name_set;
+	u16 channel_mask; /* mask of allowed channels */
+	u16 scan_channel_mask; /* mask of channels to be scanned */
+	struct comm_tallies_sums comm_tallies;
+	struct net_device_stats stats;
+	struct proc_dir_entry *proc;
+	int iw_mode; /* operating mode (IW_MODE_*) */
+	int pseudo_adhoc; /* 0: IW_MODE_ADHOC is real 802.11 compliant IBSS
+			   * 1: IW_MODE_ADHOC is "pseudo IBSS" */
+	char bssid[ETH_ALEN];
+	int channel;
+	int beacon_int;
+	int dtim_period;
+	int mtu;
+	int frame_dump; /* dump RX/TX frame headers, PRISM2_DUMP_ flags */
+	int fw_tx_rate_control;
+	u16 tx_rate_control;
+	u16 basic_rates;
+	int hw_resetting;
+	int hw_ready;
+	int hw_reset_tries; /* how many times reset has been tried */
+	int hw_downloading;
+	int shutdown;
+	int pri_only;
+	int no_pri; /* no PRI f/w present */
+	int sram_type; /* 8 = x8 SRAM, 16 = x16 SRAM, -1 = unknown */
+
+	enum {
+		PRISM2_TXPOWER_AUTO = 0, PRISM2_TXPOWER_OFF,
+		PRISM2_TXPOWER_FIXED, PRISM2_TXPOWER_UNKNOWN
+	} txpower_type;
+	int txpower; /* if txpower_type == PRISM2_TXPOWER_FIXED */
+
+	/* command queue for hfa384x_cmd(); protected with cmdlock */
+	struct list_head cmd_queue;
+	/* max_len for cmd_queue; in addition, cmd_callback can use two
+	 * additional entries to prevent sleeping commands from stopping
+	 * transmits */
+#define HOSTAP_CMD_QUEUE_MAX_LEN 16
+	int cmd_queue_len; /* number of entries in cmd_queue */
+
+	/* if card timeout is detected in interrupt context, reset_queue is
+	 * used to schedule card resetting to be done in user context */
+	struct work_struct reset_queue;
+
+	/* For scheduling a change of the promiscuous mode RID */
+	int is_promisc;
+	struct work_struct set_multicast_list_queue;
+
+	struct work_struct set_tim_queue;
+	struct list_head set_tim_list;
+	spinlock_t set_tim_lock;
+
+	int wds_max_connections;
+	int wds_connections;
+#define HOSTAP_WDS_BROADCAST_RA BIT(0)
+#define HOSTAP_WDS_AP_CLIENT BIT(1)
+#define HOSTAP_WDS_STANDARD_FRAME BIT(2)
+	u32 wds_type;
+	u16 tx_control; /* flags to be used in TX description */
+	int manual_retry_count; /* -1 = use f/w default; otherwise retry count
+				 * to be used with all frames */
+
+	struct iw_statistics wstats;
+	unsigned long scan_timestamp; /* Time started to scan */
+	enum {
+		PRISM2_MONITOR_80211 = 0, PRISM2_MONITOR_PRISM = 1,
+		PRISM2_MONITOR_CAPHDR = 2
+	} monitor_type;
+	int (*saved_eth_header_parse)(struct sk_buff *skb,
+				      unsigned char *haddr);
+	int monitor_allow_fcserr;
+
+	int hostapd; /* whether user space daemon, hostapd, is used for AP
+		      * management */
+	int hostapd_sta; /* whether hostapd is used with an extra STA interface
+			  */
+	struct net_device *apdev;
+	struct net_device_stats apdevstats;
+
+	char assoc_ap_addr[ETH_ALEN];
+	struct net_device *stadev;
+	struct net_device_stats stadevstats;
+
+#define WEP_KEYS 4
+#define WEP_KEY_LEN 13
+	struct ieee80211_crypt_data *crypt[WEP_KEYS];
+	int tx_keyidx; /* default TX key index (crypt[tx_keyidx]) */
+	struct timer_list crypt_deinit_timer;
+	struct list_head crypt_deinit_list;
+
+	int open_wep; /* allow unencrypted frames */
+	int host_encrypt;
+	int host_decrypt;
+	int privacy_invoked; /* force privacy invoked flag even if no keys are
+			      * configured */
+	int fw_encrypt_ok; /* whether firmware-based WEP encrypt is working
+			    * in Host AP mode (STA f/w 1.4.9 or newer) */
+	int bcrx_sta_key; /* use individual keys to override default keys even
+			   * with RX of broad/multicast frames */
+
+	struct prism2_frag_entry frag_cache[PRISM2_FRAG_CACHE_LEN];
+	unsigned int frag_next_idx;
+
+	int ieee_802_1x; /* is IEEE 802.1X used */
+
+	int antsel_tx, antsel_rx;
+	int rts_threshold; /* dot11RTSThreshold */
+	int fragm_threshold; /* dot11FragmentationThreshold */
+	int auth_algs; /* PRISM2_AUTH_ flags */
+
+	int enh_sec; /* cnfEnhSecurity options (broadcast SSID hide/ignore) */
+	int tallies32; /* 32-bit tallies in use */
+
+	struct prism2_helper_functions *func;
+
+	u8 *pda;
+	int fw_ap;
+#define PRISM2_FW_VER(major, minor, variant) \
+(((major) << 16) | ((minor) << 8) | variant)
+	u32 sta_fw_ver;
+
+	/* Tasklets for handling hardware IRQ related operations outside hw IRQ
+	 * handler */
+	struct tasklet_struct bap_tasklet;
+
+	struct tasklet_struct info_tasklet;
+	struct sk_buff_head info_list; /* info frames as skb's for
+					* info_tasklet */
+
+	struct hostap_tx_callback_info *tx_callback; /* registered TX callbacks
+						      */
+
+	struct tasklet_struct rx_tasklet;
+	struct sk_buff_head rx_list;
+
+	struct tasklet_struct sta_tx_exc_tasklet;
+	struct sk_buff_head sta_tx_exc_list;
+
+	int host_roaming;
+	unsigned long last_join_time; /* time of last JoinRequest */
+	struct hfa384x_hostscan_result *last_scan_results;
+	int last_scan_results_count;
+	enum { PRISM2_SCAN, PRISM2_HOSTSCAN } last_scan_type;
+	struct work_struct info_queue;
+	long pending_info; /* bit field of pending info_queue items */
+#define PRISM2_INFO_PENDING_LINKSTATUS 0
+#define PRISM2_INFO_PENDING_SCANRESULTS 1
+	int prev_link_status; /* previous received LinkStatus info */
+	int prev_linkstatus_connected;
+	u8 preferred_ap[6]; /* use this AP if possible */
+
+#ifdef PRISM2_CALLBACK
+	void *callback_data; /* Can be used in callbacks; e.g., allocate
+			      * on enable event and free on disable event.
+			      * Host AP driver code does not touch this. */
+#endif /* PRISM2_CALLBACK */
+
+	wait_queue_head_t hostscan_wq;
+
+	/* Passive scan in Host AP mode */
+	struct timer_list passive_scan_timer;
+	int passive_scan_interval; /* in seconds, 0 = disabled */
+	int passive_scan_channel;
+	enum { PASSIVE_SCAN_WAIT, PASSIVE_SCAN_LISTEN } passive_scan_state;
+
+	struct timer_list tick_timer;
+	unsigned long last_tick_timer;
+	unsigned int sw_tick_stuck;
+
+	/* commsQuality / dBmCommsQuality data from periodic polling; only
+	 * valid for Managed and Ad-hoc modes */
+	unsigned long last_comms_qual_update;
+	int comms_qual; /* in some odd unit.. */
+	int avg_signal; /* in dB (note: negative) */
+	int avg_noise; /* in dB (note: negative) */
+	struct work_struct comms_qual_update;
+
+	/* RSSI to dBm adjustment (for RX descriptor fields) */
+	int rssi_to_dBm; /* subtract from RSSI to get approximate dBm value */
+
+	/* BSS list / protected by local->lock */
+	struct list_head bss_list;
+	int num_bss_info;
+	int wpa; /* WPA support enabled */
+	int tkip_countermeasures;
+	int drop_unencrypted;
+	/* Generic IEEE 802.11 info element to be added to
+	 * ProbeResp/Beacon/(Re)AssocReq */
+	u8 *generic_elem;
+	size_t generic_elem_len;
+
+#ifdef PRISM2_DOWNLOAD_SUPPORT
+	/* Persistent volatile download data */
+	struct prism2_download_data *dl_pri;
+	struct prism2_download_data *dl_sec;
+#endif /* PRISM2_DOWNLOAD_SUPPORT */
+
+#ifdef PRISM2_IO_DEBUG
+#define PRISM2_IO_DEBUG_SIZE 10000
+	u32 io_debug[PRISM2_IO_DEBUG_SIZE];
+	int io_debug_head;
+	int io_debug_enabled;
+#endif /* PRISM2_IO_DEBUG */
+
+	/* Pointer to hardware model specific (cs,pci,plx) private data. */
+	void *hw_priv;
+};
+
+
+/* Per interface private Host AP data
+ * Allocated for each net device that Host AP uses (wlan#, wlan#ap, wlan#sta,
+ * WDS) and netdev_priv(dev) points to this structure. */
+struct hostap_interface {
+	struct list_head list; /* list entry in Host AP interface list */
+	struct net_device *dev; /* pointer to this device */
+	struct local_info *local; /* pointer to shared private data */
+	struct net_device_stats stats;
+	struct iw_spy_data spy_data; /* iwspy support */
+	struct iw_public_data wireless_data;
+
+	enum {
+		HOSTAP_INTERFACE_MASTER,
+		HOSTAP_INTERFACE_MAIN,
+		HOSTAP_INTERFACE_AP,
+		HOSTAP_INTERFACE_STA,
+		HOSTAP_INTERFACE_WDS,
+	} type;
+
+	union {
+		struct hostap_interface_wds {
+			u8 remote_addr[ETH_ALEN];
+		} wds;
+	} u;
+};
+
+
+#define HOSTAP_SKB_TX_DATA_MAGIC 0xf08a36a2
+
+/*
+ * TX meta data - stored in skb->cb buffer, so this must not be increased over
+ * the 40-byte limit
+ */
+struct hostap_skb_tx_data {
+	u32 magic; /* HOSTAP_SKB_TX_DATA_MAGIC */
+	u8 rate; /* transmit rate */
+#define HOSTAP_TX_FLAGS_WDS BIT(0)
+#define HOSTAP_TX_FLAGS_BUFFERED_FRAME BIT(1)
+#define HOSTAP_TX_FLAGS_ADD_MOREDATA BIT(2)
+	u8 flags; /* HOSTAP_TX_FLAGS_* */
+	u16 tx_cb_idx;
+	struct hostap_interface *iface;
+	unsigned long jiffies; /* queueing timestamp */
+	unsigned short ethertype;
+};
+
+
+#ifndef PRISM2_NO_DEBUG
+
+#define DEBUG_FID BIT(0)
+#define DEBUG_PS BIT(1)
+#define DEBUG_FLOW BIT(2)
+#define DEBUG_AP BIT(3)
+#define DEBUG_HW BIT(4)
+#define DEBUG_EXTRA BIT(5)
+#define DEBUG_EXTRA2 BIT(6)
+#define DEBUG_PS2 BIT(7)
+#define DEBUG_MASK (DEBUG_PS | DEBUG_AP | DEBUG_HW | DEBUG_EXTRA)
+#define PDEBUG(n, args...) \
+do { if ((n) & DEBUG_MASK) printk(KERN_DEBUG args); } while (0)
+#define PDEBUG2(n, args...) \
+do { if ((n) & DEBUG_MASK) printk(args); } while (0)
+
+#else /* PRISM2_NO_DEBUG */
+
+#define PDEBUG(n, args...)
+#define PDEBUG2(n, args...)
+
+#endif /* PRISM2_NO_DEBUG */
+
+enum { BAP0 = 0, BAP1 = 1 };
+
+#define PRISM2_IO_DEBUG_CMD_INB 0
+#define PRISM2_IO_DEBUG_CMD_INW 1
+#define PRISM2_IO_DEBUG_CMD_INSW 2
+#define PRISM2_IO_DEBUG_CMD_OUTB 3
+#define PRISM2_IO_DEBUG_CMD_OUTW 4
+#define PRISM2_IO_DEBUG_CMD_OUTSW 5
+#define PRISM2_IO_DEBUG_CMD_ERROR 6
+#define PRISM2_IO_DEBUG_CMD_INTERRUPT 7
+
+#ifdef PRISM2_IO_DEBUG
+
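+/* Each logged I/O operation consumes two consecutive io_debug ring entries:
+ * a jiffies timestamp followed by a packed word with the operation type in
+ * bits 31..24, the register offset in bits 23..16, and the value in bits
+ * 15..0. */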
+#define PRISM2_IO_DEBUG_ENTRY(cmd, reg, value) \
+(((cmd) << 24) | ((reg) << 16) | value)
+
+static inline void prism2_io_debug_add(struct net_device *dev, int cmd,
+				       int reg, int value)
+{
+	struct hostap_interface *iface = netdev_priv(dev);
+	local_info_t *local = iface->local;
+
+	if (!local->io_debug_enabled)
+		return;
+
+	local->io_debug[local->io_debug_head] = jiffies & 0xffffffff;
+	if (++local->io_debug_head >= PRISM2_IO_DEBUG_SIZE)
+		local->io_debug_head = 0;
+	local->io_debug[local->io_debug_head] =
+		PRISM2_IO_DEBUG_ENTRY(cmd, reg, value);
+	if (++local->io_debug_head >= PRISM2_IO_DEBUG_SIZE)
+		local->io_debug_head = 0;
+}
+
+
+static inline void prism2_io_debug_error(struct net_device *dev, int err)
+{
+	struct hostap_interface *iface = netdev_priv(dev);
+	local_info_t *local = iface->local;
+	unsigned long flags;
+
+	if (!local->io_debug_enabled)
+		return;
+
+	spin_lock_irqsave(&local->lock, flags);
+	prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_ERROR, 0, err);
+	if (local->io_debug_enabled == 1) {
+		local->io_debug_enabled = 0;
+		printk(KERN_DEBUG "%s: I/O debug stopped\n", dev->name);
+	}
+	spin_unlock_irqrestore(&local->lock, flags);
+}
+
+#else /* PRISM2_IO_DEBUG */
+
+static inline void prism2_io_debug_add(struct net_device *dev, int cmd,
+				       int reg, int value)
+{
+}
+
+static inline void prism2_io_debug_error(struct net_device *dev, int err)
+{
+}
+
+#endif /* PRISM2_IO_DEBUG */
+
+
+#ifdef PRISM2_CALLBACK
+enum {
+	/* Called when card is enabled */
+	PRISM2_CALLBACK_ENABLE,
+
+	/* Called when card is disabled */
+	PRISM2_CALLBACK_DISABLE,
+
+	/* Called when RX/TX starts/ends */
+	PRISM2_CALLBACK_RX_START, PRISM2_CALLBACK_RX_END,
+	PRISM2_CALLBACK_TX_START, PRISM2_CALLBACK_TX_END
+};
+void prism2_callback(local_info_t *local, int event);
+#else /* PRISM2_CALLBACK */
+#define prism2_callback(d, e) do { } while (0)
+#endif /* PRISM2_CALLBACK */
+
+#endif /* __KERNEL__ */
+
+#endif /* HOSTAP_WLAN_H */
diff --git a/drivers/net/wireless/ieee802_11.h b/drivers/net/wireless/ieee802_11.h
deleted file mode 100644
--- a/drivers/net/wireless/ieee802_11.h
+++ /dev/null
@@ -1,78 +0,0 @@
-#ifndef _IEEE802_11_H
-#define _IEEE802_11_H
-
-#define IEEE802_11_DATA_LEN		2304
-/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section
-   6.2.1.1.2.
-
-   The figure in section 7.1.2 suggests a body size of up to 2312
-   bytes is allowed, which is a bit confusing, I suspect this
-   represents the 2304 bytes of real data, plus a possible 8 bytes of
-   WEP IV and ICV. (this interpretation suggested by Ramiro Barreiro) */
-
-
-#define IEEE802_11_HLEN			30
-#define IEEE802_11_FRAME_LEN		(IEEE802_11_DATA_LEN + IEEE802_11_HLEN)
-
-struct ieee802_11_hdr {
-	u16 frame_ctl;
-	u16 duration_id;
-	u8 addr1[ETH_ALEN];
-	u8 addr2[ETH_ALEN];
-	u8 addr3[ETH_ALEN];
-	u16 seq_ctl;
-	u8 addr4[ETH_ALEN];
-} __attribute__ ((packed));
-
-/* Frame control field constants */
-#define IEEE802_11_FCTL_VERS		0x0002
-#define IEEE802_11_FCTL_FTYPE		0x000c
-#define IEEE802_11_FCTL_STYPE		0x00f0
-#define IEEE802_11_FCTL_TODS		0x0100
-#define IEEE802_11_FCTL_FROMDS		0x0200
-#define IEEE802_11_FCTL_MOREFRAGS	0x0400
-#define IEEE802_11_FCTL_RETRY		0x0800
-#define IEEE802_11_FCTL_PM		0x1000
-#define IEEE802_11_FCTL_MOREDATA	0x2000
-#define IEEE802_11_FCTL_WEP		0x4000
-#define IEEE802_11_FCTL_ORDER		0x8000
-
-#define IEEE802_11_FTYPE_MGMT		0x0000
-#define IEEE802_11_FTYPE_CTL		0x0004
-#define IEEE802_11_FTYPE_DATA		0x0008
-
-/* management */
-#define IEEE802_11_STYPE_ASSOC_REQ	0x0000
-#define IEEE802_11_STYPE_ASSOC_RESP 	0x0010
-#define IEEE802_11_STYPE_REASSOC_REQ	0x0020
-#define IEEE802_11_STYPE_REASSOC_RESP	0x0030
-#define IEEE802_11_STYPE_PROBE_REQ	0x0040
-#define IEEE802_11_STYPE_PROBE_RESP	0x0050
-#define IEEE802_11_STYPE_BEACON		0x0080
-#define IEEE802_11_STYPE_ATIM		0x0090
-#define IEEE802_11_STYPE_DISASSOC	0x00A0
-#define IEEE802_11_STYPE_AUTH		0x00B0
-#define IEEE802_11_STYPE_DEAUTH		0x00C0
-
-/* control */
-#define IEEE802_11_STYPE_PSPOLL		0x00A0
-#define IEEE802_11_STYPE_RTS		0x00B0
-#define IEEE802_11_STYPE_CTS		0x00C0
-#define IEEE802_11_STYPE_ACK		0x00D0
-#define IEEE802_11_STYPE_CFEND		0x00E0
-#define IEEE802_11_STYPE_CFENDACK	0x00F0
-
-/* data */
-#define IEEE802_11_STYPE_DATA		0x0000
-#define IEEE802_11_STYPE_DATA_CFACK	0x0010
-#define IEEE802_11_STYPE_DATA_CFPOLL	0x0020
-#define IEEE802_11_STYPE_DATA_CFACKPOLL	0x0030
-#define IEEE802_11_STYPE_NULLFUNC	0x0040
-#define IEEE802_11_STYPE_CFACK		0x0050
-#define IEEE802_11_STYPE_CFPOLL		0x0060
-#define IEEE802_11_STYPE_CFACKPOLL	0x0070
-
-#define IEEE802_11_SCTL_FRAG		0x000F
-#define IEEE802_11_SCTL_SEQ		0xFFF0
-
-#endif /* _IEEE802_11_H */
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
new file mode 100644
--- /dev/null
+++ b/drivers/net/wireless/ipw2100.c
@@ -0,0 +1,8641 @@
+/******************************************************************************
+
+  Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+
+  The full GNU General Public License is included in this distribution in the
+  file called LICENSE.
+
+  Contact Information:
+  James P. Ketrenos <ipw2100-admin@linux.intel.com>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+  Portions of this file are based on the sample_* files provided by Wireless
+  Extensions 0.26 package and copyright (c) 1997-2003 Jean Tourrilhes
+  <jt@hpl.hp.com>
+
+  Portions of this file are based on the Host AP project,
+  Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+    <jkmaline@cc.hut.fi>
+  Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+
+  Portions of ipw2100_mod_firmware_load, ipw2100_do_mod_firmware_load, and
+  ipw2100_fw_load are loosely based on drivers/sound/sound_firmware.c
+  available in the 2.4.25 kernel sources, and are copyright (c) Alan Cox
+
+******************************************************************************/
+/*
+
+ Initial driver on which this is based was developed by Janusz Gorycki,
+ Maciej Urbaniak, and Maciej Sosnowski.
+
+ Promiscuous mode support added by Jacek Wysoczynski and Maciej Urbaniak.
+
+Theory of Operation
+
+Tx - Commands and Data
+
+Firmware and host share a circular queue of Transmit Buffer Descriptors (TBDs)
+Each TBD contains a pointer to the physical (dma_addr_t) address of data being
+sent to the firmware as well as the length of the data.
+
+The host writes to the TBD queue at the WRITE index.  The WRITE index points
+to the _next_ packet to be written and is advanced once the TBD has been
+filled.  (A simplified sketch of this index handling follows this comment.)
+
+The firmware pulls from the TBD queue at the READ index.  The READ index points
+to the entry currently being read, and is advanced once the firmware is
+done with a packet.
+
+When data is sent to the firmware, the first TBD is used to indicate to the
+firmware if a Command or Data is being sent.  If it is Command, all of the
+command information is contained within the physical address referred to by the
+TBD.  If it is Data, the first TBD indicates the type of data packet, number
+of fragments, etc.  The next TBD then refers to the actual packet location.
+
+The Tx flow cycle is as follows:
+
+1) ipw2100_tx() is called by kernel with SKB to transmit
+2) Packet is moved from the tx_free_list and appended to the transmit pending
+   list (tx_pend_list)
+3) work is scheduled to move pending packets into the shared circular queue.
+4) when placing packet in the circular queue, the incoming SKB is DMA mapped
+   to a physical address.  That address is entered into a TBD.  Two TBDs are
+   filled out.  The first indicating a data packet, the second referring to the
+   actual payload data.
+5) the packet is removed from tx_pend_list and placed on the end of the
+   firmware pending list (fw_pend_list)
+6) firmware is notified that the WRITE index has been updated
+7) Once the firmware has processed the TBD, INTA is triggered.
+8) For each Tx interrupt received from the firmware, the READ index is checked
+   to see which TBDs are done being processed.
+9) For each TBD that has been processed, the ISR pulls the oldest packet
+   from the fw_pend_list.
+10) The packet structure contained in the fw_pend_list is then used
+   to unmap the DMA address and to free the SKB originally passed to the driver
+   from the kernel.
+11) The packet structure is placed onto the tx_free_list
+
+The above steps are the same for commands, only the msg_free_list/msg_pend_list
+are used instead of tx_free_list/tx_pend_list
+
+...
+
+Critical Sections / Locking :
+
+There are two locks utilized.  The first is the low level lock (priv->low_lock)
+that protects the following:
+
+- Access to the Tx/Rx queue lists via priv->low_lock. The lists are as follows:
+
+  tx_free_list : Holds pre-allocated Tx buffers.
+    TAIL modified in __ipw2100_tx_process()
+    HEAD modified in ipw2100_tx()
+
+  tx_pend_list : Holds used Tx buffers waiting to go into the TBD ring
+    TAIL modified in ipw2100_tx()
+    HEAD modified by X__ipw2100_tx_send_data()
+
+  msg_free_list : Holds pre-allocated Msg (Command) buffers
+    TAIL modified in __ipw2100_tx_process()
+    HEAD modified in ipw2100_hw_send_command()
+
+  msg_pend_list : Holds used Msg buffers waiting to go into the TBD ring
+    TAIL modified in ipw2100_hw_send_command()
+    HEAD modified in X__ipw2100_tx_send_commands()
+
+  The flow of data on the TX side is as follows:
+
+  MSG_FREE_LIST + COMMAND => MSG_PEND_LIST => TBD => MSG_FREE_LIST
+  TX_FREE_LIST + DATA => TX_PEND_LIST => TBD => TX_FREE_LIST
+
+  The methods that work on the TBD ring are protected via priv->low_lock.
+
+- The internal data state of the device itself
+- Access to the firmware read/write indexes for the BD queues
+  and associated logic
+
+All external entry functions are locked with the priv->action_sem to ensure
+that only one external action is invoked at a time.
+
+
+*/
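+
+/*
+ * Illustrative sketch only (not part of this driver): a minimal model of
+ * the circular TBD queue indexing described in the comment above.  The
+ * structure and helpers are hypothetical and exist purely to show how the
+ * WRITE index advances with wraparound and how a full ring is detected;
+ * the driver keeps its real ring state in its own queue structures.
+ */
+#if 0	/* documentation-only example, never compiled */
+struct tbd_ring_sketch {
+	unsigned int entries;	/* number of TBDs in the ring */
+	unsigned int next;	/* WRITE index: next slot the host fills */
+	unsigned int oldest;	/* READ index: slot the firmware reads next */
+	unsigned int available;	/* free slots remaining */
+};
+
+/* Host side: claim one TBD slot and advance the WRITE index. */
+static int tbd_ring_sketch_put(struct tbd_ring_sketch *q)
+{
+	if (q->available == 0)
+		return -1;			/* ring full; wait for firmware */
+	q->next = (q->next + 1) % q->entries;	/* advance with wraparound */
+	q->available--;
+	return 0;
+}
+
+/* Completion side: retire one processed TBD and advance the READ index. */
+static void tbd_ring_sketch_complete(struct tbd_ring_sketch *q)
+{
+	q->oldest = (q->oldest + 1) % q->entries;
+	q->available++;
+}
+#endif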
+
+#include <linux/compiler.h>
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/in6.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/kmod.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/proc_fs.h>
+#include <linux/skbuff.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#define __KERNEL_SYSCALLS__
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/unistd.h>
+#include <linux/stringify.h>
+#include <linux/tcp.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/time.h>
+#include <linux/firmware.h>
+#include <linux/acpi.h>
+#include <linux/ctype.h>
+
+#include "ipw2100.h"
+
+#define IPW2100_VERSION "1.1.0"
+
+#define DRV_NAME	"ipw2100"
+#define DRV_VERSION	IPW2100_VERSION
+#define DRV_DESCRIPTION	"Intel(R) PRO/Wireless 2100 Network Driver"
+#define DRV_COPYRIGHT	"Copyright(c) 2003-2004 Intel Corporation"
+
+
+/* Debugging stuff */
+#ifdef CONFIG_IPW_DEBUG
+#define CONFIG_IPW2100_RX_DEBUG   /* Reception debugging */
+#endif
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT);
+MODULE_LICENSE("GPL");
+
+static int debug = 0;
+static int mode = 0;
+static int channel = 0;
+static int associate = 1;
+static int disable = 0;
+#ifdef CONFIG_PM
+static struct ipw2100_fw ipw2100_firmware;
+#endif
+
+#include <linux/moduleparam.h>
+module_param(debug, int, 0444);
+module_param(mode, int, 0444);
+module_param(channel, int, 0444);
+module_param(associate, int, 0444);
+module_param(disable, int, 0444);
+
+MODULE_PARM_DESC(debug, "debug level");
+MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
+MODULE_PARM_DESC(channel, "channel");
+MODULE_PARM_DESC(associate, "auto associate when scanning (default on)");
+MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
+
+u32 ipw2100_debug_level = IPW_DL_NONE;
+
+#ifdef CONFIG_IPW_DEBUG
+static const char *command_types[] = {
+	"undefined",
+	"unused", /* HOST_ATTENTION */
+	"HOST_COMPLETE",
+	"unused", /* SLEEP */
+	"unused", /* HOST_POWER_DOWN */
+	"unused",
+	"SYSTEM_CONFIG",
+	"unused", /* SET_IMR */
+	"SSID",
+	"MANDATORY_BSSID",
+	"AUTHENTICATION_TYPE",
+	"ADAPTER_ADDRESS",
+	"PORT_TYPE",
+	"INTERNATIONAL_MODE",
+	"CHANNEL",
+	"RTS_THRESHOLD",
+	"FRAG_THRESHOLD",
+	"POWER_MODE",
+	"TX_RATES",
+	"BASIC_TX_RATES",
+	"WEP_KEY_INFO",
+	"unused",
+	"unused",
+	"unused",
+	"unused",
+	"WEP_KEY_INDEX",
+	"WEP_FLAGS",
+	"ADD_MULTICAST",
+	"CLEAR_ALL_MULTICAST",
+	"BEACON_INTERVAL",
+	"ATIM_WINDOW",
+	"CLEAR_STATISTICS",
+	"undefined",
+	"undefined",
+	"undefined",
+	"undefined",
+	"TX_POWER_INDEX",
+	"undefined",
+	"undefined",
+	"undefined",
+	"undefined",
+	"undefined",
+	"undefined",
+	"BROADCAST_SCAN",
+	"CARD_DISABLE",
+	"PREFERRED_BSSID",
+	"SET_SCAN_OPTIONS",
+	"SCAN_DWELL_TIME",
+	"SWEEP_TABLE",
+	"AP_OR_STATION_TABLE",
+	"GROUP_ORDINALS",
+	"SHORT_RETRY_LIMIT",
+	"LONG_RETRY_LIMIT",
+	"unused", /* SAVE_CALIBRATION */
+	"unused", /* RESTORE_CALIBRATION */
+	"undefined",
+	"undefined",
+	"undefined",
+	"HOST_PRE_POWER_DOWN",
+	"unused", /* HOST_INTERRUPT_COALESCING */
+	"undefined",
+	"CARD_DISABLE_PHY_OFF",
+	"MSDU_TX_RATES"
+	"undefined",
+	"undefined",
+	"SET_STATION_STAT_BITS",
+	"CLEAR_STATIONS_STAT_BITS",
+	"LEAP_ROGUE_MODE",
+	"SET_SECURITY_INFORMATION",
+	"DISASSOCIATION_BSSID",
+	"SET_WPA_ASS_IE"
+};
+#endif
+
+
+/* Pre-decl until we get the code solid and then we can clean it up */
+static void X__ipw2100_tx_send_commands(struct ipw2100_priv *priv);
+static void X__ipw2100_tx_send_data(struct ipw2100_priv *priv);
+static int ipw2100_adapter_setup(struct ipw2100_priv *priv);
+
+static void ipw2100_queues_initialize(struct ipw2100_priv *priv);
+static void ipw2100_queues_free(struct ipw2100_priv *priv);
+static int ipw2100_queues_allocate(struct ipw2100_priv *priv);
+
+
+static inline void read_register(struct net_device *dev, u32 reg, u32 *val)
+{
+	*val = readl((void *)(dev->base_addr + reg));
+	IPW_DEBUG_IO("r: 0x%08X => 0x%08X\n", reg, *val);
+}
+
+static inline void write_register(struct net_device *dev, u32 reg, u32 val)
+{
+	writel(val, (void *)(dev->base_addr + reg));
+	IPW_DEBUG_IO("w: 0x%08X <= 0x%08X\n", reg, val);
+}
+
+static inline void read_register_word(struct net_device *dev, u32 reg, u16 *val)
+{
+	*val = readw((void *)(dev->base_addr + reg));
+	IPW_DEBUG_IO("r: 0x%08X => %04X\n", reg, *val);
+}
+
+static inline void read_register_byte(struct net_device *dev, u32 reg, u8 *val)
+{
+	*val = readb((void *)(dev->base_addr + reg));
+	IPW_DEBUG_IO("r: 0x%08X => %02X\n", reg, *val);
+}
+
+static inline void write_register_word(struct net_device *dev, u32 reg, u16 val)
+{
+	writew(val, (void *)(dev->base_addr + reg));
+	IPW_DEBUG_IO("w: 0x%08X <= %04X\n", reg, val);
+}
+
+
+static inline void write_register_byte(struct net_device *dev, u32 reg, u8 val)
+{
+	writeb(val, (void *)(dev->base_addr + reg));
+	IPW_DEBUG_IO("w: 0x%08X =< %02X\n", reg, val);
+}
+
+static inline void read_nic_dword(struct net_device *dev, u32 addr, u32 *val)
+{
+	write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
+		       addr & IPW_REG_INDIRECT_ADDR_MASK);
+	read_register(dev, IPW_REG_INDIRECT_ACCESS_DATA, val);
+}
+
+static inline void write_nic_dword(struct net_device *dev, u32 addr, u32 val)
+{
+	write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
+		       addr & IPW_REG_INDIRECT_ADDR_MASK);
+	write_register(dev, IPW_REG_INDIRECT_ACCESS_DATA, val);
+}
+
+static inline void read_nic_word(struct net_device *dev, u32 addr, u16 *val)
+{
+	write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
+		       addr & IPW_REG_INDIRECT_ADDR_MASK);
+	read_register_word(dev, IPW_REG_INDIRECT_ACCESS_DATA, val);
+}
+
+static inline void write_nic_word(struct net_device *dev, u32 addr, u16 val)
+{
+	write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
+		       addr & IPW_REG_INDIRECT_ADDR_MASK);
+	write_register_word(dev, IPW_REG_INDIRECT_ACCESS_DATA, val);
+}
+
+static inline void read_nic_byte(struct net_device *dev, u32 addr, u8 *val)
+{
+	write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
+		       addr & IPW_REG_INDIRECT_ADDR_MASK);
+	read_register_byte(dev, IPW_REG_INDIRECT_ACCESS_DATA, val);
+}
+
+static inline void write_nic_byte(struct net_device *dev, u32 addr, u8 val)
+{
+	write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
+		       addr & IPW_REG_INDIRECT_ADDR_MASK);
+	write_register_byte(dev, IPW_REG_INDIRECT_ACCESS_DATA, val);
+}
+
+static inline void write_nic_auto_inc_address(struct net_device *dev, u32 addr)
+{
+	write_register(dev, IPW_REG_AUTOINCREMENT_ADDRESS,
+		       addr & IPW_REG_INDIRECT_ADDR_MASK);
+}
+
+static inline void write_nic_dword_auto_inc(struct net_device *dev, u32 val)
+{
+	write_register(dev, IPW_REG_AUTOINCREMENT_DATA, val);
+}
+
+static inline void write_nic_memory(struct net_device *dev, u32 addr, u32 len,
+				    const u8 *buf)
+{
+	u32 aligned_addr;
+	u32 aligned_len;
+	u32 dif_len;
+	u32 i;
+
+	/* write the unaligned leading bytes one at a time */
+	aligned_addr = addr & (~0x3);
+	dif_len = addr - aligned_addr;
+	if (dif_len) {
+		/* Start writing at aligned_addr + dif_len */
+		write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
+			       aligned_addr);
+		for (i = dif_len; i < 4; i++, buf++)
+			write_register_byte(
+				dev, IPW_REG_INDIRECT_ACCESS_DATA + i,
+				*buf);
+
+		len -= dif_len;
+		aligned_addr += 4;
+	}
+
+	/* write DWs through autoincrement registers */
+	write_register(dev, IPW_REG_AUTOINCREMENT_ADDRESS,
+		       aligned_addr);
+	aligned_len = len & (~0x3);
+	for (i = 0; i < aligned_len; i += 4, buf += 4, aligned_addr += 4)
+		write_register(
+			dev, IPW_REG_AUTOINCREMENT_DATA, *(u32 *)buf);
+
+	/* write the trailing bytes */
+	dif_len = len - aligned_len;
+	write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS, aligned_addr);
+	for (i = 0; i < dif_len; i++, buf++)
+		write_register_byte(
+			dev, IPW_REG_INDIRECT_ACCESS_DATA + i, *buf);
+}
+
+static inline void read_nic_memory(struct net_device *dev, u32 addr, u32 len,
+				   u8 *buf)
+{
+	u32 aligned_addr;
+	u32 aligned_len;
+	u32 dif_len;
+	u32 i;
+
+	/* read the unaligned leading bytes one at a time */
+	aligned_addr = addr & (~0x3);
+	dif_len = addr - aligned_addr;
+	if (dif_len) {
+		/* Start reading at aligned_addr + dif_len */
+		write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
+			       aligned_addr);
+		for (i = dif_len; i < 4; i++, buf++)
+			read_register_byte(
+				dev, IPW_REG_INDIRECT_ACCESS_DATA + i, buf);
+
+		len -= dif_len;
+		aligned_addr += 4;
+	}
+
+	/* read DWs through autoincrement registers */
+	write_register(dev, IPW_REG_AUTOINCREMENT_ADDRESS,
+		       aligned_addr);
+	aligned_len = len & (~0x3);
+	for (i = 0; i < aligned_len; i += 4, buf += 4, aligned_addr += 4)
+		read_register(dev, IPW_REG_AUTOINCREMENT_DATA,
+			      (u32 *)buf);
+
+	/* read the trailing bytes */
+	dif_len = len - aligned_len;
+	write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
+		       aligned_addr);
+	for (i = 0; i < dif_len; i++, buf++)
+		read_register_byte(dev, IPW_REG_INDIRECT_ACCESS_DATA +
+				   i, buf);
+}
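+
+/*
+ * Illustrative sketch only (not part of this driver): the head/body/tail
+ * split behind write_nic_memory() and read_nic_memory() above.  Unaligned
+ * leading bytes are moved one at a time, the dword-aligned middle goes
+ * through the auto-increment registers, and any trailing bytes are again
+ * moved individually.  The helper name is hypothetical.
+ */
+#if 0	/* documentation-only example, never compiled */
+static u32 nic_memory_split_sketch(u32 addr, u32 len)
+{
+	/* assumes len is at least as large as the unaligned head */
+	u32 head = (4 - (addr & 0x3)) & 0x3;	/* 0..3 unaligned leading bytes */
+	u32 body = (len - head) & ~0x3;		/* whole dwords for auto-increment */
+	u32 tail = len - head - body;		/* 0..3 trailing bytes */
+
+	return head + body + tail;		/* == len: every byte is covered */
+}
+#endif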
+
+static inline int ipw2100_hw_is_adapter_in_system(struct net_device *dev)
+{
+	return (dev->base_addr &&
+		(readl((void *)(dev->base_addr + IPW_REG_DOA_DEBUG_AREA_START))
+		 == IPW_DATA_DOA_DEBUG_VALUE));
+}
+
+int ipw2100_get_ordinal(struct ipw2100_priv *priv, u32 ord,
+			void *val, u32 *len)
+{
+	struct ipw2100_ordinals *ordinals = &priv->ordinals;
+	u32 addr;
+	u32 field_info;
+	u16 field_len;
+	u16 field_count;
+	u32 total_length;
+
+	if (ordinals->table1_addr == 0) {
+		IPW_DEBUG_WARNING(DRV_NAME ": attempt to use fw ordinals "
+		       "before they have been loaded.\n");
+		return -EINVAL;
+	}
+
+	if (IS_ORDINAL_TABLE_ONE(ordinals, ord)) {
+		if (*len < IPW_ORD_TAB_1_ENTRY_SIZE) {
+			*len = IPW_ORD_TAB_1_ENTRY_SIZE;
+
+			IPW_DEBUG_WARNING(DRV_NAME
+			       ": ordinal buffer length too small, need %zd\n",
+			       IPW_ORD_TAB_1_ENTRY_SIZE);
+
+			return -EINVAL;
+		}
+
+		read_nic_dword(priv->net_dev, ordinals->table1_addr + (ord << 2),
+			       &addr);
+		read_nic_dword(priv->net_dev, addr, val);
+
+		*len = IPW_ORD_TAB_1_ENTRY_SIZE;
+
+		return 0;
+	}
+
+	if (IS_ORDINAL_TABLE_TWO(ordinals, ord)) {
+
+		ord -= IPW_START_ORD_TAB_2;
+
+		/* get the address of statistic */
+		read_nic_dword(priv->net_dev, ordinals->table2_addr + (ord << 3),
+			       &addr);
+
+		/* get the second DW of statistics ;
+		 * two 16-bit words - first is length, second is count */
+		read_nic_dword(priv->net_dev,
+			       ordinals->table2_addr + (ord << 3) + sizeof(u32),
+			       &field_info);
+
+		/* get each entry length */
+		field_len = *((u16 *)&field_info);
+
+		/* get number of entries */
+		field_count = *(((u16 *)&field_info) + 1);
+
+		/* abort if not enough memory */
+		total_length = field_len * field_count;
+		if (total_length > *len) {
+			*len = total_length;
+			return -EINVAL;
+		}
+
+		*len = total_length;
+		if (!total_length)
+			return 0;
+
+		/* read the ordinal data from the SRAM */
+		read_nic_memory(priv->net_dev, addr, total_length, val);
+
+		return 0;
+	}
+
+	IPW_DEBUG_WARNING(DRV_NAME ": ordinal %d neither in table 1 nor "
+	       "in table 2\n", ord);
+
+	return -EINVAL;
+}
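+
+/*
+ * Illustrative sketch only (not part of this driver): a table-2 ordinal
+ * entry is two dwords -- the first holds the SRAM address of the data,
+ * and the second packs two 16-bit values, the per-entry length in the low
+ * word and the entry count in the high word, which ipw2100_get_ordinal()
+ * above extracts with pointer casts on a little-endian host.  The helper
+ * name is hypothetical; it shows the equivalent shift-and-mask arithmetic.
+ */
+#if 0	/* documentation-only example, never compiled */
+static void table2_info_decode_sketch(u32 field_info,
+				      u16 *field_len, u16 *field_count)
+{
+	*field_len = field_info & 0xffff;		/* per-entry length */
+	*field_count = (field_info >> 16) & 0xffff;	/* number of entries */
+}
+#endif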
+
+static int ipw2100_set_ordinal(struct ipw2100_priv *priv, u32 ord, u32 *val,
+			       u32 *len)
+{
+	struct ipw2100_ordinals *ordinals = &priv->ordinals;
+	u32 addr;
+
+	if (IS_ORDINAL_TABLE_ONE(ordinals, ord)) {
+		if (*len != IPW_ORD_TAB_1_ENTRY_SIZE) {
+			*len = IPW_ORD_TAB_1_ENTRY_SIZE;
+			IPW_DEBUG_INFO("wrong size\n");
+			return -EINVAL;
+		}
+
+		read_nic_dword(priv->net_dev, ordinals->table1_addr + (ord << 2),
+			       &addr);
+
+		write_nic_dword(priv->net_dev, addr, *val);
+
+		*len = IPW_ORD_TAB_1_ENTRY_SIZE;
+
+		return 0;
+	}
+
+	IPW_DEBUG_INFO("wrong table\n");
+	if (IS_ORDINAL_TABLE_TWO(ordinals, ord))
+		return -EINVAL;
+
+	return -EINVAL;
+}
+
+static char *snprint_line(char *buf, size_t count,
+			  const u8 *data, u32 len, u32 ofs)
+{
+	int out, i, j, l;
+	char c;
+
+	out = snprintf(buf, count, "%08X", ofs);
+
+	for (l = 0, i = 0; i < 2; i++) {
+		out += snprintf(buf + out, count - out, " ");
+		for (j = 0; j < 8 && l < len; j++, l++)
+			out += snprintf(buf + out, count - out, "%02X ",
+					data[(i * 8 + j)]);
+		for (; j < 8; j++)
+			out += snprintf(buf + out, count - out, "   ");
+	}
+
+	out += snprintf(buf + out, count - out, " ");
+	for (l = 0, i = 0; i < 2; i++) {
+		out += snprintf(buf + out, count - out, " ");
+		for (j = 0; j < 8 && l < len; j++, l++) {
+			c = data[(i * 8 + j)];
+			if (!isascii(c) || !isprint(c))
+				c = '.';
+
+			out += snprintf(buf + out, count - out, "%c", c);
+		}
+
+		for (; j < 8; j++)
+			out += snprintf(buf + out, count - out, " ");
+	}
+
+	return buf;
+}
+
+static void printk_buf(int level, const u8 *data, u32 len)
+{
+	char line[81];
+	u32 ofs = 0;
+	if (!(ipw2100_debug_level & level))
+		return;
+
+	while (len) {
+		printk(KERN_DEBUG "%s\n",
+		       snprint_line(line, sizeof(line), &data[ofs],
+				    min(len, 16U), ofs));
+		ofs += 16;
+		len -= min(len, 16U);
+	}
+}
+
+
+
+#define MAX_RESET_BACKOFF 10
+
+static inline void schedule_reset(struct ipw2100_priv *priv)
+{
+	unsigned long now = get_seconds();
+
+	/* If we haven't received a reset request within the backoff period,
+	 * then we can reset the backoff interval so this reset occurs
+	 * immediately */
+	if (priv->reset_backoff &&
+	    (now - priv->last_reset > priv->reset_backoff))
+		priv->reset_backoff = 0;
+
+	priv->last_reset = get_seconds();
+
+	if (!(priv->status & STATUS_RESET_PENDING)) {
+		IPW_DEBUG_INFO("%s: Scheduling firmware restart (%ds).\n",
+			       priv->net_dev->name, priv->reset_backoff);
+		netif_carrier_off(priv->net_dev);
+		netif_stop_queue(priv->net_dev);
+		priv->status |= STATUS_RESET_PENDING;
+		if (priv->reset_backoff)
+			queue_delayed_work(priv->workqueue, &priv->reset_work,
+					   priv->reset_backoff * HZ);
+		else
+			queue_work(priv->workqueue, &priv->reset_work);
+
+		if (priv->reset_backoff < MAX_RESET_BACKOFF)
+			priv->reset_backoff++;
+
+		wake_up_interruptible(&priv->wait_command_queue);
+	} else
+		IPW_DEBUG_INFO("%s: Firmware restart already in progress.\n",
+			       priv->net_dev->name);
+
+}
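+
+/*
+ * Illustrative sketch only (not part of this driver): the reset backoff
+ * used by schedule_reset() above grows by one second per scheduled reset
+ * up to MAX_RESET_BACKOFF, and is dropped back to zero once a full
+ * backoff interval passes with no further reset requests.  The helper
+ * name is hypothetical.
+ */
+#if 0	/* documentation-only example, never compiled */
+static unsigned long reset_backoff_sketch(unsigned long backoff,
+					   unsigned long secs_since_last)
+{
+	if (backoff && secs_since_last > backoff)
+		backoff = 0;			/* quiet period: start over */
+	if (backoff < MAX_RESET_BACKOFF)
+		backoff++;			/* next reset waits one second longer */
+	return backoff;				/* delay, in seconds */
+}
+#endif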
+
+#define HOST_COMPLETE_TIMEOUT (2 * HZ)
+static int ipw2100_hw_send_command(struct ipw2100_priv *priv,
+				   struct host_command * cmd)
+{
+	struct list_head *element;
+	struct ipw2100_tx_packet *packet;
+	unsigned long flags;
+	int err = 0;
+
+	IPW_DEBUG_HC("Sending %s command (#%d), %d bytes\n",
+		     command_types[cmd->host_command], cmd->host_command,
+		     cmd->host_command_length);
+	printk_buf(IPW_DL_HC, (u8*)cmd->host_command_parameters,
+		   cmd->host_command_length);
+
+	spin_lock_irqsave(&priv->low_lock, flags);
+
+	if (priv->fatal_error) {
+		IPW_DEBUG_INFO("Attempt to send command while hardware in fatal error condition.\n");
+		err = -EIO;
+		goto fail_unlock;
+	}
+
+	if (!(priv->status & STATUS_RUNNING)) {
+		IPW_DEBUG_INFO("Attempt to send command while hardware is not running.\n");
+		err = -EIO;
+		goto fail_unlock;
+	}
+
+	if (priv->status & STATUS_CMD_ACTIVE) {
+		IPW_DEBUG_INFO("Attempt to send command while another command is pending.\n");
+		err = -EBUSY;
+		goto fail_unlock;
+	}
+
+	if (list_empty(&priv->msg_free_list)) {
+		IPW_DEBUG_INFO("no available msg buffers\n");
+		goto fail_unlock;
+	}
+
+	priv->status |= STATUS_CMD_ACTIVE;
+	priv->messages_sent++;
+
+	element = priv->msg_free_list.next;
+
+	packet = list_entry(element, struct ipw2100_tx_packet, list);
+	packet->jiffy_start = jiffies;
+
+	/* initialize the firmware command packet */
+	packet->info.c_struct.cmd->host_command_reg = cmd->host_command;
+	packet->info.c_struct.cmd->host_command_reg1 = cmd->host_command1;
+	packet->info.c_struct.cmd->host_command_len_reg = cmd->host_command_length;
+	packet->info.c_struct.cmd->sequence = cmd->host_command_sequence;
+
+	memcpy(packet->info.c_struct.cmd->host_command_params_reg,
+	       cmd->host_command_parameters,
+	       sizeof(packet->info.c_struct.cmd->host_command_params_reg));
+
+	list_del(element);
+	DEC_STAT(&priv->msg_free_stat);
+
+	list_add_tail(element, &priv->msg_pend_list);
+	INC_STAT(&priv->msg_pend_stat);
+
+	X__ipw2100_tx_send_commands(priv);
+	X__ipw2100_tx_send_data(priv);
+
+	spin_unlock_irqrestore(&priv->low_lock, flags);
+
+	/*
+	 * We must wait for this command to complete before another
+	 * command can be sent...  but if we wait longer than
+	 * HOST_COMPLETE_TIMEOUT then there is a problem.
+	 */
+
+	err = wait_event_interruptible_timeout(
+		priv->wait_command_queue, !(priv->status & STATUS_CMD_ACTIVE),
+		HOST_COMPLETE_TIMEOUT);
+
+	if (err == 0) {
+		IPW_DEBUG_INFO("Command completion failed out after %dms.\n",
+			       HOST_COMPLETE_TIMEOUT / (HZ / 100));
+		priv->fatal_error = IPW2100_ERR_MSG_TIMEOUT;
+		priv->status &= ~STATUS_CMD_ACTIVE;
+		schedule_reset(priv);
+		return -EIO;
+	}
+
+	if (priv->fatal_error) {
+		IPW_DEBUG_WARNING("%s: firmware fatal error\n",
+		       priv->net_dev->name);
+		return -EIO;
+	}
+
+	/* !!!!! HACK TEST !!!!!
+	 * When lots of debug trace statements are enabled, the driver
+	 * doesn't seem to have as many firmware restart cycles...
+	 *
+	 * As a test, we're sticking in a 1/100s delay here */
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	schedule_timeout(HZ / 100);
+
+	return 0;
+
+ fail_unlock:
+	spin_unlock_irqrestore(&priv->low_lock, flags);
+
+	return err;
+}
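+
+/*
+ * Illustrative sketch only (not part of this driver): the return value
+ * convention of wait_event_interruptible_timeout() as used above -- it
+ * returns 0 if the timeout expired with the condition still false,
+ * -ERESTARTSYS if a signal interrupted the wait, and otherwise the number
+ * of jiffies remaining when the condition became true.  The helper name
+ * is hypothetical.
+ */
+#if 0	/* documentation-only example, never compiled */
+static int classify_wait_result_sketch(long ret)
+{
+	if (ret == 0)
+		return -ETIMEDOUT;	/* command never completed */
+	if (ret < 0)
+		return ret;		/* interrupted by a signal */
+	return 0;			/* completed with 'ret' jiffies to spare */
+}
+#endif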
+
+
+/*
+ * Verify the values and data access of the hardware
+ * No locks needed or used.  No functions called.
+ */
+static int ipw2100_verify(struct ipw2100_priv *priv)
+{
+	u32 data1, data2;
+	u32 address;
+
+	u32 val1 = 0x76543210;
+	u32 val2 = 0xFEDCBA98;
+
+	/* Domain 0 check - all values should be DOA_DEBUG */
+	for (address = IPW_REG_DOA_DEBUG_AREA_START;
+	     address < IPW_REG_DOA_DEBUG_AREA_END;
+	     address += sizeof(u32)) {
+		read_register(priv->net_dev, address, &data1);
+		if (data1 != IPW_DATA_DOA_DEBUG_VALUE)
+			return -EIO;
+	}
+
+	/* Domain 1 check - use arbitrary read/write compare  */
+	for (address = 0; address < 5; address++) {
+		/* The memory area is not used now */
+		write_register(priv->net_dev, IPW_REG_DOMAIN_1_OFFSET + 0x32,
+			       val1);
+		write_register(priv->net_dev, IPW_REG_DOMAIN_1_OFFSET + 0x36,
+			       val2);
+		read_register(priv->net_dev, IPW_REG_DOMAIN_1_OFFSET + 0x32,
+			      &data1);
+		read_register(priv->net_dev, IPW_REG_DOMAIN_1_OFFSET + 0x36,
+			      &data2);
+		if (val1 == data1 && val2 == data2)
+			return 0;
+	}
+
+	return -EIO;
+}
+
+/*
+ *
+ * Loop until the CARD_DISABLED bit is the same value as the
+ * supplied parameter
+ *
+ * TODO: See if it would be more efficient to do a wait/wake
+ *       cycle and have the completion event trigger the wakeup
+ *
+ */
+#define IPW_CARD_DISABLE_COMPLETE_WAIT		    100	// 100 milli
+static int ipw2100_wait_for_card_state(struct ipw2100_priv *priv, int state)
+{
+	int i;
+	u32 card_state;
+	u32 len = sizeof(card_state);
+	int err;
+
+	for (i = 0; i <= IPW_CARD_DISABLE_COMPLETE_WAIT * 1000; i += 50) {
+		err = ipw2100_get_ordinal(priv, IPW_ORD_CARD_DISABLED,
+					  &card_state, &len);
+		if (err) {
+			IPW_DEBUG_INFO("Query of CARD_DISABLED ordinal "
+				       "failed.\n");
+			return 0;
+		}
+
+		/* We'll break out if either the HW state says it is
+		 * in the state we want, or if HOST_COMPLETE command
+		 * finishes */
+		if ((card_state == state) ||
+		    ((priv->status & STATUS_ENABLED) ?
+		     IPW_HW_STATE_ENABLED : IPW_HW_STATE_DISABLED) == state) {
+			if (state == IPW_HW_STATE_ENABLED)
+				priv->status |= STATUS_ENABLED;
+			else
+				priv->status &= ~STATUS_ENABLED;
+
+			return 0;
+		}
+
+		udelay(50);
+	}
+
+	IPW_DEBUG_INFO("ipw2100_wait_for_card_state to %s state timed out\n",
+		       state ? "DISABLED" : "ENABLED");
+	return -EIO;
+}
+
+
+/*********************************************************************
+    Procedure   :   sw_reset_and_clock
+    Purpose     :   Asserts s/w reset, asserts clock initialization
+                    and waits for clock stabilization
+ ********************************************************************/
+static int sw_reset_and_clock(struct ipw2100_priv *priv)
+{
+	int i;
+	u32 r;
+
+	// assert s/w reset
+	write_register(priv->net_dev, IPW_REG_RESET_REG,
+		       IPW_AUX_HOST_RESET_REG_SW_RESET);
+
+	// wait for clock stabilization
+	for (i = 0; i < 1000; i++) {
+		udelay(IPW_WAIT_RESET_ARC_COMPLETE_DELAY);
+
+		// check clock ready bit
+		read_register(priv->net_dev, IPW_REG_RESET_REG, &r);
+		if (r & IPW_AUX_HOST_RESET_REG_PRINCETON_RESET)
+			break;
+	}
+
+	if (i == 1000)
+		return -EIO;	// TODO: better error value
+
+	/* set "initialization complete" bit to move adapter to
+	 * D0 state */
+	write_register(priv->net_dev, IPW_REG_GP_CNTRL,
+		       IPW_AUX_HOST_GP_CNTRL_BIT_INIT_DONE);
+
+	/* wait for clock stabilization */
+	for (i = 0; i < 10000; i++) {
+		udelay(IPW_WAIT_CLOCK_STABILIZATION_DELAY * 4);
+
+		/* check clock ready bit */
+		read_register(priv->net_dev, IPW_REG_GP_CNTRL, &r);
+		if (r & IPW_AUX_HOST_GP_CNTRL_BIT_CLOCK_READY)
+			break;
+	}
+
+	if (i == 10000)
+		return -EIO;	/* TODO: better error value */
+
+	/* set D0 standby bit */
+	read_register(priv->net_dev, IPW_REG_GP_CNTRL, &r);
+	write_register(priv->net_dev, IPW_REG_GP_CNTRL,
+		       r | IPW_AUX_HOST_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
+
+	return 0;
+}
+
+/*********************************************************************
+    Procedure   :   ipw2100_download_firmware
+    Purpose     :   Initialize adapter after power on.
+                    The sequence is:
+                    1. assert s/w reset first!
+                    2. awake clocks & wait for clock stabilization
+                    3. hold ARC (don't ask me why...)
+                    4. load Dino ucode and reset/clock init again
+                    5. zero-out shared mem
+                    6. download f/w
+ *******************************************************************/
+static int ipw2100_download_firmware(struct ipw2100_priv *priv)
+{
+	u32 address;
+	int err;
+
+#ifndef CONFIG_PM
+	/* Fetch the firmware and microcode */
+	struct ipw2100_fw ipw2100_firmware;
+#endif
+
+	if (priv->fatal_error) {
+		IPW_DEBUG_ERROR("%s: ipw2100_download_firmware called after "
+		       "fatal error %d.  Interface must be brought down.\n",
+		       priv->net_dev->name, priv->fatal_error);
+		return -EINVAL;
+	}
+
+#ifdef CONFIG_PM
+	if (!ipw2100_firmware.version) {
+		err = ipw2100_get_firmware(priv, &ipw2100_firmware);
+		if (err) {
+			IPW_DEBUG_ERROR("%s: ipw2100_get_firmware failed: %d\n",
+			       priv->net_dev->name, err);
+			priv->fatal_error = IPW2100_ERR_FW_LOAD;
+			goto fail;
+		}
+	}
+#else
+	err = ipw2100_get_firmware(priv, &ipw2100_firmware);
+	if (err) {
+		IPW_DEBUG_ERROR("%s: ipw2100_get_firmware failed: %d\n",
+		       priv->net_dev->name, err);
+		priv->fatal_error = IPW2100_ERR_FW_LOAD;
+		goto fail;
+	}
+#endif
+	priv->firmware_version = ipw2100_firmware.version;
+
+	/* s/w reset and clock stabilization */
+	err = sw_reset_and_clock(priv);
+	if (err) {
+		IPW_DEBUG_ERROR("%s: sw_reset_and_clock failed: %d\n",
+		       priv->net_dev->name, err);
+		goto fail;
+	}
+
+	err = ipw2100_verify(priv);
+	if (err) {
+		IPW_DEBUG_ERROR("%s: ipw2100_verify failed: %d\n",
+		       priv->net_dev->name, err);
+		goto fail;
+	}
+
+	/* Hold ARC */
+	write_nic_dword(priv->net_dev,
+			IPW_INTERNAL_REGISTER_HALT_AND_RESET,
+			0x80000000);
+
+	/* allow ARC to run */
+	write_register(priv->net_dev, IPW_REG_RESET_REG, 0);
+
+	/* load microcode */
+	err = ipw2100_ucode_download(priv, &ipw2100_firmware);
+	if (err) {
+		IPW_DEBUG_ERROR("%s: Error loading microcode: %d\n",
+		       priv->net_dev->name, err);
+		goto fail;
+	}
+
+	/* release ARC */
+	write_nic_dword(priv->net_dev,
+			IPW_INTERNAL_REGISTER_HALT_AND_RESET,
+			0x00000000);
+
+	/* s/w reset and clock stabilization (again!!!) */
+	err = sw_reset_and_clock(priv);
+	if (err) {
+		IPW_DEBUG_ERROR("%s: sw_reset_and_clock failed: %d\n",
+		       priv->net_dev->name, err);
+		goto fail;
+	}
+
+	/* load f/w */
+	err = ipw2100_fw_download(priv, &ipw2100_firmware);
+	if (err) {
+		IPW_DEBUG_ERROR("%s: Error loading firmware: %d\n",
+		       priv->net_dev->name, err);
+		goto fail;
+	}
+
+#ifndef CONFIG_PM
+	/*
+	 * When the .resume method of the driver is called, other parts of
+	 * the system (e.g. the IDE driver) could still be suspended, which
+	 * would prevent us from loading the firmware from disk.  --YZ
+	 */
+
+	/* free any storage allocated for firmware image */
+	ipw2100_release_firmware(priv, &ipw2100_firmware);
+#endif
+
+	/* zero out Domain 1 area indirectly (Si requirement) */
+	for (address = IPW_HOST_FW_SHARED_AREA0;
+	     address < IPW_HOST_FW_SHARED_AREA0_END; address += 4)
+		write_nic_dword(priv->net_dev, address, 0);
+	for (address = IPW_HOST_FW_SHARED_AREA1;
+	     address < IPW_HOST_FW_SHARED_AREA1_END; address += 4)
+		write_nic_dword(priv->net_dev, address, 0);
+	for (address = IPW_HOST_FW_SHARED_AREA2;
+	     address < IPW_HOST_FW_SHARED_AREA2_END; address += 4)
+		write_nic_dword(priv->net_dev, address, 0);
+	for (address = IPW_HOST_FW_SHARED_AREA3;
+	     address < IPW_HOST_FW_SHARED_AREA3_END; address += 4)
+		write_nic_dword(priv->net_dev, address, 0);
+	for (address = IPW_HOST_FW_INTERRUPT_AREA;
+	     address < IPW_HOST_FW_INTERRUPT_AREA_END; address += 4)
+		write_nic_dword(priv->net_dev, address, 0);
+
+	return 0;
+
+ fail:
+	ipw2100_release_firmware(priv, &ipw2100_firmware);
+	return err;
+}
+
+static inline void ipw2100_enable_interrupts(struct ipw2100_priv *priv)
+{
+	if (priv->status & STATUS_INT_ENABLED)
+		return;
+	priv->status |= STATUS_INT_ENABLED;
+	write_register(priv->net_dev, IPW_REG_INTA_MASK, IPW_INTERRUPT_MASK);
+}
+
+static inline void ipw2100_disable_interrupts(struct ipw2100_priv *priv)
+{
+	if (!(priv->status & STATUS_INT_ENABLED))
+		return;
+	priv->status &= ~STATUS_INT_ENABLED;
+	write_register(priv->net_dev, IPW_REG_INTA_MASK, 0x0);
+}
+
+
+static void ipw2100_initialize_ordinals(struct ipw2100_priv *priv)
+{
+	struct ipw2100_ordinals *ord = &priv->ordinals;
+
+	IPW_DEBUG_INFO("enter\n");
+
+	read_register(priv->net_dev, IPW_MEM_HOST_SHARED_ORDINALS_TABLE_1,
+		      &ord->table1_addr);
+
+	read_register(priv->net_dev, IPW_MEM_HOST_SHARED_ORDINALS_TABLE_2,
+		      &ord->table2_addr);
+
+	read_nic_dword(priv->net_dev, ord->table1_addr, &ord->table1_size);
+	read_nic_dword(priv->net_dev, ord->table2_addr, &ord->table2_size);
+
+	ord->table2_size &= 0x0000FFFF;
+
+	IPW_DEBUG_INFO("table 1 size: %d\n", ord->table1_size);
+	IPW_DEBUG_INFO("table 2 size: %d\n", ord->table2_size);
+	IPW_DEBUG_INFO("exit\n");
+}
+
+static inline void ipw2100_hw_set_gpio(struct ipw2100_priv *priv)
+{
+	u32 reg = 0;
+	/*
+	 * Set GPIO 3 writable by FW; GPIO 1 writable
+	 * by driver and enable clock
+	 */
+	reg = (IPW_BIT_GPIO_GPIO3_MASK | IPW_BIT_GPIO_GPIO1_ENABLE |
+	       IPW_BIT_GPIO_LED_OFF);
+	write_register(priv->net_dev, IPW_REG_GPIO, reg);
+}
+
+static inline int rf_kill_active(struct ipw2100_priv *priv)
+{
+#define MAX_RF_KILL_CHECKS 5
+#define RF_KILL_CHECK_DELAY 40
+
+	unsigned short value = 0;
+	u32 reg = 0;
+	int i;
+
+	if (!(priv->hw_features & HW_FEATURE_RFKILL)) {
+		priv->status &= ~STATUS_RF_KILL_HW;
+		return 0;
+	}
+
+	for (i = 0; i < MAX_RF_KILL_CHECKS; i++) {
+		udelay(RF_KILL_CHECK_DELAY);
+		read_register(priv->net_dev, IPW_REG_GPIO, &reg);
+		value = (value << 1) | ((reg & IPW_BIT_GPIO_RF_KILL) ? 0 : 1);
+	}
+
+	if (value == 0)
+		priv->status |= STATUS_RF_KILL_HW;
+	else
+		priv->status &= ~STATUS_RF_KILL_HW;
+
+	return (value == 0);
+}
+
+static int ipw2100_get_hw_features(struct ipw2100_priv *priv)
+{
+	u32 addr, len;
+	u32 val;
+
+	/*
+	 * EEPROM_SRAM_DB_START_ADDRESS using ordinal in ordinal table 1
+	 */
+	len = sizeof(addr);
+	if (ipw2100_get_ordinal(
+		    priv, IPW_ORD_EEPROM_SRAM_DB_BLOCK_START_ADDRESS,
+		    &addr, &len)) {
+		IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
+		       __LINE__);
+		return -EIO;
+	}
+
+	IPW_DEBUG_INFO("EEPROM address: %08X\n", addr);
+
+	/*
+	 * EEPROM version is the byte at offset 0xfd in firmware
+	 * We read 4 bytes, then shift out the byte we actually want */
+	read_nic_dword(priv->net_dev, addr + 0xFC, &val);
+	priv->eeprom_version = (val >> 24) & 0xFF;
+	IPW_DEBUG_INFO("EEPROM version: %d\n", priv->eeprom_version);
+
+        /*
+	 *  HW RF Kill enable is bit 0 in byte at offset 0x21 in firmware
+	 *
+	 *  notice that the EEPROM bit is reverse polarity, i.e.
+	 *     bit = 0  signifies HW RF kill switch is supported
+	 *     bit = 1  signifies HW RF kill switch is NOT supported
+	 */
+	read_nic_dword(priv->net_dev, addr + 0x20, &val);
+	if (!((val >> 24) & 0x01))
+		priv->hw_features |= HW_FEATURE_RFKILL;
+
+	IPW_DEBUG_INFO("HW RF Kill: %ssupported.\n",
+			   (priv->hw_features & HW_FEATURE_RFKILL) ?
+			   "" : "not ");
+
+	return 0;
+}
+
+/*
+ * Start firmware execution after power on and initialization
+ * The sequence is:
+ *  1. Release ARC
+ *  2. Wait for f/w initialization to complete
+ */
+static int ipw2100_start_adapter(struct ipw2100_priv *priv)
+{
+	int i;
+	u32 inta, inta_mask, gpio;
+
+	IPW_DEBUG_INFO("enter\n");
+
+	if (priv->status & STATUS_RUNNING)
+		return 0;
+
+	/*
+	 * Initialize the hw - drive adapter to D0 state by setting
+	 * init_done bit. Wait for clk_ready bit and Download
+	 * fw & dino ucode
+	 */
+	if (ipw2100_download_firmware(priv)) {
+		IPW_DEBUG_ERROR("%s: Failed to power on the adapter.\n",
+		       priv->net_dev->name);
+		return -EIO;
+	}
+
+	/* Clear the Tx, Rx and Msg queues and the r/w indexes
+	 * in the firmware RBD and TBD ring queue */
+	ipw2100_queues_initialize(priv);
+
+	ipw2100_hw_set_gpio(priv);
+
+	/* TODO -- Look at disabling interrupts here to make sure none
+	 * get fired during FW initialization */
+
+	/* Release ARC - clear reset bit */
+	write_register(priv->net_dev, IPW_REG_RESET_REG, 0);
+
+	/* wait for f/w initialization to complete */
+	IPW_DEBUG_FW("Waiting for f/w initialization to complete...\n");
+	i = 5000;
+	do {
+  		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(40 * HZ / 1000);
+		/* Todo... wait for sync command ... */
+
+		read_register(priv->net_dev, IPW_REG_INTA, &inta);
+
+		/* check "init done" bit */
+		if (inta & IPW2100_INTA_FW_INIT_DONE) {
+			/* reset "init done" bit */
+			write_register(priv->net_dev, IPW_REG_INTA,
+				       IPW2100_INTA_FW_INIT_DONE);
+			break;
+		}
+
+		/* check error conditions : we check these after the firmware
+		 * check so that if there is an error, the interrupt handler
+		 * will see it and the adapter will be reset */
+		if (inta &
+		    (IPW2100_INTA_FATAL_ERROR | IPW2100_INTA_PARITY_ERROR)) {
+			/* clear error conditions */
+			write_register(priv->net_dev, IPW_REG_INTA,
+				       IPW2100_INTA_FATAL_ERROR |
+				       IPW2100_INTA_PARITY_ERROR);
+		}
+	} while (i--);
+
+	/* Clear out any pending INTAs since we aren't supposed to have
+	 * interrupts enabled at this point... */
+	read_register(priv->net_dev, IPW_REG_INTA, &inta);
+	read_register(priv->net_dev, IPW_REG_INTA_MASK, &inta_mask);
+	inta &= IPW_INTERRUPT_MASK;
+	/* Clear out any pending interrupts */
+	if (inta & inta_mask)
+		write_register(priv->net_dev, IPW_REG_INTA, inta);
+
+	IPW_DEBUG_FW("f/w initialization complete: %s\n",
+		     i ? "SUCCESS" : "FAILED");
+
+	if (!i) {
+		IPW_DEBUG_WARNING("%s: Firmware did not initialize.\n",
+		       priv->net_dev->name);
+		return -EIO;
+	}
+
+	/* allow firmware to write to GPIO1 & GPIO3 */
+	read_register(priv->net_dev, IPW_REG_GPIO, &gpio);
+
+	gpio |= (IPW_BIT_GPIO_GPIO1_MASK | IPW_BIT_GPIO_GPIO3_MASK);
+
+	write_register(priv->net_dev, IPW_REG_GPIO, gpio);
+
+	/* Ready to receive commands */
+	priv->status |= STATUS_RUNNING;
+
+	/* The adapter has been reset; we are not associated */
+	priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
+
+	IPW_DEBUG_INFO("exit\n");
+
+	return 0;
+}
+
+static inline void ipw2100_reset_fatalerror(struct ipw2100_priv *priv)
+{
+	if (!priv->fatal_error)
+		return;
+
+	priv->fatal_errors[priv->fatal_index++] = priv->fatal_error;
+	priv->fatal_index %= IPW2100_ERROR_QUEUE;
+	priv->fatal_error = 0;
+}
+
+
+/* NOTE: Our interrupt is disabled when this method is called */
+static int ipw2100_power_cycle_adapter(struct ipw2100_priv *priv)
+{
+	u32 reg;
+	int i;
+
+	IPW_DEBUG_INFO("Power cycling the hardware.\n");
+
+	ipw2100_hw_set_gpio(priv);
+
+	/* Step 1. Stop Master Assert */
+	write_register(priv->net_dev, IPW_REG_RESET_REG,
+		       IPW_AUX_HOST_RESET_REG_STOP_MASTER);
+
+	/* Step 2. Wait for stop Master Assert
+	 *         (not more than 50us, otherwise return an error) */
+	i = 5;
+	do {
+		udelay(IPW_WAIT_RESET_MASTER_ASSERT_COMPLETE_DELAY);
+		read_register(priv->net_dev, IPW_REG_RESET_REG, &reg);
+
+		if (reg & IPW_AUX_HOST_RESET_REG_MASTER_DISABLED)
+			break;
+	} while (i--);
+
+	priv->status &= ~STATUS_RESET_PENDING;
+
+	if (!i) {
+		IPW_DEBUG_INFO("exit - waited too long for master assert stop\n");
+		return -EIO;
+	}
+
+	write_register(priv->net_dev, IPW_REG_RESET_REG,
+		       IPW_AUX_HOST_RESET_REG_SW_RESET);
+
+
+	/* Reset any fatal_error conditions */
+	ipw2100_reset_fatalerror(priv);
+
+	/* At this point, the adapter is now stopped and disabled */
+	priv->status &= ~(STATUS_RUNNING | STATUS_ASSOCIATING |
+			  STATUS_ASSOCIATED | STATUS_ENABLED);
+
+	return 0;
+}
+
+/*
+ * Send the CARD_DISABLE_PHY_OFF command to the card to disable it
+ *
+ * After disabling, if the card was associated, a STATUS_ASSN_LOST will be sent.
+ *
+ * STATUS_CARD_DISABLE_NOTIFICATION will be sent regardless of whether
+ * STATUS_ASSN_LOST is sent.
+ */
+static int ipw2100_hw_phy_off(struct ipw2100_priv *priv)
+{
+
+#define HW_PHY_OFF_LOOP_DELAY (HZ / 5000)
+
+	struct host_command cmd = {
+		.host_command = CARD_DISABLE_PHY_OFF,
+		.host_command_sequence = 0,
+		.host_command_length = 0,
+	};
+	int err, i;
+	u32 val1, val2;
+
+	IPW_DEBUG_HC("CARD_DISABLE_PHY_OFF\n");
+
+	/* Turn off the radio */
+	err = ipw2100_hw_send_command(priv, &cmd);
+	if (err)
+		return err;
+
+	for (i = 0; i < 2500; i++) {
+		read_nic_dword(priv->net_dev, IPW2100_CONTROL_REG, &val1);
+		read_nic_dword(priv->net_dev, IPW2100_COMMAND, &val2);
+
+		if ((val1 & IPW2100_CONTROL_PHY_OFF) &&
+		    (val2 & IPW2100_COMMAND_PHY_OFF))
+			return 0;
+
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(HW_PHY_OFF_LOOP_DELAY);
+	}
+
+	return -EIO;
+}
+
+
+static int ipw2100_enable_adapter(struct ipw2100_priv *priv)
+{
+	struct host_command cmd = {
+		.host_command = HOST_COMPLETE,
+		.host_command_sequence = 0,
+		.host_command_length = 0
+	};
+	int err = 0;
+
+	IPW_DEBUG_HC("HOST_COMPLETE\n");
+
+	if (priv->status & STATUS_ENABLED)
+		return 0;
+
+	down(&priv->adapter_sem);
+
+	if (rf_kill_active(priv)) {
+		IPW_DEBUG_HC("Command aborted due to RF kill active.\n");
+		goto fail_up;
+	}
+
+	err = ipw2100_hw_send_command(priv, &cmd);
+	if (err) {
+		IPW_DEBUG_INFO("Failed to send HOST_COMPLETE command\n");
+		goto fail_up;
+	}
+
+	err = ipw2100_wait_for_card_state(priv, IPW_HW_STATE_ENABLED);
+	if (err) {
+		IPW_DEBUG_INFO(
+		       "%s: card not responding to init command.\n",
+		       priv->net_dev->name);
+		goto fail_up;
+	}
+
+	if (priv->stop_hang_check) {
+		priv->stop_hang_check = 0;
+		queue_delayed_work(priv->workqueue, &priv->hang_check, HZ / 2);
+	}
+
+fail_up:
+	up(&priv->adapter_sem);
+	return err;
+}
+
+static int ipw2100_hw_stop_adapter(struct ipw2100_priv *priv)
+{
+#define HW_POWER_DOWN_DELAY (HZ / 10)
+
+	struct host_command cmd = {
+		.host_command = HOST_PRE_POWER_DOWN,
+		.host_command_sequence = 0,
+		.host_command_length = 0,
+	};
+	int err, i;
+	u32 reg;
+
+	if (!(priv->status & STATUS_RUNNING))
+		return 0;
+
+	priv->status |= STATUS_STOPPING;
+
+	/* We can only shut down the card if the firmware is operational.  So,
+	 * if we haven't reset since a fatal_error, then we can not send the
+	 * shutdown commands. */
+	if (!priv->fatal_error) {
+		/* First, make sure the adapter is enabled so that the PHY_OFF
+		 * command can shut it down */
+		ipw2100_enable_adapter(priv);
+
+		err = ipw2100_hw_phy_off(priv);
+		if (err)
+			IPW_DEBUG_WARNING("Error disabling radio %d\n", err);
+
+		/*
+		 * If in D0-standby mode going directly to D3 may cause a
+		 * PCI bus violation.  Therefore we must change out of the D0
+		 * state.
+		 *
+		 * Sending the PREPARE_FOR_POWER_DOWN will restrict the
+		 * hardware from going into standby mode and will transition
+		 * out of D0-standby if it is already in that state.
+		 *
+		 * STATUS_PREPARE_POWER_DOWN_COMPLETE will be sent by the
+		 * driver upon completion.  Once received, the driver can
+		 * proceed to the D3 state.
+		 *
+		 * Prepare for power down command to fw.  This command would
+		 * take HW out of D0-standby and prepare it for D3 state.
+		 *
+		 * Currently FW does not support event notification for this
+		 * event. Therefore, skip waiting for it.  Just wait a fixed
+		 * 100ms
+		 */
+		IPW_DEBUG_HC("HOST_PRE_POWER_DOWN\n");
+
+		err = ipw2100_hw_send_command(priv, &cmd);
+		if (err)
+			IPW_DEBUG_WARNING(
+			       "%s: Power down command failed: Error %d\n",
+			       priv->net_dev->name, err);
+		else {
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			schedule_timeout(HW_POWER_DOWN_DELAY);
+		}
+	}
+
+	priv->status &= ~STATUS_ENABLED;
+
+	/*
+	 * Set GPIO 3 writable by FW; GPIO 1 writable
+	 * by driver and enable clock
+	 */
+	ipw2100_hw_set_gpio(priv);
+
+	/*
+	 * Power down adapter.  Sequence:
+	 * 1. Stop master assert (RESET_REG[9]=1)
+	 * 2. Wait for stop master (RESET_REG[8]==1)
+	 * 3. S/w reset assert (RESET_REG[7] = 1)
+	 */
+
+	/* Stop master assert */
+	write_register(priv->net_dev, IPW_REG_RESET_REG,
+		       IPW_AUX_HOST_RESET_REG_STOP_MASTER);
+
+	/* wait stop master not more than 50 usec.
+	 * Otherwise return error. */
+	for (i = 5; i > 0; i--) {
+		udelay(10);
+
+		/* Check master stop bit */
+		read_register(priv->net_dev, IPW_REG_RESET_REG, &reg);
+
+		if (reg & IPW_AUX_HOST_RESET_REG_MASTER_DISABLED)
+			break;
+	}
+
+	if (i == 0)
+		IPW_DEBUG_WARNING(DRV_NAME
+		       ": %s: Could now power down adapter.\n",
+		       priv->net_dev->name);
+
+	/* assert s/w reset */
+	write_register(priv->net_dev, IPW_REG_RESET_REG,
+		       IPW_AUX_HOST_RESET_REG_SW_RESET);
+
+	priv->status &= ~(STATUS_RUNNING | STATUS_STOPPING);
+
+	return 0;
+}
+
+
+static int ipw2100_disable_adapter(struct ipw2100_priv *priv)
+{
+	struct host_command cmd = {
+		.host_command = CARD_DISABLE,
+		.host_command_sequence = 0,
+		.host_command_length = 0
+	};
+	int err = 0;
+
+	IPW_DEBUG_HC("CARD_DISABLE\n");
+
+	if (!(priv->status & STATUS_ENABLED))
+		return 0;
+
+	/* Make sure we clear the associated state */
+	priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
+
+	if (!priv->stop_hang_check) {
+		priv->stop_hang_check = 1;
+		cancel_delayed_work(&priv->hang_check);
+	}
+
+	down(&priv->adapter_sem);
+
+	err = ipw2100_hw_send_command(priv, &cmd);
+	if (err) {
+		IPW_DEBUG_WARNING("exit - failed to send CARD_DISABLE command\n");
+		goto fail_up;
+	}
+
+	err = ipw2100_wait_for_card_state(priv, IPW_HW_STATE_DISABLED);
+	if (err) {
+		IPW_DEBUG_WARNING("exit - card failed to change to DISABLED\n");
+		goto fail_up;
+	}
+
+	IPW_DEBUG_INFO("TODO: implement scan state machine\n");
+
+fail_up:
+	up(&priv->adapter_sem);
+	return err;
+}
+
+int ipw2100_set_scan_options(struct ipw2100_priv *priv)
+{
+	struct host_command cmd = {
+		.host_command = SET_SCAN_OPTIONS,
+		.host_command_sequence = 0,
+		.host_command_length = 8
+	};
+	int err;
+
+	IPW_DEBUG_INFO("enter\n");
+
+	IPW_DEBUG_SCAN("setting scan options\n");
+
+	cmd.host_command_parameters[0] = 0;
+
+	if (!(priv->config & CFG_ASSOCIATE))
+		cmd.host_command_parameters[0] |= IPW_SCAN_NOASSOCIATE;
+	if ((priv->sec.flags & SEC_ENABLED) && priv->sec.enabled)
+		cmd.host_command_parameters[0] |= IPW_SCAN_MIXED_CELL;
+	if (priv->config & CFG_PASSIVE_SCAN)
+		cmd.host_command_parameters[0] |= IPW_SCAN_PASSIVE;
+
+	cmd.host_command_parameters[1] = priv->channel_mask;
+
+	err = ipw2100_hw_send_command(priv, &cmd);
+
+	IPW_DEBUG_HC("SET_SCAN_OPTIONS 0x%04X\n",
+		     cmd.host_command_parameters[0]);
+
+	return err;
+}
+
+int ipw2100_start_scan(struct ipw2100_priv *priv)
+{
+	struct host_command cmd = {
+		.host_command = BROADCAST_SCAN,
+		.host_command_sequence = 0,
+		.host_command_length = 4
+	};
+	int err;
+
+	IPW_DEBUG_HC("START_SCAN\n");
+
+	cmd.host_command_parameters[0] = 0;
+
+	/* No scanning if in monitor mode */
+	if (priv->ieee->iw_mode == IW_MODE_MONITOR)
+		return 1;
+
+	if (priv->status & STATUS_SCANNING) {
+		IPW_DEBUG_SCAN("Scan requested while already in scan...\n");
+		return 0;
+	}
+
+	IPW_DEBUG_INFO("enter\n");
+
+	/* Not clearing here; doing so makes iwlist always return nothing...
+	 *
+	 * We should modify the table logic to use aging tables vs. clearing
+	 * the table on each scan start.
+	 */
+	IPW_DEBUG_SCAN("starting scan\n");
+
+	priv->status |= STATUS_SCANNING;
+	err = ipw2100_hw_send_command(priv, &cmd);
+	if (err)
+		priv->status &= ~STATUS_SCANNING;
+
+	IPW_DEBUG_INFO("exit\n");
+
+	return err;
+}
+
+static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
+{
+	unsigned long flags;
+	int rc = 0;
+	u32 lock;
+	u32 ord_len = sizeof(lock);
+
+	/* Quit if manually disabled. */
+	if (priv->status & STATUS_RF_KILL_SW) {
+		IPW_DEBUG_INFO("%s: Radio is disabled by Manual Disable "
+			       "switch\n", priv->net_dev->name);
+		return 0;
+	}
+
+	/* If the interrupt is enabled, turn it off... */
+	spin_lock_irqsave(&priv->low_lock, flags);
+	ipw2100_disable_interrupts(priv);
+
+	/* Reset any fatal_error conditions */
+	ipw2100_reset_fatalerror(priv);
+	spin_unlock_irqrestore(&priv->low_lock, flags);
+
+	if (priv->status & STATUS_POWERED ||
+	    (priv->status & STATUS_RESET_PENDING)) {
+		/* Power cycle the card ... */
+		if (ipw2100_power_cycle_adapter(priv)) {
+			IPW_DEBUG_WARNING("%s: Could not cycle adapter.\n",
+					  priv->net_dev->name);
+			rc = 1;
+			goto exit;
+		}
+	} else
+		priv->status |= STATUS_POWERED;
+
+	/* Load the firmware, start the clocks, etc. */
+	if (ipw2100_start_adapter(priv)) {
+	       	IPW_DEBUG_ERROR("%s: Failed to start the firmware.\n",
+				priv->net_dev->name);
+		rc = 1;
+		goto exit;
+	}
+
+	ipw2100_initialize_ordinals(priv);
+
+	/* Determine capabilities of this particular HW configuration */
+	if (ipw2100_get_hw_features(priv)) {
+		IPW_DEBUG_ERROR("%s: Failed to determine HW features.\n",
+				priv->net_dev->name);
+		rc = 1;
+		goto exit;
+	}
+
+	lock = LOCK_NONE;
+	if (ipw2100_set_ordinal(priv, IPW_ORD_PERS_DB_LOCK, &lock, &ord_len)) {
+		IPW_DEBUG_ERROR("%s: Failed to clear ordinal lock.\n",
+				priv->net_dev->name);
+		rc = 1;
+		goto exit;
+	}
+
+	priv->status &= ~STATUS_SCANNING;
+
+	if (rf_kill_active(priv)) {
+		printk(KERN_INFO "%s: Radio is disabled by RF switch.\n",
+		       priv->net_dev->name);
+
+		if (priv->stop_rf_kill) {
+			priv->stop_rf_kill = 0;
+			queue_delayed_work(priv->workqueue, &priv->rf_kill, HZ);
+		}
+
+		deferred = 1;
+	}
+
+	/* Turn on the interrupt so that commands can be processed */
+	ipw2100_enable_interrupts(priv);
+
+	/* Send all of the commands that must be sent prior to
+	 * HOST_COMPLETE */
+	if (ipw2100_adapter_setup(priv)) {
+		IPW_DEBUG_ERROR("%s: Failed to start the card.\n",
+				priv->net_dev->name);
+		rc = 1;
+		goto exit;
+	}
+
+	if (!deferred) {
+		/* Enable the adapter - sends HOST_COMPLETE */
+		if (ipw2100_enable_adapter(priv)) {
+			IPW_DEBUG_ERROR(
+				"%s: failed in call to enable adapter.\n",
+				priv->net_dev->name);
+			ipw2100_hw_stop_adapter(priv);
+			rc = 1;
+			goto exit;
+		}
+
+
+		/* Start a scan . . . */
+		ipw2100_set_scan_options(priv);
+		ipw2100_start_scan(priv);
+	}
+
+ exit:
+	return rc;
+}
+
+/* Called by register_netdev() */
+static int ipw2100_net_init(struct net_device *dev)
+{
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+	return ipw2100_up(priv, 1);
+}
+
+static void ipw2100_down(struct ipw2100_priv *priv)
+{
+	unsigned long flags;
+	union iwreq_data wrqu = {
+		.ap_addr = {
+			.sa_family = ARPHRD_ETHER
+		}
+	};
+	int associated = priv->status & STATUS_ASSOCIATED;
+
+	/* Kill the RF switch timer */
+	if (!priv->stop_rf_kill) {
+		priv->stop_rf_kill = 1;
+		cancel_delayed_work(&priv->rf_kill);
+	}
+
+	/* Kill the firmware hang check timer */
+	if (!priv->stop_hang_check) {
+		priv->stop_hang_check = 1;
+		cancel_delayed_work(&priv->hang_check);
+	}
+
+	/* Kill any pending resets */
+	if (priv->status & STATUS_RESET_PENDING)
+		cancel_delayed_work(&priv->reset_work);
+
+	/* Make sure the interrupt is on so that FW commands will be
+	 * processed correctly */
+	spin_lock_irqsave(&priv->low_lock, flags);
+	ipw2100_enable_interrupts(priv);
+	spin_unlock_irqrestore(&priv->low_lock, flags);
+
+	if (ipw2100_hw_stop_adapter(priv))
+		IPW_DEBUG_ERROR("%s: Error stopping adapter.\n",
+		       priv->net_dev->name);
+
+	/* Do not disable the interrupt until _after_ we disable
+	 * the adapter.  Otherwise the CARD_DISABLE command will never
+	 * be ack'd by the firmware */
+	spin_lock_irqsave(&priv->low_lock, flags);
+	ipw2100_disable_interrupts(priv);
+	spin_unlock_irqrestore(&priv->low_lock, flags);
+
+#ifdef ACPI_CSTATE_LIMIT_DEFINED
+	if (priv->config & CFG_C3_DISABLED) {
+		IPW_DEBUG_INFO(DRV_NAME ": Resetting C3 transitions.\n");
+		acpi_set_cstate_limit(priv->cstate_limit);
+		priv->config &= ~CFG_C3_DISABLED;
+	}
+#endif
+
+	/* We have to signal any supplicant if we are disassociating */
+	if (associated)
+		wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
+
+	priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
+	netif_carrier_off(priv->net_dev);
+	netif_stop_queue(priv->net_dev);
+}
+
+void ipw2100_reset_adapter(struct ipw2100_priv *priv)
+{
+	unsigned long flags;
+	union iwreq_data wrqu = {
+		.ap_addr = {
+			.sa_family = ARPHRD_ETHER
+		}
+	};
+	int associated = priv->status & STATUS_ASSOCIATED;
+
+	spin_lock_irqsave(&priv->low_lock, flags);
+	IPW_DEBUG_INFO(DRV_NAME ": %s: Restarting adapter.\n",
+		       priv->net_dev->name);
+	priv->resets++;
+	priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
+	priv->status |= STATUS_SECURITY_UPDATED;
+
+	/* Force a power cycle even if interface hasn't been opened
+	 * yet */
+	cancel_delayed_work(&priv->reset_work);
+	priv->status |= STATUS_RESET_PENDING;
+	spin_unlock_irqrestore(&priv->low_lock, flags);
+
+	down(&priv->action_sem);
+	/* stop timed checks so that they don't interfere with reset */
+	priv->stop_hang_check = 1;
+	cancel_delayed_work(&priv->hang_check);
+
+	/* We have to signal any supplicant if we are disassociating */
+	if (associated)
+		wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
+
+	ipw2100_up(priv, 0);
+	up(&priv->action_sem);
+
+}
+
+
+static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status)
+{
+
+#define MAC_ASSOCIATION_READ_DELAY (HZ)
+	int ret, len, essid_len;
+	char essid[IW_ESSID_MAX_SIZE];
+	u32 txrate;
+	u32 chan;
+	char *txratename;
+ 	u8 bssid[ETH_ALEN];
+
+	/*
+	 * TBD: BSSID is usually 00:00:00:00:00:00 here and not
+	 *      an actual MAC of the AP. Seems like FW sets this
+	 *      address too late. Read it later and expose through
+	 *      /proc or schedule a later task to query and update
+	 */
+
+	essid_len = IW_ESSID_MAX_SIZE;
+	ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_SSID,
+				  essid, &essid_len);
+	if (ret) {
+		IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
+				   __LINE__);
+		return;
+	}
+
+	len = sizeof(u32);
+	ret = ipw2100_get_ordinal(priv, IPW_ORD_CURRENT_TX_RATE,
+				  &txrate, &len);
+	if (ret) {
+		IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
+				   __LINE__);
+		return;
+	}
+
+	len = sizeof(u32);
+	ret = ipw2100_get_ordinal(priv, IPW_ORD_OUR_FREQ, &chan, &len);
+	if (ret) {
+		IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
+				   __LINE__);
+		return;
+	}
+	len = ETH_ALEN;
+	ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, &bssid, &len);
+	if (ret) {
+		IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
+				   __LINE__);
+		return;
+	}
+	memcpy(priv->ieee->bssid, bssid, ETH_ALEN);
+
+
+	switch (txrate) {
+	case TX_RATE_1_MBIT:
+		txratename = "1Mbps";
+		break;
+	case TX_RATE_2_MBIT:
+		txratename = "2Mbsp";
+		break;
+	case TX_RATE_5_5_MBIT:
+		txratename = "5.5Mbps";
+		break;
+	case TX_RATE_11_MBIT:
+		txratename = "11Mbps";
+		break;
+	default:
+		IPW_DEBUG_INFO("Unknown rate: %d\n", txrate);
+		txratename = "unknown rate";
+		break;
+	}
+
+	IPW_DEBUG_INFO("%s: Associated with '%s' at %s, channel %d (BSSID="
+		       MAC_FMT ")\n",
+		       priv->net_dev->name, escape_essid(essid, essid_len),
+		       txratename, chan, MAC_ARG(bssid));
+
+	/* now we copy read ssid into dev */
+	if (!(priv->config & CFG_STATIC_ESSID)) {
+		priv->essid_len = min((u8)essid_len, (u8)IW_ESSID_MAX_SIZE);
+		memcpy(priv->essid, essid, priv->essid_len);
+	}
+	priv->channel = chan;
+	memcpy(priv->bssid, bssid, ETH_ALEN);
+
+	priv->status |= STATUS_ASSOCIATING;
+	priv->connect_start = get_seconds();
+
+	queue_delayed_work(priv->workqueue, &priv->wx_event_work, HZ / 10);
+}
+
+
+int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
+		      int length, int batch_mode)
+{
+	int ssid_len = min(length, IW_ESSID_MAX_SIZE);
+	struct host_command cmd = {
+		.host_command = SSID,
+		.host_command_sequence = 0,
+		.host_command_length = ssid_len
+	};
+	int err;
+
+	IPW_DEBUG_HC("SSID: '%s'\n", escape_essid(essid, ssid_len));
+
+	if (ssid_len)
+		memcpy((char*)cmd.host_command_parameters,
+		       essid, ssid_len);
+
+	if (!batch_mode) {
+		err = ipw2100_disable_adapter(priv);
+		if (err)
+			return err;
+	}
+
+	/* Bug in FW currently doesn't honor bit 0 in SET_SCAN_OPTIONS to
+	 * disable auto association -- so we cheat by setting a bogus SSID */
+	if (!ssid_len && !(priv->config & CFG_ASSOCIATE)) {
+		int i;
+		u8 *bogus = (u8*)cmd.host_command_parameters;
+		for (i = 0; i < IW_ESSID_MAX_SIZE; i++)
+			bogus[i] = 0x18 + i;
+		cmd.host_command_length = IW_ESSID_MAX_SIZE;
+	}
+
+	/* NOTE:  We always send the SSID command even if the provided ESSID is
+	 * the same as what we currently think is set. */
+
+	err = ipw2100_hw_send_command(priv, &cmd);
+	if (!err) {
+		memset(priv->essid + ssid_len, 0,
+		       IW_ESSID_MAX_SIZE - ssid_len);
+		memcpy(priv->essid, essid, ssid_len);
+		priv->essid_len = ssid_len;
+	}
+
+	if (!batch_mode) {
+		if (ipw2100_enable_adapter(priv))
+			err = -EIO;
+	}
+
+	return err;
+}
+
+static void isr_indicate_association_lost(struct ipw2100_priv *priv, u32 status)
+{
+	IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
+		  "disassociated: '%s' " MAC_FMT " \n",
+		  escape_essid(priv->essid, priv->essid_len),
+		  MAC_ARG(priv->bssid));
+
+	priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
+
+	if (priv->status & STATUS_STOPPING) {
+		IPW_DEBUG_INFO("Card is stopping itself, discard ASSN_LOST.\n");
+		return;
+	}
+
+	memset(priv->bssid, 0, ETH_ALEN);
+	memset(priv->ieee->bssid, 0, ETH_ALEN);
+
+	netif_carrier_off(priv->net_dev);
+	netif_stop_queue(priv->net_dev);
+
+	if (!(priv->status & STATUS_RUNNING))
+		return;
+
+	if (priv->status & STATUS_SECURITY_UPDATED)
+		queue_work(priv->workqueue, &priv->security_work);
+
+	queue_work(priv->workqueue, &priv->wx_event_work);
+}
+
+static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
+{
+	IPW_DEBUG_INFO("%s: RF Kill state changed to radio OFF.\n",
+	       priv->net_dev->name);
+
+	/* RF_KILL is now enabled (else we wouldn't be here) */
+	priv->status |= STATUS_RF_KILL_HW;
+
+#ifdef ACPI_CSTATE_LIMIT_DEFINED
+	if (priv->config & CFG_C3_DISABLED) {
+		IPW_DEBUG_INFO(DRV_NAME ": Resetting C3 transitions.\n");
+		acpi_set_cstate_limit(priv->cstate_limit);
+		priv->config &= ~CFG_C3_DISABLED;
+	}
+#endif
+
+	/* Make sure the RF Kill check timer is running */
+	priv->stop_rf_kill = 0;
+	cancel_delayed_work(&priv->rf_kill);
+	queue_delayed_work(priv->workqueue, &priv->rf_kill, HZ);
+}
+
+static void isr_scan_complete(struct ipw2100_priv *priv, u32 status)
+{
+	IPW_DEBUG_SCAN("scan complete\n");
+	/* Age the scan results... */
+	priv->ieee->scans++;
+	priv->status &= ~STATUS_SCANNING;
+}
+
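+/* Status notification dispatch table entries.  When CONFIG_IPW_DEBUG is
+ * set, the stringified status value (# v) is also stored so that
+ * isr_status_change() can log each notification by name. */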
+#ifdef CONFIG_IPW_DEBUG
+#define IPW2100_HANDLER(v, f) { v, f, # v }
+struct ipw2100_status_indicator {
+	int status;
+	void (*cb)(struct ipw2100_priv *priv, u32 status);
+	char *name;
+};
+#else
+#define IPW2100_HANDLER(v, f) { v, f }
+struct ipw2100_status_indicator {
+	int status;
+	void (*cb)(struct ipw2100_priv *priv, u32 status);
+};
+#endif /* CONFIG_IPW_DEBUG */
+
+static void isr_indicate_scanning(struct ipw2100_priv *priv, u32 status)
+{
+	IPW_DEBUG_SCAN("Scanning...\n");
+	priv->status |= STATUS_SCANNING;
+}
+
+const struct ipw2100_status_indicator status_handlers[] = {
+	IPW2100_HANDLER(IPW_STATE_INITIALIZED, 0),
+	IPW2100_HANDLER(IPW_STATE_COUNTRY_FOUND, 0),
+	IPW2100_HANDLER(IPW_STATE_ASSOCIATED, isr_indicate_associated),
+	IPW2100_HANDLER(IPW_STATE_ASSN_LOST, isr_indicate_association_lost),
+	IPW2100_HANDLER(IPW_STATE_ASSN_CHANGED, 0),
+	IPW2100_HANDLER(IPW_STATE_SCAN_COMPLETE, isr_scan_complete),
+	IPW2100_HANDLER(IPW_STATE_ENTERED_PSP, 0),
+	IPW2100_HANDLER(IPW_STATE_LEFT_PSP, 0),
+	IPW2100_HANDLER(IPW_STATE_RF_KILL, isr_indicate_rf_kill),
+	IPW2100_HANDLER(IPW_STATE_DISABLED, 0),
+	IPW2100_HANDLER(IPW_STATE_POWER_DOWN, 0),
+	IPW2100_HANDLER(IPW_STATE_SCANNING, isr_indicate_scanning),
+	IPW2100_HANDLER(-1, 0)
+};
+
+
+static void isr_status_change(struct ipw2100_priv *priv, int status)
+{
+	int i;
+
+	if (status == IPW_STATE_SCANNING &&
+	    priv->status & STATUS_ASSOCIATED &&
+	    !(priv->status & STATUS_SCANNING)) {
+		IPW_DEBUG_INFO("Scan detected while associated, with "
+			       "no scan request.  Restarting firmware.\n");
+
+		/* Wake up any sleeping jobs */
+		schedule_reset(priv);
+	}
+
+	for (i = 0; status_handlers[i].status != -1; i++) {
+		if (status == status_handlers[i].status) {
+			IPW_DEBUG_NOTIF("Status change: %s\n",
+					 status_handlers[i].name);
+			if (status_handlers[i].cb)
+				status_handlers[i].cb(priv, status);
+			priv->wstats.status = status;
+			return;
+		}
+	}
+
+	IPW_DEBUG_NOTIF("unknown status received: %04x\n", status);
+}
+
+static void isr_rx_complete_command(
+	struct ipw2100_priv *priv,
+	struct ipw2100_cmd_header *cmd)
+{
+#ifdef CONFIG_IPW_DEBUG
+	if (cmd->host_command_reg < ARRAY_SIZE(command_types)) {
+		IPW_DEBUG_HC("Command completed '%s (%d)'\n",
+			     command_types[cmd->host_command_reg],
+			     cmd->host_command_reg);
+	}
+#endif
+	if (cmd->host_command_reg == HOST_COMPLETE)
+		priv->status |= STATUS_ENABLED;
+
+	if (cmd->host_command_reg == CARD_DISABLE)
+		priv->status &= ~STATUS_ENABLED;
+
+	priv->status &= ~STATUS_CMD_ACTIVE;
+
+	wake_up_interruptible(&priv->wait_command_queue);
+}
+
+#ifdef CONFIG_IPW_DEBUG
+const char *frame_types[] = {
+	"COMMAND_STATUS_VAL",
+	"STATUS_CHANGE_VAL",
+	"P80211_DATA_VAL",
+	"P8023_DATA_VAL",
+	"HOST_NOTIFICATION_VAL"
+};
+#endif
+
+
+static inline int ipw2100_alloc_skb(
+	struct ipw2100_priv *priv,
+	struct ipw2100_rx_packet *packet)
+{
+	packet->skb = dev_alloc_skb(sizeof(struct ipw2100_rx));
+	if (!packet->skb)
+		return -ENOMEM;
+
+	packet->rxp = (struct ipw2100_rx *)packet->skb->data;
+	packet->dma_addr = pci_map_single(priv->pci_dev, packet->skb->data,
+					  sizeof(struct ipw2100_rx),
+					  PCI_DMA_FROMDEVICE);
+	/* NOTE: pci_map_single does not return an error code, and 0 is a valid
+	 *       dma_addr */
+
+	return 0;
+}
+
+
+#define SEARCH_ERROR   0xffffffff
+#define SEARCH_FAIL    0xfffffffe
+#define SEARCH_SUCCESS 0xfffffff0
+#define SEARCH_DISCARD 0
+#define SEARCH_SNAPSHOT 1
+
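+/* The firmware SRAM snapshot is kept as 0x30 separately allocated 4KB
+ * pages (0x30000 bytes total).  SNAPSHOT_ADDR maps a flat SRAM offset
+ * to the address of the corresponding byte within those pages. */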
+#define SNAPSHOT_ADDR(ofs) (priv->snapshot[((ofs) >> 12) & 0xff] + ((ofs) & 0xfff))
+static inline int ipw2100_snapshot_alloc(struct ipw2100_priv *priv)
+{
+	int i;
+	if (priv->snapshot[0])
+		return 1;
+	for (i = 0; i < 0x30; i++) {
+		priv->snapshot[i] = (u8*)kmalloc(0x1000, GFP_ATOMIC);
+		if (!priv->snapshot[i]) {
+			IPW_DEBUG_INFO("%s: Error allocating snapshot "
+			       "buffer %d\n", priv->net_dev->name, i);
+			while (i > 0)
+				kfree(priv->snapshot[--i]);
+			priv->snapshot[0] = NULL;
+			return 0;
+		}
+	}
+
+	return 1;
+}
+
+static inline void ipw2100_snapshot_free(struct ipw2100_priv *priv)
+{
+	int i;
+	if (!priv->snapshot[0])
+		return;
+	for (i = 0; i < 0x30; i++)
+		kfree(priv->snapshot[i]);
+	priv->snapshot[0] = NULL;
+}
+
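+/* Scan the adapter's 0x30000 bytes of SRAM for the byte pattern held in
+ * in_buf.  In SEARCH_SNAPSHOT mode the SRAM contents are also copied
+ * into the snapshot pages as they are read.  Returns the SRAM offset of
+ * the first match (always below SEARCH_SUCCESS) or SEARCH_FAIL if the
+ * pattern was not found. */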
+static inline u32 ipw2100_match_buf(struct ipw2100_priv *priv, u8 *in_buf,
+				    size_t len, int mode)
+{
+	u32 i, j;
+	u32 tmp;
+	u8 *s, *d;
+	u32 ret;
+
+	s = in_buf;
+	if (mode == SEARCH_SNAPSHOT) {
+		if (!ipw2100_snapshot_alloc(priv))
+			mode = SEARCH_DISCARD;
+	}
+
+	for (ret = SEARCH_FAIL, i = 0; i < 0x30000; i += 4) {
+		read_nic_dword(priv->net_dev, i, &tmp);
+		if (mode == SEARCH_SNAPSHOT)
+			*(u32 *)SNAPSHOT_ADDR(i) = tmp;
+		if (ret == SEARCH_FAIL) {
+			d = (u8*)&tmp;
+			for (j = 0; j < 4; j++) {
+				if (*s != *d) {
+					s = in_buf;
+					continue;
+				}
+
+				s++;
+				d++;
+
+				if ((s - in_buf) == len)
+					ret = (i + j) - len + 1;
+			}
+		} else if (mode == SEARCH_DISCARD)
+			return ret;
+	}
+
+	return ret;
+}
+
+/*
+ *
+ * 0) Disconnect the SKB from the firmware (just unmap)
+ * 1) Pack the ETH header into the SKB
+ * 2) Pass the SKB to the network stack
+ *
+ * When packet is provided by the firmware, it contains the following:
+ *
+ * .  ieee80211_hdr
+ * .  ieee80211_snap_hdr
+ *
+ * The size of the constructed ethernet
+ *
+ */
+#ifdef CONFIG_IPW2100_RX_DEBUG
+u8 packet_data[IPW_RX_NIC_BUFFER_LENGTH];
+#endif
+
+static inline void ipw2100_corruption_detected(struct ipw2100_priv *priv,
+					       int i)
+{
+#ifdef CONFIG_IPW_DEBUG_C3
+	struct ipw2100_status *status = &priv->status_queue.drv[i];
+	u32 match, reg;
+	int j;
+#endif
+#ifdef ACPI_CSTATE_LIMIT_DEFINED
+	int limit;
+#endif
+
+	IPW_DEBUG_INFO(DRV_NAME ": PCI latency error detected at "
+		       "0x%04zX.\n", i * sizeof(struct ipw2100_status));
+
+#ifdef ACPI_CSTATE_LIMIT_DEFINED
+	IPW_DEBUG_INFO(DRV_NAME ": Disabling C3 transitions.\n");
+	limit = acpi_get_cstate_limit();
+	if (limit > 2) {
+		priv->cstate_limit = limit;
+		acpi_set_cstate_limit(2);
+		priv->config |= CFG_C3_DISABLED;
+	}
+#endif
+
+#ifdef CONFIG_IPW_DEBUG_C3
+	/* Halt the firmware so we can get a good image */
+	write_register(priv->net_dev, IPW_REG_RESET_REG,
+		       IPW_AUX_HOST_RESET_REG_STOP_MASTER);
+	j = 5;
+	do {
+		udelay(IPW_WAIT_RESET_MASTER_ASSERT_COMPLETE_DELAY);
+		read_register(priv->net_dev, IPW_REG_RESET_REG, &reg);
+
+		if (reg & IPW_AUX_HOST_RESET_REG_MASTER_DISABLED)
+			break;
+	}  while (j--);
+
+	match = ipw2100_match_buf(priv, (u8*)status,
+				  sizeof(struct ipw2100_status),
+				  SEARCH_SNAPSHOT);
+	if (match < SEARCH_SUCCESS)
+		IPW_DEBUG_INFO("%s: DMA status match in Firmware at "
+			       "offset 0x%06X, length %zu:\n",
+			       priv->net_dev->name, match,
+			       sizeof(struct ipw2100_status));
+	else
+		IPW_DEBUG_INFO("%s: No DMA status match in "
+			       "Firmware.\n", priv->net_dev->name);
+
+	printk_buf((u8*)priv->status_queue.drv,
+		   sizeof(struct ipw2100_status) * RX_QUEUE_LENGTH);
+#endif
+
+	priv->fatal_error = IPW2100_ERR_C3_CORRUPTION;
+	priv->ieee->stats.rx_errors++;
+	schedule_reset(priv);
+}
+
+static inline void isr_rx(struct ipw2100_priv *priv, int i,
+			  struct ieee80211_rx_stats *stats)
+{
+	struct ipw2100_status *status = &priv->status_queue.drv[i];
+	struct ipw2100_rx_packet *packet = &priv->rx_buffers[i];
+
+	IPW_DEBUG_RX("Handler...\n");
+
+	if (unlikely(status->frame_size > skb_tailroom(packet->skb))) {
+		IPW_DEBUG_INFO("%s: frame_size (%u) > skb_tailroom (%u)!"
+			       "  Dropping.\n",
+			       priv->net_dev->name,
+			       status->frame_size, skb_tailroom(packet->skb));
+		priv->ieee->stats.rx_errors++;
+		return;
+	}
+
+	if (unlikely(!netif_running(priv->net_dev))) {
+		priv->ieee->stats.rx_errors++;
+		priv->wstats.discard.misc++;
+		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
+		return;
+	}
+
+	if (unlikely(priv->ieee->iw_mode == IW_MODE_MONITOR &&
+		     status->flags & IPW_STATUS_FLAG_CRC_ERROR)) {
+		IPW_DEBUG_RX("CRC error in packet.  Dropping.\n");
+		priv->ieee->stats.rx_errors++;
+		return;
+	}
+
+	if (unlikely(priv->ieee->iw_mode != IW_MODE_MONITOR &&
+		!(priv->status & STATUS_ASSOCIATED))) {
+		IPW_DEBUG_DROP("Dropping packet while not associated.\n");
+		priv->wstats.discard.misc++;
+		return;
+	}
+
+
+	pci_unmap_single(priv->pci_dev,
+			 packet->dma_addr,
+			 sizeof(struct ipw2100_rx),
+			 PCI_DMA_FROMDEVICE);
+
+	skb_put(packet->skb, status->frame_size);
+
+#ifdef CONFIG_IPW2100_RX_DEBUG
+	/* Make a copy of the frame so we can dump it to the logs if
+	 * ieee80211_rx fails */
+	memcpy(packet_data, packet->skb->data,
+	       min_t(u32, status->frame_size, IPW_RX_NIC_BUFFER_LENGTH));
+#endif
+
+	if (!ieee80211_rx(priv->ieee, packet->skb, stats)) {
+#ifdef CONFIG_IPW2100_RX_DEBUG
+		IPW_DEBUG_DROP("%s: Non consumed packet:\n",
+			       priv->net_dev->name);
+		printk_buf(IPW_DL_DROP, packet_data, status->frame_size);
+#endif
+		priv->ieee->stats.rx_errors++;
+
+		/* ieee80211_rx failed, so it didn't free the SKB */
+		dev_kfree_skb_any(packet->skb);
+		packet->skb = NULL;
+	}
+
+	/* We need to allocate a new SKB and attach it to the RDB. */
+	if (unlikely(ipw2100_alloc_skb(priv, packet))) {
+		IPW_DEBUG_WARNING(
+			"%s: Unable to allocate SKB onto RBD ring - disabling "
+			"adapter.\n", priv->net_dev->name);
+		/* TODO: schedule adapter shutdown */
+		IPW_DEBUG_INFO("TODO: Shutdown adapter...\n");
+	}
+
+	/* Update the RDB entry */
+	priv->rx_queue.drv[i].host_addr = packet->dma_addr;
+}
+
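+/* Sanity check a received status entry against the frame type it claims
+ * to carry.  A size that does not match the expected layout indicates
+ * DMA corruption (seen on some systems when the CPU enters C3). */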
+static inline int ipw2100_corruption_check(struct ipw2100_priv *priv, int i)
+{
+	struct ipw2100_status *status = &priv->status_queue.drv[i];
+	struct ipw2100_rx *u = priv->rx_buffers[i].rxp;
+	u16 frame_type = status->status_fields & STATUS_TYPE_MASK;
+
+	switch (frame_type) {
+	case COMMAND_STATUS_VAL:
+		return (status->frame_size != sizeof(u->rx_data.command));
+	case STATUS_CHANGE_VAL:
+		return (status->frame_size != sizeof(u->rx_data.status));
+	case HOST_NOTIFICATION_VAL:
+		return (status->frame_size < sizeof(u->rx_data.notification));
+	case P80211_DATA_VAL:
+	case P8023_DATA_VAL:
+#ifdef CONFIG_IPW2100_MONITOR
+		return 0;
+#else
+		switch (WLAN_FC_GET_TYPE(u->rx_data.header.frame_ctl)) {
+		case IEEE80211_FTYPE_MGMT:
+		case IEEE80211_FTYPE_CTL:
+			return 0;
+		case IEEE80211_FTYPE_DATA:
+			return (status->frame_size >
+				IPW_MAX_802_11_PAYLOAD_LENGTH);
+		}
+#endif
+	}
+
+	return 1;
+}
+
+/*
+ * ipw2100 interrupts are disabled at this point, and the ISR
+ * is the only code that calls this method.  So, we do not need
+ * to play with any locks.
+ *
+ * RX Queue works as follows:
+ *
+ * Read index - firmware places packet in entry identified by the
+ *              Read index and advances Read index.  In this manner,
+ *              Read index will always point to the next packet to
+ *              be filled--but not yet valid.
+ *
+ * Write index - driver fills this entry with an unused RBD entry.
+ *               This entry has not been filled by the firmware yet.
+ *
+ * In between the W and R indexes are the RBDs that have been received
+ * but not yet processed.
+ *
+ * The process of handling packets will start at WRITE + 1 and advance
+ * until it reaches the READ index.
+ *
+ * The WRITE index is cached in the variable 'priv->rx_queue.next'.
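+ *
+ * For example, with 8 entries, rx_queue.next == 2 and a firmware read
+ * index of 5, the loop below processes entries 3 and 4 and then writes
+ * 4 back as the new write index.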
+ *
+ */
+static inline void __ipw2100_rx_process(struct ipw2100_priv *priv)
+{
+	struct ipw2100_bd_queue *rxq = &priv->rx_queue;
+	struct ipw2100_status_queue *sq = &priv->status_queue;
+	struct ipw2100_rx_packet *packet;
+	u16 frame_type;
+	u32 r, w, i, s;
+	struct ipw2100_rx *u;
+	struct ieee80211_rx_stats stats = {
+		.mac_time = jiffies,
+	};
+
+	read_register(priv->net_dev, IPW_MEM_HOST_SHARED_RX_READ_INDEX, &r);
+	read_register(priv->net_dev, IPW_MEM_HOST_SHARED_RX_WRITE_INDEX, &w);
+
+	if (r >= rxq->entries) {
+		IPW_DEBUG_RX("exit - bad read index\n");
+		return;
+	}
+
+	i = (rxq->next + 1) % rxq->entries;
+	s = i;
+	while (i != r) {
+		/* IPW_DEBUG_RX("r = %d : w = %d : processing = %d\n",
+		   r, rxq->next, i); */
+
+		packet = &priv->rx_buffers[i];
+
+		/* Sync the DMA for the STATUS buffer so CPU is sure to get
+		 * the correct values */
+		pci_dma_sync_single_for_cpu(
+			priv->pci_dev,
+			sq->nic + sizeof(struct ipw2100_status) * i,
+			sizeof(struct ipw2100_status),
+			PCI_DMA_FROMDEVICE);
+
+		/* Sync the DMA for the RX buffer so CPU is sure to get
+		 * the correct values */
+		pci_dma_sync_single_for_cpu(priv->pci_dev, packet->dma_addr,
+					    sizeof(struct ipw2100_rx),
+					    PCI_DMA_FROMDEVICE);
+
+		if (unlikely(ipw2100_corruption_check(priv, i))) {
+			ipw2100_corruption_detected(priv, i);
+			goto increment;
+		}
+
+		u = packet->rxp;
+		frame_type = sq->drv[i].status_fields &
+			STATUS_TYPE_MASK;
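+		/* the status queue reports a relative RSSI reading;
+		 * IPW2100_RSSI_TO_DBM shifts it into dBm for the stack */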
+		stats.rssi = sq->drv[i].rssi + IPW2100_RSSI_TO_DBM;
+		stats.len = sq->drv[i].frame_size;
+
+		stats.mask = 0;
+		if (stats.rssi != 0)
+			stats.mask |= IEEE80211_STATMASK_RSSI;
+		stats.freq = IEEE80211_24GHZ_BAND;
+
+		IPW_DEBUG_RX(
+			"%s: '%s' frame type received (%d).\n",
+			priv->net_dev->name, frame_types[frame_type],
+			stats.len);
+
+		switch (frame_type) {
+		case COMMAND_STATUS_VAL:
+			/* Reset Rx watchdog */
+			isr_rx_complete_command(
+				priv, &u->rx_data.command);
+			break;
+
+		case STATUS_CHANGE_VAL:
+			isr_status_change(priv, u->rx_data.status);
+			break;
+
+		case P80211_DATA_VAL:
+		case P8023_DATA_VAL:
+#ifdef CONFIG_IPW2100_MONITOR
+			if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
+				isr_rx(priv, i, &stats);
+				break;
+			}
+#endif
+			if (stats.len < sizeof(u->rx_data.header))
+				break;
+			switch (WLAN_FC_GET_TYPE(u->rx_data.header.
+						 frame_ctl)) {
+			case IEEE80211_FTYPE_MGMT:
+				ieee80211_rx_mgt(priv->ieee,
+						 &u->rx_data.header,
+						 &stats);
+				break;
+
+			case IEEE80211_FTYPE_CTL:
+				break;
+
+			case IEEE80211_FTYPE_DATA:
+				isr_rx(priv, i, &stats);
+				break;
+
+			}
+			break;
+		}
+
+	increment:
+		/* clear status field associated with this RBD */
+		rxq->drv[i].status.info.field = 0;
+
+		i = (i + 1) % rxq->entries;
+	}
+
+	if (i != s) {
+		/* backtrack one entry, wrapping to end if at 0 */
+		rxq->next = (i ? i : rxq->entries) - 1;
+
+		write_register(priv->net_dev,
+			       IPW_MEM_HOST_SHARED_RX_WRITE_INDEX,
+			       rxq->next);
+	}
+}
+
+
+/*
+ * __ipw2100_tx_process
+ *
+ * This routine will determine whether the next packet on
+ * the fw_pend_list has been processed by the firmware yet.
+ *
+ * If not, then it does nothing and returns.
+ *
+ * If so, then it removes the item from the fw_pend_list, frees
+ * any associated storage, and places the item back on the
+ * free list of its source (either msg_free_list or tx_free_list)
+ *
+ * TX Queue works as follows:
+ *
+ * Read index - points to the next TBD that the firmware will
+ *              process.  The firmware will read the data, and once
+ *              done processing, it will advance the Read index.
+ *
+ * Write index - driver fills this entry with a constructed TBD
+ *               entry.  The Write index is not advanced until the
+ *               packet has been configured.
+ *
+ * In between the W and R indexes are the TBDs that have NOT been
+ * processed.  Lagging behind the R index are packets that have
+ * been processed but have not been freed by the driver.
+ *
+ * In order to free old storage, an internal index will be maintained
+ * that points to the next packet to be freed.  When all used
+ * packets have been freed, the oldest index will be the same as the
+ * firmware's read index.
+ *
+ * The OLDEST index is cached in the variable 'priv->tx_queue.oldest'
+ *
+ * Because the TBD structure can not contain arbitrary data, the
+ * driver must keep an internal queue of cached allocations such that
+ * it can put that data back into the tx_free_list and msg_free_list
+ * for use by future command and data packets.
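+ *
+ * For example, a DATA packet whose header TBD reports num_fragments of
+ * 3 occupies TBDs oldest, oldest+1 and oldest+2; it can only be freed
+ * once the firmware read index has moved past oldest+2.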
+ *
+ */
+static inline int __ipw2100_tx_process(struct ipw2100_priv *priv)
+{
+	struct ipw2100_bd_queue *txq = &priv->tx_queue;
+	struct ipw2100_bd *tbd;
+	struct list_head *element;
+	struct ipw2100_tx_packet *packet;
+	int descriptors_used;
+	int e, i;
+	u32 r, w, frag_num = 0;
+
+	if (list_empty(&priv->fw_pend_list))
+		return 0;
+
+	element = priv->fw_pend_list.next;
+
+	packet = list_entry(element, struct ipw2100_tx_packet, list);
+	tbd = &txq->drv[packet->index];
+
+	/* Determine how many TBD entries must be finished... */
+	switch (packet->type) {
+	case COMMAND:
+		/* COMMAND uses only one slot; don't advance */
+		descriptors_used = 1;
+		e = txq->oldest;
+		break;
+
+	case DATA:
+		/* DATA uses at least two slots (header plus fragments);
+		 * compute the index of the last fragment, which may wrap
+		 * around the ring. */
+		descriptors_used = tbd->num_fragments;
+		frag_num = tbd->num_fragments - 1;
+		e = txq->oldest + frag_num;
+		e %= txq->entries;
+		break;
+
+	default:
+		IPW_DEBUG_WARNING("%s: Bad fw_pend_list entry!\n",
+				   priv->net_dev->name);
+		return 0;
+	}
+
+	/* if the last TBD is not done by NIC yet, then packet is
+	 * not ready to be released.
+	 *
+	 */
+	read_register(priv->net_dev, IPW_MEM_HOST_SHARED_TX_QUEUE_READ_INDEX,
+		      &r);
+	read_register(priv->net_dev, IPW_MEM_HOST_SHARED_TX_QUEUE_WRITE_INDEX,
+		      &w);
+	if (w != txq->next)
+		IPW_DEBUG_WARNING("%s: write index mismatch\n",
+		       priv->net_dev->name);
+
+	/*
+	 * txq->next is the index just past the last TBD the driver wrote,
+	 * txq->oldest is the index of the oldest TBD not yet freed, and
+	 * r is the index of the next TBD the firmware will process.
+	 */
+
+	/*
+	 * Quick graphic to help you visualize the following
+	 * if / else statement
+	 *
+	 * ===>|                     s---->|===============
+	 *                               e>|
+	 * | a | b | c | d | e | f | g | h | i | j | k | l
+	 *       r---->|
+	 *               w
+	 *
+	 * w - updated by driver
+	 * r - updated by firmware
+	 * s - start of oldest BD entry (txq->oldest)
+	 * e - end of oldest BD entry
+	 *
+	 */
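+	/* The packet can be released only if its last TBD (e) has left
+	 * the window of descriptors the firmware has yet to process,
+	 * i.e. the region between r and w (taking wrap into account). */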
+	if (!((r <= w && (e < r || e >= w)) || (e < r && e >= w))) {
+		IPW_DEBUG_TX("exit - no processed packets ready to release.\n");
+		return 0;
+	}
+
+	list_del(element);
+	DEC_STAT(&priv->fw_pend_stat);
+
+#ifdef CONFIG_IPW_DEBUG
+	{
+		int i = txq->oldest;
+		IPW_DEBUG_TX(
+			"TX%d V=%p P=%04X T=%04X L=%d\n", i,
+			&txq->drv[i],
+			(u32)(txq->nic + i * sizeof(struct ipw2100_bd)),
+			txq->drv[i].host_addr,
+			txq->drv[i].buf_length);
+
+		if (packet->type == DATA) {
+			i = (i + 1) % txq->entries;
+
+			IPW_DEBUG_TX(
+				"TX%d V=%p P=%04X T=%04X L=%d\n", i,
+				&txq->drv[i],
+				(u32)(txq->nic + i *
+				sizeof(struct ipw2100_bd)),
+				(u32)txq->drv[i].host_addr,
+				txq->drv[i].buf_length);
+		}
+	}
+#endif
+
+	switch (packet->type) {
+	case DATA:
+		if (txq->drv[txq->oldest].status.info.fields.txType != 0)
+			IPW_DEBUG_WARNING("%s: Queue mismatch.  "
+			       "Expecting DATA TBD but pulled "
+			       "something else: ids %d=%d.\n",
+			       priv->net_dev->name, txq->oldest, packet->index);
+
+		/* DATA packet; we have to unmap and free the SKB */
+		priv->ieee->stats.tx_packets++;
+		for (i = 0; i < frag_num; i++) {
+			tbd = &txq->drv[(packet->index + 1 + i) %
+					txq->entries];
+
+			IPW_DEBUG_TX(
+				"TX%d P=%08x L=%d\n",
+				(packet->index + 1 + i) % txq->entries,
+				tbd->host_addr, tbd->buf_length);
+
+			pci_unmap_single(priv->pci_dev,
+					 tbd->host_addr,
+					 tbd->buf_length,
+					 PCI_DMA_TODEVICE);
+		}
+
+		priv->ieee->stats.tx_bytes += packet->info.d_struct.txb->payload_size;
+		ieee80211_txb_free(packet->info.d_struct.txb);
+		packet->info.d_struct.txb = NULL;
+
+		list_add_tail(element, &priv->tx_free_list);
+		INC_STAT(&priv->tx_free_stat);
+
+		/* We have a free slot in the Tx queue, so wake up the
+		 * transmit layer if it is stopped. */
+		if (priv->status & STATUS_ASSOCIATED &&
+		    netif_queue_stopped(priv->net_dev)) {
+			IPW_DEBUG_INFO("%s: Waking net queue.\n",
+				       priv->net_dev->name);
+			netif_wake_queue(priv->net_dev);
+		}
+
+		/* A packet was processed by the hardware, so update the
+		 * watchdog */
+		priv->net_dev->trans_start = jiffies;
+
+		break;
+
+	case COMMAND:
+		if (txq->drv[txq->oldest].status.info.fields.txType != 1)
+			IPW_DEBUG_WARNING("%s: Queue mismatch.  "
+			       "Expecting COMMAND TBD but pulled "
+			       "something else: ids %d=%d.\n",
+			       priv->net_dev->name, txq->oldest, packet->index);
+
+#ifdef CONFIG_IPW_DEBUG
+		if (packet->info.c_struct.cmd->host_command_reg <
+		    sizeof(command_types) / sizeof(*command_types))
+			IPW_DEBUG_TX(
+				"Command '%s (%d)' processed: %d.\n",
+				command_types[packet->info.c_struct.cmd->host_command_reg],
+				packet->info.c_struct.cmd->host_command_reg,
+				packet->info.c_struct.cmd->cmd_status_reg);
+#endif
+
+		list_add_tail(element, &priv->msg_free_list);
+		INC_STAT(&priv->msg_free_stat);
+		break;
+	}
+
+	/* advance oldest used TBD pointer to start of next entry */
+	txq->oldest = (e + 1) % txq->entries;
+	/* increase available TBDs number */
+	txq->available += descriptors_used;
+	SET_STAT(&priv->txq_stat, txq->available);
+
+	IPW_DEBUG_TX("packet latency (send to process)  %ld jiffies\n",
+			 jiffies - packet->jiffy_start);
+
+	return (!list_empty(&priv->fw_pend_list));
+}
+
+
+static inline void __ipw2100_tx_complete(struct ipw2100_priv *priv)
+{
+	int i = 0;
+
+	while (__ipw2100_tx_process(priv) && i < 200) i++;
+
+	if (i == 200) {
+		IPW_DEBUG_WARNING(
+		       "%s: Driver is running slow (%d iters).\n",
+		       priv->net_dev->name, i);
+	}
+}
+
+
+static void X__ipw2100_tx_send_commands(struct ipw2100_priv *priv)
+{
+	struct list_head *element;
+	struct ipw2100_tx_packet *packet;
+	struct ipw2100_bd_queue *txq = &priv->tx_queue;
+	struct ipw2100_bd *tbd;
+	int next = txq->next;
+
+	while (!list_empty(&priv->msg_pend_list)) {
+		/* if there isn't enough space in TBD queue, then
+		 * don't stuff a new one in.
+		 * NOTE: 3 are needed as a command will take one,
+		 *       and there is a minimum of 2 that must be
+		 *       maintained between the r and w indexes
+		 */
+		if (txq->available <= 3) {
+			IPW_DEBUG_TX("no room in tx_queue\n");
+			break;
+		}
+
+		element = priv->msg_pend_list.next;
+		list_del(element);
+		DEC_STAT(&priv->msg_pend_stat);
+
+		packet = list_entry(element,
+				    struct ipw2100_tx_packet, list);
+
+		IPW_DEBUG_TX("using TBD at virt=%p, phys=%p\n",
+				 &txq->drv[txq->next],
+				 (void*)(txq->nic + txq->next *
+					 sizeof(struct ipw2100_bd)));
+
+		packet->index = txq->next;
+
+		tbd = &txq->drv[txq->next];
+
+		/* initialize TBD */
+		tbd->host_addr = packet->info.c_struct.cmd_phys;
+		tbd->buf_length = sizeof(struct ipw2100_cmd_header);
+		/* not marking number of fragments causes problems
+		 * with f/w debug version */
+		tbd->num_fragments = 1;
+		tbd->status.info.field =
+			IPW_BD_STATUS_TX_FRAME_COMMAND |
+			IPW_BD_STATUS_TX_INTERRUPT_ENABLE;
+
+		/* update TBD queue counters */
+		txq->next++;
+		txq->next %= txq->entries;
+		txq->available--;
+		DEC_STAT(&priv->txq_stat);
+
+		list_add_tail(element, &priv->fw_pend_list);
+		INC_STAT(&priv->fw_pend_stat);
+	}
+
+	if (txq->next != next) {
+		/* kick off the DMA by notifying firmware the
+		 * write index has moved; make sure TBD stores are sync'd */
+		wmb();
+		write_register(priv->net_dev,
+			       IPW_MEM_HOST_SHARED_TX_QUEUE_WRITE_INDEX,
+			       txq->next);
+	}
+}
+
+
+/*
+ * X__ipw2100_tx_send_data
+ *
+ */
+static void X__ipw2100_tx_send_data(struct ipw2100_priv *priv)
+{
+	struct list_head *element;
+	struct ipw2100_tx_packet *packet;
+	struct ipw2100_bd_queue *txq = &priv->tx_queue;
+	struct ipw2100_bd *tbd;
+	int next = txq->next;
+	int i = 0;
+	struct ipw2100_data_header *ipw_hdr;
+	struct ieee80211_hdr *hdr;
+
+	while (!list_empty(&priv->tx_pend_list)) {
+		/* if there isn't enough space in TBD queue, then
+		 * don't stuff a new one in.
+		 * NOTE: 4 are needed as a data packet will take at least
+		 *       two (the header TBD plus one fragment TBD), and
+		 *       there is a minimum of 2 that must be maintained
+		 *       between the r and w indexes
+		 */
+		element = priv->tx_pend_list.next;
+		packet = list_entry(element, struct ipw2100_tx_packet, list);
+
+		if (unlikely(1 + packet->info.d_struct.txb->nr_frags >
+			     IPW_MAX_BDS)) {
+			/* TODO: Support merging buffers if more than
+			 * IPW_MAX_BDS are used */
+			IPW_DEBUG_INFO(
+			       "%s: Maximum BD threshold exceeded.  "
+			       "Increase fragmentation level.\n",
+			       priv->net_dev->name);
+		}
+
+		if (txq->available <= 3 +
+		    packet->info.d_struct.txb->nr_frags) {
+			IPW_DEBUG_TX("no room in tx_queue\n");
+			break;
+		}
+
+		list_del(element);
+		DEC_STAT(&priv->tx_pend_stat);
+
+		tbd = &txq->drv[txq->next];
+
+		packet->index = txq->next;
+
+		ipw_hdr = packet->info.d_struct.data;
+		hdr = (struct ieee80211_hdr *)packet->info.d_struct.txb->
+			fragments[0]->data;
+
+		if (priv->ieee->iw_mode == IW_MODE_INFRA) {
+			/* To DS: Addr1 = BSSID, Addr2 = SA,
+			   Addr3 = DA */
+			memcpy(ipw_hdr->src_addr, hdr->addr2, ETH_ALEN);
+			memcpy(ipw_hdr->dst_addr, hdr->addr3, ETH_ALEN);
+		} else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
+			/* not From/To DS: Addr1 = DA, Addr2 = SA,
+			   Addr3 = BSSID */
+			memcpy(ipw_hdr->src_addr, hdr->addr2, ETH_ALEN);
+			memcpy(ipw_hdr->dst_addr, hdr->addr1, ETH_ALEN);
+		}
+
+		ipw_hdr->host_command_reg = SEND;
+		ipw_hdr->host_command_reg1 = 0;
+
+		/* For now we only support host based encryption */
+		ipw_hdr->needs_encryption = 0;
+		ipw_hdr->encrypted = packet->info.d_struct.txb->encrypted;
+		if (packet->info.d_struct.txb->nr_frags > 1)
+			ipw_hdr->fragment_size =
+				packet->info.d_struct.txb->frag_size - IEEE80211_3ADDR_LEN;
+		else
+			ipw_hdr->fragment_size = 0;
+
+		tbd->host_addr = packet->info.d_struct.data_phys;
+		tbd->buf_length = sizeof(struct ipw2100_data_header);
+		tbd->num_fragments = 1 + packet->info.d_struct.txb->nr_frags;
+		tbd->status.info.field =
+			IPW_BD_STATUS_TX_FRAME_802_3 |
+			IPW_BD_STATUS_TX_FRAME_NOT_LAST_FRAGMENT;
+		txq->next++;
+		txq->next %= txq->entries;
+
+		IPW_DEBUG_TX(
+			"data header tbd TX%d P=%08x L=%d\n",
+			packet->index, tbd->host_addr,
+			tbd->buf_length);
+#ifdef CONFIG_IPW_DEBUG
+		if (packet->info.d_struct.txb->nr_frags > 1)
+			IPW_DEBUG_FRAG("fragment Tx: %d frames\n",
+				       packet->info.d_struct.txb->nr_frags);
+#endif
+
+		for (i = 0; i < packet->info.d_struct.txb->nr_frags; i++) {
+			tbd = &txq->drv[txq->next];
+			if (i == packet->info.d_struct.txb->nr_frags - 1)
+				tbd->status.info.field =
+					IPW_BD_STATUS_TX_FRAME_802_3 |
+					IPW_BD_STATUS_TX_INTERRUPT_ENABLE;
+			else
+				tbd->status.info.field =
+					IPW_BD_STATUS_TX_FRAME_802_3 |
+					IPW_BD_STATUS_TX_FRAME_NOT_LAST_FRAGMENT;
+
+			tbd->buf_length = packet->info.d_struct.txb->
+				fragments[i]->len - IEEE80211_3ADDR_LEN;
+
+			tbd->host_addr = pci_map_single(
+				priv->pci_dev,
+				packet->info.d_struct.txb->fragments[i]->data +
+				IEEE80211_3ADDR_LEN,
+				tbd->buf_length,
+				PCI_DMA_TODEVICE);
+
+			IPW_DEBUG_TX(
+				"data frag tbd TX%d P=%08x L=%d\n",
+				txq->next, tbd->host_addr, tbd->buf_length);
+
+			pci_dma_sync_single_for_device(
+				priv->pci_dev, tbd->host_addr,
+				tbd->buf_length,
+				PCI_DMA_TODEVICE);
+
+			txq->next++;
+			txq->next %= txq->entries;
+		}
+
+		txq->available -= 1 + packet->info.d_struct.txb->nr_frags;
+		SET_STAT(&priv->txq_stat, txq->available);
+
+		list_add_tail(element, &priv->fw_pend_list);
+		INC_STAT(&priv->fw_pend_stat);
+	}
+
+	if (txq->next != next) {
+		/* kick off the DMA by notifying firmware the
+		 * write index has moved; make sure TBD stores are sync'd */
+		write_register(priv->net_dev,
+			       IPW_MEM_HOST_SHARED_TX_QUEUE_WRITE_INDEX,
+			       txq->next);
+	}
+	return;
+}
+
+static void ipw2100_irq_tasklet(struct ipw2100_priv *priv)
+{
+	struct net_device *dev = priv->net_dev;
+	unsigned long flags;
+	u32 inta, tmp;
+
+	spin_lock_irqsave(&priv->low_lock, flags);
+	ipw2100_disable_interrupts(priv);
+
+	read_register(dev, IPW_REG_INTA, &inta);
+
+	IPW_DEBUG_ISR("enter - INTA: 0x%08lX\n",
+		      (unsigned long)inta & IPW_INTERRUPT_MASK);
+
+	priv->in_isr++;
+	priv->interrupts++;
+
+	/* We do not loop and keep polling for more interrupts as this
+	 * is frowned upon and doesn't play nicely with other potentially
+	 * chained IRQs */
+	IPW_DEBUG_ISR("INTA: 0x%08lX\n",
+		      (unsigned long)inta & IPW_INTERRUPT_MASK);
+
+	if (inta & IPW2100_INTA_FATAL_ERROR) {
+		IPW_DEBUG_WARNING(DRV_NAME
+				  ": Fatal interrupt. Scheduling firmware restart.\n");
+		priv->inta_other++;
+		write_register(
+			dev, IPW_REG_INTA,
+			IPW2100_INTA_FATAL_ERROR);
+
+		read_nic_dword(dev, IPW_NIC_FATAL_ERROR, &priv->fatal_error);
+		IPW_DEBUG_INFO("%s: Fatal error value: 0x%08X\n",
+			       priv->net_dev->name, priv->fatal_error);
+
+		read_nic_dword(dev, IPW_ERROR_ADDR(priv->fatal_error), &tmp);
+		IPW_DEBUG_INFO("%s: Fatal error address value: 0x%08X\n",
+			       priv->net_dev->name, tmp);
+
+		/* Wake up any sleeping jobs */
+		schedule_reset(priv);
+	}
+
+	if (inta & IPW2100_INTA_PARITY_ERROR) {
+		IPW_DEBUG_ERROR("***** PARITY ERROR INTERRUPT !!!! \n");
+		priv->inta_other++;
+		write_register(
+			dev, IPW_REG_INTA,
+			IPW2100_INTA_PARITY_ERROR);
+	}
+
+	if (inta & IPW2100_INTA_RX_TRANSFER) {
+		IPW_DEBUG_ISR("RX interrupt\n");
+
+		priv->rx_interrupts++;
+
+		write_register(
+			dev, IPW_REG_INTA,
+			IPW2100_INTA_RX_TRANSFER);
+
+		__ipw2100_rx_process(priv);
+		__ipw2100_tx_complete(priv);
+	}
+
+	if (inta & IPW2100_INTA_TX_TRANSFER) {
+		IPW_DEBUG_ISR("TX interrupt\n");
+
+		priv->tx_interrupts++;
+
+		write_register(dev, IPW_REG_INTA,
+			       IPW2100_INTA_TX_TRANSFER);
+
+		__ipw2100_tx_complete(priv);
+		X__ipw2100_tx_send_commands(priv);
+		X__ipw2100_tx_send_data(priv);
+	}
+
+	if (inta & IPW2100_INTA_TX_COMPLETE) {
+		IPW_DEBUG_ISR("TX complete\n");
+		priv->inta_other++;
+		write_register(
+			dev, IPW_REG_INTA,
+			IPW2100_INTA_TX_COMPLETE);
+
+		__ipw2100_tx_complete(priv);
+	}
+
+	if (inta & IPW2100_INTA_EVENT_INTERRUPT) {
+		/* ipw2100_handle_event(dev); */
+		priv->inta_other++;
+		write_register(
+			dev, IPW_REG_INTA,
+			IPW2100_INTA_EVENT_INTERRUPT);
+	}
+
+	if (inta & IPW2100_INTA_FW_INIT_DONE) {
+		IPW_DEBUG_ISR("FW init done interrupt\n");
+		priv->inta_other++;
+
+		read_register(dev, IPW_REG_INTA, &tmp);
+		if (tmp & (IPW2100_INTA_FATAL_ERROR |
+			   IPW2100_INTA_PARITY_ERROR)) {
+			write_register(
+				dev, IPW_REG_INTA,
+				IPW2100_INTA_FATAL_ERROR |
+				IPW2100_INTA_PARITY_ERROR);
+		}
+
+		write_register(dev, IPW_REG_INTA,
+			       IPW2100_INTA_FW_INIT_DONE);
+	}
+
+	if (inta & IPW2100_INTA_STATUS_CHANGE) {
+		IPW_DEBUG_ISR("Status change interrupt\n");
+		priv->inta_other++;
+		write_register(
+			dev, IPW_REG_INTA,
+			IPW2100_INTA_STATUS_CHANGE);
+	}
+
+	if (inta & IPW2100_INTA_SLAVE_MODE_HOST_COMMAND_DONE) {
+		IPW_DEBUG_ISR("slave host mode interrupt\n");
+		priv->inta_other++;
+		write_register(
+			dev, IPW_REG_INTA,
+			IPW2100_INTA_SLAVE_MODE_HOST_COMMAND_DONE);
+	}
+
+	priv->in_isr--;
+	ipw2100_enable_interrupts(priv);
+
+	spin_unlock_irqrestore(&priv->low_lock, flags);
+
+	IPW_DEBUG_ISR("exit\n");
+}
+
+
+static irqreturn_t ipw2100_interrupt(int irq, void *data,
+				     struct pt_regs *regs)
+{
+	struct ipw2100_priv *priv = data;
+	u32 inta, inta_mask;
+
+	if (!data)
+		return IRQ_NONE;
+
+	spin_lock(&priv->low_lock);
+
+	/* We check to see if we should be ignoring interrupts before
+	 * we touch the hardware.  During ucode load if we try and handle
+	 * an interrupt we can cause keyboard problems as well as cause
+	 * the ucode to fail to initialize */
+	if (!(priv->status & STATUS_INT_ENABLED)) {
+		/* Shared IRQ */
+		goto none;
+	}
+
+	read_register(priv->net_dev, IPW_REG_INTA_MASK, &inta_mask);
+	read_register(priv->net_dev, IPW_REG_INTA, &inta);
+
+	if (inta == 0xFFFFFFFF) {
+		/* Hardware disappeared */
+		IPW_DEBUG_WARNING("IRQ INTA == 0xFFFFFFFF\n");
+		goto none;
+	}
+
+	inta &= IPW_INTERRUPT_MASK;
+
+	if (!(inta & inta_mask)) {
+		/* Shared interrupt */
+		goto none;
+	}
+
+	/* We disable the hardware interrupt here just to prevent unneeded
+	 * calls to be made.  We disable this again within the actual
+	 * work tasklet, so if another part of the code re-enables the
+	 * interrupt, that is fine */
+	ipw2100_disable_interrupts(priv);
+
+	tasklet_schedule(&priv->irq_tasklet);
+	spin_unlock(&priv->low_lock);
+
+	return IRQ_HANDLED;
+ none:
+	spin_unlock(&priv->low_lock);
+	return IRQ_NONE;
+}
+
+static int ipw2100_tx(struct ieee80211_txb *txb, struct net_device *dev)
+{
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+	struct list_head *element;
+	struct ipw2100_tx_packet *packet;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->low_lock, flags);
+
+	if (!(priv->status & STATUS_ASSOCIATED)) {
+		IPW_DEBUG_INFO("Can not transmit when not connected.\n");
+		priv->ieee->stats.tx_carrier_errors++;
+		netif_stop_queue(dev);
+		goto fail_unlock;
+	}
+
+	if (list_empty(&priv->tx_free_list))
+		goto fail_unlock;
+
+	element = priv->tx_free_list.next;
+	packet = list_entry(element, struct ipw2100_tx_packet, list);
+
+	packet->info.d_struct.txb = txb;
+
+	IPW_DEBUG_TX("Sending fragment (%d bytes):\n",
+			 txb->fragments[0]->len);
+	printk_buf(IPW_DL_TX, txb->fragments[0]->data,
+		   txb->fragments[0]->len);
+
+	packet->jiffy_start = jiffies;
+
+	list_del(element);
+	DEC_STAT(&priv->tx_free_stat);
+
+	list_add_tail(element, &priv->tx_pend_list);
+	INC_STAT(&priv->tx_pend_stat);
+
+	X__ipw2100_tx_send_data(priv);
+
+	spin_unlock_irqrestore(&priv->low_lock, flags);
+	return 0;
+
+ fail_unlock:
+	netif_stop_queue(dev);
+	spin_unlock_irqrestore(&priv->low_lock, flags);
+	return 1;
+}
+
+
+static int ipw2100_msg_allocate(struct ipw2100_priv *priv)
+{
+	int i, j, err = -EINVAL;
+	void *v;
+	dma_addr_t p;
+
+	priv->msg_buffers = (struct ipw2100_tx_packet *)kmalloc(
+		IPW_COMMAND_POOL_SIZE * sizeof(struct ipw2100_tx_packet),
+		GFP_KERNEL);
+	if (!priv->msg_buffers) {
+		IPW_DEBUG_ERROR("%s: PCI alloc failed for msg "
+		       "buffers.\n", priv->net_dev->name);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < IPW_COMMAND_POOL_SIZE; i++) {
+		v = pci_alloc_consistent(
+			priv->pci_dev,
+			sizeof(struct ipw2100_cmd_header),
+			&p);
+		if (!v) {
+			IPW_DEBUG_ERROR(
+			       "%s: PCI alloc failed for msg "
+			       "buffers.\n",
+			       priv->net_dev->name);
+			err = -ENOMEM;
+			break;
+		}
+
+		memset(v, 0, sizeof(struct ipw2100_cmd_header));
+
+		priv->msg_buffers[i].type = COMMAND;
+		priv->msg_buffers[i].info.c_struct.cmd =
+			(struct ipw2100_cmd_header*)v;
+		priv->msg_buffers[i].info.c_struct.cmd_phys = p;
+	}
+
+	if (i == IPW_COMMAND_POOL_SIZE)
+		return 0;
+
+	for (j = 0; j < i; j++) {
+		pci_free_consistent(
+			priv->pci_dev,
+			sizeof(struct ipw2100_cmd_header),
+			priv->msg_buffers[j].info.c_struct.cmd,
+			priv->msg_buffers[j].info.c_struct.cmd_phys);
+	}
+
+	kfree(priv->msg_buffers);
+	priv->msg_buffers = NULL;
+
+	return err;
+}
+
+static int ipw2100_msg_initialize(struct ipw2100_priv *priv)
+{
+	int i;
+
+	INIT_LIST_HEAD(&priv->msg_free_list);
+	INIT_LIST_HEAD(&priv->msg_pend_list);
+
+	for (i = 0; i < IPW_COMMAND_POOL_SIZE; i++)
+		list_add_tail(&priv->msg_buffers[i].list, &priv->msg_free_list);
+	SET_STAT(&priv->msg_free_stat, i);
+
+	return 0;
+}
+
+static void ipw2100_msg_free(struct ipw2100_priv *priv)
+{
+	int i;
+
+	if (!priv->msg_buffers)
+		return;
+
+	for (i = 0; i < IPW_COMMAND_POOL_SIZE; i++) {
+		pci_free_consistent(priv->pci_dev,
+				    sizeof(struct ipw2100_cmd_header),
+				    priv->msg_buffers[i].info.c_struct.cmd,
+				    priv->msg_buffers[i].info.c_struct.cmd_phys);
+	}
+
+	kfree(priv->msg_buffers);
+	priv->msg_buffers = NULL;
+}
+
+static ssize_t show_pci(struct device *d, struct device_attribute *attr,
+			char *buf)
+{
+	struct pci_dev *pci_dev = container_of(d, struct pci_dev, dev);
+	char *out = buf;
+	int i, j;
+	u32 val;
+
+	for (i = 0; i < 16; i++) {
+		out += sprintf(out, "[%08X] ", i * 16);
+		for (j = 0; j < 16; j += 4) {
+			pci_read_config_dword(pci_dev, i * 16 + j, &val);
+			out += sprintf(out, "%08X ", val);
+		}
+		out += sprintf(out, "\n");
+	}
+
+	return out - buf;
+}
+static DEVICE_ATTR(pci, S_IRUGO, show_pci, NULL);
+
+static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
+			char *buf)
+{
+	struct ipw2100_priv *p = d->driver_data;
+	return sprintf(buf, "0x%08x\n", (int)p->config);
+}
+static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
+
+static ssize_t show_status(struct device *d, struct device_attribute *attr,
+			char *buf)
+{
+	struct ipw2100_priv *p = d->driver_data;
+	return sprintf(buf, "0x%08x\n", (int)p->status);
+}
+static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
+
+static ssize_t show_capability(struct device *d, struct device_attribute *attr,
+				char *buf)
+{
+	struct ipw2100_priv *p = d->driver_data;
+	return sprintf(buf, "0x%08x\n", (int)p->capability);
+}
+static DEVICE_ATTR(capability, S_IRUGO, show_capability, NULL);
+
+
+#define IPW2100_REG(x) { IPW_ ##x, #x }
+const struct {
+	u32 addr;
+	const char *name;
+} hw_data[] = {
+	IPW2100_REG(REG_GP_CNTRL),
+	IPW2100_REG(REG_GPIO),
+	IPW2100_REG(REG_INTA),
+	IPW2100_REG(REG_INTA_MASK),
+	IPW2100_REG(REG_RESET_REG),
+};
+#define IPW2100_NIC(x, s) { x, #x, s }
+const struct {
+	u32 addr;
+	const char *name;
+	size_t size;
+} nic_data[] = {
+	IPW2100_NIC(IPW2100_CONTROL_REG, 2),
+	IPW2100_NIC(0x210014, 1),
+	IPW2100_NIC(0x210000, 1),
+};
+#define IPW2100_ORD(x, d) { IPW_ORD_ ##x, #x, d }
+const struct {
+	u8 index;
+	const char *name;
+	const char *desc;
+} ord_data[] = {
+	IPW2100_ORD(STAT_TX_HOST_REQUESTS, "requested Host Tx's (MSDU)"),
+	IPW2100_ORD(STAT_TX_HOST_COMPLETE, "successful Host Tx's (MSDU)"),
+	IPW2100_ORD(STAT_TX_DIR_DATA,	   "successful Directed Tx's (MSDU)"),
+	IPW2100_ORD(STAT_TX_DIR_DATA1,	   "successful Directed Tx's (MSDU) @ 1MB"),
+	IPW2100_ORD(STAT_TX_DIR_DATA2,	   "successful Directed Tx's (MSDU) @ 2MB"),
+	IPW2100_ORD(STAT_TX_DIR_DATA5_5,   "successful Directed Tx's (MSDU) @ 5_5MB"),
+	IPW2100_ORD(STAT_TX_DIR_DATA11,	   "successful Directed Tx's (MSDU) @ 11MB"),
+	IPW2100_ORD(STAT_TX_NODIR_DATA1,   "successful Non_Directed Tx's (MSDU) @ 1MB"),
+	IPW2100_ORD(STAT_TX_NODIR_DATA2,   "successful Non_Directed Tx's (MSDU) @ 2MB"),
+	IPW2100_ORD(STAT_TX_NODIR_DATA5_5, "successful Non_Directed Tx's (MSDU) @ 5.5MB"),
+	IPW2100_ORD(STAT_TX_NODIR_DATA11,  "successful Non_Directed Tx's (MSDU) @ 11MB"),
+	IPW2100_ORD(STAT_NULL_DATA,	   "successful NULL data Tx's"),
+	IPW2100_ORD(STAT_TX_RTS,	   "successful Tx RTS"),
+	IPW2100_ORD(STAT_TX_CTS,	   "successful Tx CTS"),
+	IPW2100_ORD(STAT_TX_ACK,	   "successful Tx ACK"),
+	IPW2100_ORD(STAT_TX_ASSN,	   "successful Association Tx's"),
+	IPW2100_ORD(STAT_TX_ASSN_RESP,	   "successful Association response Tx's"),
+	IPW2100_ORD(STAT_TX_REASSN,	   "successful Reassociation Tx's"),
+	IPW2100_ORD(STAT_TX_REASSN_RESP,   "successful Reassociation response Tx's"),
+	IPW2100_ORD(STAT_TX_PROBE,	   "probes successfully transmitted"),
+	IPW2100_ORD(STAT_TX_PROBE_RESP,	   "probe responses successfully transmitted"),
+	IPW2100_ORD(STAT_TX_BEACON,	   "tx beacon"),
+	IPW2100_ORD(STAT_TX_ATIM,	   "Tx ATIM"),
+	IPW2100_ORD(STAT_TX_DISASSN,	   "successful Disassociation TX"),
+	IPW2100_ORD(STAT_TX_AUTH,	   "successful Authentication Tx"),
+	IPW2100_ORD(STAT_TX_DEAUTH,	   "successful Deauthentication TX"),
+	IPW2100_ORD(STAT_TX_TOTAL_BYTES,   "Total successful Tx data bytes"),
+	IPW2100_ORD(STAT_TX_RETRIES,       "Tx retries"),
+	IPW2100_ORD(STAT_TX_RETRY1,        "Tx retries at 1MBPS"),
+	IPW2100_ORD(STAT_TX_RETRY2,        "Tx retries at 2MBPS"),
+	IPW2100_ORD(STAT_TX_RETRY5_5,	   "Tx retries at 5.5MBPS"),
+	IPW2100_ORD(STAT_TX_RETRY11,	   "Tx retries at 11MBPS"),
+	IPW2100_ORD(STAT_TX_FAILURES,	   "Tx Failures"),
+	IPW2100_ORD(STAT_TX_MAX_TRIES_IN_HOP,"times max tries in a hop failed"),
+	IPW2100_ORD(STAT_TX_DISASSN_FAIL,	"times disassociation failed"),
+	IPW2100_ORD(STAT_TX_ERR_CTS,         "missed/bad CTS frames"),
+	IPW2100_ORD(STAT_TX_ERR_ACK,	"tx err due to acks"),
+	IPW2100_ORD(STAT_RX_HOST,	"packets passed to host"),
+	IPW2100_ORD(STAT_RX_DIR_DATA,	"directed packets"),
+	IPW2100_ORD(STAT_RX_DIR_DATA1,	"directed packets at 1MB"),
+	IPW2100_ORD(STAT_RX_DIR_DATA2,	"directed packets at 2MB"),
+	IPW2100_ORD(STAT_RX_DIR_DATA5_5,	"directed packets at 5.5MB"),
+	IPW2100_ORD(STAT_RX_DIR_DATA11,	"directed packets at 11MB"),
+	IPW2100_ORD(STAT_RX_NODIR_DATA,"nondirected packets"),
+	IPW2100_ORD(STAT_RX_NODIR_DATA1,	"nondirected packets at 1MB"),
+	IPW2100_ORD(STAT_RX_NODIR_DATA2,	"nondirected packets at 2MB"),
+	IPW2100_ORD(STAT_RX_NODIR_DATA5_5,	"nondirected packets at 5.5MB"),
+	IPW2100_ORD(STAT_RX_NODIR_DATA11,	"nondirected packets at 11MB"),
+	IPW2100_ORD(STAT_RX_NULL_DATA,	"null data rx's"),
+	IPW2100_ORD(STAT_RX_RTS,	"Rx RTS"),
+	IPW2100_ORD(STAT_RX_CTS,	"Rx CTS"),
+	IPW2100_ORD(STAT_RX_ACK,	"Rx ACK"),
+	IPW2100_ORD(STAT_RX_CFEND,	"Rx CF End"),
+	IPW2100_ORD(STAT_RX_CFEND_ACK,	"Rx CF End + CF Ack"),
+	IPW2100_ORD(STAT_RX_ASSN,	"Association Rx's"),
+	IPW2100_ORD(STAT_RX_ASSN_RESP,	"Association response Rx's"),
+	IPW2100_ORD(STAT_RX_REASSN,	"Reassociation Rx's"),
+	IPW2100_ORD(STAT_RX_REASSN_RESP,	"Reassociation response Rx's"),
+	IPW2100_ORD(STAT_RX_PROBE,	"probe Rx's"),
+	IPW2100_ORD(STAT_RX_PROBE_RESP,	"probe response Rx's"),
+	IPW2100_ORD(STAT_RX_BEACON,	"Rx beacon"),
+	IPW2100_ORD(STAT_RX_ATIM,	"Rx ATIM"),
+	IPW2100_ORD(STAT_RX_DISASSN,	"disassociation Rx"),
+	IPW2100_ORD(STAT_RX_AUTH,	"authentication Rx"),
+	IPW2100_ORD(STAT_RX_DEAUTH,	"deauthentication Rx"),
+	IPW2100_ORD(STAT_RX_TOTAL_BYTES,"Total rx data bytes received"),
+	IPW2100_ORD(STAT_RX_ERR_CRC,	 "packets with Rx CRC error"),
+	IPW2100_ORD(STAT_RX_ERR_CRC1,	 "Rx CRC errors at 1MB"),
+	IPW2100_ORD(STAT_RX_ERR_CRC2,	 "Rx CRC errors at 2MB"),
+	IPW2100_ORD(STAT_RX_ERR_CRC5_5,	 "Rx CRC errors at 5.5MB"),
+	IPW2100_ORD(STAT_RX_ERR_CRC11,	 "Rx CRC errors at 11MB"),
+	IPW2100_ORD(STAT_RX_DUPLICATE1, "duplicate rx packets at 1MB"),
+	IPW2100_ORD(STAT_RX_DUPLICATE2,	 "duplicate rx packets at 2MB"),
+	IPW2100_ORD(STAT_RX_DUPLICATE5_5,	 "duplicate rx packets at 5.5MB"),
+	IPW2100_ORD(STAT_RX_DUPLICATE11,	 "duplicate rx packets at 11MB"),
+	IPW2100_ORD(STAT_RX_DUPLICATE, "duplicate rx packets"),
+	IPW2100_ORD(PERS_DB_LOCK,	"locking fw permanent  db"),
+	IPW2100_ORD(PERS_DB_SIZE,	"size of fw permanent  db"),
+	IPW2100_ORD(PERS_DB_ADDR,	"address of fw permanent  db"),
+	IPW2100_ORD(STAT_RX_INVALID_PROTOCOL,	"rx frames with invalid protocol"),
+	IPW2100_ORD(SYS_BOOT_TIME,	"Boot time"),
+	IPW2100_ORD(STAT_RX_NO_BUFFER,	"rx frames rejected due to no buffer"),
+	IPW2100_ORD(STAT_RX_MISSING_FRAG,	"rx frames dropped due to missing fragment"),
+	IPW2100_ORD(STAT_RX_ORPHAN_FRAG,	"rx frames dropped due to non-sequential fragment"),
+	IPW2100_ORD(STAT_RX_ORPHAN_FRAME,	"rx frames dropped due to unmatched 1st frame"),
+	IPW2100_ORD(STAT_RX_FRAG_AGEOUT,	"rx frames dropped due to uncompleted frame"),
+	IPW2100_ORD(STAT_RX_ICV_ERRORS,	"ICV errors during decryption"),
+	IPW2100_ORD(STAT_PSP_SUSPENSION,"times adapter suspended"),
+	IPW2100_ORD(STAT_PSP_BCN_TIMEOUT,	"beacon timeout"),
+	IPW2100_ORD(STAT_PSP_POLL_TIMEOUT,	"poll response timeouts"),
+	IPW2100_ORD(STAT_PSP_NONDIR_TIMEOUT, "timeouts waiting for last {broad,multi}cast pkt"),
+	IPW2100_ORD(STAT_PSP_RX_DTIMS,	"PSP DTIMs received"),
+	IPW2100_ORD(STAT_PSP_RX_TIMS,	"PSP TIMs received"),
+	IPW2100_ORD(STAT_PSP_STATION_ID,"PSP Station ID"),
+	IPW2100_ORD(LAST_ASSN_TIME,	"RTC time of last association"),
+	IPW2100_ORD(STAT_PERCENT_MISSED_BCNS,"current calculation of % missed beacons"),
+	IPW2100_ORD(STAT_PERCENT_RETRIES,"current calculation of % missed tx retries"),
+	IPW2100_ORD(ASSOCIATED_AP_PTR,	"0 if not associated, else pointer to AP table entry"),
+	IPW2100_ORD(AVAILABLE_AP_CNT,	"APs described in the AP table"),
+	IPW2100_ORD(AP_LIST_PTR,	"Ptr to list of available APs"),
+	IPW2100_ORD(STAT_AP_ASSNS,	"associations"),
+	IPW2100_ORD(STAT_ASSN_FAIL,	"association failures"),
+	IPW2100_ORD(STAT_ASSN_RESP_FAIL,"failures due to response fail"),
+	IPW2100_ORD(STAT_FULL_SCANS,	"full scans"),
+	IPW2100_ORD(CARD_DISABLED,	"Card Disabled"),
+	IPW2100_ORD(STAT_ROAM_INHIBIT,	"times roaming was inhibited due to activity"),
+	IPW2100_ORD(RSSI_AT_ASSN,	"RSSI of associated AP at time of association"),
+	IPW2100_ORD(STAT_ASSN_CAUSE1,	"reassociation: no probe response or TX on hop"),
+	IPW2100_ORD(STAT_ASSN_CAUSE2,	"reassociation: poor tx/rx quality"),
+	IPW2100_ORD(STAT_ASSN_CAUSE3,	"reassociation: tx/rx quality (excessive AP load)"),
+	IPW2100_ORD(STAT_ASSN_CAUSE4,	"reassociation: AP RSSI level"),
+	IPW2100_ORD(STAT_ASSN_CAUSE5,	"reassociations due to load leveling"),
+	IPW2100_ORD(STAT_AUTH_FAIL,	"times authentication failed"),
+	IPW2100_ORD(STAT_AUTH_RESP_FAIL,"times authentication response failed"),
+	IPW2100_ORD(STATION_TABLE_CNT,	"entries in association table"),
+	IPW2100_ORD(RSSI_AVG_CURR,	"Current avg RSSI"),
+	IPW2100_ORD(POWER_MGMT_MODE,	"Power mode - 0=CAM, 1=PSP"),
+	IPW2100_ORD(COUNTRY_CODE,	"IEEE country code as recv'd from beacon"),
+	IPW2100_ORD(COUNTRY_CHANNELS,	"channels supported by country"),
+	IPW2100_ORD(RESET_CNT,	"adapter resets (warm)"),
+	IPW2100_ORD(BEACON_INTERVAL,	"Beacon interval"),
+	IPW2100_ORD(ANTENNA_DIVERSITY,	"TRUE if antenna diversity is disabled"),
+	IPW2100_ORD(DTIM_PERIOD,	"beacon intervals between DTIMs"),
+	IPW2100_ORD(OUR_FREQ,	"current radio freq lower digits - channel ID"),
+	IPW2100_ORD(RTC_TIME,	"current RTC time"),
+	IPW2100_ORD(PORT_TYPE,	"operating mode"),
+	IPW2100_ORD(CURRENT_TX_RATE,	"current tx rate"),
+	IPW2100_ORD(SUPPORTED_RATES,	"supported tx rates"),
+	IPW2100_ORD(ATIM_WINDOW,	"current ATIM Window"),
+	IPW2100_ORD(BASIC_RATES,	"basic tx rates"),
+	IPW2100_ORD(NIC_HIGHEST_RATE,	"NIC highest tx rate"),
+	IPW2100_ORD(AP_HIGHEST_RATE,	"AP highest tx rate"),
+	IPW2100_ORD(CAPABILITIES,	"Management frame capability field"),
+	IPW2100_ORD(AUTH_TYPE,	"Type of authentication"),
+	IPW2100_ORD(RADIO_TYPE,	"Adapter card platform type"),
+	IPW2100_ORD(RTS_THRESHOLD,	"Min packet length for RTS handshaking"),
+	IPW2100_ORD(INT_MODE,	"International mode"),
+	IPW2100_ORD(FRAGMENTATION_THRESHOLD,	"protocol frag threshold"),
+	IPW2100_ORD(EEPROM_SRAM_DB_BLOCK_START_ADDRESS,	"EEPROM offset in SRAM"),
+	IPW2100_ORD(EEPROM_SRAM_DB_BLOCK_SIZE,	"EEPROM size in SRAM"),
+	IPW2100_ORD(EEPROM_SKU_CAPABILITY,	"EEPROM SKU Capability"),
+	IPW2100_ORD(EEPROM_IBSS_11B_CHANNELS,	"EEPROM IBSS 11b channel set"),
+	IPW2100_ORD(MAC_VERSION,	"MAC Version"),
+	IPW2100_ORD(MAC_REVISION,	"MAC Revision"),
+	IPW2100_ORD(RADIO_VERSION,	"Radio Version"),
+	IPW2100_ORD(NIC_MANF_DATE_TIME,	"MANF Date/Time STAMP"),
+	IPW2100_ORD(UCODE_VERSION,	"Ucode Version"),
+};
+
+
+static ssize_t show_registers(struct device *d, struct device_attribute *attr,
+				char *buf)
+{
+	int i;
+	struct ipw2100_priv *priv = dev_get_drvdata(d);
+	struct net_device *dev = priv->net_dev;
+	char * out = buf;
+	u32 val = 0;
+
+	out += sprintf(out, "%30s [Address ] : Hex\n", "Register");
+
+	for (i = 0; i < (sizeof(hw_data) / sizeof(*hw_data)); i++) {
+		read_register(dev, hw_data[i].addr, &val);
+		out += sprintf(out, "%30s [%08X] : %08X\n",
+			       hw_data[i].name, hw_data[i].addr, val);
+	}
+
+	return out - buf;
+}
+static DEVICE_ATTR(registers, S_IRUGO, show_registers, NULL);
+
+
+static ssize_t show_hardware(struct device *d, struct device_attribute *attr,
+				char *buf)
+{
+	struct ipw2100_priv *priv = dev_get_drvdata(d);
+	struct net_device *dev = priv->net_dev;
+	char * out = buf;
+	int i;
+
+	out += sprintf(out, "%30s [Address ] : Hex\n", "NIC entry");
+
+	for (i = 0; i < (sizeof(nic_data) / sizeof(*nic_data)); i++) {
+		u8 tmp8;
+		u16 tmp16;
+		u32 tmp32;
+
+		switch (nic_data[i].size) {
+		case 1:
+			read_nic_byte(dev, nic_data[i].addr, &tmp8);
+			out += sprintf(out, "%30s [%08X] : %02X\n",
+				       nic_data[i].name, nic_data[i].addr,
+				       tmp8);
+			break;
+		case 2:
+			read_nic_word(dev, nic_data[i].addr, &tmp16);
+			out += sprintf(out, "%30s [%08X] : %04X\n",
+				       nic_data[i].name, nic_data[i].addr,
+				       tmp16);
+			break;
+		case 4:
+			read_nic_dword(dev, nic_data[i].addr, &tmp32);
+			out += sprintf(out, "%30s [%08X] : %08X\n",
+				       nic_data[i].name, nic_data[i].addr,
+				       tmp32);
+			break;
+		}
+	}
+	return out - buf;
+}
+static DEVICE_ATTR(hardware, S_IRUGO, show_hardware, NULL);
+
+
+static ssize_t show_memory(struct device *d, struct device_attribute *attr,
+				char *buf)
+{
+	struct ipw2100_priv *priv = dev_get_drvdata(d);
+	struct net_device *dev = priv->net_dev;
+	static unsigned long loop = 0;
+	int len = 0;
+	u32 buffer[4];
+	int i;
+	char line[81];
+
+	if (loop >= 0x30000)
+		loop = 0;
+
+	/* sysfs provides us PAGE_SIZE buffer */
+	while (len < PAGE_SIZE - 128 && loop < 0x30000) {
+
+		if (priv->snapshot[0]) for (i = 0; i < 4; i++)
+			buffer[i] = *(u32 *)SNAPSHOT_ADDR(loop + i * 4);
+		else for (i = 0; i < 4; i++)
+			read_nic_dword(dev, loop + i * 4, &buffer[i]);
+
+		if (priv->dump_raw)
+			len += sprintf(buf + len,
+				       "%c%c%c%c"
+				       "%c%c%c%c"
+				       "%c%c%c%c"
+				       "%c%c%c%c",
+				       ((u8*)buffer)[0x0],
+				       ((u8*)buffer)[0x1],
+				       ((u8*)buffer)[0x2],
+				       ((u8*)buffer)[0x3],
+				       ((u8*)buffer)[0x4],
+				       ((u8*)buffer)[0x5],
+				       ((u8*)buffer)[0x6],
+				       ((u8*)buffer)[0x7],
+				       ((u8*)buffer)[0x8],
+				       ((u8*)buffer)[0x9],
+				       ((u8*)buffer)[0xa],
+				       ((u8*)buffer)[0xb],
+				       ((u8*)buffer)[0xc],
+				       ((u8*)buffer)[0xd],
+				       ((u8*)buffer)[0xe],
+				       ((u8*)buffer)[0xf]);
+		else
+			len += sprintf(buf + len, "%s\n",
+				       snprint_line(line, sizeof(line),
+						    (u8*)buffer, 16, loop));
+		loop += 16;
+	}
+
+	return len;
+}
+
+static ssize_t store_memory(struct device *d, struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct ipw2100_priv *priv = dev_get_drvdata(d);
+	struct net_device *dev = priv->net_dev;
+	const char *p = buf;
+
+	if (count < 1)
+		return count;
+
+	if (p[0] == '1' ||
+	    (count >= 2 && tolower(p[0]) == 'o' && tolower(p[1]) == 'n')) {
+		IPW_DEBUG_INFO("%s: Setting memory dump to RAW mode.\n",
+		       dev->name);
+		priv->dump_raw = 1;
+
+	} else if (p[0] == '0' || (count >= 2 && tolower(p[0]) == 'o' &&
+				  tolower(p[1]) == 'f')) {
+		IPW_DEBUG_INFO("%s: Setting memory dump to HEX mode.\n",
+		       dev->name);
+		priv->dump_raw = 0;
+
+	} else if (tolower(p[0]) == 'r') {
+		IPW_DEBUG_INFO("%s: Resetting firmware snapshot.\n",
+		       dev->name);
+		ipw2100_snapshot_free(priv);
+
+	} else
+		IPW_DEBUG_INFO("%s: Usage: 1|on = RAW, 0|off = HEX, "
+		       "reset = clear memory snapshot\n",
+		       dev->name);
+
+	return count;
+}
+static DEVICE_ATTR(memory, S_IWUSR|S_IRUGO, show_memory, store_memory);
+
+
+static ssize_t show_ordinals(struct device *d, struct device_attribute *attr,
+				char *buf)
+{
+	struct ipw2100_priv *priv = dev_get_drvdata(d);
+	u32 val = 0;
+	int len = 0;
+	u32 val_len;
+	static int loop = 0;
+
+	if (loop >= sizeof(ord_data) / sizeof(*ord_data))
+		loop = 0;
+
+	/* sysfs provides us PAGE_SIZE buffer */
+	while (len < PAGE_SIZE - 128 &&
+	       loop < (sizeof(ord_data) / sizeof(*ord_data))) {
+
+		val_len = sizeof(u32);
+
+		if (ipw2100_get_ordinal(priv, ord_data[loop].index, &val,
+					&val_len))
+			len += sprintf(buf + len, "[0x%02X] = ERROR    %s\n",
+				       ord_data[loop].index,
+				       ord_data[loop].desc);
+		else
+			len += sprintf(buf + len, "[0x%02X] = 0x%08X %s\n",
+				       ord_data[loop].index, val,
+				       ord_data[loop].desc);
+		loop++;
+	}
+
+	return len;
+}
+static DEVICE_ATTR(ordinals, S_IRUGO, show_ordinals, NULL);
+
+
+static ssize_t show_stats(struct device *d, struct device_attribute *attr,
+				char *buf)
+{
+	struct ipw2100_priv *priv = dev_get_drvdata(d);
+	char * out = buf;
+
+	out += sprintf(out, "interrupts: %d {tx: %d, rx: %d, other: %d}\n",
+		       priv->interrupts, priv->tx_interrupts,
+		       priv->rx_interrupts, priv->inta_other);
+	out += sprintf(out, "firmware resets: %d\n", priv->resets);
+	out += sprintf(out, "firmware hangs: %d\n", priv->hangs);
+#ifdef CONFIG_IPW_DEBUG
+	out += sprintf(out, "packet mismatch image: %s\n",
+		       priv->snapshot[0] ? "YES" : "NO");
+#endif
+
+	return out - buf;
+}
+static DEVICE_ATTR(stats, S_IRUGO, show_stats, NULL);
+
+
+int ipw2100_switch_mode(struct ipw2100_priv *priv, u32 mode)
+{
+	int err;
+
+	if (mode == priv->ieee->iw_mode)
+		return 0;
+
+	err = ipw2100_disable_adapter(priv);
+	if (err) {
+		IPW_DEBUG_ERROR("%s: Could not disable adapter %d\n",
+		       priv->net_dev->name, err);
+		return err;
+	}
+
+	switch (mode) {
+	case IW_MODE_INFRA:
+		priv->net_dev->type = ARPHRD_ETHER;
+		break;
+	case IW_MODE_ADHOC:
+		priv->net_dev->type = ARPHRD_ETHER;
+		break;
+#ifdef CONFIG_IPW2100_MONITOR
+	case IW_MODE_MONITOR:
+		priv->last_mode = priv->ieee->iw_mode;
+		priv->net_dev->type = ARPHRD_IEEE80211;
+		break;
+#endif /* CONFIG_IPW2100_MONITOR */
+	}
+
+	priv->ieee->iw_mode = mode;
+
+#ifdef CONFIG_PM
+	/* Force ipw2100_download_firmware to reload the firmware from
+	 * disk instead of reusing the copy already in memory. */
+	ipw2100_firmware.version = 0;
+#endif
+
+	printk(KERN_INFO "%s: Resetting on mode change.\n",
+		priv->net_dev->name);
+	priv->reset_backoff = 0;
+	schedule_reset(priv);
+
+	return 0;
+}
+
+static ssize_t show_internals(struct device *d, struct device_attribute *attr,
+				char *buf)
+{
+	struct ipw2100_priv *priv = dev_get_drvdata(d);
+	int len = 0;
+
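+/* DUMP_VAR stringizes both the member name and the printf length/format
+ * suffix, e.g. DUMP_VAR(status, 08lx) expands to
+ * len += sprintf(buf + len, "status: %08lx\n", priv->status); */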
+#define DUMP_VAR(x,y) len += sprintf(buf + len, # x ": %" # y "\n", priv-> x)
+
+	if (priv->status & STATUS_ASSOCIATED)
+		len += sprintf(buf + len, "connected: %lu\n",
+			       get_seconds() - priv->connect_start);
+	else
+		len += sprintf(buf + len, "not connected\n");
+
+	DUMP_VAR(ieee->crypt[priv->ieee->tx_keyidx], p);
+	DUMP_VAR(status, 08lx);
+	DUMP_VAR(config, 08lx);
+	DUMP_VAR(capability, 08lx);
+
+	len += sprintf(buf + len, "last_rtc: %lu\n", (unsigned long)priv->last_rtc);
+
+	DUMP_VAR(fatal_error, d);
+	DUMP_VAR(stop_hang_check, d);
+	DUMP_VAR(stop_rf_kill, d);
+	DUMP_VAR(messages_sent, d);
+
+	DUMP_VAR(tx_pend_stat.value, d);
+	DUMP_VAR(tx_pend_stat.hi, d);
+
+	DUMP_VAR(tx_free_stat.value, d);
+	DUMP_VAR(tx_free_stat.lo, d);
+
+	DUMP_VAR(msg_free_stat.value, d);
+	DUMP_VAR(msg_free_stat.lo, d);
+
+	DUMP_VAR(msg_pend_stat.value, d);
+	DUMP_VAR(msg_pend_stat.hi, d);
+
+	DUMP_VAR(fw_pend_stat.value, d);
+	DUMP_VAR(fw_pend_stat.hi, d);
+
+	DUMP_VAR(txq_stat.value, d);
+	DUMP_VAR(txq_stat.lo, d);
+
+	DUMP_VAR(ieee->scans, d);
+	DUMP_VAR(reset_backoff, d);
+
+	return len;
+}
+static DEVICE_ATTR(internals, S_IRUGO, show_internals, NULL);
+
+
+static ssize_t show_bssinfo(struct device *d, struct device_attribute *attr,
+				char *buf)
+{
+	struct ipw2100_priv *priv = dev_get_drvdata(d);
+	char essid[IW_ESSID_MAX_SIZE + 1];
+	u8 bssid[ETH_ALEN];
+	u32 chan = 0;
+	char * out = buf;
+	int length;
+	int ret;
+
+	memset(essid, 0, sizeof(essid));
+	memset(bssid, 0, sizeof(bssid));
+
+	length = IW_ESSID_MAX_SIZE;
+	ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_SSID, essid, &length);
+	if (ret)
+		IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
+			       __LINE__);
+
+	length = sizeof(bssid);
+	ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID,
+				  bssid, &length);
+	if (ret)
+		IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
+			       __LINE__);
+
+	length = sizeof(u32);
+	ret = ipw2100_get_ordinal(priv, IPW_ORD_OUR_FREQ, &chan, &length);
+	if (ret)
+		IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
+			       __LINE__);
+
+	out += sprintf(out, "ESSID: %s\n", essid);
+	out += sprintf(out, "BSSID:   %02x:%02x:%02x:%02x:%02x:%02x\n",
+		       bssid[0], bssid[1], bssid[2],
+		       bssid[3], bssid[4], bssid[5]);
+	out += sprintf(out, "Channel: %d\n", chan);
+
+	return out - buf;
+}
+static DEVICE_ATTR(bssinfo, S_IRUGO, show_bssinfo, NULL);
+
+
+#ifdef CONFIG_IPW_DEBUG
+static ssize_t show_debug_level(struct device_driver *d, char *buf)
+{
+	return sprintf(buf, "0x%08X\n", ipw2100_debug_level);
+}
+
+static ssize_t store_debug_level(struct device_driver *d, const char *buf,
+				 size_t count)
+{
+	char *p = (char *)buf;
+	u32 val;
+
+	if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
+		p++;
+		if (p[0] == 'x' || p[0] == 'X')
+			p++;
+		val = simple_strtoul(p, &p, 16);
+	} else
+		val = simple_strtoul(p, &p, 10);
+	if (p == buf)
+		IPW_DEBUG_INFO(DRV_NAME
+		       ": %s is not in hex or decimal form.\n", buf);
+	else
+		ipw2100_debug_level = val;
+
+	return strnlen(buf, count);
+}
+static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO, show_debug_level,
+		   store_debug_level);
+#endif /* CONFIG_IPW_DEBUG */
+
+
+static ssize_t show_fatal_error(struct device *d,
+			struct device_attribute *attr, char *buf)
+{
+	struct ipw2100_priv *priv = dev_get_drvdata(d);
+	char *out = buf;
+	int i;
+
+	if (priv->fatal_error)
+		out += sprintf(out, "0x%08X\n",
+			       priv->fatal_error);
+	else
+		out += sprintf(out, "0\n");
+
+	for (i = 1; i <= IPW2100_ERROR_QUEUE; i++) {
+		if (!priv->fatal_errors[(priv->fatal_index - i) %
+					IPW2100_ERROR_QUEUE])
+			continue;
+
+		out += sprintf(out, "%d. 0x%08X\n", i,
+			       priv->fatal_errors[(priv->fatal_index - i) %
+						  IPW2100_ERROR_QUEUE]);
+	}
+
+	return out - buf;
+}
+
+static ssize_t store_fatal_error(struct device *d,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct ipw2100_priv *priv = dev_get_drvdata(d);
+	schedule_reset(priv);
+	return count;
+}
+static DEVICE_ATTR(fatal_error, S_IWUSR|S_IRUGO, show_fatal_error, store_fatal_error);
+
+
+static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
+				char *buf)
+{
+	struct ipw2100_priv *priv = dev_get_drvdata(d);
+	return sprintf(buf, "%d\n", priv->ieee->scan_age);
+}
+
+static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct ipw2100_priv *priv = dev_get_drvdata(d);
+	struct net_device *dev = priv->net_dev;
+	char buffer[] = "00000000";
+	unsigned long len =
+	    (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
+	unsigned long val;
+	char *p = buffer;
+
+	IPW_DEBUG_INFO("enter\n");
+
+	strncpy(buffer, buf, len);
+	buffer[len] = 0;
+
+	if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
+		p++;
+		if (p[0] == 'x' || p[0] == 'X')
+			p++;
+		val = simple_strtoul(p, &p, 16);
+	} else
+		val = simple_strtoul(p, &p, 10);
+	if (p == buffer) {
+		IPW_DEBUG_INFO("%s: user supplied invalid value.\n",
+		       dev->name);
+	} else {
+		priv->ieee->scan_age = val;
+		IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
+	}
+
+	IPW_DEBUG_INFO("exit\n");
+	return len;
+}
+static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
+
+
+static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
+				char *buf)
+{
+	/* 0 - RF kill not enabled
+	   1 - SW based RF kill active (sysfs)
+	   2 - HW based RF kill active
+	   3 - Both HW and SW based RF kill active */
+	struct ipw2100_priv *priv = dev_get_drvdata(d);
+	int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
+		(rf_kill_active(priv) ? 0x2 : 0x0);
+	return sprintf(buf, "%i\n", val);
+}
+
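+/* Toggle the software RF kill state.  When re-enabling the radio while the
+ * hardware switch is still active, the periodic rf_kill check is
+ * rescheduled instead of bringing the adapter up.  Returns 1 if the state
+ * was changed, 0 if it was already as requested. */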
+static int ipw_radio_kill_sw(struct ipw2100_priv *priv, int disable_radio)
+{
+	if ((disable_radio ? 1 : 0) ==
+	    (priv->status & STATUS_RF_KILL_SW ? 1 : 0))
+		return 0 ;
+
+	IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO  %s\n",
+			  disable_radio ? "OFF" : "ON");
+
+	down(&priv->action_sem);
+
+	if (disable_radio) {
+		priv->status |= STATUS_RF_KILL_SW;
+		ipw2100_down(priv);
+	} else {
+		priv->status &= ~STATUS_RF_KILL_SW;
+		if (rf_kill_active(priv)) {
+			IPW_DEBUG_RF_KILL("Can not turn radio back on - "
+					  "disabled by HW switch\n");
+			/* Make sure the RF_KILL check timer is running */
+			priv->stop_rf_kill = 0;
+			cancel_delayed_work(&priv->rf_kill);
+			queue_delayed_work(priv->workqueue, &priv->rf_kill,
+					   HZ);
+		} else
+			schedule_reset(priv);
+	}
+
+	up(&priv->action_sem);
+	return 1;
+}
+
+static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct ipw2100_priv *priv = dev_get_drvdata(d);
+	ipw_radio_kill_sw(priv, buf[0] == '1');
+	return count;
+}
+static DEVICE_ATTR(rf_kill, S_IWUSR|S_IRUGO, show_rf_kill, store_rf_kill);
+
+
+static struct attribute *ipw2100_sysfs_entries[] = {
+	&dev_attr_hardware.attr,
+	&dev_attr_registers.attr,
+	&dev_attr_ordinals.attr,
+	&dev_attr_pci.attr,
+	&dev_attr_stats.attr,
+	&dev_attr_internals.attr,
+	&dev_attr_bssinfo.attr,
+	&dev_attr_memory.attr,
+	&dev_attr_scan_age.attr,
+	&dev_attr_fatal_error.attr,
+	&dev_attr_rf_kill.attr,
+	&dev_attr_cfg.attr,
+	&dev_attr_status.attr,
+	&dev_attr_capability.attr,
+	NULL,
+};
+
+static struct attribute_group ipw2100_attribute_group = {
+	.attrs = ipw2100_sysfs_entries,
+};
+
+
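+/* Allocate the DMA-coherent status ring shared with the firmware, sized to
+ * one struct ipw2100_status per entry.  It is allocated with the same
+ * number of entries as the RX ring. */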
+static int status_queue_allocate(struct ipw2100_priv *priv, int entries)
+{
+	struct ipw2100_status_queue *q = &priv->status_queue;
+
+	IPW_DEBUG_INFO("enter\n");
+
+	q->size = entries * sizeof(struct ipw2100_status);
+	q->drv = (struct ipw2100_status *)pci_alloc_consistent(
+		priv->pci_dev, q->size, &q->nic);
+	if (!q->drv) {
+		IPW_DEBUG_WARNING(
+		       "Can not allocate status queue.\n");
+		return -ENOMEM;
+	}
+
+	memset(q->drv, 0, q->size);
+
+	IPW_DEBUG_INFO("exit\n");
+
+	return 0;
+}
+
+static void status_queue_free(struct ipw2100_priv *priv)
+{
+	IPW_DEBUG_INFO("enter\n");
+
+	if (priv->status_queue.drv) {
+		pci_free_consistent(
+			priv->pci_dev, priv->status_queue.size,
+			priv->status_queue.drv, priv->status_queue.nic);
+		priv->status_queue.drv = NULL;
+	}
+
+	IPW_DEBUG_INFO("exit\n");
+}
+
+static int bd_queue_allocate(struct ipw2100_priv *priv,
+			     struct ipw2100_bd_queue *q, int entries)
+{
+	IPW_DEBUG_INFO("enter\n");
+
+	memset(q, 0, sizeof(struct ipw2100_bd_queue));
+
+	q->entries = entries;
+	q->size = entries * sizeof(struct ipw2100_bd);
+	q->drv = pci_alloc_consistent(priv->pci_dev, q->size, &q->nic);
+	if (!q->drv) {
+		IPW_DEBUG_INFO("can't allocate shared memory for buffer descriptors\n");
+		return -ENOMEM;
+	}
+	memset(q->drv, 0, q->size);
+
+	IPW_DEBUG_INFO("exit\n");
+
+	return 0;
+}
+
+static void bd_queue_free(struct ipw2100_priv *priv,
+			  struct ipw2100_bd_queue *q)
+{
+	IPW_DEBUG_INFO("enter\n");
+
+	if (!q)
+		return;
+
+	if (q->drv) {
+		pci_free_consistent(priv->pci_dev,
+				    q->size, q->drv, q->nic);
+		q->drv = NULL;
+	}
+
+	IPW_DEBUG_INFO("exit\n");
+}
+
+static void bd_queue_initialize(
+	struct ipw2100_priv *priv, struct ipw2100_bd_queue * q,
+	u32 base, u32 size, u32 r, u32 w)
+{
+	IPW_DEBUG_INFO("enter\n");
+
+	IPW_DEBUG_INFO("initializing bd queue at virt=%p, phys=%08x\n", q->drv, (u32)q->nic);
+
+	write_register(priv->net_dev, base, q->nic);
+	write_register(priv->net_dev, size, q->entries);
+	write_register(priv->net_dev, r, q->oldest);
+	write_register(priv->net_dev, w, q->next);
+
+	IPW_DEBUG_INFO("exit\n");
+}
+
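+/* Stop the periodic hang and RF kill checks, cancel any pending delayed
+ * work and tear down the driver's private workqueue. */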
+static void ipw2100_kill_workqueue(struct ipw2100_priv *priv)
+{
+	if (priv->workqueue) {
+		priv->stop_rf_kill = 1;
+		priv->stop_hang_check = 1;
+		cancel_delayed_work(&priv->reset_work);
+		cancel_delayed_work(&priv->security_work);
+		cancel_delayed_work(&priv->wx_event_work);
+		cancel_delayed_work(&priv->hang_check);
+		cancel_delayed_work(&priv->rf_kill);
+		destroy_workqueue(priv->workqueue);
+		priv->workqueue = NULL;
+	}
+}
+
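+/* Allocate the TX buffer descriptor ring plus a DMA-coherent
+ * ipw2100_data_header for each pending TX packet.  On partial failure
+ * everything allocated so far is released again. */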
+static int ipw2100_tx_allocate(struct ipw2100_priv *priv)
+{
+	int i, j, err = -EINVAL;
+	void *v;
+	dma_addr_t p;
+
+	IPW_DEBUG_INFO("enter\n");
+
+	err = bd_queue_allocate(priv, &priv->tx_queue, TX_QUEUE_LENGTH);
+	if (err) {
+		IPW_DEBUG_ERROR("%s: failed bd_queue_allocate\n",
+		       priv->net_dev->name);
+		return err;
+	}
+
+	priv->tx_buffers = (struct ipw2100_tx_packet *)kmalloc(
+		TX_PENDED_QUEUE_LENGTH * sizeof(struct ipw2100_tx_packet),
+		GFP_ATOMIC);
+	if (!priv->tx_buffers) {
+		IPW_DEBUG_ERROR("%s: alloc failed for tx buffers.\n",
+		       priv->net_dev->name);
+		bd_queue_free(priv, &priv->tx_queue);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < TX_PENDED_QUEUE_LENGTH; i++) {
+		v = pci_alloc_consistent(
+			priv->pci_dev, sizeof(struct ipw2100_data_header), &p);
+		if (!v) {
+			IPW_DEBUG_ERROR("%s: PCI alloc failed for tx "
+			       "buffers.\n", priv->net_dev->name);
+			err = -ENOMEM;
+			break;
+		}
+
+		priv->tx_buffers[i].type = DATA;
+		priv->tx_buffers[i].info.d_struct.data = (struct ipw2100_data_header*)v;
+		priv->tx_buffers[i].info.d_struct.data_phys = p;
+		priv->tx_buffers[i].info.d_struct.txb = NULL;
+	}
+
+	if (i == TX_PENDED_QUEUE_LENGTH)
+		return 0;
+
+	for (j = 0; j < i; j++) {
+		pci_free_consistent(
+			priv->pci_dev,
+			sizeof(struct ipw2100_data_header),
+			priv->tx_buffers[j].info.d_struct.data,
+			priv->tx_buffers[j].info.d_struct.data_phys);
+	}
+
+	kfree(priv->tx_buffers);
+	priv->tx_buffers = NULL;
+
+	return err;
+}
+
+static void ipw2100_tx_initialize(struct ipw2100_priv *priv)
+{
+	int i;
+
+	IPW_DEBUG_INFO("enter\n");
+
+	/*
+	 * reinitialize packet info lists
+	 */
+	INIT_LIST_HEAD(&priv->fw_pend_list);
+	INIT_STAT(&priv->fw_pend_stat);
+
+	/*
+	 * reinitialize lists
+	 */
+	INIT_LIST_HEAD(&priv->tx_pend_list);
+	INIT_LIST_HEAD(&priv->tx_free_list);
+	INIT_STAT(&priv->tx_pend_stat);
+	INIT_STAT(&priv->tx_free_stat);
+
+	for (i = 0; i < TX_PENDED_QUEUE_LENGTH; i++) {
+		/* We simply drop any SKBs that have been queued for
+		 * transmit */
+		if (priv->tx_buffers[i].info.d_struct.txb) {
+			ieee80211_txb_free(priv->tx_buffers[i].info.d_struct.txb);
+			priv->tx_buffers[i].info.d_struct.txb = NULL;
+		}
+
+		list_add_tail(&priv->tx_buffers[i].list, &priv->tx_free_list);
+	}
+
+	SET_STAT(&priv->tx_free_stat, i);
+
+	priv->tx_queue.oldest = 0;
+	priv->tx_queue.available = priv->tx_queue.entries;
+	priv->tx_queue.next = 0;
+	INIT_STAT(&priv->txq_stat);
+	SET_STAT(&priv->txq_stat, priv->tx_queue.available);
+
+	bd_queue_initialize(priv, &priv->tx_queue,
+			    IPW_MEM_HOST_SHARED_TX_QUEUE_BD_BASE,
+			    IPW_MEM_HOST_SHARED_TX_QUEUE_BD_SIZE,
+			    IPW_MEM_HOST_SHARED_TX_QUEUE_READ_INDEX,
+			    IPW_MEM_HOST_SHARED_TX_QUEUE_WRITE_INDEX);
+
+	IPW_DEBUG_INFO("exit\n");
+
+}
+
+static void ipw2100_tx_free(struct ipw2100_priv *priv)
+{
+	int i;
+
+	IPW_DEBUG_INFO("enter\n");
+
+	bd_queue_free(priv, &priv->tx_queue);
+
+	if (!priv->tx_buffers)
+		return;
+
+	for (i = 0; i < TX_PENDED_QUEUE_LENGTH; i++) {
+		if (priv->tx_buffers[i].info.d_struct.txb) {
+			ieee80211_txb_free(priv->tx_buffers[i].info.d_struct.txb);
+			priv->tx_buffers[i].info.d_struct.txb = NULL;
+		}
+		if (priv->tx_buffers[i].info.d_struct.data)
+			pci_free_consistent(
+				priv->pci_dev,
+				sizeof(struct ipw2100_data_header),
+				priv->tx_buffers[i].info.d_struct.data,
+				priv->tx_buffers[i].info.d_struct.data_phys);
+	}
+
+	kfree(priv->tx_buffers);
+	priv->tx_buffers = NULL;
+
+	IPW_DEBUG_INFO("exit\n");
+}
+
+
+
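+/* Allocate the RX buffer descriptor ring, the matching status ring and an
+ * skb for every RX slot, unwinding all allocations on failure. */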
+static int ipw2100_rx_allocate(struct ipw2100_priv *priv)
+{
+	int i, j, err = -EINVAL;
+
+	IPW_DEBUG_INFO("enter\n");
+
+	err = bd_queue_allocate(priv, &priv->rx_queue, RX_QUEUE_LENGTH);
+	if (err) {
+		IPW_DEBUG_INFO("failed bd_queue_allocate\n");
+		return err;
+	}
+
+	err = status_queue_allocate(priv, RX_QUEUE_LENGTH);
+	if (err) {
+		IPW_DEBUG_INFO("failed status_queue_allocate\n");
+		bd_queue_free(priv, &priv->rx_queue);
+		return err;
+	}
+
+	/*
+	 * allocate packets
+	 */
+	priv->rx_buffers = (struct ipw2100_rx_packet *)
+	    kmalloc(RX_QUEUE_LENGTH * sizeof(struct ipw2100_rx_packet),
+		    GFP_KERNEL);
+	if (!priv->rx_buffers) {
+		IPW_DEBUG_INFO("can't allocate rx packet buffer table\n");
+
+		bd_queue_free(priv, &priv->rx_queue);
+
+		status_queue_free(priv);
+
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < RX_QUEUE_LENGTH; i++) {
+		struct ipw2100_rx_packet *packet = &priv->rx_buffers[i];
+
+		err = ipw2100_alloc_skb(priv, packet);
+		if (unlikely(err)) {
+			err = -ENOMEM;
+			break;
+		}
+
+		/* The BD holds the cache aligned address */
+		priv->rx_queue.drv[i].host_addr = packet->dma_addr;
+		priv->rx_queue.drv[i].buf_length = IPW_RX_NIC_BUFFER_LENGTH;
+		priv->status_queue.drv[i].status_fields = 0;
+	}
+
+	if (i == RX_QUEUE_LENGTH)
+		return 0;
+
+	for (j = 0; j < i; j++) {
+		pci_unmap_single(priv->pci_dev, priv->rx_buffers[j].dma_addr,
+				 sizeof(struct ipw2100_rx_packet),
+				 PCI_DMA_FROMDEVICE);
+		dev_kfree_skb(priv->rx_buffers[j].skb);
+	}
+
+	kfree(priv->rx_buffers);
+	priv->rx_buffers = NULL;
+
+	bd_queue_free(priv, &priv->rx_queue);
+
+	status_queue_free(priv);
+
+	return err;
+}
+
+static void ipw2100_rx_initialize(struct ipw2100_priv *priv)
+{
+	IPW_DEBUG_INFO("enter\n");
+
+	priv->rx_queue.oldest = 0;
+	priv->rx_queue.available = priv->rx_queue.entries - 1;
+	priv->rx_queue.next = priv->rx_queue.entries - 1;
+
+	INIT_STAT(&priv->rxq_stat);
+	SET_STAT(&priv->rxq_stat, priv->rx_queue.available);
+
+	bd_queue_initialize(priv, &priv->rx_queue,
+			    IPW_MEM_HOST_SHARED_RX_BD_BASE,
+			    IPW_MEM_HOST_SHARED_RX_BD_SIZE,
+			    IPW_MEM_HOST_SHARED_RX_READ_INDEX,
+			    IPW_MEM_HOST_SHARED_RX_WRITE_INDEX);
+
+	/* set up the status queue */
+	write_register(priv->net_dev, IPW_MEM_HOST_SHARED_RX_STATUS_BASE,
+		       priv->status_queue.nic);
+
+	IPW_DEBUG_INFO("exit\n");
+}
+
+static void ipw2100_rx_free(struct ipw2100_priv *priv)
+{
+	int i;
+
+	IPW_DEBUG_INFO("enter\n");
+
+	bd_queue_free(priv, &priv->rx_queue);
+	status_queue_free(priv);
+
+	if (!priv->rx_buffers)
+		return;
+
+	for (i = 0; i < RX_QUEUE_LENGTH; i++) {
+		if (priv->rx_buffers[i].rxp) {
+			pci_unmap_single(priv->pci_dev,
+					 priv->rx_buffers[i].dma_addr,
+					 sizeof(struct ipw2100_rx),
+					 PCI_DMA_FROMDEVICE);
+			dev_kfree_skb(priv->rx_buffers[i].skb);
+		}
+	}
+
+	kfree(priv->rx_buffers);
+	priv->rx_buffers = NULL;
+
+	IPW_DEBUG_INFO("exit\n");
+}
+
+static int ipw2100_read_mac_address(struct ipw2100_priv *priv)
+{
+	u32 length = ETH_ALEN;
+	u8 mac[ETH_ALEN];
+
+	int err;
+
+	err = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ADAPTER_MAC,
+				  mac, &length);
+	if (err) {
+		IPW_DEBUG_INFO("MAC address read failed\n");
+		return -EIO;
+	}
+	IPW_DEBUG_INFO("card MAC is %02X:%02X:%02X:%02X:%02X:%02X\n",
+	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+
+	memcpy(priv->net_dev->dev_addr, mac, ETH_ALEN);
+
+	return 0;
+}
+
+/********************************************************************
+ *
+ * Firmware Commands
+ *
+ ********************************************************************/
+
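+/* Most of the helpers below fill in a struct host_command and hand it to
+ * ipw2100_hw_send_command().  Those that take a batch_mode argument disable
+ * the adapter around the command and re-enable it afterwards unless
+ * batch_mode is set by the caller. */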
+int ipw2100_set_mac_address(struct ipw2100_priv *priv, int batch_mode)
+{
+	struct host_command cmd = {
+		.host_command = ADAPTER_ADDRESS,
+		.host_command_sequence = 0,
+		.host_command_length = ETH_ALEN
+	};
+	int err;
+
+	IPW_DEBUG_HC("SET_MAC_ADDRESS\n");
+
+	IPW_DEBUG_INFO("enter\n");
+
+	if (priv->config & CFG_CUSTOM_MAC) {
+		memcpy(cmd.host_command_parameters, priv->mac_addr,
+		       ETH_ALEN);
+		memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
+	} else
+		memcpy(cmd.host_command_parameters, priv->net_dev->dev_addr,
+		       ETH_ALEN);
+
+	err = ipw2100_hw_send_command(priv, &cmd);
+
+	IPW_DEBUG_INFO("exit\n");
+	return err;
+}
+
+int ipw2100_set_port_type(struct ipw2100_priv *priv, u32 port_type,
+				 int batch_mode)
+{
+	struct host_command cmd = {
+		.host_command = PORT_TYPE,
+		.host_command_sequence = 0,
+		.host_command_length = sizeof(u32)
+	};
+	int err;
+
+	switch (port_type) {
+	case IW_MODE_INFRA:
+		cmd.host_command_parameters[0] = IPW_BSS;
+		break;
+	case IW_MODE_ADHOC:
+		cmd.host_command_parameters[0] = IPW_IBSS;
+		break;
+	}
+
+	IPW_DEBUG_HC("PORT_TYPE: %s\n",
+		     port_type == IW_MODE_ADHOC ? "Ad-Hoc" : "Managed");
+
+	if (!batch_mode) {
+		err = ipw2100_disable_adapter(priv);
+		if (err) {
+			IPW_DEBUG_ERROR("%s: Could not disable adapter %d\n",
+			       priv->net_dev->name, err);
+			return err;
+		}
+	}
+
+	/* send cmd to firmware */
+	err = ipw2100_hw_send_command(priv, &cmd);
+
+	if (!batch_mode)
+		ipw2100_enable_adapter(priv);
+
+	return err;
+}
+
+
+int ipw2100_set_channel(struct ipw2100_priv *priv, u32 channel, int batch_mode)
+{
+	struct host_command cmd = {
+		.host_command = CHANNEL,
+		.host_command_sequence = 0,
+		.host_command_length = sizeof(u32)
+	};
+	int err;
+
+	cmd.host_command_parameters[0] = channel;
+
+	IPW_DEBUG_HC("CHANNEL: %d\n", channel);
+
+	/* If BSS then we don't support channel selection */
+	if (priv->ieee->iw_mode == IW_MODE_INFRA)
+		return 0;
+
+	if ((channel != 0) &&
+	    ((channel < REG_MIN_CHANNEL) || (channel > REG_MAX_CHANNEL)))
+		return -EINVAL;
+
+	if (!batch_mode) {
+		err = ipw2100_disable_adapter(priv);
+		if (err)
+			return err;
+	}
+
+	err = ipw2100_hw_send_command(priv, &cmd);
+	if (err) {
+		IPW_DEBUG_INFO("Failed to set channel to %d\n",
+			       channel);
+		return err;
+	}
+
+	if (channel)
+		priv->config |= CFG_STATIC_CHANNEL;
+	else
+		priv->config &= ~CFG_STATIC_CHANNEL;
+
+	priv->channel = channel;
+
+	if (!batch_mode) {
+		err = ipw2100_enable_adapter(priv);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+int ipw2100_system_config(struct ipw2100_priv *priv, int batch_mode)
+{
+	struct host_command cmd = {
+		.host_command = SYSTEM_CONFIG,
+		.host_command_sequence = 0,
+		.host_command_length = 12,
+	};
+	u32 ibss_mask, len = sizeof(u32);
+	int err;
+
+	/* Set system configuration */
+
+	if (!batch_mode) {
+		err = ipw2100_disable_adapter(priv);
+		if (err)
+			return err;
+	}
+
+	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
+		cmd.host_command_parameters[0] |= IPW_CFG_IBSS_AUTO_START;
+
+	cmd.host_command_parameters[0] |= IPW_CFG_IBSS_MASK |
+		IPW_CFG_BSS_MASK |
+		IPW_CFG_802_1x_ENABLE;
+
+	if (!(priv->config & CFG_LONG_PREAMBLE))
+		cmd.host_command_parameters[0] |= IPW_CFG_PREAMBLE_AUTO;
+
+	err = ipw2100_get_ordinal(priv,
+				  IPW_ORD_EEPROM_IBSS_11B_CHANNELS,
+				  &ibss_mask,  &len);
+	if (err)
+		ibss_mask = IPW_IBSS_11B_DEFAULT_MASK;
+
+	cmd.host_command_parameters[1] = REG_CHANNEL_MASK;
+	cmd.host_command_parameters[2] = REG_CHANNEL_MASK & ibss_mask;
+
+	/* 11b only */
+	/*cmd.host_command_parameters[0] |= DIVERSITY_ANTENNA_A;*/
+
+	err = ipw2100_hw_send_command(priv, &cmd);
+	if (err)
+		return err;
+
+/* If IPv6 is configured in the kernel then we don't want to filter out all
+ * of the multicast packets as IPv6 needs some. */
+#if !defined(CONFIG_IPV6) && !defined(CONFIG_IPV6_MODULE)
+	cmd.host_command = ADD_MULTICAST;
+	cmd.host_command_sequence = 0;
+	cmd.host_command_length = 0;
+
+	ipw2100_hw_send_command(priv, &cmd);
+#endif
+	if (!batch_mode) {
+		err = ipw2100_enable_adapter(priv);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+int ipw2100_set_tx_rates(struct ipw2100_priv *priv, u32 rate, int batch_mode)
+{
+	struct host_command cmd = {
+		.host_command = BASIC_TX_RATES,
+		.host_command_sequence = 0,
+		.host_command_length = 4
+	};
+	int err;
+
+	cmd.host_command_parameters[0] = rate & TX_RATE_MASK;
+
+	if (!batch_mode) {
+		err = ipw2100_disable_adapter(priv);
+		if (err)
+			return err;
+	}
+
+	/* Set BASIC TX Rate first */
+	ipw2100_hw_send_command(priv, &cmd);
+
+	/* Set TX Rate */
+	cmd.host_command = TX_RATES;
+	ipw2100_hw_send_command(priv, &cmd);
+
+	/* Set MSDU TX Rate */
+	cmd.host_command = MSDU_TX_RATES;
+	ipw2100_hw_send_command(priv, &cmd);
+
+	if (!batch_mode) {
+		err = ipw2100_enable_adapter(priv);
+		if (err)
+			return err;
+	}
+
+	priv->tx_rates = rate;
+
+	return 0;
+}
+
+int ipw2100_set_power_mode(struct ipw2100_priv *priv,
+			   int power_level)
+{
+	struct host_command cmd = {
+		.host_command = POWER_MODE,
+		.host_command_sequence = 0,
+		.host_command_length = 4
+	};
+	int err;
+
+	cmd.host_command_parameters[0] = power_level;
+
+	err = ipw2100_hw_send_command(priv, &cmd);
+	if (err)
+		return err;
+
+	if (power_level == IPW_POWER_MODE_CAM)
+		priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
+	else
+		priv->power_mode = IPW_POWER_ENABLED | power_level;
+
+#ifdef CONFIG_IPW2100_TX_POWER
+	if (priv->port_type == IBSS &&
+	    priv->adhoc_power != DFTL_IBSS_TX_POWER) {
+		/* Set beacon interval */
+		cmd.host_command = TX_POWER_INDEX;
+		cmd.host_command_parameters[0] = (u32)priv->adhoc_power;
+
+		err = ipw2100_hw_send_command(priv, &cmd);
+		if (err)
+			return err;
+	}
+#endif
+
+	return 0;
+}
+
+
+int ipw2100_set_rts_threshold(struct ipw2100_priv *priv, u32 threshold)
+{
+	struct host_command cmd = {
+		.host_command = RTS_THRESHOLD,
+		.host_command_sequence = 0,
+		.host_command_length = 4
+	};
+	int err;
+
+	if (threshold & RTS_DISABLED)
+		cmd.host_command_parameters[0] = MAX_RTS_THRESHOLD;
+	else
+		cmd.host_command_parameters[0] = threshold & ~RTS_DISABLED;
+
+	err = ipw2100_hw_send_command(priv, &cmd);
+	if (err)
+		return err;
+
+	priv->rts_threshold = threshold;
+
+	return 0;
+}
+
+#if 0
+int ipw2100_set_fragmentation_threshold(struct ipw2100_priv *priv,
+					u32 threshold, int batch_mode)
+{
+	struct host_command cmd = {
+		.host_command = FRAG_THRESHOLD,
+		.host_command_sequence = 0,
+		.host_command_length = 4,
+		.host_command_parameters[0] = 0,
+	};
+	int err;
+
+	if (!batch_mode) {
+		err = ipw2100_disable_adapter(priv);
+		if (err)
+			return err;
+	}
+
+	if (threshold == 0)
+		threshold = DEFAULT_FRAG_THRESHOLD;
+	else {
+		threshold = max(threshold, MIN_FRAG_THRESHOLD);
+		threshold = min(threshold, MAX_FRAG_THRESHOLD);
+	}
+
+	cmd.host_command_parameters[0] = threshold;
+
+	IPW_DEBUG_HC("FRAG_THRESHOLD: %u\n", threshold);
+
+	err = ipw2100_hw_send_command(priv, &cmd);
+
+	if (!batch_mode)
+		ipw2100_enable_adapter(priv);
+
+	if (!err)
+		priv->frag_threshold = threshold;
+
+	return err;
+}
+#endif
+
+int ipw2100_set_short_retry(struct ipw2100_priv *priv, u32 retry)
+{
+	struct host_command cmd = {
+		.host_command = SHORT_RETRY_LIMIT,
+		.host_command_sequence = 0,
+		.host_command_length = 4
+	};
+	int err;
+
+	cmd.host_command_parameters[0] = retry;
+
+	err = ipw2100_hw_send_command(priv, &cmd);
+	if (err)
+		return err;
+
+	priv->short_retry_limit = retry;
+
+	return 0;
+}
+
+int ipw2100_set_long_retry(struct ipw2100_priv *priv, u32 retry)
+{
+	struct host_command cmd = {
+		.host_command = LONG_RETRY_LIMIT,
+		.host_command_sequence = 0,
+		.host_command_length = 4
+	};
+	int err;
+
+	cmd.host_command_parameters[0] = retry;
+
+	err = ipw2100_hw_send_command(priv, &cmd);
+	if (err)
+		return err;
+
+	priv->long_retry_limit = retry;
+
+	return 0;
+}
+
+
+int ipw2100_set_mandatory_bssid(struct ipw2100_priv *priv, u8 *bssid,
+				int batch_mode)
+{
+	struct host_command cmd = {
+		.host_command = MANDATORY_BSSID,
+		.host_command_sequence = 0,
+		.host_command_length = (bssid == NULL) ? 0 : ETH_ALEN
+	};
+	int err;
+
+#ifdef CONFIG_IPW_DEBUG
+	if (bssid != NULL)
+		IPW_DEBUG_HC(
+			"MANDATORY_BSSID: %02X:%02X:%02X:%02X:%02X:%02X\n",
+			bssid[0], bssid[1], bssid[2], bssid[3], bssid[4],
+			bssid[5]);
+	else
+		IPW_DEBUG_HC("MANDATORY_BSSID: <clear>\n");
+#endif
+	/* if BSSID is empty then we disable mandatory bssid mode */
+	if (bssid != NULL)
+		memcpy((u8 *)cmd.host_command_parameters, bssid, ETH_ALEN);
+
+	if (!batch_mode) {
+		err = ipw2100_disable_adapter(priv);
+		if (err)
+			return err;
+	}
+
+	err = ipw2100_hw_send_command(priv, &cmd);
+
+	if (!batch_mode)
+		ipw2100_enable_adapter(priv);
+
+	return err;
+}
+
+#ifdef CONFIG_IEEE80211_WPA
+static int ipw2100_disassociate_bssid(struct ipw2100_priv *priv)
+{
+	struct host_command cmd = {
+		.host_command = DISASSOCIATION_BSSID,
+		.host_command_sequence = 0,
+		.host_command_length = ETH_ALEN
+	};
+	int err;
+	int len;
+
+	IPW_DEBUG_HC("DISASSOCIATION_BSSID\n");
+
+	len = ETH_ALEN;
+	/* The firmware currently ignores the BSSID and just disassociates from
+	 * the currently associated AP -- but on the off chance that a future
+	 * firmware does use the BSSID provided here, we go ahead and try to
+	 * set it to the currently associated AP's BSSID. */
+	memcpy(cmd.host_command_parameters, priv->bssid, ETH_ALEN);
+
+	err = ipw2100_hw_send_command(priv, &cmd);
+
+	return err;
+}
+#endif
+
+/*
+ * Pseudo code for setting up wpa_frame:
+ */
+#if 0
+void x(struct ieee80211_assoc_frame *wpa_assoc)
+{
+	struct ipw2100_wpa_assoc_frame frame;
+	frame->fixed_ie_mask = IPW_WPA_CAPABILTIES |
+		IPW_WPA_LISTENINTERVAL |
+		IPW_WPA_AP_ADDRESS;
+	frame->capab_info = wpa_assoc->capab_info;
+	frame->lisen_interval = wpa_assoc->listent_interval;
+	memcpy(frame->current_ap, wpa_assoc->current_ap, ETH_ALEN);
+
+	/* UNKNOWN -- I'm not positive about this part; don't have any WPA
+	 * setup here to test it with.
+	 *
+	 * Walk the IEs in the wpa_assoc and figure out the total size of all
+	 * that data.  Stick that into frame->var_ie_len.  Then memcpy() all of
+	 * the IEs from wpa_frame into frame.
+	 */
+	frame->var_ie_len = calculate_ie_len(wpa_assoc);
+	memcpy(frame->var_ie,  wpa_assoc->variable, frame->var_ie_len);
+
+	ipw2100_set_wpa_ie(priv, &frame, 0);
+}
+#endif
+
+
+
+
+static int ipw2100_set_wpa_ie(struct ipw2100_priv *,
+			      struct ipw2100_wpa_assoc_frame *, int)
+__attribute__ ((unused));
+
+static int ipw2100_set_wpa_ie(struct ipw2100_priv *priv,
+			      struct ipw2100_wpa_assoc_frame *wpa_frame,
+			      int batch_mode)
+{
+	struct host_command cmd = {
+		.host_command = SET_WPA_IE,
+		.host_command_sequence = 0,
+		.host_command_length = sizeof(struct ipw2100_wpa_assoc_frame),
+	};
+	int err;
+
+	IPW_DEBUG_HC("SET_WPA_IE\n");
+
+	if (!batch_mode) {
+		err = ipw2100_disable_adapter(priv);
+		if (err)
+			return err;
+	}
+
+	memcpy(cmd.host_command_parameters, wpa_frame,
+	       sizeof(struct ipw2100_wpa_assoc_frame));
+
+	err = ipw2100_hw_send_command(priv, &cmd);
+
+	if (!batch_mode) {
+		if (ipw2100_enable_adapter(priv))
+			err = -EIO;
+	}
+
+	return err;
+}
+
+struct security_info_params {
+	u32 allowed_ciphers;
+	u16 version;
+	u8 auth_mode;
+	u8 replay_counters_number;
+	u8 unicast_using_group;
+} __attribute__ ((packed));
+
+int ipw2100_set_security_information(struct ipw2100_priv *priv,
+				     int auth_mode,
+				     int security_level,
+				     int unicast_using_group,
+				     int batch_mode)
+{
+	struct host_command cmd = {
+		.host_command = SET_SECURITY_INFORMATION,
+		.host_command_sequence = 0,
+		.host_command_length = sizeof(struct security_info_params)
+	};
+	struct security_info_params *security =
+		(struct security_info_params *)&cmd.host_command_parameters;
+	int err;
+	memset(security, 0, sizeof(*security));
+
+	/* If shared key AP authentication is turned on, then we need to
+	 * configure the firmware to try and use it.
+	 *
+	 * Actual data encryption/decryption is handled by the host. */
+	security->auth_mode = auth_mode;
+	security->unicast_using_group = unicast_using_group;
+
+	switch (security_level) {
+	default:
+	case SEC_LEVEL_0:
+		security->allowed_ciphers = IPW_NONE_CIPHER;
+		break;
+	case SEC_LEVEL_1:
+		security->allowed_ciphers = IPW_WEP40_CIPHER |
+			IPW_WEP104_CIPHER;
+		break;
+	case SEC_LEVEL_2:
+		security->allowed_ciphers = IPW_WEP40_CIPHER |
+			IPW_WEP104_CIPHER | IPW_TKIP_CIPHER;
+		break;
+	case SEC_LEVEL_2_CKIP:
+		security->allowed_ciphers = IPW_WEP40_CIPHER |
+			IPW_WEP104_CIPHER | IPW_CKIP_CIPHER;
+		break;
+	case SEC_LEVEL_3:
+		security->allowed_ciphers = IPW_WEP40_CIPHER |
+			IPW_WEP104_CIPHER | IPW_TKIP_CIPHER | IPW_CCMP_CIPHER;
+		break;
+	}
+
+	IPW_DEBUG_HC(
+		"SET_SECURITY_INFORMATION: auth:%d cipher:0x%02X (level %d)\n",
+		security->auth_mode, security->allowed_ciphers, security_level);
+
+	security->replay_counters_number = 0;
+
+	if (!batch_mode) {
+		err = ipw2100_disable_adapter(priv);
+		if (err)
+			return err;
+	}
+
+	err = ipw2100_hw_send_command(priv, &cmd);
+
+	if (!batch_mode)
+		ipw2100_enable_adapter(priv);
+
+	return err;
+}
+
+int ipw2100_set_tx_power(struct ipw2100_priv *priv,
+			 u32 tx_power)
+{
+	struct host_command cmd = {
+		.host_command = TX_POWER_INDEX,
+		.host_command_sequence = 0,
+		.host_command_length = 4
+	};
+	int err = 0;
+
+	cmd.host_command_parameters[0] = tx_power;
+
+	if (priv->ieee->iw_mode == IW_MODE_ADHOC)
+		err = ipw2100_hw_send_command(priv, &cmd);
+	if (!err)
+		priv->tx_power = tx_power;
+
+	return 0;
+}
+
+int ipw2100_set_ibss_beacon_interval(struct ipw2100_priv *priv,
+				     u32 interval, int batch_mode)
+{
+	struct host_command cmd = {
+		.host_command = BEACON_INTERVAL,
+		.host_command_sequence = 0,
+		.host_command_length = 4
+	};
+	int err;
+
+	cmd.host_command_parameters[0] = interval;
+
+	IPW_DEBUG_INFO("enter\n");
+
+	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
+		if (!batch_mode) {
+			err = ipw2100_disable_adapter(priv);
+			if (err)
+				return err;
+		}
+
+		ipw2100_hw_send_command(priv, &cmd);
+
+		if (!batch_mode) {
+			err = ipw2100_enable_adapter(priv);
+			if (err)
+				return err;
+		}
+	}
+
+	IPW_DEBUG_INFO("exit\n");
+
+	return 0;
+}
+
+
+void ipw2100_queues_initialize(struct ipw2100_priv *priv)
+{
+	ipw2100_tx_initialize(priv);
+	ipw2100_rx_initialize(priv);
+	ipw2100_msg_initialize(priv);
+}
+
+void ipw2100_queues_free(struct ipw2100_priv *priv)
+{
+	ipw2100_tx_free(priv);
+	ipw2100_rx_free(priv);
+	ipw2100_msg_free(priv);
+}
+
+int ipw2100_queues_allocate(struct ipw2100_priv *priv)
+{
+	if (ipw2100_tx_allocate(priv) ||
+	    ipw2100_rx_allocate(priv) ||
+	    ipw2100_msg_allocate(priv))
+		goto fail;
+
+	return 0;
+
+ fail:
+	ipw2100_tx_free(priv);
+	ipw2100_rx_free(priv);
+	ipw2100_msg_free(priv);
+	return -ENOMEM;
+}
+
+#define IPW_PRIVACY_CAPABLE 0x0008
+
+static int ipw2100_set_wep_flags(struct ipw2100_priv *priv, u32 flags,
+				 int batch_mode)
+{
+	struct host_command cmd = {
+		.host_command = WEP_FLAGS,
+		.host_command_sequence = 0,
+		.host_command_length = 4
+	};
+	int err;
+
+	cmd.host_command_parameters[0] = flags;
+
+	IPW_DEBUG_HC("WEP_FLAGS: flags = 0x%08X\n", flags);
+
+	if (!batch_mode) {
+		err = ipw2100_disable_adapter(priv);
+		if (err) {
+			IPW_DEBUG_ERROR("%s: Could not disable adapter %d\n",
+			       priv->net_dev->name, err);
+			return err;
+		}
+	}
+
+	/* send cmd to firmware */
+	err = ipw2100_hw_send_command(priv, &cmd);
+
+	if (!batch_mode)
+		ipw2100_enable_adapter(priv);
+
+	return err;
+}
+
+struct ipw2100_wep_key {
+	u8 idx;
+	u8 len;
+	u8 key[13];
+};
+
+/* Macros to ease up printing WEP keys */
+#define WEP_FMT_64  "%02X%02X%02X%02X-%02X"
+#define WEP_FMT_128 "%02X%02X%02X%02X-%02X%02X%02X%02X-%02X%02X%02X"
+#define WEP_STR_64(x) x[0],x[1],x[2],x[3],x[4]
+#define WEP_STR_128(x) x[0],x[1],x[2],x[3],x[4],x[5],x[6],x[7],x[8],x[9],x[10]
+
+
+/**
+ * Set the WEP key
+ *
+ * @priv: struct to work on
+ * @idx: index of the key we want to set
+ * @key: ptr to the key data to set
+ * @len: length of the buffer at @key
+ * @batch_mode: FIXME perform the operation in batch mode, not
+ *              disabling the device.
+ *
+ * @returns 0 if OK, < 0 errno code on error.
+ *
+ * Fill out a command structure with the new WEP key, length and
+ * index and send it down the wire.
+ */
+static int ipw2100_set_key(struct ipw2100_priv *priv,
+			   int idx, char *key, int len, int batch_mode)
+{
+	int keylen = len ? (len <= 5 ? 5 : 13) : 0;
+	struct host_command cmd = {
+		.host_command = WEP_KEY_INFO,
+		.host_command_sequence = 0,
+		.host_command_length = sizeof(struct ipw2100_wep_key),
+	};
+	struct ipw2100_wep_key *wep_key = (void*)cmd.host_command_parameters;
+	int err;
+
+	IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
+				 idx, keylen, len);
+
+	/* NOTE: We don't check cached values in case the firmware was reset
+	 * or some other problem is occurring.  If the user is setting the key,
+	 * then we push the change */
+
+	wep_key->idx = idx;
+	wep_key->len = keylen;
+
+	if (keylen) {
+		memcpy(wep_key->key, key, len);
+		memset(wep_key->key + len, 0, keylen - len);
+	}
+
+	/* Will be optimized out on debug not being configured in */
+	if (keylen == 0)
+		IPW_DEBUG_WEP("%s: Clearing key %d\n",
+				  priv->net_dev->name, wep_key->idx);
+	else if (keylen == 5)
+		IPW_DEBUG_WEP("%s: idx: %d, len: %d key: " WEP_FMT_64 "\n",
+				  priv->net_dev->name, wep_key->idx, wep_key->len,
+				  WEP_STR_64(wep_key->key));
+	else
+		IPW_DEBUG_WEP("%s: idx: %d, len: %d key: " WEP_FMT_128
+				  "\n",
+				  priv->net_dev->name, wep_key->idx, wep_key->len,
+				  WEP_STR_128(wep_key->key));
+
+	if (!batch_mode) {
+		err = ipw2100_disable_adapter(priv);
+		/* FIXME: IPG: shouldn't this printk be in _disable_adapter()? */
+		if (err) {
+			IPW_DEBUG_ERROR("%s: Could not disable adapter %d\n",
+			       priv->net_dev->name, err);
+			return err;
+		}
+	}
+
+	/* send cmd to firmware */
+	err = ipw2100_hw_send_command(priv, &cmd);
+
+	if (!batch_mode) {
+		int err2 = ipw2100_enable_adapter(priv);
+		if (err == 0)
+			err = err2;
+	}
+	return err;
+}
+
+static int ipw2100_set_key_index(struct ipw2100_priv *priv,
+				 int idx, int batch_mode)
+{
+	struct host_command cmd = {
+		.host_command = WEP_KEY_INDEX,
+		.host_command_sequence = 0,
+		.host_command_length = 4,
+		.host_command_parameters = { idx },
+	};
+	int err;
+
+	IPW_DEBUG_HC("WEP_KEY_INDEX: index = %d\n", idx);
+
+	if (idx < 0 || idx > 3)
+		return -EINVAL;
+
+	if (!batch_mode) {
+		err = ipw2100_disable_adapter(priv);
+		if (err) {
+			IPW_DEBUG_ERROR("%s: Could not disable adapter %d\n",
+			       priv->net_dev->name, err);
+			return err;
+		}
+	}
+
+	/* send cmd to firmware */
+	err = ipw2100_hw_send_command(priv, &cmd);
+
+	if (!batch_mode)
+		ipw2100_enable_adapter(priv);
+
+	return err;
+}
+
+
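+/* Push the current contents of priv->sec to the firmware: authentication
+ * mode, security level, the four WEP keys, the active key index and the
+ * privacy flag.  Wrapped in a disable/enable cycle unless batch_mode is
+ * set by the caller. */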
+static int ipw2100_configure_security(struct ipw2100_priv *priv,
+				      int batch_mode)
+{
+	int i, err, auth_mode, sec_level, use_group;
+
+	if (!(priv->status & STATUS_RUNNING))
+		return 0;
+
+	if (!batch_mode) {
+		err = ipw2100_disable_adapter(priv);
+		if (err)
+			return err;
+	}
+
+	if (!priv->sec.enabled) {
+		err = ipw2100_set_security_information(
+			priv, IPW_AUTH_OPEN, SEC_LEVEL_0, 0, 1);
+	} else {
+		auth_mode = IPW_AUTH_OPEN;
+		if ((priv->sec.flags & SEC_AUTH_MODE) &&
+		    (priv->sec.auth_mode == WLAN_AUTH_SHARED_KEY))
+			auth_mode = IPW_AUTH_SHARED;
+
+		sec_level = SEC_LEVEL_0;
+		if (priv->sec.flags & SEC_LEVEL)
+			sec_level = priv->sec.level;
+
+		use_group = 0;
+		if (priv->sec.flags & SEC_UNICAST_GROUP)
+			use_group = priv->sec.unicast_uses_group;
+
+		err = ipw2100_set_security_information(
+			    priv, auth_mode, sec_level, use_group, 1);
+	}
+
+	if (err)
+		goto exit;
+
+	if (priv->sec.enabled) {
+		for (i = 0; i < 4; i++) {
+			if (!(priv->sec.flags & (1 << i))) {
+				memset(priv->sec.keys[i], 0, WEP_KEY_LEN);
+				priv->sec.key_sizes[i] = 0;
+			} else {
+				err = ipw2100_set_key(priv, i,
+						      priv->sec.keys[i],
+						      priv->sec.key_sizes[i],
+						      1);
+				if (err)
+					goto exit;
+			}
+		}
+
+		ipw2100_set_key_index(priv, priv->ieee->tx_keyidx, 1);
+	}
+
+	/* Always enable privacy so the Host can filter WEP packets if
+	 * encrypted data is sent up */
+	err = ipw2100_set_wep_flags(
+		priv, priv->sec.enabled ? IPW_PRIVACY_CAPABLE : 0, 1);
+	if (err)
+		goto exit;
+
+	priv->status &= ~STATUS_SECURITY_UPDATED;
+
+ exit:
+	if (!batch_mode)
+		ipw2100_enable_adapter(priv);
+
+	return err;
+}
+
+static void ipw2100_security_work(struct ipw2100_priv *priv)
+{
+	/* If we happen to have reconnected before we get a chance to
+	 * process this, then update the security settings--which causes
+	 * a disassociation to occur */
+	if (!(priv->status & STATUS_ASSOCIATED) &&
+	    priv->status & STATUS_SECURITY_UPDATED)
+		ipw2100_configure_security(priv, 0);
+}
+
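+/* ieee80211 stack callback: copy the new key material and security flags
+ * into priv->sec and mark STATUS_SECURITY_UPDATED.  The firmware is only
+ * reconfigured immediately if we are not associated or associating. */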
+static void shim__set_security(struct net_device *dev,
+			       struct ieee80211_security *sec)
+{
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+	int i, force_update = 0;
+
+	down(&priv->action_sem);
+	if (!(priv->status & STATUS_INITIALIZED))
+		goto done;
+
+	for (i = 0; i < 4; i++) {
+		if (sec->flags & (1 << i)) {
+			priv->sec.key_sizes[i] = sec->key_sizes[i];
+			if (sec->key_sizes[i] == 0)
+				priv->sec.flags &= ~(1 << i);
+			else
+				memcpy(priv->sec.keys[i], sec->keys[i],
+				       sec->key_sizes[i]);
+			priv->sec.flags |= (1 << i);
+			priv->status |= STATUS_SECURITY_UPDATED;
+		}
+	}
+
+	if ((sec->flags & SEC_ACTIVE_KEY) &&
+	    priv->sec.active_key != sec->active_key) {
+		if (sec->active_key <= 3) {
+			priv->sec.active_key = sec->active_key;
+			priv->sec.flags |= SEC_ACTIVE_KEY;
+		} else
+			priv->sec.flags &= ~SEC_ACTIVE_KEY;
+
+		priv->status |= STATUS_SECURITY_UPDATED;
+	}
+
+	if ((sec->flags & SEC_AUTH_MODE) &&
+	    (priv->sec.auth_mode != sec->auth_mode)) {
+		priv->sec.auth_mode = sec->auth_mode;
+		priv->sec.flags |= SEC_AUTH_MODE;
+		priv->status |= STATUS_SECURITY_UPDATED;
+	}
+
+	if (sec->flags & SEC_ENABLED &&
+	    priv->sec.enabled != sec->enabled) {
+		priv->sec.flags |= SEC_ENABLED;
+		priv->sec.enabled = sec->enabled;
+		priv->status |= STATUS_SECURITY_UPDATED;
+		force_update = 1;
+	}
+
+	if (sec->flags & SEC_LEVEL &&
+	    priv->sec.level != sec->level) {
+		priv->sec.level = sec->level;
+		priv->sec.flags |= SEC_LEVEL;
+		priv->status |= STATUS_SECURITY_UPDATED;
+	}
+
+	IPW_DEBUG_WEP("Security flags: %c %c%c%c%c %c%c%c%c\n",
+			  priv->sec.flags & (1<<8) ? '1' : '0',
+			  priv->sec.flags & (1<<7) ? '1' : '0',
+			  priv->sec.flags & (1<<6) ? '1' : '0',
+			  priv->sec.flags & (1<<5) ? '1' : '0',
+			  priv->sec.flags & (1<<4) ? '1' : '0',
+			  priv->sec.flags & (1<<3) ? '1' : '0',
+			  priv->sec.flags & (1<<2) ? '1' : '0',
+			  priv->sec.flags & (1<<1) ? '1' : '0',
+			  priv->sec.flags & (1<<0) ? '1' : '0');
+
+/* As a temporary work around to enable WPA until we figure out why
+ * wpa_supplicant toggles the security capability of the driver, which
+ * forces a disassociation with force_update...
+ *
+ *	if (force_update || !(priv->status & STATUS_ASSOCIATED))*/
+	if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
+		ipw2100_configure_security(priv, 0);
+done:
+	up(&priv->action_sem);
+}
+
+static int ipw2100_adapter_setup(struct ipw2100_priv *priv)
+{
+	int err;
+	int batch_mode = 1;
+	u8 *bssid;
+
+	IPW_DEBUG_INFO("enter\n");
+
+	err = ipw2100_disable_adapter(priv);
+	if (err)
+		return err;
+#ifdef CONFIG_IPW2100_MONITOR
+	if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
+		err = ipw2100_set_channel(priv, priv->channel, batch_mode);
+		if (err)
+			return err;
+
+		IPW_DEBUG_INFO("exit\n");
+
+		return 0;
+	}
+#endif /* CONFIG_IPW2100_MONITOR */
+
+	err = ipw2100_read_mac_address(priv);
+	if (err)
+		return -EIO;
+
+	err = ipw2100_set_mac_address(priv, batch_mode);
+	if (err)
+		return err;
+
+	err = ipw2100_set_port_type(priv, priv->ieee->iw_mode, batch_mode);
+	if (err)
+		return err;
+
+	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
+		err = ipw2100_set_channel(priv, priv->channel, batch_mode);
+		if (err)
+			return err;
+	}
+
+	err  = ipw2100_system_config(priv, batch_mode);
+	if (err)
+		return err;
+
+	err = ipw2100_set_tx_rates(priv, priv->tx_rates, batch_mode);
+	if (err)
+		return err;
+
+	/* Default to power mode OFF */
+	err = ipw2100_set_power_mode(priv, IPW_POWER_MODE_CAM);
+	if (err)
+		return err;
+
+	err = ipw2100_set_rts_threshold(priv, priv->rts_threshold);
+	if (err)
+		return err;
+
+	if (priv->config & CFG_STATIC_BSSID)
+		bssid = priv->bssid;
+	else
+		bssid = NULL;
+	err = ipw2100_set_mandatory_bssid(priv, bssid, batch_mode);
+	if (err)
+		return err;
+
+	if (priv->config & CFG_STATIC_ESSID)
+		err = ipw2100_set_essid(priv, priv->essid, priv->essid_len,
+					batch_mode);
+	else
+		err = ipw2100_set_essid(priv, NULL, 0, batch_mode);
+	if (err)
+		return err;
+
+	err = ipw2100_configure_security(priv, batch_mode);
+	if (err)
+		return err;
+
+	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
+		err = ipw2100_set_ibss_beacon_interval(
+			priv, priv->beacon_interval, batch_mode);
+		if (err)
+			return err;
+
+		err = ipw2100_set_tx_power(priv, priv->tx_power);
+		if (err)
+			return err;
+	}
+
+	/*
+	  err = ipw2100_set_fragmentation_threshold(
+	  priv, priv->frag_threshold, batch_mode);
+	  if (err)
+	  return err;
+	*/
+
+	IPW_DEBUG_INFO("exit\n");
+
+	return 0;
+}
+
+
+/*************************************************************************
+ *
+ * EXTERNALLY CALLED METHODS
+ *
+ *************************************************************************/
+
+/* This method is called by the network layer -- not to be confused with
+ * ipw2100_set_mac_address() declared above called by this driver (and this
+ * method as well) to talk to the firmware */
+static int ipw2100_set_address(struct net_device *dev, void *p)
+{
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+	struct sockaddr *addr = p;
+	int err = 0;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	down(&priv->action_sem);
+
+	priv->config |= CFG_CUSTOM_MAC;
+	memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
+
+	err = ipw2100_set_mac_address(priv, 0);
+	if (err)
+		goto done;
+
+	priv->reset_backoff = 0;
+	up(&priv->action_sem);
+	ipw2100_reset_adapter(priv);
+	return 0;
+
+ done:
+	up(&priv->action_sem);
+	return err;
+}
+
+static int ipw2100_open(struct net_device *dev)
+{
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+	unsigned long flags;
+	IPW_DEBUG_INFO("dev->open\n");
+
+	spin_lock_irqsave(&priv->low_lock, flags);
+	if (priv->status & STATUS_ASSOCIATED)
+		netif_start_queue(dev);
+	spin_unlock_irqrestore(&priv->low_lock, flags);
+
+	return 0;
+}
+
+static int ipw2100_close(struct net_device *dev)
+{
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+	unsigned long flags;
+	struct list_head *element;
+	struct ipw2100_tx_packet *packet;
+
+	IPW_DEBUG_INFO("enter\n");
+
+	spin_lock_irqsave(&priv->low_lock, flags);
+
+	if (priv->status & STATUS_ASSOCIATED)
+		netif_carrier_off(dev);
+	netif_stop_queue(dev);
+
+	/* Flush the TX queue ... */
+	while (!list_empty(&priv->tx_pend_list)) {
+		element = priv->tx_pend_list.next;
+		packet = list_entry(element, struct ipw2100_tx_packet, list);
+
+		list_del(element);
+		DEC_STAT(&priv->tx_pend_stat);
+
+		ieee80211_txb_free(packet->info.d_struct.txb);
+		packet->info.d_struct.txb = NULL;
+
+		list_add_tail(element, &priv->tx_free_list);
+		INC_STAT(&priv->tx_free_stat);
+	}
+	spin_unlock_irqrestore(&priv->low_lock, flags);
+
+	IPW_DEBUG_INFO("exit\n");
+
+	return 0;
+}
+
+
+
+/*
+ * TODO:  Fix this function... its just wrong
+ */
+static void ipw2100_tx_timeout(struct net_device *dev)
+{
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+
+	priv->ieee->stats.tx_errors++;
+
+#ifdef CONFIG_IPW2100_MONITOR
+	if (priv->ieee->iw_mode == IW_MODE_MONITOR)
+		return;
+#endif
+
+	IPW_DEBUG_INFO("%s: TX timed out.  Scheduling firmware restart.\n",
+		       dev->name);
+	schedule_reset(priv);
+}
+
+
+/*
+ * TODO: reimplement it so that it reads statistics
+ *       from the adapter using ordinal tables
+ *       instead of/in addition to collecting them
+ *       in the driver
+ */
+static struct net_device_stats *ipw2100_stats(struct net_device *dev)
+{
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+
+	return &priv->ieee->stats;
+}
+
+/* Support for wpa_supplicant. Will be replaced with WEXT once
+ * they get WPA support. */
+#ifdef CONFIG_IEEE80211_WPA
+
+/* following definitions must match definitions in driver_ipw2100.c */
+
+#define IPW2100_IOCTL_WPA_SUPPLICANT		SIOCIWFIRSTPRIV+30
+
+#define IPW2100_CMD_SET_WPA_PARAM		1
+#define	IPW2100_CMD_SET_WPA_IE			2
+#define IPW2100_CMD_SET_ENCRYPTION		3
+#define IPW2100_CMD_MLME			4
+
+#define IPW2100_PARAM_WPA_ENABLED		1
+#define IPW2100_PARAM_TKIP_COUNTERMEASURES	2
+#define IPW2100_PARAM_DROP_UNENCRYPTED		3
+#define IPW2100_PARAM_PRIVACY_INVOKED		4
+#define IPW2100_PARAM_AUTH_ALGS			5
+#define IPW2100_PARAM_IEEE_802_1X		6
+
+#define IPW2100_MLME_STA_DEAUTH			1
+#define IPW2100_MLME_STA_DISASSOC		2
+
+#define IPW2100_CRYPT_ERR_UNKNOWN_ALG		2
+#define IPW2100_CRYPT_ERR_UNKNOWN_ADDR		3
+#define IPW2100_CRYPT_ERR_CRYPT_INIT_FAILED	4
+#define IPW2100_CRYPT_ERR_KEY_SET_FAILED	5
+#define IPW2100_CRYPT_ERR_TX_KEY_SET_FAILED	6
+#define IPW2100_CRYPT_ERR_CARD_CONF_FAILED	7
+
+#define	IPW2100_CRYPT_ALG_NAME_LEN		16
+
+struct ipw2100_param {
+	u32 cmd;
+	u8 sta_addr[ETH_ALEN];
+	union {
+		struct {
+			u8 name;
+			u32 value;
+		} wpa_param;
+		struct {
+			u32 len;
+			u8 *data;
+		} wpa_ie;
+		struct {
+			int command;
+			int reason_code;
+		} mlme;
+		struct {
+			u8 alg[IPW2100_CRYPT_ALG_NAME_LEN];
+			u8 set_tx;
+			u32 err;
+			u8 idx;
+			u8 seq[8]; /* sequence counter (set: RX, get: TX) */
+			u16 key_len;
+			u8 key[0];
+		} crypt;
+
+	} u;
+};
+
+/* end of driver_ipw2100.c code */
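+
+/* For reference, userspace (wpa_supplicant's driver_ipw2100.c) is expected
+ * to pack a struct ipw2100_param and pass it through the private ioctl,
+ * along the lines of the following illustrative, untested sketch:
+ *
+ *	struct ipw2100_param param;
+ *	struct iwreq iwr;
+ *
+ *	memset(&param, 0, sizeof(param));
+ *	param.cmd = IPW2100_CMD_SET_WPA_PARAM;
+ *	param.u.wpa_param.name = IPW2100_PARAM_WPA_ENABLED;
+ *	param.u.wpa_param.value = 1;
+ *
+ *	memset(&iwr, 0, sizeof(iwr));
+ *	strncpy(iwr.ifr_name, ifname, IFNAMSIZ);
+ *	iwr.u.data.pointer = (caddr_t)&param;
+ *	iwr.u.data.length = sizeof(param);
+ *	ioctl(sock, IPW2100_IOCTL_WPA_SUPPLICANT, &iwr);
+ */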
+
+static int ipw2100_wpa_enable(struct ipw2100_priv *priv, int value){
+
+	struct ieee80211_device *ieee = priv->ieee;
+	struct ieee80211_security sec = {
+		.flags = SEC_LEVEL | SEC_ENABLED,
+	};
+	int ret = 0;
+
+	ieee->wpa_enabled = value;
+
+	if (value){
+		sec.level = SEC_LEVEL_3;
+		sec.enabled = 1;
+	} else {
+		sec.level = SEC_LEVEL_0;
+		sec.enabled = 0;
+	}
+
+	if (ieee->set_security)
+		ieee->set_security(ieee->dev, &sec);
+	else
+		ret = -EOPNOTSUPP;
+
+	return ret;
+}
+
+#define AUTH_ALG_OPEN_SYSTEM			0x1
+#define AUTH_ALG_SHARED_KEY			0x2
+
+static int ipw2100_wpa_set_auth_algs(struct ipw2100_priv *priv, int value){
+
+	struct ieee80211_device *ieee = priv->ieee;
+	struct ieee80211_security sec = {
+		.flags = SEC_AUTH_MODE,
+	};
+	int ret = 0;
+
+	if (value & AUTH_ALG_SHARED_KEY){
+		sec.auth_mode = WLAN_AUTH_SHARED_KEY;
+		ieee->open_wep = 0;
+	} else {
+		sec.auth_mode = WLAN_AUTH_OPEN;
+		ieee->open_wep = 1;
+	}
+
+	if (ieee->set_security)
+		ieee->set_security(ieee->dev, &sec);
+	else
+		ret = -EOPNOTSUPP;
+
+	return ret;
+}
+
+
+static int ipw2100_wpa_set_param(struct net_device *dev, u8 name, u32 value){
+
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+	int ret=0;
+
+	switch(name){
+		case IPW2100_PARAM_WPA_ENABLED:
+			ret = ipw2100_wpa_enable(priv, value);
+			break;
+
+		case IPW2100_PARAM_TKIP_COUNTERMEASURES:
+			priv->ieee->tkip_countermeasures=value;
+			break;
+
+		case IPW2100_PARAM_DROP_UNENCRYPTED:
+			priv->ieee->drop_unencrypted=value;
+			break;
+
+		case IPW2100_PARAM_PRIVACY_INVOKED:
+			priv->ieee->privacy_invoked=value;
+			break;
+
+		case IPW2100_PARAM_AUTH_ALGS:
+			ret = ipw2100_wpa_set_auth_algs(priv, value);
+			break;
+
+		case IPW2100_PARAM_IEEE_802_1X:
+			priv->ieee->ieee802_1x=value;
+			break;
+
+		default:
+			IPW_DEBUG_ERROR("%s: Unknown WPA param: %d\n",
+					    dev->name, name);
+			ret = -EOPNOTSUPP;
+	}
+
+	return ret;
+}
+
+static int ipw2100_wpa_mlme(struct net_device *dev, int command, int reason){
+
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+	int ret=0;
+
+	switch(command){
+		case IPW2100_MLME_STA_DEAUTH:
+			/* silently ignore */
+			break;
+
+		case IPW2100_MLME_STA_DISASSOC:
+			ipw2100_disassociate_bssid(priv);
+			break;
+
+		default:
+			IPW_DEBUG_ERROR("%s: Unknown MLME request: %d\n",
+					    dev->name, command);
+			ret = -EOPNOTSUPP;
+	}
+
+	return ret;
+}
+
+
+void ipw2100_wpa_assoc_frame(struct ipw2100_priv *priv,
+			     char *wpa_ie, int wpa_ie_len){
+
+	struct ipw2100_wpa_assoc_frame frame;
+
+	frame.fixed_ie_mask = 0;
+
+	/* copy WPA IE */
+	memcpy(frame.var_ie, wpa_ie, wpa_ie_len);
+	frame.var_ie_len = wpa_ie_len;
+
+	/* make sure WPA is enabled */
+	ipw2100_wpa_enable(priv, 1);
+	ipw2100_set_wpa_ie(priv, &frame, 0);
+}
+
+
+static int ipw2100_wpa_set_wpa_ie(struct net_device *dev,
+				struct ipw2100_param *param, int plen){
+
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+	struct ieee80211_device *ieee = priv->ieee;
+	u8 *buf;
+
+	if (! ieee->wpa_enabled)
+	    return -EOPNOTSUPP;
+
+	if (param->u.wpa_ie.len > MAX_WPA_IE_LEN ||
+	   (param->u.wpa_ie.len &&
+		param->u.wpa_ie.data==NULL))
+		return -EINVAL;
+
+	if (param->u.wpa_ie.len){
+		buf = kmalloc(param->u.wpa_ie.len, GFP_KERNEL);
+		if (buf == NULL)
+			return -ENOMEM;
+
+		memcpy(buf, param->u.wpa_ie.data, param->u.wpa_ie.len);
+
+		kfree(ieee->wpa_ie);
+		ieee->wpa_ie = buf;
+		ieee->wpa_ie_len = param->u.wpa_ie.len;
+
+	} else {
+		kfree(ieee->wpa_ie);
+		ieee->wpa_ie = NULL;
+		ieee->wpa_ie_len = 0;
+	}
+
+	ipw2100_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
+
+	return 0;
+}
+
+/* implementation borrowed from hostap driver */
+
+static int ipw2100_wpa_set_encryption(struct net_device *dev,
+				struct ipw2100_param *param, int param_len){
+
+	int ret = 0;
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+	struct ieee80211_device *ieee = priv->ieee;
+	struct ieee80211_crypto_ops *ops;
+	struct ieee80211_crypt_data **crypt;
+
+	struct ieee80211_security sec = {
+		.flags = 0,
+	};
+
+	param->u.crypt.err = 0;
+	param->u.crypt.alg[IPW2100_CRYPT_ALG_NAME_LEN - 1] = '\0';
+
+	if (param_len !=
+	    (int) ((char *) param->u.crypt.key - (char *) param) +
+	    param->u.crypt.key_len){
+		IPW_DEBUG_INFO("Len mismatch %d, %d\n", param_len, param->u.crypt.key_len);
+		return -EINVAL;
+	}
+	if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
+	    param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
+	    param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
+		if (param->u.crypt.idx >= WEP_KEYS)
+			return -EINVAL;
+		crypt = &ieee->crypt[param->u.crypt.idx];
+	} else {
+		return -EINVAL;
+	}
+
+	if (strcmp(param->u.crypt.alg, "none") == 0) {
+		if (crypt){
+			sec.enabled = 0;
+			sec.level = SEC_LEVEL_0;
+			sec.flags |= SEC_ENABLED | SEC_LEVEL;
+			ieee80211_crypt_delayed_deinit(ieee, crypt);
+		}
+		goto done;
+	}
+	sec.enabled = 1;
+	sec.flags |= SEC_ENABLED;
+
+	ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
+	if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) {
+		request_module("ieee80211_crypt_wep");
+		ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
+	} else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) {
+		request_module("ieee80211_crypt_tkip");
+		ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
+	} else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) {
+		request_module("ieee80211_crypt_ccmp");
+		ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
+	}
+	if (ops == NULL) {
+		IPW_DEBUG_INFO("%s: unknown crypto alg '%s'\n",
+		       dev->name, param->u.crypt.alg);
+		param->u.crypt.err = IPW2100_CRYPT_ERR_UNKNOWN_ALG;
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (*crypt == NULL || (*crypt)->ops != ops) {
+		struct ieee80211_crypt_data *new_crypt;
+
+		ieee80211_crypt_delayed_deinit(ieee, crypt);
+
+		new_crypt = (struct ieee80211_crypt_data *)
+			kmalloc(sizeof(struct ieee80211_crypt_data), GFP_KERNEL);
+		if (new_crypt == NULL) {
+			ret = -ENOMEM;
+			goto done;
+		}
+		memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
+		new_crypt->ops = ops;
+		if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
+			new_crypt->priv = new_crypt->ops->init(param->u.crypt.idx);
+
+		if (new_crypt->priv == NULL) {
+			kfree(new_crypt);
+			param->u.crypt.err =
+				IPW2100_CRYPT_ERR_CRYPT_INIT_FAILED;
+			ret = -EINVAL;
+			goto done;
+		}
+
+		*crypt = new_crypt;
+	}
+
+	if (param->u.crypt.key_len > 0 && (*crypt)->ops->set_key &&
+	    (*crypt)->ops->set_key(param->u.crypt.key,
+				   param->u.crypt.key_len, param->u.crypt.seq,
+				   (*crypt)->priv) < 0) {
+		IPW_DEBUG_INFO("%s: key setting failed\n",
+		       dev->name);
+		param->u.crypt.err = IPW2100_CRYPT_ERR_KEY_SET_FAILED;
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (param->u.crypt.set_tx){
+		ieee->tx_keyidx = param->u.crypt.idx;
+		sec.active_key = param->u.crypt.idx;
+		sec.flags |= SEC_ACTIVE_KEY;
+	}
+
+	if (ops->name != NULL){
+
+		if (strcmp(ops->name, "WEP") == 0) {
+			memcpy(sec.keys[param->u.crypt.idx], param->u.crypt.key, param->u.crypt.key_len);
+			sec.key_sizes[param->u.crypt.idx] = param->u.crypt.key_len;
+			sec.flags |= (1 << param->u.crypt.idx);
+			sec.flags |= SEC_LEVEL;
+			sec.level = SEC_LEVEL_1;
+		} else if (strcmp(ops->name, "TKIP") == 0) {
+			sec.flags |= SEC_LEVEL;
+			sec.level = SEC_LEVEL_2;
+		} else if (strcmp(ops->name, "CCMP") == 0) {
+			sec.flags |= SEC_LEVEL;
+			sec.level = SEC_LEVEL_3;
+		}
+	}
+ done:
+	if (ieee->set_security)
+		ieee->set_security(ieee->dev, &sec);
+
+	/* Do not reset port if card is in Managed mode since resetting will
+	 * generate new IEEE 802.11 authentication which may end up in looping
+	 * with IEEE 802.1X.  If your hardware requires a reset after WEP
+	 * configuration (for example... Prism2), implement the reset_port in
+	 * the callbacks structures used to initialize the 802.11 stack. */
+	if (ieee->reset_on_keychange &&
+	    ieee->iw_mode != IW_MODE_INFRA &&
+	    ieee->reset_port &&
+	    ieee->reset_port(dev)) {
+		IPW_DEBUG_INFO("%s: reset_port failed\n", dev->name);
+		param->u.crypt.err = IPW2100_CRYPT_ERR_CARD_CONF_FAILED;
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+
+static int ipw2100_wpa_supplicant(struct net_device *dev, struct iw_point *p){
+
+	struct ipw2100_param *param;
+	int ret=0;
+
+	IPW_DEBUG_IOCTL("wpa_supplicant: len=%d\n", p->length);
+
+	if (p->length < sizeof(struct ipw2100_param) || !p->pointer)
+		return -EINVAL;
+
+	param = (struct ipw2100_param *)kmalloc(p->length, GFP_KERNEL);
+	if (param == NULL)
+		return -ENOMEM;
+
+	if (copy_from_user(param, p->pointer, p->length)){
+		kfree(param);
+		return -EFAULT;
+	}
+
+	switch (param->cmd){
+
+	case IPW2100_CMD_SET_WPA_PARAM:
+		ret = ipw2100_wpa_set_param(dev, param->u.wpa_param.name,
+					    param->u.wpa_param.value);
+		break;
+
+	case IPW2100_CMD_SET_WPA_IE:
+		ret = ipw2100_wpa_set_wpa_ie(dev, param, p->length);
+		break;
+
+	case IPW2100_CMD_SET_ENCRYPTION:
+		ret = ipw2100_wpa_set_encryption(dev, param, p->length);
+		break;
+
+	case IPW2100_CMD_MLME:
+		ret = ipw2100_wpa_mlme(dev, param->u.mlme.command,
+				       param->u.mlme.reason_code);
+		break;
+
+	default:
+		IPW_DEBUG_ERROR("%s: Unknown WPA supplicant request: %d\n",
+				dev->name, param->cmd);
+		ret = -EOPNOTSUPP;
+
+	}
+
+	if (ret == 0 && copy_to_user(p->pointer, param, p->length))
+		ret = -EFAULT;
+
+	kfree(param);
+	return ret;
+}
+#endif /* CONFIG_IEEE80211_WPA */
+
+static int ipw2100_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+#ifdef CONFIG_IEEE80211_WPA
+	struct iwreq *wrq = (struct iwreq *) rq;
+	int ret=-1;
+	switch (cmd){
+	    case IPW2100_IOCTL_WPA_SUPPLICANT:
+		ret = ipw2100_wpa_supplicant(dev, &wrq->u.data);
+		return ret;
+
+	    default:
+		return -EOPNOTSUPP;
+	}
+
+#endif /* CONFIG_IEEE80211_WPA */
+
+	return -EOPNOTSUPP;
+}
+
+
+static void ipw_ethtool_get_drvinfo(struct net_device *dev,
+				    struct ethtool_drvinfo *info)
+{
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+	char fw_ver[64], ucode_ver[64];
+
+	strcpy(info->driver, DRV_NAME);
+	strcpy(info->version, DRV_VERSION);
+
+	ipw2100_get_fwversion(priv, fw_ver, sizeof(fw_ver));
+	ipw2100_get_ucodeversion(priv, ucode_ver, sizeof(ucode_ver));
+
+	snprintf(info->fw_version, sizeof(info->fw_version), "%s:%d:%s",
+		 fw_ver, priv->eeprom_version, ucode_ver);
+
+	strcpy(info->bus_info, pci_name(priv->pci_dev));
+}
+
+static u32 ipw2100_ethtool_get_link(struct net_device *dev)
+{
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+	return (priv->status & STATUS_ASSOCIATED) ? 1 : 0;
+}
+
+
+static struct ethtool_ops ipw2100_ethtool_ops = {
+    .get_link        = ipw2100_ethtool_get_link,
+    .get_drvinfo     = ipw_ethtool_get_drvinfo,
+};
+
+static void ipw2100_hang_check(void *adapter)
+{
+	struct ipw2100_priv *priv = adapter;
+	unsigned long flags;
+	u32 rtc = 0xa5a5a5a5;
+	u32 len = sizeof(rtc);
+	int restart = 0;
+
+	spin_lock_irqsave(&priv->low_lock, flags);
+
+	if (priv->fatal_error != 0) {
+		/* If fatal_error is set then we need to restart */
+		IPW_DEBUG_INFO("%s: Hardware fatal error detected.\n",
+			       priv->net_dev->name);
+
+		restart = 1;
+	} else if (ipw2100_get_ordinal(priv, IPW_ORD_RTC_TIME, &rtc, &len) ||
+		   (rtc == priv->last_rtc)) {
+		/* Check if firmware is hung */
+		IPW_DEBUG_INFO("%s: Firmware RTC stalled.\n",
+			       priv->net_dev->name);
+
+		restart = 1;
+	}
+
+	if (restart) {
+		/* Kill timer */
+		priv->stop_hang_check = 1;
+		priv->hangs++;
+
+		/* Restart the NIC */
+		schedule_reset(priv);
+	}
+
+	priv->last_rtc = rtc;
+
+	if (!priv->stop_hang_check)
+		queue_delayed_work(priv->workqueue, &priv->hang_check, HZ / 2);
+
+	spin_unlock_irqrestore(&priv->low_lock, flags);
+}
+
+
+static void ipw2100_rf_kill(void *adapter)
+{
+	struct ipw2100_priv *priv = adapter;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->low_lock, flags);
+
+	if (rf_kill_active(priv)) {
+		IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
+		if (!priv->stop_rf_kill)
+			queue_delayed_work(priv->workqueue, &priv->rf_kill, HZ);
+		goto exit_unlock;
+	}
+
+	/* RF Kill is now disabled, so bring the device back up */
+
+	if (!(priv->status & STATUS_RF_KILL_MASK)) {
+		IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
+				  "device\n");
+		schedule_reset(priv);
+	} else
+		IPW_DEBUG_RF_KILL("HW RF Kill deactivated.  SW RF Kill still "
+				  "enabled\n");
+
+ exit_unlock:
+	spin_unlock_irqrestore(&priv->low_lock, flags);
+}
+
+static void ipw2100_irq_tasklet(struct ipw2100_priv *priv);
+
+/* Look into using netdev destructor to shutdown ieee80211? */
+
+static struct net_device *ipw2100_alloc_device(
+	struct pci_dev *pci_dev,
+	char *base_addr,
+	unsigned long mem_start,
+	unsigned long mem_len)
+{
+	struct ipw2100_priv *priv;
+	struct net_device *dev;
+
+	dev = alloc_ieee80211(sizeof(struct ipw2100_priv));
+	if (!dev)
+		return NULL;
+	priv = ieee80211_priv(dev);
+	priv->ieee = netdev_priv(dev);
+	priv->pci_dev = pci_dev;
+	priv->net_dev = dev;
+
+	priv->ieee->hard_start_xmit = ipw2100_tx;
+	priv->ieee->set_security = shim__set_security;
+
+	dev->open = ipw2100_open;
+	dev->stop = ipw2100_close;
+	dev->init = ipw2100_net_init;
+	dev->do_ioctl = ipw2100_ioctl;
+	dev->get_stats = ipw2100_stats;
+	dev->ethtool_ops = &ipw2100_ethtool_ops;
+	dev->tx_timeout = ipw2100_tx_timeout;
+	dev->wireless_handlers = &ipw2100_wx_handler_def;
+	dev->get_wireless_stats = ipw2100_wx_wireless_stats;
+	dev->set_mac_address = ipw2100_set_address;
+	dev->watchdog_timeo = 3*HZ;
+	dev->irq = 0;
+
+	dev->base_addr = (unsigned long)base_addr;
+	dev->mem_start = mem_start;
+	dev->mem_end = dev->mem_start + mem_len - 1;
+
+	/* NOTE: We don't use the wireless_handlers hook
+	 * in dev as the system will start throwing WX requests
+	 * to us before we're actually initialized and it just
+	 * ends up causing problems.  So, we just handle
+	 * the WX extensions through the ipw2100_ioctl interface */
+
+
+	/* memset() puts everything to 0, so we only have to explicitly set
+	 * those values that need to be something else */
+
+	/* If power management is turned on, default to AUTO mode */
+	priv->power_mode = IPW_POWER_AUTO;
+
+
+
+#ifdef CONFIG_IEEE80211_WPA
+	priv->ieee->wpa_enabled = 0;
+	priv->ieee->tkip_countermeasures = 0;
+	priv->ieee->drop_unencrypted = 0;
+	priv->ieee->privacy_invoked = 0;
+	priv->ieee->ieee802_1x = 1;
+#endif /* CONFIG_IEEE80211_WPA */
+
+	/* Set module parameters */
+	switch (mode) {
+	case 1:
+		priv->ieee->iw_mode = IW_MODE_ADHOC;
+		break;
+#ifdef CONFIG_IPW2100_MONITOR
+	case 2:
+		priv->ieee->iw_mode = IW_MODE_MONITOR;
+		break;
+#endif
+	default:
+	case 0:
+		priv->ieee->iw_mode = IW_MODE_INFRA;
+		break;
+	}
+
+	if (disable == 1)
+		priv->status |= STATUS_RF_KILL_SW;
+
+	if (channel != 0 &&
+	    ((channel >= REG_MIN_CHANNEL) &&
+	     (channel <= REG_MAX_CHANNEL))) {
+		priv->config |= CFG_STATIC_CHANNEL;
+		priv->channel = channel;
+	}
+
+	if (associate)
+		priv->config |= CFG_ASSOCIATE;
+
+	priv->beacon_interval = DEFAULT_BEACON_INTERVAL;
+	priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
+	priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
+	priv->rts_threshold = DEFAULT_RTS_THRESHOLD | RTS_DISABLED;
+	priv->frag_threshold = DEFAULT_FTS | FRAG_DISABLED;
+	priv->tx_power = IPW_TX_POWER_DEFAULT;
+	priv->tx_rates = DEFAULT_TX_RATES;
+
+	strcpy(priv->nick, "ipw2100");
+
+	spin_lock_init(&priv->low_lock);
+	sema_init(&priv->action_sem, 1);
+	sema_init(&priv->adapter_sem, 1);
+
+	init_waitqueue_head(&priv->wait_command_queue);
+
+	netif_carrier_off(dev);
+
+	INIT_LIST_HEAD(&priv->msg_free_list);
+	INIT_LIST_HEAD(&priv->msg_pend_list);
+	INIT_STAT(&priv->msg_free_stat);
+	INIT_STAT(&priv->msg_pend_stat);
+
+	INIT_LIST_HEAD(&priv->tx_free_list);
+	INIT_LIST_HEAD(&priv->tx_pend_list);
+	INIT_STAT(&priv->tx_free_stat);
+	INIT_STAT(&priv->tx_pend_stat);
+
+	INIT_LIST_HEAD(&priv->fw_pend_list);
+	INIT_STAT(&priv->fw_pend_stat);
+
+
+#ifdef CONFIG_SOFTWARE_SUSPEND2
+	priv->workqueue = create_workqueue(DRV_NAME, 0);
+#else
+	priv->workqueue = create_workqueue(DRV_NAME);
+#endif
+	INIT_WORK(&priv->reset_work,
+		  (void (*)(void *))ipw2100_reset_adapter, priv);
+	INIT_WORK(&priv->security_work,
+		  (void (*)(void *))ipw2100_security_work, priv);
+	INIT_WORK(&priv->wx_event_work,
+		  (void (*)(void *))ipw2100_wx_event_work, priv);
+	INIT_WORK(&priv->hang_check, ipw2100_hang_check, priv);
+	INIT_WORK(&priv->rf_kill, ipw2100_rf_kill, priv);
+
+	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
+		     ipw2100_irq_tasklet, (unsigned long)priv);
+
+	/* NOTE:  We do not start the deferred work for status checks yet */
+	priv->stop_rf_kill = 1;
+	priv->stop_hang_check = 1;
+
+	return dev;
+}
+
+static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
+				const struct pci_device_id *ent)
+{
+	unsigned long mem_start, mem_len, mem_flags;
+	char *base_addr = NULL;
+	struct net_device *dev = NULL;
+	struct ipw2100_priv *priv = NULL;
+	int err = 0;
+	int registered = 0;
+	u32 val;
+
+	IPW_DEBUG_INFO("enter\n");
+
+	mem_start = pci_resource_start(pci_dev, 0);
+	mem_len = pci_resource_len(pci_dev, 0);
+	mem_flags = pci_resource_flags(pci_dev, 0);
+
+	if ((mem_flags & IORESOURCE_MEM) != IORESOURCE_MEM) {
+		IPW_DEBUG_INFO("weird - resource type is not memory\n");
+		err = -ENODEV;
+		goto fail;
+	}
+
+	base_addr = ioremap_nocache(mem_start, mem_len);
+	if (!base_addr) {
+		printk(KERN_WARNING DRV_NAME
+		       "Error calling ioremap_nocache.\n");
+		err = -EIO;
+		goto fail;
+	}
+
+	/* allocate and initialize our net_device */
+	dev = ipw2100_alloc_device(pci_dev, base_addr, mem_start, mem_len);
+	if (!dev) {
+		printk(KERN_WARNING DRV_NAME
+		       "Error calling ipw2100_alloc_device.\n");
+		err = -ENOMEM;
+		goto fail;
+	}
+
+	/* set up PCI mappings for device */
+	err = pci_enable_device(pci_dev);
+	if (err) {
+		printk(KERN_WARNING DRV_NAME
+		       "Error calling pci_enable_device.\n");
+		return err;
+	}
+
+	priv = ieee80211_priv(dev);
+
+	pci_set_master(pci_dev);
+	pci_set_drvdata(pci_dev, priv);
+
+	err = pci_set_dma_mask(pci_dev, DMA_32BIT_MASK);
+	if (err) {
+		printk(KERN_WARNING DRV_NAME
+		       "Error calling pci_set_dma_mask.\n");
+		pci_disable_device(pci_dev);
+		return err;
+	}
+
+	err = pci_request_regions(pci_dev, DRV_NAME);
+	if (err) {
+		printk(KERN_WARNING DRV_NAME
+		       "Error calling pci_request_regions.\n");
+		pci_disable_device(pci_dev);
+		return err;
+	}
+
+	/* We disable the RETRY_TIMEOUT register (0x41) to keep
+	 * PCI Tx retries from interfering with C3 CPU state */
+	pci_read_config_dword(pci_dev, 0x40, &val);
+	if ((val & 0x0000ff00) != 0)
+		pci_write_config_dword(pci_dev, 0x40, val & 0xffff00ff);
+
+	pci_set_power_state(pci_dev, PCI_D0);
+
+	if (!ipw2100_hw_is_adapter_in_system(dev)) {
+		printk(KERN_WARNING DRV_NAME
+		       "Device not found via register read.\n");
+		err = -ENODEV;
+		goto fail;
+	}
+
+	SET_NETDEV_DEV(dev, &pci_dev->dev);
+
+	/* Force interrupts to be shut off on the device */
+	priv->status |= STATUS_INT_ENABLED;
+	ipw2100_disable_interrupts(priv);
+
+	/* Allocate and initialize the Tx/Rx queues and lists */
+	if (ipw2100_queues_allocate(priv)) {
+		printk(KERN_WARNING DRV_NAME
+		       "Error calling ipw2100_queues_allocate.\n");
+		err = -ENOMEM;
+		goto fail;
+	}
+	ipw2100_queues_initialize(priv);
+
+	err = request_irq(pci_dev->irq,
+			  ipw2100_interrupt, SA_SHIRQ,
+			  dev->name, priv);
+	if (err) {
+		printk(KERN_WARNING DRV_NAME
+		       "Error calling request_irq: %d.\n",
+		       pci_dev->irq);
+		goto fail;
+	}
+	dev->irq = pci_dev->irq;
+
+	IPW_DEBUG_INFO("Attempting to register device...\n");
+
+	SET_MODULE_OWNER(dev);
+
+	printk(KERN_INFO DRV_NAME
+	       ": Detected Intel PRO/Wireless 2100 Network Connection\n");
+
+	/* Bring up the interface.  Pre 0.46, after we registered the
+	 * network device we would call ipw2100_up.  This introduced a race
+	 * condition with newer hotplug configurations (network was coming
+	 * up and making calls before the device was initialized).
+	 *
+	 * If we called ipw2100_up before we registered the device, then the
+	 * device name wasn't registered.  So, we instead use the net_dev->init
+	 * member to call a function that in turn just calls ipw2100_up.
+	 * net_dev->init is called after name allocation but before the
+	 * notifier chain is called */
+	down(&priv->action_sem);
+	err = register_netdev(dev);
+	if (err) {
+		printk(KERN_WARNING DRV_NAME
+		       "Error calling register_netdev.\n");
+		goto fail_unlock;
+	}
+	registered = 1;
+
+	IPW_DEBUG_INFO("%s: Bound to %s\n", dev->name, pci_name(pci_dev));
+
+	/* perform this after register_netdev so that dev->name is set */
+	sysfs_create_group(&pci_dev->dev.kobj, &ipw2100_attribute_group);
+	netif_carrier_off(dev);
+
+	/* If the RF Kill switch is disabled, go ahead and complete the
+	 * startup sequence */
+	if (!(priv->status & STATUS_RF_KILL_MASK)) {
+		/* Enable the adapter - sends HOST_COMPLETE */
+		if (ipw2100_enable_adapter(priv)) {
+			printk(KERN_WARNING DRV_NAME
+			       ": %s: failed in call to enable adapter.\n",
+			       priv->net_dev->name);
+			ipw2100_hw_stop_adapter(priv);
+			err = -EIO;
+			goto fail_unlock;
+		}
+
+		/* Start a scan . . . */
+		ipw2100_set_scan_options(priv);
+		ipw2100_start_scan(priv);
+	}
+
+	IPW_DEBUG_INFO("exit\n");
+
+	priv->status |= STATUS_INITIALIZED;
+
+	up(&priv->action_sem);
+
+	return 0;
+
+ fail_unlock:
+	up(&priv->action_sem);
+
+ fail:
+	if (dev) {
+		if (registered)
+			unregister_netdev(dev);
+
+		ipw2100_hw_stop_adapter(priv);
+
+		ipw2100_disable_interrupts(priv);
+
+		if (dev->irq)
+			free_irq(dev->irq, priv);
+
+		ipw2100_kill_workqueue(priv);
+
+		/* These are safe to call even if they weren't allocated */
+		ipw2100_queues_free(priv);
+		sysfs_remove_group(&pci_dev->dev.kobj, &ipw2100_attribute_group);
+
+		free_ieee80211(dev);
+		pci_set_drvdata(pci_dev, NULL);
+	}
+
+	if (base_addr)
+		iounmap((char*)base_addr);
+
+	pci_release_regions(pci_dev);
+	pci_disable_device(pci_dev);
+
+	return err;
+}
+
+static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev)
+{
+	struct ipw2100_priv *priv = pci_get_drvdata(pci_dev);
+	struct net_device *dev;
+
+	if (priv) {
+		down(&priv->action_sem);
+
+		priv->status &= ~STATUS_INITIALIZED;
+
+		dev = priv->net_dev;
+		sysfs_remove_group(&pci_dev->dev.kobj, &ipw2100_attribute_group);
+
+#ifdef CONFIG_PM
+		if (ipw2100_firmware.version)
+			ipw2100_release_firmware(priv, &ipw2100_firmware);
+#endif
+		/* Take down the hardware */
+		ipw2100_down(priv);
+
+		/* Release the semaphore so that the network subsystem can
+		 * complete any needed calls into the driver... */
+		up(&priv->action_sem);
+
+		/* Unregister the device first - this results in close()
+		 * being called if the device is open.  If we free storage
+		 * first, then close() will crash. */
+		unregister_netdev(dev);
+
+		/* ipw2100_down will ensure that there is no more pending work
+		 * in the workqueues, so we can safely remove them now. */
+		ipw2100_kill_workqueue(priv);
+
+		ipw2100_queues_free(priv);
+
+		/* Free potential debugging firmware snapshot */
+		ipw2100_snapshot_free(priv);
+
+		if (dev->irq)
+			free_irq(dev->irq, priv);
+
+		if (dev->base_addr)
+			iounmap((unsigned char *)dev->base_addr);
+
+		free_ieee80211(dev);
+	}
+
+	pci_release_regions(pci_dev);
+	pci_disable_device(pci_dev);
+
+	IPW_DEBUG_INFO("exit\n");
+}
+
+
+#ifdef CONFIG_PM
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
+static int ipw2100_suspend(struct pci_dev *pci_dev, u32 state)
+#else
+static int ipw2100_suspend(struct pci_dev *pci_dev, pm_message_t state)
+#endif
+{
+	struct ipw2100_priv *priv = pci_get_drvdata(pci_dev);
+	struct net_device *dev = priv->net_dev;
+
+	IPW_DEBUG_INFO("%s: Going into suspend...\n",
+	       dev->name);
+
+	down(&priv->action_sem);
+	if (priv->status & STATUS_INITIALIZED) {
+		/* Take down the device; powers it off, etc. */
+		ipw2100_down(priv);
+	}
+
+	/* Remove the PRESENT state of the device */
+	netif_device_detach(dev);
+
+	pci_save_state(pci_dev);
+	pci_disable_device (pci_dev);
+	pci_set_power_state(pci_dev, PCI_D3hot);
+
+	up(&priv->action_sem);
+
+	return 0;
+}
+
+static int ipw2100_resume(struct pci_dev *pci_dev)
+{
+	struct ipw2100_priv *priv = pci_get_drvdata(pci_dev);
+	struct net_device *dev = priv->net_dev;
+	u32 val;
+
+	if (IPW2100_PM_DISABLED)
+		return 0;
+
+	down(&priv->action_sem);
+
+	IPW_DEBUG_INFO("%s: Coming out of suspend...\n",
+	       dev->name);
+
+	pci_set_power_state(pci_dev, PCI_D0);
+	pci_enable_device(pci_dev);
+	pci_restore_state(pci_dev);
+
+	/*
+	 * Suspend/Resume resets the PCI configuration space, so we have to
+	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
+	 * from interfering with C3 CPU state. pci_restore_state won't help
+	 * here since it only restores the first 64 bytes pci config header.
+	 */
+	pci_read_config_dword(pci_dev, 0x40, &val);
+	if ((val & 0x0000ff00) != 0)
+		pci_write_config_dword(pci_dev, 0x40, val & 0xffff00ff);
+
+	/* Set the device back into the PRESENT state; this will also wake
+	 * the queue if needed */
+	netif_device_attach(dev);
+
+	/* Bring the device back up */
+	if (!(priv->status & STATUS_RF_KILL_SW))
+		ipw2100_up(priv, 0);
+
+	up(&priv->action_sem);
+
+	return 0;
+}
+#endif
+
+
+#define IPW2100_DEV_ID(x) { PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, x }
+
+static struct pci_device_id ipw2100_pci_id_table[] __devinitdata = {
+	IPW2100_DEV_ID(0x2520), /* IN 2100A mPCI 3A */
+	IPW2100_DEV_ID(0x2521), /* IN 2100A mPCI 3B */
+	IPW2100_DEV_ID(0x2524), /* IN 2100A mPCI 3B */
+	IPW2100_DEV_ID(0x2525), /* IN 2100A mPCI 3B */
+	IPW2100_DEV_ID(0x2526), /* IN 2100A mPCI Gen A3 */
+	IPW2100_DEV_ID(0x2522), /* IN 2100 mPCI 3B */
+	IPW2100_DEV_ID(0x2523), /* IN 2100 mPCI 3A */
+	IPW2100_DEV_ID(0x2527), /* IN 2100 mPCI 3B */
+	IPW2100_DEV_ID(0x2528), /* IN 2100 mPCI 3B */
+	IPW2100_DEV_ID(0x2529), /* IN 2100 mPCI 3B */
+	IPW2100_DEV_ID(0x252B), /* IN 2100 mPCI 3A */
+	IPW2100_DEV_ID(0x252C), /* IN 2100 mPCI 3A */
+	IPW2100_DEV_ID(0x252D), /* IN 2100 mPCI 3A */
+
+	IPW2100_DEV_ID(0x2550), /* IB 2100A mPCI 3B */
+	IPW2100_DEV_ID(0x2551), /* IB 2100 mPCI 3B */
+	IPW2100_DEV_ID(0x2553), /* IB 2100 mPCI 3B */
+	IPW2100_DEV_ID(0x2554), /* IB 2100 mPCI 3B */
+	IPW2100_DEV_ID(0x2555), /* IB 2100 mPCI 3B */
+
+	IPW2100_DEV_ID(0x2560), /* DE 2100A mPCI 3A */
+	IPW2100_DEV_ID(0x2562), /* DE 2100A mPCI 3A */
+	IPW2100_DEV_ID(0x2563), /* DE 2100A mPCI 3A */
+	IPW2100_DEV_ID(0x2561), /* DE 2100 mPCI 3A */
+	IPW2100_DEV_ID(0x2565), /* DE 2100 mPCI 3A */
+	IPW2100_DEV_ID(0x2566), /* DE 2100 mPCI 3A */
+	IPW2100_DEV_ID(0x2567), /* DE 2100 mPCI 3A */
+
+	IPW2100_DEV_ID(0x2570), /* GA 2100 mPCI 3B */
+
+	IPW2100_DEV_ID(0x2580), /* TO 2100A mPCI 3B */
+	IPW2100_DEV_ID(0x2582), /* TO 2100A mPCI 3B */
+	IPW2100_DEV_ID(0x2583), /* TO 2100A mPCI 3B */
+	IPW2100_DEV_ID(0x2581), /* TO 2100 mPCI 3B */
+	IPW2100_DEV_ID(0x2585), /* TO 2100 mPCI 3B */
+	IPW2100_DEV_ID(0x2586), /* TO 2100 mPCI 3B */
+	IPW2100_DEV_ID(0x2587), /* TO 2100 mPCI 3B */
+
+	IPW2100_DEV_ID(0x2590), /* SO 2100A mPCI 3B */
+	IPW2100_DEV_ID(0x2592), /* SO 2100A mPCI 3B */
+	IPW2100_DEV_ID(0x2591), /* SO 2100 mPCI 3B */
+	IPW2100_DEV_ID(0x2593), /* SO 2100 mPCI 3B */
+	IPW2100_DEV_ID(0x2596), /* SO 2100 mPCI 3B */
+	IPW2100_DEV_ID(0x2598), /* SO 2100 mPCI 3B */
+
+	IPW2100_DEV_ID(0x25A0), /* HP 2100 mPCI 3B */
+	{0,},
+};
+
+MODULE_DEVICE_TABLE(pci, ipw2100_pci_id_table);
+
+static struct pci_driver ipw2100_pci_driver = {
+	.name = DRV_NAME,
+	.id_table = ipw2100_pci_id_table,
+	.probe = ipw2100_pci_init_one,
+	.remove = __devexit_p(ipw2100_pci_remove_one),
+#ifdef CONFIG_PM
+	.suspend = ipw2100_suspend,
+	.resume = ipw2100_resume,
+#endif
+};
+
+
+/**
+ * Initialize the ipw2100 driver/module
+ *
+ * @returns 0 if ok, < 0 errno on error.
+ *
+ * Note: we cannot init the /proc stuff until the PCI driver is there,
+ * or we risk an unlikely race condition on someone accessing
+ * uninitialized data in the PCI dev struct through /proc.
+ */
+static int __init ipw2100_init(void)
+{
+	int ret;
+
+	printk(KERN_INFO DRV_NAME ": %s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
+	printk(KERN_INFO DRV_NAME ": %s\n", DRV_COPYRIGHT);
+
+#ifdef CONFIG_IEEE80211_NOWEP
+	IPW_DEBUG_INFO(DRV_NAME ": Compiled with WEP disabled.\n");
+#endif
+
+	ret = pci_module_init(&ipw2100_pci_driver);
+
+#ifdef CONFIG_IPW_DEBUG
+	ipw2100_debug_level = debug;
+	driver_create_file(&ipw2100_pci_driver.driver,
+			   &driver_attr_debug_level);
+#endif
+
+	return ret;
+}
+
+
+/**
+ * Cleanup ipw2100 driver registration
+ */
+static void __exit ipw2100_exit(void)
+{
+	/* FIXME: IPG: check that we have no instances of the devices open */
+#ifdef CONFIG_IPW_DEBUG
+	driver_remove_file(&ipw2100_pci_driver.driver,
+			   &driver_attr_debug_level);
+#endif
+	pci_unregister_driver(&ipw2100_pci_driver);
+}
+
+module_init(ipw2100_init);
+module_exit(ipw2100_exit);
+
+#define WEXT_USECHANNELS 1
+
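+/* Illustrative note: entries are center frequencies in MHz; index i maps to
+ * 802.11b channel i + 1 (2412 MHz = channel 1, ..., 2484 MHz = channel 14),
+ * which is how ipw2100_wx_get_range() below fills its frequency list. */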
+const long ipw2100_frequencies[] = {
+	2412, 2417, 2422, 2427,
+	2432, 2437, 2442, 2447,
+	2452, 2457, 2462, 2467,
+	2472, 2484
+};
+
+#define FREQ_COUNT (sizeof(ipw2100_frequencies) / \
+                    sizeof(ipw2100_frequencies[0]))
+
+const long ipw2100_rates_11b[] = {
+	1000000,
+	2000000,
+	5500000,
+	11000000
+};
+
+#define RATE_COUNT (sizeof(ipw2100_rates_11b) / sizeof(ipw2100_rates_11b[0]))
+
+static int ipw2100_wx_get_name(struct net_device *dev,
+			       struct iw_request_info *info,
+			       union iwreq_data *wrqu, char *extra)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+	if (!(priv->status & STATUS_ASSOCIATED))
+		strcpy(wrqu->name, "unassociated");
+	else
+		snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11b");
+
+	IPW_DEBUG_WX("Name: %s\n", wrqu->name);
+	return 0;
+}
+
+
+static int ipw2100_wx_set_freq(struct net_device *dev,
+			       struct iw_request_info *info,
+			       union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+	struct iw_freq *fwrq = &wrqu->freq;
+	int err = 0;
+
+	if (priv->ieee->iw_mode == IW_MODE_INFRA)
+		return -EOPNOTSUPP;
+
+	down(&priv->action_sem);
+	if (!(priv->status & STATUS_INITIALIZED)) {
+		err = -EIO;
+		goto done;
+	}
+
+	/* if setting by frequency (WE encodes it as m * 10^e Hz),
+	 * convert to a channel number */
+	if (fwrq->e == 1) {
+		if ((fwrq->m >= (int) 2.412e8 &&
+		     fwrq->m <= (int) 2.487e8)) {
+			int f = fwrq->m / 100000;
+			int c = 0;
+
+			while ((c < REG_MAX_CHANNEL) &&
+			       (f != ipw2100_frequencies[c]))
+				c++;
+
+			/* hack to fall through */
+			fwrq->e = 0;
+			fwrq->m = c + 1;
+		}
+	}
+
+	if (fwrq->e > 0 || fwrq->m > 1000) {
+		err = -EOPNOTSUPP;
+		goto done;
+	} else { /* Set the channel */
+		IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
+		err = ipw2100_set_channel(priv, fwrq->m, 0);
+	}
+
+ done:
+	up(&priv->action_sem);
+	return err;
+}
+
+
+static int ipw2100_wx_get_freq(struct net_device *dev,
+			       struct iw_request_info *info,
+			       union iwreq_data *wrqu, char *extra)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+
+	wrqu->freq.e = 0;
+
+	/* If we are associated, trying to associate, or have a statically
+	 * configured CHANNEL then return that; otherwise return ANY */
+	if (priv->config & CFG_STATIC_CHANNEL ||
+	    priv->status & STATUS_ASSOCIATED)
+		wrqu->freq.m = priv->channel;
+	else
+		wrqu->freq.m = 0;
+
+	IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
+	return 0;
+
+}
+
+static int ipw2100_wx_set_mode(struct net_device *dev,
+			       struct iw_request_info *info,
+			       union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+	int err = 0;
+
+	IPW_DEBUG_WX("SET Mode -> %d \n", wrqu->mode);
+
+	if (wrqu->mode == priv->ieee->iw_mode)
+		return 0;
+
+	down(&priv->action_sem);
+	if (!(priv->status & STATUS_INITIALIZED)) {
+		err = -EIO;
+		goto done;
+	}
+
+	switch (wrqu->mode) {
+#ifdef CONFIG_IPW2100_MONITOR
+	case IW_MODE_MONITOR:
+		err = ipw2100_switch_mode(priv, IW_MODE_MONITOR);
+		break;
+#endif /* CONFIG_IPW2100_MONITOR */
+	case IW_MODE_ADHOC:
+		err = ipw2100_switch_mode(priv, IW_MODE_ADHOC);
+		break;
+	case IW_MODE_INFRA:
+	case IW_MODE_AUTO:
+	default:
+		err = ipw2100_switch_mode(priv, IW_MODE_INFRA);
+		break;
+	}
+
+done:
+	up(&priv->action_sem);
+ 	return err;
+}
+
+static int ipw2100_wx_get_mode(struct net_device *dev,
+			       struct iw_request_info *info,
+			       union iwreq_data *wrqu, char *extra)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+
+	wrqu->mode = priv->ieee->iw_mode;
+	IPW_DEBUG_WX("GET Mode -> %d\n", wrqu->mode);
+
+	return 0;
+}
+
+
+#define POWER_MODES 5
+
+/* Values are in microseconds */
+const s32 timeout_duration[POWER_MODES] = {
+	350000,
+	250000,
+	75000,
+	37000,
+	25000,
+};
+
+const s32 period_duration[POWER_MODES] = {
+	400000,
+	700000,
+	1000000,
+	1000000,
+	1000000
+};
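+/* Illustrative note: for the numbered power-save levels, ipw2100_wx_get_powermode()
+ * below reports level N using timeout_duration[N - 1] and period_duration[N - 1]
+ * converted to milliseconds, e.g. level 1 -> 350ms timeout / 400ms period and
+ * level 5 -> 25ms timeout / 1000ms period. */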
+
+static int ipw2100_wx_get_range(struct net_device *dev,
+				struct iw_request_info *info,
+				union iwreq_data *wrqu, char *extra)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+	struct iw_range *range = (struct iw_range *)extra;
+	u16 val;
+	int i, level;
+
+	wrqu->data.length = sizeof(*range);
+	memset(range, 0, sizeof(*range));
+
+	/* Let's try to keep this struct in the same order as in
+	 * linux/include/wireless.h
+	 */
+
+	/* TODO: See what values we can set, and remove the ones we can't
+	 * set, or fill them with some default data.
+	 */
+
+	/* ~5 Mb/s real (802.11b) */
+	range->throughput = 5 * 1000 * 1000;
+
+//	range->sensitivity;	/* signal level threshold range */
+
+	range->max_qual.qual = 100;
+	/* TODO: Find real max RSSI and stick here */
+	range->max_qual.level = 0;
+	range->max_qual.noise = 0;
+	range->max_qual.updated = 7; /* Updated all three */
+
+	range->avg_qual.qual = 70; /* > 8% missed beacons is 'bad' */
+	/* TODO: Find real 'good' to 'bad' threshold value for RSSI */
+	range->avg_qual.level = 20 + IPW2100_RSSI_TO_DBM;
+	range->avg_qual.noise = 0;
+	range->avg_qual.updated = 7; /* Updated all three */
+
+	range->num_bitrates = RATE_COUNT;
+
+	for (i = 0; i < RATE_COUNT && i < IW_MAX_BITRATES; i++) {
+		range->bitrate[i] = ipw2100_rates_11b[i];
+	}
+
+	range->min_rts = MIN_RTS_THRESHOLD;
+	range->max_rts = MAX_RTS_THRESHOLD;
+	range->min_frag = MIN_FRAG_THRESHOLD;
+	range->max_frag = MAX_FRAG_THRESHOLD;
+
+	range->min_pmp = period_duration[0];	/* Minimal PM period */
+	range->max_pmp = period_duration[POWER_MODES-1];/* Maximal PM period */
+	range->min_pmt = timeout_duration[POWER_MODES-1];	/* Minimal PM timeout */
+	range->max_pmt = timeout_duration[0];/* Maximal PM timeout */
+
+	/* How to decode max/min PM period */
+	range->pmp_flags = IW_POWER_PERIOD;
+	/* How to decode max/min PM timeout */
+	range->pmt_flags = IW_POWER_TIMEOUT;
+	/* What PM options are supported */
+	range->pm_capa = IW_POWER_TIMEOUT | IW_POWER_PERIOD;
+
+	range->encoding_size[0] = 5;
+	range->encoding_size[1] = 13;           /* Different token sizes */
+	range->num_encoding_sizes = 2;		/* Number of entries in the list */
+	range->max_encoding_tokens = WEP_KEYS;  /* Max number of tokens */
+//	range->encoding_login_index;		/* token index for login token */
+
+	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
+		range->txpower_capa = IW_TXPOW_DBM;
+		range->num_txpower = IW_MAX_TXPOWER;
+		for (i = 0, level = (IPW_TX_POWER_MAX_DBM * 16); i < IW_MAX_TXPOWER;
+		     i++, level -= ((IPW_TX_POWER_MAX_DBM - IPW_TX_POWER_MIN_DBM) * 16) /
+			     (IW_MAX_TXPOWER - 1))
+			range->txpower[i] = level / 16;
+	} else {
+		range->txpower_capa = 0;
+		range->num_txpower = 0;
+	}
+
+
+	/* Set the Wireless Extension versions */
+	range->we_version_compiled = WIRELESS_EXT;
+	range->we_version_source = 16;
+
+//	range->retry_capa;	/* What retry options are supported */
+//	range->retry_flags;	/* How to decode max/min retry limit */
+//	range->r_time_flags;	/* How to decode max/min retry life */
+//	range->min_retry;	/* Minimal number of retries */
+//	range->max_retry;	/* Maximal number of retries */
+//	range->min_r_time;	/* Minimal retry lifetime */
+//	range->max_r_time;	/* Maximal retry lifetime */
+
+	range->num_channels = FREQ_COUNT;
+
+	val = 0;
+	for (i = 0; i < FREQ_COUNT; i++) {
+		// TODO: Include only legal frequencies for some countries
+//		if (local->channel_mask & (1 << i)) {
+			range->freq[val].i = i + 1;
+			range->freq[val].m = ipw2100_frequencies[i] * 100000;
+			range->freq[val].e = 1;
+			val++;
+//		}
+		if (val == IW_MAX_FREQUENCIES)
+			break;
+	}
+	range->num_frequency = val;
+
+	IPW_DEBUG_WX("GET Range\n");
+
+	return 0;
+}
+
+static int ipw2100_wx_set_wap(struct net_device *dev,
+			      struct iw_request_info *info,
+			      union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+	int err = 0;
+
+	static const unsigned char any[] = {
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+	};
+	static const unsigned char off[] = {
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+	};
+
+	// sanity checks
+	if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
+		return -EINVAL;
+
+	down(&priv->action_sem);
+	if (!(priv->status & STATUS_INITIALIZED)) {
+		err = -EIO;
+		goto done;
+	}
+
+	if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
+	    !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
+		/* we disable mandatory BSSID association */
+		IPW_DEBUG_WX("exit - disable mandatory BSSID\n");
+		priv->config &= ~CFG_STATIC_BSSID;
+		err = ipw2100_set_mandatory_bssid(priv, NULL, 0);
+		goto done;
+	}
+
+	priv->config |= CFG_STATIC_BSSID;
+	memcpy(priv->mandatory_bssid_mac, wrqu->ap_addr.sa_data, ETH_ALEN);
+
+	err = ipw2100_set_mandatory_bssid(priv, wrqu->ap_addr.sa_data, 0);
+
+	IPW_DEBUG_WX("SET BSSID -> %02X:%02X:%02X:%02X:%02X:%02X\n",
+		     wrqu->ap_addr.sa_data[0] & 0xff,
+		     wrqu->ap_addr.sa_data[1] & 0xff,
+		     wrqu->ap_addr.sa_data[2] & 0xff,
+		     wrqu->ap_addr.sa_data[3] & 0xff,
+		     wrqu->ap_addr.sa_data[4] & 0xff,
+		     wrqu->ap_addr.sa_data[5] & 0xff);
+
+ done:
+	up(&priv->action_sem);
+	return err;
+}
+
+static int ipw2100_wx_get_wap(struct net_device *dev,
+			      struct iw_request_info *info,
+			      union iwreq_data *wrqu, char *extra)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+
+	/* If we are associated, trying to associate, or have a statically
+	 * configured BSSID then return that; otherwise return ANY */
+	if (priv->config & CFG_STATIC_BSSID ||
+	    priv->status & STATUS_ASSOCIATED) {
+		wrqu->ap_addr.sa_family = ARPHRD_ETHER;
+		memcpy(wrqu->ap_addr.sa_data, &priv->bssid, ETH_ALEN);
+	} else
+		memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
+
+	IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n",
+		     MAC_ARG(wrqu->ap_addr.sa_data));
+	return 0;
+}
+
+static int ipw2100_wx_set_essid(struct net_device *dev,
+				struct iw_request_info *info,
+				union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+	char *essid = ""; /* ANY */
+	int length = 0;
+	int err = 0;
+
+	down(&priv->action_sem);
+	if (!(priv->status & STATUS_INITIALIZED)) {
+		err = -EIO;
+		goto done;
+	}
+
+	if (wrqu->essid.flags && wrqu->essid.length) {
+		length = wrqu->essid.length - 1;
+		essid = extra;
+	}
+
+	if (length == 0) {
+		IPW_DEBUG_WX("Setting ESSID to ANY\n");
+		priv->config &= ~CFG_STATIC_ESSID;
+		err = ipw2100_set_essid(priv, NULL, 0, 0);
+		goto done;
+	}
+
+	length = min(length, IW_ESSID_MAX_SIZE);
+
+	priv->config |= CFG_STATIC_ESSID;
+
+	if (priv->essid_len == length && !memcmp(priv->essid, extra, length)) {
+		IPW_DEBUG_WX("ESSID set to current ESSID.\n");
+		err = 0;
+		goto done;
+	}
+
+	IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(essid, length),
+		     length);
+
+	priv->essid_len = length;
+	memcpy(priv->essid, essid, priv->essid_len);
+
+	err = ipw2100_set_essid(priv, essid, length, 0);
+
+ done:
+	up(&priv->action_sem);
+	return err;
+}
+
+static int ipw2100_wx_get_essid(struct net_device *dev,
+				struct iw_request_info *info,
+				union iwreq_data *wrqu, char *extra)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+
+	/* If we are associated, trying to associate, or have a statically
+	 * configured ESSID then return that; otherwise return ANY */
+	if (priv->config & CFG_STATIC_ESSID ||
+	    priv->status & STATUS_ASSOCIATED) {
+		IPW_DEBUG_WX("Getting essid: '%s'\n",
+			     escape_essid(priv->essid, priv->essid_len));
+		memcpy(extra, priv->essid, priv->essid_len);
+		wrqu->essid.length = priv->essid_len;
+		wrqu->essid.flags = 1; /* active */
+	} else {
+		IPW_DEBUG_WX("Getting essid: ANY\n");
+		wrqu->essid.length = 0;
+		wrqu->essid.flags = 0; /* active */
+	}
+
+	return 0;
+}
+
+static int ipw2100_wx_set_nick(struct net_device *dev,
+			       struct iw_request_info *info,
+			       union iwreq_data *wrqu, char *extra)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+
+	if (wrqu->data.length > IW_ESSID_MAX_SIZE)
+		return -E2BIG;
+
+	wrqu->data.length = min((size_t)wrqu->data.length, sizeof(priv->nick));
+	memset(priv->nick, 0, sizeof(priv->nick));
+	memcpy(priv->nick, extra,  wrqu->data.length);
+
+	IPW_DEBUG_WX("SET Nickname -> %s \n", priv->nick);
+
+	return 0;
+}
+
+static int ipw2100_wx_get_nick(struct net_device *dev,
+			       struct iw_request_info *info,
+			       union iwreq_data *wrqu, char *extra)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+
+	wrqu->data.length = strlen(priv->nick) + 1;
+	memcpy(extra, priv->nick, wrqu->data.length);
+	wrqu->data.flags = 1; /* active */
+
+	IPW_DEBUG_WX("GET Nickname -> %s \n", extra);
+
+	return 0;
+}
+
+static int ipw2100_wx_set_rate(struct net_device *dev,
+			       struct iw_request_info *info,
+			       union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+	u32 target_rate = wrqu->bitrate.value;
+	u32 rate;
+	int err = 0;
+
+	down(&priv->action_sem);
+	if (!(priv->status & STATUS_INITIALIZED)) {
+		err = -EIO;
+		goto done;
+	}
+
+	rate = 0;
+
+	if (target_rate == 1000000 ||
+	    (!wrqu->bitrate.fixed && target_rate > 1000000))
+		rate |= TX_RATE_1_MBIT;
+	if (target_rate == 2000000 ||
+	    (!wrqu->bitrate.fixed && target_rate > 2000000))
+		rate |= TX_RATE_2_MBIT;
+	if (target_rate == 5500000 ||
+	    (!wrqu->bitrate.fixed && target_rate > 5500000))
+		rate |= TX_RATE_5_5_MBIT;
+	if (target_rate == 11000000 ||
+	    (!wrqu->bitrate.fixed && target_rate > 11000000))
+		rate |= TX_RATE_11_MBIT;
+	if (rate == 0)
+		rate = DEFAULT_TX_RATES;
+
+	err = ipw2100_set_tx_rates(priv, rate, 0);
+
+	IPW_DEBUG_WX("SET Rate -> %04X \n", rate);
+ done:
+	up(&priv->action_sem);
+	return err;
+}
+
+
+static int ipw2100_wx_get_rate(struct net_device *dev,
+			       struct iw_request_info *info,
+			       union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+	int val;
+	int len = sizeof(val);
+	int err = 0;
+
+	if (!(priv->status & STATUS_ENABLED) ||
+	    priv->status & STATUS_RF_KILL_MASK ||
+	    !(priv->status & STATUS_ASSOCIATED)) {
+		wrqu->bitrate.value = 0;
+		return 0;
+	}
+
+	down(&priv->action_sem);
+	if (!(priv->status & STATUS_INITIALIZED)) {
+		err = -EIO;
+		goto done;
+	}
+
+	err = ipw2100_get_ordinal(priv, IPW_ORD_CURRENT_TX_RATE, &val, &len);
+	if (err) {
+		IPW_DEBUG_WX("failed querying ordinals.\n");
+		goto done;
+	}
+
+	switch (val & TX_RATE_MASK) {
+	case TX_RATE_1_MBIT:
+		wrqu->bitrate.value = 1000000;
+		break;
+	case TX_RATE_2_MBIT:
+		wrqu->bitrate.value = 2000000;
+		break;
+	case TX_RATE_5_5_MBIT:
+		wrqu->bitrate.value = 5500000;
+		break;
+	case TX_RATE_11_MBIT:
+		wrqu->bitrate.value = 11000000;
+		break;
+	default:
+		wrqu->bitrate.value = 0;
+	}
+
+	IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
+
+ done:
+	up(&priv->action_sem);
+	return err;
+}
+
+static int ipw2100_wx_set_rts(struct net_device *dev,
+			      struct iw_request_info *info,
+			      union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+	int value, err;
+
+	/* Auto RTS not yet supported */
+	if (wrqu->rts.fixed == 0)
+		return -EINVAL;
+
+	down(&priv->action_sem);
+	if (!(priv->status & STATUS_INITIALIZED)) {
+		err = -EIO;
+		goto done;
+	}
+
+	if (wrqu->rts.disabled)
+		value = priv->rts_threshold | RTS_DISABLED;
+	else {
+		if (wrqu->rts.value < 1 ||
+		    wrqu->rts.value > 2304) {
+			err = -EINVAL;
+			goto done;
+		}
+		value = wrqu->rts.value;
+	}
+
+	err = ipw2100_set_rts_threshold(priv, value);
+
+	IPW_DEBUG_WX("SET RTS Threshold -> 0x%08X \n", value);
+ done:
+	up(&priv->action_sem);
+	return err;
+}
+
+static int ipw2100_wx_get_rts(struct net_device *dev,
+			      struct iw_request_info *info,
+			      union iwreq_data *wrqu, char *extra)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+
+	wrqu->rts.value = priv->rts_threshold & ~RTS_DISABLED;
+	wrqu->rts.fixed = 1; /* no auto select */
+
+	/* If RTS is set to the default value, then it is disabled */
+	wrqu->rts.disabled = (priv->rts_threshold & RTS_DISABLED) ? 1 : 0;
+
+	IPW_DEBUG_WX("GET RTS Threshold -> 0x%08X \n", wrqu->rts.value);
+
+	return 0;
+}
+
+static int ipw2100_wx_set_txpow(struct net_device *dev,
+				struct iw_request_info *info,
+				union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+	int err = 0, value;
+
+	if (priv->ieee->iw_mode != IW_MODE_ADHOC)
+		return -EINVAL;
+
+	if (wrqu->txpower.disabled == 1 || wrqu->txpower.fixed == 0)
+		value = IPW_TX_POWER_DEFAULT;
+	else {
+		if (wrqu->txpower.value < IPW_TX_POWER_MIN_DBM ||
+		    wrqu->txpower.value > IPW_TX_POWER_MAX_DBM)
+			return -EINVAL;
+
+		value = (wrqu->txpower.value - IPW_TX_POWER_MIN_DBM) * 16 /
+			(IPW_TX_POWER_MAX_DBM - IPW_TX_POWER_MIN_DBM);
+	}
+
+	down(&priv->action_sem);
+	if (!(priv->status & STATUS_INITIALIZED)) {
+		err = -EIO;
+		goto done;
+	}
+
+	err = ipw2100_set_tx_power(priv, value);
+
+	IPW_DEBUG_WX("SET TX Power -> %d \n", value);
+
+ done:
+	up(&priv->action_sem);
+	return err;
+}
+
+static int ipw2100_wx_get_txpow(struct net_device *dev,
+				struct iw_request_info *info,
+				union iwreq_data *wrqu, char *extra)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+
+	if (priv->ieee->iw_mode != IW_MODE_ADHOC) {
+		wrqu->power.disabled = 1;
+		return 0;
+	}
+
+	if (priv->tx_power == IPW_TX_POWER_DEFAULT) {
+		wrqu->power.fixed = 0;
+		wrqu->power.value = IPW_TX_POWER_MAX_DBM;
+		wrqu->power.disabled = 1;
+	} else {
+		wrqu->power.disabled = 0;
+		wrqu->power.fixed = 1;
+		wrqu->power.value =
+			(priv->tx_power *
+			 (IPW_TX_POWER_MAX_DBM - IPW_TX_POWER_MIN_DBM)) /
+			(IPW_TX_POWER_MAX - IPW_TX_POWER_MIN) +
+			IPW_TX_POWER_MIN_DBM;
+	}
+
+	wrqu->power.flags = IW_TXPOW_DBM;
+
+	IPW_DEBUG_WX("GET TX Power -> %d \n", wrqu->power.value);
+
+	return 0;
+}
+
+static int ipw2100_wx_set_frag(struct net_device *dev,
+			       struct iw_request_info *info,
+			       union iwreq_data *wrqu, char *extra)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+
+	if (!wrqu->frag.fixed)
+		return -EINVAL;
+
+	if (wrqu->frag.disabled) {
+		priv->frag_threshold |= FRAG_DISABLED;
+		priv->ieee->fts = DEFAULT_FTS;
+	} else {
+		if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
+		    wrqu->frag.value > MAX_FRAG_THRESHOLD)
+			return -EINVAL;
+
+		priv->ieee->fts = wrqu->frag.value & ~0x1;
+		priv->frag_threshold = priv->ieee->fts;
+	}
+
+	IPW_DEBUG_WX("SET Frag Threshold -> %d \n", priv->ieee->fts);
+
+	return 0;
+}
+
+static int ipw2100_wx_get_frag(struct net_device *dev,
+			       struct iw_request_info *info,
+			       union iwreq_data *wrqu, char *extra)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+	wrqu->frag.value = priv->frag_threshold & ~FRAG_DISABLED;
+	wrqu->frag.fixed = 0;	/* no auto select */
+	wrqu->frag.disabled = (priv->frag_threshold & FRAG_DISABLED) ? 1 : 0;
+
+	IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
+
+	return 0;
+}
+
+static int ipw2100_wx_set_retry(struct net_device *dev,
+				struct iw_request_info *info,
+				union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+	int err = 0;
+
+	if (wrqu->retry.flags & IW_RETRY_LIFETIME ||
+	    wrqu->retry.disabled)
+		return -EINVAL;
+
+	if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
+		return 0;
+
+	down(&priv->action_sem);
+	if (!(priv->status & STATUS_INITIALIZED)) {
+		err = -EIO;
+		goto done;
+	}
+
+	if (wrqu->retry.flags & IW_RETRY_MIN) {
+		err = ipw2100_set_short_retry(priv, wrqu->retry.value);
+		IPW_DEBUG_WX("SET Short Retry Limit -> %d \n",
+		       wrqu->retry.value);
+		goto done;
+	}
+
+	if (wrqu->retry.flags & IW_RETRY_MAX) {
+		err = ipw2100_set_long_retry(priv, wrqu->retry.value);
+		IPW_DEBUG_WX("SET Long Retry Limit -> %d \n",
+		       wrqu->retry.value);
+		goto done;
+	}
+
+	err = ipw2100_set_short_retry(priv, wrqu->retry.value);
+	if (!err)
+		err = ipw2100_set_long_retry(priv, wrqu->retry.value);
+
+	IPW_DEBUG_WX("SET Both Retry Limits -> %d \n", wrqu->retry.value);
+
+ done:
+	up(&priv->action_sem);
+	return err;
+}
+
+static int ipw2100_wx_get_retry(struct net_device *dev,
+				struct iw_request_info *info,
+				union iwreq_data *wrqu, char *extra)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+
+	wrqu->retry.disabled = 0; /* can't be disabled */
+
+	if ((wrqu->retry.flags & IW_RETRY_TYPE) ==
+	    IW_RETRY_LIFETIME)
+		return -EINVAL;
+
+	if (wrqu->retry.flags & IW_RETRY_MAX) {
+		wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
+		wrqu->retry.value = priv->long_retry_limit;
+	} else {
+		wrqu->retry.flags =
+		    (priv->short_retry_limit !=
+		     priv->long_retry_limit) ?
+		    IW_RETRY_LIMIT | IW_RETRY_MIN : IW_RETRY_LIMIT;
+
+		wrqu->retry.value = priv->short_retry_limit;
+	}
+
+	IPW_DEBUG_WX("GET Retry -> %d \n", wrqu->retry.value);
+
+	return 0;
+}
+
+static int ipw2100_wx_set_scan(struct net_device *dev,
+			       struct iw_request_info *info,
+			       union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+	int err = 0;
+
+	down(&priv->action_sem);
+	if (!(priv->status & STATUS_INITIALIZED)) {
+		err = -EIO;
+		goto done;
+	}
+
+	IPW_DEBUG_WX("Initiating scan...\n");
+	if (ipw2100_set_scan_options(priv) ||
+	    ipw2100_start_scan(priv)) {
+		IPW_DEBUG_WX("Start scan failed.\n");
+
+		/* TODO: Mark a scan as pending so when hardware initialized
+		 *       a scan starts */
+	}
+
+ done:
+	up(&priv->action_sem);
+	return err;
+}
+
+static int ipw2100_wx_get_scan(struct net_device *dev,
+			       struct iw_request_info *info,
+			       union iwreq_data *wrqu, char *extra)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+	return ieee80211_wx_get_scan(priv->ieee, info, wrqu, extra);
+}
+
+
+/*
+ * Implementation based on code in hostap-driver v0.1.3 hostap_ioctl.c
+ */
+static int ipw2100_wx_set_encode(struct net_device *dev,
+				 struct iw_request_info *info,
+				 union iwreq_data *wrqu, char *key)
+{
+	/*
+	 * No check of STATUS_INITIALIZED required
+	 */
+
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+	return ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);
+}
+
+static int ipw2100_wx_get_encode(struct net_device *dev,
+				 struct iw_request_info *info,
+				 union iwreq_data *wrqu, char *key)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+	return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key);
+}
+
+static int ipw2100_wx_set_power(struct net_device *dev,
+			        struct iw_request_info *info,
+			        union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+	int err = 0;
+
+	down(&priv->action_sem);
+	if (!(priv->status & STATUS_INITIALIZED)) {
+		err = -EIO;
+		goto done;
+	}
+
+	if (wrqu->power.disabled) {
+		priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
+		err = ipw2100_set_power_mode(priv, IPW_POWER_MODE_CAM);
+		IPW_DEBUG_WX("SET Power Management Mode -> off\n");
+		goto done;
+	}
+
+	switch (wrqu->power.flags & IW_POWER_MODE) {
+	case IW_POWER_ON:    /* If not specified */
+	case IW_POWER_MODE:  /* If set all mask */
+	case IW_POWER_ALL_R: /* If explicitly stated all */
+		break;
+	default: /* Otherwise we don't support it */
+		IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
+			     wrqu->power.flags);
+		err = -EOPNOTSUPP;
+		goto done;
+	}
+
+	/* If the user hasn't specified a power management mode yet, default
+	 * to BATTERY */
+	priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
+	err = ipw2100_set_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
+
+	IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n",
+		     priv->power_mode);
+
+ done:
+	up(&priv->action_sem);
+	return err;
+
+}
+
+static int ipw2100_wx_get_power(struct net_device *dev,
+			        struct iw_request_info *info,
+			        union iwreq_data *wrqu, char *extra)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+
+	if (!(priv->power_mode & IPW_POWER_ENABLED)) {
+		wrqu->power.disabled = 1;
+	} else {
+		wrqu->power.disabled = 0;
+		wrqu->power.flags = 0;
+	}
+
+	IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
+
+	return 0;
+}
+
+
+/*
+ *
+ * IWPRIV handlers
+ *
+ */
+#ifdef CONFIG_IPW2100_MONITOR
+static int ipw2100_wx_set_promisc(struct net_device *dev,
+				  struct iw_request_info *info,
+				  union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+	int *parms = (int *)extra;
+	int enable = (parms[0] > 0);
+	int err = 0;
+
+	down(&priv->action_sem);
+	if (!(priv->status & STATUS_INITIALIZED)) {
+		err = -EIO;
+		goto done;
+	}
+
+	if (enable) {
+		if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
+			err = ipw2100_set_channel(priv, parms[1], 0);
+			goto done;
+		}
+		priv->channel = parms[1];
+		err = ipw2100_switch_mode(priv, IW_MODE_MONITOR);
+	} else {
+		if (priv->ieee->iw_mode == IW_MODE_MONITOR)
+			err = ipw2100_switch_mode(priv, priv->last_mode);
+	}
+ done:
+	up(&priv->action_sem);
+	return err;
+}
+
+static int ipw2100_wx_reset(struct net_device *dev,
+			    struct iw_request_info *info,
+			    union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+	if (priv->status & STATUS_INITIALIZED)
+		schedule_reset(priv);
+	return 0;
+}
+
+#endif
+
+static int ipw2100_wx_set_powermode(struct net_device *dev,
+				    struct iw_request_info *info,
+				    union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+	int err = 0, mode = *(int *)extra;
+
+	down(&priv->action_sem);
+	if (!(priv->status & STATUS_INITIALIZED)) {
+		err = -EIO;
+		goto done;
+	}
+
+	if ((mode < 1) || (mode > POWER_MODES))
+		mode = IPW_POWER_AUTO;
+
+	if (priv->power_mode != mode)
+		err = ipw2100_set_power_mode(priv, mode);
+ done:
+	up(&priv->action_sem);
+	return err;
+}
+
+#define MAX_POWER_STRING 80
+static int ipw2100_wx_get_powermode(struct net_device *dev,
+				    struct iw_request_info *info,
+				    union iwreq_data *wrqu, char *extra)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+	int level = IPW_POWER_LEVEL(priv->power_mode);
+	s32 timeout, period;
+
+	if (!(priv->power_mode & IPW_POWER_ENABLED)) {
+		snprintf(extra, MAX_POWER_STRING,
+			 "Power save level: %d (Off)", level);
+	} else {
+		switch (level) {
+		case IPW_POWER_MODE_CAM:
+			snprintf(extra, MAX_POWER_STRING,
+				 "Power save level: %d (None)", level);
+			break;
+		case IPW_POWER_AUTO:
+			snprintf(extra, MAX_POWER_STRING,
+				 "Power save level: %d (Auto)", 0);
+			break;
+		default:
+			timeout = timeout_duration[level - 1] / 1000;
+			period = period_duration[level - 1] / 1000;
+			snprintf(extra, MAX_POWER_STRING,
+				 "Power save level: %d "
+				 "(Timeout %dms, Period %dms)",
+				 level, timeout, period);
+		}
+	}
+
+	wrqu->data.length = strlen(extra) + 1;
+
+	return 0;
+}
+
+
+static int ipw2100_wx_set_preamble(struct net_device *dev,
+				   struct iw_request_info *info,
+				   union iwreq_data *wrqu, char *extra)
+{
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+	int err, mode = *(int *)extra;
+
+	down(&priv->action_sem);
+	if (!(priv->status & STATUS_INITIALIZED)) {
+		err = -EIO;
+		goto done;
+	}
+
+	if (mode == 1)
+		priv->config |= CFG_LONG_PREAMBLE;
+	else if (mode == 0)
+		priv->config &= ~CFG_LONG_PREAMBLE;
+	else {
+		err = -EINVAL;
+		goto done;
+	}
+
+	err = ipw2100_system_config(priv, 0);
+
+done:
+	up(&priv->action_sem);
+	return err;
+}
+
+static int ipw2100_wx_get_preamble(struct net_device *dev,
+				    struct iw_request_info *info,
+				    union iwreq_data *wrqu, char *extra)
+{
+	/*
+	 * This can be called at any time.  No action lock required
+	 */
+
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+
+	if (priv->config & CFG_LONG_PREAMBLE)
+		snprintf(wrqu->name, IFNAMSIZ, "long (1)");
+	else
+		snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
+
+	return 0;
+}
+
+static iw_handler ipw2100_wx_handlers[] =
+{
+        NULL,                     /* SIOCSIWCOMMIT */
+        ipw2100_wx_get_name,      /* SIOCGIWNAME */
+        NULL,                     /* SIOCSIWNWID */
+        NULL,                     /* SIOCGIWNWID */
+        ipw2100_wx_set_freq,      /* SIOCSIWFREQ */
+        ipw2100_wx_get_freq,      /* SIOCGIWFREQ */
+        ipw2100_wx_set_mode,      /* SIOCSIWMODE */
+        ipw2100_wx_get_mode,      /* SIOCGIWMODE */
+        NULL,                     /* SIOCSIWSENS */
+        NULL,                     /* SIOCGIWSENS */
+        NULL,                     /* SIOCSIWRANGE */
+        ipw2100_wx_get_range,     /* SIOCGIWRANGE */
+        NULL,                     /* SIOCSIWPRIV */
+        NULL,                     /* SIOCGIWPRIV */
+        NULL,                     /* SIOCSIWSTATS */
+        NULL,                     /* SIOCGIWSTATS */
+        NULL,                     /* SIOCSIWSPY */
+        NULL,                     /* SIOCGIWSPY */
+        NULL,                     /* SIOCGIWTHRSPY */
+        NULL,                     /* SIOCWIWTHRSPY */
+        ipw2100_wx_set_wap,       /* SIOCSIWAP */
+        ipw2100_wx_get_wap,       /* SIOCGIWAP */
+        NULL,                     /* -- hole -- */
+        NULL,                     /* SIOCGIWAPLIST -- deprecated */
+        ipw2100_wx_set_scan,      /* SIOCSIWSCAN */
+        ipw2100_wx_get_scan,      /* SIOCGIWSCAN */
+        ipw2100_wx_set_essid,     /* SIOCSIWESSID */
+        ipw2100_wx_get_essid,     /* SIOCGIWESSID */
+        ipw2100_wx_set_nick,      /* SIOCSIWNICKN */
+        ipw2100_wx_get_nick,      /* SIOCGIWNICKN */
+        NULL,                     /* -- hole -- */
+        NULL,                     /* -- hole -- */
+        ipw2100_wx_set_rate,      /* SIOCSIWRATE */
+        ipw2100_wx_get_rate,      /* SIOCGIWRATE */
+        ipw2100_wx_set_rts,       /* SIOCSIWRTS */
+        ipw2100_wx_get_rts,       /* SIOCGIWRTS */
+        ipw2100_wx_set_frag,      /* SIOCSIWFRAG */
+        ipw2100_wx_get_frag,      /* SIOCGIWFRAG */
+        ipw2100_wx_set_txpow,     /* SIOCSIWTXPOW */
+        ipw2100_wx_get_txpow,     /* SIOCGIWTXPOW */
+        ipw2100_wx_set_retry,     /* SIOCSIWRETRY */
+        ipw2100_wx_get_retry,     /* SIOCGIWRETRY */
+        ipw2100_wx_set_encode,    /* SIOCSIWENCODE */
+        ipw2100_wx_get_encode,    /* SIOCGIWENCODE */
+        ipw2100_wx_set_power,     /* SIOCSIWPOWER */
+        ipw2100_wx_get_power,     /* SIOCGIWPOWER */
+};
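+/* Note: the standard handler table above is indexed positionally (ioctl
+ * number relative to SIOCSIWCOMMIT), so the NULL "-- hole --" placeholders
+ * must be kept to preserve the ordering defined by the wireless extensions. */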
+
+#define IPW2100_PRIV_SET_MONITOR	SIOCIWFIRSTPRIV
+#define IPW2100_PRIV_RESET		SIOCIWFIRSTPRIV+1
+#define IPW2100_PRIV_SET_POWER		SIOCIWFIRSTPRIV+2
+#define IPW2100_PRIV_GET_POWER		SIOCIWFIRSTPRIV+3
+#define IPW2100_PRIV_SET_LONGPREAMBLE	SIOCIWFIRSTPRIV+4
+#define IPW2100_PRIV_GET_LONGPREAMBLE	SIOCIWFIRSTPRIV+5
+
+static const struct iw_priv_args ipw2100_private_args[] = {
+
+#ifdef CONFIG_IPW2100_MONITOR
+	{
+		IPW2100_PRIV_SET_MONITOR,
+		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"
+	},
+	{
+		IPW2100_PRIV_RESET,
+		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"
+	},
+#endif /* CONFIG_IPW2100_MONITOR */
+
+	{
+		IPW2100_PRIV_SET_POWER,
+		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_power"
+	},
+	{
+		IPW2100_PRIV_GET_POWER,
+		0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_POWER_STRING, "get_power"
+	},
+	{
+		IPW2100_PRIV_SET_LONGPREAMBLE,
+		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_preamble"
+	},
+	{
+		IPW2100_PRIV_GET_LONGPREAMBLE,
+		0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ, "get_preamble"
+	},
+};
+
+static iw_handler ipw2100_private_handler[] = {
+#ifdef CONFIG_IPW2100_MONITOR
+	ipw2100_wx_set_promisc,
+	ipw2100_wx_reset,
+#else /* CONFIG_IPW2100_MONITOR */
+	NULL,
+	NULL,
+#endif /* CONFIG_IPW2100_MONITOR */
+	ipw2100_wx_set_powermode,
+	ipw2100_wx_get_powermode,
+	ipw2100_wx_set_preamble,
+	ipw2100_wx_get_preamble,
+};
+
+struct iw_handler_def ipw2100_wx_handler_def =
+{
+	.standard = ipw2100_wx_handlers,
+	.num_standard = sizeof(ipw2100_wx_handlers) / sizeof(iw_handler),
+	.num_private = sizeof(ipw2100_private_handler) / sizeof(iw_handler),
+ 	.num_private_args = sizeof(ipw2100_private_args) /
+	sizeof(struct iw_priv_args),
+	.private = (iw_handler *)ipw2100_private_handler,
+	.private_args = (struct iw_priv_args *)ipw2100_private_args,
+};
+
+/*
+ * Get wireless statistics.
+ * Called by /proc/net/wireless
+ * Also called by SIOCGIWSTATS
+ */
+struct iw_statistics *ipw2100_wx_wireless_stats(struct net_device * dev)
+{
+	enum {
+		POOR = 30,
+		FAIR = 60,
+		GOOD = 80,
+		VERY_GOOD = 90,
+		EXCELLENT = 95,
+		PERFECT = 100
+	};
+	int rssi_qual;
+	int tx_qual;
+	int beacon_qual;
+
+	struct ipw2100_priv *priv = ieee80211_priv(dev);
+	struct iw_statistics *wstats;
+	u32 rssi, quality, tx_retries, missed_beacons, tx_failures;
+	u32 ord_len = sizeof(u32);
+
+	if (!priv)
+		return (struct iw_statistics *) NULL;
+
+	wstats = &priv->wstats;
+
+	/* if hw is disabled, then ipw2100_get_ordinal() can't be called.
+	 * ipw2100_wx_wireless_stats seems to be called before fw is
+	 * initialized.  STATUS_ASSOCIATED will only be set if the hw is up
+	 * and associated; if not associated, the values are all meaningless
+	 * anyway, so set them all to zero and INVALID */
+	if (!(priv->status & STATUS_ASSOCIATED)) {
+		wstats->miss.beacon = 0;
+		wstats->discard.retries = 0;
+		wstats->qual.qual = 0;
+		wstats->qual.level = 0;
+		wstats->qual.noise = 0;
+		wstats->qual.updated = 7;
+		wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
+			IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
+		return wstats;
+	}
+
+	if (ipw2100_get_ordinal(priv, IPW_ORD_STAT_PERCENT_MISSED_BCNS,
+				&missed_beacons, &ord_len))
+		goto fail_get_ordinal;
+
+	/* If we don't have a connection, the quality and level are 0 */
+	if (!(priv->status & STATUS_ASSOCIATED)) {
+		wstats->qual.qual = 0;
+		wstats->qual.level = 0;
+	} else {
+		if (ipw2100_get_ordinal(priv, IPW_ORD_RSSI_AVG_CURR,
+					&rssi, &ord_len))
+			goto fail_get_ordinal;
+		wstats->qual.level = rssi + IPW2100_RSSI_TO_DBM;
+		if (rssi < 10)
+			rssi_qual = rssi * POOR / 10;
+		else if (rssi < 15)
+			rssi_qual = (rssi - 10) * (FAIR - POOR) / 5 + POOR;
+		else if (rssi < 20)
+			rssi_qual = (rssi - 15) * (GOOD - FAIR) / 5 + FAIR;
+		else if (rssi < 30)
+			rssi_qual = (rssi - 20) * (VERY_GOOD - GOOD) /
+				10 + GOOD;
+		else
+			rssi_qual = (rssi - 30) * (PERFECT - VERY_GOOD) /
+				10 + VERY_GOOD;
+
+		if (ipw2100_get_ordinal(priv, IPW_ORD_STAT_PERCENT_RETRIES,
+					&tx_retries, &ord_len))
+			goto fail_get_ordinal;
+
+		if (tx_retries > 75)
+			tx_qual = (90 - tx_retries) * POOR / 15;
+		else if (tx_retries > 70)
+			tx_qual = (75 - tx_retries) * (FAIR - POOR) / 5 + POOR;
+		else if (tx_retries > 65)
+			tx_qual = (70 - tx_retries) * (GOOD - FAIR) / 5 + FAIR;
+		else if (tx_retries > 50)
+			tx_qual = (65 - tx_retries) * (VERY_GOOD - GOOD) /
+				15 + GOOD;
+		else
+			tx_qual = (50 - tx_retries) *
+				(PERFECT - VERY_GOOD) / 50 + VERY_GOOD;
+
+		if (missed_beacons > 50)
+			beacon_qual = (60 - missed_beacons) * POOR / 10;
+		else if (missed_beacons > 40)
+			beacon_qual = (50 - missed_beacons) * (FAIR - POOR) /
+				10 + POOR;
+		else if (missed_beacons > 32)
+			beacon_qual = (40 - missed_beacons) * (GOOD - FAIR) /
+				18 + FAIR;
+		else if (missed_beacons > 20)
+			beacon_qual = (32 - missed_beacons) *
+				(VERY_GOOD - GOOD) / 20 + GOOD;
+		else
+			beacon_qual = (20 - missed_beacons) *
+				(PERFECT - VERY_GOOD) / 20 + VERY_GOOD;
+
+		quality = min(beacon_qual, min(tx_qual, rssi_qual));
+
+#ifdef CONFIG_IPW_DEBUG
+		if (beacon_qual == quality)
+			IPW_DEBUG_WX("Quality clamped by Missed Beacons\n");
+		else if (tx_qual == quality)
+			IPW_DEBUG_WX("Quality clamped by Tx Retries\n");
+		else if (quality != 100)
+			IPW_DEBUG_WX("Quality clamped by Signal Strength\n");
+		else
+			IPW_DEBUG_WX("Quality not clamped.\n");
+#endif
+
+		wstats->qual.qual = quality;
+		wstats->qual.level = rssi + IPW2100_RSSI_TO_DBM;
+	}
+
+	wstats->qual.noise = 0;
+	wstats->qual.updated = 7;
+	wstats->qual.updated |= IW_QUAL_NOISE_INVALID;
+
+	/* FIXME: this is a percentage, not a count */
+	wstats->miss.beacon = missed_beacons;
+
+	if (ipw2100_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURES,
+				&tx_failures, &ord_len))
+		goto fail_get_ordinal;
+	wstats->discard.retries = tx_failures;
+
+	return wstats;
+
+ fail_get_ordinal:
+	IPW_DEBUG_WX("failed querying ordinals.\n");
+
+	return (struct iw_statistics *) NULL;
+}
+
+void ipw2100_wx_event_work(struct ipw2100_priv *priv)
+{
+	union iwreq_data wrqu;
+	int len = ETH_ALEN;
+
+	if (priv->status & STATUS_STOPPING)
+		return;
+
+	down(&priv->action_sem);
+
+	IPW_DEBUG_WX("enter\n");
+
+	up(&priv->action_sem);
+
+	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+
+	/* Fetch BSSID from the hardware */
+	if (!(priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) ||
+	    priv->status & STATUS_RF_KILL_MASK ||
+	    ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID,
+				&priv->bssid,  &len)) {
+		memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
+	} else {
+		/* We now have the BSSID, so can finish setting to the full
+		 * associated state */
+		memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
+		memcpy(&priv->ieee->bssid, priv->bssid, ETH_ALEN);
+		priv->status &= ~STATUS_ASSOCIATING;
+		priv->status |= STATUS_ASSOCIATED;
+		netif_carrier_on(priv->net_dev);
+		if (netif_queue_stopped(priv->net_dev)) {
+			IPW_DEBUG_INFO("Waking net queue.\n");
+			netif_wake_queue(priv->net_dev);
+		} else {
+			IPW_DEBUG_INFO("Starting net queue.\n");
+			netif_start_queue(priv->net_dev);
+		}
+	}
+
+	if (!(priv->status & STATUS_ASSOCIATED)) {
+		IPW_DEBUG_WX("Configuring ESSID\n");
+		down(&priv->action_sem);
+		/* This is a disassociation event, so kick the firmware to
+		 * look for another AP */
+		if (priv->config & CFG_STATIC_ESSID)
+			ipw2100_set_essid(priv, priv->essid, priv->essid_len, 0);
+		else
+			ipw2100_set_essid(priv, NULL, 0, 0);
+		up(&priv->action_sem);
+	}
+
+	wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
+}
+
+#define IPW2100_FW_MAJOR_VERSION 1
+#define IPW2100_FW_MINOR_VERSION 3
+
+#define IPW2100_FW_MINOR(x) (((x) & 0xff00) >> 8)
+#define IPW2100_FW_MAJOR(x) ((x) & 0xff)
+
+#define IPW2100_FW_VERSION ((IPW2100_FW_MINOR_VERSION << 8) | \
+                             IPW2100_FW_MAJOR_VERSION)
+
+#define IPW2100_FW_PREFIX "ipw2100-" __stringify(IPW2100_FW_MAJOR_VERSION) \
+"." __stringify(IPW2100_FW_MINOR_VERSION)
+
+#define IPW2100_FW_NAME(x) IPW2100_FW_PREFIX "" x ".fw"
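+
+/*
+ * With the version numbers above, these macros expand to the firmware file
+ * names requested in ipw2100_get_firmware() below: "ipw2100-1.3.fw" (BSS),
+ * "ipw2100-1.3-i.fw" (IBSS) and "ipw2100-1.3-p.fw" (monitor), and to an
+ * expected version word of (3 << 8) | 1 == 0x0301.
+ */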
+
+
+/*
+
+BINARY FIRMWARE HEADER FORMAT
+
+offset        length   desc
+0             2        version
+2             2        mode == 0:BSS,1:IBSS,2:MONITOR
+4             4        fw_len
+8             4        uc_len
+0xC           fw_len   firmware data
+0xC + fw_len  uc_len   microcode data
+
+*/
+
+struct ipw2100_fw_header {
+	short version;
+	short mode;
+	unsigned int fw_size;
+	unsigned int uc_size;
+} __attribute__ ((packed));
+
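+/*
+ * As a sketch (hypothetical values, assuming the usual little-endian image
+ * layout this structure is cast over): a version 1.3 BSS image would start
+ * with version == 0x0301 and mode == 0, followed by fw_size and uc_size.
+ */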
+
+
+static int ipw2100_mod_firmware_load(struct ipw2100_fw *fw)
+{
+	struct ipw2100_fw_header *h =
+		(struct ipw2100_fw_header *)fw->fw_entry->data;
+
+	if (IPW2100_FW_MAJOR(h->version) != IPW2100_FW_MAJOR_VERSION) {
+		IPW_DEBUG_WARNING("Firmware image not compatible "
+		       "(detected version id of %u). "
+		       "See Documentation/networking/README.ipw2100\n",
+		       h->version);
+		return 1;
+	}
+
+	fw->version = h->version;
+	fw->fw.data = fw->fw_entry->data + sizeof(struct ipw2100_fw_header);
+	fw->fw.size = h->fw_size;
+	fw->uc.data = fw->fw.data + h->fw_size;
+	fw->uc.size = h->uc_size;
+
+	return 0;
+}
+
+
+int ipw2100_get_firmware(struct ipw2100_priv *priv, struct ipw2100_fw *fw)
+{
+	char *fw_name;
+	int rc;
+
+	IPW_DEBUG_INFO("%s: Using hotplug firmware load.\n",
+	       priv->net_dev->name);
+
+	switch (priv->ieee->iw_mode) {
+	case IW_MODE_ADHOC:
+		fw_name = IPW2100_FW_NAME("-i");
+		break;
+#ifdef CONFIG_IPW2100_MONITOR
+	case IW_MODE_MONITOR:
+		fw_name = IPW2100_FW_NAME("-p");
+		break;
+#endif
+	case IW_MODE_INFRA:
+	default:
+		fw_name = IPW2100_FW_NAME("");
+		break;
+	}
+
+	rc = request_firmware(&fw->fw_entry, fw_name, &priv->pci_dev->dev);
+
+	if (rc < 0) {
+		IPW_DEBUG_ERROR(
+		       "%s: Firmware '%s' not available or load failed.\n",
+		       priv->net_dev->name, fw_name);
+		return rc;
+	}
+	IPW_DEBUG_INFO("firmware data %p size %zd\n", fw->fw_entry->data,
+			   fw->fw_entry->size);
+
+	ipw2100_mod_firmware_load(fw);
+
+	return 0;
+}
+
+void ipw2100_release_firmware(struct ipw2100_priv *priv,
+			      struct ipw2100_fw *fw)
+{
+	fw->version = 0;
+	if (fw->fw_entry)
+		release_firmware(fw->fw_entry);
+	fw->fw_entry = NULL;
+}
+
+
+int ipw2100_get_fwversion(struct ipw2100_priv *priv, char *buf, size_t max)
+{
+	char ver[MAX_FW_VERSION_LEN];
+	u32 len = MAX_FW_VERSION_LEN;
+	u32 tmp;
+	int i;
+	/* firmware version is an ascii string (max len of 14) */
+	if (ipw2100_get_ordinal(priv, IPW_ORD_STAT_FW_VER_NUM,
+				ver, &len))
+		return -EIO;
+	tmp = max;
+	if (len >= max)
+		len = max - 1;
+	for (i = 0; i < len; i++)
+		buf[i] = ver[i];
+	buf[i] = '\0';
+	return tmp;
+}
+
+int ipw2100_get_ucodeversion(struct ipw2100_priv *priv, char *buf, size_t max)
+{
+	u32 ver;
+	u32 len = sizeof(ver);
+	/* microcode version is a 32 bit integer */
+	if (ipw2100_get_ordinal(priv, IPW_ORD_UCODE_VERSION,
+				&ver, &len))
+		return -EIO;
+	return snprintf(buf, max, "%08X", ver);
+}
+
+/*
+ * On exit, the firmware will have been freed from the fw list
+ */
+int ipw2100_fw_download(struct ipw2100_priv *priv, struct ipw2100_fw *fw)
+{
+	/* firmware is constructed of N contiguous entries, each entry is
+	 * structured as:
+	 *
+	 * offset    size        desc
+	 * 0         4           address to write to
+	 * 4         2           length of data run
+	 * 6         length      data
+	 */
+	unsigned int addr;
+	unsigned short len;
+
+	const unsigned char *firmware_data = fw->fw.data;
+	unsigned int firmware_data_left = fw->fw.size;
+
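+	/* As an illustration (hypothetical values, little-endian layout as
+	 * read by the casts below): a single entry writing eight bytes to
+	 * address 0x00000200 would appear in the image as
+	 *
+	 *   00 02 00 00  08 00  d0 d1 d2 d3 d4 d5 d6 d7
+	 */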
+	while (firmware_data_left > 0) {
+	   	addr = *(u32 *)(firmware_data);
+		firmware_data      += 4;
+		firmware_data_left -= 4;
+
+	   	len = *(u16 *)(firmware_data);
+		firmware_data      += 2;
+		firmware_data_left -= 2;
+
+		if (len > 32) {
+			IPW_DEBUG_ERROR(
+			       "Invalid firmware run-length of %d bytes\n",
+			       len);
+			return -EINVAL;
+		}
+
+		write_nic_memory(priv->net_dev, addr, len, firmware_data);
+		firmware_data      += len;
+		firmware_data_left -= len;
+	}
+
+	return 0;
+}
+
+struct symbol_alive_response {
+	u8 cmd_id;
+	u8 seq_num;
+	u8 ucode_rev;
+	u8 eeprom_valid;
+	u16 valid_flags;
+	u8 IEEE_addr[6];
+	u16 flags;
+	u16 pcb_rev;
+	u16 clock_settle_time;	// 1us LSB
+	u16 powerup_settle_time;	// 1us LSB
+	u16 hop_settle_time;	// 1us LSB
+	u8 date[3];		// month, day, year
+	u8 time[2];		// hours, minutes
+	u8 ucode_valid;
+};
+
+int ipw2100_ucode_download(struct ipw2100_priv *priv, struct ipw2100_fw *fw)
+{
+	struct net_device *dev = priv->net_dev;
+	const unsigned char *microcode_data = fw->uc.data;
+	unsigned int microcode_data_left = fw->uc.size;
+
+	struct symbol_alive_response response;
+	int i, j;
+	u8 data;
+
+	/* Symbol control */
+	write_nic_word(dev, IPW2100_CONTROL_REG, 0x703);
+	readl((void *)(dev->base_addr));
+	write_nic_word(dev, IPW2100_CONTROL_REG, 0x707);
+	readl((void *)(dev->base_addr));
+
+	/* HW config */
+	write_nic_byte(dev, 0x210014, 0x72);	/* fifo width =16 */
+	readl((void *)(dev->base_addr));
+	write_nic_byte(dev, 0x210014, 0x72);	/* fifo width =16 */
+	readl((void *)(dev->base_addr));
+
+	/* EN_CS_ACCESS bit to reset control store pointer */
+	write_nic_byte(dev, 0x210000, 0x40);
+	readl((void *)(dev->base_addr));
+	write_nic_byte(dev, 0x210000, 0x0);
+	readl((void *)(dev->base_addr));
+	write_nic_byte(dev, 0x210000, 0x40);
+	readl((void *)(dev->base_addr));
+
+	/* copy microcode from buffer into Symbol */
+
+	while (microcode_data_left > 0) {
+		write_nic_byte(dev, 0x210010, *microcode_data++);
+		write_nic_byte(dev, 0x210010, *microcode_data++);
+		microcode_data_left -= 2;
+	}
+
+	/* EN_CS_ACCESS bit to reset the control store pointer */
+	write_nic_byte(dev, 0x210000, 0x0);
+	readl((void *)(dev->base_addr));
+
+	/* Enable System (Reg 0)
+	 * first enable causes garbage in RX FIFO */
+	write_nic_byte(dev, 0x210000, 0x0);
+	readl((void *)(dev->base_addr));
+	write_nic_byte(dev, 0x210000, 0x80);
+	readl((void *)(dev->base_addr));
+
+	/* Reset External Baseband Reg */
+	write_nic_word(dev, IPW2100_CONTROL_REG, 0x703);
+	readl((void *)(dev->base_addr));
+	write_nic_word(dev, IPW2100_CONTROL_REG, 0x707);
+	readl((void *)(dev->base_addr));
+
+	/* HW Config (Reg 5) */
+	write_nic_byte(dev, 0x210014, 0x72);	// fifo width =16
+	readl((void *)(dev->base_addr));
+	write_nic_byte(dev, 0x210014, 0x72);	// fifo width =16
+	readl((void *)(dev->base_addr));
+
+	/* Enable System (Reg 0)
+	 * second enable should be OK */
+	write_nic_byte(dev, 0x210000, 0x00);	// clear enable system
+	readl((void *)(dev->base_addr));
+	write_nic_byte(dev, 0x210000, 0x80);	// set enable system
+
+	/* check Symbol is enabled - upped this from 5 as it wasn't always
+	 * catching the update */
+	for (i = 0; i < 10; i++) {
+		udelay(10);
+
+		/* check Dino is enabled bit */
+		read_nic_byte(dev, 0x210000, &data);
+		if (data & 0x1)
+			break;
+	}
+
+	if (i == 10) {
+		IPW_DEBUG_ERROR("%s: Error initializing Symbol\n",
+		       dev->name);
+		return -EIO;
+	}
+
+	/* Get Symbol alive response */
+	for (i = 0; i < 30; i++) {
+		/* Read alive response structure */
+		for (j = 0;
+		     j < (sizeof(struct symbol_alive_response) >> 1);
+		     j++)
+			read_nic_word(dev, 0x210004,
+				      ((u16 *)&response) + j);
+
+		if ((response.cmd_id == 1) &&
+		    (response.ucode_valid == 0x1))
+			break;
+		udelay(10);
+	}
+
+	if (i == 30) {
+		IPW_DEBUG_ERROR("%s: No response from Symbol - hw not alive\n",
+		       dev->name);
+		printk_buf(IPW_DL_ERROR, (u8*)&response, sizeof(response));
+		return -EIO;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/wireless/ipw2100.h b/drivers/net/wireless/ipw2100.h
new file mode 100644
--- /dev/null
+++ b/drivers/net/wireless/ipw2100.h
@@ -0,0 +1,1195 @@
+/******************************************************************************
+
+  Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+
+  The full GNU General Public License is included in this distribution in the
+  file called LICENSE.
+
+  Contact Information:
+  James P. Ketrenos <ipw2100-admin@linux.intel.com>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+******************************************************************************/
+#ifndef _IPW2100_H
+#define _IPW2100_H
+
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/skbuff.h>
+#include <asm/io.h>
+#include <linux/socket.h>
+#include <linux/if_arp.h>
+#include <linux/wireless.h>
+#include <linux/version.h>
+#include <net/iw_handler.h>	// new driver API
+
+#include <net/ieee80211.h>
+
+#include <linux/workqueue.h>
+
+struct ipw2100_priv;
+struct ipw2100_tx_packet;
+struct ipw2100_rx_packet;
+
+#ifdef CONFIG_IPW_DEBUG
+enum { IPW_DEBUG_ENABLED = 1 };
+extern u32 ipw2100_debug_level;
+#define IPW_DEBUG(level, message...) \
+do { \
+	if (ipw2100_debug_level & (level)) { \
+		printk(KERN_DEBUG "ipw2100: %c %s ", \
+                       in_interrupt() ? 'I' : 'U',  __FUNCTION__); \
+		printk(message); \
+	} \
+} while (0)
+#else
+enum { IPW_DEBUG_ENABLED = 0 };
+#define IPW_DEBUG(level, message...) do {} while (0)
+#endif /* CONFIG_IPW_DEBUG */
+
+#define IPW_DL_UNINIT    0x80000000
+#define IPW_DL_NONE      0x00000000
+#define IPW_DL_ALL       0x7FFFFFFF
+
+/*
+ * To use the debug system:
+ *
+ * If you are defining a new debug classification, simply add it to the #define
+ * list here in the form of:
+ *
+ * #define IPW_DL_xxxx VALUE
+ *
+ * shifting value to the left one bit from the previous entry.  xxxx should be
+ * the name of the classification (for example, WEP)
+ *
+ * You then need to either add an IPW2100_xxxx_DEBUG() macro definition for your
+ * classification, or use IPW_DEBUG(IPW_DL_xxxx, ...) whenever you want
+ * to send output to that classification.
+ *
+ * To add your debug level to the list of levels seen when you perform
+ *
+ * % cat /proc/net/ipw2100/debug_level
+ *
+ * you simply need to add your entry to the ipw2100_debug_levels array.
+ *
+ * If you do not see debug_level in /proc/net/ipw2100, then you do not have
+ * CONFIG_IPW_DEBUG defined in your kernel configuration.
+ *
+ */
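+
+/*
+ * A minimal sketch (IPW_DL_EXAMPLE and IPW_DEBUG_EXAMPLE are hypothetical
+ * names, not part of this driver):
+ *
+ *	#define IPW_DL_EXAMPLE          (1<<27)
+ *	#define IPW_DEBUG_EXAMPLE(f...) IPW_DEBUG(IPW_DL_EXAMPLE, ## f)
+ *
+ * which could then be called as:
+ *
+ *	IPW_DEBUG_EXAMPLE("rx buffer length %d\n", len);
+ */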
+
+#define IPW_DL_ERROR         (1<<0)
+#define IPW_DL_WARNING       (1<<1)
+#define IPW_DL_INFO          (1<<2)
+#define IPW_DL_WX            (1<<3)
+#define IPW_DL_HC            (1<<5)
+#define IPW_DL_STATE         (1<<6)
+
+#define IPW_DL_NOTIF         (1<<10)
+#define IPW_DL_SCAN          (1<<11)
+#define IPW_DL_ASSOC         (1<<12)
+#define IPW_DL_DROP          (1<<13)
+
+#define IPW_DL_IOCTL         (1<<14)
+#define IPW_DL_RF_KILL       (1<<17)
+
+
+#define IPW_DL_MANAGE        (1<<15)
+#define IPW_DL_FW            (1<<16)
+
+#define IPW_DL_FRAG          (1<<21)
+#define IPW_DL_WEP           (1<<22)
+#define IPW_DL_TX            (1<<23)
+#define IPW_DL_RX            (1<<24)
+#define IPW_DL_ISR           (1<<25)
+#define IPW_DL_IO            (1<<26)
+#define IPW_DL_TRACE         (1<<28)
+
+#define IPW_DEBUG_ERROR(f, a...) printk(KERN_ERR DRV_NAME ": " f, ## a)
+#define IPW_DEBUG_WARNING(f, a...) printk(KERN_WARNING DRV_NAME ": " f, ## a)
+#define IPW_DEBUG_INFO(f...)    IPW_DEBUG(IPW_DL_INFO, ## f)
+#define IPW_DEBUG_WX(f...)     IPW_DEBUG(IPW_DL_WX, ## f)
+#define IPW_DEBUG_SCAN(f...)   IPW_DEBUG(IPW_DL_SCAN, ## f)
+#define IPW_DEBUG_NOTIF(f...) IPW_DEBUG(IPW_DL_NOTIF, ## f)
+#define IPW_DEBUG_TRACE(f...)  IPW_DEBUG(IPW_DL_TRACE, ## f)
+#define IPW_DEBUG_RX(f...)     IPW_DEBUG(IPW_DL_RX, ## f)
+#define IPW_DEBUG_TX(f...)     IPW_DEBUG(IPW_DL_TX, ## f)
+#define IPW_DEBUG_ISR(f...)    IPW_DEBUG(IPW_DL_ISR, ## f)
+#define IPW_DEBUG_MANAGEMENT(f...) IPW_DEBUG(IPW_DL_MANAGE, ## f)
+#define IPW_DEBUG_WEP(f...)    IPW_DEBUG(IPW_DL_WEP, ## f)
+#define IPW_DEBUG_HC(f...) IPW_DEBUG(IPW_DL_HC, ## f)
+#define IPW_DEBUG_FRAG(f...) IPW_DEBUG(IPW_DL_FRAG, ## f)
+#define IPW_DEBUG_FW(f...) IPW_DEBUG(IPW_DL_FW, ## f)
+#define IPW_DEBUG_RF_KILL(f...) IPW_DEBUG(IPW_DL_RF_KILL, ## f)
+#define IPW_DEBUG_DROP(f...) IPW_DEBUG(IPW_DL_DROP, ## f)
+#define IPW_DEBUG_IO(f...) IPW_DEBUG(IPW_DL_IO, ## f)
+#define IPW_DEBUG_IOCTL(f...) IPW_DEBUG(IPW_DL_IOCTL, ## f)
+#define IPW_DEBUG_STATE(f, a...) IPW_DEBUG(IPW_DL_STATE | IPW_DL_ASSOC | IPW_DL_INFO, f, ## a)
+#define IPW_DEBUG_ASSOC(f, a...) IPW_DEBUG(IPW_DL_ASSOC | IPW_DL_INFO, f, ## a)
+
+enum {
+	IPW_HW_STATE_DISABLED = 1,
+	IPW_HW_STATE_ENABLED = 0
+};
+
+struct ssid_context {
+	char ssid[IW_ESSID_MAX_SIZE + 1];
+	int ssid_len;
+	unsigned char bssid[ETH_ALEN];
+	int port_type;
+	int channel;
+
+};
+
+extern const char *port_type_str[];
+extern const char *band_str[];
+
+#define NUMBER_OF_BD_PER_COMMAND_PACKET		1
+#define NUMBER_OF_BD_PER_DATA_PACKET		2
+
+#define IPW_MAX_BDS 6
+#define NUMBER_OF_OVERHEAD_BDS_PER_PACKETR	2
+#define NUMBER_OF_BDS_TO_LEAVE_FOR_COMMANDS	1
+
+#define REQUIRED_SPACE_IN_RING_FOR_COMMAND_PACKET \
+    (IPW_BD_QUEUE_W_R_MIN_SPARE + NUMBER_OF_BD_PER_COMMAND_PACKET)
+
+struct bd_status {
+	union {
+		struct { u8 nlf:1, txType:2, intEnabled:1, reserved:4;} fields;
+		u8 field;
+	} info;
+} __attribute__ ((packed));
+
+struct ipw2100_bd {
+	u32 host_addr;
+	u32 buf_length;
+	struct bd_status status;
+        /* number of fragments for frame (should be set only for
+	 * 1st TBD) */
+	u8 num_fragments;
+	u8 reserved[6];
+} __attribute__ ((packed));
+
+#define IPW_BD_QUEUE_LENGTH(n) (1<<n)
+#define IPW_BD_ALIGNMENT(L)    (L*sizeof(struct ipw2100_bd))
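+
+/* e.g. IPW_BD_QUEUE_LENGTH(8) describes a 256 entry ring, matching the
+ * RX/TX_QUEUE_LENGTH values below */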
+
+#define IPW_BD_STATUS_TX_FRAME_802_3             0x00
+#define IPW_BD_STATUS_TX_FRAME_NOT_LAST_FRAGMENT 0x01
+#define IPW_BD_STATUS_TX_FRAME_COMMAND		 0x02
+#define IPW_BD_STATUS_TX_FRAME_802_11	         0x04
+#define IPW_BD_STATUS_TX_INTERRUPT_ENABLE	 0x08
+
+struct ipw2100_bd_queue {
+	/* driver (virtual) pointer to queue */
+	struct ipw2100_bd *drv;
+
+	/* firmware (physical) pointer to queue */
+	dma_addr_t nic;
+
+	/* Length of phy memory allocated for BDs */
+	u32 size;
+
+	/* Number of BDs in queue (and in array) */
+	u32 entries;
+
+	/* Number of available BDs (invalid for NIC BDs) */
+	u32 available;
+
+	/* Offset of oldest used BD in array (next one to
+	 * check for completion) */
+	u32 oldest;
+
+	/* Offset of next available (unused) BD */
+	u32 next;
+};
+
+#define RX_QUEUE_LENGTH 256
+#define TX_QUEUE_LENGTH 256
+#define HW_QUEUE_LENGTH 256
+
+#define TX_PENDED_QUEUE_LENGTH (TX_QUEUE_LENGTH / NUMBER_OF_BD_PER_DATA_PACKET)
+
+#define STATUS_TYPE_MASK	0x0000000f
+#define COMMAND_STATUS_VAL	0
+#define STATUS_CHANGE_VAL	1
+#define P80211_DATA_VAL 	2
+#define P8023_DATA_VAL		3
+#define HOST_NOTIFICATION_VAL	4
+
+#define IPW2100_RSSI_TO_DBM (-98)
+
+struct ipw2100_status {
+	u32 frame_size;
+	u16 status_fields;
+	u8 flags;
+#define IPW_STATUS_FLAG_DECRYPTED	(1<<0)
+#define IPW_STATUS_FLAG_WEP_ENCRYPTED	(1<<1)
+#define IPW_STATUS_FLAG_CRC_ERROR       (1<<2)
+	u8 rssi;
+} __attribute__ ((packed));
+
+struct ipw2100_status_queue {
+	/* driver (virtual) pointer to queue */
+	struct ipw2100_status *drv;
+
+	/* firmware (physical) pointer to queue */
+	dma_addr_t nic;
+
+	/* Length of phy memory allocated for BDs */
+	u32 size;
+};
+
+#define HOST_COMMAND_PARAMS_REG_LEN	100
+#define CMD_STATUS_PARAMS_REG_LEN 	3
+
+#define IPW_WPA_CAPABILITIES   0x1
+#define IPW_WPA_LISTENINTERVAL 0x2
+#define IPW_WPA_AP_ADDRESS     0x4
+
+#define IPW_MAX_VAR_IE_LEN ((HOST_COMMAND_PARAMS_REG_LEN - 4) * sizeof(u32))
+
+struct ipw2100_wpa_assoc_frame {
+	u16 fixed_ie_mask;
+	struct {
+		u16 capab_info;
+		u16 listen_interval;
+		u8 current_ap[ETH_ALEN];
+	} fixed_ies;
+	u32 var_ie_len;
+	u8 var_ie[IPW_MAX_VAR_IE_LEN];
+};
+
+#define IPW_BSS     1
+#define IPW_MONITOR 2
+#define IPW_IBSS    3
+
+/**
+ * @struct _tx_cmd - HWCommand
+ * @brief H/W command structure.
+ */
+struct ipw2100_cmd_header {
+	u32 host_command_reg;
+	u32 host_command_reg1;
+	u32 sequence;
+	u32 host_command_len_reg;
+	u32 host_command_params_reg[HOST_COMMAND_PARAMS_REG_LEN];
+	u32 cmd_status_reg;
+	u32 cmd_status_params_reg[CMD_STATUS_PARAMS_REG_LEN];
+	u32 rxq_base_ptr;
+	u32 rxq_next_ptr;
+	u32 rxq_host_ptr;
+	u32 txq_base_ptr;
+	u32 txq_next_ptr;
+	u32 txq_host_ptr;
+	u32 tx_status_reg;
+	u32 reserved;
+	u32 status_change_reg;
+	u32 reserved1[3];
+	u32 *ordinal1_ptr;
+	u32 *ordinal2_ptr;
+} __attribute__ ((packed));
+
+struct ipw2100_data_header {
+	u32 host_command_reg;
+	u32 host_command_reg1;
+	u8 encrypted;	// BOOLEAN in win! TRUE if frame is enc by driver
+	u8 needs_encryption;	// BOOLEAN in win! TRUE if frame needs to be encrypted by the NIC
+	u8 wep_index;		// 0 no key, 1-4 key index, 0xff immediate key
+	u8 key_size;	// 0 no imm key, 0x5 64bit encr, 0xd 128bit encr, 0x10 128bit encr and 128bit IV
+	u8 key[16];
+	u8 reserved[10];	// f/w reserved
+	u8 src_addr[ETH_ALEN];
+	u8 dst_addr[ETH_ALEN];
+	u16 fragment_size;
+} __attribute__ ((packed));
+
+/* Host command data structure */
+struct host_command {
+	u32 host_command;		// COMMAND ID
+	u32 host_command1;		// COMMAND ID
+	u32 host_command_sequence;	// UNIQUE COMMAND NUMBER (ID)
+	u32 host_command_length;	// LENGTH
+	u32 host_command_parameters[HOST_COMMAND_PARAMS_REG_LEN];	// COMMAND PARAMETERS
+} __attribute__ ((packed));
+
+
+typedef enum {
+	POWER_ON_RESET,
+	EXIT_POWER_DOWN_RESET,
+	SW_RESET,
+	EEPROM_RW,
+	SW_RE_INIT
+} ipw2100_reset_event;
+
+enum {
+	COMMAND = 0xCAFE,
+	DATA,
+	RX
+};
+
+
+struct ipw2100_tx_packet {
+	int type;
+	int index;
+	union {
+		struct { /* COMMAND */
+			struct ipw2100_cmd_header* cmd;
+			dma_addr_t cmd_phys;
+		} c_struct;
+		struct { /* DATA */
+			struct ipw2100_data_header* data;
+			dma_addr_t data_phys;
+			struct ieee80211_txb *txb;
+		} d_struct;
+	} info;
+	int jiffy_start;
+
+	struct list_head list;
+};
+
+
+struct ipw2100_rx_packet {
+	struct ipw2100_rx *rxp;
+	dma_addr_t dma_addr;
+	int jiffy_start;
+	struct sk_buff *skb;
+	struct list_head list;
+};
+
+#define FRAG_DISABLED             (1<<31)
+#define RTS_DISABLED              (1<<31)
+#define MAX_RTS_THRESHOLD         2304U
+#define MIN_RTS_THRESHOLD         1U
+#define DEFAULT_RTS_THRESHOLD     1000U
+
+#define DEFAULT_BEACON_INTERVAL   100U
+#define	DEFAULT_SHORT_RETRY_LIMIT 7U
+#define	DEFAULT_LONG_RETRY_LIMIT  4U
+
+struct ipw2100_ordinals {
+	u32 table1_addr;
+	u32 table2_addr;
+	u32 table1_size;
+	u32 table2_size;
+};
+
+/* Host Notification header */
+struct ipw2100_notification {
+	u32 hnhdr_subtype;	/* type of host notification */
+	u32 hnhdr_size;		/* size in bytes of data
+				   or number of entries, if table.
+				   Does NOT include header */
+} __attribute__ ((packed));
+
+#define MAX_KEY_SIZE	16
+#define	MAX_KEYS	8
+
+#define IPW2100_WEP_ENABLE     (1<<1)
+#define IPW2100_WEP_DROP_CLEAR (1<<2)
+
+#define IPW_NONE_CIPHER   (1<<0)
+#define IPW_WEP40_CIPHER  (1<<1)
+#define IPW_TKIP_CIPHER   (1<<2)
+#define IPW_CCMP_CIPHER   (1<<4)
+#define IPW_WEP104_CIPHER (1<<5)
+#define IPW_CKIP_CIPHER   (1<<6)
+
+#define	IPW_AUTH_OPEN     0
+#define	IPW_AUTH_SHARED   1
+
+struct statistic {
+	int value;
+	int hi;
+	int lo;
+};
+
+#define INIT_STAT(x) do {  \
+  (x)->value = (x)->hi = 0; \
+  (x)->lo = 0x7fffffff; \
+} while (0)
+#define SET_STAT(x,y) do { \
+  (x)->value = y; \
+  if ((x)->value > (x)->hi) (x)->hi = (x)->value; \
+  if ((x)->value < (x)->lo) (x)->lo = (x)->value; \
+} while (0)
+#define INC_STAT(x) do { if (++(x)->value > (x)->hi) (x)->hi = (x)->value; } \
+while (0)
+#define DEC_STAT(x) do { if (--(x)->value < (x)->lo) (x)->lo = (x)->value; } \
+while (0)
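+
+/*
+ * Minimal usage sketch (illustrative only): a queue depth statistic is reset
+ * with INIT_STAT(&stat), updated with SET_STAT(&stat, depth) or with
+ * INC_STAT(&stat)/DEC_STAT(&stat), and the high/low water marks are then
+ * available in stat.hi and stat.lo.
+ */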
+
+#define IPW2100_ERROR_QUEUE 5
+
+/* Power management code: enable or disable? */
+enum {
+#ifdef CONFIG_PM
+	IPW2100_PM_DISABLED = 0,
+	PM_STATE_SIZE = 16,
+#else
+	IPW2100_PM_DISABLED = 1,
+	PM_STATE_SIZE = 0,
+#endif
+};
+
+#define STATUS_POWERED          (1<<0)
+#define STATUS_CMD_ACTIVE       (1<<1)  /**< host command in progress */
+#define STATUS_RUNNING          (1<<2)  /* Card initialized, but not enabled */
+#define STATUS_ENABLED          (1<<3)  /* Card enabled -- can scan,Tx,Rx */
+#define STATUS_STOPPING         (1<<4)  /* Card is in shutdown phase */
+#define STATUS_INITIALIZED      (1<<5)  /* Card is ready for external calls */
+#define STATUS_ASSOCIATING      (1<<9)  /* Associated, but no BSSID yet */
+#define STATUS_ASSOCIATED       (1<<10) /* Associated and BSSID valid */
+#define STATUS_INT_ENABLED      (1<<11)
+#define STATUS_RF_KILL_HW       (1<<12)
+#define STATUS_RF_KILL_SW       (1<<13)
+#define STATUS_RF_KILL_MASK     (STATUS_RF_KILL_HW | STATUS_RF_KILL_SW)
+#define STATUS_EXIT_PENDING     (1<<14)
+
+#define STATUS_SCAN_PENDING     (1<<23)
+#define STATUS_SCANNING         (1<<24)
+#define STATUS_SCAN_ABORTING    (1<<25)
+#define STATUS_SCAN_COMPLETE    (1<<26)
+#define STATUS_WX_EVENT_PENDING (1<<27)
+#define STATUS_RESET_PENDING    (1<<29)
+#define STATUS_SECURITY_UPDATED (1<<30) /* Security sync needed */
+
+
+
+/* Internal NIC states */
+#define IPW_STATE_INITIALIZED	(1<<0)
+#define IPW_STATE_COUNTRY_FOUND	(1<<1)
+#define IPW_STATE_ASSOCIATED    (1<<2)
+#define IPW_STATE_ASSN_LOST	(1<<3)
+#define IPW_STATE_ASSN_CHANGED 	(1<<4)
+#define IPW_STATE_SCAN_COMPLETE	(1<<5)
+#define IPW_STATE_ENTERED_PSP 	(1<<6)
+#define IPW_STATE_LEFT_PSP 	(1<<7)
+#define IPW_STATE_RF_KILL       (1<<8)
+#define IPW_STATE_DISABLED	(1<<9)
+#define IPW_STATE_POWER_DOWN	(1<<10)
+#define IPW_STATE_SCANNING      (1<<11)
+
+
+
+#define CFG_STATIC_CHANNEL      (1<<0) /* Restrict assoc. to single channel */
+#define CFG_STATIC_ESSID        (1<<1) /* Restrict assoc. to single SSID */
+#define CFG_STATIC_BSSID        (1<<2) /* Restrict assoc. to single BSSID */
+#define CFG_CUSTOM_MAC          (1<<3)
+#define CFG_LONG_PREAMBLE       (1<<4)
+#define CFG_ASSOCIATE           (1<<6)
+#define CFG_FIXED_RATE          (1<<7)
+#define CFG_ADHOC_CREATE        (1<<8)
+#define CFG_C3_DISABLED         (1<<9)
+#define CFG_PASSIVE_SCAN        (1<<10)
+
+#define CAP_SHARED_KEY          (1<<0) /* Off = OPEN */
+#define CAP_PRIVACY_ON          (1<<1) /* Off = No privacy */
+
+struct ipw2100_priv {
+
+	int stop_hang_check; /* Set 1 when shutting down to kill hang_check */
+	int stop_rf_kill; /* Set 1 when shutting down to kill rf_kill */
+
+	struct ieee80211_device *ieee;
+	unsigned long status;
+	unsigned long config;
+	unsigned long capability;
+
+	/* Statistics */
+	int resets;
+	int reset_backoff;
+
+	/* Context */
+	u8 essid[IW_ESSID_MAX_SIZE];
+	u8 essid_len;
+	u8 bssid[ETH_ALEN];
+	u8 channel;
+	int last_mode;
+	int cstate_limit;
+
+	unsigned long connect_start;
+	unsigned long last_reset;
+
+	u32 channel_mask;
+	u32 fatal_error;
+	u32 fatal_errors[IPW2100_ERROR_QUEUE];
+	u32 fatal_index;
+	int eeprom_version;
+	int firmware_version;
+	unsigned long hw_features;
+	int hangs;
+	u32 last_rtc;
+	int dump_raw; /* 1 to dump raw bytes in /sys/.../memory */
+	u8* snapshot[0x30];
+
+	u8 mandatory_bssid_mac[ETH_ALEN];
+	u8 mac_addr[ETH_ALEN];
+
+	int power_mode;
+
+	/* WEP data */
+	struct ieee80211_security sec;
+	int messages_sent;
+
+
+	int short_retry_limit;
+	int long_retry_limit;
+
+	u32 rts_threshold;
+	u32 frag_threshold;
+
+	int in_isr;
+
+	u32 tx_rates;
+	int tx_power;
+	u32 beacon_interval;
+
+	char nick[IW_ESSID_MAX_SIZE + 1];
+
+	struct ipw2100_status_queue status_queue;
+
+	struct statistic txq_stat;
+	struct statistic rxq_stat;
+	struct ipw2100_bd_queue rx_queue;
+	struct ipw2100_bd_queue tx_queue;
+	struct ipw2100_rx_packet *rx_buffers;
+
+	struct statistic fw_pend_stat;
+	struct list_head fw_pend_list;
+
+	struct statistic msg_free_stat;
+	struct statistic msg_pend_stat;
+	struct list_head msg_free_list;
+	struct list_head msg_pend_list;
+	struct ipw2100_tx_packet *msg_buffers;
+
+	struct statistic tx_free_stat;
+	struct statistic tx_pend_stat;
+	struct list_head tx_free_list;
+	struct list_head tx_pend_list;
+	struct ipw2100_tx_packet *tx_buffers;
+
+	struct ipw2100_ordinals ordinals;
+
+	struct pci_dev *pci_dev;
+
+	struct proc_dir_entry *dir_dev;
+
+	struct net_device *net_dev;
+	struct iw_statistics wstats;
+
+	struct tasklet_struct irq_tasklet;
+
+	struct workqueue_struct *workqueue;
+	struct work_struct reset_work;
+	struct work_struct security_work;
+	struct work_struct wx_event_work;
+	struct work_struct hang_check;
+	struct work_struct rf_kill;
+
+	u32 interrupts;
+	int tx_interrupts;
+	int rx_interrupts;
+	int inta_other;
+
+	spinlock_t low_lock;
+	struct semaphore action_sem;
+	struct semaphore adapter_sem;
+
+	wait_queue_head_t wait_command_queue;
+};
+
+
+/*********************************************************
+ * Host Command -> From Driver to FW
+ *********************************************************/
+
+/**
+ * Host command identifiers
+ */
+#define HOST_COMPLETE           2
+#define SYSTEM_CONFIG           6
+#define SSID                    8
+#define MANDATORY_BSSID         9
+#define AUTHENTICATION_TYPE    10
+#define ADAPTER_ADDRESS        11
+#define PORT_TYPE              12
+#define INTERNATIONAL_MODE     13
+#define CHANNEL                14
+#define RTS_THRESHOLD          15
+#define FRAG_THRESHOLD         16
+#define POWER_MODE             17
+#define TX_RATES               18
+#define BASIC_TX_RATES         19
+#define WEP_KEY_INFO           20
+#define WEP_KEY_INDEX          25
+#define WEP_FLAGS              26
+#define ADD_MULTICAST          27
+#define CLEAR_ALL_MULTICAST    28
+#define BEACON_INTERVAL        29
+#define ATIM_WINDOW            30
+#define CLEAR_STATISTICS       31
+#define SEND		       33
+#define TX_POWER_INDEX         36
+#define BROADCAST_SCAN         43
+#define CARD_DISABLE           44
+#define PREFERRED_BSSID        45
+#define SET_SCAN_OPTIONS       46
+#define SCAN_DWELL_TIME        47
+#define SWEEP_TABLE            48
+#define AP_OR_STATION_TABLE    49
+#define GROUP_ORDINALS         50
+#define SHORT_RETRY_LIMIT      51
+#define LONG_RETRY_LIMIT       52
+
+#define HOST_PRE_POWER_DOWN    58
+#define CARD_DISABLE_PHY_OFF   61
+#define MSDU_TX_RATES          62
+
+
+/* Rogue AP Detection */
+#define SET_STATION_STAT_BITS      64
+#define CLEAR_STATIONS_STAT_BITS   65
+#define LEAP_ROGUE_MODE            66	//TODO tbw replaced by CFG_LEAP_ROGUE_AP
+#define SET_SECURITY_INFORMATION   67
+#define DISASSOCIATION_BSSID	   68
+#define SET_WPA_IE                 69
+
+
+
+/* system configuration bit mask: */
+#define IPW_CFG_MONITOR               0x00004
+#define IPW_CFG_PREAMBLE_AUTO        0x00010
+#define IPW_CFG_IBSS_AUTO_START     0x00020
+#define IPW_CFG_LOOPBACK            0x00100
+#define IPW_CFG_ANSWER_BCSSID_PROBE 0x00800
+#define IPW_CFG_BT_SIDEBAND_SIGNAL	0x02000
+#define IPW_CFG_802_1x_ENABLE       0x04000
+#define IPW_CFG_BSS_MASK		0x08000
+#define IPW_CFG_IBSS_MASK		0x10000
+
+#define IPW_SCAN_NOASSOCIATE (1<<0)
+#define IPW_SCAN_MIXED_CELL (1<<1)
+/* RESERVED (1<<2) */
+#define IPW_SCAN_PASSIVE (1<<3)
+
+#define IPW_NIC_FATAL_ERROR 0x2A7F0
+#define IPW_ERROR_ADDR(x) (x & 0x3FFFF)
+#define IPW_ERROR_CODE(x) ((x & 0xFF000000) >> 24)
+#define IPW2100_ERR_C3_CORRUPTION (0x10 << 24)
+#define IPW2100_ERR_MSG_TIMEOUT   (0x11 << 24)
+#define IPW2100_ERR_FW_LOAD       (0x12 << 24)
+
+#define IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND			0x200
+#define IPW_MEM_SRAM_HOST_INTERRUPT_AREA_LOWER_BOUND  	IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x0D80
+
+#define IPW_MEM_HOST_SHARED_RX_BD_BASE                  (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x40)
+#define IPW_MEM_HOST_SHARED_RX_STATUS_BASE              (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x44)
+#define IPW_MEM_HOST_SHARED_RX_BD_SIZE                  (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x48)
+#define IPW_MEM_HOST_SHARED_RX_READ_INDEX               (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0xa0)
+
+#define IPW_MEM_HOST_SHARED_TX_QUEUE_BD_BASE          (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x00)
+#define IPW_MEM_HOST_SHARED_TX_QUEUE_BD_SIZE          (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x04)
+#define IPW_MEM_HOST_SHARED_TX_QUEUE_READ_INDEX       (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x80)
+
+#define IPW_MEM_HOST_SHARED_RX_WRITE_INDEX \
+    (IPW_MEM_SRAM_HOST_INTERRUPT_AREA_LOWER_BOUND + 0x20)
+
+#define IPW_MEM_HOST_SHARED_TX_QUEUE_WRITE_INDEX \
+    (IPW_MEM_SRAM_HOST_INTERRUPT_AREA_LOWER_BOUND)
+
+#define IPW_MEM_HOST_SHARED_ORDINALS_TABLE_1   (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x180)
+#define IPW_MEM_HOST_SHARED_ORDINALS_TABLE_2   (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x184)
+
+#define IPW2100_INTA_TX_TRANSFER               (0x00000001)	// Bit 0 (LSB)
+#define IPW2100_INTA_RX_TRANSFER               (0x00000002)	// Bit 1
+#define IPW2100_INTA_TX_COMPLETE	       (0x00000004)	// Bit 2
+#define IPW2100_INTA_EVENT_INTERRUPT           (0x00000008)     // Bit 3
+#define IPW2100_INTA_STATUS_CHANGE             (0x00000010)	// Bit 4
+#define IPW2100_INTA_BEACON_PERIOD_EXPIRED     (0x00000020)	// Bit 5
+#define IPW2100_INTA_SLAVE_MODE_HOST_COMMAND_DONE  (0x00010000)	// Bit 16
+#define IPW2100_INTA_FW_INIT_DONE              (0x01000000)	// Bit 24
+#define IPW2100_INTA_FW_CALIBRATION_CALC       (0x02000000)	// Bit 25
+#define IPW2100_INTA_FATAL_ERROR               (0x40000000)	// Bit 30
+#define IPW2100_INTA_PARITY_ERROR              (0x80000000)	// Bit 31 (MSB)
+
+#define IPW_AUX_HOST_RESET_REG_PRINCETON_RESET              (0x00000001)
+#define IPW_AUX_HOST_RESET_REG_FORCE_NMI                    (0x00000002)
+#define IPW_AUX_HOST_RESET_REG_PCI_HOST_CLUSTER_FATAL_NMI   (0x00000004)
+#define IPW_AUX_HOST_RESET_REG_CORE_FATAL_NMI               (0x00000008)
+#define IPW_AUX_HOST_RESET_REG_SW_RESET                     (0x00000080)
+#define IPW_AUX_HOST_RESET_REG_MASTER_DISABLED              (0x00000100)
+#define IPW_AUX_HOST_RESET_REG_STOP_MASTER                  (0x00000200)
+
+#define IPW_AUX_HOST_GP_CNTRL_BIT_CLOCK_READY           (0x00000001)	// Bit 0 (LSB)
+#define IPW_AUX_HOST_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY   (0x00000002)	// Bit 1
+#define IPW_AUX_HOST_GP_CNTRL_BIT_INIT_DONE             (0x00000004)	// Bit 2
+#define IPW_AUX_HOST_GP_CNTRL_BITS_SYS_CONFIG           (0x000007c0)	// Bits 6-10
+#define IPW_AUX_HOST_GP_CNTRL_BIT_BUS_TYPE              (0x00000200)	// Bit 9
+#define IPW_AUX_HOST_GP_CNTRL_BIT_BAR0_BLOCK_SIZE       (0x00000400)	// Bit 10
+#define IPW_AUX_HOST_GP_CNTRL_BIT_USB_MODE              (0x20000000)	// Bit 29
+#define IPW_AUX_HOST_GP_CNTRL_BIT_HOST_FORCES_SYS_CLK   (0x40000000)	// Bit 30
+#define IPW_AUX_HOST_GP_CNTRL_BIT_FW_FORCES_SYS_CLK     (0x80000000)	// Bit 31 (MSB)
+
+#define IPW_BIT_GPIO_GPIO1_MASK         0x0000000C
+#define IPW_BIT_GPIO_GPIO3_MASK         0x000000C0
+#define IPW_BIT_GPIO_GPIO1_ENABLE       0x00000008
+#define IPW_BIT_GPIO_RF_KILL            0x00010000
+
+#define IPW_BIT_GPIO_LED_OFF            0x00002000	// Bit 13 = 1
+
+#define IPW_REG_DOMAIN_0_OFFSET 	0x0000
+#define IPW_REG_DOMAIN_1_OFFSET 	IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND
+
+#define IPW_REG_INTA			IPW_REG_DOMAIN_0_OFFSET + 0x0008
+#define IPW_REG_INTA_MASK		IPW_REG_DOMAIN_0_OFFSET + 0x000C
+#define IPW_REG_INDIRECT_ACCESS_ADDRESS	IPW_REG_DOMAIN_0_OFFSET + 0x0010
+#define IPW_REG_INDIRECT_ACCESS_DATA	IPW_REG_DOMAIN_0_OFFSET + 0x0014
+#define IPW_REG_AUTOINCREMENT_ADDRESS	IPW_REG_DOMAIN_0_OFFSET + 0x0018
+#define IPW_REG_AUTOINCREMENT_DATA	IPW_REG_DOMAIN_0_OFFSET + 0x001C
+#define IPW_REG_RESET_REG		IPW_REG_DOMAIN_0_OFFSET + 0x0020
+#define IPW_REG_GP_CNTRL		IPW_REG_DOMAIN_0_OFFSET + 0x0024
+#define IPW_REG_GPIO			IPW_REG_DOMAIN_0_OFFSET + 0x0030
+#define IPW_REG_FW_TYPE                 IPW_REG_DOMAIN_1_OFFSET + 0x0188
+#define IPW_REG_FW_VERSION 		IPW_REG_DOMAIN_1_OFFSET + 0x018C
+#define IPW_REG_FW_COMPATABILITY_VERSION IPW_REG_DOMAIN_1_OFFSET + 0x0190
+
+#define IPW_REG_INDIRECT_ADDR_MASK	0x00FFFFFC
+
+#define IPW_INTERRUPT_MASK		0xC1010013
+
+#define IPW2100_CONTROL_REG             0x220000
+#define IPW2100_CONTROL_PHY_OFF         0x8
+
+#define IPW2100_COMMAND			0x00300004
+#define IPW2100_COMMAND_PHY_ON		0x0
+#define IPW2100_COMMAND_PHY_OFF		0x1
+
+/* in DEBUG_AREA, memory values are always 0xd55555d5 */
+#define IPW_REG_DOA_DEBUG_AREA_START    IPW_REG_DOMAIN_0_OFFSET + 0x0090
+#define IPW_REG_DOA_DEBUG_AREA_END      IPW_REG_DOMAIN_0_OFFSET + 0x00FF
+#define IPW_DATA_DOA_DEBUG_VALUE        0xd55555d5
+
+#define IPW_INTERNAL_REGISTER_HALT_AND_RESET	0x003000e0
+
+#define IPW_WAIT_CLOCK_STABILIZATION_DELAY	    50	// micro seconds
+#define IPW_WAIT_RESET_ARC_COMPLETE_DELAY	    10	// micro seconds
+#define IPW_WAIT_RESET_MASTER_ASSERT_COMPLETE_DELAY 10	// micro seconds
+
+// BD ring queue read/write difference
+#define IPW_BD_QUEUE_W_R_MIN_SPARE 2
+
+#define IPW_CACHE_LINE_LENGTH_DEFAULT		    0x80
+
+#define IPW_CARD_DISABLE_PHY_OFF_COMPLETE_WAIT	    100	// 100 milli
+#define IPW_PREPARE_POWER_DOWN_COMPLETE_WAIT	    100	// 100 milli
+
+
+
+
+#define IPW_HEADER_802_11_SIZE		 sizeof(struct ieee80211_hdr_3addr)
+#define IPW_MAX_80211_PAYLOAD_SIZE              2304U
+#define IPW_MAX_802_11_PAYLOAD_LENGTH		2312
+#define IPW_MAX_ACCEPTABLE_TX_FRAME_LENGTH	1536
+#define IPW_MIN_ACCEPTABLE_RX_FRAME_LENGTH	60
+#define IPW_MAX_ACCEPTABLE_RX_FRAME_LENGTH \
+	(IPW_MAX_ACCEPTABLE_TX_FRAME_LENGTH + IPW_HEADER_802_11_SIZE - \
+        sizeof(struct ethhdr))
+
+#define IPW_802_11_FCS_LENGTH 4
+#define IPW_RX_NIC_BUFFER_LENGTH \
+        (IPW_MAX_802_11_PAYLOAD_LENGTH + IPW_HEADER_802_11_SIZE + \
+		IPW_802_11_FCS_LENGTH)
+
+#define IPW_802_11_PAYLOAD_OFFSET \
+        (sizeof(struct ieee80211_hdr_3addr) + \
+         sizeof(struct ieee80211_snap_hdr))
+
+struct ipw2100_rx {
+	union {
+		unsigned char payload[IPW_RX_NIC_BUFFER_LENGTH];
+		struct ieee80211_hdr header;
+		u32 status;
+		struct ipw2100_notification notification;
+		struct ipw2100_cmd_header command;
+	} rx_data;
+} __attribute__ ((packed));
+
+/* Bits 0-7 are for 802.11b tx rates.  Bits 5-7 are reserved */
+#define TX_RATE_1_MBIT              0x0001
+#define TX_RATE_2_MBIT              0x0002
+#define TX_RATE_5_5_MBIT            0x0004
+#define TX_RATE_11_MBIT             0x0008
+#define TX_RATE_MASK                0x000F
+#define DEFAULT_TX_RATES            0x000F
+
+#define IPW_POWER_MODE_CAM           0x00	//(always on)
+#define IPW_POWER_INDEX_1            0x01
+#define IPW_POWER_INDEX_2            0x02
+#define IPW_POWER_INDEX_3            0x03
+#define IPW_POWER_INDEX_4            0x04
+#define IPW_POWER_INDEX_5            0x05
+#define IPW_POWER_AUTO               0x06
+#define IPW_POWER_MASK               0x0F
+#define IPW_POWER_ENABLED            0x10
+#define IPW_POWER_LEVEL(x)           ((x) & IPW_POWER_MASK)
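+/* e.g. IPW_POWER_LEVEL(IPW_POWER_ENABLED | IPW_POWER_INDEX_3) == 3 */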
+
+#define IPW_TX_POWER_AUTO            0
+#define IPW_TX_POWER_ENHANCED        1
+
+#define IPW_TX_POWER_DEFAULT         32
+#define IPW_TX_POWER_MIN             0
+#define IPW_TX_POWER_MAX             16
+#define IPW_TX_POWER_MIN_DBM         (-12)
+#define IPW_TX_POWER_MAX_DBM         16
+
+#define FW_SCAN_DONOT_ASSOCIATE     0x0001 // Don't attempt to associate after scan
+#define FW_SCAN_PASSIVE             0x0008 // Force PASSIVE scan
+
+#define REG_MIN_CHANNEL             0
+#define REG_MAX_CHANNEL             14
+
+#define REG_CHANNEL_MASK            0x00003FFF
+#define IPW_IBSS_11B_DEFAULT_MASK   0x87ff
+
+#define DIVERSITY_EITHER            0	// Use both antennas
+#define DIVERSITY_ANTENNA_A         1	// Use antenna A
+#define DIVERSITY_ANTENNA_B         2	// Use antenna B
+
+
+#define HOST_COMMAND_WAIT 0
+#define HOST_COMMAND_NO_WAIT 1
+
+#define LOCK_NONE 0
+#define LOCK_DRIVER 1
+#define LOCK_FW 2
+
+#define TYPE_SWEEP_ORD                  0x000D
+#define TYPE_IBSS_STTN_ORD              0x000E
+#define TYPE_BSS_AP_ORD                 0x000F
+#define TYPE_RAW_BEACON_ENTRY           0x0010
+#define TYPE_CALIBRATION_DATA           0x0011
+#define TYPE_ROGUE_AP_DATA              0x0012
+#define TYPE_ASSOCIATION_REQUEST	0x0013
+#define TYPE_REASSOCIATION_REQUEST	0x0014
+
+
+#define HW_FEATURE_RFKILL (0x0001)
+#define RF_KILLSWITCH_OFF (1)
+#define RF_KILLSWITCH_ON  (0)
+
+#define IPW_COMMAND_POOL_SIZE        40
+
+#define IPW_START_ORD_TAB_1			1
+#define IPW_START_ORD_TAB_2			1000
+
+#define IPW_ORD_TAB_1_ENTRY_SIZE		sizeof(u32)
+
+#define IS_ORDINAL_TABLE_ONE(mgr,id) \
+    ((id >= IPW_START_ORD_TAB_1) && (id < mgr->table1_size))
+#define IS_ORDINAL_TABLE_TWO(mgr,id) \
+    ((id >= IPW_START_ORD_TAB_2) && (id < (mgr->table2_size + IPW_START_ORD_TAB_2)))
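+
+/* For example, IS_ORDINAL_TABLE_ONE(mgr, IPW_ORD_RSSI_AVG_CURR) is true once
+ * the table size read from the adapter exceeds 173, while ordinals such as
+ * IPW_ORD_STAT_FW_VER_NUM (1012) fall in the 1000-based variable length
+ * table checked by IS_ORDINAL_TABLE_TWO. */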
+
+#define BSS_ID_LENGTH               6
+
+// Fixed size data: Ordinal Table 1
+typedef enum _ORDINAL_TABLE_1 {	// NS - means Not Supported by FW
+// Transmit statistics
+	IPW_ORD_STAT_TX_HOST_REQUESTS = 1,// # of requested Host Tx's (MSDU)
+	IPW_ORD_STAT_TX_HOST_COMPLETE,	// # of successful Host Tx's (MSDU)
+	IPW_ORD_STAT_TX_DIR_DATA,	// # of successful Directed Tx's (MSDU)
+
+	IPW_ORD_STAT_TX_DIR_DATA1 = 4,	// # of successful Directed Tx's (MSDU) @ 1MB
+	IPW_ORD_STAT_TX_DIR_DATA2,	// # of successful Directed Tx's (MSDU) @ 2MB
+	IPW_ORD_STAT_TX_DIR_DATA5_5,	// # of successful Directed Tx's (MSDU) @ 5_5MB
+	IPW_ORD_STAT_TX_DIR_DATA11,	// # of successful Directed Tx's (MSDU) @ 11MB
+	IPW_ORD_STAT_TX_DIR_DATA22,	// # of successful Directed Tx's (MSDU) @ 22MB
+
+	IPW_ORD_STAT_TX_NODIR_DATA1 = 13,// # of successful Non_Directed Tx's (MSDU) @ 1MB
+	IPW_ORD_STAT_TX_NODIR_DATA2,	// # of successful Non_Directed Tx's (MSDU) @ 2MB
+	IPW_ORD_STAT_TX_NODIR_DATA5_5,	// # of successful Non_Directed Tx's (MSDU) @ 5.5MB
+	IPW_ORD_STAT_TX_NODIR_DATA11,	// # of successful Non_Directed Tx's (MSDU) @ 11MB
+
+	IPW_ORD_STAT_NULL_DATA = 21,	// # of successful NULL data Tx's
+	IPW_ORD_STAT_TX_RTS,	        // # of successful Tx RTS
+	IPW_ORD_STAT_TX_CTS,	        // # of successful Tx CTS
+	IPW_ORD_STAT_TX_ACK,	        // # of successful Tx ACK
+	IPW_ORD_STAT_TX_ASSN,	        // # of successful Association Tx's
+	IPW_ORD_STAT_TX_ASSN_RESP,	// # of successful Association response Tx's
+	IPW_ORD_STAT_TX_REASSN,	        // # of successful Reassociation Tx's
+	IPW_ORD_STAT_TX_REASSN_RESP,	// # of successful Reassociation response Tx's
+	IPW_ORD_STAT_TX_PROBE,	        // # of probes successfully transmitted
+	IPW_ORD_STAT_TX_PROBE_RESP,	// # of probe responses successfully transmitted
+	IPW_ORD_STAT_TX_BEACON,	        // # of tx beacon
+	IPW_ORD_STAT_TX_ATIM,	        // # of Tx ATIM
+	IPW_ORD_STAT_TX_DISASSN,	// # of successful Disassociation TX
+	IPW_ORD_STAT_TX_AUTH,	        // # of successful Authentication Tx
+	IPW_ORD_STAT_TX_DEAUTH,	        // # of successful Deauthentication TX
+
+	IPW_ORD_STAT_TX_TOTAL_BYTES = 41,// Total successful Tx data bytes
+	IPW_ORD_STAT_TX_RETRIES,         // # of Tx retries
+	IPW_ORD_STAT_TX_RETRY1,          // # of Tx retries at 1MBPS
+	IPW_ORD_STAT_TX_RETRY2,          // # of Tx retries at 2MBPS
+	IPW_ORD_STAT_TX_RETRY5_5,	 // # of Tx retries at 5.5MBPS
+	IPW_ORD_STAT_TX_RETRY11,	 // # of Tx retries at 11MBPS
+
+	IPW_ORD_STAT_TX_FAILURES = 51,	// # of Tx Failures
+	IPW_ORD_STAT_TX_ABORT_AT_HOP,	//NS // # of Tx's aborted at hop time
+	IPW_ORD_STAT_TX_MAX_TRIES_IN_HOP,// # of times max tries in a hop failed
+	IPW_ORD_STAT_TX_ABORT_LATE_DMA,	//NS // # of times tx aborted due to late dma setup
+	IPW_ORD_STAT_TX_ABORT_STX,	//NS // # of times backoff aborted
+	IPW_ORD_STAT_TX_DISASSN_FAIL,	// # of times disassociation failed
+	IPW_ORD_STAT_TX_ERR_CTS,         // # of missed/bad CTS frames
+	IPW_ORD_STAT_TX_BPDU,	        //NS // # of spanning tree BPDUs sent
+	IPW_ORD_STAT_TX_ERR_ACK,	// # of tx err due to acks
+
+	// Receive statistics
+	IPW_ORD_STAT_RX_HOST = 61,	// # of packets passed to host
+	IPW_ORD_STAT_RX_DIR_DATA,	// # of directed packets
+	IPW_ORD_STAT_RX_DIR_DATA1,	// # of directed packets at 1MB
+	IPW_ORD_STAT_RX_DIR_DATA2,	// # of directed packets at 2MB
+	IPW_ORD_STAT_RX_DIR_DATA5_5,	// # of directed packets at 5.5MB
+	IPW_ORD_STAT_RX_DIR_DATA11,	// # of directed packets at 11MB
+	IPW_ORD_STAT_RX_DIR_DATA22,	// # of directed packets at 22MB
+
+	IPW_ORD_STAT_RX_NODIR_DATA = 71,// # of nondirected packets
+	IPW_ORD_STAT_RX_NODIR_DATA1,	// # of nondirected packets at 1MB
+	IPW_ORD_STAT_RX_NODIR_DATA2,	// # of nondirected packets at 2MB
+	IPW_ORD_STAT_RX_NODIR_DATA5_5,	// # of nondirected packets at 5.5MB
+	IPW_ORD_STAT_RX_NODIR_DATA11,	// # of nondirected packets at 11MB
+
+	IPW_ORD_STAT_RX_NULL_DATA = 80,	// # of null data rx's
+	IPW_ORD_STAT_RX_POLL,	//NS // # of poll rx
+	IPW_ORD_STAT_RX_RTS,	// # of Rx RTS
+	IPW_ORD_STAT_RX_CTS,	// # of Rx CTS
+	IPW_ORD_STAT_RX_ACK,	// # of Rx ACK
+	IPW_ORD_STAT_RX_CFEND,	// # of Rx CF End
+	IPW_ORD_STAT_RX_CFEND_ACK,	// # of Rx CF End + CF Ack
+	IPW_ORD_STAT_RX_ASSN,	// # of Association Rx's
+	IPW_ORD_STAT_RX_ASSN_RESP,	// # of Association response Rx's
+	IPW_ORD_STAT_RX_REASSN,	// # of Reassociation Rx's
+	IPW_ORD_STAT_RX_REASSN_RESP,	// # of Reassociation response Rx's
+	IPW_ORD_STAT_RX_PROBE,	// # of probe Rx's
+	IPW_ORD_STAT_RX_PROBE_RESP,	// # of probe response Rx's
+	IPW_ORD_STAT_RX_BEACON,	// # of Rx beacon
+	IPW_ORD_STAT_RX_ATIM,	// # of Rx ATIM
+	IPW_ORD_STAT_RX_DISASSN,	// # of disassociation Rx
+	IPW_ORD_STAT_RX_AUTH,	// # of authentication Rx
+	IPW_ORD_STAT_RX_DEAUTH,	// # of deauthentication Rx
+
+	IPW_ORD_STAT_RX_TOTAL_BYTES = 101,// Total rx data bytes received
+	IPW_ORD_STAT_RX_ERR_CRC,	 // # of packets with Rx CRC error
+	IPW_ORD_STAT_RX_ERR_CRC1,	 // # of Rx CRC errors at 1MB
+	IPW_ORD_STAT_RX_ERR_CRC2,	 // # of Rx CRC errors at 2MB
+	IPW_ORD_STAT_RX_ERR_CRC5_5,	 // # of Rx CRC errors at 5.5MB
+	IPW_ORD_STAT_RX_ERR_CRC11,	 // # of Rx CRC errors at 11MB
+
+	IPW_ORD_STAT_RX_DUPLICATE1 = 112, // # of duplicate rx packets at 1MB
+	IPW_ORD_STAT_RX_DUPLICATE2,	 // # of duplicate rx packets at 2MB
+	IPW_ORD_STAT_RX_DUPLICATE5_5,	 // # of duplicate rx packets at 5.5MB
+	IPW_ORD_STAT_RX_DUPLICATE11,	 // # of duplicate rx packets at 11MB
+	IPW_ORD_STAT_RX_DUPLICATE = 119, // # of duplicate rx packets
+
+	IPW_ORD_PERS_DB_LOCK = 120,	// # locking fw permanent  db
+	IPW_ORD_PERS_DB_SIZE,	// # size of fw permanent  db
+	IPW_ORD_PERS_DB_ADDR,	// # address of fw permanent  db
+	IPW_ORD_STAT_RX_INVALID_PROTOCOL,	// # of rx frames with invalid protocol
+	IPW_ORD_SYS_BOOT_TIME,	// # Boot time
+	IPW_ORD_STAT_RX_NO_BUFFER,	// # of rx frames rejected due to no buffer
+	IPW_ORD_STAT_RX_ABORT_LATE_DMA,	//NS // # of rx frames rejected due to dma setup too late
+	IPW_ORD_STAT_RX_ABORT_AT_HOP,	//NS // # of rx frames aborted due to hop
+	IPW_ORD_STAT_RX_MISSING_FRAG,	// # of rx frames dropped due to missing fragment
+	IPW_ORD_STAT_RX_ORPHAN_FRAG,	// # of rx frames dropped due to non-sequential fragment
+	IPW_ORD_STAT_RX_ORPHAN_FRAME,	// # of rx frames dropped due to unmatched 1st frame
+	IPW_ORD_STAT_RX_FRAG_AGEOUT,	// # of rx frames dropped due to uncompleted frame
+	IPW_ORD_STAT_RX_BAD_SSID,	//NS // Bad SSID (unused)
+	IPW_ORD_STAT_RX_ICV_ERRORS,	// # of ICV errors during decryption
+
+// PSP Statistics
+	IPW_ORD_STAT_PSP_SUSPENSION = 137,// # of times adapter suspended
+	IPW_ORD_STAT_PSP_BCN_TIMEOUT,	// # of beacon timeout
+	IPW_ORD_STAT_PSP_POLL_TIMEOUT,	// # of poll response timeouts
+	IPW_ORD_STAT_PSP_NONDIR_TIMEOUT,// # of timeouts waiting for last broadcast/multicast pkt
+	IPW_ORD_STAT_PSP_RX_DTIMS,	// # of PSP DTIMs received
+	IPW_ORD_STAT_PSP_RX_TIMS,	// # of PSP TIMs received
+	IPW_ORD_STAT_PSP_STATION_ID,	// PSP Station ID
+
+// Association and roaming
+	IPW_ORD_LAST_ASSN_TIME = 147,	// RTC time of last association
+	IPW_ORD_STAT_PERCENT_MISSED_BCNS,// current calculation of % missed beacons
+	IPW_ORD_STAT_PERCENT_RETRIES,	// current calculation of % missed tx retries
+	IPW_ORD_ASSOCIATED_AP_PTR,	// If associated, this is ptr to the associated
+	// AP table entry. set to 0 if not associated
+	IPW_ORD_AVAILABLE_AP_CNT,	// # of APs described in the AP table
+	IPW_ORD_AP_LIST_PTR,	// Ptr to list of available APs
+	IPW_ORD_STAT_AP_ASSNS,	// # of associations
+	IPW_ORD_STAT_ASSN_FAIL,	// # of association failures
+	IPW_ORD_STAT_ASSN_RESP_FAIL,	// # of failures due to response fail
+	IPW_ORD_STAT_FULL_SCANS,	// # of full scans
+
+	IPW_ORD_CARD_DISABLED,	// # Card Disabled
+	IPW_ORD_STAT_ROAM_INHIBIT,	// # of times roaming was inhibited due to ongoing activity
+	IPW_FILLER_40,
+	IPW_ORD_RSSI_AT_ASSN = 160,	// RSSI of associated AP at time of association
+	IPW_ORD_STAT_ASSN_CAUSE1,	// # of reassociations due to no tx from AP in last N
+	// hops or no probe responses in last 3 minutes
+	IPW_ORD_STAT_ASSN_CAUSE2,	// # of reassociations due to poor tx/rx quality
+	IPW_ORD_STAT_ASSN_CAUSE3,	// # of reassociations due to tx/rx quality with excessive
+	// load at the AP
+	IPW_ORD_STAT_ASSN_CAUSE4,	// # of reassociations due to AP RSSI level fell below
+	// eligible group
+	IPW_ORD_STAT_ASSN_CAUSE5,	// # of reassociations due to load leveling
+	IPW_ORD_STAT_ASSN_CAUSE6,	//NS // # of reassociations due to dropped by Ap
+	IPW_FILLER_41,
+	IPW_FILLER_42,
+	IPW_FILLER_43,
+	IPW_ORD_STAT_AUTH_FAIL,	// # of times authentication failed
+	IPW_ORD_STAT_AUTH_RESP_FAIL,	// # of times authentication response failed
+	IPW_ORD_STATION_TABLE_CNT,	// # of entries in association table
+
+// Other statistics
+	IPW_ORD_RSSI_AVG_CURR = 173,	// Current avg RSSI
+	IPW_ORD_STEST_RESULTS_CURR,	//NS // Current self test results word
+	IPW_ORD_STEST_RESULTS_CUM,	//NS // Cumulative self test results word
+	IPW_ORD_SELF_TEST_STATUS,	//NS //
+	IPW_ORD_POWER_MGMT_MODE,	// Power mode - 0=CAM, 1=PSP
+	IPW_ORD_POWER_MGMT_INDEX,	//NS //
+	IPW_ORD_COUNTRY_CODE,	// IEEE country code as recv'd from beacon
+	IPW_ORD_COUNTRY_CHANNELS,	// channels supported by country
+// IPW_ORD_COUNTRY_CHANNELS:
+// For 11b the lower 2-byte are used for channels from 1-14
+//   and the higher 2-byte are not used.
+	IPW_ORD_RESET_CNT,	// # of adapter resets (warm)
+	IPW_ORD_BEACON_INTERVAL,	// Beacon interval
+
+	IPW_ORD_PRINCETON_VERSION = 184,	//NS // Princeton Version
+	IPW_ORD_ANTENNA_DIVERSITY,	// TRUE if antenna diversity is disabled
+	IPW_ORD_CCA_RSSI,	//NS // CCA RSSI value (factory programmed)
+	IPW_ORD_STAT_EEPROM_UPDATE,	//NS // # of times config EEPROM updated
+	IPW_ORD_DTIM_PERIOD,	// # of beacon intervals between DTIMs
+	IPW_ORD_OUR_FREQ,	// current radio freq lower digits - channel ID
+
+	IPW_ORD_RTC_TIME = 190,	// current RTC time
+	IPW_ORD_PORT_TYPE,	// operating mode
+	IPW_ORD_CURRENT_TX_RATE,	// current tx rate
+	IPW_ORD_SUPPORTED_RATES,	// Bitmap of supported tx rates
+	IPW_ORD_ATIM_WINDOW,	// current ATIM Window
+	IPW_ORD_BASIC_RATES,	// bitmap of basic tx rates
+	IPW_ORD_NIC_HIGHEST_RATE,	// bitmap of highest tx rate supported by the NIC
+	IPW_ORD_AP_HIGHEST_RATE,	// bitmap of highest tx rate supported by the AP
+	IPW_ORD_CAPABILITIES,	// Management frame capability field
+	IPW_ORD_AUTH_TYPE,	// Type of authentication
+	IPW_ORD_RADIO_TYPE,	// Adapter card platform type
+	IPW_ORD_RTS_THRESHOLD = 201,	// Min length of packet after which RTS handshaking is used
+	IPW_ORD_INT_MODE,	// International mode
+	IPW_ORD_FRAGMENTATION_THRESHOLD,	// protocol frag threshold
+	IPW_ORD_EEPROM_SRAM_DB_BLOCK_START_ADDRESS,	// EEPROM offset in SRAM
+	IPW_ORD_EEPROM_SRAM_DB_BLOCK_SIZE,	// EEPROM size in SRAM
+	IPW_ORD_EEPROM_SKU_CAPABILITY,	// EEPROM SKU Capability (ordinal 206)
+	IPW_ORD_EEPROM_IBSS_11B_CHANNELS,	// EEPROM IBSS 11b channel set
+
+	IPW_ORD_MAC_VERSION = 209,	// MAC Version
+	IPW_ORD_MAC_REVISION,	// MAC Revision
+	IPW_ORD_RADIO_VERSION,	// Radio Version
+	IPW_ORD_NIC_MANF_DATE_TIME,	// MANF Date/Time STAMP
+	IPW_ORD_UCODE_VERSION,	// Ucode Version
+	IPW_ORD_HW_RF_SWITCH_STATE = 214,	// HW RF Kill Switch State
+} ORDINALTABLE1;
+
+// ordinal table 2
+// Variable length data:
+#define IPW_FIRST_VARIABLE_LENGTH_ORDINAL   1001
+
+typedef enum _ORDINAL_TABLE_2 {	// NS - means Not Supported by FW
+	IPW_ORD_STAT_BASE = 1000,	// contains number of variable ORDs
+	IPW_ORD_STAT_ADAPTER_MAC = 1001,	// 6 bytes: our adapter MAC address
+	IPW_ORD_STAT_PREFERRED_BSSID = 1002,	// 6 bytes: BSSID of the preferred AP
+	IPW_ORD_STAT_MANDATORY_BSSID = 1003,	// 6 bytes: BSSID of the mandatory AP
+	IPW_FILL_1,		//NS //
+	IPW_ORD_STAT_COUNTRY_TEXT = 1005,	// 36 bytes: Country name text, First two bytes are Country code
+	IPW_ORD_STAT_ASSN_SSID = 1006,	// 32 bytes: ESSID String
+	IPW_ORD_STATION_TABLE = 1007,	// ? bytes: Station/AP table (via Direct SSID Scans)
+	IPW_ORD_STAT_SWEEP_TABLE = 1008,	// ? bytes: Sweep/Host Table table (via Broadcast Scans)
+	IPW_ORD_STAT_ROAM_LOG = 1009,	// ? bytes: Roaming log
+	IPW_ORD_STAT_RATE_LOG = 1010,	//NS // 0 bytes: Rate log
+	IPW_ORD_STAT_FIFO = 1011,	//NS // 0 bytes: Fifo buffer data structures
+	IPW_ORD_STAT_FW_VER_NUM = 1012,	// 14 bytes: fw version ID string as in (a.bb.ccc; "0.08.011")
+	IPW_ORD_STAT_FW_DATE = 1013,	// 14 bytes: fw date string (mmm dd yyyy; "Mar 13 2002")
+	IPW_ORD_STAT_ASSN_AP_BSSID = 1014,	// 6 bytes: MAC address of associated AP
+	IPW_ORD_STAT_DEBUG = 1015,	//NS // ? bytes:
+	IPW_ORD_STAT_NIC_BPA_NUM = 1016,	// 11 bytes: NIC BPA number in ASCII
+	IPW_ORD_STAT_UCODE_DATE = 1017,	// 5 bytes: uCode date
+	IPW_ORD_SECURITY_NGOTIATION_RESULT = 1018,
+} ORDINALTABLE2;		// NS - means Not Supported by FW
+
+#define IPW_LAST_VARIABLE_LENGTH_ORDINAL   1018
+
+#ifndef WIRELESS_SPY
+#define WIRELESS_SPY		// enable iwspy support
+#endif
+
+extern struct iw_handler_def ipw2100_wx_handler_def;
+extern struct iw_statistics *ipw2100_wx_wireless_stats(struct net_device * dev);
+extern void ipw2100_wx_event_work(struct ipw2100_priv *priv);
+
+#define IPW_HOST_FW_SHARED_AREA0 	0x0002f200
+#define IPW_HOST_FW_SHARED_AREA0_END 	0x0002f510	// 0x310 bytes
+
+#define IPW_HOST_FW_SHARED_AREA1 	0x0002f610
+#define IPW_HOST_FW_SHARED_AREA1_END 	0x0002f630	// 0x20 bytes
+
+#define IPW_HOST_FW_SHARED_AREA2 	0x0002fa00
+#define IPW_HOST_FW_SHARED_AREA2_END 	0x0002fa20	// 0x20 bytes
+
+#define IPW_HOST_FW_SHARED_AREA3 	0x0002fc00
+#define IPW_HOST_FW_SHARED_AREA3_END 	0x0002fc10	// 0x10 bytes
+
+#define IPW_HOST_FW_INTERRUPT_AREA 	0x0002ff80
+#define IPW_HOST_FW_INTERRUPT_AREA_END 	0x00030000	// 0x80 bytes
+
+struct ipw2100_fw_chunk {
+	unsigned char *buf;
+	long len;
+	long pos;
+	struct list_head list;
+};
+
+struct ipw2100_fw_chunk_set {
+   	const void *data;
+	unsigned long size;
+};
+
+struct ipw2100_fw {
+	int version;
+	struct ipw2100_fw_chunk_set fw;
+	struct ipw2100_fw_chunk_set uc;
+	const struct firmware *fw_entry;
+};
+
+int ipw2100_get_firmware(struct ipw2100_priv *priv, struct ipw2100_fw *fw);
+void ipw2100_release_firmware(struct ipw2100_priv *priv, struct ipw2100_fw *fw);
+int ipw2100_fw_download(struct ipw2100_priv *priv, struct ipw2100_fw *fw);
+int ipw2100_ucode_download(struct ipw2100_priv *priv, struct ipw2100_fw *fw);
+
+#define MAX_FW_VERSION_LEN 14
+
+int ipw2100_get_fwversion(struct ipw2100_priv *priv, char *buf, size_t max);
+int ipw2100_get_ucodeversion(struct ipw2100_priv *priv, char *buf, size_t max);
+
+#endif /* _IPW2100_H */
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
new file mode 100644
--- /dev/null
+++ b/drivers/net/wireless/ipw2200.c
@@ -0,0 +1,7361 @@
+/******************************************************************************
+
+  Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.
+
+  802.11 status code portion of this file from ethereal-0.10.6:
+    Copyright 2000, Axis Communications AB
+    Ethereal - Network traffic analyzer
+    By Gerald Combs <gerald@ethereal.com>
+    Copyright 1998 Gerald Combs
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+
+  The full GNU General Public License is included in this distribution in the
+  file called LICENSE.
+
+  Contact Information:
+  James P. Ketrenos <ipw2100-admin@linux.intel.com>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+******************************************************************************/
+
+#include "ipw2200.h"
+
+#define IPW2200_VERSION "1.0.0"
+#define DRV_DESCRIPTION	"Intel(R) PRO/Wireless 2200/2915 Network Driver"
+#define DRV_COPYRIGHT	"Copyright(c) 2003-2004 Intel Corporation"
+#define DRV_VERSION     IPW2200_VERSION
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT);
+MODULE_LICENSE("GPL");
+
+static int debug = 0;
+static int channel = 0;
+static char *ifname;
+static int mode = 0;
+
+static u32 ipw_debug_level;
+static int associate = 1;
+static int auto_create = 1;
+static int disable = 0;
+static const char ipw_modes[] = {
+	'a', 'b', 'g', '?'
+};
+
+static void ipw_rx(struct ipw_priv *priv);
+static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
+				struct clx2_tx_queue *txq, int qindex);
+static int ipw_queue_reset(struct ipw_priv *priv);
+
+static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
+			     int len, int sync);
+
+static void ipw_tx_queue_free(struct ipw_priv *);
+
+static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
+static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
+static void ipw_rx_queue_replenish(void *);
+
+static int ipw_up(struct ipw_priv *);
+static void ipw_down(struct ipw_priv *);
+static int ipw_config(struct ipw_priv *);
+static int init_supported_rates(struct ipw_priv *priv, struct ipw_supported_rates *prates);
+
+static u8 band_b_active_channel[MAX_B_CHANNELS] = {
+	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 0
+};
+static u8 band_a_active_channel[MAX_A_CHANNELS] = {
+	36, 40, 44, 48, 149, 153, 157, 161, 165, 52, 56, 60, 64, 0
+};
+
+static int is_valid_channel(int mode_mask, int channel)
+{
+	int i;
+
+	if (!channel)
+		return 0;
+
+	if (mode_mask & IEEE_A)
+		for (i = 0; i < MAX_A_CHANNELS; i++)
+			if (band_a_active_channel[i] == channel)
+				return IEEE_A;
+
+	if (mode_mask & (IEEE_B | IEEE_G))
+		for (i = 0; i < MAX_B_CHANNELS; i++)
+			if (band_b_active_channel[i] == channel)
+				return mode_mask & (IEEE_B | IEEE_G);
+
+	return 0;
+}
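+
+/* For example (illustrative only), is_valid_channel(IEEE_A, 36) returns
+ * IEEE_A, while is_valid_channel(IEEE_B | IEEE_G, 36) returns 0 since
+ * channel 36 is not an 802.11b/g channel. */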
+
+static char *snprint_line(char *buf, size_t count,
+			  const u8 *data, u32 len, u32 ofs)
+{
+	int out, i, j, l;
+	char c;
+
+	out = snprintf(buf, count, "%08X", ofs);
+
+	for (l = 0, i = 0; i < 2; i++) {
+		out += snprintf(buf + out, count - out, " ");
+		for (j = 0; j < 8 && l < len; j++, l++)
+			out += snprintf(buf + out, count - out, "%02X ",
+					data[(i * 8 + j)]);
+		for (; j < 8; j++)
+			out += snprintf(buf + out, count - out, "   ");
+	}
+
+	out += snprintf(buf + out, count - out, " ");
+	for (l = 0, i = 0; i < 2; i++) {
+		out += snprintf(buf + out, count - out, " ");
+		for (j = 0; j < 8 && l < len; j++, l++) {
+			c = data[(i * 8 + j)];
+			if (!isascii(c) || !isprint(c))
+				c = '.';
+
+			out += snprintf(buf + out, count - out, "%c", c);
+		}
+
+		for (; j < 8; j++)
+			out += snprintf(buf + out, count - out, " ");
+	}
+
+	return buf;
+}
+
+static void printk_buf(int level, const u8 *data, u32 len)
+{
+	char line[81];
+	u32 ofs = 0;
+	if (!(ipw_debug_level & level))
+		return;
+
+	while (len) {
+		printk(KERN_DEBUG "%s\n",
+		       snprint_line(line, sizeof(line), &data[ofs],
+				    min(len, 16U), ofs));
+		ofs += 16;
+		len -= min(len, 16U);
+	}
+}
+
+static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
+#define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
+
+static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
+#define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
+
+static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
+static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
+{
+	IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(b), (u32)(c));
+	_ipw_write_reg8(a, b, c);
+}
+
+static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
+static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
+{
+	IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(b), (u32)(c));
+	_ipw_write_reg16(a, b, c);
+}
+
+static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
+static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
+{
+	IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(b), (u32)(c));
+	_ipw_write_reg32(a, b, c);
+}
+
+#define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs))
+#define ipw_write8(ipw, ofs, val) do { \
+	IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
+	_ipw_write8(ipw, ofs, val); \
+} while (0)
+
+#define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs))
+#define ipw_write16(ipw, ofs, val) do { \
+	IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
+	_ipw_write16(ipw, ofs, val); \
+} while (0)
+
+#define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs))
+#define ipw_write32(ipw, ofs, val) do { \
+	IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
+	_ipw_write32(ipw, ofs, val); \
+} while (0)
+
+#define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs))
+static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs) {
+	IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32)(ofs));
+	return _ipw_read8(ipw, ofs);
+}
+#define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs)
+
+#define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs))
+static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs) {
+	IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32)(ofs));
+	return _ipw_read16(ipw, ofs);
+}
+#define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs)
+
+#define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs))
+static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs) {
+	IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32)(ofs));
+	return _ipw_read32(ipw, ofs);
+}
+#define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs)
+
+static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
+#define ipw_read_indirect(a, b, c, d) do { \
+	IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
+	_ipw_read_indirect(a, b, c, d); \
+} while (0)
+
+static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 *data, int num);
+#define ipw_write_indirect(a, b, c, d) do { \
+	IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
+	_ipw_write_indirect(a, b, c, d); \
+} while (0)
+
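+/*
+ * Indirect register/SRAM accesses go through a pair of memory mapped
+ * registers: the target address is written to CX2_INDIRECT_ADDR and the
+ * data is then transferred through CX2_INDIRECT_DATA.
+ */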
+/* indirect writes */
+static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg,
+			     u32 value)
+{
+	IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n",
+		     priv, reg, value);
+	_ipw_write32(priv, CX2_INDIRECT_ADDR, reg);
+	_ipw_write32(priv, CX2_INDIRECT_DATA, value);
+}
+
+
+static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
+{
+	IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
+	_ipw_write32(priv, CX2_INDIRECT_ADDR, reg & CX2_INDIRECT_ADDR_MASK);
+	_ipw_write8(priv, CX2_INDIRECT_DATA, value);
+	IPW_DEBUG_IO(" reg = 0x%8lX : value = 0x%8X\n",
+		     (unsigned long)(priv->hw_base + CX2_INDIRECT_DATA),
+		     value);
+}
+
+static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg,
+			     u16 value)
+{
+	IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
+	_ipw_write32(priv, CX2_INDIRECT_ADDR, reg & CX2_INDIRECT_ADDR_MASK);
+	_ipw_write16(priv, CX2_INDIRECT_DATA, value);
+}
+
+/* indirect reads */
+
+static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
+{
+	u32 word;
+	_ipw_write32(priv, CX2_INDIRECT_ADDR, reg & CX2_INDIRECT_ADDR_MASK);
+	IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
+	word = _ipw_read32(priv, CX2_INDIRECT_DATA);
+	return (word >> ((reg & 0x3)*8)) & 0xff;
+}
+
+static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
+{
+	u32 value;
+
+	IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
+
+	_ipw_write32(priv, CX2_INDIRECT_ADDR, reg);
+	value = _ipw_read32(priv, CX2_INDIRECT_DATA);
+	IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
+	return value;
+}
+
+/* iterative/auto-increment 32 bit reads and writes */
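+/*
+ * Bulk transfers are done in three steps: any unaligned leading bytes are
+ * moved one at a time through CX2_INDIRECT_DATA, the aligned body is
+ * streamed as 32 bit words through the auto-incrementing
+ * CX2_AUTOINC_ADDR/CX2_AUTOINC_DATA pair, and any trailing bytes are again
+ * moved individually.
+ */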
+static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
+			       int num)
+{
+	u32 aligned_addr = addr & CX2_INDIRECT_ADDR_MASK;
+	u32 dif_len = addr - aligned_addr;
+	u32 aligned_len;
+	u32 i;
+
+	IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
+
+	/* Read the first dword's unaligned leading bytes, one at a time */
+	if (unlikely(dif_len)) {
+		/* Start reading at aligned_addr + dif_len */
+		_ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr);
+		for (i = dif_len; i < 4; i++, buf++)
+			*buf = _ipw_read8(priv, CX2_INDIRECT_DATA + i);
+		num -= 4 - dif_len;
+		aligned_addr += 4;
+	}
+
+	/* Read DWs through autoinc register */
+	_ipw_write32(priv, CX2_AUTOINC_ADDR, aligned_addr);
+	aligned_len = num & CX2_INDIRECT_ADDR_MASK;
+	for (i = 0; i < aligned_len; i += 4, buf += 4, aligned_addr += 4)
+		*(u32*)buf = ipw_read32(priv, CX2_AUTOINC_DATA);
+
+	/* Read the trailing unaligned bytes */
+	dif_len = num - aligned_len;
+	_ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr);
+	for (i = 0; i < dif_len; i++, buf++)
+		*buf = ipw_read8(priv, CX2_INDIRECT_DATA + i);
+}
+
+static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 *buf,
+				int num)
+{
+	u32 aligned_addr = addr & CX2_INDIRECT_ADDR_MASK;
+	u32 dif_len = addr - aligned_addr;
+	u32 aligned_len;
+	u32 i;
+
+	IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
+
+	/* Write the first dword's unaligned leading bytes, one at a time */
+	if (unlikely(dif_len)) {
+		/* Start writing at aligned_addr + dif_len */
+		_ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr);
+		for (i = dif_len; i < 4; i++, buf++)
+			_ipw_write8(priv, CX2_INDIRECT_DATA + i, *buf);
+		num -= 4 - dif_len;
+		aligned_addr += 4;
+	}
+
+	/* Write DWs through autoinc register */
+	_ipw_write32(priv, CX2_AUTOINC_ADDR, aligned_addr);
+	aligned_len = num & CX2_INDIRECT_ADDR_MASK;
+	for (i = 0; i < aligned_len; i += 4, buf += 4, aligned_addr += 4)
+		_ipw_write32(priv, CX2_AUTOINC_DATA, *(u32*)buf);
+
+	/* Write the trailing unaligned bytes */
+	dif_len = num - aligned_len;
+	_ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr);
+	for (i = 0; i < dif_len; i++, buf++)
+		_ipw_write8(priv, CX2_INDIRECT_DATA + i, *buf);
+}
+
+static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
+			     int num)
+{
+	memcpy_toio((priv->hw_base + addr), buf, num);
+}
+
+static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
+{
+	ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
+}
+
+static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
+{
+	ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
+}
+
+static inline void ipw_enable_interrupts(struct ipw_priv *priv)
+{
+	if (priv->status & STATUS_INT_ENABLED)
+		return;
+	priv->status |= STATUS_INT_ENABLED;
+	ipw_write32(priv, CX2_INTA_MASK_R, CX2_INTA_MASK_ALL);
+}
+
+static inline void ipw_disable_interrupts(struct ipw_priv *priv)
+{
+	if (!(priv->status & STATUS_INT_ENABLED))
+		return;
+	priv->status &= ~STATUS_INT_ENABLED;
+	ipw_write32(priv, CX2_INTA_MASK_R, ~CX2_INTA_MASK_ALL);
+}
+
+static char *ipw_error_desc(u32 val)
+{
+	switch (val) {
+	case IPW_FW_ERROR_OK:
+		return "ERROR_OK";
+	case IPW_FW_ERROR_FAIL:
+		return "ERROR_FAIL";
+	case IPW_FW_ERROR_MEMORY_UNDERFLOW:
+		return "MEMORY_UNDERFLOW";
+	case IPW_FW_ERROR_MEMORY_OVERFLOW:
+		return "MEMORY_OVERFLOW";
+	case IPW_FW_ERROR_BAD_PARAM:
+		return "ERROR_BAD_PARAM";
+	case IPW_FW_ERROR_BAD_CHECKSUM:
+		return "ERROR_BAD_CHECKSUM";
+	case IPW_FW_ERROR_NMI_INTERRUPT:
+		return "ERROR_NMI_INTERRUPT";
+	case IPW_FW_ERROR_BAD_DATABASE:
+		return "ERROR_BAD_DATABASE";
+	case IPW_FW_ERROR_ALLOC_FAIL:
+		return "ERROR_ALLOC_FAIL";
+	case IPW_FW_ERROR_DMA_UNDERRUN:
+		return "ERROR_DMA_UNDERRUN";
+	case IPW_FW_ERROR_DMA_STATUS:
+		return "ERROR_DMA_STATUS";
+	case IPW_FW_ERROR_DINOSTATUS_ERROR:
+		return "ERROR_DINOSTATUS_ERROR";
+	case IPW_FW_ERROR_EEPROMSTATUS_ERROR:
+		return "ERROR_EEPROMSTATUS_ERROR";
+	case IPW_FW_ERROR_SYSASSERT:
+		return "ERROR_SYSASSERT";
+	case IPW_FW_ERROR_FATAL_ERROR:
+		return "ERROR_FATALSTATUS_ERROR";
+	default:
+		return "UNKNOWNSTATUS_ERROR";
+	}
+}
+
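+/* Dump the firmware error log to the kernel log.  The log's base address
+ * is read from IPWSTATUS_ERROR_LOG; the first word there is the record
+ * count, and each record holds a description code, a timestamp and the
+ * blink/link/data registers captured at the time of the error. */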
+static void ipw_dump_nic_error_log(struct ipw_priv *priv)
+{
+	u32 desc, time, blink1, blink2, ilink1, ilink2, idata, i, count, base;
+
+	base = ipw_read32(priv, IPWSTATUS_ERROR_LOG);
+	count = ipw_read_reg32(priv, base);
+
+	if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
+		IPW_ERROR("Start IPW Error Log Dump:\n");
+		IPW_ERROR("Status: 0x%08X, Config: %08X\n",
+			  priv->status, priv->config);
+	}
+
+	for (i = ERROR_START_OFFSET;
+	     i <= count * ERROR_ELEM_SIZE;
+	     i += ERROR_ELEM_SIZE) {
+		desc   = ipw_read_reg32(priv, base + i);
+		time   = ipw_read_reg32(priv, base + i + 1*sizeof(u32));
+		blink1 = ipw_read_reg32(priv, base + i + 2*sizeof(u32));
+		blink2 = ipw_read_reg32(priv, base + i + 3*sizeof(u32));
+		ilink1 = ipw_read_reg32(priv, base + i + 4*sizeof(u32));
+		ilink2 = ipw_read_reg32(priv, base + i + 5*sizeof(u32));
+		idata =  ipw_read_reg32(priv, base + i + 6*sizeof(u32));
+
+		IPW_ERROR(
+			"%s %i 0x%08x  0x%08x  0x%08x  0x%08x  0x%08x\n",
+			ipw_error_desc(desc), time, blink1, blink2,
+			ilink1, ilink2, idata);
+	}
+}
+
+static void ipw_dump_nic_event_log(struct ipw_priv *priv)
+{
+	u32 ev, time, data, i, count, base;
+
+	base = ipw_read32(priv, IPW_EVENT_LOG);
+	count = ipw_read_reg32(priv, base);
+
+	if (EVENT_START_OFFSET <= count * EVENT_ELEM_SIZE)
+		IPW_ERROR("Start IPW Event Log Dump:\n");
+
+	for (i = EVENT_START_OFFSET;
+	     i <= count * EVENT_ELEM_SIZE;
+	     i += EVENT_ELEM_SIZE) {
+		ev = ipw_read_reg32(priv, base + i);
+		time  = ipw_read_reg32(priv, base + i + 1*sizeof(u32));
+		data  = ipw_read_reg32(priv, base + i + 2*sizeof(u32));
+
+#ifdef CONFIG_IPW_DEBUG
+		IPW_ERROR("%i\t0x%08x\t%i\n", time, data, ev);
+#endif
+	}
+}
+
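+/*
+ * Ordinals are values (statistics, configuration, etc.) maintained by the
+ * firmware and exported through three tables in device memory.  The table
+ * id is carried in the IPW_ORD_TABLE_ID_MASK bits of the ordinal number;
+ * this routine decodes it and copies the value into the caller supplied
+ * buffer, updating *len with the size actually needed/used.
+ */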
+static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val,
+			   u32 *len)
+{
+	u32 addr, field_info, field_len, field_count, total_len;
+
+	IPW_DEBUG_ORD("ordinal = %i\n", ord);
+
+	if (!priv || !val || !len) {
+		IPW_DEBUG_ORD("Invalid argument\n");
+		return -EINVAL;
+	}
+
+	/* verify device ordinal tables have been initialized */
+	if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
+		IPW_DEBUG_ORD("Access ordinals before initialization\n");
+		return -EINVAL;
+	}
+
+	switch (IPW_ORD_TABLE_ID_MASK & ord) {
+	case IPW_ORD_TABLE_0_MASK:
+		/*
+		 * TABLE 0: Direct access to a table of 32 bit values
+		 *
+		 * This is a very simple table with the data directly
+		 * read from the table
+		 */
+
+		/* remove the table id from the ordinal */
+		ord &= IPW_ORD_TABLE_VALUE_MASK;
+
+		/* boundary check */
+		if (ord > priv->table0_len) {
+			IPW_DEBUG_ORD("ordinal value (%i) larger than "
+				      "max (%i)\n", ord, priv->table0_len);
+			return -EINVAL;
+		}
+
+		/* verify we have enough room to store the value */
+		if (*len < sizeof(u32)) {
+			IPW_DEBUG_ORD("ordinal buffer length too small, "
+				      "need %zd\n", sizeof(u32));
+			return -EINVAL;
+		}
+
+		IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
+			      ord, priv->table0_addr + (ord  << 2));
+
+		*len = sizeof(u32);
+		ord <<= 2;
+		*((u32 *)val) = ipw_read32(priv, priv->table0_addr + ord);
+		break;
+
+	case IPW_ORD_TABLE_1_MASK:
+		/*
+		 * TABLE 1: Indirect access to a table of 32 bit values
+		 *
+		 * This is a fairly large table of u32 values each
+		 * representing starting addr for the data (which is
+		 * also a u32)
+		 */
+
+		/* remove the table id from the ordinal */
+		ord &= IPW_ORD_TABLE_VALUE_MASK;
+
+		/* boundary check */
+		if (ord > priv->table1_len) {
+			IPW_DEBUG_ORD("ordinal value out of range\n");
+			return -EINVAL;
+		}
+
+		/* verify we have enough room to store the value */
+		if (*len < sizeof(u32)) {
+			IPW_DEBUG_ORD("ordinal buffer length too small, "
+				      "need %zd\n", sizeof(u32));
+			return -EINVAL;
+		}
+
+		*((u32 *)val) = ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
+		*len = sizeof(u32);
+		break;
+
+	case IPW_ORD_TABLE_2_MASK:
+		/*
+		 * TABLE 2: Indirect access to a table of variable sized values
+		 *
+		 * This table consists of six values, each containing
+		 *     - a dword containing the starting offset of the data
+		 *     - a dword containing the length in the first 16 bits
+		 *       and the count in the second 16 bits
+		 */
+
+		/* remove the table id from the ordinal */
+		ord &= IPW_ORD_TABLE_VALUE_MASK;
+
+		/* boundary check */
+		if (ord > priv->table2_len) {
+			IPW_DEBUG_ORD("ordinal value out of range\n");
+			return -EINVAL;
+		}
+
+		/* get the address of statistic */
+		addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
+
+		/* get the second DW of statistics ;
+		 * two 16-bit words - first is length, second is count */
+		field_info = ipw_read_reg32(priv, priv->table2_addr + (ord << 3) + sizeof(u32));
+
+		/* get each entry length */
+		field_len = *((u16 *)&field_info);
+
+		/* get number of entries */
+		field_count = *(((u16 *)&field_info) + 1);
+
+		/* abort if there is not enough memory */
+		total_len = field_len * field_count;
+		if (total_len > *len) {
+			*len = total_len;
+			return -EINVAL;
+		}
+
+		*len = total_len;
+		if (!total_len)
+			return 0;
+
+		IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
+			      "field_info = 0x%08x\n",
+			      addr, total_len, field_info);
+		ipw_read_indirect(priv, addr, val, total_len);
+		break;
+
+	default:
+		IPW_DEBUG_ORD("Invalid ordinal!\n");
+		return -EINVAL;
+
+	}
+
+
+	return 0;
+}
+
+static void ipw_init_ordinals(struct ipw_priv *priv)
+{
+	priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
+	priv->table0_len = ipw_read32(priv, priv->table0_addr);
+
+	IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
+		      priv->table0_addr, priv->table0_len);
+
+	priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
+	priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
+
+	IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
+		      priv->table1_addr, priv->table1_len);
+
+	priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
+	priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
+	priv->table2_len &= 0x0000ffff; /* use first two bytes */
+
+	IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
+		      priv->table2_addr, priv->table2_len);
+
+}
+
+/*
+ * The following adds a new attribute to the sysfs representation
+ * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
+ * used for controlling the debug level.
+ *
+ * See the level definitions in ipw for details.
+ */
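+/*
+ * For example (assuming the driver registers itself under the name
+ * "ipw2200"), writing
+ *
+ *	echo 0x00000004 > /sys/bus/pci/drivers/ipw2200/debug_level
+ *
+ * would enable only the debug messages whose level bit is 0x4.
+ */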
+static ssize_t show_debug_level(struct device_driver *d, char *buf)
+{
+	return sprintf(buf, "0x%08X\n", ipw_debug_level);
+}
+static ssize_t store_debug_level(struct device_driver *d,
+				const char *buf, size_t count)
+{
+	char *p = (char *)buf;
+	u32 val;
+
+	if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
+		p++;
+		if (p[0] == 'x' || p[0] == 'X')
+			p++;
+		val = simple_strtoul(p, &p, 16);
+	} else
+		val = simple_strtoul(p, &p, 10);
+	if (p == buf)
+		printk(KERN_INFO DRV_NAME
+		       ": %s is not in hex or decimal form.\n", buf);
+	else
+		ipw_debug_level = val;
+
+	return strnlen(buf, count);
+}
+
+static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
+		   show_debug_level, store_debug_level);
+
+static ssize_t show_status(struct device *d,
+			struct device_attribute *attr, char *buf)
+{
+	struct ipw_priv *p = d->driver_data;
+	return sprintf(buf, "0x%08x\n", (int)p->status);
+}
+static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
+
+static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
+			char *buf)
+{
+	struct ipw_priv *p = d->driver_data;
+	return sprintf(buf, "0x%08x\n", (int)p->config);
+}
+static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
+
+static ssize_t show_nic_type(struct device *d,
+			struct device_attribute *attr, char *buf)
+{
+	struct ipw_priv *p = d->driver_data;
+	u8 type = p->eeprom[EEPROM_NIC_TYPE];
+
+	switch (type) {
+	case EEPROM_NIC_TYPE_STANDARD:
+		return sprintf(buf, "STANDARD\n");
+	case EEPROM_NIC_TYPE_DELL:
+		return sprintf(buf, "DELL\n");
+	case EEPROM_NIC_TYPE_FUJITSU:
+		return sprintf(buf, "FUJITSU\n");
+	case EEPROM_NIC_TYPE_IBM:
+		return sprintf(buf, "IBM\n");
+	case EEPROM_NIC_TYPE_HP:
+		return sprintf(buf, "HP\n");
+	}
+
+	return sprintf(buf, "UNKNOWN\n");
+}
+static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
+
+static ssize_t dump_error_log(struct device *d,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	char *p = (char *)buf;
+
+	if (p[0] == '1')
+		ipw_dump_nic_error_log((struct ipw_priv*)d->driver_data);
+
+	return strnlen(buf, count);
+}
+static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);
+
+static ssize_t dump_event_log(struct device *d,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	char *p = (char *)buf;
+
+	if (p[0] == '1')
+		ipw_dump_nic_event_log((struct ipw_priv*)d->driver_data);
+
+	return strnlen(buf, count);
+}
+static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log);
+
+static ssize_t show_ucode_version(struct device *d,
+			struct device_attribute *attr, char *buf)
+{
+	u32 len = sizeof(u32), tmp = 0;
+	struct ipw_priv *p = d->driver_data;
+
+	if(ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
+		return 0;
+
+	return sprintf(buf, "0x%08x\n", tmp);
+}
+static DEVICE_ATTR(ucode_version, S_IRUGO, show_ucode_version, NULL);
+
+static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
+			char *buf)
+{
+	u32 len = sizeof(u32), tmp = 0;
+	struct ipw_priv *p = d->driver_data;
+
+	if(ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
+		return 0;
+
+	return sprintf(buf, "0x%08x\n", tmp);
+}
+static DEVICE_ATTR(rtc, S_IRUGO, show_rtc, NULL);
+
+/*
+ * Add a device attribute to view/control the delay between eeprom
+ * operations.
+ */
+static ssize_t show_eeprom_delay(struct device *d,
+			struct device_attribute *attr, char *buf)
+{
+	int n = ((struct ipw_priv*)d->driver_data)->eeprom_delay;
+	return sprintf(buf, "%i\n", n);
+}
+static ssize_t store_eeprom_delay(struct device *d,
+			struct device_attribute *attr, const char *buf,
+			size_t count)
+{
+	struct ipw_priv *p = d->driver_data;
+	sscanf(buf, "%i", &p->eeprom_delay);
+	return strnlen(buf, count);
+}
+static DEVICE_ATTR(eeprom_delay, S_IWUSR|S_IRUGO,
+		   show_eeprom_delay,store_eeprom_delay);
+
+static ssize_t show_command_event_reg(struct device *d,
+			struct device_attribute *attr, char *buf)
+{
+	u32 reg = 0;
+	struct ipw_priv *p = d->driver_data;
+
+	reg = ipw_read_reg32(p, CX2_INTERNAL_CMD_EVENT);
+	return sprintf(buf, "0x%08x\n", reg);
+}
+static ssize_t store_command_event_reg(struct device *d,
+				struct device_attribute *attr, const char *buf,
+				size_t count)
+{
+	u32 reg;
+	struct ipw_priv *p = d->driver_data;
+
+	sscanf(buf, "%x", &reg);
+	ipw_write_reg32(p, CX2_INTERNAL_CMD_EVENT, reg);
+	return strnlen(buf, count);
+}
+static DEVICE_ATTR(command_event_reg, S_IWUSR|S_IRUGO,
+		   show_command_event_reg,store_command_event_reg);
+
+static ssize_t show_mem_gpio_reg(struct device *d,
+				struct device_attribute *attr, char *buf)
+{
+	u32 reg = 0;
+	struct ipw_priv *p = d->driver_data;
+
+	reg = ipw_read_reg32(p, 0x301100);
+	return sprintf(buf, "0x%08x\n", reg);
+}
+static ssize_t store_mem_gpio_reg(struct device *d,
+			struct device_attribute *attr, const char *buf,
+			size_t count)
+{
+	u32 reg;
+	struct ipw_priv *p = d->driver_data;
+
+	sscanf(buf, "%x", &reg);
+	ipw_write_reg32(p, 0x301100, reg);
+	return strnlen(buf, count);
+}
+static DEVICE_ATTR(mem_gpio_reg, S_IWUSR|S_IRUGO,
+		   show_mem_gpio_reg,store_mem_gpio_reg);
+
+static ssize_t show_indirect_dword(struct device *d,
+				struct device_attribute *attr, char *buf)
+{
+	u32 reg = 0;
+	struct ipw_priv *priv = d->driver_data;
+	if (priv->status & STATUS_INDIRECT_DWORD)
+		reg = ipw_read_reg32(priv, priv->indirect_dword);
+	else
+		reg = 0;
+
+	return sprintf(buf, "0x%08x\n", reg);
+}
+static ssize_t store_indirect_dword(struct device *d,
+				struct device_attribute *attr, const char *buf,
+				size_t count)
+{
+	struct ipw_priv *priv = d->driver_data;
+
+	sscanf(buf, "%x", &priv->indirect_dword);
+	priv->status |= STATUS_INDIRECT_DWORD;
+	return strnlen(buf, count);
+}
+static DEVICE_ATTR(indirect_dword, S_IWUSR|S_IRUGO,
+		   show_indirect_dword,store_indirect_dword);
+
+static ssize_t show_indirect_byte(struct device *d,
+			struct device_attribute *attr, char *buf)
+{
+	u8 reg = 0;
+	struct ipw_priv *priv = d->driver_data;
+	if (priv->status & STATUS_INDIRECT_BYTE)
+		reg = ipw_read_reg8(priv, priv->indirect_byte);
+	else
+		reg = 0;
+
+	return sprintf(buf, "0x%02x\n", reg);
+}
+static ssize_t store_indirect_byte(struct device *d,
+				struct device_attribute *attr, const char *buf,
+				size_t count)
+{
+	struct ipw_priv *priv = d->driver_data;
+
+	sscanf(buf, "%x", &priv->indirect_byte);
+	priv->status |= STATUS_INDIRECT_BYTE;
+	return strnlen(buf, count);
+}
+static DEVICE_ATTR(indirect_byte, S_IWUSR|S_IRUGO,
+		   show_indirect_byte, store_indirect_byte);
+
+static ssize_t show_direct_dword(struct device *d,
+				struct device_attribute *attr, char *buf)
+{
+	u32 reg = 0;
+	struct ipw_priv *priv = d->driver_data;
+
+	if (priv->status & STATUS_DIRECT_DWORD)
+		reg = ipw_read32(priv, priv->direct_dword);
+	else
+		reg = 0;
+
+	return sprintf(buf, "0x%08x\n", reg);
+}
+static ssize_t store_direct_dword(struct device *d,
+			struct device_attribute *attr, const char *buf,
+			size_t count)
+{
+	struct ipw_priv *priv = d->driver_data;
+
+	sscanf(buf, "%x", &priv->direct_dword);
+	priv->status |= STATUS_DIRECT_DWORD;
+	return strnlen(buf, count);
+}
+static DEVICE_ATTR(direct_dword, S_IWUSR|S_IRUGO,
+		   show_direct_dword,store_direct_dword);
+
+
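+/* Query the hardware RF kill switch: bit 0x10000 of register 0x30 reads as
+ * zero while the switch is engaged.  The result is cached in priv->status
+ * as STATUS_RF_KILL_HW. */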
+static inline int rf_kill_active(struct ipw_priv *priv)
+{
+	if (0 == (ipw_read32(priv, 0x30) & 0x10000))
+		priv->status |= STATUS_RF_KILL_HW;
+	else
+		priv->status &= ~STATUS_RF_KILL_HW;
+
+	return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
+}
+
+static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
+				char *buf)
+{
+	/* 0 - RF kill not enabled
+	   1 - SW based RF kill active (sysfs)
+	   2 - HW based RF kill active
+	   3 - Both HW and SW based RF kill active */
+	struct ipw_priv *priv = d->driver_data;
+	int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
+		(rf_kill_active(priv) ? 0x2 : 0x0);
+	return sprintf(buf, "%i\n", val);
+}
+
+static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
+{
+	if ((disable_radio ? 1 : 0) ==
+	    (priv->status & STATUS_RF_KILL_SW ? 1 : 0))
+		return 0;
+
+	IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO  %s\n",
+			  disable_radio ? "OFF" : "ON");
+
+	if (disable_radio) {
+		priv->status |= STATUS_RF_KILL_SW;
+
+		if (priv->workqueue) {
+			cancel_delayed_work(&priv->request_scan);
+		}
+		wake_up_interruptible(&priv->wait_command_queue);
+		queue_work(priv->workqueue, &priv->down);
+	} else {
+		priv->status &= ~STATUS_RF_KILL_SW;
+		if (rf_kill_active(priv)) {
+			IPW_DEBUG_RF_KILL("Can not turn radio back on - "
+					  "disabled by HW switch\n");
+			/* Make sure the RF_KILL check timer is running */
+			cancel_delayed_work(&priv->rf_kill);
+			queue_delayed_work(priv->workqueue, &priv->rf_kill,
+					   2 * HZ);
+		} else
+			queue_work(priv->workqueue, &priv->up);
+	}
+
+	return 1;
+}
+
+static ssize_t store_rf_kill(struct device *d,  struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	struct ipw_priv *priv = d->driver_data;
+
+	ipw_radio_kill_sw(priv, buf[0] == '1');
+
+	return count;
+}
+static DEVICE_ATTR(rf_kill, S_IWUSR|S_IRUGO, show_rf_kill, store_rf_kill);
+
+static void ipw_irq_tasklet(struct ipw_priv *priv)
+{
+	u32 inta, inta_mask, handled = 0;
+	unsigned long flags;
+	int rc = 0;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	inta = ipw_read32(priv, CX2_INTA_RW);
+	inta_mask = ipw_read32(priv, CX2_INTA_MASK_R);
+	inta &= (CX2_INTA_MASK_ALL & inta_mask);
+
+	/* Add any cached INTA values that need to be handled */
+	inta |= priv->isr_inta;
+
+	/* handle each interrupt source that is flagged */
+	if (inta & CX2_INTA_BIT_RX_TRANSFER) {
+		ipw_rx(priv);
+		handled |= CX2_INTA_BIT_RX_TRANSFER;
+	}
+
+	if (inta & CX2_INTA_BIT_TX_CMD_QUEUE) {
+		IPW_DEBUG_HC("Command completed.\n");
+		rc = ipw_queue_tx_reclaim( priv, &priv->txq_cmd, -1);
+		priv->status &= ~STATUS_HCMD_ACTIVE;
+		wake_up_interruptible(&priv->wait_command_queue);
+		handled |= CX2_INTA_BIT_TX_CMD_QUEUE;
+	}
+
+	if (inta & CX2_INTA_BIT_TX_QUEUE_1) {
+		IPW_DEBUG_TX("TX_QUEUE_1\n");
+		rc = ipw_queue_tx_reclaim( priv, &priv->txq[0], 0);
+		handled |= CX2_INTA_BIT_TX_QUEUE_1;
+	}
+
+	if (inta & CX2_INTA_BIT_TX_QUEUE_2) {
+		IPW_DEBUG_TX("TX_QUEUE_2\n");
+		rc = ipw_queue_tx_reclaim( priv, &priv->txq[1], 1);
+		handled |= CX2_INTA_BIT_TX_QUEUE_2;
+	}
+
+	if (inta & CX2_INTA_BIT_TX_QUEUE_3) {
+		IPW_DEBUG_TX("TX_QUEUE_3\n");
+		rc = ipw_queue_tx_reclaim( priv, &priv->txq[2], 2);
+		handled |= CX2_INTA_BIT_TX_QUEUE_3;
+	}
+
+	if (inta & CX2_INTA_BIT_TX_QUEUE_4) {
+		IPW_DEBUG_TX("TX_QUEUE_4\n");
+		rc = ipw_queue_tx_reclaim( priv, &priv->txq[3], 3);
+		handled |= CX2_INTA_BIT_TX_QUEUE_4;
+	}
+
+	if (inta & CX2_INTA_BIT_STATUS_CHANGE) {
+		IPW_WARNING("STATUS_CHANGE\n");
+		handled |= CX2_INTA_BIT_STATUS_CHANGE;
+	}
+
+	if (inta & CX2_INTA_BIT_BEACON_PERIOD_EXPIRED) {
+		IPW_WARNING("BEACON_PERIOD_EXPIRED\n");
+		handled |= CX2_INTA_BIT_BEACON_PERIOD_EXPIRED;
+	}
+
+	if (inta & CX2_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
+		IPW_WARNING("HOST_CMD_DONE\n");
+		handled |= CX2_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
+	}
+
+	if (inta & CX2_INTA_BIT_FW_INITIALIZATION_DONE) {
+		IPW_WARNING("FW_INITIALIZATION_DONE\n");
+		handled |= CX2_INTA_BIT_FW_INITIALIZATION_DONE;
+	}
+
+	if (inta & CX2_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
+		IPW_WARNING("PHY_OFF_DONE\n");
+		handled |= CX2_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
+	}
+
+	if (inta & CX2_INTA_BIT_RF_KILL_DONE) {
+		IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
+		priv->status |= STATUS_RF_KILL_HW;
+		wake_up_interruptible(&priv->wait_command_queue);
+		netif_carrier_off(priv->net_dev);
+		netif_stop_queue(priv->net_dev);
+		cancel_delayed_work(&priv->request_scan);
+		queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
+		handled |= CX2_INTA_BIT_RF_KILL_DONE;
+	}
+
+	if (inta & CX2_INTA_BIT_FATAL_ERROR) {
+		IPW_ERROR("Firmware error detected.  Restarting.\n");
+#ifdef CONFIG_IPW_DEBUG
+		if (ipw_debug_level & IPW_DL_FW_ERRORS) {
+			ipw_dump_nic_error_log(priv);
+			ipw_dump_nic_event_log(priv);
+		}
+#endif
+		queue_work(priv->workqueue, &priv->adapter_restart);
+		handled |= CX2_INTA_BIT_FATAL_ERROR;
+	}
+
+	if (inta & CX2_INTA_BIT_PARITY_ERROR) {
+		IPW_ERROR("Parity error\n");
+		handled |= CX2_INTA_BIT_PARITY_ERROR;
+	}
+
+	if (handled != inta) {
+		IPW_ERROR("Unhandled INTA bits 0x%08x\n",
+				inta & ~handled);
+	}
+
+	/* enable all interrupts */
+	ipw_enable_interrupts(priv);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+#ifdef CONFIG_IPW_DEBUG
+#define IPW_CMD(x) case IPW_CMD_ ## x : return #x
+static char *get_cmd_string(u8 cmd)
+{
+	switch (cmd) {
+		IPW_CMD(HOST_COMPLETE);
+		IPW_CMD(POWER_DOWN);
+		IPW_CMD(SYSTEM_CONFIG);
+		IPW_CMD(MULTICAST_ADDRESS);
+		IPW_CMD(SSID);
+		IPW_CMD(ADAPTER_ADDRESS);
+		IPW_CMD(PORT_TYPE);
+		IPW_CMD(RTS_THRESHOLD);
+		IPW_CMD(FRAG_THRESHOLD);
+		IPW_CMD(POWER_MODE);
+		IPW_CMD(WEP_KEY);
+		IPW_CMD(TGI_TX_KEY);
+		IPW_CMD(SCAN_REQUEST);
+		IPW_CMD(SCAN_REQUEST_EXT);
+		IPW_CMD(ASSOCIATE);
+		IPW_CMD(SUPPORTED_RATES);
+		IPW_CMD(SCAN_ABORT);
+		IPW_CMD(TX_FLUSH);
+		IPW_CMD(QOS_PARAMETERS);
+		IPW_CMD(DINO_CONFIG);
+		IPW_CMD(RSN_CAPABILITIES);
+		IPW_CMD(RX_KEY);
+		IPW_CMD(CARD_DISABLE);
+		IPW_CMD(SEED_NUMBER);
+		IPW_CMD(TX_POWER);
+		IPW_CMD(COUNTRY_INFO);
+		IPW_CMD(AIRONET_INFO);
+		IPW_CMD(AP_TX_POWER);
+		IPW_CMD(CCKM_INFO);
+		IPW_CMD(CCX_VER_INFO);
+		IPW_CMD(SET_CALIBRATION);
+		IPW_CMD(SENSITIVITY_CALIB);
+		IPW_CMD(RETRY_LIMIT);
+		IPW_CMD(IPW_PRE_POWER_DOWN);
+		IPW_CMD(VAP_BEACON_TEMPLATE);
+		IPW_CMD(VAP_DTIM_PERIOD);
+		IPW_CMD(EXT_SUPPORTED_RATES);
+		IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
+		IPW_CMD(VAP_QUIET_INTERVALS);
+		IPW_CMD(VAP_CHANNEL_SWITCH);
+		IPW_CMD(VAP_MANDATORY_CHANNELS);
+		IPW_CMD(VAP_CELL_PWR_LIMIT);
+		IPW_CMD(VAP_CF_PARAM_SET);
+		IPW_CMD(VAP_SET_BEACONING_STATE);
+		IPW_CMD(MEASUREMENT);
+		IPW_CMD(POWER_CAPABILITY);
+		IPW_CMD(SUPPORTED_CHANNELS);
+		IPW_CMD(TPC_REPORT);
+		IPW_CMD(WME_INFO);
+		IPW_CMD(PRODUCTION_COMMAND);
+	default:
+		return "UNKNOWN";
+	}
+}
+#endif /* CONFIG_IPW_DEBUG */
+
+#define HOST_COMPLETE_TIMEOUT HZ
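+/*
+ * Send a host command to the firmware and wait (up to HOST_COMPLETE_TIMEOUT)
+ * for it to complete: STATUS_HCMD_ACTIVE is set here, the command is queued
+ * on the command TX queue, and the interrupt tasklet clears the flag and
+ * wakes wait_command_queue when the TX_CMD_QUEUE interrupt fires.
+ */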
+static int ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
+{
+	int rc = 0;
+
+	if (priv->status & STATUS_HCMD_ACTIVE) {
+		IPW_ERROR("Already sending a command\n");
+		return -1;
+	}
+
+	priv->status |= STATUS_HCMD_ACTIVE;
+
+	IPW_DEBUG_HC("Sending %s command (#%d), %d bytes\n",
+		     get_cmd_string(cmd->cmd), cmd->cmd, cmd->len);
+	printk_buf(IPW_DL_HOST_COMMAND, (u8*)cmd->param, cmd->len);
+
+	rc = ipw_queue_tx_hcmd(priv, cmd->cmd, &cmd->param, cmd->len, 0);
+	if (rc)
+		return rc;
+
+	rc = wait_event_interruptible_timeout(
+		priv->wait_command_queue, !(priv->status & STATUS_HCMD_ACTIVE),
+		HOST_COMPLETE_TIMEOUT);
+	if (rc == 0) {
+		IPW_DEBUG_INFO("Command completion timed out after %dms.\n",
+			       jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
+		priv->status &= ~STATUS_HCMD_ACTIVE;
+		return -EIO;
+	}
+	if (priv->status & STATUS_RF_KILL_MASK) {
+		IPW_DEBUG_INFO("Command aborted due to RF Kill Switch\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int ipw_send_host_complete(struct ipw_priv *priv)
+{
+	struct host_cmd cmd = {
+		.cmd = IPW_CMD_HOST_COMPLETE,
+		.len = 0
+	};
+
+	if (!priv) {
+		IPW_ERROR("Invalid args\n");
+		return -1;
+	}
+
+	if (ipw_send_cmd(priv, &cmd)) {
+		IPW_ERROR("failed to send HOST_COMPLETE command\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static int ipw_send_system_config(struct ipw_priv *priv,
+				  struct ipw_sys_config *config)
+{
+	struct host_cmd cmd = {
+		.cmd = IPW_CMD_SYSTEM_CONFIG,
+		.len = sizeof(*config)
+	};
+
+	if (!priv || !config) {
+		IPW_ERROR("Invalid args\n");
+		return -1;
+	}
+
+	memcpy(&cmd.param,config,sizeof(*config));
+	if (ipw_send_cmd(priv, &cmd)) {
+		IPW_ERROR("failed to send SYSTEM_CONFIG command\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static int ipw_send_ssid(struct ipw_priv *priv, u8 *ssid, int len)
+{
+	struct host_cmd cmd = {
+		.cmd = IPW_CMD_SSID,
+		.len = min(len, IW_ESSID_MAX_SIZE)
+	};
+
+	if (!priv || !ssid) {
+		IPW_ERROR("Invalid args\n");
+		return -1;
+	}
+
+	memcpy(&cmd.param, ssid, cmd.len);
+	if (ipw_send_cmd(priv, &cmd)) {
+		IPW_ERROR("failed to send SSID command\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static int ipw_send_adapter_address(struct ipw_priv *priv, u8 *mac)
+{
+	struct host_cmd cmd = {
+		.cmd = IPW_CMD_ADAPTER_ADDRESS,
+		.len = ETH_ALEN
+	};
+
+	if (!priv || !mac) {
+		IPW_ERROR("Invalid args\n");
+		return -1;
+	}
+
+	IPW_DEBUG_INFO("%s: Setting MAC to " MAC_FMT "\n",
+		       priv->net_dev->name, MAC_ARG(mac));
+
+	memcpy(&cmd.param, mac, ETH_ALEN);
+
+	if (ipw_send_cmd(priv, &cmd)) {
+		IPW_ERROR("failed to send ADAPTER_ADDRESS command\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static void ipw_adapter_restart(void *adapter)
+{
+	struct ipw_priv *priv = adapter;
+
+	if (priv->status & STATUS_RF_KILL_MASK)
+		return;
+
+	ipw_down(priv);
+	if (ipw_up(priv)) {
+		IPW_ERROR("Failed to up device\n");
+		return;
+	}
+}
+
+
+
+
+#define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
+
+static void ipw_scan_check(void *data)
+{
+	struct ipw_priv *priv = data;
+	if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
+		IPW_DEBUG_SCAN("Scan completion watchdog resetting "
+			       "adapter (%dms).\n",
+			       jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
+		ipw_adapter_restart(priv);
+	}
+}
+
+static int ipw_send_scan_request_ext(struct ipw_priv *priv,
+				     struct ipw_scan_request_ext *request)
+{
+	struct host_cmd cmd = {
+		.cmd = IPW_CMD_SCAN_REQUEST_EXT,
+		.len = sizeof(*request)
+	};
+
+	if (!priv || !request) {
+		IPW_ERROR("Invalid args\n");
+		return -1;
+	}
+
+	memcpy(&cmd.param,request,sizeof(*request));
+	if (ipw_send_cmd(priv, &cmd)) {
+		IPW_ERROR("failed to send SCAN_REQUEST_EXT command\n");
+		return -1;
+	}
+
+	queue_delayed_work(priv->workqueue, &priv->scan_check,
+			   IPW_SCAN_CHECK_WATCHDOG);
+	return 0;
+}
+
+static int ipw_send_scan_abort(struct ipw_priv *priv)
+{
+	struct host_cmd cmd = {
+		.cmd = IPW_CMD_SCAN_ABORT,
+		.len = 0
+	};
+
+	if (!priv) {
+		IPW_ERROR("Invalid args\n");
+		return -1;
+	}
+
+	if (ipw_send_cmd(priv, &cmd)) {
+		IPW_ERROR("failed to send SCAN_ABORT command\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
+{
+	struct host_cmd cmd = {
+		.cmd = IPW_CMD_SENSITIVITY_CALIB,
+		.len = sizeof(struct ipw_sensitivity_calib)
+	};
+	struct ipw_sensitivity_calib *calib = (struct ipw_sensitivity_calib *)
+		&cmd.param;
+	calib->beacon_rssi_raw = sens;
+	if (ipw_send_cmd(priv, &cmd)) {
+		IPW_ERROR("failed to send SENSITIVITY CALIB command\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static int ipw_send_associate(struct ipw_priv *priv,
+			      struct ipw_associate *associate)
+{
+	struct host_cmd cmd = {
+		.cmd = IPW_CMD_ASSOCIATE,
+		.len = sizeof(*associate)
+	};
+
+	if (!priv || !associate) {
+		IPW_ERROR("Invalid args\n");
+		return -1;
+	}
+
+	memcpy(&cmd.param,associate,sizeof(*associate));
+	if (ipw_send_cmd(priv, &cmd)) {
+		IPW_ERROR("failed to send ASSOCIATE command\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static int ipw_send_supported_rates(struct ipw_priv *priv,
+				    struct ipw_supported_rates *rates)
+{
+	struct host_cmd cmd = {
+		.cmd = IPW_CMD_SUPPORTED_RATES,
+		.len = sizeof(*rates)
+	};
+
+	if (!priv || !rates) {
+		IPW_ERROR("Invalid args\n");
+		return -1;
+	}
+
+	memcpy(&cmd.param,rates,sizeof(*rates));
+	if (ipw_send_cmd(priv, &cmd)) {
+		IPW_ERROR("failed to send SUPPORTED_RATES command\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static int ipw_set_random_seed(struct ipw_priv *priv)
+{
+	struct host_cmd cmd = {
+		.cmd = IPW_CMD_SEED_NUMBER,
+		.len = sizeof(u32)
+	};
+
+	if (!priv) {
+		IPW_ERROR("Invalid args\n");
+		return -1;
+	}
+
+	get_random_bytes(&cmd.param, sizeof(u32));
+
+	if (ipw_send_cmd(priv, &cmd)) {
+		IPW_ERROR("failed to send SEED_NUMBER command\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+#if 0
+static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
+{
+	struct host_cmd cmd = {
+		.cmd = IPW_CMD_CARD_DISABLE,
+		.len = sizeof(u32)
+	};
+
+	if (!priv) {
+		IPW_ERROR("Invalid args\n");
+		return -1;
+	}
+
+	*((u32*)&cmd.param) = phy_off;
+
+	if (ipw_send_cmd(priv, &cmd)) {
+		IPW_ERROR("failed to send CARD_DISABLE command\n");
+		return -1;
+	}
+
+	return 0;
+}
+#endif
+
+static int ipw_send_tx_power(struct ipw_priv *priv,
+			     struct ipw_tx_power *power)
+{
+	struct host_cmd cmd = {
+		.cmd = IPW_CMD_TX_POWER,
+		.len = sizeof(*power)
+	};
+
+	if (!priv || !power) {
+		IPW_ERROR("Invalid args\n");
+		return -1;
+	}
+
+	memcpy(&cmd.param,power,sizeof(*power));
+	if (ipw_send_cmd(priv, &cmd)) {
+		IPW_ERROR("failed to send TX_POWER command\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
+{
+	struct ipw_rts_threshold rts_threshold = {
+		.rts_threshold = rts,
+	};
+	struct host_cmd cmd = {
+		.cmd = IPW_CMD_RTS_THRESHOLD,
+		.len = sizeof(rts_threshold)
+	};
+
+	if (!priv) {
+		IPW_ERROR("Invalid args\n");
+		return -1;
+	}
+
+	memcpy(&cmd.param, &rts_threshold, sizeof(rts_threshold));
+	if (ipw_send_cmd(priv, &cmd)) {
+		IPW_ERROR("failed to send RTS_THRESHOLD command\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
+{
+	struct ipw_frag_threshold frag_threshold = {
+		.frag_threshold = frag,
+	};
+	struct host_cmd cmd = {
+		.cmd = IPW_CMD_FRAG_THRESHOLD,
+		.len = sizeof(frag_threshold)
+	};
+
+	if (!priv) {
+		IPW_ERROR("Invalid args\n");
+		return -1;
+	}
+
+	memcpy(&cmd.param, &frag_threshold, sizeof(frag_threshold));
+	if (ipw_send_cmd(priv, &cmd)) {
+		IPW_ERROR("failed to send FRAG_THRESHOLD command\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
+{
+	struct host_cmd cmd = {
+		.cmd = IPW_CMD_POWER_MODE,
+		.len = sizeof(u32)
+	};
+	u32 *param = (u32*)(&cmd.param);
+
+	if (!priv) {
+		IPW_ERROR("Invalid args\n");
+		return -1;
+	}
+
+	/* If on battery, set to power level 3; if on AC, set to CAM;
+	 * otherwise pass the requested level through */
+	switch (mode) {
+	case IPW_POWER_BATTERY:
+		*param = IPW_POWER_INDEX_3;
+		break;
+	case IPW_POWER_AC:
+		*param = IPW_POWER_MODE_CAM;
+		break;
+	default:
+		*param = mode;
+		break;
+	}
+
+	if (ipw_send_cmd(priv, &cmd)) {
+		IPW_ERROR("failed to send POWER_MODE command\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * The IPW device contains a Microwire compatible EEPROM that stores
+ * various data like the MAC address.  Usually the firmware has exclusive
+ * access to the eeprom, but during device initialization (before the
+ * device driver has sent the HostComplete command to the firmware) the
+ * device driver has read access to the EEPROM by way of indirect addressing
+ * through a couple of memory mapped registers.
+ *
+ * The following is a simplified implementation for pulling data out of
+ * the eeprom, along with some helper functions to find information in
+ * the per device private data's copy of the eeprom.
+ *
+ * NOTE: To better understand how these functions work (i.e what is a chip
+ *       select and why do we have to keep driving the eeprom clock?), read
+ *       just about any data sheet for a Microwire compatible EEPROM.
+ */
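+/*
+ * A single 16 bit read, as implemented below, goes roughly: assert chip
+ * select, clock out the READ opcode and the 8 bit address one bit at a
+ * time, clock in 16 data bits, then deassert chip select.
+ */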
+
+/* write a 32 bit value into the indirect accessor register */
+static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
+{
+	ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
+
+	/* the eeprom requires some time to complete the operation */
+	udelay(p->eeprom_delay);
+
+	return;
+}
+
+/* perform a chip select operation */
+static inline void eeprom_cs(struct ipw_priv* priv)
+{
+	eeprom_write_reg(priv,0);
+	eeprom_write_reg(priv,EEPROM_BIT_CS);
+	eeprom_write_reg(priv,EEPROM_BIT_CS|EEPROM_BIT_SK);
+	eeprom_write_reg(priv,EEPROM_BIT_CS);
+}
+
+/* deassert (disable) chip select */
+static inline void eeprom_disable_cs(struct ipw_priv* priv)
+{
+	eeprom_write_reg(priv,EEPROM_BIT_CS);
+	eeprom_write_reg(priv,0);
+	eeprom_write_reg(priv,EEPROM_BIT_SK);
+}
+
+/* push a single bit down to the eeprom */
+static inline void eeprom_write_bit(struct ipw_priv *p,u8 bit)
+{
+	int d = ( bit ? EEPROM_BIT_DI : 0);
+	eeprom_write_reg(p,EEPROM_BIT_CS|d);
+	eeprom_write_reg(p,EEPROM_BIT_CS|d|EEPROM_BIT_SK);
+}
+
+/* push an opcode followed by an address down to the eeprom */
+static void eeprom_op(struct ipw_priv* priv, u8 op, u8 addr)
+{
+	int i;
+
+	eeprom_cs(priv);
+	eeprom_write_bit(priv,1);
+	eeprom_write_bit(priv,op&2);
+	eeprom_write_bit(priv,op&1);
+	for ( i=7; i>=0; i-- ) {
+		eeprom_write_bit(priv,addr&(1<<i));
+	}
+}
+
+/* pull 16 bits off the eeprom, one bit at a time */
+static u16 eeprom_read_u16(struct ipw_priv* priv, u8 addr)
+{
+	int i;
+	u16 r=0;
+
+	/* Send READ Opcode */
+	eeprom_op(priv,EEPROM_CMD_READ,addr);
+
+	/* Send dummy bit */
+	eeprom_write_reg(priv,EEPROM_BIT_CS);
+
+	/* Read the byte off the eeprom one bit at a time */
+	for ( i=0; i<16; i++ ) {
+		u32 data = 0;
+		eeprom_write_reg(priv,EEPROM_BIT_CS|EEPROM_BIT_SK);
+		eeprom_write_reg(priv,EEPROM_BIT_CS);
+		data = ipw_read_reg32(priv,FW_MEM_REG_EEPROM_ACCESS);
+		r = (r<<1) | ((data & EEPROM_BIT_DO)?1:0);
+	}
+
+	/* Send another dummy bit */
+	eeprom_write_reg(priv,0);
+	eeprom_disable_cs(priv);
+
+	return r;
+}
+
+/* helper function for pulling the mac address out of the private */
+/* data's copy of the eeprom data                                 */
+static void eeprom_parse_mac(struct ipw_priv* priv, u8* mac)
+{
+	u8* ee = (u8*)priv->eeprom;
+	memcpy(mac, &ee[EEPROM_MAC_ADDRESS], 6);
+}
+
+/*
+ * Either the device driver (i.e. the host) or the firmware can
+ * load eeprom data into the designated region in SRAM.  If neither
+ * happens then the FW will shutdown with a fatal error.
+ *
+ * In order to signal the FW to load the EEPROM itself, the
+ * EEPROM_LOAD_DISABLE region of shared SRAM needs to be non-zero.
+ */
+static void ipw_eeprom_init_sram(struct ipw_priv *priv)
+{
+	int i;
+	u16 *eeprom = (u16 *)priv->eeprom;
+
+	IPW_DEBUG_TRACE(">>\n");
+
+	/* read entire contents of eeprom into private buffer */
+	for ( i=0; i<128; i++ )
+		eeprom[i] = eeprom_read_u16(priv,(u8)i);
+
+	/*
+	   If the data looks correct, then copy it to our private
+	   copy.  Otherwise let the firmware know to perform the operation
+	   on its own.
+	*/
+	if (priv->eeprom[EEPROM_VERSION] != 0) {
+		IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
+
+		/* write the eeprom data to sram */
+		for( i=0; i<CX2_EEPROM_IMAGE_SIZE; i++ )
+			ipw_write8(priv, IPW_EEPROM_DATA + i,
+				   priv->eeprom[i]);
+
+		/* Do not load eeprom data on fatal error or suspend */
+		ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
+	} else {
+		IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n");
+
+		/* Load eeprom data on fatal error or suspend */
+		ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
+	}
+
+	IPW_DEBUG_TRACE("<<\n");
+}
+
+
+static inline void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
+{
+	count >>= 2;
+	if (!count) return;
+	_ipw_write32(priv, CX2_AUTOINC_ADDR, start);
+	while (count--)
+		_ipw_write32(priv, CX2_AUTOINC_DATA, 0);
+}
+
+static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
+{
+	ipw_zero_memory(priv, CX2_SHARED_SRAM_DMA_CONTROL,
+			CB_NUMBER_OF_ELEMENTS_SMALL *
+			sizeof(struct command_block));
+}
+
+static int ipw_fw_dma_enable(struct ipw_priv *priv)
+{ /* start dma engine but no transfers yet*/
+
+	IPW_DEBUG_FW(">> : \n");
+
+	/* Start the dma */
+	ipw_fw_dma_reset_command_blocks(priv);
+
+	/* Write CB base address */
+	ipw_write_reg32(priv, CX2_DMA_I_CB_BASE, CX2_SHARED_SRAM_DMA_CONTROL);
+
+	IPW_DEBUG_FW("<< : \n");
+	return 0;
+}
+
+static void ipw_fw_dma_abort(struct ipw_priv *priv)
+{
+	u32 control = 0;
+
+	IPW_DEBUG_FW(">> :\n");
+
+	/* set the Stop and Abort bits */
+	control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
+	ipw_write_reg32(priv, CX2_DMA_I_DMA_CONTROL, control);
+	priv->sram_desc.last_cb_index = 0;
+
+	IPW_DEBUG_FW("<< \n");
+}
+
+static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index, struct command_block *cb)
+{
+	u32 address = CX2_SHARED_SRAM_DMA_CONTROL + (sizeof(struct command_block) * index);
+	IPW_DEBUG_FW(">> :\n");
+
+	ipw_write_indirect(priv, address, (u8*)cb, (int)sizeof(struct command_block));
+
+	IPW_DEBUG_FW("<< :\n");
+	return 0;
+
+}
+
+static int ipw_fw_dma_kick(struct ipw_priv *priv)
+{
+	u32 control = 0;
+	u32 index=0;
+
+	IPW_DEBUG_FW(">> :\n");
+
+	for (index = 0; index < priv->sram_desc.last_cb_index; index++)
+		ipw_fw_dma_write_command_block(priv, index, &priv->sram_desc.cb_list[index]);
+
+	/* Enable the DMA in the CSR register */
+	ipw_clear_bit(priv, CX2_RESET_REG,CX2_RESET_REG_MASTER_DISABLED | CX2_RESET_REG_STOP_MASTER);
+
+        /* Set the Start bit. */
+	control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
+	ipw_write_reg32(priv, CX2_DMA_I_DMA_CONTROL, control);
+
+	IPW_DEBUG_FW("<< :\n");
+	return 0;
+}
+
+static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
+{
+	u32 address;
+	u32 register_value=0;
+	u32 cb_fields_address=0;
+
+	IPW_DEBUG_FW(">> :\n");
+	address = ipw_read_reg32(priv,CX2_DMA_I_CURRENT_CB);
+	IPW_DEBUG_FW_INFO("Current CB is 0x%x \n",address);
+
+	/* Read the DMA control register */
+	register_value = ipw_read_reg32(priv, CX2_DMA_I_DMA_CONTROL);
+	IPW_DEBUG_FW_INFO("CX2_DMA_I_DMA_CONTROL is 0x%x \n",register_value);
+
+	/* Print the CB values*/
+	cb_fields_address = address;
+	register_value = ipw_read_reg32(priv, cb_fields_address);
+	IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n",register_value);
+
+	cb_fields_address += sizeof(u32);
+	register_value = ipw_read_reg32(priv, cb_fields_address);
+	IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n",register_value);
+
+	cb_fields_address += sizeof(u32);
+	register_value = ipw_read_reg32(priv, cb_fields_address);
+	IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
+			  register_value);
+
+	cb_fields_address += sizeof(u32);
+	register_value = ipw_read_reg32(priv, cb_fields_address);
+	IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n",register_value);
+
+	IPW_DEBUG_FW("<< :\n");
+}
+
+static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
+{
+	u32 current_cb_address = 0;
+	u32 current_cb_index = 0;
+
+	IPW_DEBUG_FW(">> :\n");
+	current_cb_address = ipw_read_reg32(priv, CX2_DMA_I_CURRENT_CB);
+
+	current_cb_index = (current_cb_address - CX2_SHARED_SRAM_DMA_CONTROL )/
+		sizeof (struct command_block);
+
+	IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
+			  current_cb_index, current_cb_address );
+
+	IPW_DEBUG_FW("<< :\n");
+	return current_cb_index;
+
+}
+
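+/*
+ * Queue one DMA descriptor ("command block") in the host side shadow list
+ * (priv->sram_desc).  ipw_fw_dma_kick() later copies the queued blocks into
+ * shared SRAM and starts the DMA engine, and ipw_fw_dma_wait() polls the
+ * current CB index until every queued block has been consumed.
+ */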
+static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
+					u32 src_address,
+					u32 dest_address,
+					u32 length,
+					int interrupt_enabled,
+					int is_last)
+{
+
+	u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
+		CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
+		CB_DEST_SIZE_LONG;
+	struct command_block *cb;
+	u32 last_cb_element=0;
+
+	IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
+			  src_address, dest_address, length);
+
+	if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
+		return -1;
+
+	last_cb_element = priv->sram_desc.last_cb_index;
+	cb = &priv->sram_desc.cb_list[last_cb_element];
+	priv->sram_desc.last_cb_index++;
+
+	/* Calculate the new CB control word */
+	if (interrupt_enabled )
+		control |= CB_INT_ENABLED;
+
+	if (is_last)
+		control |= CB_LAST_VALID;
+
+	control |= length;
+
+	/* Calculate the CB Element's checksum value */
+	cb->status = control ^src_address ^dest_address;
+
+	/* Copy the Source and Destination addresses */
+	cb->dest_addr = dest_address;
+	cb->source_addr = src_address;
+
+	/* Copy the Control Word last */
+	cb->control = control;
+
+	return 0;
+}
+
+static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
+				 u32 src_phys,
+				 u32 dest_address,
+				 u32 length)
+{
+	u32 bytes_left = length;
+	u32 src_offset=0;
+	u32 dest_offset=0;
+	int status = 0;
+	IPW_DEBUG_FW(">> \n");
+	IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
+			  src_phys, dest_address, length);
+	while (bytes_left > CB_MAX_LENGTH) {
+		status = ipw_fw_dma_add_command_block( priv,
+						       src_phys + src_offset,
+						       dest_address + dest_offset,
+						       CB_MAX_LENGTH, 0, 0);
+		if (status) {
+			IPW_DEBUG_FW_INFO(": Failed\n");
+			return -1;
+		} else
+			IPW_DEBUG_FW_INFO(": Added new cb\n");
+
+		src_offset += CB_MAX_LENGTH;
+		dest_offset += CB_MAX_LENGTH;
+		bytes_left -= CB_MAX_LENGTH;
+	}
+
+	/* add the buffer tail */
+	if (bytes_left > 0) {
+		status = ipw_fw_dma_add_command_block(
+			priv, src_phys + src_offset,
+			dest_address + dest_offset,
+			bytes_left, 0, 0);
+		if (status) {
+			IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
+			return -1;
+		} else
+			IPW_DEBUG_FW_INFO(": Adding new cb - the buffer tail\n");
+	}
+
+
+	IPW_DEBUG_FW("<< \n");
+	return 0;
+}
+
+static int ipw_fw_dma_wait(struct ipw_priv *priv)
+{
+	u32 current_index = 0;
+	u32 watchdog = 0;
+
+	IPW_DEBUG_FW(">> : \n");
+
+	current_index = ipw_fw_dma_command_block_index(priv);
+	IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%8X\n",
+			  (int) priv->sram_desc.last_cb_index);
+
+	while (current_index < priv->sram_desc.last_cb_index) {
+		udelay(50);
+		current_index = ipw_fw_dma_command_block_index(priv);
+
+		watchdog++;
+
+		if (watchdog > 400) {
+			IPW_DEBUG_FW_INFO("Timeout\n");
+			ipw_fw_dma_dump_command_block(priv);
+			ipw_fw_dma_abort(priv);
+			return -1;
+		}
+	}
+
+	ipw_fw_dma_abort(priv);
+
+	/* Disable the DMA in the CSR register */
+	ipw_set_bit(priv, CX2_RESET_REG,
+		    CX2_RESET_REG_MASTER_DISABLED | CX2_RESET_REG_STOP_MASTER);
+
+	IPW_DEBUG_FW("<< dmaWaitSync \n");
+	return 0;
+}
+
+static void ipw_remove_current_network(struct ipw_priv *priv)
+{
+	struct list_head *element, *safe;
+	struct ieee80211_network *network = NULL;
+	list_for_each_safe(element, safe, &priv->ieee->network_list) {
+		network = list_entry(element, struct ieee80211_network, list);
+		if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
+			list_del(element);
+			list_add_tail(&network->list,
+				      &priv->ieee->network_free_list);
+		}
+	}
+}
+
+/**
+ * Check that card is still alive.
+ * Reads debug register from domain0.
+ * If card is present, pre-defined value should
+ * be found there.
+ *
+ * @param priv
+ * @return 1 if card is present, 0 otherwise
+ */
+static inline int ipw_alive(struct ipw_priv *priv)
+{
+	return ipw_read32(priv, 0x90) == 0xd55555d5;
+}
+
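+/* Poll 'addr' until all bits in 'mask' are set, checking every 10 ms.
+ * Returns the approximate elapsed time in ms on success, or -ETIME once
+ * 'timeout' ms have passed. */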
+static inline int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
+			       int timeout)
+{
+	int i = 0;
+
+	do {
+		if ((ipw_read32(priv, addr) & mask) == mask)
+			return i;
+		mdelay(10);
+		i += 10;
+	} while (i < timeout);
+
+	return -ETIME;
+}
+
+/* These functions load the firmware and micro code for the operation of
+ * the ipw hardware.  They assume the buffer has all the bits for the
+ * image and that the caller handles the memory allocation and clean up.
+ */
+
+
+static int ipw_stop_master(struct ipw_priv * priv)
+{
+	int rc;
+
+	IPW_DEBUG_TRACE(">> \n");
+	/* stop master. typical delay - 0 */
+	ipw_set_bit(priv, CX2_RESET_REG, CX2_RESET_REG_STOP_MASTER);
+
+	rc = ipw_poll_bit(priv, CX2_RESET_REG,
+			  CX2_RESET_REG_MASTER_DISABLED, 100);
+	if (rc < 0) {
+		IPW_ERROR("stop master failed in 100ms\n");
+		return -1;
+	}
+
+	IPW_DEBUG_INFO("stop master %dms\n", rc);
+
+	return rc;
+}
+
+static void ipw_arc_release(struct ipw_priv *priv)
+{
+	IPW_DEBUG_TRACE(">> \n");
+	mdelay(5);
+
+	ipw_clear_bit(priv, CX2_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
+
+	/* no one knows timing, for safety add some delay */
+	mdelay(5);
+}
+
+struct fw_header {
+	u32 version;
+	u32 mode;
+};
+
+struct fw_chunk {
+	u32 address;
+	u32 length;
+};
+
+#define IPW_FW_MAJOR_VERSION 2
+#define IPW_FW_MINOR_VERSION 2
+
+#define IPW_FW_MINOR(x) ((x & 0xff00) >> 8)
+#define IPW_FW_MAJOR(x) (x & 0xff)
+
+#define IPW_FW_VERSION ((IPW_FW_MINOR_VERSION << 8) | \
+                         IPW_FW_MAJOR_VERSION)
+
+#define IPW_FW_PREFIX "ipw-" __stringify(IPW_FW_MAJOR_VERSION) \
+"." __stringify(IPW_FW_MINOR_VERSION) "-"
+
+#if IPW_FW_MAJOR_VERSION >= 2 && IPW_FW_MINOR_VERSION > 0
+#define IPW_FW_NAME(x) IPW_FW_PREFIX "" x ".fw"
+#else
+#define IPW_FW_NAME(x) "ipw2200_" x ".fw"
+#endif
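+/*
+ * With the version defines above, IPW_FW_NAME("bss"), for example, expands
+ * to "ipw-2.2-bss.fw"; the fallback branch would instead give
+ * "ipw2200_bss.fw".  ("bss" is used here purely as an illustration.)
+ */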
+
+static int ipw_load_ucode(struct ipw_priv *priv, u8 * data,
+			  size_t len)
+{
+	int rc = 0, i, addr;
+	u8 cr = 0;
+	u16 *image;
+
+	image = (u16 *)data;
+
+	IPW_DEBUG_TRACE(">> \n");
+
+	rc = ipw_stop_master(priv);
+
+	if (rc < 0)
+		return rc;
+
+//	spin_lock_irqsave(&priv->lock, flags);
+
+	for (addr = CX2_SHARED_LOWER_BOUND;
+	     addr < CX2_REGISTER_DOMAIN1_END; addr += 4) {
+		ipw_write32(priv, addr, 0);
+	}
+
+	/* no ucode (yet) */
+	memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
+	/* destroy DMA queues */
+	/* reset sequence */
+
+	ipw_write_reg32(priv, CX2_MEM_HALT_AND_RESET ,CX2_BIT_HALT_RESET_ON);
+	ipw_arc_release(priv);
+	ipw_write_reg32(priv, CX2_MEM_HALT_AND_RESET, CX2_BIT_HALT_RESET_OFF);
+	mdelay(1);
+
+	/* reset PHY */
+	ipw_write_reg32(priv, CX2_INTERNAL_CMD_EVENT, CX2_BASEBAND_POWER_DOWN);
+	mdelay(1);
+
+	ipw_write_reg32(priv, CX2_INTERNAL_CMD_EVENT, 0);
+	mdelay(1);
+
+	/* enable ucode store */
+	ipw_write_reg8(priv, DINO_CONTROL_REG, 0x0);
+	ipw_write_reg8(priv, DINO_CONTROL_REG, DINO_ENABLE_CS);
+	mdelay(1);
+
+	/* write ucode */
+	/**
+	 * @bug
+	 * Do NOT set indirect address register once and then
+	 * store data to indirect data register in the loop.
+	 * It seems very reasonable, but in this case DINO does not
+	 * accept the ucode. It is essential to set the address each time.
+	 */
+	/* load new ipw uCode */
+	for (i = 0; i < len / 2; i++)
+		ipw_write_reg16(priv, CX2_BASEBAND_CONTROL_STORE, image[i]);
+
+
+	/* enable DINO */
+	ipw_write_reg8(priv, CX2_BASEBAND_CONTROL_STATUS, 0);
+	ipw_write_reg8(priv, CX2_BASEBAND_CONTROL_STATUS,
+		       DINO_ENABLE_SYSTEM );
+
+	/* this is where the igx / win driver deviates from the VAP driver. */
+
+	/* wait for alive response */
+	for (i = 0; i < 100; i++) {
+		/* poll for incoming data */
+		cr = ipw_read_reg8(priv, CX2_BASEBAND_CONTROL_STATUS);
+		if (cr & DINO_RXFIFO_DATA)
+			break;
+		mdelay(1);
+	}
+
+	if (cr & DINO_RXFIFO_DATA) {
+		/* alive_command_response size is NOT a multiple of 4 */
+		u32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
+
+		for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
+			response_buffer[i] =
+				ipw_read_reg32(priv,
+					       CX2_BASEBAND_RX_FIFO_READ);
+		memcpy(&priv->dino_alive, response_buffer,
+		       sizeof(priv->dino_alive));
+		if (priv->dino_alive.alive_command == 1
+		    && priv->dino_alive.ucode_valid == 1) {
+			rc = 0;
+			IPW_DEBUG_INFO(
+				"Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
+				"of %02d/%02d/%02d %02d:%02d\n",
+				priv->dino_alive.software_revision,
+				priv->dino_alive.software_revision,
+				priv->dino_alive.device_identifier,
+				priv->dino_alive.device_identifier,
+				priv->dino_alive.time_stamp[0],
+				priv->dino_alive.time_stamp[1],
+				priv->dino_alive.time_stamp[2],
+				priv->dino_alive.time_stamp[3],
+				priv->dino_alive.time_stamp[4]);
+		} else {
+			IPW_DEBUG_INFO("Microcode is not alive\n");
+			rc = -EINVAL;
+		}
+	} else {
+		IPW_DEBUG_INFO("No alive response from DINO\n");
+		rc = -ETIME;
+	}
+
+	/* disable DINO, otherwise for some reason
+	   the firmware has problems getting the alive response. */
+	ipw_write_reg8(priv, CX2_BASEBAND_CONTROL_STATUS, 0);
+
+//	spin_unlock_irqrestore(&priv->lock, flags);
+
+	return rc;
+}
+
+static int ipw_load_firmware(struct ipw_priv *priv, u8 * data,
+			     size_t len)
+{
+	int rc = -1;
+	int offset = 0;
+	struct fw_chunk *chunk;
+	dma_addr_t shared_phys;
+	u8 *shared_virt;
+
+	IPW_DEBUG_TRACE("<< : \n");
+	shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
+
+	if (!shared_virt)
+		return -ENOMEM;
+
+	memmove(shared_virt, data, len);
+
+	/* Start the Dma */
+	rc = ipw_fw_dma_enable(priv);
+
+	if (priv->sram_desc.last_cb_index > 0) {
+		/* the DMA engine is already set up; this would be a bug */
+		BUG();
+		goto out;
+	}
+
+	do {
+		chunk = (struct fw_chunk *)(data + offset);
+		offset += sizeof(struct fw_chunk);
+		/* build DMA packet and queue up for sending */
+		/* dma to chunk->address, the chunk->length bytes from data +
+		 * offset */
+		/* Dma loading */
+		rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
+					   chunk->address, chunk->length);
+		if (rc) {
+			IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
+			goto out;
+		}
+
+		offset += chunk->length;
+	} while (offset < len);
+
+	/* Run the DMA and wait for the answer*/
+	rc = ipw_fw_dma_kick(priv);
+	if (rc) {
+		IPW_ERROR("dmaKick Failed\n");
+		goto out;
+	}
+
+	rc = ipw_fw_dma_wait(priv);
+	if (rc) {
+		IPW_ERROR("dmaWaitSync Failed\n");
+		goto out;
+	}
+ out:
+	pci_free_consistent( priv->pci_dev, len, shared_virt, shared_phys);
+	return rc;
+}
+
+/* stop nic */
+static int ipw_stop_nic(struct ipw_priv *priv)
+{
+	int rc = 0;
+
+	/* stop*/
+	ipw_write32(priv, CX2_RESET_REG, CX2_RESET_REG_STOP_MASTER);
+
+	rc = ipw_poll_bit(priv, CX2_RESET_REG,
+			  CX2_RESET_REG_MASTER_DISABLED, 500);
+	if (rc < 0) {
+		IPW_ERROR("wait for reg master disabled failed\n");
+		return rc;
+	}
+
+	ipw_set_bit(priv, CX2_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
+
+	return rc;
+}
+
+static void ipw_start_nic(struct ipw_priv *priv)
+{
+	IPW_DEBUG_TRACE(">>\n");
+
+	/* prvHwStartNic: release ARC */
+	ipw_clear_bit(priv, CX2_RESET_REG,
+		      CX2_RESET_REG_MASTER_DISABLED |
+		      CX2_RESET_REG_STOP_MASTER |
+		      CBD_RESET_REG_PRINCETON_RESET);
+
+	/* enable power management */
+	ipw_set_bit(priv, CX2_GP_CNTRL_RW, CX2_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
+
+	IPW_DEBUG_TRACE("<<\n");
+}
+
+static int ipw_init_nic(struct ipw_priv *priv)
+{
+	int rc;
+
+	IPW_DEBUG_TRACE(">>\n");
+	/* reset */
+	/*prvHwInitNic */
+	/* set "initialization complete" bit to move adapter to D0 state */
+	ipw_set_bit(priv, CX2_GP_CNTRL_RW, CX2_GP_CNTRL_BIT_INIT_DONE);
+
+	/* low-level PLL activation */
+	ipw_write32(priv, CX2_READ_INT_REGISTER,  CX2_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
+
+	/* wait for clock stabilization */
+	rc = ipw_poll_bit(priv, CX2_GP_CNTRL_RW,
+			  CX2_GP_CNTRL_BIT_CLOCK_READY, 250);
+	if (rc < 0 )
+		IPW_DEBUG_INFO("FAILED wait for clock stabilization\n");
+
+	/* assert SW reset */
+	ipw_set_bit(priv, CX2_RESET_REG, CX2_RESET_REG_SW_RESET);
+
+	udelay(10);
+
+	/* set "initialization complete" bit to move adapter to D0 state */
+	ipw_set_bit(priv, CX2_GP_CNTRL_RW, CX2_GP_CNTRL_BIT_INIT_DONE);
+
+	IPW_DEBUG_TRACE("<<\n");
+	return 0;
+}
+
+
+/* Call this function from process context, it will sleep in request_firmware.
+ * Probe is an ok place to call this from.
+ */
+static int ipw_reset_nic(struct ipw_priv *priv)
+{
+	int rc = 0;
+
+	IPW_DEBUG_TRACE(">>\n");
+
+	rc = ipw_init_nic(priv);
+
+	/* Clear the 'host command active' bit... */
+	priv->status &= ~STATUS_HCMD_ACTIVE;
+	wake_up_interruptible(&priv->wait_command_queue);
+
+	IPW_DEBUG_TRACE("<<\n");
+	return rc;
+}
+
+static int ipw_get_fw(struct ipw_priv *priv,
+		      const struct firmware **fw, const char *name)
+{
+	struct fw_header *header;
+	int rc;
+
+	/* ask firmware_class module to get the boot firmware off disk */
+	rc = request_firmware(fw, name, &priv->pci_dev->dev);
+	if (rc < 0) {
+		IPW_ERROR("%s load failed: Reason %d\n", name, rc);
+		return rc;
+	}
+
+	header = (struct fw_header *)(*fw)->data;
+	if (IPW_FW_MAJOR(header->version) != IPW_FW_MAJOR_VERSION) {
+		IPW_ERROR("'%s' firmware version not compatible (%d != %d)\n",
+			  name,
+			  IPW_FW_MAJOR(header->version), IPW_FW_MAJOR_VERSION);
+		return -EINVAL;
+	}
+
+	IPW_DEBUG_INFO("Loading firmware '%s' file v%d.%d (%zd bytes)\n",
+		       name,
+		       IPW_FW_MAJOR(header->version),
+		       IPW_FW_MINOR(header->version),
+		       (*fw)->size - sizeof(struct fw_header));
+	return 0;
+}
+
+#define CX2_RX_BUF_SIZE (3000)
+
+static inline void ipw_rx_queue_reset(struct ipw_priv *priv,
+				      struct ipw_rx_queue *rxq)
+{
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&rxq->lock, flags);
+
+	INIT_LIST_HEAD(&rxq->rx_free);
+	INIT_LIST_HEAD(&rxq->rx_used);
+
+	/* Fill the rx_used queue with _all_ of the Rx buffers */
+	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+		/* In the reset function, these buffers may have been allocated
+		 * to an SKB, so we need to unmap and free potential storage */
+		if (rxq->pool[i].skb != NULL) {
+			pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
+					 CX2_RX_BUF_SIZE,
+					 PCI_DMA_FROMDEVICE);
+			dev_kfree_skb(rxq->pool[i].skb);
+		}
+		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+	}
+
+	/* Set us so that we have processed and used all buffers, but have
+	 * not restocked the Rx queue with fresh buffers */
+	rxq->read = rxq->write = 0;
+	rxq->processed = RX_QUEUE_SIZE - 1;
+	rxq->free_count = 0;
+	spin_unlock_irqrestore(&rxq->lock, flags);
+}
+
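+/* With CONFIG_PM the firmware images are kept in these statics after the
+ * first successful load so that a later reset or resume does not have to go
+ * back to request_firmware() -- presumably because userspace may not be able
+ * to service firmware requests at that point. */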
+#ifdef CONFIG_PM
+static int fw_loaded = 0;
+static const struct firmware *bootfw = NULL;
+static const struct firmware *firmware = NULL;
+static const struct firmware *ucode = NULL;
+#endif
+
+static int ipw_load(struct ipw_priv *priv)
+{
+#ifndef CONFIG_PM
+	const struct firmware *bootfw = NULL;
+	const struct firmware *firmware = NULL;
+	const struct firmware *ucode = NULL;
+#endif
+	int rc = 0, retries = 3;
+
+#ifdef CONFIG_PM
+	if (!fw_loaded) {
+#endif
+		rc = ipw_get_fw(priv, &bootfw, IPW_FW_NAME("boot"));
+		if (rc)
+			goto error;
+
+		switch (priv->ieee->iw_mode) {
+		case IW_MODE_ADHOC:
+			rc = ipw_get_fw(priv, &ucode,
+					IPW_FW_NAME("ibss_ucode"));
+			if (rc)
+				goto error;
+
+			rc = ipw_get_fw(priv, &firmware, IPW_FW_NAME("ibss"));
+			break;
+
+#ifdef CONFIG_IPW_PROMISC
+		case IW_MODE_MONITOR:
+			rc = ipw_get_fw(priv, &ucode,
+					IPW_FW_NAME("ibss_ucode"));
+			if (rc)
+				goto error;
+
+			rc = ipw_get_fw(priv, &firmware, IPW_FW_NAME("sniffer"));
+			break;
+#endif
+		case IW_MODE_INFRA:
+			rc = ipw_get_fw(priv, &ucode,
+					IPW_FW_NAME("bss_ucode"));
+			if (rc)
+				goto error;
+
+			rc = ipw_get_fw(priv, &firmware, IPW_FW_NAME("bss"));
+			break;
+
+		default:
+			rc = -EINVAL;
+		}
+
+		if (rc)
+			goto error;
+
+#ifdef CONFIG_PM
+		fw_loaded = 1;
+	}
+#endif
+
+	if (!priv->rxq)
+		priv->rxq = ipw_rx_queue_alloc(priv);
+	else
+		ipw_rx_queue_reset(priv, priv->rxq);
+	if (!priv->rxq) {
+		IPW_ERROR("Unable to initialize Rx queue\n");
+		goto error;
+	}
+
+ retry:
+	/* Ensure interrupts are disabled */
+	ipw_write32(priv, CX2_INTA_MASK_R, ~CX2_INTA_MASK_ALL);
+	priv->status &= ~STATUS_INT_ENABLED;
+
+	/* ack pending interrupts */
+	ipw_write32(priv, CX2_INTA_RW, CX2_INTA_MASK_ALL);
+
+	ipw_stop_nic(priv);
+
+	rc = ipw_reset_nic(priv);
+	if (rc) {
+		IPW_ERROR("Unable to reset NIC\n");
+		goto error;
+	}
+
+	ipw_zero_memory(priv, CX2_NIC_SRAM_LOWER_BOUND,
+			CX2_NIC_SRAM_UPPER_BOUND - CX2_NIC_SRAM_LOWER_BOUND);
+
+	/* DMA the initial boot firmware into the device */
+	rc = ipw_load_firmware(priv, bootfw->data + sizeof(struct fw_header),
+			       bootfw->size - sizeof(struct fw_header));
+	if (rc < 0) {
+		IPW_ERROR("Unable to load boot firmware\n");
+		goto error;
+	}
+
+	/* kick start the device */
+	ipw_start_nic(priv);
+
+	/* wait for the device to finish its initial startup sequence */
+	rc = ipw_poll_bit(priv, CX2_INTA_RW,
+			  CX2_INTA_BIT_FW_INITIALIZATION_DONE, 500);
+	if (rc < 0) {
+		IPW_ERROR("device failed to boot initial fw image\n");
+		goto error;
+	}
+	IPW_DEBUG_INFO("initial device response after %dms\n", rc);
+
+	/* ack fw init done interrupt */
+	ipw_write32(priv, CX2_INTA_RW, CX2_INTA_BIT_FW_INITIALIZATION_DONE);
+
+	/* DMA the ucode into the device */
+	rc = ipw_load_ucode(priv, ucode->data + sizeof(struct fw_header),
+			    ucode->size - sizeof(struct fw_header));
+	if (rc < 0) {
+		IPW_ERROR("Unable to load ucode\n");
+		goto error;
+	}
+
+	/* stop nic */
+	ipw_stop_nic(priv);
+
+	/* DMA bss firmware into the device */
+	rc = ipw_load_firmware(priv, firmware->data +
+			       sizeof(struct fw_header),
+			       firmware->size - sizeof(struct fw_header));
+	if (rc < 0 ) {
+		IPW_ERROR("Unable to load firmware\n");
+		goto error;
+	}
+
+	ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
+
+	rc = ipw_queue_reset(priv);
+	if (rc) {
+		IPW_ERROR("Unable to initialize queues\n");
+		goto error;
+	}
+
+	/* Ensure interrupts are disabled */
+	ipw_write32(priv, CX2_INTA_MASK_R, ~CX2_INTA_MASK_ALL);
+
+	/* kick start the device */
+	ipw_start_nic(priv);
+
+	if (ipw_read32(priv, CX2_INTA_RW) & CX2_INTA_BIT_PARITY_ERROR) {
+		if (retries > 0) {
+			IPW_WARNING("Parity error.  Retrying init.\n");
+			retries--;
+			goto retry;
+		}
+
+		IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
+		rc = -EIO;
+		goto error;
+	}
+
+	/* wait for the device */
+	rc = ipw_poll_bit(priv, CX2_INTA_RW,
+			  CX2_INTA_BIT_FW_INITIALIZATION_DONE, 500);
+	if (rc < 0) {
+		IPW_ERROR("device failed to start after 500ms\n");
+		goto error;
+	}
+	IPW_DEBUG_INFO("device response after %dms\n", rc);
+
+	/* ack fw init done interrupt */
+	ipw_write32(priv, CX2_INTA_RW, CX2_INTA_BIT_FW_INITIALIZATION_DONE);
+
+	/* read eeprom data and initialize the eeprom region of sram */
+	priv->eeprom_delay = 1;
+	ipw_eeprom_init_sram(priv);
+
+	/* enable interrupts */
+	ipw_enable_interrupts(priv);
+
+	/* Ensure our queue has valid packets */
+	ipw_rx_queue_replenish(priv);
+
+	ipw_write32(priv, CX2_RX_READ_INDEX, priv->rxq->read);
+
+	/* ack pending interrupts */
+	ipw_write32(priv, CX2_INTA_RW, CX2_INTA_MASK_ALL);
+
+#ifndef CONFIG_PM
+	release_firmware(bootfw);
+	release_firmware(ucode);
+	release_firmware(firmware);
+#endif
+	return 0;
+
+ error:
+	if (priv->rxq) {
+		ipw_rx_queue_free(priv, priv->rxq);
+		priv->rxq = NULL;
+	}
+	ipw_tx_queue_free(priv);
+	if (bootfw)
+		release_firmware(bootfw);
+	if (ucode)
+		release_firmware(ucode);
+	if (firmware)
+		release_firmware(firmware);
+#ifdef CONFIG_PM
+	fw_loaded = 0;
+	bootfw = ucode = firmware = NULL;
+#endif
+
+	return rc;
+}
+
+/**
+ * DMA services
+ *
+ * Theory of operation
+ *
+ * A queue is a circular buffer with 'Read' and 'Write' pointers.
+ * Two empty entries are always kept in the buffer to protect from overflow.
+ *
+ * For the Tx queue, there are low mark and high mark limits.  If, after
+ * queuing a packet for Tx, the free space becomes less than the low mark,
+ * the Tx queue is stopped.  When reclaiming packets (on the 'tx done' IRQ),
+ * if the free space becomes greater than the high mark, the Tx queue is
+ * resumed.
+ *
+ * The IPW operates with six queues, one receive queue in the device's
+ * sram, one transmit queue for sending commands to the device firmware,
+ * and four transmit queues for data.
+ *
+ * The four transmit queues allow for performing quality of service (qos)
+ * transmissions as per the 802.11 protocol.  Currently Linux does not
+ * provide a mechanism to the user for utilizing prioritized queues, so
+ * we only utilize the first data transmit queue (queue1).
+ */
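+
+/*
+ * Concrete numbers for the marks described above, using the queue sizes set
+ * up in ipw_queue_reset() below (nTx = 64 data BDs, nTxCmd = 8 command BDs):
+ * ipw_queue_init() gives the data queues low_mark = 64 / 4 = 16 and
+ * high_mark = 64 / 8 = 8, and ipw_queue_tx_reclaim() wakes the Tx queue once
+ * ipw_queue_space() reports more than low_mark free slots.
+ */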
+
+/**
+ * CX2_RX_BUF_SIZE (defined above) is the size of the buffers the driver
+ * allocates for Rx
+ */
+
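+/*
+ * Free-slot computation for a queue; two entries are held back so that an
+ * empty ring and a full ring can be told apart.  Worked example: with
+ * n_bd = 64, first_empty = 10 and last_used = 5, s = 5 - 10 = -5, then
+ * -5 + 64 = 59, then 59 - 2 = 57 usable slots.
+ */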
+static inline int ipw_queue_space(const struct clx2_queue *q)
+{
+	int s = q->last_used - q->first_empty;
+	if (s <= 0)
+		s += q->n_bd;
+	s -= 2;			/* keep some reserve to not confuse empty and full situations */
+	if (s < 0)
+		s = 0;
+	return s;
+}
+
+static inline int ipw_queue_inc_wrap(int index, int n_bd)
+{
+	return (++index == n_bd) ? 0 : index;
+}
+
+/**
+ * Initialize common DMA queue structure
+ *
+ * @param q                queue to init
+ * @param count            Number of BD's to allocate. Should be power of 2
+ * @param read             Address for 'read' register
+ *                         (not offset within BAR, full address)
+ * @param write            Address for 'write' register
+ *                         (not offset within BAR, full address)
+ * @param base             Address for 'base' register
+ *                         (not offset within BAR, full address)
+ * @param size             Address for 'size' register
+ *                         (not offset within BAR, full address)
+ */
+static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
+			   int count, u32 read, u32 write,
+			   u32 base, u32 size)
+{
+	q->n_bd = count;
+
+	q->low_mark = q->n_bd / 4;
+	if (q->low_mark < 4)
+		q->low_mark = 4;
+
+	q->high_mark = q->n_bd / 8;
+	if (q->high_mark < 2)
+		q->high_mark = 2;
+
+	q->first_empty = q->last_used = 0;
+	q->reg_r = read;
+	q->reg_w = write;
+
+	ipw_write32(priv, base, q->dma_addr);
+	ipw_write32(priv, size, count);
+	ipw_write32(priv, read, 0);
+	ipw_write32(priv, write, 0);
+
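+	/* Dummy read -- presumably this flushes the posted register writes
+	 * above to the device before the queue is considered initialized. */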
+	_ipw_read32(priv, 0x90);
+}
+
+static int ipw_queue_tx_init(struct ipw_priv *priv,
+			     struct clx2_tx_queue *q,
+			     int count, u32 read, u32 write,
+			     u32 base, u32 size)
+{
+	struct pci_dev *dev = priv->pci_dev;
+
+	q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
+	if (!q->txb) {
+		IPW_ERROR("kmalloc for auxiliary BD structures failed\n");
+		return -ENOMEM;
+	}
+
+	q->bd = pci_alloc_consistent(dev,sizeof(q->bd[0])*count, &q->q.dma_addr);
+	if (!q->bd) {
+		IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
+				sizeof(q->bd[0]) * count);
+		kfree(q->txb);
+		q->txb = NULL;
+		return -ENOMEM;
+	}
+
+	ipw_queue_init(priv, &q->q, count, read, write, base, size);
+	return 0;
+}
+
+/**
+ * Free one TFD, the one at index [txq->q.last_used].
+ * Do NOT advance any indexes
+ *
+ * @param dev
+ * @param txq
+ */
+static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
+				  struct clx2_tx_queue *txq)
+{
+	struct tfd_frame *bd = &txq->bd[txq->q.last_used];
+	struct pci_dev *dev = priv->pci_dev;
+	int i;
+
+	/* classify bd */
+	if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
+		/* nothing to cleanup after for host commands */
+		return;
+
+	/* sanity check */
+	if (bd->u.data.num_chunks > NUM_TFD_CHUNKS) {
+		IPW_ERROR("Too many chunks: %i\n", bd->u.data.num_chunks);
+		/** @todo issue a fatal error, it is quite a serious situation */
+		return;
+	}
+
+	/* unmap chunks if any */
+	for (i = 0; i < bd->u.data.num_chunks; i++) {
+		pci_unmap_single(dev, bd->u.data.chunk_ptr[i],
+				 bd->u.data.chunk_len[i], PCI_DMA_TODEVICE);
+		if (txq->txb[txq->q.last_used]) {
+			ieee80211_txb_free(txq->txb[txq->q.last_used]);
+			txq->txb[txq->q.last_used] = NULL;
+		}
+	}
+}
+
+/**
+ * Deallocate DMA queue.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ *
+ * @param dev
+ * @param q
+ */
+static void ipw_queue_tx_free(struct ipw_priv *priv,
+			    struct clx2_tx_queue *txq)
+{
+	struct clx2_queue *q = &txq->q;
+	struct pci_dev *dev = priv->pci_dev;
+
+	if (q->n_bd == 0)
+		return;
+
+	/* first, empty all BD's */
+	for (; q->first_empty != q->last_used;
+	     q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
+		ipw_queue_tx_free_tfd(priv, txq);
+	}
+
+	/* free buffers belonging to queue itself */
+	pci_free_consistent(dev, sizeof(txq->bd[0])*q->n_bd, txq->bd,
+			    q->dma_addr);
+	kfree(txq->txb);
+
+	/* 0 fill whole structure */
+	memset(txq, 0, sizeof(*txq));
+}
+
+
+/**
+ * Destroy all DMA queues and structures
+ *
+ * @param priv
+ */
+static void ipw_tx_queue_free(struct ipw_priv *priv)
+{
+	/* Tx CMD queue */
+	ipw_queue_tx_free(priv, &priv->txq_cmd);
+
+	/* Tx queues */
+	ipw_queue_tx_free(priv, &priv->txq[0]);
+	ipw_queue_tx_free(priv, &priv->txq[1]);
+	ipw_queue_tx_free(priv, &priv->txq[2]);
+	ipw_queue_tx_free(priv, &priv->txq[3]);
+}
+
+static inline void __maybe_wake_tx(struct ipw_priv *priv)
+{
+	if (netif_running(priv->net_dev)) {
+		switch (priv->port_type) {
+		case DCR_TYPE_MU_BSS:
+		case DCR_TYPE_MU_IBSS:
+			if (!(priv->status & STATUS_ASSOCIATED)) {
+				return;
+			}
+		}
+		netif_wake_queue(priv->net_dev);
+	}
+
+}
+
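+/*
+ * Example (the MAC below is only an illustrative value): a device address of
+ * 00:0e:35:12:34:56 keeps the 00:0e:35 manufacturer prefix, gets three random
+ * tail bytes, and has its first octet forced to 0x02 (multicast bit cleared,
+ * locally administered bit set), yielding a BSSID of the form 02:0e:35:xx:xx:xx.
+ */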
+static inline void ipw_create_bssid(struct ipw_priv *priv, u8 *bssid)
+{
+	/* First 3 bytes are manufacturer */
+	bssid[0] = priv->mac_addr[0];
+	bssid[1] = priv->mac_addr[1];
+	bssid[2] = priv->mac_addr[2];
+
+	/* Last bytes are random */
+	get_random_bytes(&bssid[3], ETH_ALEN - 3);
+
+	bssid[0] &= 0xfe;	/* clear multicast bit */
+	bssid[0] |= 0x02;	/* set local assignment bit (IEEE802) */
+}
+
+static inline u8 ipw_add_station(struct ipw_priv *priv, u8 *bssid)
+{
+	struct ipw_station_entry entry;
+	int i;
+
+	for (i = 0; i < priv->num_stations; i++) {
+		if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
+			/* Another node is active in network */
+			priv->missed_adhoc_beacons = 0;
+			if (!(priv->config & CFG_STATIC_CHANNEL))
+				/* when other nodes drop out, we drop out */
+				priv->config &= ~CFG_ADHOC_PERSIST;
+
+			return i;
+		}
+	}
+
+	if (i == MAX_STATIONS)
+		return IPW_INVALID_STATION;
+
+	IPW_DEBUG_SCAN("Adding AdHoc station: " MAC_FMT "\n", MAC_ARG(bssid));
+
+	entry.reserved = 0;
+	entry.support_mode = 0;
+	memcpy(entry.mac_addr, bssid, ETH_ALEN);
+	memcpy(priv->stations[i], bssid, ETH_ALEN);
+	ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
+			 &entry,
+			 sizeof(entry));
+	priv->num_stations++;
+
+	return i;
+}
+
+static inline u8 ipw_find_station(struct ipw_priv *priv, u8 *bssid)
+{
+	int i;
+
+	for (i = 0; i < priv->num_stations; i++)
+		if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
+			return i;
+
+	return IPW_INVALID_STATION;
+}
+
+static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
+{
+	int err;
+
+	if (!(priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED))) {
+		IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
+		return;
+	}
+
+	IPW_DEBUG_ASSOC("Disassociation attempt from " MAC_FMT " "
+			"on channel %d.\n",
+			MAC_ARG(priv->assoc_request.bssid),
+			priv->assoc_request.channel);
+
+	priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
+	priv->status |= STATUS_DISASSOCIATING;
+
+	if (quiet)
+		priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
+	else
+		priv->assoc_request.assoc_type = HC_DISASSOCIATE;
+	err = ipw_send_associate(priv, &priv->assoc_request);
+	if (err) {
+		IPW_DEBUG_HC("Attempt to send [dis]associate command "
+			     "failed.\n");
+		return;
+	}
+
+}
+
+static void ipw_disassociate(void *data)
+{
+	ipw_send_disassociate(data, 0);
+}
+
+static void notify_wx_assoc_event(struct ipw_priv *priv)
+{
+	union iwreq_data wrqu;
+	wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+	if (priv->status & STATUS_ASSOCIATED)
+		memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
+	else
+		memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
+	wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
+}
+
+struct ipw_status_code {
+	u16 status;
+	const char *reason;
+};
+
+static const struct ipw_status_code ipw_status_codes[] = {
+	{0x00, "Successful"},
+	{0x01, "Unspecified failure"},
+	{0x0A, "Cannot support all requested capabilities in the "
+	 "Capability information field"},
+	{0x0B, "Reassociation denied due to inability to confirm that "
+	 "association exists"},
+	{0x0C, "Association denied due to reason outside the scope of this "
+	 "standard"},
+	{0x0D, "Responding station does not support the specified authentication "
+	 "algorithm"},
+	{0x0E, "Received an Authentication frame with authentication sequence "
+	 "transaction sequence number out of expected sequence"},
+	{0x0F, "Authentication rejected because of challenge failure"},
+	{0x10, "Authentication rejected due to timeout waiting for next "
+	 "frame in sequence"},
+	{0x11, "Association denied because AP is unable to handle additional "
+	 "associated stations"},
+	{0x12, "Association denied due to requesting station not supporting all "
+	 "of the datarates in the BSSBasicServiceSet Parameter"},
+	{0x13, "Association denied due to requesting station not supporting "
+	 "short preamble operation"},
+	{0x14, "Association denied due to requesting station not supporting "
+	 "PBCC encoding"},
+	{0x15, "Association denied due to requesting station not supporting "
+	 "channel agility"},
+	{0x19, "Association denied due to requesting station not supporting "
+	 "short slot operation"},
+	{0x1A, "Association denied due to requesting station not supporting "
+	 "DSSS-OFDM operation"},
+	{0x28, "Invalid Information Element"},
+	{0x29, "Group Cipher is not valid"},
+	{0x2A, "Pairwise Cipher is not valid"},
+	{0x2B, "AKMP is not valid"},
+	{0x2C, "Unsupported RSN IE version"},
+	{0x2D, "Invalid RSN IE Capabilities"},
+	{0x2E, "Cipher suite is rejected per security policy"},
+};
+
+#ifdef CONFIG_IPW_DEBUG
+static const char *ipw_get_status_code(u16 status)
+{
+	int i;
+	for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
+		if (ipw_status_codes[i].status == status)
+			return ipw_status_codes[i].reason;
+	return "Unknown status value.";
+}
+#endif
+
+static inline void average_init(struct average *avg)
+{
+	memset(avg, 0, sizeof(*avg));
+}
+
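+/* The helpers below maintain a sliding window of the last AVG_ENTRIES
+ * samples.  average_add() replaces the oldest entry and adjusts the running
+ * sum, so average_value() is O(1); until the window has wrapped once
+ * (avg->init still 0), the average is taken over only the samples seen so
+ * far. */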
+static inline void average_add(struct average *avg, s16 val)
+{
+	avg->sum -= avg->entries[avg->pos];
+	avg->sum += val;
+	avg->entries[avg->pos++] = val;
+	if (unlikely(avg->pos == AVG_ENTRIES)) {
+		avg->init = 1;
+		avg->pos = 0;
+	}
+}
+
+static inline s16 average_value(struct average *avg)
+{
+	if (unlikely(!avg->init)) {
+		if (avg->pos)
+			return avg->sum / avg->pos;
+		return 0;
+	}
+
+	return avg->sum / AVG_ENTRIES;
+}
+
+static void ipw_reset_stats(struct ipw_priv *priv)
+{
+	u32 len = sizeof(u32);
+
+	priv->quality = 0;
+
+	average_init(&priv->average_missed_beacons);
+	average_init(&priv->average_rssi);
+	average_init(&priv->average_noise);
+
+	priv->last_rate = 0;
+	priv->last_missed_beacons = 0;
+	priv->last_rx_packets = 0;
+	priv->last_tx_packets = 0;
+	priv->last_tx_failures = 0;
+
+	/* Firmware managed, reset only when NIC is restarted, so we have to
+	 * normalize on the current value */
+	ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
+			&priv->last_rx_err, &len);
+	ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
+			&priv->last_tx_failures, &len);
+
+	/* Driver managed, reset with each association */
+	priv->missed_adhoc_beacons = 0;
+	priv->missed_beacons = 0;
+	priv->tx_packets = 0;
+	priv->rx_packets = 0;
+
+}
+
+
+static inline u32 ipw_get_max_rate(struct ipw_priv *priv)
+{
+	u32 i = 0x80000000;
+	u32 mask = priv->rates_mask;
+	/* If currently associated in B mode, restrict the maximum
+	 * rate match to B rates */
+	if (priv->assoc_request.ieee_mode == IPW_B_MODE)
+		mask &= IEEE80211_CCK_RATES_MASK;
+
+	/* TODO: Verify that the rate is supported by the current rates
+	 * list. */
+
+	while (i && !(mask & i)) i >>= 1;
+	switch (i) {
+	case IEEE80211_CCK_RATE_1MB_MASK: return 1000000;
+	case IEEE80211_CCK_RATE_2MB_MASK: return 2000000;
+	case IEEE80211_CCK_RATE_5MB_MASK: return 5500000;
+	case IEEE80211_OFDM_RATE_6MB_MASK: return 6000000;
+	case IEEE80211_OFDM_RATE_9MB_MASK: return 9000000;
+	case IEEE80211_CCK_RATE_11MB_MASK: return 11000000;
+	case IEEE80211_OFDM_RATE_12MB_MASK: return 12000000;
+	case IEEE80211_OFDM_RATE_18MB_MASK: return 18000000;
+	case IEEE80211_OFDM_RATE_24MB_MASK: return 24000000;
+	case IEEE80211_OFDM_RATE_36MB_MASK: return 36000000;
+	case IEEE80211_OFDM_RATE_48MB_MASK: return 48000000;
+	case IEEE80211_OFDM_RATE_54MB_MASK: return 54000000;
+	}
+
+	if (priv->ieee->mode == IEEE_B)
+		return 11000000;
+	else
+		return 54000000;
+}
+
+static u32 ipw_get_current_rate(struct ipw_priv *priv)
+{
+	u32 rate, len = sizeof(rate);
+	int err;
+
+	if (!(priv->status & STATUS_ASSOCIATED))
+		return 0;
+
+	if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
+		err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
+				      &len);
+		if (err) {
+			IPW_DEBUG_INFO("failed querying ordinals.\n");
+			return 0;
+		}
+	} else
+		return ipw_get_max_rate(priv);
+
+	switch (rate) {
+	case IPW_TX_RATE_1MB:  return  1000000;
+	case IPW_TX_RATE_2MB:  return  2000000;
+	case IPW_TX_RATE_5MB:  return  5500000;
+	case IPW_TX_RATE_6MB:  return  6000000;
+	case IPW_TX_RATE_9MB:  return  9000000;
+	case IPW_TX_RATE_11MB: return 11000000;
+	case IPW_TX_RATE_12MB: return 12000000;
+	case IPW_TX_RATE_18MB: return 18000000;
+	case IPW_TX_RATE_24MB: return 24000000;
+	case IPW_TX_RATE_36MB: return 36000000;
+	case IPW_TX_RATE_48MB: return 48000000;
+	case IPW_TX_RATE_54MB: return 54000000;
+	}
+
+	return 0;
+}
+
+#define PERFECT_RSSI (-50)
+#define WORST_RSSI   (-85)
+#define IPW_STATS_INTERVAL (2 * HZ)
+static void ipw_gather_stats(struct ipw_priv *priv)
+{
+	u32 rx_err, rx_err_delta, rx_packets_delta;
+	u32 tx_failures, tx_failures_delta, tx_packets_delta;
+	u32 missed_beacons_percent, missed_beacons_delta;
+	u32 quality = 0;
+	u32 len = sizeof(u32);
+	s16 rssi;
+	u32 beacon_quality, signal_quality, tx_quality, rx_quality,
+		rate_quality;
+
+	if (!(priv->status & STATUS_ASSOCIATED)) {
+		priv->quality = 0;
+		return;
+	}
+
+	/* Update the statistics */
+	ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
+			&priv->missed_beacons, &len);
+	missed_beacons_delta = priv->missed_beacons -
+		priv->last_missed_beacons;
+	priv->last_missed_beacons = priv->missed_beacons;
+	if (priv->assoc_request.beacon_interval) {
+		missed_beacons_percent = missed_beacons_delta *
+			(HZ * priv->assoc_request.beacon_interval) /
+			(IPW_STATS_INTERVAL * 10);
+	} else {
+		missed_beacons_percent = 0;
+	}
+	average_add(&priv->average_missed_beacons, missed_beacons_percent);
+
+	ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
+	rx_err_delta = rx_err - priv->last_rx_err;
+	priv->last_rx_err = rx_err;
+
+	ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
+	tx_failures_delta = tx_failures - priv->last_tx_failures;
+	priv->last_tx_failures = tx_failures;
+
+	rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
+	priv->last_rx_packets = priv->rx_packets;
+
+	tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
+	priv->last_tx_packets = priv->tx_packets;
+
+	/* Calculate quality based on the following:
+	 *
+	 * Missed beacon: 100% = 0, 0% = 70% missed
+	 * Rate: 60% = 1Mbs, 100% = Max
+	 * Rx and Tx errors represent a straight % of total Rx/Tx
+	 * RSSI: 100% = > -50,  0% = < -85
+	 * Rx errors: 100% = 0, 0% = 50% missed
+	 *
+	 * The lowest computed quality is used.
+	 *
+	 */
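+	/* Worked example of the arithmetic below: 10% missed beacons gives
+	 * beacon_quality = 100 - 10 = 90, rescaled to
+	 * (90 - 5) * 100 / (100 - 5) = 89%; an average RSSI of -60 dBm gives
+	 * signal_quality = (-60 - (-85)) * 100 / ((-50) - (-85)) = 71%. */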
+#define BEACON_THRESHOLD 5
+	beacon_quality = 100 - missed_beacons_percent;
+	if (beacon_quality < BEACON_THRESHOLD)
+		beacon_quality = 0;
+	else
+		beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
+			(100 - BEACON_THRESHOLD);
+	IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
+			beacon_quality, missed_beacons_percent);
+
+	priv->last_rate = ipw_get_current_rate(priv);
+	rate_quality =  priv->last_rate * 40 / priv->last_rate + 60;
+	IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
+			rate_quality, priv->last_rate / 1000000);
+
+	if (rx_packets_delta > 100 &&
+	    rx_packets_delta + rx_err_delta)
+		rx_quality = 100 - (rx_err_delta * 100) /
+			(rx_packets_delta + rx_err_delta);
+	else
+		rx_quality = 100;
+	IPW_DEBUG_STATS("Rx quality   : %3d%% (%u errors, %u packets)\n",
+			rx_quality, rx_err_delta, rx_packets_delta);
+
+	if (tx_packets_delta > 100 &&
+	    tx_packets_delta + tx_failures_delta)
+		tx_quality = 100 - (tx_failures_delta * 100) /
+			(tx_packets_delta + tx_failures_delta);
+	else
+		tx_quality = 100;
+	IPW_DEBUG_STATS("Tx quality   : %3d%% (%u errors, %u packets)\n",
+			tx_quality, tx_failures_delta, tx_packets_delta);
+
+	rssi = average_value(&priv->average_rssi);
+	if (rssi > PERFECT_RSSI)
+		signal_quality = 100;
+	else if (rssi < WORST_RSSI)
+		signal_quality = 0;
+	else
+		signal_quality = (rssi - WORST_RSSI) * 100 /
+			(PERFECT_RSSI - WORST_RSSI);
+	IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
+			signal_quality, rssi);
+
+	quality = min(beacon_quality,
+		      min(rate_quality,
+			  min(tx_quality, min(rx_quality, signal_quality))));
+	if (quality == beacon_quality)
+		IPW_DEBUG_STATS(
+			"Quality (%d%%): Clamped to missed beacons.\n",
+			quality);
+	if (quality == rate_quality)
+		IPW_DEBUG_STATS(
+			"Quality (%d%%): Clamped to rate quality.\n",
+			quality);
+	if (quality == tx_quality)
+		IPW_DEBUG_STATS(
+			"Quality (%d%%): Clamped to Tx quality.\n",
+			quality);
+	if (quality == rx_quality)
+		IPW_DEBUG_STATS(
+			"Quality (%d%%): Clamped to Rx quality.\n",
+			quality);
+	if (quality == signal_quality)
+		IPW_DEBUG_STATS(
+			"Quality (%d%%): Clamped to signal quality.\n",
+			quality);
+
+	priv->quality = quality;
+
+	queue_delayed_work(priv->workqueue, &priv->gather_stats,
+			   IPW_STATS_INTERVAL);
+}
+
+/**
+ * Handle host notification packet.
+ * Called from interrupt routine
+ */
+static inline void ipw_rx_notification(struct ipw_priv* priv,
+				       struct ipw_rx_notification *notif)
+{
+	IPW_DEBUG_NOTIF("type = %i (%d bytes)\n",
+			notif->subtype, notif->size);
+
+	switch (notif->subtype) {
+	case HOST_NOTIFICATION_STATUS_ASSOCIATED: {
+		struct notif_association *assoc = &notif->u.assoc;
+
+		switch (assoc->state) {
+		case CMAS_ASSOCIATED: {
+			IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
+				  "associated: '%s' " MAC_FMT " \n",
+				  escape_essid(priv->essid, priv->essid_len),
+				  MAC_ARG(priv->bssid));
+
+			switch (priv->ieee->iw_mode) {
+			case IW_MODE_INFRA:
+				memcpy(priv->ieee->bssid, priv->bssid,
+				       ETH_ALEN);
+				break;
+
+			case IW_MODE_ADHOC:
+				memcpy(priv->ieee->bssid, priv->bssid,
+				       ETH_ALEN);
+
+				/* clear out the station table */
+				priv->num_stations = 0;
+
+				IPW_DEBUG_ASSOC("queueing adhoc check\n");
+				queue_delayed_work(priv->workqueue,
+						   &priv->adhoc_check,
+						   priv->assoc_request.beacon_interval);
+				break;
+			}
+
+			priv->status &= ~STATUS_ASSOCIATING;
+			priv->status |= STATUS_ASSOCIATED;
+
+			netif_carrier_on(priv->net_dev);
+			if (netif_queue_stopped(priv->net_dev)) {
+				IPW_DEBUG_NOTIF("waking queue\n");
+				netif_wake_queue(priv->net_dev);
+			} else {
+				IPW_DEBUG_NOTIF("starting queue\n");
+				netif_start_queue(priv->net_dev);
+			}
+
+			ipw_reset_stats(priv);
+			/* Ensure the rate is updated immediately */
+			priv->last_rate = ipw_get_current_rate(priv);
+			schedule_work(&priv->gather_stats);
+			notify_wx_assoc_event(priv);
+
+/*			queue_delayed_work(priv->workqueue,
+					   &priv->request_scan,
+					   SCAN_ASSOCIATED_INTERVAL);
+*/
+			break;
+		}
+
+		case CMAS_AUTHENTICATED: {
+			if (priv->status & (STATUS_ASSOCIATED | STATUS_AUTH)) {
+#ifdef CONFIG_IPW_DEBUG
+				struct notif_authenticate *auth = &notif->u.auth;
+				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
+					  "deauthenticated: '%s' " MAC_FMT ": (0x%04X) - %s \n",
+					  escape_essid(priv->essid, priv->essid_len),
+					  MAC_ARG(priv->bssid),
+					  ntohs(auth->status),
+					  ipw_get_status_code(ntohs(auth->status)));
+#endif
+
+				priv->status &= ~(STATUS_ASSOCIATING |
+						  STATUS_AUTH |
+						  STATUS_ASSOCIATED);
+
+				netif_carrier_off(priv->net_dev);
+				netif_stop_queue(priv->net_dev);
+				queue_work(priv->workqueue, &priv->request_scan);
+				notify_wx_assoc_event(priv);
+				break;
+			}
+
+			IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
+				  "authenticated: '%s' " MAC_FMT "\n",
+				  escape_essid(priv->essid, priv->essid_len),
+				  MAC_ARG(priv->bssid));
+			break;
+		}
+
+		case CMAS_INIT: {
+			IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
+				  "disassociated: '%s' " MAC_FMT " \n",
+				  escape_essid(priv->essid, priv->essid_len),
+				  MAC_ARG(priv->bssid));
+
+			priv->status &= ~(
+				STATUS_DISASSOCIATING |
+				STATUS_ASSOCIATING |
+				STATUS_ASSOCIATED |
+				STATUS_AUTH);
+
+			netif_stop_queue(priv->net_dev);
+			if (!(priv->status & STATUS_ROAMING)) {
+				netif_carrier_off(priv->net_dev);
+				notify_wx_assoc_event(priv);
+
+				/* Cancel any queued work ... */
+				cancel_delayed_work(&priv->request_scan);
+				cancel_delayed_work(&priv->adhoc_check);
+
+				/* Queue up another scan... */
+				queue_work(priv->workqueue,
+					   &priv->request_scan);
+
+				cancel_delayed_work(&priv->gather_stats);
+			} else {
+				priv->status |= STATUS_ROAMING;
+				queue_work(priv->workqueue,
+					   &priv->request_scan);
+			}
+
+			ipw_reset_stats(priv);
+			break;
+		}
+
+		default:
+			IPW_ERROR("assoc: unknown (%d)\n",
+				  assoc->state);
+			break;
+		}
+
+		break;
+	}
+
+	case HOST_NOTIFICATION_STATUS_AUTHENTICATE: {
+		struct notif_authenticate *auth = &notif->u.auth;
+		switch (auth->state) {
+		case CMAS_AUTHENTICATED:
+			IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
+				  "authenticated: '%s' " MAC_FMT " \n",
+				  escape_essid(priv->essid, priv->essid_len),
+				  MAC_ARG(priv->bssid));
+			priv->status |= STATUS_AUTH;
+			break;
+
+		case CMAS_INIT:
+			if (priv->status & STATUS_AUTH) {
+				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
+					  "authentication failed (0x%04X): %s\n",
+					  ntohs(auth->status),
+					  ipw_get_status_code(ntohs(auth->status)));
+			}
+			IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
+				  "deauthenticated: '%s' " MAC_FMT "\n",
+				  escape_essid(priv->essid, priv->essid_len),
+				  MAC_ARG(priv->bssid));
+
+			priv->status &= ~(STATUS_ASSOCIATING |
+					  STATUS_AUTH |
+					  STATUS_ASSOCIATED);
+
+			netif_carrier_off(priv->net_dev);
+			netif_stop_queue(priv->net_dev);
+			queue_work(priv->workqueue, &priv->request_scan);
+			notify_wx_assoc_event(priv);
+			break;
+
+		case CMAS_TX_AUTH_SEQ_1:
+			IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
+				  "AUTH_SEQ_1\n");
+			break;
+		case CMAS_RX_AUTH_SEQ_2:
+			IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
+				  "AUTH_SEQ_2\n");
+			break;
+		case CMAS_AUTH_SEQ_1_PASS:
+			IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
+				  "AUTH_SEQ_1_PASS\n");
+			break;
+		case CMAS_AUTH_SEQ_1_FAIL:
+			IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
+				  "AUTH_SEQ_1_FAIL\n");
+			break;
+		case CMAS_TX_AUTH_SEQ_3:
+			IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
+				  "AUTH_SEQ_3\n");
+			break;
+		case CMAS_RX_AUTH_SEQ_4:
+			IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
+				  "RX_AUTH_SEQ_4\n");
+			break;
+		case CMAS_AUTH_SEQ_2_PASS:
+			IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
+				  "AUTH_SEQ_2_PASS\n");
+			break;
+		case CMAS_AUTH_SEQ_2_FAIL:
+			IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
+				  "AUTH_SEQ_2_FAIL\n");
+			break;
+		case CMAS_TX_ASSOC:
+			IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
+				  "TX_ASSOC\n");
+			break;
+		case CMAS_RX_ASSOC_RESP:
+			IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
+				  "RX_ASSOC_RESP\n");
+			break;
+		case CMAS_ASSOCIATED:
+			IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
+				  "ASSOCIATED\n");
+			break;
+		default:
+			IPW_DEBUG_NOTIF("auth: failure - %d\n", auth->state);
+			break;
+		}
+		break;
+	}
+
+	case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT: {
+		struct notif_channel_result *x = &notif->u.channel_result;
+
+		if (notif->size == sizeof(*x)) {
+			IPW_DEBUG_SCAN("Scan result for channel %d\n",
+				       x->channel_num);
+		} else {
+			IPW_DEBUG_SCAN("Scan result of wrong size %d "
+				       "(should be %zd)\n",
+				       notif->size, sizeof(*x));
+		}
+		break;
+	}
+
+	case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED: {
+		struct notif_scan_complete* x = &notif->u.scan_complete;
+		if (notif->size == sizeof(*x)) {
+			IPW_DEBUG_SCAN("Scan completed: type %d, %d channels, "
+				       "%d status\n",
+				       x->scan_type,
+				       x->num_channels,
+				       x->status);
+		} else {
+			IPW_ERROR("Scan completed of wrong size %d "
+				  "(should be %zd)\n",
+				  notif->size, sizeof(*x));
+		}
+
+		priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
+
+		cancel_delayed_work(&priv->scan_check);
+
+		if (!(priv->status & (STATUS_ASSOCIATED |
+				      STATUS_ASSOCIATING |
+				      STATUS_ROAMING |
+				      STATUS_DISASSOCIATING)))
+			queue_work(priv->workqueue, &priv->associate);
+		else if (priv->status & STATUS_ROAMING) {
+			/* If a scan completed and we are in roam mode, then
+			 * the scan that completed was the one requested as a
+			 * result of entering roam... so, schedule the
+			 * roam work */
+			queue_work(priv->workqueue, &priv->roam);
+		} else if (priv->status & STATUS_SCAN_PENDING)
+			queue_work(priv->workqueue, &priv->request_scan);
+
+		priv->ieee->scans++;
+		break;
+	}
+
+	case HOST_NOTIFICATION_STATUS_FRAG_LENGTH: {
+		struct notif_frag_length *x = &notif->u.frag_len;
+
+		if (notif->size == sizeof(*x)) {
+			IPW_ERROR("Frag length: %d\n", x->frag_length);
+		} else {
+			IPW_ERROR("Frag length of wrong size %d "
+				  "(should be %zd)\n",
+				  notif->size, sizeof(*x));
+		}
+		break;
+	}
+
+	case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION: {
+		struct notif_link_deterioration *x =
+			&notif->u.link_deterioration;
+		if (notif->size==sizeof(*x)) {
+			IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
+				  "link deterioration: '%s' " MAC_FMT " \n",
+				  escape_essid(priv->essid, priv->essid_len),
+				  MAC_ARG(priv->bssid));
+			memcpy(&priv->last_link_deterioration, x, sizeof(*x));
+		} else {
+			IPW_ERROR("Link Deterioration of wrong size %d "
+				  "(should be %zd)\n",
+				  notif->size, sizeof(*x));
+		}
+		break;
+	}
+
+	case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE: {
+		IPW_ERROR("Dino config\n");
+		if (priv->hcmd && priv->hcmd->cmd == HOST_CMD_DINO_CONFIG) {
+			/* TODO: Do anything special? */
+		} else {
+			IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
+		}
+		break;
+	}
+
+	case HOST_NOTIFICATION_STATUS_BEACON_STATE: {
+		struct notif_beacon_state *x = &notif->u.beacon_state;
+		if (notif->size != sizeof(*x)) {
+			IPW_ERROR("Beacon state of wrong size %d (should "
+				  "be %zd)\n", notif->size, sizeof(*x));
+			break;
+		}
+
+		if (x->state == HOST_NOTIFICATION_STATUS_BEACON_MISSING) {
+			if (priv->status & STATUS_SCANNING) {
+				/* Stop scan to keep fw from getting
+				 * stuck... */
+				queue_work(priv->workqueue,
+					   &priv->abort_scan);
+			}
+
+			if (x->number > priv->missed_beacon_threshold &&
+			    priv->status & STATUS_ASSOCIATED) {
+				IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
+					  IPW_DL_STATE,
+					  "Missed beacon: %d - disassociate\n",
+					  x->number);
+				queue_work(priv->workqueue,
+					   &priv->disassociate);
+			} else if (x->number > priv->roaming_threshold) {
+				IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
+					  "Missed beacon: %d - initiate "
+					  "roaming\n",
+					  x->number);
+				queue_work(priv->workqueue,
+					   &priv->roam);
+			} else {
+				IPW_DEBUG_NOTIF("Missed beacon: %d\n",
+						x->number);
+			}
+
+			priv->notif_missed_beacons = x->number;
+		}
+
+		break;
+	}
+
+	case HOST_NOTIFICATION_STATUS_TGI_TX_KEY: {
+		struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
+		if (notif->size==sizeof(*x)) {
+			IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
+				  "0x%02x station %d\n",
+				  x->key_state,x->security_type,
+				  x->station_index);
+			break;
+		}
+
+		IPW_ERROR("TGi Tx Key of wrong size %d (should be %zd)\n",
+			  notif->size, sizeof(*x));
+		break;
+	}
+
+	case HOST_NOTIFICATION_CALIB_KEEP_RESULTS: {
+		struct notif_calibration *x = &notif->u.calibration;
+
+		if (notif->size == sizeof(*x)) {
+			memcpy(&priv->calib, x, sizeof(*x));
+			IPW_DEBUG_INFO("TODO: Calibration\n");
+			break;
+		}
+
+		IPW_ERROR("Calibration of wrong size %d (should be %zd)\n",
+			  notif->size, sizeof(*x));
+		break;
+	}
+
+	case HOST_NOTIFICATION_NOISE_STATS: {
+		if (notif->size == sizeof(u32)) {
+			priv->last_noise = (u8)(notif->u.noise.value & 0xff);
+			average_add(&priv->average_noise, priv->last_noise);
+			break;
+		}
+
+		IPW_ERROR("Noise stat is wrong size %d (should be %zd)\n",
+			  notif->size, sizeof(u32));
+		break;
+	}
+
+	default:
+		IPW_ERROR("Unknown notification: "
+			  "subtype=%d,flags=0x%2x,size=%d\n",
+			  notif->subtype, notif->flags, notif->size);
+	}
+}
+
+/**
+ * Destroys all DMA structures and initialises them again
+ *
+ * @param priv
+ * @return error code
+ */
+static int ipw_queue_reset(struct ipw_priv *priv)
+{
+	int rc = 0;
+	/** @todo customize queue sizes */
+	int nTx = 64, nTxCmd = 8;
+	ipw_tx_queue_free(priv);
+	/* Tx CMD queue */
+	rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
+			       CX2_TX_CMD_QUEUE_READ_INDEX,
+			       CX2_TX_CMD_QUEUE_WRITE_INDEX,
+			       CX2_TX_CMD_QUEUE_BD_BASE,
+			       CX2_TX_CMD_QUEUE_BD_SIZE);
+	if (rc) {
+		IPW_ERROR("Tx Cmd queue init failed\n");
+		goto error;
+	}
+	/* Tx queue(s) */
+	rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
+			       CX2_TX_QUEUE_0_READ_INDEX,
+			       CX2_TX_QUEUE_0_WRITE_INDEX,
+			       CX2_TX_QUEUE_0_BD_BASE,
+			       CX2_TX_QUEUE_0_BD_SIZE);
+	if (rc) {
+		IPW_ERROR("Tx 0 queue init failed\n");
+		goto error;
+	}
+	rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
+			       CX2_TX_QUEUE_1_READ_INDEX,
+			       CX2_TX_QUEUE_1_WRITE_INDEX,
+			       CX2_TX_QUEUE_1_BD_BASE,
+			       CX2_TX_QUEUE_1_BD_SIZE);
+	if (rc) {
+		IPW_ERROR("Tx 1 queue init failed\n");
+		goto error;
+	}
+	rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
+			       CX2_TX_QUEUE_2_READ_INDEX,
+			       CX2_TX_QUEUE_2_WRITE_INDEX,
+			       CX2_TX_QUEUE_2_BD_BASE,
+			       CX2_TX_QUEUE_2_BD_SIZE);
+	if (rc) {
+		IPW_ERROR("Tx 2 queue init failed\n");
+		goto error;
+	}
+	rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
+			       CX2_TX_QUEUE_3_READ_INDEX,
+			       CX2_TX_QUEUE_3_WRITE_INDEX,
+			       CX2_TX_QUEUE_3_BD_BASE,
+			       CX2_TX_QUEUE_3_BD_SIZE);
+	if (rc) {
+		IPW_ERROR("Tx 3 queue init failed\n");
+		goto error;
+	}
+	/* statistics */
+	priv->rx_bufs_min = 0;
+	priv->rx_pend_max = 0;
+	return rc;
+
+ error:
+	ipw_tx_queue_free(priv);
+	return rc;
+}
+
+/**
+ * Reclaim Tx queue entries no more used by NIC.
+ *
+ * When the FW advances the 'R' index, all entries between the old and
+ * new 'R' index need to be reclaimed.  As a result, some free space
+ * forms.  If there is enough free space (> low mark), wake the Tx queue.
+ *
+ * @note Need to protect against garbage in 'R' index
+ * @param priv
+ * @param txq
+ * @param qindex
+ * @return Number of used entries remaining in the queue
+ */
+static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
+				struct clx2_tx_queue *txq, int qindex)
+{
+	u32 hw_tail;
+	int used;
+	struct clx2_queue *q = &txq->q;
+
+	hw_tail = ipw_read32(priv, q->reg_r);
+	if (hw_tail >= q->n_bd) {
+		IPW_ERROR
+			("Read index for DMA queue (%d) is out of range [0-%d)\n",
+			 hw_tail, q->n_bd);
+		goto done;
+	}
+	for (; q->last_used != hw_tail;
+	     q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
+		ipw_queue_tx_free_tfd(priv, txq);
+		priv->tx_packets++;
+	}
+ done:
+	if (ipw_queue_space(q) > q->low_mark && qindex >= 0) {
+		__maybe_wake_tx(priv);
+	}
+	used = q->first_empty - q->last_used;
+	if (used < 0)
+		used += q->n_bd;
+
+	return used;
+}
+
+static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
+			     int len, int sync)
+{
+	struct clx2_tx_queue *txq = &priv->txq_cmd;
+	struct clx2_queue *q = &txq->q;
+	struct tfd_frame *tfd;
+
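+	/* Require one extra free slot for asynchronous commands -- presumably
+	 * headroom for a command that may already be in flight. */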
+	if (ipw_queue_space(q) < (sync ? 1 : 2)) {
+		IPW_ERROR("No space for Tx\n");
+		return -EBUSY;
+	}
+
+	tfd = &txq->bd[q->first_empty];
+	txq->txb[q->first_empty] = NULL;
+
+	memset(tfd, 0, sizeof(*tfd));
+	tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
+	tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
+	priv->hcmd_seq++;
+	tfd->u.cmd.index = hcmd;
+	tfd->u.cmd.length = len;
+	memcpy(tfd->u.cmd.payload, buf, len);
+	q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
+	ipw_write32(priv, q->reg_w, q->first_empty);
+	_ipw_read32(priv, 0x90);
+
+	return 0;
+}
+
+
+
+/*
+ * Rx theory of operation
+ *
+ * The host allocates 32 DMA target addresses and passes the host address
+ * to the firmware at register CX2_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
+ * 0 to 31
+ *
+ * Rx Queue Indexes
+ * The host/firmware share two index registers for managing the Rx buffers.
+ *
+ * The READ index maps to the first position that the firmware may be writing
+ * to -- the driver can read up to (but not including) this position and get
+ * good data.
+ * The READ index is managed by the firmware once the card is enabled.
+ *
+ * The WRITE index maps to the last position the driver has read from -- the
+ * position preceding WRITE is the last slot in which the firmware can place
+ * a packet.
+ *
+ * The queue is empty (no good data) if WRITE = READ - 1, and is full if
+ * WRITE = READ.
+ *
+ * During initialization the host sets up the READ queue position to the first
+ * INDEX position, and WRITE to the last (READ - 1 wrapped)
+ *
+ * When the firmware places a packet in a buffer it will advance the READ index
+ * and fire the RX interrupt.  The driver can then query the READ index and
+ * process as many packets as possible, moving the WRITE index forward as it
+ * resets the Rx queue buffers with new memory.
+ *
+ * The management in the driver is as follows:
+ * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free.  When
+ *   ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
+ *   to replenish the ipw->rxq->rx_free.
+ * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
+ *   ipw->rxq is replenished and the READ INDEX is updated (updating the
+ *   'processed' and 'read' driver indexes as well)
+ * + A received packet is processed and handed to the kernel network stack,
+ *   detached from the ipw->rxq.  The driver 'processed' index is updated.
+ * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
+ *   list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
+ *   INDEX is not incremented and ipw->status(RX_STALLED) is set.  If there
+ *   were enough free buffers and RX_STALLED is set it is cleared.
+ *
+ *
+ * Driver sequence:
+ *
+ * ipw_rx_queue_alloc()       Allocates rx_free
+ * ipw_rx_queue_replenish()   Replenishes rx_free list from rx_used, and calls
+ *                            ipw_rx_queue_restock
+ * ipw_rx_queue_restock()     Moves available buffers from rx_free into Rx
+ *                            queue, updates firmware pointers, and updates
+ *                            the WRITE index.  If insufficient rx_free buffers
+ *                            are available, schedules ipw_rx_queue_replenish
+ *
+ * -- enable interrupts --
+ * ISR - ipw_rx()             Detach ipw_rx_mem_buffers from pool up to the
+ *                            READ INDEX, detaching the SKB from the pool.
+ *                            Moves the packet buffer from queue to rx_used.
+ *                            Calls ipw_rx_queue_restock to refill any empty
+ *                            slots.
+ * ...
+ *
+ */
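+
+/*
+ * A concrete reading of the index rules above: with READ = 5 and WRITE = 4
+ * the ring is empty (no received data to process), while READ = 5 and
+ * WRITE = 5 marks the ring as full.
+ */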
+
+/*
+ * If there are slots in the RX queue that  need to be restocked,
+ * and we have free pre-allocated buffers, fill the ranks as much
+ * as we can pulling from rx_free.
+ *
+ * This moves the 'write' index forward to catch up with 'processed', and
+ * also updates the memory address in the firmware to reference the new
+ * target buffer.
+ */
+static void ipw_rx_queue_restock(struct ipw_priv *priv)
+{
+	struct ipw_rx_queue *rxq = priv->rxq;
+	struct list_head *element;
+	struct ipw_rx_mem_buffer *rxb;
+	unsigned long flags;
+	int write;
+
+	spin_lock_irqsave(&rxq->lock, flags);
+	write = rxq->write;
+	while ((rxq->write != rxq->processed) && (rxq->free_count)) {
+		element = rxq->rx_free.next;
+		rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
+		list_del(element);
+
+		ipw_write32(priv, CX2_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
+			    rxb->dma_addr);
+		rxq->queue[rxq->write] = rxb;
+		rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
+		rxq->free_count--;
+	}
+	spin_unlock_irqrestore(&rxq->lock, flags);
+
+	/* If the pre-allocated buffer pool is dropping low, schedule to
+	 * refill it */
+	if (rxq->free_count <= RX_LOW_WATERMARK)
+		queue_work(priv->workqueue, &priv->rx_replenish);
+
+	/* If we've added more space for the firmware to place data, tell it */
+	if (write != rxq->write)
+		ipw_write32(priv, CX2_RX_WRITE_INDEX, rxq->write);
+}
+
+/*
+ * Move all used packet from rx_used to rx_free, allocating a new SKB for each.
+ * Also restock the Rx queue via ipw_rx_queue_restock.
+ *
+ * This is called as a scheduled work item (except during initialization).
+ */
+static void ipw_rx_queue_replenish(void *data)
+{
+	struct ipw_priv *priv = data;
+	struct ipw_rx_queue *rxq = priv->rxq;
+	struct list_head *element;
+	struct ipw_rx_mem_buffer *rxb;
+	unsigned long flags;
+
+	spin_lock_irqsave(&rxq->lock, flags);
+	while (!list_empty(&rxq->rx_used)) {
+		element = rxq->rx_used.next;
+		rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
+		rxb->skb = alloc_skb(CX2_RX_BUF_SIZE, GFP_ATOMIC);
+		if (!rxb->skb) {
+			printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
+			       priv->net_dev->name);
+			/* We don't reschedule replenish work here -- we will
+			 * call the restock method and if it still needs
+			 * more buffers it will schedule replenish */
+			break;
+		}
+		list_del(element);
+
+		rxb->rxb = (struct ipw_rx_buffer *)rxb->skb->data;
+		rxb->dma_addr = pci_map_single(
+			priv->pci_dev, rxb->skb->data, CX2_RX_BUF_SIZE,
+			PCI_DMA_FROMDEVICE);
+
+		list_add_tail(&rxb->list, &rxq->rx_free);
+		rxq->free_count++;
+	}
+	spin_unlock_irqrestore(&rxq->lock, flags);
+
+	ipw_rx_queue_restock(priv);
+}
+
+/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
+ * If an SKB has been detached, the POOL needs to have its SKB set to NULL
+ * This free routine walks the list of POOL entries and if SKB is set to
+ * non NULL it is unmapped and freed
+ */
+static void ipw_rx_queue_free(struct ipw_priv *priv,
+			      struct ipw_rx_queue *rxq)
+{
+	int i;
+
+	if (!rxq)
+		return;
+
+	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
+		if (rxq->pool[i].skb != NULL) {
+			pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
+					 CX2_RX_BUF_SIZE,
+					 PCI_DMA_FROMDEVICE);
+			dev_kfree_skb(rxq->pool[i].skb);
+		}
+	}
+
+	kfree(rxq);
+}
+
+static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
+{
+	struct ipw_rx_queue *rxq;
+	int i;
+
+	rxq = kmalloc(sizeof(*rxq), GFP_KERNEL);
+	if (!rxq)
+		return NULL;
+	memset(rxq, 0, sizeof(*rxq));
+	spin_lock_init(&rxq->lock);
+	INIT_LIST_HEAD(&rxq->rx_free);
+	INIT_LIST_HEAD(&rxq->rx_used);
+
+	/* Fill the rx_used queue with _all_ of the Rx buffers */
+	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
+		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+
+	/* Set us so that we have processed and used all buffers, but have
+	 * not restocked the Rx queue with fresh buffers */
+	rxq->read = rxq->write = 0;
+	rxq->processed = RX_QUEUE_SIZE - 1;
+	rxq->free_count = 0;
+
+	return rxq;
+}
+
+static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
+{
+	rate &= ~IEEE80211_BASIC_RATE_MASK;
+	if (ieee_mode == IEEE_A) {
+		switch (rate) {
+		case IEEE80211_OFDM_RATE_6MB:
+			return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ?
+				1 : 0;
+		case IEEE80211_OFDM_RATE_9MB:
+			return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ?
+				1 : 0;
+		case IEEE80211_OFDM_RATE_12MB:
+			return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ?
+				1 : 0;
+		case IEEE80211_OFDM_RATE_18MB:
+			return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ?
+				1 : 0;
+		case IEEE80211_OFDM_RATE_24MB:
+			return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ?
+				1 : 0;
+		case IEEE80211_OFDM_RATE_36MB:
+			return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ?
+				1 : 0;
+		case IEEE80211_OFDM_RATE_48MB:
+			return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ?
+				1 : 0;
+		case IEEE80211_OFDM_RATE_54MB:
+			return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ?
+				1 : 0;
+		default:
+			return 0;
+		}
+	}
+
+	/* B and G mixed */
+	switch (rate) {
+	case IEEE80211_CCK_RATE_1MB:
+		return priv->rates_mask & IEEE80211_CCK_RATE_1MB_MASK ? 1 : 0;
+	case IEEE80211_CCK_RATE_2MB:
+		return priv->rates_mask & IEEE80211_CCK_RATE_2MB_MASK ? 1 : 0;
+	case IEEE80211_CCK_RATE_5MB:
+		return priv->rates_mask & IEEE80211_CCK_RATE_5MB_MASK ? 1 : 0;
+	case IEEE80211_CCK_RATE_11MB:
+		return priv->rates_mask & IEEE80211_CCK_RATE_11MB_MASK ? 1 : 0;
+	}
+
+	/* If we are limited to B modulations, bail at this point */
+	if (ieee_mode == IEEE_B)
+		return 0;
+
+	/* G */
+	switch (rate) {
+	case IEEE80211_OFDM_RATE_6MB:
+		return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ? 1 : 0;
+	case IEEE80211_OFDM_RATE_9MB:
+		return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ? 1 : 0;
+	case IEEE80211_OFDM_RATE_12MB:
+		return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
+	case IEEE80211_OFDM_RATE_18MB:
+		return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
+	case IEEE80211_OFDM_RATE_24MB:
+		return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
+	case IEEE80211_OFDM_RATE_36MB:
+		return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
+	case IEEE80211_OFDM_RATE_48MB:
+		return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
+	case IEEE80211_OFDM_RATE_54MB:
+		return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
+	}
+
+	return 0;
+}
+
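+/*
+ * Build the intersection of the rates advertised by 'network' (both its
+ * supported and extended rate sets) and the rates enabled in
+ * priv->rates_mask; the result is stored in 'rates' and the number of rates
+ * kept is returned.
+ */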
+static int ipw_compatible_rates(struct ipw_priv *priv,
+				const struct ieee80211_network *network,
+				struct ipw_supported_rates *rates)
+{
+	int num_rates, i;
+
+	memset(rates, 0, sizeof(*rates));
+	num_rates = min(network->rates_len, (u8)IPW_MAX_RATES);
+	rates->num_rates = 0;
+	for (i = 0; i < num_rates; i++) {
+		if (!ipw_is_rate_in_mask(priv, network->mode, network->rates[i])) {
+			IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
+				       network->rates[i], priv->rates_mask);
+			continue;
+		}
+
+		rates->supported_rates[rates->num_rates++] = network->rates[i];
+	}
+
+	num_rates = min(network->rates_ex_len, (u8)(IPW_MAX_RATES - num_rates));
+	for (i = 0; i < num_rates; i++) {
+		if (!ipw_is_rate_in_mask(priv, network->mode, network->rates_ex[i])) {
+			IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
+				       network->rates_ex[i], priv->rates_mask);
+			continue;
+		}
+
+		rates->supported_rates[rates->num_rates++] = network->rates_ex[i];
+	}
+
+	return rates->num_rates;
+}
+
+static inline void ipw_copy_rates(struct ipw_supported_rates *dest,
+				  const struct ipw_supported_rates *src)
+{
+	u8 i;
+	for (i = 0; i < src->num_rates; i++)
+		dest->supported_rates[i] = src->supported_rates[i];
+	dest->num_rates = src->num_rates;
+}
+
+/* TODO: Look at sniffed packets in the air to determine if the basic rate
+ * mask should ever be used -- right now all callers to add the scan rates are
+ * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
+static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
+			       u8 modulation, u32 rate_mask)
+{
+	u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
+		IEEE80211_BASIC_RATE_MASK : 0;
+
+	if (rate_mask & IEEE80211_CCK_RATE_1MB_MASK)
+		rates->supported_rates[rates->num_rates++] =
+			IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
+
+	if (rate_mask & IEEE80211_CCK_RATE_2MB_MASK)
+		rates->supported_rates[rates->num_rates++] =
+			IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
+
+	if (rate_mask & IEEE80211_CCK_RATE_5MB_MASK)
+		rates->supported_rates[rates->num_rates++] = basic_mask |
+			IEEE80211_CCK_RATE_5MB;
+
+	if (rate_mask & IEEE80211_CCK_RATE_11MB_MASK)
+		rates->supported_rates[rates->num_rates++] = basic_mask |
+			IEEE80211_CCK_RATE_11MB;
+}
+
+static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
+				u8 modulation, u32 rate_mask)
+{
+	u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
+		IEEE80211_BASIC_RATE_MASK : 0;
+
+	if (rate_mask & IEEE80211_OFDM_RATE_6MB_MASK)
+		rates->supported_rates[rates->num_rates++] = basic_mask |
+			IEEE80211_OFDM_RATE_6MB;
+
+	if (rate_mask & IEEE80211_OFDM_RATE_9MB_MASK)
+		rates->supported_rates[rates->num_rates++] =
+			IEEE80211_OFDM_RATE_9MB;
+
+	if (rate_mask & IEEE80211_OFDM_RATE_12MB_MASK)
+		rates->supported_rates[rates->num_rates++] = basic_mask |
+			IEEE80211_OFDM_RATE_12MB;
+
+	if (rate_mask & IEEE80211_OFDM_RATE_18MB_MASK)
+		rates->supported_rates[rates->num_rates++] =
+			IEEE80211_OFDM_RATE_18MB;
+
+	if (rate_mask & IEEE80211_OFDM_RATE_24MB_MASK)
+		rates->supported_rates[rates->num_rates++] = basic_mask |
+			IEEE80211_OFDM_RATE_24MB;
+
+	if (rate_mask & IEEE80211_OFDM_RATE_36MB_MASK)
+		rates->supported_rates[rates->num_rates++] =
+			IEEE80211_OFDM_RATE_36MB;
+
+	if (rate_mask & IEEE80211_OFDM_RATE_48MB_MASK)
+		rates->supported_rates[rates->num_rates++] =
+			IEEE80211_OFDM_RATE_48MB;
+
+	if (rate_mask & IEEE80211_OFDM_RATE_54MB_MASK)
+		rates->supported_rates[rates->num_rates++] =
+			IEEE80211_OFDM_RATE_54MB;
+}
+
+struct ipw_network_match {
+	struct ieee80211_network *network;
+	struct ipw_supported_rates rates;
+};
+
+static int ipw_best_network(
+	struct ipw_priv *priv,
+	struct ipw_network_match *match,
+	struct ieee80211_network *network,
+	int roaming)
+{
+	struct ipw_supported_rates rates;
+
+	/* Verify that this network's capability is compatible with the
+	 * current mode (AdHoc or Infrastructure) */
+	if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
+	     !(network->capability & WLAN_CAPABILITY_ESS)) ||
+	    (priv->ieee->iw_mode == IW_MODE_ADHOC &&
+	     !(network->capability & WLAN_CAPABILITY_IBSS))) {
+		IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded due to "
+				"capability mismatch.\n",
+				escape_essid(network->ssid, network->ssid_len),
+				MAC_ARG(network->bssid));
+		return 0;
+	}
+
+	/* If we do not have an ESSID for this AP, we can not associate with
+	 * it */
+	if (network->flags & NETWORK_EMPTY_ESSID) {
+		IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
+				"because of hidden ESSID.\n",
+				escape_essid(network->ssid, network->ssid_len),
+				MAC_ARG(network->bssid));
+		return 0;
+	}
+
+	if (unlikely(roaming)) {
+		/* If we are roaming, then check whether this is a valid
+		 * network to try to roam to */
+		if ((network->ssid_len != match->network->ssid_len) ||
+		    memcmp(network->ssid, match->network->ssid,
+			   network->ssid_len)) {
+			IPW_DEBUG_ASSOC("Netowrk '%s (" MAC_FMT ")' excluded "
+					"because of non-network ESSID.\n",
+					escape_essid(network->ssid,
+						     network->ssid_len),
+					MAC_ARG(network->bssid));
+			return 0;
+		}
+	} else {
+		/* If an ESSID has been configured then compare the broadcast
+		 * ESSID to ours */
+		if ((priv->config & CFG_STATIC_ESSID) &&
+		    ((network->ssid_len != priv->essid_len) ||
+		     memcmp(network->ssid, priv->essid,
+			    min(network->ssid_len, priv->essid_len)))) {
+			char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
+			strncpy(escaped, escape_essid(
+					network->ssid, network->ssid_len),
+				sizeof(escaped));
+			IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
+					"because of ESSID mismatch: '%s'.\n",
+					escaped, MAC_ARG(network->bssid),
+					escape_essid(priv->essid, priv->essid_len));
+			return 0;
+		}
+	}
+
+	/* If the previously matched network has a stronger signal than this
+	 * one, don't bother testing everything else. */
+	if (match->network && match->network->stats.rssi >
+	    network->stats.rssi) {
+		char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
+		strncpy(escaped,
+			escape_essid(network->ssid, network->ssid_len),
+			sizeof(escaped));
+		IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded because "
+				"'%s (" MAC_FMT ")' has a stronger signal.\n",
+				escaped, MAC_ARG(network->bssid),
+				escape_essid(match->network->ssid,
+					     match->network->ssid_len),
+				MAC_ARG(match->network->bssid));
+		return 0;
+	}
+
+	/* If this network has already had an association attempt within the
+	 * last 5 seconds, do not try to associate again... */
+	if (network->last_associate &&
+	    time_after(network->last_associate + (HZ * 5UL), jiffies)) {
+		IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
+				"because of storming (%lu since last "
+				"assoc attempt).\n",
+				escape_essid(network->ssid, network->ssid_len),
+				MAC_ARG(network->bssid),
+				(jiffies - network->last_associate) / HZ);
+		return 0;
+	}
+
+	/* Verify that this network was scanned recently enough to still
+	 * be considered valid... */
+	if (priv->ieee->scan_age != 0 &&
+	    jiffies - network->last_scanned > priv->ieee->scan_age) {
+		IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
+				"because of age: %lums.\n",
+				escape_essid(network->ssid, network->ssid_len),
+				MAC_ARG(network->bssid),
+				(jiffies - network->last_scanned) / (HZ / 100));
+		return 0;
+	}
+
+	if ((priv->config & CFG_STATIC_CHANNEL) &&
+	    (network->channel != priv->channel)) {
+		IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
+				"because of channel mismatch: %d != %d.\n",
+				escape_essid(network->ssid, network->ssid_len),
+				MAC_ARG(network->bssid),
+				network->channel, priv->channel);
+		return 0;
+	}
+
+	/* Verify privacy compatibility */
+	if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
+	    ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
+		IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
+				"because of privacy mismatch: %s != %s.\n",
+				escape_essid(network->ssid, network->ssid_len),
+				MAC_ARG(network->bssid),
+				priv->capability & CAP_PRIVACY_ON ? "on" :
+				"off",
+				network->capability &
+				WLAN_CAPABILITY_PRIVACY ?"on" : "off");
+		return 0;
+	}
+
+	if ((priv->config & CFG_STATIC_BSSID) &&
+	    memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
+		IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
+				"because of BSSID mismatch: " MAC_FMT ".\n",
+				escape_essid(network->ssid, network->ssid_len),
+				MAC_ARG(network->bssid),
+				MAC_ARG(priv->bssid));
+		return 0;
+	}
+
+	/* Filter out any incompatible freq / mode combinations */
+	if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
+		IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
+				"because of invalid frequency/mode "
+				"combination.\n",
+				escape_essid(network->ssid, network->ssid_len),
+				MAC_ARG(network->bssid));
+		return 0;
+	}
+
+	ipw_compatible_rates(priv, network, &rates);
+	if (rates.num_rates == 0) {
+		IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
+				"because of no compatible rates.\n",
+				escape_essid(network->ssid, network->ssid_len),
+				MAC_ARG(network->bssid));
+		return 0;
+	}
+
+	/* TODO: Perform any further minimal comparative tests.  We do not
+	 * want to put too much policy logic here; intelligent scan selection
+	 * should occur within a generic IEEE 802.11 user space tool.  */
+
+	/* Select this network as the new best-match AP */
+	ipw_copy_rates(&match->rates, &rates);
+	match->network = network;
+
+	IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' is a viable match.\n",
+			escape_essid(network->ssid, network->ssid_len),
+			MAC_ARG(network->bssid));
+
+	return 1;
+}
+
+
+static void ipw_adhoc_create(struct ipw_priv *priv,
+			    struct ieee80211_network *network)
+{
+	/*
+	 * For the purposes of scanning, we can set our wireless mode
+	 * to trigger scans across combinations of bands, but when it
+	 * comes to creating a new ad-hoc network, we have to tell the FW
+	 * exactly which band to use.
+	 *
+	 * We also have the possibility of an invalid channel for the
+	 * chosen band.  Attempting to create a new ad-hoc network
+	 * with an invalid channel for the wireless mode will trigger a
+	 * FW fatal error.
+	 */
+	network->mode = is_valid_channel(priv->ieee->mode, priv->channel);
+	if (network->mode) {
+		network->channel = priv->channel;
+	} else {
+		IPW_WARNING("Overriding invalid channel\n");
+		if (priv->ieee->mode & IEEE_A) {
+			network->mode = IEEE_A;
+			priv->channel = band_a_active_channel[0];
+		} else if (priv->ieee->mode & IEEE_G) {
+			network->mode = IEEE_G;
+			priv->channel = band_b_active_channel[0];
+		} else {
+			network->mode = IEEE_B;
+			priv->channel = band_b_active_channel[0];
+		}
+	}
+
+	network->channel = priv->channel;
+	priv->config |= CFG_ADHOC_PERSIST;
+	ipw_create_bssid(priv, network->bssid);
+	network->ssid_len = priv->essid_len;
+	memcpy(network->ssid, priv->essid, priv->essid_len);
+	memset(&network->stats, 0, sizeof(network->stats));
+	network->capability = WLAN_CAPABILITY_IBSS;
+	if (priv->capability & CAP_PRIVACY_ON)
+		network->capability |= WLAN_CAPABILITY_PRIVACY;
+	network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
+	memcpy(network->rates, priv->rates.supported_rates,
+	       network->rates_len);
+	network->rates_ex_len = priv->rates.num_rates - network->rates_len;
+	memcpy(network->rates_ex,
+	       &priv->rates.supported_rates[network->rates_len],
+	       network->rates_ex_len);
+	network->last_scanned = 0;
+	network->flags = 0;
+	network->last_associate = 0;
+	network->time_stamp[0] = 0;
+	network->time_stamp[1] = 0;
+	network->beacon_interval = 100; /* Default */
+	network->listen_interval = 10;  /* Default */
+	network->atim_window = 0;       /* Default */
+#ifdef CONFIG_IEEE80211_WPA
+	network->wpa_ie_len = 0;
+	network->rsn_ie_len = 0;
+#endif /* CONFIG_IEEE80211_WPA */
+}
+
+static void ipw_send_wep_keys(struct ipw_priv *priv)
+{
+	struct ipw_wep_key *key;
+	int i;
+	struct host_cmd cmd = {
+		.cmd = IPW_CMD_WEP_KEY,
+		.len = sizeof(*key)
+	};
+
+	key = (struct ipw_wep_key *)&cmd.param;
+	key->cmd_id = DINO_CMD_WEP_KEY;
+	key->seq_num = 0;
+
+	for (i = 0; i < 4; i++) {
+		key->key_index = i;
+		if (!(priv->sec.flags & (1 << i))) {
+			key->key_size = 0;
+		} else {
+			key->key_size = priv->sec.key_sizes[i];
+			memcpy(key->key, priv->sec.keys[i], key->key_size);
+		}
+
+		if (ipw_send_cmd(priv, &cmd)) {
+			IPW_ERROR("failed to send WEP_KEY command\n");
+			return;
+		}
+	}
+}
+
+static void ipw_adhoc_check(void *data)
+{
+	struct ipw_priv *priv = data;
+
+	if (priv->missed_adhoc_beacons++ > priv->missed_beacon_threshold &&
+	    !(priv->config & CFG_ADHOC_PERSIST)) {
+		IPW_DEBUG_SCAN("Disassociating due to missed beacons\n");
+		ipw_remove_current_network(priv);
+		ipw_disassociate(priv);
+		return;
+	}
+
+	queue_delayed_work(priv->workqueue, &priv->adhoc_check,
+			   priv->assoc_request.beacon_interval);
+}
+
+#ifdef CONFIG_IPW_DEBUG
+static void ipw_debug_config(struct ipw_priv *priv)
+{
+	IPW_DEBUG_INFO("Scan completed, no valid APs matched "
+		       "[CFG 0x%08X]\n", priv->config);
+	if (priv->config & CFG_STATIC_CHANNEL)
+		IPW_DEBUG_INFO("Channel locked to %d\n",
+			       priv->channel);
+	else
+		IPW_DEBUG_INFO("Channel unlocked.\n");
+	if (priv->config & CFG_STATIC_ESSID)
+		IPW_DEBUG_INFO("ESSID locked to '%s'\n",
+			       escape_essid(priv->essid,
+					    priv->essid_len));
+	else
+		IPW_DEBUG_INFO("ESSID unlocked.\n");
+	if (priv->config & CFG_STATIC_BSSID)
+		IPW_DEBUG_INFO("BSSID locked to %d\n", priv->channel);
+	else
+		IPW_DEBUG_INFO("BSSID unlocked.\n");
+	if (priv->capability & CAP_PRIVACY_ON)
+		IPW_DEBUG_INFO("PRIVACY on\n");
+	else
+		IPW_DEBUG_INFO("PRIVACY off\n");
+	IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
+}
+#else
+#define ipw_debug_config(x) do {} while (0)
+#endif
+
+static inline void ipw_set_fixed_rate(struct ipw_priv *priv,
+				      struct ieee80211_network *network)
+{
+	/* TODO: Verify that this works... */
+	struct ipw_fixed_rate fr = {
+		.tx_rates = priv->rates_mask
+	};
+	u32 reg;
+	u16 mask = 0;
+
+	/* Identify 'current FW band' and match it with the fixed
+	 * Tx rates */
+
+	switch (priv->ieee->freq_band) {
+	case IEEE80211_52GHZ_BAND: /* A only */
+		/* IEEE_A */
+		if (priv->rates_mask & ~IEEE80211_OFDM_RATES_MASK) {
+			/* Invalid fixed rate mask */
+			fr.tx_rates = 0;
+			break;
+		}
+
+		fr.tx_rates >>= IEEE80211_OFDM_SHIFT_MASK_A;
+		break;
+
+	default: /* 2.4GHz or Mixed */
+		/* IEEE_B */
+		if (network->mode == IEEE_B) {
+			if (fr.tx_rates & ~IEEE80211_CCK_RATES_MASK) {
+				/* Invalid fixed rate mask */
+				fr.tx_rates = 0;
+			}
+			break;
+		}
+
+		/* IEEE_G */
+		if (fr.tx_rates & ~(IEEE80211_CCK_RATES_MASK |
+				    IEEE80211_OFDM_RATES_MASK)) {
+			/* Invalid fixed rate mask */
+			fr.tx_rates = 0;
+			break;
+		}
+
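+		/* The 6, 9 and 12 Mb OFDM bits are folded down one bit
+		 * position before the mask is written out -- presumably to
+		 * match the firmware's packed G band rate table, though as
+		 * the TODO above notes this path is unverified. */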
+		if (IEEE80211_OFDM_RATE_6MB_MASK & fr.tx_rates) {
+			mask |= (IEEE80211_OFDM_RATE_6MB_MASK >> 1);
+			fr.tx_rates &= ~IEEE80211_OFDM_RATE_6MB_MASK;
+		}
+
+		if (IEEE80211_OFDM_RATE_9MB_MASK & fr.tx_rates) {
+			mask |= (IEEE80211_OFDM_RATE_9MB_MASK >> 1);
+			fr.tx_rates &= ~IEEE80211_OFDM_RATE_9MB_MASK;
+		}
+
+		if (IEEE80211_OFDM_RATE_12MB_MASK & fr.tx_rates) {
+			mask |= (IEEE80211_OFDM_RATE_12MB_MASK >> 1);
+			fr.tx_rates &= ~IEEE80211_OFDM_RATE_12MB_MASK;
+		}
+
+		fr.tx_rates |= mask;
+		break;
+	}
+
+	reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
+	ipw_write_reg32(priv, reg, *(u32*)&fr);
+}
+
+static int ipw_associate_network(struct ipw_priv *priv,
+				 struct ieee80211_network *network,
+				 struct ipw_supported_rates *rates,
+				 int roaming)
+{
+	int err;
+
+	if (priv->config & CFG_FIXED_RATE)
+		ipw_set_fixed_rate(priv, network);
+
+	if (!(priv->config & CFG_STATIC_ESSID)) {
+		priv->essid_len = min(network->ssid_len,
+				      (u8)IW_ESSID_MAX_SIZE);
+		memcpy(priv->essid, network->ssid, priv->essid_len);
+	}
+
+	network->last_associate = jiffies;
+
+	memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
+	priv->assoc_request.channel = network->channel;
+	if ((priv->capability & CAP_PRIVACY_ON) &&
+	    (priv->capability & CAP_SHARED_KEY)) {
+		priv->assoc_request.auth_type = AUTH_SHARED_KEY;
+		priv->assoc_request.auth_key = priv->sec.active_key;
+	} else {
+		priv->assoc_request.auth_type = AUTH_OPEN;
+		priv->assoc_request.auth_key = 0;
+	}
+
+	if (priv->capability & CAP_PRIVACY_ON)
+		ipw_send_wep_keys(priv);
+
+	/*
+	 * It is valid for our ieee device to support multiple modes, but
+	 * when it comes to associating to a given network we have to choose
+	 * just one mode.
+	 */
+	if (network->mode & priv->ieee->mode & IEEE_A)
+		priv->assoc_request.ieee_mode = IPW_A_MODE;
+	else if (network->mode & priv->ieee->mode & IEEE_G)
+		priv->assoc_request.ieee_mode = IPW_G_MODE;
+	else if (network->mode & priv->ieee->mode & IEEE_B)
+		priv->assoc_request.ieee_mode = IPW_B_MODE;
+
+	IPW_DEBUG_ASSOC("%sssocation attempt: '%s', channel %d, "
+			"802.11%c [%d], enc=%s%s%s%c%c\n",
+			roaming ? "Rea" : "A",
+			escape_essid(priv->essid, priv->essid_len),
+			network->channel,
+			ipw_modes[priv->assoc_request.ieee_mode],
+			rates->num_rates,
+			priv->capability & CAP_PRIVACY_ON ? "on " : "off",
+			priv->capability & CAP_PRIVACY_ON ?
+			(priv->capability & CAP_SHARED_KEY ? "(shared)" :
+			 "(open)") : "",
+			priv->capability & CAP_PRIVACY_ON ? " key=" : "",
+			priv->capability & CAP_PRIVACY_ON ?
+			'1' + priv->sec.active_key : '.',
+			priv->capability & CAP_PRIVACY_ON ?
+			'.' : ' ');
+
+	priv->assoc_request.beacon_interval = network->beacon_interval;
+	if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
+	    (network->time_stamp[0] == 0) &&
+	    (network->time_stamp[1] == 0)) {
+		priv->assoc_request.assoc_type = HC_IBSS_START;
+		priv->assoc_request.assoc_tsf_msw = 0;
+		priv->assoc_request.assoc_tsf_lsw = 0;
+	} else {
+		if (unlikely(roaming))
+			priv->assoc_request.assoc_type = HC_REASSOCIATE;
+		else
+			priv->assoc_request.assoc_type = HC_ASSOCIATE;
+		priv->assoc_request.assoc_tsf_msw = network->time_stamp[1];
+		priv->assoc_request.assoc_tsf_lsw = network->time_stamp[0];
+	}
+
+	memcpy(&priv->assoc_request.bssid, network->bssid, ETH_ALEN);
+
+	if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
+		memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
+		priv->assoc_request.atim_window = network->atim_window;
+	} else {
+		memcpy(&priv->assoc_request.dest, network->bssid,
+		       ETH_ALEN);
+		priv->assoc_request.atim_window = 0;
+	}
+
+	priv->assoc_request.capability = network->capability;
+	priv->assoc_request.listen_interval = network->listen_interval;
+
+	err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
+	if (err) {
+		IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
+		return err;
+	}
+
+	rates->ieee_mode = priv->assoc_request.ieee_mode;
+	rates->purpose = IPW_RATE_CONNECT;
+	ipw_send_supported_rates(priv, rates);
+
+	if (priv->assoc_request.ieee_mode == IPW_G_MODE)
+		priv->sys_config.dot11g_auto_detection = 1;
+	else
+		priv->sys_config.dot11g_auto_detection = 0;
+	err = ipw_send_system_config(priv, &priv->sys_config);
+	if (err) {
+		IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
+		return err;
+	}
+
+	IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
+	err = ipw_set_sensitivity(priv, network->stats.rssi);
+	if (err) {
+		IPW_DEBUG_HC("Attempt to send associate command failed.\n");
+		return err;
+	}
+
+	/*
+	 * If preemption is enabled, it is possible for the association
+	 * to complete before we return from ipw_send_associate.  Therefore
+	 * we have to be sure to update our private data first.
+	 */
+	priv->channel = network->channel;
+	memcpy(priv->bssid, network->bssid, ETH_ALEN);
+	priv->status |= STATUS_ASSOCIATING;
+	priv->status &= ~STATUS_SECURITY_UPDATED;
+
+	priv->assoc_network = network;
+
+	err = ipw_send_associate(priv, &priv->assoc_request);
+	if (err) {
+		IPW_DEBUG_HC("Attempt to send associate command failed.\n");
+		return err;
+	}
+
+	IPW_DEBUG(IPW_DL_STATE, "associating: '%s' " MAC_FMT " \n",
+		  escape_essid(priv->essid, priv->essid_len),
+		  MAC_ARG(priv->bssid));
+
+	return 0;
+}
+
+static void ipw_roam(void *data)
+{
+	struct ipw_priv *priv = data;
+	struct ieee80211_network *network = NULL;
+	struct ipw_network_match match = {
+		.network = priv->assoc_network
+	};
+
+	/* The roaming process is as follows:
+	 *
+	 * 1.  Missed beacon threshold triggers the roaming process by
+	 *     setting the status ROAM bit and requesting a scan.
+	 * 2.  When the scan completes, it schedules the ROAM work
+	 * 3.  The ROAM work looks at all of the known networks for one that
+	 *     is a better network than the one currently associated.  If
+	 *     none is found, the ROAM process is over (ROAM bit cleared)
+	 * 4.  If a better network is found, a disassociation request is
+	 *     sent.
+	 * 5.  When the disassociation completes, the roam work is again
+	 *     scheduled.  The second time through, the driver is no longer
+	 *     associated, and the newly selected network is sent an
+	 *     association request.
+	 * 6.  At this point, the roaming process is complete and the ROAM
+	 *     status bit is cleared.
+	 */
+
+	/* If we are no longer associated, and the roaming bit is no longer
+	 * set, then we are not actively roaming, so just return */
+	if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
+		return;
+
+	if (priv->status & STATUS_ASSOCIATED) {
+		/* First pass through ROAM process -- look for a better
+		 * network */
+		u8 rssi = priv->assoc_network->stats.rssi;
+		priv->assoc_network->stats.rssi = -128;
+		list_for_each_entry(network, &priv->ieee->network_list, list) {
+			if (network != priv->assoc_network)
+				ipw_best_network(priv, &match, network, 1);
+		}
+		priv->assoc_network->stats.rssi = rssi;
+
+		if (match.network == priv->assoc_network) {
+			IPW_DEBUG_ASSOC("No better APs in this network to "
+					"roam to.\n");
+			priv->status &= ~STATUS_ROAMING;
+			ipw_debug_config(priv);
+			return;
+		}
+
+		ipw_send_disassociate(priv, 1);
+		priv->assoc_network = match.network;
+
+		return;
+	}
+
+	/* Second pass through ROAM process -- request association */
+	ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
+	ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
+	priv->status &= ~STATUS_ROAMING;
+}
+
+static void ipw_associate(void *data)
+{
+	struct ipw_priv *priv = data;
+
+	struct ieee80211_network *network = NULL;
+	struct ipw_network_match match = {
+		.network = NULL
+	};
+	struct ipw_supported_rates *rates;
+	struct list_head *element;
+
+	if (!(priv->config & CFG_ASSOCIATE) &&
+	    !(priv->config & (CFG_STATIC_ESSID |
+			      CFG_STATIC_CHANNEL |
+			      CFG_STATIC_BSSID))) {
+		IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
+		return;
+	}
+
+	list_for_each_entry(network, &priv->ieee->network_list, list)
+		ipw_best_network(priv, &match, network, 0);
+
+	network = match.network;
+	rates = &match.rates;
+
+	if (network == NULL &&
+	    priv->ieee->iw_mode == IW_MODE_ADHOC &&
+	    priv->config & CFG_ADHOC_CREATE &&
+	    priv->config & CFG_STATIC_ESSID &&
+	    !list_empty(&priv->ieee->network_free_list)) {
+		element = priv->ieee->network_free_list.next;
+		network = list_entry(element, struct ieee80211_network,
+				     list);
+		ipw_adhoc_create(priv, network);
+		rates = &priv->rates;
+		list_del(element);
+		list_add_tail(&network->list, &priv->ieee->network_list);
+	}
+
+	/* If we reached the end of the list, then we don't have any valid
+	 * matching APs */
+	if (!network) {
+		ipw_debug_config(priv);
+
+		queue_delayed_work(priv->workqueue, &priv->request_scan,
+				   SCAN_INTERVAL);
+
+		return;
+	}
+
+	ipw_associate_network(priv, network, rates, 0);
+}
+
+static inline void ipw_handle_data_packet(struct ipw_priv *priv,
+					      struct ipw_rx_mem_buffer *rxb,
+					      struct ieee80211_rx_stats *stats)
+{
+	struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
+
+	/* We received data from the HW, so stop the watchdog */
+	priv->net_dev->trans_start = jiffies;
+
+	/* We only process data packets if the
+	 * interface is open */
+	if (unlikely((pkt->u.frame.length + IPW_RX_FRAME_SIZE) >
+		     skb_tailroom(rxb->skb))) {
+		priv->ieee->stats.rx_errors++;
+		priv->wstats.discard.misc++;
+		IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
+		return;
+	} else if (unlikely(!netif_running(priv->net_dev))) {
+		priv->ieee->stats.rx_dropped++;
+		priv->wstats.discard.misc++;
+		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
+		return;
+	}
+
+	/* Advance skb->data to the start of the actual payload */
+	skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
+
+	/* Set the size of the skb to the size of the frame */
+	skb_put(rxb->skb, pkt->u.frame.length);
+
+	IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
+
+	if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
+		priv->ieee->stats.rx_errors++;
+	else /* ieee80211_rx succeeded, so it now owns the SKB */
+		rxb->skb = NULL;
+}
+
+
+/*
+ * Main entry function for receiving a packet with 802.11 headers.  This
+ * should be called whenever the FW has notified us that there is a new
+ * skb in the receive queue.
+ */
+static void ipw_rx(struct ipw_priv *priv)
+{
+	struct ipw_rx_mem_buffer *rxb;
+	struct ipw_rx_packet *pkt;
+	struct ieee80211_hdr *header;
+	u32 r, w, i;
+	u8 network_packet;
+
+	r = ipw_read32(priv, CX2_RX_READ_INDEX);
+	w = ipw_read32(priv, CX2_RX_WRITE_INDEX);
+	i = (priv->rxq->processed + 1) % RX_QUEUE_SIZE;
+
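+	/* 'r' appears to be the index the firmware has filled up to; walk
+	 * from the entry after the last one we processed until we catch up
+	 * with it.  'w' is read above but not otherwise used in this
+	 * function. */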
+	while (i != r) {
+		rxb = priv->rxq->queue[i];
+#ifdef CONFIG_IPW_DEBUG
+		if (unlikely(rxb == NULL)) {
+			printk(KERN_CRIT "Queue not allocated!\n");
+			break;
+		}
+#endif
+		priv->rxq->queue[i] = NULL;
+
+		pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
+					    CX2_RX_BUF_SIZE,
+					    PCI_DMA_FROMDEVICE);
+
+		pkt = (struct ipw_rx_packet *)rxb->skb->data;
+		IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
+			     pkt->header.message_type,
+			     pkt->header.rx_seq_num,
+			     pkt->header.control_bits);
+
+		switch (pkt->header.message_type) {
+		case RX_FRAME_TYPE: /* 802.11 frame */ {
+			struct ieee80211_rx_stats stats = {
+				.rssi = pkt->u.frame.rssi_dbm -
+				IPW_RSSI_TO_DBM,
+				.signal = pkt->u.frame.signal,
+				.rate = pkt->u.frame.rate,
+				.mac_time = jiffies,
+	       			.received_channel =
+				pkt->u.frame.received_channel,
+				.freq = (pkt->u.frame.control & (1<<0)) ?
+				IEEE80211_24GHZ_BAND : IEEE80211_52GHZ_BAND,
+				.len = pkt->u.frame.length,
+			};
+
+			if (stats.rssi != 0)
+				stats.mask |= IEEE80211_STATMASK_RSSI;
+			if (stats.signal != 0)
+				stats.mask |= IEEE80211_STATMASK_SIGNAL;
+			if (stats.rate != 0)
+				stats.mask |= IEEE80211_STATMASK_RATE;
+
+			priv->rx_packets++;
+
+#ifdef CONFIG_IPW_PROMISC
+			if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
+				ipw_handle_data_packet(priv, rxb, &stats);
+				break;
+			}
+#endif
+
+			header = (struct ieee80211_hdr *)(rxb->skb->data +
+							  IPW_RX_FRAME_SIZE);
+			/* TODO: Check Ad-Hoc dest/source and make sure
+			 * that we are actually parsing these packets
+			 * correctly -- we should probably use the
+			 * frame control of the packet and disregard
+			 * the current iw_mode */
+			switch (priv->ieee->iw_mode) {
+			case IW_MODE_ADHOC:
+				network_packet =
+					!memcmp(header->addr1,
+						priv->net_dev->dev_addr,
+						ETH_ALEN) ||
+					!memcmp(header->addr3,
+						priv->bssid, ETH_ALEN) ||
+					is_broadcast_ether_addr(header->addr1) ||
+					is_multicast_ether_addr(header->addr1);
+				break;
+
+			case IW_MODE_INFRA:
+			default:
+				network_packet =
+					!memcmp(header->addr3,
+						priv->bssid, ETH_ALEN) ||
+					!memcmp(header->addr1,
+						priv->net_dev->dev_addr,
+						ETH_ALEN) ||
+					is_broadcast_ether_addr(header->addr1) ||
+					is_multicast_ether_addr(header->addr1);
+				break;
+			}
+
+			if (network_packet && priv->assoc_network) {
+				priv->assoc_network->stats.rssi = stats.rssi;
+				average_add(&priv->average_rssi,
+					    stats.rssi);
+				priv->last_rx_rssi = stats.rssi;
+			}
+
+			IPW_DEBUG_RX("Frame: len=%u\n", pkt->u.frame.length);
+
+			if (pkt->u.frame.length < frame_hdr_len(header)) {
+				IPW_DEBUG_DROP("Received packet is too small. "
+					       "Dropping.\n");
+				priv->ieee->stats.rx_errors++;
+				priv->wstats.discard.misc++;
+				break;
+			}
+
+			switch (WLAN_FC_GET_TYPE(header->frame_ctl)) {
+			case IEEE80211_FTYPE_MGMT:
+				ieee80211_rx_mgt(priv->ieee, header, &stats);
+				if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
+				    ((WLAN_FC_GET_STYPE(header->frame_ctl) ==
+				      IEEE80211_STYPE_PROBE_RESP) ||
+				     (WLAN_FC_GET_STYPE(header->frame_ctl) ==
+				      IEEE80211_STYPE_BEACON)) &&
+				    !memcmp(header->addr3, priv->bssid, ETH_ALEN))
+					ipw_add_station(priv, header->addr2);
+				break;
+
+			case IEEE80211_FTYPE_CTL:
+				break;
+
+			case IEEE80211_FTYPE_DATA:
+				if (network_packet)
+					ipw_handle_data_packet(priv, rxb, &stats);
+				else
+					IPW_DEBUG_DROP("Dropping: " MAC_FMT
+						       ", " MAC_FMT ", " MAC_FMT "\n",
+						       MAC_ARG(header->addr1), MAC_ARG(header->addr2),
+						       MAC_ARG(header->addr3));
+				break;
+			}
+			break;
+		}
+
+		case RX_HOST_NOTIFICATION_TYPE: {
+			IPW_DEBUG_RX("Notification: subtype=%02X flags=%02X size=%d\n",
+				     pkt->u.notification.subtype,
+				     pkt->u.notification.flags,
+				     pkt->u.notification.size);
+			ipw_rx_notification(priv, &pkt->u.notification);
+			break;
+		}
+
+		default:
+			IPW_DEBUG_RX("Bad Rx packet of type %d\n",
+				     pkt->header.message_type);
+			break;
+		}
+
+		/* For now we just don't re-use anything.  We can tweak this
+		 * later to try and re-use notification packets and SKBs that
+		 * fail to Rx correctly */
+		if (rxb->skb != NULL) {
+			dev_kfree_skb_any(rxb->skb);
+			rxb->skb = NULL;
+		}
+
+		pci_unmap_single(priv->pci_dev, rxb->dma_addr,
+				 CX2_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+		list_add_tail(&rxb->list, &priv->rxq->rx_used);
+
+		i = (i + 1) % RX_QUEUE_SIZE;
+	}
+
+	/* Backtrack one entry */
+	priv->rxq->processed = (i ? i : RX_QUEUE_SIZE) - 1;
+
+	ipw_rx_queue_restock(priv);
+}
+
+static void ipw_abort_scan(struct ipw_priv *priv)
+{
+	int err;
+
+	if (priv->status & STATUS_SCAN_ABORTING) {
+		IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
+		return;
+	}
+	priv->status |= STATUS_SCAN_ABORTING;
+
+	err = ipw_send_scan_abort(priv);
+	if (err)
+		IPW_DEBUG_HC("Request to abort scan failed.\n");
+}
+
+static int ipw_request_scan(struct ipw_priv *priv)
+{
+	struct ipw_scan_request_ext scan;
+	int channel_index = 0;
+	int i, err, scan_type;
+
+	if (priv->status & STATUS_EXIT_PENDING) {
+		IPW_DEBUG_SCAN("Aborting scan due to device shutdown\n");
+		priv->status |= STATUS_SCAN_PENDING;
+		return 0;
+	}
+
+	if (priv->status & STATUS_SCANNING) {
+		IPW_DEBUG_HC("Concurrent scan requested.  Aborting first.\n");
+		priv->status |= STATUS_SCAN_PENDING;
+		ipw_abort_scan(priv);
+		return 0;
+	}
+
+	if (priv->status & STATUS_SCAN_ABORTING) {
+		IPW_DEBUG_HC("Scan request while abort pending.  Queuing.\n");
+		priv->status |= STATUS_SCAN_PENDING;
+		return 0;
+	}
+
+	if (priv->status & STATUS_RF_KILL_MASK) {
+		IPW_DEBUG_HC("Aborting scan due to RF Kill activation\n");
+		priv->status |= STATUS_SCAN_PENDING;
+		return 0;
+	}
+
+	memset(&scan, 0, sizeof(scan));
+
+	scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] = 20;
+	scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] = 20;
+	scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = 20;
+
+	scan.full_scan_index = ieee80211_get_scans(priv->ieee);
+	/* If we are roaming, then make this a directed scan for the current
+	 * network.  Otherwise, ensure that every other scan is a fast
+	 * channel hop scan */
+	if ((priv->status & STATUS_ROAMING) || (
+		    !(priv->status & STATUS_ASSOCIATED) &&
+		    (priv->config & CFG_STATIC_ESSID) &&
+		    (scan.full_scan_index % 2))) {
+		err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
+		if (err) {
+			IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
+			return err;
+		}
+
+		scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
+	} else {
+		scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
+	}
+
+        if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) {
+		int start = channel_index;
+		for (i = 0; i < MAX_A_CHANNELS; i++) {
+			if (band_a_active_channel[i] == 0)
+				break;
+			if ((priv->status & STATUS_ASSOCIATED) &&
+			    band_a_active_channel[i] == priv->channel)
+				continue;
+			channel_index++;
+			scan.channels_list[channel_index] =
+				band_a_active_channel[i];
+			ipw_set_scan_type(&scan, channel_index, scan_type);
+		}
+
+		if (start != channel_index) {
+			scan.channels_list[start] = (u8)(IPW_A_MODE << 6) |
+				(channel_index - start);
+			channel_index++;
+		}
+	}
+
+        if (priv->ieee->freq_band & IEEE80211_24GHZ_BAND) {
+		int start = channel_index;
+		for (i = 0; i < MAX_B_CHANNELS; i++) {
+			if (band_b_active_channel[i] == 0)
+				break;
+			if ((priv->status & STATUS_ASSOCIATED) &&
+			    band_b_active_channel[i] == priv->channel)
+				continue;
+			channel_index++;
+			scan.channels_list[channel_index] =
+				band_b_active_channel[i];
+			ipw_set_scan_type(&scan, channel_index, scan_type);
+		}
+
+		if (start != channel_index) {
+			scan.channels_list[start] = (u8)(IPW_B_MODE << 6) |
+				(channel_index - start);
+		}
+	}
+
+	err = ipw_send_scan_request_ext(priv, &scan);
+	if (err) {
+		IPW_DEBUG_HC("Sending scan command failed: %08X\n",
+			     err);
+		return -EIO;
+	}
+
+	priv->status |= STATUS_SCANNING;
+	priv->status &= ~STATUS_SCAN_PENDING;
+
+	return 0;
+}
+
+/*
+ * This file defines the Wireless Extension handlers.  It does not
+ * define any methods of hardware manipulation and relies on the
+ * functions defined in ipw_main to provide the HW interaction.
+ *
+ * The exception to this is the use of the ipw_get_ordinal()
+ * function used to poll the hardware vs. making unnecessary calls.
+ *
+ */
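+
+/* Each handler below uses the standard Wireless Extensions iw_handler
+ * signature (net_device, iw_request_info, iwreq_data union, extra buffer)
+ * and is wired into the core through ipw_wx_handlers[] and
+ * ipw_wx_handler_def further below. */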
+
+static int ipw_wx_get_name(struct net_device *dev,
+			   struct iw_request_info *info,
+			   union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+	if (!(priv->status & STATUS_ASSOCIATED))
+		strcpy(wrqu->name, "unassociated");
+	else
+		snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
+			 ipw_modes[priv->assoc_request.ieee_mode]);
+	IPW_DEBUG_WX("Name: %s\n", wrqu->name);
+	return 0;
+}
+
+static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
+{
+	if (channel == 0) {
+		IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
+		priv->config &= ~CFG_STATIC_CHANNEL;
+		if (!(priv->status & (STATUS_SCANNING | STATUS_ASSOCIATED |
+				      STATUS_ASSOCIATING))) {
+			IPW_DEBUG_ASSOC("Attempting to associate with new "
+					"parameters.\n");
+			ipw_associate(priv);
+		}
+
+		return 0;
+	}
+
+	priv->config |= CFG_STATIC_CHANNEL;
+
+	if (priv->channel == channel) {
+		IPW_DEBUG_INFO(
+			"Request to set channel to current value (%d)\n",
+			channel);
+		return 0;
+	}
+
+	IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
+	priv->channel = channel;
+
+	/* If we are currently associated, or trying to associate
+	 * then see if this is a new channel (causing us to disassociate) */
+	if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
+		IPW_DEBUG_ASSOC("Disassociating due to channel change.\n");
+		ipw_disassociate(priv);
+	} else {
+		ipw_associate(priv);
+	}
+
+	return 0;
+}
+
+static int ipw_wx_set_freq(struct net_device *dev,
+			   struct iw_request_info *info,
+			   union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+	struct iw_freq *fwrq = &wrqu->freq;
+
+	/* if setting by freq convert to channel */
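+	/* Wireless Extensions encodes a frequency as m * 10^e Hz, so with
+	 * e == 1 an m between 2.412e8 and 2.487e8 covers the 2.4 GHz
+	 * channels and m / 100000 yields the frequency in MHz for the
+	 * ipw_frequencies[] lookup. */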
+	if (fwrq->e == 1) {
+		if ((fwrq->m >= (int) 2.412e8 &&
+		     fwrq->m <= (int) 2.487e8)) {
+			int f = fwrq->m / 100000;
+			int c = 0;
+
+			while ((c < REG_MAX_CHANNEL) &&
+			       (f != ipw_frequencies[c]))
+				c++;
+
+			/* hack to fall through */
+			fwrq->e = 0;
+			fwrq->m = c + 1;
+		}
+	}
+
+	if (fwrq->e > 0 || fwrq->m > 1000)
+		return -EOPNOTSUPP;
+
+	IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
+	return ipw_set_channel(priv, (u8)fwrq->m);
+}
+
+
+static int ipw_wx_get_freq(struct net_device *dev,
+			   struct iw_request_info *info,
+			   union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+
+	wrqu->freq.e = 0;
+
+	/* If we are associated, trying to associate, or have a statically
+	 * configured CHANNEL then return that; otherwise return ANY */
+	if (priv->config & CFG_STATIC_CHANNEL ||
+	    priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED))
+		wrqu->freq.m = priv->channel;
+	else
+		wrqu->freq.m = 0;
+
+	IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
+	return 0;
+}
+
+static int ipw_wx_set_mode(struct net_device *dev,
+			   struct iw_request_info *info,
+			   union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+	int err = 0;
+
+	IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
+
+	if (wrqu->mode == priv->ieee->iw_mode)
+		return 0;
+
+	switch (wrqu->mode) {
+#ifdef CONFIG_IPW_PROMISC
+	case IW_MODE_MONITOR:
+#endif
+	case IW_MODE_ADHOC:
+	case IW_MODE_INFRA:
+		break;
+	case IW_MODE_AUTO:
+		wrqu->mode = IW_MODE_INFRA;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+#ifdef CONFIG_IPW_PROMISC
+	if (priv->ieee->iw_mode == IW_MODE_MONITOR)
+		priv->net_dev->type = ARPHRD_ETHER;
+
+	if (wrqu->mode == IW_MODE_MONITOR)
+		priv->net_dev->type = ARPHRD_IEEE80211;
+#endif /* CONFIG_IPW_PROMISC */
+
+#ifdef CONFIG_PM
+	/* Free the existing firmware and reset the fw_loaded
+	 * flag so ipw_load() will bring in the new firmware */
+	if (fw_loaded) {
+		fw_loaded = 0;
+	}
+
+	release_firmware(bootfw);
+	release_firmware(ucode);
+	release_firmware(firmware);
+	bootfw = ucode = firmware = NULL;
+#endif
+
+	priv->ieee->iw_mode = wrqu->mode;
+	ipw_adapter_restart(priv);
+
+ 	return err;
+}
+
+static int ipw_wx_get_mode(struct net_device *dev,
+			       struct iw_request_info *info,
+			       union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+
+	wrqu->mode = priv->ieee->iw_mode;
+	IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
+
+	return 0;
+}
+
+
+#define DEFAULT_RTS_THRESHOLD     2304U
+#define MIN_RTS_THRESHOLD         1U
+#define MAX_RTS_THRESHOLD         2304U
+#define DEFAULT_BEACON_INTERVAL   100U
+#define	DEFAULT_SHORT_RETRY_LIMIT 7U
+#define	DEFAULT_LONG_RETRY_LIMIT  4U
+
+/* Values are in microseconds */
+static const s32 timeout_duration[] = {
+	350000,
+	250000,
+	75000,
+	37000,
+	25000,
+};
+
+static const s32 period_duration[] = {
+	400000,
+	700000,
+	1000000,
+	1000000,
+	1000000
+};
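+
+/* Both tables are indexed by power-save level 1..5 (level - 1), as in
+ * ipw_wx_get_powermode() below. */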
+
+static int ipw_wx_get_range(struct net_device *dev,
+			    struct iw_request_info *info,
+			    union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+	struct iw_range *range = (struct iw_range *)extra;
+	u16 val;
+	int i;
+
+	wrqu->data.length = sizeof(*range);
+	memset(range, 0, sizeof(*range));
+
+	/* 54 Mbps == ~27 Mb/s real (802.11g) */
+	range->throughput = 27 * 1000 * 1000;
+
+	range->max_qual.qual = 100;
+	/* TODO: Find real max RSSI and stick here */
+	range->max_qual.level = 0;
+	range->max_qual.noise = 0;
+	range->max_qual.updated = 7; /* Updated all three */
+
+	range->avg_qual.qual = 70;
+	/* TODO: Find real 'good' to 'bad' threshold value for RSSI */
+	range->avg_qual.level = 0; /* FIXME to real average level */
+	range->avg_qual.noise = 0;
+	range->avg_qual.updated = 7; /* Updated all three */
+
+	range->num_bitrates = min(priv->rates.num_rates, (u8)IW_MAX_BITRATES);
+
+	for (i = 0; i < range->num_bitrates; i++)
+		range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
+			500000;
+
+	range->max_rts = DEFAULT_RTS_THRESHOLD;
+	range->min_frag = MIN_FRAG_THRESHOLD;
+	range->max_frag = MAX_FRAG_THRESHOLD;
+
+	range->encoding_size[0] = 5;
+	range->encoding_size[1] = 13;
+	range->num_encoding_sizes = 2;
+	range->max_encoding_tokens = WEP_KEYS;
+
+	/* Set the Wireless Extension versions */
+	range->we_version_compiled = WIRELESS_EXT;
+	range->we_version_source = 16;
+
+        range->num_channels = FREQ_COUNT;
+
+	val = 0;
+	for (i = 0; i < FREQ_COUNT; i++) {
+		range->freq[val].i = i + 1;
+		range->freq[val].m = ipw_frequencies[i] * 100000;
+		range->freq[val].e = 1;
+		val++;
+
+		if (val == IW_MAX_FREQUENCIES)
+			break;
+	}
+	range->num_frequency = val;
+
+	IPW_DEBUG_WX("GET Range\n");
+	return 0;
+}
+
+static int ipw_wx_set_wap(struct net_device *dev,
+			  struct iw_request_info *info,
+			  union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+
+	static const unsigned char any[] = {
+		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+	};
+	static const unsigned char off[] = {
+		0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+	};
+
+	if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
+		return -EINVAL;
+
+	if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
+	    !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
+		/* we disable mandatory BSSID association */
+		IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
+		priv->config &= ~CFG_STATIC_BSSID;
+		if (!(priv->status & (STATUS_SCANNING | STATUS_ASSOCIATED |
+				      STATUS_ASSOCIATING))) {
+			IPW_DEBUG_ASSOC("Attempting to associate with new "
+					"parameters.\n");
+			ipw_associate(priv);
+		}
+
+		return 0;
+	}
+
+	priv->config |= CFG_STATIC_BSSID;
+	if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
+		IPW_DEBUG_WX("BSSID set to current BSSID.\n");
+		return 0;
+	}
+
+	IPW_DEBUG_WX("Setting mandatory BSSID to " MAC_FMT "\n",
+		     MAC_ARG(wrqu->ap_addr.sa_data));
+
+	memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
+
+	/* If we are currently associated, or trying to associate
+	 * then see if this is a new BSSID (causing us to disassociate) */
+	if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
+		IPW_DEBUG_ASSOC("Disassociating due to BSSID change.\n");
+		ipw_disassociate(priv);
+	} else {
+		ipw_associate(priv);
+	}
+
+	return 0;
+}
+
+static int ipw_wx_get_wap(struct net_device *dev,
+			  struct iw_request_info *info,
+			  union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+	/* If we are associated, trying to associate, or have a statically
+	 * configured BSSID then return that; otherwise return ANY */
+	if (priv->config & CFG_STATIC_BSSID ||
+	    priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
+		wrqu->ap_addr.sa_family = ARPHRD_ETHER;
+		memcpy(wrqu->ap_addr.sa_data, &priv->bssid, ETH_ALEN);
+	} else
+		memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
+
+	IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n",
+		     MAC_ARG(wrqu->ap_addr.sa_data));
+	return 0;
+}
+
+static int ipw_wx_set_essid(struct net_device *dev,
+			    struct iw_request_info *info,
+			    union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+	char *essid = ""; /* ANY */
+	int length = 0;
+
+	if (wrqu->essid.flags && wrqu->essid.length) {
+		length = wrqu->essid.length - 1;
+		essid = extra;
+	}
+	if (length == 0) {
+		IPW_DEBUG_WX("Setting ESSID to ANY\n");
+		priv->config &= ~CFG_STATIC_ESSID;
+		if (!(priv->status & (STATUS_SCANNING | STATUS_ASSOCIATED |
+				      STATUS_ASSOCIATING))) {
+			IPW_DEBUG_ASSOC("Attempting to associate with new "
+					"parameters.\n");
+			ipw_associate(priv);
+		}
+
+		return 0;
+	}
+
+	length = min(length, IW_ESSID_MAX_SIZE);
+
+	priv->config |= CFG_STATIC_ESSID;
+
+	if (priv->essid_len == length && !memcmp(priv->essid, extra, length)) {
+		IPW_DEBUG_WX("ESSID set to current ESSID.\n");
+		return 0;
+	}
+
+	IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(essid, length),
+		     length);
+
+	priv->essid_len = length;
+	memcpy(priv->essid, essid, priv->essid_len);
+
+	/* If we are currently associated, or trying to associate
+	 * then see if this is a new ESSID (causing us to disassociate) */
+	if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
+		IPW_DEBUG_ASSOC("Disassociating due to ESSID change.\n");
+		ipw_disassociate(priv);
+	} else {
+		ipw_associate(priv);
+	}
+
+	return 0;
+}
+
+static int ipw_wx_get_essid(struct net_device *dev,
+			    struct iw_request_info *info,
+			    union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+
+	/* If we are associated, trying to associate, or have a statically
+	 * configured ESSID then return that; otherwise return ANY */
+	if (priv->config & CFG_STATIC_ESSID ||
+	    priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
+		IPW_DEBUG_WX("Getting essid: '%s'\n",
+			     escape_essid(priv->essid, priv->essid_len));
+		memcpy(extra, priv->essid, priv->essid_len);
+		wrqu->essid.length = priv->essid_len;
+		wrqu->essid.flags = 1; /* active */
+	} else {
+		IPW_DEBUG_WX("Getting essid: ANY\n");
+		wrqu->essid.length = 0;
+		wrqu->essid.flags = 0; /* active */
+	}
+
+	return 0;
+}
+
+static int ipw_wx_set_nick(struct net_device *dev,
+			   struct iw_request_info *info,
+			   union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+
+	IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
+	if (wrqu->data.length > IW_ESSID_MAX_SIZE)
+		return -E2BIG;
+
+	wrqu->data.length = min((size_t)wrqu->data.length, sizeof(priv->nick));
+	memset(priv->nick, 0, sizeof(priv->nick));
+	memcpy(priv->nick, extra,  wrqu->data.length);
+	IPW_DEBUG_TRACE("<<\n");
+	return 0;
+
+}
+
+
+static int ipw_wx_get_nick(struct net_device *dev,
+			   struct iw_request_info *info,
+			   union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+	IPW_DEBUG_WX("Getting nick\n");
+	wrqu->data.length = strlen(priv->nick) + 1;
+	memcpy(extra, priv->nick, wrqu->data.length);
+	wrqu->data.flags = 1; /* active */
+	return 0;
+}
+
+
+static int ipw_wx_set_rate(struct net_device *dev,
+			   struct iw_request_info *info,
+			   union iwreq_data *wrqu, char *extra)
+{
+	IPW_DEBUG_WX("0x%p, 0x%p, 0x%p\n", dev, info, wrqu);
+	return -EOPNOTSUPP;
+}
+
+static int ipw_wx_get_rate(struct net_device *dev,
+			   struct iw_request_info *info,
+			   union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv * priv = ieee80211_priv(dev);
+	wrqu->bitrate.value = priv->last_rate;
+
+	IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
+	return 0;
+}
+
+
+static int ipw_wx_set_rts(struct net_device *dev,
+			  struct iw_request_info *info,
+			  union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+
+	if (wrqu->rts.disabled)
+		priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
+	else {
+		if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
+		    wrqu->rts.value > MAX_RTS_THRESHOLD)
+			return -EINVAL;
+
+		priv->rts_threshold = wrqu->rts.value;
+	}
+
+	ipw_send_rts_threshold(priv, priv->rts_threshold);
+	IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
+	return 0;
+}
+
+static int ipw_wx_get_rts(struct net_device *dev,
+			  struct iw_request_info *info,
+			  union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+	wrqu->rts.value = priv->rts_threshold;
+	wrqu->rts.fixed = 0;	/* no auto select */
+	wrqu->rts.disabled =
+		(wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
+
+	IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
+	return 0;
+}
+
+
+static int ipw_wx_set_txpow(struct net_device *dev,
+			    struct iw_request_info *info,
+			    union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+	struct ipw_tx_power tx_power;
+	int i;
+
+	if (ipw_radio_kill_sw(priv, wrqu->power.disabled))
+		return -EINPROGRESS;
+
+	if (wrqu->power.flags != IW_TXPOW_DBM)
+		return -EINVAL;
+
+	if ((wrqu->power.value > 20) ||
+	    (wrqu->power.value < -12))
+		return -EINVAL;
+
+	priv->tx_power = wrqu->power.value;
+
+	memset(&tx_power, 0, sizeof(tx_power));
+
+	/* configure device for 'G' band */
+	tx_power.ieee_mode = IPW_G_MODE;
+	tx_power.num_channels = 11;
+	for (i = 0; i < 11; i++) {
+		tx_power.channels_tx_power[i].channel_number = i + 1;
+		tx_power.channels_tx_power[i].tx_power = priv->tx_power;
+	}
+	if (ipw_send_tx_power(priv, &tx_power))
+		goto error;
+
+	/* configure device to also handle 'B' band */
+	tx_power.ieee_mode = IPW_B_MODE;
+	if (ipw_send_tx_power(priv, &tx_power))
+		goto error;
+
+	return 0;
+
+ error:
+	return -EIO;
+}
+
+
+static int ipw_wx_get_txpow(struct net_device *dev,
+			    struct iw_request_info *info,
+			    union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+
+	wrqu->power.value = priv->tx_power;
+	wrqu->power.fixed = 1;
+	wrqu->power.flags = IW_TXPOW_DBM;
+	wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
+
+	IPW_DEBUG_WX("GET TX Power -> %s %d \n",
+		     wrqu->power.disabled ? "ON" : "OFF",
+		     wrqu->power.value);
+
+	return 0;
+}
+
+static int ipw_wx_set_frag(struct net_device *dev,
+			       struct iw_request_info *info,
+			       union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+
+	if (wrqu->frag.disabled)
+		priv->ieee->fts = DEFAULT_FTS;
+	else {
+		if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
+		    wrqu->frag.value > MAX_FRAG_THRESHOLD)
+			return -EINVAL;
+
+		priv->ieee->fts = wrqu->frag.value & ~0x1;
+	}
+
+	ipw_send_frag_threshold(priv, wrqu->frag.value);
+	IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
+	return 0;
+}
+
+static int ipw_wx_get_frag(struct net_device *dev,
+			       struct iw_request_info *info,
+			       union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+	wrqu->frag.value = priv->ieee->fts;
+	wrqu->frag.fixed = 0;	/* no auto select */
+	wrqu->frag.disabled =
+		(wrqu->frag.value == DEFAULT_FTS);
+
+	IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
+
+	return 0;
+}
+
+static int ipw_wx_set_retry(struct net_device *dev,
+			    struct iw_request_info *info,
+			    union iwreq_data *wrqu, char *extra)
+{
+	IPW_DEBUG_WX("0x%p, 0x%p, 0x%p\n", dev, info, wrqu);
+	return -EOPNOTSUPP;
+}
+
+
+static int ipw_wx_get_retry(struct net_device *dev,
+			    struct iw_request_info *info,
+			    union iwreq_data *wrqu, char *extra)
+{
+	IPW_DEBUG_WX("0x%p, 0x%p, 0x%p\n", dev, info, wrqu);
+	return -EOPNOTSUPP;
+}
+
+
+static int ipw_wx_set_scan(struct net_device *dev,
+			   struct iw_request_info *info,
+			   union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+	IPW_DEBUG_WX("Start scan\n");
+	if (ipw_request_scan(priv))
+		return -EIO;
+	return 0;
+}
+
+static int ipw_wx_get_scan(struct net_device *dev,
+			   struct iw_request_info *info,
+			   union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+	return ieee80211_wx_get_scan(priv->ieee, info, wrqu, extra);
+}
+
+static int ipw_wx_set_encode(struct net_device *dev,
+				 struct iw_request_info *info,
+				 union iwreq_data *wrqu, char *key)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+	return ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);
+}
+
+static int ipw_wx_get_encode(struct net_device *dev,
+				 struct iw_request_info *info,
+				 union iwreq_data *wrqu, char *key)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+	return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key);
+}
+
+static int ipw_wx_set_power(struct net_device *dev,
+			        struct iw_request_info *info,
+			        union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+	int err;
+
+	if (wrqu->power.disabled) {
+		priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
+		err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
+		if (err) {
+			IPW_DEBUG_WX("failed setting power mode.\n");
+			return err;
+		}
+
+		IPW_DEBUG_WX("SET Power Management Mode -> off\n");
+
+		return 0;
+	}
+
+	switch (wrqu->power.flags & IW_POWER_MODE) {
+	case IW_POWER_ON:    /* If not specified */
+	case IW_POWER_MODE:  /* If set all mask */
+	case IW_POWER_ALL_R: /* If explicitly stated all */
+		break;
+	default: /* Otherwise we don't support it */
+		IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
+			     wrqu->power.flags);
+		return -EOPNOTSUPP;
+	}
+
+	/* If the user hasn't specified a power management mode yet, default
+	 * to BATTERY */
+        if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
+		priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
+	else
+		priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
+	err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
+	if (err) {
+		IPW_DEBUG_WX("failed setting power mode.\n");
+		return err;
+	}
+
+	IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n",
+		     priv->power_mode);
+
+	return 0;
+}
+
+static int ipw_wx_get_power(struct net_device *dev,
+			        struct iw_request_info *info,
+			        union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+
+	if (!(priv->power_mode & IPW_POWER_ENABLED)) {
+		wrqu->power.disabled = 1;
+	} else {
+		wrqu->power.disabled = 0;
+	}
+
+	IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
+
+	return 0;
+}
+
+static int ipw_wx_set_powermode(struct net_device *dev,
+				    struct iw_request_info *info,
+				    union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+	int mode = *(int *)extra;
+	int err;
+
+	if ((mode < 1) || (mode > IPW_POWER_LIMIT)) {
+		mode = IPW_POWER_AC;
+		priv->power_mode = mode;
+	} else {
+		priv->power_mode = IPW_POWER_ENABLED | mode;
+	}
+
+	if (priv->power_mode != mode) {
+		err = ipw_send_power_mode(priv, mode);
+
+		if (err) {
+			IPW_DEBUG_WX("failed setting power mode.\n");
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+#define MAX_WX_STRING 80
+static int ipw_wx_get_powermode(struct net_device *dev,
+				    struct iw_request_info *info,
+				    union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+	int level = IPW_POWER_LEVEL(priv->power_mode);
+	char *p = extra;
+
+	p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
+
+	switch (level) {
+	case IPW_POWER_AC:
+		p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
+		break;
+	case IPW_POWER_BATTERY:
+		p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
+		break;
+	default:
+		p += snprintf(p, MAX_WX_STRING - (p - extra),
+			      "(Timeout %dms, Period %dms)",
+			      timeout_duration[level - 1] / 1000,
+			      period_duration[level - 1] / 1000);
+	}
+
+	if (!(priv->power_mode & IPW_POWER_ENABLED))
+		p += snprintf(p, MAX_WX_STRING - (p - extra)," OFF");
+
+	wrqu->data.length = p - extra + 1;
+
+	return 0;
+}
+
+static int ipw_wx_set_wireless_mode(struct net_device *dev,
+                                    struct iw_request_info *info,
+                                    union iwreq_data *wrqu, char *extra)
+{
+        struct ipw_priv *priv = ieee80211_priv(dev);
+	int mode = *(int *)extra;
+	u8 band = 0, modulation = 0;
+
+	if (mode == 0 || mode & ~IEEE_MODE_MASK) {
+		IPW_WARNING("Attempt to set invalid wireless mode: %d\n",
+			    mode);
+		return -EINVAL;
+	}
+
+	if (priv->adapter == IPW_2915ABG) {
+		priv->ieee->abg_ture = 1;
+		if (mode & IEEE_A) {
+			band |= IEEE80211_52GHZ_BAND;
+			modulation |= IEEE80211_OFDM_MODULATION;
+		} else
+			priv->ieee->abg_ture = 0;
+	} else {
+		if (mode & IEEE_A) {
+			IPW_WARNING("Attempt to set 2200BG into "
+				    "802.11a mode\n");
+			return -EINVAL;
+		}
+
+		priv->ieee->abg_ture = 0;
+	}
+
+	if (mode & IEEE_B) {
+		band |= IEEE80211_24GHZ_BAND;
+		modulation |= IEEE80211_CCK_MODULATION;
+	} else
+		priv->ieee->abg_ture = 0;
+
+	if (mode & IEEE_G) {
+		band |= IEEE80211_24GHZ_BAND;
+		modulation |= IEEE80211_OFDM_MODULATION;
+	} else
+		priv->ieee->abg_ture = 0;
+
+	priv->ieee->mode = mode;
+	priv->ieee->freq_band = band;
+	priv->ieee->modulation = modulation;
+      	init_supported_rates(priv, &priv->rates);
+
+	/* If we are currently associated, or trying to associate
+         * then see if this is a new configuration (causing us to
+	 * disassociate) */
+        if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
+		/* The resulting association will trigger
+		 * the new rates to be sent to the device */
+                IPW_DEBUG_ASSOC("Disassociating due to mode change.\n");
+                ipw_disassociate(priv);
+	} else
+		ipw_send_supported_rates(priv, &priv->rates);
+
+	IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
+		     mode & IEEE_A ? 'a' : '.',
+		     mode & IEEE_B ? 'b' : '.',
+		     mode & IEEE_G ? 'g' : '.');
+	return 0;
+}
+
+static int ipw_wx_get_wireless_mode(struct net_device *dev,
+                                    struct iw_request_info *info,
+                                    union iwreq_data *wrqu, char *extra)
+{
+        struct ipw_priv *priv = ieee80211_priv(dev);
+
+	switch (priv->ieee->freq_band) {
+	case IEEE80211_24GHZ_BAND:
+		switch (priv->ieee->modulation) {
+		case IEEE80211_CCK_MODULATION:
+			strncpy(extra, "802.11b (2)", MAX_WX_STRING);
+			break;
+		case IEEE80211_OFDM_MODULATION:
+			strncpy(extra, "802.11g (4)", MAX_WX_STRING);
+			break;
+		default:
+			strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
+			break;
+		}
+		break;
+
+	case IEEE80211_52GHZ_BAND:
+		strncpy(extra, "802.11a (1)", MAX_WX_STRING);
+		break;
+
+	default: /* Mixed Band */
+		switch (priv->ieee->modulation) {
+		case IEEE80211_CCK_MODULATION:
+			strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
+			break;
+		case IEEE80211_OFDM_MODULATION:
+			strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
+			break;
+		default:
+			strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
+			break;
+		}
+		break;
+	}
+
+	IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
+
+        wrqu->data.length = strlen(extra) + 1;
+
+        return 0;
+}
+
+#ifdef CONFIG_IPW_PROMISC
+static int ipw_wx_set_promisc(struct net_device *dev,
+			      struct iw_request_info *info,
+			      union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+	int *parms = (int *)extra;
+	int enable = (parms[0] > 0);
+
+	IPW_DEBUG_WX("SET PROMISC: %d %d\n", enable, parms[1]);
+	if (enable) {
+		if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
+			priv->net_dev->type = ARPHRD_IEEE80211;
+			ipw_adapter_restart(priv);
+		}
+
+		ipw_set_channel(priv, parms[1]);
+	} else {
+		if (priv->ieee->iw_mode != IW_MODE_MONITOR)
+			return 0;
+		priv->net_dev->type = ARPHRD_ETHER;
+		ipw_adapter_restart(priv);
+	}
+	return 0;
+}
+
+
+static int ipw_wx_reset(struct net_device *dev,
+			struct iw_request_info *info,
+			union iwreq_data *wrqu, char *extra)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+	IPW_DEBUG_WX("RESET\n");
+	ipw_adapter_restart(priv);
+	return 0;
+}
+#endif /* CONFIG_IPW_PROMISC */
+
+/* Rebase the WE IOCTLs to zero for the handler array */
+#define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
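+/* SIOCSIWCOMMIT is the first standard WE ioctl, so IW_IOCTL(SIOCGIWNAME),
+ * for example, expands to index SIOCGIWNAME - SIOCSIWCOMMIT -- the offset
+ * the wireless extensions core uses when dispatching standard handlers. */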
+static iw_handler ipw_wx_handlers[] =
+{
+	IW_IOCTL(SIOCGIWNAME)   = ipw_wx_get_name,
+	IW_IOCTL(SIOCSIWFREQ)   = ipw_wx_set_freq,
+	IW_IOCTL(SIOCGIWFREQ)   = ipw_wx_get_freq,
+	IW_IOCTL(SIOCSIWMODE)   = ipw_wx_set_mode,
+	IW_IOCTL(SIOCGIWMODE)   = ipw_wx_get_mode,
+	IW_IOCTL(SIOCGIWRANGE)  = ipw_wx_get_range,
+	IW_IOCTL(SIOCSIWAP)     = ipw_wx_set_wap,
+	IW_IOCTL(SIOCGIWAP)     = ipw_wx_get_wap,
+	IW_IOCTL(SIOCSIWSCAN)   = ipw_wx_set_scan,
+	IW_IOCTL(SIOCGIWSCAN)   = ipw_wx_get_scan,
+	IW_IOCTL(SIOCSIWESSID)  = ipw_wx_set_essid,
+	IW_IOCTL(SIOCGIWESSID)  = ipw_wx_get_essid,
+	IW_IOCTL(SIOCSIWNICKN)  = ipw_wx_set_nick,
+	IW_IOCTL(SIOCGIWNICKN)  = ipw_wx_get_nick,
+	IW_IOCTL(SIOCSIWRATE)   = ipw_wx_set_rate,
+	IW_IOCTL(SIOCGIWRATE)   = ipw_wx_get_rate,
+	IW_IOCTL(SIOCSIWRTS)    = ipw_wx_set_rts,
+	IW_IOCTL(SIOCGIWRTS)    = ipw_wx_get_rts,
+	IW_IOCTL(SIOCSIWFRAG)   = ipw_wx_set_frag,
+	IW_IOCTL(SIOCGIWFRAG)   = ipw_wx_get_frag,
+	IW_IOCTL(SIOCSIWTXPOW)  = ipw_wx_set_txpow,
+	IW_IOCTL(SIOCGIWTXPOW)  = ipw_wx_get_txpow,
+	IW_IOCTL(SIOCSIWRETRY)  = ipw_wx_set_retry,
+	IW_IOCTL(SIOCGIWRETRY)  = ipw_wx_get_retry,
+	IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
+	IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
+	IW_IOCTL(SIOCSIWPOWER)  = ipw_wx_set_power,
+	IW_IOCTL(SIOCGIWPOWER)  = ipw_wx_get_power,
+};
+
+#define IPW_PRIV_SET_POWER	SIOCIWFIRSTPRIV
+#define IPW_PRIV_GET_POWER	SIOCIWFIRSTPRIV+1
+#define IPW_PRIV_SET_MODE	SIOCIWFIRSTPRIV+2
+#define IPW_PRIV_GET_MODE	SIOCIWFIRSTPRIV+3
+#define IPW_PRIV_SET_PROMISC	SIOCIWFIRSTPRIV+4
+#define IPW_PRIV_RESET		SIOCIWFIRSTPRIV+5
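+
+/* The order of these private ioctl numbers matches the order of the
+ * handlers in ipw_priv_handler[] further below; the wireless extensions
+ * core dispatches private calls by (cmd - SIOCIWFIRSTPRIV). */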
+
+
+static struct iw_priv_args ipw_priv_args[] = {
+	{
+		.cmd = IPW_PRIV_SET_POWER,
+		.set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+		.name = "set_power"
+	},
+	{
+		.cmd = IPW_PRIV_GET_POWER,
+		.get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
+		.name = "get_power"
+	},
+	{
+		.cmd = IPW_PRIV_SET_MODE,
+		.set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
+		.name = "set_mode"
+	},
+	{
+		.cmd = IPW_PRIV_GET_MODE,
+		.get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
+		.name = "get_mode"
+	},
+#ifdef CONFIG_IPW_PROMISC
+	{
+		IPW_PRIV_SET_PROMISC,
+		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"
+	},
+	{
+		IPW_PRIV_RESET,
+		IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"
+	},
+#endif /* CONFIG_IPW_PROMISC */
+};
+
+static iw_handler ipw_priv_handler[] = {
+	ipw_wx_set_powermode,
+	ipw_wx_get_powermode,
+	ipw_wx_set_wireless_mode,
+	ipw_wx_get_wireless_mode,
+#ifdef CONFIG_IPW_PROMISC
+	ipw_wx_set_promisc,
+	ipw_wx_reset,
+#endif
+};
+
+static struct iw_handler_def ipw_wx_handler_def =
+{
+	.standard 	= ipw_wx_handlers,
+	.num_standard	= ARRAY_SIZE(ipw_wx_handlers),
+	.num_private	= ARRAY_SIZE(ipw_priv_handler),
+ 	.num_private_args = ARRAY_SIZE(ipw_priv_args),
+	.private	= ipw_priv_handler,
+	.private_args	= ipw_priv_args,
+};
+
+
+
+
+/*
+ * Get wireless statistics.
+ * Called by /proc/net/wireless
+ * Also called by SIOCGIWSTATS
+ */
+static struct iw_statistics *ipw_get_wireless_stats(struct net_device * dev)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+	struct iw_statistics *wstats;
+
+	wstats = &priv->wstats;
+
+	/* if the hw is disabled, ipw_get_ordinal() can't be called.
+	 * This handler also seems to be called before the fw is
+	 * initialized.  STATUS_ASSOCIATED will only be set if the hw is up
+	 * and associated; if not associated, the values are all meaningless
+	 * anyway, so set them all to 0 and mark them INVALID */
+	if (!(priv->status & STATUS_ASSOCIATED)) {
+		wstats->miss.beacon = 0;
+		wstats->discard.retries = 0;
+		wstats->qual.qual = 0;
+		wstats->qual.level = 0;
+		wstats->qual.noise = 0;
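+		/* 0x7 covers the QUAL/LEVEL/NOISE 'updated' bits
+		 * (IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
+		 * IW_QUAL_NOISE_UPDATED); the values themselves are then
+		 * flagged invalid below. */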
+		wstats->qual.updated = 7;
+		wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
+			IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
+		return wstats;
+	}
+
+	wstats->qual.qual = priv->quality;
+	wstats->qual.level = average_value(&priv->average_rssi);
+	wstats->qual.noise = average_value(&priv->average_noise);
+	wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
+		IW_QUAL_NOISE_UPDATED;
+
+	wstats->miss.beacon = average_value(&priv->average_missed_beacons);
+	wstats->discard.retries = priv->last_tx_failures;
+	wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
+
+/*	if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
+	goto fail_get_ordinal;
+	wstats->discard.retries += tx_retry; */
+
+	return wstats;
+}
+
+
+/* net device stuff */
+
+static inline void init_sys_config(struct ipw_sys_config *sys_config)
+{
+        memset(sys_config, 0, sizeof(struct ipw_sys_config));
+	sys_config->bt_coexistence = 1; /* We may need to look into prvStaBtConfig */
+	sys_config->answer_broadcast_ssid_probe = 0;
+	sys_config->accept_all_data_frames = 0;
+	sys_config->accept_non_directed_frames = 1;
+	sys_config->exclude_unicast_unencrypted = 0;
+	sys_config->disable_unicast_decryption = 1;
+	sys_config->exclude_multicast_unencrypted = 0;
+	sys_config->disable_multicast_decryption = 1;
+	sys_config->antenna_diversity = CFG_SYS_ANTENNA_BOTH;
+	sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
+	sys_config->dot11g_auto_detection = 0;
+	sys_config->enable_cts_to_self = 0;
+	sys_config->bt_coexist_collision_thr = 0;
+	sys_config->pass_noise_stats_to_host = 1;
+}
+
+static int ipw_net_open(struct net_device *dev)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+	IPW_DEBUG_INFO("dev->open\n");
+	/* we should be verifying the device is ready to be opened */
+	if (!(priv->status & STATUS_RF_KILL_MASK) &&
+	    (priv->status & STATUS_ASSOCIATED))
+		netif_start_queue(dev);
+	return 0;
+}
+
+static int ipw_net_stop(struct net_device *dev)
+{
+	IPW_DEBUG_INFO("dev->close\n");
+	netif_stop_queue(dev);
+	return 0;
+}
+
+/*
+ * TODO: modify to send one TFD per fragment instead of using chunking;
+ * otherwise we need to heavily modify ieee80211_skb_to_txb.
+ */
+
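+/* ipw_tx_skb() maps up to NUM_TFD_CHUNKS - 2 fragments of the txb directly
+ * onto DMA chunks; any remaining fragments are coalesced (copied) into a
+ * single freshly allocated skb that is appended as one extra chunk. */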
+static inline void ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)
+		txb->fragments[0]->data;
+	int i = 0;
+	struct tfd_frame *tfd;
+	struct clx2_tx_queue *txq = &priv->txq[0];
+	struct clx2_queue *q = &txq->q;
+	u8 id, hdr_len, unicast;
+	u16 remaining_bytes;
+
+	switch (priv->ieee->iw_mode) {
+	case IW_MODE_ADHOC:
+		hdr_len = IEEE80211_3ADDR_LEN;
+		unicast = !is_broadcast_ether_addr(hdr->addr1) &&
+			!is_multicast_ether_addr(hdr->addr1);
+		id = ipw_find_station(priv, hdr->addr1);
+		if (id == IPW_INVALID_STATION) {
+			id = ipw_add_station(priv, hdr->addr1);
+			if (id == IPW_INVALID_STATION) {
+				IPW_WARNING("Attempt to send data to "
+					    "invalid cell: " MAC_FMT "\n",
+					    MAC_ARG(hdr->addr1));
+				goto drop;
+			}
+		}
+		break;
+
+	case IW_MODE_INFRA:
+	default:
+		unicast = !is_broadcast_ether_addr(hdr->addr3) &&
+			!is_multicast_ether_addr(hdr->addr3);
+		hdr_len = IEEE80211_3ADDR_LEN;
+		id = 0;
+		break;
+	}
+
+	tfd = &txq->bd[q->first_empty];
+	txq->txb[q->first_empty] = txb;
+	memset(tfd, 0, sizeof(*tfd));
+	tfd->u.data.station_number = id;
+
+	tfd->control_flags.message_type = TX_FRAME_TYPE;
+	tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
+
+	tfd->u.data.cmd_id = DINO_CMD_TX;
+	tfd->u.data.len = txb->payload_size;
+	remaining_bytes = txb->payload_size;
+	if (unlikely(!unicast))
+		tfd->u.data.tx_flags = DCT_FLAG_NO_WEP;
+	else
+		tfd->u.data.tx_flags = DCT_FLAG_NO_WEP | DCT_FLAG_ACK_REQD;
+
+	if (priv->assoc_request.ieee_mode == IPW_B_MODE)
+		tfd->u.data.tx_flags_ext = DCT_FLAG_EXT_MODE_CCK;
+	else
+		tfd->u.data.tx_flags_ext = DCT_FLAG_EXT_MODE_OFDM;
+
+	if (priv->config & CFG_PREAMBLE)
+		tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREMBL;
+
+	memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
+
+	/* payload */
+	tfd->u.data.num_chunks = min((u8)(NUM_TFD_CHUNKS - 2), txb->nr_frags);
+	for (i = 0; i < tfd->u.data.num_chunks; i++) {
+		IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
+			     i, tfd->u.data.num_chunks,
+			     txb->fragments[i]->len - hdr_len);
+		printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
+			   txb->fragments[i]->len - hdr_len);
+
+		tfd->u.data.chunk_ptr[i] = pci_map_single(
+			priv->pci_dev, txb->fragments[i]->data + hdr_len,
+			txb->fragments[i]->len - hdr_len, PCI_DMA_TODEVICE);
+		tfd->u.data.chunk_len[i] = txb->fragments[i]->len - hdr_len;
+	}
+
+	if (i != txb->nr_frags) {
+		struct sk_buff *skb;
+		u16 remaining_bytes = 0;
+		int j;
+
+		for (j = i; j < txb->nr_frags; j++)
+			remaining_bytes += txb->fragments[j]->len - hdr_len;
+
+		printk(KERN_INFO "Trying to reallocate for %d bytes\n",
+		       remaining_bytes);
+		skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
+		if (skb != NULL) {
+			tfd->u.data.chunk_len[i] = remaining_bytes;
+			for (j = i; j < txb->nr_frags; j++) {
+				int size = txb->fragments[j]->len - hdr_len;
+				printk(KERN_INFO "Adding frag %d %d...\n",
+					j, size);
+				memcpy(skb_put(skb, size),
+					txb->fragments[j]->data + hdr_len,
+					size);
+			}
+			dev_kfree_skb_any(txb->fragments[i]);
+			txb->fragments[i] = skb;
+			tfd->u.data.chunk_ptr[i] = pci_map_single(
+				priv->pci_dev, skb->data,
+				tfd->u.data.chunk_len[i], PCI_DMA_TODEVICE);
+			tfd->u.data.num_chunks++;
+		}
+	}
+
+	/* kick DMA */
+	q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
+	ipw_write32(priv, q->reg_w, q->first_empty);
+
+	if (ipw_queue_space(q) < q->high_mark)
+		netif_stop_queue(priv->net_dev);
+
+	return;
+
+ drop:
+	IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
+	ieee80211_txb_free(txb);
+}
+
+static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
+				   struct net_device *dev)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+	unsigned long flags;
+
+	IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	if (!(priv->status & STATUS_ASSOCIATED)) {
+		IPW_DEBUG_INFO("Tx attempt while not associated.\n");
+		priv->ieee->stats.tx_carrier_errors++;
+		netif_stop_queue(dev);
+		goto fail_unlock;
+	}
+
+	ipw_tx_skb(priv, txb);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+	return 0;
+
+ fail_unlock:
+	spin_unlock_irqrestore(&priv->lock, flags);
+	return 1;
+}
+
+static struct net_device_stats *ipw_net_get_stats(struct net_device *dev)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+
+	priv->ieee->stats.tx_packets = priv->tx_packets;
+	priv->ieee->stats.rx_packets = priv->rx_packets;
+	return &priv->ieee->stats;
+}
+
+static void ipw_net_set_multicast_list(struct net_device *dev)
+{
+
+}
+
+static int ipw_net_set_mac_address(struct net_device *dev, void *p)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+	struct sockaddr *addr = p;
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+	priv->config |= CFG_CUSTOM_MAC;
+	memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
+	printk(KERN_INFO "%s: Setting MAC to " MAC_FMT "\n",
+	       priv->net_dev->name, MAC_ARG(priv->mac_addr));
+	ipw_adapter_restart(priv);
+	return 0;
+}
+
+static void ipw_ethtool_get_drvinfo(struct net_device *dev,
+				    struct ethtool_drvinfo *info)
+{
+	struct ipw_priv *p = ieee80211_priv(dev);
+	char vers[64];
+	char date[32];
+	u32 len;
+
+	strcpy(info->driver, DRV_NAME);
+	strcpy(info->version, DRV_VERSION);
+
+	len = sizeof(vers);
+	ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
+	len = sizeof(date);
+	ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
+
+	snprintf(info->fw_version, sizeof(info->fw_version),"%s (%s)",
+		 vers, date);
+	strcpy(info->bus_info, pci_name(p->pci_dev));
+	info->eedump_len = CX2_EEPROM_IMAGE_SIZE;
+}
+
+static u32 ipw_ethtool_get_link(struct net_device *dev)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+	return (priv->status & STATUS_ASSOCIATED) != 0;
+}
+
+static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
+{
+	return CX2_EEPROM_IMAGE_SIZE;
+}
+
+static int ipw_ethtool_get_eeprom(struct net_device *dev,
+				  struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+	struct ipw_priv *p = ieee80211_priv(dev);
+
+	if (eeprom->offset + eeprom->len > CX2_EEPROM_IMAGE_SIZE)
+		return -EINVAL;
+
+	memcpy(bytes, &((u8 *)p->eeprom)[eeprom->offset], eeprom->len);
+	return 0;
+}
+
+static int ipw_ethtool_set_eeprom(struct net_device *dev,
+				  struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+	struct ipw_priv *p = ieee80211_priv(dev);
+	int i;
+
+	if (eeprom->offset + eeprom->len > CX2_EEPROM_IMAGE_SIZE)
+		return -EINVAL;
+
+	memcpy(&((u8 *)p->eeprom)[eeprom->offset], bytes, eeprom->len);
+	for (i = IPW_EEPROM_DATA;
+	     i < IPW_EEPROM_DATA + CX2_EEPROM_IMAGE_SIZE;
+	     i++)
+		ipw_write8(p, i, p->eeprom[i]);
+
+	return 0;
+}
+
+static struct ethtool_ops ipw_ethtool_ops = {
+	 .get_link       = ipw_ethtool_get_link,
+	 .get_drvinfo	 = ipw_ethtool_get_drvinfo,
+	 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
+	 .get_eeprom	 = ipw_ethtool_get_eeprom,
+	 .set_eeprom	 = ipw_ethtool_set_eeprom,
+};
+
+static irqreturn_t ipw_isr(int irq, void *data, struct pt_regs *regs)
+{
+	struct ipw_priv *priv = data;
+	u32 inta, inta_mask;
+
+	if (!priv)
+		return IRQ_NONE;
+
+	spin_lock(&priv->lock);
+
+	if (!(priv->status & STATUS_INT_ENABLED)) {
+		/* Shared IRQ */
+		goto none;
+	}
+
+	inta = ipw_read32(priv, CX2_INTA_RW);
+	inta_mask = ipw_read32(priv, CX2_INTA_MASK_R);
+
+	if (inta == 0xFFFFFFFF) {
+		/* Hardware disappeared */
+		IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
+		goto none;
+	}
+
+	if (!(inta & (CX2_INTA_MASK_ALL & inta_mask))) {
+		/* Shared interrupt */
+		goto none;
+	}
+
+	/* tell the device to stop sending interrupts */
+	ipw_disable_interrupts(priv);
+
+	/* ack current interrupts */
+	inta &= (CX2_INTA_MASK_ALL & inta_mask);
+	ipw_write32(priv, CX2_INTA_RW, inta);
+
+	/* Cache INTA value for our tasklet */
+	priv->isr_inta = inta;
+
+	tasklet_schedule(&priv->irq_tasklet);
+
+ 	spin_unlock(&priv->lock);
+
+	return IRQ_HANDLED;
+ none:
+	spin_unlock(&priv->lock);
+	return IRQ_NONE;
+}
+
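+/* Poll the hardware RF kill switch roughly every two seconds while it is
+ * engaged; once it is released, queue an adapter restart (the restart
+ * cannot be done from inside the spinlock held here). */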
+static void ipw_rf_kill(void *adapter)
+{
+	struct ipw_priv *priv = adapter;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	if (rf_kill_active(priv)) {
+		IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
+		if (priv->workqueue)
+			queue_delayed_work(priv->workqueue,
+					   &priv->rf_kill, 2 * HZ);
+		goto exit_unlock;
+	}
+
+	/* RF Kill is now disabled, so bring the device back up */
+
+	if (!(priv->status & STATUS_RF_KILL_MASK)) {
+		IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
+				  "device\n");
+
+		/* we can not do an adapter restart while inside an irq lock */
+		queue_work(priv->workqueue, &priv->adapter_restart);
+	} else
+		IPW_DEBUG_RF_KILL("HW RF Kill deactivated.  SW RF Kill still "
+				  "enabled\n");
+
+ exit_unlock:
+	spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static int ipw_setup_deferred_work(struct ipw_priv *priv)
+{
+	int ret = 0;
+
+#ifdef CONFIG_SOFTWARE_SUSPEND2
+	priv->workqueue = create_workqueue(DRV_NAME, 0);
+#else
+	priv->workqueue = create_workqueue(DRV_NAME);
+#endif
+	init_waitqueue_head(&priv->wait_command_queue);
+
+	INIT_WORK(&priv->adhoc_check, ipw_adhoc_check, priv);
+	INIT_WORK(&priv->associate, ipw_associate, priv);
+	INIT_WORK(&priv->disassociate, ipw_disassociate, priv);
+	INIT_WORK(&priv->rx_replenish, ipw_rx_queue_replenish, priv);
+	INIT_WORK(&priv->adapter_restart, ipw_adapter_restart, priv);
+	INIT_WORK(&priv->rf_kill, ipw_rf_kill, priv);
+	INIT_WORK(&priv->up, (void (*)(void *))ipw_up, priv);
+	INIT_WORK(&priv->down, (void (*)(void *))ipw_down, priv);
+	INIT_WORK(&priv->request_scan,
+		  (void (*)(void *))ipw_request_scan, priv);
+	INIT_WORK(&priv->gather_stats,
+		  (void (*)(void *))ipw_gather_stats, priv);
+	INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_abort_scan, priv);
+	INIT_WORK(&priv->roam, ipw_roam, priv);
+	INIT_WORK(&priv->scan_check, ipw_scan_check, priv);
+
+	tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
+		     ipw_irq_tasklet, (unsigned long)priv);
+
+	return ret;
+}
+
+
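+/* Callback from the ieee80211 layer: the low four bits of sec->flags select
+ * WEP key slots 0-3; any key, active-key, auth-mode, level or enable change
+ * is latched into priv->sec and flagged via STATUS_SECURITY_UPDATED. */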
+static void shim__set_security(struct net_device *dev,
+			       struct ieee80211_security *sec)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		if (sec->flags & (1 << i)) {
+			priv->sec.key_sizes[i] = sec->key_sizes[i];
+			if (sec->key_sizes[i] == 0)
+				priv->sec.flags &= ~(1 << i);
+			else
+				memcpy(priv->sec.keys[i], sec->keys[i],
+				       sec->key_sizes[i]);
+			priv->sec.flags |= (1 << i);
+			priv->status |= STATUS_SECURITY_UPDATED;
+		}
+	}
+
+	if ((sec->flags & SEC_ACTIVE_KEY) &&
+	    priv->sec.active_key != sec->active_key) {
+		if (sec->active_key <= 3) {
+			priv->sec.active_key = sec->active_key;
+			priv->sec.flags |= SEC_ACTIVE_KEY;
+		} else
+			priv->sec.flags &= ~SEC_ACTIVE_KEY;
+		priv->status |= STATUS_SECURITY_UPDATED;
+	}
+
+	if ((sec->flags & SEC_AUTH_MODE) &&
+	    (priv->sec.auth_mode != sec->auth_mode)) {
+		priv->sec.auth_mode = sec->auth_mode;
+		priv->sec.flags |= SEC_AUTH_MODE;
+		if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
+			priv->capability |= CAP_SHARED_KEY;
+		else
+			priv->capability &= ~CAP_SHARED_KEY;
+		priv->status |= STATUS_SECURITY_UPDATED;
+	}
+
+	if (sec->flags & SEC_ENABLED &&
+	    priv->sec.enabled != sec->enabled) {
+		priv->sec.flags |= SEC_ENABLED;
+		priv->sec.enabled = sec->enabled;
+		priv->status |= STATUS_SECURITY_UPDATED;
+		if (sec->enabled)
+			priv->capability |= CAP_PRIVACY_ON;
+		else
+			priv->capability &= ~CAP_PRIVACY_ON;
+	}
+
+	if (sec->flags & SEC_LEVEL &&
+	    priv->sec.level != sec->level) {
+		priv->sec.level = sec->level;
+		priv->sec.flags |= SEC_LEVEL;
+		priv->status |= STATUS_SECURITY_UPDATED;
+	}
+
+	/* To match the current functionality of ipw2100 (which works well
+	 * with various supplicants), we don't force a disassociate if the
+	 * privacy capability changes ... */
+#if 0
+	if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
+	    (((priv->assoc_request.capability &
+	       WLAN_CAPABILITY_PRIVACY) && !sec->enabled) ||
+	     (!(priv->assoc_request.capability &
+		 WLAN_CAPABILITY_PRIVACY) && sec->enabled))) {
+		IPW_DEBUG_ASSOC("Disassociating due to capability "
+				"change.\n");
+		ipw_disassociate(priv);
+	}
+#endif
+}
+
+static int init_supported_rates(struct ipw_priv *priv,
+				struct ipw_supported_rates *rates)
+{
+	/* TODO: Mask out rates based on priv->rates_mask */
+
+	memset(rates, 0, sizeof(*rates));
+        /* configure supported rates */
+	switch (priv->ieee->freq_band) {
+	case IEEE80211_52GHZ_BAND:
+		rates->ieee_mode = IPW_A_MODE;
+		rates->purpose = IPW_RATE_CAPABILITIES;
+		ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
+					IEEE80211_OFDM_DEFAULT_RATES_MASK);
+		break;
+
+	default: /* Mixed or 2.4GHz */
+		rates->ieee_mode = IPW_G_MODE;
+		rates->purpose = IPW_RATE_CAPABILITIES;
+		ipw_add_cck_scan_rates(rates, IEEE80211_CCK_MODULATION,
+				       IEEE80211_CCK_DEFAULT_RATES_MASK);
+		if (priv->ieee->modulation & IEEE80211_OFDM_MODULATION) {
+			ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
+						IEEE80211_OFDM_DEFAULT_RATES_MASK);
+		}
+		break;
+	}
+
+	return 0;
+}
+
+static int ipw_config(struct ipw_priv *priv)
+{
+	int i;
+	struct ipw_tx_power tx_power;
+
+	memset(&priv->sys_config, 0, sizeof(priv->sys_config));
+	memset(&tx_power, 0, sizeof(tx_power));
+
+	/* This is only called from ipw_up, which resets/reloads the firmware,
+	   so we don't need to disable the card before configuring it */
+
+	/* configure device for 'G' band */
+	tx_power.ieee_mode = IPW_G_MODE;
+	tx_power.num_channels = 11;
+	for (i = 0; i < 11; i++) {
+		tx_power.channels_tx_power[i].channel_number = i + 1;
+		tx_power.channels_tx_power[i].tx_power = priv->tx_power;
+	}
+	if (ipw_send_tx_power(priv, &tx_power))
+		goto error;
+
+	/* configure device to also handle 'B' band */
+	tx_power.ieee_mode = IPW_B_MODE;
+	if (ipw_send_tx_power(priv, &tx_power))
+		goto error;
+
+	/* initialize adapter address */
+	if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
+		goto error;
+
+	/* set basic system config settings */
+	init_sys_config(&priv->sys_config);
+	if (ipw_send_system_config(priv, &priv->sys_config))
+		goto error;
+
+        init_supported_rates(priv, &priv->rates);
+        if (ipw_send_supported_rates(priv, &priv->rates))
+		goto error;
+
+	/* Set request-to-send threshold */
+	if (priv->rts_threshold) {
+		if (ipw_send_rts_threshold(priv, priv->rts_threshold))
+			goto error;
+	}
+
+	if (ipw_set_random_seed(priv))
+		goto error;
+
+	/* final state transition to the RUN state */
+	if (ipw_send_host_complete(priv))
+		goto error;
+
+	/* If configured to try and auto-associate, kick off a scan */
+	if ((priv->config & CFG_ASSOCIATE) && ipw_request_scan(priv))
+		goto error;
+
+	return 0;
+
+ error:
+	return -EIO;
+}
+
+#define MAX_HW_RESTARTS 5
+static int ipw_up(struct ipw_priv *priv)
+{
+	int rc, i;
+
+	if (priv->status & STATUS_EXIT_PENDING)
+		return -EIO;
+
+	for (i = 0; i < MAX_HW_RESTARTS; i++ ) {
+		/* Load the microcode, firmware, and eeprom.
+		 * Also start the clocks. */
+		rc = ipw_load(priv);
+		if (rc) {
+			IPW_ERROR("Unable to load firmware: 0x%08X\n",
+					rc);
+			return rc;
+		}
+
+		ipw_init_ordinals(priv);
+		if (!(priv->config & CFG_CUSTOM_MAC))
+			eeprom_parse_mac(priv, priv->mac_addr);
+		memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
+
+		if (priv->status & STATUS_RF_KILL_MASK)
+			return 0;
+
+		rc = ipw_config(priv);
+		if (!rc) {
+			IPW_DEBUG_INFO("Configured device on count %i\n", i);
+			priv->notif_missed_beacons = 0;
+			netif_start_queue(priv->net_dev);
+			return 0;
+		} else {
+			IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n",
+				       rc);
+		}
+
+		IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
+			       i, MAX_HW_RESTARTS);
+
+		/* We had an error bringing up the hardware, so take it
+		 * all the way back down so we can try again */
+		ipw_down(priv);
+	}
+
+	/* tried to restart and config the device for as long as our
+	 * patience could withstand */
+	IPW_ERROR("Unable to initialize device after %d attempts.\n",
+		  i);
+	return -EIO;
+}
+
+static void ipw_down(struct ipw_priv *priv)
+{
+	/* Attempt to disable the card */
+#if 0
+	ipw_send_card_disable(priv, 0);
+#endif
+
+	/* tell the device to stop sending interrupts */
+	ipw_disable_interrupts(priv);
+
+	/* Clear all bits but the RF Kill */
+	priv->status &= STATUS_RF_KILL_MASK;
+
+	netif_carrier_off(priv->net_dev);
+	netif_stop_queue(priv->net_dev);
+
+	ipw_stop_nic(priv);
+}
+
+/* Called by register_netdev() */
+static int ipw_net_init(struct net_device *dev)
+{
+	struct ipw_priv *priv = ieee80211_priv(dev);
+
+	if (priv->status & STATUS_RF_KILL_SW) {
+		IPW_WARNING("Radio disabled by module parameter.\n");
+		return 0;
+	} else if (rf_kill_active(priv)) {
+		IPW_WARNING("Radio Frequency Kill Switch is On:\n"
+			    "Kill switch must be turned off for "
+			    "wireless networking to work.\n");
+		queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
+		return 0;
+	}
+
+	if (ipw_up(priv))
+		return -EIO;
+
+	return 0;
+}
+
+/* PCI driver stuff */
+static struct pci_device_id card_ids[] = {
+	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
+	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
+	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
+	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
+	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
+	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
+	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
+	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
+	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
+	{PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
+	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
+	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
+	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
+	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
+	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
+	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
+	{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
+	{PCI_VENDOR_ID_INTEL, 0x104f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+	{PCI_VENDOR_ID_INTEL, 0x4220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
+	{PCI_VENDOR_ID_INTEL, 0x4221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* 2225BG */
+	{PCI_VENDOR_ID_INTEL, 0x4223, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
+	{PCI_VENDOR_ID_INTEL, 0x4224, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
+
+	/* required last entry */
+	{0,}
+};
+
+MODULE_DEVICE_TABLE(pci, card_ids);
+
+static struct attribute *ipw_sysfs_entries[] = {
+	&dev_attr_rf_kill.attr,
+	&dev_attr_direct_dword.attr,
+	&dev_attr_indirect_byte.attr,
+	&dev_attr_indirect_dword.attr,
+	&dev_attr_mem_gpio_reg.attr,
+	&dev_attr_command_event_reg.attr,
+	&dev_attr_nic_type.attr,
+	&dev_attr_status.attr,
+	&dev_attr_cfg.attr,
+	&dev_attr_dump_errors.attr,
+	&dev_attr_dump_events.attr,
+	&dev_attr_eeprom_delay.attr,
+	&dev_attr_ucode_version.attr,
+	&dev_attr_rtc.attr,
+	NULL
+};
+
+static struct attribute_group ipw_attribute_group = {
+	.name = NULL,		/* put in device directory */
+	.attrs	= ipw_sysfs_entries,
+};
+
+static int ipw_pci_probe(struct pci_dev *pdev,
+			 const struct pci_device_id *ent)
+{
+	int err = 0;
+	struct net_device *net_dev;
+	void __iomem *base;
+	u32 length, val;
+	struct ipw_priv *priv;
+	int band, modulation;
+
+	net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
+	if (net_dev == NULL) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	priv = ieee80211_priv(net_dev);
+	priv->ieee = netdev_priv(net_dev);
+	priv->net_dev = net_dev;
+	priv->pci_dev = pdev;
+#ifdef CONFIG_IPW_DEBUG
+	ipw_debug_level = debug;
+#endif
+	spin_lock_init(&priv->lock);
+
+	if (pci_enable_device(pdev)) {
+		err = -ENODEV;
+		goto out_free_ieee80211;
+	}
+
+	pci_set_master(pdev);
+
+	err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+	if (!err)
+		err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+	if (err) {
+		printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
+		goto out_pci_disable_device;
+	}
+
+	pci_set_drvdata(pdev, priv);
+
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err)
+		goto out_pci_disable_device;
+
+	/* We disable the RETRY_TIMEOUT register (0x41) to keep
+	 * PCI Tx retries from interfering with C3 CPU state */
+	pci_read_config_dword(pdev, 0x40, &val);
+	if ((val & 0x0000ff00) != 0)
+		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
+
+	length = pci_resource_len(pdev, 0);
+	priv->hw_len = length;
+
+	base = ioremap_nocache(pci_resource_start(pdev, 0), length);
+	if (!base) {
+		err = -ENODEV;
+		goto out_pci_release_regions;
+	}
+
+	priv->hw_base = base;
+	IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
+	IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
+
+	err = ipw_setup_deferred_work(priv);
+	if (err) {
+		IPW_ERROR("Unable to setup deferred work\n");
+		goto out_iounmap;
+	}
+
+	/* Initialize module parameter values here */
+	if (ifname)
+		strncpy(net_dev->name, ifname, IFNAMSIZ);
+
+	if (associate)
+		priv->config |= CFG_ASSOCIATE;
+	else
+		IPW_DEBUG_INFO("Auto associate disabled.\n");
+
+	if (auto_create)
+		priv->config |= CFG_ADHOC_CREATE;
+	else
+		IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
+
+	if (disable) {
+		priv->status |= STATUS_RF_KILL_SW;
+		IPW_DEBUG_INFO("Radio disabled.\n");
+	}
+
+	if (channel != 0) {
+		priv->config |= CFG_STATIC_CHANNEL;
+		priv->channel = channel;
+		IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
+		/* TODO: Validate that provided channel is in range */
+	}
+
+	switch (mode) {
+	case 1:
+		priv->ieee->iw_mode = IW_MODE_ADHOC;
+		break;
+#ifdef CONFIG_IPW_PROMISC
+	case 2:
+		priv->ieee->iw_mode = IW_MODE_MONITOR;
+		break;
+#endif
+	default:
+	case 0:
+		priv->ieee->iw_mode = IW_MODE_INFRA;
+		break;
+	}
+
+	if ((priv->pci_dev->device == 0x4223) ||
+	    (priv->pci_dev->device == 0x4224)) {
+		printk(KERN_INFO DRV_NAME
+		       ": Detected Intel PRO/Wireless 2915ABG Network "
+		       "Connection\n");
+		priv->ieee->abg_ture = 1;
+		band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND;
+		modulation = IEEE80211_OFDM_MODULATION |
+			IEEE80211_CCK_MODULATION;
+		priv->adapter = IPW_2915ABG;
+		priv->ieee->mode = IEEE_A|IEEE_G|IEEE_B;
+	} else {
+		if (priv->pci_dev->device == 0x4221)
+			printk(KERN_INFO DRV_NAME
+			       ": Detected Intel PRO/Wireless 2225BG Network "
+			       "Connection\n");
+		else
+			printk(KERN_INFO DRV_NAME
+			       ": Detected Intel PRO/Wireless 2200BG Network "
+			       "Connection\n");
+
+		priv->ieee->abg_ture = 0;
+		band = IEEE80211_24GHZ_BAND;
+		modulation = IEEE80211_OFDM_MODULATION |
+			IEEE80211_CCK_MODULATION;
+		priv->adapter = IPW_2200BG;
+		priv->ieee->mode = IEEE_G|IEEE_B;
+	}
+
+	priv->ieee->freq_band = band;
+	priv->ieee->modulation = modulation;
+
+	priv->rates_mask = IEEE80211_DEFAULT_RATES_MASK;
+
+	priv->missed_beacon_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
+	priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
+
+	priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
+
+	/* If power management is turned on, default to AC mode */
+        priv->power_mode = IPW_POWER_AC;
+	priv->tx_power = IPW_DEFAULT_TX_POWER;
+
+	err = request_irq(pdev->irq, ipw_isr, SA_SHIRQ, DRV_NAME,
+			  priv);
+	if (err) {
+		IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
+		goto out_destroy_workqueue;
+	}
+
+	SET_MODULE_OWNER(net_dev);
+	SET_NETDEV_DEV(net_dev, &pdev->dev);
+
+	priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
+	priv->ieee->set_security = shim__set_security;
+
+	net_dev->open = ipw_net_open;
+	net_dev->stop = ipw_net_stop;
+	net_dev->init = ipw_net_init;
+	net_dev->get_stats = ipw_net_get_stats;
+	net_dev->set_multicast_list = ipw_net_set_multicast_list;
+	net_dev->set_mac_address = ipw_net_set_mac_address;
+	net_dev->get_wireless_stats = ipw_get_wireless_stats;
+	net_dev->wireless_handlers = &ipw_wx_handler_def;
+	net_dev->ethtool_ops = &ipw_ethtool_ops;
+	net_dev->irq = pdev->irq;
+	net_dev->base_addr = (unsigned long )priv->hw_base;
+	net_dev->mem_start = pci_resource_start(pdev, 0);
+	net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
+
+	err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
+	if (err) {
+		IPW_ERROR("failed to create sysfs device attributes\n");
+		goto out_release_irq;
+	}
+
+	err = register_netdev(net_dev);
+	if (err) {
+		IPW_ERROR("failed to register network device\n");
+		goto out_remove_group;
+	}
+
+	return 0;
+
+ out_remove_group:
+	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
+ out_release_irq:
+	free_irq(pdev->irq, priv);
+ out_destroy_workqueue:
+	destroy_workqueue(priv->workqueue);
+	priv->workqueue = NULL;
+ out_iounmap:
+	iounmap(priv->hw_base);
+ out_pci_release_regions:
+	pci_release_regions(pdev);
+ out_pci_disable_device:
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+ out_free_ieee80211:
+	free_ieee80211(priv->net_dev);
+ out:
+	return err;
+}
+
+static void ipw_pci_remove(struct pci_dev *pdev)
+{
+	struct ipw_priv *priv = pci_get_drvdata(pdev);
+	if (!priv)
+		return;
+
+	priv->status |= STATUS_EXIT_PENDING;
+
+	sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
+
+	ipw_down(priv);
+
+	unregister_netdev(priv->net_dev);
+
+	if (priv->rxq) {
+		ipw_rx_queue_free(priv, priv->rxq);
+		priv->rxq = NULL;
+	}
+	ipw_tx_queue_free(priv);
+
+	/* ipw_down will ensure that there is no more pending work
+	 * in the workqueues, so we can safely remove them now. */
+	if (priv->workqueue) {
+		cancel_delayed_work(&priv->adhoc_check);
+		cancel_delayed_work(&priv->gather_stats);
+		cancel_delayed_work(&priv->request_scan);
+		cancel_delayed_work(&priv->rf_kill);
+		cancel_delayed_work(&priv->scan_check);
+		destroy_workqueue(priv->workqueue);
+		priv->workqueue = NULL;
+	}
+
+	free_irq(pdev->irq, priv);
+	iounmap(priv->hw_base);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+	free_ieee80211(priv->net_dev);
+
+#ifdef CONFIG_PM
+	if (fw_loaded) {
+		release_firmware(bootfw);
+		release_firmware(ucode);
+		release_firmware(firmware);
+		fw_loaded = 0;
+	}
+#endif
+}
+
+
+#ifdef CONFIG_PM
+static int ipw_pci_suspend(struct pci_dev *pdev, u32 state)
+{
+	struct ipw_priv *priv = pci_get_drvdata(pdev);
+	struct net_device *dev = priv->net_dev;
+
+	printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
+
+ 	/* Take down the device; powers it off, etc. */
+	ipw_down(priv);
+
+	/* Remove the PRESENT state of the device */
+	netif_device_detach(dev);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
+	pci_save_state(pdev, priv->pm_state);
+#else
+	pci_save_state(pdev);
+#endif
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, state);
+
+	return 0;
+}
+
+static int ipw_pci_resume(struct pci_dev *pdev)
+{
+	struct ipw_priv *priv = pci_get_drvdata(pdev);
+	struct net_device *dev = priv->net_dev;
+	u32 val;
+
+	printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
+
+	pci_set_power_state(pdev, 0);
+	pci_enable_device(pdev);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
+	pci_restore_state(pdev, priv->pm_state);
+#else
+	pci_restore_state(pdev);
+#endif
+	/*
+	 * Suspend/Resume resets the PCI configuration space, so we have to
+	 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
+	 * from interfering with C3 CPU state. pci_restore_state won't help
+	 * here since it only restores the first 64 bytes pci config header.
+	 */
+	pci_read_config_dword(pdev, 0x40, &val);
+	if ((val & 0x0000ff00) != 0)
+		pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
+
+	/* Set the device back into the PRESENT state; this will also wake
+	 * the queue if needed */
+	netif_device_attach(dev);
+
+	/* Bring the device back up */
+	queue_work(priv->workqueue, &priv->up);
+
+	return 0;
+}
+#endif
+
+/* driver initialization stuff */
+static struct pci_driver ipw_driver = {
+	.name = DRV_NAME,
+	.id_table = card_ids,
+	.probe = ipw_pci_probe,
+	.remove = __devexit_p(ipw_pci_remove),
+#ifdef CONFIG_PM
+	.suspend = ipw_pci_suspend,
+	.resume = ipw_pci_resume,
+#endif
+};
+
+static int __init ipw_init(void)
+{
+	int ret;
+
+	printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
+	printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
+
+	ret = pci_module_init(&ipw_driver);
+	if (ret) {
+		IPW_ERROR("Unable to initialize PCI module\n");
+		return ret;
+	}
+
+	ret = driver_create_file(&ipw_driver.driver,
+				 &driver_attr_debug_level);
+	if (ret) {
+		IPW_ERROR("Unable to create driver sysfs file\n");
+		pci_unregister_driver(&ipw_driver);
+		return ret;
+	}
+
+	return ret;
+}
+
+static void __exit ipw_exit(void)
+{
+	driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
+	pci_unregister_driver(&ipw_driver);
+}
+
+module_param(disable, int, 0444);
+MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
+
+module_param(associate, int, 0444);
+MODULE_PARM_DESC(associate, "auto associate when scanning (default on)");
+
+module_param(auto_create, int, 0444);
+MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
+
+module_param(debug, int, 0444);
+MODULE_PARM_DESC(debug, "debug output mask");
+
+module_param(channel, int, 0444);
+MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");
+
+module_param(ifname, charp, 0444);
+MODULE_PARM_DESC(ifname, "network device name (default eth%d)");
+
+#ifdef CONFIG_IPW_PROMISC
+module_param(mode, int, 0444);
+MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
+#else
+module_param(mode, int, 0444);
+MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
+#endif
+
+module_exit(ipw_exit);
+module_init(ipw_init);
diff --git a/drivers/net/wireless/ipw2200.h b/drivers/net/wireless/ipw2200.h
new file mode 100644
--- /dev/null
+++ b/drivers/net/wireless/ipw2200.h
@@ -0,0 +1,1770 @@
+/******************************************************************************
+
+  Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+
+  The full GNU General Public License is included in this distribution in the
+  file called LICENSE.
+
+  Contact Information:
+  James P. Ketrenos <ipw2100-admin@linux.intel.com>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+******************************************************************************/
+
+#ifndef __ipw2200_h__
+#define __ipw2200_h__
+
+#define WEXT_USECHANNELS 1
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/config.h>
+#include <linux/init.h>
+
+#include <linux/version.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/random.h>
+
+#include <linux/firmware.h>
+#include <linux/wireless.h>
+#include <asm/io.h>
+
+#include <net/ieee80211.h>
+
+#define DRV_NAME	"ipw2200"
+
+#include <linux/workqueue.h>
+
+#ifndef IRQ_NONE
+typedef void irqreturn_t;
+#define IRQ_NONE
+#define IRQ_HANDLED
+#define IRQ_RETVAL(x)
+#endif
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9) )
+#define __iomem
+#endif
+
+#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) )
+#define pci_dma_sync_single_for_cpu	pci_dma_sync_single
+#define pci_dma_sync_single_for_device	pci_dma_sync_single
+#endif
+
+#ifndef HAVE_FREE_NETDEV
+#define free_netdev(x) kfree(x)
+#endif
+
+/* Authentication  and Association States */
+enum connection_manager_assoc_states
+{
+	CMAS_INIT = 0,
+	CMAS_TX_AUTH_SEQ_1,
+	CMAS_RX_AUTH_SEQ_2,
+	CMAS_AUTH_SEQ_1_PASS,
+	CMAS_AUTH_SEQ_1_FAIL,
+	CMAS_TX_AUTH_SEQ_3,
+	CMAS_RX_AUTH_SEQ_4,
+	CMAS_AUTH_SEQ_2_PASS,
+	CMAS_AUTH_SEQ_2_FAIL,
+	CMAS_AUTHENTICATED,
+	CMAS_TX_ASSOC,
+	CMAS_RX_ASSOC_RESP,
+	CMAS_ASSOCIATED,
+	CMAS_LAST
+};
+
+
+#define IPW_NORMAL                   0
+#define IPW_NOWAIT                   0
+#define IPW_WAIT                     (1<<0)
+#define IPW_QUIET                    (1<<1)
+#define IPW_ROAMING                  (1<<2)
+
+#define IPW_POWER_MODE_CAM           0x00	//(always on)
+#define IPW_POWER_INDEX_1            0x01
+#define IPW_POWER_INDEX_2            0x02
+#define IPW_POWER_INDEX_3            0x03
+#define IPW_POWER_INDEX_4            0x04
+#define IPW_POWER_INDEX_5            0x05
+#define IPW_POWER_AC                 0x06
+#define IPW_POWER_BATTERY            0x07
+#define IPW_POWER_LIMIT              0x07
+#define IPW_POWER_MASK               0x0F
+#define IPW_POWER_ENABLED            0x10
+#define IPW_POWER_LEVEL(x)           ((x) & IPW_POWER_MASK)
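+/* e.g. IPW_POWER_LEVEL(IPW_POWER_ENABLED | IPW_POWER_INDEX_3) evaluates to
+ * IPW_POWER_INDEX_3: the mask strips the enable bit and leaves the level. */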
+
+#define IPW_CMD_HOST_COMPLETE                 2
+#define IPW_CMD_POWER_DOWN                    4
+#define IPW_CMD_SYSTEM_CONFIG                 6
+#define IPW_CMD_MULTICAST_ADDRESS             7
+#define IPW_CMD_SSID                          8
+#define IPW_CMD_ADAPTER_ADDRESS              11
+#define IPW_CMD_PORT_TYPE                    12
+#define IPW_CMD_RTS_THRESHOLD                15
+#define IPW_CMD_FRAG_THRESHOLD               16
+#define IPW_CMD_POWER_MODE                   17
+#define IPW_CMD_WEP_KEY                      18
+#define IPW_CMD_TGI_TX_KEY                   19
+#define IPW_CMD_SCAN_REQUEST                 20
+#define IPW_CMD_ASSOCIATE                    21
+#define IPW_CMD_SUPPORTED_RATES              22
+#define IPW_CMD_SCAN_ABORT                   23
+#define IPW_CMD_TX_FLUSH                     24
+#define IPW_CMD_QOS_PARAMETERS               25
+#define IPW_CMD_SCAN_REQUEST_EXT             26
+#define IPW_CMD_DINO_CONFIG                  30
+#define IPW_CMD_RSN_CAPABILITIES             31
+#define IPW_CMD_RX_KEY                       32
+#define IPW_CMD_CARD_DISABLE                 33
+#define IPW_CMD_SEED_NUMBER                  34
+#define IPW_CMD_TX_POWER                     35
+#define IPW_CMD_COUNTRY_INFO                 36
+#define IPW_CMD_AIRONET_INFO                 37
+#define IPW_CMD_AP_TX_POWER                  38
+#define IPW_CMD_CCKM_INFO                    39
+#define IPW_CMD_CCX_VER_INFO                 40
+#define IPW_CMD_SET_CALIBRATION              41
+#define IPW_CMD_SENSITIVITY_CALIB            42
+#define IPW_CMD_RETRY_LIMIT                  51
+#define IPW_CMD_IPW_PRE_POWER_DOWN           58
+#define IPW_CMD_VAP_BEACON_TEMPLATE          60
+#define IPW_CMD_VAP_DTIM_PERIOD              61
+#define IPW_CMD_EXT_SUPPORTED_RATES          62
+#define IPW_CMD_VAP_LOCAL_TX_PWR_CONSTRAINT  63
+#define IPW_CMD_VAP_QUIET_INTERVALS          64
+#define IPW_CMD_VAP_CHANNEL_SWITCH           65
+#define IPW_CMD_VAP_MANDATORY_CHANNELS       66
+#define IPW_CMD_VAP_CELL_PWR_LIMIT           67
+#define IPW_CMD_VAP_CF_PARAM_SET             68
+#define IPW_CMD_VAP_SET_BEACONING_STATE      69
+#define IPW_CMD_MEASUREMENT                  80
+#define IPW_CMD_POWER_CAPABILITY             81
+#define IPW_CMD_SUPPORTED_CHANNELS           82
+#define IPW_CMD_TPC_REPORT                   83
+#define IPW_CMD_WME_INFO                     84
+#define IPW_CMD_PRODUCTION_COMMAND	     85
+#define IPW_CMD_LINKSYS_EOU_INFO             90
+
+#define RFD_SIZE                              4
+#define NUM_TFD_CHUNKS                        6
+
+#define TX_QUEUE_SIZE                        32
+#define RX_QUEUE_SIZE                        32
+
+#define DINO_CMD_WEP_KEY                   0x08
+#define DINO_CMD_TX                        0x0B
+#define DCT_ANTENNA_A                      0x01
+#define DCT_ANTENNA_B                      0x02
+
+#define IPW_A_MODE                         0
+#define IPW_B_MODE                         1
+#define IPW_G_MODE                         2
+
+/*
+ * TX Queue Flag Definitions
+ */
+
+/* abort attempt if mgmt frame is rx'd */
+#define DCT_FLAG_ABORT_MGMT                0x01
+
+/* require CTS */
+#define DCT_FLAG_CTS_REQUIRED              0x02
+
+/* use short preamble */
+#define DCT_FLAG_SHORT_PREMBL              0x04
+
+/* RTS/CTS first */
+#define DCT_FLAG_RTS_REQD                  0x08
+
+/* dont calculate duration field */
+#define DCT_FLAG_DUR_SET                   0x10
+
+/* even if MAC WEP set (allows pre-encrypt) */
+#define DCT_FLAG_NO_WEP              0x20
+/* overwrite TSF field */
+#define DCT_FLAG_TSF_REQD                  0x40
+
+/* ACK rx is expected to follow */
+#define DCT_FLAG_ACK_REQD                  0x80
+
+#define DCT_FLAG_EXT_MODE_CCK  0x01
+#define DCT_FLAG_EXT_MODE_OFDM 0x00
+
+
+#define TX_RX_TYPE_MASK                    0xFF
+#define TX_FRAME_TYPE                      0x00
+#define TX_HOST_COMMAND_TYPE               0x01
+#define RX_FRAME_TYPE                      0x09
+#define RX_HOST_NOTIFICATION_TYPE          0x03
+#define RX_HOST_CMD_RESPONSE_TYPE          0x04
+#define RX_TX_FRAME_RESPONSE_TYPE          0x05
+#define TFD_NEED_IRQ_MASK                  0x04
+
+#define HOST_CMD_DINO_CONFIG               30
+
+#define HOST_NOTIFICATION_STATUS_ASSOCIATED             10
+#define HOST_NOTIFICATION_STATUS_AUTHENTICATE           11
+#define HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT    12
+#define HOST_NOTIFICATION_STATUS_SCAN_COMPLETED         13
+#define HOST_NOTIFICATION_STATUS_FRAG_LENGTH            14
+#define HOST_NOTIFICATION_STATUS_LINK_DETERIORATION     15
+#define HOST_NOTIFICATION_DINO_CONFIG_RESPONSE          16
+#define HOST_NOTIFICATION_STATUS_BEACON_STATE           17
+#define HOST_NOTIFICATION_STATUS_TGI_TX_KEY             18
+#define HOST_NOTIFICATION_TX_STATUS                     19
+#define HOST_NOTIFICATION_CALIB_KEEP_RESULTS            20
+#define HOST_NOTIFICATION_MEASUREMENT_STARTED           21
+#define HOST_NOTIFICATION_MEASUREMENT_ENDED             22
+#define HOST_NOTIFICATION_CHANNEL_SWITCHED              23
+#define HOST_NOTIFICATION_RX_DURING_QUIET_PERIOD        24
+#define HOST_NOTIFICATION_NOISE_STATS			25
+#define HOST_NOTIFICATION_S36_MEASUREMENT_ACCEPTED      30
+#define HOST_NOTIFICATION_S36_MEASUREMENT_REFUSED       31
+
+#define HOST_NOTIFICATION_STATUS_BEACON_MISSING         1
+#define IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT           24
+#define IPW_MB_ROAMING_THRESHOLD_DEFAULT                8
+#define IPW_REAL_RATE_RX_PACKET_THRESHOLD               300
+
+#define MACADRR_BYTE_LEN                     6
+
+#define DCR_TYPE_AP                       0x01
+#define DCR_TYPE_WLAP                     0x02
+#define DCR_TYPE_MU_ESS                   0x03
+#define DCR_TYPE_MU_IBSS                  0x04
+#define DCR_TYPE_MU_PIBSS                 0x05
+#define DCR_TYPE_SNIFFER                  0x06
+#define DCR_TYPE_MU_BSS        DCR_TYPE_MU_ESS
+
+/**
+ * Generic queue structure
+ *
+ * Contains common data for Rx and Tx queues
+ */
+struct clx2_queue {
+	int n_bd;                      /**< number of BDs in this queue */
+	int first_empty;               /**< first empty entry (index) */
+	int last_used;                 /**< last used entry (index) */
+	u32 reg_w;                   /**< 'write' reg (queue head), addr in domain 1 */
+	u32 reg_r;                   /**< 'read' reg (queue tail), addr in domain 1 */
+	dma_addr_t dma_addr;            /**< physical addr for BD's */
+	int low_mark;                  /**< low watermark, resume queue if free space more than this */
+	int high_mark;                 /**< high watermark, stop queue if free space less than this */
+} __attribute__ ((packed));
+
+struct machdr32
+{
+	u16 frame_ctl;
+	u16 duration;     // watch out for endians!
+	u8 addr1[ MACADRR_BYTE_LEN ];
+	u8 addr2[ MACADRR_BYTE_LEN ];
+	u8 addr3[ MACADRR_BYTE_LEN ];
+	u16 seq_ctrl;     // more endians!
+	u8 addr4[ MACADRR_BYTE_LEN ];
+	u16 qos_ctrl;
+} __attribute__ ((packed)) ;
+
+struct machdr30
+{
+	u16 frame_ctl;
+	u16 duration;     // watch out for endians!
+	u8 addr1[ MACADRR_BYTE_LEN ];
+	u8 addr2[ MACADRR_BYTE_LEN ];
+	u8 addr3[ MACADRR_BYTE_LEN ];
+	u16 seq_ctrl;     // more endians!
+	u8 addr4[ MACADRR_BYTE_LEN ];
+} __attribute__ ((packed)) ;
+
+struct machdr26
+{
+	u16 frame_ctl;
+	u16 duration;     // watch out for endians!
+	u8 addr1[ MACADRR_BYTE_LEN ];
+	u8 addr2[ MACADRR_BYTE_LEN ];
+	u8 addr3[ MACADRR_BYTE_LEN ];
+	u16 seq_ctrl;     // more endians!
+	u16 qos_ctrl;
+} __attribute__ ((packed)) ;
+
+struct machdr24
+{
+	u16 frame_ctl;
+	u16 duration;     // watch out for endians!
+	u8 addr1[ MACADRR_BYTE_LEN ];
+	u8 addr2[ MACADRR_BYTE_LEN ];
+	u8 addr3[ MACADRR_BYTE_LEN ];
+	u16 seq_ctrl;     // more endians!
+} __attribute__ ((packed)) ;
+
+// TX TFD with 32 byte MAC Header
+struct tx_tfd_32
+{
+	struct machdr32    mchdr;                      // 32
+	u32                uivplaceholder[2];          // 8
+} __attribute__ ((packed)) ;
+
+// TX TFD with 30 byte MAC Header
+struct tx_tfd_30
+{
+	struct machdr30    mchdr;                      // 30
+	u8                 reserved[2];                // 2
+	u32                uivplaceholder[2];          // 8
+} __attribute__ ((packed)) ;
+
+// tx tfd with 26 byte mac header
+struct tx_tfd_26
+{
+	struct machdr26    mchdr;                      // 26
+	u8                 reserved1[2];               // 2
+	u32                uivplaceholder[2];          // 8
+	u8                 reserved2[4];               // 4
+} __attribute__ ((packed)) ;
+
+// tx tfd with 24 byte mac header
+struct tx_tfd_24
+{
+	struct machdr24    mchdr;                      // 24
+	u32                uivplaceholder[2];          // 8
+	u8                 reserved[8];                // 8
+} __attribute__ ((packed)) ;
+
+
+#define DCT_WEP_KEY_FIELD_LENGTH 16
+
+struct tfd_command
+{
+	u8 index;
+	u8 length;
+	u16 reserved;
+	u8 payload[0];
+} __attribute__ ((packed)) ;
+
+struct tfd_data {
+	/* Header */
+	u32 work_area_ptr;
+	u8 station_number; /* 0 for BSS */
+	u8 reserved1;
+	u16 reserved2;
+
+	/* Tx Parameters */
+	u8 cmd_id;
+	u8 seq_num;
+	u16 len;
+	u8 priority;
+	u8 tx_flags;
+	u8 tx_flags_ext;
+	u8 key_index;
+	u8 wepkey[DCT_WEP_KEY_FIELD_LENGTH];
+	u8 rate;
+	u8 antenna;
+	u16 next_packet_duration;
+	u16 next_frag_len;
+	u16 back_off_counter;   /* txop */
+	u8 retrylimit;
+	u16 cwcurrent;
+	u8 reserved3;
+
+	/* 802.11 MAC Header */
+	union
+	{
+		struct tx_tfd_24 tfd_24;
+		struct tx_tfd_26 tfd_26;
+		struct tx_tfd_30 tfd_30;
+		struct tx_tfd_32 tfd_32;
+	} tfd;
+
+	/* Payload DMA info */
+	u32 num_chunks;
+	u32 chunk_ptr[NUM_TFD_CHUNKS];
+	u16 chunk_len[NUM_TFD_CHUNKS];
+} __attribute__ ((packed));
+
+struct txrx_control_flags
+{
+	u8 message_type;
+	u8 rx_seq_num;
+	u8 control_bits;
+	u8 reserved;
+} __attribute__ ((packed));
+
+#define  TFD_SIZE                           128
+#define  TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH   (TFD_SIZE - sizeof(struct txrx_control_flags))
+
+struct tfd_frame
+{
+	struct txrx_control_flags control_flags;
+	union {
+		struct tfd_data data;
+		struct tfd_command cmd;
+		u8 raw[TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH];
+	} u;
+} __attribute__ ((packed)) ;
+
+typedef void destructor_func(const void*);
+
+/**
+ * Tx Queue for DMA. Queue consists of circular buffer of
+ * BD's and required locking structures.
+ */
+struct clx2_tx_queue {
+	struct clx2_queue q;
+	struct tfd_frame* bd;
+	struct ieee80211_txb **txb;
+};
+
+/*
+ * RX related structures and functions
+ */
+#define RX_FREE_BUFFERS 32
+#define RX_LOW_WATERMARK 8
+
+#define SUP_RATE_11A_MAX_NUM_CHANNELS  (8)
+#define SUP_RATE_11B_MAX_NUM_CHANNELS  (4)
+#define SUP_RATE_11G_MAX_NUM_CHANNELS  (12)
+
+// Used to pass to the driver the number of successes and failures per rate
+struct rate_histogram
+{
+	union {
+		u32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
+		u32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
+		u32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
+	} success;
+	union {
+		u32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
+		u32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
+		u32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
+	} failed;
+} __attribute__ ((packed));
+
+/* statistics command response */
+struct ipw_cmd_stats {
+	u8 cmd_id;
+	u8 seq_num;
+	u16 good_sfd;
+	u16 bad_plcp;
+	u16 wrong_bssid;
+	u16 valid_mpdu;
+	u16 bad_mac_header;
+	u16 reserved_frame_types;
+	u16 rx_ina;
+	u16 bad_crc32;
+	u16 invalid_cts;
+	u16 invalid_acks;
+	u16 long_distance_ina_fina;
+	u16 dsp_silence_unreachable;
+	u16 accumulated_rssi;
+	u16 rx_ovfl_frame_tossed;
+	u16 rssi_silence_threshold;
+	u16 rx_ovfl_frame_supplied;
+	u16 last_rx_frame_signal;
+	u16 last_rx_frame_noise;
+	u16 rx_autodetec_no_ofdm;
+	u16 rx_autodetec_no_barker;
+	u16 reserved;
+} __attribute__ ((packed));
+
+struct notif_channel_result {
+	u8 channel_num;
+	struct ipw_cmd_stats stats;
+	u8 uReserved;
+} __attribute__ ((packed));
+
+struct notif_scan_complete {
+	u8 scan_type;
+	u8 num_channels;
+	u8 status;
+	u8 reserved;
+}  __attribute__ ((packed));
+
+struct notif_frag_length {
+	u16 frag_length;
+	u16 reserved;
+}  __attribute__ ((packed));
+
+struct notif_beacon_state {
+	u32 state;
+	u32 number;
+} __attribute__ ((packed));
+
+struct notif_tgi_tx_key {
+	u8 key_state;
+	u8 security_type;
+	u8 station_index;
+	u8 reserved;
+} __attribute__ ((packed));
+
+struct notif_link_deterioration {
+	struct ipw_cmd_stats stats;
+	u8 rate;
+	u8 modulation;
+	struct rate_histogram histogram;
+	u8 reserved1;
+	u16 reserved2;
+} __attribute__ ((packed));
+
+struct notif_association {
+	u8 state;
+} __attribute__ ((packed));
+
+struct notif_authenticate {
+	u8 state;
+	struct machdr24 addr;
+	u16 status;
+} __attribute__ ((packed));
+
+struct temperature
+{
+	s32 measured;
+	s32 active;
+} __attribute__ ((packed));
+
+struct notif_calibration {
+	u8 data[104];
+} __attribute__ ((packed));
+
+struct notif_noise {
+	u32 value;
+} __attribute__ ((packed));
+
+struct ipw_rx_notification {
+	u8 reserved[8];
+	u8 subtype;
+	u8 flags;
+	u16 size;
+	union {
+		struct notif_association assoc;
+		struct notif_authenticate auth;
+		struct notif_channel_result channel_result;
+		struct notif_scan_complete scan_complete;
+		struct notif_frag_length frag_len;
+		struct notif_beacon_state beacon_state;
+		struct notif_tgi_tx_key tgi_tx_key;
+		struct notif_link_deterioration link_deterioration;
+		struct notif_calibration calibration;
+		struct notif_noise noise;
+		u8 raw[0];
+	} u;
+} __attribute__ ((packed));
+
+struct ipw_rx_frame {
+	u32 reserved1;
+	u8 parent_tsf[4];     // fw_use[0] is boolean for OUR_TSF_IS_GREATER
+	u8 received_channel;  // The channel this frame was received on.
+			      // Note that for .11b this does not have to be
+			      // the same as the channel it was sent on.
+			      // Filled in by the LMAC.
+	u8 frameStatus;
+	u8 rate;
+	u8 rssi;
+	u8 agc;
+	u8 rssi_dbm;
+	u16 signal;
+	u16 noise;
+	u8 antennaAndPhy;
+	u8 control;           // control bit should be on in bg
+	u8 rtscts_rate;       // rate of rts or cts (in rts cts sequence rate
+	                      // is identical)
+	u8 rtscts_seen;       // 0x1 RTS seen ; 0x2 CTS seen
+	u16 length;
+	u8 data[0];
+} __attribute__ ((packed));
+
+struct ipw_rx_header {
+	u8 message_type;
+	u8 rx_seq_num;
+	u8 control_bits;
+	u8 reserved;
+} __attribute__ ((packed));
+
+struct ipw_rx_packet
+{
+	struct ipw_rx_header header;
+	union {
+		struct ipw_rx_frame frame;
+		struct ipw_rx_notification notification;
+	} u;
+} __attribute__ ((packed));
+
+#define IPW_RX_NOTIFICATION_SIZE (sizeof(struct ipw_rx_header) + 12)
+#define IPW_RX_FRAME_SIZE        (sizeof(struct ipw_rx_header) + \
+                                  sizeof(struct ipw_rx_frame))
+
+struct ipw_rx_mem_buffer {
+	dma_addr_t dma_addr;
+	struct ipw_rx_buffer *rxb;
+	struct sk_buff *skb;
+	struct list_head list;
+}; /* Not transferred over network, so not  __attribute__ ((packed)) */
+
+struct ipw_rx_queue {
+	struct ipw_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
+	struct ipw_rx_mem_buffer *queue[RX_QUEUE_SIZE];
+	u32 processed; /* Internal index to last handled Rx packet */
+	u32 read;      /* Shared index to newest available Rx buffer */
+	u32 write;     /* Shared index to oldest written Rx packet */
+	u32 free_count;/* Number of pre-allocated buffers in rx_free */
+	/* Each of these lists is used as a FIFO for ipw_rx_mem_buffers */
+	struct list_head rx_free;  /* Owns the SKBs */
+	struct list_head rx_used;  /* No SKB allocated */
+	spinlock_t lock;
+}; /* Not transferred over network, so not  __attribute__ ((packed)) */
+
+
+struct alive_command_responce {
+	u8 alive_command;
+	u8 sequence_number;
+	u16 software_revision;
+	u8 device_identifier;
+	u8 reserved1[5];
+	u16 reserved2;
+	u16 reserved3;
+	u16 clock_settle_time;
+	u16 powerup_settle_time;
+	u16 reserved4;
+	u8 time_stamp[5];	/* month, day, year, hours, minutes */
+	u8 ucode_valid;
+} __attribute__ ((packed));
+
+#define IPW_MAX_RATES 12
+
+struct ipw_rates {
+	u8 num_rates;
+	u8 rates[IPW_MAX_RATES];
+} __attribute__ ((packed));
+
+struct command_block
+{
+	unsigned int control;
+	u32 source_addr;
+	u32 dest_addr;
+	unsigned int status;
+} __attribute__ ((packed));
+
+#define CB_NUMBER_OF_ELEMENTS_SMALL 64
+struct fw_image_desc
+{
+	unsigned long last_cb_index;
+	unsigned long current_cb_index;
+	struct command_block cb_list[CB_NUMBER_OF_ELEMENTS_SMALL];
+	void * v_addr;
+	unsigned long p_addr;
+	unsigned long len;
+};
+
+struct ipw_sys_config
+{
+	u8 bt_coexistence;
+	u8 reserved1;
+	u8 answer_broadcast_ssid_probe;
+	u8 accept_all_data_frames;
+	u8 accept_non_directed_frames;
+	u8 exclude_unicast_unencrypted;
+	u8 disable_unicast_decryption;
+	u8 exclude_multicast_unencrypted;
+	u8 disable_multicast_decryption;
+	u8 antenna_diversity;
+	u8 pass_crc_to_host;
+	u8 dot11g_auto_detection;
+	u8 enable_cts_to_self;
+	u8 enable_multicast_filtering;
+	u8 bt_coexist_collision_thr;
+	u8 reserved2;
+	u8 accept_all_mgmt_bcpr;
+	u8 accept_all_mgtm_frames;
+	u8 pass_noise_stats_to_host;
+	u8 reserved3;
+} __attribute__ ((packed));
+
+struct ipw_multicast_addr
+{
+	u8 num_of_multicast_addresses;
+	u8 reserved[3];
+	u8 mac1[6];
+	u8 mac2[6];
+	u8 mac3[6];
+	u8 mac4[6];
+} __attribute__ ((packed));
+
+struct ipw_wep_key
+{
+	u8 cmd_id;
+	u8 seq_num;
+	u8 key_index;
+	u8 key_size;
+	u8 key[16];
+} __attribute__ ((packed));
+
+struct ipw_tgi_tx_key
+{
+	u8 key_id;
+	u8 security_type;
+	u8 station_index;
+	u8 flags;
+	u8 key[16];
+	u32 tx_counter[2];
+} __attribute__ ((packed));
+
+#define IPW_SCAN_CHANNELS 54
+
+struct ipw_scan_request
+{
+	u8 scan_type;
+	u16 dwell_time;
+	u8 channels_list[IPW_SCAN_CHANNELS];
+	u8 channels_reserved[3];
+} __attribute__ ((packed));
+
+enum {
+	IPW_SCAN_PASSIVE_TILL_FIRST_BEACON_SCAN = 0,
+	IPW_SCAN_PASSIVE_FULL_DWELL_SCAN,
+	IPW_SCAN_ACTIVE_DIRECT_SCAN,
+	IPW_SCAN_ACTIVE_BROADCAST_SCAN,
+	IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN,
+	IPW_SCAN_TYPES
+};
+
+struct ipw_scan_request_ext
+{
+	u32 full_scan_index;
+	u8 channels_list[IPW_SCAN_CHANNELS];
+	u8 scan_type[IPW_SCAN_CHANNELS / 2];
+	u8 reserved;
+	u16 dwell_time[IPW_SCAN_TYPES];
+} __attribute__ ((packed));
+
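+/* Two 4-bit scan types are packed into each byte of scan_type[]: even
+ * channel indices occupy the high nibble and odd indices the low nibble,
+ * e.g. index 0 reads scan_type[0] >> 4 and index 1 reads scan_type[0] & 0xF. */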
+extern inline u8 ipw_get_scan_type(struct ipw_scan_request_ext *scan, u8 index)
+{
+	if (index % 2)
+		return scan->scan_type[index / 2] & 0x0F;
+	else
+		return (scan->scan_type[index / 2] & 0xF0) >> 4;
+}
+
+extern inline void ipw_set_scan_type(struct ipw_scan_request_ext *scan,
+				     u8 index, u8 scan_type)
+{
+	if (index % 2)
+		scan->scan_type[index / 2] =
+			(scan->scan_type[index / 2] & 0xF0) |
+			(scan_type & 0x0F);
+	else
+		scan->scan_type[index / 2] =
+			(scan->scan_type[index / 2] & 0x0F) |
+			((scan_type & 0x0F) << 4);
+}
+
+struct ipw_associate
+{
+	u8 channel;
+	u8 auth_type:4,
+	   auth_key:4;
+	u8 assoc_type;
+	u8 reserved;
+	u16 policy_support;
+	u8 preamble_length;
+	u8 ieee_mode;
+	u8 bssid[ETH_ALEN];
+	u32 assoc_tsf_msw;
+	u32 assoc_tsf_lsw;
+	u16 capability;
+	u16 listen_interval;
+	u16 beacon_interval;
+	u8 dest[ETH_ALEN];
+	u16 atim_window;
+	u8 smr;
+	u8 reserved1;
+	u16 reserved2;
+} __attribute__ ((packed));
+
+struct ipw_supported_rates
+{
+	u8 ieee_mode;
+	u8 num_rates;
+	u8 purpose;
+	u8 reserved;
+	u8 supported_rates[IPW_MAX_RATES];
+} __attribute__ ((packed));
+
+struct ipw_rts_threshold
+{
+	u16 rts_threshold;
+	u16 reserved;
+} __attribute__ ((packed));
+
+struct ipw_frag_threshold
+{
+	u16 frag_threshold;
+	u16 reserved;
+} __attribute__ ((packed));
+
+struct ipw_retry_limit
+{
+	u8 short_retry_limit;
+	u8 long_retry_limit;
+	u16 reserved;
+} __attribute__ ((packed));
+
+struct ipw_dino_config
+{
+	u32 dino_config_addr;
+	u16 dino_config_size;
+	u8 dino_response;
+	u8 reserved;
+} __attribute__ ((packed));
+
+struct ipw_aironet_info
+{
+	u8 id;
+	u8 length;
+	u16 reserved;
+} __attribute__ ((packed));
+
+struct ipw_rx_key
+{
+	u8 station_index;
+	u8 key_type;
+	u8 key_id;
+	u8 key_flag;
+	u8 key[16];
+	u8 station_address[6];
+	u8 key_index;
+	u8 reserved;
+} __attribute__ ((packed));
+
+struct ipw_country_channel_info
+{
+	u8 first_channel;
+	u8 no_channels;
+	s8 max_tx_power;
+} __attribute__ ((packed));
+
+struct ipw_country_info
+{
+	u8 id;
+	u8 length;
+	u8 country_str[3];
+	struct ipw_country_channel_info groups[7];
+} __attribute__ ((packed));
+
+struct ipw_channel_tx_power
+{
+	u8 channel_number;
+	s8 tx_power;
+} __attribute__ ((packed));
+
+#define SCAN_ASSOCIATED_INTERVAL (HZ)
+#define SCAN_INTERVAL (HZ / 10)
+#define MAX_A_CHANNELS  37
+#define MAX_B_CHANNELS  14
+
+struct ipw_tx_power
+{
+	u8 num_channels;
+	u8 ieee_mode;
+	struct ipw_channel_tx_power channels_tx_power[MAX_A_CHANNELS];
+} __attribute__ ((packed));
+
+struct ipw_qos_parameters
+{
+	u16 cw_min[4];
+	u16 cw_max[4];
+	u8 aifs[4];
+	u8 flag[4];
+	u16 tx_op_limit[4];
+} __attribute__ ((packed));
+
+struct ipw_rsn_capabilities
+{
+	u8 id;
+	u8 length;
+	u16 version;
+} __attribute__ ((packed));
+
+struct ipw_sensitivity_calib
+{
+	u16 beacon_rssi_raw;
+	u16 reserved;
+} __attribute__ ((packed));
+
+/**
+ * Host command structure.
+ *
+ * On input, the following fields should be filled:
+ * - cmd
+ * - len
+ * - status_len
+ * - param (if needed)
+ *
+ * On output,
+ * - \a status contains the command status;
+ * - \a param is filled with the status parameters.
+ */
+struct ipw_cmd {
+  u32 cmd;         /**< Host command */
+  u32 status;      /**< Status */
+  u32 status_len;  /**< How many 32 bit parameters in the status */
+  u32 len;         /**< incoming parameters length, bytes */
+  /**
+   * Command parameters.
+   * There should be enough space for both the incoming and the
+   * outgoing parameters.
+   * Incoming parameters are listed first, followed by the outgoing ones.
+   * nParams = (len + 3) / 4 + status_len
+   */
+  u32 param[0];
+} __attribute__ ((packed));
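+/* Example: a command carrying len = 6 bytes of input with status_len = 2
+ * needs param[] sized for nParams = (6 + 3) / 4 + 2 = 4 u32 entries. */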
+
+#define STATUS_HCMD_ACTIVE      (1<<0)  /**< host command in progress */
+
+#define STATUS_INT_ENABLED      (1<<1)
+#define STATUS_RF_KILL_HW       (1<<2)
+#define STATUS_RF_KILL_SW       (1<<3)
+#define STATUS_RF_KILL_MASK     (STATUS_RF_KILL_HW | STATUS_RF_KILL_SW)
+
+#define STATUS_INIT             (1<<5)
+#define STATUS_AUTH             (1<<6)
+#define STATUS_ASSOCIATED       (1<<7)
+#define STATUS_STATE_MASK       (STATUS_INIT | STATUS_AUTH | STATUS_ASSOCIATED)
+
+#define STATUS_ASSOCIATING      (1<<8)
+#define STATUS_DISASSOCIATING   (1<<9)
+#define STATUS_ROAMING          (1<<10)
+#define STATUS_EXIT_PENDING     (1<<11)
+#define STATUS_DISASSOC_PENDING (1<<12)
+#define STATUS_STATE_PENDING    (1<<13)
+
+#define STATUS_SCAN_PENDING     (1<<20)
+#define STATUS_SCANNING         (1<<21)
+#define STATUS_SCAN_ABORTING    (1<<22)
+
+#define STATUS_INDIRECT_BYTE    (1<<28) /* sysfs entry configured for access */
+#define STATUS_INDIRECT_DWORD   (1<<29) /* sysfs entry configured for access */
+#define STATUS_DIRECT_DWORD     (1<<30) /* sysfs entry configured for access */
+
+#define STATUS_SECURITY_UPDATED (1<<31) /* Security sync needed */
+
+#define CFG_STATIC_CHANNEL      (1<<0) /* Restrict assoc. to single channel */
+#define CFG_STATIC_ESSID        (1<<1) /* Restrict assoc. to single SSID */
+#define CFG_STATIC_BSSID        (1<<2) /* Restrict assoc. to single BSSID */
+#define CFG_CUSTOM_MAC          (1<<3)
+#define CFG_PREAMBLE            (1<<4)
+#define CFG_ADHOC_PERSIST       (1<<5)
+#define CFG_ASSOCIATE           (1<<6)
+#define CFG_FIXED_RATE          (1<<7)
+#define CFG_ADHOC_CREATE        (1<<8)
+
+#define CAP_SHARED_KEY          (1<<0) /* Off = OPEN */
+#define CAP_PRIVACY_ON          (1<<1) /* Off = No privacy */
+
+#define MAX_STATIONS            32
+#define IPW_INVALID_STATION     (0xff)
+
+struct ipw_station_entry {
+	u8 mac_addr[ETH_ALEN];
+	u8 reserved;
+	u8 support_mode;
+};
+
+#define AVG_ENTRIES 8
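+/* Ring buffer used to keep a running average over the last AVG_ENTRIES
+ * samples (RSSI, noise, missed beacons); the update logic lives in
+ * ipw2200.c. */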
+struct average {
+	s16 entries[AVG_ENTRIES];
+	u8 pos;
+	u8 init;
+	s32 sum;
+};
+
+struct ipw_priv {
+	/* ieee device used by generic ieee processing code */
+	struct ieee80211_device *ieee;
+	struct ieee80211_security sec;
+
+	/* spinlock */
+	spinlock_t lock;
+
+	/* basic pci-network driver stuff */
+	struct pci_dev *pci_dev;
+	struct net_device *net_dev;
+
+	/* pci hardware address support */
+	void __iomem *hw_base;
+	unsigned long hw_len;
+
+	struct fw_image_desc sram_desc;
+
+	/* result of ucode download */
+	struct alive_command_responce dino_alive;
+
+	wait_queue_head_t wait_command_queue;
+	wait_queue_head_t wait_state;
+
+	/* Rx and Tx DMA processing queues */
+	struct ipw_rx_queue *rxq;
+	struct clx2_tx_queue txq_cmd;
+	struct clx2_tx_queue txq[4];
+	u32 status;
+	u32 config;
+	u32 capability;
+
+	u8 last_rx_rssi;
+	u8 last_noise;
+	struct average average_missed_beacons;
+	struct average average_rssi;
+	struct average average_noise;
+	u32 port_type;
+	int rx_bufs_min;          /**< minimum number of bufs in Rx queue */
+	int rx_pend_max;          /**< maximum pending buffers for one IRQ */
+	u32 hcmd_seq;             /**< sequence number for hcmd */
+	u32 missed_beacon_threshold;
+	u32 roaming_threshold;
+
+	struct ipw_associate assoc_request;
+	struct ieee80211_network *assoc_network;
+
+	unsigned long ts_scan_abort;
+	struct ipw_supported_rates rates;
+	struct ipw_rates phy[3];           /**< PHY restrictions, per band */
+	struct ipw_rates supp;             /**< software defined */
+	struct ipw_rates extended;         /**< use for corresp. IE, AP only */
+
+	struct notif_link_deterioration last_link_deterioration; /**< for statistics */
+	struct ipw_cmd* hcmd; /**< host command currently executed */
+
+	wait_queue_head_t hcmd_wq;     /**< host command waits for execution */
+	u32 tsf_bcn[2];              /**< TSF from latest beacon */
+
+	struct notif_calibration calib; /**< last calibration */
+
+	/* ordinal interface with firmware */
+	u32 table0_addr;
+	u32 table0_len;
+	u32 table1_addr;
+	u32 table1_len;
+	u32 table2_addr;
+	u32 table2_len;
+
+	/* context information */
+	u8 essid[IW_ESSID_MAX_SIZE];
+	u8 essid_len;
+	u8 nick[IW_ESSID_MAX_SIZE];
+	u16 rates_mask;
+	u8 channel;
+	struct ipw_sys_config sys_config;
+	u32 power_mode;
+	u8 bssid[ETH_ALEN];
+	u16 rts_threshold;
+	u8 mac_addr[ETH_ALEN];
+	u8 num_stations;
+	u8 stations[MAX_STATIONS][ETH_ALEN];
+
+	u32 notif_missed_beacons;
+
+	/* Statistics and counters normalized with each association */
+	u32 last_missed_beacons;
+	u32 last_tx_packets;
+	u32 last_rx_packets;
+	u32 last_tx_failures;
+	u32 last_rx_err;
+	u32 last_rate;
+
+	u32 missed_adhoc_beacons;
+	u32 missed_beacons;
+	u32 rx_packets;
+	u32 tx_packets;
+	u32 quality;
+
+        /* eeprom */
+	u8 eeprom[0x100];  /* 256 bytes of eeprom */
+	int eeprom_delay;
+
+	struct iw_statistics wstats;
+
+	struct workqueue_struct *workqueue;
+
+	struct work_struct adhoc_check;
+	struct work_struct associate;
+	struct work_struct disassociate;
+	struct work_struct rx_replenish;
+	struct work_struct request_scan;
+	struct work_struct adapter_restart;
+	struct work_struct rf_kill;
+	struct work_struct up;
+	struct work_struct down;
+	struct work_struct gather_stats;
+	struct work_struct abort_scan;
+	struct work_struct roam;
+	struct work_struct scan_check;
+
+	struct tasklet_struct irq_tasklet;
+
+
+#define IPW_2200BG  1
+#define IPW_2915ABG 2
+	u8 adapter;
+
+#define IPW_DEFAULT_TX_POWER 0x14
+	u8 tx_power;
+
+#ifdef CONFIG_PM
+	u32 pm_state[16];
+#endif
+
+	/* network state */
+
+	/* Used to pass the current INTA value from ISR to Tasklet */
+	u32 isr_inta;
+
+	/* debugging info */
+	u32 indirect_dword;
+	u32 direct_dword;
+	u32 indirect_byte;
+};				/*ipw_priv */
+
+
+/* debug macros */
+
+#ifdef CONFIG_IPW_DEBUG
+#define IPW_DEBUG(level, fmt, args...) \
+do { if (ipw_debug_level & (level)) \
+  printk(KERN_DEBUG DRV_NAME": %c %s " fmt, \
+         in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0)
+#else
+#define IPW_DEBUG(level, fmt, args...) do {} while (0)
+#endif				/* CONFIG_IPW_DEBUG */
+
+/*
+ * To use the debug system:
+ *
+ * If you are defining a new debug classification, simply add it to the #define
+ * list here in the form of:
+ *
+ * #define IPW_DL_xxxx VALUE
+ *
+ * shifting value to the left one bit from the previous entry.  xxxx should be
+ * the name of the classification (for example, WEP)
+ *
+ * You then need to either add an IPW_xxxx_DEBUG() macro definition for your
+ * classification, or use IPW_DEBUG(IPW_DL_xxxx, ...) whenever you want
+ * to send output to that classification.
+ *
+ * To add your debug level to the list of levels seen when you perform
+ *
+ * % cat /proc/net/ipw/debug_level
+ *
+ * you simply need to add your entry to the ipw_debug_levels array.
+ *
+ * If you do not see debug_level in /proc/net/ipw, then you do not have
+ * CONFIG_IPW_DEBUG defined in your kernel configuration.
+ *
+ */
+
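+/*
+ * Illustrative example only (IPW_DL_MYFEATURE is not a classification
+ * defined by this driver):
+ *
+ *	#define IPW_DL_MYFEATURE     (1<<30)
+ *	#define IPW_DEBUG_MYFEATURE(f, a...) IPW_DEBUG(IPW_DL_MYFEATURE, f, ## a)
+ *
+ *	IPW_DEBUG_MYFEATURE("mode is %d\n", mode);
+ */
+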
+#define IPW_DL_ERROR         (1<<0)
+#define IPW_DL_WARNING       (1<<1)
+#define IPW_DL_INFO          (1<<2)
+#define IPW_DL_WX            (1<<3)
+#define IPW_DL_HOST_COMMAND  (1<<5)
+#define IPW_DL_STATE         (1<<6)
+
+#define IPW_DL_NOTIF         (1<<10)
+#define IPW_DL_SCAN          (1<<11)
+#define IPW_DL_ASSOC         (1<<12)
+#define IPW_DL_DROP          (1<<13)
+#define IPW_DL_IOCTL         (1<<14)
+
+#define IPW_DL_MANAGE        (1<<15)
+#define IPW_DL_FW            (1<<16)
+#define IPW_DL_RF_KILL       (1<<17)
+#define IPW_DL_FW_ERRORS     (1<<18)
+
+
+#define IPW_DL_ORD           (1<<20)
+
+#define IPW_DL_FRAG          (1<<21)
+#define IPW_DL_WEP           (1<<22)
+#define IPW_DL_TX            (1<<23)
+#define IPW_DL_RX            (1<<24)
+#define IPW_DL_ISR           (1<<25)
+#define IPW_DL_FW_INFO       (1<<26)
+#define IPW_DL_IO            (1<<27)
+#define IPW_DL_TRACE         (1<<28)
+
+#define IPW_DL_STATS         (1<<29)
+
+
+#define IPW_ERROR(f, a...) printk(KERN_ERR DRV_NAME ": " f, ## a)
+#define IPW_WARNING(f, a...) printk(KERN_WARNING DRV_NAME ": " f, ## a)
+#define IPW_DEBUG_INFO(f, a...)    IPW_DEBUG(IPW_DL_INFO, f, ## a)
+
+#define IPW_DEBUG_WX(f, a...)     IPW_DEBUG(IPW_DL_WX, f, ## a)
+#define IPW_DEBUG_SCAN(f, a...)   IPW_DEBUG(IPW_DL_SCAN, f, ## a)
+#define IPW_DEBUG_STATUS(f, a...) IPW_DEBUG(IPW_DL_STATUS, f, ## a)
+#define IPW_DEBUG_TRACE(f, a...)  IPW_DEBUG(IPW_DL_TRACE, f, ## a)
+#define IPW_DEBUG_RX(f, a...)     IPW_DEBUG(IPW_DL_RX, f, ## a)
+#define IPW_DEBUG_TX(f, a...)     IPW_DEBUG(IPW_DL_TX, f, ## a)
+#define IPW_DEBUG_ISR(f, a...)    IPW_DEBUG(IPW_DL_ISR, f, ## a)
+#define IPW_DEBUG_MANAGEMENT(f, a...) IPW_DEBUG(IPW_DL_MANAGE, f, ## a)
+#define IPW_DEBUG_WEP(f, a...)    IPW_DEBUG(IPW_DL_WEP, f, ## a)
+#define IPW_DEBUG_HC(f, a...) IPW_DEBUG(IPW_DL_HOST_COMMAND, f, ## a)
+#define IPW_DEBUG_FRAG(f, a...) IPW_DEBUG(IPW_DL_FRAG, f, ## a)
+#define IPW_DEBUG_FW(f, a...) IPW_DEBUG(IPW_DL_FW, f, ## a)
+#define IPW_DEBUG_RF_KILL(f, a...) IPW_DEBUG(IPW_DL_RF_KILL, f, ## a)
+#define IPW_DEBUG_DROP(f, a...) IPW_DEBUG(IPW_DL_DROP, f, ## a)
+#define IPW_DEBUG_IO(f, a...) IPW_DEBUG(IPW_DL_IO, f, ## a)
+#define IPW_DEBUG_ORD(f, a...) IPW_DEBUG(IPW_DL_ORD, f, ## a)
+#define IPW_DEBUG_FW_INFO(f, a...) IPW_DEBUG(IPW_DL_FW_INFO, f, ## a)
+#define IPW_DEBUG_NOTIF(f, a...) IPW_DEBUG(IPW_DL_NOTIF, f, ## a)
+#define IPW_DEBUG_STATE(f, a...) IPW_DEBUG(IPW_DL_STATE | IPW_DL_ASSOC | IPW_DL_INFO, f, ## a)
+#define IPW_DEBUG_ASSOC(f, a...) IPW_DEBUG(IPW_DL_ASSOC | IPW_DL_INFO, f, ## a)
+#define IPW_DEBUG_STATS(f, a...) IPW_DEBUG(IPW_DL_STATS, f, ## a)
+
+#include <linux/ctype.h>
+
+/*
+ * Register bit definitions
+ */
+
+/* Dino control registers bits */
+
+#define DINO_ENABLE_SYSTEM 0x80
+#define DINO_ENABLE_CS     0x40
+#define DINO_RXFIFO_DATA   0x01
+#define DINO_CONTROL_REG   0x00200000
+
+#define CX2_INTA_RW       0x00000008
+#define CX2_INTA_MASK_R   0x0000000C
+#define CX2_INDIRECT_ADDR 0x00000010
+#define CX2_INDIRECT_DATA 0x00000014
+#define CX2_AUTOINC_ADDR  0x00000018
+#define CX2_AUTOINC_DATA  0x0000001C
+#define CX2_RESET_REG     0x00000020
+#define CX2_GP_CNTRL_RW   0x00000024
+
+#define CX2_READ_INT_REGISTER 0xFF4
+
+#define CX2_GP_CNTRL_BIT_INIT_DONE	0x00000004
+
+#define CX2_REGISTER_DOMAIN1_END        0x00001000
+#define CX2_SRAM_READ_INT_REGISTER 	0x00000ff4
+
+#define CX2_SHARED_LOWER_BOUND          0x00000200
+#define CX2_INTERRUPT_AREA_LOWER_BOUND  0x00000f80
+
+#define CX2_NIC_SRAM_LOWER_BOUND        0x00000000
+#define CX2_NIC_SRAM_UPPER_BOUND        0x00030000
+
+#define CX2_BIT_INT_HOST_SRAM_READ_INT_REGISTER (1 << 29)
+#define CX2_GP_CNTRL_BIT_CLOCK_READY    0x00000001
+#define CX2_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY 0x00000002
+
+/*
+ * RESET Register Bit Indexes
+ */
+#define CBD_RESET_REG_PRINCETON_RESET 0x00000001  /* Bit 0 (LSB) */
+#define CX2_RESET_REG_SW_RESET        0x00000080  /* Bit 7       */
+#define CX2_RESET_REG_MASTER_DISABLED 0x00000100  /* Bit 8       */
+#define CX2_RESET_REG_STOP_MASTER     0x00000200  /* Bit 9       */
+#define CX2_ARC_KESHET_CONFIG         0x08000000  /* Bit 27      */
+#define CX2_START_STANDBY             0x00000004  /* Bit 2       */
+
+#define CX2_CSR_CIS_UPPER_BOUND	0x00000200
+#define CX2_DOMAIN_0_END 0x1000
+#define CLX_MEM_BAR_SIZE 0x1000
+
+#define CX2_BASEBAND_CONTROL_STATUS	0X00200000
+#define CX2_BASEBAND_TX_FIFO_WRITE	0X00200004
+#define CX2_BASEBAND_RX_FIFO_READ	0X00200004
+#define CX2_BASEBAND_CONTROL_STORE	0X00200010
+
+#define CX2_INTERNAL_CMD_EVENT 	0X00300004
+#define CX2_BASEBAND_POWER_DOWN 0x00000001
+
+#define CX2_MEM_HALT_AND_RESET  0x003000e0
+
+/* defgroup bits_halt_reset MEM_HALT_AND_RESET register bits */
+#define CX2_BIT_HALT_RESET_ON	0x80000000
+#define CX2_BIT_HALT_RESET_OFF 	0x00000000
+
+#define CB_LAST_VALID     0x20000000
+#define CB_INT_ENABLED    0x40000000
+#define CB_VALID          0x80000000
+#define CB_SRC_LE         0x08000000
+#define CB_DEST_LE        0x04000000
+#define CB_SRC_AUTOINC    0x00800000
+#define CB_SRC_IO_GATED   0x00400000
+#define CB_DEST_AUTOINC   0x00080000
+#define CB_SRC_SIZE_LONG  0x00200000
+#define CB_DEST_SIZE_LONG 0x00020000
+
+
+/* DMA DEFINES */
+
+#define DMA_CONTROL_SMALL_CB_CONST_VALUE 0x00540000
+#define DMA_CB_STOP_AND_ABORT            0x00000C00
+#define DMA_CB_START                     0x00000100
+
+
+#define CX2_SHARED_SRAM_SIZE               0x00030000
+#define CX2_SHARED_SRAM_DMA_CONTROL        0x00027000
+#define CB_MAX_LENGTH                      0x1FFF
+
+#define CX2_HOST_EEPROM_DATA_SRAM_SIZE 0xA18
+#define CX2_EEPROM_IMAGE_SIZE          0x100
+
+
+/* DMA defs */
+#define CX2_DMA_I_CURRENT_CB  0x003000D0
+#define CX2_DMA_O_CURRENT_CB  0x003000D4
+#define CX2_DMA_I_DMA_CONTROL 0x003000A4
+#define CX2_DMA_I_CB_BASE     0x003000A0
+
+#define CX2_TX_CMD_QUEUE_BD_BASE        (0x00000200)
+#define CX2_TX_CMD_QUEUE_BD_SIZE        (0x00000204)
+#define CX2_TX_QUEUE_0_BD_BASE          (0x00000208)
+#define CX2_TX_QUEUE_0_BD_SIZE          (0x0000020C)
+#define CX2_TX_QUEUE_1_BD_BASE          (0x00000210)
+#define CX2_TX_QUEUE_1_BD_SIZE          (0x00000214)
+#define CX2_TX_QUEUE_2_BD_BASE          (0x00000218)
+#define CX2_TX_QUEUE_2_BD_SIZE          (0x0000021C)
+#define CX2_TX_QUEUE_3_BD_BASE          (0x00000220)
+#define CX2_TX_QUEUE_3_BD_SIZE          (0x00000224)
+#define CX2_RX_BD_BASE                  (0x00000240)
+#define CX2_RX_BD_SIZE                  (0x00000244)
+#define CX2_RFDS_TABLE_LOWER            (0x00000500)
+
+#define CX2_TX_CMD_QUEUE_READ_INDEX     (0x00000280)
+#define CX2_TX_QUEUE_0_READ_INDEX       (0x00000284)
+#define CX2_TX_QUEUE_1_READ_INDEX       (0x00000288)
+#define CX2_TX_QUEUE_2_READ_INDEX       (0x0000028C)
+#define CX2_TX_QUEUE_3_READ_INDEX       (0x00000290)
+#define CX2_RX_READ_INDEX               (0x000002A0)
+
+#define CX2_TX_CMD_QUEUE_WRITE_INDEX    (0x00000F80)
+#define CX2_TX_QUEUE_0_WRITE_INDEX      (0x00000F84)
+#define CX2_TX_QUEUE_1_WRITE_INDEX      (0x00000F88)
+#define CX2_TX_QUEUE_2_WRITE_INDEX      (0x00000F8C)
+#define CX2_TX_QUEUE_3_WRITE_INDEX      (0x00000F90)
+#define CX2_RX_WRITE_INDEX              (0x00000FA0)
+
+/*
+ * EEPROM Related Definitions
+ */
+
+#define IPW_EEPROM_DATA_SRAM_ADDRESS (CX2_SHARED_LOWER_BOUND + 0x814)
+#define IPW_EEPROM_DATA_SRAM_SIZE    (CX2_SHARED_LOWER_BOUND + 0x818)
+#define IPW_EEPROM_LOAD_DISABLE      (CX2_SHARED_LOWER_BOUND + 0x81C)
+#define IPW_EEPROM_DATA              (CX2_SHARED_LOWER_BOUND + 0x820)
+#define IPW_EEPROM_UPPER_ADDRESS     (CX2_SHARED_LOWER_BOUND + 0x9E0)
+
+#define IPW_STATION_TABLE_LOWER      (CX2_SHARED_LOWER_BOUND + 0xA0C)
+#define IPW_STATION_TABLE_UPPER      (CX2_SHARED_LOWER_BOUND + 0xB0C)
+#define IPW_REQUEST_ATIM             (CX2_SHARED_LOWER_BOUND + 0xB0C)
+#define IPW_ATIM_SENT                (CX2_SHARED_LOWER_BOUND + 0xB10)
+#define IPW_WHO_IS_AWAKE             (CX2_SHARED_LOWER_BOUND + 0xB14)
+#define IPW_DURING_ATIM_WINDOW       (CX2_SHARED_LOWER_BOUND + 0xB18)
+
+
+#define MSB                             1
+#define LSB                             0
+#define WORD_TO_BYTE(_word)             ((_word) * sizeof(u16))
+
+#define GET_EEPROM_ADDR(_wordoffset,_byteoffset) \
+    ( WORD_TO_BYTE(_wordoffset) + (_byteoffset) )
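+/* Example: EEPROM_MAC_ADDRESS below expands to byte offset
+ * WORD_TO_BYTE(0x21) + LSB = 0x42 within the EEPROM image. */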
+
+/* EEPROM access by BYTE */
+#define EEPROM_PME_CAPABILITY   (GET_EEPROM_ADDR(0x09,MSB))     /* 1 byte   */
+#define EEPROM_MAC_ADDRESS      (GET_EEPROM_ADDR(0x21,LSB))     /* 6 byte   */
+#define EEPROM_VERSION          (GET_EEPROM_ADDR(0x24,MSB))     /* 1 byte   */
+#define EEPROM_NIC_TYPE         (GET_EEPROM_ADDR(0x25,LSB))     /* 1 byte   */
+#define EEPROM_SKU_CAPABILITY   (GET_EEPROM_ADDR(0x25,MSB))     /* 1 byte   */
+#define EEPROM_COUNTRY_CODE     (GET_EEPROM_ADDR(0x26,LSB))     /* 3 bytes  */
+#define EEPROM_IBSS_CHANNELS_BG (GET_EEPROM_ADDR(0x28,LSB))     /* 2 bytes  */
+#define EEPROM_IBSS_CHANNELS_A  (GET_EEPROM_ADDR(0x29,MSB))     /* 5 bytes  */
+#define EEPROM_BSS_CHANNELS_BG  (GET_EEPROM_ADDR(0x2c,LSB))     /* 2 bytes  */
+#define EEPROM_HW_VERSION       (GET_EEPROM_ADDR(0x72,LSB))     /* 2 bytes  */
+
+/* NIC type as found in the one byte EEPROM_NIC_TYPE offset */
+#define EEPROM_NIC_TYPE_STANDARD        0
+#define EEPROM_NIC_TYPE_DELL            1
+#define EEPROM_NIC_TYPE_FUJITSU         2
+#define EEPROM_NIC_TYPE_IBM             3
+#define EEPROM_NIC_TYPE_HP              4
+
+#define FW_MEM_REG_LOWER_BOUND          0x00300000
+#define FW_MEM_REG_EEPROM_ACCESS        (FW_MEM_REG_LOWER_BOUND + 0x40)
+
+#define EEPROM_BIT_SK                   (1<<0)
+#define EEPROM_BIT_CS                   (1<<1)
+#define EEPROM_BIT_DI                   (1<<2)
+#define EEPROM_BIT_DO                   (1<<4)
+
+#define EEPROM_CMD_READ                 0x2
+
+/* Interrupts masks */
+#define CX2_INTA_NONE   0x00000000
+
+#define CX2_INTA_BIT_RX_TRANSFER                   0x00000002
+#define CX2_INTA_BIT_STATUS_CHANGE                 0x00000010
+#define CX2_INTA_BIT_BEACON_PERIOD_EXPIRED         0x00000020
+
+/* INTA bits for CF */
+#define CX2_INTA_BIT_TX_CMD_QUEUE                  0x00000800
+#define CX2_INTA_BIT_TX_QUEUE_1                    0x00001000
+#define CX2_INTA_BIT_TX_QUEUE_2                    0x00002000
+#define CX2_INTA_BIT_TX_QUEUE_3                    0x00004000
+#define CX2_INTA_BIT_TX_QUEUE_4                    0x00008000
+
+#define CX2_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE      0x00010000
+
+#define CX2_INTA_BIT_PREPARE_FOR_POWER_DOWN        0x00100000
+#define CX2_INTA_BIT_POWER_DOWN                    0x00200000
+
+#define CX2_INTA_BIT_FW_INITIALIZATION_DONE        0x01000000
+#define CX2_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE  0x02000000
+#define CX2_INTA_BIT_RF_KILL_DONE                  0x04000000
+#define CX2_INTA_BIT_FATAL_ERROR             0x40000000
+#define CX2_INTA_BIT_PARITY_ERROR            0x80000000
+
+/* Interrupts enabled at init time. */
+#define CX2_INTA_MASK_ALL                        \
+        (CX2_INTA_BIT_TX_QUEUE_1               | \
+	 CX2_INTA_BIT_TX_QUEUE_2               | \
+	 CX2_INTA_BIT_TX_QUEUE_3               | \
+	 CX2_INTA_BIT_TX_QUEUE_4               | \
+	 CX2_INTA_BIT_TX_CMD_QUEUE             | \
+	 CX2_INTA_BIT_RX_TRANSFER              | \
+	 CX2_INTA_BIT_FATAL_ERROR              | \
+	 CX2_INTA_BIT_PARITY_ERROR             | \
+	 CX2_INTA_BIT_STATUS_CHANGE            | \
+	 CX2_INTA_BIT_FW_INITIALIZATION_DONE   | \
+	 CX2_INTA_BIT_BEACON_PERIOD_EXPIRED    | \
+	 CX2_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE | \
+	 CX2_INTA_BIT_PREPARE_FOR_POWER_DOWN   | \
+	 CX2_INTA_BIT_POWER_DOWN               | \
+         CX2_INTA_BIT_RF_KILL_DONE )
+
+#define IPWSTATUS_ERROR_LOG     (CX2_SHARED_LOWER_BOUND + 0x410)
+#define IPW_EVENT_LOG     (CX2_SHARED_LOWER_BOUND + 0x414)
+
+/* FW event log definitions */
+#define EVENT_ELEM_SIZE     (3 * sizeof(u32))
+#define EVENT_START_OFFSET  (1 * sizeof(u32) + 2 * sizeof(u16))
+
+/* FW error log definitions */
+#define ERROR_ELEM_SIZE     (7 * sizeof(u32))
+#define ERROR_START_OFFSET  (1 * sizeof(u32))
+
+enum {
+	IPW_FW_ERROR_OK = 0,
+	IPW_FW_ERROR_FAIL,
+	IPW_FW_ERROR_MEMORY_UNDERFLOW,
+	IPW_FW_ERROR_MEMORY_OVERFLOW,
+	IPW_FW_ERROR_BAD_PARAM,
+	IPW_FW_ERROR_BAD_CHECKSUM,
+	IPW_FW_ERROR_NMI_INTERRUPT,
+	IPW_FW_ERROR_BAD_DATABASE,
+	IPW_FW_ERROR_ALLOC_FAIL,
+	IPW_FW_ERROR_DMA_UNDERRUN,
+	IPW_FW_ERROR_DMA_STATUS,
+	IPW_FW_ERROR_DINOSTATUS_ERROR,
+	IPW_FW_ERROR_EEPROMSTATUS_ERROR,
+	IPW_FW_ERROR_SYSASSERT,
+	IPW_FW_ERROR_FATAL_ERROR
+};
+
+#define AUTH_OPEN       0
+#define AUTH_SHARED_KEY 1
+#define AUTH_IGNORE     3
+
+#define HC_ASSOCIATE      0
+#define HC_REASSOCIATE    1
+#define HC_DISASSOCIATE   2
+#define HC_IBSS_START     3
+#define HC_IBSS_RECONF    4
+#define HC_DISASSOC_QUIET 5
+
+#define IPW_RATE_CAPABILITIES 1
+#define IPW_RATE_CONNECT      0
+
+
+/*
+ * Rate values and masks
+ */
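+/* Note: the 1/2/5.5/11 Mb (CCK) values are the rate in 100 kbit/s units;
+ * the remaining (OFDM) values appear to be 802.11a/g PLCP SIGNAL rate
+ * codes. */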
+#define IPW_TX_RATE_1MB  0x0A
+#define IPW_TX_RATE_2MB  0x14
+#define IPW_TX_RATE_5MB  0x37
+#define IPW_TX_RATE_6MB  0x0D
+#define IPW_TX_RATE_9MB  0x0F
+#define IPW_TX_RATE_11MB 0x6E
+#define IPW_TX_RATE_12MB 0x05
+#define IPW_TX_RATE_18MB 0x07
+#define IPW_TX_RATE_24MB 0x09
+#define IPW_TX_RATE_36MB 0x0B
+#define IPW_TX_RATE_48MB 0x01
+#define IPW_TX_RATE_54MB 0x03
+
+#define IPW_ORD_TABLE_ID_MASK             0x0000FF00
+#define IPW_ORD_TABLE_VALUE_MASK          0x000000FF
+
+#define IPW_ORD_TABLE_0_MASK              0x0000F000
+#define IPW_ORD_TABLE_1_MASK              0x0000F100
+#define IPW_ORD_TABLE_2_MASK              0x0000F200
+#define IPW_ORD_TABLE_3_MASK              0x0000F300
+#define IPW_ORD_TABLE_4_MASK              0x0000F400
+#define IPW_ORD_TABLE_5_MASK              0x0000F500
+#define IPW_ORD_TABLE_6_MASK              0x0000F600
+#define IPW_ORD_TABLE_7_MASK              0x0000F700
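+/* An ordinal is addressed by OR-ing one of the table masks above with an
+ * entry index; IPW_ORD_TABLE_ID_MASK and IPW_ORD_TABLE_VALUE_MASK split
+ * that value back into its table and entry parts (see the enums below). */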
+
+/*
+ * Table 0 Entries (all entries are 32 bits)
+ */
+enum {
+	IPW_ORD_STAT_TX_CURR_RATE = IPW_ORD_TABLE_0_MASK + 1,
+	IPW_ORD_STAT_FRAG_TRESHOLD,
+	IPW_ORD_STAT_RTS_THRESHOLD,
+	IPW_ORD_STAT_TX_HOST_REQUESTS,
+	IPW_ORD_STAT_TX_HOST_COMPLETE,
+	IPW_ORD_STAT_TX_DIR_DATA,
+	IPW_ORD_STAT_TX_DIR_DATA_B_1,
+	IPW_ORD_STAT_TX_DIR_DATA_B_2,
+	IPW_ORD_STAT_TX_DIR_DATA_B_5_5,
+	IPW_ORD_STAT_TX_DIR_DATA_B_11,
+	/* Hole */
+
+
+
+
+
+
+
+	IPW_ORD_STAT_TX_DIR_DATA_G_1 = IPW_ORD_TABLE_0_MASK + 19,
+	IPW_ORD_STAT_TX_DIR_DATA_G_2,
+	IPW_ORD_STAT_TX_DIR_DATA_G_5_5,
+	IPW_ORD_STAT_TX_DIR_DATA_G_6,
+	IPW_ORD_STAT_TX_DIR_DATA_G_9,
+	IPW_ORD_STAT_TX_DIR_DATA_G_11,
+	IPW_ORD_STAT_TX_DIR_DATA_G_12,
+	IPW_ORD_STAT_TX_DIR_DATA_G_18,
+	IPW_ORD_STAT_TX_DIR_DATA_G_24,
+	IPW_ORD_STAT_TX_DIR_DATA_G_36,
+	IPW_ORD_STAT_TX_DIR_DATA_G_48,
+	IPW_ORD_STAT_TX_DIR_DATA_G_54,
+	IPW_ORD_STAT_TX_NON_DIR_DATA,
+	IPW_ORD_STAT_TX_NON_DIR_DATA_B_1,
+	IPW_ORD_STAT_TX_NON_DIR_DATA_B_2,
+	IPW_ORD_STAT_TX_NON_DIR_DATA_B_5_5,
+	IPW_ORD_STAT_TX_NON_DIR_DATA_B_11,
+	/* Hole */
+
+
+
+
+
+
+
+	IPW_ORD_STAT_TX_NON_DIR_DATA_G_1 = IPW_ORD_TABLE_0_MASK + 44,
+	IPW_ORD_STAT_TX_NON_DIR_DATA_G_2,
+	IPW_ORD_STAT_TX_NON_DIR_DATA_G_5_5,
+	IPW_ORD_STAT_TX_NON_DIR_DATA_G_6,
+	IPW_ORD_STAT_TX_NON_DIR_DATA_G_9,
+	IPW_ORD_STAT_TX_NON_DIR_DATA_G_11,
+	IPW_ORD_STAT_TX_NON_DIR_DATA_G_12,
+	IPW_ORD_STAT_TX_NON_DIR_DATA_G_18,
+	IPW_ORD_STAT_TX_NON_DIR_DATA_G_24,
+	IPW_ORD_STAT_TX_NON_DIR_DATA_G_36,
+	IPW_ORD_STAT_TX_NON_DIR_DATA_G_48,
+	IPW_ORD_STAT_TX_NON_DIR_DATA_G_54,
+	IPW_ORD_STAT_TX_RETRY,
+	IPW_ORD_STAT_TX_FAILURE,
+	IPW_ORD_STAT_RX_ERR_CRC,
+	IPW_ORD_STAT_RX_ERR_ICV,
+	IPW_ORD_STAT_RX_NO_BUFFER,
+	IPW_ORD_STAT_FULL_SCANS,
+	IPW_ORD_STAT_PARTIAL_SCANS,
+	IPW_ORD_STAT_TGH_ABORTED_SCANS,
+	IPW_ORD_STAT_TX_TOTAL_BYTES,
+	IPW_ORD_STAT_CURR_RSSI_RAW,
+	IPW_ORD_STAT_RX_BEACON,
+	IPW_ORD_STAT_MISSED_BEACONS,
+	IPW_ORD_TABLE_0_LAST
+};
+
+#define IPW_RSSI_TO_DBM 112
+
+/* Table 1 Entries
+ */
+enum {
+	IPW_ORD_TABLE_1_LAST = IPW_ORD_TABLE_1_MASK | 1,
+};
+
+/*
+ * Table 2 Entries
+ *
+ * FW_VERSION:    16 byte string
+ * FW_DATE:       16 byte string (only 14 bytes used)
+ * UCODE_VERSION: 4 byte version code
+ * UCODE_DATE:    5 byte date code
+ * ADAPTER_MAC:   6 byte MAC address
+ * RTC:           4 byte clock
+ */
+enum {
+	IPW_ORD_STAT_FW_VERSION = IPW_ORD_TABLE_2_MASK | 1,
+	IPW_ORD_STAT_FW_DATE,
+	IPW_ORD_STAT_UCODE_VERSION,
+	IPW_ORD_STAT_UCODE_DATE,
+	IPW_ORD_STAT_ADAPTER_MAC,
+	IPW_ORD_STAT_RTC,
+	IPW_ORD_TABLE_2_LAST
+};
+
+/* Table 3 */
+enum {
+	IPW_ORD_STAT_TX_PACKET = IPW_ORD_TABLE_3_MASK | 0,
+	IPW_ORD_STAT_TX_PACKET_FAILURE,
+	IPW_ORD_STAT_TX_PACKET_SUCCESS,
+	IPW_ORD_STAT_TX_PACKET_ABORTED,
+	IPW_ORD_TABLE_3_LAST
+};
+
+/* Table 4 */
+enum {
+	IPW_ORD_TABLE_4_LAST = IPW_ORD_TABLE_4_MASK
+};
+
+/* Table 5 */
+enum {
+	IPW_ORD_STAT_AVAILABLE_AP_COUNT = IPW_ORD_TABLE_5_MASK,
+	IPW_ORD_STAT_AP_ASSNS,
+	IPW_ORD_STAT_ROAM,
+	IPW_ORD_STAT_ROAM_CAUSE_MISSED_BEACONS,
+	IPW_ORD_STAT_ROAM_CAUSE_UNASSOC,
+	IPW_ORD_STAT_ROAM_CAUSE_RSSI,
+	IPW_ORD_STAT_ROAM_CAUSE_LINK_QUALITY,
+	IPW_ORD_STAT_ROAM_CAUSE_AP_LOAD_BALANCE,
+	IPW_ORD_STAT_ROAM_CAUSE_AP_NO_TX,
+	IPW_ORD_STAT_LINK_UP,
+	IPW_ORD_STAT_LINK_DOWN,
+	IPW_ORD_ANTENNA_DIVERSITY,
+	IPW_ORD_CURR_FREQ,
+	IPW_ORD_TABLE_5_LAST
+};
+
+/* Table 6 */
+enum {
+	IPW_ORD_COUNTRY_CODE = IPW_ORD_TABLE_6_MASK,
+	IPW_ORD_CURR_BSSID,
+	IPW_ORD_CURR_SSID,
+	IPW_ORD_TABLE_6_LAST
+};
+
+/* Table 7 */
+enum {
+	IPW_ORD_STAT_PERCENT_MISSED_BEACONS = IPW_ORD_TABLE_7_MASK,
+	IPW_ORD_STAT_PERCENT_TX_RETRIES,
+	IPW_ORD_STAT_PERCENT_LINK_QUALITY,
+	IPW_ORD_STAT_CURR_RSSI_DBM,
+	IPW_ORD_TABLE_7_LAST
+};
+
+#define IPW_ORDINALS_TABLE_LOWER        (CX2_SHARED_LOWER_BOUND + 0x500)
+#define IPW_ORDINALS_TABLE_0            (CX2_SHARED_LOWER_BOUND + 0x180)
+#define IPW_ORDINALS_TABLE_1            (CX2_SHARED_LOWER_BOUND + 0x184)
+#define IPW_ORDINALS_TABLE_2            (CX2_SHARED_LOWER_BOUND + 0x188)
+#define IPW_MEM_FIXED_OVERRIDE          (CX2_SHARED_LOWER_BOUND + 0x41C)
+
+struct ipw_fixed_rate {
+	u16 tx_rates;
+	u16 reserved;
+} __attribute__ ((packed));
+
+#define CX2_INDIRECT_ADDR_MASK (~0x3ul)
+
+struct host_cmd {
+	u8 cmd;
+	u8 len;
+	u16 reserved;
+	u32 param[TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH];
+} __attribute__ ((packed));
+
+#define CFG_BT_COEXISTENCE_MIN                  0x00
+#define CFG_BT_COEXISTENCE_DEFER                0x02
+#define CFG_BT_COEXISTENCE_KILL                 0x04
+#define CFG_BT_COEXISTENCE_WME_OVER_BT          0x08
+#define CFG_BT_COEXISTENCE_OOB                  0x10
+#define CFG_BT_COEXISTENCE_MAX                  0xFF
+#define CFG_BT_COEXISTENCE_DEF                  0x80 /* read BT from EEPROM */
+
+#define CFG_CTS_TO_ITSELF_ENABLED_MIN	0x0
+#define CFG_CTS_TO_ITSELF_ENABLED_MAX	0x1
+#define CFG_CTS_TO_ITSELF_ENABLED_DEF	CFG_CTS_TO_ITSELF_ENABLED_MIN
+
+#define CFG_SYS_ANTENNA_BOTH                      0x000
+#define CFG_SYS_ANTENNA_A                         0x001
+#define CFG_SYS_ANTENNA_B                         0x003
+
+/*
+ * The definitions below were lifted off the ipw2100 driver, which only
+ * supports 'b' mode, so I'm sure these are not exactly correct.
+ *
+ * Somebody fix these!!
+ */
+#define REG_MIN_CHANNEL             0
+#define REG_MAX_CHANNEL             14
+
+#define REG_CHANNEL_MASK            0x00003FFF
+#define IPW_IBSS_11B_DEFAULT_MASK   0x87ff
+
+static const long ipw_frequencies[] = {
+	2412, 2417, 2422, 2427,
+	2432, 2437, 2442, 2447,
+	2452, 2457, 2462, 2467,
+	2472, 2484
+};
+
+#define FREQ_COUNT ARRAY_SIZE(ipw_frequencies)
+
+#define IPW_MAX_CONFIG_RETRIES 10
+
+static inline u32 frame_hdr_len(struct ieee80211_hdr *hdr)
+{
+	u32 retval;
+	u16 fc;
+
+	retval = sizeof(struct ieee80211_hdr);
+	fc = le16_to_cpu(hdr->frame_ctl);
+
+	/*
+	 * Function	ToDS	FromDS
+	 * IBSS		0	0
+	 * To AP	1	0
+	 * From AP	0	1
+	 * WDS (bridge)	1	1
+	 *
+	 * Only WDS frames use Address4 among them. --YZ
+	 */
+	if (!(fc & IEEE80211_FCTL_TODS) || !(fc & IEEE80211_FCTL_FROMDS))
+		retval -= ETH_ALEN;
+
+	return retval;
+}
+
+#endif /* __ipw2200_h__ */
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -94,6 +94,8 @@
 #include <net/iw_handler.h>
 #include <net/ieee80211.h>
 
+#include <net/ieee80211.h>
+
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/system.h>
@@ -101,7 +103,6 @@
 #include "hermes.h"
 #include "hermes_rid.h"
 #include "orinoco.h"
-#include "ieee802_11.h"
 
 /********************************************************************/
 /* Module information                                               */
@@ -150,7 +151,7 @@ static const u8 encaps_hdr[] = {0xaa, 0x
 #define ENCAPS_OVERHEAD		(sizeof(encaps_hdr) + 2)
 
 #define ORINOCO_MIN_MTU		256
-#define ORINOCO_MAX_MTU		(IEEE802_11_DATA_LEN - ENCAPS_OVERHEAD)
+#define ORINOCO_MAX_MTU		(IEEE80211_DATA_LEN - ENCAPS_OVERHEAD)
 
 #define SYMBOL_MAX_VER_LEN	(14)
 #define USER_BAP		0
@@ -442,7 +443,7 @@ static int orinoco_change_mtu(struct net
 	if ( (new_mtu < ORINOCO_MIN_MTU) || (new_mtu > ORINOCO_MAX_MTU) )
 		return -EINVAL;
 
-	if ( (new_mtu + ENCAPS_OVERHEAD + IEEE802_11_HLEN) >
+	if ( (new_mtu + ENCAPS_OVERHEAD + IEEE80211_HLEN) >
 	     (priv->nicbuf_size - ETH_HLEN) )
 		return -EINVAL;
 
@@ -918,7 +919,7 @@ static void __orinoco_ev_rx(struct net_d
                    data. */
 		return;
 	}
-	if (length > IEEE802_11_DATA_LEN) {
+	if (length > IEEE80211_DATA_LEN) {
 		printk(KERN_WARNING "%s: Oversized frame received (%d bytes)\n",
 		       dev->name, length);
 		stats->rx_length_errors++;
@@ -2272,7 +2273,7 @@ static int orinoco_init(struct net_devic
 
 	/* No need to lock, the hw_unavailable flag is already set in
 	 * alloc_orinocodev() */
-	priv->nicbuf_size = IEEE802_11_FRAME_LEN + ETH_HLEN;
+	priv->nicbuf_size = IEEE80211_FRAME_LEN + ETH_HLEN;
 
 	/* Initialize the firmware */
 	err = orinoco_reinit_firmware(dev);
@@ -4322,36 +4323,36 @@ static const struct iw_priv_args orinoco
  */
 
 static const iw_handler	orinoco_handler[] = {
-	[SIOCSIWCOMMIT-SIOCIWFIRST] (iw_handler) orinoco_ioctl_commit,
-	[SIOCGIWNAME  -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getname,
-	[SIOCSIWFREQ  -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setfreq,
-	[SIOCGIWFREQ  -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getfreq,
-	[SIOCSIWMODE  -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setmode,
-	[SIOCGIWMODE  -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getmode,
-	[SIOCSIWSENS  -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setsens,
-	[SIOCGIWSENS  -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getsens,
-	[SIOCGIWRANGE -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getiwrange,
-	[SIOCSIWSPY   -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setspy,
-	[SIOCGIWSPY   -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getspy,
-	[SIOCSIWAP    -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setwap,
-	[SIOCGIWAP    -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getwap,
-	[SIOCSIWSCAN  -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setscan,
-	[SIOCGIWSCAN  -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getscan,
-	[SIOCSIWESSID -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setessid,
-	[SIOCGIWESSID -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getessid,
-	[SIOCSIWNICKN -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setnick,
-	[SIOCGIWNICKN -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getnick,
-	[SIOCSIWRATE  -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setrate,
-	[SIOCGIWRATE  -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getrate,
-	[SIOCSIWRTS   -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setrts,
-	[SIOCGIWRTS   -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getrts,
-	[SIOCSIWFRAG  -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setfrag,
-	[SIOCGIWFRAG  -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getfrag,
-	[SIOCGIWRETRY -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getretry,
-	[SIOCSIWENCODE-SIOCIWFIRST] (iw_handler) orinoco_ioctl_setiwencode,
-	[SIOCGIWENCODE-SIOCIWFIRST] (iw_handler) orinoco_ioctl_getiwencode,
-	[SIOCSIWPOWER -SIOCIWFIRST] (iw_handler) orinoco_ioctl_setpower,
-	[SIOCGIWPOWER -SIOCIWFIRST] (iw_handler) orinoco_ioctl_getpower,
+	[SIOCSIWCOMMIT-SIOCIWFIRST] = (iw_handler) orinoco_ioctl_commit,
+	[SIOCGIWNAME  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getname,
+	[SIOCSIWFREQ  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setfreq,
+	[SIOCGIWFREQ  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getfreq,
+	[SIOCSIWMODE  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setmode,
+	[SIOCGIWMODE  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getmode,
+	[SIOCSIWSENS  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setsens,
+	[SIOCGIWSENS  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getsens,
+	[SIOCGIWRANGE -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getiwrange,
+	[SIOCSIWSPY   -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setspy,
+	[SIOCGIWSPY   -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getspy,
+	[SIOCSIWAP    -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setwap,
+	[SIOCGIWAP    -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getwap,
+	[SIOCSIWSCAN  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setscan,
+	[SIOCGIWSCAN  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getscan,
+	[SIOCSIWESSID -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setessid,
+	[SIOCGIWESSID -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getessid,
+	[SIOCSIWNICKN -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setnick,
+	[SIOCGIWNICKN -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getnick,
+	[SIOCSIWRATE  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setrate,
+	[SIOCGIWRATE  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getrate,
+	[SIOCSIWRTS   -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setrts,
+	[SIOCGIWRTS   -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getrts,
+	[SIOCSIWFRAG  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setfrag,
+	[SIOCGIWFRAG  -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getfrag,
+	[SIOCGIWRETRY -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getretry,
+	[SIOCSIWENCODE-SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setiwencode,
+	[SIOCGIWENCODE-SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getiwencode,
+	[SIOCSIWPOWER -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_setpower,
+	[SIOCGIWPOWER -SIOCIWFIRST] = (iw_handler) orinoco_ioctl_getpower,
 };
 
 
@@ -4359,15 +4360,15 @@ static const iw_handler	orinoco_handler[
   Added typecasting since we no longer use iwreq_data -- Moustafa
  */
 static const iw_handler	orinoco_private_handler[] = {
-	[0] (iw_handler) orinoco_ioctl_reset,
-	[1] (iw_handler) orinoco_ioctl_reset,
-	[2] (iw_handler) orinoco_ioctl_setport3,
-	[3] (iw_handler) orinoco_ioctl_getport3,
-	[4] (iw_handler) orinoco_ioctl_setpreamble,
-	[5] (iw_handler) orinoco_ioctl_getpreamble,
-	[6] (iw_handler) orinoco_ioctl_setibssport,
-	[7] (iw_handler) orinoco_ioctl_getibssport,
-	[9] (iw_handler) orinoco_ioctl_getrid,
+	[0] = (iw_handler) orinoco_ioctl_reset,
+	[1] = (iw_handler) orinoco_ioctl_reset,
+	[2] = (iw_handler) orinoco_ioctl_setport3,
+	[3] = (iw_handler) orinoco_ioctl_getport3,
+	[4] = (iw_handler) orinoco_ioctl_setpreamble,
+	[5] = (iw_handler) orinoco_ioctl_getpreamble,
+	[6] = (iw_handler) orinoco_ioctl_setibssport,
+	[7] = (iw_handler) orinoco_ioctl_getibssport,
+	[9] = (iw_handler) orinoco_ioctl_getrid,
 };
 
 static const struct iw_handler_def orinoco_handler_def = {
diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c
--- a/drivers/net/wireless/strip.c
+++ b/drivers/net/wireless/strip.c
@@ -209,7 +209,7 @@ enum {
 	NoStructure = 0,	/* Really old firmware */
 	StructuredMessages = 1,	/* Parsable AT response msgs */
 	ChecksummedMessages = 2	/* Parsable AT response msgs with checksums */
-} FirmwareLevel;
+};
 
 struct strip {
 	int magic;
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -59,6 +59,12 @@
 /* Do *NOT* add other headers here, you are guaranteed to be wrong - Jean II */
 #include "wavelan_cs.p.h"		/* Private header */
 
+#ifdef WAVELAN_ROAMING
+static void wl_cell_expiry(unsigned long data);
+static void wl_del_wavepoint(wavepoint_history *wavepoint, struct net_local *lp);
+static void wv_nwid_filter(unsigned char mode, net_local *lp);
+#endif  /*  WAVELAN_ROAMING  */
+
 /************************* MISC SUBROUTINES **************************/
 /*
  * Subroutines which won't fit in one of the following category
@@ -500,9 +506,9 @@ fee_write(u_long	base,	/* i/o port of th
 
 #ifdef WAVELAN_ROAMING	/* Conditional compile, see wavelan_cs.h */
 
-unsigned char WAVELAN_BEACON_ADDRESS[]= {0x09,0x00,0x0e,0x20,0x03,0x00};
+static unsigned char WAVELAN_BEACON_ADDRESS[] = {0x09,0x00,0x0e,0x20,0x03,0x00};
   
-void wv_roam_init(struct net_device *dev)
+static void wv_roam_init(struct net_device *dev)
 {
   net_local  *lp= netdev_priv(dev);
 
@@ -531,7 +537,7 @@ void wv_roam_init(struct net_device *dev
   printk(KERN_DEBUG "WaveLAN: Roaming enabled on device %s\n",dev->name);
 }
  
-void wv_roam_cleanup(struct net_device *dev)
+static void wv_roam_cleanup(struct net_device *dev)
 {
   wavepoint_history *ptr,*old_ptr;
   net_local *lp= netdev_priv(dev);
@@ -550,7 +556,7 @@ void wv_roam_cleanup(struct net_device *
 }
 
 /* Enable/Disable NWID promiscuous mode on a given device */
-void wv_nwid_filter(unsigned char mode, net_local *lp)
+static void wv_nwid_filter(unsigned char mode, net_local *lp)
 {
   mm_t                  m;
   unsigned long         flags;
@@ -575,7 +581,7 @@ void wv_nwid_filter(unsigned char mode, 
 }
 
 /* Find a record in the WavePoint table matching a given NWID */
-wavepoint_history *wl_roam_check(unsigned short nwid, net_local *lp)
+static wavepoint_history *wl_roam_check(unsigned short nwid, net_local *lp)
 {
   wavepoint_history	*ptr=lp->wavepoint_table.head;
   
@@ -588,7 +594,7 @@ wavepoint_history *wl_roam_check(unsigne
 }
 
 /* Create a new wavepoint table entry */
-wavepoint_history *wl_new_wavepoint(unsigned short nwid, unsigned char seq, net_local* lp)
+static wavepoint_history *wl_new_wavepoint(unsigned short nwid, unsigned char seq, net_local* lp)
 {
   wavepoint_history *new_wavepoint;
 
@@ -624,7 +630,7 @@ wavepoint_history *wl_new_wavepoint(unsi
 }
 
 /* Remove a wavepoint entry from WavePoint table */
-void wl_del_wavepoint(wavepoint_history *wavepoint, struct net_local *lp)
+static void wl_del_wavepoint(wavepoint_history *wavepoint, struct net_local *lp)
 {
   if(wavepoint==NULL)
     return;
@@ -646,7 +652,7 @@ void wl_del_wavepoint(wavepoint_history 
 }
 
 /* Timer callback function - checks WavePoint table for stale entries */ 
-void wl_cell_expiry(unsigned long data)
+static void wl_cell_expiry(unsigned long data)
 {
   net_local *lp=(net_local *)data;
   wavepoint_history *wavepoint=lp->wavepoint_table.head,*old_point;
@@ -686,7 +692,7 @@ void wl_cell_expiry(unsigned long data)
 }
 
 /* Update SNR history of a wavepoint */
-void wl_update_history(wavepoint_history *wavepoint, unsigned char sigqual, unsigned char seq)	
+static void wl_update_history(wavepoint_history *wavepoint, unsigned char sigqual, unsigned char seq)	
 {
   int i=0,num_missed=0,ptr=0;
   int average_fast=0,average_slow=0;
@@ -723,7 +729,7 @@ void wl_update_history(wavepoint_history
 }
 
 /* Perform a handover to a new WavePoint */
-void wv_roam_handover(wavepoint_history *wavepoint, net_local *lp)
+static void wv_roam_handover(wavepoint_history *wavepoint, net_local *lp)
 {
   kio_addr_t		base = lp->dev->base_addr;
   mm_t                  m;
diff --git a/drivers/net/wireless/wavelan_cs.h b/drivers/net/wireless/wavelan_cs.h
--- a/drivers/net/wireless/wavelan_cs.h
+++ b/drivers/net/wireless/wavelan_cs.h
@@ -62,7 +62,7 @@
  * like DEC RoamAbout, or Digital Ocean, Epson, ...), you must modify this
  * part to accommodate your hardware...
  */
-const unsigned char	MAC_ADDRESSES[][3] =
+static const unsigned char	MAC_ADDRESSES[][3] =
 {
   { 0x08, 0x00, 0x0E },		/* AT&T Wavelan (standard) & DEC RoamAbout */
   { 0x08, 0x00, 0x6A },		/* AT&T Wavelan (alternate) */
@@ -79,14 +79,14 @@ const unsigned char	MAC_ADDRESSES[][3] =
  * (as read in the offset register of the dac area).
  * Used to map channel numbers used by `wfreqsel' to frequencies
  */
-const short	channel_bands[] = { 0x30, 0x58, 0x64, 0x7A, 0x80, 0xA8,
+static const short	channel_bands[] = { 0x30, 0x58, 0x64, 0x7A, 0x80, 0xA8,
 				    0xD0, 0xF0, 0xF8, 0x150 };
 
 /* Frequencies of the 1.0 modem (fixed frequencies).
  * Use to map the PSA `subband' to a frequency
  * Note : all frequencies apart from the first one need to be multiplied by 10
  */
-const int	fixed_bands[] = { 915e6, 2.425e8, 2.46e8, 2.484e8, 2.4305e8 };
+static const int	fixed_bands[] = { 915e6, 2.425e8, 2.46e8, 2.484e8, 2.4305e8 };
 
 
 /*************************** PC INTERFACE ****************************/
diff --git a/drivers/net/wireless/wavelan_cs.p.h b/drivers/net/wireless/wavelan_cs.p.h
--- a/drivers/net/wireless/wavelan_cs.p.h
+++ b/drivers/net/wireless/wavelan_cs.p.h
@@ -647,23 +647,6 @@ struct net_local
   void __iomem *mem;
 };
 
-/**************************** PROTOTYPES ****************************/
-
-#ifdef WAVELAN_ROAMING
-/* ---------------------- ROAMING SUBROUTINES -----------------------*/
-
-wavepoint_history *wl_roam_check(unsigned short nwid, net_local *lp);
-wavepoint_history *wl_new_wavepoint(unsigned short nwid, unsigned char seq, net_local *lp);
-void wl_del_wavepoint(wavepoint_history *wavepoint, net_local *lp);
-void wl_cell_expiry(unsigned long data);
-wavepoint_history *wl_best_sigqual(int fast_search, net_local *lp);
-void wl_update_history(wavepoint_history *wavepoint, unsigned char sigqual, unsigned char seq);
-void wv_roam_handover(wavepoint_history *wavepoint, net_local *lp);
-void wv_nwid_filter(unsigned char mode, net_local *lp);
-void wv_roam_init(struct net_device *dev);
-void wv_roam_cleanup(struct net_device *dev);
-#endif	/* WAVELAN_ROAMING */
-
 /* ----------------- MODEM MANAGEMENT SUBROUTINES ----------------- */
 static inline u_char		/* data */
 	hasr_read(u_long);	/* Read the host interface : base address */
diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h
--- a/drivers/net/wireless/wl3501.h
+++ b/drivers/net/wireless/wl3501.h
@@ -2,7 +2,7 @@
 #define __WL3501_H__
 
 #include <linux/spinlock.h>
-#include "ieee802_11.h"
+#include <net/ieee80211.h>
 
 /* define for WLA 2.0 */
 #define WL3501_BLKSZ 256
@@ -548,7 +548,7 @@ struct wl3501_80211_tx_plcp_hdr {
 
 struct wl3501_80211_tx_hdr {
 	struct wl3501_80211_tx_plcp_hdr	pclp_hdr;
-	struct ieee802_11_hdr		mac_hdr;
+	struct ieee80211_hdr		mac_hdr;
 } __attribute__ ((packed));
 
 /*
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -296,7 +296,8 @@ static int wl3501_get_flash_mac_addr(str
  *
  * Move 'size' bytes from PC to card. (Shouldn't be interrupted)
  */
-void wl3501_set_to_wla(struct wl3501_card *this, u16 dest, void *src, int size)
+static void wl3501_set_to_wla(struct wl3501_card *this, u16 dest, void *src,
+			      int size)
 {
 	/* switch to SRAM Page 0 */
 	wl3501_switch_page(this, (dest & 0x8000) ? WL3501_BSS_SPAGE1 :
@@ -317,8 +318,8 @@ void wl3501_set_to_wla(struct wl3501_car
  *
  * Move 'size' bytes from card to PC. (Shouldn't be interrupted)
  */
-void wl3501_get_from_wla(struct wl3501_card *this, u16 src, void *dest,
-			 int size)
+static void wl3501_get_from_wla(struct wl3501_card *this, u16 src, void *dest,
+				int size)
 {
 	/* switch to SRAM Page 0 */
 	wl3501_switch_page(this, (src & 0x8000) ? WL3501_BSS_SPAGE1 :
@@ -1438,14 +1439,14 @@ fail:
 	goto out;
 }
 
-struct net_device_stats *wl3501_get_stats(struct net_device *dev)
+static struct net_device_stats *wl3501_get_stats(struct net_device *dev)
 {
 	struct wl3501_card *this = dev->priv;
 
 	return &this->stats;
 }
 
-struct iw_statistics *wl3501_get_wireless_stats(struct net_device *dev)
+static struct iw_statistics *wl3501_get_wireless_stats(struct net_device *dev)
 {
 	struct wl3501_card *this = dev->priv;
 	struct iw_statistics *wstats = &this->wstats;
diff --git a/drivers/usb/net/Makefile b/drivers/usb/net/Makefile
--- a/drivers/usb/net/Makefile
+++ b/drivers/usb/net/Makefile
@@ -8,5 +8,3 @@ obj-$(CONFIG_USB_PEGASUS)	+= pegasus.o
 obj-$(CONFIG_USB_RTL8150)	+= rtl8150.o
 obj-$(CONFIG_USB_USBNET)	+= usbnet.o
 obj-$(CONFIG_USB_ZD1201)	+= zd1201.o
-
-CFLAGS_zd1201.o = -Idrivers/net/wireless/
diff --git a/drivers/usb/net/zd1201.c b/drivers/usb/net/zd1201.c
--- a/drivers/usb/net/zd1201.c
+++ b/drivers/usb/net/zd1201.c
@@ -21,7 +21,7 @@
 #include <linux/string.h>
 #include <linux/if_arp.h>
 #include <linux/firmware.h>
-#include <ieee802_11.h>
+#include <net/ieee80211.h>
 #include "zd1201.h"
 
 static struct usb_device_id zd1201_table[] = {
@@ -338,25 +338,25 @@ static void zd1201_usbrx(struct urb *urb
 			goto resubmit;
 		}
 			
-		if ((seq & IEEE802_11_SCTL_FRAG) ||
-		    (fc & IEEE802_11_FCTL_MOREFRAGS)) {
+		if ((seq & IEEE80211_SCTL_FRAG) ||
+		    (fc & IEEE80211_FCTL_MOREFRAGS)) {
 			struct zd1201_frag *frag = NULL;
 			char *ptr;
 
 			if (datalen<14)
 				goto resubmit;
-			if ((seq & IEEE802_11_SCTL_FRAG) == 0) {
+			if ((seq & IEEE80211_SCTL_FRAG) == 0) {
 				frag = kmalloc(sizeof(struct zd1201_frag*),
 				    GFP_ATOMIC);
 				if (!frag)
 					goto resubmit;
-				skb = dev_alloc_skb(IEEE802_11_DATA_LEN +14+2);
+				skb = dev_alloc_skb(IEEE80211_DATA_LEN +14+2);
 				if (!skb) {
 					kfree(frag);
 					goto resubmit;
 				}
 				frag->skb = skb;
-				frag->seq = seq & IEEE802_11_SCTL_SEQ;
+				frag->seq = seq & IEEE80211_SCTL_SEQ;
 				skb_reserve(skb, 2);
 				memcpy(skb_put(skb, 12), &data[datalen-14], 12);
 				memcpy(skb_put(skb, 2), &data[6], 2);
@@ -365,7 +365,7 @@ static void zd1201_usbrx(struct urb *urb
 				goto resubmit;
 			}
 			hlist_for_each_entry(frag, node, &zd->fraglist, fnode)
-				if(frag->seq == (seq&IEEE802_11_SCTL_SEQ))
+				if(frag->seq == (seq&IEEE80211_SCTL_SEQ))
 					break;
 			if (!frag)
 				goto resubmit;
@@ -373,7 +373,7 @@ static void zd1201_usbrx(struct urb *urb
 			ptr = skb_put(skb, len);
 			if (ptr)
 				memcpy(ptr, data+8, len);
-			if (fc & IEEE802_11_FCTL_MOREFRAGS)
+			if (fc & IEEE80211_FCTL_MOREFRAGS)
 				goto resubmit;
 			hlist_del_init(&frag->fnode);
 			kfree(frag);
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -69,6 +69,12 @@ static inline int is_multicast_ether_add
 	return ((addr[0] != 0xff) && (0x01 & addr[0]));
 }
 
+static inline int is_broadcast_ether_addr(const u8 *addr)
+{
+	return ((addr[0] == 0xff) && (addr[1] == 0xff) && (addr[2] == 0xff) &&
+		(addr[3] == 0xff) && (addr[4] == 0xff) && (addr[5] == 0xff));
+}
+
 /**
  * is_valid_ether_addr - Determine if the given Ethernet address is valid
  * @addr: Pointer to a six-byte array containing the Ethernet address
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -408,6 +408,8 @@ struct ethtool_ops {
 #define SUPPORTED_FIBRE			(1 << 10)
 #define SUPPORTED_BNC			(1 << 11)
 #define SUPPORTED_10000baseT_Full	(1 << 12)
+#define SUPPORTED_Pause			(1 << 13)
+#define SUPPORTED_Asym_Pause		(1 << 14)
 
 /* Indicates what features are advertised by the interface. */
 #define ADVERTISED_10baseT_Half		(1 << 0)
@@ -423,6 +425,8 @@ struct ethtool_ops {
 #define ADVERTISED_FIBRE		(1 << 10)
 #define ADVERTISED_BNC			(1 << 11)
 #define ADVERTISED_10000baseT_Full	(1 << 12)
+#define ADVERTISED_Pause		(1 << 13)
+#define ADVERTISED_Asym_Pause		(1 << 14)
 
 /* The following are all involved in forcing a particular link
  * mode for the device for setting things.  When getting the
diff --git a/include/linux/mii.h b/include/linux/mii.h
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -22,6 +22,7 @@
 #define MII_EXPANSION       0x06        /* Expansion register          */
 #define MII_CTRL1000        0x09        /* 1000BASE-T control          */
 #define MII_STAT1000        0x0a        /* 1000BASE-T status           */
+#define MII_ESTATUS	    0x0f	/* Extended Status */
 #define MII_DCOUNTER        0x12        /* Disconnect counter          */
 #define MII_FCSCOUNTER      0x13        /* False carrier counter       */
 #define MII_NWAYTEST        0x14        /* N-way auto-neg test reg     */
@@ -54,7 +55,10 @@
 #define BMSR_ANEGCAPABLE        0x0008  /* Able to do auto-negotiation */
 #define BMSR_RFAULT             0x0010  /* Remote fault detected       */
 #define BMSR_ANEGCOMPLETE       0x0020  /* Auto-negotiation complete   */
-#define BMSR_RESV               0x07c0  /* Unused...                   */
+#define BMSR_RESV               0x00c0  /* Unused...                   */
+#define BMSR_ESTATEN		0x0100	/* Extended Status in R15 */
+#define BMSR_100HALF2		0x0200	/* Can do 100BASE-T2 HDX */
+#define BMSR_100FULL2		0x0400	/* Can do 100BASE-T2 FDX */
 #define BMSR_10HALF             0x0800  /* Can do 10mbps, half-duplex  */
 #define BMSR_10FULL             0x1000  /* Can do 10mbps, full-duplex  */
 #define BMSR_100HALF            0x2000  /* Can do 100mbps, half-duplex */
@@ -114,6 +118,9 @@
 #define EXPANSION_MFAULTS       0x0010  /* Multiple faults detected    */
 #define EXPANSION_RESV          0xffe0  /* Unused...                   */
 
+#define ESTATUS_1000_TFULL	0x2000	/* Can do 1000BT Full */
+#define ESTATUS_1000_THALF	0x1000	/* Can do 1000BT Half */
+
 /* N-way test register. */
 #define NWAYTEST_RESV1          0x00ff  /* Unused...                   */
 #define NWAYTEST_LOOPBACK       0x0100  /* Enable loopback for N-way   */
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2144,6 +2144,7 @@
 #define PCI_DEVICE_ID_ENE_1225		0x1225
 #define PCI_DEVICE_ID_ENE_1410		0x1410
 #define PCI_DEVICE_ID_ENE_1420		0x1420
+#define PCI_VENDOR_ID_CHELSIO		0x1425
 
 #define PCI_VENDOR_ID_SYBA		0x1592
 #define PCI_DEVICE_ID_SYBA_2P_EPP	0x0782
diff --git a/include/linux/phy.h b/include/linux/phy.h
new file mode 100644
--- /dev/null
+++ b/include/linux/phy.h
@@ -0,0 +1,360 @@
+/*
+ * include/linux/phy.h
+ *
+ * Framework and drivers for configuring and reading different PHYs
+ * Based on code in sungem_phy.c and gianfar_phy.c
+ *
+ * Author: Andy Fleming
+ *
+ * Copyright (c) 2004 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __PHY_H
+#define __PHY_H
+
+#include <linux/spinlock.h>
+#include <linux/device.h>
+
+#define PHY_BASIC_FEATURES	(SUPPORTED_10baseT_Half | \
+				 SUPPORTED_10baseT_Full | \
+				 SUPPORTED_100baseT_Half | \
+				 SUPPORTED_100baseT_Full | \
+				 SUPPORTED_Autoneg | \
+				 SUPPORTED_TP | \
+				 SUPPORTED_MII)
+
+#define PHY_GBIT_FEATURES	(PHY_BASIC_FEATURES | \
+				 SUPPORTED_1000baseT_Half | \
+				 SUPPORTED_1000baseT_Full)
+
+/* Set phydev->irq to PHY_POLL if interrupts are not supported,
+ * or not desired for this PHY.  Set to PHY_IGNORE_INTERRUPT if
+ * the attached driver handles the interrupt
+ */
+#define PHY_POLL		-1
+#define PHY_IGNORE_INTERRUPT	-2
+
+#define PHY_HAS_INTERRUPT	0x00000001
+#define PHY_HAS_MAGICANEG	0x00000002
+
+#define MII_BUS_MAX 4
+
+
+#define PHY_INIT_TIMEOUT 100000
+#define PHY_STATE_TIME		1
+#define PHY_FORCE_TIMEOUT	10
+#define PHY_AN_TIMEOUT		10
+
+#define PHY_MAX_ADDR 32
+
+/* The Bus class for PHYs.  Devices which provide access to
+ * PHYs should register using this structure */
+struct mii_bus {
+	const char *name;
+	int id;
+	void *priv;
+	int (*read)(struct mii_bus *bus, int phy_id, int regnum);
+	int (*write)(struct mii_bus *bus, int phy_id, int regnum, u16 val);
+	int (*reset)(struct mii_bus *bus);
+
+	/* A lock to ensure that only one thing can read/write
+	 * the MDIO bus at a time */
+	spinlock_t mdio_lock;
+
+	struct device *dev;
+
+	/* list of all PHYs on bus */
+	struct phy_device *phy_map[PHY_MAX_ADDR];
+
+	/* Pointer to an array of interrupts, each PHY's
+	 * interrupt at the index matching its address */
+	int *irq;
+};
+
+#define PHY_INTERRUPT_DISABLED 0x0
+#define PHY_INTERRUPT_ENABLED 0x80000000
+
+/* PHY state machine states:
+ *
+ * DOWN: PHY device and driver are not ready for anything.  probe
+ * should be called if and only if the PHY is in this state,
+ * given that the PHY device exists.
+ * - PHY driver probe function will, depending on the PHY, set
+ * the state to STARTING or READY
+ *
+ * STARTING:  PHY device is coming up, and the ethernet driver is
+ * not ready.  PHY drivers may set this in the probe function.
+ * If they do, they are responsible for making sure the state is
+ * eventually set to indicate whether the PHY is UP or READY,
+ * depending on the state when the PHY is done starting up.
+ * - PHY driver will set the state to READY
+ * - start will set the state to PENDING
+ *
+ * READY: PHY is ready to send and receive packets, but the
+ * controller is not.  By default, PHYs which do not implement
+ * probe will be set to this state by phy_probe().  If the PHY
+ * driver knows the PHY is ready, and the PHY state is STARTING,
+ * then it sets the state to READY.
+ * - start will set the state to UP
+ *
+ * PENDING: PHY device is coming up, but the ethernet driver is
+ * ready.  phy_start will set this state if the PHY state is
+ * STARTING.
+ * - PHY driver will set the state to UP when the PHY is ready
+ *
+ * UP: The PHY and attached device are ready to do work.
+ * Interrupts should be started here.
+ * - timer moves to AN
+ *
+ * AN: The PHY is currently negotiating the link state.  Link is
+ * therefore down for now.  phy_timer will set this state when it
+ * detects the state is UP.  config_aneg will set this state
+ * whenever called with phydev->autoneg set to AUTONEG_ENABLE.
+ * - If autonegotiation finishes, but there's no link, it sets
+ *   the state to NOLINK.
+ * - If aneg finishes with link, it sets the state to RUNNING,
+ *   and calls adjust_link
+ * - If autonegotiation did not finish after an arbitrary amount
+ *   of time, autonegotiation should be tried again if the PHY
+ *   supports "magic" autonegotiation (back to AN)
+ * - If it didn't finish, and no magic_aneg, move to FORCING.
+ *
+ * NOLINK: PHY is up, but not currently plugged in.
+ * - If the timer notes that the link comes back, we move to RUNNING
+ * - config_aneg moves to AN
+ * - phy_stop moves to HALTED
+ *
+ * FORCING: PHY is being configured with forced settings
+ * - if link is up, move to RUNNING
+ * - If link is down, we drop to the next highest setting, and
+ *   retry (FORCING) after a timeout
+ * - phy_stop moves to HALTED
+ *
+ * RUNNING: PHY is currently up, running, and possibly sending
+ * and/or receiving packets
+ * - timer will set CHANGELINK if we're polling (this ensures the
+ *   link state is polled every other cycle of this state machine,
+ *   which makes it every other second)
+ * - irq will set CHANGELINK
+ * - config_aneg will set AN
+ * - phy_stop moves to HALTED
+ *
+ * CHANGELINK: PHY experienced a change in link state
+ * - timer moves to RUNNING if link
+ * - timer moves to NOLINK if the link is down
+ * - phy_stop moves to HALTED
+ *
+ * HALTED: PHY is up, but no polling or interrupts are done. Or
+ * PHY is in an error state.
+ *
+ * - phy_start moves to RESUMING
+ *
+ * RESUMING: PHY was halted, but now wants to run again.
+ * - If we are forcing, or aneg is done, timer moves to RUNNING
+ * - If aneg is not done, timer moves to AN
+ * - phy_stop moves to HALTED
+ */
+enum phy_state {
+	PHY_DOWN=0,
+	PHY_STARTING,
+	PHY_READY,
+	PHY_PENDING,
+	PHY_UP,
+	PHY_AN,
+	PHY_RUNNING,
+	PHY_NOLINK,
+	PHY_FORCING,
+	PHY_CHANGELINK,
+	PHY_HALTED,
+	PHY_RESUMING
+};
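+/* Typical lifecycle of an autonegotiating PHY, per the notes above:
+ * DOWN -> READY (probe) -> UP (phy_start) -> AN (timer) -> RUNNING. */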
+
+/* phy_device: An instance of a PHY
+ *
+ * drv: Pointer to the driver for this PHY instance
+ * bus: Pointer to the bus this PHY is on
+ * dev: driver model device structure for this PHY
+ * phy_id: UID for this device found during discovery
+ * state: state of the PHY for management purposes
+ * dev_flags: Device-specific flags used by the PHY driver.
+ * addr: Bus address of PHY
+ * link_timeout: The number of timer firings to wait before
+ * giving up on the current attempt at acquiring a link
+ * irq: IRQ number of the PHY's interrupt (-1 if none)
+ * phy_timer: The timer for handling the state machine
+ * phy_queue: A work_queue for the interrupt
+ * attached_dev: The attached enet driver's device instance ptr
+ * adjust_link: Callback for the enet controller to respond to
+ * changes in the link state.
+ * adjust_state: Callback for the enet driver to respond to
+ * changes in the state machine.
+ *
+ * speed, duplex, pause, supported, advertising, and
+ * autoneg are used like in mii_if_info
+ *
+ * interrupts currently only supports enabled or disabled,
+ * but could be changed in the future to support enabling
+ * and disabling specific interrupts
+ *
+ * Contains some infrastructure for polling and interrupt
+ * handling, as well as handling shifts in PHY hardware state
+ */
+struct phy_device {
+	/* Information about the PHY type */
+	/* And management functions */
+	struct phy_driver *drv;
+
+	struct mii_bus *bus;
+
+	struct device dev;
+
+	u32 phy_id;
+
+	enum phy_state state;
+
+	u32 dev_flags;
+
+	/* Bus address of the PHY (0-31) */
+	int addr;
+
+	/* forced speed & duplex (no autoneg)
+	 * partner speed & duplex & pause (autoneg)
+	 */
+	int speed;
+	int duplex;
+	int pause;
+	int asym_pause;
+
+	/* The most recently read link state */
+	int link;
+
+	/* Enabled Interrupts */
+	u32 interrupts;
+
+	/* Union of PHY and Attached devices' supported modes */
+	/* See mii.h for more info */
+	u32 supported;
+	u32 advertising;
+
+	int autoneg;
+
+	int link_timeout;
+
+	/* Interrupt number for this PHY
+	 * -1 means no interrupt */
+	int irq;
+
+	/* private data pointer */
+	/* For use by PHYs to maintain extra state */
+	void *priv;
+
+	/* Interrupt and Polling infrastructure */
+	struct work_struct phy_queue;
+	struct timer_list phy_timer;
+
+	spinlock_t lock;
+
+	struct net_device *attached_dev;
+
+	void (*adjust_link)(struct net_device *dev);
+
+	void (*adjust_state)(struct net_device *dev);
+};
+#define to_phy_device(d) container_of(d, struct phy_device, dev)
+
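For illustration only (outside the patch proper): a minimal sketch of the
adjust_link() callback an attached ethernet driver might supply, using only the
phy_device fields documented above.  The my_priv structure and the
mac_set_speed()/mac_set_duplex() helpers are invented names, and the sketch
assumes the driver already includes this header and <linux/netdevice.h>.

	#include <linux/netdevice.h>

	struct my_priv {
		struct phy_device *phydev;
	};

	/* Placeholder MAC programming helpers -- assumed, not a real API. */
	static void mac_set_speed(struct my_priv *priv, int speed) { }
	static void mac_set_duplex(struct my_priv *priv, int duplex) { }

	static void my_adjust_link(struct net_device *dev)
	{
		struct my_priv *priv = netdev_priv(dev);
		struct phy_device *phydev = priv->phydev;

		if (phydev->link) {
			/* Apply whatever speed/duplex the PHY layer resolved to. */
			mac_set_speed(priv, phydev->speed);
			mac_set_duplex(priv, phydev->duplex);
			netif_carrier_on(dev);
		} else {
			netif_carrier_off(dev);
		}
	}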
+/* struct phy_driver: Driver structure for a particular PHY type
+ *
+ * phy_id: The result of reading the UID registers of this PHY
+ *   type, and ANDing them with the phy_id_mask.  This driver
+ *   only works for PHYs with IDs which match this field
+ * name: The friendly name of this PHY type
+ * phy_id_mask: Defines the important bits of the phy_id
+ * features: A list of features (speed, duplex, etc) supported
+ *   by this PHY
+ * flags: A bitfield defining certain other features this PHY
+ *   supports (like interrupts)
+ *
+ * The drivers must implement config_aneg and read_status.  All
+ * other functions are optional. Note that none of these
+ * functions should be called from interrupt context.  The goal is
+ * for the bus read/write functions to be able to block when the
+ * bus transaction is happening, and be freed up by an interrupt
+ * (The MPC85xx has this ability, though it is not currently
+ * supported in the driver).
+ */
+struct phy_driver {
+	u32 phy_id;
+	char *name;
+	unsigned int phy_id_mask;
+	u32 features;
+	u32 flags;
+
+	/* Called to initialize the PHY,
+	 * including after a reset */
+	int (*config_init)(struct phy_device *phydev);
+
+	/* Called during discovery.  Used to set
+	 * up device-specific structures, if any */
+	int (*probe)(struct phy_device *phydev);
+
+	/* PHY Power Management */
+	int (*suspend)(struct phy_device *phydev);
+	int (*resume)(struct phy_device *phydev);
+
+	/* Configures the advertisement and resets
+	 * autonegotiation if phydev->autoneg is on,
+	 * forces the speed to the current settings in phydev
+	 * if phydev->autoneg is off */
+	int (*config_aneg)(struct phy_device *phydev);
+
+	/* Determines the negotiated speed and duplex */
+	int (*read_status)(struct phy_device *phydev);
+
+	/* Clears any pending interrupts */
+	int (*ack_interrupt)(struct phy_device *phydev);
+
+	/* Enables or disables interrupts */
+	int (*config_intr)(struct phy_device *phydev);
+
+	/* Clears up any memory if needed */
+	void (*remove)(struct phy_device *phydev);
+
+	struct device_driver driver;
+};
+#define to_phy_driver(d) container_of(d, struct phy_driver, driver)
+
+int phy_read(struct phy_device *phydev, u16 regnum);
+int phy_write(struct phy_device *phydev, u16 regnum, u16 val);
+struct phy_device* get_phy_device(struct mii_bus *bus, int addr);
+int phy_clear_interrupt(struct phy_device *phydev);
+int phy_config_interrupt(struct phy_device *phydev, u32 interrupts);
+
+static inline int phy_read_status(struct phy_device *phydev) {
+	return phydev->drv->read_status(phydev);
+}
+
+int genphy_setup_forced(struct phy_device *phydev);
+int genphy_restart_aneg(struct phy_device *phydev);
+int genphy_config_aneg(struct phy_device *phydev);
+int genphy_update_link(struct phy_device *phydev);
+int genphy_read_status(struct phy_device *phydev);
+void phy_driver_unregister(struct phy_driver *drv);
+int phy_driver_register(struct phy_driver *new_driver);
+void phy_prepare_link(struct phy_device *phydev,
+		void (*adjust_link)(struct net_device *));
+void phy_start_machine(struct phy_device *phydev,
+		void (*handler)(struct net_device *));
+void phy_stop_machine(struct phy_device *phydev);
+int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
+int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
+int phy_mii_ioctl(struct phy_device *phydev,
+		struct mii_ioctl_data *mii_data, int cmd);
+
+extern struct bus_type mdio_bus_type;
+#endif /* __PHY_H */
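
Since the comment on struct phy_driver notes that only config_aneg and
read_status are mandatory, a standard MII PHY can reuse the generic helpers
declared above for both.  A hedged sketch of such a registration follows; the
PHY ID, mask, name and feature bits are placeholder values, and the include of
<linux/phy.h> assumes that is where this header is installed.

	#include <linux/module.h>
	#include <linux/init.h>
	#include <linux/ethtool.h>
	#include <linux/mii.h>
	#include <linux/phy.h>

	/* Sketch only -- placeholder IDs and features, not a real device. */
	static struct phy_driver example_phy_driver = {
		.phy_id		= 0x01234560,
		.phy_id_mask	= 0xfffffff0,
		.name		= "Example PHY",
		.features	= SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full |
				  SUPPORTED_Autoneg | SUPPORTED_MII,
		/* Reuse the generic MII implementations declared above. */
		.config_aneg	= genphy_config_aneg,
		.read_status	= genphy_read_status,
	};

	static int __init example_phy_init(void)
	{
		return phy_driver_register(&example_phy_driver);
	}

	static void __exit example_phy_exit(void)
	{
		phy_driver_unregister(&example_phy_driver);
	}

	module_init(example_phy_init);
	module_exit(example_phy_exit);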
diff --git a/include/net/ieee80211.h b/include/net/ieee80211.h
--- a/include/net/ieee80211.h
+++ b/include/net/ieee80211.h
@@ -20,9 +20,9 @@
  */
 #ifndef IEEE80211_H
 #define IEEE80211_H
-
 #include <linux/if_ether.h> /* ETH_ALEN */
 #include <linux/kernel.h>   /* ARRAY_SIZE */
+#include <linux/wireless.h>
 
 #if WIRELESS_EXT < 17
 #define IW_QUAL_QUAL_INVALID   0x10
@@ -104,7 +104,7 @@ struct eapol {
 #define	MAX_FRAG_THRESHOLD     2346U
 
 /* Frame control field constants */
-#define IEEE80211_FCTL_VERS		0x0002
+#define IEEE80211_FCTL_VERS		0x0003
 #define IEEE80211_FCTL_FTYPE		0x000c
 #define IEEE80211_FCTL_STYPE		0x00f0
 #define IEEE80211_FCTL_TODS		0x0100
@@ -264,7 +264,7 @@ struct ieee80211_snap_hdr {
 
 #define WLAN_AUTH_CHALLENGE_LEN 128
 
-#define WLAN_CAPABILITY_BSS (1<<0)
+#define WLAN_CAPABILITY_ESS (1<<0)
 #define WLAN_CAPABILITY_IBSS (1<<1)
 #define WLAN_CAPABILITY_CF_POLLABLE (1<<2)
 #define WLAN_CAPABILITY_CF_POLL_REQUEST (1<<3)
@@ -426,9 +426,7 @@ struct ieee80211_stats {
 
 struct ieee80211_device;
 
-#if 0 /* for later */
 #include "ieee80211_crypt.h"
-#endif
 
 #define SEC_KEY_1         (1<<0)
 #define SEC_KEY_2         (1<<1)
@@ -852,5 +850,4 @@ static inline const char *escape_essid(c
 	*d = '\0';
 	return escaped;
 }
-
 #endif /* IEEE80211_H */
diff --git a/include/net/ieee80211_crypt.h b/include/net/ieee80211_crypt.h
new file mode 100644
--- /dev/null
+++ b/include/net/ieee80211_crypt.h
@@ -0,0 +1,86 @@
+/*
+ * Original code based on Host AP (software wireless LAN access point) driver
+ * for Intersil Prism2/2.5/3.
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ *
+ * Adaptation to a generic IEEE 802.11 stack by James Ketrenos
+ * <jketreno@linux.intel.com>
+ *
+ * Copyright (c) 2004, Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation. See README and COPYING for
+ * more details.
+ */
+
+/*
+ * This file defines the interface to the ieee80211 crypto module.
+ */
+#ifndef IEEE80211_CRYPT_H
+#define IEEE80211_CRYPT_H
+
+#include <linux/skbuff.h>
+
+struct ieee80211_crypto_ops {
+	const char *name;
+
+	/* init new crypto context (e.g., allocate private data space,
+	 * select IV, etc.); returns NULL on failure or pointer to allocated
+	 * private data on success */
+	void * (*init)(int keyidx);
+
+	/* deinitialize crypto context and free allocated private data */
+	void (*deinit)(void *priv);
+
+	/* encrypt/decrypt return < 0 on error or >= 0 on success. The return
+	 * value from decrypt_mpdu is passed as the keyidx value for
+	 * decrypt_msdu. skb must have enough head and tail room for the
+	 * encryption; if not, error will be returned; these functions are
+	 * called for all MPDUs (i.e., fragments).
+	 */
+	int (*encrypt_mpdu)(struct sk_buff *skb, int hdr_len, void *priv);
+	int (*decrypt_mpdu)(struct sk_buff *skb, int hdr_len, void *priv);
+
+	/* These functions are called for full MSDUs, i.e. full frames.
+	 * These can be NULL if full MSDU operations are not needed. */
+	int (*encrypt_msdu)(struct sk_buff *skb, int hdr_len, void *priv);
+	int (*decrypt_msdu)(struct sk_buff *skb, int keyidx, int hdr_len,
+			    void *priv);
+
+	int (*set_key)(void *key, int len, u8 *seq, void *priv);
+	int (*get_key)(void *key, int len, u8 *seq, void *priv);
+
+	/* procfs handler for printing out key information and possible
+	 * statistics */
+	char * (*print_stats)(char *p, void *priv);
+
+	/* maximum number of bytes added by encryption; encrypt buf is
+	 * allocated with extra_prefix_len bytes, copy of in_buf, and
+	 * extra_postfix_len; encrypt need not use all this space, but
+	 * the result must start at the beginning of the buffer and correct
+	 * length must be returned */
+	int extra_prefix_len, extra_postfix_len;
+
+	struct module *owner;
+};
+
+struct ieee80211_crypt_data {
+	struct list_head list; /* delayed deletion list */
+	struct ieee80211_crypto_ops *ops;
+	void *priv;
+	atomic_t refcnt;
+};
+
+int ieee80211_register_crypto_ops(struct ieee80211_crypto_ops *ops);
+int ieee80211_unregister_crypto_ops(struct ieee80211_crypto_ops *ops);
+struct ieee80211_crypto_ops * ieee80211_get_crypto_ops(const char *name);
+void ieee80211_crypt_deinit_entries(struct ieee80211_device *, int);
+void ieee80211_crypt_deinit_handler(unsigned long);
+void ieee80211_crypt_delayed_deinit(struct ieee80211_device *ieee,
+				    struct ieee80211_crypt_data **crypt);
+
+#endif
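
To show how the hooks above fit together, here is a hedged sketch of a consumer
that looks up an algorithm, creates a context and loads a key.  Error handling
and module reference counting are abbreviated, and example_setup_wep() is an
invented name; "WEP" is the name registered by the ieee80211_crypt_wep module
added later in this patch.

	#include <linux/slab.h>
	#include <linux/string.h>
	#include <asm/atomic.h>
	#include <net/ieee80211_crypt.h>

	static struct ieee80211_crypt_data *example_setup_wep(u8 *key, int keylen,
							      int keyidx)
	{
		struct ieee80211_crypto_ops *ops;
		struct ieee80211_crypt_data *crypt;

		ops = ieee80211_get_crypto_ops("WEP");
		if (ops == NULL)		/* ieee80211_crypt_wep not loaded */
			return NULL;

		crypt = kmalloc(sizeof(*crypt), GFP_KERNEL);
		if (crypt == NULL)
			return NULL;
		memset(crypt, 0, sizeof(*crypt));

		crypt->ops = ops;
		crypt->priv = crypt->ops->init(keyidx);
		if (crypt->priv == NULL) {
			kfree(crypt);
			return NULL;
		}
		atomic_set(&crypt->refcnt, 0);

		crypt->ops->set_key(key, keylen, NULL, crypt->priv);
		return crypt;
	}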
diff --git a/net/Kconfig b/net/Kconfig
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -212,6 +212,7 @@ endmenu
 source "net/ax25/Kconfig"
 source "net/irda/Kconfig"
 source "net/bluetooth/Kconfig"
+source "net/ieee80211/Kconfig"
 
 endif   # if NET
 endmenu # Networking
diff --git a/net/Makefile b/net/Makefile
--- a/net/Makefile
+++ b/net/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_DECNET)		+= decnet/
 obj-$(CONFIG_ECONET)		+= econet/
 obj-$(CONFIG_VLAN_8021Q)	+= 8021q/
 obj-$(CONFIG_IP_SCTP)		+= sctp/
+obj-$(CONFIG_IEEE80211)		+= ieee80211/
 
 ifeq ($(CONFIG_NET),y)
 obj-$(CONFIG_SYSCTL)		+= sysctl_net.o
diff --git a/net/ieee80211/Kconfig b/net/ieee80211/Kconfig
new file mode 100644
--- /dev/null
+++ b/net/ieee80211/Kconfig
@@ -0,0 +1,69 @@
+config IEEE80211
+	tristate "Generic IEEE 802.11 Networking Stack"
+	select NET_RADIO
+	---help---
+	This option enables the hardware-independent IEEE 802.11
+	networking stack.
+
+config IEEE80211_DEBUG
+	bool "Enable full debugging output"
+	depends on IEEE80211
+	---help---
+	  This option will enable debug tracing output for the
+	  ieee80211 network stack.
+
+	  This will result in the kernel module being ~70k larger.  You
+	  can control which debug output is sent to the kernel log by
+	  setting the value in
+
+	  /proc/net/ieee80211/debug_level
+
+	  For example:
+
+	  % echo 0x00000FF0 > /proc/net/ieee80211/debug_level
+
+	  For a list of values you can assign to debug_level, you
+	  can look at the bit mask values in <net/ieee80211.h>
+
+	  If you are not trying to debug or develop the ieee80211
+	  subsystem, you most likely want to say N here.
+
+config IEEE80211_CRYPT_WEP
+	tristate "IEEE 802.11 WEP encryption (802.1x)"
+	depends on IEEE80211
+	select CRYPTO
+	select CRYPTO_ARC4
+	select CRC32
+	---help---
+	Include software based cipher suites in support of IEEE
+	802.11's WEP.  This is needed for WEP as well as 802.1x.
+
+	This can be compiled as a module and will be called
+	"ieee80211_crypt_wep".
+
+config IEEE80211_CRYPT_CCMP
+	tristate "IEEE 802.11i CCMP support"
+	depends on IEEE80211
+	select CRYPTO
+	select CRYPTO_AES
+	---help---
+	Include software based cipher suites in support of IEEE 802.11i
+	(aka TGi, WPA, WPA2, WPA-PSK, etc.) for use with CCMP enabled
+	networks.
+
+	This can be compiled as a module and will be called
+	"ieee80211_crypt_ccmp".
+
+config IEEE80211_CRYPT_TKIP
+	tristate "IEEE 802.11i TKIP encryption"
+	depends on IEEE80211
+	select CRYPTO
+	select CRYPTO_MICHAEL_MIC
+	---help---
+	Include software based cipher suites in support of IEEE 802.11i
+	(aka TGi, WPA, WPA2, WPA-PSK, etc.) for use with TKIP enabled
+	networks.
+
+	This can be compiled as a module and will be called
+	"ieee80211_crypt_tkip".
+
diff --git a/net/ieee80211/Makefile b/net/ieee80211/Makefile
new file mode 100644
--- /dev/null
+++ b/net/ieee80211/Makefile
@@ -0,0 +1,11 @@
+obj-$(CONFIG_IEEE80211) += ieee80211.o
+obj-$(CONFIG_IEEE80211) += ieee80211_crypt.o
+obj-$(CONFIG_IEEE80211_CRYPT_WEP) += ieee80211_crypt_wep.o
+obj-$(CONFIG_IEEE80211_CRYPT_CCMP) += ieee80211_crypt_ccmp.o
+obj-$(CONFIG_IEEE80211_CRYPT_TKIP) += ieee80211_crypt_tkip.o
+ieee80211-objs := \
+	ieee80211_module.o \
+	ieee80211_tx.o \
+	ieee80211_rx.o \
+	ieee80211_wx.o
+
diff --git a/net/ieee80211/ieee80211_crypt.c b/net/ieee80211/ieee80211_crypt.c
new file mode 100644
--- /dev/null
+++ b/net/ieee80211/ieee80211_crypt.c
@@ -0,0 +1,259 @@
+/*
+ * Host AP crypto routines
+ *
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ * Portions Copyright (C) 2004, Intel Corporation <jketreno@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation. See README and COPYING for
+ * more details.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <asm/string.h>
+#include <asm/errno.h>
+
+#include <net/ieee80211.h>
+
+MODULE_AUTHOR("Jouni Malinen");
+MODULE_DESCRIPTION("HostAP crypto");
+MODULE_LICENSE("GPL");
+
+struct ieee80211_crypto_alg {
+	struct list_head list;
+	struct ieee80211_crypto_ops *ops;
+};
+
+
+struct ieee80211_crypto {
+	struct list_head algs;
+	spinlock_t lock;
+};
+
+static struct ieee80211_crypto *hcrypt;
+
+void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee,
+					   int force)
+{
+	struct list_head *ptr, *n;
+	struct ieee80211_crypt_data *entry;
+
+	for (ptr = ieee->crypt_deinit_list.next, n = ptr->next;
+	     ptr != &ieee->crypt_deinit_list; ptr = n, n = ptr->next) {
+		entry = list_entry(ptr, struct ieee80211_crypt_data, list);
+
+		if (atomic_read(&entry->refcnt) != 0 && !force)
+			continue;
+
+		list_del(ptr);
+
+		if (entry->ops) {
+			entry->ops->deinit(entry->priv);
+			module_put(entry->ops->owner);
+		}
+		kfree(entry);
+	}
+}
+
+void ieee80211_crypt_deinit_handler(unsigned long data)
+{
+	struct ieee80211_device *ieee = (struct ieee80211_device *)data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ieee->lock, flags);
+	ieee80211_crypt_deinit_entries(ieee, 0);
+	if (!list_empty(&ieee->crypt_deinit_list)) {
+		printk(KERN_DEBUG "%s: entries remaining in delayed crypt "
+		       "deletion list\n", ieee->dev->name);
+		ieee->crypt_deinit_timer.expires = jiffies + HZ;
+		add_timer(&ieee->crypt_deinit_timer);
+	}
+	spin_unlock_irqrestore(&ieee->lock, flags);
+
+}
+
+void ieee80211_crypt_delayed_deinit(struct ieee80211_device *ieee,
+				    struct ieee80211_crypt_data **crypt)
+{
+	struct ieee80211_crypt_data *tmp;
+	unsigned long flags;
+
+	if (*crypt == NULL)
+		return;
+
+	tmp = *crypt;
+	*crypt = NULL;
+
+	/* must not run ops->deinit() while there may be pending encrypt or
+	 * decrypt operations. Use a list of delayed deinits to avoid needing
+	 * locking. */
+
+	spin_lock_irqsave(&ieee->lock, flags);
+	list_add(&tmp->list, &ieee->crypt_deinit_list);
+	if (!timer_pending(&ieee->crypt_deinit_timer)) {
+		ieee->crypt_deinit_timer.expires = jiffies + HZ;
+		add_timer(&ieee->crypt_deinit_timer);
+	}
+	spin_unlock_irqrestore(&ieee->lock, flags);
+}
+
+int ieee80211_register_crypto_ops(struct ieee80211_crypto_ops *ops)
+{
+	unsigned long flags;
+	struct ieee80211_crypto_alg *alg;
+
+	if (hcrypt == NULL)
+		return -1;
+
+	alg = kmalloc(sizeof(*alg), GFP_KERNEL);
+	if (alg == NULL)
+		return -ENOMEM;
+
+	memset(alg, 0, sizeof(*alg));
+	alg->ops = ops;
+
+	spin_lock_irqsave(&hcrypt->lock, flags);
+	list_add(&alg->list, &hcrypt->algs);
+	spin_unlock_irqrestore(&hcrypt->lock, flags);
+
+	printk(KERN_DEBUG "ieee80211_crypt: registered algorithm '%s'\n",
+	       ops->name);
+
+	return 0;
+}
+
+int ieee80211_unregister_crypto_ops(struct ieee80211_crypto_ops *ops)
+{
+	unsigned long flags;
+	struct list_head *ptr;
+	struct ieee80211_crypto_alg *del_alg = NULL;
+
+	if (hcrypt == NULL)
+		return -1;
+
+	spin_lock_irqsave(&hcrypt->lock, flags);
+	for (ptr = hcrypt->algs.next; ptr != &hcrypt->algs; ptr = ptr->next) {
+		struct ieee80211_crypto_alg *alg =
+			(struct ieee80211_crypto_alg *) ptr;
+		if (alg->ops == ops) {
+			list_del(&alg->list);
+			del_alg = alg;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&hcrypt->lock, flags);
+
+	if (del_alg) {
+		printk(KERN_DEBUG "ieee80211_crypt: unregistered algorithm "
+		       "'%s'\n", ops->name);
+		kfree(del_alg);
+	}
+
+	return del_alg ? 0 : -1;
+}
+
+
+struct ieee80211_crypto_ops * ieee80211_get_crypto_ops(const char *name)
+{
+	unsigned long flags;
+	struct list_head *ptr;
+	struct ieee80211_crypto_alg *found_alg = NULL;
+
+	if (hcrypt == NULL)
+		return NULL;
+
+	spin_lock_irqsave(&hcrypt->lock, flags);
+	for (ptr = hcrypt->algs.next; ptr != &hcrypt->algs; ptr = ptr->next) {
+		struct ieee80211_crypto_alg *alg =
+			(struct ieee80211_crypto_alg *) ptr;
+		if (strcmp(alg->ops->name, name) == 0) {
+			found_alg = alg;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&hcrypt->lock, flags);
+
+	if (found_alg)
+		return found_alg->ops;
+	else
+		return NULL;
+}
+
+
+static void * ieee80211_crypt_null_init(int keyidx) { return (void *) 1; }
+static void ieee80211_crypt_null_deinit(void *priv) {}
+
+static struct ieee80211_crypto_ops ieee80211_crypt_null = {
+	.name			= "NULL",
+	.init			= ieee80211_crypt_null_init,
+	.deinit			= ieee80211_crypt_null_deinit,
+	.encrypt_mpdu		= NULL,
+	.decrypt_mpdu		= NULL,
+	.encrypt_msdu		= NULL,
+	.decrypt_msdu		= NULL,
+	.set_key		= NULL,
+	.get_key		= NULL,
+	.extra_prefix_len	= 0,
+	.extra_postfix_len	= 0,
+	.owner			= THIS_MODULE,
+};
+
+
+static int __init ieee80211_crypto_init(void)
+{
+	int ret = -ENOMEM;
+
+	hcrypt = kmalloc(sizeof(*hcrypt), GFP_KERNEL);
+	if (!hcrypt)
+		goto out;
+
+	memset(hcrypt, 0, sizeof(*hcrypt));
+	INIT_LIST_HEAD(&hcrypt->algs);
+	spin_lock_init(&hcrypt->lock);
+
+	ret = ieee80211_register_crypto_ops(&ieee80211_crypt_null);
+	if (ret < 0) {
+		kfree(hcrypt);
+		hcrypt = NULL;
+	}
+out:
+	return ret;
+}
+
+
+static void __exit ieee80211_crypto_deinit(void)
+{
+	struct list_head *ptr, *n;
+
+	if (hcrypt == NULL)
+		return;
+
+	for (ptr = hcrypt->algs.next, n = ptr->next; ptr != &hcrypt->algs;
+	     ptr = n, n = ptr->next) {
+		struct ieee80211_crypto_alg *alg =
+			(struct ieee80211_crypto_alg *) ptr;
+		list_del(ptr);
+		printk(KERN_DEBUG "ieee80211_crypt: unregistered algorithm "
+		       "'%s' (deinit)\n", alg->ops->name);
+		kfree(alg);
+	}
+
+	kfree(hcrypt);
+}
+
+EXPORT_SYMBOL(ieee80211_crypt_deinit_entries);
+EXPORT_SYMBOL(ieee80211_crypt_deinit_handler);
+EXPORT_SYMBOL(ieee80211_crypt_delayed_deinit);
+
+EXPORT_SYMBOL(ieee80211_register_crypto_ops);
+EXPORT_SYMBOL(ieee80211_unregister_crypto_ops);
+EXPORT_SYMBOL(ieee80211_get_crypto_ops);
+
+module_init(ieee80211_crypto_init);
+module_exit(ieee80211_crypto_deinit);
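
The delayed-deinit scheme above relies on the owner of the ieee80211_device
having initialized crypt_deinit_list and crypt_deinit_timer, as the device
setup code is expected to do.  A hedged sketch of that contract and of how a
key context is retired (example_* names are invented for illustration):

	#include <linux/list.h>
	#include <linux/timer.h>
	#include <net/ieee80211.h>

	/* Setup a device owner must perform before using the delayed-deinit
	 * machinery implemented above. */
	static void example_crypt_setup(struct ieee80211_device *ieee)
	{
		INIT_LIST_HEAD(&ieee->crypt_deinit_list);
		init_timer(&ieee->crypt_deinit_timer);
		ieee->crypt_deinit_timer.data = (unsigned long) ieee;
		ieee->crypt_deinit_timer.function = ieee80211_crypt_deinit_handler;
	}

	/* Retiring a context: queue it instead of freeing it in place, since
	 * encrypt/decrypt calls may still be running against it. */
	static void example_replace_crypt(struct ieee80211_device *ieee,
					  struct ieee80211_crypt_data **slot)
	{
		ieee80211_crypt_delayed_deinit(ieee, slot);
		/* ...the caller would then install the new context in *slot... */
	}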
diff --git a/net/ieee80211/ieee80211_crypt_ccmp.c b/net/ieee80211/ieee80211_crypt_ccmp.c
new file mode 100644
--- /dev/null
+++ b/net/ieee80211/ieee80211_crypt_ccmp.c
@@ -0,0 +1,470 @@
+/*
+ * Host AP crypt: host-based CCMP encryption implementation for Host AP driver
+ *
+ * Copyright (c) 2003-2004, Jouni Malinen <jkmaline@cc.hut.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation. See README and COPYING for
+ * more details.
+ */
+
+#include <linux/config.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_arp.h>
+#include <asm/string.h>
+#include <linux/wireless.h>
+
+#include <net/ieee80211.h>
+
+
+#include <linux/crypto.h>
+#include <asm/scatterlist.h>
+
+MODULE_AUTHOR("Jouni Malinen");
+MODULE_DESCRIPTION("Host AP crypt: CCMP");
+MODULE_LICENSE("GPL");
+
+#define AES_BLOCK_LEN 16
+#define CCMP_HDR_LEN 8
+#define CCMP_MIC_LEN 8
+#define CCMP_TK_LEN 16
+#define CCMP_PN_LEN 6
+
+struct ieee80211_ccmp_data {
+	u8 key[CCMP_TK_LEN];
+	int key_set;
+
+	u8 tx_pn[CCMP_PN_LEN];
+	u8 rx_pn[CCMP_PN_LEN];
+
+	u32 dot11RSNAStatsCCMPFormatErrors;
+	u32 dot11RSNAStatsCCMPReplays;
+	u32 dot11RSNAStatsCCMPDecryptErrors;
+
+	int key_idx;
+
+	struct crypto_tfm *tfm;
+
+	/* scratch buffers for virt_to_page() (crypto API) */
+	u8 tx_b0[AES_BLOCK_LEN], tx_b[AES_BLOCK_LEN],
+		tx_e[AES_BLOCK_LEN], tx_s0[AES_BLOCK_LEN];
+	u8 rx_b0[AES_BLOCK_LEN], rx_b[AES_BLOCK_LEN], rx_a[AES_BLOCK_LEN];
+};
+
+static void ieee80211_ccmp_aes_encrypt(struct crypto_tfm *tfm,
+				       const u8 pt[16], u8 ct[16])
+{
+	struct scatterlist src, dst;
+
+	src.page = virt_to_page(pt);
+	src.offset = offset_in_page(pt);
+	src.length = AES_BLOCK_LEN;
+
+	dst.page = virt_to_page(ct);
+	dst.offset = offset_in_page(ct);
+	dst.length = AES_BLOCK_LEN;
+
+	crypto_cipher_encrypt(tfm, &dst, &src, AES_BLOCK_LEN);
+}
+
+static void * ieee80211_ccmp_init(int key_idx)
+{
+	struct ieee80211_ccmp_data *priv;
+
+	priv = kmalloc(sizeof(*priv), GFP_ATOMIC);
+	if (priv == NULL)
+		goto fail;
+	memset(priv, 0, sizeof(*priv));
+	priv->key_idx = key_idx;
+
+	priv->tfm = crypto_alloc_tfm("aes", 0);
+	if (priv->tfm == NULL) {
+		printk(KERN_DEBUG "ieee80211_crypt_ccmp: could not allocate "
+		       "crypto API aes\n");
+		goto fail;
+	}
+
+	return priv;
+
+fail:
+	if (priv) {
+		if (priv->tfm)
+			crypto_free_tfm(priv->tfm);
+		kfree(priv);
+	}
+
+	return NULL;
+}
+
+
+static void ieee80211_ccmp_deinit(void *priv)
+{
+	struct ieee80211_ccmp_data *_priv = priv;
+	if (_priv && _priv->tfm)
+		crypto_free_tfm(_priv->tfm);
+	kfree(priv);
+}
+
+
+static inline void xor_block(u8 *b, u8 *a, size_t len)
+{
+	int i;
+	for (i = 0; i < len; i++)
+		b[i] ^= a[i];
+}
+
+
+static void ccmp_init_blocks(struct crypto_tfm *tfm,
+			     struct ieee80211_hdr *hdr,
+			     u8 *pn, size_t dlen, u8 *b0, u8 *auth,
+			     u8 *s0)
+{
+	u8 *pos, qc = 0;
+	size_t aad_len;
+	u16 fc;
+	int a4_included, qc_included;
+	u8 aad[2 * AES_BLOCK_LEN];
+
+	fc = le16_to_cpu(hdr->frame_ctl);
+	a4_included = ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
+		       (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS));
+	qc_included = ((WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA) &&
+		       (WLAN_FC_GET_STYPE(fc) & 0x08));
+	aad_len = 22;
+	if (a4_included)
+		aad_len += 6;
+	if (qc_included) {
+		pos = (u8 *) &hdr->addr4;
+		if (a4_included)
+			pos += 6;
+		qc = *pos & 0x0f;
+		aad_len += 2;
+	}
+
+	/* CCM Initial Block:
+	 * Flag (Include authentication header, M=3 (8-octet MIC),
+	 *       L=1 (2-octet Dlen))
+	 * Nonce: 0x00 | A2 | PN
+	 * Dlen */
+	b0[0] = 0x59;
+	b0[1] = qc;
+	memcpy(b0 + 2, hdr->addr2, ETH_ALEN);
+	memcpy(b0 + 8, pn, CCMP_PN_LEN);
+	b0[14] = (dlen >> 8) & 0xff;
+	b0[15] = dlen & 0xff;
+
+	/* AAD:
+	 * FC with bits 4..6 and 11..13 masked to zero; 14 is always one
+	 * A1 | A2 | A3
+	 * SC with bits 4..15 (seq#) masked to zero
+	 * A4 (if present)
+	 * QC (if present)
+	 */
+	pos = (u8 *) hdr;
+	aad[0] = 0; /* aad_len >> 8 */
+	aad[1] = aad_len & 0xff;
+	aad[2] = pos[0] & 0x8f;
+	aad[3] = pos[1] & 0xc7;
+	memcpy(aad + 4, hdr->addr1, 3 * ETH_ALEN);
+	pos = (u8 *) &hdr->seq_ctl;
+	aad[22] = pos[0] & 0x0f;
+	aad[23] = 0; /* all bits masked */
+	memset(aad + 24, 0, 8);
+	if (a4_included)
+		memcpy(aad + 24, hdr->addr4, ETH_ALEN);
+	if (qc_included) {
+		aad[a4_included ? 30 : 24] = qc;
+		/* rest of QC masked */
+	}
+
+	/* Start with the first block and AAD */
+	ieee80211_ccmp_aes_encrypt(tfm, b0, auth);
+	xor_block(auth, aad, AES_BLOCK_LEN);
+	ieee80211_ccmp_aes_encrypt(tfm, auth, auth);
+	xor_block(auth, &aad[AES_BLOCK_LEN], AES_BLOCK_LEN);
+	ieee80211_ccmp_aes_encrypt(tfm, auth, auth);
+	b0[0] &= 0x07;
+	b0[14] = b0[15] = 0;
+	ieee80211_ccmp_aes_encrypt(tfm, b0, s0);
+}
+
+
+static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
+{
+	struct ieee80211_ccmp_data *key = priv;
+	int data_len, i, blocks, last, len;
+	u8 *pos, *mic;
+	struct ieee80211_hdr *hdr;
+	u8 *b0 = key->tx_b0;
+	u8 *b = key->tx_b;
+	u8 *e = key->tx_e;
+	u8 *s0 = key->tx_s0;
+
+	if (skb_headroom(skb) < CCMP_HDR_LEN ||
+	    skb_tailroom(skb) < CCMP_MIC_LEN ||
+	    skb->len < hdr_len)
+		return -1;
+
+	data_len = skb->len - hdr_len;
+	pos = skb_push(skb, CCMP_HDR_LEN);
+	memmove(pos, pos + CCMP_HDR_LEN, hdr_len);
+	pos += hdr_len;
+	mic = skb_put(skb, CCMP_MIC_LEN);
+
+	i = CCMP_PN_LEN - 1;
+	while (i >= 0) {
+		key->tx_pn[i]++;
+		if (key->tx_pn[i] != 0)
+			break;
+		i--;
+	}
+
+	*pos++ = key->tx_pn[5];
+	*pos++ = key->tx_pn[4];
+	*pos++ = 0;
+	*pos++ = (key->key_idx << 6) | (1 << 5) /* Ext IV included */;
+	*pos++ = key->tx_pn[3];
+	*pos++ = key->tx_pn[2];
+	*pos++ = key->tx_pn[1];
+	*pos++ = key->tx_pn[0];
+
+	hdr = (struct ieee80211_hdr *) skb->data;
+	ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0);
+
+	blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN;
+	last = data_len % AES_BLOCK_LEN;
+
+	for (i = 1; i <= blocks; i++) {
+		len = (i == blocks && last) ? last : AES_BLOCK_LEN;
+		/* Authentication */
+		xor_block(b, pos, len);
+		ieee80211_ccmp_aes_encrypt(key->tfm, b, b);
+		/* Encryption, with counter */
+		b0[14] = (i >> 8) & 0xff;
+		b0[15] = i & 0xff;
+		ieee80211_ccmp_aes_encrypt(key->tfm, b0, e);
+		xor_block(pos, e, len);
+		pos += len;
+	}
+
+	for (i = 0; i < CCMP_MIC_LEN; i++)
+		mic[i] = b[i] ^ s0[i];
+
+	return 0;
+}
+
+
+static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
+{
+	struct ieee80211_ccmp_data *key = priv;
+	u8 keyidx, *pos;
+	struct ieee80211_hdr *hdr;
+	u8 *b0 = key->rx_b0;
+	u8 *b = key->rx_b;
+	u8 *a = key->rx_a;
+	u8 pn[6];
+	int i, blocks, last, len;
+	size_t data_len = skb->len - hdr_len - CCMP_HDR_LEN - CCMP_MIC_LEN;
+	u8 *mic = skb->data + skb->len - CCMP_MIC_LEN;
+
+	if (skb->len < hdr_len + CCMP_HDR_LEN + CCMP_MIC_LEN) {
+		key->dot11RSNAStatsCCMPFormatErrors++;
+		return -1;
+	}
+
+	hdr = (struct ieee80211_hdr *) skb->data;
+	pos = skb->data + hdr_len;
+	keyidx = pos[3];
+	if (!(keyidx & (1 << 5))) {
+		if (net_ratelimit()) {
+			printk(KERN_DEBUG "CCMP: received packet without ExtIV"
+			       " flag from " MAC_FMT "\n", MAC_ARG(hdr->addr2));
+		}
+		key->dot11RSNAStatsCCMPFormatErrors++;
+		return -2;
+	}
+	keyidx >>= 6;
+	if (key->key_idx != keyidx) {
+		printk(KERN_DEBUG "CCMP: RX tkey->key_idx=%d frame "
+		       "keyidx=%d priv=%p\n", key->key_idx, keyidx, priv);
+		return -6;
+	}
+	if (!key->key_set) {
+		if (net_ratelimit()) {
+			printk(KERN_DEBUG "CCMP: received packet from " MAC_FMT
+			       " with keyid=%d that does not have a configured"
+			       " key\n", MAC_ARG(hdr->addr2), keyidx);
+		}
+		return -3;
+	}
+
+	pn[0] = pos[7];
+	pn[1] = pos[6];
+	pn[2] = pos[5];
+	pn[3] = pos[4];
+	pn[4] = pos[1];
+	pn[5] = pos[0];
+	pos += 8;
+
+	if (memcmp(pn, key->rx_pn, CCMP_PN_LEN) <= 0) {
+		if (net_ratelimit()) {
+			printk(KERN_DEBUG "CCMP: replay detected: STA=" MAC_FMT
+			       " previous PN %02x%02x%02x%02x%02x%02x "
+			       "received PN %02x%02x%02x%02x%02x%02x\n",
+			       MAC_ARG(hdr->addr2), MAC_ARG(key->rx_pn),
+			       MAC_ARG(pn));
+		}
+		key->dot11RSNAStatsCCMPReplays++;
+		return -4;
+	}
+
+	ccmp_init_blocks(key->tfm, hdr, pn, data_len, b0, a, b);
+	xor_block(mic, b, CCMP_MIC_LEN);
+
+	blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN;
+	last = data_len % AES_BLOCK_LEN;
+
+	for (i = 1; i <= blocks; i++) {
+		len = (i == blocks && last) ? last : AES_BLOCK_LEN;
+		/* Decrypt, with counter */
+		b0[14] = (i >> 8) & 0xff;
+		b0[15] = i & 0xff;
+		ieee80211_ccmp_aes_encrypt(key->tfm, b0, b);
+		xor_block(pos, b, len);
+		/* Authentication */
+		xor_block(a, pos, len);
+		ieee80211_ccmp_aes_encrypt(key->tfm, a, a);
+		pos += len;
+	}
+
+	if (memcmp(mic, a, CCMP_MIC_LEN) != 0) {
+		if (net_ratelimit()) {
+			printk(KERN_DEBUG "CCMP: decrypt failed: STA="
+			       MAC_FMT "\n", MAC_ARG(hdr->addr2));
+		}
+		key->dot11RSNAStatsCCMPDecryptErrors++;
+		return -5;
+	}
+
+	memcpy(key->rx_pn, pn, CCMP_PN_LEN);
+
+	/* Remove hdr and MIC */
+	memmove(skb->data + CCMP_HDR_LEN, skb->data, hdr_len);
+	skb_pull(skb, CCMP_HDR_LEN);
+	skb_trim(skb, skb->len - CCMP_MIC_LEN);
+
+	return keyidx;
+}
+
+
+static int ieee80211_ccmp_set_key(void *key, int len, u8 *seq, void *priv)
+{
+	struct ieee80211_ccmp_data *data = priv;
+	int keyidx;
+	struct crypto_tfm *tfm = data->tfm;
+
+	keyidx = data->key_idx;
+	memset(data, 0, sizeof(*data));
+	data->key_idx = keyidx;
+	data->tfm = tfm;
+	if (len == CCMP_TK_LEN) {
+		memcpy(data->key, key, CCMP_TK_LEN);
+		data->key_set = 1;
+		if (seq) {
+			data->rx_pn[0] = seq[5];
+			data->rx_pn[1] = seq[4];
+			data->rx_pn[2] = seq[3];
+			data->rx_pn[3] = seq[2];
+			data->rx_pn[4] = seq[1];
+			data->rx_pn[5] = seq[0];
+		}
+		crypto_cipher_setkey(data->tfm, data->key, CCMP_TK_LEN);
+	} else if (len == 0)
+		data->key_set = 0;
+	else
+		return -1;
+
+	return 0;
+}
+
+
+static int ieee80211_ccmp_get_key(void *key, int len, u8 *seq, void *priv)
+{
+	struct ieee80211_ccmp_data *data = priv;
+
+	if (len < CCMP_TK_LEN)
+		return -1;
+
+	if (!data->key_set)
+		return 0;
+	memcpy(key, data->key, CCMP_TK_LEN);
+
+	if (seq) {
+		seq[0] = data->tx_pn[5];
+		seq[1] = data->tx_pn[4];
+		seq[2] = data->tx_pn[3];
+		seq[3] = data->tx_pn[2];
+		seq[4] = data->tx_pn[1];
+		seq[5] = data->tx_pn[0];
+	}
+
+	return CCMP_TK_LEN;
+}
+
+
+static char * ieee80211_ccmp_print_stats(char *p, void *priv)
+{
+	struct ieee80211_ccmp_data *ccmp = priv;
+	p += sprintf(p, "key[%d] alg=CCMP key_set=%d "
+		     "tx_pn=%02x%02x%02x%02x%02x%02x "
+		     "rx_pn=%02x%02x%02x%02x%02x%02x "
+		     "format_errors=%d replays=%d decrypt_errors=%d\n",
+		     ccmp->key_idx, ccmp->key_set,
+		     MAC_ARG(ccmp->tx_pn), MAC_ARG(ccmp->rx_pn),
+		     ccmp->dot11RSNAStatsCCMPFormatErrors,
+		     ccmp->dot11RSNAStatsCCMPReplays,
+		     ccmp->dot11RSNAStatsCCMPDecryptErrors);
+
+	return p;
+}
+
+
+static struct ieee80211_crypto_ops ieee80211_crypt_ccmp = {
+	.name			= "CCMP",
+	.init			= ieee80211_ccmp_init,
+	.deinit			= ieee80211_ccmp_deinit,
+	.encrypt_mpdu		= ieee80211_ccmp_encrypt,
+	.decrypt_mpdu		= ieee80211_ccmp_decrypt,
+	.encrypt_msdu		= NULL,
+	.decrypt_msdu		= NULL,
+	.set_key		= ieee80211_ccmp_set_key,
+	.get_key		= ieee80211_ccmp_get_key,
+	.print_stats		= ieee80211_ccmp_print_stats,
+	.extra_prefix_len	= CCMP_HDR_LEN,
+	.extra_postfix_len	= CCMP_MIC_LEN,
+	.owner			= THIS_MODULE,
+};
+
+
+static int __init ieee80211_crypto_ccmp_init(void)
+{
+	return ieee80211_register_crypto_ops(&ieee80211_crypt_ccmp);
+}
+
+
+static void __exit ieee80211_crypto_ccmp_exit(void)
+{
+	ieee80211_unregister_crypto_ops(&ieee80211_crypt_ccmp);
+}
+
+
+module_init(ieee80211_crypto_ccmp_init);
+module_exit(ieee80211_crypto_ccmp_exit);
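
For reference, the 8-byte CCMP header assembled in ieee80211_ccmp_encrypt() and
parsed back in ieee80211_ccmp_decrypt() lays out as follows.  This is derived
directly from the code above; tx_pn[]/rx_pn[] are kept most-significant-byte
first so that memcmp() gives a big-endian replay comparison.

	/*
	 * CCMP header layout (pos[0..7]) as written by ieee80211_ccmp_encrypt():
	 *
	 *   pos[0] = PN0 (tx_pn[5], least significant)
	 *   pos[1] = PN1 (tx_pn[4])
	 *   pos[2] = reserved (0)
	 *   pos[3] = key_idx << 6 | 0x20    (bit 5 = Ext IV present)
	 *   pos[4] = PN2 (tx_pn[3])
	 *   pos[5] = PN3 (tx_pn[2])
	 *   pos[6] = PN4 (tx_pn[1])
	 *   pos[7] = PN5 (tx_pn[0], most significant)
	 *
	 * ieee80211_ccmp_decrypt() reverses this into pn[0..5] = PN5..PN0 so
	 * that memcmp(pn, key->rx_pn, CCMP_PN_LEN) compares the 48-bit packet
	 * numbers as big-endian values.
	 */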
diff --git a/net/ieee80211/ieee80211_crypt_tkip.c b/net/ieee80211/ieee80211_crypt_tkip.c
new file mode 100644
--- /dev/null
+++ b/net/ieee80211/ieee80211_crypt_tkip.c
@@ -0,0 +1,708 @@
+/*
+ * Host AP crypt: host-based TKIP encryption implementation for Host AP driver
+ *
+ * Copyright (c) 2003-2004, Jouni Malinen <jkmaline@cc.hut.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation. See README and COPYING for
+ * more details.
+ */
+
+#include <linux/config.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_arp.h>
+#include <asm/string.h>
+
+#include <net/ieee80211.h>
+
+
+#include <linux/crypto.h>
+#include <asm/scatterlist.h>
+#include <linux/crc32.h>
+
+MODULE_AUTHOR("Jouni Malinen");
+MODULE_DESCRIPTION("Host AP crypt: TKIP");
+MODULE_LICENSE("GPL");
+
+struct ieee80211_tkip_data {
+#define TKIP_KEY_LEN 32
+	u8 key[TKIP_KEY_LEN];
+	int key_set;
+
+	u32 tx_iv32;
+	u16 tx_iv16;
+	u16 tx_ttak[5];
+	int tx_phase1_done;
+
+	u32 rx_iv32;
+	u16 rx_iv16;
+	u16 rx_ttak[5];
+	int rx_phase1_done;
+	u32 rx_iv32_new;
+	u16 rx_iv16_new;
+
+	u32 dot11RSNAStatsTKIPReplays;
+	u32 dot11RSNAStatsTKIPICVErrors;
+	u32 dot11RSNAStatsTKIPLocalMICFailures;
+
+	int key_idx;
+
+	struct crypto_tfm *tfm_arc4;
+	struct crypto_tfm *tfm_michael;
+
+	/* scratch buffers for virt_to_page() (crypto API) */
+	u8 rx_hdr[16], tx_hdr[16];
+};
+
+static void * ieee80211_tkip_init(int key_idx)
+{
+	struct ieee80211_tkip_data *priv;
+
+	priv = kmalloc(sizeof(*priv), GFP_ATOMIC);
+	if (priv == NULL)
+		goto fail;
+	memset(priv, 0, sizeof(*priv));
+	priv->key_idx = key_idx;
+
+	priv->tfm_arc4 = crypto_alloc_tfm("arc4", 0);
+	if (priv->tfm_arc4 == NULL) {
+		printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
+		       "crypto API arc4\n");
+		goto fail;
+	}
+
+	priv->tfm_michael = crypto_alloc_tfm("michael_mic", 0);
+	if (priv->tfm_michael == NULL) {
+		printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
+		       "crypto API michael_mic\n");
+		goto fail;
+	}
+
+	return priv;
+
+fail:
+	if (priv) {
+		if (priv->tfm_michael)
+			crypto_free_tfm(priv->tfm_michael);
+		if (priv->tfm_arc4)
+			crypto_free_tfm(priv->tfm_arc4);
+		kfree(priv);
+	}
+
+	return NULL;
+}
+
+
+static void ieee80211_tkip_deinit(void *priv)
+{
+	struct ieee80211_tkip_data *_priv = priv;
+	if (_priv && _priv->tfm_michael)
+		crypto_free_tfm(_priv->tfm_michael);
+	if (_priv && _priv->tfm_arc4)
+		crypto_free_tfm(_priv->tfm_arc4);
+	kfree(priv);
+}
+
+
+static inline u16 RotR1(u16 val)
+{
+	return (val >> 1) | (val << 15);
+}
+
+
+static inline u8 Lo8(u16 val)
+{
+	return val & 0xff;
+}
+
+
+static inline u8 Hi8(u16 val)
+{
+	return val >> 8;
+}
+
+
+static inline u16 Lo16(u32 val)
+{
+	return val & 0xffff;
+}
+
+
+static inline u16 Hi16(u32 val)
+{
+	return val >> 16;
+}
+
+
+static inline u16 Mk16(u8 hi, u8 lo)
+{
+	return lo | (((u16) hi) << 8);
+}
+
+
+static inline u16 Mk16_le(u16 *v)
+{
+	return le16_to_cpu(*v);
+}
+
+
+static const u16 Sbox[256] =
+{
+	0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154,
+	0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A,
+	0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B,
+	0x41EC, 0xB367, 0x5FFD, 0x45EA, 0x23BF, 0x53F7, 0xE496, 0x9B5B,
+	0x75C2, 0xE11C, 0x3DAE, 0x4C6A, 0x6C5A, 0x7E41, 0xF502, 0x834F,
+	0x685C, 0x51F4, 0xD134, 0xF908, 0xE293, 0xAB73, 0x6253, 0x2A3F,
+	0x080C, 0x9552, 0x4665, 0x9D5E, 0x3028, 0x37A1, 0x0A0F, 0x2FB5,
+	0x0E09, 0x2436, 0x1B9B, 0xDF3D, 0xCD26, 0x4E69, 0x7FCD, 0xEA9F,
+	0x121B, 0x1D9E, 0x5874, 0x342E, 0x362D, 0xDCB2, 0xB4EE, 0x5BFB,
+	0xA4F6, 0x764D, 0xB761, 0x7DCE, 0x527B, 0xDD3E, 0x5E71, 0x1397,
+	0xA6F5, 0xB968, 0x0000, 0xC12C, 0x4060, 0xE31F, 0x79C8, 0xB6ED,
+	0xD4BE, 0x8D46, 0x67D9, 0x724B, 0x94DE, 0x98D4, 0xB0E8, 0x854A,
+	0xBB6B, 0xC52A, 0x4FE5, 0xED16, 0x86C5, 0x9AD7, 0x6655, 0x1194,
+	0x8ACF, 0xE910, 0x0406, 0xFE81, 0xA0F0, 0x7844, 0x25BA, 0x4BE3,
+	0xA2F3, 0x5DFE, 0x80C0, 0x058A, 0x3FAD, 0x21BC, 0x7048, 0xF104,
+	0x63DF, 0x77C1, 0xAF75, 0x4263, 0x2030, 0xE51A, 0xFD0E, 0xBF6D,
+	0x814C, 0x1814, 0x2635, 0xC32F, 0xBEE1, 0x35A2, 0x88CC, 0x2E39,
+	0x9357, 0x55F2, 0xFC82, 0x7A47, 0xC8AC, 0xBAE7, 0x322B, 0xE695,
+	0xC0A0, 0x1998, 0x9ED1, 0xA37F, 0x4466, 0x547E, 0x3BAB, 0x0B83,
+	0x8CCA, 0xC729, 0x6BD3, 0x283C, 0xA779, 0xBCE2, 0x161D, 0xAD76,
+	0xDB3B, 0x6456, 0x744E, 0x141E, 0x92DB, 0x0C0A, 0x486C, 0xB8E4,
+	0x9F5D, 0xBD6E, 0x43EF, 0xC4A6, 0x39A8, 0x31A4, 0xD337, 0xF28B,
+	0xD532, 0x8B43, 0x6E59, 0xDAB7, 0x018C, 0xB164, 0x9CD2, 0x49E0,
+	0xD8B4, 0xACFA, 0xF307, 0xCF25, 0xCAAF, 0xF48E, 0x47E9, 0x1018,
+	0x6FD5, 0xF088, 0x4A6F, 0x5C72, 0x3824, 0x57F1, 0x73C7, 0x9751,
+	0xCB23, 0xA17C, 0xE89C, 0x3E21, 0x96DD, 0x61DC, 0x0D86, 0x0F85,
+	0xE090, 0x7C42, 0x71C4, 0xCCAA, 0x90D8, 0x0605, 0xF701, 0x1C12,
+	0xC2A3, 0x6A5F, 0xAEF9, 0x69D0, 0x1791, 0x9958, 0x3A27, 0x27B9,
+	0xD938, 0xEB13, 0x2BB3, 0x2233, 0xD2BB, 0xA970, 0x0789, 0x33A7,
+	0x2DB6, 0x3C22, 0x1592, 0xC920, 0x8749, 0xAAFF, 0x5078, 0xA57A,
+	0x038F, 0x59F8, 0x0980, 0x1A17, 0x65DA, 0xD731, 0x84C6, 0xD0B8,
+	0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A,
+};
+
+
+static inline u16 _S_(u16 v)
+{
+	u16 t = Sbox[Hi8(v)];
+	return Sbox[Lo8(v)] ^ ((t << 8) | (t >> 8));
+}
+
+
+#define PHASE1_LOOP_COUNT 8
+
+static void tkip_mixing_phase1(u16 *TTAK, const u8 *TK, const u8 *TA, u32 IV32)
+{
+	int i, j;
+
+	/* Initialize the 80-bit TTAK from TSC (IV32) and TA[0..5] */
+	TTAK[0] = Lo16(IV32);
+	TTAK[1] = Hi16(IV32);
+	TTAK[2] = Mk16(TA[1], TA[0]);
+	TTAK[3] = Mk16(TA[3], TA[2]);
+	TTAK[4] = Mk16(TA[5], TA[4]);
+
+	for (i = 0; i < PHASE1_LOOP_COUNT; i++) {
+		j = 2 * (i & 1);
+		TTAK[0] += _S_(TTAK[4] ^ Mk16(TK[1 + j], TK[0 + j]));
+		TTAK[1] += _S_(TTAK[0] ^ Mk16(TK[5 + j], TK[4 + j]));
+		TTAK[2] += _S_(TTAK[1] ^ Mk16(TK[9 + j], TK[8 + j]));
+		TTAK[3] += _S_(TTAK[2] ^ Mk16(TK[13 + j], TK[12 + j]));
+		TTAK[4] += _S_(TTAK[3] ^ Mk16(TK[1 + j], TK[0 + j])) + i;
+	}
+}
+
+
+static void tkip_mixing_phase2(u8 *WEPSeed, const u8 *TK, const u16 *TTAK,
+			       u16 IV16)
+{
+	/* Make temporary area overlap WEP seed so that the final copy can be
+	 * avoided on little endian hosts. */
+	u16 *PPK = (u16 *) &WEPSeed[4];
+
+	/* Step 1 - make copy of TTAK and bring in TSC */
+	PPK[0] = TTAK[0];
+	PPK[1] = TTAK[1];
+	PPK[2] = TTAK[2];
+	PPK[3] = TTAK[3];
+	PPK[4] = TTAK[4];
+	PPK[5] = TTAK[4] + IV16;
+
+	/* Step 2 - 96-bit bijective mixing using S-box */
+	PPK[0] += _S_(PPK[5] ^ Mk16_le((u16 *) &TK[0]));
+	PPK[1] += _S_(PPK[0] ^ Mk16_le((u16 *) &TK[2]));
+	PPK[2] += _S_(PPK[1] ^ Mk16_le((u16 *) &TK[4]));
+	PPK[3] += _S_(PPK[2] ^ Mk16_le((u16 *) &TK[6]));
+	PPK[4] += _S_(PPK[3] ^ Mk16_le((u16 *) &TK[8]));
+	PPK[5] += _S_(PPK[4] ^ Mk16_le((u16 *) &TK[10]));
+
+	PPK[0] += RotR1(PPK[5] ^ Mk16_le((u16 *) &TK[12]));
+	PPK[1] += RotR1(PPK[0] ^ Mk16_le((u16 *) &TK[14]));
+	PPK[2] += RotR1(PPK[1]);
+	PPK[3] += RotR1(PPK[2]);
+	PPK[4] += RotR1(PPK[3]);
+	PPK[5] += RotR1(PPK[4]);
+
+	/* Step 3 - bring in last of TK bits, assign 24-bit WEP IV value
+	 * WEPSeed[0..2] is transmitted as WEP IV */
+	WEPSeed[0] = Hi8(IV16);
+	WEPSeed[1] = (Hi8(IV16) | 0x20) & 0x7F;
+	WEPSeed[2] = Lo8(IV16);
+	WEPSeed[3] = Lo8((PPK[5] ^ Mk16_le((u16 *) &TK[0])) >> 1);
+
+#ifdef __BIG_ENDIAN
+	{
+		int i;
+		for (i = 0; i < 6; i++)
+			PPK[i] = (PPK[i] << 8) | (PPK[i] >> 8);
+	}
+#endif
+}
+
+static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
+{
+	struct ieee80211_tkip_data *tkey = priv;
+	int len;
+	u8 rc4key[16], *pos, *icv;
+	struct ieee80211_hdr *hdr;
+	u32 crc;
+	struct scatterlist sg;
+
+	if (skb_headroom(skb) < 8 || skb_tailroom(skb) < 4 ||
+	    skb->len < hdr_len)
+		return -1;
+
+	hdr = (struct ieee80211_hdr *) skb->data;
+	if (!tkey->tx_phase1_done) {
+		tkip_mixing_phase1(tkey->tx_ttak, tkey->key, hdr->addr2,
+				   tkey->tx_iv32);
+		tkey->tx_phase1_done = 1;
+	}
+	tkip_mixing_phase2(rc4key, tkey->key, tkey->tx_ttak, tkey->tx_iv16);
+
+	len = skb->len - hdr_len;
+	pos = skb_push(skb, 8);
+	memmove(pos, pos + 8, hdr_len);
+	pos += hdr_len;
+	icv = skb_put(skb, 4);
+
+	*pos++ = rc4key[0];
+	*pos++ = rc4key[1];
+	*pos++ = rc4key[2];
+	*pos++ = (tkey->key_idx << 6) | (1 << 5) /* Ext IV included */;
+	*pos++ = tkey->tx_iv32 & 0xff;
+	*pos++ = (tkey->tx_iv32 >> 8) & 0xff;
+	*pos++ = (tkey->tx_iv32 >> 16) & 0xff;
+	*pos++ = (tkey->tx_iv32 >> 24) & 0xff;
+
+	crc = ~crc32_le(~0, pos, len);
+	icv[0] = crc;
+	icv[1] = crc >> 8;
+	icv[2] = crc >> 16;
+	icv[3] = crc >> 24;
+
+	crypto_cipher_setkey(tkey->tfm_arc4, rc4key, 16);
+	sg.page = virt_to_page(pos);
+	sg.offset = offset_in_page(pos);
+	sg.length = len + 4;
+	crypto_cipher_encrypt(tkey->tfm_arc4, &sg, &sg, len + 4);
+
+	tkey->tx_iv16++;
+	if (tkey->tx_iv16 == 0) {
+		tkey->tx_phase1_done = 0;
+		tkey->tx_iv32++;
+	}
+
+	return 0;
+}
+
+static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
+{
+	struct ieee80211_tkip_data *tkey = priv;
+	u8 rc4key[16];
+	u8 keyidx, *pos;
+	u32 iv32;
+	u16 iv16;
+	struct ieee80211_hdr *hdr;
+	u8 icv[4];
+	u32 crc;
+	struct scatterlist sg;
+	int plen;
+
+	if (skb->len < hdr_len + 8 + 4)
+		return -1;
+
+	hdr = (struct ieee80211_hdr *) skb->data;
+	pos = skb->data + hdr_len;
+	keyidx = pos[3];
+	if (!(keyidx & (1 << 5))) {
+		if (net_ratelimit()) {
+			printk(KERN_DEBUG "TKIP: received packet without ExtIV"
+			       " flag from " MAC_FMT "\n", MAC_ARG(hdr->addr2));
+		}
+		return -2;
+	}
+	keyidx >>= 6;
+	if (tkey->key_idx != keyidx) {
+		printk(KERN_DEBUG "TKIP: RX tkey->key_idx=%d frame "
+		       "keyidx=%d priv=%p\n", tkey->key_idx, keyidx, priv);
+		return -6;
+	}
+	if (!tkey->key_set) {
+		if (net_ratelimit()) {
+			printk(KERN_DEBUG "TKIP: received packet from " MAC_FMT
+			       " with keyid=%d that does not have a configured"
+			       " key\n", MAC_ARG(hdr->addr2), keyidx);
+		}
+		return -3;
+	}
+	iv16 = (pos[0] << 8) | pos[2];
+	iv32 = pos[4] | (pos[5] << 8) | (pos[6] << 16) | (pos[7] << 24);
+	pos += 8;
+
+	if (iv32 < tkey->rx_iv32 ||
+	    (iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) {
+		if (net_ratelimit()) {
+			printk(KERN_DEBUG "TKIP: replay detected: STA=" MAC_FMT
+			       " previous TSC %08x%04x received TSC "
+			       "%08x%04x\n", MAC_ARG(hdr->addr2),
+			       tkey->rx_iv32, tkey->rx_iv16, iv32, iv16);
+		}
+		tkey->dot11RSNAStatsTKIPReplays++;
+		return -4;
+	}
+
+	if (iv32 != tkey->rx_iv32 || !tkey->rx_phase1_done) {
+		tkip_mixing_phase1(tkey->rx_ttak, tkey->key, hdr->addr2, iv32);
+		tkey->rx_phase1_done = 1;
+	}
+	tkip_mixing_phase2(rc4key, tkey->key, tkey->rx_ttak, iv16);
+
+	plen = skb->len - hdr_len - 12;
+
+	crypto_cipher_setkey(tkey->tfm_arc4, rc4key, 16);
+	sg.page = virt_to_page(pos);
+	sg.offset = offset_in_page(pos);
+	sg.length = plen + 4;
+	crypto_cipher_decrypt(tkey->tfm_arc4, &sg, &sg, plen + 4);
+
+	crc = ~crc32_le(~0, pos, plen);
+	icv[0] = crc;
+	icv[1] = crc >> 8;
+	icv[2] = crc >> 16;
+	icv[3] = crc >> 24;
+	if (memcmp(icv, pos + plen, 4) != 0) {
+		if (iv32 != tkey->rx_iv32) {
+			/* Previously cached Phase1 result was already lost, so
+			 * it needs to be recalculated for the next packet. */
+			tkey->rx_phase1_done = 0;
+		}
+		if (net_ratelimit()) {
+			printk(KERN_DEBUG "TKIP: ICV error detected: STA="
+			       MAC_FMT "\n", MAC_ARG(hdr->addr2));
+		}
+		tkey->dot11RSNAStatsTKIPICVErrors++;
+		return -5;
+	}
+
+	/* Update real counters only after Michael MIC verification has
+	 * completed */
+	tkey->rx_iv32_new = iv32;
+	tkey->rx_iv16_new = iv16;
+
+	/* Remove IV and ICV */
+	memmove(skb->data + 8, skb->data, hdr_len);
+	skb_pull(skb, 8);
+	skb_trim(skb, skb->len - 4);
+
+	return keyidx;
+}
+
+
+static int michael_mic(struct ieee80211_tkip_data *tkey, u8 *key, u8 *hdr,
+		       u8 *data, size_t data_len, u8 *mic)
+{
+	struct scatterlist sg[2];
+
+	if (tkey->tfm_michael == NULL) {
+		printk(KERN_WARNING "michael_mic: tfm_michael == NULL\n");
+		return -1;
+	}
+	sg[0].page = virt_to_page(hdr);
+	sg[0].offset = offset_in_page(hdr);
+	sg[0].length = 16;
+
+	sg[1].page = virt_to_page(data);
+	sg[1].offset = offset_in_page(data);
+	sg[1].length = data_len;
+
+	crypto_digest_init(tkey->tfm_michael);
+	crypto_digest_setkey(tkey->tfm_michael, key, 8);
+	crypto_digest_update(tkey->tfm_michael, sg, 2);
+	crypto_digest_final(tkey->tfm_michael, mic);
+
+	return 0;
+}
+
+static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr)
+{
+	struct ieee80211_hdr *hdr11;
+
+	hdr11 = (struct ieee80211_hdr *) skb->data;
+	switch (le16_to_cpu(hdr11->frame_ctl) &
+		(IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
+	case IEEE80211_FCTL_TODS:
+		memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */
+		memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */
+		break;
+	case IEEE80211_FCTL_FROMDS:
+		memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */
+		memcpy(hdr + ETH_ALEN, hdr11->addr3, ETH_ALEN); /* SA */
+		break;
+	case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS:
+		memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */
+		memcpy(hdr + ETH_ALEN, hdr11->addr4, ETH_ALEN); /* SA */
+		break;
+	case 0:
+		memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */
+		memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */
+		break;
+	}
+
+	hdr[12] = 0; /* priority */
+	hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */
+}
+
+
+static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len, void *priv)
+{
+	struct ieee80211_tkip_data *tkey = priv;
+	u8 *pos;
+
+	if (skb_tailroom(skb) < 8 || skb->len < hdr_len) {
+		printk(KERN_DEBUG "Invalid packet for Michael MIC add "
+		       "(tailroom=%d hdr_len=%d skb->len=%d)\n",
+		       skb_tailroom(skb), hdr_len, skb->len);
+		return -1;
+	}
+
+	michael_mic_hdr(skb, tkey->tx_hdr);
+	pos = skb_put(skb, 8);
+	if (michael_mic(tkey, &tkey->key[16], tkey->tx_hdr,
+			skb->data + hdr_len, skb->len - 8 - hdr_len, pos))
+		return -1;
+
+	return 0;
+}
+
+
+#if WIRELESS_EXT >= 18
+static void ieee80211_michael_mic_failure(struct net_device *dev,
+				       struct ieee80211_hdr *hdr,
+				       int keyidx)
+{
+	union iwreq_data wrqu;
+	struct iw_michaelmicfailure ev;
+
+	/* TODO: needed parameters: count, keyid, key type, TSC */
+	memset(&ev, 0, sizeof(ev));
+	ev.flags = keyidx & IW_MICFAILURE_KEY_ID;
+	if (hdr->addr1[0] & 0x01)
+		ev.flags |= IW_MICFAILURE_GROUP;
+	else
+		ev.flags |= IW_MICFAILURE_PAIRWISE;
+	ev.src_addr.sa_family = ARPHRD_ETHER;
+	memcpy(ev.src_addr.sa_data, hdr->addr2, ETH_ALEN);
+	memset(&wrqu, 0, sizeof(wrqu));
+	wrqu.data.length = sizeof(ev);
+	wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu, (char *) &ev);
+}
+#elif WIRELESS_EXT >= 15
+static void ieee80211_michael_mic_failure(struct net_device *dev,
+				       struct ieee80211_hdr *hdr,
+				       int keyidx)
+{
+	union iwreq_data wrqu;
+	char buf[128];
+
+	/* TODO: needed parameters: count, keyid, key type, TSC */
+	sprintf(buf, "MLME-MICHAELMICFAILURE.indication(keyid=%d %scast addr="
+		MAC_FMT ")", keyidx, hdr->addr1[0] & 0x01 ? "broad" : "uni",
+		MAC_ARG(hdr->addr2));
+	memset(&wrqu, 0, sizeof(wrqu));
+	wrqu.data.length = strlen(buf);
+	wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf);
+}
+#else /* WIRELESS_EXT >= 15 */
+static inline void ieee80211_michael_mic_failure(struct net_device *dev,
+					      struct ieee80211_hdr *hdr,
+					      int keyidx)
+{
+}
+#endif /* WIRELESS_EXT >= 15 */
+
+
+static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx,
+				     int hdr_len, void *priv)
+{
+	struct ieee80211_tkip_data *tkey = priv;
+	u8 mic[8];
+
+	if (!tkey->key_set)
+		return -1;
+
+	michael_mic_hdr(skb, tkey->rx_hdr);
+	if (michael_mic(tkey, &tkey->key[24], tkey->rx_hdr,
+			skb->data + hdr_len, skb->len - 8 - hdr_len, mic))
+		return -1;
+	if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) {
+		struct ieee80211_hdr *hdr;
+		hdr = (struct ieee80211_hdr *) skb->data;
+		printk(KERN_DEBUG "%s: Michael MIC verification failed for "
+		       "MSDU from " MAC_FMT " keyidx=%d\n",
+		       skb->dev ? skb->dev->name : "N/A", MAC_ARG(hdr->addr2),
+		       keyidx);
+		if (skb->dev)
+			ieee80211_michael_mic_failure(skb->dev, hdr, keyidx);
+		tkey->dot11RSNAStatsTKIPLocalMICFailures++;
+		return -1;
+	}
+
+	/* Update TSC counters for RX now that the packet verification has
+	 * completed. */
+	tkey->rx_iv32 = tkey->rx_iv32_new;
+	tkey->rx_iv16 = tkey->rx_iv16_new;
+
+	skb_trim(skb, skb->len - 8);
+
+	return 0;
+}
+
+
+static int ieee80211_tkip_set_key(void *key, int len, u8 *seq, void *priv)
+{
+	struct ieee80211_tkip_data *tkey = priv;
+	int keyidx;
+	struct crypto_tfm *tfm = tkey->tfm_michael;
+	struct crypto_tfm *tfm2 = tkey->tfm_arc4;
+
+	keyidx = tkey->key_idx;
+	memset(tkey, 0, sizeof(*tkey));
+	tkey->key_idx = keyidx;
+	tkey->tfm_michael = tfm;
+	tkey->tfm_arc4 = tfm2;
+	if (len == TKIP_KEY_LEN) {
+		memcpy(tkey->key, key, TKIP_KEY_LEN);
+		tkey->key_set = 1;
+		tkey->tx_iv16 = 1; /* TSC is initialized to 1 */
+		if (seq) {
+			tkey->rx_iv32 = (seq[5] << 24) | (seq[4] << 16) |
+				(seq[3] << 8) | seq[2];
+			tkey->rx_iv16 = (seq[1] << 8) | seq[0];
+		}
+	} else if (len == 0)
+		tkey->key_set = 0;
+	else
+		return -1;
+
+	return 0;
+}
+
+
+static int ieee80211_tkip_get_key(void *key, int len, u8 *seq, void *priv)
+{
+	struct ieee80211_tkip_data *tkey = priv;
+
+	if (len < TKIP_KEY_LEN)
+		return -1;
+
+	if (!tkey->key_set)
+		return 0;
+	memcpy(key, tkey->key, TKIP_KEY_LEN);
+
+	if (seq) {
+		/* Return the sequence number of the last transmitted frame. */
+		u16 iv16 = tkey->tx_iv16;
+		u32 iv32 = tkey->tx_iv32;
+		if (iv16 == 0)
+			iv32--;
+		iv16--;
+		seq[0] = iv16;
+		seq[1] = iv16 >> 8;
+		seq[2] = iv32;
+		seq[3] = iv32 >> 8;
+		seq[4] = iv32 >> 16;
+		seq[5] = iv32 >> 24;
+	}
+
+	return TKIP_KEY_LEN;
+}
+
+
+static char * ieee80211_tkip_print_stats(char *p, void *priv)
+{
+	struct ieee80211_tkip_data *tkip = priv;
+	p += sprintf(p, "key[%d] alg=TKIP key_set=%d "
+		     "tx_pn=%02x%02x%02x%02x%02x%02x "
+		     "rx_pn=%02x%02x%02x%02x%02x%02x "
+		     "replays=%d icv_errors=%d local_mic_failures=%d\n",
+		     tkip->key_idx, tkip->key_set,
+		     (tkip->tx_iv32 >> 24) & 0xff,
+		     (tkip->tx_iv32 >> 16) & 0xff,
+		     (tkip->tx_iv32 >> 8) & 0xff,
+		     tkip->tx_iv32 & 0xff,
+		     (tkip->tx_iv16 >> 8) & 0xff,
+		     tkip->tx_iv16 & 0xff,
+		     (tkip->rx_iv32 >> 24) & 0xff,
+		     (tkip->rx_iv32 >> 16) & 0xff,
+		     (tkip->rx_iv32 >> 8) & 0xff,
+		     tkip->rx_iv32 & 0xff,
+		     (tkip->rx_iv16 >> 8) & 0xff,
+		     tkip->rx_iv16 & 0xff,
+		     tkip->dot11RSNAStatsTKIPReplays,
+		     tkip->dot11RSNAStatsTKIPICVErrors,
+		     tkip->dot11RSNAStatsTKIPLocalMICFailures);
+	return p;
+}
+
+
+static struct ieee80211_crypto_ops ieee80211_crypt_tkip = {
+	.name			= "TKIP",
+	.init			= ieee80211_tkip_init,
+	.deinit			= ieee80211_tkip_deinit,
+	.encrypt_mpdu		= ieee80211_tkip_encrypt,
+	.decrypt_mpdu		= ieee80211_tkip_decrypt,
+	.encrypt_msdu		= ieee80211_michael_mic_add,
+	.decrypt_msdu		= ieee80211_michael_mic_verify,
+	.set_key		= ieee80211_tkip_set_key,
+	.get_key		= ieee80211_tkip_get_key,
+	.print_stats		= ieee80211_tkip_print_stats,
+	.extra_prefix_len	= 4 + 4, /* IV + ExtIV */
+	.extra_postfix_len	= 8 + 4, /* MIC + ICV */
+	.owner		        = THIS_MODULE,
+};
+
+
+static int __init ieee80211_crypto_tkip_init(void)
+{
+	return ieee80211_register_crypto_ops(&ieee80211_crypt_tkip);
+}
+
+
+static void __exit ieee80211_crypto_tkip_exit(void)
+{
+	ieee80211_unregister_crypto_ops(&ieee80211_crypt_tkip);
+}
+
+
+module_init(ieee80211_crypto_tkip_init);
+module_exit(ieee80211_crypto_tkip_exit);
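
As a worked summary of the TKIP per-MPDU expansion declared above
(extra_prefix_len = 4 + 4, extra_postfix_len = 8 + 4), derived directly from
ieee80211_tkip_encrypt(), ieee80211_michael_mic_add() and
ieee80211_tkip_decrypt():

	/*
	 * prefix (8 bytes), written by ieee80211_tkip_encrypt():
	 *   pos[0] = TSC1                  (rc4key[0] = Hi8(iv16))
	 *   pos[1] = (TSC1 | 0x20) & 0x7f  (WEP seed dummy byte, rc4key[1])
	 *   pos[2] = TSC0                  (rc4key[2] = Lo8(iv16))
	 *   pos[3] = key_idx << 6 | 0x20   (bit 5 = Ext IV present)
	 *   pos[4..7] = iv32, least significant byte first
	 *
	 * ieee80211_tkip_decrypt() rebuilds the counters the same way:
	 *   iv16 = (pos[0] << 8) | pos[2], iv32 from pos[4..7].
	 *
	 * postfix: 8-byte Michael MIC (added per MSDU by
	 * ieee80211_michael_mic_add()) followed by the 4-byte WEP-style ICV
	 * (added per MPDU by ieee80211_tkip_encrypt()).
	 */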
diff --git a/net/ieee80211/ieee80211_crypt_wep.c b/net/ieee80211/ieee80211_crypt_wep.c
new file mode 100644
--- /dev/null
+++ b/net/ieee80211/ieee80211_crypt_wep.c
@@ -0,0 +1,272 @@
+/*
+ * Host AP crypt: host-based WEP encryption implementation for Host AP driver
+ *
+ * Copyright (c) 2002-2004, Jouni Malinen <jkmaline@cc.hut.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation. See README and COPYING for
+ * more details.
+ */
+
+#include <linux/config.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/skbuff.h>
+#include <asm/string.h>
+
+#include <net/ieee80211.h>
+
+
+#include <linux/crypto.h>
+#include <asm/scatterlist.h>
+#include <linux/crc32.h>
+
+MODULE_AUTHOR("Jouni Malinen");
+MODULE_DESCRIPTION("Host AP crypt: WEP");
+MODULE_LICENSE("GPL");
+
+
+struct prism2_wep_data {
+	u32 iv;
+#define WEP_KEY_LEN 13
+	u8 key[WEP_KEY_LEN + 1];
+	u8 key_len;
+	u8 key_idx;
+	struct crypto_tfm *tfm;
+};
+
+
+static void * prism2_wep_init(int keyidx)
+{
+	struct prism2_wep_data *priv;
+
+	priv = kmalloc(sizeof(*priv), GFP_ATOMIC);
+	if (priv == NULL)
+		goto fail;
+	memset(priv, 0, sizeof(*priv));
+	priv->key_idx = keyidx;
+
+	priv->tfm = crypto_alloc_tfm("arc4", 0);
+	if (priv->tfm == NULL) {
+		printk(KERN_DEBUG "ieee80211_crypt_wep: could not allocate "
+		       "crypto API arc4\n");
+		goto fail;
+	}
+
+	/* start WEP IV from a random value */
+	get_random_bytes(&priv->iv, 4);
+
+	return priv;
+
+fail:
+	if (priv) {
+		if (priv->tfm)
+			crypto_free_tfm(priv->tfm);
+		kfree(priv);
+	}
+	return NULL;
+}
+
+
+static void prism2_wep_deinit(void *priv)
+{
+	struct prism2_wep_data *_priv = priv;
+	if (_priv && _priv->tfm)
+		crypto_free_tfm(_priv->tfm);
+	kfree(priv);
+}
+
+
+/* Perform WEP encryption on given skb that has at least 4 bytes of headroom
+ * for IV and 4 bytes of tailroom for ICV. Both IV and ICV will be transmitted,
+ * so the payload length increases by 8 bytes.
+ *
+ * WEP frame payload: IV + TX key idx, RC4(data), ICV = RC4(CRC32(data))
+ */
+static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
+{
+	struct prism2_wep_data *wep = priv;
+	u32 crc, klen, len;
+	u8 key[WEP_KEY_LEN + 3];
+	u8 *pos, *icv;
+	struct scatterlist sg;
+
+	if (skb_headroom(skb) < 4 || skb_tailroom(skb) < 4 ||
+	    skb->len < hdr_len)
+		return -1;
+
+	len = skb->len - hdr_len;
+	pos = skb_push(skb, 4);
+	memmove(pos, pos + 4, hdr_len);
+	pos += hdr_len;
+
+	klen = 3 + wep->key_len;
+
+	wep->iv++;
+
+	/* Fluhrer, Mantin, and Shamir have reported weaknesses in the key
+	 * scheduling algorithm of RC4. At least IVs (KeyByte + 3, 0xff, N)
+	 * can be used to speed up attacks, so avoid using them. */
+	if ((wep->iv & 0xff00) == 0xff00) {
+		u8 B = (wep->iv >> 16) & 0xff;
+		if (B >= 3 && B < klen)
+			wep->iv += 0x0100;
+	}
+
+	/* Prepend 24-bit IV to RC4 key and TX frame */
+	*pos++ = key[0] = (wep->iv >> 16) & 0xff;
+	*pos++ = key[1] = (wep->iv >> 8) & 0xff;
+	*pos++ = key[2] = wep->iv & 0xff;
+	*pos++ = wep->key_idx << 6;
+
+	/* Copy rest of the WEP key (the secret part) */
+	memcpy(key + 3, wep->key, wep->key_len);
+
+	/* Append little-endian CRC32 and encrypt it to produce ICV */
+	crc = ~crc32_le(~0, pos, len);
+	icv = skb_put(skb, 4);
+	icv[0] = crc;
+	icv[1] = crc >> 8;
+	icv[2] = crc >> 16;
+	icv[3] = crc >> 24;
+
+	crypto_cipher_setkey(wep->tfm, key, klen);
+	sg.page = virt_to_page(pos);
+	sg.offset = offset_in_page(pos);
+	sg.length = len + 4;
+	crypto_cipher_encrypt(wep->tfm, &sg, &sg, len + 4);
+
+	return 0;
+}
+
+
+/* Perform WEP decryption on given buffer. Buffer includes whole WEP part of
+ * the frame: IV (4 bytes), encrypted payload (including SNAP header),
+ * ICV (4 bytes). len includes both IV and ICV.
+ *
+ * Returns 0 if frame was decrypted successfully and ICV was correct and -1 on
+ * failure. If frame is OK, IV and ICV will be removed.
+ */
+static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
+{
+	struct prism2_wep_data *wep = priv;
+	u32 crc, klen, plen;
+	u8 key[WEP_KEY_LEN + 3];
+	u8 keyidx, *pos, icv[4];
+	struct scatterlist sg;
+
+	if (skb->len < hdr_len + 8)
+		return -1;
+
+	pos = skb->data + hdr_len;
+	key[0] = *pos++;
+	key[1] = *pos++;
+	key[2] = *pos++;
+	keyidx = *pos++ >> 6;
+	if (keyidx != wep->key_idx)
+		return -1;
+
+	klen = 3 + wep->key_len;
+
+	/* Copy rest of the WEP key (the secret part) */
+	memcpy(key + 3, wep->key, wep->key_len);
+
+	/* Apply RC4 to data and compute CRC32 over decrypted data */
+	plen = skb->len - hdr_len - 8;
+
+	crypto_cipher_setkey(wep->tfm, key, klen);
+	sg.page = virt_to_page(pos);
+	sg.offset = offset_in_page(pos);
+	sg.length = plen + 4;
+	crypto_cipher_decrypt(wep->tfm, &sg, &sg, plen + 4);
+
+	crc = ~crc32_le(~0, pos, plen);
+	icv[0] = crc;
+	icv[1] = crc >> 8;
+	icv[2] = crc >> 16;
+	icv[3] = crc >> 24;
+	if (memcmp(icv, pos + plen, 4) != 0) {
+		/* ICV mismatch - drop frame */
+		return -2;
+	}
+
+	/* Remove IV and ICV */
+	memmove(skb->data + 4, skb->data, hdr_len);
+	skb_pull(skb, 4);
+	skb_trim(skb, skb->len - 4);
+
+	return 0;
+}
+
+
+static int prism2_wep_set_key(void *key, int len, u8 *seq, void *priv)
+{
+	struct prism2_wep_data *wep = priv;
+
+	if (len < 0 || len > WEP_KEY_LEN)
+		return -1;
+
+	memcpy(wep->key, key, len);
+	wep->key_len = len;
+
+	return 0;
+}
+
+
+static int prism2_wep_get_key(void *key, int len, u8 *seq, void *priv)
+{
+	struct prism2_wep_data *wep = priv;
+
+	if (len < wep->key_len)
+		return -1;
+
+	memcpy(key, wep->key, wep->key_len);
+
+	return wep->key_len;
+}
+
+
+static char * prism2_wep_print_stats(char *p, void *priv)
+{
+	struct prism2_wep_data *wep = priv;
+	p += sprintf(p, "key[%d] alg=WEP len=%d\n",
+		     wep->key_idx, wep->key_len);
+	return p;
+}
+
+
+static struct ieee80211_crypto_ops ieee80211_crypt_wep = {
+	.name			= "WEP",
+	.init			= prism2_wep_init,
+	.deinit			= prism2_wep_deinit,
+	.encrypt_mpdu		= prism2_wep_encrypt,
+	.decrypt_mpdu		= prism2_wep_decrypt,
+	.encrypt_msdu		= NULL,
+	.decrypt_msdu		= NULL,
+	.set_key		= prism2_wep_set_key,
+	.get_key		= prism2_wep_get_key,
+	.print_stats		= prism2_wep_print_stats,
+	.extra_prefix_len	= 4, /* IV */
+	.extra_postfix_len	= 4, /* ICV */
+	.owner			= THIS_MODULE,
+};
+
+
+static int __init ieee80211_crypto_wep_init(void)
+{
+	return ieee80211_register_crypto_ops(&ieee80211_crypt_wep);
+}
+
+
+static void __exit ieee80211_crypto_wep_exit(void)
+{
+	ieee80211_unregister_crypto_ops(&ieee80211_crypt_wep);
+}
+
+
+module_init(ieee80211_crypto_wep_init);
+module_exit(ieee80211_crypto_wep_exit);
diff --git a/net/ieee80211/ieee80211_module.c b/net/ieee80211/ieee80211_module.c
new file mode 100644
--- /dev/null
+++ b/net/ieee80211/ieee80211_module.c
@@ -0,0 +1,273 @@
+/*******************************************************************************
+
+  Copyright(c) 2004 Intel Corporation. All rights reserved.
+
+  Portions of this file are based on the WEP enablement code provided by the
+  Host AP project hostap-drivers v0.1.3
+  Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+  <jkmaline@cc.hut.fi>
+  Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+
+  The full GNU General Public License is included in this distribution in the
+  file called LICENSE.
+
+  Contact Information:
+  James P. Ketrenos <ipw2100-admin@linux.intel.com>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/compiler.h>
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/in6.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/proc_fs.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/tcp.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/wireless.h>
+#include <linux/etherdevice.h>
+#include <asm/uaccess.h>
+#include <net/arp.h>
+
+#include <net/ieee80211.h>
+
+MODULE_DESCRIPTION("802.11 data/management/control stack");
+MODULE_AUTHOR("Copyright (C) 2004 Intel Corporation <jketreno@linux.intel.com>");
+MODULE_LICENSE("GPL");
+
+#define DRV_NAME "ieee80211"
+
+static inline int ieee80211_networks_allocate(struct ieee80211_device *ieee)
+{
+	if (ieee->networks)
+		return 0;
+
+	ieee->networks = kmalloc(
+		MAX_NETWORK_COUNT * sizeof(struct ieee80211_network),
+		GFP_KERNEL);
+	if (!ieee->networks) {
+		printk(KERN_WARNING "%s: Out of memory allocating beacons\n",
+		       ieee->dev->name);
+		return -ENOMEM;
+	}
+
+	memset(ieee->networks, 0,
+	       MAX_NETWORK_COUNT * sizeof(struct ieee80211_network));
+
+	return 0;
+}
+
+static inline void ieee80211_networks_free(struct ieee80211_device *ieee)
+{
+	if (!ieee->networks)
+		return;
+	kfree(ieee->networks);
+	ieee->networks = NULL;
+}
+
+static inline void ieee80211_networks_initialize(struct ieee80211_device *ieee)
+{
+	int i;
+
+	INIT_LIST_HEAD(&ieee->network_free_list);
+	INIT_LIST_HEAD(&ieee->network_list);
+	for (i = 0; i < MAX_NETWORK_COUNT; i++)
+		list_add_tail(&ieee->networks[i].list, &ieee->network_free_list);
+}
+
+
+struct net_device *alloc_ieee80211(int sizeof_priv)
+{
+	struct ieee80211_device *ieee;
+	struct net_device *dev;
+	int err;
+
+	IEEE80211_DEBUG_INFO("Initializing...\n");
+
+	dev = alloc_etherdev(sizeof(struct ieee80211_device) + sizeof_priv);
+	if (!dev) {
+		IEEE80211_ERROR("Unable to network device.\n");
+		goto failed;
+	}
+	ieee = netdev_priv(dev);
+	dev->hard_start_xmit = ieee80211_xmit;
+
+	ieee->dev = dev;
+
+	err = ieee80211_networks_allocate(ieee);
+	if (err) {
+		IEEE80211_ERROR("Unable to allocate beacon storage: %d\n",
+				err);
+		goto failed;
+	}
+	ieee80211_networks_initialize(ieee);
+
+	/* Default fragmentation threshold is maximum payload size */
+	ieee->fts = DEFAULT_FTS;
+	ieee->scan_age = DEFAULT_MAX_SCAN_AGE;
+	ieee->open_wep = 1;
+
+	/* Default to enabling full open WEP with host based encrypt/decrypt */
+	ieee->host_encrypt = 1;
+	ieee->host_decrypt = 1;
+	ieee->ieee802_1x = 1; /* Default to supporting 802.1x */
+
+	INIT_LIST_HEAD(&ieee->crypt_deinit_list);
+	init_timer(&ieee->crypt_deinit_timer);
+	ieee->crypt_deinit_timer.data = (unsigned long)ieee;
+	ieee->crypt_deinit_timer.function = ieee80211_crypt_deinit_handler;
+
+	spin_lock_init(&ieee->lock);
+
+ 	ieee->wpa_enabled = 0;
+ 	ieee->tkip_countermeasures = 0;
+ 	ieee->drop_unencrypted = 0;
+ 	ieee->privacy_invoked = 0;
+ 	ieee->ieee802_1x = 1;
+
+	return dev;
+
+ failed:
+	if (dev)
+		free_netdev(dev);
+	return NULL;
+}
+
+
+void free_ieee80211(struct net_device *dev)
+{
+	struct ieee80211_device *ieee = netdev_priv(dev);
+
+	int i;
+
+	del_timer_sync(&ieee->crypt_deinit_timer);
+	ieee80211_crypt_deinit_entries(ieee, 1);
+
+	for (i = 0; i < WEP_KEYS; i++) {
+		struct ieee80211_crypt_data *crypt = ieee->crypt[i];
+		if (crypt) {
+			if (crypt->ops) {
+				crypt->ops->deinit(crypt->priv);
+				module_put(crypt->ops->owner);
+			}
+			kfree(crypt);
+			ieee->crypt[i] = NULL;
+		}
+	}
+
+	ieee80211_networks_free(ieee);
+	free_netdev(dev);
+}
+
+#ifdef CONFIG_IEEE80211_DEBUG
+
+static int debug = 0;
+u32 ieee80211_debug_level = 0;
+struct proc_dir_entry *ieee80211_proc = NULL;
+
+static int show_debug_level(char *page, char **start, off_t offset,
+			    int count, int *eof, void *data)
+{
+	return snprintf(page, count, "0x%08X\n", ieee80211_debug_level);
+}
+
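+/* Write handler for /proc/net/ieee80211/debug_level.  The new mask may be
+ * given in hex (with an optional "0x"/"x" prefix) or in decimal, e.g.:
+ *
+ *	echo 0x00000001 > /proc/net/ieee80211/debug_level
+ */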
+static int store_debug_level(struct file *file, const char __user *buffer,
+			     unsigned long count, void *data)
+{
+	char buf[] = "0x00000000";
+	char *p = (char *)buf;
+	unsigned long val;
+
+	if (count > sizeof(buf) - 1)
+		count = sizeof(buf) - 1;
+
+	if (copy_from_user(buf, buffer, count))
+		return count;
+	buf[count] = 0;
+	/*
+	 * Parse the value by hand: accept an optional "0x"/"x" prefix for
+	 * hex, otherwise treat the string as decimal.
+	 */
+	if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
+		p++;
+		if (p[0] == 'x' || p[0] == 'X')
+			p++;
+		val = simple_strtoul(p, &p, 16);
+	} else
+		val = simple_strtoul(p, &p, 10);
+	if (p == buf)
+		printk(KERN_INFO DRV_NAME
+		       ": %s is not in hex or decimal form.\n", buf);
+	else
+		ieee80211_debug_level = val;
+
+	return strlen(buf);
+}
+
+static int __init ieee80211_init(void)
+{
+	struct proc_dir_entry *e;
+
+	ieee80211_debug_level = debug;
+	ieee80211_proc = create_proc_entry(DRV_NAME, S_IFDIR, proc_net);
+	if (ieee80211_proc == NULL) {
+		IEEE80211_ERROR("Unable to create " DRV_NAME
+				" proc directory\n");
+		return -EIO;
+	}
+	e = create_proc_entry("debug_level", S_IFREG | S_IRUGO | S_IWUSR,
+			      ieee80211_proc);
+	if (!e) {
+		remove_proc_entry(DRV_NAME, proc_net);
+		ieee80211_proc = NULL;
+		return -EIO;
+	}
+	e->read_proc = show_debug_level;
+	e->write_proc = store_debug_level;
+	e->data = NULL;
+
+	return 0;
+}
+
+static void __exit ieee80211_exit(void)
+{
+	if (ieee80211_proc) {
+		remove_proc_entry("debug_level", ieee80211_proc);
+		remove_proc_entry(DRV_NAME, proc_net);
+		ieee80211_proc = NULL;
+	}
+}
+
+#include <linux/moduleparam.h>
+module_param(debug, int, 0444);
+MODULE_PARM_DESC(debug, "debug output mask");
+
+
+module_exit(ieee80211_exit);
+module_init(ieee80211_init);
+#endif
+
+EXPORT_SYMBOL(alloc_ieee80211);
+EXPORT_SYMBOL(free_ieee80211);
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c
new file mode 100644
--- /dev/null
+++ b/net/ieee80211/ieee80211_rx.c
@@ -0,0 +1,1205 @@
+/*
+ * Original code based Host AP (software wireless LAN access point) driver
+ * for Intersil Prism2/2.5/3 - hostap.o module, common routines
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ * Copyright (c) 2004, Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation. See README and COPYING for
+ * more details.
+ */
+
+#include <linux/compiler.h>
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/in6.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/proc_fs.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/tcp.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/wireless.h>
+#include <linux/etherdevice.h>
+#include <asm/uaccess.h>
+#include <linux/ctype.h>
+
+#include <net/ieee80211.h>
+
+static inline void ieee80211_monitor_rx(struct ieee80211_device *ieee,
+					struct sk_buff *skb,
+					struct ieee80211_rx_stats *rx_stats)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	u16 fc = le16_to_cpu(hdr->frame_ctl);
+
+	skb->dev = ieee->dev;
+	skb->mac.raw = skb->data;
+	skb_pull(skb, ieee80211_get_hdrlen(fc));
+	skb->pkt_type = PACKET_OTHERHOST;
+	skb->protocol = __constant_htons(ETH_P_80211_RAW);
+	memset(skb->cb, 0, sizeof(skb->cb));
+	netif_rx(skb);
+}
+
+
+/* Called only as a tasklet (software IRQ) */
+static struct ieee80211_frag_entry *
+ieee80211_frag_cache_find(struct ieee80211_device *ieee, unsigned int seq,
+			  unsigned int frag, u8 *src, u8 *dst)
+{
+	struct ieee80211_frag_entry *entry;
+	int i;
+
+	for (i = 0; i < IEEE80211_FRAG_CACHE_LEN; i++) {
+		entry = &ieee->frag_cache[i];
+		if (entry->skb != NULL &&
+		    time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
+			IEEE80211_DEBUG_FRAG(
+				"expiring fragment cache entry "
+				"seq=%u last_frag=%u\n",
+				entry->seq, entry->last_frag);
+			dev_kfree_skb_any(entry->skb);
+			entry->skb = NULL;
+		}
+
+		if (entry->skb != NULL && entry->seq == seq &&
+		    (entry->last_frag + 1 == frag || frag == -1) &&
+		    memcmp(entry->src_addr, src, ETH_ALEN) == 0 &&
+		    memcmp(entry->dst_addr, dst, ETH_ALEN) == 0)
+			return entry;
+	}
+
+	return NULL;
+}
+
+/* Called only as a tasklet (software IRQ) */
+static struct sk_buff *
+ieee80211_frag_cache_get(struct ieee80211_device *ieee,
+			 struct ieee80211_hdr *hdr)
+{
+	struct sk_buff *skb = NULL;
+	u16 sc;
+	unsigned int frag, seq;
+	struct ieee80211_frag_entry *entry;
+
+	sc = le16_to_cpu(hdr->seq_ctl);
+	frag = WLAN_GET_SEQ_FRAG(sc);
+	seq = WLAN_GET_SEQ_SEQ(sc);
+
+	if (frag == 0) {
+		/* Reserve enough space to fit maximum frame length */
+		skb = dev_alloc_skb(ieee->dev->mtu +
+				    sizeof(struct ieee80211_hdr) +
+				    8 /* LLC */ +
+				    2 /* alignment */ +
+				    8 /* WEP */ + ETH_ALEN /* WDS */);
+		if (skb == NULL)
+			return NULL;
+
+		entry = &ieee->frag_cache[ieee->frag_next_idx];
+		ieee->frag_next_idx++;
+		if (ieee->frag_next_idx >= IEEE80211_FRAG_CACHE_LEN)
+			ieee->frag_next_idx = 0;
+
+		if (entry->skb != NULL)
+			dev_kfree_skb_any(entry->skb);
+
+		entry->first_frag_time = jiffies;
+		entry->seq = seq;
+		entry->last_frag = frag;
+		entry->skb = skb;
+		memcpy(entry->src_addr, hdr->addr2, ETH_ALEN);
+		memcpy(entry->dst_addr, hdr->addr1, ETH_ALEN);
+	} else {
+		/* received a fragment of a frame for which the head fragment
+		 * should have already been received */
+		entry = ieee80211_frag_cache_find(ieee, seq, frag, hdr->addr2,
+						  hdr->addr1);
+		if (entry != NULL) {
+			entry->last_frag = frag;
+			skb = entry->skb;
+		}
+	}
+
+	return skb;
+}
+
+
+/* Called only as a tasklet (software IRQ) */
+static int ieee80211_frag_cache_invalidate(struct ieee80211_device *ieee,
+					   struct ieee80211_hdr *hdr)
+{
+	u16 sc;
+	unsigned int seq;
+	struct ieee80211_frag_entry *entry;
+
+	sc = le16_to_cpu(hdr->seq_ctl);
+	seq = WLAN_GET_SEQ_SEQ(sc);
+
+	entry = ieee80211_frag_cache_find(ieee, seq, -1, hdr->addr2,
+					  hdr->addr1);
+
+	if (entry == NULL) {
+		IEEE80211_DEBUG_FRAG(
+			"could not invalidate fragment cache "
+			"entry (seq=%u)\n", seq);
+		return -1;
+	}
+
+	entry->skb = NULL;
+	return 0;
+}
+
+
+#ifdef NOT_YET
+/* ieee80211_rx_frame_mgmt
+ *
+ * Responsible for handling management control frames
+ *
+ * Called by ieee80211_rx */
+static inline int
+ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb,
+			struct ieee80211_rx_stats *rx_stats, u16 type,
+			u16 stype)
+{
+	if (ieee->iw_mode == IW_MODE_MASTER) {
+		printk(KERN_DEBUG "%s: Master mode not yet suppported.\n",
+		       ieee->dev->name);
+		return 0;
+/*
+  hostap_update_sta_ps(ieee, (struct hostap_ieee80211_hdr *)
+  skb->data);*/
+	}
+
+	if (ieee->hostapd && type == WLAN_FC_TYPE_MGMT) {
+		if (stype == WLAN_FC_STYPE_BEACON &&
+		    ieee->iw_mode == IW_MODE_MASTER) {
+			struct sk_buff *skb2;
+			/* Process beacon frames also in kernel driver to
+			 * update STA(AP) table statistics */
+			skb2 = skb_clone(skb, GFP_ATOMIC);
+			if (skb2)
+				hostap_rx(skb2->dev, skb2, rx_stats);
+		}
+
+		/* send management frames to the user space daemon for
+		 * processing */
+		ieee->apdevstats.rx_packets++;
+		ieee->apdevstats.rx_bytes += skb->len;
+		prism2_rx_80211(ieee->apdev, skb, rx_stats, PRISM2_RX_MGMT);
+		return 0;
+	}
+
+	    if (ieee->iw_mode == IW_MODE_MASTER) {
+		if (type != WLAN_FC_TYPE_MGMT && type != WLAN_FC_TYPE_CTRL) {
+			printk(KERN_DEBUG "%s: unknown management frame "
+			       "(type=0x%02x, stype=0x%02x) dropped\n",
+			       skb->dev->name, type, stype);
+			return -1;
+		}
+
+		hostap_rx(skb->dev, skb, rx_stats);
+		return 0;
+	}
+
+	printk(KERN_DEBUG "%s: hostap_rx_frame_mgmt: management frame "
+	       "received in non-Host AP mode\n", skb->dev->name);
+	return -1;
+}
+#endif
+
+
+/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
+/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
+static unsigned char rfc1042_header[] =
+{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
+/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
+static unsigned char bridge_tunnel_header[] =
+{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
+/* No encapsulation header if EtherType < 0x600 (=length) */
+
+/* Called by ieee80211_rx_frame_decrypt */
+static int ieee80211_is_eapol_frame(struct ieee80211_device *ieee,
+				    struct sk_buff *skb)
+{
+	struct net_device *dev = ieee->dev;
+	u16 fc, ethertype;
+	struct ieee80211_hdr *hdr;
+	u8 *pos;
+
+	if (skb->len < 24)
+		return 0;
+
+	hdr = (struct ieee80211_hdr *) skb->data;
+	fc = le16_to_cpu(hdr->frame_ctl);
+
+	/* check that the frame is a unicast frame addressed to us */
+	if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
+	    IEEE80211_FCTL_TODS &&
+	    memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0 &&
+	    memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN) == 0) {
+		/* ToDS frame with our own address as both BSSID and DA */
+	} else if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
+		   IEEE80211_FCTL_FROMDS &&
+		   memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0) {
+		/* FromDS frame with own addr as DA */
+	} else
+		return 0;
+
+	if (skb->len < 24 + 8)
+		return 0;
+
+	/* check for port access entity Ethernet type */
+	pos = skb->data + 24;
+	ethertype = (pos[6] << 8) | pos[7];
+	if (ethertype == ETH_P_PAE)
+		return 1;
+
+	return 0;
+}
+
+/* Called only as a tasklet (software IRQ), by ieee80211_rx */
+static inline int
+ieee80211_rx_frame_decrypt(struct ieee80211_device* ieee, struct sk_buff *skb,
+			   struct ieee80211_crypt_data *crypt)
+{
+	struct ieee80211_hdr *hdr;
+	int res, hdrlen;
+
+	if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL)
+		return 0;
+
+	hdr = (struct ieee80211_hdr *) skb->data;
+	hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
+
+#ifdef CONFIG_IEEE80211_CRYPT_TKIP
+	if (ieee->tkip_countermeasures &&
+	    strcmp(crypt->ops->name, "TKIP") == 0) {
+		if (net_ratelimit()) {
+			printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
+			       "received packet from " MAC_FMT "\n",
+			       ieee->dev->name, MAC_ARG(hdr->addr2));
+		}
+		return -1;
+	}
+#endif
+
+	atomic_inc(&crypt->refcnt);
+	res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv);
+	atomic_dec(&crypt->refcnt);
+	if (res < 0) {
+		IEEE80211_DEBUG_DROP(
+			"decryption failed (SA=" MAC_FMT
+			") res=%d\n", MAC_ARG(hdr->addr2), res);
+		if (res == -2)
+			IEEE80211_DEBUG_DROP("Decryption failed ICV "
+					     "mismatch (key %d)\n",
+					     skb->data[hdrlen + 3] >> 6);
+		ieee->ieee_stats.rx_discards_undecryptable++;
+		return -1;
+	}
+
+	return res;
+}
+
+
+/* Called only as a tasklet (software IRQ), by ieee80211_rx */
+static inline int
+ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device* ieee, struct sk_buff *skb,
+			     int keyidx, struct ieee80211_crypt_data *crypt)
+{
+	struct ieee80211_hdr *hdr;
+	int res, hdrlen;
+
+	if (crypt == NULL || crypt->ops->decrypt_msdu == NULL)
+		return 0;
+
+	hdr = (struct ieee80211_hdr *) skb->data;
+	hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
+
+	atomic_inc(&crypt->refcnt);
+	res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv);
+	atomic_dec(&crypt->refcnt);
+	if (res < 0) {
+		printk(KERN_DEBUG "%s: MSDU decryption/MIC verification failed"
+		       " (SA=" MAC_FMT " keyidx=%d)\n",
+		       ieee->dev->name, MAC_ARG(hdr->addr2), keyidx);
+		return -1;
+	}
+
+	return 0;
+}
+
+
+/* All received frames are sent to this function. @skb contains the frame in
+ * IEEE 802.11 format, i.e., in the format it was sent over the air.
+ * This function is called only as a tasklet (software IRQ). */
+int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
+		 struct ieee80211_rx_stats *rx_stats)
+{
+	struct net_device *dev = ieee->dev;
+	struct ieee80211_hdr *hdr;
+	size_t hdrlen;
+	u16 fc, type, stype, sc;
+	struct net_device_stats *stats;
+	unsigned int frag;
+	u8 *payload;
+	u16 ethertype;
+#ifdef NOT_YET
+	struct net_device *wds = NULL;
+	struct sk_buff *skb2 = NULL;
+	int frame_authorized = 0;
+	int from_assoc_ap = 0;
+	void *sta = NULL;
+#endif
+	u8 dst[ETH_ALEN];
+	u8 src[ETH_ALEN];
+	struct ieee80211_crypt_data *crypt = NULL;
+	int keyidx = 0;
+
+	hdr = (struct ieee80211_hdr *)skb->data;
+	stats = &ieee->stats;
+
+	if (skb->len < 10) {
+		printk(KERN_INFO "%s: SKB length < 10\n",
+		       dev->name);
+		goto rx_dropped;
+	}
+
+	fc = le16_to_cpu(hdr->frame_ctl);
+	type = WLAN_FC_GET_TYPE(fc);
+	stype = WLAN_FC_GET_STYPE(fc);
+	sc = le16_to_cpu(hdr->seq_ctl);
+	frag = WLAN_GET_SEQ_FRAG(sc);
+	hdrlen = ieee80211_get_hdrlen(fc);
+
+#ifdef NOT_YET
+#if WIRELESS_EXT > 15
+	/* Put this code here so that we avoid duplicating it in all
+	 * Rx paths. - Jean II */
+#ifdef IW_WIRELESS_SPY		/* defined in iw_handler.h */
+	/* If spy monitoring on */
+	if (iface->spy_data.spy_number > 0) {
+		struct iw_quality wstats;
+		wstats.level = rx_stats->signal;
+		wstats.noise = rx_stats->noise;
+		wstats.updated = 6;	/* No qual value */
+		/* Update spy records */
+		wireless_spy_update(dev, hdr->addr2, &wstats);
+	}
+#endif /* IW_WIRELESS_SPY */
+#endif /* WIRELESS_EXT > 15 */
+	hostap_update_rx_stats(local->ap, hdr, rx_stats);
+#endif
+
+#if WIRELESS_EXT > 15
+	if (ieee->iw_mode == IW_MODE_MONITOR) {
+		ieee80211_monitor_rx(ieee, skb, rx_stats);
+		stats->rx_packets++;
+		stats->rx_bytes += skb->len;
+		return 1;
+	}
+#endif
+
+	if (ieee->host_decrypt) {
+		int idx = 0;
+		if (skb->len >= hdrlen + 3)
+			idx = skb->data[hdrlen + 3] >> 6;
+		crypt = ieee->crypt[idx];
+#ifdef NOT_YET
+		sta = NULL;
+
+		/* Use station specific key to override default keys if the
+		 * receiver address is a unicast address ("individual RA"). If
+		 * bcrx_sta_key parameter is set, station specific key is used
+		 * even with broad/multicast targets (this is against IEEE
+		 * 802.11, but makes it easier to use different keys with
+		 * stations that do not support WEP key mapping). */
+
+		if (!(hdr->addr1[0] & 0x01) || local->bcrx_sta_key)
+			(void) hostap_handle_sta_crypto(local, hdr, &crypt,
+							&sta);
+#endif
+
+		/* allow NULL decrypt to indicate a station-specific override
+		 * of the default encryption */
+		if (crypt && (crypt->ops == NULL ||
+			      crypt->ops->decrypt_mpdu == NULL))
+			crypt = NULL;
+
+		if (!crypt && (fc & IEEE80211_FCTL_WEP)) {
+			/* This seems to be triggered by some (multicast?)
+			 * frames from other than current BSS, so just drop the
+			 * frames silently instead of filling system log with
+			 * these reports. */
+			IEEE80211_DEBUG_DROP("Decryption failed (not set)"
+					     " (SA=" MAC_FMT ")\n",
+					     MAC_ARG(hdr->addr2));
+			ieee->ieee_stats.rx_discards_undecryptable++;
+			goto rx_dropped;
+		}
+	}
+
+#ifdef NOT_YET
+	if (type != WLAN_FC_TYPE_DATA) {
+		if (type == WLAN_FC_TYPE_MGMT && stype == WLAN_FC_STYPE_AUTH &&
+		    fc & IEEE80211_FCTL_WEP && ieee->host_decrypt &&
+		    (keyidx = hostap_rx_frame_decrypt(ieee, skb, crypt)) < 0)
+		{
+			printk(KERN_DEBUG "%s: failed to decrypt mgmt::auth "
+			       "from " MAC_FMT "\n", dev->name,
+			       MAC_ARG(hdr->addr2));
+			/* TODO: could inform hostapd about this so that it
+			 * could send auth failure report */
+			goto rx_dropped;
+		}
+
+		if (ieee80211_rx_frame_mgmt(ieee, skb, rx_stats, type, stype))
+			goto rx_dropped;
+		else
+			goto rx_exit;
+	}
+#endif
+
+	/* Data frame - extract src/dst addresses */
+	if (skb->len < IEEE80211_3ADDR_LEN)
+		goto rx_dropped;
+
+	switch (fc & (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
+	case IEEE80211_FCTL_FROMDS:
+		memcpy(dst, hdr->addr1, ETH_ALEN);
+		memcpy(src, hdr->addr3, ETH_ALEN);
+		break;
+	case IEEE80211_FCTL_TODS:
+		memcpy(dst, hdr->addr3, ETH_ALEN);
+		memcpy(src, hdr->addr2, ETH_ALEN);
+		break;
+	case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS:
+		if (skb->len < IEEE80211_4ADDR_LEN)
+			goto rx_dropped;
+		memcpy(dst, hdr->addr3, ETH_ALEN);
+		memcpy(src, hdr->addr4, ETH_ALEN);
+		break;
+	case 0:
+		memcpy(dst, hdr->addr1, ETH_ALEN);
+		memcpy(src, hdr->addr2, ETH_ALEN);
+		break;
+	}
+
+#ifdef NOT_YET
+	if (hostap_rx_frame_wds(ieee, hdr, fc, &wds))
+		goto rx_dropped;
+	if (wds) {
+		skb->dev = dev = wds;
+		stats = hostap_get_stats(dev);
+	}
+
+	if (ieee->iw_mode == IW_MODE_MASTER && !wds &&
+	    (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_FROMDS &&
+	    ieee->stadev &&
+	    memcmp(hdr->addr2, ieee->assoc_ap_addr, ETH_ALEN) == 0) {
+		/* Frame from BSSID of the AP for which we are a client */
+		skb->dev = dev = ieee->stadev;
+		stats = hostap_get_stats(dev);
+		from_assoc_ap = 1;
+	}
+#endif
+
+	dev->last_rx = jiffies;
+
+#ifdef NOT_YET
+	if ((ieee->iw_mode == IW_MODE_MASTER ||
+	     ieee->iw_mode == IW_MODE_REPEAT) &&
+	    !from_assoc_ap) {
+		switch (hostap_handle_sta_rx(ieee, dev, skb, rx_stats,
+					     wds != NULL)) {
+		case AP_RX_CONTINUE_NOT_AUTHORIZED:
+			frame_authorized = 0;
+			break;
+		case AP_RX_CONTINUE:
+			frame_authorized = 1;
+			break;
+		case AP_RX_DROP:
+			goto rx_dropped;
+		case AP_RX_EXIT:
+			goto rx_exit;
+		}
+	}
+#endif
+
+	/* Nullfunc frames may have PS-bit set, so they must be passed to
+	 * hostap_handle_sta_rx() before being dropped here. */
+	if (stype != IEEE80211_STYPE_DATA &&
+	    stype != IEEE80211_STYPE_DATA_CFACK &&
+	    stype != IEEE80211_STYPE_DATA_CFPOLL &&
+	    stype != IEEE80211_STYPE_DATA_CFACKPOLL) {
+		if (stype != IEEE80211_STYPE_NULLFUNC)
+			IEEE80211_DEBUG_DROP(
+				"RX: dropped data frame "
+				"with no data (type=0x%02x, "
+				"subtype=0x%02x, len=%d)\n",
+				type, stype, skb->len);
+		goto rx_dropped;
+	}
+
+	/* skb: hdr + (possibly fragmented, possibly encrypted) payload */
+
+	if (ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
+	    (keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt)) < 0)
+		goto rx_dropped;
+
+	hdr = (struct ieee80211_hdr *) skb->data;
+
+	/* skb: hdr + (possibly fragmented) plaintext payload */
+	// PR: FIXME: hostap has additional conditions in the "if" below:
+	// ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
+	if ((frag != 0 || (fc & IEEE80211_FCTL_MOREFRAGS))) {
+		int flen;
+		struct sk_buff *frag_skb = ieee80211_frag_cache_get(ieee, hdr);
+		IEEE80211_DEBUG_FRAG("Rx Fragment received (%u)\n", frag);
+
+		if (!frag_skb) {
+			IEEE80211_DEBUG(IEEE80211_DL_RX | IEEE80211_DL_FRAG,
+					"Rx cannot get skb from fragment "
+					"cache (morefrag=%d seq=%u frag=%u)\n",
+					(fc & IEEE80211_FCTL_MOREFRAGS) != 0,
+					WLAN_GET_SEQ_SEQ(sc), frag);
+			goto rx_dropped;
+		}
+
+		flen = skb->len;
+		if (frag != 0)
+			flen -= hdrlen;
+
+		if (frag_skb->tail + flen > frag_skb->end) {
+			printk(KERN_WARNING "%s: host decrypted and "
+			       "reassembled frame did not fit skb\n",
+			       dev->name);
+			ieee80211_frag_cache_invalidate(ieee, hdr);
+			goto rx_dropped;
+		}
+
+		if (frag == 0) {
+			/* copy first fragment (including full headers) into
+			 * beginning of the fragment cache skb */
+			memcpy(skb_put(frag_skb, flen), skb->data, flen);
+		} else {
+			/* append frame payload to the end of the fragment
+			 * cache skb */
+			memcpy(skb_put(frag_skb, flen), skb->data + hdrlen,
+			       flen);
+		}
+		dev_kfree_skb_any(skb);
+		skb = NULL;
+
+		if (fc & IEEE80211_FCTL_MOREFRAGS) {
+			/* more fragments expected - leave the skb in fragment
+			 * cache for now; it will be delivered to upper layers
+			 * after all fragments have been received */
+			goto rx_exit;
+		}
+
+		/* this was the last fragment and the frame will be
+		 * delivered, so remove skb from fragment cache */
+		skb = frag_skb;
+		hdr = (struct ieee80211_hdr *) skb->data;
+		ieee80211_frag_cache_invalidate(ieee, hdr);
+	}
+
+	/* skb: hdr + (possibly reassembled) full MSDU payload; possibly still
+	 * encrypted/authenticated */
+	if (ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
+	    ieee80211_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt))
+		goto rx_dropped;
+
+	hdr = (struct ieee80211_hdr *) skb->data;
+	if (crypt && !(fc & IEEE80211_FCTL_WEP) && !ieee->open_wep) {
+		if (/*ieee->ieee802_1x &&*/
+		    ieee80211_is_eapol_frame(ieee, skb)) {
+#ifdef CONFIG_IEEE80211_DEBUG
+			/* pass unencrypted EAPOL frames even if encryption is
+			 * configured */
+			struct eapol *eap = (struct eapol *)(skb->data +
+				24);
+			IEEE80211_DEBUG_EAP("RX: IEEE 802.1X EAPOL frame: %s\n",
+						eap_get_type(eap->type));
+#endif
+		} else {
+			IEEE80211_DEBUG_DROP(
+				"encryption configured, but RX "
+				"frame not encrypted (SA=" MAC_FMT ")\n",
+				MAC_ARG(hdr->addr2));
+			goto rx_dropped;
+		}
+	}
+
+#ifdef CONFIG_IEEE80211_DEBUG
+	if (crypt && !(fc & IEEE80211_FCTL_WEP) &&
+	    ieee80211_is_eapol_frame(ieee, skb)) {
+			struct eapol *eap = (struct eapol *)(skb->data +
+				24);
+			IEEE80211_DEBUG_EAP("RX: IEEE 802.1X EAPOL frame: %s\n",
+						eap_get_type(eap->type));
+	}
+#endif
+
+	if (crypt && !(fc & IEEE80211_FCTL_WEP) && !ieee->open_wep &&
+	    !ieee80211_is_eapol_frame(ieee, skb)) {
+		IEEE80211_DEBUG_DROP(
+			"dropped unencrypted RX data "
+			"frame from " MAC_FMT
+			" (drop_unencrypted=1)\n",
+			MAC_ARG(hdr->addr2));
+		goto rx_dropped;
+	}
+
+	/* skb: hdr + (possibly reassembled) full plaintext payload */
+
+	payload = skb->data + hdrlen;
+	ethertype = (payload[6] << 8) | payload[7];
+
+#ifdef NOT_YET
+	/* If IEEE 802.1X is used, check whether the port is authorized to send
+	 * the received frame. */
+	if (ieee->ieee802_1x && ieee->iw_mode == IW_MODE_MASTER) {
+		if (ethertype == ETH_P_PAE) {
+			printk(KERN_DEBUG "%s: RX: IEEE 802.1X frame\n",
+			       dev->name);
+			if (ieee->hostapd && ieee->apdev) {
+				/* Send IEEE 802.1X frames to the user
+				 * space daemon for processing */
+				prism2_rx_80211(ieee->apdev, skb, rx_stats,
+						PRISM2_RX_MGMT);
+				ieee->apdevstats.rx_packets++;
+				ieee->apdevstats.rx_bytes += skb->len;
+				goto rx_exit;
+			}
+		} else if (!frame_authorized) {
+			printk(KERN_DEBUG "%s: dropped frame from "
+			       "unauthorized port (IEEE 802.1X): "
+			       "ethertype=0x%04x\n",
+			       dev->name, ethertype);
+			goto rx_dropped;
+		}
+	}
+#endif
+
+	/* convert hdr + possible LLC headers into Ethernet header */
+	if (skb->len - hdrlen >= 8 &&
+	    ((memcmp(payload, rfc1042_header, SNAP_SIZE) == 0 &&
+	      ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
+	     memcmp(payload, bridge_tunnel_header, SNAP_SIZE) == 0)) {
+		/* remove RFC1042 or Bridge-Tunnel encapsulation and
+		 * replace EtherType */
+		skb_pull(skb, hdrlen + SNAP_SIZE);
+		memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
+		memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
+	} else {
+		u16 len;
+		/* Leave Ethernet header part of hdr and full payload */
+		skb_pull(skb, hdrlen);
+		len = htons(skb->len);
+		memcpy(skb_push(skb, 2), &len, 2);
+		memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
+		memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
+	}
+
+#ifdef NOT_YET
+	if (wds && ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
+		    IEEE80211_FCTL_TODS) &&
+	    skb->len >= ETH_HLEN + ETH_ALEN) {
+		/* Non-standard frame: get addr4 from its bogus location after
+		 * the payload */
+		memcpy(skb->data + ETH_ALEN,
+		       skb->data + skb->len - ETH_ALEN, ETH_ALEN);
+		skb_trim(skb, skb->len - ETH_ALEN);
+	}
+#endif
+
+	stats->rx_packets++;
+	stats->rx_bytes += skb->len;
+
+#ifdef NOT_YET
+	if (ieee->iw_mode == IW_MODE_MASTER && !wds &&
+	    ieee->ap->bridge_packets) {
+		if (dst[0] & 0x01) {
+			/* copy multicast frame both to the higher layers and
+			 * to the wireless media */
+			ieee->ap->bridged_multicast++;
+			skb2 = skb_clone(skb, GFP_ATOMIC);
+			if (skb2 == NULL)
+				printk(KERN_DEBUG "%s: skb_clone failed for "
+				       "multicast frame\n", dev->name);
+		} else if (hostap_is_sta_assoc(ieee->ap, dst)) {
+			/* send frame directly to the associated STA using
+			 * wireless media and not passing to higher layers */
+			ieee->ap->bridged_unicast++;
+			skb2 = skb;
+			skb = NULL;
+		}
+	}
+
+	if (skb2 != NULL) {
+		/* send to wireless media */
+		skb2->protocol = __constant_htons(ETH_P_802_3);
+		skb2->mac.raw = skb2->nh.raw = skb2->data;
+		/* skb2->nh.raw = skb2->data + ETH_HLEN; */
+		skb2->dev = dev;
+		dev_queue_xmit(skb2);
+	}
+
+#endif
+
+	if (skb) {
+		skb->protocol = eth_type_trans(skb, dev);
+		memset(skb->cb, 0, sizeof(skb->cb));
+		skb->dev = dev;
+		skb->ip_summed = CHECKSUM_NONE; /* 802.11 crc not sufficient */
+		netif_rx(skb);
+	}
+
+ rx_exit:
+#ifdef NOT_YET
+	if (sta)
+		hostap_handle_sta_release(sta);
+#endif
+	return 1;
+
+ rx_dropped:
+	stats->rx_dropped++;
+
+	/* Returning 0 indicates to caller that we have not handled the SKB--
+	 * so it is still allocated and can be used again by underlying
+	 * hardware as a DMA target */
+	return 0;
+}
+
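+/*
+ * Illustrative caller sketch (not part of this file): a driver's receive
+ * tasklet would typically hand each 802.11 frame to ieee80211_rx() and keep
+ * ownership of the skb only when the stack reports it was not consumed:
+ *
+ *	if (!ieee80211_rx(priv->ieee, skb, &stats))
+ *		dev_kfree_skb_any(skb);
+ *
+ * 'priv->ieee' and 'stats' are hypothetical names, not symbols defined here.
+ */
+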
+#define MGMT_FRAME_FIXED_PART_LENGTH		0x24
+
+static inline int ieee80211_is_ofdm_rate(u8 rate)
+{
+	switch (rate & ~IEEE80211_BASIC_RATE_MASK) {
+	case IEEE80211_OFDM_RATE_6MB:
+	case IEEE80211_OFDM_RATE_9MB:
+	case IEEE80211_OFDM_RATE_12MB:
+	case IEEE80211_OFDM_RATE_18MB:
+	case IEEE80211_OFDM_RATE_24MB:
+	case IEEE80211_OFDM_RATE_36MB:
+	case IEEE80211_OFDM_RATE_48MB:
+	case IEEE80211_OFDM_RATE_54MB:
+		return 1;
+	}
+        return 0;
+}
+
+
+static inline int ieee80211_network_init(
+	struct ieee80211_device *ieee,
+	struct ieee80211_probe_response *beacon,
+	struct ieee80211_network *network,
+	struct ieee80211_rx_stats *stats)
+{
+#ifdef CONFIG_IEEE80211_DEBUG
+	char rates_str[64];
+	char *p;
+#endif
+	struct ieee80211_info_element *info_element;
+ 	u16 left;
+	u8 i;
+
+	/* Pull out fixed field data */
+	memcpy(network->bssid, beacon->header.addr3, ETH_ALEN);
+	network->capability = beacon->capability;
+	network->last_scanned = jiffies;
+	network->time_stamp[0] = beacon->time_stamp[0];
+	network->time_stamp[1] = beacon->time_stamp[1];
+	network->beacon_interval = beacon->beacon_interval;
+	/* Where to pull this? beacon->listen_interval;*/
+	network->listen_interval = 0x0A;
+	network->rates_len = network->rates_ex_len = 0;
+	network->last_associate = 0;
+	network->ssid_len = 0;
+	network->flags = 0;
+	network->atim_window = 0;
+
+	if (stats->freq == IEEE80211_52GHZ_BAND) {
+		/* for A band (No DS info) */
+		network->channel = stats->received_channel;
+	} else
+		network->flags |= NETWORK_HAS_CCK;
+
+ 	network->wpa_ie_len = 0;
+ 	network->rsn_ie_len = 0;
+
+ 	info_element = &beacon->info_element;
+	left = stats->len - ((void *)info_element - (void *)beacon);
+	while (left >= sizeof(struct ieee80211_info_element_hdr)) {
+		if (sizeof(struct ieee80211_info_element_hdr) +
+		    info_element->len > left) {
+			IEEE80211_DEBUG_SCAN("SCAN: parse failed: "
+					     "info_element->len + 2 > left : "
+					     "info_element->len+2=%d left=%d.\n",
+					     info_element->len +
+					     (int)sizeof(struct ieee80211_info_element_hdr),
+					     left);
+			return 1;
+		}
+
+		switch (info_element->id) {
+		case MFIE_TYPE_SSID:
+			if (ieee80211_is_empty_essid(info_element->data,
+						     info_element->len)) {
+				network->flags |= NETWORK_EMPTY_ESSID;
+				break;
+			}
+
+			network->ssid_len = min(info_element->len,
+						(u8)IW_ESSID_MAX_SIZE);
+			memcpy(network->ssid, info_element->data, network->ssid_len);
+			if (network->ssid_len < IW_ESSID_MAX_SIZE)
+				memset(network->ssid + network->ssid_len, 0,
+				       IW_ESSID_MAX_SIZE - network->ssid_len);
+
+			IEEE80211_DEBUG_SCAN("MFIE_TYPE_SSID: '%s' len=%d.\n",
+					     network->ssid, network->ssid_len);
+			break;
+
+		case MFIE_TYPE_RATES:
+#ifdef CONFIG_IEEE80211_DEBUG
+			p = rates_str;
+#endif
+			network->rates_len = min(info_element->len, MAX_RATES_LENGTH);
+			for (i = 0; i < network->rates_len; i++) {
+				network->rates[i] = info_element->data[i];
+#ifdef CONFIG_IEEE80211_DEBUG
+				p += snprintf(p, sizeof(rates_str) - (p - rates_str), "%02X ", network->rates[i]);
+#endif
+				if (ieee80211_is_ofdm_rate(info_element->data[i])) {
+					network->flags |= NETWORK_HAS_OFDM;
+					if (info_element->data[i] &
+					    IEEE80211_BASIC_RATE_MASK)
+						network->flags &=
+							~NETWORK_HAS_CCK;
+				}
+			}
+
+			IEEE80211_DEBUG_SCAN("MFIE_TYPE_RATES: '%s' (%d)\n",
+					     rates_str, network->rates_len);
+			break;
+
+		case MFIE_TYPE_RATES_EX:
+#ifdef CONFIG_IEEE80211_DEBUG
+			p = rates_str;
+#endif
+			network->rates_ex_len = min(info_element->len, MAX_RATES_EX_LENGTH);
+			for (i = 0; i < network->rates_ex_len; i++) {
+				network->rates_ex[i] = info_element->data[i];
+#ifdef CONFIG_IEEE80211_DEBUG
+				p += snprintf(p, sizeof(rates_str) - (p - rates_str), "%02X ", network->rates_ex[i]);
+#endif
+				if (ieee80211_is_ofdm_rate(info_element->data[i])) {
+					network->flags |= NETWORK_HAS_OFDM;
+					if (info_element->data[i] &
+					    IEEE80211_BASIC_RATE_MASK)
+						network->flags &=
+							~NETWORK_HAS_CCK;
+				}
+			}
+
+			IEEE80211_DEBUG_SCAN("MFIE_TYPE_RATES_EX: '%s' (%d)\n",
+					     rates_str, network->rates_ex_len);
+			break;
+
+		case MFIE_TYPE_DS_SET:
+  			IEEE80211_DEBUG_SCAN("MFIE_TYPE_DS_SET: %d\n",
+					     info_element->data[0]);
+			if (stats->freq == IEEE80211_24GHZ_BAND)
+				network->channel = info_element->data[0];
+			break;
+
+	 	case MFIE_TYPE_FH_SET:
+  			IEEE80211_DEBUG_SCAN("MFIE_TYPE_FH_SET: ignored\n");
+			break;
+
+		case MFIE_TYPE_CF_SET:
+			IEEE80211_DEBUG_SCAN("MFIE_TYPE_CF_SET: ignored\n");
+			break;
+
+		case MFIE_TYPE_TIM:
+			IEEE80211_DEBUG_SCAN("MFIE_TYPE_TIM: ignored\n");
+			break;
+
+		case MFIE_TYPE_IBSS_SET:
+			IEEE80211_DEBUG_SCAN("MFIE_TYPE_IBSS_SET: ignored\n");
+			break;
+
+		case MFIE_TYPE_CHALLENGE:
+			IEEE80211_DEBUG_SCAN("MFIE_TYPE_CHALLENGE: ignored\n");
+			break;
+
+		case MFIE_TYPE_GENERIC:
+			IEEE80211_DEBUG_SCAN("MFIE_TYPE_GENERIC: %d bytes\n",
+					     info_element->len);
+			if (info_element->len >= 4  &&
+			    info_element->data[0] == 0x00 &&
+			    info_element->data[1] == 0x50 &&
+			    info_element->data[2] == 0xf2 &&
+			    info_element->data[3] == 0x01) {
+				network->wpa_ie_len = min(info_element->len + 2,
+							 MAX_WPA_IE_LEN);
+				memcpy(network->wpa_ie, info_element,
+				       network->wpa_ie_len);
+			}
+			break;
+
+		case MFIE_TYPE_RSN:
+			IEEE80211_DEBUG_SCAN("MFIE_TYPE_RSN: %d bytes\n",
+					     info_element->len);
+			network->rsn_ie_len = min(info_element->len + 2,
+						 MAX_WPA_IE_LEN);
+			memcpy(network->rsn_ie, info_element,
+			       network->rsn_ie_len);
+			break;
+
+		default:
+			IEEE80211_DEBUG_SCAN("unsupported IE %d\n",
+					     info_element->id);
+			break;
+		}
+
+		left -= sizeof(struct ieee80211_info_element_hdr) +
+			info_element->len;
+		info_element = (struct ieee80211_info_element *)
+			&info_element->data[info_element->len];
+	}
+
+	network->mode = 0;
+	if (stats->freq == IEEE80211_52GHZ_BAND)
+		network->mode = IEEE_A;
+	else {
+		if (network->flags & NETWORK_HAS_OFDM)
+			network->mode |= IEEE_G;
+		if (network->flags & NETWORK_HAS_CCK)
+			network->mode |= IEEE_B;
+	}
+
+	if (network->mode == 0) {
+		IEEE80211_DEBUG_SCAN("Filtered out '%s (" MAC_FMT ")' "
+				     "network.\n",
+				     escape_essid(network->ssid,
+						  network->ssid_len),
+				     MAC_ARG(network->bssid));
+		return 1;
+	}
+
+	if (ieee80211_is_empty_essid(network->ssid, network->ssid_len))
+		network->flags |= NETWORK_EMPTY_ESSID;
+
+	memcpy(&network->stats, stats, sizeof(network->stats));
+
+	return 0;
+}
+
+static inline int is_same_network(struct ieee80211_network *src,
+				  struct ieee80211_network *dst)
+{
+	/* A network is only a duplicate if the channel, BSSID, and ESSID
+	 * all match.  We treat all <hidden> with the same BSSID and channel
+	 * as one network */
+	return ((src->ssid_len == dst->ssid_len) &&
+		(src->channel == dst->channel) &&
+		!memcmp(src->bssid, dst->bssid, ETH_ALEN) &&
+		!memcmp(src->ssid, dst->ssid, src->ssid_len));
+}
+
+static inline void update_network(struct ieee80211_network *dst,
+				  struct ieee80211_network *src)
+{
+	memcpy(&dst->stats, &src->stats, sizeof(struct ieee80211_rx_stats));
+	dst->capability = src->capability;
+	memcpy(dst->rates, src->rates, src->rates_len);
+	dst->rates_len = src->rates_len;
+	memcpy(dst->rates_ex, src->rates_ex, src->rates_ex_len);
+	dst->rates_ex_len = src->rates_ex_len;
+
+	dst->mode = src->mode;
+	dst->flags = src->flags;
+	dst->time_stamp[0] = src->time_stamp[0];
+	dst->time_stamp[1] = src->time_stamp[1];
+
+	dst->beacon_interval = src->beacon_interval;
+	dst->listen_interval = src->listen_interval;
+	dst->atim_window = src->atim_window;
+
+	memcpy(dst->wpa_ie, src->wpa_ie, src->wpa_ie_len);
+	dst->wpa_ie_len = src->wpa_ie_len;
+	memcpy(dst->rsn_ie, src->rsn_ie, src->rsn_ie_len);
+	dst->rsn_ie_len = src->rsn_ie_len;
+
+	dst->last_scanned = jiffies;
+	/* dst->last_associate is not overwritten */
+}
+
+static inline void ieee80211_process_probe_response(
+	struct ieee80211_device *ieee,
+	struct ieee80211_probe_response *beacon,
+	struct ieee80211_rx_stats *stats)
+{
+	struct ieee80211_network network;
+	struct ieee80211_network *target;
+	struct ieee80211_network *oldest = NULL;
+#ifdef CONFIG_IEEE80211_DEBUG
+	struct ieee80211_info_element *info_element = &beacon->info_element;
+#endif
+	unsigned long flags;
+
+	IEEE80211_DEBUG_SCAN(
+		"'%s' (" MAC_FMT "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
+		escape_essid(info_element->data, info_element->len),
+		MAC_ARG(beacon->header.addr3),
+		(beacon->capability & (1<<0xf)) ? '1' : '0',
+		(beacon->capability & (1<<0xe)) ? '1' : '0',
+		(beacon->capability & (1<<0xd)) ? '1' : '0',
+		(beacon->capability & (1<<0xc)) ? '1' : '0',
+		(beacon->capability & (1<<0xb)) ? '1' : '0',
+		(beacon->capability & (1<<0xa)) ? '1' : '0',
+		(beacon->capability & (1<<0x9)) ? '1' : '0',
+		(beacon->capability & (1<<0x8)) ? '1' : '0',
+		(beacon->capability & (1<<0x7)) ? '1' : '0',
+		(beacon->capability & (1<<0x6)) ? '1' : '0',
+		(beacon->capability & (1<<0x5)) ? '1' : '0',
+		(beacon->capability & (1<<0x4)) ? '1' : '0',
+		(beacon->capability & (1<<0x3)) ? '1' : '0',
+		(beacon->capability & (1<<0x2)) ? '1' : '0',
+		(beacon->capability & (1<<0x1)) ? '1' : '0',
+		(beacon->capability & (1<<0x0)) ? '1' : '0');
+
+	if (ieee80211_network_init(ieee, beacon, &network, stats)) {
+		IEEE80211_DEBUG_SCAN("Dropped '%s' (" MAC_FMT ") via %s.\n",
+				     escape_essid(info_element->data,
+						  info_element->len),
+				     MAC_ARG(beacon->header.addr3),
+				     WLAN_FC_GET_STYPE(beacon->header.frame_ctl) ==
+				     IEEE80211_STYPE_PROBE_RESP ?
+				     "PROBE RESPONSE" : "BEACON");
+		return;
+	}
+
+	/* The network parsed correctly -- so now we scan our known networks
+	 * to see if we can find it in our list.
+	 *
+	 * NOTE:  This search is definitely not optimized.  Once it's doing
+	 *        the "right thing" we'll optimize it for efficiency if
+	 *        necessary */
+
+	/* Search for this entry in the list and update it if it is
+	 * already there. */
+
+	spin_lock_irqsave(&ieee->lock, flags);
+
+	list_for_each_entry(target, &ieee->network_list, list) {
+		if (is_same_network(target, &network))
+			break;
+
+		if ((oldest == NULL) ||
+		    (target->last_scanned < oldest->last_scanned))
+			oldest = target;
+	}
+
+	/* If we didn't find a match, then get a new network slot to initialize
+	 * with this beacon's information */
+	if (&target->list == &ieee->network_list) {
+		if (list_empty(&ieee->network_free_list)) {
+			/* If there are no more slots, expire the oldest */
+			list_del(&oldest->list);
+			target = oldest;
+			IEEE80211_DEBUG_SCAN("Expired '%s' (" MAC_FMT ") from "
+					     "network list.\n",
+					     escape_essid(target->ssid,
+							  target->ssid_len),
+					     MAC_ARG(target->bssid));
+		} else {
+			/* Otherwise just pull from the free list */
+			target = list_entry(ieee->network_free_list.next,
+					    struct ieee80211_network, list);
+			list_del(ieee->network_free_list.next);
+		}
+
+
+#ifdef CONFIG_IEEE80211_DEBUG
+		IEEE80211_DEBUG_SCAN("Adding '%s' (" MAC_FMT ") via %s.\n",
+				     escape_essid(network.ssid,
+						  network.ssid_len),
+				     MAC_ARG(network.bssid),
+				     WLAN_FC_GET_STYPE(beacon->header.frame_ctl) ==
+				     IEEE80211_STYPE_PROBE_RESP ?
+				     "PROBE RESPONSE" : "BEACON");
+#endif
+		memcpy(target, &network, sizeof(*target));
+		list_add_tail(&target->list, &ieee->network_list);
+	} else {
+		IEEE80211_DEBUG_SCAN("Updating '%s' (" MAC_FMT ") via %s.\n",
+				     escape_essid(target->ssid,
+						  target->ssid_len),
+				     MAC_ARG(target->bssid),
+				     WLAN_FC_GET_STYPE(beacon->header.frame_ctl) ==
+				     IEEE80211_STYPE_PROBE_RESP ?
+				     "PROBE RESPONSE" : "BEACON");
+		update_network(target, &network);
+	}
+
+	spin_unlock_irqrestore(&ieee->lock, flags);
+}
+
+void ieee80211_rx_mgt(struct ieee80211_device *ieee,
+		      struct ieee80211_hdr *header,
+		      struct ieee80211_rx_stats *stats)
+{
+	switch (WLAN_FC_GET_STYPE(header->frame_ctl)) {
+	case IEEE80211_STYPE_ASSOC_RESP:
+		IEEE80211_DEBUG_MGMT("received ASSOCIATION RESPONSE (%d)\n",
+				     WLAN_FC_GET_STYPE(header->frame_ctl));
+		break;
+
+	case IEEE80211_STYPE_REASSOC_RESP:
+		IEEE80211_DEBUG_MGMT("received REASSOCIATION RESPONSE (%d)\n",
+				     WLAN_FC_GET_STYPE(header->frame_ctl));
+		break;
+
+	case IEEE80211_STYPE_PROBE_RESP:
+		IEEE80211_DEBUG_MGMT("received PROBE RESPONSE (%d)\n",
+				     WLAN_FC_GET_STYPE(header->frame_ctl));
+		IEEE80211_DEBUG_SCAN("Probe response\n");
+		ieee80211_process_probe_response(
+			ieee, (struct ieee80211_probe_response *)header, stats);
+		break;
+
+	case IEEE80211_STYPE_BEACON:
+		IEEE80211_DEBUG_MGMT("received BEACON (%d)\n",
+				     WLAN_FC_GET_STYPE(header->frame_ctl));
+		IEEE80211_DEBUG_SCAN("Beacon\n");
+		ieee80211_process_probe_response(
+			ieee, (struct ieee80211_probe_response *)header, stats);
+		break;
+
+	default:
+		IEEE80211_DEBUG_MGMT("received UNKNOWN (%d)\n",
+				     WLAN_FC_GET_STYPE(header->frame_ctl));
+		IEEE80211_WARNING("%s: Unknown management packet: %d\n",
+				  ieee->dev->name,
+				  WLAN_FC_GET_STYPE(header->frame_ctl));
+		break;
+	}
+}
+
+
+EXPORT_SYMBOL(ieee80211_rx_mgt);
+EXPORT_SYMBOL(ieee80211_rx);
diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c
new file mode 100644
--- /dev/null
+++ b/net/ieee80211/ieee80211_tx.c
@@ -0,0 +1,447 @@
+/******************************************************************************
+
+  Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+
+  The full GNU General Public License is included in this distribution in the
+  file called LICENSE.
+
+  Contact Information:
+  James P. Ketrenos <ipw2100-admin@linux.intel.com>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+******************************************************************************/
+#include <linux/compiler.h>
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/in6.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/proc_fs.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/tcp.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/wireless.h>
+#include <linux/etherdevice.h>
+#include <asm/uaccess.h>
+
+#include <net/ieee80211.h>
+
+
+/*
+
+
+802.11 Data Frame
+
+      ,-------------------------------------------------------------------.
+Bytes |  2   |  2   |    6    |    6    |    6    |  2   | 0..2312 |   4  |
+      |------|------|---------|---------|---------|------|---------|------|
+Desc. | ctrl | dura |  DA/RA  |   TA    |    SA   | Sequ |  Frame  |  fcs |
+      |      | tion | (BSSID) |         |         | ence |  data   |      |
+      `--------------------------------------------------|         |------'
+Total: 28 non-data bytes                                 `----.----'
+                                                              |
+       .- 'Frame data' expands to <---------------------------'
+       |
+       V
+      ,---------------------------------------------------.
+Bytes |  1   |  1   |    1    |    3     |  2   |  0-2304 |
+      |------|------|---------|----------|------|---------|
+Desc. | SNAP | SNAP | Control |Eth Tunnel| Type | IP      |
+      | DSAP | SSAP |         |          |      | Packet  |
+      | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8|      |         |
+      `-----------------------------------------|         |
+Total: 8 non-data bytes                         `----.----'
+                                                     |
+       .- 'IP Packet' expands, if WEP enabled, to <--'
+       |
+       V
+      ,-----------------------.
+Bytes |  4  |   0-2296  |  4  |
+      |-----|-----------|-----|
+Desc. | IV  | Encrypted | ICV |
+      |     | IP Packet |     |
+      `-----------------------'
+Total: 8 non-data bytes
+
+
+802.3 Ethernet Data Frame
+
+      ,-----------------------------------------.
+Bytes |   6   |   6   |  2   |  Variable |   4  |
+      |-------|-------|------|-----------|------|
+Desc. | Dest. | Source| Type | IP Packet |  fcs |
+      |  MAC  |  MAC  |      |           |      |
+      `-----------------------------------------'
+Total: 18 non-data bytes
+
+In the event that fragmentation is required, the incoming payload is split into
+N parts of size ieee->fts.  The first fragment contains the SNAP header and the
+remaining packets are just data.
+
+If encryption is enabled, each fragment payload size is reduced by enough space
+to add the prefix and postfix (IV and ICV totalling 8 bytes in the case of WEP).
+So if you have 1500 bytes of payload with ieee->fts set to 500 without
+encryption it will take 3 frames.  With WEP it will take 4 frames as the
+payload of each frame is reduced to 492 bytes.
+
+* SKB visualization
+*
+*  ,- skb->data
+* |
+* |    ETHERNET HEADER        ,-<-- PAYLOAD
+* |                           |     14 bytes from skb->data
+* |  2 bytes for Type --> ,T. |     (sizeof ethhdr)
+* |                       | | |
+* |,-Dest.--. ,--Src.---. | | |
+* |  6 bytes| | 6 bytes | | | |
+* v         | |         | | | |
+* 0         | v       1 | v | v           2
+* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+*     ^     | ^         | ^ |
+*     |     | |         | | |
+*     |     | |         | `T' <---- 2 bytes for Type
+*     |     | |         |
+*     |     | '---SNAP--' <-------- 6 bytes for SNAP
+*     |     |
+*     `-IV--' <-------------------- 4 bytes for IV (WEP)
+*
+*      SNAP HEADER
+*
+*/
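+
+/*
+ * Worked example of the fragmentation arithmetic above, using the
+ * hypothetical numbers from the comment (not driver defaults):
+ *
+ *	payload              = 1500 bytes
+ *	ieee->fts            = 500
+ *	WEP overhead         = 4 (IV) + 4 (ICV) = 8 bytes
+ *	payload per fragment = 500 - 8 = 492 bytes
+ *	fragments            = 1500 / 492 = 3 remainder 24  ->  4 frames
+ *
+ * The code below additionally subtracts the 802.11 header (and the FCS when
+ * CFG_IEEE80211_COMPUTE_FCS/CFG_IEEE80211_RESERVE_FCS is set) from each
+ * fragment's payload budget.
+ */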
+
+static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
+static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
+
+static inline int ieee80211_put_snap(u8 *data, u16 h_proto)
+{
+	struct ieee80211_snap_hdr *snap;
+	u8 *oui;
+
+	snap = (struct ieee80211_snap_hdr *)data;
+	snap->dsap = 0xaa;
+	snap->ssap = 0xaa;
+	snap->ctrl = 0x03;
+
+	if (h_proto == 0x8137 || h_proto == 0x80f3)
+		oui = P802_1H_OUI;
+	else
+		oui = RFC1042_OUI;
+	snap->oui[0] = oui[0];
+	snap->oui[1] = oui[1];
+	snap->oui[2] = oui[2];
+
+	*(u16 *)(data + SNAP_SIZE) = htons(h_proto);
+
+	return SNAP_SIZE + sizeof(u16);
+}
+
+static inline int ieee80211_encrypt_fragment(
+	struct ieee80211_device *ieee,
+	struct sk_buff *frag,
+	int hdr_len)
+{
+	struct ieee80211_crypt_data* crypt = ieee->crypt[ieee->tx_keyidx];
+	int res;
+
+#ifdef CONFIG_IEEE80211_CRYPT_TKIP
+	struct ieee80211_hdr *header;
+
+	if (ieee->tkip_countermeasures &&
+	    crypt && crypt->ops && strcmp(crypt->ops->name, "TKIP") == 0) {
+		header = (struct ieee80211_hdr *) frag->data;
+		if (net_ratelimit()) {
+			printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
+			       "TX packet to " MAC_FMT "\n",
+			       ieee->dev->name, MAC_ARG(header->addr1));
+		}
+		return -1;
+	}
+#endif
+	/* To encrypt, frame format is:
+	 * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */
+
+	// PR: FIXME: Copied from hostap. Check fragmentation/MSDU/MPDU encryption.
+	/* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
+	 * call both MSDU and MPDU encryption functions from here. */
+	atomic_inc(&crypt->refcnt);
+	res = 0;
+	if (crypt->ops->encrypt_msdu)
+		res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv);
+	if (res == 0 && crypt->ops->encrypt_mpdu)
+		res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);
+
+	atomic_dec(&crypt->refcnt);
+	if (res < 0) {
+		printk(KERN_INFO "%s: Encryption failed: len=%d.\n",
+		       ieee->dev->name, frag->len);
+		ieee->ieee_stats.tx_discards++;
+		return -1;
+	}
+
+	return 0;
+}
+
+
+void ieee80211_txb_free(struct ieee80211_txb *txb)
+{
+	int i;
+	if (unlikely(!txb))
+		return;
+	for (i = 0; i < txb->nr_frags; i++)
+		if (txb->fragments[i])
+			dev_kfree_skb_any(txb->fragments[i]);
+	kfree(txb);
+}
+
+static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
+						 int gfp_mask)
+{
+	struct ieee80211_txb *txb;
+	int i;
+	txb = kmalloc(
+		sizeof(struct ieee80211_txb) + (sizeof(u8*) * nr_frags),
+		gfp_mask);
+	if (!txb)
+		return NULL;
+
+	memset(txb, 0, sizeof(struct ieee80211_txb));
+	txb->nr_frags = nr_frags;
+	txb->frag_size = txb_size;
+
+	for (i = 0; i < nr_frags; i++) {
+		txb->fragments[i] = dev_alloc_skb(txb_size);
+		if (unlikely(!txb->fragments[i])) {
+			i--;
+			break;
+		}
+	}
+	if (unlikely(i != nr_frags)) {
+		while (i >= 0)
+			dev_kfree_skb_any(txb->fragments[i--]);
+		kfree(txb);
+		return NULL;
+	}
+	return txb;
+}
+
+/* SKBs are added to the ieee->tx_queue. */
+int ieee80211_xmit(struct sk_buff *skb,
+		   struct net_device *dev)
+{
+	struct ieee80211_device *ieee = netdev_priv(dev);
+	struct ieee80211_txb *txb = NULL;
+	struct ieee80211_hdr *frag_hdr;
+	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
+	unsigned long flags;
+	struct net_device_stats *stats = &ieee->stats;
+	int ether_type, encrypt;
+	int bytes, fc, hdr_len;
+	struct sk_buff *skb_frag;
+	struct ieee80211_hdr header = { /* Ensure zero initialized */
+		.duration_id = 0,
+		.seq_ctl = 0
+	};
+	u8 dest[ETH_ALEN], src[ETH_ALEN];
+
+	struct ieee80211_crypt_data* crypt;
+
+	spin_lock_irqsave(&ieee->lock, flags);
+
+	/* If there is no driver handler to take the TXB, don't bother
+	 * creating it... */
+	if (!ieee->hard_start_xmit) {
+		printk(KERN_WARNING "%s: No xmit handler.\n",
+		       ieee->dev->name);
+		goto success;
+	}
+
+	if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
+		printk(KERN_WARNING "%s: skb too small (%d).\n",
+		       ieee->dev->name, skb->len);
+		goto success;
+	}
+
+	ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);
+
+	crypt = ieee->crypt[ieee->tx_keyidx];
+
+	encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
+		ieee->host_encrypt && crypt && crypt->ops;
+
+	if (!encrypt && ieee->ieee802_1x &&
+	    ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
+		stats->tx_dropped++;
+		goto success;
+	}
+
+#ifdef CONFIG_IEEE80211_DEBUG
+	if (crypt && !encrypt && ether_type == ETH_P_PAE) {
+		struct eapol *eap = (struct eapol *)(skb->data +
+			sizeof(struct ethhdr) - SNAP_SIZE - sizeof(u16));
+		IEEE80211_DEBUG_EAP("TX: IEEE 802.11 EAPOL frame: %s\n",
+			eap_get_type(eap->type));
+	}
+#endif
+
+	/* Save source and destination addresses */
+	memcpy(&dest, skb->data, ETH_ALEN);
+	memcpy(&src, skb->data+ETH_ALEN, ETH_ALEN);
+
+	/* Advance the SKB to the start of the payload */
+	skb_pull(skb, sizeof(struct ethhdr));
+
+	/* Determine total amount of storage required for TXB packets */
+	bytes = skb->len + SNAP_SIZE + sizeof(u16);
+
+	if (encrypt)
+		fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
+			IEEE80211_FCTL_WEP;
+	else
+		fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA;
+
+	if (ieee->iw_mode == IW_MODE_INFRA) {
+		fc |= IEEE80211_FCTL_TODS;
+		/* To DS: Addr1 = BSSID, Addr2 = SA,
+		   Addr3 = DA */
+		memcpy(&header.addr1, ieee->bssid, ETH_ALEN);
+		memcpy(&header.addr2, &src, ETH_ALEN);
+		memcpy(&header.addr3, &dest, ETH_ALEN);
+	} else if (ieee->iw_mode == IW_MODE_ADHOC) {
+		/* not From/To DS: Addr1 = DA, Addr2 = SA,
+		   Addr3 = BSSID */
+		memcpy(&header.addr1, dest, ETH_ALEN);
+		memcpy(&header.addr2, src, ETH_ALEN);
+		memcpy(&header.addr3, ieee->bssid, ETH_ALEN);
+	}
+	header.frame_ctl = cpu_to_le16(fc);
+	hdr_len = IEEE80211_3ADDR_LEN;
+
+	/* Determine fragmentation size based on destination (multicast
+	 * and broadcast are not fragmented) */
+	if (is_multicast_ether_addr(dest) ||
+	    is_broadcast_ether_addr(dest))
+		frag_size = MAX_FRAG_THRESHOLD;
+	else
+		frag_size = ieee->fts;
+
+	/* Determine amount of payload per fragment.  Regardless of whether
+	 * this stack is providing the full 802.11 header, one will
+	 * eventually be affixed to this fragment -- so we must account for
+	 * it when determining the amount of payload space. */
+	bytes_per_frag = frag_size - IEEE80211_3ADDR_LEN;
+	if (ieee->config &
+	    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
+		bytes_per_frag -= IEEE80211_FCS_LEN;
+
+	/* Each fragment may need to have room for the encryption prefix/postfix */
+	if (encrypt)
+		bytes_per_frag -= crypt->ops->extra_prefix_len +
+			crypt->ops->extra_postfix_len;
+
+	/* Number of fragments is the total payload size divided by the
+	 * per-fragment payload size, rounded up */
+	nr_frags = bytes / bytes_per_frag;
+	bytes_last_frag = bytes % bytes_per_frag;
+	if (bytes_last_frag)
+		nr_frags++;
+	else
+		bytes_last_frag = bytes_per_frag;
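+	/* For example, 1400 payload bytes with 500 bytes per fragment
+	 * yields nr_frags = 3 and bytes_last_frag = 400 */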
+
+	/* When we allocate the TXB we allocate enough space for the reserve
+	 * and full fragment bytes (bytes_per_frag doesn't include prefix,
+	 * postfix, header, FCS, etc.) */
+	txb = ieee80211_alloc_txb(nr_frags, frag_size, GFP_ATOMIC);
+	if (unlikely(!txb)) {
+		printk(KERN_WARNING "%s: Could not allocate TXB\n",
+		       ieee->dev->name);
+		goto failed;
+	}
+	txb->encrypted = encrypt;
+	txb->payload_size = bytes;
+
+	for (i = 0; i < nr_frags; i++) {
+		skb_frag = txb->fragments[i];
+
+		if (encrypt)
+			skb_reserve(skb_frag, crypt->ops->extra_prefix_len);
+
+		frag_hdr = (struct ieee80211_hdr *)skb_put(skb_frag, hdr_len);
+		memcpy(frag_hdr, &header, hdr_len);
+
+		/* If this is not the last fragment, then add the MOREFRAGS
+		 * bit to the frame control */
+		if (i != nr_frags - 1) {
+			frag_hdr->frame_ctl = cpu_to_le16(
+				fc | IEEE80211_FCTL_MOREFRAGS);
+			bytes = bytes_per_frag;
+		} else {
+			/* The last fragment takes the remaining length */
+			bytes = bytes_last_frag;
+		}
+
+		/* Put a SNAP header on the first fragment */
+		if (i == 0) {
+			ieee80211_put_snap(
+				skb_put(skb_frag, SNAP_SIZE + sizeof(u16)),
+				ether_type);
+			bytes -= SNAP_SIZE + sizeof(u16);
+		}
+
+		memcpy(skb_put(skb_frag, bytes), skb->data, bytes);
+
+		/* Advance the SKB... */
+		skb_pull(skb, bytes);
+
+		/* Encryption routine will move the header forward in order
+		 * to insert the IV between the header and the payload */
+		if (encrypt)
+			ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);
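+		/* Leave room at the tail for the 4-byte FCS when the driver
+		 * asked the stack to compute or reserve it */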
+		if (ieee->config &
+		    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
+			skb_put(skb_frag, 4);
+	}
+
+ success:
+	spin_unlock_irqrestore(&ieee->lock, flags);
+
+	dev_kfree_skb_any(skb);
+
+	if (txb) {
+		if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
+			stats->tx_packets++;
+			stats->tx_bytes += txb->payload_size;
+			return 0;
+		}
+		ieee80211_txb_free(txb);
+	}
+
+	return 0;
+
+ failed:
+	spin_unlock_irqrestore(&ieee->lock, flags);
+	netif_stop_queue(dev);
+	stats->tx_errors++;
+	return 1;
+
+}
+
+EXPORT_SYMBOL(ieee80211_txb_free);
diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c
new file mode 100644
--- /dev/null
+++ b/net/ieee80211/ieee80211_wx.c
@@ -0,0 +1,471 @@
+/******************************************************************************
+
+  Copyright(c) 2004 Intel Corporation. All rights reserved.
+
+  Portions of this file are based on the WEP enablement code provided by the
+  Host AP project hostap-drivers v0.1.3
+  Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+  <jkmaline@cc.hut.fi>
+  Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+
+  The full GNU General Public License is included in this distribution in the
+  file called LICENSE.
+
+  Contact Information:
+  James P. Ketrenos <ipw2100-admin@linux.intel.com>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+******************************************************************************/
+#include <linux/wireless.h>
+#include <linux/version.h>
+#include <linux/kmod.h>
+#include <linux/module.h>
+
+#include <net/ieee80211.h>
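+
+/* Protocol-name suffixes indexed by the network's a/b/g mode bitmask */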
+static const char *ieee80211_modes[] = {
+	"?", "a", "b", "ab", "g", "ag", "bg", "abg"
+};
+
+#define MAX_CUSTOM_LEN 64
+static inline char *ipw2100_translate_scan(struct ieee80211_device *ieee,
+ 					   char *start, char *stop,
+					   struct ieee80211_network *network)
+{
+	char custom[MAX_CUSTOM_LEN];
+	char *p;
+	struct iw_event iwe;
+	int i, j;
+	u8 max_rate, rate;
+
+	/* First entry *MUST* be the AP MAC address */
+	iwe.cmd = SIOCGIWAP;
+	iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+	memcpy(iwe.u.ap_addr.sa_data, network->bssid, ETH_ALEN);
+	start = iwe_stream_add_event(start, stop, &iwe, IW_EV_ADDR_LEN);
+
+	/* Remaining entries will be displayed in the order we provide them */
+
+	/* Add the ESSID */
+	iwe.cmd = SIOCGIWESSID;
+	iwe.u.data.flags = 1;
+	if (network->flags & NETWORK_EMPTY_ESSID) {
+		iwe.u.data.length = sizeof("<hidden>");
+		start = iwe_stream_add_point(start, stop, &iwe, "<hidden>");
+	} else {
+		iwe.u.data.length = min(network->ssid_len, (u8)32);
+		start = iwe_stream_add_point(start, stop, &iwe, network->ssid);
+	}
+
+	/* Add the protocol name */
+	iwe.cmd = SIOCGIWNAME;
+	snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11%s", ieee80211_modes[network->mode]);
+	start = iwe_stream_add_event(start, stop, &iwe, IW_EV_CHAR_LEN);
+
+	/* Add mode */
+	iwe.cmd = SIOCGIWMODE;
+	if (network->capability &
+	    (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)) {
+		if (network->capability & WLAN_CAPABILITY_ESS)
+			iwe.u.mode = IW_MODE_MASTER;
+		else
+			iwe.u.mode = IW_MODE_ADHOC;
+
+		start = iwe_stream_add_event(start, stop, &iwe,
+					     IW_EV_UINT_LEN);
+	}
+
+	/* Add frequency/channel */
+	iwe.cmd = SIOCGIWFREQ;
+/*	iwe.u.freq.m = ieee80211_frequency(network->channel, network->mode);
+	iwe.u.freq.e = 3; */
+	iwe.u.freq.m = network->channel;
+	iwe.u.freq.e = 0;
+	iwe.u.freq.i = 0;
+	start = iwe_stream_add_event(start, stop, &iwe, IW_EV_FREQ_LEN);
+
+	/* Add encryption capability */
+	iwe.cmd = SIOCGIWENCODE;
+	if (network->capability & WLAN_CAPABILITY_PRIVACY)
+		iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+	else
+		iwe.u.data.flags = IW_ENCODE_DISABLED;
+	iwe.u.data.length = 0;
+	start = iwe_stream_add_point(start, stop, &iwe, network->ssid);
+
+	/* Add basic and extended rates */
+	max_rate = 0;
+	p = custom;
+	p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): ");
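+	/* Rate values are in units of 500 kbps; the high (basic rate) bit
+	 * is masked off.  Merge the two sorted rate lists while tracking
+	 * the maximum. */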
+	for (i = 0, j = 0; i < network->rates_len; ) {
+		if (j < network->rates_ex_len &&
+		    ((network->rates_ex[j] & 0x7F) <
+		     (network->rates[i] & 0x7F)))
+			rate = network->rates_ex[j++] & 0x7F;
+		else
+			rate = network->rates[i++] & 0x7F;
+		if (rate > max_rate)
+			max_rate = rate;
+		p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
+			      "%d%s ", rate >> 1, (rate & 1) ? ".5" : "");
+	}
+	for (; j < network->rates_ex_len; j++) {
+		rate = network->rates_ex[j] & 0x7F;
+		p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
+			      "%d%s ", rate >> 1, (rate & 1) ? ".5" : "");
+		if (rate > max_rate)
+			max_rate = rate;
+	}
+
+	iwe.cmd = SIOCGIWRATE;
+	iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
+	iwe.u.bitrate.value = max_rate * 500000;
+	start = iwe_stream_add_event(start, stop, &iwe,
+				     IW_EV_PARAM_LEN);
+
+	iwe.cmd = IWEVCUSTOM;
+	iwe.u.data.length = p - custom;
+	if (iwe.u.data.length)
+		start = iwe_stream_add_point(start, stop, &iwe, custom);
+
+	/* Add quality statistics */
+	/* TODO: Fix these values... */
+	iwe.cmd = IWEVQUAL;
+	iwe.u.qual.qual = network->stats.signal;
+	iwe.u.qual.level = network->stats.rssi;
+	iwe.u.qual.noise = network->stats.noise;
+	iwe.u.qual.updated = network->stats.mask & IEEE80211_STATMASK_WEMASK;
+	if (!(network->stats.mask & IEEE80211_STATMASK_RSSI))
+		iwe.u.qual.updated |= IW_QUAL_LEVEL_INVALID;
+	if (!(network->stats.mask & IEEE80211_STATMASK_NOISE))
+		iwe.u.qual.updated |= IW_QUAL_NOISE_INVALID;
+	if (!(network->stats.mask & IEEE80211_STATMASK_SIGNAL))
+		iwe.u.qual.updated |= IW_QUAL_QUAL_INVALID;
+
+	start = iwe_stream_add_event(start, stop, &iwe, IW_EV_QUAL_LEN);
+
+	iwe.cmd = IWEVCUSTOM;
+	p = custom;
+
+	iwe.u.data.length = p - custom;
+	if (iwe.u.data.length)
+		start = iwe_stream_add_point(start, stop, &iwe, custom);
+
+	if (ieee->wpa_enabled && network->wpa_ie_len) {
+		char buf[MAX_WPA_IE_LEN * 2 + 30];
+		char *p = buf;
+
+		p += sprintf(p, "wpa_ie=");
+		for (i = 0; i < network->wpa_ie_len; i++) {
+			p += sprintf(p, "%02x", network->wpa_ie[i]);
+		}
+
+		memset(&iwe, 0, sizeof(iwe));
+		iwe.cmd = IWEVCUSTOM;
+		iwe.u.data.length = strlen(buf);
+		start = iwe_stream_add_point(start, stop, &iwe, buf);
+	}
+
+	if (ieee->wpa_enabled && network->rsn_ie_len) {
+		char buf[MAX_WPA_IE_LEN * 2 + 30];
+		char *p = buf;
+
+		p += sprintf(p, "rsn_ie=");
+		for (i = 0; i < network->rsn_ie_len; i++) {
+			p += sprintf(p, "%02x", network->rsn_ie[i]);
+		}
+
+		memset(&iwe, 0, sizeof(iwe));
+		iwe.cmd = IWEVCUSTOM;
+		iwe.u.data.length = strlen(buf);
+		start = iwe_stream_add_point(start, stop, &iwe, buf);
+	}
+
+	/* Add EXTRA: Age to display time since last beacon/probe response
+	 * for this network. */
+	iwe.cmd = IWEVCUSTOM;
+	p = custom;
+	p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
+		      " Last beacon: %ums ago",
+		      jiffies_to_msecs(jiffies - network->last_scanned));
+	iwe.u.data.length = p - custom;
+	if (iwe.u.data.length)
+		start = iwe_stream_add_point(start, stop, &iwe, custom);
+
+
+	return start;
+}
+
+int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
+			  struct iw_request_info *info,
+			  union iwreq_data *wrqu, char *extra)
+{
+	struct ieee80211_network *network;
+	unsigned long flags;
+
+	char *ev = extra;
+	char *stop = ev + IW_SCAN_MAX_DATA;
+	int i = 0;
+
+	IEEE80211_DEBUG_WX("Getting scan\n");
+
+	spin_lock_irqsave(&ieee->lock, flags);
+
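+	/* Report only networks heard from within the last scan_age jiffies;
+	 * a scan_age of zero disables the age filter */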
+	list_for_each_entry(network, &ieee->network_list, list) {
+		i++;
+		if (ieee->scan_age == 0 ||
+		    time_after(network->last_scanned + ieee->scan_age, jiffies))
+			ev = ipw2100_translate_scan(ieee, ev, stop, network);
+		else
+			IEEE80211_DEBUG_SCAN(
+				"Not showing network '%s ("
+				MAC_FMT ")' due to age (%ums).\n",
+				escape_essid(network->ssid,
+					     network->ssid_len),
+				MAC_ARG(network->bssid),
+				jiffies_to_msecs(jiffies -
+						 network->last_scanned));
+	}
+
+	spin_unlock_irqrestore(&ieee->lock, flags);
+
+	wrqu->data.length = ev - extra;
+	wrqu->data.flags = 0;
+
+	IEEE80211_DEBUG_WX("exit: %d networks returned.\n", i);
+
+	return 0;
+}
+
+int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
+			    struct iw_request_info *info,
+			    union iwreq_data *wrqu, char *keybuf)
+{
+	struct iw_point *erq = &(wrqu->encoding);
+	struct net_device *dev = ieee->dev;
+	struct ieee80211_security sec = {
+		.flags = 0
+	};
+	int i, key, key_provided, len;
+	struct ieee80211_crypt_data **crypt;
+
+	IEEE80211_DEBUG_WX("SET_ENCODE\n");
+
+	key = erq->flags & IW_ENCODE_INDEX;
+	if (key) {
+		if (key > WEP_KEYS)
+			return -EINVAL;
+		key--;
+		key_provided = 1;
+	} else {
+		key_provided = 0;
+		key = ieee->tx_keyidx;
+	}
+
+	IEEE80211_DEBUG_WX("Key: %d [%s]\n", key, key_provided ?
+			   "provided" : "default");
+
+	crypt = &ieee->crypt[key];
+
+	if (erq->flags & IW_ENCODE_DISABLED) {
+		if (key_provided && *crypt) {
+			IEEE80211_DEBUG_WX("Disabling encryption on key %d.\n",
+					   key);
+			ieee80211_crypt_delayed_deinit(ieee, crypt);
+		} else
+			IEEE80211_DEBUG_WX("Disabling encryption.\n");
+
+		/* Check all the keys to see if any are still configured,
+		 * and if no key index was provided, de-init them all */
+		for (i = 0; i < WEP_KEYS; i++) {
+			if (ieee->crypt[i] != NULL) {
+				if (key_provided)
+					break;
+				ieee80211_crypt_delayed_deinit(
+					ieee, &ieee->crypt[i]);
+			}
+		}
+
+		if (i == WEP_KEYS) {
+			sec.enabled = 0;
+			sec.level = SEC_LEVEL_0;
+			sec.flags |= SEC_ENABLED | SEC_LEVEL;
+		}
+
+		goto done;
+	}
+
+	sec.enabled = 1;
+	sec.flags |= SEC_ENABLED;
+
+	if (*crypt != NULL && (*crypt)->ops != NULL &&
+	    strcmp((*crypt)->ops->name, "WEP") != 0) {
+		/* changing to use WEP; deinit previously used algorithm
+		 * on this key */
+		ieee80211_crypt_delayed_deinit(ieee, crypt);
+	}
+
+	if (*crypt == NULL) {
+		struct ieee80211_crypt_data *new_crypt;
+
+		/* take WEP into use */
+		new_crypt = kmalloc(sizeof(struct ieee80211_crypt_data),
+				    GFP_KERNEL);
+		if (new_crypt == NULL)
+			return -ENOMEM;
+		memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
+		new_crypt->ops = ieee80211_get_crypto_ops("WEP");
+		if (!new_crypt->ops) {
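+			/* WEP cipher not registered yet; try to load the
+			 * module and look up the ops again */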
+			request_module("ieee80211_crypt_wep");
+			new_crypt->ops = ieee80211_get_crypto_ops("WEP");
+		}
+
+		if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
+			new_crypt->priv = new_crypt->ops->init(key);
+
+		if (!new_crypt->ops || !new_crypt->priv) {
+			kfree(new_crypt);
+			new_crypt = NULL;
+
+			printk(KERN_WARNING "%s: could not initialize WEP: "
+			       "load module ieee80211_crypt_wep\n",
+			       dev->name);
+			return -EOPNOTSUPP;
+		}
+		*crypt = new_crypt;
+	}
+
+	/* If a new key was provided, set it up */
+	if (erq->length > 0) {
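+		/* WEP keys are 40-bit (5 byte) or 104-bit (13 byte); shorter
+		 * input is zero-padded up to the next valid size below */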
+		len = erq->length <= 5 ? 5 : 13;
+		memcpy(sec.keys[key], keybuf, erq->length);
+		if (len > erq->length)
+			memset(sec.keys[key] + erq->length, 0,
+			       len - erq->length);
+		IEEE80211_DEBUG_WX("Setting key %d to '%s' (%d:%d bytes)\n",
+				   key, escape_essid(sec.keys[key], len),
+				   erq->length, len);
+		sec.key_sizes[key] = len;
+		(*crypt)->ops->set_key(sec.keys[key], len, NULL,
+				       (*crypt)->priv);
+		sec.flags |= (1 << key);
+		/* This ensures a key will be activated if no key is
+		 * explicitly set */
+		if (key == sec.active_key)
+			sec.flags |= SEC_ACTIVE_KEY;
+	} else {
+		len = (*crypt)->ops->get_key(sec.keys[key], WEP_KEY_LEN,
+					     NULL, (*crypt)->priv);
+		if (len == 0) {
+			/* Set a default key of all 0 */
+			IEEE80211_DEBUG_WX("Setting key %d to all zero.\n",
+					   key);
+			memset(sec.keys[key], 0, 13);
+			(*crypt)->ops->set_key(sec.keys[key], 13, NULL,
+					       (*crypt)->priv);
+			sec.key_sizes[key] = 13;
+			sec.flags |= (1 << key);
+		}
+
+		/* No key data - just set the default TX key index */
+		if (key_provided) {
+			IEEE80211_DEBUG_WX(
+				"Setting key %d to default Tx key.\n", key);
+			ieee->tx_keyidx = key;
+			sec.active_key = key;
+			sec.flags |= SEC_ACTIVE_KEY;
+		}
+	}
+
+ done:
+	ieee->open_wep = !(erq->flags & IW_ENCODE_RESTRICTED);
+	sec.auth_mode = ieee->open_wep ? WLAN_AUTH_OPEN : WLAN_AUTH_SHARED_KEY;
+	sec.flags |= SEC_AUTH_MODE;
+	IEEE80211_DEBUG_WX("Auth: %s\n", sec.auth_mode == WLAN_AUTH_OPEN ?
+			   "OPEN" : "SHARED KEY");
+
+	/* For now we just support WEP, so only set that security level...
+	 * TODO: When WPA is added this is one place that needs to change */
+	sec.flags |= SEC_LEVEL;
+	sec.level = SEC_LEVEL_1; /* 40 and 104 bit WEP */
+
+	if (ieee->set_security)
+		ieee->set_security(dev, &sec);
+
+	/* Do not reset port if card is in Managed mode since resetting will
+	 * generate new IEEE 802.11 authentication which may end up in looping
+	 * with IEEE 802.1X.  If your hardware requires a reset after WEP
+	 * configuration (for example... Prism2), implement the reset_port in
+	 * the callbacks structures used to initialize the 802.11 stack. */
+	if (ieee->reset_on_keychange &&
+	    ieee->iw_mode != IW_MODE_INFRA &&
+	    ieee->reset_port && ieee->reset_port(dev)) {
+		printk(KERN_DEBUG "%s: reset_port failed\n", dev->name);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
+			    struct iw_request_info *info,
+			    union iwreq_data *wrqu, char *keybuf)
+{
+	struct iw_point *erq = &(wrqu->encoding);
+	int len, key;
+	struct ieee80211_crypt_data *crypt;
+
+	IEEE80211_DEBUG_WX("GET_ENCODE\n");
+
+	key = erq->flags & IW_ENCODE_INDEX;
+	if (key) {
+		if (key > WEP_KEYS)
+			return -EINVAL;
+		key--;
+	} else
+		key = ieee->tx_keyidx;
+
+	crypt = ieee->crypt[key];
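+	/* Wireless extensions report key indexes as 1-based */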
+	erq->flags = key + 1;
+
+	if (crypt == NULL || crypt->ops == NULL) {
+		erq->length = 0;
+		erq->flags |= IW_ENCODE_DISABLED;
+		return 0;
+	}
+
+	if (strcmp(crypt->ops->name, "WEP") != 0) {
+		/* only WEP is supported with wireless extensions, so just
+		 * report that encryption is used */
+		erq->length = 0;
+		erq->flags |= IW_ENCODE_ENABLED;
+		return 0;
+	}
+
+	len = crypt->ops->get_key(keybuf, WEP_KEY_LEN, NULL, crypt->priv);
+	erq->length = (len >= 0 ? len : 0);
+
+	erq->flags |= IW_ENCODE_ENABLED;
+
+	if (ieee->open_wep)
+		erq->flags |= IW_ENCODE_OPEN;
+	else
+		erq->flags |= IW_ENCODE_RESTRICTED;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(ieee80211_wx_get_scan);
+EXPORT_SYMBOL(ieee80211_wx_set_encode);
+EXPORT_SYMBOL(ieee80211_wx_get_encode);