v4.19.13 snapshot.
diff --git a/drivers/scsi/aacraid/Makefile b/drivers/scsi/aacraid/Makefile
new file mode 100644
index 0000000..1bd9fd1
--- /dev/null
+++ b/drivers/scsi/aacraid/Makefile
@@ -0,0 +1,8 @@
+# Adaptec aacraid
+
+obj-$(CONFIG_SCSI_AACRAID) := aacraid.o
+
+aacraid-objs	:= linit.o aachba.o commctrl.o comminit.o commsup.o \
+		   dpcsup.o rx.o sa.o rkt.o nark.o src.o
+
+ccflags-y	:= -Idrivers/scsi
diff --git a/drivers/scsi/aacraid/TODO b/drivers/scsi/aacraid/TODO
new file mode 100644
index 0000000..78dc863
--- /dev/null
+++ b/drivers/scsi/aacraid/TODO
@@ -0,0 +1,3 @@
+o	Testing
+o	More testing
+o	I/O size increase
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
new file mode 100644
index 0000000..6e35632
--- /dev/null
+++ b/drivers/scsi/aacraid/aachba.c
@@ -0,0 +1,4296 @@
+/*
+ *	Adaptec AAC series RAID controller driver
+ *	(c) Copyright 2001 Red Hat Inc.
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *		 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ *  aachba.c
+ *
+ * Abstract: Contains Interfaces to manage IOs.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+#include <linux/blkdev.h>
+#include <linux/uaccess.h>
+#include <linux/highmem.h> /* For flush_kernel_dcache_page */
+#include <linux/module.h>
+
+#include <asm/unaligned.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+
+#include "aacraid.h"
+
+/* values for inqd_pdt: Peripheral device type in plain English */
+#define	INQD_PDT_DA	0x00	/* Direct-access (DISK) device */
+#define	INQD_PDT_PROC	0x03	/* Processor device */
+#define	INQD_PDT_CHNGR	0x08	/* Changer (jukebox, scsi2) */
+#define	INQD_PDT_COMM	0x09	/* Communication device (scsi2) */
+#define	INQD_PDT_NOLUN2 0x1f	/* Unknown Device (scsi2) */
+#define	INQD_PDT_NOLUN	0x7f	/* Logical Unit Not Present */
+
+#define	INQD_PDT_DMASK	0x1F	/* Peripheral Device Type Mask */
+#define	INQD_PDT_QMASK	0xE0	/* Peripheral Device Qualifier Mask */
+
+/*
+ *	Sense codes
+ */
+
+#define SENCODE_NO_SENSE			0x00
+#define SENCODE_END_OF_DATA			0x00
+#define SENCODE_BECOMING_READY			0x04
+#define SENCODE_INIT_CMD_REQUIRED		0x04
+#define SENCODE_UNRECOVERED_READ_ERROR		0x11
+#define SENCODE_PARAM_LIST_LENGTH_ERROR		0x1A
+#define SENCODE_INVALID_COMMAND			0x20
+#define SENCODE_LBA_OUT_OF_RANGE		0x21
+#define SENCODE_INVALID_CDB_FIELD		0x24
+#define SENCODE_LUN_NOT_SUPPORTED		0x25
+#define SENCODE_INVALID_PARAM_FIELD		0x26
+#define SENCODE_PARAM_NOT_SUPPORTED		0x26
+#define SENCODE_PARAM_VALUE_INVALID		0x26
+#define SENCODE_RESET_OCCURRED			0x29
+#define SENCODE_LUN_NOT_SELF_CONFIGURED_YET	0x3E
+#define SENCODE_INQUIRY_DATA_CHANGED		0x3F
+#define SENCODE_SAVING_PARAMS_NOT_SUPPORTED	0x39
+#define SENCODE_DIAGNOSTIC_FAILURE		0x40
+#define SENCODE_INTERNAL_TARGET_FAILURE		0x44
+#define SENCODE_INVALID_MESSAGE_ERROR		0x49
+#define SENCODE_LUN_FAILED_SELF_CONFIG		0x4c
+#define SENCODE_OVERLAPPED_COMMAND		0x4E
+
+/*
+ *	Additional sense codes
+ */
+
+#define ASENCODE_NO_SENSE			0x00
+#define ASENCODE_END_OF_DATA			0x05
+#define ASENCODE_BECOMING_READY			0x01
+#define ASENCODE_INIT_CMD_REQUIRED		0x02
+#define ASENCODE_PARAM_LIST_LENGTH_ERROR	0x00
+#define ASENCODE_INVALID_COMMAND		0x00
+#define ASENCODE_LBA_OUT_OF_RANGE		0x00
+#define ASENCODE_INVALID_CDB_FIELD		0x00
+#define ASENCODE_LUN_NOT_SUPPORTED		0x00
+#define ASENCODE_INVALID_PARAM_FIELD		0x00
+#define ASENCODE_PARAM_NOT_SUPPORTED		0x01
+#define ASENCODE_PARAM_VALUE_INVALID		0x02
+#define ASENCODE_RESET_OCCURRED			0x00
+#define ASENCODE_LUN_NOT_SELF_CONFIGURED_YET	0x00
+#define ASENCODE_INQUIRY_DATA_CHANGED		0x03
+#define ASENCODE_SAVING_PARAMS_NOT_SUPPORTED	0x00
+#define ASENCODE_DIAGNOSTIC_FAILURE		0x80
+#define ASENCODE_INTERNAL_TARGET_FAILURE	0x00
+#define ASENCODE_INVALID_MESSAGE_ERROR		0x00
+#define ASENCODE_LUN_FAILED_SELF_CONFIG		0x00
+#define ASENCODE_OVERLAPPED_COMMAND		0x00
+
+#define BYTE0(x) (unsigned char)(x)
+#define BYTE1(x) (unsigned char)((x) >> 8)
+#define BYTE2(x) (unsigned char)((x) >> 16)
+#define BYTE3(x) (unsigned char)((x) >> 24)
+
+/* MODE_SENSE data format */
+typedef struct {
+	struct {
+		u8	data_length;
+		u8	med_type;
+		u8	dev_par;
+		u8	bd_length;
+	} __attribute__((packed)) hd;
+	struct {
+		u8	dens_code;
+		u8	block_count[3];
+		u8	reserved;
+		u8	block_length[3];
+	} __attribute__((packed)) bd;
+		u8	mpc_buf[3];
+} __attribute__((packed)) aac_modep_data;
+
+/* MODE_SENSE_10 data format */
+typedef struct {
+	struct {
+		u8	data_length[2];
+		u8	med_type;
+		u8	dev_par;
+		u8	rsrvd[2];
+		u8	bd_length[2];
+	} __attribute__((packed)) hd;
+	struct {
+		u8	dens_code;
+		u8	block_count[3];
+		u8	reserved;
+		u8	block_length[3];
+	} __attribute__((packed)) bd;
+		u8	mpc_buf[3];
+} __attribute__((packed)) aac_modep10_data;
+
+/*------------------------------------------------------------------------------
+ *              S T R U C T S / T Y P E D E F S
+ *----------------------------------------------------------------------------*/
+/* SCSI inquiry data */
+struct inquiry_data {
+	u8 inqd_pdt;	/* Peripheral qualifier | Peripheral Device Type */
+	u8 inqd_dtq;	/* RMB | Device Type Qualifier */
+	u8 inqd_ver;	/* ISO version | ECMA version | ANSI-approved version */
+	u8 inqd_rdf;	/* AENC | TrmIOP | Response data format */
+	u8 inqd_len;	/* Additional length (n-4) */
+	u8 inqd_pad1[2];/* Reserved - must be zero */
+	u8 inqd_pad2;	/* RelAdr | WBus32 | WBus16 |  Sync  | Linked |Reserved| CmdQue | SftRe */
+	u8 inqd_vid[8];	/* Vendor ID */
+	u8 inqd_pid[16];/* Product ID */
+	u8 inqd_prl[4];	/* Product Revision Level */
+};
+
+/* Added for VPD 0x83 */
+struct  tvpd_id_descriptor_type_1 {
+	u8 codeset:4;		/* VPD_CODE_SET */
+	u8 reserved:4;
+	u8 identifiertype:4;	/* VPD_IDENTIFIER_TYPE */
+	u8 reserved2:4;
+	u8 reserved3;
+	u8 identifierlength;
+	u8 venid[8];
+	u8 productid[16];
+	u8 serialnumber[8];	/* SN in ASCII */
+
+};
+
+struct tvpd_id_descriptor_type_2 {
+	u8 codeset:4;		/* VPD_CODE_SET */
+	u8 reserved:4;
+	u8 identifiertype:4;	/* VPD_IDENTIFIER_TYPE */
+	u8 reserved2:4;
+	u8 reserved3;
+	u8 identifierlength;
+	struct teu64id {
+		u32 Serial;
+		 /* The serial number is supposed to be 40 bits,
+		  * but we only support 32, so make the last byte zero. */
+		u8 reserved;
+		u8 venid[3];
+	} eu64id;
+
+};
+
+struct tvpd_id_descriptor_type_3 {
+	u8 codeset : 4;          /* VPD_CODE_SET */
+	u8 reserved : 4;
+	u8 identifiertype : 4;   /* VPD_IDENTIFIER_TYPE */
+	u8 reserved2 : 4;
+	u8 reserved3;
+	u8 identifierlength;
+	u8 Identifier[16];
+};
+
+struct tvpd_page83 {
+	u8 DeviceType:5;
+	u8 DeviceTypeQualifier:3;
+	u8 PageCode;
+	u8 reserved;
+	u8 PageLength;
+	struct tvpd_id_descriptor_type_1 type1;
+	struct tvpd_id_descriptor_type_2 type2;
+	struct tvpd_id_descriptor_type_3 type3;
+};
+
+/*
+ *              M O D U L E   G L O B A L S
+ */
+
+static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *sgmap);
+static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg);
+static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg);
+static long aac_build_sgraw2(struct scsi_cmnd *scsicmd,
+				struct aac_raw_io2 *rio2, int sg_max);
+static long aac_build_sghba(struct scsi_cmnd *scsicmd,
+				struct aac_hba_cmd_req *hbacmd,
+				int sg_max, u64 sg_address);
+static int aac_convert_sgraw2(struct aac_raw_io2 *rio2,
+				int pages, int nseg, int nseg_new);
+static int aac_send_srb_fib(struct scsi_cmnd* scsicmd);
+static int aac_send_hba_fib(struct scsi_cmnd *scsicmd);
+#ifdef AAC_DETAILED_STATUS_INFO
+static char *aac_get_status_string(u32 status);
+#endif
+
+/*
+ *	Non dasd selection is handled entirely in aachba now
+ */
+
+static int nondasd = -1;
+static int aac_cache = 2;	/* WCE=0 to avoid performance problems */
+static int dacmode = -1;
+int aac_msi;
+int aac_commit = -1;
+int startup_timeout = 180;
+int aif_timeout = 120;
+int aac_sync_mode;  /* Only Sync. transfer - disabled */
+int aac_convert_sgl = 1;	/* convert non-conformable s/g list - enabled */
+
+module_param(aac_sync_mode, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(aac_sync_mode, "Force sync. transfer mode"
+	" 0=off, 1=on");
+module_param(aac_convert_sgl, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(aac_convert_sgl, "Convert non-conformable s/g list"
+	" 0=off, 1=on");
+module_param(nondasd, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(nondasd, "Control scanning of hba for nondasd devices."
+	" 0=off, 1=on");
+module_param_named(cache, aac_cache, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(cache, "Disable Queue Flush commands:\n"
+	"\tbit 0 - Disable FUA in WRITE SCSI commands\n"
+	"\tbit 1 - Disable SYNCHRONIZE_CACHE SCSI command\n"
+	"\tbit 2 - Disable only if Battery is protecting Cache");
+module_param(dacmode, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(dacmode, "Control whether dma addressing is using 64 bit DAC."
+	" 0=off, 1=on");
+module_param_named(commit, aac_commit, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(commit, "Control whether a COMMIT_CONFIG is issued to the"
+	" adapter for foreign arrays.\n"
+	"This is typically needed in systems that do not have a BIOS."
+	" 0=off, 1=on");
+module_param_named(msi, aac_msi, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(msi, "IRQ handling."
+	" 0=PIC(default), 1=MSI, 2=MSI-X)");
+module_param(startup_timeout, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(startup_timeout, "The duration of time in seconds to wait for"
+	" adapter to have it's kernel up and\n"
+	"running. This is typically adjusted for large systems that do not"
+	" have a BIOS.");
+module_param(aif_timeout, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(aif_timeout, "The duration of time in seconds to wait for"
+	" applications to pick up AIFs before\n"
+	"deregistering them. This is typically adjusted for heavily burdened"
+	" systems.");
+
+int aac_fib_dump;
+module_param(aac_fib_dump, int, 0644);
+MODULE_PARM_DESC(aac_fib_dump, "Dump controller fibs prior to IOP_RESET 0=off, 1=on");
+
+int numacb = -1;
+module_param(numacb, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control"
+	" blocks (FIB) allocated. Valid values are 512 and down. Default is"
+	" to use suggestion from Firmware.");
+
+int acbsize = -1;
+module_param(acbsize, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB)"
+	" size. Valid values are 512, 2048, 4096 and 8192. Default is to use"
+	" suggestion from Firmware.");
+
+int update_interval = 30 * 60;
+module_param(update_interval, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(update_interval, "Interval in seconds between time sync"
+	" updates issued to adapter.");
+
+int check_interval = 60;
+module_param(check_interval, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(check_interval, "Interval in seconds between adapter health"
+	" checks.");
+
+int aac_check_reset = 1;
+module_param_named(check_reset, aac_check_reset, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(check_reset, "If adapter fails health check, reset the"
+	" adapter. a value of -1 forces the reset to adapters programmed to"
+	" ignore it.");
+
+int expose_physicals = -1;
+module_param(expose_physicals, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(expose_physicals, "Expose physical components of the arrays."
+	" -1=protect 0=off, 1=on");
+
+int aac_reset_devices;
+module_param_named(reset_devices, aac_reset_devices, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(reset_devices, "Force an adapter reset at initialization.");
+
+int aac_wwn = 1;
+module_param_named(wwn, aac_wwn, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(wwn, "Select a WWN type for the arrays:\n"
+	"\t0 - Disable\n"
+	"\t1 - Array Meta Data Signature (default)\n"
+	"\t2 - Adapter Serial Number");
+
+
+static inline int aac_valid_context(struct scsi_cmnd *scsicmd,
+		struct fib *fibptr) {
+	struct scsi_device *device;
+
+	if (unlikely(!scsicmd || !scsicmd->scsi_done)) {
+		dprintk((KERN_WARNING "aac_valid_context: scsi command corrupt\n"));
+		aac_fib_complete(fibptr);
+		return 0;
+	}
+	scsicmd->SCp.phase = AAC_OWNER_MIDLEVEL;
+	device = scsicmd->device;
+	if (unlikely(!device)) {
+		dprintk((KERN_WARNING "aac_valid_context: scsi device corrupt\n"));
+		aac_fib_complete(fibptr);
+		return 0;
+	}
+	return 1;
+}
+
+/**
+ *	aac_get_config_status	-	check the adapter configuration
+ *	@dev: adapter to query
+ *	@commit_flag: force a COMMIT_CONFIG even if aac_commit is not set
+ *
+ *	Query config status, and commit the configuration if needed.
+ */
+int aac_get_config_status(struct aac_dev *dev, int commit_flag)
+{
+	int status = 0;
+	struct fib * fibptr;
+
+	if (!(fibptr = aac_fib_alloc(dev)))
+		return -ENOMEM;
+
+	aac_fib_init(fibptr);
+	{
+		struct aac_get_config_status *dinfo;
+		dinfo = (struct aac_get_config_status *) fib_data(fibptr);
+
+		dinfo->command = cpu_to_le32(VM_ContainerConfig);
+		dinfo->type = cpu_to_le32(CT_GET_CONFIG_STATUS);
+		dinfo->count = cpu_to_le32(sizeof(((struct aac_get_config_status_resp *)NULL)->data));
+	}
+
+	status = aac_fib_send(ContainerCommand,
+			    fibptr,
+			    sizeof (struct aac_get_config_status),
+			    FsaNormal,
+			    1, 1,
+			    NULL, NULL);
+	if (status < 0) {
+		printk(KERN_WARNING "aac_get_config_status: SendFIB failed.\n");
+	} else {
+		struct aac_get_config_status_resp *reply
+		  = (struct aac_get_config_status_resp *) fib_data(fibptr);
+		dprintk((KERN_WARNING
+		  "aac_get_config_status: response=%d status=%d action=%d\n",
+		  le32_to_cpu(reply->response),
+		  le32_to_cpu(reply->status),
+		  le32_to_cpu(reply->data.action)));
+		if ((le32_to_cpu(reply->response) != ST_OK) ||
+		     (le32_to_cpu(reply->status) != CT_OK) ||
+		     (le32_to_cpu(reply->data.action) > CFACT_PAUSE)) {
+			printk(KERN_WARNING "aac_get_config_status: Will not issue the Commit Configuration\n");
+			status = -EINVAL;
+		}
+	}
+	/* Do not set XferState to zero unless we receive a response from F/W */
+	if (status >= 0)
+		aac_fib_complete(fibptr);
+
+	/* Send a CT_COMMIT_CONFIG to enable discovery of devices */
+	if (status >= 0) {
+		if ((aac_commit == 1) || commit_flag) {
+			struct aac_commit_config * dinfo;
+			aac_fib_init(fibptr);
+			dinfo = (struct aac_commit_config *) fib_data(fibptr);
+
+			dinfo->command = cpu_to_le32(VM_ContainerConfig);
+			dinfo->type = cpu_to_le32(CT_COMMIT_CONFIG);
+
+			status = aac_fib_send(ContainerCommand,
+				    fibptr,
+				    sizeof (struct aac_commit_config),
+				    FsaNormal,
+				    1, 1,
+				    NULL, NULL);
+			/* Do not set XferState to zero unless
+			 * we receive a response from F/W */
+			if (status >= 0)
+				aac_fib_complete(fibptr);
+		} else if (aac_commit == 0) {
+			printk(KERN_WARNING
+			  "aac_get_config_status: Foreign device configurations are being ignored\n");
+		}
+	}
+	/* FIB should be freed only after getting the response from the F/W */
+	if (status != -ERESTARTSYS)
+		aac_fib_free(fibptr);
+	return status;
+}
+
+static void aac_expose_phy_device(struct scsi_cmnd *scsicmd)
+{
+	char inq_data;
+	scsi_sg_copy_to_buffer(scsicmd,  &inq_data, sizeof(inq_data));
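+	/*
+	 * A peripheral qualifier of 001b marks a hidden physical disk; clear
+	 * that bit so the device is reported as connected and gets exposed.
+	 */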
+	if ((inq_data & 0x20) && (inq_data & 0x1f) == TYPE_DISK) {
+		inq_data &= 0xdf;
+		scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data));
+	}
+}
+
+/**
+ *	aac_get_containers	-	list containers
+ *	@dev: adapter to probe
+ *
+ *	Make a list of all containers on this controller
+ */
+int aac_get_containers(struct aac_dev *dev)
+{
+	struct fsa_dev_info *fsa_dev_ptr;
+	u32 index;
+	int status = 0;
+	struct fib * fibptr;
+	struct aac_get_container_count *dinfo;
+	struct aac_get_container_count_resp *dresp;
+	int maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
+
+	if (!(fibptr = aac_fib_alloc(dev)))
+		return -ENOMEM;
+
+	aac_fib_init(fibptr);
+	dinfo = (struct aac_get_container_count *) fib_data(fibptr);
+	dinfo->command = cpu_to_le32(VM_ContainerConfig);
+	dinfo->type = cpu_to_le32(CT_GET_CONTAINER_COUNT);
+
+	status = aac_fib_send(ContainerCommand,
+		    fibptr,
+		    sizeof (struct aac_get_container_count),
+		    FsaNormal,
+		    1, 1,
+		    NULL, NULL);
+	if (status >= 0) {
+		dresp = (struct aac_get_container_count_resp *)fib_data(fibptr);
+		maximum_num_containers = le32_to_cpu(dresp->ContainerSwitchEntries);
+		if (fibptr->dev->supplement_adapter_info.supported_options2 &
+		    AAC_OPTION_SUPPORTED_240_VOLUMES) {
+			maximum_num_containers =
+				le32_to_cpu(dresp->MaxSimpleVolumes);
+		}
+		aac_fib_complete(fibptr);
+	}
+	/* FIB should be freed only after getting the response from the F/W */
+	if (status != -ERESTARTSYS)
+		aac_fib_free(fibptr);
+
+	if (maximum_num_containers < MAXIMUM_NUM_CONTAINERS)
+		maximum_num_containers = MAXIMUM_NUM_CONTAINERS;
+	if (dev->fsa_dev == NULL ||
+		dev->maximum_num_containers != maximum_num_containers) {
+
+		fsa_dev_ptr = dev->fsa_dev;
+
+		dev->fsa_dev = kcalloc(maximum_num_containers,
+					sizeof(*fsa_dev_ptr), GFP_KERNEL);
+
+		kfree(fsa_dev_ptr);
+		fsa_dev_ptr = NULL;
+
+
+		if (!dev->fsa_dev)
+			return -ENOMEM;
+
+		dev->maximum_num_containers = maximum_num_containers;
+	}
+	for (index = 0; index < dev->maximum_num_containers; index++) {
+		dev->fsa_dev[index].devname[0] = '\0';
+		dev->fsa_dev[index].valid = 0;
+
+		status = aac_probe_container(dev, index);
+
+		if (status < 0) {
+			printk(KERN_WARNING "aac_get_containers: SendFIB failed.\n");
+			break;
+		}
+	}
+	return status;
+}
+
+static void get_container_name_callback(void *context, struct fib * fibptr)
+{
+	struct aac_get_name_resp * get_name_reply;
+	struct scsi_cmnd * scsicmd;
+
+	scsicmd = (struct scsi_cmnd *) context;
+
+	if (!aac_valid_context(scsicmd, fibptr))
+		return;
+
+	dprintk((KERN_DEBUG "get_container_name_callback[cpu %d]: t = %ld.\n", smp_processor_id(), jiffies));
+	BUG_ON(fibptr == NULL);
+
+	get_name_reply = (struct aac_get_name_resp *) fib_data(fibptr);
+	/* Failure is irrelevant, using default value instead */
+	if ((le32_to_cpu(get_name_reply->status) == CT_OK)
+	 && (get_name_reply->data[0] != '\0')) {
+		char *sp = get_name_reply->data;
+		int data_size = FIELD_SIZEOF(struct aac_get_name_resp, data);
+
+		sp[data_size - 1] = '\0';
+		while (*sp == ' ')
+			++sp;
+		if (*sp) {
+			struct inquiry_data inq;
+			char d[sizeof(((struct inquiry_data *)NULL)->inqd_pid)];
+			int count = sizeof(d);
+			char *dp = d;
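+			/* Copy the returned name into the fixed-width
+			 * product-ID field, space-padding any remainder. */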
+			do {
+				*dp++ = (*sp) ? *sp++ : ' ';
+			} while (--count > 0);
+
+			scsi_sg_copy_to_buffer(scsicmd, &inq, sizeof(inq));
+			memcpy(inq.inqd_pid, d, sizeof(d));
+			scsi_sg_copy_from_buffer(scsicmd, &inq, sizeof(inq));
+		}
+	}
+
+	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+
+	aac_fib_complete(fibptr);
+	scsicmd->scsi_done(scsicmd);
+}
+
+/**
+ *	aac_get_container_name	-	get container name, non-blocking.
+ */
+static int aac_get_container_name(struct scsi_cmnd * scsicmd)
+{
+	int status;
+	int data_size;
+	struct aac_get_name *dinfo;
+	struct fib * cmd_fibcontext;
+	struct aac_dev * dev;
+
+	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
+
+	data_size = FIELD_SIZEOF(struct aac_get_name_resp, data);
+
+	cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
+
+	aac_fib_init(cmd_fibcontext);
+	dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext);
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+
+	dinfo->command = cpu_to_le32(VM_ContainerConfig);
+	dinfo->type = cpu_to_le32(CT_READ_NAME);
+	dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
+	dinfo->count = cpu_to_le32(data_size - 1);
+
+	status = aac_fib_send(ContainerCommand,
+		  cmd_fibcontext,
+		  sizeof(struct aac_get_name_resp),
+		  FsaNormal,
+		  0, 1,
+		  (fib_callback)get_container_name_callback,
+		  (void *) scsicmd);
+
+	/*
+	 *	Check that the command queued to the controller
+	 */
+	if (status == -EINPROGRESS)
+		return 0;
+
+	printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status);
+	aac_fib_complete(cmd_fibcontext);
+	return -1;
+}
+
+static int aac_probe_container_callback2(struct scsi_cmnd * scsicmd)
+{
+	struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;
+
+	if ((fsa_dev_ptr[scmd_id(scsicmd)].valid & 1))
+		return aac_scsi_cmd(scsicmd);
+
+	scsicmd->result = DID_NO_CONNECT << 16;
+	scsicmd->scsi_done(scsicmd);
+	return 0;
+}
+
+static void _aac_probe_container2(void * context, struct fib * fibptr)
+{
+	struct fsa_dev_info *fsa_dev_ptr;
+	int (*callback)(struct scsi_cmnd *);
+	struct scsi_cmnd * scsicmd = (struct scsi_cmnd *)context;
+	int i;
+
+
+	if (!aac_valid_context(scsicmd, fibptr))
+		return;
+
+	scsicmd->SCp.Status = 0;
+	fsa_dev_ptr = fibptr->dev->fsa_dev;
+	if (fsa_dev_ptr) {
+		struct aac_mount * dresp = (struct aac_mount *) fib_data(fibptr);
+		__le32 sup_options2;
+
+		fsa_dev_ptr += scmd_id(scsicmd);
+		sup_options2 =
+			fibptr->dev->supplement_adapter_info.supported_options2;
+
+		if ((le32_to_cpu(dresp->status) == ST_OK) &&
+		    (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE) &&
+		    (le32_to_cpu(dresp->mnt[0].state) != FSCS_HIDDEN)) {
+			if (!(sup_options2 & AAC_OPTION_VARIABLE_BLOCK_SIZE)) {
+				dresp->mnt[0].fileinfo.bdevinfo.block_size = 0x200;
+				fsa_dev_ptr->block_size = 0x200;
+			} else {
+				fsa_dev_ptr->block_size =
+					le32_to_cpu(dresp->mnt[0].fileinfo.bdevinfo.block_size);
+			}
+			for (i = 0; i < 16; i++)
+				fsa_dev_ptr->identifier[i] =
+					dresp->mnt[0].fileinfo.bdevinfo
+								.identifier[i];
+			fsa_dev_ptr->valid = 1;
+			/* sense_key holds the current state of the spin-up */
+			if (dresp->mnt[0].state & cpu_to_le32(FSCS_NOT_READY))
+				fsa_dev_ptr->sense_data.sense_key = NOT_READY;
+			else if (fsa_dev_ptr->sense_data.sense_key == NOT_READY)
+				fsa_dev_ptr->sense_data.sense_key = NO_SENSE;
+			fsa_dev_ptr->type = le32_to_cpu(dresp->mnt[0].vol);
+			fsa_dev_ptr->size
+			  = ((u64)le32_to_cpu(dresp->mnt[0].capacity)) +
+			    (((u64)le32_to_cpu(dresp->mnt[0].capacityhigh)) << 32);
+			fsa_dev_ptr->ro = ((le32_to_cpu(dresp->mnt[0].state) & FSCS_READONLY) != 0);
+		}
+		if ((fsa_dev_ptr->valid & 1) == 0)
+			fsa_dev_ptr->valid = 0;
+		scsicmd->SCp.Status = le32_to_cpu(dresp->count);
+	}
+	aac_fib_complete(fibptr);
+	aac_fib_free(fibptr);
+	callback = (int (*)(struct scsi_cmnd *))(scsicmd->SCp.ptr);
+	scsicmd->SCp.ptr = NULL;
+	(*callback)(scsicmd);
+	return;
+}
+
+static void _aac_probe_container1(void * context, struct fib * fibptr)
+{
+	struct scsi_cmnd * scsicmd;
+	struct aac_mount * dresp;
+	struct aac_query_mount *dinfo;
+	int status;
+
+	dresp = (struct aac_mount *) fib_data(fibptr);
+	if (!aac_supports_2T(fibptr->dev)) {
+		dresp->mnt[0].capacityhigh = 0;
+		if ((le32_to_cpu(dresp->status) == ST_OK) &&
+			(le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
+			_aac_probe_container2(context, fibptr);
+			return;
+		}
+	}
+	scsicmd = (struct scsi_cmnd *) context;
+
+	if (!aac_valid_context(scsicmd, fibptr))
+		return;
+
+	aac_fib_init(fibptr);
+
+	dinfo = (struct aac_query_mount *)fib_data(fibptr);
+
+	if (fibptr->dev->supplement_adapter_info.supported_options2 &
+	    AAC_OPTION_VARIABLE_BLOCK_SIZE)
+		dinfo->command = cpu_to_le32(VM_NameServeAllBlk);
+	else
+		dinfo->command = cpu_to_le32(VM_NameServe64);
+
+	dinfo->count = cpu_to_le32(scmd_id(scsicmd));
+	dinfo->type = cpu_to_le32(FT_FILESYS);
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+
+	status = aac_fib_send(ContainerCommand,
+			  fibptr,
+			  sizeof(struct aac_query_mount),
+			  FsaNormal,
+			  0, 1,
+			  _aac_probe_container2,
+			  (void *) scsicmd);
+	/*
+	 *	Check that the command queued to the controller
+	 */
+	if (status < 0 && status != -EINPROGRESS) {
+		/* Inherit results from VM_NameServe, if any */
+		dresp->status = cpu_to_le32(ST_OK);
+		_aac_probe_container2(context, fibptr);
+	}
+}
+
+static int _aac_probe_container(struct scsi_cmnd * scsicmd, int (*callback)(struct scsi_cmnd *))
+{
+	struct fib * fibptr;
+	int status = -ENOMEM;
+
+	if ((fibptr = aac_fib_alloc((struct aac_dev *)scsicmd->device->host->hostdata))) {
+		struct aac_query_mount *dinfo;
+
+		aac_fib_init(fibptr);
+
+		dinfo = (struct aac_query_mount *)fib_data(fibptr);
+
+		if (fibptr->dev->supplement_adapter_info.supported_options2 &
+		    AAC_OPTION_VARIABLE_BLOCK_SIZE)
+			dinfo->command = cpu_to_le32(VM_NameServeAllBlk);
+		else
+			dinfo->command = cpu_to_le32(VM_NameServe);
+
+		dinfo->count = cpu_to_le32(scmd_id(scsicmd));
+		dinfo->type = cpu_to_le32(FT_FILESYS);
+		scsicmd->SCp.ptr = (char *)callback;
+		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+
+		status = aac_fib_send(ContainerCommand,
+			  fibptr,
+			  sizeof(struct aac_query_mount),
+			  FsaNormal,
+			  0, 1,
+			  _aac_probe_container1,
+			  (void *) scsicmd);
+		/*
+		 *	Check that the command queued to the controller
+		 */
+		if (status == -EINPROGRESS)
+			return 0;
+
+		if (status < 0) {
+			scsicmd->SCp.ptr = NULL;
+			aac_fib_complete(fibptr);
+			aac_fib_free(fibptr);
+		}
+	}
+	if (status < 0) {
+		struct fsa_dev_info *fsa_dev_ptr = ((struct aac_dev *)(scsicmd->device->host->hostdata))->fsa_dev;
+		if (fsa_dev_ptr) {
+			fsa_dev_ptr += scmd_id(scsicmd);
+			if ((fsa_dev_ptr->valid & 1) == 0) {
+				fsa_dev_ptr->valid = 0;
+				return (*callback)(scsicmd);
+			}
+		}
+	}
+	return status;
+}
+
+/**
+ *	aac_probe_container		-	query a logical volume
+ *	@dev: device to query
+ *	@cid: container identifier
+ *
+ *	Queries the controller about the given volume. The volume information
+ *	is updated in the struct fsa_dev_info structure rather than returned.
+ */
+static int aac_probe_container_callback1(struct scsi_cmnd * scsicmd)
+{
+	scsicmd->device = NULL;
+	return 0;
+}
+
+int aac_probe_container(struct aac_dev *dev, int cid)
+{
+	struct scsi_cmnd *scsicmd = kmalloc(sizeof(*scsicmd), GFP_KERNEL);
+	struct scsi_device *scsidev = kmalloc(sizeof(*scsidev), GFP_KERNEL);
+	int status;
+
+	if (!scsicmd || !scsidev) {
+		kfree(scsicmd);
+		kfree(scsidev);
+		return -ENOMEM;
+	}
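+	/*
+	 * Drive the asynchronous probe path synchronously: use a throw-away
+	 * scsi_cmnd/scsi_device pair and wait until the completion callback
+	 * (aac_probe_container_callback1) clears scsicmd->device.
+	 */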
+	scsicmd->list.next = NULL;
+	scsicmd->scsi_done = (void (*)(struct scsi_cmnd*))aac_probe_container_callback1;
+
+	scsicmd->device = scsidev;
+	scsidev->sdev_state = 0;
+	scsidev->id = cid;
+	scsidev->host = dev->scsi_host_ptr;
+
+	if (_aac_probe_container(scsicmd, aac_probe_container_callback1) == 0)
+		while (scsicmd->device == scsidev)
+			schedule();
+	kfree(scsidev);
+	status = scsicmd->SCp.Status;
+	kfree(scsicmd);
+	return status;
+}
+
+/* Local Structure to set SCSI inquiry data strings */
+struct scsi_inq {
+	char vid[8];         /* Vendor ID */
+	char pid[16];        /* Product ID */
+	char prl[4];         /* Product Revision Level */
+};
+
+/**
+ *	InqStrCopy	-	string merge
+ *	@a:	string to copy from
+ *	@b:	string to copy to
+ *
+ *	Copy a String from one location to another
+ *	without copying \0
+ */
+
+static void inqstrcpy(char *a, char *b)
+{
+
+	while (*a != (char)0)
+		*b++ = *a++;
+}
+
+static char *container_types[] = {
+	"None",
+	"Volume",
+	"Mirror",
+	"Stripe",
+	"RAID5",
+	"SSRW",
+	"SSRO",
+	"Morph",
+	"Legacy",
+	"RAID4",
+	"RAID10",
+	"RAID00",
+	"V-MIRRORS",
+	"PSEUDO R4",
+	"RAID50",
+	"RAID5D",
+	"RAID5D0",
+	"RAID1E",
+	"RAID6",
+	"RAID60",
+	"Unknown"
+};
+
+char * get_container_type(unsigned tindex)
+{
+	if (tindex >= ARRAY_SIZE(container_types))
+		tindex = ARRAY_SIZE(container_types) - 1;
+	return container_types[tindex];
+}
+
+/* Function: setinqstr
+ *
+ * Arguments: [1] pointer to void [1] int
+ *
+ * Purpose: Sets SCSI inquiry data strings for vendor, product
+ * and revision level. Allows strings to be set in platform dependent
+ * files instead of in OS dependent driver source.
+ */
+
+static void setinqstr(struct aac_dev *dev, void *data, int tindex)
+{
+	struct scsi_inq *str;
+	struct aac_supplement_adapter_info *sup_adap_info;
+
+	sup_adap_info = &dev->supplement_adapter_info;
+	str = (struct scsi_inq *)(data); /* cast data to scsi inq block */
+	memset(str, ' ', sizeof(*str));
+
+	if (sup_adap_info->adapter_type_text[0]) {
+		int c;
+		char *cp;
+		char *cname = kmemdup(sup_adap_info->adapter_type_text,
+				sizeof(sup_adap_info->adapter_type_text),
+								GFP_ATOMIC);
+		if (!cname)
+			return;
+
+		cp = cname;
+		if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C'))
+			inqstrcpy("SMC", str->vid);
+		else {
+			c = sizeof(str->vid);
+			while (*cp && *cp != ' ' && --c)
+				++cp;
+			c = *cp;
+			*cp = '\0';
+			inqstrcpy(cname, str->vid);
+			*cp = c;
+			while (*cp && *cp != ' ')
+				++cp;
+		}
+		while (*cp == ' ')
+			++cp;
+		/* last six chars reserved for vol type */
+		if (strlen(cp) > sizeof(str->pid))
+			cp[sizeof(str->pid)] = '\0';
+		inqstrcpy (cp, str->pid);
+
+		kfree(cname);
+	} else {
+		struct aac_driver_ident *mp = aac_get_driver_ident(dev->cardtype);
+
+		inqstrcpy (mp->vname, str->vid);
+		/* last six chars reserved for vol type */
+		inqstrcpy (mp->model, str->pid);
+	}
+
+	if (tindex < ARRAY_SIZE(container_types)){
+		char *findit = str->pid;
+
+		for ( ; *findit != ' '; findit++); /* walk till we find a space */
+		/* RAID is superfluous in the context of a RAID device */
+		if (memcmp(findit-4, "RAID", 4) == 0)
+			*(findit -= 4) = ' ';
+		if (((findit - str->pid) + strlen(container_types[tindex]))
+		 < (sizeof(str->pid) + sizeof(str->prl)))
+			inqstrcpy (container_types[tindex], findit + 1);
+	}
+	inqstrcpy ("V1.0", str->prl);
+}
+
+static void build_vpd83_type3(struct tvpd_page83 *vpdpage83data,
+		struct aac_dev *dev, struct scsi_cmnd *scsicmd)
+{
+	int container;
+
+	vpdpage83data->type3.codeset = 1;
+	vpdpage83data->type3.identifiertype = 3;
+	vpdpage83data->type3.identifierlength = sizeof(vpdpage83data->type3)
+			- 4;
+
+	for (container = 0; container < dev->maximum_num_containers;
+			container++) {
+
+		if (scmd_id(scsicmd) == container) {
+			memcpy(vpdpage83data->type3.Identifier,
+					dev->fsa_dev[container].identifier,
+					16);
+			break;
+		}
+	}
+}
+
+static void get_container_serial_callback(void *context, struct fib * fibptr)
+{
+	struct aac_get_serial_resp * get_serial_reply;
+	struct scsi_cmnd * scsicmd;
+
+	BUG_ON(fibptr == NULL);
+
+	scsicmd = (struct scsi_cmnd *) context;
+	if (!aac_valid_context(scsicmd, fibptr))
+		return;
+
+	get_serial_reply = (struct aac_get_serial_resp *) fib_data(fibptr);
+	/* Failure is irrelevant, using default value instead */
+	if (le32_to_cpu(get_serial_reply->status) == CT_OK) {
+		/* Check to see if it's for VPD 0x83 or 0x80 */
+		if (scsicmd->cmnd[2] == 0x83) {
+			/* vpd page 0x83 - Device Identification Page */
+			struct aac_dev *dev;
+			int i;
+			struct tvpd_page83 vpdpage83data;
+
+			dev = (struct aac_dev *)scsicmd->device->host->hostdata;
+
+			memset(((u8 *)&vpdpage83data), 0,
+			       sizeof(vpdpage83data));
+
+			/* DIRECT_ACCESS_DEVICE */
+			vpdpage83data.DeviceType = 0;
+			/* DEVICE_CONNECTED */
+			vpdpage83data.DeviceTypeQualifier = 0;
+			/* VPD_DEVICE_IDENTIFIERS */
+			vpdpage83data.PageCode = 0x83;
+			vpdpage83data.reserved = 0;
+			vpdpage83data.PageLength =
+				sizeof(vpdpage83data.type1) +
+				sizeof(vpdpage83data.type2);
+
+			/* VPD 83 Type 3 is not supported for ARC */
+			if (dev->sa_firmware)
+				vpdpage83data.PageLength +=
+				sizeof(vpdpage83data.type3);
+
+			/* T10 Vendor Identifier Field Format */
+			/* VpdcodesetAscii */
+			vpdpage83data.type1.codeset = 2;
+			/* VpdIdentifierTypeVendorId */
+			vpdpage83data.type1.identifiertype = 1;
+			vpdpage83data.type1.identifierlength =
+				sizeof(vpdpage83data.type1) - 4;
+
+			/* "ADAPTEC " for adaptec */
+			memcpy(vpdpage83data.type1.venid,
+				"ADAPTEC ",
+				sizeof(vpdpage83data.type1.venid));
+			memcpy(vpdpage83data.type1.productid,
+				"ARRAY           ",
+				sizeof(
+				vpdpage83data.type1.productid));
+
+			/* Convert to an ASCII-based serial number.
+			 * The LSB is at the end.
+			 */
+			for (i = 0; i < 8; i++) {
+				u8 temp =
+					(u8)((get_serial_reply->uid >> ((7 - i) * 4)) & 0xF);
+				if (temp  > 0x9) {
+					vpdpage83data.type1.serialnumber[i] =
+							'A' + (temp - 0xA);
+				} else {
+					vpdpage83data.type1.serialnumber[i] =
+							'0' + temp;
+				}
+			}
+
+			/* VpdCodeSetBinary */
+			vpdpage83data.type2.codeset = 1;
+			/* VpdidentifiertypeEUI64 */
+			vpdpage83data.type2.identifiertype = 2;
+			vpdpage83data.type2.identifierlength =
+				sizeof(vpdpage83data.type2) - 4;
+
+			vpdpage83data.type2.eu64id.venid[0] = 0xD0;
+			vpdpage83data.type2.eu64id.venid[1] = 0;
+			vpdpage83data.type2.eu64id.venid[2] = 0;
+
+			vpdpage83data.type2.eu64id.Serial =
+							get_serial_reply->uid;
+			vpdpage83data.type2.eu64id.reserved = 0;
+
+			/*
+			 * VpdIdentifierTypeFCPHName
+			 * VPD 0x83 Type 3 not supported for ARC
+			 */
+			if (dev->sa_firmware) {
+				build_vpd83_type3(&vpdpage83data,
+						dev, scsicmd);
+			}
+
+			/* Move the inquiry data to the response buffer. */
+			scsi_sg_copy_from_buffer(scsicmd, &vpdpage83data,
+						 sizeof(vpdpage83data));
+		} else {
+			/* It must be for VPD 0x80 */
+			char sp[13];
+			/* EVPD bit set */
+			sp[0] = INQD_PDT_DA;
+			sp[1] = scsicmd->cmnd[2];
+			sp[2] = 0;
+			sp[3] = snprintf(sp+4, sizeof(sp)-4, "%08X",
+				le32_to_cpu(get_serial_reply->uid));
+			scsi_sg_copy_from_buffer(scsicmd, sp,
+						 sizeof(sp));
+		}
+	}
+
+	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+
+	aac_fib_complete(fibptr);
+	scsicmd->scsi_done(scsicmd);
+}
+
+/**
+ *	aac_get_container_serial - get container serial, non-blocking.
+ */
+static int aac_get_container_serial(struct scsi_cmnd * scsicmd)
+{
+	int status;
+	struct aac_get_serial *dinfo;
+	struct fib * cmd_fibcontext;
+	struct aac_dev * dev;
+
+	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
+
+	cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
+
+	aac_fib_init(cmd_fibcontext);
+	dinfo = (struct aac_get_serial *) fib_data(cmd_fibcontext);
+
+	dinfo->command = cpu_to_le32(VM_ContainerConfig);
+	dinfo->type = cpu_to_le32(CT_CID_TO_32BITS_UID);
+	dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+
+	status = aac_fib_send(ContainerCommand,
+		  cmd_fibcontext,
+		  sizeof(struct aac_get_serial_resp),
+		  FsaNormal,
+		  0, 1,
+		  (fib_callback) get_container_serial_callback,
+		  (void *) scsicmd);
+
+	/*
+	 *	Check that the command queued to the controller
+	 */
+	if (status == -EINPROGRESS)
+		return 0;
+
+	printk(KERN_WARNING "aac_get_container_serial: aac_fib_send failed with status: %d.\n", status);
+	aac_fib_complete(cmd_fibcontext);
+	return -1;
+}
+
+/* Function: setinqserial
+ *
+ * Arguments: [1] pointer to void [1] int
+ *
+ * Purpose: Sets SCSI Unit Serial number.
+ *          This is a fake. We should read a proper
+ *          serial number from the container. <SuSE>But
+ *          without docs it's quite hard to do it :-)
+ *          So this will have to do in the meantime.</SuSE>
+ */
+
+static int setinqserial(struct aac_dev *dev, void *data, int cid)
+{
+	/*
+	 *	This breaks array migration.
+	 */
+	return snprintf((char *)(data), sizeof(struct scsi_inq) - 4, "%08X%02X",
+			le32_to_cpu(dev->adapter_info.serial[0]), cid);
+}
+
+static inline void set_sense(struct sense_data *sense_data, u8 sense_key,
+	u8 sense_code, u8 a_sense_code, u8 bit_pointer, u16 field_pointer)
+{
+	u8 *sense_buf = (u8 *)sense_data;
+	/* Sense data valid, err code 70h */
+	sense_buf[0] = 0x70; /* No info field */
+	sense_buf[1] = 0;	/* Segment number, always zero */
+
+	sense_buf[2] = sense_key;	/* Sense key */
+
+	sense_buf[12] = sense_code;	/* Additional sense code */
+	sense_buf[13] = a_sense_code;	/* Additional sense code qualifier */
+
+	if (sense_key == ILLEGAL_REQUEST) {
+		sense_buf[7] = 10;	/* Additional sense length */
+
+		sense_buf[15] = bit_pointer;
+		/* Illegal parameter is in the parameter block */
+		if (sense_code == SENCODE_INVALID_CDB_FIELD)
+			sense_buf[15] |= 0xc0;/* Std sense key specific field */
+		/* Illegal parameter is in the CDB block */
+		sense_buf[16] = field_pointer >> 8;	/* MSB */
+		sense_buf[17] = field_pointer;		/* LSB */
+	} else
+		sense_buf[7] = 6;	/* Additional sense length */
+}
+
+static int aac_bounds_32(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
+{
+	if (lba & 0xffffffff00000000LL) {
+		int cid = scmd_id(cmd);
+		dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
+		cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+			SAM_STAT_CHECK_CONDITION;
+		set_sense(&dev->fsa_dev[cid].sense_data,
+		  HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
+		  ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
+		memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+		       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+			     SCSI_SENSE_BUFFERSIZE));
+		cmd->scsi_done(cmd);
+		return 1;
+	}
+	return 0;
+}
+
+static int aac_bounds_64(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
+{
+	return 0;
+}
+
+static void io_callback(void *context, struct fib * fibptr);
+
+static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
+{
+	struct aac_dev *dev = fib->dev;
+	u16 fibsize, command;
+	long ret;
+
+	aac_fib_init(fib);
+	if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
+		dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) &&
+		!dev->sync_mode) {
+		struct aac_raw_io2 *readcmd2;
+		readcmd2 = (struct aac_raw_io2 *) fib_data(fib);
+		memset(readcmd2, 0, sizeof(struct aac_raw_io2));
+		readcmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff));
+		readcmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
+		readcmd2->byteCount = cpu_to_le32(count *
+			dev->fsa_dev[scmd_id(cmd)].block_size);
+		readcmd2->cid = cpu_to_le16(scmd_id(cmd));
+		readcmd2->flags = cpu_to_le16(RIO2_IO_TYPE_READ);
+		ret = aac_build_sgraw2(cmd, readcmd2,
+				dev->scsi_host_ptr->sg_tablesize);
+		if (ret < 0)
+			return ret;
+		command = ContainerRawIo2;
+		fibsize = sizeof(struct aac_raw_io2) +
+			((le32_to_cpu(readcmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
+	} else {
+		struct aac_raw_io *readcmd;
+		readcmd = (struct aac_raw_io *) fib_data(fib);
+		readcmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
+		readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
+		readcmd->count = cpu_to_le32(count *
+			dev->fsa_dev[scmd_id(cmd)].block_size);
+		readcmd->cid = cpu_to_le16(scmd_id(cmd));
+		readcmd->flags = cpu_to_le16(RIO_TYPE_READ);
+		readcmd->bpTotal = 0;
+		readcmd->bpComplete = 0;
+		ret = aac_build_sgraw(cmd, &readcmd->sg);
+		if (ret < 0)
+			return ret;
+		command = ContainerRawIo;
+		fibsize = sizeof(struct aac_raw_io) +
+			((le32_to_cpu(readcmd->sg.count)-1) * sizeof(struct sgentryraw));
+	}
+
+	BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
+	/*
+	 *	Now send the Fib to the adapter
+	 */
+	return aac_fib_send(command,
+			  fib,
+			  fibsize,
+			  FsaNormal,
+			  0, 1,
+			  (fib_callback) io_callback,
+			  (void *) cmd);
+}
+
+static int aac_read_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
+{
+	u16 fibsize;
+	struct aac_read64 *readcmd;
+	long ret;
+
+	aac_fib_init(fib);
+	readcmd = (struct aac_read64 *) fib_data(fib);
+	readcmd->command = cpu_to_le32(VM_CtHostRead64);
+	readcmd->cid = cpu_to_le16(scmd_id(cmd));
+	readcmd->sector_count = cpu_to_le16(count);
+	readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
+	readcmd->pad   = 0;
+	readcmd->flags = 0;
+
+	ret = aac_build_sg64(cmd, &readcmd->sg);
+	if (ret < 0)
+		return ret;
+	fibsize = sizeof(struct aac_read64) +
+		((le32_to_cpu(readcmd->sg.count) - 1) *
+		 sizeof (struct sgentry64));
+	BUG_ON (fibsize > (fib->dev->max_fib_size -
+				sizeof(struct aac_fibhdr)));
+	/*
+	 *	Now send the Fib to the adapter
+	 */
+	return aac_fib_send(ContainerCommand64,
+			  fib,
+			  fibsize,
+			  FsaNormal,
+			  0, 1,
+			  (fib_callback) io_callback,
+			  (void *) cmd);
+}
+
+static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count)
+{
+	u16 fibsize;
+	struct aac_read *readcmd;
+	struct aac_dev *dev = fib->dev;
+	long ret;
+
+	aac_fib_init(fib);
+	readcmd = (struct aac_read *) fib_data(fib);
+	readcmd->command = cpu_to_le32(VM_CtBlockRead);
+	readcmd->cid = cpu_to_le32(scmd_id(cmd));
+	readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
+	readcmd->count = cpu_to_le32(count *
+		dev->fsa_dev[scmd_id(cmd)].block_size);
+
+	ret = aac_build_sg(cmd, &readcmd->sg);
+	if (ret < 0)
+		return ret;
+	fibsize = sizeof(struct aac_read) +
+			((le32_to_cpu(readcmd->sg.count) - 1) *
+			 sizeof (struct sgentry));
+	BUG_ON (fibsize > (fib->dev->max_fib_size -
+				sizeof(struct aac_fibhdr)));
+	/*
+	 *	Now send the Fib to the adapter
+	 */
+	return aac_fib_send(ContainerCommand,
+			  fib,
+			  fibsize,
+			  FsaNormal,
+			  0, 1,
+			  (fib_callback) io_callback,
+			  (void *) cmd);
+}
+
+static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
+{
+	struct aac_dev *dev = fib->dev;
+	u16 fibsize, command;
+	long ret;
+
+	aac_fib_init(fib);
+	if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
+		dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) &&
+		!dev->sync_mode) {
+		struct aac_raw_io2 *writecmd2;
+		writecmd2 = (struct aac_raw_io2 *) fib_data(fib);
+		memset(writecmd2, 0, sizeof(struct aac_raw_io2));
+		writecmd2->blockLow = cpu_to_le32((u32)(lba&0xffffffff));
+		writecmd2->blockHigh = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
+		writecmd2->byteCount = cpu_to_le32(count *
+			dev->fsa_dev[scmd_id(cmd)].block_size);
+		writecmd2->cid = cpu_to_le16(scmd_id(cmd));
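+		/*
+		 * Honour FUA unless the "cache" module parameter disables it:
+		 * bit 0 alone disables FUA outright; bits 0 and 2 together
+		 * disable it only while the controller cache is battery
+		 * protected. The same test is used for the raw-io path below.
+		 */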
+		writecmd2->flags = (fua && ((aac_cache & 5) != 1) &&
+						   (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
+			cpu_to_le16(RIO2_IO_TYPE_WRITE|RIO2_IO_SUREWRITE) :
+			cpu_to_le16(RIO2_IO_TYPE_WRITE);
+		ret = aac_build_sgraw2(cmd, writecmd2,
+				dev->scsi_host_ptr->sg_tablesize);
+		if (ret < 0)
+			return ret;
+		command = ContainerRawIo2;
+		fibsize = sizeof(struct aac_raw_io2) +
+			((le32_to_cpu(writecmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
+	} else {
+		struct aac_raw_io *writecmd;
+		writecmd = (struct aac_raw_io *) fib_data(fib);
+		writecmd->block[0] = cpu_to_le32((u32)(lba&0xffffffff));
+		writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
+		writecmd->count = cpu_to_le32(count *
+			dev->fsa_dev[scmd_id(cmd)].block_size);
+		writecmd->cid = cpu_to_le16(scmd_id(cmd));
+		writecmd->flags = (fua && ((aac_cache & 5) != 1) &&
+						   (((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
+			cpu_to_le16(RIO_TYPE_WRITE|RIO_SUREWRITE) :
+			cpu_to_le16(RIO_TYPE_WRITE);
+		writecmd->bpTotal = 0;
+		writecmd->bpComplete = 0;
+		ret = aac_build_sgraw(cmd, &writecmd->sg);
+		if (ret < 0)
+			return ret;
+		command = ContainerRawIo;
+		fibsize = sizeof(struct aac_raw_io) +
+			((le32_to_cpu(writecmd->sg.count)-1) * sizeof (struct sgentryraw));
+	}
+
+	BUG_ON(fibsize > (fib->dev->max_fib_size - sizeof(struct aac_fibhdr)));
+	/*
+	 *	Now send the Fib to the adapter
+	 */
+	return aac_fib_send(command,
+			  fib,
+			  fibsize,
+			  FsaNormal,
+			  0, 1,
+			  (fib_callback) io_callback,
+			  (void *) cmd);
+}
+
+static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
+{
+	u16 fibsize;
+	struct aac_write64 *writecmd;
+	long ret;
+
+	aac_fib_init(fib);
+	writecmd = (struct aac_write64 *) fib_data(fib);
+	writecmd->command = cpu_to_le32(VM_CtHostWrite64);
+	writecmd->cid = cpu_to_le16(scmd_id(cmd));
+	writecmd->sector_count = cpu_to_le16(count);
+	writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
+	writecmd->pad	= 0;
+	writecmd->flags	= 0;
+
+	ret = aac_build_sg64(cmd, &writecmd->sg);
+	if (ret < 0)
+		return ret;
+	fibsize = sizeof(struct aac_write64) +
+		((le32_to_cpu(writecmd->sg.count) - 1) *
+		 sizeof (struct sgentry64));
+	BUG_ON (fibsize > (fib->dev->max_fib_size -
+				sizeof(struct aac_fibhdr)));
+	/*
+	 *	Now send the Fib to the adapter
+	 */
+	return aac_fib_send(ContainerCommand64,
+			  fib,
+			  fibsize,
+			  FsaNormal,
+			  0, 1,
+			  (fib_callback) io_callback,
+			  (void *) cmd);
+}
+
+static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
+{
+	u16 fibsize;
+	struct aac_write *writecmd;
+	struct aac_dev *dev = fib->dev;
+	long ret;
+
+	aac_fib_init(fib);
+	writecmd = (struct aac_write *) fib_data(fib);
+	writecmd->command = cpu_to_le32(VM_CtBlockWrite);
+	writecmd->cid = cpu_to_le32(scmd_id(cmd));
+	writecmd->block = cpu_to_le32((u32)(lba&0xffffffff));
+	writecmd->count = cpu_to_le32(count *
+		dev->fsa_dev[scmd_id(cmd)].block_size);
+	writecmd->sg.count = cpu_to_le32(1);
+	/* ->stable is not used - it used to indicate the type of write */
+
+	ret = aac_build_sg(cmd, &writecmd->sg);
+	if (ret < 0)
+		return ret;
+	fibsize = sizeof(struct aac_write) +
+		((le32_to_cpu(writecmd->sg.count) - 1) *
+		 sizeof (struct sgentry));
+	BUG_ON (fibsize > (fib->dev->max_fib_size -
+				sizeof(struct aac_fibhdr)));
+	/*
+	 *	Now send the Fib to the adapter
+	 */
+	return aac_fib_send(ContainerCommand,
+			  fib,
+			  fibsize,
+			  FsaNormal,
+			  0, 1,
+			  (fib_callback) io_callback,
+			  (void *) cmd);
+}
+
+static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd)
+{
+	struct aac_srb * srbcmd;
+	u32 flag;
+	u32 timeout;
+
+	aac_fib_init(fib);
+	switch(cmd->sc_data_direction){
+	case DMA_TO_DEVICE:
+		flag = SRB_DataOut;
+		break;
+	case DMA_BIDIRECTIONAL:
+		flag = SRB_DataIn | SRB_DataOut;
+		break;
+	case DMA_FROM_DEVICE:
+		flag = SRB_DataIn;
+		break;
+	case DMA_NONE:
+	default:	/* shuts up some versions of gcc */
+		flag = SRB_NoDataXfer;
+		break;
+	}
+
+	srbcmd = (struct aac_srb*) fib_data(fib);
+	srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
+	srbcmd->channel  = cpu_to_le32(aac_logical_to_phys(scmd_channel(cmd)));
+	srbcmd->id       = cpu_to_le32(scmd_id(cmd));
+	srbcmd->lun      = cpu_to_le32(cmd->device->lun);
+	srbcmd->flags    = cpu_to_le32(flag);
+	timeout = cmd->request->timeout/HZ;
+	if (timeout == 0)
+		timeout = 1;
+	srbcmd->timeout  = cpu_to_le32(timeout);  // timeout in seconds
+	srbcmd->retry_limit = 0; /* Obsolete parameter */
+	srbcmd->cdb_size = cpu_to_le32(cmd->cmd_len);
+	return srbcmd;
+}
+
+static struct aac_hba_cmd_req *aac_construct_hbacmd(struct fib *fib,
+							struct scsi_cmnd *cmd)
+{
+	struct aac_hba_cmd_req *hbacmd;
+	struct aac_dev *dev;
+	int bus, target;
+	u64 address;
+
+	dev = (struct aac_dev *)cmd->device->host->hostdata;
+
+	hbacmd = (struct aac_hba_cmd_req *)fib->hw_fib_va;
+	memset(hbacmd, 0, 96);	/* sizeof(*hbacmd) is not necessary */
+	/* iu_type is a parameter of aac_hba_send */
+	switch (cmd->sc_data_direction) {
+	case DMA_TO_DEVICE:
+		hbacmd->byte1 = 2;
+		break;
+	case DMA_FROM_DEVICE:
+	case DMA_BIDIRECTIONAL:
+		hbacmd->byte1 = 1;
+		break;
+	case DMA_NONE:
+	default:
+		break;
+	}
+	hbacmd->lun[1] = cpu_to_le32(cmd->device->lun);
+
+	bus = aac_logical_to_phys(scmd_channel(cmd));
+	target = scmd_id(cmd);
+	hbacmd->it_nexus = dev->hba_map[bus][target].rmw_nexus;
+
+	/* we fill in reply_qid later in aac_src_deliver_message */
+	/* we fill in iu_type, request_id later in aac_hba_send */
+	/* we fill in emb_data_desc_count later in aac_build_sghba */
+
+	memcpy(hbacmd->cdb, cmd->cmnd, cmd->cmd_len);
+	hbacmd->data_length = cpu_to_le32(scsi_bufflen(cmd));
+
+	address = (u64)fib->hw_error_pa;
+	hbacmd->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
+	hbacmd->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
+	hbacmd->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
+
+	return hbacmd;
+}
+
+static void aac_srb_callback(void *context, struct fib * fibptr);
+
+static int aac_scsi_64(struct fib * fib, struct scsi_cmnd * cmd)
+{
+	u16 fibsize;
+	struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
+	long ret;
+
+	ret = aac_build_sg64(cmd, (struct sgmap64 *) &srbcmd->sg);
+	if (ret < 0)
+		return ret;
+	srbcmd->count = cpu_to_le32(scsi_bufflen(cmd));
+
+	memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
+	memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len);
+	/*
+	 *	Build Scatter/Gather list
+	 */
+	fibsize = sizeof (struct aac_srb) - sizeof (struct sgentry) +
+		((le32_to_cpu(srbcmd->sg.count) & 0xff) *
+		 sizeof (struct sgentry64));
+	BUG_ON (fibsize > (fib->dev->max_fib_size -
+				sizeof(struct aac_fibhdr)));
+
+	/*
+	 *	Now send the Fib to the adapter
+	 */
+	return aac_fib_send(ScsiPortCommand64, fib,
+				fibsize, FsaNormal, 0, 1,
+				  (fib_callback) aac_srb_callback,
+				  (void *) cmd);
+}
+
+static int aac_scsi_32(struct fib * fib, struct scsi_cmnd * cmd)
+{
+	u16 fibsize;
+	struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
+	long ret;
+
+	ret = aac_build_sg(cmd, (struct sgmap *)&srbcmd->sg);
+	if (ret < 0)
+		return ret;
+	srbcmd->count = cpu_to_le32(scsi_bufflen(cmd));
+
+	memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
+	memcpy(srbcmd->cdb, cmd->cmnd, cmd->cmd_len);
+	/*
+	 *	Build Scatter/Gather list
+	 */
+	fibsize = sizeof (struct aac_srb) +
+		(((le32_to_cpu(srbcmd->sg.count) & 0xff) - 1) *
+		 sizeof (struct sgentry));
+	BUG_ON (fibsize > (fib->dev->max_fib_size -
+				sizeof(struct aac_fibhdr)));
+
+	/*
+	 *	Now send the Fib to the adapter
+	 */
+	return aac_fib_send(ScsiPortCommand, fib, fibsize, FsaNormal, 0, 1,
+				  (fib_callback) aac_srb_callback, (void *) cmd);
+}
+
+static int aac_scsi_32_64(struct fib * fib, struct scsi_cmnd * cmd)
+{
+	if ((sizeof(dma_addr_t) > 4) && fib->dev->needs_dac &&
+	    (fib->dev->adapter_info.options & AAC_OPT_SGMAP_HOST64))
+		return FAILED;
+	return aac_scsi_32(fib, cmd);
+}
+
+static int aac_adapter_hba(struct fib *fib, struct scsi_cmnd *cmd)
+{
+	struct aac_hba_cmd_req *hbacmd = aac_construct_hbacmd(fib, cmd);
+	struct aac_dev *dev;
+	long ret;
+
+	dev = (struct aac_dev *)cmd->device->host->hostdata;
+
+	ret = aac_build_sghba(cmd, hbacmd,
+		dev->scsi_host_ptr->sg_tablesize, (u64)fib->hw_sgl_pa);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 *	Now send the HBA command to the adapter
+	 */
+	fib->hbacmd_size = 64 + le32_to_cpu(hbacmd->emb_data_desc_count) *
+		sizeof(struct aac_hba_sgl);
+
+	return aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ, fib,
+				  (fib_callback) aac_hba_callback,
+				  (void *) cmd);
+}
+
+static int aac_send_safw_bmic_cmd(struct aac_dev *dev,
+	struct aac_srb_unit *srbu, void *xfer_buf, int xfer_len)
+{
+	struct fib	*fibptr;
+	dma_addr_t	addr;
+	int		rcode;
+	int		fibsize;
+	struct aac_srb	*srb;
+	struct aac_srb_reply *srb_reply;
+	struct sgmap64	*sg64;
+	u32 vbus;
+	u32 vid;
+
+	if (!dev->sa_firmware)
+		return 0;
+
+	/* allocate FIB */
+	fibptr = aac_fib_alloc(dev);
+	if (!fibptr)
+		return -ENOMEM;
+
+	aac_fib_init(fibptr);
+	fibptr->hw_fib_va->header.XferState &=
+		~cpu_to_le32(FastResponseCapable);
+
+	fibsize  = sizeof(struct aac_srb) - sizeof(struct sgentry) +
+						sizeof(struct sgentry64);
+
+	/* allocate DMA buffer for response */
+	addr = dma_map_single(&dev->pdev->dev, xfer_buf, xfer_len,
+							DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(&dev->pdev->dev, addr)) {
+		rcode = -ENOMEM;
+		goto fib_error;
+	}
+
+	srb = fib_data(fibptr);
+	memcpy(srb, &srbu->srb, sizeof(struct aac_srb));
+
+	vbus = (u32)le16_to_cpu(
+			dev->supplement_adapter_info.virt_device_bus);
+	vid  = (u32)le16_to_cpu(
+			dev->supplement_adapter_info.virt_device_target);
+
+	/* set the common request fields */
+	srb->channel		= cpu_to_le32(vbus);
+	srb->id			= cpu_to_le32(vid);
+	srb->lun		= 0;
+	srb->function		= cpu_to_le32(SRBF_ExecuteScsi);
+	srb->timeout		= 0;
+	srb->retry_limit	= 0;
+	srb->cdb_size		= cpu_to_le32(16);
+	srb->count		= cpu_to_le32(xfer_len);
+
+	sg64 = (struct sgmap64 *)&srb->sg;
+	sg64->count		= cpu_to_le32(1);
+	sg64->sg[0].addr[1]	= cpu_to_le32(upper_32_bits(addr));
+	sg64->sg[0].addr[0]	= cpu_to_le32(lower_32_bits(addr));
+	sg64->sg[0].count	= cpu_to_le32(xfer_len);
+
+	/*
+	 * Copy the updated data back for dumping or other usage if needed
+	 */
+	memcpy(&srbu->srb, srb, sizeof(struct aac_srb));
+
+	/* issue request to the controller */
+	rcode = aac_fib_send(ScsiPortCommand64, fibptr, fibsize, FsaNormal,
+					1, 1, NULL, NULL);
+
+	if (rcode == -ERESTARTSYS)
+		rcode = -ERESTART;
+
+	if (unlikely(rcode < 0))
+		goto bmic_error;
+
+	srb_reply = (struct aac_srb_reply *)fib_data(fibptr);
+	memcpy(&srbu->srb_reply, srb_reply, sizeof(struct aac_srb_reply));
+
+bmic_error:
+	dma_unmap_single(&dev->pdev->dev, addr, xfer_len, DMA_BIDIRECTIONAL);
+fib_error:
+	aac_fib_complete(fibptr);
+	aac_fib_free(fibptr);
+	return rcode;
+}
+
+static void aac_set_safw_target_qd(struct aac_dev *dev, int bus, int target)
+{
+
+	struct aac_ciss_identify_pd *identify_resp;
+
+	if (dev->hba_map[bus][target].devtype != AAC_DEVTYPE_NATIVE_RAW)
+		return;
+
+	identify_resp = dev->hba_map[bus][target].safw_identify_resp;
+	if (identify_resp == NULL) {
+		dev->hba_map[bus][target].qd_limit = 32;
+		return;
+	}
+
+	if (identify_resp->current_queue_depth_limit <= 0 ||
+		identify_resp->current_queue_depth_limit > 255)
+		dev->hba_map[bus][target].qd_limit = 32;
+	else
+		dev->hba_map[bus][target].qd_limit =
+			identify_resp->current_queue_depth_limit;
+}
+
+static int aac_issue_safw_bmic_identify(struct aac_dev *dev,
+	struct aac_ciss_identify_pd **identify_resp, u32 bus, u32 target)
+{
+	int rcode = -ENOMEM;
+	int datasize;
+	struct aac_srb_unit srbu;
+	struct aac_srb *srbcmd;
+	struct aac_ciss_identify_pd *identify_reply;
+
+	datasize = sizeof(struct aac_ciss_identify_pd);
+	identify_reply = kmalloc(datasize, GFP_KERNEL);
+	if (!identify_reply)
+		goto out;
+
+	memset(&srbu, 0, sizeof(struct aac_srb_unit));
+
+	srbcmd = &srbu.srb;
+	srbcmd->flags	= cpu_to_le32(SRB_DataIn);
+	srbcmd->cdb[0]	= 0x26;
+	srbcmd->cdb[2]	= (u8)((AAC_MAX_LUN + target) & 0x00FF);
+	srbcmd->cdb[6]	= CISS_IDENTIFY_PHYSICAL_DEVICE;
+
+	rcode = aac_send_safw_bmic_cmd(dev, &srbu, identify_reply, datasize);
+	if (unlikely(rcode < 0))
+		goto mem_free_all;
+
+	*identify_resp = identify_reply;
+
+out:
+	return rcode;
+mem_free_all:
+	kfree(identify_reply);
+	goto out;
+}
+
+static inline void aac_free_safw_ciss_luns(struct aac_dev *dev)
+{
+	kfree(dev->safw_phys_luns);
+	dev->safw_phys_luns = NULL;
+}
+
+/**
+ *	aac_get_safw_ciss_luns - Process topology change
+ *	@dev:		aac_dev structure
+ *
+ *	Execute a CISS REPORT PHYS LUNS and process the results into
+ *	the current hba_map.
+ */
+static int aac_get_safw_ciss_luns(struct aac_dev *dev)
+{
+	int rcode = -ENOMEM;
+	int datasize;
+	struct aac_srb *srbcmd;
+	struct aac_srb_unit srbu;
+	struct aac_ciss_phys_luns_resp *phys_luns;
+
+	datasize = sizeof(struct aac_ciss_phys_luns_resp) +
+		(AAC_MAX_TARGETS - 1) * sizeof(struct _ciss_lun);
+	phys_luns = kmalloc(datasize, GFP_KERNEL);
+	if (phys_luns == NULL)
+		goto out;
+
+	memset(&srbu, 0, sizeof(struct aac_srb_unit));
+
+	srbcmd = &srbu.srb;
+	srbcmd->flags	= cpu_to_le32(SRB_DataIn);
+	srbcmd->cdb[0]	= CISS_REPORT_PHYSICAL_LUNS;
+	srbcmd->cdb[1]	= 2; /* extended reporting */
+	srbcmd->cdb[8]	= (u8)(datasize >> 8);
+	srbcmd->cdb[9]	= (u8)(datasize);
+
+	rcode = aac_send_safw_bmic_cmd(dev, &srbu, phys_luns, datasize);
+	if (unlikely(rcode < 0))
+		goto mem_free_all;
+
+	if (phys_luns->resp_flag != 2) {
+		rcode = -ENOMSG;
+		goto mem_free_all;
+	}
+
+	dev->safw_phys_luns = phys_luns;
+
+out:
+	return rcode;
+mem_free_all:
+	kfree(phys_luns);
+	goto out;
+}
+
+static inline u32 aac_get_safw_phys_lun_count(struct aac_dev *dev)
+{
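+	/* list_length is in bytes; each extended CISS LUN entry is 24 bytes */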
+	return get_unaligned_be32(&dev->safw_phys_luns->list_length[0])/24;
+}
+
+static inline u32 aac_get_safw_phys_bus(struct aac_dev *dev, int lun)
+{
+	return dev->safw_phys_luns->lun[lun].level2[1] & 0x3f;
+}
+
+static inline u32 aac_get_safw_phys_target(struct aac_dev *dev, int lun)
+{
+	return dev->safw_phys_luns->lun[lun].level2[0];
+}
+
+static inline u32 aac_get_safw_phys_expose_flag(struct aac_dev *dev, int lun)
+{
+	return dev->safw_phys_luns->lun[lun].bus >> 6;
+}
+
+static inline u32 aac_get_safw_phys_attribs(struct aac_dev *dev, int lun)
+{
+	return dev->safw_phys_luns->lun[lun].node_ident[9];
+}
+
+static inline u32 aac_get_safw_phys_nexus(struct aac_dev *dev, int lun)
+{
+	return *((u32 *)&dev->safw_phys_luns->lun[lun].node_ident[12]);
+}
+
+static inline u32 aac_get_safw_phys_device_type(struct aac_dev *dev, int lun)
+{
+	return dev->safw_phys_luns->lun[lun].node_ident[8];
+}
+
+static inline void aac_free_safw_identify_resp(struct aac_dev *dev,
+						int bus, int target)
+{
+	kfree(dev->hba_map[bus][target].safw_identify_resp);
+	dev->hba_map[bus][target].safw_identify_resp = NULL;
+}
+
+static inline void aac_free_safw_all_identify_resp(struct aac_dev *dev,
+	int lun_count)
+{
+	int luns;
+	int i;
+	u32 bus;
+	u32 target;
+
+	luns = aac_get_safw_phys_lun_count(dev);
+
+	if (luns < lun_count)
+		lun_count = luns;
+	else if (lun_count < 0)
+		lun_count = luns;
+
+	for (i = 0; i < lun_count; i++) {
+		bus = aac_get_safw_phys_bus(dev, i);
+		target = aac_get_safw_phys_target(dev, i);
+
+		aac_free_safw_identify_resp(dev, bus, target);
+	}
+}
+
+static int aac_get_safw_attr_all_targets(struct aac_dev *dev)
+{
+	int i;
+	int rcode = 0;
+	u32 lun_count;
+	u32 bus;
+	u32 target;
+	struct aac_ciss_identify_pd *identify_resp = NULL;
+
+	lun_count = aac_get_safw_phys_lun_count(dev);
+
+	for (i = 0; i < lun_count; ++i) {
+
+		bus = aac_get_safw_phys_bus(dev, i);
+		target = aac_get_safw_phys_target(dev, i);
+
+		rcode = aac_issue_safw_bmic_identify(dev,
+						&identify_resp, bus, target);
+
+		if (unlikely(rcode < 0))
+			goto free_identify_resp;
+
+		dev->hba_map[bus][target].safw_identify_resp = identify_resp;
+	}
+
+out:
+	return rcode;
+free_identify_resp:
+	aac_free_safw_all_identify_resp(dev, i);
+	goto out;
+}
+
+/**
+ *	aac_set_safw_attr_all_targets - update current hba map with data from FW
+ *	@dev:	aac_dev structure
+ *
+ *	Update our hba map with the information gathered from the FW
+ */
+static void aac_set_safw_attr_all_targets(struct aac_dev *dev)
+{
+	/* safw_phys_luns holds a validated extended-format report */
+	u32 lun_count, nexus;
+	u32 i, bus, target;
+	u8 expose_flag, attribs;
+
+	lun_count = aac_get_safw_phys_lun_count(dev);
+
+	dev->scan_counter++;
+
+	for (i = 0; i < lun_count; ++i) {
+
+		bus = aac_get_safw_phys_bus(dev, i);
+		target = aac_get_safw_phys_target(dev, i);
+		expose_flag = aac_get_safw_phys_expose_flag(dev, i);
+		attribs = aac_get_safw_phys_attribs(dev, i);
+		nexus = aac_get_safw_phys_nexus(dev, i);
+
+		if (bus >= AAC_MAX_BUSES || target >= AAC_MAX_TARGETS)
+			continue;
+
+		if (expose_flag != 0) {
+			dev->hba_map[bus][target].devtype =
+				AAC_DEVTYPE_RAID_MEMBER;
+			continue;
+		}
+
+		if (nexus != 0 && (attribs & 8)) {
+			dev->hba_map[bus][target].devtype =
+				AAC_DEVTYPE_NATIVE_RAW;
+			dev->hba_map[bus][target].rmw_nexus = nexus;
+		} else {
+			dev->hba_map[bus][target].devtype =
+				AAC_DEVTYPE_ARC_RAW;
+		}
+
+		dev->hba_map[bus][target].scan_counter = dev->scan_counter;
+
+		aac_set_safw_target_qd(dev, bus, target);
+	}
+}
+
+static int aac_setup_safw_targets(struct aac_dev *dev)
+{
+	int rcode = 0;
+
+	rcode = aac_get_containers(dev);
+	if (unlikely(rcode < 0))
+		goto out;
+
+	rcode = aac_get_safw_ciss_luns(dev);
+	if (unlikely(rcode < 0))
+		goto out;
+
+	rcode = aac_get_safw_attr_all_targets(dev);
+	if (unlikely(rcode < 0))
+		goto free_ciss_luns;
+
+	aac_set_safw_attr_all_targets(dev);
+
+	aac_free_safw_all_identify_resp(dev, -1);
+free_ciss_luns:
+	aac_free_safw_ciss_luns(dev);
+out:
+	return rcode;
+}
+
+int aac_setup_safw_adapter(struct aac_dev *dev)
+{
+	return aac_setup_safw_targets(dev);
+}
+
+int aac_get_adapter_info(struct aac_dev* dev)
+{
+	struct fib* fibptr;
+	int rcode;
+	u32 tmp, bus, target;
+	struct aac_adapter_info *info;
+	struct aac_bus_info *command;
+	struct aac_bus_info_response *bus_info;
+
+	if (!(fibptr = aac_fib_alloc(dev)))
+		return -ENOMEM;
+
+	aac_fib_init(fibptr);
+	info = (struct aac_adapter_info *) fib_data(fibptr);
+	memset(info,0,sizeof(*info));
+
+	rcode = aac_fib_send(RequestAdapterInfo,
+			 fibptr,
+			 sizeof(*info),
+			 FsaNormal,
+			 -1, 1, /* First `interrupt' command uses special wait */
+			 NULL,
+			 NULL);
+
+	if (rcode < 0) {
+		/* FIB should be freed only after
+		 * getting the response from the F/W */
+		if (rcode != -ERESTARTSYS) {
+			aac_fib_complete(fibptr);
+			aac_fib_free(fibptr);
+		}
+		return rcode;
+	}
+	memcpy(&dev->adapter_info, info, sizeof(*info));
+
+	dev->supplement_adapter_info.virt_device_bus = 0xffff;
+	if (dev->adapter_info.options & AAC_OPT_SUPPLEMENT_ADAPTER_INFO) {
+		struct aac_supplement_adapter_info * sinfo;
+
+		aac_fib_init(fibptr);
+
+		sinfo = (struct aac_supplement_adapter_info *) fib_data(fibptr);
+
+		memset(sinfo,0,sizeof(*sinfo));
+
+		rcode = aac_fib_send(RequestSupplementAdapterInfo,
+				 fibptr,
+				 sizeof(*sinfo),
+				 FsaNormal,
+				 1, 1,
+				 NULL,
+				 NULL);
+
+		if (rcode >= 0)
+			memcpy(&dev->supplement_adapter_info, sinfo, sizeof(*sinfo));
+		if (rcode == -ERESTARTSYS) {
+			fibptr = aac_fib_alloc(dev);
+			if (!fibptr)
+				return -ENOMEM;
+		}
+
+	}
+
+	/* reset all previous mapped devices (i.e. for init. after IOP_RESET) */
+	for (bus = 0; bus < AAC_MAX_BUSES; bus++) {
+		for (target = 0; target < AAC_MAX_TARGETS; target++) {
+			dev->hba_map[bus][target].devtype = 0;
+			dev->hba_map[bus][target].qd_limit = 0;
+		}
+	}
+
+	/*
+	 * GetBusInfo
+	 */
+
+	aac_fib_init(fibptr);
+
+	bus_info = (struct aac_bus_info_response *) fib_data(fibptr);
+
+	memset(bus_info, 0, sizeof(*bus_info));
+
+	command = (struct aac_bus_info *)bus_info;
+
+	command->Command = cpu_to_le32(VM_Ioctl);
+	command->ObjType = cpu_to_le32(FT_DRIVE);
+	command->MethodId = cpu_to_le32(1);
+	command->CtlCmd = cpu_to_le32(GetBusInfo);
+
+	rcode = aac_fib_send(ContainerCommand,
+			 fibptr,
+			 sizeof (*bus_info),
+			 FsaNormal,
+			 1, 1,
+			 NULL, NULL);
+
+	/* reasonable default */
+	dev->maximum_num_physicals = 16;
+	if (rcode >= 0 && le32_to_cpu(bus_info->Status) == ST_OK) {
+		dev->maximum_num_physicals = le32_to_cpu(bus_info->TargetsPerBus);
+		dev->maximum_num_channels = le32_to_cpu(bus_info->BusCount);
+	}
+
+	if (!dev->in_reset) {
+		char buffer[16];
+		tmp = le32_to_cpu(dev->adapter_info.kernelrev);
+		printk(KERN_INFO "%s%d: kernel %d.%d-%d[%d] %.*s\n",
+			dev->name,
+			dev->id,
+			tmp>>24,
+			(tmp>>16)&0xff,
+			tmp&0xff,
+			le32_to_cpu(dev->adapter_info.kernelbuild),
+			(int)sizeof(dev->supplement_adapter_info.build_date),
+			dev->supplement_adapter_info.build_date);
+		tmp = le32_to_cpu(dev->adapter_info.monitorrev);
+		printk(KERN_INFO "%s%d: monitor %d.%d-%d[%d]\n",
+			dev->name, dev->id,
+			tmp>>24,(tmp>>16)&0xff,tmp&0xff,
+			le32_to_cpu(dev->adapter_info.monitorbuild));
+		tmp = le32_to_cpu(dev->adapter_info.biosrev);
+		printk(KERN_INFO "%s%d: bios %d.%d-%d[%d]\n",
+			dev->name, dev->id,
+			tmp>>24,(tmp>>16)&0xff,tmp&0xff,
+			le32_to_cpu(dev->adapter_info.biosbuild));
+		buffer[0] = '\0';
+		if (aac_get_serial_number(
+		  shost_to_class(dev->scsi_host_ptr), buffer))
+			printk(KERN_INFO "%s%d: serial %s",
+			  dev->name, dev->id, buffer);
+		if (dev->supplement_adapter_info.vpd_info.tsid[0]) {
+			printk(KERN_INFO "%s%d: TSID %.*s\n",
+			  dev->name, dev->id,
+			  (int)sizeof(dev->supplement_adapter_info
+							.vpd_info.tsid),
+				dev->supplement_adapter_info.vpd_info.tsid);
+		}
+		if (!aac_check_reset || ((aac_check_reset == 1) &&
+		  (dev->supplement_adapter_info.supported_options2 &
+		  AAC_OPTION_IGNORE_RESET))) {
+			printk(KERN_INFO "%s%d: Reset Adapter Ignored\n",
+			  dev->name, dev->id);
+		}
+	}
+
+	dev->cache_protected = 0;
+	dev->jbod = ((dev->supplement_adapter_info.feature_bits &
+		AAC_FEATURE_JBOD) != 0);
+	dev->nondasd_support = 0;
+	dev->raid_scsi_mode = 0;
+	if(dev->adapter_info.options & AAC_OPT_NONDASD)
+		dev->nondasd_support = 1;
+
+	/*
+	 * If the firmware supports ROMB RAID/SCSI mode and we are currently
+	 * in RAID/SCSI mode, set the flag. For now if in this mode we will
+	 * force nondasd support on. If we decide to allow the non-dasd flag
+	 * additional changes will have to be made to support
+	 * RAID/SCSI. The function aac_scsi_cmd in this module will have to be
+	 * changed to support the new dev->raid_scsi_mode flag instead of
+	 * leeching off of the dev->nondasd_support flag. Also, in linit.c the
+	 * function aac_detect will have to be modified where it sets up the
+	 * max number of channels based on the aac->nondasd_support flag only.
+	 */
+	if ((dev->adapter_info.options & AAC_OPT_SCSI_MANAGED) &&
+	    (dev->adapter_info.options & AAC_OPT_RAID_SCSI_MODE)) {
+		dev->nondasd_support = 1;
+		dev->raid_scsi_mode = 1;
+	}
+	if (dev->raid_scsi_mode != 0)
+		printk(KERN_INFO "%s%d: ROMB RAID/SCSI mode enabled\n",
+				dev->name, dev->id);
+
+	if (nondasd != -1)
+		dev->nondasd_support = (nondasd!=0);
+	if (dev->nondasd_support && !dev->in_reset)
+		printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id);
+
+	if (dma_get_required_mask(&dev->pdev->dev) > DMA_BIT_MASK(32))
+		dev->needs_dac = 1;
+	dev->dac_support = 0;
+	if ((sizeof(dma_addr_t) > 4) && dev->needs_dac &&
+	    (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)) {
+		if (!dev->in_reset)
+			printk(KERN_INFO "%s%d: 64bit support enabled.\n",
+				dev->name, dev->id);
+		dev->dac_support = 1;
+	}
+
+	if(dacmode != -1) {
+		dev->dac_support = (dacmode!=0);
+	}
+
+	/* avoid problems with AAC_QUIRK_SCSI_32 controllers */
+	if (dev->dac_support &&	(aac_get_driver_ident(dev->cardtype)->quirks
+		& AAC_QUIRK_SCSI_32)) {
+		dev->nondasd_support = 0;
+		dev->jbod = 0;
+		expose_physicals = 0;
+	}
+
+	if (dev->dac_support) {
+		if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(64))) {
+			if (!dev->in_reset)
+				dev_info(&dev->pdev->dev, "64 Bit DAC enabled\n");
+		} else if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(32))) {
+			dev_info(&dev->pdev->dev, "DMA mask set failed, 64 Bit DAC disabled\n");
+			dev->dac_support = 0;
+		} else {
+			dev_info(&dev->pdev->dev, "No suitable DMA available\n");
+			rcode = -ENOMEM;
+		}
+	}
+	/*
+	 * Deal with configuring for the individualized limits of each packet
+	 * interface.
+	 */
+	dev->a_ops.adapter_scsi = (dev->dac_support)
+	  ? ((aac_get_driver_ident(dev->cardtype)->quirks & AAC_QUIRK_SCSI_32)
+				? aac_scsi_32_64
+				: aac_scsi_64)
+				: aac_scsi_32;
+	if (dev->raw_io_interface) {
+		dev->a_ops.adapter_bounds = (dev->raw_io_64)
+					? aac_bounds_64
+					: aac_bounds_32;
+		dev->a_ops.adapter_read = aac_read_raw_io;
+		dev->a_ops.adapter_write = aac_write_raw_io;
+	} else {
+		dev->a_ops.adapter_bounds = aac_bounds_32;
+		dev->scsi_host_ptr->sg_tablesize = (dev->max_fib_size -
+			sizeof(struct aac_fibhdr) -
+			sizeof(struct aac_write) + sizeof(struct sgentry)) /
+				sizeof(struct sgentry);
+		if (dev->dac_support) {
+			dev->a_ops.adapter_read = aac_read_block64;
+			dev->a_ops.adapter_write = aac_write_block64;
+			/*
+			 * 38 scatter gather elements
+			 */
+			dev->scsi_host_ptr->sg_tablesize =
+				(dev->max_fib_size -
+				sizeof(struct aac_fibhdr) -
+				sizeof(struct aac_write64) +
+				sizeof(struct sgentry64)) /
+					sizeof(struct sgentry64);
+		} else {
+			dev->a_ops.adapter_read = aac_read_block;
+			dev->a_ops.adapter_write = aac_write_block;
+		}
+		dev->scsi_host_ptr->max_sectors = AAC_MAX_32BIT_SGBCOUNT;
+		if (!(dev->adapter_info.options & AAC_OPT_NEW_COMM)) {
+			/*
+			 * Worst case size that could cause sg overflow when
+			 * we break up SG elements that are larger than 64KB.
+			 * Would be nice if we could tell the SCSI layer what
+			 * the maximum SG element size can be. Worst case is
+			 * (sg_tablesize-1) 4KB elements with one 64KB
+			 * element.
+			 *	32bit -> 468 or 238KB	64bit -> 424 or 212KB
+			 */
+			dev->scsi_host_ptr->max_sectors =
+			  (dev->scsi_host_ptr->sg_tablesize * 8) + 112;
+		}
+	}
+	if (!dev->sync_mode && dev->sa_firmware &&
+		dev->scsi_host_ptr->sg_tablesize > HBA_MAX_SG_SEPARATE)
+		dev->scsi_host_ptr->sg_tablesize = dev->sg_tablesize =
+			HBA_MAX_SG_SEPARATE;
+
+	/* FIB should be freed only after getting the response from the F/W */
+	if (rcode != -ERESTARTSYS) {
+		aac_fib_complete(fibptr);
+		aac_fib_free(fibptr);
+	}
+
+	return rcode;
+}
+
+
+static void io_callback(void *context, struct fib * fibptr)
+{
+	struct aac_dev *dev;
+	struct aac_read_reply *readreply;
+	struct scsi_cmnd *scsicmd;
+	u32 cid;
+
+	scsicmd = (struct scsi_cmnd *) context;
+
+	if (!aac_valid_context(scsicmd, fibptr))
+		return;
+
+	dev = fibptr->dev;
+	cid = scmd_id(scsicmd);
+
+	if (nblank(dprintk(x))) {
+		u64 lba;
+		switch (scsicmd->cmnd[0]) {
+		case WRITE_6:
+		case READ_6:
+			lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
+			    (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
+			break;
+		case WRITE_16:
+		case READ_16:
+			lba = ((u64)scsicmd->cmnd[2] << 56) |
+			      ((u64)scsicmd->cmnd[3] << 48) |
+			      ((u64)scsicmd->cmnd[4] << 40) |
+			      ((u64)scsicmd->cmnd[5] << 32) |
+			      ((u64)scsicmd->cmnd[6] << 24) |
+			      (scsicmd->cmnd[7] << 16) |
+			      (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
+			break;
+		case WRITE_12:
+		case READ_12:
+			lba = ((u64)scsicmd->cmnd[2] << 24) |
+			      (scsicmd->cmnd[3] << 16) |
+			      (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+			break;
+		default:
+			lba = ((u64)scsicmd->cmnd[2] << 24) |
+			       (scsicmd->cmnd[3] << 16) |
+			       (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+			break;
+		}
+		printk(KERN_DEBUG
+		  "io_callback[cpu %d]: lba = %llu, t = %ld.\n",
+		  smp_processor_id(), (unsigned long long)lba, jiffies);
+	}
+
+	BUG_ON(fibptr == NULL);
+
+	scsi_dma_unmap(scsicmd);
+
+	readreply = (struct aac_read_reply *)fib_data(fibptr);
+	switch (le32_to_cpu(readreply->status)) {
+	case ST_OK:
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+			SAM_STAT_GOOD;
+		dev->fsa_dev[cid].sense_data.sense_key = NO_SENSE;
+		break;
+	case ST_NOT_READY:
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+			SAM_STAT_CHECK_CONDITION;
+		set_sense(&dev->fsa_dev[cid].sense_data, NOT_READY,
+		  SENCODE_BECOMING_READY, ASENCODE_BECOMING_READY, 0, 0);
+		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+		       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+			     SCSI_SENSE_BUFFERSIZE));
+		break;
+	case ST_MEDERR:
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+			SAM_STAT_CHECK_CONDITION;
+		set_sense(&dev->fsa_dev[cid].sense_data, MEDIUM_ERROR,
+		  SENCODE_UNRECOVERED_READ_ERROR, ASENCODE_NO_SENSE, 0, 0);
+		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+		       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+			     SCSI_SENSE_BUFFERSIZE));
+		break;
+	default:
+#ifdef AAC_DETAILED_STATUS_INFO
+		printk(KERN_WARNING "io_callback: io failed, status = %d\n",
+		  le32_to_cpu(readreply->status));
+#endif
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+			SAM_STAT_CHECK_CONDITION;
+		set_sense(&dev->fsa_dev[cid].sense_data,
+		  HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
+		  ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
+		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+		       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+			     SCSI_SENSE_BUFFERSIZE));
+		break;
+	}
+	aac_fib_complete(fibptr);
+
+	scsicmd->scsi_done(scsicmd);
+}
+
+static int aac_read(struct scsi_cmnd * scsicmd)
+{
+	u64 lba;
+	u32 count;
+	int status;
+	struct aac_dev *dev;
+	struct fib * cmd_fibcontext;
+	int cid;
+
+	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
+	/*
+	 *	Get block address and transfer length
+	 */
+	switch (scsicmd->cmnd[0]) {
+	case READ_6:
+		dprintk((KERN_DEBUG "aachba: received a read(6) command on id %d.\n", scmd_id(scsicmd)));
+
+		lba = ((scsicmd->cmnd[1] & 0x1F) << 16) |
+			(scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
+		count = scsicmd->cmnd[4];
+
+		if (count == 0)
+			count = 256;
+		break;
+	case READ_16:
+		dprintk((KERN_DEBUG "aachba: received a read(16) command on id %d.\n", scmd_id(scsicmd)));
+
+		lba =	((u64)scsicmd->cmnd[2] << 56) |
+			((u64)scsicmd->cmnd[3] << 48) |
+			((u64)scsicmd->cmnd[4] << 40) |
+			((u64)scsicmd->cmnd[5] << 32) |
+			((u64)scsicmd->cmnd[6] << 24) |
+			(scsicmd->cmnd[7] << 16) |
+			(scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
+		count = (scsicmd->cmnd[10] << 24) |
+			(scsicmd->cmnd[11] << 16) |
+			(scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
+		break;
+	case READ_12:
+		dprintk((KERN_DEBUG "aachba: received a read(12) command on id %d.\n", scmd_id(scsicmd)));
+
+		lba = ((u64)scsicmd->cmnd[2] << 24) |
+			(scsicmd->cmnd[3] << 16) |
+			(scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+		count = (scsicmd->cmnd[6] << 24) |
+			(scsicmd->cmnd[7] << 16) |
+			(scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
+		break;
+	default:
+		dprintk((KERN_DEBUG "aachba: received a read(10) command on id %d.\n", scmd_id(scsicmd)));
+
+		lba = ((u64)scsicmd->cmnd[2] << 24) |
+			(scsicmd->cmnd[3] << 16) |
+			(scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+		count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
+		break;
+	}
+
+	if ((lba + count) > (dev->fsa_dev[scmd_id(scsicmd)].size)) {
+		cid = scmd_id(scsicmd);
+		dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+			SAM_STAT_CHECK_CONDITION;
+		set_sense(&dev->fsa_dev[cid].sense_data,
+			  HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
+			  ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
+		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+		       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+			     SCSI_SENSE_BUFFERSIZE));
+		scsicmd->scsi_done(scsicmd);
+		return 1;
+	}
+
+	dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n",
+	  smp_processor_id(), (unsigned long long)lba, jiffies));
+	if (aac_adapter_bounds(dev,scsicmd,lba))
+		return 0;
+	/*
+	 *	Allocate and initialize a Fib
+	 */
+	cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	status = aac_adapter_read(cmd_fibcontext, scsicmd, lba, count);
+
+	/*
+	 *	Check that the command queued to the controller
+	 */
+	if (status == -EINPROGRESS)
+		return 0;
+
+	printk(KERN_WARNING "aac_read: aac_fib_send failed with status: %d.\n", status);
+	/*
+	 *	For some reason, the Fib didn't queue, return QUEUE_FULL
+	 */
+	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
+	scsicmd->scsi_done(scsicmd);
+	aac_fib_complete(cmd_fibcontext);
+	aac_fib_free(cmd_fibcontext);
+	return 0;
+}
+
+static int aac_write(struct scsi_cmnd * scsicmd)
+{
+	u64 lba;
+	u32 count;
+	int fua;
+	int status;
+	struct aac_dev *dev;
+	struct fib * cmd_fibcontext;
+	int cid;
+
+	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
+	/*
+	 *	Get block address and transfer length
+	 */
+	if (scsicmd->cmnd[0] == WRITE_6) {	/* 6 byte command */
+		lba = ((scsicmd->cmnd[1] & 0x1F) << 16) | (scsicmd->cmnd[2] << 8) | scsicmd->cmnd[3];
+		count = scsicmd->cmnd[4];
+		if (count == 0)
+			count = 256;
+		fua = 0;
+	} else if (scsicmd->cmnd[0] == WRITE_16) { /* 16 byte command */
+		dprintk((KERN_DEBUG "aachba: received a write(16) command on id %d.\n", scmd_id(scsicmd)));
+
+		lba =	((u64)scsicmd->cmnd[2] << 56) |
+			((u64)scsicmd->cmnd[3] << 48) |
+			((u64)scsicmd->cmnd[4] << 40) |
+			((u64)scsicmd->cmnd[5] << 32) |
+			((u64)scsicmd->cmnd[6] << 24) |
+			(scsicmd->cmnd[7] << 16) |
+			(scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
+		count = (scsicmd->cmnd[10] << 24) | (scsicmd->cmnd[11] << 16) |
+			(scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
+		fua = scsicmd->cmnd[1] & 0x8;
+	} else if (scsicmd->cmnd[0] == WRITE_12) { /* 12 byte command */
+		dprintk((KERN_DEBUG "aachba: received a write(12) command on id %d.\n", scmd_id(scsicmd)));
+
+		lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16)
+		    | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+		count = (scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16)
+		      | (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
+		fua = scsicmd->cmnd[1] & 0x8;
+	} else {
+		dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", scmd_id(scsicmd)));
+		lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+		count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
+		fua = scsicmd->cmnd[1] & 0x8;
+	}
+
+	if ((lba + count) > (dev->fsa_dev[scmd_id(scsicmd)].size)) {
+		cid = scmd_id(scsicmd);
+		dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+			SAM_STAT_CHECK_CONDITION;
+		set_sense(&dev->fsa_dev[cid].sense_data,
+			  HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
+			  ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
+		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+		       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+			     SCSI_SENSE_BUFFERSIZE));
+		scsicmd->scsi_done(scsicmd);
+		return 1;
+	}
+
+	dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
+	  smp_processor_id(), (unsigned long long)lba, jiffies));
+	if (aac_adapter_bounds(dev,scsicmd,lba))
+		return 0;
+	/*
+	 *	Allocate and initialize a Fib then setup a BlockWrite command
+	 */
+	cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua);
+
+	/*
+	 *	Check that the command queued to the controller
+	 */
+	if (status == -EINPROGRESS)
+		return 0;
+
+	printk(KERN_WARNING "aac_write: aac_fib_send failed with status: %d\n", status);
+	/*
+	 *	For some reason, the Fib didn't queue, return QUEUE_FULL
+	 */
+	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
+	scsicmd->scsi_done(scsicmd);
+
+	aac_fib_complete(cmd_fibcontext);
+	aac_fib_free(cmd_fibcontext);
+	return 0;
+}
+
+static void synchronize_callback(void *context, struct fib *fibptr)
+{
+	struct aac_synchronize_reply *synchronizereply;
+	struct scsi_cmnd *cmd;
+
+	cmd = context;
+
+	if (!aac_valid_context(cmd, fibptr))
+		return;
+
+	dprintk((KERN_DEBUG "synchronize_callback[cpu %d]: t = %ld.\n",
+				smp_processor_id(), jiffies));
+	BUG_ON(fibptr == NULL);
+
+
+	synchronizereply = fib_data(fibptr);
+	if (le32_to_cpu(synchronizereply->status) == CT_OK)
+		cmd->result = DID_OK << 16 |
+			COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+	else {
+		struct scsi_device *sdev = cmd->device;
+		struct aac_dev *dev = fibptr->dev;
+		u32 cid = sdev_id(sdev);
+		printk(KERN_WARNING
+		     "synchronize_callback: synchronize failed, status = %d\n",
+		     le32_to_cpu(synchronizereply->status));
+		cmd->result = DID_OK << 16 |
+			COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
+		set_sense(&dev->fsa_dev[cid].sense_data,
+		  HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
+		  ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
+		memcpy(cmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+		       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+			     SCSI_SENSE_BUFFERSIZE));
+	}
+
+	aac_fib_complete(fibptr);
+	aac_fib_free(fibptr);
+	cmd->scsi_done(cmd);
+}
+
+static int aac_synchronize(struct scsi_cmnd *scsicmd)
+{
+	int status;
+	struct fib *cmd_fibcontext;
+	struct aac_synchronize *synchronizecmd;
+	struct scsi_cmnd *cmd;
+	struct scsi_device *sdev = scsicmd->device;
+	int active = 0;
+	struct aac_dev *aac;
+	u64 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) |
+		(scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
+	u32 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
+	unsigned long flags;
+
+	/*
+	 * Wait for all outstanding queued commands to complete to this
+	 * specific target (block).
+	 */
+	spin_lock_irqsave(&sdev->list_lock, flags);
+	list_for_each_entry(cmd, &sdev->cmd_list, list)
+		if (cmd->SCp.phase == AAC_OWNER_FIRMWARE) {
+			u64 cmnd_lba;
+			u32 cmnd_count;
+
+			if (cmd->cmnd[0] == WRITE_6) {
+				cmnd_lba = ((cmd->cmnd[1] & 0x1F) << 16) |
+					(cmd->cmnd[2] << 8) |
+					cmd->cmnd[3];
+				cmnd_count = cmd->cmnd[4];
+				if (cmnd_count == 0)
+					cmnd_count = 256;
+			} else if (cmd->cmnd[0] == WRITE_16) {
+				cmnd_lba = ((u64)cmd->cmnd[2] << 56) |
+					((u64)cmd->cmnd[3] << 48) |
+					((u64)cmd->cmnd[4] << 40) |
+					((u64)cmd->cmnd[5] << 32) |
+					((u64)cmd->cmnd[6] << 24) |
+					(cmd->cmnd[7] << 16) |
+					(cmd->cmnd[8] << 8) |
+					cmd->cmnd[9];
+				cmnd_count = (cmd->cmnd[10] << 24) |
+					(cmd->cmnd[11] << 16) |
+					(cmd->cmnd[12] << 8) |
+					cmd->cmnd[13];
+			} else if (cmd->cmnd[0] == WRITE_12) {
+				cmnd_lba = ((u64)cmd->cmnd[2] << 24) |
+					(cmd->cmnd[3] << 16) |
+					(cmd->cmnd[4] << 8) |
+					cmd->cmnd[5];
+				cmnd_count = (cmd->cmnd[6] << 24) |
+					(cmd->cmnd[7] << 16) |
+					(cmd->cmnd[8] << 8) |
+					cmd->cmnd[9];
+			} else if (cmd->cmnd[0] == WRITE_10) {
+				cmnd_lba = ((u64)cmd->cmnd[2] << 24) |
+					(cmd->cmnd[3] << 16) |
+					(cmd->cmnd[4] << 8) |
+					cmd->cmnd[5];
+				cmnd_count = (cmd->cmnd[7] << 8) |
+					cmd->cmnd[8];
+			} else
+				continue;
+			if (((cmnd_lba + cmnd_count) < lba) ||
+			  (count && ((lba + count) < cmnd_lba)))
+				continue;
+			++active;
+			break;
+		}
+
+	spin_unlock_irqrestore(&sdev->list_lock, flags);
+
+	/*
+	 *	Yield the processor (requeue for later)
+	 */
+	if (active)
+		return SCSI_MLQUEUE_DEVICE_BUSY;
+
+	aac = (struct aac_dev *)sdev->host->hostdata;
+	if (aac->in_reset)
+		return SCSI_MLQUEUE_HOST_BUSY;
+
+	/*
+	 *	Allocate and initialize a Fib
+	 */
+	if (!(cmd_fibcontext = aac_fib_alloc(aac)))
+		return SCSI_MLQUEUE_HOST_BUSY;
+
+	aac_fib_init(cmd_fibcontext);
+
+	synchronizecmd = fib_data(cmd_fibcontext);
+	synchronizecmd->command = cpu_to_le32(VM_ContainerConfig);
+	synchronizecmd->type = cpu_to_le32(CT_FLUSH_CACHE);
+	synchronizecmd->cid = cpu_to_le32(scmd_id(scsicmd));
+	synchronizecmd->count =
+	     cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data));
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+
+	/*
+	 *	Now send the Fib to the adapter
+	 */
+	status = aac_fib_send(ContainerCommand,
+		  cmd_fibcontext,
+		  sizeof(struct aac_synchronize),
+		  FsaNormal,
+		  0, 1,
+		  (fib_callback)synchronize_callback,
+		  (void *)scsicmd);
+
+	/*
+	 *	Check that the command queued to the controller
+	 */
+	if (status == -EINPROGRESS)
+		return 0;
+
+	printk(KERN_WARNING
+		"aac_synchronize: aac_fib_send failed with status: %d.\n", status);
+	aac_fib_complete(cmd_fibcontext);
+	aac_fib_free(cmd_fibcontext);
+	return SCSI_MLQUEUE_HOST_BUSY;
+}
+
+static void aac_start_stop_callback(void *context, struct fib *fibptr)
+{
+	struct scsi_cmnd *scsicmd = context;
+
+	if (!aac_valid_context(scsicmd, fibptr))
+		return;
+
+	BUG_ON(fibptr == NULL);
+
+	scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+
+	aac_fib_complete(fibptr);
+	aac_fib_free(fibptr);
+	scsicmd->scsi_done(scsicmd);
+}
+
+static int aac_start_stop(struct scsi_cmnd *scsicmd)
+{
+	int status;
+	struct fib *cmd_fibcontext;
+	struct aac_power_management *pmcmd;
+	struct scsi_device *sdev = scsicmd->device;
+	struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
+
+	if (!(aac->supplement_adapter_info.supported_options2 &
+	      AAC_OPTION_POWER_MANAGEMENT)) {
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+				  SAM_STAT_GOOD;
+		scsicmd->scsi_done(scsicmd);
+		return 0;
+	}
+
+	if (aac->in_reset)
+		return SCSI_MLQUEUE_HOST_BUSY;
+
+	/*
+	 *	Allocate and initialize a Fib
+	 */
+	cmd_fibcontext = aac_fib_alloc_tag(aac, scsicmd);
+
+	aac_fib_init(cmd_fibcontext);
+
+	pmcmd = fib_data(cmd_fibcontext);
+	pmcmd->command = cpu_to_le32(VM_ContainerConfig);
+	pmcmd->type = cpu_to_le32(CT_POWER_MANAGEMENT);
+	/* Eject bit ignored, not relevant */
+	pmcmd->sub = (scsicmd->cmnd[4] & 1) ?
+		cpu_to_le32(CT_PM_START_UNIT) : cpu_to_le32(CT_PM_STOP_UNIT);
+	pmcmd->cid = cpu_to_le32(sdev_id(sdev));
+	pmcmd->parm = (scsicmd->cmnd[1] & 1) ?
+		cpu_to_le32(CT_PM_UNIT_IMMEDIATE) : 0;
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+
+	/*
+	 *	Now send the Fib to the adapter
+	 */
+	status = aac_fib_send(ContainerCommand,
+		  cmd_fibcontext,
+		  sizeof(struct aac_power_management),
+		  FsaNormal,
+		  0, 1,
+		  (fib_callback)aac_start_stop_callback,
+		  (void *)scsicmd);
+
+	/*
+	 *	Check that the command queued to the controller
+	 */
+	if (status == -EINPROGRESS)
+		return 0;
+
+	aac_fib_complete(cmd_fibcontext);
+	aac_fib_free(cmd_fibcontext);
+	return SCSI_MLQUEUE_HOST_BUSY;
+}
+
+/**
+ *	aac_scsi_cmd() - Process SCSI command
+ *	@scsicmd:		SCSI command block
+ *
+ *	Emulate a SCSI command and queue the required request for the
+ *	aacraid firmware.
+ */
+
+int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
+{
+	u32 cid, bus;
+	struct Scsi_Host *host = scsicmd->device->host;
+	struct aac_dev *dev = (struct aac_dev *)host->hostdata;
+	struct fsa_dev_info *fsa_dev_ptr = dev->fsa_dev;
+
+	if (fsa_dev_ptr == NULL)
+		return -1;
+	/*
+	 *	If the bus, id or lun is out of range, return fail
+	 *	Test does not apply to ID 16, the pseudo id for the controller
+	 *	itself.
+	 */
+	cid = scmd_id(scsicmd);
+	if (cid != host->this_id) {
+		if (scmd_channel(scsicmd) == CONTAINER_CHANNEL) {
+			if((cid >= dev->maximum_num_containers) ||
+					(scsicmd->device->lun != 0)) {
+				scsicmd->result = DID_NO_CONNECT << 16;
+				goto scsi_done_ret;
+			}
+
+			/*
+			 *	If the target container doesn't exist, it may have
+			 *	been newly created
+			 */
+			if (((fsa_dev_ptr[cid].valid & 1) == 0) ||
+			  (fsa_dev_ptr[cid].sense_data.sense_key ==
+			   NOT_READY)) {
+				switch (scsicmd->cmnd[0]) {
+				case SERVICE_ACTION_IN_16:
+					if (!(dev->raw_io_interface) ||
+					    !(dev->raw_io_64) ||
+					    ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
+						break;
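+					/* fall through */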
+				case INQUIRY:
+				case READ_CAPACITY:
+				case TEST_UNIT_READY:
+					if (dev->in_reset)
+						return -1;
+					return _aac_probe_container(scsicmd,
+							aac_probe_container_callback2);
+				default:
+					break;
+				}
+			}
+		} else {  /* check for physical non-dasd devices */
+			bus = aac_logical_to_phys(scmd_channel(scsicmd));
+
+			if (bus < AAC_MAX_BUSES && cid < AAC_MAX_TARGETS &&
+				dev->hba_map[bus][cid].devtype
+					== AAC_DEVTYPE_NATIVE_RAW) {
+				if (dev->in_reset)
+					return -1;
+				return aac_send_hba_fib(scsicmd);
+			} else if (dev->nondasd_support || expose_physicals ||
+				dev->jbod) {
+				if (dev->in_reset)
+					return -1;
+				return aac_send_srb_fib(scsicmd);
+			} else {
+				scsicmd->result = DID_NO_CONNECT << 16;
+				goto scsi_done_ret;
+			}
+		}
+	}
+	/*
+	 * else Command for the controller itself
+	 */
+	else if ((scsicmd->cmnd[0] != INQUIRY) &&	/* only INQUIRY & TUR cmnd supported for controller */
+		(scsicmd->cmnd[0] != TEST_UNIT_READY))
+	{
+		dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0]));
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
+		set_sense(&dev->fsa_dev[cid].sense_data,
+		  ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
+		  ASENCODE_INVALID_COMMAND, 0, 0);
+		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+		       min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+			     SCSI_SENSE_BUFFERSIZE));
+		goto scsi_done_ret;
+	}
+
+	switch (scsicmd->cmnd[0]) {
+	case READ_6:
+	case READ_10:
+	case READ_12:
+	case READ_16:
+		if (dev->in_reset)
+			return -1;
+		return aac_read(scsicmd);
+
+	case WRITE_6:
+	case WRITE_10:
+	case WRITE_12:
+	case WRITE_16:
+		if (dev->in_reset)
+			return -1;
+		return aac_write(scsicmd);
+
+	case SYNCHRONIZE_CACHE:
+		if (((aac_cache & 6) == 6) && dev->cache_protected) {
+			scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+					  SAM_STAT_GOOD;
+			break;
+		}
+		/* Issue FIB to tell Firmware to flush its cache */
+		if ((aac_cache & 6) != 2)
+			return aac_synchronize(scsicmd);
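+		/* fall through */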
+	case INQUIRY:
+	{
+		struct inquiry_data inq_data;
+
+		dprintk((KERN_DEBUG "INQUIRY command, ID: %d.\n", cid));
+		memset(&inq_data, 0, sizeof (struct inquiry_data));
+
+		if ((scsicmd->cmnd[1] & 0x1) && aac_wwn) {
+			char *arr = (char *)&inq_data;
+
+			/* EVPD bit set */
+			arr[0] = (scmd_id(scsicmd) == host->this_id) ?
+			  INQD_PDT_PROC : INQD_PDT_DA;
+			if (scsicmd->cmnd[2] == 0) {
+				/* supported vital product data pages */
+				arr[3] = 3;
+				arr[4] = 0x0;
+				arr[5] = 0x80;
+				arr[6] = 0x83;
+				arr[1] = scsicmd->cmnd[2];
+				scsi_sg_copy_from_buffer(scsicmd, &inq_data,
+							 sizeof(inq_data));
+				scsicmd->result = DID_OK << 16 |
+						  COMMAND_COMPLETE << 8 |
+						  SAM_STAT_GOOD;
+			} else if (scsicmd->cmnd[2] == 0x80) {
+				/* unit serial number page */
+				arr[3] = setinqserial(dev, &arr[4],
+				  scmd_id(scsicmd));
+				arr[1] = scsicmd->cmnd[2];
+				scsi_sg_copy_from_buffer(scsicmd, &inq_data,
+							 sizeof(inq_data));
+				if (aac_wwn != 2)
+					return aac_get_container_serial(
+						scsicmd);
+				scsicmd->result = DID_OK << 16 |
+						  COMMAND_COMPLETE << 8 |
+						  SAM_STAT_GOOD;
+			} else if (scsicmd->cmnd[2] == 0x83) {
+				/* vpd page 0x83 - Device Identification Page */
+				char *sno = (char *)&inq_data;
+				sno[3] = setinqserial(dev, &sno[4],
+						      scmd_id(scsicmd));
+				if (aac_wwn != 2)
+					return aac_get_container_serial(
+						scsicmd);
+				scsicmd->result = DID_OK << 16 |
+						  COMMAND_COMPLETE << 8 |
+						  SAM_STAT_GOOD;
+			} else {
+				/* vpd page not implemented */
+				scsicmd->result = DID_OK << 16 |
+				  COMMAND_COMPLETE << 8 |
+				  SAM_STAT_CHECK_CONDITION;
+				set_sense(&dev->fsa_dev[cid].sense_data,
+				  ILLEGAL_REQUEST, SENCODE_INVALID_CDB_FIELD,
+				  ASENCODE_NO_SENSE, 7, 2);
+				memcpy(scsicmd->sense_buffer,
+				  &dev->fsa_dev[cid].sense_data,
+				  min_t(size_t,
+					sizeof(dev->fsa_dev[cid].sense_data),
+					SCSI_SENSE_BUFFERSIZE));
+			}
+			break;
+		}
+		inq_data.inqd_ver = 2;	/* claim compliance to SCSI-2 */
+		inq_data.inqd_rdf = 2;	/* A response data format value of two indicates that the data shall be in the format specified in SCSI-2 */
+		inq_data.inqd_len = 31;
+		/*Format for "pad2" is  RelAdr | WBus32 | WBus16 |  Sync  | Linked |Reserved| CmdQue | SftRe */
+		inq_data.inqd_pad2= 0x32 ;	 /*WBus16|Sync|CmdQue */
+		/*
+		 *	Set the Vendor, Product, and Revision Level
+		 *	see: <vendor>.c i.e. aac.c
+		 */
+		if (cid == host->this_id) {
+			setinqstr(dev, (void *) (inq_data.inqd_vid), ARRAY_SIZE(container_types));
+			inq_data.inqd_pdt = INQD_PDT_PROC;	/* Processor device */
+			scsi_sg_copy_from_buffer(scsicmd, &inq_data,
+						 sizeof(inq_data));
+			scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+					  SAM_STAT_GOOD;
+			break;
+		}
+		if (dev->in_reset)
+			return -1;
+		setinqstr(dev, (void *) (inq_data.inqd_vid), fsa_dev_ptr[cid].type);
+		inq_data.inqd_pdt = INQD_PDT_DA;	/* Direct/random access device */
+		scsi_sg_copy_from_buffer(scsicmd, &inq_data, sizeof(inq_data));
+		return aac_get_container_name(scsicmd);
+	}
+	case SERVICE_ACTION_IN_16:
+		if (!(dev->raw_io_interface) ||
+		    !(dev->raw_io_64) ||
+		    ((scsicmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
+			break;
+	{
+		u64 capacity;
+		char cp[13];
+		unsigned int alloc_len;
+
+		dprintk((KERN_DEBUG "READ CAPACITY_16 command.\n"));
+		capacity = fsa_dev_ptr[cid].size - 1;
+		cp[0] = (capacity >> 56) & 0xff;
+		cp[1] = (capacity >> 48) & 0xff;
+		cp[2] = (capacity >> 40) & 0xff;
+		cp[3] = (capacity >> 32) & 0xff;
+		cp[4] = (capacity >> 24) & 0xff;
+		cp[5] = (capacity >> 16) & 0xff;
+		cp[6] = (capacity >> 8) & 0xff;
+		cp[7] = (capacity >> 0) & 0xff;
+		cp[8] = (fsa_dev_ptr[cid].block_size >> 24) & 0xff;
+		cp[9] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
+		cp[10] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
+		cp[11] = (fsa_dev_ptr[cid].block_size) & 0xff;
+		cp[12] = 0;
+
+		alloc_len = ((scsicmd->cmnd[10] << 24)
+			     + (scsicmd->cmnd[11] << 16)
+			     + (scsicmd->cmnd[12] << 8) + scsicmd->cmnd[13]);
+
+		alloc_len = min_t(size_t, alloc_len, sizeof(cp));
+		scsi_sg_copy_from_buffer(scsicmd, cp, alloc_len);
+		if (alloc_len < scsi_bufflen(scsicmd))
+			scsi_set_resid(scsicmd,
+				       scsi_bufflen(scsicmd) - alloc_len);
+
+		/* Do not cache partition table for arrays */
+		scsicmd->device->removable = 1;
+
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+				  SAM_STAT_GOOD;
+		break;
+	}
+
+	case READ_CAPACITY:
+	{
+		u32 capacity;
+		char cp[8];
+
+		dprintk((KERN_DEBUG "READ CAPACITY command.\n"));
+		if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
+			capacity = fsa_dev_ptr[cid].size - 1;
+		else
+			capacity = (u32)-1;
+
+		cp[0] = (capacity >> 24) & 0xff;
+		cp[1] = (capacity >> 16) & 0xff;
+		cp[2] = (capacity >> 8) & 0xff;
+		cp[3] = (capacity >> 0) & 0xff;
+		cp[4] = (fsa_dev_ptr[cid].block_size >> 24) & 0xff;
+		cp[5] = (fsa_dev_ptr[cid].block_size >> 16) & 0xff;
+		cp[6] = (fsa_dev_ptr[cid].block_size >> 8) & 0xff;
+		cp[7] = (fsa_dev_ptr[cid].block_size) & 0xff;
+		scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp));
+		/* Do not cache partition table for arrays */
+		scsicmd->device->removable = 1;
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+				  SAM_STAT_GOOD;
+		break;
+	}
+
+	case MODE_SENSE:
+	{
+		int mode_buf_length = 4;
+		u32 capacity;
+		aac_modep_data mpd;
+
+		if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
+			capacity = fsa_dev_ptr[cid].size - 1;
+		else
+			capacity = (u32)-1;
+
+		dprintk((KERN_DEBUG "MODE SENSE command.\n"));
+		memset((char *)&mpd, 0, sizeof(aac_modep_data));
+
+		/* Mode data length */
+		mpd.hd.data_length = sizeof(mpd.hd) - 1;
+		/* Medium type - default */
+		mpd.hd.med_type = 0;
+		/* Device-specific param,
+		   bit 8: 0/1 = write enabled/protected
+		   bit 4: 0/1 = FUA enabled */
+		mpd.hd.dev_par = 0;
+
+		if (dev->raw_io_interface && ((aac_cache & 5) != 1))
+			mpd.hd.dev_par = 0x10;
+		if (scsicmd->cmnd[1] & 0x8)
+			mpd.hd.bd_length = 0;	/* Block descriptor length */
+		else {
+			mpd.hd.bd_length = sizeof(mpd.bd);
+			mpd.hd.data_length += mpd.hd.bd_length;
+			mpd.bd.block_length[0] =
+				(fsa_dev_ptr[cid].block_size >> 16) & 0xff;
+			mpd.bd.block_length[1] =
+				(fsa_dev_ptr[cid].block_size >> 8) &  0xff;
+			mpd.bd.block_length[2] =
+				fsa_dev_ptr[cid].block_size  & 0xff;
+
+			mpd.mpc_buf[0] = scsicmd->cmnd[2];
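+			/* page 0x1C is the Informational Exceptions Control mode page */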
+			if (scsicmd->cmnd[2] == 0x1C) {
+				/* page length */
+				mpd.mpc_buf[1] = 0xa;
+				/* Mode data length */
+				mpd.hd.data_length = 23;
+			} else {
+				/* Mode data length */
+				mpd.hd.data_length = 15;
+			}
+
+			if (capacity > 0xffffff) {
+				mpd.bd.block_count[0] = 0xff;
+				mpd.bd.block_count[1] = 0xff;
+				mpd.bd.block_count[2] = 0xff;
+			} else {
+				mpd.bd.block_count[0] = (capacity >> 16) & 0xff;
+				mpd.bd.block_count[1] = (capacity >> 8) & 0xff;
+				mpd.bd.block_count[2] = capacity  & 0xff;
+			}
+		}
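+		/* caching mode page (0x08) or all pages (0x3f) requested */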
+		if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
+		  ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) {
+			mpd.hd.data_length += 3;
+			mpd.mpc_buf[0] = 8;
+			mpd.mpc_buf[1] = 1;
+			mpd.mpc_buf[2] = ((aac_cache & 6) == 2)
+				? 0 : 0x04; /* WCE */
+			mode_buf_length = sizeof(mpd);
+		}
+
+		if (mode_buf_length > scsicmd->cmnd[4])
+			mode_buf_length = scsicmd->cmnd[4];
+		else
+			mode_buf_length = sizeof(mpd);
+		scsi_sg_copy_from_buffer(scsicmd,
+					 (char *)&mpd,
+					 mode_buf_length);
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+				  SAM_STAT_GOOD;
+		break;
+	}
+	case MODE_SENSE_10:
+	{
+		u32 capacity;
+		int mode_buf_length = 8;
+		aac_modep10_data mpd10;
+
+		if (fsa_dev_ptr[cid].size <= 0x100000000ULL)
+			capacity = fsa_dev_ptr[cid].size - 1;
+		else
+			capacity = (u32)-1;
+
+		dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n"));
+		memset((char *)&mpd10, 0, sizeof(aac_modep10_data));
+		/* Mode data length (MSB) */
+		mpd10.hd.data_length[0] = 0;
+		/* Mode data length (LSB) */
+		mpd10.hd.data_length[1] = sizeof(mpd10.hd) - 1;
+		/* Medium type - default */
+		mpd10.hd.med_type = 0;
+		/* Device-specific param,
+		   bit 8: 0/1 = write enabled/protected
+		   bit 4: 0/1 = FUA enabled */
+		mpd10.hd.dev_par = 0;
+
+		if (dev->raw_io_interface && ((aac_cache & 5) != 1))
+			mpd10.hd.dev_par = 0x10;
+		mpd10.hd.rsrvd[0] = 0;	/* reserved */
+		mpd10.hd.rsrvd[1] = 0;	/* reserved */
+		if (scsicmd->cmnd[1] & 0x8) {
+			/* Block descriptor length (MSB) */
+			mpd10.hd.bd_length[0] = 0;
+			/* Block descriptor length (LSB) */
+			mpd10.hd.bd_length[1] = 0;
+		} else {
+			mpd10.hd.bd_length[0] = 0;
+			mpd10.hd.bd_length[1] = sizeof(mpd10.bd);
+
+			mpd10.hd.data_length[1] += mpd10.hd.bd_length[1];
+
+			mpd10.bd.block_length[0] =
+				(fsa_dev_ptr[cid].block_size >> 16) & 0xff;
+			mpd10.bd.block_length[1] =
+				(fsa_dev_ptr[cid].block_size >> 8) & 0xff;
+			mpd10.bd.block_length[2] =
+				fsa_dev_ptr[cid].block_size  & 0xff;
+
+			if (capacity > 0xffffff) {
+				mpd10.bd.block_count[0] = 0xff;
+				mpd10.bd.block_count[1] = 0xff;
+				mpd10.bd.block_count[2] = 0xff;
+			} else {
+				mpd10.bd.block_count[0] =
+					(capacity >> 16) & 0xff;
+				mpd10.bd.block_count[1] =
+					(capacity >> 8) & 0xff;
+				mpd10.bd.block_count[2] =
+					capacity  & 0xff;
+			}
+		}
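+		/* caching mode page (0x08) or all pages (0x3f) requested */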
+		if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
+		  ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) {
+			mpd10.hd.data_length[1] += 3;
+			mpd10.mpc_buf[0] = 8;
+			mpd10.mpc_buf[1] = 1;
+			mpd10.mpc_buf[2] = ((aac_cache & 6) == 2)
+				? 0 : 0x04; /* WCE */
+			mode_buf_length = sizeof(mpd10);
+			if (mode_buf_length > scsicmd->cmnd[8])
+				mode_buf_length = scsicmd->cmnd[8];
+		}
+		scsi_sg_copy_from_buffer(scsicmd,
+					 (char *)&mpd10,
+					 mode_buf_length);
+
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+				  SAM_STAT_GOOD;
+		break;
+	}
+	case REQUEST_SENSE:
+		dprintk((KERN_DEBUG "REQUEST SENSE command.\n"));
+		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+				sizeof(struct sense_data));
+		memset(&dev->fsa_dev[cid].sense_data, 0,
+				sizeof(struct sense_data));
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+				  SAM_STAT_GOOD;
+		break;
+
+	case ALLOW_MEDIUM_REMOVAL:
+		dprintk((KERN_DEBUG "LOCK command.\n"));
+		if (scsicmd->cmnd[4])
+			fsa_dev_ptr[cid].locked = 1;
+		else
+			fsa_dev_ptr[cid].locked = 0;
+
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+				  SAM_STAT_GOOD;
+		break;
+	/*
+	 *	These commands are all No-Ops
+	 */
+	case TEST_UNIT_READY:
+		if (fsa_dev_ptr[cid].sense_data.sense_key == NOT_READY) {
+			scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+				SAM_STAT_CHECK_CONDITION;
+			set_sense(&dev->fsa_dev[cid].sense_data,
+				  NOT_READY, SENCODE_BECOMING_READY,
+				  ASENCODE_BECOMING_READY, 0, 0);
+			memcpy(scsicmd->sense_buffer,
+			       &dev->fsa_dev[cid].sense_data,
+			       min_t(size_t,
+				     sizeof(dev->fsa_dev[cid].sense_data),
+				     SCSI_SENSE_BUFFERSIZE));
+			break;
+		}
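+		/* fall through */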
+	case RESERVE:
+	case RELEASE:
+	case REZERO_UNIT:
+	case REASSIGN_BLOCKS:
+	case SEEK_10:
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+				  SAM_STAT_GOOD;
+		break;
+
+	case START_STOP:
+		return aac_start_stop(scsicmd);
+
+	default:
+	/*
+	 *	Unhandled commands
+	 */
+		dprintk((KERN_WARNING "Unhandled SCSI Command: 0x%x.\n",
+				scsicmd->cmnd[0]));
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+				SAM_STAT_CHECK_CONDITION;
+		set_sense(&dev->fsa_dev[cid].sense_data,
+			  ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
+			  ASENCODE_INVALID_COMMAND, 0, 0);
+		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+				min_t(size_t,
+				      sizeof(dev->fsa_dev[cid].sense_data),
+				      SCSI_SENSE_BUFFERSIZE));
+	}
+
+scsi_done_ret:
+
+	scsicmd->scsi_done(scsicmd);
+	return 0;
+}
+
+static int query_disk(struct aac_dev *dev, void __user *arg)
+{
+	struct aac_query_disk qd;
+	struct fsa_dev_info *fsa_dev_ptr;
+
+	fsa_dev_ptr = dev->fsa_dev;
+	if (!fsa_dev_ptr)
+		return -EBUSY;
+	if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
+		return -EFAULT;
+	if (qd.cnum == -1) {
+		if (qd.id < 0 || qd.id >= dev->maximum_num_containers)
+			return -EINVAL;
+		qd.cnum = qd.id;
+	} else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1)) {
+		if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers)
+			return -EINVAL;
+		qd.instance = dev->scsi_host_ptr->host_no;
+		qd.bus = 0;
+		qd.id = CONTAINER_TO_ID(qd.cnum);
+		qd.lun = CONTAINER_TO_LUN(qd.cnum);
+	} else {
+		return -EINVAL;
+	}
+
+	qd.valid = fsa_dev_ptr[qd.cnum].valid != 0;
+	qd.locked = fsa_dev_ptr[qd.cnum].locked;
+	qd.deleted = fsa_dev_ptr[qd.cnum].deleted;
+
+	if (fsa_dev_ptr[qd.cnum].devname[0] == '\0')
+		qd.unmapped = 1;
+	else
+		qd.unmapped = 0;
+
+	strlcpy(qd.name, fsa_dev_ptr[qd.cnum].devname,
+	  min(sizeof(qd.name), sizeof(fsa_dev_ptr[qd.cnum].devname) + 1));
+
+	if (copy_to_user(arg, &qd, sizeof (struct aac_query_disk)))
+		return -EFAULT;
+	return 0;
+}
+
+static int force_delete_disk(struct aac_dev *dev, void __user *arg)
+{
+	struct aac_delete_disk dd;
+	struct fsa_dev_info *fsa_dev_ptr;
+
+	fsa_dev_ptr = dev->fsa_dev;
+	if (!fsa_dev_ptr)
+		return -EBUSY;
+
+	if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
+		return -EFAULT;
+
+	if (dd.cnum >= dev->maximum_num_containers)
+		return -EINVAL;
+	/*
+	 *	Mark this container as being deleted.
+	 */
+	fsa_dev_ptr[dd.cnum].deleted = 1;
+	/*
+	 *	Mark the container as no longer valid
+	 */
+	fsa_dev_ptr[dd.cnum].valid = 0;
+	return 0;
+}
+
+static int delete_disk(struct aac_dev *dev, void __user *arg)
+{
+	struct aac_delete_disk dd;
+	struct fsa_dev_info *fsa_dev_ptr;
+
+	fsa_dev_ptr = dev->fsa_dev;
+	if (!fsa_dev_ptr)
+		return -EBUSY;
+
+	if (copy_from_user(&dd, arg, sizeof (struct aac_delete_disk)))
+		return -EFAULT;
+
+	if (dd.cnum >= dev->maximum_num_containers)
+		return -EINVAL;
+	/*
+	 *	If the container is locked, it can not be deleted by the API.
+	 */
+	if (fsa_dev_ptr[dd.cnum].locked)
+		return -EBUSY;
+	else {
+		/*
+		 *	Mark the container as no longer being valid.
+		 */
+		fsa_dev_ptr[dd.cnum].valid = 0;
+		fsa_dev_ptr[dd.cnum].devname[0] = '\0';
+		return 0;
+	}
+}
+
+int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg)
+{
+	switch (cmd) {
+	case FSACTL_QUERY_DISK:
+		return query_disk(dev, arg);
+	case FSACTL_DELETE_DISK:
+		return delete_disk(dev, arg);
+	case FSACTL_FORCE_DELETE_DISK:
+		return force_delete_disk(dev, arg);
+	case FSACTL_GET_CONTAINERS:
+		return aac_get_containers(dev);
+	default:
+		return -ENOTTY;
+	}
+}
+
+/**
+ *
+ * aac_srb_callback
+ * @context: the context set in the fib - here it is scsi cmd
+ * @fibptr: pointer to the fib
+ *
+ * Handles the completion of a scsi command to a non-dasd device
+ *
+ */
+
+static void aac_srb_callback(void *context, struct fib * fibptr)
+{
+	struct aac_dev *dev;
+	struct aac_srb_reply *srbreply;
+	struct scsi_cmnd *scsicmd;
+
+	scsicmd = (struct scsi_cmnd *) context;
+
+	if (!aac_valid_context(scsicmd, fibptr))
+		return;
+
+	BUG_ON(fibptr == NULL);
+
+	dev = fibptr->dev;
+
+	srbreply = (struct aac_srb_reply *) fib_data(fibptr);
+
+	scsicmd->sense_buffer[0] = '\0';  /* Initialize sense valid flag to false */
+
+	if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) {
+		/* fast response */
+		srbreply->srb_status = cpu_to_le32(SRB_STATUS_SUCCESS);
+		srbreply->scsi_status = cpu_to_le32(SAM_STAT_GOOD);
+	} else {
+		/*
+		 *	Calculate resid for sg
+		 */
+		scsi_set_resid(scsicmd, scsi_bufflen(scsicmd)
+				   - le32_to_cpu(srbreply->data_xfer_length));
+	}
+
+
+	scsi_dma_unmap(scsicmd);
+
+	/* expose physical device if expose_physicals flag is on */
+	if (scsicmd->cmnd[0] == INQUIRY && !(scsicmd->cmnd[1] & 0x01)
+	  && expose_physicals > 0)
+		aac_expose_phy_device(scsicmd);
+
+	/*
+	 * First check the fib status
+	 */
+
+	if (le32_to_cpu(srbreply->status) != ST_OK) {
+		int len;
+
+		pr_warn("aac_srb_callback: srb failed, status = %d\n",
+				le32_to_cpu(srbreply->status));
+		len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
+			    SCSI_SENSE_BUFFERSIZE);
+		scsicmd->result = DID_ERROR << 16
+				| COMMAND_COMPLETE << 8
+				| SAM_STAT_CHECK_CONDITION;
+		memcpy(scsicmd->sense_buffer,
+				srbreply->sense_data, len);
+	}
+
+	/*
+	 * Next check the srb status
+	 */
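+	/* low 6 bits carry the SRB status code; upper bits are flag bits */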
+	switch ((le32_to_cpu(srbreply->srb_status))&0x3f) {
+	case SRB_STATUS_ERROR_RECOVERY:
+	case SRB_STATUS_PENDING:
+	case SRB_STATUS_SUCCESS:
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+		break;
+	case SRB_STATUS_DATA_OVERRUN:
+		switch (scsicmd->cmnd[0]) {
+		case  READ_6:
+		case  WRITE_6:
+		case  READ_10:
+		case  WRITE_10:
+		case  READ_12:
+		case  WRITE_12:
+		case  READ_16:
+		case  WRITE_16:
+			if (le32_to_cpu(srbreply->data_xfer_length)
+						< scsicmd->underflow)
+				pr_warn("aacraid: SCSI CMD underflow\n");
+			else
+				pr_warn("aacraid: SCSI CMD Data Overrun\n");
+			scsicmd->result = DID_ERROR << 16
+					| COMMAND_COMPLETE << 8;
+			break;
+		case INQUIRY:
+			scsicmd->result = DID_OK << 16
+					| COMMAND_COMPLETE << 8;
+			break;
+		default:
+			scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+			break;
+		}
+		break;
+	case SRB_STATUS_ABORTED:
+		scsicmd->result = DID_ABORT << 16 | ABORT << 8;
+		break;
+	case SRB_STATUS_ABORT_FAILED:
+		/*
+		 * Not sure about this one - but assuming the
+		 * hba was trying to abort for some reason
+		 */
+		scsicmd->result = DID_ERROR << 16 | ABORT << 8;
+		break;
+	case SRB_STATUS_PARITY_ERROR:
+		scsicmd->result = DID_PARITY << 16
+				| MSG_PARITY_ERROR << 8;
+		break;
+	case SRB_STATUS_NO_DEVICE:
+	case SRB_STATUS_INVALID_PATH_ID:
+	case SRB_STATUS_INVALID_TARGET_ID:
+	case SRB_STATUS_INVALID_LUN:
+	case SRB_STATUS_SELECTION_TIMEOUT:
+		scsicmd->result = DID_NO_CONNECT << 16
+				| COMMAND_COMPLETE << 8;
+		break;
+
+	case SRB_STATUS_COMMAND_TIMEOUT:
+	case SRB_STATUS_TIMEOUT:
+		scsicmd->result = DID_TIME_OUT << 16
+				| COMMAND_COMPLETE << 8;
+		break;
+
+	case SRB_STATUS_BUSY:
+		scsicmd->result = DID_BUS_BUSY << 16
+				| COMMAND_COMPLETE << 8;
+		break;
+
+	case SRB_STATUS_BUS_RESET:
+		scsicmd->result = DID_RESET << 16
+				| COMMAND_COMPLETE << 8;
+		break;
+
+	case SRB_STATUS_MESSAGE_REJECTED:
+		scsicmd->result = DID_ERROR << 16
+				| MESSAGE_REJECT << 8;
+		break;
+	case SRB_STATUS_REQUEST_FLUSHED:
+	case SRB_STATUS_ERROR:
+	case SRB_STATUS_INVALID_REQUEST:
+	case SRB_STATUS_REQUEST_SENSE_FAILED:
+	case SRB_STATUS_NO_HBA:
+	case SRB_STATUS_UNEXPECTED_BUS_FREE:
+	case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
+	case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
+	case SRB_STATUS_DELAYED_RETRY:
+	case SRB_STATUS_BAD_FUNCTION:
+	case SRB_STATUS_NOT_STARTED:
+	case SRB_STATUS_NOT_IN_USE:
+	case SRB_STATUS_FORCE_ABORT:
+	case SRB_STATUS_DOMAIN_VALIDATION_FAIL:
+	default:
+#ifdef AAC_DETAILED_STATUS_INFO
+		pr_info("aacraid: SRB ERROR(%u) %s scsi cmd 0x%x -scsi status 0x%x\n",
+			le32_to_cpu(srbreply->srb_status) & 0x3F,
+			aac_get_status_string(
+				le32_to_cpu(srbreply->srb_status) & 0x3F),
+			scsicmd->cmnd[0],
+			le32_to_cpu(srbreply->scsi_status));
+#endif
+		/*
+		 * When the CC bit is SET by the host in ATA pass thru CDB,
+		 *  driver is supposed to return DID_OK
+		 *
+		 * When the CC bit is RESET by the host, driver should
+		 *  return DID_ERROR
+		 */
+		if ((scsicmd->cmnd[0] == ATA_12)
+			|| (scsicmd->cmnd[0] == ATA_16)) {
+
+			if (scsicmd->cmnd[2] & (0x01 << 5)) {
+				scsicmd->result = DID_OK << 16
+					| COMMAND_COMPLETE << 8;
+				break;
+			} else {
+				scsicmd->result = DID_ERROR << 16
+					| COMMAND_COMPLETE << 8;
+				break;
+			}
+		} else {
+			scsicmd->result = DID_ERROR << 16
+				| COMMAND_COMPLETE << 8;
+			break;
+		}
+	}
+	if (le32_to_cpu(srbreply->scsi_status)
+			== SAM_STAT_CHECK_CONDITION) {
+		int len;
+
+		scsicmd->result |= SAM_STAT_CHECK_CONDITION;
+		len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
+			    SCSI_SENSE_BUFFERSIZE);
+#ifdef AAC_DETAILED_STATUS_INFO
+		pr_warn("aac_srb_callback: check condition, status = %d len=%d\n",
+					le32_to_cpu(srbreply->status), len);
+#endif
+		memcpy(scsicmd->sense_buffer,
+				srbreply->sense_data, len);
+	}
+
+	/*
+	 * OR in the scsi status (already shifted up a bit)
+	 */
+	scsicmd->result |= le32_to_cpu(srbreply->scsi_status);
+
+	aac_fib_complete(fibptr);
+	scsicmd->scsi_done(scsicmd);
+}
+
+static void hba_resp_task_complete(struct aac_dev *dev,
+					struct scsi_cmnd *scsicmd,
+					struct aac_hba_resp *err)
+{
+	scsicmd->result = err->status;
+	/* set residual count */
+	scsi_set_resid(scsicmd, le32_to_cpu(err->residual_count));
+
+	switch (err->status) {
+	case SAM_STAT_GOOD:
+		scsicmd->result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
+		break;
+	case SAM_STAT_CHECK_CONDITION:
+	{
+		int len;
+
+		len = min_t(u8, err->sense_response_data_len,
+			SCSI_SENSE_BUFFERSIZE);
+		if (len)
+			memcpy(scsicmd->sense_buffer,
+				err->sense_response_buf, len);
+		scsicmd->result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
+		break;
+	}
+	case SAM_STAT_BUSY:
+		scsicmd->result |= DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
+		break;
+	case SAM_STAT_TASK_ABORTED:
+		scsicmd->result |= DID_ABORT << 16 | ABORT << 8;
+		break;
+	case SAM_STAT_RESERVATION_CONFLICT:
+	case SAM_STAT_TASK_SET_FULL:
+	default:
+		scsicmd->result |= DID_ERROR << 16 | COMMAND_COMPLETE << 8;
+		break;
+	}
+}
+
+static void hba_resp_task_failure(struct aac_dev *dev,
+					struct scsi_cmnd *scsicmd,
+					struct aac_hba_resp *err)
+{
+	switch (err->status) {
+	case HBA_RESP_STAT_HBAMODE_DISABLED:
+	{
+		u32 bus, cid;
+
+		bus = aac_logical_to_phys(scmd_channel(scsicmd));
+		cid = scmd_id(scsicmd);
+		if (dev->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
+			dev->hba_map[bus][cid].devtype = AAC_DEVTYPE_ARC_RAW;
+			dev->hba_map[bus][cid].rmw_nexus = 0xffffffff;
+		}
+		scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
+		break;
+	}
+	case HBA_RESP_STAT_IO_ERROR:
+	case HBA_RESP_STAT_NO_PATH_TO_DEVICE:
+		scsicmd->result = DID_OK << 16 |
+			COMMAND_COMPLETE << 8 | SAM_STAT_BUSY;
+		break;
+	case HBA_RESP_STAT_IO_ABORTED:
+		scsicmd->result = DID_ABORT << 16 | ABORT << 8;
+		break;
+	case HBA_RESP_STAT_INVALID_DEVICE:
+		scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
+		break;
+	case HBA_RESP_STAT_UNDERRUN:
+		/* UNDERRUN is OK */
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+		break;
+	case HBA_RESP_STAT_OVERRUN:
+	default:
+		scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
+		break;
+	}
+}
+
+/**
+ *
+ * aac_hba_callback
+ * @context: the context set in the fib - here it is scsi cmd
+ * @fibptr: pointer to the fib
+ *
+ * Handles the completion of a native HBA scsi command
+ *
+ */
+void aac_hba_callback(void *context, struct fib *fibptr)
+{
+	struct aac_dev *dev;
+	struct scsi_cmnd *scsicmd;
+
+	struct aac_hba_resp *err =
+			&((struct aac_native_hba *)fibptr->hw_fib_va)->resp.err;
+
+	scsicmd = (struct scsi_cmnd *) context;
+
+	if (!aac_valid_context(scsicmd, fibptr))
+		return;
+
+	WARN_ON(fibptr == NULL);
+	dev = fibptr->dev;
+
+	if (!(fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF))
+		scsi_dma_unmap(scsicmd);
+
+	if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) {
+		/* fast response */
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+		goto out;
+	}
+
+	switch (err->service_response) {
+	case HBA_RESP_SVCRES_TASK_COMPLETE:
+		hba_resp_task_complete(dev, scsicmd, err);
+		break;
+	case HBA_RESP_SVCRES_FAILURE:
+		hba_resp_task_failure(dev, scsicmd, err);
+		break;
+	case HBA_RESP_SVCRES_TMF_REJECTED:
+		scsicmd->result = DID_ERROR << 16 | MESSAGE_REJECT << 8;
+		break;
+	case HBA_RESP_SVCRES_TMF_LUN_INVALID:
+		scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
+		break;
+	case HBA_RESP_SVCRES_TMF_COMPLETE:
+	case HBA_RESP_SVCRES_TMF_SUCCEEDED:
+		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+		break;
+	default:
+		scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
+		break;
+	}
+
+out:
+	aac_fib_complete(fibptr);
+
+	if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF)
+		scsicmd->SCp.sent_command = 1;
+	else
+		scsicmd->scsi_done(scsicmd);
+}
+
+/**
+ *
+ * aac_send_srb_fib
+ * @scsicmd: the scsi command block
+ *
+ * This routine will form a FIB and fill in the aac_srb from the
+ * scsicmd passed in.
+ */
+
+static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
+{
+	struct fib* cmd_fibcontext;
+	struct aac_dev* dev;
+	int status;
+
+	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
+	if (scmd_id(scsicmd) >= dev->maximum_num_physicals ||
+			scsicmd->device->lun > 7) {
+		scsicmd->result = DID_NO_CONNECT << 16;
+		scsicmd->scsi_done(scsicmd);
+		return 0;
+	}
+
+	/*
+	 *	Allocate and initialize a Fib then setup an SRB command
+	 */
+	cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	status = aac_adapter_scsi(cmd_fibcontext, scsicmd);
+
+	/*
+	 *	Check that the command queued to the controller
+	 */
+	if (status == -EINPROGRESS)
+		return 0;
+
+	printk(KERN_WARNING "aac_srb: aac_fib_send failed with status: %d\n", status);
+	aac_fib_complete(cmd_fibcontext);
+	aac_fib_free(cmd_fibcontext);
+
+	return -1;
+}
+
+/**
+ *
+ * aac_send_hba_fib
+ * @scsicmd: the scsi command block
+ *
+ * This routine will form a FIB and fill in the aac_hba_cmd_req from the
+ * scsicmd passed in.
+ */
+static int aac_send_hba_fib(struct scsi_cmnd *scsicmd)
+{
+	struct fib *cmd_fibcontext;
+	struct aac_dev *dev;
+	int status;
+
+	dev = shost_priv(scsicmd->device->host);
+	if (scmd_id(scsicmd) >= dev->maximum_num_physicals ||
+			scsicmd->device->lun > AAC_MAX_LUN - 1) {
+		scsicmd->result = DID_NO_CONNECT << 16;
+		scsicmd->scsi_done(scsicmd);
+		return 0;
+	}
+
+	/*
+	 *	Allocate and initialize a Fib, then set up the native HBA command
+	 */
+	cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
+	if (!cmd_fibcontext)
+		return -1;
+
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	status = aac_adapter_hba(cmd_fibcontext, scsicmd);
+
+	/*
+	 *	Check that the command was queued to the controller
+	 */
+	if (status == -EINPROGRESS)
+		return 0;
+
+	pr_warn("aac_hba_cmd_req: aac_fib_send failed with status: %d\n",
+		status);
+	aac_fib_complete(cmd_fibcontext);
+	aac_fib_free(cmd_fibcontext);
+
+	return -1;
+}
+
+
+static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg)
+{
+	struct aac_dev *dev;
+	unsigned long byte_count = 0;
+	int nseg;
+	struct scatterlist *sg;
+	int i;
+
+	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
+	// Get rid of old data
+	psg->count = 0;
+	psg->sg[0].addr = 0;
+	psg->sg[0].count = 0;
+
+	nseg = scsi_dma_map(scsicmd);
+	if (nseg <= 0)
+		return nseg;
+
+	psg->count = cpu_to_le32(nseg);
+
+	scsi_for_each_sg(scsicmd, sg, nseg, i) {
+		psg->sg[i].addr = cpu_to_le32(sg_dma_address(sg));
+		psg->sg[i].count = cpu_to_le32(sg_dma_len(sg));
+		byte_count += sg_dma_len(sg);
+	}
+	/* hba wants the size to be exact */
+	if (byte_count > scsi_bufflen(scsicmd)) {
+		u32 temp = le32_to_cpu(psg->sg[i-1].count) -
+			(byte_count - scsi_bufflen(scsicmd));
+		psg->sg[i-1].count = cpu_to_le32(temp);
+		byte_count = scsi_bufflen(scsicmd);
+	}
+	/* Check for command underflow */
+	if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
+		printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
+		       byte_count, scsicmd->underflow);
+	}
+
+	return byte_count;
+}
+
+
+static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg)
+{
+	struct aac_dev *dev;
+	unsigned long byte_count = 0;
+	u64 addr;
+	int nseg;
+	struct scatterlist *sg;
+	int i;
+
+	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
+	// Get rid of old data
+	psg->count = 0;
+	psg->sg[0].addr[0] = 0;
+	psg->sg[0].addr[1] = 0;
+	psg->sg[0].count = 0;
+
+	nseg = scsi_dma_map(scsicmd);
+	if (nseg <= 0)
+		return nseg;
+
+	scsi_for_each_sg(scsicmd, sg, nseg, i) {
+		int count = sg_dma_len(sg);
+		addr = sg_dma_address(sg);
+		psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
+		psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
+		psg->sg[i].count = cpu_to_le32(count);
+		byte_count += count;
+	}
+	psg->count = cpu_to_le32(nseg);
+	/* hba wants the size to be exact */
+	if (byte_count > scsi_bufflen(scsicmd)) {
+		u32 temp = le32_to_cpu(psg->sg[i-1].count) -
+			(byte_count - scsi_bufflen(scsicmd));
+		psg->sg[i-1].count = cpu_to_le32(temp);
+		byte_count = scsi_bufflen(scsicmd);
+	}
+	/* Check for command underflow */
+	if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
+		printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
+		       byte_count, scsicmd->underflow);
+	}
+
+	return byte_count;
+}
+
+static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg)
+{
+	unsigned long byte_count = 0;
+	int nseg;
+	struct scatterlist *sg;
+	int i;
+
+	// Get rid of old data
+	psg->count = 0;
+	psg->sg[0].next = 0;
+	psg->sg[0].prev = 0;
+	psg->sg[0].addr[0] = 0;
+	psg->sg[0].addr[1] = 0;
+	psg->sg[0].count = 0;
+	psg->sg[0].flags = 0;
+
+	nseg = scsi_dma_map(scsicmd);
+	if (nseg <= 0)
+		return nseg;
+
+	scsi_for_each_sg(scsicmd, sg, nseg, i) {
+		int count = sg_dma_len(sg);
+		u64 addr = sg_dma_address(sg);
+		psg->sg[i].next = 0;
+		psg->sg[i].prev = 0;
+		psg->sg[i].addr[1] = cpu_to_le32((u32)(addr>>32));
+		psg->sg[i].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
+		psg->sg[i].count = cpu_to_le32(count);
+		psg->sg[i].flags = 0;
+		byte_count += count;
+	}
+	psg->count = cpu_to_le32(nseg);
+	/* hba wants the size to be exact */
+	if (byte_count > scsi_bufflen(scsicmd)) {
+		u32 temp = le32_to_cpu(psg->sg[i-1].count) -
+			(byte_count - scsi_bufflen(scsicmd));
+		psg->sg[i-1].count = cpu_to_le32(temp);
+		byte_count = scsi_bufflen(scsicmd);
+	}
+	/* Check for command underflow */
+	if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
+		printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
+		       byte_count, scsicmd->underflow);
+	}
+
+	return byte_count;
+}
+
+static long aac_build_sgraw2(struct scsi_cmnd *scsicmd,
+				struct aac_raw_io2 *rio2, int sg_max)
+{
+	unsigned long byte_count = 0;
+	int nseg;
+	struct scatterlist *sg;
+	int i, conformable = 0;
+	u32 min_size = PAGE_SIZE, cur_size;
+
+	nseg = scsi_dma_map(scsicmd);
+	if (nseg <= 0)
+		return nseg;
+
+	scsi_for_each_sg(scsicmd, sg, nseg, i) {
+		int count = sg_dma_len(sg);
+		u64 addr = sg_dma_address(sg);
+
+		BUG_ON(i >= sg_max);
+		rio2->sge[i].addrHigh = cpu_to_le32((u32)(addr>>32));
+		rio2->sge[i].addrLow = cpu_to_le32((u32)(addr & 0xffffffff));
+		cur_size = cpu_to_le32(count);
+		rio2->sge[i].length = cur_size;
+		rio2->sge[i].flags = 0;
+		if (i == 0) {
+			conformable = 1;
+			rio2->sgeFirstSize = cur_size;
+		} else if (i == 1) {
+			rio2->sgeNominalSize = cur_size;
+			min_size = cur_size;
+		} else if ((i+1) < nseg && cur_size != rio2->sgeNominalSize) {
+			conformable = 0;
+			if (cur_size < min_size)
+				min_size = cur_size;
+		}
+		byte_count += count;
+	}
+
+	/* hba wants the size to be exact */
+	if (byte_count > scsi_bufflen(scsicmd)) {
+		u32 temp = le32_to_cpu(rio2->sge[i-1].length) -
+			(byte_count - scsi_bufflen(scsicmd));
+		rio2->sge[i-1].length = cpu_to_le32(temp);
+		byte_count = scsi_bufflen(scsicmd);
+	}
+
+	rio2->sgeCnt = cpu_to_le32(nseg);
+	rio2->flags |= cpu_to_le16(RIO2_SG_FORMAT_IEEE1212);
+	/* not conformable: evaluate required sg elements */
+	if (!conformable) {
+		int j, nseg_new = nseg, err_found;
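+		/*
+		 * Find the largest chunk size (i pages) that evenly divides
+		 * every middle element; nseg_new counts the elements the
+		 * list would have after splitting at that granularity.
+		 */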
+		for (i = min_size / PAGE_SIZE; i >= 1; --i) {
+			err_found = 0;
+			nseg_new = 2;
+			for (j = 1; j < nseg - 1; ++j) {
+				if (rio2->sge[j].length % (i*PAGE_SIZE)) {
+					err_found = 1;
+					break;
+				}
+				nseg_new += (rio2->sge[j].length / (i*PAGE_SIZE));
+			}
+			if (!err_found)
+				break;
+		}
+		if (i > 0 && nseg_new <= sg_max) {
+			int ret = aac_convert_sgraw2(rio2, i, nseg, nseg_new);
+
+			if (ret < 0)
+				return ret;
+		}
+	} else
+		rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT);
+
+	/* Check for command underflow */
+	if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
+		printk(KERN_WARNING"aacraid: cmd len %08lX cmd underflow %08X\n",
+		       byte_count, scsicmd->underflow);
+	}
+
+	return byte_count;
+}
+
+static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, int pages, int nseg, int nseg_new)
+{
+	struct sge_ieee1212 *sge;
+	int i, j, pos;
+	u32 addr_low;
+
+	if (aac_convert_sgl == 0)
+		return 0;
+
+	sge = kmalloc_array(nseg_new, sizeof(struct sge_ieee1212), GFP_ATOMIC);
+	if (sge == NULL)
+		return -ENOMEM;
+
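+	/*
+	 * Split every middle element into chunks of 'pages' pages; the
+	 * first and last elements are kept as-is, which makes the list
+	 * conformant (a uniform nominal element size).
+	 */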
+	for (i = 1, pos = 1; i < nseg-1; ++i) {
+		for (j = 0; j < rio2->sge[i].length / (pages * PAGE_SIZE); ++j) {
+			addr_low = rio2->sge[i].addrLow + j * pages * PAGE_SIZE;
+			sge[pos].addrLow = addr_low;
+			sge[pos].addrHigh = rio2->sge[i].addrHigh;
+			if (addr_low < rio2->sge[i].addrLow)
+				sge[pos].addrHigh++;
+			sge[pos].length = pages * PAGE_SIZE;
+			sge[pos].flags = 0;
+			pos++;
+		}
+	}
+	sge[pos] = rio2->sge[nseg-1];
+	memcpy(&rio2->sge[1], &sge[1], (nseg_new-1)*sizeof(struct sge_ieee1212));
+
+	kfree(sge);
+	rio2->sgeCnt = cpu_to_le32(nseg_new);
+	rio2->flags |= cpu_to_le16(RIO2_SGL_CONFORMANT);
+	rio2->sgeNominalSize = pages * PAGE_SIZE;
+	return 0;
+}
+
+static long aac_build_sghba(struct scsi_cmnd *scsicmd,
+			struct aac_hba_cmd_req *hbacmd,
+			int sg_max,
+			u64 sg_address)
+{
+	unsigned long byte_count = 0;
+	int nseg;
+	struct scatterlist *sg;
+	int i;
+	u32 cur_size;
+	struct aac_hba_sgl *sge;
+
+	nseg = scsi_dma_map(scsicmd);
+	if (nseg <= 0) {
+		byte_count = nseg;
+		goto out;
+	}
+
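+	/*
+	 * Up to HBA_MAX_SG_EMBEDDED elements are placed directly in the
+	 * request (from sge[0]); a longer list is built at sge[2], and
+	 * sge[0] is later rewritten as a single extension descriptor whose
+	 * address, sg_address, is expected to be the DMA address of that
+	 * extended area.
+	 */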
+	if (nseg > HBA_MAX_SG_EMBEDDED)
+		sge = &hbacmd->sge[2];
+	else
+		sge = &hbacmd->sge[0];
+
+	scsi_for_each_sg(scsicmd, sg, nseg, i) {
+		int count = sg_dma_len(sg);
+		u64 addr = sg_dma_address(sg);
+
+		WARN_ON(i >= sg_max);
+		sge->addr_hi = cpu_to_le32((u32)(addr>>32));
+		sge->addr_lo = cpu_to_le32((u32)(addr & 0xffffffff));
+		cur_size = cpu_to_le32(count);
+		sge->len = cur_size;
+		sge->flags = 0;
+		byte_count += count;
+		sge++;
+	}
+
+	sge--;
+	/* hba wants the size to be exact */
+	if (byte_count > scsi_bufflen(scsicmd)) {
+		u32 temp;
+
+		temp = le32_to_cpu(sge->len) -
+			(byte_count - scsi_bufflen(scsicmd));
+		sge->len = cpu_to_le32(temp);
+		byte_count = scsi_bufflen(scsicmd);
+	}
+
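+	/*
+	 * Embedded list: flag the final element (0x40000000 appears to mark
+	 * the last SGE).  Extended list: turn sge[0] into one
+	 * HBA_SGL_FLAGS_EXT descriptor pointing at sg_address.
+	 */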
+	if (nseg <= HBA_MAX_SG_EMBEDDED) {
+		hbacmd->emb_data_desc_count = cpu_to_le32(nseg);
+		sge->flags = cpu_to_le32(0x40000000);
+	} else {
+		/* not embedded */
+		hbacmd->sge[0].flags = cpu_to_le32(0x80000000);
+		hbacmd->emb_data_desc_count = (u8)cpu_to_le32(1);
+		hbacmd->sge[0].addr_hi = (u32)cpu_to_le32(sg_address >> 32);
+		hbacmd->sge[0].addr_lo =
+			cpu_to_le32((u32)(sg_address & 0xffffffff));
+	}
+
+	/* Check for command underflow */
+	if (scsicmd->underflow && (byte_count < scsicmd->underflow)) {
+		pr_warn("aacraid: cmd len %08lX cmd underflow %08X\n",
+				byte_count, scsicmd->underflow);
+	}
+out:
+	return byte_count;
+}
+
+#ifdef AAC_DETAILED_STATUS_INFO
+
+struct aac_srb_status_info {
+	u32	status;
+	char	*str;
+};
+
+
+static struct aac_srb_status_info srb_status_info[] = {
+	{ SRB_STATUS_PENDING,		"Pending Status"},
+	{ SRB_STATUS_SUCCESS,		"Success"},
+	{ SRB_STATUS_ABORTED,		"Aborted Command"},
+	{ SRB_STATUS_ABORT_FAILED,	"Abort Failed"},
+	{ SRB_STATUS_ERROR,		"Error Event"},
+	{ SRB_STATUS_BUSY,		"Device Busy"},
+	{ SRB_STATUS_INVALID_REQUEST,	"Invalid Request"},
+	{ SRB_STATUS_INVALID_PATH_ID,	"Invalid Path ID"},
+	{ SRB_STATUS_NO_DEVICE,		"No Device"},
+	{ SRB_STATUS_TIMEOUT,		"Timeout"},
+	{ SRB_STATUS_SELECTION_TIMEOUT,	"Selection Timeout"},
+	{ SRB_STATUS_COMMAND_TIMEOUT,	"Command Timeout"},
+	{ SRB_STATUS_MESSAGE_REJECTED,	"Message Rejected"},
+	{ SRB_STATUS_BUS_RESET,		"Bus Reset"},
+	{ SRB_STATUS_PARITY_ERROR,	"Parity Error"},
+	{ SRB_STATUS_REQUEST_SENSE_FAILED,"Request Sense Failed"},
+	{ SRB_STATUS_NO_HBA,		"No HBA"},
+	{ SRB_STATUS_DATA_OVERRUN,	"Data Overrun/Data Underrun"},
+	{ SRB_STATUS_UNEXPECTED_BUS_FREE,"Unexpected Bus Free"},
+	{ SRB_STATUS_PHASE_SEQUENCE_FAILURE,"Phase Error"},
+	{ SRB_STATUS_BAD_SRB_BLOCK_LENGTH,"Bad Srb Block Length"},
+	{ SRB_STATUS_REQUEST_FLUSHED,	"Request Flushed"},
+	{ SRB_STATUS_DELAYED_RETRY,	"Delayed Retry"},
+	{ SRB_STATUS_INVALID_LUN,	"Invalid LUN"},
+	{ SRB_STATUS_INVALID_TARGET_ID,	"Invalid TARGET ID"},
+	{ SRB_STATUS_BAD_FUNCTION,	"Bad Function"},
+	{ SRB_STATUS_ERROR_RECOVERY,	"Error Recovery"},
+	{ SRB_STATUS_NOT_STARTED,	"Not Started"},
+	{ SRB_STATUS_NOT_IN_USE,	"Not In Use"},
+	{ SRB_STATUS_FORCE_ABORT,	"Force Abort"},
+	{ SRB_STATUS_DOMAIN_VALIDATION_FAIL,"Domain Validation Failure"},
+	{ 0xff,				"Unknown Error"}
+};
+
+char *aac_get_status_string(u32 status)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(srb_status_info); i++)
+		if (srb_status_info[i].status == status)
+			return srb_status_info[i].str;
+
+	return "Bad Status Code";
+}
+
+#endif
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
new file mode 100644
index 0000000..39eb415
--- /dev/null
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -0,0 +1,2769 @@
+/*
+ *	Adaptec AAC series RAID controller driver
+ *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *		 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ *  aacraid.h
+ *
+ * Abstract: Contains all routines for control of the aacraid driver
+ *
+ */
+
+#ifndef _AACRAID_H_
+#define _AACRAID_H_
+#ifndef dprintk
+# define dprintk(x)
+#endif
+/* eg: if (nblank(dprintk(x))) */
+#define _nblank(x) #x
+#define nblank(x) _nblank(x)[0]
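+/*
+ * nblank(x) expands x first and then takes the first character of its
+ * stringified form: zero when dprintk() expands to nothing, non-zero when
+ * debugging output is compiled in.
+ */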
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <scsi/scsi_host.h>
+
+/*------------------------------------------------------------------------------
+ *              D E F I N E S
+ *----------------------------------------------------------------------------*/
+
+#define AAC_MAX_MSIX		32	/* vectors */
+#define AAC_PCI_MSI_ENABLE	0x8000
+
+enum {
+	AAC_ENABLE_INTERRUPT	= 0x0,
+	AAC_DISABLE_INTERRUPT,
+	AAC_ENABLE_MSIX,
+	AAC_DISABLE_MSIX,
+	AAC_CLEAR_AIF_BIT,
+	AAC_CLEAR_SYNC_BIT,
+	AAC_ENABLE_INTX
+};
+
+#define AAC_INT_MODE_INTX		(1<<0)
+#define AAC_INT_MODE_MSI		(1<<1)
+#define AAC_INT_MODE_AIF		(1<<2)
+#define AAC_INT_MODE_SYNC		(1<<3)
+#define AAC_INT_MODE_MSIX		(1<<16)
+
+#define AAC_INT_ENABLE_TYPE1_INTX	0xfffffffb
+#define AAC_INT_ENABLE_TYPE1_MSIX	0xfffffffa
+#define AAC_INT_DISABLE_ALL		0xffffffff
+
+/* Bit definitions in IOA->Host Interrupt Register */
+#define PMC_TRANSITION_TO_OPERATIONAL	(1<<31)
+#define PMC_IOARCB_TRANSFER_FAILED	(1<<28)
+#define PMC_IOA_UNIT_CHECK		(1<<27)
+#define PMC_NO_HOST_RRQ_FOR_CMD_RESPONSE (1<<26)
+#define PMC_CRITICAL_IOA_OP_IN_PROGRESS	(1<<25)
+#define PMC_IOARRIN_LOST		(1<<4)
+#define PMC_SYSTEM_BUS_MMIO_ERROR	(1<<3)
+#define PMC_IOA_PROCESSOR_IN_ERROR_STATE (1<<2)
+#define PMC_HOST_RRQ_VALID		(1<<1)
+#define PMC_OPERATIONAL_STATUS		(1<<31)
+#define PMC_ALLOW_MSIX_VECTOR0		(1<<0)
+
+#define PMC_IOA_ERROR_INTERRUPTS	(PMC_IOARCB_TRANSFER_FAILED | \
+					 PMC_IOA_UNIT_CHECK | \
+					 PMC_NO_HOST_RRQ_FOR_CMD_RESPONSE | \
+					 PMC_IOARRIN_LOST | \
+					 PMC_SYSTEM_BUS_MMIO_ERROR | \
+					 PMC_IOA_PROCESSOR_IN_ERROR_STATE)
+
+#define PMC_ALL_INTERRUPT_BITS		(PMC_IOA_ERROR_INTERRUPTS | \
+					 PMC_HOST_RRQ_VALID | \
+					 PMC_TRANSITION_TO_OPERATIONAL | \
+					 PMC_ALLOW_MSIX_VECTOR0)
+#define	PMC_GLOBAL_INT_BIT2		0x00000004
+#define	PMC_GLOBAL_INT_BIT0		0x00000001
+
+#ifndef AAC_DRIVER_BUILD
+# define AAC_DRIVER_BUILD 50877
+# define AAC_DRIVER_BRANCH "-custom"
+#endif
+#define MAXIMUM_NUM_CONTAINERS	32
+
+#define AAC_NUM_MGT_FIB         8
+#define AAC_NUM_IO_FIB		(1024 - AAC_NUM_MGT_FIB)
+#define AAC_NUM_FIB		(AAC_NUM_IO_FIB + AAC_NUM_MGT_FIB)
+
+#define AAC_MAX_LUN		256
+
+#define AAC_MAX_HOSTPHYSMEMPAGES (0xfffff)
+#define AAC_MAX_32BIT_SGBCOUNT	((unsigned short)256)
+
+#define AAC_DEBUG_INSTRUMENT_AIF_DELETE
+
+#define AAC_MAX_NATIVE_TARGETS		1024
+/* Thor: 5 phys. buses: #0: empty, 1-4: 256 targets each */
+#define AAC_MAX_BUSES			5
+#define AAC_MAX_TARGETS		256
+#define AAC_BUS_TARGET_LOOP		(AAC_MAX_BUSES * AAC_MAX_TARGETS)
+#define AAC_MAX_NATIVE_SIZE		2048
+#define FW_ERROR_BUFFER_SIZE		512
+
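+/* decompose a flat bus/target index (0 .. AAC_BUS_TARGET_LOOP - 1) */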
+#define get_bus_number(x)	(x/AAC_MAX_TARGETS)
+#define get_target_number(x)	(x%AAC_MAX_TARGETS)
+
+/* Thor AIF events */
+#define SA_AIF_HOTPLUG			(1<<1)
+#define SA_AIF_HARDWARE		(1<<2)
+#define SA_AIF_PDEV_CHANGE		(1<<4)
+#define SA_AIF_LDEV_CHANGE		(1<<5)
+#define SA_AIF_BPSTAT_CHANGE		(1<<30)
+#define SA_AIF_BPCFG_CHANGE		(1<<31)
+
+#define HBA_MAX_SG_EMBEDDED		28
+#define HBA_MAX_SG_SEPARATE		90
+#define HBA_SENSE_DATA_LEN_MAX		32
+#define HBA_REQUEST_TAG_ERROR_FLAG	0x00000002
+#define HBA_SGL_FLAGS_EXT		0x80000000UL
+
+struct aac_hba_sgl {
+	u32		addr_lo; /* Lower 32-bits of SGL element address */
+	u32		addr_hi; /* Upper 32-bits of SGL element address */
+	u32		len;	/* Length of SGL element in bytes */
+	u32		flags;	/* SGL element flags */
+};
+
+enum {
+	HBA_IU_TYPE_SCSI_CMD_REQ		= 0x40,
+	HBA_IU_TYPE_SCSI_TM_REQ			= 0x41,
+	HBA_IU_TYPE_SATA_REQ			= 0x42,
+	HBA_IU_TYPE_RESP			= 0x60,
+	HBA_IU_TYPE_COALESCED_RESP		= 0x61,
+	HBA_IU_TYPE_INT_COALESCING_CFG_REQ	= 0x70
+};
+
+enum {
+	HBA_CMD_BYTE1_DATA_DIR_IN		= 0x1,
+	HBA_CMD_BYTE1_DATA_DIR_OUT		= 0x2,
+	HBA_CMD_BYTE1_DATA_TYPE_DDR		= 0x4,
+	HBA_CMD_BYTE1_CRYPTO_ENABLE		= 0x8
+};
+
+enum {
+	HBA_CMD_BYTE1_BITOFF_DATA_DIR_IN	= 0x0,
+	HBA_CMD_BYTE1_BITOFF_DATA_DIR_OUT,
+	HBA_CMD_BYTE1_BITOFF_DATA_TYPE_DDR,
+	HBA_CMD_BYTE1_BITOFF_CRYPTO_ENABLE
+};
+
+enum {
+	HBA_RESP_DATAPRES_NO_DATA		= 0x0,
+	HBA_RESP_DATAPRES_RESPONSE_DATA,
+	HBA_RESP_DATAPRES_SENSE_DATA
+};
+
+enum {
+	HBA_RESP_SVCRES_TASK_COMPLETE		= 0x0,
+	HBA_RESP_SVCRES_FAILURE,
+	HBA_RESP_SVCRES_TMF_COMPLETE,
+	HBA_RESP_SVCRES_TMF_SUCCEEDED,
+	HBA_RESP_SVCRES_TMF_REJECTED,
+	HBA_RESP_SVCRES_TMF_LUN_INVALID
+};
+
+enum {
+	HBA_RESP_STAT_IO_ERROR			= 0x1,
+	HBA_RESP_STAT_IO_ABORTED,
+	HBA_RESP_STAT_NO_PATH_TO_DEVICE,
+	HBA_RESP_STAT_INVALID_DEVICE,
+	HBA_RESP_STAT_HBAMODE_DISABLED		= 0xE,
+	HBA_RESP_STAT_UNDERRUN			= 0x51,
+	HBA_RESP_STAT_OVERRUN			= 0x75
+};
+
+struct aac_hba_cmd_req {
+	u8	iu_type;	/* HBA information unit type */
+	/*
+	 * byte1:
+	 * [1:0] DIR - 0=No data, 0x1 = IN, 0x2 = OUT
+	 * [2]   TYPE - 0=PCI, 1=DDR
+	 * [3]   CRYPTO_ENABLE - 0=Crypto disabled, 1=Crypto enabled
+	 */
+	u8	byte1;
+	u8	reply_qid;	/* Host reply queue to post response to */
+	u8	reserved1;
+	__le32	it_nexus;	/* Device handle for the request */
+	__le32	request_id;	/* Sender context */
+	/* Lower 32-bits of tweak value for crypto enabled IOs */
+	__le32	tweak_value_lo;
+	u8	cdb[16];	/* SCSI CDB of the command */
+	u8	lun[8];		/* SCSI LUN of the command */
+
+	/* Total data length in bytes to be read/written (if any) */
+	__le32	data_length;
+
+	/* [2:0] Task Attribute, [6:3] Command Priority */
+	u8	attr_prio;
+
+	/* Number of SGL elements embedded in the HBA req */
+	u8	emb_data_desc_count;
+
+	__le16	dek_index;	/* DEK index for crypto enabled IOs */
+
+	/* Lower 32-bits of reserved error data target location on the host */
+	__le32	error_ptr_lo;
+
+	/* Upper 32-bits of reserved error data target location on the host */
+	__le32	error_ptr_hi;
+
+	/* Length of reserved error data area on the host in bytes */
+	__le32	error_length;
+
+	/* Upper 32-bits of tweak value for crypto enabled IOs */
+	__le32	tweak_value_hi;
+
+	struct aac_hba_sgl sge[HBA_MAX_SG_SEPARATE+2]; /* SG list space */
+
+	/*
+	 * structure must not exceed
+	 * AAC_MAX_NATIVE_SIZE-FW_ERROR_BUFFER_SIZE
+	 */
+};
+
+/* Task Management Functions (TMF) */
+#define HBA_TMF_ABORT_TASK	0x01
+#define HBA_TMF_LUN_RESET	0x08
+
+struct aac_hba_tm_req {
+	u8	iu_type;	/* HBA information unit type */
+	u8	reply_qid;	/* Host reply queue to post response to */
+	u8	tmf;		/* Task management function */
+	u8	reserved1;
+
+	__le32	it_nexus;	/* Device handle for the command */
+
+	u8	lun[8];		/* SCSI LUN */
+
+	/* Used to hold sender context. */
+	__le32	request_id;	/* Sender context */
+	__le32	reserved2;
+
+	/* Request identifier of managed task */
+	__le32	managed_request_id;	/* Sender context being managed */
+	__le32	reserved3;
+
+	/* Lower 32-bits of reserved error data target location on the host */
+	__le32	error_ptr_lo;
+	/* Upper 32-bits of reserved error data target location on the host */
+	__le32	error_ptr_hi;
+	/* Length of reserved error data area on the host in bytes */
+	__le32	error_length;
+};
+
+struct aac_hba_reset_req {
+	u8	iu_type;	/* HBA information unit type */
+	/* 0 - reset specified device, 1 - reset all devices */
+	u8	reset_type;
+	u8	reply_qid;	/* Host reply queue to post response to */
+	u8	reserved1;
+
+	__le32	it_nexus;	/* Device handle for the command */
+	__le32	request_id;	/* Sender context */
+	/* Lower 32-bits of reserved error data target location on the host */
+	__le32	error_ptr_lo;
+	/* Upper 32-bits of reserved error data target location on the host */
+	__le32	error_ptr_hi;
+	/* Length of reserved error data area on the host in bytes */
+	__le32	error_length;
+};
+
+struct aac_hba_resp {
+	u8	iu_type;		/* HBA information unit type */
+	u8	reserved1[3];
+	__le32	request_identifier;	/* sender context */
+	__le32	reserved2;
+	u8	service_response;	/* SCSI service response */
+	u8	status;			/* SCSI status */
+	u8	datapres;	/* [1:0] - data present, [7:2] - reserved */
+	u8	sense_response_data_len;	/* Sense/response data length */
+	__le32	residual_count;		/* Residual data length in bytes */
+	/* Sense/response data */
+	u8	sense_response_buf[HBA_SENSE_DATA_LEN_MAX];
+};
+
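+/*
+ * One native HBA FIB slot: the command area plus the firmware
+ * error/response area together fill one AAC_MAX_NATIVE_SIZE block.
+ */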
+struct aac_native_hba {
+	union {
+		struct aac_hba_cmd_req cmd;
+		struct aac_hba_tm_req tmr;
+		u8 cmd_bytes[AAC_MAX_NATIVE_SIZE-FW_ERROR_BUFFER_SIZE];
+	} cmd;
+	union {
+		struct aac_hba_resp err;
+		u8 resp_bytes[FW_ERROR_BUFFER_SIZE];
+	} resp;
+};
+
+#define CISS_REPORT_PHYSICAL_LUNS	0xc3
+#define WRITE_HOST_WELLNESS		0xa5
+#define CISS_IDENTIFY_PHYSICAL_DEVICE	0x15
+#define BMIC_IN			0x26
+#define BMIC_OUT			0x27
+
+struct aac_ciss_phys_luns_resp {
+	u8	list_length[4];		/* LUN list length (N-7, big endian) */
+	u8	resp_flag;		/* extended response_flag */
+	u8	reserved[3];
+	struct _ciss_lun {
+		u8	tid[3];		/* Target ID */
+		u8	bus;		/* Bus, flag (bits 6,7) */
+		u8	level3[2];
+		u8	level2[2];
+		u8	node_ident[16];	/* phys. node identifier */
+	} lun[1];			/* List of phys. devices */
+};
+
+/*
+ * Interrupts
+ */
+#define AAC_MAX_HRRQ		64
+
+struct aac_ciss_identify_pd {
+	u8 scsi_bus;			/* SCSI Bus number on controller */
+	u8 scsi_id;			/* SCSI ID on this bus */
+	u16 block_size;			/* sector size in bytes */
+	u32 total_blocks;		/* number of sectors on drive */
+	u32 reserved_blocks;		/* controller reserved (RIS) */
+	u8 model[40];			/* Physical Drive Model */
+	u8 serial_number[40];		/* Drive Serial Number */
+	u8 firmware_revision[8];	/* drive firmware revision */
+	u8 scsi_inquiry_bits;		/* inquiry byte 7 bits */
+	u8 compaq_drive_stamp;		/* 0 means drive not stamped */
+	u8 last_failure_reason;
+
+	u8  flags;
+	u8  more_flags;
+	u8  scsi_lun;			/* SCSI LUN for phys drive */
+	u8  yet_more_flags;
+	u8  even_more_flags;
+	u32 spi_speed_rules;		/* SPI Speed :Ultra disable diagnose */
+	u8  phys_connector[2];		/* connector number on controller */
+	u8  phys_box_on_bus;		/* phys enclosure this drive resides in */
+	u8  phys_bay_in_box;		/* phys drv bay this drive resides in */
+	u32 rpm;			/* Drive rotational speed in rpm */
+	u8  device_type;		/* type of drive */
+	u8  sata_version;		/* only valid when drive_type is SATA */
+	u64 big_total_block_count;
+	u64 ris_starting_lba;
+	u32 ris_size;
+	u8  wwid[20];
+	u8  controller_phy_map[32];
+	u16 phy_count;
+	u8  phy_connected_dev_type[256];
+	u8  phy_to_drive_bay_num[256];
+	u16 phy_to_attached_dev_index[256];
+	u8  box_index;
+	u8  spitfire_support;
+	u16 extra_physical_drive_flags;
+	u8  negotiated_link_rate[256];
+	u8  phy_to_phy_map[256];
+	u8  redundant_path_present_map;
+	u8  redundant_path_failure_map;
+	u8  active_path_number;
+	u16 alternate_paths_phys_connector[8];
+	u8  alternate_paths_phys_box_on_port[8];
+	u8  multi_lun_device_lun_count;
+	u8  minimum_good_fw_revision[8];
+	u8  unique_inquiry_bytes[20];
+	u8  current_temperature_degreesC;
+	u8  temperature_threshold_degreesC;
+	u8  max_temperature_degreesC;
+	u8  logical_blocks_per_phys_block_exp;	/* phyblocksize = 512 * 2^exp */
+	u16 current_queue_depth_limit;
+	u8  switch_name[10];
+	u16 switch_port;
+	u8  alternate_paths_switch_name[40];
+	u8  alternate_paths_switch_port[8];
+	u16 power_on_hours;		/* valid only if gas gauge supported */
+	u16 percent_endurance_used;	/* valid only if gas gauge supported. */
+	u8  drive_authentication;
+	u8  smart_carrier_authentication;
+	u8  smart_carrier_app_fw_version;
+	u8  smart_carrier_bootloader_fw_version;
+	u8  SanitizeSecureEraseSupport;
+	u8  DriveKeyFlags;
+	u8  encryption_key_name[64];
+	u32 misc_drive_flags;
+	u16 dek_index;
+	u16 drive_encryption_flags;
+	u8  sanitize_maximum_time[6];
+	u8  connector_info_mode;
+	u8  connector_info_number[4];
+	u8  long_connector_name[64];
+	u8  device_unique_identifier[16];
+	u8  padto_2K[17];
+} __packed;
+
+/*
+ * These macros convert from physical channels to virtual channels
+ */
+#define CONTAINER_CHANNEL		(0)
+#define NATIVE_CHANNEL			(1)
+#define CONTAINER_TO_CHANNEL(cont)	(CONTAINER_CHANNEL)
+#define CONTAINER_TO_ID(cont)		(cont)
+#define CONTAINER_TO_LUN(cont)		(0)
+#define ENCLOSURE_CHANNEL		(3)
+
+#define PMC_DEVICE_S6	0x28b
+#define PMC_DEVICE_S7	0x28c
+#define PMC_DEVICE_S8	0x28d
+
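+/*
+ * Channel 0 is reserved for containers (logical arrays); physical bus N
+ * is exposed to the SCSI layer as channel N+1.
+ */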
+#define aac_phys_to_logical(x)  ((x)+1)
+#define aac_logical_to_phys(x)  ((x)?(x)-1:0)
+
+/*
+ * These macros are for keeping track of
+ * character device state.
+ */
+#define AAC_CHARDEV_UNREGISTERED	(-1)
+#define AAC_CHARDEV_NEEDS_REINIT	(-2)
+
+/* #define AAC_DETAILED_STATUS_INFO */
+
+struct diskparm
+{
+	int heads;
+	int sectors;
+	int cylinders;
+};
+
+
+/*
+ *	Firmware constants
+ */
+
+#define		CT_NONE			0
+#define		CT_OK			218
+#define		FT_FILESYS	8	/* ADAPTEC's "FSA"(tm) filesystem */
+#define		FT_DRIVE	9	/* physical disk - addressable in scsi by bus/id/lun */
+
+/*
+ *	Host side memory scatter gather list
+ *	Used by the adapter for read, write, and readdirplus operations
+ *	We have separate 32 and 64 bit versions because even
+ *	on 64 bit systems not all cards support the 64 bit version
+ */
+struct sgentry {
+	__le32	addr;	/* 32-bit address. */
+	__le32	count;	/* Length. */
+};
+
+struct user_sgentry {
+	u32	addr;	/* 32-bit address. */
+	u32	count;	/* Length. */
+};
+
+struct sgentry64 {
+	__le32	addr[2];	/* 64-bit addr. 2 pieces for data alignment */
+	__le32	count;	/* Length. */
+};
+
+struct user_sgentry64 {
+	u32	addr[2];	/* 64-bit addr. 2 pieces for data alignment */
+	u32	count;	/* Length. */
+};
+
+struct sgentryraw {
+	__le32		next;	/* reserved for F/W use */
+	__le32		prev;	/* reserved for F/W use */
+	__le32		addr[2];
+	__le32		count;
+	__le32		flags;	/* reserved for F/W use */
+};
+
+struct user_sgentryraw {
+	u32		next;	/* reserved for F/W use */
+	u32		prev;	/* reserved for F/W use */
+	u32		addr[2];
+	u32		count;
+	u32		flags;	/* reserved for F/W use */
+};
+
+struct sge_ieee1212 {
+	u32	addrLow;
+	u32	addrHigh;
+	u32	length;
+	u32	flags;
+};
+
+/*
+ *	SGMAP
+ *
+ *	This is the SGMAP structure for all commands that use
+ *	32-bit addressing.
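+ *	The sg[] member is a variable-length trailer; 'count' holds the
+ *	actual number of entries that follow.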
+ */
+
+struct sgmap {
+	__le32		count;
+	struct sgentry	sg[1];
+};
+
+struct user_sgmap {
+	u32		count;
+	struct user_sgentry	sg[1];
+};
+
+struct sgmap64 {
+	__le32		count;
+	struct sgentry64 sg[1];
+};
+
+struct user_sgmap64 {
+	u32		count;
+	struct user_sgentry64 sg[1];
+};
+
+struct sgmapraw {
+	__le32		  count;
+	struct sgentryraw sg[1];
+};
+
+struct user_sgmapraw {
+	u32		  count;
+	struct user_sgentryraw sg[1];
+};
+
+struct creation_info
+{
+	u8		buildnum;		/* e.g., 588 */
+	u8		usec;			/* e.g., 588 */
+	u8		via;			/* e.g., 1 = FSU,
+						 *	 2 = API
+						 */
+	u8		year;			/* e.g., 1997 = 97 */
+	__le32		date;			/*
+						 * unsigned	Month		:4;	// 1 - 12
+						 * unsigned	Day		:6;	// 1 - 32
+						 * unsigned	Hour		:6;	// 0 - 23
+						 * unsigned	Minute		:6;	// 0 - 60
+						 * unsigned	Second		:6;	// 0 - 60
+						 */
+	__le32		serial[2];			/* e.g., 0x1DEADB0BFAFAF001 */
+};
+
+
+/*
+ *	Define all the constants needed for the communication interface
+ */
+
+/*
+ *	Define how many queue entries each queue will have and the total
+ *	number of entries for the entire communication interface. Also define
+ *	how many queues we support.
+ *
+ *	This has to match the controller
+ */
+
+#define NUMBER_OF_COMM_QUEUES  8   // 4 command; 4 response
+#define HOST_HIGH_CMD_ENTRIES  4
+#define HOST_NORM_CMD_ENTRIES  8
+#define ADAP_HIGH_CMD_ENTRIES  4
+#define ADAP_NORM_CMD_ENTRIES  512
+#define HOST_HIGH_RESP_ENTRIES 4
+#define HOST_NORM_RESP_ENTRIES 512
+#define ADAP_HIGH_RESP_ENTRIES 4
+#define ADAP_NORM_RESP_ENTRIES 8
+
+#define TOTAL_QUEUE_ENTRIES  \
+    (HOST_NORM_CMD_ENTRIES + HOST_HIGH_CMD_ENTRIES + ADAP_NORM_CMD_ENTRIES + ADAP_HIGH_CMD_ENTRIES + \
+	    HOST_NORM_RESP_ENTRIES + HOST_HIGH_RESP_ENTRIES + ADAP_NORM_RESP_ENTRIES + ADAP_HIGH_RESP_ENTRIES)
+
+
+/*
+ *	Set the queues on a 16 byte alignment
+ */
+
+#define QUEUE_ALIGNMENT		16
+
+/*
+ *	The queue headers define the Communication Region queues. These
+ *	are physically contiguous and accessible by both the adapter and the
+ *	host. Even though all queue headers are in the same contiguous block
+ *	they will be represented as individual units in the data structures.
+ */
+
+struct aac_entry {
+	__le32 size; /* Size in bytes of Fib which this QE points to */
+	__le32 addr; /* Receiver address of the FIB */
+};
+
+/*
+ *	The adapter assumes the ProducerIndex and ConsumerIndex are grouped
+ *	adjacently and in that order.
+ */
+
+struct aac_qhdr {
+	__le64 header_addr;/* Address handed to the adapter to access
+			      this queue head */
+	__le32 *producer; /* The producer index for this queue (host address) */
+	__le32 *consumer; /* The consumer index for this queue (host address) */
+};
+
+/*
+ *	Define all the events which the adapter would like to notify
+ *	the host of.
+ */
+
+#define		HostNormCmdQue		1	/* Change in host normal priority command queue */
+#define		HostHighCmdQue		2	/* Change in host high priority command queue */
+#define		HostNormRespQue		3	/* Change in host normal priority response queue */
+#define		HostHighRespQue		4	/* Change in host high priority response queue */
+#define		AdapNormRespNotFull	5
+#define		AdapHighRespNotFull	6
+#define		AdapNormCmdNotFull	7
+#define		AdapHighCmdNotFull	8
+#define		SynchCommandComplete	9
+#define		AdapInternalError	0xfe    /* The adapter detected an internal error; shutting down */
+
+/*
+ *	Define all the events the host wishes to notify the
+ *	adapter of. The first four values must match the Qid of the
+ *	corresponding queue.
+ */
+
+#define		AdapNormCmdQue		2
+#define		AdapHighCmdQue		3
+#define		AdapNormRespQue		6
+#define		AdapHighRespQue		7
+#define		HostShutdown		8
+#define		HostPowerFail		9
+#define		FatalCommError		10
+#define		HostNormRespNotFull	11
+#define		HostHighRespNotFull	12
+#define		HostNormCmdNotFull	13
+#define		HostHighCmdNotFull	14
+#define		FastIo			15
+#define		AdapPrintfDone		16
+
+/*
+ *	Define all the queues that the adapter and host use to communicate
+ *	Number them to match the physical queue layout.
+ */
+
+enum aac_queue_types {
+        HostNormCmdQueue = 0,	/* Adapter to host normal priority command traffic */
+        HostHighCmdQueue,	/* Adapter to host high priority command traffic */
+        AdapNormCmdQueue,	/* Host to adapter normal priority command traffic */
+        AdapHighCmdQueue,	/* Host to adapter high priority command traffic */
+        HostNormRespQueue,	/* Adapter to host normal priority response traffic */
+        HostHighRespQueue,	/* Adapter to host high priority response traffic */
+        AdapNormRespQueue,	/* Host to adapter normal priority response traffic */
+        AdapHighRespQueue	/* Host to adapter high priority response traffic */
+};
+
+/*
+ *	Assign type values to the FSA communication data structures
+ */
+
+#define		FIB_MAGIC	0x0001
+#define		FIB_MAGIC2	0x0004
+#define		FIB_MAGIC2_64	0x0005
+
+/*
+ *	Define the priority levels the FSA communication routines support.
+ */
+
+#define		FsaNormal	1
+
+/* transport FIB header (PMC) */
+struct aac_fib_xporthdr {
+	__le64	HostAddress;	/* FIB host address w/o xport header */
+	__le32	Size;		/* FIB size excluding xport header */
+	__le32	Handle;		/* driver handle to reference the FIB */
+	__le64	Reserved[2];
+};
+
+#define		ALIGN32		32
+
+/*
+ * Define the FIB. The FIB is where all the requested data and
+ * command information are passed to the application on the FSA adapter.
+ */
+
+struct aac_fibhdr {
+	__le32 XferState;	/* Current transfer state for this CCB */
+	__le16 Command;		/* Routing information for the destination */
+	u8 StructType;		/* Type FIB */
+	u8 Unused;		/* Unused */
+	__le16 Size;		/* Size of this FIB in bytes */
+	__le16 SenderSize;	/* Size of the FIB in the sender
+				   (for response sizing) */
+	__le32 SenderFibAddress;  /* Host defined data in the FIB */
+	union {
+		__le32 ReceiverFibAddress;/* Logical address of this FIB for
+				     the adapter (old) */
+		__le32 SenderFibAddressHigh;/* upper 32bit of phys. FIB address */
+		__le32 TimeStamp;	/* otherwise timestamp for FW internal use */
+	} u;
+	__le32 Handle;		/* FIB handle used for MSGU communication */
+	u32 Previous;		/* FW internal use */
+	u32 Next;		/* FW internal use */
+};
+
+struct hw_fib {
+	struct aac_fibhdr header;
+	u8 data[512-sizeof(struct aac_fibhdr)];	// Command specific data
+};
+
+/*
+ *	FIB commands
+ */
+
+#define		TestCommandResponse		1
+#define		TestAdapterCommand		2
+/*
+ *	Lowlevel and comm commands
+ */
+#define		LastTestCommand			100
+#define		ReinitHostNormCommandQueue	101
+#define		ReinitHostHighCommandQueue	102
+#define		ReinitHostHighRespQueue		103
+#define		ReinitHostNormRespQueue		104
+#define		ReinitAdapNormCommandQueue	105
+#define		ReinitAdapHighCommandQueue	107
+#define		ReinitAdapHighRespQueue		108
+#define		ReinitAdapNormRespQueue		109
+#define		InterfaceShutdown		110
+#define		DmaCommandFib			120
+#define		StartProfile			121
+#define		TermProfile			122
+#define		SpeedTest			123
+#define		TakeABreakPt			124
+#define		RequestPerfData			125
+#define		SetInterruptDefTimer		126
+#define		SetInterruptDefCount		127
+#define		GetInterruptDefStatus		128
+#define		LastCommCommand			129
+/*
+ *	Filesystem commands
+ */
+#define		NuFileSystem			300
+#define		UFS				301
+#define		HostFileSystem			302
+#define		LastFileSystemCommand		303
+/*
+ *	Container Commands
+ */
+#define		ContainerCommand		500
+#define		ContainerCommand64		501
+#define		ContainerRawIo			502
+#define		ContainerRawIo2			503
+/*
+ *	Scsi Port commands (scsi passthrough)
+ */
+#define		ScsiPortCommand			600
+#define		ScsiPortCommand64		601
+/*
+ *	Misc house keeping and generic adapter initiated commands
+ */
+#define		AifRequest			700
+#define		CheckRevision			701
+#define		FsaHostShutdown			702
+#define		RequestAdapterInfo		703
+#define		IsAdapterPaused			704
+#define		SendHostTime			705
+#define		RequestSupplementAdapterInfo	706
+#define		LastMiscCommand			707
+
+/*
+ * Commands that will target the failover level on the FSA adapter
+ */
+
+enum fib_xfer_state {
+	HostOwned			= (1<<0),
+	AdapterOwned			= (1<<1),
+	FibInitialized			= (1<<2),
+	FibEmpty			= (1<<3),
+	AllocatedFromPool		= (1<<4),
+	SentFromHost			= (1<<5),
+	SentFromAdapter			= (1<<6),
+	ResponseExpected		= (1<<7),
+	NoResponseExpected		= (1<<8),
+	AdapterProcessed		= (1<<9),
+	HostProcessed			= (1<<10),
+	HighPriority			= (1<<11),
+	NormalPriority			= (1<<12),
+	Async				= (1<<13),
+	AsyncIo				= (1<<13),	// rpbfix: remove with new regime
+	PageFileIo			= (1<<14),	// rpbfix: remove with new regime
+	ShutdownRequest			= (1<<15),
+	LazyWrite			= (1<<16),	// rpbfix: remove with new regime
+	AdapterMicroFib			= (1<<17),
+	BIOSFibPath			= (1<<18),
+	FastResponseCapable		= (1<<19),
+	ApiFib				= (1<<20),	/* Its an API Fib */
+	/* PMC NEW COMM: There is no more AIF data pending */
+	NoMoreAifDataAvailable		= (1<<21)
+};
+
+/*
+ *	The following defines need to be updated any time there is an
+ *	incompatible change made to the aac_init structure.
+ */
+
+#define ADAPTER_INIT_STRUCT_REVISION		3
+#define ADAPTER_INIT_STRUCT_REVISION_4		4 // rocket science
+#define ADAPTER_INIT_STRUCT_REVISION_6		6 /* PMC src */
+#define ADAPTER_INIT_STRUCT_REVISION_7		7 /* Denali */
+#define ADAPTER_INIT_STRUCT_REVISION_8		8 // Thor
+
+union aac_init
+{
+	struct _r7 {
+		__le32	init_struct_revision;
+		__le32	no_of_msix_vectors;
+		__le32	fsrev;
+		__le32	comm_header_address;
+		__le32	fast_io_comm_area_address;
+		__le32	adapter_fibs_physical_address;
+		__le32	adapter_fibs_virtual_address;
+		__le32	adapter_fibs_size;
+		__le32	adapter_fib_align;
+		__le32	printfbuf;
+		__le32	printfbufsiz;
+		/* number of 4k pages of host phys. mem. */
+		__le32	host_phys_mem_pages;
+		/* number of seconds since 1970. */
+		__le32	host_elapsed_seconds;
+		/* ADAPTER_INIT_STRUCT_REVISION_4 begins here */
+		__le32	init_flags;	/* flags for supported features */
+#define INITFLAGS_NEW_COMM_SUPPORTED	0x00000001
+#define INITFLAGS_DRIVER_USES_UTC_TIME	0x00000010
+#define INITFLAGS_DRIVER_SUPPORTS_PM	0x00000020
+#define INITFLAGS_NEW_COMM_TYPE1_SUPPORTED	0x00000040
+#define INITFLAGS_FAST_JBOD_SUPPORTED	0x00000080
+#define INITFLAGS_NEW_COMM_TYPE2_SUPPORTED	0x00000100
+#define INITFLAGS_DRIVER_SUPPORTS_HBA_MODE  0x00000400
+		__le32	max_io_commands;	/* max outstanding commands */
+		__le32	max_io_size;	/* largest I/O command */
+		__le32	max_fib_size;	/* largest FIB to adapter */
+		/* ADAPTER_INIT_STRUCT_REVISION_5 begins here */
+		__le32	max_num_aif;	/* max number of aif */
+		/* ADAPTER_INIT_STRUCT_REVISION_6 begins here */
+		/* Host RRQ (response queue) for SRC */
+		__le32	host_rrq_addr_low;
+		__le32	host_rrq_addr_high;
+	} r7;
+	struct _r8 {
+		/* ADAPTER_INIT_STRUCT_REVISION_8 */
+		__le32	init_struct_revision;
+		__le32	rr_queue_count;
+		__le32	host_elapsed_seconds; /* number of secs since 1970. */
+		__le32	init_flags;
+		__le32	max_io_size;	/* largest I/O command */
+		__le32	max_num_aif;	/* max number of aif */
+		__le32	reserved1;
+		__le32	reserved2;
+		struct _rrq {
+			__le32	host_addr_low;
+			__le32	host_addr_high;
+			__le16	msix_id;
+			__le16	element_count;
+			__le16	comp_thresh;
+			__le16	unused;
+		} rrq[1];		/* up to 64 RRQ addresses */
+	} r8;
+};
+
+enum aac_log_level {
+	LOG_AAC_INIT			= 10,
+	LOG_AAC_INFORMATIONAL		= 20,
+	LOG_AAC_WARNING			= 30,
+	LOG_AAC_LOW_ERROR		= 40,
+	LOG_AAC_MEDIUM_ERROR		= 50,
+	LOG_AAC_HIGH_ERROR		= 60,
+	LOG_AAC_PANIC			= 70,
+	LOG_AAC_DEBUG			= 80,
+	LOG_AAC_WINDBG_PRINT		= 90
+};
+
+#define FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT	0x030b
+#define FSAFS_NTC_FIB_CONTEXT			0x030c
+
+struct aac_dev;
+struct fib;
+struct scsi_cmnd;
+
+struct adapter_ops
+{
+	/* Low level operations */
+	void (*adapter_interrupt)(struct aac_dev *dev);
+	void (*adapter_notify)(struct aac_dev *dev, u32 event);
+	void (*adapter_disable_int)(struct aac_dev *dev);
+	void (*adapter_enable_int)(struct aac_dev *dev);
+	int  (*adapter_sync_cmd)(struct aac_dev *dev, u32 command, u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6, u32 *status, u32 *r1, u32 *r2, u32 *r3, u32 *r4);
+	int  (*adapter_check_health)(struct aac_dev *dev);
+	int  (*adapter_restart)(struct aac_dev *dev, int bled, u8 reset_type);
+	void (*adapter_start)(struct aac_dev *dev);
+	/* Transport operations */
+	int  (*adapter_ioremap)(struct aac_dev * dev, u32 size);
+	irq_handler_t adapter_intr;
+	/* Packet operations */
+	int  (*adapter_deliver)(struct fib * fib);
+	int  (*adapter_bounds)(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba);
+	int  (*adapter_read)(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count);
+	int  (*adapter_write)(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua);
+	int  (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
+	/* Administrative operations */
+	int  (*adapter_comm)(struct aac_dev * dev, int comm);
+};
+
+/*
+ *	Define which interrupt handler needs to be installed
+ */
+
+struct aac_driver_ident
+{
+	int	(*init)(struct aac_dev *dev);
+	char *	name;
+	char *	vname;
+	char *	model;
+	u16	channels;
+	int	quirks;
+};
+/*
+ * Some adapter firmware needs communication memory
+ * below 2gig. This tells the init function to set the
+ * dma mask such that fib memory will be allocated where the
+ * adapter firmware can get to it.
+ */
+#define AAC_QUIRK_31BIT	0x0001
+
+/*
+ * Some adapter firmware, when the raid card's cache is turned off, can not
+ * split up scatter gathers in order to deal with the limits of the
+ * underlying CHIM. This limit is 34 scatter gather elements.
+ */
+#define AAC_QUIRK_34SG	0x0002
+
+/*
+ * This adapter is a slave (no Firmware)
+ */
+#define AAC_QUIRK_SLAVE 0x0004
+
+/*
+ * This adapter is a master.
+ */
+#define AAC_QUIRK_MASTER 0x0008
+
+/*
+ * Some adapter firmware performs poorly when it must split up scatter gathers
+ * in order to deal with the limits of the underlying CHIM. This limit in this
+ * class of adapters is 17 scatter gather elements.
+ */
+#define AAC_QUIRK_17SG	0x0010
+
+/*
+ *	Some adapter firmware does not support 64 bit scsi passthrough
+ * commands.
+ */
+#define AAC_QUIRK_SCSI_32	0x0020
+
+/*
+ * SRC based adapters support the AifReqEvent functions
+ */
+#define AAC_QUIRK_SRC 0x0040
+
+/*
+ *	The adapter interface specs all queues to be located in the same
+ *	physically contiguous block. The host structure that defines the
+ *	communication queues will assume they are each a separate physically
+ *	contiguous memory region that will support them all being one big
+ *	contiguous block.
+ *	There is a command and response queue for each level and direction of
+ *	communication. These regions are accessed by both the host and adapter.
+ */
+
+struct aac_queue {
+	u64			logical;	/*address we give the adapter */
+	struct aac_entry	*base;		/*system virtual address */
+	struct aac_qhdr		headers;	/*producer,consumer q headers*/
+	u32			entries;	/*Number of queue entries */
+	wait_queue_head_t	qfull;		/*Event to wait on if q full */
+	wait_queue_head_t	cmdready;	/*Cmd ready from the adapter */
+		/* This is only valid for adapter to host command queues. */
+	spinlock_t		*lock;		/* Pointer to this queue's spinlock; take it before accessing the queue */
+	spinlock_t		lockdata;	/* Actual lock (used only on one side of the lock) */
+	struct list_head	cmdq;		/* A queue of FIBs which need to be processed by the FS thread. This is */
+						/* only valid for command queues which receive entries from the adapter. */
+	/* Number of entries on outstanding queue. */
+	atomic_t		numpending;
+	struct aac_dev *	dev;		/* Back pointer to adapter structure */
+};
+
+/*
+ *	Message queues. The order here is important, see also the
+ *	queue type ordering
+ */
+
+struct aac_queue_block
+{
+	struct aac_queue queue[8];
+};
+
+/*
+ *	SaP1 Message Unit Registers
+ */
+
+struct sa_drawbridge_CSR {
+				/*	Offset	|  Name */
+	__le32	reserved[10];	/*	00h-27h |  Reserved */
+	u8	LUT_Offset;	/*	28h	|  Lookup Table Offset */
+	u8	reserved1[3];	/*	29h-2bh	|  Reserved */
+	__le32	LUT_Data;	/*	2ch	|  Lookup Table Data */
+	__le32	reserved2[26];	/*	30h-97h	|  Reserved */
+	__le16	PRICLEARIRQ;	/*	98h	|  Primary Clear Irq */
+	__le16	SECCLEARIRQ;	/*	9ah	|  Secondary Clear Irq */
+	__le16	PRISETIRQ;	/*	9ch	|  Primary Set Irq */
+	__le16	SECSETIRQ;	/*	9eh	|  Secondary Set Irq */
+	__le16	PRICLEARIRQMASK;/*	a0h	|  Primary Clear Irq Mask */
+	__le16	SECCLEARIRQMASK;/*	a2h	|  Secondary Clear Irq Mask */
+	__le16	PRISETIRQMASK;	/*	a4h	|  Primary Set Irq Mask */
+	__le16	SECSETIRQMASK;	/*	a6h	|  Secondary Set Irq Mask */
+	__le32	MAILBOX0;	/*	a8h	|  Scratchpad 0 */
+	__le32	MAILBOX1;	/*	ach	|  Scratchpad 1 */
+	__le32	MAILBOX2;	/*	b0h	|  Scratchpad 2 */
+	__le32	MAILBOX3;	/*	b4h	|  Scratchpad 3 */
+	__le32	MAILBOX4;	/*	b8h	|  Scratchpad 4 */
+	__le32	MAILBOX5;	/*	bch	|  Scratchpad 5 */
+	__le32	MAILBOX6;	/*	c0h	|  Scratchpad 6 */
+	__le32	MAILBOX7;	/*	c4h	|  Scratchpad 7 */
+	__le32	ROM_Setup_Data;	/*	c8h	|  Rom Setup and Data */
+	__le32	ROM_Control_Addr;/*	cch	|  Rom Control and Address */
+	__le32	reserved3[12];	/*	d0h-ffh	|  reserved */
+	__le32	LUT[64];	/*    100h-1ffh	|  Lookup Table Entries */
+};
+
+#define Mailbox0	SaDbCSR.MAILBOX0
+#define Mailbox1	SaDbCSR.MAILBOX1
+#define Mailbox2	SaDbCSR.MAILBOX2
+#define Mailbox3	SaDbCSR.MAILBOX3
+#define Mailbox4	SaDbCSR.MAILBOX4
+#define Mailbox5	SaDbCSR.MAILBOX5
+#define Mailbox6	SaDbCSR.MAILBOX6
+#define Mailbox7	SaDbCSR.MAILBOX7
+
+#define DoorbellReg_p SaDbCSR.PRISETIRQ
+#define DoorbellReg_s SaDbCSR.SECSETIRQ
+#define DoorbellClrReg_p SaDbCSR.PRICLEARIRQ
+
+
+#define	DOORBELL_0	0x0001
+#define DOORBELL_1	0x0002
+#define DOORBELL_2	0x0004
+#define DOORBELL_3	0x0008
+#define DOORBELL_4	0x0010
+#define DOORBELL_5	0x0020
+#define DOORBELL_6	0x0040
+
+
+#define PrintfReady	DOORBELL_5
+#define PrintfDone	DOORBELL_5
+
+struct sa_registers {
+	struct sa_drawbridge_CSR	SaDbCSR;			/* 98h - c4h */
+};
+
+
+#define SA_INIT_NUM_MSIXVECTORS		1
+#define SA_MINIPORT_REVISION		SA_INIT_NUM_MSIXVECTORS
+
+#define sa_readw(AEP, CSR)		readl(&((AEP)->regs.sa->CSR))
+#define sa_readl(AEP, CSR)		readl(&((AEP)->regs.sa->CSR))
+#define sa_writew(AEP, CSR, value)	writew(value, &((AEP)->regs.sa->CSR))
+#define sa_writel(AEP, CSR, value)	writel(value, &((AEP)->regs.sa->CSR))
+
+/*
+ *	Rx Message Unit Registers
+ */
+
+struct rx_mu_registers {
+			    /*	Local  | PCI*| Name */
+	__le32	ARSR;	    /*	1300h  | 00h | APIC Register Select Register */
+	__le32	reserved0;  /*	1304h  | 04h | Reserved */
+	__le32	AWR;	    /*	1308h  | 08h | APIC Window Register */
+	__le32	reserved1;  /*	130Ch  | 0Ch | Reserved */
+	__le32	IMRx[2];    /*	1310h  | 10h | Inbound Message Registers */
+	__le32	OMRx[2];    /*	1318h  | 18h | Outbound Message Registers */
+	__le32	IDR;	    /*	1320h  | 20h | Inbound Doorbell Register */
+	__le32	IISR;	    /*	1324h  | 24h | Inbound Interrupt
+						Status Register */
+	__le32	IIMR;	    /*	1328h  | 28h | Inbound Interrupt
+						Mask Register */
+	__le32	ODR;	    /*	132Ch  | 2Ch | Outbound Doorbell Register */
+	__le32	OISR;	    /*	1330h  | 30h | Outbound Interrupt
+						Status Register */
+	__le32	OIMR;	    /*	1334h  | 34h | Outbound Interrupt
+						Mask Register */
+	__le32	reserved2;  /*	1338h  | 38h | Reserved */
+	__le32	reserved3;  /*	133Ch  | 3Ch | Reserved */
+	__le32	InboundQueue;/*	1340h  | 40h | Inbound Queue Port relative to firmware */
+	__le32	OutboundQueue;/*1344h  | 44h | Outbound Queue Port relative to firmware */
+			    /* * Must access through ATU Inbound
+				 Translation Window */
+};
+
+struct rx_inbound {
+	__le32	Mailbox[8];
+};
+
+#define	INBOUNDDOORBELL_0	0x00000001
+#define INBOUNDDOORBELL_1	0x00000002
+#define INBOUNDDOORBELL_2	0x00000004
+#define INBOUNDDOORBELL_3	0x00000008
+#define INBOUNDDOORBELL_4	0x00000010
+#define INBOUNDDOORBELL_5	0x00000020
+#define INBOUNDDOORBELL_6	0x00000040
+
+#define	OUTBOUNDDOORBELL_0	0x00000001
+#define OUTBOUNDDOORBELL_1	0x00000002
+#define OUTBOUNDDOORBELL_2	0x00000004
+#define OUTBOUNDDOORBELL_3	0x00000008
+#define OUTBOUNDDOORBELL_4	0x00000010
+
+#define InboundDoorbellReg	MUnit.IDR
+#define OutboundDoorbellReg	MUnit.ODR
+
+struct rx_registers {
+	struct rx_mu_registers		MUnit;		/* 1300h - 1347h */
+	__le32				reserved1[2];	/* 1348h - 134ch */
+	struct rx_inbound		IndexRegs;
+};
+
+#define rx_readb(AEP, CSR)		readb(&((AEP)->regs.rx->CSR))
+#define rx_readl(AEP, CSR)		readl(&((AEP)->regs.rx->CSR))
+#define rx_writeb(AEP, CSR, value)	writeb(value, &((AEP)->regs.rx->CSR))
+#define rx_writel(AEP, CSR, value)	writel(value, &((AEP)->regs.rx->CSR))
+
+/*
+ *	Rkt Message Unit Registers (same as Rx, except a larger reserve region)
+ */
+
+#define rkt_mu_registers rx_mu_registers
+#define rkt_inbound rx_inbound
+
+struct rkt_registers {
+	struct rkt_mu_registers		MUnit;		 /* 1300h - 1347h */
+	__le32				reserved1[1006]; /* 1348h - 22fch */
+	struct rkt_inbound		IndexRegs;	 /* 2300h - */
+};
+
+#define rkt_readb(AEP, CSR)		readb(&((AEP)->regs.rkt->CSR))
+#define rkt_readl(AEP, CSR)		readl(&((AEP)->regs.rkt->CSR))
+#define rkt_writeb(AEP, CSR, value)	writeb(value, &((AEP)->regs.rkt->CSR))
+#define rkt_writel(AEP, CSR, value)	writel(value, &((AEP)->regs.rkt->CSR))
+
+/*
+ * PMC SRC message unit registers
+ */
+
+#define src_inbound rx_inbound
+
+struct src_mu_registers {
+				/*  PCI*| Name */
+	__le32	reserved0[6];	/*  00h | Reserved */
+	__le32	IOAR[2];	/*  18h | IOA->host interrupt register */
+	__le32	IDR;		/*  20h | Inbound Doorbell Register */
+	__le32	IISR;		/*  24h | Inbound Int. Status Register */
+	__le32	reserved1[3];	/*  28h | Reserved */
+	__le32	OIMR;		/*  34h | Outbound Int. Mask Register */
+	__le32	reserved2[25];  /*  38h | Reserved */
+	__le32	ODR_R;		/*  9ch | Outbound Doorbell Read */
+	__le32	ODR_C;		/*  a0h | Outbound Doorbell Clear */
+	__le32	reserved3[3];	/*  a4h | Reserved */
+	__le32	SCR0;		/*  b0h | Scratchpad 0 */
+	__le32	reserved4[2];	/*  b4h | Reserved */
+	__le32	OMR;		/*  bch | Outbound Message Register */
+	__le32	IQ_L;		/*  c0h | Inbound Queue (Low address) */
+	__le32	IQ_H;		/*  c4h | Inbound Queue (High address) */
+	__le32	ODR_MSI;	/*  c8h | MSI register for sync./AIF */
+	__le32  reserved5;	/*  cch | Reserved */
+	__le32	IQN_L;		/*  d0h | Inbound (native cmd) low  */
+	__le32	IQN_H;		/*  d4h | Inbound (native cmd) high */
+};
+
+struct src_registers {
+	struct src_mu_registers MUnit;	/* 00h - cbh */
+	union {
+		struct {
+			__le32 reserved1[130786];	/* d8h - 7fc5fh */
+			struct src_inbound IndexRegs;	/* 7fc60h */
+		} tupelo;
+		struct {
+			__le32 reserved1[970];		/* d8h - fffh */
+			struct src_inbound IndexRegs;	/* 1000h */
+		} denali;
+	} u;
+};
+
+#define src_readb(AEP, CSR)		readb(&((AEP)->regs.src.bar0->CSR))
+#define src_readl(AEP, CSR)		readl(&((AEP)->regs.src.bar0->CSR))
+#define src_writeb(AEP, CSR, value)	writeb(value, \
+						&((AEP)->regs.src.bar0->CSR))
+#define src_writel(AEP, CSR, value)	writel(value, \
+						&((AEP)->regs.src.bar0->CSR))
+#if defined(writeq)
+#define	src_writeq(AEP, CSR, value)	writeq(value, \
+						&((AEP)->regs.src.bar0->CSR))
+#endif
+
+#define SRC_ODR_SHIFT		12
+#define SRC_IDR_SHIFT		9
+#define SRC_MSI_READ_MASK	0x1000
+
+typedef void (*fib_callback)(void *ctxt, struct fib *fibctx);
+
+struct aac_fib_context {
+	s16			type;		// used for verification of structure
+	s16			size;
+	u32			unique;		// unique value representing this context
+	ulong			jiffies;	// used for cleanup - dmb changed to ulong
+	struct list_head	next;		// used to link context's into a linked list
+	struct semaphore	wait_sem;	// this is used to wait for the next fib to arrive.
+	int			wait;		// Set to true when thread is in WaitForSingleObject
+	unsigned long		count;		// total number of FIBs on FibList
+	struct list_head	fib_list;	// this holds fibs and their attached hw_fibs
+};
+
+struct sense_data {
+	u8 error_code;		/* 70h (current errors), 71h(deferred errors) */
+	u8 valid:1;		/* A valid bit of one indicates that the information  */
+				/* field contains valid information as defined in the
+				 * SCSI-2 Standard.
+				 */
+	u8 segment_number;	/* Only used for COPY, COMPARE, or COPY AND VERIFY Commands */
+	u8 sense_key:4;		/* Sense Key */
+	u8 reserved:1;
+	u8 ILI:1;		/* Incorrect Length Indicator */
+	u8 EOM:1;		/* End Of Medium - reserved for random access devices */
+	u8 filemark:1;		/* Filemark - reserved for random access devices */
+
+	u8 information[4];	/* for direct-access devices, contains the unsigned
+				 * logical block address or residue associated with
+				 * the sense key
+				 */
+	u8 add_sense_len;	/* number of additional sense bytes to follow this field */
+	u8 cmnd_info[4];	/* not used */
+	u8 ASC;			/* Additional Sense Code */
+	u8 ASCQ;		/* Additional Sense Code Qualifier */
+	u8 FRUC;		/* Field Replaceable Unit Code - not used */
+	u8 bit_ptr:3;		/* indicates which byte of the CDB or parameter data
+				 * was in error
+				 */
+	u8 BPV:1;		/* bit pointer valid (BPV): 1- indicates that
+				 * the bit_ptr field has valid value
+				 */
+	u8 reserved2:2;
+	u8 CD:1;		/* command data bit: 1- illegal parameter in CDB.
+				 * 0- illegal parameter in data.
+				 */
+	u8 SKSV:1;
+	u8 field_ptr[2];	/* byte of the CDB or parameter data in error */
+};
+
+struct fsa_dev_info {
+	u64		last;
+	u64		size;
+	u32		type;
+	u32		config_waiting_on;
+	unsigned long	config_waiting_stamp;
+	u16		queue_depth;
+	u8		config_needed;
+	u8		valid;
+	u8		ro;
+	u8		locked;
+	u8		deleted;
+	char		devname[8];
+	struct sense_data sense_data;
+	u32		block_size;
+	u8		identifier[16];
+};
+
+struct fib {
+	void			*next;	/* this is used by the allocator */
+	s16			type;
+	s16			size;
+	/*
+	 *	The Adapter that this I/O is destined for.
+	 */
+	struct aac_dev		*dev;
+	/*
+	 *	This is the event the sendfib routine will wait on if the
+	 *	caller did not pass one and this is synch io.
+	 */
+	struct semaphore	event_wait;
+	spinlock_t		event_lock;
+
+	u32			done;	/* gets set to 1 when fib is complete */
+	fib_callback		callback;
+	void			*callback_data;
+	u32			flags; // u32 dmb was ulong
+	/*
+	 *	And for the internal issue/reply queues (we may be able
+	 *	to merge these two)
+	 */
+	struct list_head	fiblink;
+	void			*data;
+	u32			vector_no;
+	struct hw_fib		*hw_fib_va;	/* also used for native */
+	dma_addr_t		hw_fib_pa;	/* physical address of hw_fib*/
+	dma_addr_t		hw_sgl_pa;	/* extra sgl for native */
+	dma_addr_t		hw_error_pa;	/* error buffer for native */
+	u32			hbacmd_size;	/* cmd size for native */
+};
+
+#define AAC_INIT			0
+#define AAC_RESCAN			1
+
+#define AAC_DEVTYPE_RAID_MEMBER	1
+#define AAC_DEVTYPE_ARC_RAW		2
+#define AAC_DEVTYPE_NATIVE_RAW		3
+
+#define AAC_SAFW_RESCAN_DELAY		(10 * HZ)
+
+struct aac_hba_map_info {
+	__le32	rmw_nexus;		/* nexus for native HBA devices */
+	u8		devtype;	/* device type */
+	s8		reset_state;	/* 0 - no reset, 1..x - */
+					/* after xth TM LUN reset */
+	u16		qd_limit;
+	u32		scan_counter;
+	struct aac_ciss_identify_pd  *safw_identify_resp;
+};
+
+/*
+ *	Adapter Information Block
+ *
+ *	This is returned by the RequestAdapterInfo block
+ */
+
+struct aac_adapter_info
+{
+	__le32	platform;
+	__le32	cpu;
+	__le32	subcpu;
+	__le32	clock;
+	__le32	execmem;
+	__le32	buffermem;
+	__le32	totalmem;
+	__le32	kernelrev;
+	__le32	kernelbuild;
+	__le32	monitorrev;
+	__le32	monitorbuild;
+	__le32	hwrev;
+	__le32	hwbuild;
+	__le32	biosrev;
+	__le32	biosbuild;
+	__le32	cluster;
+	__le32	clusterchannelmask;
+	__le32	serial[2];
+	__le32	battery;
+	__le32	options;
+	__le32	OEM;
+};
+
+struct aac_supplement_adapter_info
+{
+	u8	adapter_type_text[17+1];
+	u8	pad[2];
+	__le32	flash_memory_byte_size;
+	__le32	flash_image_id;
+	__le32	max_number_ports;
+	__le32	version;
+	__le32	feature_bits;
+	u8	slot_number;
+	u8	reserved_pad0[3];
+	u8	build_date[12];
+	__le32	current_number_ports;
+	struct {
+		u8	assembly_pn[8];
+		u8	fru_pn[8];
+		u8	battery_fru_pn[8];
+		u8	ec_version_string[8];
+		u8	tsid[12];
+	}	vpd_info;
+	__le32	flash_firmware_revision;
+	__le32	flash_firmware_build;
+	__le32	raid_type_morph_options;
+	__le32	flash_firmware_boot_revision;
+	__le32	flash_firmware_boot_build;
+	u8	mfg_pcba_serial_no[12];
+	u8	mfg_wwn_name[8];
+	__le32	supported_options2;
+	__le32	struct_expansion;
+	/* StructExpansion == 1 */
+	__le32	feature_bits3;
+	__le32	supported_performance_modes;
+	u8	host_bus_type;		/* uses HOST_BUS_TYPE_xxx defines */
+	u8	host_bus_width;		/* actual width in bits or links */
+	u16	host_bus_speed;		/* actual bus speed/link rate in MHz */
+	u8	max_rrc_drives;		/* max. number of ITP-RRC drives/pool */
+	u8	max_disk_xtasks;	/* max. possible num of DiskX Tasks */
+
+	u8	cpld_ver_loaded;
+	u8	cpld_ver_in_flash;
+
+	__le64	max_rrc_capacity;
+	__le32	compiled_max_hist_log_level;
+	u8	custom_board_name[12];
+	u16	supported_cntlr_mode;	/* identify supported controller mode */
+	u16	reserved_for_future16;
+	__le32	supported_options3;	/* reserved for future options */
+
+	__le16	virt_device_bus;		/* virt. SCSI device for Thor */
+	__le16	virt_device_target;
+	__le16	virt_device_lun;
+	__le16	unused;
+	__le32	reserved_for_future_growth[68];
+
+};
+#define AAC_FEATURE_FALCON	cpu_to_le32(0x00000010)
+#define AAC_FEATURE_JBOD	cpu_to_le32(0x08000000)
+/* SupportedOptions2 */
+#define AAC_OPTION_MU_RESET		cpu_to_le32(0x00000001)
+#define AAC_OPTION_IGNORE_RESET		cpu_to_le32(0x00000002)
+#define AAC_OPTION_POWER_MANAGEMENT	cpu_to_le32(0x00000004)
+#define AAC_OPTION_DOORBELL_RESET	cpu_to_le32(0x00004000)
+/* 4KB sector size */
+#define AAC_OPTION_VARIABLE_BLOCK_SIZE	cpu_to_le32(0x00040000)
+/* 240 simple volume support */
+#define AAC_OPTION_SUPPORTED_240_VOLUMES cpu_to_le32(0x10000000)
+/*
+ * Supports FIB dump sync command send prior to IOP_RESET
+ */
+#define AAC_OPTION_SUPPORTED3_IOP_RESET_FIB_DUMP	cpu_to_le32(0x00004000)
+#define AAC_SIS_VERSION_V3	3
+#define AAC_SIS_SLOT_UNKNOWN	0xFF
+
+#define GetBusInfo 0x00000009
+struct aac_bus_info {
+	__le32	Command;	/* VM_Ioctl */
+	__le32	ObjType;	/* FT_DRIVE */
+	__le32	MethodId;	/* 1 = SCSI Layer */
+	__le32	ObjectId;	/* Handle */
+	__le32	CtlCmd;		/* GetBusInfo */
+};
+
+struct aac_bus_info_response {
+	__le32	Status;		/* ST_OK */
+	__le32	ObjType;
+	__le32	MethodId;	/* unused */
+	__le32	ObjectId;	/* unused */
+	__le32	CtlCmd;		/* unused */
+	__le32	ProbeComplete;
+	__le32	BusCount;
+	__le32	TargetsPerBus;
+	u8	InitiatorBusId[10];
+	u8	BusValid[10];
+};
+
+/*
+ * Battery platforms
+ */
+#define AAC_BAT_REQ_PRESENT	(1)
+#define AAC_BAT_REQ_NOTPRESENT	(2)
+#define AAC_BAT_OPT_PRESENT	(3)
+#define AAC_BAT_OPT_NOTPRESENT	(4)
+#define AAC_BAT_NOT_SUPPORTED	(5)
+/*
+ * cpu types
+ */
+#define AAC_CPU_SIMULATOR	(1)
+#define AAC_CPU_I960		(2)
+#define AAC_CPU_STRONGARM	(3)
+
+/*
+ * Supported Options
+ */
+#define AAC_OPT_SNAPSHOT		cpu_to_le32(1)
+#define AAC_OPT_CLUSTERS		cpu_to_le32(1<<1)
+#define AAC_OPT_WRITE_CACHE		cpu_to_le32(1<<2)
+#define AAC_OPT_64BIT_DATA		cpu_to_le32(1<<3)
+#define AAC_OPT_HOST_TIME_FIB		cpu_to_le32(1<<4)
+#define AAC_OPT_RAID50			cpu_to_le32(1<<5)
+#define AAC_OPT_4GB_WINDOW		cpu_to_le32(1<<6)
+#define AAC_OPT_SCSI_UPGRADEABLE	cpu_to_le32(1<<7)
+#define AAC_OPT_SOFT_ERR_REPORT		cpu_to_le32(1<<8)
+#define AAC_OPT_SUPPORTED_RECONDITION	cpu_to_le32(1<<9)
+#define AAC_OPT_SGMAP_HOST64		cpu_to_le32(1<<10)
+#define AAC_OPT_ALARM			cpu_to_le32(1<<11)
+#define AAC_OPT_NONDASD			cpu_to_le32(1<<12)
+#define AAC_OPT_SCSI_MANAGED		cpu_to_le32(1<<13)
+#define AAC_OPT_RAID_SCSI_MODE		cpu_to_le32(1<<14)
+#define AAC_OPT_SUPPLEMENT_ADAPTER_INFO	cpu_to_le32(1<<16)
+#define AAC_OPT_NEW_COMM		cpu_to_le32(1<<17)
+#define AAC_OPT_NEW_COMM_64		cpu_to_le32(1<<18)
+#define AAC_OPT_EXTENDED		cpu_to_le32(1<<23)
+#define AAC_OPT_NATIVE_HBA		cpu_to_le32(1<<25)
+#define AAC_OPT_NEW_COMM_TYPE1		cpu_to_le32(1<<28)
+#define AAC_OPT_NEW_COMM_TYPE2		cpu_to_le32(1<<29)
+#define AAC_OPT_NEW_COMM_TYPE3		cpu_to_le32(1<<30)
+#define AAC_OPT_NEW_COMM_TYPE4		cpu_to_le32(1<<31)
+
+#define AAC_COMM_PRODUCER		0
+#define AAC_COMM_MESSAGE		1
+#define AAC_COMM_MESSAGE_TYPE1		3
+#define AAC_COMM_MESSAGE_TYPE2		4
+#define AAC_COMM_MESSAGE_TYPE3		5
+
+#define AAC_EXTOPT_SA_FIRMWARE		cpu_to_le32(1<<1)
+#define AAC_EXTOPT_SOFT_RESET		cpu_to_le32(1<<16)
+
+/* MSIX context */
+struct aac_msix_ctx {
+	int		vector_no;
+	struct aac_dev	*dev;
+};
+
+struct aac_dev
+{
+	struct list_head	entry;
+	const char		*name;
+	int			id;
+
+	/*
+	 *	negotiated FIB settings
+	 */
+	unsigned int		max_fib_size;
+	unsigned int		sg_tablesize;
+	unsigned int		max_num_aif;
+
+	unsigned int		max_cmd_size;	/* max_fib_size or MAX_NATIVE */
+
+	/*
+	 *	Map for 128 fib objects (64k)
+	 */
+	dma_addr_t		hw_fib_pa;	/* also used for native cmd */
+	struct hw_fib		*hw_fib_va;	/* also used for native cmd */
+	struct hw_fib		*aif_base_va;
+	/*
+	 *	Fib Headers
+	 */
+	struct fib              *fibs;
+
+	struct fib		*free_fib;
+	spinlock_t		fib_lock;
+
+	struct mutex		ioctl_mutex;
+	struct mutex		scan_mutex;
+	struct aac_queue_block *queues;
+	/*
+	 *	The user API will use an IOCTL to register itself to receive
+	 *	FIBs from the adapter.  The following list is used to keep
+	 *	track of all the threads that have requested these FIBs.  The
+	 *	mutex is used to synchronize access to all data associated
+	 *	with the adapter fibs.
+	 */
+	struct list_head	fib_list;
+
+	struct adapter_ops	a_ops;
+	unsigned long		fsrev;		/* Main driver's revision number */
+
+	resource_size_t		base_start;	/* main IO base */
+	resource_size_t		dbg_base;	/* address of UART
+						 * debug buffer */
+
+	resource_size_t		base_size, dbg_size;	/* Size of
+							 *  mapped in region */
+	/*
+	 * Holds initialization info
+	 * to communicate with adapter
+	 */
+	union aac_init		*init;
+	dma_addr_t		init_pa;	/* Holds physical address of the init struct */
+	/* response queue (if AAC_COMM_MESSAGE_TYPE1) */
+	__le32			*host_rrq;
+	dma_addr_t		host_rrq_pa;	/* phys. address */
+	/* index into rrq buffer */
+	u32			host_rrq_idx[AAC_MAX_MSIX];
+	atomic_t		rrq_outstanding[AAC_MAX_MSIX];
+	u32			fibs_pushed_no;
+	struct pci_dev		*pdev;		/* Our PCI interface */
+	/* pointer to buffer used for printf's from the adapter */
+	void			*printfbuf;
+	void			*comm_addr;	/* Base address of Comm area */
+	dma_addr_t		comm_phys;	/* Physical Address of Comm area */
+	size_t			comm_size;
+
+	struct Scsi_Host	*scsi_host_ptr;
+	int			maximum_num_containers;
+	int			maximum_num_physicals;
+	int			maximum_num_channels;
+	struct fsa_dev_info	*fsa_dev;
+	struct task_struct	*thread;
+	struct delayed_work	safw_rescan_work;
+	int			cardtype;
+	/*
+	 * This lock will protect the two 32-bit
+	 * writes to the Inbound Queue
+	 */
+	spinlock_t		iq_lock;
+
+	/*
+	 *	The following is the device specific extension.
+	 */
+#ifndef AAC_MIN_FOOTPRINT_SIZE
+#	define AAC_MIN_FOOTPRINT_SIZE 8192
+#	define AAC_MIN_SRC_BAR0_SIZE 0x400000
+#	define AAC_MIN_SRC_BAR1_SIZE 0x800
+#	define AAC_MIN_SRCV_BAR0_SIZE 0x100000
+#	define AAC_MIN_SRCV_BAR1_SIZE 0x400
+#endif
+	union
+	{
+		struct sa_registers __iomem *sa;
+		struct rx_registers __iomem *rx;
+		struct rkt_registers __iomem *rkt;
+		struct {
+			struct src_registers __iomem *bar0;
+			char __iomem *bar1;
+		} src;
+	} regs;
+	volatile void __iomem *base, *dbg_base_mapped;
+	volatile struct rx_inbound __iomem *IndexRegs;
+	u32			OIMR; /* Mask Register Cache */
+	/*
+	 *	AIF thread states
+	 */
+	u32			aif_thread;
+	struct aac_adapter_info adapter_info;
+	struct aac_supplement_adapter_info supplement_adapter_info;
+	/* These are in adapter info but they are in the I/O flow, so
+	 * let's break them out so we don't have to do an AND to check them
+	 */
+	u8			nondasd_support;
+	u8			jbod;
+	u8			cache_protected;
+	u8			dac_support;
+	u8			needs_dac;
+	u8			raid_scsi_mode;
+	u8			comm_interface;
+	u8			raw_io_interface;
+	u8			raw_io_64;
+	u8			printf_enabled;
+	u8			in_reset;
+	u8			in_soft_reset;
+	u8			msi;
+	u8			sa_firmware;
+	int			management_fib_count;
+	spinlock_t		manage_lock;
+	spinlock_t		sync_lock;
+	int			sync_mode;
+	struct fib		*sync_fib;
+	struct list_head	sync_fib_list;
+	u32			doorbell_mask;
+	u32			max_msix;	/* max. MSI-X vectors */
+	u32			vector_cap;	/* MSI-X vector capab.*/
+	int			msi_enabled;	/* MSI/MSI-X enabled */
+	atomic_t		msix_counter;
+	u32			scan_counter;
+	struct msix_entry	msixentry[AAC_MAX_MSIX];
+	struct aac_msix_ctx	aac_msix[AAC_MAX_MSIX]; /* context */
+	struct aac_hba_map_info	hba_map[AAC_MAX_BUSES][AAC_MAX_TARGETS];
+	struct aac_ciss_phys_luns_resp *safw_phys_luns;
+	u8			adapter_shutdown;
+	u32			handle_pci_error;
+	bool			init_reset;
+};
+
+#define aac_adapter_interrupt(dev) \
+	(dev)->a_ops.adapter_interrupt(dev)
+
+#define aac_adapter_notify(dev, event) \
+	(dev)->a_ops.adapter_notify(dev, event)
+
+#define aac_adapter_disable_int(dev) \
+	(dev)->a_ops.adapter_disable_int(dev)
+
+#define aac_adapter_enable_int(dev) \
+	(dev)->a_ops.adapter_enable_int(dev)
+
+#define aac_adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4) \
+	(dev)->a_ops.adapter_sync_cmd(dev, command, p1, p2, p3, p4, p5, p6, status, r1, r2, r3, r4)
+
+#define aac_adapter_restart(dev, bled, reset_type) \
+	((dev)->a_ops.adapter_restart(dev, bled, reset_type))
+
+#define aac_adapter_start(dev) \
+	((dev)->a_ops.adapter_start(dev))
+
+#define aac_adapter_ioremap(dev, size) \
+	(dev)->a_ops.adapter_ioremap(dev, size)
+
+#define aac_adapter_deliver(fib) \
+	((fib)->dev)->a_ops.adapter_deliver(fib)
+
+#define aac_adapter_bounds(dev,cmd,lba) \
+	dev->a_ops.adapter_bounds(dev,cmd,lba)
+
+#define aac_adapter_read(fib,cmd,lba,count) \
+	((fib)->dev)->a_ops.adapter_read(fib,cmd,lba,count)
+
+#define aac_adapter_write(fib,cmd,lba,count,fua) \
+	((fib)->dev)->a_ops.adapter_write(fib,cmd,lba,count,fua)
+
+#define aac_adapter_scsi(fib,cmd) \
+	((fib)->dev)->a_ops.adapter_scsi(fib,cmd)
+
+#define aac_adapter_comm(dev,comm) \
+	(dev)->a_ops.adapter_comm(dev, comm)
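+
+/*
+ * Editor's note (illustrative, not in the original source): the macros
+ * above simply indirect through the per-card adapter_ops table (a_ops),
+ * so e.g. aac_adapter_deliver(fib) dispatches to whichever deliver
+ * routine (producer-queue or message-based) the probed card registered.
+ */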
+
+#define FIB_CONTEXT_FLAG_TIMED_OUT		(0x00000001)
+#define FIB_CONTEXT_FLAG			(0x00000002)
+#define FIB_CONTEXT_FLAG_WAIT			(0x00000004)
+#define FIB_CONTEXT_FLAG_FASTRESP		(0x00000008)
+#define FIB_CONTEXT_FLAG_NATIVE_HBA		(0x00000010)
+#define FIB_CONTEXT_FLAG_NATIVE_HBA_TMF	(0x00000020)
+#define FIB_CONTEXT_FLAG_SCSI_CMD	(0x00000040)
+#define FIB_CONTEXT_FLAG_EH_RESET	(0x00000080)
+
+/*
+ *	Define the command values
+ */
+
+#define		Null			0
+#define		GetAttributes		1
+#define		SetAttributes		2
+#define		Lookup			3
+#define		ReadLink		4
+#define		Read			5
+#define		Write			6
+#define		Create			7
+#define		MakeDirectory		8
+#define		SymbolicLink		9
+#define		MakeNode		10
+#define		Removex			11
+#define		RemoveDirectoryx	12
+#define		Rename			13
+#define		Link			14
+#define		ReadDirectory		15
+#define		ReadDirectoryPlus	16
+#define		FileSystemStatus	17
+#define		FileSystemInfo		18
+#define		PathConfigure		19
+#define		Commit			20
+#define		Mount			21
+#define		UnMount			22
+#define		Newfs			23
+#define		FsCheck			24
+#define		FsSync			25
+#define		SimReadWrite		26
+#define		SetFileSystemStatus	27
+#define		BlockRead		28
+#define		BlockWrite		29
+#define		NvramIoctl		30
+#define		FsSyncWait		31
+#define		ClearArchiveBit		32
+#define		SetAcl			33
+#define		GetAcl			34
+#define		AssignAcl		35
+#define		FaultInsertion		36	/* Fault Insertion Command */
+#define		CrazyCache		37	/* Crazycache */
+
+#define		MAX_FSACOMMAND_NUM	38
+
+
+/*
+ *	Define the status returns. These are very Unix-like although
+ *	most are in fact not used
+ */
+
+#define		ST_OK		0
+#define		ST_PERM		1
+#define		ST_NOENT	2
+#define		ST_IO		5
+#define		ST_NXIO		6
+#define		ST_E2BIG	7
+#define		ST_MEDERR	8
+#define		ST_ACCES	13
+#define		ST_EXIST	17
+#define		ST_XDEV		18
+#define		ST_NODEV	19
+#define		ST_NOTDIR	20
+#define		ST_ISDIR	21
+#define		ST_INVAL	22
+#define		ST_FBIG		27
+#define		ST_NOSPC	28
+#define		ST_ROFS		30
+#define		ST_MLINK	31
+#define		ST_WOULDBLOCK	35
+#define		ST_NAMETOOLONG	63
+#define		ST_NOTEMPTY	66
+#define		ST_DQUOT	69
+#define		ST_STALE	70
+#define		ST_REMOTE	71
+#define		ST_NOT_READY	72
+#define		ST_BADHANDLE	10001
+#define		ST_NOT_SYNC	10002
+#define		ST_BAD_COOKIE	10003
+#define		ST_NOTSUPP	10004
+#define		ST_TOOSMALL	10005
+#define		ST_SERVERFAULT	10006
+#define		ST_BADTYPE	10007
+#define		ST_JUKEBOX	10008
+#define		ST_NOTMOUNTED	10009
+#define		ST_MAINTMODE	10010
+#define		ST_STALEACL	10011
+
+/*
+ *	On writes, how the client wants the data written.
+ */
+
+#define	CACHE_CSTABLE		1
+#define CACHE_UNSTABLE		2
+
+/*
+ *	Lets the client know at which level the data was committed on
+ *	a write request
+ */
+
+#define	CMFILE_SYNCH_NVRAM	1
+#define	CMDATA_SYNCH_NVRAM	2
+#define	CMFILE_SYNCH		3
+#define CMDATA_SYNCH		4
+#define CMUNSTABLE		5
+
+#define	RIO_TYPE_WRITE 			0x0000
+#define	RIO_TYPE_READ			0x0001
+#define	RIO_SUREWRITE			0x0008
+
+#define RIO2_IO_TYPE			0x0003
+#define RIO2_IO_TYPE_WRITE		0x0000
+#define RIO2_IO_TYPE_READ		0x0001
+#define RIO2_IO_TYPE_VERIFY		0x0002
+#define RIO2_IO_ERROR			0x0004
+#define RIO2_IO_SUREWRITE		0x0008
+#define RIO2_SGL_CONFORMANT		0x0010
+#define RIO2_SG_FORMAT			0xF000
+#define RIO2_SG_FORMAT_ARC		0x0000
+#define RIO2_SG_FORMAT_SRL		0x1000
+#define RIO2_SG_FORMAT_IEEE1212		0x2000
+
+struct aac_read
+{
+	__le32		command;
+	__le32		cid;
+	__le32		block;
+	__le32		count;
+	struct sgmap	sg;	// Must be last in struct because it is variable
+};
+
+struct aac_read64
+{
+	__le32		command;
+	__le16		cid;
+	__le16		sector_count;
+	__le32		block;
+	__le16		pad;
+	__le16		flags;
+	struct sgmap64	sg;	// Must be last in struct because it is variable
+};
+
+struct aac_read_reply
+{
+	__le32		status;
+	__le32		count;
+};
+
+struct aac_write
+{
+	__le32		command;
+	__le32		cid;
+	__le32		block;
+	__le32		count;
+	__le32		stable;	// Not used
+	struct sgmap	sg;	// Must be last in struct because it is variable
+};
+
+struct aac_write64
+{
+	__le32		command;
+	__le16		cid;
+	__le16		sector_count;
+	__le32		block;
+	__le16		pad;
+	__le16		flags;
+	struct sgmap64	sg;	// Must be last in struct because it is variable
+};
+struct aac_write_reply
+{
+	__le32		status;
+	__le32		count;
+	__le32		committed;
+};
+
+struct aac_raw_io
+{
+	__le32		block[2];
+	__le32		count;
+	__le16		cid;
+	__le16		flags;		/* 00 W, 01 R */
+	__le16		bpTotal;	/* reserved for F/W use */
+	__le16		bpComplete;	/* reserved for F/W use */
+	struct sgmapraw	sg;
+};
+
+struct aac_raw_io2 {
+	__le32		blockLow;
+	__le32		blockHigh;
+	__le32		byteCount;
+	__le16		cid;
+	__le16		flags;		/* RIO2 flags */
+	__le32		sgeFirstSize;	/* size of first sge el. */
+	__le32		sgeNominalSize;	/* size of 2nd sge el. (if conformant) */
+	u8		sgeCnt;		/* only 8 bits required */
+	u8		bpTotal;	/* reserved for F/W use */
+	u8		bpComplete;	/* reserved for F/W use */
+	u8		sgeFirstIndex;	/* reserved for F/W use */
+	u8		unused[4];
+	struct sge_ieee1212	sge[1];
+};
+
+#define CT_FLUSH_CACHE 129
+struct aac_synchronize {
+	__le32		command;	/* VM_ContainerConfig */
+	__le32		type;		/* CT_FLUSH_CACHE */
+	__le32		cid;
+	__le32		parm1;
+	__le32		parm2;
+	__le32		parm3;
+	__le32		parm4;
+	__le32		count;	/* sizeof(((struct aac_synchronize_reply *)NULL)->data) */
+};
+
+struct aac_synchronize_reply {
+	__le32		dummy0;
+	__le32		dummy1;
+	__le32		status;	/* CT_OK */
+	__le32		parm1;
+	__le32		parm2;
+	__le32		parm3;
+	__le32		parm4;
+	__le32		parm5;
+	u8		data[16];
+};
+
+#define CT_POWER_MANAGEMENT	245
+#define CT_PM_START_UNIT	2
+#define CT_PM_STOP_UNIT		3
+#define CT_PM_UNIT_IMMEDIATE	1
+struct aac_power_management {
+	__le32		command;	/* VM_ContainerConfig */
+	__le32		type;		/* CT_POWER_MANAGEMENT */
+	__le32		sub;		/* CT_PM_* */
+	__le32		cid;
+	__le32		parm;		/* CT_PM_sub_* */
+};
+
+#define CT_PAUSE_IO    65
+#define CT_RELEASE_IO  66
+struct aac_pause {
+	__le32		command;	/* VM_ContainerConfig */
+	__le32		type;		/* CT_PAUSE_IO */
+	__le32		timeout;	/* 10ms ticks */
+	__le32		min;
+	__le32		noRescan;
+	__le32		parm3;
+	__le32		parm4;
+	__le32		count;	/* sizeof(((struct aac_pause_reply *)NULL)->data) */
+};
+
+struct aac_srb
+{
+	__le32		function;
+	__le32		channel;
+	__le32		id;
+	__le32		lun;
+	__le32		timeout;
+	__le32		flags;
+	__le32		count;		// Data xfer size
+	__le32		retry_limit;
+	__le32		cdb_size;
+	u8		cdb[16];
+	struct	sgmap	sg;
+};
+
+/*
+ * This and associated data structs are used by the
+ * ioctl caller and are in cpu order.
+ */
+struct user_aac_srb
+{
+	u32		function;
+	u32		channel;
+	u32		id;
+	u32		lun;
+	u32		timeout;
+	u32		flags;
+	u32		count;		// Data xfer size
+	u32		retry_limit;
+	u32		cdb_size;
+	u8		cdb[16];
+	struct	user_sgmap	sg;
+};
+
+#define		AAC_SENSE_BUFFERSIZE	 30
+
+struct aac_srb_reply
+{
+	__le32		status;
+	__le32		srb_status;
+	__le32		scsi_status;
+	__le32		data_xfer_length;
+	__le32		sense_data_size;
+	u8		sense_data[AAC_SENSE_BUFFERSIZE]; // Can this be SCSI_SENSE_BUFFERSIZE
+};
+
+struct aac_srb_unit {
+	struct aac_srb		srb;
+	struct aac_srb_reply	srb_reply;
+};
+
+/*
+ * SRB Flags
+ */
+#define		SRB_NoDataXfer		 0x0000
+#define		SRB_DisableDisconnect	 0x0004
+#define		SRB_DisableSynchTransfer 0x0008
+#define		SRB_BypassFrozenQueue	 0x0010
+#define		SRB_DisableAutosense	 0x0020
+#define		SRB_DataIn		 0x0040
+#define		SRB_DataOut		 0x0080
+
+/*
+ * SRB Functions - set in aac_srb->function
+ */
+#define	SRBF_ExecuteScsi	0x0000
+#define	SRBF_ClaimDevice	0x0001
+#define	SRBF_IO_Control		0x0002
+#define	SRBF_ReceiveEvent	0x0003
+#define	SRBF_ReleaseQueue	0x0004
+#define	SRBF_AttachDevice	0x0005
+#define	SRBF_ReleaseDevice	0x0006
+#define	SRBF_Shutdown		0x0007
+#define	SRBF_Flush		0x0008
+#define	SRBF_AbortCommand	0x0010
+#define	SRBF_ReleaseRecovery	0x0011
+#define	SRBF_ResetBus		0x0012
+#define	SRBF_ResetDevice	0x0013
+#define	SRBF_TerminateIO	0x0014
+#define	SRBF_FlushQueue		0x0015
+#define	SRBF_RemoveDevice	0x0016
+#define	SRBF_DomainValidation	0x0017
+
+/*
+ * SRB SCSI Status - set in aac_srb->scsi_status
+ */
+#define SRB_STATUS_PENDING                  0x00
+#define SRB_STATUS_SUCCESS                  0x01
+#define SRB_STATUS_ABORTED                  0x02
+#define SRB_STATUS_ABORT_FAILED             0x03
+#define SRB_STATUS_ERROR                    0x04
+#define SRB_STATUS_BUSY                     0x05
+#define SRB_STATUS_INVALID_REQUEST          0x06
+#define SRB_STATUS_INVALID_PATH_ID          0x07
+#define SRB_STATUS_NO_DEVICE                0x08
+#define SRB_STATUS_TIMEOUT                  0x09
+#define SRB_STATUS_SELECTION_TIMEOUT        0x0A
+#define SRB_STATUS_COMMAND_TIMEOUT          0x0B
+#define SRB_STATUS_MESSAGE_REJECTED         0x0D
+#define SRB_STATUS_BUS_RESET                0x0E
+#define SRB_STATUS_PARITY_ERROR             0x0F
+#define SRB_STATUS_REQUEST_SENSE_FAILED     0x10
+#define SRB_STATUS_NO_HBA                   0x11
+#define SRB_STATUS_DATA_OVERRUN             0x12
+#define SRB_STATUS_UNEXPECTED_BUS_FREE      0x13
+#define SRB_STATUS_PHASE_SEQUENCE_FAILURE   0x14
+#define SRB_STATUS_BAD_SRB_BLOCK_LENGTH     0x15
+#define SRB_STATUS_REQUEST_FLUSHED          0x16
+#define SRB_STATUS_DELAYED_RETRY	    0x17
+#define SRB_STATUS_INVALID_LUN              0x20
+#define SRB_STATUS_INVALID_TARGET_ID        0x21
+#define SRB_STATUS_BAD_FUNCTION             0x22
+#define SRB_STATUS_ERROR_RECOVERY           0x23
+#define SRB_STATUS_NOT_STARTED		    0x24
+#define SRB_STATUS_NOT_IN_USE		    0x30
+#define SRB_STATUS_FORCE_ABORT		    0x31
+#define SRB_STATUS_DOMAIN_VALIDATION_FAIL   0x32
+
+/*
+ * Object-Server / Volume-Manager Dispatch Classes
+ */
+
+#define		VM_Null			0
+#define		VM_NameServe		1
+#define		VM_ContainerConfig	2
+#define		VM_Ioctl		3
+#define		VM_FilesystemIoctl	4
+#define		VM_CloseAll		5
+#define		VM_CtBlockRead		6
+#define		VM_CtBlockWrite		7
+#define		VM_SliceBlockRead	8	/* raw access to configured "storage objects" */
+#define		VM_SliceBlockWrite	9
+#define		VM_DriveBlockRead	10	/* raw access to physical devices */
+#define		VM_DriveBlockWrite	11
+#define		VM_EnclosureMgt		12	/* enclosure management */
+#define		VM_Unused		13	/* used to be diskset management */
+#define		VM_CtBlockVerify	14
+#define		VM_CtPerf		15	/* performance test */
+#define		VM_CtBlockRead64	16
+#define		VM_CtBlockWrite64	17
+#define		VM_CtBlockVerify64	18
+#define		VM_CtHostRead64		19
+#define		VM_CtHostWrite64	20
+#define		VM_DrvErrTblLog		21
+#define		VM_NameServe64		22
+#define		VM_NameServeAllBlk	30
+
+#define		MAX_VMCOMMAND_NUM	23	/* used for sizing stats array - leave last */
+
+/*
+ *	Descriptive information (e.g., vital stats)
+ *	that a content manager might report.  The
+ *	FileArray filesystem component is one example
+ *	of a content manager.  Raw mode might be
+ *	another.
+ */
+
+struct aac_fsinfo {
+	__le32  fsTotalSize;	/* Consumed by fs, incl. metadata */
+	__le32  fsBlockSize;
+	__le32  fsFragSize;
+	__le32  fsMaxExtendSize;
+	__le32  fsSpaceUnits;
+	__le32  fsMaxNumFiles;
+	__le32  fsNumFreeFiles;
+	__le32  fsInodeDensity;
+};	/* valid iff ObjType == FT_FILESYS && !(ContentState & FSCS_NOTCLEAN) */
+
+struct  aac_blockdevinfo {
+	__le32	block_size;
+	__le32  logical_phys_map;
+	u8	identifier[16];
+};
+
+union aac_contentinfo {
+	struct	aac_fsinfo		filesys;
+	struct	aac_blockdevinfo	bdevinfo;
+};
+
+/*
+ *	Query for Container Configuration Status
+ */
+
+#define CT_GET_CONFIG_STATUS 147
+struct aac_get_config_status {
+	__le32		command;	/* VM_ContainerConfig */
+	__le32		type;		/* CT_GET_CONFIG_STATUS */
+	__le32		parm1;
+	__le32		parm2;
+	__le32		parm3;
+	__le32		parm4;
+	__le32		parm5;
+	__le32		count;	/* sizeof(((struct aac_get_config_status_resp *)NULL)->data) */
+};
+
+#define CFACT_CONTINUE 0
+#define CFACT_PAUSE    1
+#define CFACT_ABORT    2
+struct aac_get_config_status_resp {
+	__le32		response; /* ST_OK */
+	__le32		dummy0;
+	__le32		status;	/* CT_OK */
+	__le32		parm1;
+	__le32		parm2;
+	__le32		parm3;
+	__le32		parm4;
+	__le32		parm5;
+	struct {
+		__le32	action; /* CFACT_CONTINUE, CFACT_PAUSE or CFACT_ABORT */
+		__le16	flags;
+		__le16	count;
+	}		data;
+};
+
+/*
+ *	Accept the configuration as-is
+ */
+
+#define CT_COMMIT_CONFIG 152
+
+struct aac_commit_config {
+	__le32		command;	/* VM_ContainerConfig */
+	__le32		type;		/* CT_COMMIT_CONFIG */
+};
+
+/*
+ *	Query for Container Count
+ */
+
+#define CT_GET_CONTAINER_COUNT 4
+struct aac_get_container_count {
+	__le32		command;	/* VM_ContainerConfig */
+	__le32		type;		/* CT_GET_CONTAINER_COUNT */
+};
+
+struct aac_get_container_count_resp {
+	__le32		response; /* ST_OK */
+	__le32		dummy0;
+	__le32		MaxContainers;
+	__le32		ContainerSwitchEntries;
+	__le32		MaxPartitions;
+	__le32		MaxSimpleVolumes;
+};
+
+
+/*
+ *	Query for "mountable" objects, i.e., objects that are typically
+ *	associated with a drive letter on the client (host) side.
+ */
+
+struct aac_mntent {
+	__le32			oid;
+	u8			name[16];	/* if applicable */
+	struct creation_info	create_info;	/* if applicable */
+	__le32			capacity;
+	__le32			vol;		/* substrate structure */
+	__le32			obj;		/* FT_FILESYS, etc. */
+	__le32			state;		/* unready for mounting,
+						   readonly, etc. */
+	union aac_contentinfo	fileinfo;	/* Info specific to content
+						   manager (e.g., filesystem) */
+	__le32			altoid;		/* != oid <==> snapshot or
+						   broken mirror exists */
+	__le32			capacityhigh;
+};
+
+#define FSCS_NOTCLEAN	0x0001  /* fsck is necessary before mounting */
+#define FSCS_READONLY	0x0002	/* possible result of broken mirror */
+#define FSCS_HIDDEN	0x0004	/* should be ignored - set during a clear */
+#define FSCS_NOT_READY	0x0008	/* Array spinning up to fulfil request */
+
+struct aac_query_mount {
+	__le32		command;
+	__le32		type;
+	__le32		count;
+};
+
+struct aac_mount {
+	__le32		status;
+	__le32		type;           /* should be same as that requested */
+	__le32		count;
+	struct aac_mntent mnt[1];
+};
+
+#define CT_READ_NAME 130
+struct aac_get_name {
+	__le32		command;	/* VM_ContainerConfig */
+	__le32		type;		/* CT_READ_NAME */
+	__le32		cid;
+	__le32		parm1;
+	__le32		parm2;
+	__le32		parm3;
+	__le32		parm4;
+	__le32		count;	/* sizeof(((struct aac_get_name_resp *)NULL)->data) */
+};
+
+struct aac_get_name_resp {
+	__le32		dummy0;
+	__le32		dummy1;
+	__le32		status;	/* CT_OK */
+	__le32		parm1;
+	__le32		parm2;
+	__le32		parm3;
+	__le32		parm4;
+	__le32		parm5;
+	u8		data[17];
+};
+
+#define CT_CID_TO_32BITS_UID 165
+struct aac_get_serial {
+	__le32		command;	/* VM_ContainerConfig */
+	__le32		type;		/* CT_CID_TO_32BITS_UID */
+	__le32		cid;
+};
+
+struct aac_get_serial_resp {
+	__le32		dummy0;
+	__le32		dummy1;
+	__le32		status;	/* CT_OK */
+	__le32		uid;
+};
+
+/*
+ * The following command is sent to shut down each container.
+ */
+
+struct aac_close {
+	__le32	command;
+	__le32	cid;
+};
+
+struct aac_query_disk
+{
+	s32	cnum;
+	s32	bus;
+	s32	id;
+	s32	lun;
+	u32	valid;
+	u32	locked;
+	u32	deleted;
+	s32	instance;
+	s8	name[10];
+	u32	unmapped;
+};
+
+struct aac_delete_disk {
+	u32	disknum;
+	u32	cnum;
+};
+
+struct fib_ioctl
+{
+	u32	fibctx;
+	s32	wait;
+	char	__user *fib;
+};
+
+struct revision
+{
+	u32 compat;
+	__le32 version;
+	__le32 build;
+};
+
+
+/*
+ *	Ugly - non-Linux-like ioctl coding for backward compatibility.
+ */
+
+#define CTL_CODE(function, method) (                 \
+    (4 << 16) | ((function) << 2) | (method) \
+)
+
+/*
+ *	Define the method codes for how buffers are passed for I/O and FS
+ *	controls
+ */
+
+#define METHOD_BUFFERED                 0
+#define METHOD_NEITHER                  3
+
+/*
+ *	Filesystem ioctls
+ */
+
+#define FSACTL_SENDFIB				CTL_CODE(2050, METHOD_BUFFERED)
+#define FSACTL_SEND_RAW_SRB			CTL_CODE(2067, METHOD_BUFFERED)
+#define FSACTL_DELETE_DISK			0x163
+#define FSACTL_QUERY_DISK			0x173
+#define FSACTL_OPEN_GET_ADAPTER_FIB		CTL_CODE(2100, METHOD_BUFFERED)
+#define FSACTL_GET_NEXT_ADAPTER_FIB		CTL_CODE(2101, METHOD_BUFFERED)
+#define FSACTL_CLOSE_GET_ADAPTER_FIB		CTL_CODE(2102, METHOD_BUFFERED)
+#define FSACTL_MINIPORT_REV_CHECK               CTL_CODE(2107, METHOD_BUFFERED)
+#define FSACTL_GET_PCI_INFO			CTL_CODE(2119, METHOD_BUFFERED)
+#define FSACTL_FORCE_DELETE_DISK		CTL_CODE(2120, METHOD_NEITHER)
+#define FSACTL_GET_CONTAINERS			2131
+#define FSACTL_SEND_LARGE_FIB			CTL_CODE(2138, METHOD_BUFFERED)
+#define FSACTL_RESET_IOP			CTL_CODE(2140, METHOD_BUFFERED)
+#define FSACTL_GET_HBA_INFO			CTL_CODE(2150, METHOD_BUFFERED)
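+
+/*
+ * Worked example (editor's note, not part of the original source):
+ * FSACTL_SENDFIB above expands as CTL_CODE(2050, METHOD_BUFFERED)
+ *	== (4 << 16) | (2050 << 2) | 0 == 0x42008.
+ */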
+/* flags defined for IOP & HW SOFT RESET */
+#define HW_IOP_RESET				0x01
+#define HW_SOFT_RESET				0x02
+#define IOP_HWSOFT_RESET			(HW_IOP_RESET | HW_SOFT_RESET)
+/* HW Soft Reset register offset */
+#define IBW_SWR_OFFSET				0x4000
+#define SOFT_RESET_TIME			60
+
+
+
+struct aac_common
+{
+	/*
+	 *	If this value is set to 1 then interrupt moderation will occur
+	 *	in the base communication support.
+	 */
+	u32 irq_mod;
+	u32 peak_fibs;
+	u32 zero_fibs;
+	u32 fib_timeouts;
+	/*
+	 *	Statistical counters in debug mode
+	 */
+#ifdef DBG
+	u32 FibsSent;
+	u32 FibRecved;
+	u32 NativeSent;
+	u32 NativeRecved;
+	u32 NoResponseSent;
+	u32 NoResponseRecved;
+	u32 AsyncSent;
+	u32 AsyncRecved;
+	u32 NormalSent;
+	u32 NormalRecved;
+#endif
+};
+
+extern struct aac_common aac_config;
+
+/*
+ * This is for management ioctl purpose only.
+ */
+struct aac_hba_info {
+
+	u8	driver_name[50];
+	u8	adapter_number;
+	u8	system_io_bus_number;
+	u8	device_number;
+	u32	function_number;
+	u32	vendor_id;
+	u32	device_id;
+	u32	sub_vendor_id;
+	u32	sub_system_id;
+	u32	mapped_base_address_size;
+	u32	base_physical_address_high_part;
+	u32	base_physical_address_low_part;
+
+	u32	max_command_size;
+	u32	max_fib_size;
+	u32	max_scatter_gather_from_os;
+	u32	max_scatter_gather_to_fw;
+	u32	max_outstanding_fibs;
+
+	u32	queue_start_threshold;
+	u32	queue_dump_threshold;
+	u32	max_io_size_queued;
+	u32	outstanding_io;
+
+	u32	firmware_build_number;
+	u32	bios_build_number;
+	u32	driver_build_number;
+	u32	serial_number_high_part;
+	u32	serial_number_low_part;
+	u32	supported_options;
+	u32	feature_bits;
+	u32	currentnumber_ports;
+
+	u8	new_comm_interface:1;
+	u8	new_commands_supported:1;
+	u8	disable_passthrough:1;
+	u8	expose_non_dasd:1;
+	u8	queue_allowed:1;
+	u8	bled_check_enabled:1;
+	u8	reserved1:1;
+	u8	reserted2:1;
+
+	u32	reserved3[10];
+
+};
+
+/*
+ *	The following macro is used when sending and receiving FIBs. It is
+ *	only used for debugging.
+ */
+
+#ifdef DBG
+#define	FIB_COUNTER_INCREMENT(counter)		(counter)++
+#else
+#define	FIB_COUNTER_INCREMENT(counter)
+#endif
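+
+/*
+ * Illustrative use (editor's example, not in the original source):
+ *
+ *	FIB_COUNTER_INCREMENT(aac_config.FibsSent);
+ *
+ * bumps the DBG-only statistics counter in struct aac_common; in
+ * non-DBG builds the macro expands to nothing and the statement
+ * compiles away.
+ */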
+
+/*
+ *	Adapter direct commands
+ *	Monitor/Kernel API
+ */
+
+#define	BREAKPOINT_REQUEST		0x00000004
+#define	INIT_STRUCT_BASE_ADDRESS	0x00000005
+#define READ_PERMANENT_PARAMETERS	0x0000000a
+#define WRITE_PERMANENT_PARAMETERS	0x0000000b
+#define HOST_CRASHING			0x0000000d
+#define	SEND_SYNCHRONOUS_FIB		0x0000000c
+#define COMMAND_POST_RESULTS		0x00000014
+#define GET_ADAPTER_PROPERTIES		0x00000019
+#define GET_DRIVER_BUFFER_PROPERTIES	0x00000023
+#define RCV_TEMP_READINGS		0x00000025
+#define GET_COMM_PREFERRED_SETTINGS	0x00000026
+#define IOP_RESET_FW_FIB_DUMP		0x00000034
+#define DROP_IO			0x00000035
+#define IOP_RESET			0x00001000
+#define IOP_RESET_ALWAYS		0x00001001
+#define RE_INIT_ADAPTER		0x000000ee
+
+#define IOP_SRC_RESET_MASK		0x00000100
+
+/*
+ *	Adapter Status Register
+ *
+ *  Phase Status mailbox is 32 bits:
+ *	<31:16> = Phase Status
+ *	<15:0>  = Phase
+ *
+ *	The adapter reports its present state through the phase.  Only
+ *	a single phase should ever be set.  Each phase can have multiple
+ *	phase status bits to provide more detailed information about the
+ *	state of the board.  Care should be taken to ensure that any phase
+ *	status bits that are set when changing the phase are also valid
+ *	for the new phase or be cleared out.  Adapter software (monitor,
+ *	iflash, kernel) is responsible for properly maintaining the phase
+ *	status mailbox when it is running.
+ *
+ *	MONKER_API Phases
+ *
+ *	Phases are bit oriented.  It is NOT valid to have multiple bits set
+ */
+
+#define	SELF_TEST_FAILED		0x00000004
+#define	MONITOR_PANIC			0x00000020
+#define	KERNEL_BOOTING			0x00000040
+#define	KERNEL_UP_AND_RUNNING		0x00000080
+#define	KERNEL_PANIC			0x00000100
+#define	FLASH_UPD_PENDING		0x00002000
+#define	FLASH_UPD_SUCCESS		0x00004000
+#define	FLASH_UPD_FAILED		0x00008000
+#define	INVALID_OMR			0xffffffff
+#define	FWUPD_TIMEOUT			(5 * 60)
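+
+/*
+ * Illustrative decode (editor's note, not in the original source):
+ * given a raw 32-bit mailbox value 'omr', the phase is (omr & 0xffff)
+ * and the phase status is (omr >> 16); e.g. omr == 0x00000080 means
+ * KERNEL_UP_AND_RUNNING with no phase status bits set.
+ */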
+
+/*
+ *	Doorbell bit defines
+ */
+
+#define DoorBellSyncCmdAvailable	(1<<0)	/* Host -> Adapter */
+#define DoorBellPrintfDone		(1<<5)	/* Host -> Adapter */
+#define DoorBellAdapterNormCmdReady	(1<<1)	/* Adapter -> Host */
+#define DoorBellAdapterNormRespReady	(1<<2)	/* Adapter -> Host */
+#define DoorBellAdapterNormCmdNotFull	(1<<3)	/* Adapter -> Host */
+#define DoorBellAdapterNormRespNotFull	(1<<4)	/* Adapter -> Host */
+#define DoorBellPrintfReady		(1<<5)	/* Adapter -> Host */
+#define DoorBellAifPending		(1<<6)	/* Adapter -> Host */
+
+/* PMC specific outbound doorbell bits */
+#define PmDoorBellResponseSent		(1<<1)	/* Adapter -> Host */
+
+/*
+ *	For FIB communication, we need all of the following things
+ *	to send back to the user.
+ */
+
+#define		AifCmdEventNotify	1	/* Notify of event */
+#define			AifEnConfigChange	3	/* Adapter configuration change */
+#define			AifEnContainerChange	4	/* Container configuration change */
+#define			AifEnDeviceFailure	5	/* SCSI device failed */
+#define			AifEnEnclosureManagement 13	/* EM_DRIVE_* */
+#define				EM_DRIVE_INSERTION	31
+#define				EM_DRIVE_REMOVAL	32
+#define			EM_SES_DRIVE_INSERTION	33
+#define			EM_SES_DRIVE_REMOVAL	26
+#define			AifEnBatteryEvent	14	/* Change in Battery State */
+#define			AifEnAddContainer	15	/* A new array was created */
+#define			AifEnDeleteContainer	16	/* A container was deleted */
+#define			AifEnExpEvent		23	/* Firmware Event Log */
+#define			AifExeFirmwarePanic	3	/* Firmware Event Panic */
+#define			AifHighPriority		3	/* Highest Priority Event */
+#define			AifEnAddJBOD		30	/* JBOD created */
+#define			AifEnDeleteJBOD		31	/* JBOD deleted */
+
+#define			AifBuManagerEvent		42 /* Bu management */
+#define			AifBuCacheDataLoss		10
+#define			AifBuCacheDataRecover	11
+
+#define		AifCmdJobProgress	2	/* Progress report */
+#define			AifJobCtrZero	101	/* Array Zero progress */
+#define			AifJobStsSuccess 1	/* Job completes */
+#define			AifJobStsRunning 102	/* Job running */
+#define		AifCmdAPIReport		3	/* Report from other user of API */
+#define		AifCmdDriverNotify	4	/* Notify host driver of event */
+#define			AifDenMorphComplete 200	/* A morph operation completed */
+#define			AifDenVolumeExtendComplete 201 /* A volume extend completed */
+#define		AifReqJobList		100	/* Gets back complete job list */
+#define		AifReqJobsForCtr	101	/* Gets back jobs for specific container */
+#define		AifReqJobsForScsi	102	/* Gets back jobs for specific SCSI device */
+#define		AifReqJobReport		103	/* Gets back a specific job report or list of them */
+#define		AifReqTerminateJob	104	/* Terminates job */
+#define		AifReqSuspendJob	105	/* Suspends a job */
+#define		AifReqResumeJob		106	/* Resumes a job */
+#define		AifReqSendAPIReport	107	/* API generic report requests */
+#define		AifReqAPIJobStart	108	/* Start a job from the API */
+#define		AifReqAPIJobUpdate	109	/* Update a job report from the API */
+#define		AifReqAPIJobFinish	110	/* Finish a job from the API */
+
+/* PMC NEW COMM: Request the event data */
+#define		AifReqEvent		200
+#define		AifRawDeviceRemove	203	/* RAW device deleted */
+#define		AifNativeDeviceAdd	204	/* native HBA device added */
+#define		AifNativeDeviceRemove	205	/* native HBA device removed */
+
+
+/*
+ *	Adapter Initiated FIB command structures. Start with the adapter
+ *	initiated FIBs that really come from the adapter and are responded
+ *	to by the host.
+ */
+
+struct aac_aifcmd {
+	__le32 command;		/* Tell host what type of notify this is */
+	__le32 seqnum;		/* To allow ordering of reports (if necessary) */
+	u8 data[1];		/* Undefined length (from kernel viewpoint) */
+};
+
+/**
+ *	cap_to_cyls - convert capacity to cylinders,
+ *	accounting for the fact that capacity could be a 64-bit value
+ */
+static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
+{
+	sector_div(capacity, divisor);
+	return capacity;
+}
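+
+/*
+ * Hypothetical usage sketch (editor's note, not in the original source),
+ * mirroring a typical BIOS-geometry calculation:
+ *
+ *	param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors);
+ *
+ * sector_div() divides 'capacity' in place and copes with 64-bit
+ * sector_t values; the quotient is then returned, truncated to
+ * unsigned int.
+ */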
+
+static inline int aac_adapter_check_health(struct aac_dev *dev)
+{
+	if (unlikely(pci_channel_offline(dev->pdev)))
+		return -1;
+
+	return (dev)->a_ops.adapter_check_health(dev);
+}
+
+
+int aac_scan_host(struct aac_dev *dev);
+
+static inline void aac_schedule_safw_scan_worker(struct aac_dev *dev)
+{
+	schedule_delayed_work(&dev->safw_rescan_work, AAC_SAFW_RESCAN_DELAY);
+}
+
+static inline void aac_safw_rescan_worker(struct work_struct *work)
+{
+	struct aac_dev *dev = container_of(to_delayed_work(work),
+		struct aac_dev, safw_rescan_work);
+
+	wait_event(dev->scsi_host_ptr->host_wait,
+		!scsi_host_in_recovery(dev->scsi_host_ptr));
+
+	aac_scan_host(dev);
+}
+
+static inline void aac_cancel_safw_rescan_worker(struct aac_dev *dev)
+{
+	if (dev->sa_firmware)
+		cancel_delayed_work_sync(&dev->safw_rescan_work);
+}
+
+/* SCp.phase values */
+#define AAC_OWNER_MIDLEVEL	0x101
+#define AAC_OWNER_LOWLEVEL	0x102
+#define AAC_OWNER_ERROR_HANDLER	0x103
+#define AAC_OWNER_FIRMWARE	0x106
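+
+/*
+ * Illustrative use (editor's sketch, not in the original source): the
+ * driver can record which layer currently owns a command by stashing
+ * one of the values above in the midlayer scratch area, e.g.
+ *
+ *	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+ *
+ * and setting it back to AAC_OWNER_MIDLEVEL on completion.
+ */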
+
+void aac_safw_rescan_worker(struct work_struct *work);
+int aac_acquire_irq(struct aac_dev *dev);
+void aac_free_irq(struct aac_dev *dev);
+int aac_setup_safw_adapter(struct aac_dev *dev);
+const char *aac_driverinfo(struct Scsi_Host *);
+void aac_fib_vector_assign(struct aac_dev *dev);
+struct fib *aac_fib_alloc(struct aac_dev *dev);
+struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd);
+int aac_fib_setup(struct aac_dev *dev);
+void aac_fib_map_free(struct aac_dev *dev);
+void aac_fib_free(struct fib * context);
+void aac_fib_init(struct fib * context);
+void aac_printf(struct aac_dev *dev, u32 val);
+int aac_fib_send(u16 command, struct fib * context, unsigned long size, int priority, int wait, int reply, fib_callback callback, void *ctxt);
+int aac_hba_send(u8 command, struct fib *context,
+		fib_callback callback, void *ctxt);
+int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry);
+void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum);
+int aac_fib_complete(struct fib * context);
+void aac_hba_callback(void *context, struct fib *fibptr);
+#define fib_data(fibctx) ((void *)(fibctx)->hw_fib_va->data)
+struct aac_dev *aac_init_adapter(struct aac_dev *dev);
+void aac_src_access_devreg(struct aac_dev *dev, int mode);
+void aac_set_intx_mode(struct aac_dev *dev);
+int aac_get_config_status(struct aac_dev *dev, int commit_flag);
+int aac_get_containers(struct aac_dev *dev);
+int aac_scsi_cmd(struct scsi_cmnd *cmd);
+int aac_dev_ioctl(struct aac_dev *dev, int cmd, void __user *arg);
+#ifndef shost_to_class
+#define shost_to_class(shost) &shost->shost_dev
+#endif
+ssize_t aac_get_serial_number(struct device *dev, char *buf);
+int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg);
+int aac_rx_init(struct aac_dev *dev);
+int aac_rkt_init(struct aac_dev *dev);
+int aac_nark_init(struct aac_dev *dev);
+int aac_sa_init(struct aac_dev *dev);
+int aac_src_init(struct aac_dev *dev);
+int aac_srcv_init(struct aac_dev *dev);
+int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify);
+void aac_define_int_mode(struct aac_dev *dev);
+unsigned int aac_response_normal(struct aac_queue * q);
+unsigned int aac_command_normal(struct aac_queue * q);
+unsigned int aac_intr_normal(struct aac_dev *dev, u32 Index,
+			int isAif, int isFastResponse,
+			struct hw_fib *aif_fib);
+int aac_reset_adapter(struct aac_dev *dev, int forced, u8 reset_type);
+int aac_check_health(struct aac_dev * dev);
+int aac_command_thread(void *data);
+int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context *fibctx);
+int aac_fib_adapter_complete(struct fib * fibptr, unsigned short size);
+struct aac_driver_ident* aac_get_driver_ident(int devtype);
+int aac_get_adapter_info(struct aac_dev* dev);
+int aac_send_shutdown(struct aac_dev *dev);
+int aac_probe_container(struct aac_dev *dev, int cid);
+int _aac_rx_init(struct aac_dev *dev);
+int aac_rx_select_comm(struct aac_dev *dev, int comm);
+int aac_rx_deliver_producer(struct fib * fib);
+
+static inline int aac_is_src(struct aac_dev *dev)
+{
+	u16 device = dev->pdev->device;
+
+	if (device == PMC_DEVICE_S6 ||
+		device == PMC_DEVICE_S7 ||
+		device == PMC_DEVICE_S8)
+		return 1;
+	return 0;
+}
+
+static inline int aac_supports_2T(struct aac_dev *dev)
+{
+	return (dev->adapter_info.options & AAC_OPT_NEW_COMM_64);
+}
+
+char * get_container_type(unsigned type);
+extern int numacb;
+extern char aac_driver_version[];
+extern int startup_timeout;
+extern int aif_timeout;
+extern int expose_physicals;
+extern int aac_reset_devices;
+extern int aac_msi;
+extern int aac_commit;
+extern int update_interval;
+extern int check_interval;
+extern int aac_check_reset;
+extern int aac_fib_dump;
+#endif
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
new file mode 100644
index 0000000..25f6600
--- /dev/null
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -0,0 +1,1123 @@
+/*
+ *	Adaptec AAC series RAID controller driver
+ *	(c) Copyright 2001 Red Hat Inc.
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *		 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ *  commctrl.c
+ *
+ * Abstract: Contains all routines for control of the AFA comm layer
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+#include <linux/dma-mapping.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h> /* ssleep prototype */
+#include <linux/kthread.h>
+#include <linux/semaphore.h>
+#include <linux/uaccess.h>
+#include <scsi/scsi_host.h>
+
+#include "aacraid.h"
+
+/**
+ *	ioctl_send_fib	-	send a FIB from userspace
+ *	@dev:	adapter being processed
+ *	@arg:	arguments to the ioctl call
+ *
+ *	This routine sends a fib to the adapter on behalf of a user level
+ *	program.
+ */
+# define AAC_DEBUG_PREAMBLE	KERN_INFO
+# define AAC_DEBUG_POSTAMBLE
+
+static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
+{
+	struct hw_fib * kfib;
+	struct fib *fibptr;
+	struct hw_fib * hw_fib = NULL;
+	dma_addr_t hw_fib_pa = 0;
+	unsigned int size, osize;
+	int retval;
+
+	if (dev->in_reset) {
+		return -EBUSY;
+	}
+	fibptr = aac_fib_alloc(dev);
+	if(fibptr == NULL) {
+		return -ENOMEM;
+	}
+
+	kfib = fibptr->hw_fib_va;
+	/*
+	 *	First copy in the header so that we can check the size field.
+	 */
+	if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) {
+		aac_fib_free(fibptr);
+		return -EFAULT;
+	}
+	/*
+	 *	Since we copy based on the fib header size, make sure that we
+	 *	will not overrun the buffer when we copy the memory. Return
+	 *	an error if we would.
+	 */
+	osize = size = le16_to_cpu(kfib->header.Size) +
+		sizeof(struct aac_fibhdr);
+	if (size < le16_to_cpu(kfib->header.SenderSize))
+		size = le16_to_cpu(kfib->header.SenderSize);
+	if (size > dev->max_fib_size) {
+		dma_addr_t daddr;
+
+		if (size > 2048) {
+			retval = -EINVAL;
+			goto cleanup;
+		}
+
+		kfib = dma_alloc_coherent(&dev->pdev->dev, size, &daddr,
+					  GFP_KERNEL);
+		if (!kfib) {
+			retval = -ENOMEM;
+			goto cleanup;
+		}
+
+		/* Hijack the hw_fib */
+		hw_fib = fibptr->hw_fib_va;
+		hw_fib_pa = fibptr->hw_fib_pa;
+		fibptr->hw_fib_va = kfib;
+		fibptr->hw_fib_pa = daddr;
+		memset(((char *)kfib) + dev->max_fib_size, 0, size - dev->max_fib_size);
+		memcpy(kfib, hw_fib, dev->max_fib_size);
+	}
+
+	if (copy_from_user(kfib, arg, size)) {
+		retval = -EFAULT;
+		goto cleanup;
+	}
+
+	/* Sanity check the second copy */
+	if ((osize != le16_to_cpu(kfib->header.Size) +
+		sizeof(struct aac_fibhdr))
+		|| (size < le16_to_cpu(kfib->header.SenderSize))) {
+		retval = -EINVAL;
+		goto cleanup;
+	}
+
+	if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) {
+		aac_adapter_interrupt(dev);
+		/*
+		 * Since we didn't really send a fib, zero out the state to allow
+		 * cleanup code not to assert.
+		 */
+		kfib->header.XferState = 0;
+	} else {
+		retval = aac_fib_send(le16_to_cpu(kfib->header.Command), fibptr,
+				le16_to_cpu(kfib->header.Size) , FsaNormal,
+				1, 1, NULL, NULL);
+		if (retval) {
+			goto cleanup;
+		}
+		if (aac_fib_complete(fibptr) != 0) {
+			retval = -EINVAL;
+			goto cleanup;
+		}
+	}
+	/*
+	 *	Make sure that the size returned by the adapter (which includes
+	 *	the header) is less than or equal to the size of a fib, so we
+	 *	don't corrupt application data. Then copy that size to the user
+	 *	buffer. (Don't try to add the header information again, since it
+	 *	was already included by the adapter.)
+	 */
+
+	retval = 0;
+	if (copy_to_user(arg, (void *)kfib, size))
+		retval = -EFAULT;
+cleanup:
+	if (hw_fib) {
+		dma_free_coherent(&dev->pdev->dev, size, kfib,
+				  fibptr->hw_fib_pa);
+		fibptr->hw_fib_pa = hw_fib_pa;
+		fibptr->hw_fib_va = hw_fib;
+	}
+	if (retval != -ERESTARTSYS)
+		aac_fib_free(fibptr);
+	return retval;
+}
+
+/**
+ *	open_getadapter_fib	-	register a new AdapterFibContext
+ *	@dev: adapter to use
+ *	@arg: ioctl argument (receives the new context handle)
+ *
+ *	This routine allocates and registers a new AdapterFibContext so that
+ *	the caller can subsequently retrieve adapter FIBs through it.
+ */
+
+static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
+{
+	struct aac_fib_context * fibctx;
+	int status;
+
+	fibctx = kmalloc(sizeof(struct aac_fib_context), GFP_KERNEL);
+	if (fibctx == NULL) {
+		status = -ENOMEM;
+	} else {
+		unsigned long flags;
+		struct list_head * entry;
+		struct aac_fib_context * context;
+
+		fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT;
+		fibctx->size = sizeof(struct aac_fib_context);
+		/*
+		 *	Yes yes, I know this could be an index, but we have a
+		 * better guarantee of uniqueness for the locked loop below.
+		 * Without the aid of a persistent history, this also helps
+		 * reduce the chance that the opaque context would be reused.
+		 */
+		fibctx->unique = (u32)((ulong)fibctx & 0xFFFFFFFF);
+		/*
+		 *	Initialize the mutex used to wait for the next AIF.
+		 */
+		sema_init(&fibctx->wait_sem, 0);
+		fibctx->wait = 0;
+		/*
+		 *	Initialize the fibs and set the count of fibs on
+		 *	the list to 0.
+		 */
+		fibctx->count = 0;
+		INIT_LIST_HEAD(&fibctx->fib_list);
+		fibctx->jiffies = jiffies/HZ;
+		/*
+		 *	Now add this context onto the adapter's
+		 *	AdapterFibContext list.
+		 */
+		spin_lock_irqsave(&dev->fib_lock, flags);
+		/* Ensure that we have a unique identifier */
+		entry = dev->fib_list.next;
+		while (entry != &dev->fib_list) {
+			context = list_entry(entry, struct aac_fib_context, next);
+			if (context->unique == fibctx->unique) {
+				/* Not unique (32 bits) */
+				fibctx->unique++;
+				entry = dev->fib_list.next;
+			} else {
+				entry = entry->next;
+			}
+		}
+		list_add_tail(&fibctx->next, &dev->fib_list);
+		spin_unlock_irqrestore(&dev->fib_lock, flags);
+		if (copy_to_user(arg, &fibctx->unique,
+						sizeof(fibctx->unique))) {
+			status = -EFAULT;
+		} else {
+			status = 0;
+		}
+	}
+	return status;
+}
+
+/**
+ *	next_getadapter_fib	-	get the next fib
+ *	@dev: adapter to use
+ *	@arg: ioctl argument
+ *
+ *	This routine will get the next Fib, if available, from the AdapterFibContext
+ *	passed in from the user.
+ */
+
+static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
+{
+	struct fib_ioctl f;
+	struct fib *fib;
+	struct aac_fib_context *fibctx;
+	int status;
+	struct list_head * entry;
+	unsigned long flags;
+
+	if(copy_from_user((void *)&f, arg, sizeof(struct fib_ioctl)))
+		return -EFAULT;
+	/*
+	 *	Verify that the HANDLE passed in was a valid AdapterFibContext
+	 *
+	 *	Search the list of AdapterFibContext addresses on the adapter
+	 *	to be sure this is a valid address
+	 */
+	spin_lock_irqsave(&dev->fib_lock, flags);
+	entry = dev->fib_list.next;
+	fibctx = NULL;
+
+	while (entry != &dev->fib_list) {
+		fibctx = list_entry(entry, struct aac_fib_context, next);
+		/*
+		 *	Extract the AdapterFibContext from the Input parameters.
+		 */
+		if (fibctx->unique == f.fibctx) { /* We found a winner */
+			break;
+		}
+		entry = entry->next;
+		fibctx = NULL;
+	}
+	if (!fibctx) {
+		spin_unlock_irqrestore(&dev->fib_lock, flags);
+		dprintk ((KERN_INFO "Fib Context not found\n"));
+		return -EINVAL;
+	}
+
+	if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
+		 (fibctx->size != sizeof(struct aac_fib_context))) {
+		spin_unlock_irqrestore(&dev->fib_lock, flags);
+		dprintk ((KERN_INFO "Fib Context corrupt?\n"));
+		return -EINVAL;
+	}
+	status = 0;
+	/*
+	 *	If there are no fibs to send back, then either wait or return
+	 *	-EAGAIN
+	 */
+return_fib:
+	if (!list_empty(&fibctx->fib_list)) {
+		/*
+		 *	Pull the next fib from the fibs
+		 */
+		entry = fibctx->fib_list.next;
+		list_del(entry);
+
+		fib = list_entry(entry, struct fib, fiblink);
+		fibctx->count--;
+		spin_unlock_irqrestore(&dev->fib_lock, flags);
+		if (copy_to_user(f.fib, fib->hw_fib_va, sizeof(struct hw_fib))) {
+			kfree(fib->hw_fib_va);
+			kfree(fib);
+			return -EFAULT;
+		}
+		/*
+		 *	Free the space occupied by this copy of the fib.
+		 */
+		kfree(fib->hw_fib_va);
+		kfree(fib);
+		status = 0;
+	} else {
+		spin_unlock_irqrestore(&dev->fib_lock, flags);
+		/* If someone killed the AIF aacraid thread, restart it */
+		status = !dev->aif_thread;
+		if (status && !dev->in_reset && dev->queues && dev->fsa_dev) {
+			/* Be paranoid, be very paranoid! */
+			kthread_stop(dev->thread);
+			ssleep(1);
+			dev->aif_thread = 0;
+			dev->thread = kthread_run(aac_command_thread, dev,
+						  "%s", dev->name);
+			ssleep(1);
+		}
+		if (f.wait) {
+			if(down_interruptible(&fibctx->wait_sem) < 0) {
+				status = -ERESTARTSYS;
+			} else {
+				/* Lock again and retry */
+				spin_lock_irqsave(&dev->fib_lock, flags);
+				goto return_fib;
+			}
+		} else {
+			status = -EAGAIN;
+		}
+	}
+	fibctx->jiffies = jiffies/HZ;
+	return status;
+}
+
+int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
+{
+	struct fib *fib;
+
+	/*
+	 *	First free any FIBs that have not been consumed.
+	 */
+	while (!list_empty(&fibctx->fib_list)) {
+		struct list_head * entry;
+		/*
+		 *	Pull the next fib from the fibs
+		 */
+		entry = fibctx->fib_list.next;
+		list_del(entry);
+		fib = list_entry(entry, struct fib, fiblink);
+		fibctx->count--;
+		/*
+		 *	Free the space occupied by this copy of the fib.
+		 */
+		kfree(fib->hw_fib_va);
+		kfree(fib);
+	}
+	/*
+	 *	Remove the Context from the AdapterFibContext List
+	 */
+	list_del(&fibctx->next);
+	/*
+	 *	Invalidate context
+	 */
+	fibctx->type = 0;
+	/*
+	 *	Free the space occupied by the Context
+	 */
+	kfree(fibctx);
+	return 0;
+}
+
+/**
+ *	close_getadapter_fib	-	close down user fib context
+ *	@dev: adapter
+ *	@arg: ioctl arguments
+ *
+ *	This routine will close down the fibctx passed in from the user.
+ */
+
+static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
+{
+	struct aac_fib_context *fibctx;
+	int status;
+	unsigned long flags;
+	struct list_head * entry;
+
+	/*
+	 *	Verify that the HANDLE passed in was a valid AdapterFibContext
+	 *
+	 *	Search the list of AdapterFibContext addresses on the adapter
+	 *	to be sure this is a valid address
+	 */
+
+	entry = dev->fib_list.next;
+	fibctx = NULL;
+
+	while(entry != &dev->fib_list) {
+		fibctx = list_entry(entry, struct aac_fib_context, next);
+		/*
+		 *	Extract the fibctx from the input parameters
+		 */
+		if (fibctx->unique == (u32)(uintptr_t)arg) /* We found a winner */
+			break;
+		entry = entry->next;
+		fibctx = NULL;
+	}
+
+	if (!fibctx)
+		return 0; /* Already gone */
+
+	if((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
+		 (fibctx->size != sizeof(struct aac_fib_context)))
+		return -EINVAL;
+	spin_lock_irqsave(&dev->fib_lock, flags);
+	status = aac_close_fib_context(dev, fibctx);
+	spin_unlock_irqrestore(&dev->fib_lock, flags);
+	return status;
+}
+
+/**
+ *	check_revision	-	report the driver version
+ *	@dev: adapter
+ *	@arg: ioctl arguments
+ *
+ *	This routine returns the driver version.
+ *	Under Linux, there have been no version incompatibilities, so this is
+ *	simple!
+ */
+
+static int check_revision(struct aac_dev *dev, void __user *arg)
+{
+	struct revision response;
+	char *driver_version = aac_driver_version;
+	u32 version;
+
+	response.compat = 1;
+	version = (simple_strtol(driver_version,
+				&driver_version, 10) << 24) | 0x00000400;
+	version += simple_strtol(driver_version + 1, &driver_version, 10) << 16;
+	version += simple_strtol(driver_version + 1, NULL, 10);
+	response.version = cpu_to_le32(version);
+#	ifdef AAC_DRIVER_BUILD
+		response.build = cpu_to_le32(AAC_DRIVER_BUILD);
+#	else
+		response.build = cpu_to_le32(9999);
+#	endif
+
+	if (copy_to_user(arg, &response, sizeof(response)))
+		return -EFAULT;
+	return 0;
+}
+
+
+/**
+ *	aac_send_raw_srb	-	send a raw SRB from user space
+ *	@dev: adapter
+ *	@arg: ioctl arguments
+ */
+
+static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
+{
+	struct fib* srbfib;
+	int status;
+	struct aac_srb *srbcmd = NULL;
+	struct aac_hba_cmd_req *hbacmd = NULL;
+	struct user_aac_srb *user_srbcmd = NULL;
+	struct user_aac_srb __user *user_srb = arg;
+	struct aac_srb_reply __user *user_reply;
+	u32 chn;
+	u32 fibsize = 0;
+	u32 flags = 0;
+	s32 rcode = 0;
+	u32 data_dir;
+	void __user *sg_user[HBA_MAX_SG_EMBEDDED];
+	void *sg_list[HBA_MAX_SG_EMBEDDED];
+	u32 sg_count[HBA_MAX_SG_EMBEDDED];
+	u32 sg_indx = 0;
+	u32 byte_count = 0;
+	u32 actual_fibsize64, actual_fibsize = 0;
+	int i;
+	int is_native_device;
+	u64 address;
+
+
+	if (dev->in_reset) {
+		dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
+		return -EBUSY;
+	}
+	if (!capable(CAP_SYS_ADMIN)){
+		dprintk((KERN_DEBUG"aacraid: No permission to send raw srb\n"));
+		return -EPERM;
+	}
+	/*
+	 *	Allocate and initialize a Fib then setup a SRB command
+	 */
+	if (!(srbfib = aac_fib_alloc(dev))) {
+		return -ENOMEM;
+	}
+
+	memset(sg_list, 0, sizeof(sg_list)); /* cleanup may take issue */
+	if(copy_from_user(&fibsize, &user_srb->count,sizeof(u32))){
+		dprintk((KERN_DEBUG"aacraid: Could not copy data size from user\n"));
+		rcode = -EFAULT;
+		goto cleanup;
+	}
+
+	if ((fibsize < (sizeof(struct user_aac_srb) - sizeof(struct user_sgentry))) ||
+	    (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr)))) {
+		rcode = -EINVAL;
+		goto cleanup;
+	}
+
+	user_srbcmd = kmalloc(fibsize, GFP_KERNEL);
+	if (!user_srbcmd) {
+		dprintk((KERN_DEBUG"aacraid: Could not make a copy of the srb\n"));
+		rcode = -ENOMEM;
+		goto cleanup;
+	}
+	if(copy_from_user(user_srbcmd, user_srb,fibsize)){
+		dprintk((KERN_DEBUG"aacraid: Could not copy srb from user\n"));
+		rcode = -EFAULT;
+		goto cleanup;
+	}
+
+	flags = user_srbcmd->flags; /* from user in cpu order */
+	switch (flags & (SRB_DataIn | SRB_DataOut)) {
+	case SRB_DataOut:
+		data_dir = DMA_TO_DEVICE;
+		break;
+	case (SRB_DataIn | SRB_DataOut):
+		data_dir = DMA_BIDIRECTIONAL;
+		break;
+	case SRB_DataIn:
+		data_dir = DMA_FROM_DEVICE;
+		break;
+	default:
+		data_dir = DMA_NONE;
+	}
+	if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) {
+		dprintk((KERN_DEBUG"aacraid: too many sg entries %d\n",
+			user_srbcmd->sg.count));
+		rcode = -EINVAL;
+		goto cleanup;
+	}
+	if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) {
+		dprintk((KERN_DEBUG"aacraid:SG with no direction specified\n"));
+		rcode = -EINVAL;
+		goto cleanup;
+	}
+	actual_fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) +
+		((user_srbcmd->sg.count & 0xff) * sizeof(struct sgentry));
+	actual_fibsize64 = actual_fibsize + (user_srbcmd->sg.count & 0xff) *
+	  (sizeof(struct sgentry64) - sizeof(struct sgentry));
+	/* User made a mistake - should not continue */
+	if ((actual_fibsize != fibsize) && (actual_fibsize64 != fibsize)) {
+		dprintk((KERN_DEBUG"aacraid: Bad Size specified in "
+		  "Raw SRB command calculated fibsize=%u;%u "
+		  "user_srbcmd->sg.count=%u aac_srb=%zu sgentry=%zu;%zu "
+		  "issued fibsize=%u\n",
+		  actual_fibsize, actual_fibsize64, user_srbcmd->sg.count,
+		  sizeof(struct aac_srb), sizeof(struct sgentry),
+		  sizeof(struct sgentry64), fibsize));
+		rcode = -EINVAL;
+		goto cleanup;
+	}
+
+	chn = user_srbcmd->channel;
+	if (chn < AAC_MAX_BUSES && user_srbcmd->id < AAC_MAX_TARGETS &&
+		dev->hba_map[chn][user_srbcmd->id].devtype ==
+		AAC_DEVTYPE_NATIVE_RAW) {
+		is_native_device = 1;
+		hbacmd = (struct aac_hba_cmd_req *)srbfib->hw_fib_va;
+		memset(hbacmd, 0, 96);	/* sizeof(*hbacmd) is not necessary */
+
+		/* iu_type is a parameter of aac_hba_send */
+		switch (data_dir) {
+		case DMA_TO_DEVICE:
+			hbacmd->byte1 = 2;
+			break;
+		case DMA_FROM_DEVICE:
+		case DMA_BIDIRECTIONAL:
+			hbacmd->byte1 = 1;
+			break;
+		case DMA_NONE:
+		default:
+			break;
+		}
+		hbacmd->lun[1] = cpu_to_le32(user_srbcmd->lun);
+		hbacmd->it_nexus = dev->hba_map[chn][user_srbcmd->id].rmw_nexus;
+
+		/*
+		 * we fill in reply_qid later in aac_src_deliver_message
+		 * we fill in iu_type, request_id later in aac_hba_send
+		 * we fill in emb_data_desc_count, data_length later
+		 * in sg list build
+		 */
+
+		memcpy(hbacmd->cdb, user_srbcmd->cdb, sizeof(hbacmd->cdb));
+
+		address = (u64)srbfib->hw_error_pa;
+		hbacmd->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
+		hbacmd->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
+		hbacmd->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
+		hbacmd->emb_data_desc_count =
+					cpu_to_le32(user_srbcmd->sg.count);
+		srbfib->hbacmd_size = 64 +
+			user_srbcmd->sg.count * sizeof(struct aac_hba_sgl);
+
+	} else {
+		is_native_device = 0;
+		aac_fib_init(srbfib);
+
+		/* raw_srb FIB is not FastResponseCapable */
+		srbfib->hw_fib_va->header.XferState &=
+			~cpu_to_le32(FastResponseCapable);
+
+		srbcmd = (struct aac_srb *) fib_data(srbfib);
+
+		// Fix up srb for endian and force some values
+
+		srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi); // Force this
+		srbcmd->channel	 = cpu_to_le32(user_srbcmd->channel);
+		srbcmd->id	 = cpu_to_le32(user_srbcmd->id);
+		srbcmd->lun	 = cpu_to_le32(user_srbcmd->lun);
+		srbcmd->timeout	 = cpu_to_le32(user_srbcmd->timeout);
+		srbcmd->flags	 = cpu_to_le32(flags);
+		srbcmd->retry_limit = 0; // Obsolete parameter
+		srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size);
+		memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb));
+	}
+
+	byte_count = 0;
+	if (is_native_device) {
+		struct user_sgmap *usg32 = &user_srbcmd->sg;
+		struct user_sgmap64 *usg64 =
+			(struct user_sgmap64 *)&user_srbcmd->sg;
+
+		for (i = 0; i < usg32->count; i++) {
+			void *p;
+			u64 addr;
+
+			sg_count[i] = (actual_fibsize64 == fibsize) ?
+				usg64->sg[i].count : usg32->sg[i].count;
+			if (sg_count[i] >
+				(dev->scsi_host_ptr->max_sectors << 9)) {
+				pr_err("aacraid: upsg->sg[%d].count=%u>%u\n",
+					i, sg_count[i],
+					dev->scsi_host_ptr->max_sectors << 9);
+				rcode = -EINVAL;
+				goto cleanup;
+			}
+
+			p = kmalloc(sg_count[i], GFP_KERNEL);
+			if (!p) {
+				rcode = -ENOMEM;
+				goto cleanup;
+			}
+
+			if (actual_fibsize64 == fibsize) {
+				addr = (u64)usg64->sg[i].addr[0];
+				addr += ((u64)usg64->sg[i].addr[1]) << 32;
+			} else {
+				addr = (u64)usg32->sg[i].addr;
+			}
+
+			sg_user[i] = (void __user *)(uintptr_t)addr;
+			sg_list[i] = p; // save so we can clean up later
+			sg_indx = i;
+
+			if (flags & SRB_DataOut) {
+				if (copy_from_user(p, sg_user[i],
+					sg_count[i])) {
+					rcode = -EFAULT;
+					goto cleanup;
+				}
+			}
+			addr = pci_map_single(dev->pdev, p, sg_count[i],
+						data_dir);
+			hbacmd->sge[i].addr_hi = cpu_to_le32((u32)(addr>>32));
+			hbacmd->sge[i].addr_lo = cpu_to_le32(
+						(u32)(addr & 0xffffffff));
+			hbacmd->sge[i].len = cpu_to_le32(sg_count[i]);
+			hbacmd->sge[i].flags = 0;
+			byte_count += sg_count[i];
+		}
+
+		if (usg32->count > 0)	/* embedded sglist */
+			hbacmd->sge[usg32->count-1].flags =
+				cpu_to_le32(0x40000000);
+		hbacmd->data_length = cpu_to_le32(byte_count);
+
+		status = aac_hba_send(HBA_IU_TYPE_SCSI_CMD_REQ, srbfib,
+					NULL, NULL);
+
+	} else if (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) {
+		struct user_sgmap64* upsg = (struct user_sgmap64*)&user_srbcmd->sg;
+		struct sgmap64* psg = (struct sgmap64*)&srbcmd->sg;
+
+		/*
+		 * This should also catch if user used the 32 bit sgmap
+		 */
+		if (actual_fibsize64 == fibsize) {
+			actual_fibsize = actual_fibsize64;
+			for (i = 0; i < upsg->count; i++) {
+				u64 addr;
+				void* p;
+
+				sg_count[i] = upsg->sg[i].count;
+				if (sg_count[i] >
+				    ((dev->adapter_info.options &
+				     AAC_OPT_NEW_COMM) ?
+				      (dev->scsi_host_ptr->max_sectors << 9) :
+				      65536)) {
+					rcode = -EINVAL;
+					goto cleanup;
+				}
+
+				p = kmalloc(sg_count[i], GFP_KERNEL);
+				if(!p) {
+					dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
+					  sg_count[i], i, upsg->count));
+					rcode = -ENOMEM;
+					goto cleanup;
+				}
+				addr = (u64)upsg->sg[i].addr[0];
+				addr += ((u64)upsg->sg[i].addr[1]) << 32;
+				sg_user[i] = (void __user *)(uintptr_t)addr;
+				sg_list[i] = p; // save so we can clean up later
+				sg_indx = i;
+
+				if (flags & SRB_DataOut) {
+					if (copy_from_user(p, sg_user[i],
+						sg_count[i])){
+						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
+						rcode = -EFAULT;
+						goto cleanup;
+					}
+				}
+				addr = pci_map_single(dev->pdev, p,
+							sg_count[i], data_dir);
+
+				psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
+				psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
+				byte_count += sg_count[i];
+				psg->sg[i].count = cpu_to_le32(sg_count[i]);
+			}
+		} else {
+			struct user_sgmap* usg;
+			usg = kmemdup(upsg,
+				      actual_fibsize - sizeof(struct aac_srb)
+				      + sizeof(struct sgmap), GFP_KERNEL);
+			if (!usg) {
+				dprintk((KERN_DEBUG"aacraid: Allocation error in Raw SRB command\n"));
+				rcode = -ENOMEM;
+				goto cleanup;
+			}
+			actual_fibsize = actual_fibsize64;
+
+			for (i = 0; i < usg->count; i++) {
+				u64 addr;
+				void* p;
+
+				sg_count[i] = usg->sg[i].count;
+				if (sg_count[i] >
+				    ((dev->adapter_info.options &
+				     AAC_OPT_NEW_COMM) ?
+				      (dev->scsi_host_ptr->max_sectors << 9) :
+				      65536)) {
+					kfree(usg);
+					rcode = -EINVAL;
+					goto cleanup;
+				}
+
+				p = kmalloc(sg_count[i], GFP_KERNEL);
+				if(!p) {
+					dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
+						sg_count[i], i, usg->count));
+					kfree(usg);
+					rcode = -ENOMEM;
+					goto cleanup;
+				}
+				sg_user[i] = (void __user *)(uintptr_t)usg->sg[i].addr;
+				sg_list[i] = p; // save so we can clean up later
+				sg_indx = i;
+
+				if (flags & SRB_DataOut) {
+					if (copy_from_user(p, sg_user[i],
+						sg_count[i])) {
+						kfree (usg);
+						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
+						rcode = -EFAULT;
+						goto cleanup;
+					}
+				}
+				addr = pci_map_single(dev->pdev, p,
+							sg_count[i], data_dir);
+
+				psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
+				psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
+				byte_count += sg_count[i];
+				psg->sg[i].count = cpu_to_le32(sg_count[i]);
+			}
+			kfree (usg);
+		}
+		srbcmd->count = cpu_to_le32(byte_count);
+		if (user_srbcmd->sg.count)
+			psg->count = cpu_to_le32(sg_indx+1);
+		else
+			psg->count = 0;
+		status = aac_fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1,NULL,NULL);
+	} else {
+		struct user_sgmap* upsg = &user_srbcmd->sg;
+		struct sgmap* psg = &srbcmd->sg;
+
+		if (actual_fibsize64 == fibsize) {
+			struct user_sgmap64* usg = (struct user_sgmap64 *)upsg;
+			for (i = 0; i < upsg->count; i++) {
+				uintptr_t addr;
+				void* p;
+
+				sg_count[i] = usg->sg[i].count;
+				if (sg_count[i] >
+				    ((dev->adapter_info.options &
+				     AAC_OPT_NEW_COMM) ?
+				      (dev->scsi_host_ptr->max_sectors << 9) :
+				      65536)) {
+					rcode = -EINVAL;
+					goto cleanup;
+				}
+				p = kmalloc(sg_count[i], GFP_KERNEL);
+				if (!p) {
+					dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
+						sg_count[i], i, usg->count));
+					rcode = -ENOMEM;
+					goto cleanup;
+				}
+				addr = (u64)usg->sg[i].addr[0];
+				addr += ((u64)usg->sg[i].addr[1]) << 32;
+				sg_user[i] = (void __user *)addr;
+				sg_list[i] = p; // save so we can clean up later
+				sg_indx = i;
+
+				if (flags & SRB_DataOut) {
+					if (copy_from_user(p, sg_user[i],
+						sg_count[i])){
+						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
+						rcode = -EFAULT;
+						goto cleanup;
+					}
+				}
+				addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir);
+
+				psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff);
+				byte_count += usg->sg[i].count;
+				psg->sg[i].count = cpu_to_le32(sg_count[i]);
+			}
+		} else {
+			for (i = 0; i < upsg->count; i++) {
+				dma_addr_t addr;
+				void* p;
+
+				sg_count[i] = upsg->sg[i].count;
+				if (sg_count[i] >
+				    ((dev->adapter_info.options &
+				     AAC_OPT_NEW_COMM) ?
+				      (dev->scsi_host_ptr->max_sectors << 9) :
+				      65536)) {
+					rcode = -EINVAL;
+					goto cleanup;
+				}
+				p = kmalloc(sg_count[i], GFP_KERNEL);
+				if (!p) {
+					dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
+					  sg_count[i], i, upsg->count));
+					rcode = -ENOMEM;
+					goto cleanup;
+				}
+				sg_user[i] = (void __user *)(uintptr_t)upsg->sg[i].addr;
+				sg_list[i] = p; // save so we can clean up later
+				sg_indx = i;
+
+				if (flags & SRB_DataOut) {
+					if (copy_from_user(p, sg_user[i],
+						sg_count[i])) {
+						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
+						rcode = -EFAULT;
+						goto cleanup;
+					}
+				}
+				addr = pci_map_single(dev->pdev, p,
+					sg_count[i], data_dir);
+
+				psg->sg[i].addr = cpu_to_le32(addr);
+				byte_count += sg_count[i];
+				psg->sg[i].count = cpu_to_le32(sg_count[i]);
+			}
+		}
+		srbcmd->count = cpu_to_le32(byte_count);
+		if (user_srbcmd->sg.count)
+			psg->count = cpu_to_le32(sg_indx+1);
+		else
+			psg->count = 0;
+		status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
+	}
+
+	if (status == -ERESTARTSYS) {
+		rcode = -ERESTARTSYS;
+		goto cleanup;
+	}
+
+	if (status != 0) {
+		dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n"));
+		rcode = -ENXIO;
+		goto cleanup;
+	}
+
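+	/*
+	 * For DataIn requests, copy each bounce buffer back to the
+	 * corresponding user SG segment now that the command has completed.
+	 */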
+	if (flags & SRB_DataIn) {
+		for(i = 0 ; i <= sg_indx; i++){
+			if (copy_to_user(sg_user[i], sg_list[i], sg_count[i])) {
+				dprintk((KERN_DEBUG"aacraid: Could not copy sg data to user\n"));
+				rcode = -EFAULT;
+				goto cleanup;
+
+			}
+		}
+	}
+
+	user_reply = arg + fibsize;
+	if (is_native_device) {
+		struct aac_hba_resp *err =
+			&((struct aac_native_hba *)srbfib->hw_fib_va)->resp.err;
+		struct aac_srb_reply reply;
+
+		memset(&reply, 0, sizeof(reply));
+		reply.status = ST_OK;
+		if (srbfib->flags & FIB_CONTEXT_FLAG_FASTRESP) {
+			/* fast response */
+			reply.srb_status = SRB_STATUS_SUCCESS;
+			reply.scsi_status = 0;
+			reply.data_xfer_length = byte_count;
+			reply.sense_data_size = 0;
+			memset(reply.sense_data, 0, AAC_SENSE_BUFFERSIZE);
+		} else {
+			reply.srb_status = err->service_response;
+			reply.scsi_status = err->status;
+			reply.data_xfer_length = byte_count -
+				le32_to_cpu(err->residual_count);
+			reply.sense_data_size = err->sense_response_data_len;
+			memcpy(reply.sense_data, err->sense_response_buf,
+				AAC_SENSE_BUFFERSIZE);
+		}
+		if (copy_to_user(user_reply, &reply,
+			sizeof(struct aac_srb_reply))) {
+			dprintk((KERN_DEBUG"aacraid: Copy to user failed\n"));
+			rcode = -EFAULT;
+			goto cleanup;
+		}
+	} else {
+		struct aac_srb_reply *reply;
+
+		reply = (struct aac_srb_reply *) fib_data(srbfib);
+		if (copy_to_user(user_reply, reply,
+			sizeof(struct aac_srb_reply))) {
+			dprintk((KERN_DEBUG"aacraid: Copy to user failed\n"));
+			rcode = -EFAULT;
+			goto cleanup;
+		}
+	}
+
+cleanup:
+	kfree(user_srbcmd);
+	if (rcode != -ERESTARTSYS) {
+		for (i = 0; i <= sg_indx; i++)
+			kfree(sg_list[i]);
+		aac_fib_complete(srbfib);
+		aac_fib_free(srbfib);
+	}
+
+	return rcode;
+}
+
+struct aac_pci_info {
+	u32 bus;
+	u32 slot;
+};
+
+
+static int aac_get_pci_info(struct aac_dev* dev, void __user *arg)
+{
+	struct aac_pci_info pci_info;
+
+	pci_info.bus = dev->pdev->bus->number;
+	pci_info.slot = PCI_SLOT(dev->pdev->devfn);
+
+	if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
+		dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n"));
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static int aac_get_hba_info(struct aac_dev *dev, void __user *arg)
+{
+	struct aac_hba_info hbainfo;
+
+	memset(&hbainfo, 0, sizeof(hbainfo));
+	hbainfo.adapter_number		= (u8) dev->id;
+	hbainfo.system_io_bus_number	= dev->pdev->bus->number;
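+	/*
+	 * pdev->devfn packs the PCI device number in bits 7:3 and the
+	 * function number in bits 2:0, hence the shift and mask below.
+	 */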
+	hbainfo.device_number		= (dev->pdev->devfn >> 3);
+	hbainfo.function_number		= (dev->pdev->devfn & 0x0007);
+
+	hbainfo.vendor_id		= dev->pdev->vendor;
+	hbainfo.device_id		= dev->pdev->device;
+	hbainfo.sub_vendor_id		= dev->pdev->subsystem_vendor;
+	hbainfo.sub_system_id		= dev->pdev->subsystem_device;
+
+	if (copy_to_user(arg, &hbainfo, sizeof(struct aac_hba_info))) {
+		dprintk((KERN_DEBUG "aacraid: Could not copy hba info\n"));
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+struct aac_reset_iop {
+	u8	reset_type;
+};
+
+static int aac_send_reset_adapter(struct aac_dev *dev, void __user *arg)
+{
+	struct aac_reset_iop reset;
+	int retval;
+
+	if (copy_from_user((void *)&reset, arg, sizeof(struct aac_reset_iop)))
+		return -EFAULT;
+
+	dev->adapter_shutdown = 1;
+
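+	/*
+	 * aac_do_ioctl() holds ioctl_mutex around this handler, so drop it
+	 * across the reset and re-acquire it afterwards, presumably so the
+	 * reset path is not serialized against other ioctls.
+	 */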
+	mutex_unlock(&dev->ioctl_mutex);
+	retval = aac_reset_adapter(dev, 0, reset.reset_type);
+	mutex_lock(&dev->ioctl_mutex);
+
+	return retval;
+}
+
+int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
+{
+	int status;
+
+	mutex_lock(&dev->ioctl_mutex);
+
+	if (dev->adapter_shutdown) {
+		status = -EACCES;
+		goto cleanup;
+	}
+
+	/*
+	 *	HBA gets first crack
+	 */
+
+	status = aac_dev_ioctl(dev, cmd, arg);
+	if (status != -ENOTTY)
+		goto cleanup;
+
+	switch (cmd) {
+	case FSACTL_MINIPORT_REV_CHECK:
+		status = check_revision(dev, arg);
+		break;
+	case FSACTL_SEND_LARGE_FIB:
+	case FSACTL_SENDFIB:
+		status = ioctl_send_fib(dev, arg);
+		break;
+	case FSACTL_OPEN_GET_ADAPTER_FIB:
+		status = open_getadapter_fib(dev, arg);
+		break;
+	case FSACTL_GET_NEXT_ADAPTER_FIB:
+		status = next_getadapter_fib(dev, arg);
+		break;
+	case FSACTL_CLOSE_GET_ADAPTER_FIB:
+		status = close_getadapter_fib(dev, arg);
+		break;
+	case FSACTL_SEND_RAW_SRB:
+		status = aac_send_raw_srb(dev,arg);
+		break;
+	case FSACTL_GET_PCI_INFO:
+		status = aac_get_pci_info(dev,arg);
+		break;
+	case FSACTL_GET_HBA_INFO:
+		status = aac_get_hba_info(dev, arg);
+		break;
+	case FSACTL_RESET_IOP:
+		status = aac_send_reset_adapter(dev, arg);
+		break;
+
+	default:
+		status = -ENOTTY;
+		break;
+	}
+
+cleanup:
+	mutex_unlock(&dev->ioctl_mutex);
+
+	return status;
+}
+
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
new file mode 100644
index 0000000..0dc7b5a
--- /dev/null
+++ b/drivers/scsi/aacraid/comminit.c
@@ -0,0 +1,670 @@
+/*
+ *	Adaptec AAC series RAID controller driver
+ *	(c) Copyright 2001 Red Hat Inc.
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ *  comminit.c
+ *
+ * Abstract: This supports the initialization of the host adapter communication interface.
+ *    This is a platform dependent module for the pci cyclone board.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/mm.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+
+#include "aacraid.h"
+
+struct aac_common aac_config = {
+	.irq_mod = 1
+};
+
+static inline int aac_is_msix_mode(struct aac_dev *dev)
+{
+	u32 status = 0;
+
+	if (aac_is_src(dev))
+		status = src_readl(dev, MUnit.OMR);
+	return (status & AAC_INT_MODE_MSIX);
+}
+
+static inline void aac_change_to_intx(struct aac_dev *dev)
+{
+	aac_src_access_devreg(dev, AAC_DISABLE_MSIX);
+	aac_src_access_devreg(dev, AAC_ENABLE_INTX);
+}
+
+static int aac_alloc_comm(struct aac_dev *dev, void **commaddr, unsigned long commsize, unsigned long commalign)
+{
+	unsigned char *base;
+	unsigned long size, align;
+	const unsigned long fibsize = dev->max_fib_size;
+	const unsigned long printfbufsiz = 256;
+	unsigned long host_rrq_size, aac_init_size;
+	union aac_init *init;
+	dma_addr_t phys;
+	unsigned long aac_max_hostphysmempages;
+
+	if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) ||
+		(dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) ||
+		(dev->comm_interface == AAC_COMM_MESSAGE_TYPE3 &&
+		!dev->sa_firmware)) {
+		host_rrq_size =
+			(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB)
+				* sizeof(u32);
+		aac_init_size = sizeof(union aac_init);
+	} else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3 &&
+		dev->sa_firmware) {
+		host_rrq_size = (dev->scsi_host_ptr->can_queue
+			+ AAC_NUM_MGT_FIB) * sizeof(u32)  * AAC_MAX_MSIX;
+		aac_init_size = sizeof(union aac_init) +
+			(AAC_MAX_HRRQ - 1) * sizeof(struct _rrq);
+	} else {
+		host_rrq_size = 0;
+		aac_init_size = sizeof(union aac_init);
+	}
+	size = fibsize + aac_init_size + commsize + commalign +
+			printfbufsiz + host_rrq_size;
+
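+	/*
+	 * A single coherent allocation holds, in order: the adapter fib
+	 * area (fibsize), the host RRQ(s), the init structure, the comm
+	 * area (aligned to commalign below) and finally the printf buffer.
+	 */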
+	base = dma_alloc_coherent(&dev->pdev->dev, size, &phys, GFP_KERNEL);
+	if (base == NULL) {
+		printk(KERN_ERR "aacraid: unable to create mapping.\n");
+		return 0;
+	}
+
+	dev->comm_addr = (void *)base;
+	dev->comm_phys = phys;
+	dev->comm_size = size;
+
+	if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) ||
+	    (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) ||
+	    (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)) {
+		dev->host_rrq = (u32 *)(base + fibsize);
+		dev->host_rrq_pa = phys + fibsize;
+		memset(dev->host_rrq, 0, host_rrq_size);
+	}
+
+	dev->init = (union aac_init *)(base + fibsize + host_rrq_size);
+	dev->init_pa = phys + fibsize + host_rrq_size;
+
+	init = dev->init;
+
+	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
+		int i;
+		u64 addr;
+
+		init->r8.init_struct_revision =
+			cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_8);
+		init->r8.init_flags = cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED |
+					INITFLAGS_DRIVER_USES_UTC_TIME |
+					INITFLAGS_DRIVER_SUPPORTS_PM);
+		init->r8.init_flags |=
+				cpu_to_le32(INITFLAGS_DRIVER_SUPPORTS_HBA_MODE);
+		init->r8.rr_queue_count = cpu_to_le32(dev->max_msix);
+		init->r8.max_io_size =
+			cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9);
+		init->r8.max_num_aif = init->r8.reserved1 =
+			init->r8.reserved2 = 0;
+
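+		/*
+		 * Each MSI-X vector gets its own slice of the host RRQ,
+		 * vector_cap entries long, starting at host_rrq_pa.
+		 */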
+		for (i = 0; i < dev->max_msix; i++) {
+			addr = (u64)dev->host_rrq_pa + dev->vector_cap * i *
+					sizeof(u32);
+			init->r8.rrq[i].host_addr_high = cpu_to_le32(
+						upper_32_bits(addr));
+			init->r8.rrq[i].host_addr_low = cpu_to_le32(
+						lower_32_bits(addr));
+			init->r8.rrq[i].msix_id = i;
+			init->r8.rrq[i].element_count = cpu_to_le16(
+					(u16)dev->vector_cap);
+			init->r8.rrq[i].comp_thresh =
+					init->r8.rrq[i].unused = 0;
+		}
+
+		pr_warn("aacraid: Comm Interface type3 enabled\n");
+	} else {
+		init->r7.init_struct_revision =
+			cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION);
+		if (dev->max_fib_size != sizeof(struct hw_fib))
+			init->r7.init_struct_revision =
+				cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_4);
+		init->r7.no_of_msix_vectors = cpu_to_le32(SA_MINIPORT_REVISION);
+		init->r7.fsrev = cpu_to_le32(dev->fsrev);
+
+		/*
+		 *	Adapter Fibs are the first thing allocated so that they
+		 *	start page aligned
+		 */
+		dev->aif_base_va = (struct hw_fib *)base;
+
+		init->r7.adapter_fibs_virtual_address = 0;
+		init->r7.adapter_fibs_physical_address = cpu_to_le32((u32)phys);
+		init->r7.adapter_fibs_size = cpu_to_le32(fibsize);
+		init->r7.adapter_fib_align = cpu_to_le32(sizeof(struct hw_fib));
+
+		/*
+		 * Number of 4k pages of host physical memory. The aacraid
+		 * firmware needs this number to correspond to less than 4GB
+		 * worth of pages. New firmware has no issue with the mapping
+		 * system, but older firmware had trouble with the math
+		 * overflowing past 32 bits, so we must limit this field.
+		 */
+		aac_max_hostphysmempages =
+				dma_get_required_mask(&dev->pdev->dev) >> 12;
+		if (aac_max_hostphysmempages < AAC_MAX_HOSTPHYSMEMPAGES)
+			init->r7.host_phys_mem_pages =
+					cpu_to_le32(aac_max_hostphysmempages);
+		else
+			init->r7.host_phys_mem_pages =
+					cpu_to_le32(AAC_MAX_HOSTPHYSMEMPAGES);
+
+		init->r7.init_flags =
+			cpu_to_le32(INITFLAGS_DRIVER_USES_UTC_TIME |
+			INITFLAGS_DRIVER_SUPPORTS_PM);
+		init->r7.max_io_commands =
+			cpu_to_le32(dev->scsi_host_ptr->can_queue +
+					AAC_NUM_MGT_FIB);
+		init->r7.max_io_size =
+			cpu_to_le32(dev->scsi_host_ptr->max_sectors << 9);
+		init->r7.max_fib_size = cpu_to_le32(dev->max_fib_size);
+		init->r7.max_num_aif = cpu_to_le32(dev->max_num_aif);
+
+		if (dev->comm_interface == AAC_COMM_MESSAGE) {
+			init->r7.init_flags |=
+				cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED);
+			pr_warn("aacraid: Comm Interface enabled\n");
+		} else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1) {
+			init->r7.init_struct_revision =
+				cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_6);
+			init->r7.init_flags |=
+				cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED |
+				INITFLAGS_NEW_COMM_TYPE1_SUPPORTED |
+				INITFLAGS_FAST_JBOD_SUPPORTED);
+			init->r7.host_rrq_addr_high =
+				cpu_to_le32(upper_32_bits(dev->host_rrq_pa));
+			init->r7.host_rrq_addr_low =
+				cpu_to_le32(lower_32_bits(dev->host_rrq_pa));
+			pr_warn("aacraid: Comm Interface type1 enabled\n");
+		} else if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2) {
+			init->r7.init_struct_revision =
+				cpu_to_le32(ADAPTER_INIT_STRUCT_REVISION_7);
+			init->r7.init_flags |=
+				cpu_to_le32(INITFLAGS_NEW_COMM_SUPPORTED |
+				INITFLAGS_NEW_COMM_TYPE2_SUPPORTED |
+				INITFLAGS_FAST_JBOD_SUPPORTED);
+			init->r7.host_rrq_addr_high =
+				cpu_to_le32(upper_32_bits(dev->host_rrq_pa));
+			init->r7.host_rrq_addr_low =
+				cpu_to_le32(lower_32_bits(dev->host_rrq_pa));
+			init->r7.no_of_msix_vectors =
+				cpu_to_le32(dev->max_msix);
+			/* must be the COMM_PREFERRED_SETTINGS values */
+			pr_warn("aacraid: Comm Interface type2 enabled\n");
+		}
+	}
+
+	/*
+	 * Increment the base address by the amount already used
+	 */
+	base = base + fibsize + host_rrq_size + aac_init_size;
+	phys = (dma_addr_t)((ulong)phys + fibsize + host_rrq_size +
+			aac_init_size);
+
+	/*
+	 *	Align the beginning of Headers to commalign
+	 */
+	align = (commalign - ((uintptr_t)(base) & (commalign - 1)));
+	base = base + align;
+	phys = phys + align;
+	/*
+	 *	Fill in addresses of the Comm Area Headers and Queues
+	 */
+	*commaddr = base;
+	if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3)
+		init->r7.comm_header_address = cpu_to_le32((u32)phys);
+	/*
+	 *	Increment the base address by the size of the CommArea
+	 */
+	base = base + commsize;
+	phys = phys + commsize;
+	/*
+	 *	 Place the Printf buffer area after the Fast I/O comm area.
+	 */
+	dev->printfbuf = (void *)base;
+	if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3) {
+		init->r7.printfbuf = cpu_to_le32(phys);
+		init->r7.printfbufsiz = cpu_to_le32(printfbufsiz);
+	}
+	memset(base, 0, printfbufsiz);
+	return 1;
+}
+
+static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, int qsize)
+{
+	atomic_set(&q->numpending, 0);
+	q->dev = dev;
+	init_waitqueue_head(&q->cmdready);
+	INIT_LIST_HEAD(&q->cmdq);
+	init_waitqueue_head(&q->qfull);
+	spin_lock_init(&q->lockdata);
+	q->lock = &q->lockdata;
+	q->headers.producer = (__le32 *)mem;
+	q->headers.consumer = (__le32 *)(mem+1);
+	*(q->headers.producer) = cpu_to_le32(qsize);
+	*(q->headers.consumer) = cpu_to_le32(qsize);
+	q->entries = qsize;
+}
+
+static void aac_wait_for_io_completion(struct aac_dev *aac)
+{
+	unsigned long flagv = 0;
+	int i = 0;
+
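+	/*
+	 * Poll once per second, for at most 60 seconds, until no command
+	 * is still owned by the firmware.
+	 */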
+	for (i = 60; i; --i) {
+		struct scsi_device *dev;
+		struct scsi_cmnd *command;
+		int active = 0;
+
+		__shost_for_each_device(dev, aac->scsi_host_ptr) {
+			spin_lock_irqsave(&dev->list_lock, flagv);
+			list_for_each_entry(command, &dev->cmd_list, list) {
+				if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
+					active++;
+					break;
+				}
+			}
+			spin_unlock_irqrestore(&dev->list_lock, flagv);
+			if (active)
+				break;
+
+		}
+		/*
+		 * We can exit if all the commands are complete
+		 */
+		if (active == 0)
+			break;
+		ssleep(1);
+	}
+}
+
+/**
+ *	aac_send_shutdown		-	shutdown an adapter
+ *	@dev: Adapter to shutdown
+ *
+ *	This routine will send a VM_CloseAll (shutdown) request to the adapter.
+ */
+
+int aac_send_shutdown(struct aac_dev * dev)
+{
+	struct fib * fibctx;
+	struct aac_close *cmd;
+	int status = 0;
+
+	if (aac_adapter_check_health(dev))
+		return status;
+
+	if (!dev->adapter_shutdown) {
+		mutex_lock(&dev->ioctl_mutex);
+		dev->adapter_shutdown = 1;
+		mutex_unlock(&dev->ioctl_mutex);
+	}
+
+	aac_wait_for_io_completion(dev);
+
+	fibctx = aac_fib_alloc(dev);
+	if (!fibctx)
+		return -ENOMEM;
+	aac_fib_init(fibctx);
+
+	cmd = (struct aac_close *) fib_data(fibctx);
+	cmd->command = cpu_to_le32(VM_CloseAll);
+	cmd->cid = cpu_to_le32(0xfffffffe);
+
+	status = aac_fib_send(ContainerCommand,
+			  fibctx,
+			  sizeof(struct aac_close),
+			  FsaNormal,
+			  -2 /* Timeout silently */, 1,
+			  NULL, NULL);
+
+	if (status >= 0)
+		aac_fib_complete(fibctx);
+	/* FIB should be freed only after getting the response from the F/W */
+	if (status != -ERESTARTSYS)
+		aac_fib_free(fibctx);
+	if (aac_is_src(dev) &&
+	     dev->msi_enabled)
+		aac_set_intx_mode(dev);
+	return status;
+}
+
+/**
+ *	aac_comm_init	-	Initialise FSA data structures
+ *	@dev:	Adapter to initialise
+ *
+ *	Initializes the data structures that are required for the FSA communication
+ *	interface to operate.
+ *	Returns
+ *		0 - if the communication interface was initialized successfully.
+ *		-ENOMEM - if the communication area could not be allocated.
+ */
+ 
+static int aac_comm_init(struct aac_dev * dev)
+{
+	unsigned long hdrsize = (sizeof(u32) * NUMBER_OF_COMM_QUEUES) * 2;
+	unsigned long queuesize = sizeof(struct aac_entry) * TOTAL_QUEUE_ENTRIES;
+	u32 *headers;
+	struct aac_entry * queues;
+	unsigned long size;
+	struct aac_queue_block * comm = dev->queues;
+	/*
+	 *	Now allocate and initialize the zone structures used as our
+	 *	pool of FIB context records.  The size of the zone is based
+	 *	on the system memory size.  We also initialize the spinlock
+	 *	used to protect the zone.
+	 */
+	spin_lock_init(&dev->fib_lock);
+
+	/*
+	 *	Allocate the physically contiguous space for the communication
+	 *	queue headers.
+	 */
+
+	size = hdrsize + queuesize;
+
+	if (!aac_alloc_comm(dev, (void * *)&headers, size, QUEUE_ALIGNMENT))
+		return -ENOMEM;
+
+	queues = (struct aac_entry *)(((ulong)headers) + hdrsize);
+
+	/* Adapter to Host normal priority Command queue */ 
+	comm->queue[HostNormCmdQueue].base = queues;
+	aac_queue_init(dev, &comm->queue[HostNormCmdQueue], headers, HOST_NORM_CMD_ENTRIES);
+	queues += HOST_NORM_CMD_ENTRIES;
+	headers += 2;
+
+	/* Adapter to Host high priority command queue */
+	comm->queue[HostHighCmdQueue].base = queues;
+	aac_queue_init(dev, &comm->queue[HostHighCmdQueue], headers, HOST_HIGH_CMD_ENTRIES);
+    
+	queues += HOST_HIGH_CMD_ENTRIES;
+	headers += 2;
+
+	/* Host to adapter normal priority command queue */
+	comm->queue[AdapNormCmdQueue].base = queues;
+	aac_queue_init(dev, &comm->queue[AdapNormCmdQueue], headers, ADAP_NORM_CMD_ENTRIES);
+    
+	queues += ADAP_NORM_CMD_ENTRIES;
+	headers += 2;
+
+	/* host to adapter high priority command queue */
+	comm->queue[AdapHighCmdQueue].base = queues;
+	aac_queue_init(dev, &comm->queue[AdapHighCmdQueue], headers, ADAP_HIGH_CMD_ENTRIES);
+    
+	queues += ADAP_HIGH_CMD_ENTRIES;
+	headers += 2;
+
+	/* adapter to host normal priority response queue */
+	comm->queue[HostNormRespQueue].base = queues;
+	aac_queue_init(dev, &comm->queue[HostNormRespQueue], headers, HOST_NORM_RESP_ENTRIES);
+	queues += HOST_NORM_RESP_ENTRIES;
+	headers += 2;
+
+	/* adapter to host high priority response queue */
+	comm->queue[HostHighRespQueue].base = queues;
+	aac_queue_init(dev, &comm->queue[HostHighRespQueue], headers, HOST_HIGH_RESP_ENTRIES);
+   
+	queues += HOST_HIGH_RESP_ENTRIES;
+	headers += 2;
+
+	/* host to adapter normal priority response queue */
+	comm->queue[AdapNormRespQueue].base = queues;
+	aac_queue_init(dev, &comm->queue[AdapNormRespQueue], headers, ADAP_NORM_RESP_ENTRIES);
+
+	queues += ADAP_NORM_RESP_ENTRIES;
+	headers += 2;
+	
+	/* host to adapter high priority response queue */ 
+	comm->queue[AdapHighRespQueue].base = queues;
+	aac_queue_init(dev, &comm->queue[AdapHighRespQueue], headers, ADAP_HIGH_RESP_ENTRIES);
+
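+	/*
+	 * Each adapter-bound queue shares its spinlock with the
+	 * opposite-direction host queue of the same priority, presumably so
+	 * that a command queue and its paired response queue are serialized
+	 * together.
+	 */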
+	comm->queue[AdapNormCmdQueue].lock = comm->queue[HostNormRespQueue].lock;
+	comm->queue[AdapHighCmdQueue].lock = comm->queue[HostHighRespQueue].lock;
+	comm->queue[AdapNormRespQueue].lock = comm->queue[HostNormCmdQueue].lock;
+	comm->queue[AdapHighRespQueue].lock = comm->queue[HostHighCmdQueue].lock;
+
+	return 0;
+}
+
+void aac_define_int_mode(struct aac_dev *dev)
+{
+	int i, msi_count, min_msix;
+
+	msi_count = i = 0;
+	/* max. vectors from GET_COMM_PREFERRED_SETTINGS */
+	if (dev->max_msix == 0 ||
+	    dev->pdev->device == PMC_DEVICE_S6 ||
+	    dev->sync_mode) {
+		dev->max_msix = 1;
+		dev->vector_cap =
+			dev->scsi_host_ptr->can_queue +
+			AAC_NUM_MGT_FIB;
+		return;
+	}
+
+	/* Don't bother allocating more MSI-X vectors than cpus */
+	msi_count = min(dev->max_msix,
+		(unsigned int)num_online_cpus());
+
+	dev->max_msix = msi_count;
+
+	if (msi_count > AAC_MAX_MSIX)
+		msi_count = AAC_MAX_MSIX;
+
+	if (msi_count > 1 &&
+	    pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX)) {
+		min_msix = 2;
+		i = pci_alloc_irq_vectors(dev->pdev,
+					  min_msix, msi_count,
+					  PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
+		if (i > 0) {
+			dev->msi_enabled = 1;
+			msi_count = i;
+		} else {
+			dev->msi_enabled = 0;
+			dev_err(&dev->pdev->dev,
+			"MSIX not supported!! Will try INTX 0x%x.\n", i);
+		}
+	}
+
+	if (!dev->msi_enabled)
+		dev->max_msix = msi_count = 1;
+	else {
+		if (dev->max_msix > msi_count)
+			dev->max_msix = msi_count;
+	}
+	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3 && dev->sa_firmware)
+		dev->vector_cap = dev->scsi_host_ptr->can_queue +
+				AAC_NUM_MGT_FIB;
+	else
+		dev->vector_cap = (dev->scsi_host_ptr->can_queue +
+				AAC_NUM_MGT_FIB) / msi_count;
+
+}
+struct aac_dev *aac_init_adapter(struct aac_dev *dev)
+{
+	u32 status[5];
+	struct Scsi_Host * host = dev->scsi_host_ptr;
+	extern int aac_sync_mode;
+
+	/*
+	 *	Check the preferred comm settings, defaults from template.
+	 */
+	dev->management_fib_count = 0;
+	spin_lock_init(&dev->manage_lock);
+	spin_lock_init(&dev->sync_lock);
+	spin_lock_init(&dev->iq_lock);
+	dev->max_fib_size = sizeof(struct hw_fib);
+	dev->sg_tablesize = host->sg_tablesize = (dev->max_fib_size
+		- sizeof(struct aac_fibhdr)
+		- sizeof(struct aac_write) + sizeof(struct sgentry))
+			/ sizeof(struct sgentry);
+	dev->comm_interface = AAC_COMM_PRODUCER;
+	dev->raw_io_interface = dev->raw_io_64 = 0;
+
+
+	/*
+	 * Enable INTx mode, if not already enabled
+	 */
+	if (aac_is_msix_mode(dev)) {
+		aac_change_to_intx(dev);
+		dev_info(&dev->pdev->dev, "Changed firmware to INTX mode");
+	}
+
+	if ((!aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES,
+		0, 0, 0, 0, 0, 0,
+		status+0, status+1, status+2, status+3, status+4)) &&
+		(status[0] == 0x00000001)) {
+		dev->doorbell_mask = status[3];
+		if (status[1] & AAC_OPT_NEW_COMM_64)
+			dev->raw_io_64 = 1;
+		dev->sync_mode = aac_sync_mode;
+		if (dev->a_ops.adapter_comm &&
+		    (status[1] & AAC_OPT_NEW_COMM)) {
+			dev->comm_interface = AAC_COMM_MESSAGE;
+			dev->raw_io_interface = 1;
+			if ((status[1] & AAC_OPT_NEW_COMM_TYPE1)) {
+				/* driver supports TYPE1 (Tupelo) */
+				dev->comm_interface = AAC_COMM_MESSAGE_TYPE1;
+			} else if (status[1] & AAC_OPT_NEW_COMM_TYPE2) {
+				/* driver supports TYPE2 (Denali, Yosemite) */
+				dev->comm_interface = AAC_COMM_MESSAGE_TYPE2;
+			} else if (status[1] & AAC_OPT_NEW_COMM_TYPE3) {
+				/* driver supports TYPE3 (Yosemite, Thor) */
+				dev->comm_interface = AAC_COMM_MESSAGE_TYPE3;
+			} else if (status[1] & AAC_OPT_NEW_COMM_TYPE4) {
+				/* not supported TYPE - switch to sync. mode */
+				dev->comm_interface = AAC_COMM_MESSAGE_TYPE2;
+				dev->sync_mode = 1;
+			}
+		}
+		if ((status[1] & le32_to_cpu(AAC_OPT_EXTENDED)) &&
+			(status[4] & le32_to_cpu(AAC_EXTOPT_SA_FIRMWARE)))
+			dev->sa_firmware = 1;
+		else
+			dev->sa_firmware = 0;
+
+		if ((dev->comm_interface == AAC_COMM_MESSAGE) &&
+		    (status[2] > dev->base_size)) {
+			aac_adapter_ioremap(dev, 0);
+			dev->base_size = status[2];
+			if (aac_adapter_ioremap(dev, status[2])) {
+				/* remap failed, go back ... */
+				dev->comm_interface = AAC_COMM_PRODUCER;
+				if (aac_adapter_ioremap(dev, AAC_MIN_FOOTPRINT_SIZE)) {
+					printk(KERN_WARNING
+					  "aacraid: unable to map adapter.\n");
+					return NULL;
+				}
+			}
+		}
+	}
+	dev->max_msix = 0;
+	dev->msi_enabled = 0;
+	dev->adapter_shutdown = 0;
+	if ((!aac_adapter_sync_cmd(dev, GET_COMM_PREFERRED_SETTINGS,
+	  0, 0, 0, 0, 0, 0,
+	  status+0, status+1, status+2, status+3, status+4))
+	 && (status[0] == 0x00000001)) {
+		/*
+		 *	status[1] >> 16		maximum command size in KB
+		 *	status[1] & 0xFFFF	maximum FIB size
+		 *	status[2] >> 16		maximum SG elements to driver
+		 *	status[2] & 0xFFFF	maximum SG elements from driver
+		 *	status[3] & 0xFFFF	maximum number FIBs outstanding
+		 */
+		host->max_sectors = (status[1] >> 16) << 1;
+		/* Multiple of 32 for PMC */
+		dev->max_fib_size = status[1] & 0xFFE0;
+		host->sg_tablesize = status[2] >> 16;
+		dev->sg_tablesize = status[2] & 0xFFFF;
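+		/*
+		 * AAC_NUM_MGT_FIB fibs are reserved for management requests,
+		 * so the queue depth reported to the SCSI layer is reduced
+		 * by that amount below.
+		 */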
+		if (aac_is_src(dev)) {
+			if (host->can_queue > (status[3] >> 16) -
+					AAC_NUM_MGT_FIB)
+				host->can_queue = (status[3] >> 16) -
+					AAC_NUM_MGT_FIB;
+		} else if (host->can_queue > (status[3] & 0xFFFF) -
+				AAC_NUM_MGT_FIB)
+			host->can_queue = (status[3] & 0xFFFF) -
+				AAC_NUM_MGT_FIB;
+
+		dev->max_num_aif = status[4] & 0xFFFF;
+	}
+	if (numacb > 0) {
+		if (numacb < host->can_queue)
+			host->can_queue = numacb;
+		else
+			pr_warn("numacb=%d ignored\n", numacb);
+	}
+
+	if (aac_is_src(dev))
+		aac_define_int_mode(dev);
+	/*
+	 *	Ok now init the communication subsystem
+	 */
+
+	dev->queues = kzalloc(sizeof(struct aac_queue_block), GFP_KERNEL);
+	if (dev->queues == NULL) {
+		printk(KERN_ERR "Error could not allocate comm region.\n");
+		return NULL;
+	}
+
+	if (aac_comm_init(dev)<0){
+		kfree(dev->queues);
+		return NULL;
+	}
+	/*
+	 *	Initialize the list of fibs
+	 */
+	if (aac_fib_setup(dev) < 0) {
+		kfree(dev->queues);
+		return NULL;
+	}
+		
+	INIT_LIST_HEAD(&dev->fib_list);
+	INIT_LIST_HEAD(&dev->sync_fib_list);
+
+	return dev;
+}
+
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
new file mode 100644
index 0000000..6e1b022
--- /dev/null
+++ b/drivers/scsi/aacraid/commsup.c
@@ -0,0 +1,2607 @@
+/*
+ *	Adaptec AAC series RAID controller driver
+ *	(c) Copyright 2001 Red Hat Inc.
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *		 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ *  commsup.c
+ *
+ * Abstract: Contain all routines that are required for FSA host/adapter
+ *    communication.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/crash_dump.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/semaphore.h>
+#include <linux/bcd.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+
+#include "aacraid.h"
+
+/**
+ *	fib_map_alloc		-	allocate the fib objects
+ *	@dev: Adapter to allocate for
+ *
+ *	Allocate and map the shared PCI space for the FIB blocks used to
+ *	talk to the Adaptec firmware.
+ */
+
+static int fib_map_alloc(struct aac_dev *dev)
+{
+	/* max_cmd_size is at least AAC_MAX_NATIVE_SIZE */
+	if (dev->max_fib_size < AAC_MAX_NATIVE_SIZE)
+		dev->max_cmd_size = AAC_MAX_NATIVE_SIZE;
+	else
+		dev->max_cmd_size = dev->max_fib_size;
+
+	dprintk((KERN_INFO
+	  "allocate hardware fibs dma_alloc_coherent(%p, %d * (%d + %d), %p)\n",
+	  &dev->pdev->dev, dev->max_cmd_size, dev->scsi_host_ptr->can_queue,
+	  AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
+	dev->hw_fib_va = dma_alloc_coherent(&dev->pdev->dev,
+		(dev->max_cmd_size + sizeof(struct aac_fib_xporthdr))
+		* (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1),
+		&dev->hw_fib_pa, GFP_KERNEL);
+	if (dev->hw_fib_va == NULL)
+		return -ENOMEM;
+	return 0;
+}
+
+/**
+ *	aac_fib_map_free		-	free the fib objects
+ *	@dev: Adapter to free
+ *
+ *	Free the PCI mappings and the memory allocated for FIB blocks
+ *	on this adapter.
+ */
+
+void aac_fib_map_free(struct aac_dev *dev)
+{
+	size_t alloc_size;
+	size_t fib_size;
+	int num_fibs;
+
+	if(!dev->hw_fib_va || !dev->max_cmd_size)
+		return;
+
+	num_fibs = dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB;
+	/* must match the per-fib allocation size used in fib_map_alloc() */
+	fib_size = dev->max_cmd_size + sizeof(struct aac_fib_xporthdr);
+	alloc_size = fib_size * num_fibs + ALIGN32 - 1;
+
+	dma_free_coherent(&dev->pdev->dev, alloc_size, dev->hw_fib_va,
+			  dev->hw_fib_pa);
+
+	dev->hw_fib_va = NULL;
+	dev->hw_fib_pa = 0;
+}
+
+void aac_fib_vector_assign(struct aac_dev *dev)
+{
+	u32 i = 0;
+	u32 vector = 1;
+	struct fib *fibptr = NULL;
+
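+	/*
+	 * I/O fibs are spread round-robin over vectors 1..max_msix-1;
+	 * vector 0 is used when only one vector is available and for the
+	 * last vector_cap fibs in the pool.
+	 */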
+	for (i = 0, fibptr = &dev->fibs[i];
+		i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
+		i++, fibptr++) {
+		if ((dev->max_msix == 1) ||
+		  (i > ((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1)
+			- dev->vector_cap))) {
+			fibptr->vector_no = 0;
+		} else {
+			fibptr->vector_no = vector;
+			vector++;
+			if (vector == dev->max_msix)
+				vector = 1;
+		}
+	}
+}
+
+/**
+ *	aac_fib_setup	-	setup the fibs
+ *	@dev: Adapter to set up
+ *
+ *	Allocate the PCI space for the fibs, map it and then initialise the
+ *	fib area, the unmapped fib data and also the free list
+ */
+
+int aac_fib_setup(struct aac_dev * dev)
+{
+	struct fib *fibptr;
+	struct hw_fib *hw_fib;
+	dma_addr_t hw_fib_pa;
+	int i;
+	u32 max_cmds;
+
+	while (((i = fib_map_alloc(dev)) == -ENOMEM)
+	 && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
+		max_cmds = (dev->scsi_host_ptr->can_queue+AAC_NUM_MGT_FIB) >> 1;
+		dev->scsi_host_ptr->can_queue = max_cmds - AAC_NUM_MGT_FIB;
+		if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3)
+			dev->init->r7.max_io_commands = cpu_to_le32(max_cmds);
+	}
+	if (i<0)
+		return -ENOMEM;
+
+	memset(dev->hw_fib_va, 0,
+		(dev->max_cmd_size + sizeof(struct aac_fib_xporthdr)) *
+		(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
+
+	/* 32 byte alignment for PMC */
+	hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1);
+	hw_fib    = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
+					(hw_fib_pa - dev->hw_fib_pa));
+
+	/* add Xport header */
+	hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
+		sizeof(struct aac_fib_xporthdr));
+	hw_fib_pa += sizeof(struct aac_fib_xporthdr);
+
+	/*
+	 *	Initialise the fibs
+	 */
+	for (i = 0, fibptr = &dev->fibs[i];
+		i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
+		i++, fibptr++)
+	{
+		fibptr->flags = 0;
+		fibptr->size = sizeof(struct fib);
+		fibptr->dev = dev;
+		fibptr->hw_fib_va = hw_fib;
+		fibptr->data = (void *) fibptr->hw_fib_va->data;
+		fibptr->next = fibptr+1;	/* Forward chain the fibs */
+		sema_init(&fibptr->event_wait, 0);
+		spin_lock_init(&fibptr->event_lock);
+		hw_fib->header.XferState = cpu_to_le32(0xffffffff);
+		hw_fib->header.SenderSize =
+			cpu_to_le16(dev->max_fib_size);	/* ?? max_cmd_size */
+		fibptr->hw_fib_pa = hw_fib_pa;
+		fibptr->hw_sgl_pa = hw_fib_pa +
+			offsetof(struct aac_hba_cmd_req, sge[2]);
+		/*
+		 * one element is for the ptr to the separate sg list,
+		 * second element for 32 byte alignment
+		 */
+		fibptr->hw_error_pa = hw_fib_pa +
+			offsetof(struct aac_native_hba, resp.resp_bytes[0]);
+
+		hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
+			dev->max_cmd_size + sizeof(struct aac_fib_xporthdr));
+		hw_fib_pa = hw_fib_pa +
+			dev->max_cmd_size + sizeof(struct aac_fib_xporthdr);
+	}
+
+	/*
+	 * Assign vector numbers to fibs
+	 */
+	aac_fib_vector_assign(dev);
+
+	/*
+	 *	Add the fib chain to the free list
+	 */
+	dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
+	/*
+	*	Set 8 fibs aside for management tools
+	*/
+	dev->free_fib = &dev->fibs[dev->scsi_host_ptr->can_queue];
+	return 0;
+}
+
+/**
+ *	aac_fib_alloc_tag	-	allocate a fib using tags
+ *	@dev: Adapter to allocate the fib for
+ *	@scmd: SCSI command whose block layer tag selects the fib
+ *
+ *	Allocate a fib from the adapter fib pool using tags
+ *	from the blk layer.
+ */
+
+struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
+{
+	struct fib *fibptr;
+
+	fibptr = &dev->fibs[scmd->request->tag];
+	/*
+	 *	Null out fields that depend on being zero at the start of
+	 *	each I/O
+	 */
+	fibptr->hw_fib_va->header.XferState = 0;
+	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
+	fibptr->callback_data = NULL;
+	fibptr->callback = NULL;
+
+	return fibptr;
+}
+
+/**
+ *	aac_fib_alloc	-	allocate a fib
+ *	@dev: Adapter to allocate the fib for
+ *
+ *	Allocate a fib from the adapter fib pool. If the pool is empty we
+ *	return NULL.
+ */
+
+struct fib *aac_fib_alloc(struct aac_dev *dev)
+{
+	struct fib * fibptr;
+	unsigned long flags;
+	spin_lock_irqsave(&dev->fib_lock, flags);
+	fibptr = dev->free_fib;
+	if(!fibptr){
+		spin_unlock_irqrestore(&dev->fib_lock, flags);
+		return fibptr;
+	}
+	dev->free_fib = fibptr->next;
+	spin_unlock_irqrestore(&dev->fib_lock, flags);
+	/*
+	 *	Set the proper node type code and node byte size
+	 */
+	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
+	fibptr->size = sizeof(struct fib);
+	/*
+	 *	Null out fields that depend on being zero at the start of
+	 *	each I/O
+	 */
+	fibptr->hw_fib_va->header.XferState = 0;
+	fibptr->flags = 0;
+	fibptr->callback = NULL;
+	fibptr->callback_data = NULL;
+
+	return fibptr;
+}
+
+/**
+ *	aac_fib_free	-	free a fib
+ *	@fibptr: fib to free up
+ *
+ *	Frees up a fib and places it on the appropriate queue
+ */
+
+void aac_fib_free(struct fib *fibptr)
+{
+	unsigned long flags;
+
+	if (fibptr->done == 2)
+		return;
+
+	spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
+	if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
+		aac_config.fib_timeouts++;
+	if (!(fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) &&
+		fibptr->hw_fib_va->header.XferState != 0) {
+		printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
+			 (void*)fibptr,
+			 le32_to_cpu(fibptr->hw_fib_va->header.XferState));
+	}
+	fibptr->next = fibptr->dev->free_fib;
+	fibptr->dev->free_fib = fibptr;
+	spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
+}
+
+/**
+ *	aac_fib_init	-	initialise a fib
+ *	@fibptr: The fib to initialize
+ *
+ *	Set up the generic fib fields ready for use
+ */
+
+void aac_fib_init(struct fib *fibptr)
+{
+	struct hw_fib *hw_fib = fibptr->hw_fib_va;
+
+	memset(&hw_fib->header, 0, sizeof(struct aac_fibhdr));
+	hw_fib->header.StructType = FIB_MAGIC;
+	hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
+	hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
+	hw_fib->header.u.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
+	hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
+}
+
+/**
+ *	fib_dealloc		-	deallocate a fib
+ *	@fibptr: fib to deallocate
+ *
+ *	Will deallocate and return to the free pool the FIB pointed to by the
+ *	caller.
+ */
+
+static void fib_dealloc(struct fib * fibptr)
+{
+	struct hw_fib *hw_fib = fibptr->hw_fib_va;
+	hw_fib->header.XferState = 0;
+}
+
+/*
+ *	Communication primitives define and support the queuing method we use to
+ *	support host to adapter communication. All queue accesses happen through
+ *	these routines, which are the only routines that have knowledge of
+ *	how these queues are implemented.
+ */
+
+/**
+ *	aac_get_entry		-	get a queue entry
+ *	@dev: Adapter
+ *	@qid: Queue Number
+ *	@entry: Entry return
+ *	@index: Index return
+ *	@nonotify: notification control
+ *
+ *	For the requested queue the routine returns a queue entry if the queue has
+ *	free entries. If the queue is full (no free entries) then no entry is
+ *	returned and the function returns 0, otherwise 1 is returned.
+ */
+
+static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
+{
+	struct aac_queue * q;
+	unsigned long idx;
+
+	/*
+	 *	All of the queues wrap when they reach the end, so we check
+	 *	to see if they have reached the end and if they have we just
+	 *	set the index back to zero. This is a wrap. You could or off
+	 *	the high bits in all updates but this is a bit faster I think.
+	 */
+
+	q = &dev->queues->queue[qid];
+
+	idx = *index = le32_to_cpu(*(q->headers.producer));
+	/* Interrupt Moderation, only interrupt for first two entries */
+	if (idx != le32_to_cpu(*(q->headers.consumer))) {
+		if (--idx == 0) {
+			if (qid == AdapNormCmdQueue)
+				idx = ADAP_NORM_CMD_ENTRIES;
+			else
+				idx = ADAP_NORM_RESP_ENTRIES;
+		}
+		if (idx != le32_to_cpu(*(q->headers.consumer)))
+			*nonotify = 1;
+	}
+
+	if (qid == AdapNormCmdQueue) {
+		if (*index >= ADAP_NORM_CMD_ENTRIES)
+			*index = 0; /* Wrap to front of the Producer Queue. */
+	} else {
+		if (*index >= ADAP_NORM_RESP_ENTRIES)
+			*index = 0; /* Wrap to front of the Producer Queue. */
+	}
+
+	/* Queue is full */
+	if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) {
+		printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
+				qid, atomic_read(&q->numpending));
+		return 0;
+	} else {
+		*entry = q->base + *index;
+		return 1;
+	}
+}
+
+/**
+ *	aac_queue_get		-	get the next free QE
+ *	@dev: Adapter
+ *	@index: Returned index
+ *	@qid: Queue number
+ *	@hw_fib: Hardware fib to associate with the queue entry
+ *	@wait: Wait if queue full
+ *	@fibptr: Driver fib object to go with fib
+ *	@nonotify: Don't notify the adapter
+ *
+ *	Gets the next free QE off the requested priority adapter command
+ *	queue and associates the Fib with the QE. The QE represented by
+ *	index is ready to insert on the queue when this routine returns
+ *	success.
+ */
+
+int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
+{
+	struct aac_entry * entry = NULL;
+	int map = 0;
+
+	if (qid == AdapNormCmdQueue) {
+		/* if there are no entries, wait for some if the caller wants to */
+		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
+			printk(KERN_ERR "GetEntries failed\n");
+		}
+		/*
+		 *	Setup queue entry with a command, status and fib mapped
+		 */
+		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
+		map = 1;
+	} else {
+		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
+			/* if there are no entries, wait for some if the caller wants to */
+		}
+		/*
+		 *	Setup queue entry with command, status and fib mapped
+		 */
+		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
+		entry->addr = hw_fib->header.SenderFibAddress;
+			/* Restore adapters pointer to the FIB */
+		hw_fib->header.u.ReceiverFibAddress = hw_fib->header.SenderFibAddress;  /* Let the adapter know where to find its data */
+		map = 0;
+	}
+	/*
+	 *	If MapFib is true then we need to map the Fib and put pointers
+	 *	in the queue entry.
+	 */
+	if (map)
+		entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
+	return 0;
+}
+
+/*
+ *	Define the highest level of host to adapter communication routines.
+ *	These routines will support host to adapter FS communication. These
+ *	routines have no knowledge of the communication method used. This level
+ *	sends and receives FIBs. This level has no knowledge of how these FIBs
+ *	get passed back and forth.
+ */
+
+/**
+ *	aac_fib_send	-	send a fib to the adapter
+ *	@command: Command to send
+ *	@fibptr: The fib
+ *	@size: Size of fib data area
+ *	@priority: Priority of Fib
+ *	@wait: Async/sync select
+ *	@reply: True if a reply is wanted
+ *	@callback: Called with reply
+ *	@callback_data: Passed to callback
+ *
+ *	Sends the requested FIB to the adapter and optionally will wait for a
+ *	response FIB. If the caller does not wish to wait for a response then
+ *	an event to wait on must be supplied. This event will be set when a
+ *	response FIB is received from the adapter.
+ */
+
+int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
+		int priority, int wait, int reply, fib_callback callback,
+		void *callback_data)
+{
+	struct aac_dev * dev = fibptr->dev;
+	struct hw_fib * hw_fib = fibptr->hw_fib_va;
+	unsigned long flags = 0;
+	unsigned long mflags = 0;
+	unsigned long sflags = 0;
+
+	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
+		return -EBUSY;
+
+	if (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))
+		return -EINVAL;
+
+	/*
+	 *	There are 5 cases with the wait and response requested flags.
+	 *	The only invalid cases are if the caller requests to wait and
+	 *	does not request a response and if the caller does not want a
+	 *	response and the Fib is not allocated from pool. If a response
+	 *	is not requested the Fib will just be deallocated by the DPC
+	 *	routine when the response comes back from the adapter. No
+	 *	further processing will be done besides deleting the Fib. We
+	 *	will have a debug mode where the adapter can notify the host
+	 *	it had a problem and the host can log that fact.
+	 */
+	fibptr->flags = 0;
+	if (wait && !reply) {
+		return -EINVAL;
+	} else if (!wait && reply) {
+		hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
+		FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
+	} else if (!wait && !reply) {
+		hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
+		FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
+	} else if (wait && reply) {
+		hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
+		FIB_COUNTER_INCREMENT(aac_config.NormalSent);
+	}
+	/*
+	 *	Map the fib into 32bits by using the fib number
+	 */
+
+	hw_fib->header.SenderFibAddress =
+		cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
+
+	/* use the same shifted value for handle to be compatible
+	 * with the new native hba command handle
+	 */
+	hw_fib->header.Handle =
+		cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
+
+	/*
+	 *	Set FIB state to indicate where it came from and if we want a
+	 *	response from the adapter. Also load the command from the
+	 *	caller.
+	 *
+	 *	Map the hw fib pointer as a 32bit value
+	 */
+	hw_fib->header.Command = cpu_to_le16(command);
+	hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
+	/*
+	 *	Set the size of the Fib we want to send to the adapter
+	 */
+	hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
+	if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
+		return -EMSGSIZE;
+	}
+	/*
+	 *	Get a queue entry, connect the FIB to it, and notify the
+	 *	adapter that a command is ready.
+	 */
+	hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
+
+	/*
+	 *	Fill in the Callback and CallbackContext if we are not
+	 *	going to wait.
+	 */
+	if (!wait) {
+		fibptr->callback = callback;
+		fibptr->callback_data = callback_data;
+		fibptr->flags = FIB_CONTEXT_FLAG;
+	}
+
+	fibptr->done = 0;
+
+	FIB_COUNTER_INCREMENT(aac_config.FibsSent);
+
+	dprintk((KERN_DEBUG "Fib contents:.\n"));
+	dprintk((KERN_DEBUG "  Command =               %d.\n", le32_to_cpu(hw_fib->header.Command)));
+	dprintk((KERN_DEBUG "  SubCommand =            %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
+	dprintk((KERN_DEBUG "  XferState  =            %x.\n", le32_to_cpu(hw_fib->header.XferState)));
+	dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n",fibptr->hw_fib_va));
+	dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
+	dprintk((KERN_DEBUG "  fib being sent=%p\n",fibptr));
+
+	if (!dev->queues)
+		return -EBUSY;
+
+	if (wait) {
+
+		spin_lock_irqsave(&dev->manage_lock, mflags);
+		if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
+			printk(KERN_INFO "No management Fibs Available:%d\n",
+						dev->management_fib_count);
+			spin_unlock_irqrestore(&dev->manage_lock, mflags);
+			return -EBUSY;
+		}
+		dev->management_fib_count++;
+		spin_unlock_irqrestore(&dev->manage_lock, mflags);
+		spin_lock_irqsave(&fibptr->event_lock, flags);
+	}
+
+	if (dev->sync_mode) {
+		if (wait)
+			spin_unlock_irqrestore(&fibptr->event_lock, flags);
+		spin_lock_irqsave(&dev->sync_lock, sflags);
+		if (dev->sync_fib) {
+			list_add_tail(&fibptr->fiblink, &dev->sync_fib_list);
+			spin_unlock_irqrestore(&dev->sync_lock, sflags);
+		} else {
+			dev->sync_fib = fibptr;
+			spin_unlock_irqrestore(&dev->sync_lock, sflags);
+			aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
+				(u32)fibptr->hw_fib_pa, 0, 0, 0, 0, 0,
+				NULL, NULL, NULL, NULL, NULL);
+		}
+		if (wait) {
+			fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
+			if (down_interruptible(&fibptr->event_wait)) {
+				fibptr->flags &= ~FIB_CONTEXT_FLAG_WAIT;
+				return -EFAULT;
+			}
+			return 0;
+		}
+		return -EINPROGRESS;
+	}
+
+	if (aac_adapter_deliver(fibptr) != 0) {
+		printk(KERN_ERR "aac_fib_send: returned -EBUSY\n");
+		if (wait) {
+			spin_unlock_irqrestore(&fibptr->event_lock, flags);
+			spin_lock_irqsave(&dev->manage_lock, mflags);
+			dev->management_fib_count--;
+			spin_unlock_irqrestore(&dev->manage_lock, mflags);
+		}
+		return -EBUSY;
+	}
+
+
+	/*
+	 *	If the caller wanted us to wait for response wait now.
+	 */
+
+	if (wait) {
+		spin_unlock_irqrestore(&fibptr->event_lock, flags);
+		/* Only set for first known interruptible command */
+		if (wait < 0) {
+			/*
+			 * *VERY* Dangerous to time out a command, the
+			 * assumption is made that we have no hope of
+			 * functioning because an interrupt routing or other
+			 * hardware failure has occurred.
+			 */
+			unsigned long timeout = jiffies + (180 * HZ); /* 3 minutes */
+			while (down_trylock(&fibptr->event_wait)) {
+				int blink;
+				if (time_is_before_eq_jiffies(timeout)) {
+					struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
+					atomic_dec(&q->numpending);
+					if (wait == -1) {
+	        				printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
+						  "Usually a result of a PCI interrupt routing problem;\n"
+						  "update mother board BIOS or consider utilizing one of\n"
+						  "the SAFE mode kernel options (acpi, apic etc)\n");
+					}
+					return -ETIMEDOUT;
+				}
+
+				if (unlikely(pci_channel_offline(dev->pdev)))
+					return -EFAULT;
+
+				if ((blink = aac_adapter_check_health(dev)) > 0) {
+					if (wait == -1) {
+	        				printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
+						  "Usually a result of a serious unrecoverable hardware problem\n",
+						  blink);
+					}
+					return -EFAULT;
+				}
+				/*
+				 * Allow other processes / CPUS to use core
+				 */
+				schedule();
+			}
+		} else if (down_interruptible(&fibptr->event_wait)) {
+			/* Do nothing ... satisfy
+			 * down_interruptible must_check */
+		}
+
+		spin_lock_irqsave(&fibptr->event_lock, flags);
+		if (fibptr->done == 0) {
+			fibptr->done = 2; /* Tell interrupt we aborted */
+			spin_unlock_irqrestore(&fibptr->event_lock, flags);
+			return -ERESTARTSYS;
+		}
+		spin_unlock_irqrestore(&fibptr->event_lock, flags);
+		BUG_ON(fibptr->done == 0);
+
+		if(unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
+			return -ETIMEDOUT;
+		return 0;
+	}
+	/*
+	 *	If the user does not want a response then return success,
+	 *	otherwise return pending
+	 */
+	if (reply)
+		return -EINPROGRESS;
+	else
+		return 0;
+}
+
+int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
+		void *callback_data)
+{
+	struct aac_dev *dev = fibptr->dev;
+	int wait;
+	unsigned long flags = 0;
+	unsigned long mflags = 0;
+	struct aac_hba_cmd_req *hbacmd = (struct aac_hba_cmd_req *)
+			fibptr->hw_fib_va;
+
+	fibptr->flags = (FIB_CONTEXT_FLAG | FIB_CONTEXT_FLAG_NATIVE_HBA);
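+	/* Without a completion callback the caller waits synchronously. */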
+	if (callback) {
+		wait = 0;
+		fibptr->callback = callback;
+		fibptr->callback_data = callback_data;
+	} else
+		wait = 1;
+
+
+	hbacmd->iu_type = command;
+
+	if (command == HBA_IU_TYPE_SCSI_CMD_REQ) {
+		/* bit1 of request_id must be 0 */
+		hbacmd->request_id =
+			cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
+		fibptr->flags |= FIB_CONTEXT_FLAG_SCSI_CMD;
+	} else if (command != HBA_IU_TYPE_SCSI_TM_REQ)
+		return -EINVAL;
+
+
+	if (wait) {
+		spin_lock_irqsave(&dev->manage_lock, mflags);
+		if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
+			spin_unlock_irqrestore(&dev->manage_lock, mflags);
+			return -EBUSY;
+		}
+		dev->management_fib_count++;
+		spin_unlock_irqrestore(&dev->manage_lock, mflags);
+		spin_lock_irqsave(&fibptr->event_lock, flags);
+	}
+
+	if (aac_adapter_deliver(fibptr) != 0) {
+		if (wait) {
+			spin_unlock_irqrestore(&fibptr->event_lock, flags);
+			spin_lock_irqsave(&dev->manage_lock, mflags);
+			dev->management_fib_count--;
+			spin_unlock_irqrestore(&dev->manage_lock, mflags);
+		}
+		return -EBUSY;
+	}
+	FIB_COUNTER_INCREMENT(aac_config.NativeSent);
+
+	if (wait) {
+
+		spin_unlock_irqrestore(&fibptr->event_lock, flags);
+
+		if (unlikely(pci_channel_offline(dev->pdev)))
+			return -EFAULT;
+
+		fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
+		if (down_interruptible(&fibptr->event_wait))
+			fibptr->done = 2;
+		fibptr->flags &= ~(FIB_CONTEXT_FLAG_WAIT);
+
+		spin_lock_irqsave(&fibptr->event_lock, flags);
+		if ((fibptr->done == 0) || (fibptr->done == 2)) {
+			fibptr->done = 2; /* Tell interrupt we aborted */
+			spin_unlock_irqrestore(&fibptr->event_lock, flags);
+			return -ERESTARTSYS;
+		}
+		spin_unlock_irqrestore(&fibptr->event_lock, flags);
+		WARN_ON(fibptr->done == 0);
+
+		if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
+			return -ETIMEDOUT;
+
+		return 0;
+	}
+
+	return -EINPROGRESS;
+}
+
+/**
+ *	aac_consumer_get	-	get the top of the queue
+ *	@dev: Adapter
+ *	@q: Queue
+ *	@entry: Return entry
+ *
+ *	Returns a pointer to the entry on the top of the requested queue that we
+ *	are a consumer of, placing its address in @entry. It does not change the
+ *	state of the queue.
+ */
+
+int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
+{
+	u32 index;
+	int status;
+	if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
+		status = 0;
+	} else {
+		/*
+		 *	The consumer index must be wrapped if we have reached
+		 *	the end of the queue, else we just use the entry
+		 *	pointed to by the header index
+		 */
+		if (le32_to_cpu(*q->headers.consumer) >= q->entries)
+			index = 0;
+		else
+			index = le32_to_cpu(*q->headers.consumer);
+		*entry = q->base + index;
+		status = 1;
+	}
+	return status;
+}
+
+/**
+ *	aac_consumer_free	-	free consumer entry
+ *	@dev: Adapter
+ *	@q: Queue
+ *	@qid: Queue ident
+ *
+ *	Frees up the current top of the queue we are a consumer of. If the
+ *	queue was full notify the producer that the queue is no longer full.
+ */
+
+void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
+{
+	int wasfull = 0;
+	u32 notify;
+
+	if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
+		wasfull = 1;
+
+	if (le32_to_cpu(*q->headers.consumer) >= q->entries)
+		*q->headers.consumer = cpu_to_le32(1);
+	else
+		le32_add_cpu(q->headers.consumer, 1);
+
+	if (wasfull) {
+		switch (qid) {
+
+		case HostNormCmdQueue:
+			notify = HostNormCmdNotFull;
+			break;
+		case HostNormRespQueue:
+			notify = HostNormRespNotFull;
+			break;
+		default:
+			BUG();
+			return;
+		}
+		aac_adapter_notify(dev, notify);
+	}
+}
+
+/**
+ *	aac_fib_adapter_complete	-	complete adapter issued fib
+ *	@fibptr: fib to complete
+ *	@size: size of fib
+ *
+ *	Will do all necessary work to complete a FIB that was sent from
+ *	the adapter.
+ */
+
+int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
+{
+	struct hw_fib * hw_fib = fibptr->hw_fib_va;
+	struct aac_dev * dev = fibptr->dev;
+	struct aac_queue * q;
+	unsigned long nointr = 0;
+	unsigned long qflags;
+
+	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
+		dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
+		dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
+		kfree(hw_fib);
+		return 0;
+	}
+
+	if (hw_fib->header.XferState == 0) {
+		if (dev->comm_interface == AAC_COMM_MESSAGE)
+			kfree(hw_fib);
+		return 0;
+	}
+	/*
+	 *	If we plan to do anything check the structure type first.
+	 */
+	if (hw_fib->header.StructType != FIB_MAGIC &&
+	    hw_fib->header.StructType != FIB_MAGIC2 &&
+	    hw_fib->header.StructType != FIB_MAGIC2_64) {
+		if (dev->comm_interface == AAC_COMM_MESSAGE)
+			kfree(hw_fib);
+		return -EINVAL;
+	}
+	/*
+	 *	This block handles the case where the adapter had sent us a
+	 *	command and we have finished processing the command. We
+	 *	call completeFib when we are done processing the command
+	 *	and want to send a response back to the adapter. This will
+	 *	send the completed cdb to the adapter.
+	 */
+	if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
+		if (dev->comm_interface == AAC_COMM_MESSAGE) {
+			kfree (hw_fib);
+		} else {
+			u32 index;
+			hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
+			if (size) {
+				size += sizeof(struct aac_fibhdr);
+				if (size > le16_to_cpu(hw_fib->header.SenderSize))
+					return -EMSGSIZE;
+				hw_fib->header.Size = cpu_to_le16(size);
+			}
+			q = &dev->queues->queue[AdapNormRespQueue];
+			spin_lock_irqsave(q->lock, qflags);
+			aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
+			*(q->headers.producer) = cpu_to_le32(index + 1);
+			spin_unlock_irqrestore(q->lock, qflags);
+			if (!(nointr & (int)aac_config.irq_mod))
+				aac_adapter_notify(dev, AdapNormRespQueue);
+		}
+	} else {
+		printk(KERN_WARNING "aac_fib_adapter_complete: "
+			"Unknown xferstate detected.\n");
+		BUG();
+	}
+	return 0;
+}
+
+/**
+ *	aac_fib_complete	-	fib completion handler
+ *	@fib: FIB to complete
+ *
+ *	Will do all necessary work to complete a FIB.
+ */
+
+int aac_fib_complete(struct fib *fibptr)
+{
+	struct hw_fib * hw_fib = fibptr->hw_fib_va;
+
+	if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) {
+		fib_dealloc(fibptr);
+		return 0;
+	}
+
+	/*
+	 *	Check for a fib which has already been completed or with a
+	 *	status wait timeout
+	 */
+
+	if (hw_fib->header.XferState == 0 || fibptr->done == 2)
+		return 0;
+	/*
+	 *	If we plan to do anything check the structure type first.
+	 */
+
+	if (hw_fib->header.StructType != FIB_MAGIC &&
+	    hw_fib->header.StructType != FIB_MAGIC2 &&
+	    hw_fib->header.StructType != FIB_MAGIC2_64)
+		return -EINVAL;
+	/*
+	 *	This block completes a cdb which originated on the host; we
+	 *	just need to deallocate the cdb or reinit it. At this point the
+	 *	command we sent to the adapter is complete and this cdb can be
+	 *	reused.
+	 */
+
+	if ((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
+	    (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))) {
+		fib_dealloc(fibptr);
+	} else if (hw_fib->header.XferState & cpu_to_le32(SentFromHost)) {
+		/*
+		 *	This handles the case when the host has aborted the I/O
+		 *	to the adapter because the adapter is not responding
+		 */
+		fib_dealloc(fibptr);
+	} else if (hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
+		fib_dealloc(fibptr);
+	} else {
+		BUG();
+	}
+	return 0;
+}
+
+/**
+ *	aac_printf	-	handle printf from firmware
+ *	@dev: Adapter
+ *	@val: Message info
+ *
+ *	Print a message passed to us by the controller firmware on the
+ *	Adaptec board
+ */
+
+void aac_printf(struct aac_dev *dev, u32 val)
+{
+	char *cp = dev->printfbuf;
+	if (dev->printf_enabled) {
+		int length = val & 0xffff;
+		int level = (val >> 16) & 0xffff;
+
+		/*
+		 *	The size of the printfbuf is set in port.c
+		 *	There is no variable or define for it
+		 */
+		if (length > 255)
+			length = 255;
+		if (cp[length] != 0)
+			cp[length] = 0;
+		if (level == LOG_AAC_HIGH_ERROR)
+			printk(KERN_WARNING "%s:%s", dev->name, cp);
+		else
+			printk(KERN_INFO "%s:%s", dev->name, cp);
+	}
+	memset(cp, 0, 256);
+}
+
+static inline int aac_aif_data(struct aac_aifcmd *aifcmd, uint32_t index)
+{
+	return le32_to_cpu(((__le32 *)aifcmd->data)[index]);
+}
+
+
+static void aac_handle_aif_bu(struct aac_dev *dev, struct aac_aifcmd *aifcmd)
+{
+	switch (aac_aif_data(aifcmd, 1)) {
+	case AifBuCacheDataLoss:
+		if (aac_aif_data(aifcmd, 2))
+			dev_info(&dev->pdev->dev, "Backup unit had cache data loss - [%d]\n",
+			aac_aif_data(aifcmd, 2));
+		else
+			dev_info(&dev->pdev->dev, "Backup Unit had cache data loss\n");
+		break;
+	case AifBuCacheDataRecover:
+		if (aac_aif_data(aifcmd, 2))
+			dev_info(&dev->pdev->dev, "DDR cache data recovered successfully - [%d]\n",
+			aac_aif_data(aifcmd, 2));
+		else
+			dev_info(&dev->pdev->dev, "DDR cache data recovered successfully\n");
+		break;
+	}
+}
+
+/**
+ *	aac_handle_aif		-	Handle a message from the firmware
+ *	@dev: Which adapter this fib is from
+ *	@fibptr: Pointer to fibptr from adapter
+ *
+ *	This routine handles a driver notify fib from the adapter and
+ *	dispatches it to the appropriate routine for handling.
+ */
+
+#define AIF_SNIFF_TIMEOUT	(500*HZ)
+static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
+{
+	struct hw_fib * hw_fib = fibptr->hw_fib_va;
+	struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
+	u32 channel, id, lun, container;
+	struct scsi_device *device;
+	enum {
+		NOTHING,
+		DELETE,
+		ADD,
+		CHANGE
+	} device_config_needed = NOTHING;
+
+	/* Sniff for container changes */
+
+	if (!dev || !dev->fsa_dev)
+		return;
+	container = channel = id = lun = (u32)-1;
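+	/*
+	 * container, channel, id and lun start out as (u32)-1, the sentinel
+	 * used below to mean "no container / SCSI address decoded yet".
+	 */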
+
+	/*
+	 *	We have set this up to try to minimize the number of
+	 * re-configures that take place. As a result, when certain AIFs
+	 * come in we set a flag and wait for another type of AIF before
+	 * setting the re-config flag.
+	 */
+	switch (le32_to_cpu(aifcmd->command)) {
+	case AifCmdDriverNotify:
+		switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
+		case AifRawDeviceRemove:
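+			/*
+			 * The event data packs a SCSI address into one word:
+			 * bits 31:28 must be zero, bits 27:24 hold the
+			 * channel, bits 23:16 the lun and bits 15:0 the
+			 * target id.
+			 */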
+			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
+			if ((container >> 28)) {
+				container = (u32)-1;
+				break;
+			}
+			channel = (container >> 24) & 0xF;
+			if (channel >= dev->maximum_num_channels) {
+				container = (u32)-1;
+				break;
+			}
+			id = container & 0xFFFF;
+			if (id >= dev->maximum_num_physicals) {
+				container = (u32)-1;
+				break;
+			}
+			lun = (container >> 16) & 0xFF;
+			container = (u32)-1;
+			channel = aac_phys_to_logical(channel);
+			device_config_needed = DELETE;
+			break;
+
+		/*
+		 *	Morph or Expand complete
+		 */
+		case AifDenMorphComplete:
+		case AifDenVolumeExtendComplete:
+			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
+			if (container >= dev->maximum_num_containers)
+				break;
+
+			/*
+			 *	Find the scsi_device associated with the SCSI
+			 * address. Make sure we have the right array, and if
+			 * so set the flag to initiate a new re-config once we
+			 * see an AifEnConfigChange AIF come through.
+			 */
+
+			if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
+				device = scsi_device_lookup(dev->scsi_host_ptr,
+					CONTAINER_TO_CHANNEL(container),
+					CONTAINER_TO_ID(container),
+					CONTAINER_TO_LUN(container));
+				if (device) {
+					dev->fsa_dev[container].config_needed = CHANGE;
+					dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
+					dev->fsa_dev[container].config_waiting_stamp = jiffies;
+					scsi_device_put(device);
+				}
+			}
+		}
+
+		/*
+		 *	If we are waiting on something and this happens to be
+		 * that thing then set the re-configure flag.
+		 */
+		if (container != (u32)-1) {
+			if (container >= dev->maximum_num_containers)
+				break;
+			if ((dev->fsa_dev[container].config_waiting_on ==
+			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
+			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
+				dev->fsa_dev[container].config_waiting_on = 0;
+		} else for (container = 0;
+		    container < dev->maximum_num_containers; ++container) {
+			if ((dev->fsa_dev[container].config_waiting_on ==
+			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
+			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
+				dev->fsa_dev[container].config_waiting_on = 0;
+		}
+		break;
+
+	case AifCmdEventNotify:
+		switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
+		case AifEnBatteryEvent:
+			dev->cache_protected =
+				(((__le32 *)aifcmd->data)[1] == cpu_to_le32(3));
+			break;
+		/*
+		 *	Add an Array.
+		 */
+		case AifEnAddContainer:
+			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
+			if (container >= dev->maximum_num_containers)
+				break;
+			dev->fsa_dev[container].config_needed = ADD;
+			dev->fsa_dev[container].config_waiting_on =
+				AifEnConfigChange;
+			dev->fsa_dev[container].config_waiting_stamp = jiffies;
+			break;
+
+		/*
+		 *	Delete an Array.
+		 */
+		case AifEnDeleteContainer:
+			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
+			if (container >= dev->maximum_num_containers)
+				break;
+			dev->fsa_dev[container].config_needed = DELETE;
+			dev->fsa_dev[container].config_waiting_on =
+				AifEnConfigChange;
+			dev->fsa_dev[container].config_waiting_stamp = jiffies;
+			break;
+
+		/*
+		 *	Container change detected. If we currently are not
+		 * waiting on something else, setup to wait on a Config Change.
+		 */
+		case AifEnContainerChange:
+			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
+			if (container >= dev->maximum_num_containers)
+				break;
+			if (dev->fsa_dev[container].config_waiting_on &&
+			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
+				break;
+			dev->fsa_dev[container].config_needed = CHANGE;
+			dev->fsa_dev[container].config_waiting_on =
+				AifEnConfigChange;
+			dev->fsa_dev[container].config_waiting_stamp = jiffies;
+			break;
+
+		case AifEnConfigChange:
+			break;
+
+		case AifEnAddJBOD:
+		case AifEnDeleteJBOD:
+			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
+			if ((container >> 28)) {
+				container = (u32)-1;
+				break;
+			}
+			channel = (container >> 24) & 0xF;
+			if (channel >= dev->maximum_num_channels) {
+				container = (u32)-1;
+				break;
+			}
+			id = container & 0xFFFF;
+			if (id >= dev->maximum_num_physicals) {
+				container = (u32)-1;
+				break;
+			}
+			lun = (container >> 16) & 0xFF;
+			container = (u32)-1;
+			channel = aac_phys_to_logical(channel);
+			device_config_needed =
+			  (((__le32 *)aifcmd->data)[0] ==
+			    cpu_to_le32(AifEnAddJBOD)) ? ADD : DELETE;
+			if (device_config_needed == ADD) {
+				device = scsi_device_lookup(dev->scsi_host_ptr,
+					channel,
+					id,
+					lun);
+				if (device) {
+					scsi_remove_device(device);
+					scsi_device_put(device);
+				}
+			}
+			break;
+
+		case AifEnEnclosureManagement:
+			/*
+			 * In JBOD mode, suppress automatic exposure of new
+			 * physical targets until they are configured.
+			 */
+			if (dev->jbod)
+				break;
+			switch (le32_to_cpu(((__le32 *)aifcmd->data)[3])) {
+			case EM_DRIVE_INSERTION:
+			case EM_DRIVE_REMOVAL:
+			case EM_SES_DRIVE_INSERTION:
+			case EM_SES_DRIVE_REMOVAL:
+				container = le32_to_cpu(
+					((__le32 *)aifcmd->data)[2]);
+				if ((container >> 28)) {
+					container = (u32)-1;
+					break;
+				}
+				channel = (container >> 24) & 0xF;
+				if (channel >= dev->maximum_num_channels) {
+					container = (u32)-1;
+					break;
+				}
+				id = container & 0xFFFF;
+				lun = (container >> 16) & 0xFF;
+				container = (u32)-1;
+				if (id >= dev->maximum_num_physicals) {
+					/* legacy dev_t ? */
+					if ((0x2000 <= id) || lun || channel ||
+					  ((channel = (id >> 7) & 0x3F) >=
+					  dev->maximum_num_channels))
+						break;
+					lun = (id >> 4) & 7;
+					id &= 0xF;
+				}
+				channel = aac_phys_to_logical(channel);
+				device_config_needed =
+				  ((((__le32 *)aifcmd->data)[3]
+				    == cpu_to_le32(EM_DRIVE_INSERTION)) ||
+				    (((__le32 *)aifcmd->data)[3]
+				    == cpu_to_le32(EM_SES_DRIVE_INSERTION))) ?
+				  ADD : DELETE;
+				break;
+			}
+			break;
+
+		case AifBuManagerEvent:
+			aac_handle_aif_bu(dev, aifcmd);
+			break;
+		}
+
+		/*
+		 *	If we are waiting on something and this happens to be
+		 * that thing then set the re-configure flag.
+		 */
+		if (container != (u32)-1) {
+			if (container >= dev->maximum_num_containers)
+				break;
+			if ((dev->fsa_dev[container].config_waiting_on ==
+			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
+			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
+				dev->fsa_dev[container].config_waiting_on = 0;
+		} else for (container = 0;
+		    container < dev->maximum_num_containers; ++container) {
+			if ((dev->fsa_dev[container].config_waiting_on ==
+			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
+			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
+				dev->fsa_dev[container].config_waiting_on = 0;
+		}
+		break;
+
+	case AifCmdJobProgress:
+		/*
+		 *	These are job progress AIF's. When a Clear is being
+		 * done on a container it is initially created then hidden from
+		 * the OS. When the clear completes we don't get a config
+		 * change so we monitor the job status complete on a clear then
+		 * wait for a container change.
+		 */
+
+		if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
+		    (((__le32 *)aifcmd->data)[6] == ((__le32 *)aifcmd->data)[5] ||
+		     ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess))) {
+			for (container = 0;
+			    container < dev->maximum_num_containers;
+			    ++container) {
+				/*
+				 * Stomp on all config sequencing for all
+				 * containers?
+				 */
+				dev->fsa_dev[container].config_waiting_on =
+					AifEnContainerChange;
+				dev->fsa_dev[container].config_needed = ADD;
+				dev->fsa_dev[container].config_waiting_stamp =
+					jiffies;
+			}
+		}
+		if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
+		    ((__le32 *)aifcmd->data)[6] == 0 &&
+		    ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning)) {
+			for (container = 0;
+			    container < dev->maximum_num_containers;
+			    ++container) {
+				/*
+				 * Stomp on all config sequencing for all
+				 * containers?
+				 */
+				dev->fsa_dev[container].config_waiting_on =
+					AifEnContainerChange;
+				dev->fsa_dev[container].config_needed = DELETE;
+				dev->fsa_dev[container].config_waiting_stamp =
+					jiffies;
+			}
+		}
+		break;
+	}
+
+	container = 0;
+retry_next:
+	if (device_config_needed == NOTHING)
+	for (; container < dev->maximum_num_containers; ++container) {
+		if ((dev->fsa_dev[container].config_waiting_on == 0) &&
+			(dev->fsa_dev[container].config_needed != NOTHING) &&
+			time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
+			device_config_needed =
+				dev->fsa_dev[container].config_needed;
+			dev->fsa_dev[container].config_needed = NOTHING;
+			channel = CONTAINER_TO_CHANNEL(container);
+			id = CONTAINER_TO_ID(container);
+			lun = CONTAINER_TO_LUN(container);
+			break;
+		}
+	}
+	if (device_config_needed == NOTHING)
+		return;
+
+	/*
+	 *	If we decided that a re-configuration needs to be done,
+	 * schedule it here on the way out the door, please close the door
+	 * behind you.
+	 */
+
+	/*
+	 *	Find the scsi_device associated with the SCSI address,
+	 * and mark it as changed, invalidating the cache. This deals
+	 * with changes to existing device IDs.
+	 */
+
+	if (!dev || !dev->scsi_host_ptr)
+		return;
+	/*
+	 * force reload of disk info via aac_probe_container
+	 */
+	if ((channel == CONTAINER_CHANNEL) &&
+	  (device_config_needed != NOTHING)) {
+		if (dev->fsa_dev[container].valid == 1)
+			dev->fsa_dev[container].valid = 2;
+		aac_probe_container(dev, container);
+	}
+	device = scsi_device_lookup(dev->scsi_host_ptr, channel, id, lun);
+	if (device) {
+		switch (device_config_needed) {
+		case DELETE:
+#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
+			scsi_remove_device(device);
+#else
+			if (scsi_device_online(device)) {
+				scsi_device_set_state(device, SDEV_OFFLINE);
+				sdev_printk(KERN_INFO, device,
+					"Device offlined - %s\n",
+					(channel == CONTAINER_CHANNEL) ?
+						"array deleted" :
+						"enclosure services event");
+			}
+#endif
+			break;
+		case ADD:
+			if (!scsi_device_online(device)) {
+				sdev_printk(KERN_INFO, device,
+					"Device online - %s\n",
+					(channel == CONTAINER_CHANNEL) ?
+						"array created" :
+						"enclosure services event");
+				scsi_device_set_state(device, SDEV_RUNNING);
+			}
+			/* FALLTHRU */
+		case CHANGE:
+			if ((channel == CONTAINER_CHANNEL)
+			 && (!dev->fsa_dev[container].valid)) {
+#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
+				scsi_remove_device(device);
+#else
+				if (!scsi_device_online(device))
+					break;
+				scsi_device_set_state(device, SDEV_OFFLINE);
+				sdev_printk(KERN_INFO, device,
+					"Device offlined - %s\n",
+					"array failed");
+#endif
+				break;
+			}
+			scsi_rescan_device(&device->sdev_gendev);
+
+		default:
+			break;
+		}
+		scsi_device_put(device);
+		device_config_needed = NOTHING;
+	}
+	if (device_config_needed == ADD)
+		scsi_add_device(dev->scsi_host_ptr, channel, id, lun);
+	if (channel == CONTAINER_CHANNEL) {
+		container++;
+		device_config_needed = NOTHING;
+		goto retry_next;
+	}
+}
+
+static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
+{
+	int index, quirks;
+	int retval;
+	struct Scsi_Host *host;
+	struct scsi_device *dev;
+	struct scsi_cmnd *command;
+	struct scsi_cmnd *command_list;
+	int jafo = 0;
+	int bled;
+	u64 dmamask;
+	int num_of_fibs = 0;
+
+	/*
+	 * Assumptions:
+	 *	- host is locked, unless called by the aacraid thread.
+	 *	  (a matter of convenience, due to legacy issues surrounding
+	 *	  eh_host_adapter_reset).
+	 *	- in_reset is asserted, so no new i/o is getting to the
+	 *	  card.
+	 *	- The card is dead, or will be very shortly ;-/ so no new
+	 *	  commands are completing in the interrupt service.
+	 */
+	host = aac->scsi_host_ptr;
+	scsi_block_requests(host);
+	aac_adapter_disable_int(aac);
+	if (aac->thread && aac->thread->pid != current->pid) {
+		spin_unlock_irq(host->host_lock);
+		kthread_stop(aac->thread);
+		aac->thread = NULL;
+		jafo = 1;
+	}
+
+	/*
+	 *	A positive health value means the adapter is in a known
+	 * DEAD PANIC state and could be reset to `try again'.
+	 */
+	bled = forced ? 0 : aac_adapter_check_health(aac);
+	retval = aac_adapter_restart(aac, bled, reset_type);
+
+	if (retval)
+		goto out;
+
+	/*
+	 *	Loop through the fibs and close the synchronous FIBs
+	 */
+	retval = 1;
+	num_of_fibs = aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB;
+	for (index = 0; index <  num_of_fibs; index++) {
+
+		struct fib *fib = &aac->fibs[index];
+		__le32 XferState = fib->hw_fib_va->header.XferState;
+		bool is_response_expected = false;
+
+		if (!(XferState & cpu_to_le32(NoResponseExpected | Async)) &&
+		   (XferState & cpu_to_le32(ResponseExpected)))
+			is_response_expected = true;
+
+		if (is_response_expected
+		  || fib->flags & FIB_CONTEXT_FLAG_WAIT) {
+			unsigned long flagv;
+			spin_lock_irqsave(&fib->event_lock, flagv);
+			up(&fib->event_wait);
+			spin_unlock_irqrestore(&fib->event_lock, flagv);
+			schedule();
+			retval = 0;
+		}
+	}
+	/* Give some extra time for ioctls to complete. */
+	if (retval == 0)
+		ssleep(2);
+	index = aac->cardtype;
+
+	/*
+	 * Re-initialize the adapter: first free resources, then carefully
+	 * apply the initialization sequence to come back again. The only
+	 * risk is a change in firmware dropping cache; it is assumed the
+	 * caller will ensure that I/O is quiesced and the card is flushed
+	 * in that case.
+	 */
+	aac_free_irq(aac);
+	aac_fib_map_free(aac);
+	dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr,
+			  aac->comm_phys);
+	aac->comm_addr = NULL;
+	aac->comm_phys = 0;
+	kfree(aac->queues);
+	aac->queues = NULL;
+	kfree(aac->fsa_dev);
+	aac->fsa_dev = NULL;
+
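+	/*
+	 * Re-apply the DMA masks: controllers with the 31-bit quirk and
+	 * non-SRC controllers set a 32-bit streaming mask, SRC controllers
+	 * set only the coherent mask here, and the 31-bit quirk additionally
+	 * restricts the coherent mask to 31 bits.
+	 */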
+	dmamask = DMA_BIT_MASK(32);
+	quirks = aac_get_driver_ident(index)->quirks;
+	if (quirks & AAC_QUIRK_31BIT)
+		retval = pci_set_dma_mask(aac->pdev, dmamask);
+	else if (!(quirks & AAC_QUIRK_SRC))
+		retval = pci_set_dma_mask(aac->pdev, dmamask);
+	else
+		retval = pci_set_consistent_dma_mask(aac->pdev, dmamask);
+
+	if (quirks & AAC_QUIRK_31BIT && !retval) {
+		dmamask = DMA_BIT_MASK(31);
+		retval = pci_set_consistent_dma_mask(aac->pdev, dmamask);
+	}
+
+	if (retval)
+		goto out;
+
+	if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
+		goto out;
+
+	if (jafo) {
+		aac->thread = kthread_run(aac_command_thread, aac, "%s",
+					  aac->name);
+		if (IS_ERR(aac->thread)) {
+			retval = PTR_ERR(aac->thread);
+			aac->thread = NULL;
+			goto out;
+		}
+	}
+	(void)aac_get_adapter_info(aac);
+	if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) {
+		host->sg_tablesize = 34;
+		host->max_sectors = (host->sg_tablesize * 8) + 112;
+	}
+	if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) {
+		host->sg_tablesize = 17;
+		host->max_sectors = (host->sg_tablesize * 8) + 112;
+	}
+	aac_get_config_status(aac, 1);
+	aac_get_containers(aac);
+	/*
+	 * This is where the assumption that the Adapter is quiesced
+	 * is important.
+	 */
+	command_list = NULL;
+	__shost_for_each_device(dev, host) {
+		unsigned long flags;
+		spin_lock_irqsave(&dev->list_lock, flags);
+		list_for_each_entry(command, &dev->cmd_list, list)
+			if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
+				command->SCp.buffer = (struct scatterlist *)command_list;
+				command_list = command;
+			}
+		spin_unlock_irqrestore(&dev->list_lock, flags);
+	}
+	while ((command = command_list)) {
+		command_list = (struct scsi_cmnd *)command->SCp.buffer;
+		command->SCp.buffer = NULL;
+		command->result = DID_OK << 16
+		  | COMMAND_COMPLETE << 8
+		  | SAM_STAT_TASK_SET_FULL;
+		command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
+		command->scsi_done(command);
+	}
+	/*
+	 * Any Device that was already marked offline needs to be marked
+	 * running
+	 */
+	__shost_for_each_device(dev, host) {
+		if (!scsi_device_online(dev))
+			scsi_device_set_state(dev, SDEV_RUNNING);
+	}
+	retval = 0;
+
+out:
+	aac->in_reset = 0;
+	scsi_unblock_requests(host);
+
+	/*
+	 * Issue bus rescan to catch any configuration that might have
+	 * occurred
+	 */
+	if (!retval && !is_kdump_kernel()) {
+		dev_info(&aac->pdev->dev, "Scheduling bus rescan\n");
+		aac_schedule_safw_scan_worker(aac);
+	}
+
+	if (jafo) {
+		spin_lock_irq(host->host_lock);
+	}
+	return retval;
+}
+
+int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
+{
+	unsigned long flagv = 0;
+	int retval;
+	struct Scsi_Host * host;
+	int bled;
+
+	if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
+		return -EBUSY;
+
+	if (aac->in_reset) {
+		spin_unlock_irqrestore(&aac->fib_lock, flagv);
+		return -EBUSY;
+	}
+	aac->in_reset = 1;
+	spin_unlock_irqrestore(&aac->fib_lock, flagv);
+
+	/*
+	 * Wait for all commands to complete to this specific
+	 * target (block maximum 60 seconds). Although not necessary,
+	 * it does make us a good storage citizen.
+	 */
+	host = aac->scsi_host_ptr;
+	scsi_block_requests(host);
+
+	/* Quiesce build, flush cache, write through mode */
+	if (forced < 2)
+		aac_send_shutdown(aac);
+	spin_lock_irqsave(host->host_lock, flagv);
+	bled = forced ? forced :
+			(aac_check_reset != 0 && aac_check_reset != 1);
+	retval = _aac_reset_adapter(aac, bled, reset_type);
+	spin_unlock_irqrestore(host->host_lock, flagv);
+
+	if ((forced < 2) && (retval == -ENODEV)) {
+		/* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */
+		struct fib * fibctx = aac_fib_alloc(aac);
+		if (fibctx) {
+			struct aac_pause *cmd;
+			int status;
+
+			aac_fib_init(fibctx);
+
+			cmd = (struct aac_pause *) fib_data(fibctx);
+
+			cmd->command = cpu_to_le32(VM_ContainerConfig);
+			cmd->type = cpu_to_le32(CT_PAUSE_IO);
+			cmd->timeout = cpu_to_le32(1);
+			cmd->min = cpu_to_le32(1);
+			cmd->noRescan = cpu_to_le32(1);
+			cmd->count = cpu_to_le32(0);
+
+			status = aac_fib_send(ContainerCommand,
+			  fibctx,
+			  sizeof(struct aac_pause),
+			  FsaNormal,
+			  -2 /* Timeout silently */, 1,
+			  NULL, NULL);
+
+			if (status >= 0)
+				aac_fib_complete(fibctx);
+			/* FIB should be freed only after getting
+			 * the response from the F/W */
+			if (status != -ERESTARTSYS)
+				aac_fib_free(fibctx);
+		}
+	}
+
+	return retval;
+}
+
+int aac_check_health(struct aac_dev * aac)
+{
+	int BlinkLED;
+	unsigned long time_now, flagv = 0;
+	struct list_head * entry;
+
+	/* Extending the scope of fib_lock slightly to protect aac->in_reset */
+	if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
+		return 0;
+
+	if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) {
+		spin_unlock_irqrestore(&aac->fib_lock, flagv);
+		return 0; /* OK */
+	}
+
+	aac->in_reset = 1;
+
+	/* Fake up an AIF:
+	 *	aac_aifcmd.command = AifCmdEventNotify = 1
+	 *	aac_aifcmd.seqnum = 0xFFFFFFFF
+	 *	aac_aifcmd.data[0] = AifEnExpEvent = 23
+	 *	aac_aifcmd.data[1] = AifExeFirmwarePanic = 3
+	 *	aac_aifcmd.data[2] = AifHighPriority = 3
+	 *	aac_aifcmd.data[3] = BlinkLED
+	 */
+
+	time_now = jiffies/HZ;
+	entry = aac->fib_list.next;
+
+	/*
+	 * For each Context that is on the
+	 * fibctxList, make a copy of the
+	 * fib, and then set the event to wake up the
+	 * thread that is waiting for it.
+	 */
+	while (entry != &aac->fib_list) {
+		/*
+		 * Extract the fibctx
+		 */
+		struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next);
+		struct hw_fib * hw_fib;
+		struct fib * fib;
+		/*
+		 * Check if the queue is getting
+		 * backlogged
+		 */
+		if (fibctx->count > 20) {
+			/*
+			 * It's *not* jiffies folks,
+			 * but jiffies / HZ, so do not
+			 * panic ...
+			 */
+			u32 time_last = fibctx->jiffies;
+			/*
+			 * Has it been > 2 minutes
+			 * since the last read off
+			 * the queue?
+			 */
+			if ((time_now - time_last) > aif_timeout) {
+				entry = entry->next;
+				aac_close_fib_context(aac, fibctx);
+				continue;
+			}
+		}
+		/*
+		 * Warning: no sleep allowed while
+		 * holding spinlock
+		 */
+		hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC);
+		fib = kzalloc(sizeof(struct fib), GFP_ATOMIC);
+		if (fib && hw_fib) {
+			struct aac_aifcmd * aif;
+
+			fib->hw_fib_va = hw_fib;
+			fib->dev = aac;
+			aac_fib_init(fib);
+			fib->type = FSAFS_NTC_FIB_CONTEXT;
+			fib->size = sizeof (struct fib);
+			fib->data = hw_fib->data;
+			aif = (struct aac_aifcmd *)hw_fib->data;
+			aif->command = cpu_to_le32(AifCmdEventNotify);
+			aif->seqnum = cpu_to_le32(0xFFFFFFFF);
+			((__le32 *)aif->data)[0] = cpu_to_le32(AifEnExpEvent);
+			((__le32 *)aif->data)[1] = cpu_to_le32(AifExeFirmwarePanic);
+			((__le32 *)aif->data)[2] = cpu_to_le32(AifHighPriority);
+			((__le32 *)aif->data)[3] = cpu_to_le32(BlinkLED);
+
+			/*
+			 * Put the FIB onto the
+			 * fibctx's fibs
+			 */
+			list_add_tail(&fib->fiblink, &fibctx->fib_list);
+			fibctx->count++;
+			/*
+			 * Set the event to wake up the
+			 * thread that is waiting.
+			 */
+			up(&fibctx->wait_sem);
+		} else {
+			printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
+			kfree(fib);
+			kfree(hw_fib);
+		}
+		entry = entry->next;
+	}
+
+	spin_unlock_irqrestore(&aac->fib_lock, flagv);
+
+	if (BlinkLED < 0) {
+		printk(KERN_ERR "%s: Host adapter is dead (or got a PCI error) %d\n",
+				aac->name, BlinkLED);
+		goto out;
+	}
+
+	printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);
+
+out:
+	aac->in_reset = 0;
+	return BlinkLED;
+}
+
+static inline int is_safw_raid_volume(struct aac_dev *aac, int bus, int target)
+{
+	return bus == CONTAINER_CHANNEL && target < aac->maximum_num_containers;
+}
+
+static struct scsi_device *aac_lookup_safw_scsi_device(struct aac_dev *dev,
+								int bus,
+								int target)
+{
+	if (bus != CONTAINER_CHANNEL)
+		bus = aac_phys_to_logical(bus);
+
+	return scsi_device_lookup(dev->scsi_host_ptr, bus, target, 0);
+}
+
+static int aac_add_safw_device(struct aac_dev *dev, int bus, int target)
+{
+	if (bus != CONTAINER_CHANNEL)
+		bus = aac_phys_to_logical(bus);
+
+	return scsi_add_device(dev->scsi_host_ptr, bus, target, 0);
+}
+
+static void aac_put_safw_scsi_device(struct scsi_device *sdev)
+{
+	if (sdev)
+		scsi_device_put(sdev);
+}
+
+static void aac_remove_safw_device(struct aac_dev *dev, int bus, int target)
+{
+	struct scsi_device *sdev;
+
+	sdev = aac_lookup_safw_scsi_device(dev, bus, target);
+	scsi_remove_device(sdev);
+	aac_put_safw_scsi_device(sdev);
+}
+
+static inline int aac_is_safw_scan_count_equal(struct aac_dev *dev,
+	int bus, int target)
+{
+	return dev->hba_map[bus][target].scan_counter == dev->scan_counter;
+}
+
+static int aac_is_safw_target_valid(struct aac_dev *dev, int bus, int target)
+{
+	if (is_safw_raid_volume(dev, bus, target))
+		return dev->fsa_dev[target].valid;
+	else
+		return aac_is_safw_scan_count_equal(dev, bus, target);
+}
+
+static int aac_is_safw_device_exposed(struct aac_dev *dev, int bus, int target)
+{
+	int is_exposed = 0;
+	struct scsi_device *sdev;
+
+	sdev = aac_lookup_safw_scsi_device(dev, bus, target);
+	if (sdev)
+		is_exposed = 1;
+	aac_put_safw_scsi_device(sdev);
+
+	return is_exposed;
+}
+
+static int aac_update_safw_host_devices(struct aac_dev *dev)
+{
+	int i;
+	int bus;
+	int target;
+	int is_exposed = 0;
+	int rcode = 0;
+
+	rcode = aac_setup_safw_adapter(dev);
+	if (unlikely(rcode < 0)) {
+		goto out;
+	}
+
+	for (i = 0; i < AAC_BUS_TARGET_LOOP; i++) {
+
+		bus = get_bus_number(i);
+		target = get_target_number(i);
+
+		is_exposed = aac_is_safw_device_exposed(dev, bus, target);
+
+		if (aac_is_safw_target_valid(dev, bus, target) && !is_exposed)
+			aac_add_safw_device(dev, bus, target);
+		else if (!aac_is_safw_target_valid(dev, bus, target) &&
+								is_exposed)
+			aac_remove_safw_device(dev, bus, target);
+	}
+out:
+	return rcode;
+}
+
+static int aac_scan_safw_host(struct aac_dev *dev)
+{
+	int rcode = 0;
+
+	rcode = aac_update_safw_host_devices(dev);
+	if (rcode)
+		aac_schedule_safw_scan_worker(dev);
+
+	return rcode;
+}
+
+int aac_scan_host(struct aac_dev *dev)
+{
+	int rcode = 0;
+
+	mutex_lock(&dev->scan_mutex);
+	if (dev->sa_firmware)
+		rcode = aac_scan_safw_host(dev);
+	else
+		scsi_scan_host(dev->scsi_host_ptr);
+	mutex_unlock(&dev->scan_mutex);
+
+	return rcode;
+}
+
+/**
+ *	aac_handle_sa_aif	-	Handle a message from the firmware
+ *	@dev: Which adapter this fib is from
+ *	@fibptr: Pointer to fibptr from adapter
+ *
+ *	This routine handles a driver notify fib from the adapter and
+ *	dispatches it to the appropriate routine for handling.
+ */
+static void aac_handle_sa_aif(struct aac_dev *dev, struct fib *fibptr)
+{
+	int i;
+	u32 events = 0;
+
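+	/*
+	 * For sa_firmware adapters aac_intr_normal() stores the raw event
+	 * bits in fib->hbacmd_size; decode the event type from them here.
+	 */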
+	if (fibptr->hbacmd_size & SA_AIF_HOTPLUG)
+		events = SA_AIF_HOTPLUG;
+	else if (fibptr->hbacmd_size & SA_AIF_HARDWARE)
+		events = SA_AIF_HARDWARE;
+	else if (fibptr->hbacmd_size & SA_AIF_PDEV_CHANGE)
+		events = SA_AIF_PDEV_CHANGE;
+	else if (fibptr->hbacmd_size & SA_AIF_LDEV_CHANGE)
+		events = SA_AIF_LDEV_CHANGE;
+	else if (fibptr->hbacmd_size & SA_AIF_BPSTAT_CHANGE)
+		events = SA_AIF_BPSTAT_CHANGE;
+	else if (fibptr->hbacmd_size & SA_AIF_BPCFG_CHANGE)
+		events = SA_AIF_BPCFG_CHANGE;
+
+	switch (events) {
+	case SA_AIF_HOTPLUG:
+	case SA_AIF_HARDWARE:
+	case SA_AIF_PDEV_CHANGE:
+	case SA_AIF_LDEV_CHANGE:
+	case SA_AIF_BPCFG_CHANGE:
+
+		aac_scan_host(dev);
+
+		break;
+
+	case SA_AIF_BPSTAT_CHANGE:
+		/* currently do nothing */
+		break;
+	}
+
+	for (i = 1; i <= 10; ++i) {
+		events = src_readl(dev, MUnit.IDR);
+		if (events & (1<<23)) {
+			pr_warn("AIF not cleared by firmware - %d/%d\n",
+				i, 10);
+			ssleep(1);
+		}
+	}
+}
+
+static int get_fib_count(struct aac_dev *dev)
+{
+	unsigned int num = 0;
+	struct list_head *entry;
+	unsigned long flagv;
+
+	/*
+	 * Warning: no sleep allowed while
+	 * holding spinlock. We take the estimate
+	 * and pre-allocate a set of fibs outside the
+	 * lock.
+	 */
+	num = le32_to_cpu(dev->init->r7.adapter_fibs_size)
+			/ sizeof(struct hw_fib); /* some extra */
+	spin_lock_irqsave(&dev->fib_lock, flagv);
+	entry = dev->fib_list.next;
+	while (entry != &dev->fib_list) {
+		entry = entry->next;
+		++num;
+	}
+	spin_unlock_irqrestore(&dev->fib_lock, flagv);
+
+	return num;
+}
+
+static int fillup_pools(struct aac_dev *dev, struct hw_fib **hw_fib_pool,
+						struct fib **fib_pool,
+						unsigned int num)
+{
+	struct hw_fib **hw_fib_p;
+	struct fib **fib_p;
+
+	hw_fib_p = hw_fib_pool;
+	fib_p = fib_pool;
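+	/*
+	 * Allocate hw_fib/fib pairs; on the first allocation failure free
+	 * the unmatched partner (if any) and stop, so only complete pairs
+	 * are counted below.
+	 */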
+	while (hw_fib_p < &hw_fib_pool[num]) {
+		*(hw_fib_p) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL);
+		if (!(*(hw_fib_p++))) {
+			--hw_fib_p;
+			break;
+		}
+
+		*(fib_p) = kmalloc(sizeof(struct fib), GFP_KERNEL);
+		if (!(*(fib_p++))) {
+			kfree(*(--hw_fib_p));
+			break;
+		}
+	}
+
+	/*
+	 * Get the actual number of allocated fibs
+	 */
+	num = hw_fib_p - hw_fib_pool;
+	return num;
+}
+
+static void wakeup_fibctx_threads(struct aac_dev *dev,
+						struct hw_fib **hw_fib_pool,
+						struct fib **fib_pool,
+						struct fib *fib,
+						struct hw_fib *hw_fib,
+						unsigned int num)
+{
+	unsigned long flagv;
+	struct list_head *entry;
+	struct hw_fib **hw_fib_p;
+	struct fib **fib_p;
+	u32 time_now, time_last;
+	struct hw_fib *hw_newfib;
+	struct fib *newfib;
+	struct aac_fib_context *fibctx;
+
+	time_now = jiffies/HZ;
+	spin_lock_irqsave(&dev->fib_lock, flagv);
+	entry = dev->fib_list.next;
+	/*
+	 * For each Context that is on the
+	 * fibctxList, make a copy of the
+	 * fib, and then set the event to wake up the
+	 * thread that is waiting for it.
+	 */
+
+	hw_fib_p = hw_fib_pool;
+	fib_p = fib_pool;
+	while (entry != &dev->fib_list) {
+		/*
+		 * Extract the fibctx
+		 */
+		fibctx = list_entry(entry, struct aac_fib_context,
+				next);
+		/*
+		 * Check if the queue is getting
+		 * backlogged
+		 */
+		if (fibctx->count > 20) {
+			/*
+			 * It's *not* jiffies folks,
+			 * but jiffies / HZ so do not
+			 * panic ...
+			 */
+			time_last = fibctx->jiffies;
+			/*
+			 * Has it been > 2 minutes
+			 * since the last read off
+			 * the queue?
+			 */
+			if ((time_now - time_last) > aif_timeout) {
+				entry = entry->next;
+				aac_close_fib_context(dev, fibctx);
+				continue;
+			}
+		}
+		/*
+		 * Warning: no sleep allowed while
+		 * holding spinlock
+		 */
+		if (hw_fib_p >= &hw_fib_pool[num]) {
+			pr_warn("aifd: didn't allocate NewFib\n");
+			entry = entry->next;
+			continue;
+		}
+
+		hw_newfib = *hw_fib_p;
+		*(hw_fib_p++) = NULL;
+		newfib = *fib_p;
+		*(fib_p++) = NULL;
+		/*
+		 * Make the copy of the FIB
+		 */
+		memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
+		memcpy(newfib, fib, sizeof(struct fib));
+		newfib->hw_fib_va = hw_newfib;
+		/*
+		 * Put the FIB onto the
+		 * fibctx's fibs
+		 */
+		list_add_tail(&newfib->fiblink, &fibctx->fib_list);
+		fibctx->count++;
+		/*
+		 * Set the event to wake up the
+		 * thread that is waiting.
+		 */
+		up(&fibctx->wait_sem);
+
+		entry = entry->next;
+	}
+	/*
+	 *	Set the status of this FIB
+	 */
+	*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
+	aac_fib_adapter_complete(fib, sizeof(u32));
+	spin_unlock_irqrestore(&dev->fib_lock, flagv);
+
+}
+
+static void aac_process_events(struct aac_dev *dev)
+{
+	struct hw_fib *hw_fib;
+	struct fib *fib;
+	unsigned long flags;
+	spinlock_t *t_lock;
+
+	t_lock = dev->queues->queue[HostNormCmdQueue].lock;
+	spin_lock_irqsave(t_lock, flags);
+
+	while (!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
+		struct list_head *entry;
+		struct aac_aifcmd *aifcmd;
+		unsigned int  num;
+		struct hw_fib **hw_fib_pool, **hw_fib_p;
+		struct fib **fib_pool, **fib_p;
+
+		set_current_state(TASK_RUNNING);
+
+		entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
+		list_del(entry);
+
+		t_lock = dev->queues->queue[HostNormCmdQueue].lock;
+		spin_unlock_irqrestore(t_lock, flags);
+
+		fib = list_entry(entry, struct fib, fiblink);
+		hw_fib = fib->hw_fib_va;
+		if (dev->sa_firmware) {
+			/* Thor AIF */
+			aac_handle_sa_aif(dev, fib);
+			aac_fib_adapter_complete(fib, (u16)sizeof(u32));
+			goto free_fib;
+		}
+		/*
+		 *	We will process the FIB here or pass it to a
+		 *	worker thread that is TBD. We really can't
+		 *	do anything at this point since we don't have
+		 *	anything defined for this thread to do.
+		 */
+		memset(fib, 0, sizeof(struct fib));
+		fib->type = FSAFS_NTC_FIB_CONTEXT;
+		fib->size = sizeof(struct fib);
+		fib->hw_fib_va = hw_fib;
+		fib->data = hw_fib->data;
+		fib->dev = dev;
+		/*
+		 *	We only handle AifRequest fibs from the adapter.
+		 */
+
+		aifcmd = (struct aac_aifcmd *) hw_fib->data;
+		if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
+			/* Handle Driver Notify Events */
+			aac_handle_aif(dev, fib);
+			*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
+			aac_fib_adapter_complete(fib, (u16)sizeof(u32));
+			goto free_fib;
+		}
+		/*
+		 * The u32 here is important and intended. We are using
+		 * 32bit wrapping time to fit the adapter field
+		 */
+
+		/* Sniff events */
+		if (aifcmd->command == cpu_to_le32(AifCmdEventNotify)
+		 || aifcmd->command == cpu_to_le32(AifCmdJobProgress)) {
+			aac_handle_aif(dev, fib);
+		}
+
+		/*
+		 * get number of fibs to process
+		 */
+		num = get_fib_count(dev);
+		if (!num)
+			goto free_fib;
+
+		hw_fib_pool = kmalloc_array(num, sizeof(struct hw_fib *),
+						GFP_KERNEL);
+		if (!hw_fib_pool)
+			goto free_fib;
+
+		fib_pool = kmalloc_array(num, sizeof(struct fib *), GFP_KERNEL);
+		if (!fib_pool)
+			goto free_hw_fib_pool;
+
+		/*
+		 * Fill up fib pointer pools with actual fibs
+		 * and hw_fibs
+		 */
+		num = fillup_pools(dev, hw_fib_pool, fib_pool, num);
+		if (!num)
+			goto free_mem;
+
+		/*
+		 * wakeup the thread that is waiting for
+		 * the response from fw (ioctl)
+		 */
+		wakeup_fibctx_threads(dev, hw_fib_pool, fib_pool,
+							    fib, hw_fib, num);
+
+free_mem:
+		/* Free up the remaining resources */
+		hw_fib_p = hw_fib_pool;
+		fib_p = fib_pool;
+		while (hw_fib_p < &hw_fib_pool[num]) {
+			kfree(*hw_fib_p);
+			kfree(*fib_p);
+			++fib_p;
+			++hw_fib_p;
+		}
+		kfree(fib_pool);
+free_hw_fib_pool:
+		kfree(hw_fib_pool);
+free_fib:
+		kfree(fib);
+		t_lock = dev->queues->queue[HostNormCmdQueue].lock;
+		spin_lock_irqsave(t_lock, flags);
+	}
+	/*
+	 *	There are no more AIF's
+	 */
+	t_lock = dev->queues->queue[HostNormCmdQueue].lock;
+	spin_unlock_irqrestore(t_lock, flags);
+}
+
+static int aac_send_wellness_command(struct aac_dev *dev, char *wellness_str,
+							u32 datasize)
+{
+	struct aac_srb *srbcmd;
+	struct sgmap64 *sg64;
+	dma_addr_t addr;
+	char *dma_buf;
+	struct fib *fibptr;
+	int ret = -ENOMEM;
+	u32 vbus, vid;
+
+	fibptr = aac_fib_alloc(dev);
+	if (!fibptr)
+		goto out;
+
+	dma_buf = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr,
+				     GFP_KERNEL);
+	if (!dma_buf)
+		goto fib_free_out;
+
+	aac_fib_init(fibptr);
+
+	vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_bus);
+	vid = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_target);
+
+	srbcmd = (struct aac_srb *)fib_data(fibptr);
+
+	srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
+	srbcmd->channel = cpu_to_le32(vbus);
+	srbcmd->id = cpu_to_le32(vid);
+	srbcmd->lun = 0;
+	srbcmd->flags = cpu_to_le32(SRB_DataOut);
+	srbcmd->timeout = cpu_to_le32(10);
+	srbcmd->retry_limit = 0;
+	srbcmd->cdb_size = cpu_to_le32(12);
+	srbcmd->count = cpu_to_le32(datasize);
+
+	memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
+	srbcmd->cdb[0] = BMIC_OUT;
+	srbcmd->cdb[6] = WRITE_HOST_WELLNESS;
+	memcpy(dma_buf, (char *)wellness_str, datasize);
+
+	sg64 = (struct sgmap64 *)&srbcmd->sg;
+	sg64->count = cpu_to_le32(1);
+	sg64->sg[0].addr[1] = cpu_to_le32((u32)(((addr) >> 16) >> 16));
+	sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
+	sg64->sg[0].count = cpu_to_le32(datasize);
+
+	ret = aac_fib_send(ScsiPortCommand64, fibptr, sizeof(struct aac_srb),
+				FsaNormal, 1, 1, NULL, NULL);
+
+	dma_free_coherent(&dev->pdev->dev, datasize, dma_buf, addr);
+
+	/*
+	 * Do not set XferState to zero unless we
+	 * receive a response from the F/W
+	 */
+	if (ret >= 0)
+		aac_fib_complete(fibptr);
+
+	/*
+	 * FIB should be freed only after
+	 * getting the response from the F/W
+	 */
+	if (ret != -ERESTARTSYS)
+		goto fib_free_out;
+
+out:
+	return ret;
+fib_free_out:
+	aac_fib_free(fibptr);
+	goto out;
+}
+
+int aac_send_safw_hostttime(struct aac_dev *dev, struct timespec64 *now)
+{
+	struct tm cur_tm;
+	char wellness_str[] = "<HW>TD\010\0\0\0\0\0\0\0\0\0DW\0\0ZZ";
+	u32 datasize = sizeof(wellness_str);
+	time64_t local_time;
+	int ret = -ENODEV;
+
+	if (!dev->sa_firmware)
+		goto out;
+
+	local_time = (now->tv_sec - (sys_tz.tz_minuteswest * 60));
+	time64_to_tm(local_time, 0, &cur_tm);
+	cur_tm.tm_mon += 1;
+	cur_tm.tm_year += 1900;
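+	/*
+	 * Patch the BCD-encoded local time into the template string:
+	 * bytes 8-10 carry HH MM SS and bytes 12-15 carry month, day,
+	 * century and year.
+	 */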
+	wellness_str[8] = bin2bcd(cur_tm.tm_hour);
+	wellness_str[9] = bin2bcd(cur_tm.tm_min);
+	wellness_str[10] = bin2bcd(cur_tm.tm_sec);
+	wellness_str[12] = bin2bcd(cur_tm.tm_mon);
+	wellness_str[13] = bin2bcd(cur_tm.tm_mday);
+	wellness_str[14] = bin2bcd(cur_tm.tm_year / 100);
+	wellness_str[15] = bin2bcd(cur_tm.tm_year % 100);
+
+	ret = aac_send_wellness_command(dev, wellness_str, datasize);
+
+out:
+	return ret;
+}
+
+int aac_send_hosttime(struct aac_dev *dev, struct timespec64 *now)
+{
+	int ret = -ENOMEM;
+	struct fib *fibptr;
+	__le32 *info;
+
+	fibptr = aac_fib_alloc(dev);
+	if (!fibptr)
+		goto out;
+
+	aac_fib_init(fibptr);
+	info = (__le32 *)fib_data(fibptr);
+	*info = cpu_to_le32(now->tv_sec); /* overflow in y2106 */
+	ret = aac_fib_send(SendHostTime, fibptr, sizeof(*info), FsaNormal,
+					1, 1, NULL, NULL);
+
+	/*
+	 * Do not set XferState to zero unless we
+	 * receive a response from the F/W
+	 */
+	if (ret >= 0)
+		aac_fib_complete(fibptr);
+
+	/*
+	 * FIB should be freed only after
+	 * getting the response from the F/W
+	 */
+	if (ret != -ERESTARTSYS)
+		aac_fib_free(fibptr);
+
+out:
+	return ret;
+}
+
+/**
+ *	aac_command_thread	-	command processing thread
+ *	@dev: Adapter to monitor
+ *
+ *	Waits on the commandready event in its queue. When the event gets set
+ *	it will pull FIBs off its queue. It will continue to pull FIBs off
+ *	until the queue is empty. When the queue is empty it will wait for
+ *	more FIBs.
+ */
+
+int aac_command_thread(void *data)
+{
+	struct aac_dev *dev = data;
+	DECLARE_WAITQUEUE(wait, current);
+	unsigned long next_jiffies = jiffies + HZ;
+	unsigned long next_check_jiffies = next_jiffies;
+	long difference = HZ;
+
+	/*
+	 *	We can only have one thread per adapter for AIF's.
+	 */
+	if (dev->aif_thread)
+		return -EINVAL;
+
+	/*
+	 *	Let the DPC know it has a place to send the AIF's to.
+	 */
+	dev->aif_thread = 1;
+	add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
+	set_current_state(TASK_INTERRUPTIBLE);
+	dprintk ((KERN_INFO "aac_command_thread start\n"));
+	while (1) {
+
+		aac_process_events(dev);
+
+		/*
+		 *	Background activity
+		 */
+		if ((time_before(next_check_jiffies,next_jiffies))
+		 && ((difference = next_check_jiffies - jiffies) <= 0)) {
+			next_check_jiffies = next_jiffies;
+			if (aac_adapter_check_health(dev) == 0) {
+				difference = ((long)(unsigned)check_interval)
+					   * HZ;
+				next_check_jiffies = jiffies + difference;
+			} else if (!dev->queues)
+				break;
+		}
+		if (!time_before(next_check_jiffies,next_jiffies)
+		 && ((difference = next_jiffies - jiffies) <= 0)) {
+			struct timespec64 now;
+			int ret;
+
+			/* Don't even try to talk to the adapter if it's sick */
+			ret = aac_adapter_check_health(dev);
+			if (ret || !dev->queues)
+				break;
+			next_check_jiffies = jiffies
+					   + ((long)(unsigned)check_interval)
+					   * HZ;
+			ktime_get_real_ts64(&now);
+
+			/* Synchronize our watches */
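+			/*
+			 * If we are more than one tick away from a second
+			 * boundary, just reschedule; otherwise round to the
+			 * nearest second and send the host time to the
+			 * adapter.
+			 */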
+			if (((NSEC_PER_SEC - (NSEC_PER_SEC / HZ)) > now.tv_nsec)
+			 && (now.tv_nsec > (NSEC_PER_SEC / HZ)))
+				difference = HZ + HZ / 2 -
+					     now.tv_nsec / (NSEC_PER_SEC / HZ);
+			else {
+				if (now.tv_nsec > NSEC_PER_SEC / 2)
+					++now.tv_sec;
+
+				if (dev->sa_firmware)
+					ret =
+					aac_send_safw_hostttime(dev, &now);
+				else
+					ret = aac_send_hosttime(dev, &now);
+
+				difference = (long)(unsigned)update_interval*HZ;
+			}
+			next_jiffies = jiffies + difference;
+			if (time_before(next_check_jiffies,next_jiffies))
+				difference = next_check_jiffies - jiffies;
+		}
+		if (difference <= 0)
+			difference = 1;
+		set_current_state(TASK_INTERRUPTIBLE);
+
+		if (kthread_should_stop())
+			break;
+
+		/*
+		 * we probably want usleep_range() here instead of the
+		 * jiffies computation
+		 */
+		schedule_timeout(difference);
+
+		if (kthread_should_stop())
+			break;
+	}
+	if (dev->queues)
+		remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
+	dev->aif_thread = 0;
+	return 0;
+}
+
+int aac_acquire_irq(struct aac_dev *dev)
+{
+	int i;
+	int j;
+	int ret = 0;
+
+	if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) {
+		for (i = 0; i < dev->max_msix; i++) {
+			dev->aac_msix[i].vector_no = i;
+			dev->aac_msix[i].dev = dev;
+			if (request_irq(pci_irq_vector(dev->pdev, i),
+					dev->a_ops.adapter_intr,
+					0, "aacraid", &(dev->aac_msix[i]))) {
+				printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
+						dev->name, dev->id, i);
+				for (j = 0 ; j < i ; j++)
+					free_irq(pci_irq_vector(dev->pdev, j),
+						 &(dev->aac_msix[j]));
+				pci_disable_msix(dev->pdev);
+				ret = -1;
+			}
+		}
+	} else {
+		dev->aac_msix[0].vector_no = 0;
+		dev->aac_msix[0].dev = dev;
+
+		if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
+			IRQF_SHARED, "aacraid",
+			&(dev->aac_msix[0])) < 0) {
+			if (dev->msi)
+				pci_disable_msi(dev->pdev);
+			printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
+					dev->name, dev->id);
+			ret = -1;
+		}
+	}
+	return ret;
+}
+
+void aac_free_irq(struct aac_dev *dev)
+{
+	int i;
+	int cpu;
+
+	cpu = cpumask_first(cpu_online_mask);
+	if (aac_is_src(dev)) {
+		if (dev->max_msix > 1) {
+			for (i = 0; i < dev->max_msix; i++)
+				free_irq(pci_irq_vector(dev->pdev, i),
+					 &(dev->aac_msix[i]));
+		} else {
+			free_irq(dev->pdev->irq, &(dev->aac_msix[0]));
+		}
+	} else {
+		free_irq(dev->pdev->irq, dev);
+	}
+	if (dev->msi)
+		pci_disable_msi(dev->pdev);
+	else if (dev->max_msix > 1)
+		pci_disable_msix(dev->pdev);
+}
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
new file mode 100644
index 0000000..ddc6973
--- /dev/null
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -0,0 +1,471 @@
+/*
+ *	Adaptec AAC series RAID controller driver
+ *	(c) Copyright 2001 Red Hat Inc.
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *		 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ *  dpcsup.c
+ *
+ * Abstract: All DPC processing routines for the cyclone board occur here.
+ *
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/completion.h>
+#include <linux/blkdev.h>
+#include <linux/semaphore.h>
+
+#include "aacraid.h"
+
+/**
+ *	aac_response_normal	-	Handle command replies
+ *	@q: Queue to read from
+ *
+ *	This DPC routine will be run when the adapter interrupts us to let us
+ *	know there is a response on our normal priority queue. We will pull off
+ *	all the QEs there are and wake up all the waiters before exiting. We will
+ *	take a spinlock out on the queue before operating on it.
+ */
+
+unsigned int aac_response_normal(struct aac_queue * q)
+{
+	struct aac_dev * dev = q->dev;
+	struct aac_entry *entry;
+	struct hw_fib * hwfib;
+	struct fib * fib;
+	int consumed = 0;
+	unsigned long flags, mflags;
+
+	spin_lock_irqsave(q->lock, flags);
+	/*
+	 *	Keep pulling response QEs off the response queue and waking
+	 *	up the waiters until there are no more QEs. We then return
+	 *	back to the system. If no response was requested we just
+	 *	deallocate the Fib here and continue.
+	 */
+	while (aac_consumer_get(dev, q, &entry)) {
+		int fast;
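+		/*
+		 * entry->addr packs the fib index in bits 2 and up; bit 0
+		 * flags a fast response for which the driver fills in the
+		 * ST_OK status itself below.
+		 */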
+		u32 index = le32_to_cpu(entry->addr);
+		fast = index & 0x01;
+		fib = &dev->fibs[index >> 2];
+		hwfib = fib->hw_fib_va;
+		
+		aac_consumer_free(dev, q, HostNormRespQueue);
+		/*
+		 *	Remove this fib from the Outstanding I/O queue.
+		 *	But only if it has not already been timed out.
+		 *
+		 *	If the fib has been timed out already, then just 
+		 *	continue. The caller has already been notified that
+		 *	the fib timed out.
+		 */
+		atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);
+
+		if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
+			spin_unlock_irqrestore(q->lock, flags);
+			aac_fib_complete(fib);
+			aac_fib_free(fib);
+			spin_lock_irqsave(q->lock, flags);
+			continue;
+		}
+		spin_unlock_irqrestore(q->lock, flags);
+
+		if (fast) {
+			/*
+			 *	Doctor the fib
+			 */
+			*(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
+			hwfib->header.XferState |= cpu_to_le32(AdapterProcessed);
+			fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
+		}
+
+		FIB_COUNTER_INCREMENT(aac_config.FibRecved);
+
+		if (hwfib->header.Command == cpu_to_le16(NuFileSystem)) {
+			__le32 *pstatus = (__le32 *)hwfib->data;
+
+			if (*pstatus & cpu_to_le32(0xffff0000))
+				*pstatus = cpu_to_le32(ST_OK);
+		}
+		if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected | Async)) {
+			if (hwfib->header.XferState & cpu_to_le32(NoResponseExpected))
+				FIB_COUNTER_INCREMENT(aac_config.NoResponseRecved);
+			else
+				FIB_COUNTER_INCREMENT(aac_config.AsyncRecved);
+			/*
+			 *	NOTE:  we cannot touch the fib after this
+			 *	    call, because it may have been deallocated.
+			 */
+			fib->callback(fib->callback_data, fib);
+		} else {
+			unsigned long flagv;
+			spin_lock_irqsave(&fib->event_lock, flagv);
+			if (!fib->done) {
+				fib->done = 1;
+				up(&fib->event_wait);
+			}
+			spin_unlock_irqrestore(&fib->event_lock, flagv);
+
+			spin_lock_irqsave(&dev->manage_lock, mflags);
+			dev->management_fib_count--;
+			spin_unlock_irqrestore(&dev->manage_lock, mflags);
+
+			FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
+			if (fib->done == 2) {
+				spin_lock_irqsave(&fib->event_lock, flagv);
+				fib->done = 0;
+				spin_unlock_irqrestore(&fib->event_lock, flagv);
+				aac_fib_complete(fib);
+				aac_fib_free(fib);
+			}
+		}
+		consumed++;
+		spin_lock_irqsave(q->lock, flags);
+	}
+
+	if (consumed > aac_config.peak_fibs)
+		aac_config.peak_fibs = consumed;
+	if (consumed == 0) 
+		aac_config.zero_fibs++;
+
+	spin_unlock_irqrestore(q->lock, flags);
+	return 0;
+}
+
+
+/**
+ *	aac_command_normal	-	handle commands
+ *	@q: queue to process
+ *
+ *	This DPC routine will be queued when the adapter interrupts us to
+ *	let us know there is a command on our normal priority queue. We will
+ *	pull off all the QEs there are and wake up all the waiters before exiting.
+ *	We will take a spinlock out on the queue before operating on it.
+ */
+ 
+unsigned int aac_command_normal(struct aac_queue *q)
+{
+	struct aac_dev * dev = q->dev;
+	struct aac_entry *entry;
+	unsigned long flags;
+
+	spin_lock_irqsave(q->lock, flags);
+
+	/*
+	 *	Keep pulling response QEs off the response queue and waking
+	 *	up the waiters until there are no more QEs. We then return
+	 *	back to the system.
+	 */
+	while (aac_consumer_get(dev, q, &entry)) {
+		struct fib fibctx;
+		struct hw_fib * hw_fib;
+		u32 index;
+		struct fib *fib = &fibctx;
+		
+		index = le32_to_cpu(entry->addr) / sizeof(struct hw_fib);
+		hw_fib = &dev->aif_base_va[index];
+		
+		/*
+		 *	Allocate a FIB at all costs. For non queued stuff
+		 *	we can just use the stack so we are happy. We need
+		 *	a fib object in order to manage the linked lists
+		 */
+		if (dev->aif_thread)
+			if ((fib = kmalloc(sizeof(struct fib), GFP_ATOMIC)) == NULL)
+				fib = &fibctx;
+		
+		memset(fib, 0, sizeof(struct fib));
+		INIT_LIST_HEAD(&fib->fiblink);
+		fib->type = FSAFS_NTC_FIB_CONTEXT;
+		fib->size = sizeof(struct fib);
+		fib->hw_fib_va = hw_fib;
+		fib->data = hw_fib->data;
+		fib->dev = dev;
+
+		if (dev->aif_thread && fib != &fibctx) {
+			list_add_tail(&fib->fiblink, &q->cmdq);
+			aac_consumer_free(dev, q, HostNormCmdQueue);
+			wake_up_interruptible(&q->cmdready);
+		} else {
+			aac_consumer_free(dev, q, HostNormCmdQueue);
+			spin_unlock_irqrestore(q->lock, flags);
+			/*
+			 *	Set the status of this FIB
+			 */
+			*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
+			aac_fib_adapter_complete(fib, sizeof(u32));
+			spin_lock_irqsave(q->lock, flags);
+		}		
+	}
+	spin_unlock_irqrestore(q->lock, flags);
+	return 0;
+}
+
+/*
+ *
+ * aac_aif_callback
+ * @context: the context set in the fib - here it is the AIF fib itself (fibctx)
+ * @fibptr: pointer to the fib
+ *
+ * Handles the AIFs - new method (SRC)
+ *
+ */
+
+static void aac_aif_callback(void *context, struct fib * fibptr)
+{
+	struct fib *fibctx;
+	struct aac_dev *dev;
+	struct aac_aifcmd *cmd;
+	int status;
+
+	fibctx = (struct fib *)context;
+	BUG_ON(fibptr == NULL);
+	dev = fibptr->dev;
+
+	if ((fibptr->hw_fib_va->header.XferState &
+	    cpu_to_le32(NoMoreAifDataAvailable)) ||
+		dev->sa_firmware) {
+		aac_fib_complete(fibptr);
+		aac_fib_free(fibptr);
+		return;
+	}
+
+	aac_intr_normal(dev, 0, 1, 0, fibptr->hw_fib_va);
+
+	aac_fib_init(fibctx);
+	cmd = (struct aac_aifcmd *) fib_data(fibctx);
+	cmd->command = cpu_to_le32(AifReqEvent);
+
+	status = aac_fib_send(AifRequest,
+		fibctx,
+		sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
+		FsaNormal,
+		0, 1,
+		(fib_callback)aac_aif_callback, fibctx);
+}
+
+
+/**
+ *	aac_intr_normal	-	Handle command replies
+ *	@dev: Device
+ *	@index: completion reference
+ *
+ *	This DPC routine will be run when the adapter interrupts us to let us
+ *	know there is a response on our normal priority queue. We will pull off
+ *	all the QEs there are and wake up all the waiters before exiting.
+ */
+unsigned int aac_intr_normal(struct aac_dev *dev, u32 index, int isAif,
+	int isFastResponse, struct hw_fib *aif_fib)
+{
+	unsigned long mflags;
+	dprintk((KERN_INFO "aac_intr_normal(%p,%x)\n", dev, index));
+	if (isAif == 1) {	/* AIF - common */
+		struct hw_fib * hw_fib;
+		struct fib * fib;
+		struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue];
+		unsigned long flags;
+
+		/*
+		 *	Allocate a FIB. For non queued stuff we can just use
+		 * the stack so we are happy. We need a fib object in order to
+		 * manage the linked lists.
+		 */
+		if ((!dev->aif_thread)
+		 || (!(fib = kzalloc(sizeof(struct fib), GFP_ATOMIC))))
+			return 1;
+		if (!(hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC))) {
+			kfree(fib);
+			return 1;
+		}
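+		/*
+		 * Pick up the AIF data: sa_firmware adapters only record the
+		 * event type passed in index, otherwise copy the hw_fib
+		 * either from the buffer passed in or straight out of the
+		 * mapped adapter memory at the given offset.
+		 */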
+		if (dev->sa_firmware) {
+			fib->hbacmd_size = index;	/* store event type */
+		} else if (aif_fib != NULL) {
+			memcpy(hw_fib, aif_fib, sizeof(struct hw_fib));
+		} else {
+			memcpy(hw_fib, (struct hw_fib *)
+				(((uintptr_t)(dev->regs.sa)) + index),
+				sizeof(struct hw_fib));
+		}
+		INIT_LIST_HEAD(&fib->fiblink);
+		fib->type = FSAFS_NTC_FIB_CONTEXT;
+		fib->size = sizeof(struct fib);
+		fib->hw_fib_va = hw_fib;
+		fib->data = hw_fib->data;
+		fib->dev = dev;
+
+		spin_lock_irqsave(q->lock, flags);
+		list_add_tail(&fib->fiblink, &q->cmdq);
+		wake_up_interruptible(&q->cmdready);
+		spin_unlock_irqrestore(q->lock, flags);
+		return 1;
+	} else if (isAif == 2) {	/* AIF - new (SRC) */
+		struct fib *fibctx;
+		struct aac_aifcmd *cmd;
+
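+		/*
+		 * New-style AIFs are pulled by sending an AifRequest back to
+		 * the firmware; aac_aif_callback() handles the reply and
+		 * re-arms itself with another request.
+		 */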
+		fibctx = aac_fib_alloc(dev);
+		if (!fibctx)
+			return 1;
+		aac_fib_init(fibctx);
+
+		cmd = (struct aac_aifcmd *) fib_data(fibctx);
+		cmd->command = cpu_to_le32(AifReqEvent);
+
+		return aac_fib_send(AifRequest,
+			fibctx,
+			sizeof(struct hw_fib)-sizeof(struct aac_fibhdr),
+			FsaNormal,
+			0, 1,
+			(fib_callback)aac_aif_callback, fibctx);
+	} else {
+		struct fib *fib = &dev->fibs[index];
+		int start_callback = 0;
+
+		/*
+		 *	Remove this fib from the Outstanding I/O queue, but
+		 *	only if it has not already been timed out.
+		 *
+		 *	If the fib has already timed out, then just continue.
+		 *	The caller has already been notified that the fib
+		 *	timed out.
+		 */
+		atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending);
+
+		if (unlikely(fib->flags & FIB_CONTEXT_FLAG_TIMED_OUT)) {
+			aac_fib_complete(fib);
+			aac_fib_free(fib);
+			return 0;
+		}
+
+		FIB_COUNTER_INCREMENT(aac_config.FibRecved);
+
+		if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) {
+
+			if (isFastResponse)
+				fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
+
+			if (fib->callback) {
+				start_callback = 1;
+			} else {
+				unsigned long flagv;
+				int complete = 0;
+
+				dprintk((KERN_INFO "event_wait up\n"));
+				spin_lock_irqsave(&fib->event_lock, flagv);
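+				/*
+				 * done == 2 means the original waiter gave up
+				 * (see aac_fib_send), so nobody else will
+				 * complete this fib - do it here instead of
+				 * waking a waiter that is no longer there.
+				 */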
+				if (fib->done == 2) {
+					fib->done = 1;
+					complete = 1;
+				} else {
+					fib->done = 1;
+					up(&fib->event_wait);
+				}
+				spin_unlock_irqrestore(&fib->event_lock, flagv);
+
+				spin_lock_irqsave(&dev->manage_lock, mflags);
+				dev->management_fib_count--;
+				spin_unlock_irqrestore(&dev->manage_lock,
+					mflags);
+
+				FIB_COUNTER_INCREMENT(aac_config.NativeRecved);
+				if (complete)
+					aac_fib_complete(fib);
+			}
+		} else {
+			struct hw_fib *hwfib = fib->hw_fib_va;
+
+			if (isFastResponse) {
+				/* Doctor the fib */
+				*(__le32 *)hwfib->data = cpu_to_le32(ST_OK);
+				hwfib->header.XferState |=
+					cpu_to_le32(AdapterProcessed);
+				fib->flags |= FIB_CONTEXT_FLAG_FASTRESP;
+			}
+
+			if (hwfib->header.Command ==
+				cpu_to_le16(NuFileSystem)) {
+				__le32 *pstatus = (__le32 *)hwfib->data;
+
+				if (*pstatus & cpu_to_le32(0xffff0000))
+					*pstatus = cpu_to_le32(ST_OK);
+			}
+			if (hwfib->header.XferState &
+				cpu_to_le32(NoResponseExpected | Async)) {
+				if (hwfib->header.XferState & cpu_to_le32(
+					NoResponseExpected))
+					FIB_COUNTER_INCREMENT(
+						aac_config.NoResponseRecved);
+				else
+					FIB_COUNTER_INCREMENT(
+						aac_config.AsyncRecved);
+				start_callback = 1;
+			} else {
+				unsigned long flagv;
+				int complete = 0;
+
+				dprintk((KERN_INFO "event_wait up\n"));
+				spin_lock_irqsave(&fib->event_lock, flagv);
+				if (fib->done == 2) {
+					fib->done = 1;
+					complete = 1;
+				} else {
+					fib->done = 1;
+					up(&fib->event_wait);
+				}
+				spin_unlock_irqrestore(&fib->event_lock, flagv);
+
+				spin_lock_irqsave(&dev->manage_lock, mflags);
+				dev->management_fib_count--;
+				spin_unlock_irqrestore(&dev->manage_lock,
+					mflags);
+
+				FIB_COUNTER_INCREMENT(aac_config.NormalRecved);
+				if (complete)
+					aac_fib_complete(fib);
+			}
+		}
+
+		if (start_callback) {
+			/*
+			 * NOTE:  we cannot touch the fib after this
+			 *  call, because it may have been deallocated.
+			 */
+			if (likely(fib->callback && fib->callback_data)) {
+				fib->callback(fib->callback_data, fib);
+			} else {
+				aac_fib_complete(fib);
+				aac_fib_free(fib);
+			}
+
+		}
+		return 0;
+	}
+}
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
new file mode 100644
index 0000000..0444357
--- /dev/null
+++ b/drivers/scsi/aacraid/linit.c
@@ -0,0 +1,2139 @@
+/*
+ *	Adaptec AAC series RAID controller driver
+ *	(c) Copyright 2001 Red Hat Inc.
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *		 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ *   linit.c
+ *
+ * Abstract: Linux Driver entry module for Adaptec RAID Array Controller
+ */
+
+
+#include <linux/compat.h>
+#include <linux/blkdev.h>
+#include <linux/completion.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/pci-aspm.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/syscalls.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsicam.h>
+#include <scsi/scsi_eh.h>
+
+#include "aacraid.h"
+
+#define AAC_DRIVER_VERSION		"1.2.1"
+#ifndef AAC_DRIVER_BRANCH
+#define AAC_DRIVER_BRANCH		""
+#endif
+#define AAC_DRIVERNAME			"aacraid"
+
+#ifdef AAC_DRIVER_BUILD
+#define _str(x) #x
+#define str(x) _str(x)
+#define AAC_DRIVER_FULL_VERSION	AAC_DRIVER_VERSION "[" str(AAC_DRIVER_BUILD) "]" AAC_DRIVER_BRANCH
+#else
+#define AAC_DRIVER_FULL_VERSION	AAC_DRIVER_VERSION AAC_DRIVER_BRANCH
+#endif
+
+MODULE_AUTHOR("Red Hat Inc and Adaptec");
+MODULE_DESCRIPTION("Dell PERC2, 2/Si, 3/Si, 3/Di, "
+		   "Adaptec Advanced Raid Products, "
+		   "HP NetRAID-4M, IBM ServeRAID & ICP SCSI driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(AAC_DRIVER_FULL_VERSION);
+
+static DEFINE_MUTEX(aac_mutex);
+static LIST_HEAD(aac_devices);
+static int aac_cfg_major = AAC_CHARDEV_UNREGISTERED;
+char aac_driver_version[] = AAC_DRIVER_FULL_VERSION;
+
+/*
+ * Because of the way Linux names scsi devices, the order in this table has
+ * become important.  Check for on-board Raid first, add-in cards second.
+ *
+ * Note: The last field is used to index into aac_drivers below.
+ */
+static const struct pci_device_id aac_pci_tbl[] = {
+	{ 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
+	{ 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
+	{ 0x1028, 0x0003, 0x1028, 0x0003, 0, 0, 2 }, /* PERC 3/Si (SlimFast/PERC3Si) */
+	{ 0x1028, 0x0004, 0x1028, 0x00d0, 0, 0, 3 }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF) */
+	{ 0x1028, 0x0002, 0x1028, 0x00d1, 0, 0, 4 }, /* PERC 3/Di (Viper/PERC3DiV) */
+	{ 0x1028, 0x0002, 0x1028, 0x00d9, 0, 0, 5 }, /* PERC 3/Di (Lexus/PERC3DiL) */
+	{ 0x1028, 0x000a, 0x1028, 0x0106, 0, 0, 6 }, /* PERC 3/Di (Jaguar/PERC3DiJ) */
+	{ 0x1028, 0x000a, 0x1028, 0x011b, 0, 0, 7 }, /* PERC 3/Di (Dagger/PERC3DiD) */
+	{ 0x1028, 0x000a, 0x1028, 0x0121, 0, 0, 8 }, /* PERC 3/Di (Boxster/PERC3DiB) */
+	{ 0x9005, 0x0283, 0x9005, 0x0283, 0, 0, 9 }, /* catapult */
+	{ 0x9005, 0x0284, 0x9005, 0x0284, 0, 0, 10 }, /* tomcat */
+	{ 0x9005, 0x0285, 0x9005, 0x0286, 0, 0, 11 }, /* Adaptec 2120S (Crusader) */
+	{ 0x9005, 0x0285, 0x9005, 0x0285, 0, 0, 12 }, /* Adaptec 2200S (Vulcan) */
+	{ 0x9005, 0x0285, 0x9005, 0x0287, 0, 0, 13 }, /* Adaptec 2200S (Vulcan-2m) */
+	{ 0x9005, 0x0285, 0x17aa, 0x0286, 0, 0, 14 }, /* Legend S220 (Legend Crusader) */
+	{ 0x9005, 0x0285, 0x17aa, 0x0287, 0, 0, 15 }, /* Legend S230 (Legend Vulcan) */
+
+	{ 0x9005, 0x0285, 0x9005, 0x0288, 0, 0, 16 }, /* Adaptec 3230S (Harrier) */
+	{ 0x9005, 0x0285, 0x9005, 0x0289, 0, 0, 17 }, /* Adaptec 3240S (Tornado) */
+	{ 0x9005, 0x0285, 0x9005, 0x028a, 0, 0, 18 }, /* ASR-2020ZCR SCSI PCI-X ZCR (Skyhawk) */
+	{ 0x9005, 0x0285, 0x9005, 0x028b, 0, 0, 19 }, /* ASR-2025ZCR SCSI SO-DIMM PCI-X ZCR (Terminator) */
+	{ 0x9005, 0x0286, 0x9005, 0x028c, 0, 0, 20 }, /* ASR-2230S + ASR-2230SLP PCI-X (Lancer) */
+	{ 0x9005, 0x0286, 0x9005, 0x028d, 0, 0, 21 }, /* ASR-2130S (Lancer) */
+	{ 0x9005, 0x0286, 0x9005, 0x029b, 0, 0, 22 }, /* AAR-2820SA (Intruder) */
+	{ 0x9005, 0x0286, 0x9005, 0x029c, 0, 0, 23 }, /* AAR-2620SA (Intruder) */
+	{ 0x9005, 0x0286, 0x9005, 0x029d, 0, 0, 24 }, /* AAR-2420SA (Intruder) */
+	{ 0x9005, 0x0286, 0x9005, 0x029e, 0, 0, 25 }, /* ICP9024RO (Lancer) */
+	{ 0x9005, 0x0286, 0x9005, 0x029f, 0, 0, 26 }, /* ICP9014RO (Lancer) */
+	{ 0x9005, 0x0286, 0x9005, 0x02a0, 0, 0, 27 }, /* ICP9047MA (Lancer) */
+	{ 0x9005, 0x0286, 0x9005, 0x02a1, 0, 0, 28 }, /* ICP9087MA (Lancer) */
+	{ 0x9005, 0x0286, 0x9005, 0x02a3, 0, 0, 29 }, /* ICP5445AU (Hurricane44) */
+	{ 0x9005, 0x0285, 0x9005, 0x02a4, 0, 0, 30 }, /* ICP9085LI (Marauder-X) */
+	{ 0x9005, 0x0285, 0x9005, 0x02a5, 0, 0, 31 }, /* ICP5085BR (Marauder-E) */
+	{ 0x9005, 0x0286, 0x9005, 0x02a6, 0, 0, 32 }, /* ICP9067MA (Intruder-6) */
+	{ 0x9005, 0x0287, 0x9005, 0x0800, 0, 0, 33 }, /* Themisto Jupiter Platform */
+	{ 0x9005, 0x0200, 0x9005, 0x0200, 0, 0, 33 }, /* Themisto Jupiter Platform */
+	{ 0x9005, 0x0286, 0x9005, 0x0800, 0, 0, 34 }, /* Callisto Jupiter Platform */
+	{ 0x9005, 0x0285, 0x9005, 0x028e, 0, 0, 35 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */
+	{ 0x9005, 0x0285, 0x9005, 0x028f, 0, 0, 36 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */
+	{ 0x9005, 0x0285, 0x9005, 0x0290, 0, 0, 37 }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */
+	{ 0x9005, 0x0285, 0x1028, 0x0291, 0, 0, 38 }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */
+	{ 0x9005, 0x0285, 0x9005, 0x0292, 0, 0, 39 }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */
+	{ 0x9005, 0x0285, 0x9005, 0x0293, 0, 0, 40 }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */
+	{ 0x9005, 0x0285, 0x9005, 0x0294, 0, 0, 41 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */
+	{ 0x9005, 0x0285, 0x103C, 0x3227, 0, 0, 42 }, /* AAR-2610SA PCI SATA 6ch */
+	{ 0x9005, 0x0285, 0x9005, 0x0296, 0, 0, 43 }, /* ASR-2240S (SabreExpress) */
+	{ 0x9005, 0x0285, 0x9005, 0x0297, 0, 0, 44 }, /* ASR-4005 */
+	{ 0x9005, 0x0285, 0x1014, 0x02F2, 0, 0, 45 }, /* IBM 8i (AvonPark) */
+	{ 0x9005, 0x0285, 0x1014, 0x0312, 0, 0, 45 }, /* IBM 8i (AvonPark Lite) */
+	{ 0x9005, 0x0286, 0x1014, 0x9580, 0, 0, 46 }, /* IBM 8k/8k-l8 (Aurora) */
+	{ 0x9005, 0x0286, 0x1014, 0x9540, 0, 0, 47 }, /* IBM 8k/8k-l4 (Aurora Lite) */
+	{ 0x9005, 0x0285, 0x9005, 0x0298, 0, 0, 48 }, /* ASR-4000 (BlackBird) */
+	{ 0x9005, 0x0285, 0x9005, 0x0299, 0, 0, 49 }, /* ASR-4800SAS (Marauder-X) */
+	{ 0x9005, 0x0285, 0x9005, 0x029a, 0, 0, 50 }, /* ASR-4805SAS (Marauder-E) */
+	{ 0x9005, 0x0286, 0x9005, 0x02a2, 0, 0, 51 }, /* ASR-3800 (Hurricane44) */
+
+	{ 0x9005, 0x0285, 0x1028, 0x0287, 0, 0, 52 }, /* Perc 320/DC*/
+	{ 0x1011, 0x0046, 0x9005, 0x0365, 0, 0, 53 }, /* Adaptec 5400S (Mustang)*/
+	{ 0x1011, 0x0046, 0x9005, 0x0364, 0, 0, 54 }, /* Adaptec 5400S (Mustang)*/
+	{ 0x1011, 0x0046, 0x9005, 0x1364, 0, 0, 55 }, /* Dell PERC2/QC */
+	{ 0x1011, 0x0046, 0x103c, 0x10c2, 0, 0, 56 }, /* HP NetRAID-4M */
+
+	{ 0x9005, 0x0285, 0x1028, PCI_ANY_ID, 0, 0, 57 }, /* Dell Catchall */
+	{ 0x9005, 0x0285, 0x17aa, PCI_ANY_ID, 0, 0, 58 }, /* Legend Catchall */
+	{ 0x9005, 0x0285, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 59 }, /* Adaptec Catch All */
+	{ 0x9005, 0x0286, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 60 }, /* Adaptec Rocket Catch All */
+	{ 0x9005, 0x0288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 61 }, /* Adaptec NEMER/ARK Catch All */
+	{ 0x9005, 0x028b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 62 }, /* Adaptec PMC Series 6 (Tupelo) */
+	{ 0x9005, 0x028c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 63 }, /* Adaptec PMC Series 7 (Denali) */
+	{ 0x9005, 0x028d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 64 }, /* Adaptec PMC Series 8 */
+	{ 0,}
+};
+MODULE_DEVICE_TABLE(pci, aac_pci_tbl);
+
+/*
+ * dmb - For now we add the number of channels to this structure.
+ * In the future we should add a fib that reports the number of channels
+ * for the card.  At that time we can remove the channels from here
+ */
+static struct aac_driver_ident aac_drivers[] = {
+	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 2/Si (Iguana/PERC2Si) */
+	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Opal/PERC3Di) */
+	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Si (SlimFast/PERC3Si) */
+	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Iguana FlipChip/PERC3DiF) */
+	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Viper/PERC3DiV) */
+	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Lexus/PERC3DiL) */
+	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Jaguar/PERC3DiJ) */
+	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Dagger/PERC3DiD) */
+	{ aac_rx_init, "percraid", "DELL    ", "PERCRAID        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* PERC 3/Di (Boxster/PERC3DiB) */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "catapult        ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* catapult */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "tomcat          ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* tomcat */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 2120S   ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG },		      /* Adaptec 2120S (Crusader) */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 2200S   ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG },		      /* Adaptec 2200S (Vulcan) */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 2200S   ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Adaptec 2200S (Vulcan-2m) */
+	{ aac_rx_init, "aacraid",  "Legend  ", "Legend S220     ", 1, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S220 (Legend Crusader) */
+	{ aac_rx_init, "aacraid",  "Legend  ", "Legend S230     ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend S230 (Legend Vulcan) */
+
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 3230S   ", 2 }, /* Adaptec 3230S (Harrier) */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "Adaptec 3240S   ", 2 }, /* Adaptec 3240S (Tornado) */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-2020ZCR     ", 2 }, /* ASR-2020ZCR SCSI PCI-X ZCR (Skyhawk) */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-2025ZCR     ", 2 }, /* ASR-2025ZCR SCSI SO-DIMM PCI-X ZCR (Terminator) */
+	{ aac_rkt_init, "aacraid",  "ADAPTEC ", "ASR-2230S PCI-X ", 2 }, /* ASR-2230S + ASR-2230SLP PCI-X (Lancer) */
+	{ aac_rkt_init, "aacraid",  "ADAPTEC ", "ASR-2130S PCI-X ", 1 }, /* ASR-2130S (Lancer) */
+	{ aac_rkt_init, "aacraid",  "ADAPTEC ", "AAR-2820SA      ", 1 }, /* AAR-2820SA (Intruder) */
+	{ aac_rkt_init, "aacraid",  "ADAPTEC ", "AAR-2620SA      ", 1 }, /* AAR-2620SA (Intruder) */
+	{ aac_rkt_init, "aacraid",  "ADAPTEC ", "AAR-2420SA      ", 1 }, /* AAR-2420SA (Intruder) */
+	{ aac_rkt_init, "aacraid",  "ICP     ", "ICP9024RO       ", 2 }, /* ICP9024RO (Lancer) */
+	{ aac_rkt_init, "aacraid",  "ICP     ", "ICP9014RO       ", 1 }, /* ICP9014RO (Lancer) */
+	{ aac_rkt_init, "aacraid",  "ICP     ", "ICP9047MA       ", 1 }, /* ICP9047MA (Lancer) */
+	{ aac_rkt_init, "aacraid",  "ICP     ", "ICP9087MA       ", 1 }, /* ICP9087MA (Lancer) */
+	{ aac_rkt_init, "aacraid",  "ICP     ", "ICP5445AU       ", 1 }, /* ICP5445AU (Hurricane44) */
+	{ aac_rx_init, "aacraid",  "ICP     ", "ICP9085LI       ", 1 }, /* ICP9085LI (Marauder-X) */
+	{ aac_rx_init, "aacraid",  "ICP     ", "ICP5085BR       ", 1 }, /* ICP5085BR (Marauder-E) */
+	{ aac_rkt_init, "aacraid",  "ICP     ", "ICP9067MA       ", 1 }, /* ICP9067MA (Intruder-6) */
+	{ NULL        , "aacraid",  "ADAPTEC ", "Themisto        ", 0, AAC_QUIRK_SLAVE }, /* Jupiter Platform */
+	{ aac_rkt_init, "aacraid",  "ADAPTEC ", "Callisto        ", 2, AAC_QUIRK_MASTER }, /* Jupiter Platform */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-2020SA       ", 1 }, /* ASR-2020SA SATA PCI-X ZCR (Skyhawk) */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-2025SA       ", 1 }, /* ASR-2025SA SATA SO-DIMM PCI-X ZCR (Terminator) */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "AAR-2410SA SATA ", 1, AAC_QUIRK_17SG }, /* AAR-2410SA PCI SATA 4ch (Jaguar II) */
+	{ aac_rx_init, "aacraid",  "DELL    ", "CERC SR2        ", 1, AAC_QUIRK_17SG }, /* CERC SATA RAID 2 PCI SATA 6ch (DellCorsair) */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "AAR-2810SA SATA ", 1, AAC_QUIRK_17SG }, /* AAR-2810SA PCI SATA 8ch (Corsair-8) */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "AAR-21610SA SATA", 1, AAC_QUIRK_17SG }, /* AAR-21610SA PCI SATA 16ch (Corsair-16) */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-2026ZCR     ", 1 }, /* ESD SO-DIMM PCI-X SATA ZCR (Prowler) */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "AAR-2610SA      ", 1 }, /* SATA 6Ch (Bearcat) */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-2240S       ", 1 }, /* ASR-2240S (SabreExpress) */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-4005        ", 1 }, /* ASR-4005 */
+	{ aac_rx_init, "ServeRAID","IBM     ", "ServeRAID 8i    ", 1 }, /* IBM 8i (AvonPark) */
+	{ aac_rkt_init, "ServeRAID","IBM     ", "ServeRAID 8k-l8 ", 1 }, /* IBM 8k/8k-l8 (Aurora) */
+	{ aac_rkt_init, "ServeRAID","IBM     ", "ServeRAID 8k-l4 ", 1 }, /* IBM 8k/8k-l4 (Aurora Lite) */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-4000        ", 1 }, /* ASR-4000 (BlackBird & AvonPark) */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-4800SAS     ", 1 }, /* ASR-4800SAS (Marauder-X) */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "ASR-4805SAS     ", 1 }, /* ASR-4805SAS (Marauder-E) */
+	{ aac_rkt_init, "aacraid",  "ADAPTEC ", "ASR-3800        ", 1 }, /* ASR-3800 (Hurricane44) */
+
+	{ aac_rx_init, "percraid", "DELL    ", "PERC 320/DC     ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG }, /* Perc 320/DC*/
+	{ aac_sa_init, "aacraid",  "ADAPTEC ", "Adaptec 5400S   ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
+	{ aac_sa_init, "aacraid",  "ADAPTEC ", "AAC-364         ", 4, AAC_QUIRK_34SG }, /* Adaptec 5400S (Mustang)*/
+	{ aac_sa_init, "percraid", "DELL    ", "PERCRAID        ", 4, AAC_QUIRK_34SG }, /* Dell PERC2/QC */
+	{ aac_sa_init, "hpnraid",  "HP      ", "NetRAID         ", 4, AAC_QUIRK_34SG }, /* HP NetRAID-4M */
+
+	{ aac_rx_init, "aacraid",  "DELL    ", "RAID            ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Dell Catchall */
+	{ aac_rx_init, "aacraid",  "Legend  ", "RAID            ", 2, AAC_QUIRK_31BIT | AAC_QUIRK_34SG | AAC_QUIRK_SCSI_32 }, /* Legend Catchall */
+	{ aac_rx_init, "aacraid",  "ADAPTEC ", "RAID            ", 2 }, /* Adaptec Catch All */
+	{ aac_rkt_init, "aacraid", "ADAPTEC ", "RAID            ", 2 }, /* Adaptec Rocket Catch All */
+	{ aac_nark_init, "aacraid", "ADAPTEC ", "RAID           ", 2 }, /* Adaptec NEMER/ARK Catch All */
+	{ aac_src_init, "aacraid", "ADAPTEC ", "RAID            ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 6 (Tupelo) */
+	{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 7 (Denali) */
+	{ aac_srcv_init, "aacraid", "ADAPTEC ", "RAID            ", 2, AAC_QUIRK_SRC }, /* Adaptec PMC Series 8 */
+};
+
+/**
+ *	aac_queuecommand	-	queue a SCSI command
+ *	@shost:		Scsi host owning the command
+ *	@cmd:		SCSI command to queue
+ *
+ *	Queues a command for execution by the associated Host Adapter.
+ *
+ *	TODO: unify with aac_scsi_cmd().
+ */
+
+static int aac_queuecommand(struct Scsi_Host *shost,
+			    struct scsi_cmnd *cmd)
+{
+	int r = 0;
+	cmd->SCp.phase = AAC_OWNER_LOWLEVEL;
+	r = (aac_scsi_cmd(cmd) ? FAILED : 0);
+	return r;
+}
+
+/**
+ *	aac_info		-	Returns the host adapter name
+ *	@shost:		Scsi host to report on
+ *
+ *	Returns a static string describing the device in question
+ */
+
+static const char *aac_info(struct Scsi_Host *shost)
+{
+	struct aac_dev *dev = (struct aac_dev *)shost->hostdata;
+	return aac_drivers[dev->cardtype].name;
+}
+
+/**
+ *	aac_get_driver_ident
+ *	@devtype: index into lookup table
+ *
+ *	Returns a pointer to the entry in the driver lookup table.
+ */
+
+struct aac_driver_ident* aac_get_driver_ident(int devtype)
+{
+	return &aac_drivers[devtype];
+}
+
+/**
+ *	aac_biosparm	-	return BIOS parameters for disk
+ *	@sdev: The scsi device corresponding to the disk
+ *	@bdev: the block device corresponding to the disk
+ *	@capacity: the sector capacity of the disk
+ *	@geom: geometry block to fill in
+ *
+ *	Return the Heads/Sectors/Cylinders BIOS Disk Parameters for Disk.
+ *	The default disk geometry is 64 heads, 32 sectors, and the appropriate
+ *	number of cylinders so as not to exceed drive capacity.  In order for
+ *	disks equal to or larger than 1 GB to be addressable by the BIOS
+ *	without exceeding the BIOS limitation of 1024 cylinders, Extended
+ *	Translation should be enabled.   With Extended Translation enabled,
+ *	drives between 1 GB inclusive and 2 GB exclusive are given a disk
+ *	geometry of 128 heads and 32 sectors, and drives above 2 GB inclusive
+ *	are given a disk geometry of 255 heads and 63 sectors.  However, if
+ *	the BIOS detects that the Extended Translation setting does not match
+ *	the geometry in the partition table, then the translation inferred
+ *	from the partition table will be used by the BIOS, and a warning may
+ *	be displayed.
+ */
+
+static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
+			sector_t capacity, int *geom)
+{
+	struct diskparm *param = (struct diskparm *)geom;
+	unsigned char *buf;
+
+	dprintk((KERN_DEBUG "aac_biosparm.\n"));
+
+	/*
+	 *	Assuming extended translation is enabled - #REVISIT#
+	 */
+	if (capacity >= 2 * 1024 * 1024) { /* 1 GB in 512 byte sectors */
+		if (capacity >= 4 * 1024 * 1024) { /* 2 GB in 512 byte sectors */
+			param->heads = 255;
+			param->sectors = 63;
+		} else {
+			param->heads = 128;
+			param->sectors = 32;
+		}
+	} else {
+		param->heads = 64;
+		param->sectors = 32;
+	}
+
+	param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors);
+
+	/*
+	 *	Read the first 1024 bytes from the disk device. If the boot
+	 *	sector partition table is valid, search for a partition table
+	 *	entry whose end_head matches one of the standard geometry
+	 *	translations (64/32, 128/32, 255/63).
+	 */
+	buf = scsi_bios_ptable(bdev);
+	if (!buf)
+		return 0;
+	if (*(__le16 *)(buf + 0x40) == cpu_to_le16(0xaa55)) {
+		struct partition *first = (struct partition *)buf;
+		struct partition *entry = first;
+		int saved_cylinders = param->cylinders;
+		int num;
+		unsigned char end_head, end_sec;
+
+		for (num = 0; num < 4; num++) {
+			end_head = entry->end_head;
+			end_sec = entry->end_sector & 0x3f;
+
+			if (end_head == 63) {
+				param->heads = 64;
+				param->sectors = 32;
+				break;
+			} else if (end_head == 127) {
+				param->heads = 128;
+				param->sectors = 32;
+				break;
+			} else if (end_head == 254) {
+				param->heads = 255;
+				param->sectors = 63;
+				break;
+			}
+			entry++;
+		}
+
+		if (num == 4) {
+			end_head = first->end_head;
+			end_sec = first->end_sector & 0x3f;
+		}
+
+		param->cylinders = cap_to_cyls(capacity, param->heads * param->sectors);
+		if (num < 4 && end_sec == param->sectors) {
+			if (param->cylinders != saved_cylinders)
+				dprintk((KERN_DEBUG "Adopting geometry: heads=%d, sectors=%d from partition table %d.\n",
+					param->heads, param->sectors, num));
+		} else if (end_head > 0 || end_sec > 0) {
+			dprintk((KERN_DEBUG "Strange geometry: heads=%d, sectors=%d in partition table %d.\n",
+				end_head + 1, end_sec, num));
+			dprintk((KERN_DEBUG "Using geometry: heads=%d, sectors=%d.\n",
+					param->heads, param->sectors));
+		}
+	}
+	kfree(buf);
+	return 0;
+}
+
+/**
+ *	aac_slave_configure		-	compute queue depths
+ *	@sdev:	SCSI device we are considering
+ *
+ *	Selects queue depths for each target device based on the host adapter's
+ *	total capacity and the queue depth supported by the target device.
+ *	A queue depth of one automatically disables tagged queueing.
+ */
+
+static int aac_slave_configure(struct scsi_device *sdev)
+{
+	struct aac_dev *aac = (struct aac_dev *)sdev->host->hostdata;
+	int chn, tid;
+	unsigned int depth = 0;
+	unsigned int set_timeout = 0;
+	bool set_qd_dev_type = false;
+	u8 devtype = 0;
+
+	chn = aac_logical_to_phys(sdev_channel(sdev));
+	tid = sdev_id(sdev);
+	if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS && aac->sa_firmware) {
+		devtype = aac->hba_map[chn][tid].devtype;
+
+		if (devtype == AAC_DEVTYPE_NATIVE_RAW)
+			depth = aac->hba_map[chn][tid].qd_limit;
+		else if (devtype == AAC_DEVTYPE_ARC_RAW)
+			set_qd_dev_type = true;
+
+		set_timeout = 1;
+		goto common_config;
+	}
+
+	if (aac->jbod && (sdev->type == TYPE_DISK))
+		sdev->removable = 1;
+
+	if (sdev->type == TYPE_DISK
+	 && sdev_channel(sdev) != CONTAINER_CHANNEL
+	 && (!aac->jbod || sdev->inq_periph_qual)
+	 && (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))) {
+
+		if (expose_physicals == 0)
+			return -ENXIO;
+
+		if (expose_physicals < 0)
+			sdev->no_uld_attach = 1;
+	}
+
+	if (sdev->tagged_supported
+	 &&  sdev->type == TYPE_DISK
+	 &&  (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))
+	 && !sdev->no_uld_attach) {
+
+		struct scsi_device * dev;
+		struct Scsi_Host *host = sdev->host;
+		unsigned num_lsu = 0;
+		unsigned num_one = 0;
+		unsigned cid;
+
+		set_timeout = 1;
+
+		for (cid = 0; cid < aac->maximum_num_containers; ++cid)
+			if (aac->fsa_dev[cid].valid)
+				++num_lsu;
+
+		__shost_for_each_device(dev, host) {
+			if (dev->tagged_supported
+			 && dev->type == TYPE_DISK
+			 && (!aac->raid_scsi_mode || (sdev_channel(sdev) != 2))
+			 && !dev->no_uld_attach) {
+				if ((sdev_channel(dev) != CONTAINER_CHANNEL)
+				 || !aac->fsa_dev[sdev_id(dev)].valid) {
+					++num_lsu;
+				}
+			} else {
+				++num_one;
+			}
+		}
+
+		if (num_lsu == 0)
+			++num_lsu;
+
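+		/*
+		 * Reserve one slot for every device counted in num_one and
+		 * split the rest of the host queue evenly among the logical
+		 * storage units.
+		 */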
+		depth = (host->can_queue - num_one) / num_lsu;
+
+		if (sdev_channel(sdev) != NATIVE_CHANNEL)
+			goto common_config;
+
+		set_qd_dev_type = true;
+
+	}
+
+common_config:
+
+	/*
+	 * Check if SATA drive
+	 */
+	if (set_qd_dev_type) {
+		if (strncmp(sdev->vendor, "ATA", 3) == 0)
+			depth = 32;
+		else
+			depth = 64;
+	}
+
+	/*
+	 * Firmware has an individual device recovery time typically
+	 * of 35 seconds, give us a margin.
+	 */
+	if (set_timeout && sdev->request_queue->rq_timeout < (45 * HZ))
+		blk_queue_rq_timeout(sdev->request_queue, 45*HZ);
+
+	if (depth > 256)
+		depth = 256;
+	else if (depth < 1)
+		depth = 1;
+
+	scsi_change_queue_depth(sdev, depth);
+
+	sdev->tagged_supported = 1;
+
+	return 0;
+}
+
+/**
+ *	aac_change_queue_depth		-	alter queue depths
+ *	@sdev:	SCSI device we are considering
+ *	@depth:	desired queue depth
+ *
+ *	Alters queue depths for target device based on the host adapter's
+ *	total capacity and the queue depth supported by the target device.
+ */
+
+static int aac_change_queue_depth(struct scsi_device *sdev, int depth)
+{
+	struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata);
+	int chn, tid, is_native_device = 0;
+
+	chn = aac_logical_to_phys(sdev_channel(sdev));
+	tid = sdev_id(sdev);
+	if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS &&
+		aac->hba_map[chn][tid].devtype == AAC_DEVTYPE_NATIVE_RAW)
+		is_native_device = 1;
+
+	if (sdev->tagged_supported && (sdev->type == TYPE_DISK) &&
+	    (sdev_channel(sdev) == CONTAINER_CHANNEL)) {
+		struct scsi_device * dev;
+		struct Scsi_Host *host = sdev->host;
+		unsigned num = 0;
+
+		__shost_for_each_device(dev, host) {
+			if (dev->tagged_supported && (dev->type == TYPE_DISK) &&
+			    (sdev_channel(dev) == CONTAINER_CHANNEL))
+				++num;
+			++num;
+		}
+		if (num >= host->can_queue)
+			num = host->can_queue - 1;
+		if (depth > (host->can_queue - num))
+			depth = host->can_queue - num;
+		if (depth > 256)
+			depth = 256;
+		else if (depth < 2)
+			depth = 2;
+		return scsi_change_queue_depth(sdev, depth);
+	} else if (is_native_device) {
+		scsi_change_queue_depth(sdev, aac->hba_map[chn][tid].qd_limit);
+	} else {
+		scsi_change_queue_depth(sdev, 1);
+	}
+	return sdev->queue_depth;
+}
+
+static ssize_t aac_show_raid_level(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata);
+	if (sdev_channel(sdev) != CONTAINER_CHANNEL)
+		return snprintf(buf, PAGE_SIZE, sdev->no_uld_attach
+		  ? "Hidden\n" :
+		  ((aac->jbod && (sdev->type == TYPE_DISK)) ? "JBOD\n" : ""));
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+	  get_container_type(aac->fsa_dev[sdev_id(sdev)].type));
+}
+
+static struct device_attribute aac_raid_level_attr = {
+	.attr = {
+		.name = "level",
+		.mode = S_IRUGO,
+	},
+	.show = aac_show_raid_level
+};
+
+static ssize_t aac_show_unique_id(struct device *dev,
+	     struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct aac_dev *aac = (struct aac_dev *)(sdev->host->hostdata);
+	unsigned char sn[16];
+
+	memset(sn, 0, sizeof(sn));
+
+	if (sdev_channel(sdev) == CONTAINER_CHANNEL)
+		memcpy(sn, aac->fsa_dev[sdev_id(sdev)].identifier, sizeof(sn));
+
+	return snprintf(buf, 16 * 2 + 2,
+		"%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n",
+		sn[0], sn[1], sn[2], sn[3],
+		sn[4], sn[5], sn[6], sn[7],
+		sn[8], sn[9], sn[10], sn[11],
+		sn[12], sn[13], sn[14], sn[15]);
+}
+
+static struct device_attribute aac_unique_id_attr = {
+	.attr = {
+		.name = "unique_id",
+		.mode = 0444,
+	},
+	.show = aac_show_unique_id
+};
+
+static struct device_attribute *aac_dev_attrs[] = {
+	&aac_raid_level_attr,
+	&aac_unique_id_attr,
+	NULL,
+};
+
+static int aac_ioctl(struct scsi_device *sdev, int cmd, void __user * arg)
+{
+	struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
+	if (!capable(CAP_SYS_RAWIO))
+		return -EPERM;
+	return aac_do_ioctl(dev, cmd, arg);
+}
+
+static int get_num_of_incomplete_fibs(struct aac_dev *aac)
+{
+	unsigned long flags;
+	struct scsi_device *sdev = NULL;
+	struct Scsi_Host *shost = aac->scsi_host_ptr;
+	struct scsi_cmnd *scmnd = NULL;
+	struct device *ctrl_dev;
+
+	int mlcnt  = 0;
+	int llcnt  = 0;
+	int ehcnt  = 0;
+	int fwcnt  = 0;
+	int krlcnt = 0;
+
+	__shost_for_each_device(sdev, shost) {
+		spin_lock_irqsave(&sdev->list_lock, flags);
+		list_for_each_entry(scmnd, &sdev->cmd_list, list) {
+			switch (scmnd->SCp.phase) {
+			case AAC_OWNER_FIRMWARE:
+				fwcnt++;
+				break;
+			case AAC_OWNER_ERROR_HANDLER:
+				ehcnt++;
+				break;
+			case AAC_OWNER_LOWLEVEL:
+				llcnt++;
+				break;
+			case AAC_OWNER_MIDLEVEL:
+				mlcnt++;
+				break;
+			default:
+				krlcnt++;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(&sdev->list_lock, flags);
+	}
+
+	ctrl_dev = &aac->pdev->dev;
+
+	dev_info(ctrl_dev, "outstanding cmd: midlevel-%d\n", mlcnt);
+	dev_info(ctrl_dev, "outstanding cmd: lowlevel-%d\n", llcnt);
+	dev_info(ctrl_dev, "outstanding cmd: error handler-%d\n", ehcnt);
+	dev_info(ctrl_dev, "outstanding cmd: firmware-%d\n", fwcnt);
+	dev_info(ctrl_dev, "outstanding cmd: kernel-%d\n", krlcnt);
+
+	return mlcnt + llcnt + ehcnt + fwcnt;
+}
+
+static int aac_eh_abort(struct scsi_cmnd* cmd)
+{
+	struct scsi_device * dev = cmd->device;
+	struct Scsi_Host * host = dev->host;
+	struct aac_dev * aac = (struct aac_dev *)host->hostdata;
+	int count, found;
+	u32 bus, cid;
+	int ret = FAILED;
+
+	if (aac_adapter_check_health(aac))
+		return ret;
+
+	bus = aac_logical_to_phys(scmd_channel(cmd));
+	cid = scmd_id(cmd);
+	if (aac->hba_map[bus][cid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
+		struct fib *fib;
+		struct aac_hba_tm_req *tmf;
+		int status;
+		u64 address;
+
+		pr_err("%s: Host adapter abort request (%d,%d,%d,%d)\n",
+		 AAC_DRIVERNAME,
+		 host->host_no, sdev_channel(dev), sdev_id(dev), (int)dev->lun);
+
+		found = 0;
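+		/*
+		 * Make sure the command really is outstanding on a native
+		 * HBA fib before building an abort TMF for it.
+		 */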
+		for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
+			fib = &aac->fibs[count];
+			if (*(u8 *)fib->hw_fib_va != 0 &&
+				(fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) &&
+				(fib->callback_data == cmd)) {
+				found = 1;
+				break;
+			}
+		}
+		if (!found)
+			return ret;
+
+		/* start a HBA_TMF_ABORT_TASK TMF request */
+		fib = aac_fib_alloc(aac);
+		if (!fib)
+			return ret;
+
+		tmf = (struct aac_hba_tm_req *)fib->hw_fib_va;
+		memset(tmf, 0, sizeof(*tmf));
+		tmf->tmf = HBA_TMF_ABORT_TASK;
+		tmf->it_nexus = aac->hba_map[bus][cid].rmw_nexus;
+		tmf->lun[1] = cmd->device->lun;
+
+		address = (u64)fib->hw_error_pa;
+		tmf->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
+		tmf->error_ptr_lo = cpu_to_le32((u32)(address & 0xffffffff));
+		tmf->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
+
+		fib->hbacmd_size = sizeof(*tmf);
+		cmd->SCp.sent_command = 0;
+
+		status = aac_hba_send(HBA_IU_TYPE_SCSI_TM_REQ, fib,
+				  (fib_callback) aac_hba_callback,
+				  (void *) cmd);
+
+		/* Wait up to 15 secs for completion */
+		for (count = 0; count < 15; ++count) {
+			if (cmd->SCp.sent_command) {
+				ret = SUCCESS;
+				break;
+			}
+			msleep(1000);
+		}
+
+		if (ret != SUCCESS)
+			pr_err("%s: Host adapter abort request timed out\n",
+			AAC_DRIVERNAME);
+	} else {
+		pr_err(
+			"%s: Host adapter abort request.\n"
+			"%s: Outstanding commands on (%d,%d,%d,%d):\n",
+			AAC_DRIVERNAME, AAC_DRIVERNAME,
+			host->host_no, sdev_channel(dev), sdev_id(dev),
+			(int)dev->lun);
+		switch (cmd->cmnd[0]) {
+		case SERVICE_ACTION_IN_16:
+			if (!(aac->raw_io_interface) ||
+			    !(aac->raw_io_64) ||
+			    ((cmd->cmnd[1] & 0x1f) != SAI_READ_CAPACITY_16))
+				break;
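+			/* fall through */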
+		case INQUIRY:
+		case READ_CAPACITY:
+			/*
+			 * Mark associated FIB to not complete,
+			 * eh handler does this
+			 */
+			for (count = 0;
+				count < (host->can_queue + AAC_NUM_MGT_FIB);
+				++count) {
+				struct fib *fib = &aac->fibs[count];
+
+				if (fib->hw_fib_va->header.XferState &&
+				(fib->flags & FIB_CONTEXT_FLAG) &&
+				(fib->callback_data == cmd)) {
+					fib->flags |=
+						FIB_CONTEXT_FLAG_TIMED_OUT;
+					cmd->SCp.phase =
+						AAC_OWNER_ERROR_HANDLER;
+					ret = SUCCESS;
+				}
+			}
+			break;
+		case TEST_UNIT_READY:
+			/*
+			 * Mark associated FIB to not complete,
+			 * eh handler does this
+			 */
+			for (count = 0;
+				count < (host->can_queue + AAC_NUM_MGT_FIB);
+				++count) {
+				struct scsi_cmnd *command;
+				struct fib *fib = &aac->fibs[count];
+
+				command = fib->callback_data;
+
+				if ((fib->hw_fib_va->header.XferState &
+					cpu_to_le32
+					(Async | NoResponseExpected)) &&
+					(fib->flags & FIB_CONTEXT_FLAG) &&
+					((command)) &&
+					(command->device == cmd->device)) {
+					fib->flags |=
+						FIB_CONTEXT_FLAG_TIMED_OUT;
+					command->SCp.phase =
+						AAC_OWNER_ERROR_HANDLER;
+					if (command == cmd)
+						ret = SUCCESS;
+				}
+			}
+			break;
+		}
+	}
+	return ret;
+}
+
+static u8 aac_eh_tmf_lun_reset_fib(struct aac_hba_map_info *info,
+				   struct fib *fib, u64 tmf_lun)
+{
+	struct aac_hba_tm_req *tmf;
+	u64 address;
+
+	/* start a HBA_TMF_LUN_RESET TMF request */
+	tmf = (struct aac_hba_tm_req *)fib->hw_fib_va;
+	memset(tmf, 0, sizeof(*tmf));
+	tmf->tmf = HBA_TMF_LUN_RESET;
+	tmf->it_nexus = info->rmw_nexus;
+	int_to_scsilun(tmf_lun, (struct scsi_lun *)tmf->lun);
+
+	address = (u64)fib->hw_error_pa;
+	tmf->error_ptr_hi = cpu_to_le32
+		((u32)(address >> 32));
+	tmf->error_ptr_lo = cpu_to_le32
+		((u32)(address & 0xffffffff));
+	tmf->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
+	fib->hbacmd_size = sizeof(*tmf);
+
+	return HBA_IU_TYPE_SCSI_TM_REQ;
+}
+
+static u8 aac_eh_tmf_hard_reset_fib(struct aac_hba_map_info *info,
+				    struct fib *fib)
+{
+	struct aac_hba_reset_req *rst;
+	u64 address;
+
+	/* already tried, start a hard reset now */
+	rst = (struct aac_hba_reset_req *)fib->hw_fib_va;
+	memset(rst, 0, sizeof(*rst));
+	rst->it_nexus = info->rmw_nexus;
+
+	address = (u64)fib->hw_error_pa;
+	rst->error_ptr_hi = cpu_to_le32((u32)(address >> 32));
+	rst->error_ptr_lo = cpu_to_le32
+		((u32)(address & 0xffffffff));
+	rst->error_length = cpu_to_le32(FW_ERROR_BUFFER_SIZE);
+	fib->hbacmd_size = sizeof(*rst);
+
+	return HBA_IU_TYPE_SATA_REQ;
+}
+
+void aac_tmf_callback(void *context, struct fib *fibptr)
+{
+	struct aac_hba_resp *err =
+		&((struct aac_native_hba *)fibptr->hw_fib_va)->resp.err;
+	struct aac_hba_map_info *info = context;
+	int res;
+
+	switch (err->service_response) {
+	case HBA_RESP_SVCRES_TMF_REJECTED:
+		res = -1;
+		break;
+	case HBA_RESP_SVCRES_TMF_LUN_INVALID:
+		res = 0;
+		break;
+	case HBA_RESP_SVCRES_TMF_COMPLETE:
+	case HBA_RESP_SVCRES_TMF_SUCCEEDED:
+		res = 0;
+		break;
+	default:
+		res = -2;
+		break;
+	}
+	aac_fib_complete(fibptr);
+
+	info->reset_state = res;
+}
+
+/*
+ *	aac_eh_dev_reset	- Device reset command handling
+ *	@cmd:	SCSI command block causing the reset
+ *
+ */
+static int aac_eh_dev_reset(struct scsi_cmnd *cmd)
+{
+	struct scsi_device * dev = cmd->device;
+	struct Scsi_Host * host = dev->host;
+	struct aac_dev * aac = (struct aac_dev *)host->hostdata;
+	struct aac_hba_map_info *info;
+	int count;
+	u32 bus, cid;
+	struct fib *fib;
+	int ret = FAILED;
+	int status;
+	u8 command;
+
+	bus = aac_logical_to_phys(scmd_channel(cmd));
+	cid = scmd_id(cmd);
+
+	if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS)
+		return FAILED;
+
+	info = &aac->hba_map[bus][cid];
+
+	if (info->devtype != AAC_DEVTYPE_NATIVE_RAW &&
+	    info->reset_state > 0)
+		return FAILED;
+
+	pr_err("%s: Host adapter reset request. SCSI hang ?\n",
+	       AAC_DRIVERNAME);
+
+	fib = aac_fib_alloc(aac);
+	if (!fib)
+		return ret;
+
+	/* start a HBA_TMF_LUN_RESET TMF request */
+	command = aac_eh_tmf_lun_reset_fib(info, fib, dev->lun);
+
+	info->reset_state = 1;
+
+	status = aac_hba_send(command, fib,
+			      (fib_callback) aac_tmf_callback,
+			      (void *) info);
+
+	/* Wait up to 15 seconds for completion */
+	for (count = 0; count < 15; ++count) {
+		if (info->reset_state == 0) {
+			ret = info->reset_state == 0 ? SUCCESS : FAILED;
+			break;
+		}
+		msleep(1000);
+	}
+
+	return ret;
+}
+
+/*
+ *	aac_eh_target_reset	- Target reset command handling
+ *	@cmd:	SCSI command block causing the reset
+ *
+ */
+static int aac_eh_target_reset(struct scsi_cmnd *cmd)
+{
+	struct scsi_device * dev = cmd->device;
+	struct Scsi_Host * host = dev->host;
+	struct aac_dev * aac = (struct aac_dev *)host->hostdata;
+	struct aac_hba_map_info *info;
+	int count;
+	u32 bus, cid;
+	int ret = FAILED;
+	struct fib *fib;
+	int status;
+	u8 command;
+
+	bus = aac_logical_to_phys(scmd_channel(cmd));
+	cid = scmd_id(cmd);
+
+	if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS)
+		return FAILED;
+
+	info = &aac->hba_map[bus][cid];
+
+	if (info->devtype != AAC_DEVTYPE_NATIVE_RAW &&
+	    info->reset_state > 0)
+		return FAILED;
+
+	pr_err("%s: Host adapter reset request. SCSI hang ?\n",
+	       AAC_DRIVERNAME);
+
+	fib = aac_fib_alloc(aac);
+	if (!fib)
+		return ret;
+
+
+	/* already tried, start a hard reset now */
+	command = aac_eh_tmf_hard_reset_fib(info, fib);
+
+	info->reset_state = 2;
+
+	status = aac_hba_send(command, fib,
+			      (fib_callback) aac_tmf_callback,
+			      (void *) info);
+
+	/* Wait up to 15 seconds for completion */
+	for (count = 0; count < 15; ++count) {
+		if (info->reset_state <= 0) {
+			ret = info->reset_state == 0 ? SUCCESS : FAILED;
+			break;
+		}
+		msleep(1000);
+	}
+
+	return ret;
+}
+
+/*
+ *	aac_eh_bus_reset	- Bus reset command handling
+ *	@cmd:	SCSI command block causing the reset
+ *
+ */
+static int aac_eh_bus_reset(struct scsi_cmnd* cmd)
+{
+	struct scsi_device * dev = cmd->device;
+	struct Scsi_Host * host = dev->host;
+	struct aac_dev * aac = (struct aac_dev *)host->hostdata;
+	int count;
+	u32 cmd_bus;
+	int status = 0;
+
+
+	cmd_bus = aac_logical_to_phys(scmd_channel(cmd));
+	/* Mark the assoc. FIB to not complete, eh handler does this */
+	for (count = 0; count < (host->can_queue + AAC_NUM_MGT_FIB); ++count) {
+		struct fib *fib = &aac->fibs[count];
+
+		if (fib->hw_fib_va->header.XferState &&
+		    (fib->flags & FIB_CONTEXT_FLAG) &&
+		    (fib->flags & FIB_CONTEXT_FLAG_SCSI_CMD)) {
+			struct aac_hba_map_info *info;
+			u32 bus, cid;
+
+			cmd = (struct scsi_cmnd *)fib->callback_data;
+			bus = aac_logical_to_phys(scmd_channel(cmd));
+			if (bus != cmd_bus)
+				continue;
+			cid = scmd_id(cmd);
+			info = &aac->hba_map[bus][cid];
+			if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS ||
+			    info->devtype != AAC_DEVTYPE_NATIVE_RAW) {
+				fib->flags |= FIB_CONTEXT_FLAG_EH_RESET;
+				cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
+			}
+		}
+	}
+
+	pr_err("%s: Host adapter reset request. SCSI hang ?\n", AAC_DRIVERNAME);
+
+	/*
+	 * Check the health of the controller
+	 */
+	status = aac_adapter_check_health(aac);
+	if (status)
+		dev_err(&aac->pdev->dev, "Adapter health - %d\n", status);
+
+	count = get_num_of_incomplete_fibs(aac);
+	return (count == 0) ? SUCCESS : FAILED;
+}
+
+/*
+ *	aac_eh_host_reset	- Host reset command handling
+ *	@cmd:	SCSI command block causing the reset
+ *
+ */
+int aac_eh_host_reset(struct scsi_cmnd *cmd)
+{
+	struct scsi_device * dev = cmd->device;
+	struct Scsi_Host * host = dev->host;
+	struct aac_dev * aac = (struct aac_dev *)host->hostdata;
+	int ret = FAILED;
+	__le32 supported_options2 = 0;
+	bool is_mu_reset;
+	bool is_ignore_reset;
+	bool is_doorbell_reset;
+
+	/*
+	 * Check if reset is supported by the firmware
+	 */
+	supported_options2 = aac->supplement_adapter_info.supported_options2;
+	is_mu_reset = supported_options2 & AAC_OPTION_MU_RESET;
+	is_doorbell_reset = supported_options2 & AAC_OPTION_DOORBELL_RESET;
+	is_ignore_reset = supported_options2 & AAC_OPTION_IGNORE_RESET;
+	/*
+	 * This adapter needs a blind reset, only do so for
+	 * Adapters that support a register, instead of a commanded,
+	 * reset.
+	 */
+	if ((is_mu_reset || is_doorbell_reset)
+	 && aac_check_reset
+	 && (aac_check_reset != -1 || !is_ignore_reset)) {
+		/* Bypass wait for command quiesce */
+		if (aac_reset_adapter(aac, 2, IOP_HWSOFT_RESET) == 0)
+			ret = SUCCESS;
+	}
+	/*
+	 * Reset EH state
+	 */
+	if (ret == SUCCESS) {
+		int bus, cid;
+		struct aac_hba_map_info *info;
+
+		for (bus = 0; bus < AAC_MAX_BUSES; bus++) {
+			for (cid = 0; cid < AAC_MAX_TARGETS; cid++) {
+				info = &aac->hba_map[bus][cid];
+				if (info->devtype == AAC_DEVTYPE_NATIVE_RAW)
+					info->reset_state = 0;
+			}
+		}
+	}
+	return ret;
+}
+
+/**
+ *	aac_cfg_open		-	open a configuration file
+ *	@inode: inode being opened
+ *	@file: file handle attached
+ *
+ *	Called when the configuration device is opened. Does the needed
+ *	set-up on the handle and then returns.
+ *
+ *	Bugs: This needs extending to check a given adapter is present
+ *	so we can support hot plugging, and to ref count adapters.
+ */
+
+static int aac_cfg_open(struct inode *inode, struct file *file)
+{
+	struct aac_dev *aac;
+	unsigned minor_number = iminor(inode);
+	int err = -ENODEV;
+
+	mutex_lock(&aac_mutex);  /* BKL pushdown: nothing else protects this list */
+	list_for_each_entry(aac, &aac_devices, entry) {
+		if (aac->id == minor_number) {
+			file->private_data = aac;
+			err = 0;
+			break;
+		}
+	}
+	mutex_unlock(&aac_mutex);
+
+	return err;
+}
+
+/**
+ *	aac_cfg_ioctl		-	AAC configuration request
+ *	@inode: inode of device
+ *	@file: file handle
+ *	@cmd: ioctl command code
+ *	@arg: argument
+ *
+ *	Handles a configuration ioctl. Currently this involves wrapping it
+ *	up and feeding it into the nasty windowsalike glue layer.
+ *
+ *	Bugs: Needs locking against parallel ioctls lower down
+ *	Bugs: Needs to handle hot plugging
+ */
+
+static long aac_cfg_ioctl(struct file *file,
+		unsigned int cmd, unsigned long arg)
+{
+	struct aac_dev *aac = (struct aac_dev *)file->private_data;
+
+	if (!capable(CAP_SYS_RAWIO))
+		return -EPERM;
+
+	return aac_do_ioctl(aac, cmd, (void __user *)arg);
+}
+
+#ifdef CONFIG_COMPAT
+static long aac_compat_do_ioctl(struct aac_dev *dev, unsigned cmd, unsigned long arg)
+{
+	long ret;
+	switch (cmd) {
+	case FSACTL_MINIPORT_REV_CHECK:
+	case FSACTL_SENDFIB:
+	case FSACTL_OPEN_GET_ADAPTER_FIB:
+	case FSACTL_CLOSE_GET_ADAPTER_FIB:
+	case FSACTL_SEND_RAW_SRB:
+	case FSACTL_GET_PCI_INFO:
+	case FSACTL_QUERY_DISK:
+	case FSACTL_DELETE_DISK:
+	case FSACTL_FORCE_DELETE_DISK:
+	case FSACTL_GET_CONTAINERS:
+	case FSACTL_SEND_LARGE_FIB:
+		ret = aac_do_ioctl(dev, cmd, (void __user *)arg);
+		break;
+
+	case FSACTL_GET_NEXT_ADAPTER_FIB: {
+		struct fib_ioctl __user *f;
+
+		f = compat_alloc_user_space(sizeof(*f));
+		ret = 0;
+		if (clear_user(f, sizeof(*f)))
+			ret = -EFAULT;
+		if (copy_in_user(f, (void __user *)arg, sizeof(struct fib_ioctl) - sizeof(u32)))
+			ret = -EFAULT;
+		if (!ret)
+			ret = aac_do_ioctl(dev, cmd, f);
+		break;
+	}
+
+	default:
+		ret = -ENOIOCTLCMD;
+		break;
+	}
+	return ret;
+}
+
+static int aac_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+{
+	struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
+	if (!capable(CAP_SYS_RAWIO))
+		return -EPERM;
+	return aac_compat_do_ioctl(dev, cmd, (unsigned long)arg);
+}
+
+static long aac_compat_cfg_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+{
+	if (!capable(CAP_SYS_RAWIO))
+		return -EPERM;
+	return aac_compat_do_ioctl(file->private_data, cmd, arg);
+}
+#endif
+
+static ssize_t aac_show_model(struct device *device,
+			      struct device_attribute *attr, char *buf)
+{
+	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
+	int len;
+
+	if (dev->supplement_adapter_info.adapter_type_text[0]) {
+		char *cp = dev->supplement_adapter_info.adapter_type_text;
+		while (*cp && *cp != ' ')
+			++cp;
+		while (*cp == ' ')
+			++cp;
+		len = snprintf(buf, PAGE_SIZE, "%s\n", cp);
+	} else
+		len = snprintf(buf, PAGE_SIZE, "%s\n",
+		  aac_drivers[dev->cardtype].model);
+	return len;
+}
+
+static ssize_t aac_show_vendor(struct device *device,
+			       struct device_attribute *attr, char *buf)
+{
+	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
+	struct aac_supplement_adapter_info *sup_adap_info;
+	int len;
+
+	sup_adap_info = &dev->supplement_adapter_info;
+	if (sup_adap_info->adapter_type_text[0]) {
+		char *cp = sup_adap_info->adapter_type_text;
+		while (*cp && *cp != ' ')
+			++cp;
+		len = snprintf(buf, PAGE_SIZE, "%.*s\n",
+			(int)(cp - (char *)sup_adap_info->adapter_type_text),
+					sup_adap_info->adapter_type_text);
+	} else
+		len = snprintf(buf, PAGE_SIZE, "%s\n",
+			aac_drivers[dev->cardtype].vname);
+	return len;
+}
+
+static ssize_t aac_show_flags(struct device *cdev,
+			      struct device_attribute *attr, char *buf)
+{
+	int len = 0;
+	struct aac_dev *dev = (struct aac_dev*)class_to_shost(cdev)->hostdata;
+
+	if (nblank(dprintk(x)))
+		len = snprintf(buf, PAGE_SIZE, "dprintk\n");
+#ifdef AAC_DETAILED_STATUS_INFO
+	len += snprintf(buf + len, PAGE_SIZE - len,
+			"AAC_DETAILED_STATUS_INFO\n");
+#endif
+	if (dev->raw_io_interface && dev->raw_io_64)
+		len += snprintf(buf + len, PAGE_SIZE - len,
+				"SAI_READ_CAPACITY_16\n");
+	if (dev->jbod)
+		len += snprintf(buf + len, PAGE_SIZE - len, "SUPPORTED_JBOD\n");
+	if (dev->supplement_adapter_info.supported_options2 &
+		AAC_OPTION_POWER_MANAGEMENT)
+		len += snprintf(buf + len, PAGE_SIZE - len,
+				"SUPPORTED_POWER_MANAGEMENT\n");
+	if (dev->msi)
+		len += snprintf(buf + len, PAGE_SIZE - len, "PCI_HAS_MSI\n");
+	return len;
+}
+
+static ssize_t aac_show_kernel_version(struct device *device,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
+	int len, tmp;
+
+	tmp = le32_to_cpu(dev->adapter_info.kernelrev);
+	len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
+	  tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff,
+	  le32_to_cpu(dev->adapter_info.kernelbuild));
+	return len;
+}
+
+static ssize_t aac_show_monitor_version(struct device *device,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
+	int len, tmp;
+
+	tmp = le32_to_cpu(dev->adapter_info.monitorrev);
+	len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
+	  tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff,
+	  le32_to_cpu(dev->adapter_info.monitorbuild));
+	return len;
+}
+
+static ssize_t aac_show_bios_version(struct device *device,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
+	int len, tmp;
+
+	tmp = le32_to_cpu(dev->adapter_info.biosrev);
+	len = snprintf(buf, PAGE_SIZE, "%d.%d-%d[%d]\n",
+	  tmp >> 24, (tmp >> 16) & 0xff, tmp & 0xff,
+	  le32_to_cpu(dev->adapter_info.biosbuild));
+	return len;
+}
+
+static ssize_t aac_show_driver_version(struct device *device,
+					struct device_attribute *attr,
+					char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%s\n", aac_driver_version);
+}
+
+static ssize_t aac_show_serial_number(struct device *device,
+			       struct device_attribute *attr, char *buf)
+{
+	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
+	int len = 0;
+
+	if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0)
+		len = snprintf(buf, 16, "%06X\n",
+		  le32_to_cpu(dev->adapter_info.serial[0]));
+	if (len &&
+	  !memcmp(&dev->supplement_adapter_info.mfg_pcba_serial_no[
+	    sizeof(dev->supplement_adapter_info.mfg_pcba_serial_no)-len],
+	  buf, len-1))
+		len = snprintf(buf, 16, "%.*s\n",
+		  (int)sizeof(dev->supplement_adapter_info.mfg_pcba_serial_no),
+		  dev->supplement_adapter_info.mfg_pcba_serial_no);
+
+	return min(len, 16);
+}
+
+static ssize_t aac_show_max_channel(struct device *device,
+				    struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+	  class_to_shost(device)->max_channel);
+}
+
+static ssize_t aac_show_max_id(struct device *device,
+			       struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+	  class_to_shost(device)->max_id);
+}
+
+static ssize_t aac_store_reset_adapter(struct device *device,
+				       struct device_attribute *attr,
+				       const char *buf, size_t count)
+{
+	int retval = -EACCES;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return retval;
+
+	retval = aac_reset_adapter(shost_priv(class_to_shost(device)),
+					buf[0] == '!', IOP_HWSOFT_RESET);
+	if (retval >= 0)
+		retval = count;
+
+	return retval;
+}
+
+static ssize_t aac_show_reset_adapter(struct device *device,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	struct aac_dev *dev = (struct aac_dev*)class_to_shost(device)->hostdata;
+	int len, tmp;
+
+	tmp = aac_adapter_check_health(dev);
+	if ((tmp == 0) && dev->in_reset)
+		tmp = -EBUSY;
+	len = snprintf(buf, PAGE_SIZE, "0x%x\n", tmp);
+	return len;
+}
+
+static struct device_attribute aac_model = {
+	.attr = {
+		.name = "model",
+		.mode = S_IRUGO,
+	},
+	.show = aac_show_model,
+};
+static struct device_attribute aac_vendor = {
+	.attr = {
+		.name = "vendor",
+		.mode = S_IRUGO,
+	},
+	.show = aac_show_vendor,
+};
+static struct device_attribute aac_flags = {
+	.attr = {
+		.name = "flags",
+		.mode = S_IRUGO,
+	},
+	.show = aac_show_flags,
+};
+static struct device_attribute aac_kernel_version = {
+	.attr = {
+		.name = "hba_kernel_version",
+		.mode = S_IRUGO,
+	},
+	.show = aac_show_kernel_version,
+};
+static struct device_attribute aac_monitor_version = {
+	.attr = {
+		.name = "hba_monitor_version",
+		.mode = S_IRUGO,
+	},
+	.show = aac_show_monitor_version,
+};
+static struct device_attribute aac_bios_version = {
+	.attr = {
+		.name = "hba_bios_version",
+		.mode = S_IRUGO,
+	},
+	.show = aac_show_bios_version,
+};
+static struct device_attribute aac_lld_version = {
+	.attr = {
+		.name = "driver_version",
+		.mode = 0444,
+	},
+	.show = aac_show_driver_version,
+};
+static struct device_attribute aac_serial_number = {
+	.attr = {
+		.name = "serial_number",
+		.mode = S_IRUGO,
+	},
+	.show = aac_show_serial_number,
+};
+static struct device_attribute aac_max_channel = {
+	.attr = {
+		.name = "max_channel",
+		.mode = S_IRUGO,
+	},
+	.show = aac_show_max_channel,
+};
+static struct device_attribute aac_max_id = {
+	.attr = {
+		.name = "max_id",
+		.mode = S_IRUGO,
+	},
+	.show = aac_show_max_id,
+};
+static struct device_attribute aac_reset = {
+	.attr = {
+		.name = "reset_host",
+		.mode = S_IWUSR|S_IRUGO,
+	},
+	.store = aac_store_reset_adapter,
+	.show = aac_show_reset_adapter,
+};
+
+static struct device_attribute *aac_attrs[] = {
+	&aac_model,
+	&aac_vendor,
+	&aac_flags,
+	&aac_kernel_version,
+	&aac_monitor_version,
+	&aac_bios_version,
+	&aac_lld_version,
+	&aac_serial_number,
+	&aac_max_channel,
+	&aac_max_id,
+	&aac_reset,
+	NULL
+};
+
+ssize_t aac_get_serial_number(struct device *device, char *buf)
+{
+	return aac_show_serial_number(device, &aac_serial_number, buf);
+}
+
+static const struct file_operations aac_cfg_fops = {
+	.owner		= THIS_MODULE,
+	.unlocked_ioctl	= aac_cfg_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl   = aac_compat_cfg_ioctl,
+#endif
+	.open		= aac_cfg_open,
+	.llseek		= noop_llseek,
+};
+
+static struct scsi_host_template aac_driver_template = {
+	.module				= THIS_MODULE,
+	.name				= "AAC",
+	.proc_name			= AAC_DRIVERNAME,
+	.info				= aac_info,
+	.ioctl				= aac_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl			= aac_compat_ioctl,
+#endif
+	.queuecommand			= aac_queuecommand,
+	.bios_param			= aac_biosparm,
+	.shost_attrs			= aac_attrs,
+	.slave_configure		= aac_slave_configure,
+	.change_queue_depth		= aac_change_queue_depth,
+	.sdev_attrs			= aac_dev_attrs,
+	.eh_abort_handler		= aac_eh_abort,
+	.eh_device_reset_handler	= aac_eh_dev_reset,
+	.eh_target_reset_handler	= aac_eh_target_reset,
+	.eh_bus_reset_handler		= aac_eh_bus_reset,
+	.eh_host_reset_handler		= aac_eh_host_reset,
+	.can_queue			= AAC_NUM_IO_FIB,
+	.this_id			= MAXIMUM_NUM_CONTAINERS,
+	.sg_tablesize			= 16,
+	.max_sectors			= 128,
+#if (AAC_NUM_IO_FIB > 256)
+	.cmd_per_lun			= 256,
+#else
+	.cmd_per_lun			= AAC_NUM_IO_FIB,
+#endif
+	.use_clustering			= ENABLE_CLUSTERING,
+	.emulated			= 1,
+	.no_write_same			= 1,
+};
+
+static void __aac_shutdown(struct aac_dev * aac)
+{
+	int i;
+
+	mutex_lock(&aac->ioctl_mutex);
+	aac->adapter_shutdown = 1;
+	mutex_unlock(&aac->ioctl_mutex);
+
+	if (aac->aif_thread) {
+		int i;
+		/* Clear out events first */
+		for (i = 0; i < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++) {
+			struct fib *fib = &aac->fibs[i];
+			if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
+			    (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected)))
+				up(&fib->event_wait);
+		}
+		kthread_stop(aac->thread);
+		aac->thread = NULL;
+	}
+
+	aac_send_shutdown(aac);
+
+	aac_adapter_disable_int(aac);
+
+	if (aac_is_src(aac)) {
+		if (aac->max_msix > 1) {
+			for (i = 0; i < aac->max_msix; i++) {
+				free_irq(pci_irq_vector(aac->pdev, i),
+					 &(aac->aac_msix[i]));
+			}
+		} else {
+			free_irq(aac->pdev->irq,
+				 &(aac->aac_msix[0]));
+		}
+	} else {
+		free_irq(aac->pdev->irq, aac);
+	}
+	if (aac->msi)
+		pci_disable_msi(aac->pdev);
+	else if (aac->max_msix > 1)
+		pci_disable_msix(aac->pdev);
+}
+
+static void aac_init_char(void)
+{
+	aac_cfg_major = register_chrdev(0, "aac", &aac_cfg_fops);
+	if (aac_cfg_major < 0) {
+		pr_err("aacraid: unable to register \"aac\" device.\n");
+	}
+}
+
+static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	unsigned index = id->driver_data;
+	struct Scsi_Host *shost;
+	struct aac_dev *aac;
+	struct list_head *insert = &aac_devices;
+	int error = -ENODEV;
+	int unique_id = 0;
+	u64 dmamask;
+	int mask_bits = 0;
+	extern int aac_sync_mode;
+
+	/*
+	 * Only series 7 needs freset.
+	 */
+	if (pdev->device == PMC_DEVICE_S7)
+		pdev->needs_freset = 1;
+
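+	/*
+	 * Walk the sorted adapter list to pick the lowest free id and
+	 * remember where this adapter should be inserted.
+	 */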
+	list_for_each_entry(aac, &aac_devices, entry) {
+		if (aac->id > unique_id)
+			break;
+		insert = &aac->entry;
+		unique_id++;
+	}
+
+	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+			       PCIE_LINK_STATE_CLKPM);
+
+	error = pci_enable_device(pdev);
+	if (error)
+		goto out;
+	error = -ENODEV;
+
+	if (!(aac_drivers[index].quirks & AAC_QUIRK_SRC)) {
+		error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (error) {
+			dev_err(&pdev->dev, "PCI 32 BIT dma mask set failed");
+			goto out_disable_pdev;
+		}
+	}
+
+	/*
+	 * If the quirk31 bit is set, the adapter needs its adapter-to-driver
+	 * communication memory allocated below 2 GiB.
+	 */
+	if (aac_drivers[index].quirks & AAC_QUIRK_31BIT) {
+		dmamask = DMA_BIT_MASK(31);
+		mask_bits = 31;
+	} else {
+		dmamask = DMA_BIT_MASK(32);
+		mask_bits = 32;
+	}
+
+	error = pci_set_consistent_dma_mask(pdev, dmamask);
+	if (error) {
+		dev_err(&pdev->dev, "PCI %d bit consistent dma mask set failed\n",
+			mask_bits);
+		goto out_disable_pdev;
+	}
+
+	pci_set_master(pdev);
+
+	shost = scsi_host_alloc(&aac_driver_template, sizeof(struct aac_dev));
+	if (!shost)
+		goto out_disable_pdev;
+
+	shost->irq = pdev->irq;
+	shost->unique_id = unique_id;
+	shost->max_cmd_len = 16;
+	shost->use_cmd_list = 1;
+
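+	/*
+	 * Re-register the "aac" management character device if a previous
+	 * adapter removal released it (see aac_remove_one()).
+	 */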
+	if (aac_cfg_major == AAC_CHARDEV_NEEDS_REINIT)
+		aac_init_char();
+
+	aac = (struct aac_dev *)shost->hostdata;
+	aac->base_start = pci_resource_start(pdev, 0);
+	aac->scsi_host_ptr = shost;
+	aac->pdev = pdev;
+	aac->name = aac_driver_template.name;
+	aac->id = shost->unique_id;
+	aac->cardtype = index;
+	INIT_LIST_HEAD(&aac->entry);
+
+	if (aac_reset_devices || reset_devices)
+		aac->init_reset = true;
+
+	aac->fibs = kcalloc(shost->can_queue + AAC_NUM_MGT_FIB,
+			    sizeof(struct fib),
+			    GFP_KERNEL);
+	if (!aac->fibs)
+		goto out_free_host;
+	spin_lock_init(&aac->fib_lock);
+
+	mutex_init(&aac->ioctl_mutex);
+	mutex_init(&aac->scan_mutex);
+
+	INIT_DELAYED_WORK(&aac->safw_rescan_work, aac_safw_rescan_worker);
+	/*
+	 *	Map in the registers from the adapter.
+	 */
+	aac->base_size = AAC_MIN_FOOTPRINT_SIZE;
+	if ((*aac_drivers[index].init)(aac)) {
+		error = -ENODEV;
+		goto out_unmap;
+	}
+
+	if (aac->sync_mode) {
+		if (aac_sync_mode)
+			printk(KERN_INFO "%s%d: Sync. mode enforced "
+				"by driver parameter. This will cause "
+				"a significant performance decrease!\n",
+				aac->name,
+				aac->id);
+		else
+			printk(KERN_INFO "%s%d: Async. mode not supported "
+				"by current driver, sync. mode enforced."
+				"\nPlease update driver to get full performance.\n",
+				aac->name,
+				aac->id);
+	}
+
+	/*
+	 *	Start any kernel threads needed
+	 */
+	aac->thread = kthread_run(aac_command_thread, aac, AAC_DRIVERNAME);
+	if (IS_ERR(aac->thread)) {
+		printk(KERN_ERR "aacraid: Unable to create command thread.\n");
+		error = PTR_ERR(aac->thread);
+		aac->thread = NULL;
+		goto out_deinit;
+	}
+
+	aac->maximum_num_channels = aac_drivers[index].channels;
+	error = aac_get_adapter_info(aac);
+	if (error < 0)
+		goto out_deinit;
+
+	/*
+	 * Let's override negotiations and drop the maximum SG limit to 34.
+	 */
+	if ((aac_drivers[index].quirks & AAC_QUIRK_34SG) &&
+			(shost->sg_tablesize > 34)) {
+		shost->sg_tablesize = 34;
+		shost->max_sectors = (shost->sg_tablesize * 8) + 112;
+	}
+
+	if ((aac_drivers[index].quirks & AAC_QUIRK_17SG) &&
+			(shost->sg_tablesize > 17)) {
+		shost->sg_tablesize = 17;
+		shost->max_sectors = (shost->sg_tablesize * 8) + 112;
+	}
+
+	error = pci_set_dma_max_seg_size(pdev,
+		(aac->adapter_info.options & AAC_OPT_NEW_COMM) ?
+			(shost->max_sectors << 9) : 65536);
+	if (error)
+		goto out_deinit;
+
+	/*
+	 * Firmware printf works only with older firmware.
+	 */
+	if (aac_drivers[index].quirks & AAC_QUIRK_34SG)
+		aac->printf_enabled = 1;
+	else
+		aac->printf_enabled = 0;
+
+	/*
+	 * max channel will be the physical channels plus 1 virtual channel;
+	 * all containers are on the virtual channel 0 (CONTAINER_CHANNEL),
+	 * physical channels are addressed by their actual physical number + 1.
+	 */
+	if (aac->nondasd_support || expose_physicals || aac->jbod)
+		shost->max_channel = aac->maximum_num_channels;
+	else
+		shost->max_channel = 0;
+
+	aac_get_config_status(aac, 0);
+	aac_get_containers(aac);
+	list_add(&aac->entry, insert);
+
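+	/*
+	 * Size the target id space to cover both containers and physical
+	 * devices, but never below MAXIMUM_NUM_CONTAINERS.
+	 */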
+	shost->max_id = aac->maximum_num_containers;
+	if (shost->max_id < aac->maximum_num_physicals)
+		shost->max_id = aac->maximum_num_physicals;
+	if (shost->max_id < MAXIMUM_NUM_CONTAINERS)
+		shost->max_id = MAXIMUM_NUM_CONTAINERS;
+	else
+		shost->this_id = shost->max_id;
+
+	if (!aac->sa_firmware && aac_drivers[index].quirks & AAC_QUIRK_SRC)
+		aac_intr_normal(aac, 0, 2, 0, NULL);
+
+	/*
+	 * dmb - we may need to move the setting of these parms somewhere else once
+	 * we get a fib that can report the actual numbers
+	 */
+	shost->max_lun = AAC_MAX_LUN;
+
+	pci_set_drvdata(pdev, shost);
+
+	error = scsi_add_host(shost, &pdev->dev);
+	if (error)
+		goto out_deinit;
+
+	aac_scan_host(aac);
+
+	pci_enable_pcie_error_reporting(pdev);
+	pci_save_state(pdev);
+
+	return 0;
+
+ out_deinit:
+	__aac_shutdown(aac);
+ out_unmap:
+	aac_fib_map_free(aac);
+	if (aac->comm_addr)
+		dma_free_coherent(&aac->pdev->dev, aac->comm_size,
+				  aac->comm_addr, aac->comm_phys);
+	kfree(aac->queues);
+	aac_adapter_ioremap(aac, 0);
+	kfree(aac->fibs);
+	kfree(aac->fsa_dev);
+ out_free_host:
+	scsi_host_put(shost);
+ out_disable_pdev:
+	pci_disable_device(pdev);
+ out:
+	return error;
+}
+
+static void aac_release_resources(struct aac_dev *aac)
+{
+	aac_adapter_disable_int(aac);
+	aac_free_irq(aac);
+}
+
+static int aac_acquire_resources(struct aac_dev *dev)
+{
+	unsigned long status;
+	/*
+	 *	First clear out all interrupts.  Then enable the ones that we
+	 *	can handle.
+	 */
+	while (!((status = src_readl(dev, MUnit.OMR)) & KERNEL_UP_AND_RUNNING)
+		|| status == 0xffffffff)
+			msleep(20);
+
+	aac_adapter_disable_int(dev);
+	aac_adapter_enable_int(dev);
+
+	if (aac_is_src(dev))
+		aac_define_int_mode(dev);
+
+	if (dev->msi_enabled)
+		aac_src_access_devreg(dev, AAC_ENABLE_MSIX);
+
+	if (aac_acquire_irq(dev))
+		goto error_iounmap;
+
+	aac_adapter_enable_int(dev);
+
+	/* max_msix may change after EEH;
+	 * re-assign vectors to fibs.
+	 */
+	aac_fib_vector_assign(dev);
+
+	if (!dev->sync_mode) {
+		/* After EEH recovery or suspend resume, max_msix count
+		 * may change, therefore updating in init as well.
+		 */
+		dev->init->r7.no_of_msix_vectors = cpu_to_le32(dev->max_msix);
+		aac_adapter_start(dev);
+	}
+	return 0;
+
+error_iounmap:
+	return -1;
+
+}
+
+#if (defined(CONFIG_PM))
+static int aac_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
+
+	scsi_block_requests(shost);
+	aac_cancel_safw_rescan_worker(aac);
+	aac_send_shutdown(aac);
+
+	aac_release_resources(aac);
+
+	pci_set_drvdata(pdev, shost);
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+	return 0;
+}
+
+static int aac_resume(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
+	int r;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_enable_wake(pdev, PCI_D0, 0);
+	pci_restore_state(pdev);
+	r = pci_enable_device(pdev);
+
+	if (r)
+		goto fail_device;
+
+	pci_set_master(pdev);
+	if (aac_acquire_resources(aac))
+		goto fail_device;
+	/*
+	 * Reset this flag to unblock ioctl() as it was set at
+	 * aac_send_shutdown() to block ioctls from the upper layer.
+	 */
+	aac->adapter_shutdown = 0;
+	scsi_unblock_requests(shost);
+
+	return 0;
+
+fail_device:
+	printk(KERN_INFO "%s%d: resume failed.\n", aac->name, aac->id);
+	scsi_host_put(shost);
+	pci_disable_device(pdev);
+	return -ENODEV;
+}
+#endif
+
+static void aac_shutdown(struct pci_dev *dev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(dev);
+	scsi_block_requests(shost);
+	__aac_shutdown((struct aac_dev *)shost->hostdata);
+}
+
+static void aac_remove_one(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
+
+	aac_cancel_safw_rescan_worker(aac);
+	scsi_remove_host(shost);
+
+	__aac_shutdown(aac);
+	aac_fib_map_free(aac);
+	dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr,
+			  aac->comm_phys);
+	kfree(aac->queues);
+
+	aac_adapter_ioremap(aac, 0);
+
+	kfree(aac->fibs);
+	kfree(aac->fsa_dev);
+
+	list_del(&aac->entry);
+	scsi_host_put(shost);
+	pci_disable_device(pdev);
+	if (list_empty(&aac_devices)) {
+		unregister_chrdev(aac_cfg_major, "aac");
+		aac_cfg_major = AAC_CHARDEV_NEEDS_REINIT;
+	}
+}
+
+static void aac_flush_ios(struct aac_dev *aac)
+{
+	int i;
+	struct scsi_cmnd *cmd;
+
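+	/*
+	 * Complete every command still owned by the firmware: fail it with
+	 * DID_NO_CONNECT on a permanent PCI error, DID_RESET otherwise.
+	 */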
+	for (i = 0; i < aac->scsi_host_ptr->can_queue; i++) {
+		cmd = (struct scsi_cmnd *)aac->fibs[i].callback_data;
+		if (cmd && (cmd->SCp.phase == AAC_OWNER_FIRMWARE)) {
+			scsi_dma_unmap(cmd);
+
+			if (aac->handle_pci_error)
+				cmd->result = DID_NO_CONNECT << 16;
+			else
+				cmd->result = DID_RESET << 16;
+
+			cmd->scsi_done(cmd);
+		}
+	}
+}
+
+static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev,
+					enum pci_channel_state error)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct aac_dev *aac = shost_priv(shost);
+
+	dev_err(&pdev->dev, "aacraid: PCI error detected %x\n", error);
+
+	switch (error) {
+	case pci_channel_io_normal:
+		return PCI_ERS_RESULT_CAN_RECOVER;
+	case pci_channel_io_frozen:
+		aac->handle_pci_error = 1;
+
+		scsi_block_requests(aac->scsi_host_ptr);
+		aac_cancel_safw_rescan_worker(aac);
+		aac_flush_ios(aac);
+		aac_release_resources(aac);
+
+		pci_disable_pcie_error_reporting(pdev);
+		aac_adapter_ioremap(aac, 0);
+
+		return PCI_ERS_RESULT_NEED_RESET;
+	case pci_channel_io_perm_failure:
+		aac->handle_pci_error = 1;
+
+		aac_flush_ios(aac);
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t aac_pci_mmio_enabled(struct pci_dev *pdev)
+{
+	dev_err(&pdev->dev, "aacraid: PCI error - mmio enabled\n");
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t aac_pci_slot_reset(struct pci_dev *pdev)
+{
+	dev_err(&pdev->dev, "aacraid: PCI error - slot reset\n");
+	pci_restore_state(pdev);
+	if (pci_enable_device(pdev)) {
+		dev_warn(&pdev->dev,
+			"aacraid: failed to enable slave\n");
+		goto fail_device;
+	}
+
+	pci_set_master(pdev);
+
+	if (pci_enable_device_mem(pdev)) {
+		dev_err(&pdev->dev, "pci_enable_device_mem failed\n");
+		goto fail_device;
+	}
+
+	return PCI_ERS_RESULT_RECOVERED;
+
+fail_device:
+	dev_err(&pdev->dev, "aacraid: PCI error - slot reset failed\n");
+	return PCI_ERS_RESULT_DISCONNECT;
+}
+
+static void aac_pci_resume(struct pci_dev *pdev)
+{
+	struct Scsi_Host *shost = pci_get_drvdata(pdev);
+	struct scsi_device *sdev = NULL;
+	struct aac_dev *aac = (struct aac_dev *)shost_priv(shost);
+
+	pci_cleanup_aer_uncorrect_error_status(pdev);
+
+	if (aac_adapter_ioremap(aac, aac->base_size)) {
+		dev_err(&pdev->dev, "aacraid: ioremap failed\n");
+		/* remap failed, go back ... */
+		aac->comm_interface = AAC_COMM_PRODUCER;
+		if (aac_adapter_ioremap(aac, AAC_MIN_FOOTPRINT_SIZE)) {
+			dev_warn(&pdev->dev,
+				"aacraid: unable to map adapter.\n");
+
+			return;
+		}
+	}
+
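+	/* Give the adapter up to 10 seconds to settle after the slot reset */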
+	msleep(10000);
+
+	aac_acquire_resources(aac);
+
+	/*
+	 * Reset this flag to unblock ioctl() as it was set
+	 * at aac_send_shutdown() to block ioctls from the upper layer.
+	 */
+	aac->adapter_shutdown = 0;
+	aac->handle_pci_error = 0;
+
+	shost_for_each_device(sdev, shost)
+		if (sdev->sdev_state == SDEV_OFFLINE)
+			sdev->sdev_state = SDEV_RUNNING;
+	scsi_unblock_requests(aac->scsi_host_ptr);
+	aac_scan_host(aac);
+	pci_save_state(pdev);
+
+	dev_err(&pdev->dev, "aacraid: PCI error - resume\n");
+}
+
+static struct pci_error_handlers aac_pci_err_handler = {
+	.error_detected		= aac_pci_error_detected,
+	.mmio_enabled		= aac_pci_mmio_enabled,
+	.slot_reset		= aac_pci_slot_reset,
+	.resume			= aac_pci_resume,
+};
+
+static struct pci_driver aac_pci_driver = {
+	.name		= AAC_DRIVERNAME,
+	.id_table	= aac_pci_tbl,
+	.probe		= aac_probe_one,
+	.remove		= aac_remove_one,
+#if (defined(CONFIG_PM))
+	.suspend	= aac_suspend,
+	.resume		= aac_resume,
+#endif
+	.shutdown	= aac_shutdown,
+	.err_handler    = &aac_pci_err_handler,
+};
+
+static int __init aac_init(void)
+{
+	int error;
+
+	printk(KERN_INFO "Adaptec %s driver %s\n",
+	  AAC_DRIVERNAME, aac_driver_version);
+
+	error = pci_register_driver(&aac_pci_driver);
+	if (error < 0)
+		return error;
+
+	aac_init_char();
+
+	return 0;
+}
+
+static void __exit aac_exit(void)
+{
+	if (aac_cfg_major > -1)
+		unregister_chrdev(aac_cfg_major, "aac");
+	pci_unregister_driver(&aac_pci_driver);
+}
+
+module_init(aac_init);
+module_exit(aac_exit);
diff --git a/drivers/scsi/aacraid/nark.c b/drivers/scsi/aacraid/nark.c
new file mode 100644
index 0000000..c59074e
--- /dev/null
+++ b/drivers/scsi/aacraid/nark.c
@@ -0,0 +1,85 @@
+/*
+ *	Adaptec AAC series RAID controller driver
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *		 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ *  nark.c
+ *
+ * Abstract: Hardware Device Interface for NEMER/ARK
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+
+#include <scsi/scsi_host.h>
+
+#include "aacraid.h"
+
+/**
+ *	aac_nark_ioremap
+ *	@dev: adapter
+ *	@size: mapping resize request
+ *
+ */
+static int aac_nark_ioremap(struct aac_dev * dev, u32 size)
+{
+	if (!size) {
+		iounmap(dev->regs.rx);
+		dev->regs.rx = NULL;
+		iounmap(dev->base);
+		dev->base = NULL;
+		return 0;
+	}
+	dev->base_start = pci_resource_start(dev->pdev, 2);
+	dev->regs.rx = ioremap((u64)pci_resource_start(dev->pdev, 0) |
+	  ((u64)pci_resource_start(dev->pdev, 1) << 32),
+	  sizeof(struct rx_registers) - sizeof(struct rx_inbound));
+	dev->base = NULL;
+	if (dev->regs.rx == NULL)
+		return -1;
+	dev->base = ioremap(dev->base_start, size);
+	if (dev->base == NULL) {
+		iounmap(dev->regs.rx);
+		dev->regs.rx = NULL;
+		return -1;
+	}
+	dev->IndexRegs = &((struct rx_registers __iomem *)dev->base)->IndexRegs;
+	return 0;
+}
+
+/**
+ *	aac_nark_init	-	initialize an NEMER/ARK Split Bar card
+ *	@dev: device to configure
+ *
+ */
+
+int aac_nark_init(struct aac_dev * dev)
+{
+	/*
+	 *	Fill in the function dispatch table.
+	 */
+	dev->a_ops.adapter_ioremap = aac_nark_ioremap;
+	dev->a_ops.adapter_comm = aac_rx_select_comm;
+
+	return _aac_rx_init(dev);
+}
diff --git a/drivers/scsi/aacraid/rkt.c b/drivers/scsi/aacraid/rkt.c
new file mode 100644
index 0000000..a1bc5bb
--- /dev/null
+++ b/drivers/scsi/aacraid/rkt.c
@@ -0,0 +1,108 @@
+/*
+ *	Adaptec AAC series RAID controller driver
+ *	(c) Copyright 2001 Red Hat Inc.
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *		 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ *  rkt.c
+ *
+ * Abstract: Hardware miniport for Drawbridge specific hardware functions.
+ *
+ */
+
+#include <linux/blkdev.h>
+
+#include <scsi/scsi_host.h>
+
+#include "aacraid.h"
+
+#define AAC_NUM_IO_FIB_RKT      (246 - AAC_NUM_MGT_FIB)
+
+/**
+ *	aac_rkt_select_comm	-	Select communications method
+ *	@dev: Adapter
+ *	@comm: communications method
+ */
+
+static int aac_rkt_select_comm(struct aac_dev *dev, int comm)
+{
+	int retval;
+	retval = aac_rx_select_comm(dev, comm);
+	if (comm == AAC_COMM_MESSAGE) {
+		/*
+		 * FIB Setup has already been done, but we can minimize the
+		 * damage by at least ensuring the OS never issues more
+		 * commands than we can handle. The Rocket adapters currently
+		 * can only handle 246 commands and 8 AIFs at the same time,
+		 * and in fact do notify us accordingly if we negotiate the
+		 * FIB size. The problem that causes us to add this check is
+		 * to ensure that we do not overdo it with the adapter when a
+		 * hard-coded FIB override is being utilized. This special
+		 * case warrants this half-baked, but convenient, check here.
+		 */
+		if (dev->scsi_host_ptr->can_queue > AAC_NUM_IO_FIB_RKT) {
+			dev->init->r7.max_io_commands =
+				cpu_to_le32(AAC_NUM_IO_FIB_RKT + AAC_NUM_MGT_FIB);
+			dev->scsi_host_ptr->can_queue = AAC_NUM_IO_FIB_RKT;
+		}
+	}
+	return retval;
+}
+
+/**
+ *	aac_rkt_ioremap
+ *	@dev: adapter
+ *	@size: mapping resize request
+ *
+ */
+static int aac_rkt_ioremap(struct aac_dev * dev, u32 size)
+{
+	if (!size) {
+		iounmap(dev->regs.rkt);
+		return 0;
+	}
+	dev->base = dev->regs.rkt = ioremap(dev->base_start, size);
+	if (dev->base == NULL)
+		return -1;
+	dev->IndexRegs = &dev->regs.rkt->IndexRegs;
+	return 0;
+}
+
+/**
+ *	aac_rkt_init	-	initialize an i960 based AAC card
+ *	@dev: device to configure
+ *
+ *	Allocate and set up resources for the i960 based AAC variants. The 
+ *	device_interface in the commregion will be allocated and linked 
+ *	to the comm region.
+ */
+
+int aac_rkt_init(struct aac_dev *dev)
+{
+	/*
+	 *	Fill in the function dispatch table.
+	 */
+	dev->a_ops.adapter_ioremap = aac_rkt_ioremap;
+	dev->a_ops.adapter_comm = aac_rkt_select_comm;
+
+	return _aac_rx_init(dev);
+}
diff --git a/drivers/scsi/aacraid/rx.c b/drivers/scsi/aacraid/rx.c
new file mode 100644
index 0000000..576cdf9
--- /dev/null
+++ b/drivers/scsi/aacraid/rx.c
@@ -0,0 +1,687 @@
+/*
+ *	Adaptec AAC series RAID controller driver
+ *	(c) Copyright 2001 Red Hat Inc.
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *		 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ *  rx.c
+ *
+ * Abstract: Hardware miniport for Drawbridge specific hardware functions.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+
+#include <scsi/scsi_host.h>
+
+#include "aacraid.h"
+
+static irqreturn_t aac_rx_intr_producer(int irq, void *dev_id)
+{
+	struct aac_dev *dev = dev_id;
+	unsigned long bellbits;
+	u8 intstat = rx_readb(dev, MUnit.OISR);
+
+	/*
+	 *	Read mask and invert because drawbridge is reversed.
+	 *	This allows us to only service interrupts that have
+	 *	been enabled.
+	 *	Check to see if this is our interrupt.  If it isn't just return
+	 */
+	if (likely(intstat & ~(dev->OIMR))) {
+		bellbits = rx_readl(dev, OutboundDoorbellReg);
+		if (unlikely(bellbits & DoorBellPrintfReady)) {
+			aac_printf(dev, readl (&dev->IndexRegs->Mailbox[5]));
+			rx_writel(dev, MUnit.ODR,DoorBellPrintfReady);
+			rx_writel(dev, InboundDoorbellReg,DoorBellPrintfDone);
+		}
+		else if (unlikely(bellbits & DoorBellAdapterNormCmdReady)) {
+			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdReady);
+			aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
+		}
+		else if (likely(bellbits & DoorBellAdapterNormRespReady)) {
+			rx_writel(dev, MUnit.ODR,DoorBellAdapterNormRespReady);
+			aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
+		}
+		else if (unlikely(bellbits & DoorBellAdapterNormCmdNotFull)) {
+			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
+		}
+		else if (unlikely(bellbits & DoorBellAdapterNormRespNotFull)) {
+			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormCmdNotFull);
+			rx_writel(dev, MUnit.ODR, DoorBellAdapterNormRespNotFull);
+		}
+		return IRQ_HANDLED;
+	}
+	return IRQ_NONE;
+}
+
+static irqreturn_t aac_rx_intr_message(int irq, void *dev_id)
+{
+	int isAif, isFastResponse, isSpecial;
+	struct aac_dev *dev = dev_id;
+	u32 Index = rx_readl(dev, MUnit.OutboundQueue);
+	if (unlikely(Index == 0xFFFFFFFFL))
+		Index = rx_readl(dev, MUnit.OutboundQueue);
+	if (likely(Index != 0xFFFFFFFFL)) {
+		do {
+			isAif = isFastResponse = isSpecial = 0;
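+			/*
+			 * Outbound queue entry encoding: bit 1 marks an AIF
+			 * (0xFFFFFFFE being a special case), otherwise bit 0
+			 * flags a fast response and Index >> 2 is the fib
+			 * index passed to aac_intr_normal().
+			 */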
+			if (Index & 0x00000002L) {
+				isAif = 1;
+				if (Index == 0xFFFFFFFEL)
+					isSpecial = 1;
+				Index &= ~0x00000002L;
+			} else {
+				if (Index & 0x00000001L)
+					isFastResponse = 1;
+				Index >>= 2;
+			}
+			if (!isSpecial) {
+				if (unlikely(aac_intr_normal(dev,
+						Index, isAif,
+						isFastResponse, NULL))) {
+					rx_writel(dev,
+						MUnit.OutboundQueue,
+						Index);
+					rx_writel(dev,
+						MUnit.ODR,
+						DoorBellAdapterNormRespReady);
+				}
+			}
+			Index = rx_readl(dev, MUnit.OutboundQueue);
+		} while (Index != 0xFFFFFFFFL);
+		return IRQ_HANDLED;
+	}
+	return IRQ_NONE;
+}
+
+/**
+ *	aac_rx_disable_interrupt	-	Disable interrupts
+ *	@dev: Adapter
+ */
+
+static void aac_rx_disable_interrupt(struct aac_dev *dev)
+{
+	rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
+}
+
+/**
+ *	aac_rx_enable_interrupt_producer	-	Enable interrupts
+ *	@dev: Adapter
+ */
+
+static void aac_rx_enable_interrupt_producer(struct aac_dev *dev)
+{
+	rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xfb);
+}
+
+/**
+ *	aac_rx_enable_interrupt_message	-	Enable interrupts
+ *	@dev: Adapter
+ */
+
+static void aac_rx_enable_interrupt_message(struct aac_dev *dev)
+{
+	rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xf7);
+}
+
+/**
+ *	rx_sync_cmd	-	send a command and wait
+ *	@dev: Adapter
+ *	@command: Command to execute
+ *	@p1: first parameter
+ *	@ret: adapter status
+ *
+ *	This routine will send a synchronous command to the adapter and wait 
+ *	for its	completion.
+ */
+
+static int rx_sync_cmd(struct aac_dev *dev, u32 command,
+	u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
+	u32 *status, u32 * r1, u32 * r2, u32 * r3, u32 * r4)
+{
+	unsigned long start;
+	int ok;
+	/*
+	 *	Write the command into Mailbox 0
+	 */
+	writel(command, &dev->IndexRegs->Mailbox[0]);
+	/*
+	 *	Write the parameters into Mailboxes 1 - 6
+	 */
+	writel(p1, &dev->IndexRegs->Mailbox[1]);
+	writel(p2, &dev->IndexRegs->Mailbox[2]);
+	writel(p3, &dev->IndexRegs->Mailbox[3]);
+	writel(p4, &dev->IndexRegs->Mailbox[4]);
+	/*
+	 *	Clear the synch command doorbell to start on a clean slate.
+	 */
+	rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
+	/*
+	 *	Disable doorbell interrupts
+	 */
+	rx_writeb(dev, MUnit.OIMR, dev->OIMR = 0xff);
+	/*
+	 *	Force the completion of the mask register write before issuing
+	 *	the interrupt.
+	 */
+	rx_readb (dev, MUnit.OIMR);
+	/*
+	 *	Signal that there is a new synch command
+	 */
+	rx_writel(dev, InboundDoorbellReg, INBOUNDDOORBELL_0);
+
+	ok = 0;
+	start = jiffies;
+
+	/*
+	 *	Wait up to 30 seconds
+	 */
+	while (time_before(jiffies, start+30*HZ)) 
+	{
+		udelay(5);	/* Delay 5 microseconds to let Mon960 get info. */
+		/*
+		 *	Mon960 will set doorbell0 bit when it has completed the command.
+		 */
+		if (rx_readl(dev, OutboundDoorbellReg) & OUTBOUNDDOORBELL_0) {
+			/*
+			 *	Clear the doorbell.
+			 */
+			rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
+			ok = 1;
+			break;
+		}
+		/*
+		 *	Yield the processor in case we are slow 
+		 */
+		msleep(1);
+	}
+	if (unlikely(ok != 1)) {
+		/*
+		 *	Restore interrupt mask even though we timed out
+		 */
+		aac_adapter_enable_int(dev);
+		return -ETIMEDOUT;
+	}
+	/*
+	 *	Pull the synch status from Mailbox 0.
+	 */
+	if (status)
+		*status = readl(&dev->IndexRegs->Mailbox[0]);
+	if (r1)
+		*r1 = readl(&dev->IndexRegs->Mailbox[1]);
+	if (r2)
+		*r2 = readl(&dev->IndexRegs->Mailbox[2]);
+	if (r3)
+		*r3 = readl(&dev->IndexRegs->Mailbox[3]);
+	if (r4)
+		*r4 = readl(&dev->IndexRegs->Mailbox[4]);
+	/*
+	 *	Clear the synch command doorbell.
+	 */
+	rx_writel(dev, OutboundDoorbellReg, OUTBOUNDDOORBELL_0);
+	/*
+	 *	Restore interrupt mask
+	 */
+	aac_adapter_enable_int(dev);
+	return 0;
+
+}
+
+/**
+ *	aac_rx_interrupt_adapter	-	interrupt adapter
+ *	@dev: Adapter
+ *
+ *	Send an interrupt to the i960 and breakpoint it.
+ */
+
+static void aac_rx_interrupt_adapter(struct aac_dev *dev)
+{
+	rx_sync_cmd(dev, BREAKPOINT_REQUEST, 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
+}
+
+/**
+ *	aac_rx_notify_adapter		-	send an event to the adapter
+ *	@dev: Adapter
+ *	@event: Event to send
+ *
+ *	Notify the i960 that something it probably cares about has
+ *	happened.
+ */
+
+static void aac_rx_notify_adapter(struct aac_dev *dev, u32 event)
+{
+	switch (event) {
+
+	case AdapNormCmdQue:
+		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_1);
+		break;
+	case HostNormRespNotFull:
+		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_4);
+		break;
+	case AdapNormRespQue:
+		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_2);
+		break;
+	case HostNormCmdNotFull:
+		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_3);
+		break;
+	case HostShutdown:
+		break;
+	case FastIo:
+		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_6);
+		break;
+	case AdapPrintfDone:
+		rx_writel(dev, MUnit.IDR,INBOUNDDOORBELL_5);
+		break;
+	default:
+		BUG();
+		break;
+	}
+}
+
+/**
+ *	aac_rx_start_adapter		-	activate adapter
+ *	@dev:	Adapter
+ *
+ *	Start up processing on an i960 based AAC adapter
+ */
+
+static void aac_rx_start_adapter(struct aac_dev *dev)
+{
+	union aac_init *init;
+
+	init = dev->init;
+	init->r7.host_elapsed_seconds = cpu_to_le32(ktime_get_real_seconds());
+	// We can only use a 32 bit address here
+	rx_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa,
+	  0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
+}
+
+/**
+ *	aac_rx_check_health
+ *	@dev: device to check if healthy
+ *
+ *	Will attempt to determine if the specified adapter is alive and
+ *	capable of handling requests, returning 0 if alive.
+ */
+static int aac_rx_check_health(struct aac_dev *dev)
+{
+	u32 status = rx_readl(dev, MUnit.OMRx[0]);
+
+	/*
+	 *	Check to see if the board failed any self tests.
+	 */
+	if (unlikely(status & SELF_TEST_FAILED))
+		return -1;
+	/*
+	 *	Check to see if the board panic'd.
+	 */
+	if (unlikely(status & KERNEL_PANIC)) {
+		char * buffer;
+		struct POSTSTATUS {
+			__le32 Post_Command;
+			__le32 Post_Address;
+		} * post;
+		dma_addr_t paddr, baddr;
+		int ret;
+
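+		/*
+		 * A status of the form 0xBCxxnn00 already carries the panic
+		 * code in bits 23:16, so return it directly instead of
+		 * asking the firmware to post its results.
+		 */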
+		if (likely((status & 0xFF000000L) == 0xBC000000L))
+			return (status >> 16) & 0xFF;
+		buffer = dma_alloc_coherent(&dev->pdev->dev, 512, &baddr,
+					    GFP_KERNEL);
+		ret = -2;
+		if (unlikely(buffer == NULL))
+			return ret;
+		post = dma_alloc_coherent(&dev->pdev->dev,
+					  sizeof(struct POSTSTATUS), &paddr,
+					  GFP_KERNEL);
+		if (unlikely(post == NULL)) {
+			dma_free_coherent(&dev->pdev->dev, 512, buffer, baddr);
+			return ret;
+		}
+		memset(buffer, 0, 512);
+		post->Post_Command = cpu_to_le32(COMMAND_POST_RESULTS);
+		post->Post_Address = cpu_to_le32(baddr);
+		rx_writel(dev, MUnit.IMRx[0], paddr);
+		rx_sync_cmd(dev, COMMAND_POST_RESULTS, baddr, 0, 0, 0, 0, 0,
+		  NULL, NULL, NULL, NULL, NULL);
+		dma_free_coherent(&dev->pdev->dev, sizeof(struct POSTSTATUS),
+				  post, paddr);
+		if (likely((buffer[0] == '0') && ((buffer[1] == 'x') || (buffer[1] == 'X')))) {
+			ret = (hex_to_bin(buffer[2]) << 4) +
+				hex_to_bin(buffer[3]);
+		}
+		dma_free_coherent(&dev->pdev->dev, 512, buffer, baddr);
+		return ret;
+	}
+	/*
+	 *	Wait for the adapter to be up and running.
+	 */
+	if (unlikely(!(status & KERNEL_UP_AND_RUNNING)))
+		return -3;
+	/*
+	 *	Everything is OK
+	 */
+	return 0;
+}
+
+/**
+ *	aac_rx_deliver_producer
+ *	@fib: fib to issue
+ *
+ *	Will send a fib, returning 0 if successful.
+ */
+int aac_rx_deliver_producer(struct fib * fib)
+{
+	struct aac_dev *dev = fib->dev;
+	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
+	u32 Index;
+	unsigned long nointr = 0;
+
+	aac_queue_get( dev, &Index, AdapNormCmdQueue, fib->hw_fib_va, 1, fib, &nointr);
+
+	atomic_inc(&q->numpending);
+	*(q->headers.producer) = cpu_to_le32(Index + 1);
+	if (!(nointr & aac_config.irq_mod))
+		aac_adapter_notify(dev, AdapNormCmdQueue);
+
+	return 0;
+}
+
+/**
+ *	aac_rx_deliver_message
+ *	@fib: fib to issue
+ *
+ *	Will send a fib, returning 0 if successful.
+ */
+static int aac_rx_deliver_message(struct fib * fib)
+{
+	struct aac_dev *dev = fib->dev;
+	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
+	u32 Index;
+	u64 addr;
+	volatile void __iomem *device;
+
+	unsigned long count = 10000000L; /* 50 seconds */
+	atomic_inc(&q->numpending);
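+	/*
+	 * Poll the inbound queue for a free message slot; 0xFFFFFFFF means
+	 * the queue is currently full, so retry for up to ~50 seconds.
+	 */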
+	for (;;) {
+		Index = rx_readl(dev, MUnit.InboundQueue);
+		if (unlikely(Index == 0xFFFFFFFFL))
+			Index = rx_readl(dev, MUnit.InboundQueue);
+		if (likely(Index != 0xFFFFFFFFL))
+			break;
+		if (--count == 0) {
+			atomic_dec(&q->numpending);
+			return -ETIMEDOUT;
+		}
+		udelay(5);
+	}
+	device = dev->base + Index;
+	addr = fib->hw_fib_pa;
+	writel((u32)(addr & 0xffffffff), device);
+	device += sizeof(u32);
+	writel((u32)(addr >> 32), device);
+	device += sizeof(u32);
+	writel(le16_to_cpu(fib->hw_fib_va->header.Size), device);
+	rx_writel(dev, MUnit.InboundQueue, Index);
+	return 0;
+}
+
+/**
+ *	aac_rx_ioremap
+ *	@dev: adapter
+ *	@size: mapping resize request
+ *
+ */
+static int aac_rx_ioremap(struct aac_dev * dev, u32 size)
+{
+	if (!size) {
+		iounmap(dev->regs.rx);
+		return 0;
+	}
+	dev->base = dev->regs.rx = ioremap(dev->base_start, size);
+	if (dev->base == NULL)
+		return -1;
+	dev->IndexRegs = &dev->regs.rx->IndexRegs;
+	return 0;
+}
+
+static int aac_rx_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)
+{
+	u32 var = 0;
+
+	if (!(dev->supplement_adapter_info.supported_options2 &
+	  AAC_OPTION_MU_RESET) || (bled >= 0) || (bled == -2)) {
+		if (bled)
+			printk(KERN_ERR "%s%d: adapter kernel panic'd %x.\n",
+				dev->name, dev->id, bled);
+		else {
+			bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
+			  0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL);
+			if (!bled && (var != 0x00000001) && (var != 0x3803000F))
+				bled = -EINVAL;
+		}
+		if (bled && (bled != -ETIMEDOUT))
+			bled = aac_adapter_sync_cmd(dev, IOP_RESET,
+			  0, 0, 0, 0, 0, 0, &var, NULL, NULL, NULL, NULL);
+
+		if (bled && (bled != -ETIMEDOUT))
+			return -EINVAL;
+	}
+	if (bled && (var == 0x3803000F)) { /* USE_OTHER_METHOD */
+		rx_writel(dev, MUnit.reserved2, 3);
+		msleep(5000); /* Delay 5 seconds */
+		var = 0x00000001;
+	}
+	if (bled && (var != 0x00000001))
+		return -EINVAL;
+	ssleep(5);
+	if (rx_readl(dev, MUnit.OMRx[0]) & KERNEL_PANIC)
+		return -ENODEV;
+	if (startup_timeout < 300)
+		startup_timeout = 300;
+	return 0;
+}
+
+/**
+ *	aac_rx_select_comm	-	Select communications method
+ *	@dev: Adapter
+ *	@comm: communications method
+ */
+
+int aac_rx_select_comm(struct aac_dev *dev, int comm)
+{
+	switch (comm) {
+	case AAC_COMM_PRODUCER:
+		dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_producer;
+		dev->a_ops.adapter_intr = aac_rx_intr_producer;
+		dev->a_ops.adapter_deliver = aac_rx_deliver_producer;
+		break;
+	case AAC_COMM_MESSAGE:
+		dev->a_ops.adapter_enable_int = aac_rx_enable_interrupt_message;
+		dev->a_ops.adapter_intr = aac_rx_intr_message;
+		dev->a_ops.adapter_deliver = aac_rx_deliver_message;
+		break;
+	default:
+		return 1;
+	}
+	return 0;
+}
+
+/**
+ *	aac_rx_init	-	initialize an i960 based AAC card
+ *	@dev: device to configure
+ *
+ *	Allocate and set up resources for the i960 based AAC variants. The 
+ *	device_interface in the commregion will be allocated and linked 
+ *	to the comm region.
+ */
+
+int _aac_rx_init(struct aac_dev *dev)
+{
+	unsigned long start;
+	unsigned long status;
+	int restart = 0;
+	int instance = dev->id;
+	const char * name = dev->name;
+
+	if (aac_adapter_ioremap(dev, dev->base_size)) {
+		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
+		goto error_iounmap;
+	}
+
+	/* Failure to reset here is an option ... */
+	dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
+	dev->a_ops.adapter_enable_int = aac_rx_disable_interrupt;
+	dev->OIMR = status = rx_readb (dev, MUnit.OIMR);
+
+	if (((status & 0x0c) != 0x0c) || dev->init_reset) {
+		dev->init_reset = false;
+		if (!aac_rx_restart_adapter(dev, 0, IOP_HWSOFT_RESET)) {
+			/* Make sure the Hardware FIFO is empty */
+			while ((++restart < 512) &&
+			       (rx_readl(dev, MUnit.OutboundQueue) != 0xFFFFFFFFL));
+		}
+	}
+
+	/*
+	 *	Check to see if the board panic'd while booting.
+	 */
+	status = rx_readl(dev, MUnit.OMRx[0]);
+	if (status & KERNEL_PANIC) {
+		if (aac_rx_restart_adapter(dev,
+			aac_rx_check_health(dev), IOP_HWSOFT_RESET))
+			goto error_iounmap;
+		++restart;
+	}
+	/*
+	 *	Check to see if the board failed any self tests.
+	 */
+	status = rx_readl(dev, MUnit.OMRx[0]);
+	if (status & SELF_TEST_FAILED) {
+		printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
+		goto error_iounmap;
+	}
+	/*
+	 *	Check to see if the monitor panic'd while booting.
+	 */
+	if (status & MONITOR_PANIC) {
+		printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
+		goto error_iounmap;
+	}
+	start = jiffies;
+	/*
+	 *	Wait for the adapter to be up and running. Wait up to 3 minutes
+	 */
+	while (!((status = rx_readl(dev, MUnit.OMRx[0])) & KERNEL_UP_AND_RUNNING))
+	{
+		if ((restart &&
+		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
+		  time_after(jiffies, start+HZ*startup_timeout)) {
+			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n", 
+					dev->name, instance, status);
+			goto error_iounmap;
+		}
+		if (!restart &&
+		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
+		  time_after(jiffies, start + HZ *
+		  ((startup_timeout > 60)
+		    ? (startup_timeout - 60)
+		    : (startup_timeout / 2))))) {
+			if (likely(!aac_rx_restart_adapter(dev,
+				aac_rx_check_health(dev), IOP_HWSOFT_RESET)))
+				start = jiffies;
+			++restart;
+		}
+		msleep(1);
+	}
+	if (restart && aac_commit)
+		aac_commit = 1;
+	/*
+	 *	Fill in the common function dispatch table.
+	 */
+	dev->a_ops.adapter_interrupt = aac_rx_interrupt_adapter;
+	dev->a_ops.adapter_disable_int = aac_rx_disable_interrupt;
+	dev->a_ops.adapter_notify = aac_rx_notify_adapter;
+	dev->a_ops.adapter_sync_cmd = rx_sync_cmd;
+	dev->a_ops.adapter_check_health = aac_rx_check_health;
+	dev->a_ops.adapter_restart = aac_rx_restart_adapter;
+	dev->a_ops.adapter_start = aac_rx_start_adapter;
+
+	/*
+	 *	First clear out all interrupts.  Then enable the ones that we
+	 *	can handle.
+	 */
+	aac_adapter_comm(dev, AAC_COMM_PRODUCER);
+	aac_adapter_disable_int(dev);
+	rx_writel(dev, MUnit.ODR, 0xffffffff);
+	aac_adapter_enable_int(dev);
+
+	if (aac_init_adapter(dev) == NULL)
+		goto error_iounmap;
+	aac_adapter_comm(dev, dev->comm_interface);
+	dev->sync_mode = 0;	/* sync. mode not supported */
+	dev->msi = aac_msi && !pci_enable_msi(dev->pdev);
+	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
+			IRQF_SHARED, "aacraid", dev) < 0) {
+		if (dev->msi)
+			pci_disable_msi(dev->pdev);
+		printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
+			name, instance);
+		goto error_iounmap;
+	}
+	dev->dbg_base = dev->base_start;
+	dev->dbg_base_mapped = dev->base;
+	dev->dbg_size = dev->base_size;
+
+	aac_adapter_enable_int(dev);
+	/*
+	 *	Tell the adapter that all is configured, and it can
+	 *	start accepting requests.
+	 */
+	aac_rx_start_adapter(dev);
+
+	return 0;
+
+error_iounmap:
+
+	return -1;
+}
+
+int aac_rx_init(struct aac_dev *dev)
+{
+	/*
+	 *	Fill in the function dispatch table.
+	 */
+	dev->a_ops.adapter_ioremap = aac_rx_ioremap;
+	dev->a_ops.adapter_comm = aac_rx_select_comm;
+
+	return _aac_rx_init(dev);
+}
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
new file mode 100644
index 0000000..efa96c1
--- /dev/null
+++ b/drivers/scsi/aacraid/sa.c
@@ -0,0 +1,418 @@
+/*
+ *	Adaptec AAC series RAID controller driver
+ *	(c) Copyright 2001 Red Hat Inc.
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *		 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ *  sa.c
+ *
+ * Abstract: Drawbridge specific support functions
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+
+#include <scsi/scsi_host.h>
+
+#include "aacraid.h"
+
+static irqreturn_t aac_sa_intr(int irq, void *dev_id)
+{
+	struct aac_dev *dev = dev_id;
+	unsigned short intstat, mask;
+
+	intstat = sa_readw(dev, DoorbellReg_p);
+	/*
+	 *	Read mask and invert because drawbridge is reversed.
+	 *	This allows us to only service interrupts that have been enabled.
+	 */
+	mask = ~(sa_readw(dev, SaDbCSR.PRISETIRQMASK));
+
+	/* Check to see if this is our interrupt.  If it isn't just return */
+
+	if (intstat & mask) {
+		if (intstat & PrintfReady) {
+			aac_printf(dev, sa_readl(dev, Mailbox5));
+			sa_writew(dev, DoorbellClrReg_p, PrintfReady); /* clear PrintfReady */
+			sa_writew(dev, DoorbellReg_s, PrintfDone);
+		} else if (intstat & DOORBELL_1) {	// dev -> Host Normal Command Ready
+			sa_writew(dev, DoorbellClrReg_p, DOORBELL_1);
+			aac_command_normal(&dev->queues->queue[HostNormCmdQueue]);
+		} else if (intstat & DOORBELL_2) {	// dev -> Host Normal Response Ready
+			sa_writew(dev, DoorbellClrReg_p, DOORBELL_2);
+			aac_response_normal(&dev->queues->queue[HostNormRespQueue]);
+		} else if (intstat & DOORBELL_3) {	// dev -> Host Normal Command Not Full
+			sa_writew(dev, DoorbellClrReg_p, DOORBELL_3);
+		} else if (intstat & DOORBELL_4) {	// dev -> Host Normal Response Not Full
+			sa_writew(dev, DoorbellClrReg_p, DOORBELL_4);
+		}
+		return IRQ_HANDLED;
+	}
+	return IRQ_NONE;
+}
+
+/**
+ *	aac_sa_disable_interrupt	-	disable interrupt
+ *	@dev: Which adapter to enable.
+ */
+
+static void aac_sa_disable_interrupt (struct aac_dev *dev)
+{
+	sa_writew(dev, SaDbCSR.PRISETIRQMASK, 0xffff);
+}
+
+/**
+ *	aac_sa_enable_interrupt	-	enable interrupt
+ *	@dev: Which adapter to enable.
+ */
+
+static void aac_sa_enable_interrupt (struct aac_dev *dev)
+{
+	sa_writew(dev, SaDbCSR.PRICLEARIRQMASK, (PrintfReady | DOORBELL_1 |
+				DOORBELL_2 | DOORBELL_3 | DOORBELL_4));
+}
+
+/**
+ *	aac_sa_notify_adapter		-	handle adapter notification
+ *	@dev:	Adapter that notification is for
+ *	@event:	Event to notify
+ *
+ *	Notify the adapter of an event
+ */
+ 
+static void aac_sa_notify_adapter(struct aac_dev *dev, u32 event)
+{
+	switch (event) {
+
+	case AdapNormCmdQue:
+		sa_writew(dev, DoorbellReg_s,DOORBELL_1);
+		break;
+	case HostNormRespNotFull:
+		sa_writew(dev, DoorbellReg_s,DOORBELL_4);
+		break;
+	case AdapNormRespQue:
+		sa_writew(dev, DoorbellReg_s,DOORBELL_2);
+		break;
+	case HostNormCmdNotFull:
+		sa_writew(dev, DoorbellReg_s,DOORBELL_3);
+		break;
+	case HostShutdown:
+		/*
+		sa_sync_cmd(dev, HOST_CRASHING, 0, 0, 0, 0, 0, 0,
+		NULL, NULL, NULL, NULL, NULL);
+		*/
+		break;
+	case FastIo:
+		sa_writew(dev, DoorbellReg_s,DOORBELL_6);
+		break;
+	case AdapPrintfDone:
+		sa_writew(dev, DoorbellReg_s,DOORBELL_5);
+		break;
+	default:
+		BUG();
+		break;
+	}
+}
+
+
+/**
+ *	sa_sync_cmd	-	send a command and wait
+ *	@dev: Adapter
+ *	@command: Command to execute
+ *	@p1: first parameter
+ *	@ret: adapter status
+ *
+ *	This routine will send a synchronous command to the adapter and wait 
+ *	for its	completion.
+ */
+
+static int sa_sync_cmd(struct aac_dev *dev, u32 command, 
+		u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
+		u32 *ret, u32 *r1, u32 *r2, u32 *r3, u32 *r4)
+{
+	unsigned long start;
+ 	int ok;
+	/*
+	 *	Write the Command into Mailbox 0
+	 */
+	sa_writel(dev, Mailbox0, command);
+	/*
+	 *	Write the parameters into Mailboxes 1 - 4
+	 */
+	sa_writel(dev, Mailbox1, p1);
+	sa_writel(dev, Mailbox2, p2);
+	sa_writel(dev, Mailbox3, p3);
+	sa_writel(dev, Mailbox4, p4);
+
+	/*
+	 *	Clear the synch command doorbell to start on a clean slate.
+	 */
+	sa_writew(dev, DoorbellClrReg_p, DOORBELL_0);
+	/*
+	 *	Signal that there is a new synch command
+	 */
+	sa_writew(dev, DoorbellReg_s, DOORBELL_0);
+
+	ok = 0;
+	start = jiffies;
+
+	while(time_before(jiffies, start+30*HZ))
+	{
+		/*
+		 *	Delay 5uS so that the monitor gets access
+		 */
+		udelay(5);
+		/*
+		 *	Mon110 will set doorbell0 bit when it has 
+		 *	completed the command.
+		 */
+		if (sa_readw(dev, DoorbellReg_p) & DOORBELL_0) {
+			ok = 1;
+			break;
+		}
+		msleep(1);
+	}
+
+	if (ok != 1)
+		return -ETIMEDOUT;
+	/*
+	 *	Clear the synch command doorbell.
+	 */
+	sa_writew(dev, DoorbellClrReg_p, DOORBELL_0);
+	/*
+	 *	Pull the synch status from Mailbox 0.
+	 */
+	if (ret)
+		*ret = sa_readl(dev, Mailbox0);
+	if (r1)
+		*r1 = sa_readl(dev, Mailbox1);
+	if (r2)
+		*r2 = sa_readl(dev, Mailbox2);
+	if (r3)
+		*r3 = sa_readl(dev, Mailbox3);
+	if (r4)
+		*r4 = sa_readl(dev, Mailbox4);
+	return 0;
+}
+
+/**
+ *	aac_sa_interrupt_adapter	-	interrupt an adapter
+ *	@dev: Which adapter to enable.
+ *
+ *	Breakpoint an adapter.
+ */
+ 
+static void aac_sa_interrupt_adapter (struct aac_dev *dev)
+{
+	sa_sync_cmd(dev, BREAKPOINT_REQUEST, 0, 0, 0, 0, 0, 0,
+			NULL, NULL, NULL, NULL, NULL);
+}
+
+/**
+ *	aac_sa_start_adapter		-	activate adapter
+ *	@dev:	Adapter
+ *
+ *	Start up processing on an ARM based AAC adapter
+ */
+
+static void aac_sa_start_adapter(struct aac_dev *dev)
+{
+	union aac_init *init;
+	/*
+	 * Fill in the remaining pieces of the init.
+	 */
+	init = dev->init;
+	init->r7.host_elapsed_seconds = cpu_to_le32(ktime_get_real_seconds());
+	/* We can only use a 32 bit address here */
+	sa_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, 
+			(u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0,
+			NULL, NULL, NULL, NULL, NULL);
+}
+
+static int aac_sa_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)
+{
+	return -EINVAL;
+}
+
+/**
+ *	aac_sa_check_health
+ *	@dev: device to check if healthy
+ *
+ *	Will attempt to determine if the specified adapter is alive and
+ *	capable of handling requests, returning 0 if alive.
+ */
+static int aac_sa_check_health(struct aac_dev *dev)
+{
+	long status = sa_readl(dev, Mailbox7);
+
+	/*
+	 *	Check to see if the board failed any self tests.
+	 */
+	if (status & SELF_TEST_FAILED)
+		return -1;
+	/*
+	 *	Check to see if the board panic'd while booting.
+	 */
+	if (status & KERNEL_PANIC)
+		return -2;
+	/*
+	 *	Wait for the adapter to be up and running. Wait up to 3 minutes
+	 */
+	if (!(status & KERNEL_UP_AND_RUNNING))
+		return -3;
+	/*
+	 *	Everything is OK
+	 */
+	return 0;
+}
+
+/**
+ *	aac_sa_ioremap
+ *	@dev: adapter
+ *	@size: mapping resize request
+ *
+ */
+static int aac_sa_ioremap(struct aac_dev * dev, u32 size)
+{
+	if (!size) {
+		iounmap(dev->regs.sa);
+		return 0;
+	}
+	dev->base = dev->regs.sa = ioremap(dev->base_start, size);
+	return (dev->base == NULL) ? -1 : 0;
+}
+
+/**
+ *	aac_sa_init	-	initialize an ARM based AAC card
+ *	@dev: device to configure
+ *
+ *	Allocate and set up resources for the ARM based AAC variants. The 
+ *	device_interface in the commregion will be allocated and linked 
+ *	to the comm region.
+ */
+
+int aac_sa_init(struct aac_dev *dev)
+{
+	unsigned long start;
+	unsigned long status;
+	int instance;
+	const char *name;
+
+	instance = dev->id;
+	name     = dev->name;
+
+	/*
+	 *	Fill in the function dispatch table.
+	 */
+
+	dev->a_ops.adapter_interrupt = aac_sa_interrupt_adapter;
+	dev->a_ops.adapter_disable_int = aac_sa_disable_interrupt;
+	dev->a_ops.adapter_enable_int = aac_sa_enable_interrupt;
+	dev->a_ops.adapter_notify = aac_sa_notify_adapter;
+	dev->a_ops.adapter_sync_cmd = sa_sync_cmd;
+	dev->a_ops.adapter_check_health = aac_sa_check_health;
+	dev->a_ops.adapter_restart = aac_sa_restart_adapter;
+	dev->a_ops.adapter_start = aac_sa_start_adapter;
+	dev->a_ops.adapter_intr = aac_sa_intr;
+	dev->a_ops.adapter_deliver = aac_rx_deliver_producer;
+	dev->a_ops.adapter_ioremap = aac_sa_ioremap;
+
+	if (aac_sa_ioremap(dev, dev->base_size)) {
+		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
+		goto error_iounmap;
+	}
+
+	/*
+	 *	Check to see if the board failed any self tests.
+	 */
+	if (sa_readl(dev, Mailbox7) & SELF_TEST_FAILED) {
+		printk(KERN_WARNING "%s%d: adapter self-test failed.\n", name, instance);
+		goto error_iounmap;
+	}
+	/*
+	 *	Check to see if the board panic'd while booting.
+	 */
+	if (sa_readl(dev, Mailbox7) & KERNEL_PANIC) {
+		printk(KERN_WARNING "%s%d: adapter kernel panic'd.\n", name, instance);
+		goto error_iounmap;
+	}
+	start = jiffies;
+	/*
+	 *	Wait for the adapter to be up and running. Wait up to 3 minutes.
+	 */
+	while (!(sa_readl(dev, Mailbox7) & KERNEL_UP_AND_RUNNING)) {
+		if (time_after(jiffies, start+startup_timeout*HZ)) {
+			status = sa_readl(dev, Mailbox7);
+			printk(KERN_WARNING "%s%d: adapter kernel failed to start, init status = %lx.\n", 
+					name, instance, status);
+			goto error_iounmap;
+		}
+		msleep(1);
+	}
+
+	/*
+	 *	First clear out all interrupts.  Then enable the ones that
+	 *	we can handle.
+	 */
+	aac_adapter_disable_int(dev);
+	aac_adapter_enable_int(dev);
+
+	if (aac_init_adapter(dev) == NULL)
+		goto error_irq;
+	dev->sync_mode = 0;	/* sync. mode not supported */
+	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
+			IRQF_SHARED, "aacraid", (void *)dev) < 0) {
+		printk(KERN_WARNING "%s%d: Interrupt unavailable.\n",
+			name, instance);
+		goto error_iounmap;
+	}
+	dev->dbg_base = dev->base_start;
+	dev->dbg_base_mapped = dev->base;
+	dev->dbg_size = dev->base_size;
+
+	aac_adapter_enable_int(dev);
+
+	/*
+	 *	Tell the adapter that all is configured, and it can start
+	 *	accepting requests.
+	 */
+	aac_sa_start_adapter(dev);
+	return 0;
+
+error_irq:
+	aac_sa_disable_interrupt(dev);
+	free_irq(dev->pdev->irq, (void *)dev);
+
+error_iounmap:
+
+	return -1;
+}
+
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
new file mode 100644
index 0000000..7a51ccf
--- /dev/null
+++ b/drivers/scsi/aacraid/src.c
@@ -0,0 +1,1429 @@
+/*
+ *	Adaptec AAC series RAID controller driver
+ *	(c) Copyright 2001 Red Hat Inc.
+ *
+ * based on the old aacraid driver that is..
+ * Adaptec aacraid device driver for Linux.
+ *
+ * Copyright (c) 2000-2010 Adaptec, Inc.
+ *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
+ *		 2016-2017 Microsemi Corp. (aacraid@microsemi.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Module Name:
+ *  src.c
+ *
+ * Abstract: Hardware Device Interface for PMC SRC based controllers
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+#include <scsi/scsi_host.h>
+
+#include "aacraid.h"
+
+static int aac_src_get_sync_status(struct aac_dev *dev);
+
+static irqreturn_t aac_src_intr_message(int irq, void *dev_id)
+{
+	struct aac_msix_ctx *ctx;
+	struct aac_dev *dev;
+	unsigned long bellbits, bellbits_shifted;
+	int vector_no;
+	int isFastResponse, mode;
+	u32 index, handle;
+
+	ctx = (struct aac_msix_ctx *)dev_id;
+	dev = ctx->dev;
+	vector_no = ctx->vector_no;
+
+	if (dev->msi_enabled) {
+		mode = AAC_INT_MODE_MSI;
+		if (vector_no == 0) {
+			bellbits = src_readl(dev, MUnit.ODR_MSI);
+			if (bellbits & 0x40000)
+				mode |= AAC_INT_MODE_AIF;
+			if (bellbits & 0x1000)
+				mode |= AAC_INT_MODE_SYNC;
+		}
+	} else {
+		mode = AAC_INT_MODE_INTX;
+		bellbits = src_readl(dev, MUnit.ODR_R);
+		if (bellbits & PmDoorBellResponseSent) {
+			bellbits = PmDoorBellResponseSent;
+			src_writel(dev, MUnit.ODR_C, bellbits);
+			src_readl(dev, MUnit.ODR_C);
+		} else {
+			bellbits_shifted = (bellbits >> SRC_ODR_SHIFT);
+			src_writel(dev, MUnit.ODR_C, bellbits);
+			src_readl(dev, MUnit.ODR_C);
+
+			if (bellbits_shifted & DoorBellAifPending)
+				mode |= AAC_INT_MODE_AIF;
+			else if (bellbits_shifted & OUTBOUNDDOORBELL_0)
+				mode |= AAC_INT_MODE_SYNC;
+		}
+	}
+
+	if (mode & AAC_INT_MODE_SYNC) {
+		unsigned long sflags;
+		struct list_head *entry;
+		int send_it = 0;
+		extern int aac_sync_mode;
+
+		if (!aac_sync_mode && !dev->msi_enabled) {
+			src_writel(dev, MUnit.ODR_C, bellbits);
+			src_readl(dev, MUnit.ODR_C);
+		}
+
+		if (dev->sync_fib) {
+			if (dev->sync_fib->callback)
+				dev->sync_fib->callback(dev->sync_fib->callback_data,
+					dev->sync_fib);
+			spin_lock_irqsave(&dev->sync_fib->event_lock, sflags);
+			if (dev->sync_fib->flags & FIB_CONTEXT_FLAG_WAIT) {
+				dev->management_fib_count--;
+				up(&dev->sync_fib->event_wait);
+			}
+			spin_unlock_irqrestore(&dev->sync_fib->event_lock,
+						sflags);
+			spin_lock_irqsave(&dev->sync_lock, sflags);
+			if (!list_empty(&dev->sync_fib_list)) {
+				entry = dev->sync_fib_list.next;
+				dev->sync_fib = list_entry(entry,
+							   struct fib,
+							   fiblink);
+				list_del(entry);
+				send_it = 1;
+			} else {
+				dev->sync_fib = NULL;
+			}
+			spin_unlock_irqrestore(&dev->sync_lock, sflags);
+			if (send_it) {
+				aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
+					(u32)dev->sync_fib->hw_fib_pa,
+					0, 0, 0, 0, 0,
+					NULL, NULL, NULL, NULL, NULL);
+			}
+		}
+		if (!dev->msi_enabled)
+			mode = 0;
+
+	}
+
+	if (mode & AAC_INT_MODE_AIF) {
+		/* handle AIF */
+		if (dev->sa_firmware) {
+			u32 events = src_readl(dev, MUnit.SCR0);
+
+			aac_intr_normal(dev, events, 1, 0, NULL);
+			writel(events, &dev->IndexRegs->Mailbox[0]);
+			src_writel(dev, MUnit.IDR, 1 << 23);
+		} else {
+			if (dev->aif_thread && dev->fsa_dev)
+				aac_intr_normal(dev, 0, 2, 0, NULL);
+		}
+		if (dev->msi_enabled)
+			aac_src_access_devreg(dev, AAC_CLEAR_AIF_BIT);
+		mode = 0;
+	}
+
+	if (mode) {
+		index = dev->host_rrq_idx[vector_no];
+
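+		/*
+		 * Drain this vector's region of the host response ring,
+		 * handing each decoded fib handle to aac_intr_normal()
+		 * until an empty (zero) entry is found.
+		 */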
+		for (;;) {
+			isFastResponse = 0;
+			/* remove toggle bit (31) */
+			handle = le32_to_cpu((dev->host_rrq[index])
+				& 0x7fffffff);
+			/* check fast response bits (30, 1) */
+			if (handle & 0x40000000)
+				isFastResponse = 1;
+			handle &= 0x0000ffff;
+			if (handle == 0)
+				break;
+			handle >>= 2;
+			if (dev->msi_enabled && dev->max_msix > 1)
+				atomic_dec(&dev->rrq_outstanding[vector_no]);
+			aac_intr_normal(dev, handle, 0, isFastResponse, NULL);
+			dev->host_rrq[index++] = 0;
+			if (index == (vector_no + 1) * dev->vector_cap)
+				index = vector_no * dev->vector_cap;
+			dev->host_rrq_idx[vector_no] = index;
+		}
+		mode = 0;
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ *	aac_src_disable_interrupt	-	Disable interrupts
+ *	@dev: Adapter
+ */
+
+static void aac_src_disable_interrupt(struct aac_dev *dev)
+{
+	src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);
+}
+
+/**
+ *	aac_src_enable_interrupt_message	-	Enable interrupts
+ *	@dev: Adapter
+ */
+
+static void aac_src_enable_interrupt_message(struct aac_dev *dev)
+{
+	aac_src_access_devreg(dev, AAC_ENABLE_INTERRUPT);
+}
+
+/**
+ *	src_sync_cmd	-	send a command and wait
+ *	@dev: Adapter
+ *	@command: Command to execute
+ *	@p1: first parameter
+ *	@ret: adapter status
+ *
+ *	This routine will send a synchronous command to the adapter and wait
+ *	for its	completion.
+ */
+
+static int src_sync_cmd(struct aac_dev *dev, u32 command,
+	u32 p1, u32 p2, u32 p3, u32 p4, u32 p5, u32 p6,
+	u32 *status, u32 * r1, u32 * r2, u32 * r3, u32 * r4)
+{
+	unsigned long start;
+	unsigned long delay;
+	int ok;
+
+	/*
+	 *	Write the command into Mailbox 0
+	 */
+	writel(command, &dev->IndexRegs->Mailbox[0]);
+	/*
+	 *	Write the parameters into Mailboxes 1 - 6
+	 */
+	writel(p1, &dev->IndexRegs->Mailbox[1]);
+	writel(p2, &dev->IndexRegs->Mailbox[2]);
+	writel(p3, &dev->IndexRegs->Mailbox[3]);
+	writel(p4, &dev->IndexRegs->Mailbox[4]);
+
+	/*
+	 *	Clear the synch command doorbell to start on a clean slate.
+	 */
+	if (!dev->msi_enabled)
+		src_writel(dev,
+			   MUnit.ODR_C,
+			   OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
+
+	/*
+	 *	Disable doorbell interrupts
+	 */
+	src_writel(dev, MUnit.OIMR, dev->OIMR = 0xffffffff);
+
+	/*
+	 *	Force the completion of the mask register write before issuing
+	 *	the interrupt.
+	 */
+	src_readl(dev, MUnit.OIMR);
+
+	/*
+	 *	Signal that there is a new synch command
+	 */
+	src_writel(dev, MUnit.IDR, INBOUNDDOORBELL_0 << SRC_IDR_SHIFT);
+
+	if ((!dev->sync_mode || command != SEND_SYNCHRONOUS_FIB) &&
+		!dev->in_soft_reset) {
+		ok = 0;
+		start = jiffies;
+
+		if (command == IOP_RESET_ALWAYS) {
+			/* Wait up to 10 sec */
+			delay = 10*HZ;
+		} else {
+			/* Wait up to 5 minutes */
+			delay = 300*HZ;
+		}
+		while (time_before(jiffies, start+delay)) {
+			udelay(5);	/* Delay 5 microseconds to let Mon960 get info. */
+			/*
+			 *	Mon960 will set doorbell0 bit when it has completed the command.
+			 */
+			if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) {
+				/*
+				 *	Clear the doorbell.
+				 */
+				if (dev->msi_enabled)
+					aac_src_access_devreg(dev,
+						AAC_CLEAR_SYNC_BIT);
+				else
+					src_writel(dev,
+						MUnit.ODR_C,
+						OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
+				ok = 1;
+				break;
+			}
+			/*
+			 *	Yield the processor in case we are slow
+			 */
+			msleep(1);
+		}
+		if (unlikely(ok != 1)) {
+			/*
+			 *	Restore interrupt mask even though we timed out
+			 */
+			aac_adapter_enable_int(dev);
+			return -ETIMEDOUT;
+		}
+		/*
+		 *	Pull the synch status from Mailbox 0.
+		 */
+		if (status)
+			*status = readl(&dev->IndexRegs->Mailbox[0]);
+		if (r1)
+			*r1 = readl(&dev->IndexRegs->Mailbox[1]);
+		if (r2)
+			*r2 = readl(&dev->IndexRegs->Mailbox[2]);
+		if (r3)
+			*r3 = readl(&dev->IndexRegs->Mailbox[3]);
+		if (r4)
+			*r4 = readl(&dev->IndexRegs->Mailbox[4]);
+		if (command == GET_COMM_PREFERRED_SETTINGS)
+			dev->max_msix =
+				readl(&dev->IndexRegs->Mailbox[5]) & 0xFFFF;
+		/*
+		 *	Clear the synch command doorbell.
+		 */
+		if (!dev->msi_enabled)
+			src_writel(dev,
+				MUnit.ODR_C,
+				OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
+	}
+
+	/*
+	 *	Restore interrupt mask
+	 */
+	aac_adapter_enable_int(dev);
+	return 0;
+}
+
+/**
+ *	aac_src_interrupt_adapter	-	interrupt adapter
+ *	@dev: Adapter
+ *
+ *	Send an interrupt to the i960 and breakpoint it.
+ */
+
+static void aac_src_interrupt_adapter(struct aac_dev *dev)
+{
+	src_sync_cmd(dev, BREAKPOINT_REQUEST,
+		0, 0, 0, 0, 0, 0,
+		NULL, NULL, NULL, NULL, NULL);
+}
+
+/**
+ *	aac_src_notify_adapter		-	send an event to the adapter
+ *	@dev: Adapter
+ *	@event: Event to send
+ *
+ *	Notify the i960 that something it probably cares about has
+ *	happened.
+ */
+
+static void aac_src_notify_adapter(struct aac_dev *dev, u32 event)
+{
+	switch (event) {
+
+	case AdapNormCmdQue:
+		src_writel(dev, MUnit.ODR_C,
+			INBOUNDDOORBELL_1 << SRC_ODR_SHIFT);
+		break;
+	case HostNormRespNotFull:
+		src_writel(dev, MUnit.ODR_C,
+			INBOUNDDOORBELL_4 << SRC_ODR_SHIFT);
+		break;
+	case AdapNormRespQue:
+		src_writel(dev, MUnit.ODR_C,
+			INBOUNDDOORBELL_2 << SRC_ODR_SHIFT);
+		break;
+	case HostNormCmdNotFull:
+		src_writel(dev, MUnit.ODR_C,
+			INBOUNDDOORBELL_3 << SRC_ODR_SHIFT);
+		break;
+	case FastIo:
+		src_writel(dev, MUnit.ODR_C,
+			INBOUNDDOORBELL_6 << SRC_ODR_SHIFT);
+		break;
+	case AdapPrintfDone:
+		src_writel(dev, MUnit.ODR_C,
+			INBOUNDDOORBELL_5 << SRC_ODR_SHIFT);
+		break;
+	default:
+		BUG();
+		break;
+	}
+}
+
+/**
+ *	aac_src_start_adapter		-	activate adapter
+ *	@dev:	Adapter
+ *
+ *	Start up processing on an i960 based AAC adapter
+ */
+
+static void aac_src_start_adapter(struct aac_dev *dev)
+{
+	union aac_init *init;
+	int i;
+
+	 /* reset host_rrq_idx first */
+	for (i = 0; i < dev->max_msix; i++) {
+		dev->host_rrq_idx[i] = i * dev->vector_cap;
+		atomic_set(&dev->rrq_outstanding[i], 0);
+	}
+	atomic_set(&dev->msix_counter, 0);
+	dev->fibs_pushed_no = 0;
+
+	init = dev->init;
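+	/*
+	 * Hand the firmware the physical address of the init structure:
+	 * MESSAGE_TYPE3 (r8) firmware takes a 64-bit address plus the
+	 * structure size, older firmware takes a 32-bit address only.
+	 */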
+	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
+		init->r8.host_elapsed_seconds =
+			cpu_to_le32(ktime_get_real_seconds());
+		src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
+			lower_32_bits(dev->init_pa),
+			upper_32_bits(dev->init_pa),
+			sizeof(struct _r8) +
+			(AAC_MAX_HRRQ - 1) * sizeof(struct _rrq),
+			0, 0, 0, NULL, NULL, NULL, NULL, NULL);
+	} else {
+		init->r7.host_elapsed_seconds =
+			cpu_to_le32(ktime_get_real_seconds());
+		// We can only use a 32 bit address here
+		src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
+			(u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0,
+			NULL, NULL, NULL, NULL, NULL);
+	}
+
+}
+
+/**
+ *	aac_src_check_health
+ *	@dev: device to check if healthy
+ *
+ *	Will attempt to determine if the specified adapter is alive and
+ *	capable of handling requests, returning 0 if alive.
+ */
+static int aac_src_check_health(struct aac_dev *dev)
+{
+	u32 status = src_readl(dev, MUnit.OMR);
+
+	/*
+	 *	Check to see if the board panic'd.
+	 */
+	if (unlikely(status & KERNEL_PANIC))
+		goto err_blink;
+
+	/*
+	 *	Check to see if the board failed any self tests.
+	 */
+	if (unlikely(status & SELF_TEST_FAILED))
+		goto err_out;
+
+	/*
+	 *	Check to see if the monitor panic'd.
+	 */
+	if (unlikely(status & MONITOR_PANIC))
+		goto err_out;
+
+	/*
+	 *	Wait for the adapter to be up and running.
+	 */
+	if (unlikely(!(status & KERNEL_UP_AND_RUNNING)))
+		return -3;
+	/*
+	 *	Everything is OK
+	 */
+	return 0;
+
+err_out:
+	return -1;
+
+err_blink:
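+	/* Return the BlinkLED code held in bits 23:16 of the OMR */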
+	return (status >> 16) & 0xFF;
+}
+
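+/*
+ * aac_get_vector - pick the next MSI-X vector in round-robin order so that
+ * outstanding requests are spread evenly across the reply queues.
+ */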
+static inline u32 aac_get_vector(struct aac_dev *dev)
+{
+	return atomic_inc_return(&dev->msix_counter)%dev->max_msix;
+}
+
+/**
+ *	aac_src_deliver_message
+ *	@fib: fib to issue
+ *
+ *	Will send a fib, returning 0 if successful.
+ */
+static int aac_src_deliver_message(struct fib *fib)
+{
+	struct aac_dev *dev = fib->dev;
+	struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
+	u32 fibsize;
+	dma_addr_t address;
+	struct aac_fib_xporthdr *pFibX;
+	int native_hba;
+#if !defined(writeq)
+	unsigned long flags;
+#endif
+
+	u16 vector_no;
+
+	atomic_inc(&q->numpending);
+
+	native_hba = (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) ? 1 : 0;
+
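+	/*
+	 * With multiple MSI-X vectors enabled, steer the reply for this
+	 * request to a specific vector: native HBA commands carry it in
+	 * reply_qid/request_id, legacy FIBs in the upper 16 bits of the
+	 * FIB Handle.  AIF requests always complete on vector 0.
+	 */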
+	if (dev->msi_enabled && dev->max_msix > 1 &&
+		(native_hba || fib->hw_fib_va->header.Command != AifRequest)) {
+
+		if ((dev->comm_interface == AAC_COMM_MESSAGE_TYPE3)
+			&& dev->sa_firmware)
+			vector_no = aac_get_vector(dev);
+		else
+			vector_no = fib->vector_no;
+
+		if (native_hba) {
+			if (fib->flags & FIB_CONTEXT_FLAG_NATIVE_HBA_TMF) {
+				struct aac_hba_tm_req *tm_req;
+
+				tm_req = (struct aac_hba_tm_req *)
+						fib->hw_fib_va;
+				if (tm_req->iu_type ==
+					HBA_IU_TYPE_SCSI_TM_REQ) {
+					((struct aac_hba_tm_req *)
+						fib->hw_fib_va)->reply_qid
+							= vector_no;
+					((struct aac_hba_tm_req *)
+						fib->hw_fib_va)->request_id
+							+= (vector_no << 16);
+				} else {
+					((struct aac_hba_reset_req *)
+						fib->hw_fib_va)->reply_qid
+							= vector_no;
+					((struct aac_hba_reset_req *)
+						fib->hw_fib_va)->request_id
+							+= (vector_no << 16);
+				}
+			} else {
+				((struct aac_hba_cmd_req *)
+					fib->hw_fib_va)->reply_qid
+						= vector_no;
+				((struct aac_hba_cmd_req *)
+					fib->hw_fib_va)->request_id
+						+= (vector_no << 16);
+			}
+		} else {
+			fib->hw_fib_va->header.Handle += (vector_no << 16);
+		}
+	} else {
+		vector_no = 0;
+	}
+
+	atomic_inc(&dev->rrq_outstanding[vector_no]);
+
+	if (native_hba) {
+		address = fib->hw_fib_pa;
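+		/*
+		 * The low bits of the inbound queue entry encode the command
+		 * size in 128-byte units minus one, capped at 31.
+		 */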
+		fibsize = (fib->hbacmd_size + 127) / 128 - 1;
+		if (fibsize > 31)
+			fibsize = 31;
+		address |= fibsize;
+#if defined(writeq)
+		src_writeq(dev, MUnit.IQN_L, (u64)address);
+#else
+		spin_lock_irqsave(&fib->dev->iq_lock, flags);
+		src_writel(dev, MUnit.IQN_H,
+			upper_32_bits(address) & 0xffffffff);
+		src_writel(dev, MUnit.IQN_L, address & 0xffffffff);
+		spin_unlock_irqrestore(&fib->dev->iq_lock, flags);
+#endif
+	} else {
+		if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
+			dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
+			/* Calculate the fibsize field: size in 128-byte units, minus one */
+			fibsize = (le16_to_cpu(fib->hw_fib_va->header.Size)
+				+ 127) / 128 - 1;
+			/* New FIB header, 32-bit */
+			address = fib->hw_fib_pa;
+			fib->hw_fib_va->header.StructType = FIB_MAGIC2;
+			fib->hw_fib_va->header.SenderFibAddress =
+				cpu_to_le32((u32)address);
+			fib->hw_fib_va->header.u.TimeStamp = 0;
+			WARN_ON(upper_32_bits(address) != 0L);
+		} else {
+			/* Calculate the fibsize field: size in 128-byte units, minus one */
+			fibsize = (sizeof(struct aac_fib_xporthdr) +
+				le16_to_cpu(fib->hw_fib_va->header.Size)
+				+ 127) / 128 - 1;
+			/* Fill XPORT header */
+			pFibX = (struct aac_fib_xporthdr *)
+				((unsigned char *)fib->hw_fib_va -
+				sizeof(struct aac_fib_xporthdr));
+			pFibX->Handle = fib->hw_fib_va->header.Handle;
+			pFibX->HostAddress =
+				cpu_to_le64((u64)fib->hw_fib_pa);
+			pFibX->Size = cpu_to_le32(
+				le16_to_cpu(fib->hw_fib_va->header.Size));
+			address = fib->hw_fib_pa -
+				(u64)sizeof(struct aac_fib_xporthdr);
+		}
+		if (fibsize > 31)
+			fibsize = 31;
+		address |= fibsize;
+
+#if defined(writeq)
+		src_writeq(dev, MUnit.IQ_L, (u64)address);
+#else
+		spin_lock_irqsave(&fib->dev->iq_lock, flags);
+		src_writel(dev, MUnit.IQ_H,
+			upper_32_bits(address) & 0xffffffff);
+		src_writel(dev, MUnit.IQ_L, address & 0xffffffff);
+		spin_unlock_irqrestore(&fib->dev->iq_lock, flags);
+#endif
+	}
+	return 0;
+}
+
+/**
+ *	aac_src_ioremap
+ *	@dev: device to map
+ *	@size: mapping resize request
+ *
+ */
+static int aac_src_ioremap(struct aac_dev *dev, u32 size)
+{
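+	/* A size of zero requests teardown of the existing BAR mappings */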
+	if (!size) {
+		iounmap(dev->regs.src.bar1);
+		dev->regs.src.bar1 = NULL;
+		iounmap(dev->regs.src.bar0);
+		dev->base = dev->regs.src.bar0 = NULL;
+		return 0;
+	}
+	dev->regs.src.bar1 = ioremap(pci_resource_start(dev->pdev, 2),
+		AAC_MIN_SRC_BAR1_SIZE);
+	dev->base = NULL;
+	if (dev->regs.src.bar1 == NULL)
+		return -1;
+	dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
+	if (dev->base == NULL) {
+		iounmap(dev->regs.src.bar1);
+		dev->regs.src.bar1 = NULL;
+		return -1;
+	}
+	dev->IndexRegs = &((struct src_registers __iomem *)
+		dev->base)->u.tupelo.IndexRegs;
+	return 0;
+}
+
+/**
+ *  aac_srcv_ioremap
+ *	@dev: device to map
+ *	@size: mapping resize request
+ *
+ */
+static int aac_srcv_ioremap(struct aac_dev *dev, u32 size)
+{
+	if (!size) {
+		iounmap(dev->regs.src.bar0);
+		dev->base = dev->regs.src.bar0 = NULL;
+		return 0;
+	}
+
+	dev->regs.src.bar1 =
+	ioremap(pci_resource_start(dev->pdev, 2), AAC_MIN_SRCV_BAR1_SIZE);
+	dev->base = NULL;
+	if (dev->regs.src.bar1 == NULL)
+		return -1;
+	dev->base = dev->regs.src.bar0 = ioremap(dev->base_start, size);
+	if (dev->base == NULL) {
+		iounmap(dev->regs.src.bar1);
+		dev->regs.src.bar1 = NULL;
+		return -1;
+	}
+	dev->IndexRegs = &((struct src_registers __iomem *)
+		dev->base)->u.denali.IndexRegs;
+	return 0;
+}
+
+void aac_set_intx_mode(struct aac_dev *dev)
+{
+	if (dev->msi_enabled) {
+		aac_src_access_devreg(dev, AAC_ENABLE_INTX);
+		dev->msi_enabled = 0;
+		msleep(5000); /* Delay 5 seconds */
+	}
+}
+
+static void aac_clear_omr(struct aac_dev *dev)
+{
+	u32 omr_value = 0;
+
+	omr_value = src_readl(dev, MUnit.OMR);
+
+	/*
+	 * Check for PCI Errors or Kernel Panic
+	 */
+	if ((omr_value == INVALID_OMR) || (omr_value & KERNEL_PANIC))
+		omr_value = 0;
+
+	/*
+	 * Preserve MSIX Value if any
+	 */
+	src_writel(dev, MUnit.OMR, omr_value & AAC_INT_MODE_MSIX);
+	src_readl(dev, MUnit.OMR);
+}
+
+static void aac_dump_fw_fib_iop_reset(struct aac_dev *dev)
+{
+	__le32 supported_options3;
+
+	if (!aac_fib_dump)
+		return;
+
+	supported_options3  = dev->supplement_adapter_info.supported_options3;
+	if (!(supported_options3 & AAC_OPTION_SUPPORTED3_IOP_RESET_FIB_DUMP))
+		return;
+
+	aac_adapter_sync_cmd(dev, IOP_RESET_FW_FIB_DUMP,
+			0, 0, 0,  0, 0, 0, NULL, NULL, NULL, NULL, NULL);
+}
+
+static bool aac_is_ctrl_up_and_running(struct aac_dev *dev)
+{
+	bool ctrl_up = true;
+	unsigned long status, start;
+	bool is_up = false;
+
+	start = jiffies;
+	do {
+		schedule();
+		status = src_readl(dev, MUnit.OMR);
+
+		if (status == 0xffffffff)
+			status = 0;
+
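+		/* Restart the timeout while the firmware reports it is booting */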
+		if (status & KERNEL_BOOTING) {
+			start = jiffies;
+			continue;
+		}
+
+		if (time_after(jiffies, start+HZ*SOFT_RESET_TIME)) {
+			ctrl_up = false;
+			break;
+		}
+
+		is_up = status & KERNEL_UP_AND_RUNNING;
+
+	} while (!is_up);
+
+	return ctrl_up;
+}
+
+static void aac_notify_fw_of_iop_reset(struct aac_dev *dev)
+{
+	aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, 0, 0, 0, 0, 0, 0, NULL,
+						NULL, NULL, NULL, NULL);
+}
+
+static void aac_send_iop_reset(struct aac_dev *dev)
+{
+	aac_dump_fw_fib_iop_reset(dev);
+
+	aac_notify_fw_of_iop_reset(dev);
+
+	aac_set_intx_mode(dev);
+
+	aac_clear_omr(dev);
+
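+	/* Kick off the IOP reset through the inbound doorbell register */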
+	src_writel(dev, MUnit.IDR, IOP_SRC_RESET_MASK);
+
+	msleep(5000);
+}
+
+static void aac_send_hardware_soft_reset(struct aac_dev *dev)
+{
+	u_int32_t val;
+
+	aac_clear_omr(dev);
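+	/* Set bit 0 of the soft reset register to request a hardware reset */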
+	val = readl(((char *)(dev->base) + IBW_SWR_OFFSET));
+	val |= 0x01;
+	writel(val, ((char *)(dev->base) + IBW_SWR_OFFSET));
+	msleep_interruptible(20000);
+}
+
+static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)
+{
+	bool is_ctrl_up;
+	int ret = 0;
+
+	if (bled < 0)
+		goto invalid_out;
+
+	if (bled)
+		dev_err(&dev->pdev->dev, "adapter kernel panic'd %x.\n", bled);
+
+	/*
+	 * When there is a BlinkLED, IOP_RESET has no effect
+	 */
+	if (bled >= 2 && dev->sa_firmware && reset_type & HW_IOP_RESET)
+		reset_type &= ~HW_IOP_RESET;
+
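+	/*
+	 * Point adapter_enable_int at the disable routine so interrupts stay
+	 * masked until the reset completes and init restores the real hook.
+	 */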
+	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
+
+	dev_err(&dev->pdev->dev, "Controller reset type is %d\n", reset_type);
+
+	if (reset_type & HW_IOP_RESET) {
+		dev_info(&dev->pdev->dev, "Issuing IOP reset\n");
+		aac_send_iop_reset(dev);
+
+		/*
+		 * Wait here until the controller reports up-and-running or the
+		 * wait times out
+		 */
+		is_ctrl_up = aac_is_ctrl_up_and_running(dev);
+		if (!is_ctrl_up)
+			dev_err(&dev->pdev->dev, "IOP reset failed\n");
+		else {
+			dev_info(&dev->pdev->dev, "IOP reset succeeded\n");
+			goto set_startup;
+		}
+	}
+
+	if (!dev->sa_firmware) {
+		dev_err(&dev->pdev->dev, "ARC Reset attempt failed\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	if (reset_type & HW_SOFT_RESET) {
+		dev_info(&dev->pdev->dev, "Issuing SOFT reset\n");
+		aac_send_hardware_soft_reset(dev);
+		dev->msi_enabled = 0;
+
+		is_ctrl_up = aac_is_ctrl_up_and_running(dev);
+		if (!is_ctrl_up) {
+			dev_err(&dev->pdev->dev, "SOFT reset failed\n");
+			ret = -ENODEV;
+			goto out;
+		} else
+			dev_info(&dev->pdev->dev, "SOFT reset succeeded\n");
+	}
+
+set_startup:
+	if (startup_timeout < 300)
+		startup_timeout = 300;
+
+out:
+	return ret;
+
+invalid_out:
+	if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC)
+		ret = -ENODEV;
+	goto out;
+}
+
+/**
+ *	aac_src_select_comm	-	Select communications method
+ *	@dev: Adapter
+ *	@comm: communications method
+ */
+static int aac_src_select_comm(struct aac_dev *dev, int comm)
+{
+	switch (comm) {
+	case AAC_COMM_MESSAGE:
+		dev->a_ops.adapter_intr = aac_src_intr_message;
+		dev->a_ops.adapter_deliver = aac_src_deliver_message;
+		break;
+	default:
+		return 1;
+	}
+	return 0;
+}
+
+/**
+ *  aac_src_init	-	initialize a Cardinal Frey Bar card
+ *  @dev: device to configure
+ *
+ */
+
+int aac_src_init(struct aac_dev *dev)
+{
+	unsigned long start;
+	unsigned long status;
+	int restart = 0;
+	int instance = dev->id;
+	const char *name = dev->name;
+
+	dev->a_ops.adapter_ioremap = aac_src_ioremap;
+	dev->a_ops.adapter_comm = aac_src_select_comm;
+
+	dev->base_size = AAC_MIN_SRC_BAR0_SIZE;
+	if (aac_adapter_ioremap(dev, dev->base_size)) {
+		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
+		goto error_iounmap;
+	}
+
+	/* Failure to reset here is an option ... */
+	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
+	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
+
+	if (dev->init_reset) {
+		dev->init_reset = false;
+		if (!aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET))
+			++restart;
+	}
+
+	/*
+	 *	Check to see if the board panic'd while booting.
+	 */
+	status = src_readl(dev, MUnit.OMR);
+	if (status & KERNEL_PANIC) {
+		if (aac_src_restart_adapter(dev,
+			aac_src_check_health(dev), IOP_HWSOFT_RESET))
+			goto error_iounmap;
+		++restart;
+	}
+	/*
+	 *	Check to see if the board failed any self tests.
+	 */
+	status = src_readl(dev, MUnit.OMR);
+	if (status & SELF_TEST_FAILED) {
+		printk(KERN_ERR "%s%d: adapter self-test failed.\n",
+			dev->name, instance);
+		goto error_iounmap;
+	}
+	/*
+	 *	Check to see if the monitor panic'd while booting.
+	 */
+	if (status & MONITOR_PANIC) {
+		printk(KERN_ERR "%s%d: adapter monitor panic.\n",
+			dev->name, instance);
+		goto error_iounmap;
+	}
+	start = jiffies;
+	/*
+	 *	Wait for the adapter to be up and running. Wait up to 3 minutes
+	 */
+	while (!((status = src_readl(dev, MUnit.OMR)) &
+		KERNEL_UP_AND_RUNNING)) {
+		if ((restart &&
+		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
+		  time_after(jiffies, start+HZ*startup_timeout)) {
+			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
+					dev->name, instance, status);
+			goto error_iounmap;
+		}
+		if (!restart &&
+		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
+		  time_after(jiffies, start + HZ *
+		  ((startup_timeout > 60)
+		    ? (startup_timeout - 60)
+		    : (startup_timeout / 2))))) {
+			if (likely(!aac_src_restart_adapter(dev,
+				aac_src_check_health(dev), IOP_HWSOFT_RESET)))
+				start = jiffies;
+			++restart;
+		}
+		msleep(1);
+	}
+	if (restart && aac_commit)
+		aac_commit = 1;
+	/*
+	 *	Fill in the common function dispatch table.
+	 */
+	dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
+	dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
+	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
+	dev->a_ops.adapter_notify = aac_src_notify_adapter;
+	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
+	dev->a_ops.adapter_check_health = aac_src_check_health;
+	dev->a_ops.adapter_restart = aac_src_restart_adapter;
+	dev->a_ops.adapter_start = aac_src_start_adapter;
+
+	/*
+	 *	First clear out all interrupts.  Then enable the ones that we
+	 *	can handle.
+	 */
+	aac_adapter_comm(dev, AAC_COMM_MESSAGE);
+	aac_adapter_disable_int(dev);
+	src_writel(dev, MUnit.ODR_C, 0xffffffff);
+	aac_adapter_enable_int(dev);
+
+	if (aac_init_adapter(dev) == NULL)
+		goto error_iounmap;
+	if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE1)
+		goto error_iounmap;
+
+	dev->msi = !pci_enable_msi(dev->pdev);
+
+	dev->aac_msix[0].vector_no = 0;
+	dev->aac_msix[0].dev = dev;
+
+	if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
+			IRQF_SHARED, "aacraid", &(dev->aac_msix[0]))  < 0) {
+
+		if (dev->msi)
+			pci_disable_msi(dev->pdev);
+
+		printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
+			name, instance);
+		goto error_iounmap;
+	}
+	dev->dbg_base = pci_resource_start(dev->pdev, 2);
+	dev->dbg_base_mapped = dev->regs.src.bar1;
+	dev->dbg_size = AAC_MIN_SRC_BAR1_SIZE;
+	dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;
+
+	aac_adapter_enable_int(dev);
+
+	if (!dev->sync_mode) {
+		/*
+		 * Tell the adapter that all is configured, and it can
+		 * start accepting requests
+		 */
+		aac_src_start_adapter(dev);
+	}
+	return 0;
+
+error_iounmap:
+
+	return -1;
+}
+
+static int aac_src_wait_sync(struct aac_dev *dev, int *status)
+{
+	unsigned long start = jiffies;
+	unsigned long usecs = 0;
+	int delay = 5 * HZ;
+	int rc = 1;
+
+	while (time_before(jiffies, start+delay)) {
+		/*
+		 * Delay 5 microseconds to let Mon960 get info.
+		 */
+		udelay(5);
+
+		/*
+		 * Mon960 will set doorbell0 bit when it has completed the
+		 * command.
+		 */
+		if (aac_src_get_sync_status(dev) & OUTBOUNDDOORBELL_0) {
+			/*
+			 * Clear the doorbell.
+			 */
+			if (dev->msi_enabled)
+				aac_src_access_devreg(dev, AAC_CLEAR_SYNC_BIT);
+			else
+				src_writel(dev, MUnit.ODR_C,
+					OUTBOUNDDOORBELL_0 << SRC_ODR_SHIFT);
+			rc = 0;
+
+			break;
+		}
+
+		/*
+		 * Yield the processor in case we are slow
+		 */
+		usecs = 1 * USEC_PER_MSEC;
+		usleep_range(usecs, usecs + 50);
+	}
+	/*
+	 * Pull the synch status from Mailbox 0.
+	 */
+	if (status && !rc) {
+		status[0] = readl(&dev->IndexRegs->Mailbox[0]);
+		status[1] = readl(&dev->IndexRegs->Mailbox[1]);
+		status[2] = readl(&dev->IndexRegs->Mailbox[2]);
+		status[3] = readl(&dev->IndexRegs->Mailbox[3]);
+		status[4] = readl(&dev->IndexRegs->Mailbox[4]);
+	}
+
+	return rc;
+}
+
+/**
+ *  aac_src_soft_reset	-	perform soft reset to speed up access
+ *  @dev: device to configure
+ *
+ *  Assumptions: That the controller is in a state where we can
+ *  bring it back to life with an init struct. We can only use
+ *  fast sync commands, as the timeout is 5 seconds.
+ *
+ */
+
+static int aac_src_soft_reset(struct aac_dev *dev)
+{
+	u32 status_omr = src_readl(dev, MUnit.OMR);
+	u32 status[5];
+	int rc = 1;
+	int state = 0;
+	char *state_str[7] = {
+		"GET_ADAPTER_PROPERTIES Failed",
+		"GET_ADAPTER_PROPERTIES timeout",
+		"SOFT_RESET not supported",
+		"DROP_IO Failed",
+		"DROP_IO timeout",
+		"Check Health failed"
+	};
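+	/* state counts completed steps and indexes state_str[] on failure */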
+
+	if (status_omr == INVALID_OMR)
+		return 1;       // pcie hosed
+
+	if (!(status_omr & KERNEL_UP_AND_RUNNING))
+		return 1;       // not up and running
+
+	/*
+	 * We go into soft reset mode to allow us to handle responses
+	 */
+	dev->in_soft_reset = 1;
+	dev->msi_enabled = status_omr & AAC_INT_MODE_MSIX;
+
+	/* Get adapter properties */
+	rc = aac_adapter_sync_cmd(dev, GET_ADAPTER_PROPERTIES, 0, 0, 0,
+		0, 0, 0, status+0, status+1, status+2, status+3, status+4);
+	if (rc)
+		goto out;
+
+	state++;
+	if (aac_src_wait_sync(dev, status)) {
+		rc = 1;
+		goto out;
+	}
+
+	state++;
+	if (!(status[1] & le32_to_cpu(AAC_OPT_EXTENDED) &&
+		(status[4] & le32_to_cpu(AAC_EXTOPT_SOFT_RESET)))) {
+		rc = 2;
+		goto out;
+	}
+
+	if ((status[1] & le32_to_cpu(AAC_OPT_EXTENDED)) &&
+		(status[4] & le32_to_cpu(AAC_EXTOPT_SA_FIRMWARE)))
+		dev->sa_firmware = 1;
+
+	state++;
+	rc = aac_adapter_sync_cmd(dev, DROP_IO, 0, 0, 0, 0, 0, 0,
+		 status+0, status+1, status+2, status+3, status+4);
+
+	if (rc)
+		goto out;
+
+	state++;
+	if (aac_src_wait_sync(dev, status)) {
+		rc = 3;
+		goto out;
+	}
+
+	if (status[1])
+		dev_err(&dev->pdev->dev, "%s: %d outstanding I/O pending\n",
+			__func__, status[1]);
+
+	state++;
+	rc = aac_src_check_health(dev);
+
+out:
+	dev->in_soft_reset = 0;
+	dev->msi_enabled = 0;
+	if (rc)
+		dev_err(&dev->pdev->dev, "%s: %s status = %d", __func__,
+			state_str[state], rc);
+
+	return rc;
+}
+
+/**
+ *  aac_srcv_init	-	initialize an SRCv card
+ *  @dev: device to configure
+ *
+ */
+
+int aac_srcv_init(struct aac_dev *dev)
+{
+	unsigned long start;
+	unsigned long status;
+	int restart = 0;
+	int instance = dev->id;
+	const char *name = dev->name;
+
+	dev->a_ops.adapter_ioremap = aac_srcv_ioremap;
+	dev->a_ops.adapter_comm = aac_src_select_comm;
+
+	dev->base_size = AAC_MIN_SRCV_BAR0_SIZE;
+	if (aac_adapter_ioremap(dev, dev->base_size)) {
+		printk(KERN_WARNING "%s: unable to map adapter.\n", name);
+		goto error_iounmap;
+	}
+
+	/* Failure to reset here is an option ... */
+	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
+	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
+
+	if (dev->init_reset) {
+		dev->init_reset = false;
+		if (aac_src_soft_reset(dev)) {
+			aac_src_restart_adapter(dev, 0, IOP_HWSOFT_RESET);
+			++restart;
+		}
+	}
+
+	/*
+	 *	Check to see if flash update is running.
+	 *	Wait for the adapter to be up and running. Wait up to 5 minutes
+	 */
+	status = src_readl(dev, MUnit.OMR);
+	if (status & FLASH_UPD_PENDING) {
+		start = jiffies;
+		do {
+			status = src_readl(dev, MUnit.OMR);
+			if (time_after(jiffies, start+HZ*FWUPD_TIMEOUT)) {
+				printk(KERN_ERR "%s%d: adapter flash update failed.\n",
+					dev->name, instance);
+				goto error_iounmap;
+			}
+		} while (!(status & FLASH_UPD_SUCCESS) &&
+			 !(status & FLASH_UPD_FAILED));
+		/* Delay 10 seconds.
+		 * Because right now FW is doing a soft reset,
+		 * do not read scratch pad register at this time
+		 */
+		ssleep(10);
+	}
+	/*
+	 *	Check to see if the board panic'd while booting.
+	 */
+	status = src_readl(dev, MUnit.OMR);
+	if (status & KERNEL_PANIC) {
+		if (aac_src_restart_adapter(dev,
+			aac_src_check_health(dev), IOP_HWSOFT_RESET))
+			goto error_iounmap;
+		++restart;
+	}
+	/*
+	 *	Check to see if the board failed any self tests.
+	 */
+	status = src_readl(dev, MUnit.OMR);
+	if (status & SELF_TEST_FAILED) {
+		printk(KERN_ERR "%s%d: adapter self-test failed.\n", dev->name, instance);
+		goto error_iounmap;
+	}
+	/*
+	 *	Check to see if the monitor panic'd while booting.
+	 */
+	if (status & MONITOR_PANIC) {
+		printk(KERN_ERR "%s%d: adapter monitor panic.\n", dev->name, instance);
+		goto error_iounmap;
+	}
+
+	start = jiffies;
+	/*
+	 *	Wait for the adapter to be up and running. Wait up to 3 minutes
+	 */
+	do {
+		status = src_readl(dev, MUnit.OMR);
+		if (status == INVALID_OMR)
+			status = 0;
+
+		if ((restart &&
+		  (status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC))) ||
+		  time_after(jiffies, start+HZ*startup_timeout)) {
+			printk(KERN_ERR "%s%d: adapter kernel failed to start, init status = %lx.\n",
+					dev->name, instance, status);
+			goto error_iounmap;
+		}
+		if (!restart &&
+		  ((status & (KERNEL_PANIC|SELF_TEST_FAILED|MONITOR_PANIC)) ||
+		  time_after(jiffies, start + HZ *
+		  ((startup_timeout > 60)
+		    ? (startup_timeout - 60)
+		    : (startup_timeout / 2))))) {
+			if (likely(!aac_src_restart_adapter(dev,
+				aac_src_check_health(dev), IOP_HWSOFT_RESET)))
+				start = jiffies;
+			++restart;
+		}
+		msleep(1);
+	} while (!(status & KERNEL_UP_AND_RUNNING));
+
+	if (restart && aac_commit)
+		aac_commit = 1;
+	/*
+	 *	Fill in the common function dispatch table.
+	 */
+	dev->a_ops.adapter_interrupt = aac_src_interrupt_adapter;
+	dev->a_ops.adapter_disable_int = aac_src_disable_interrupt;
+	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;
+	dev->a_ops.adapter_notify = aac_src_notify_adapter;
+	dev->a_ops.adapter_sync_cmd = src_sync_cmd;
+	dev->a_ops.adapter_check_health = aac_src_check_health;
+	dev->a_ops.adapter_restart = aac_src_restart_adapter;
+	dev->a_ops.adapter_start = aac_src_start_adapter;
+
+	/*
+	 *	First clear out all interrupts.  Then enable the ones that we
+	 *	can handle.
+	 */
+	aac_adapter_comm(dev, AAC_COMM_MESSAGE);
+	aac_adapter_disable_int(dev);
+	src_writel(dev, MUnit.ODR_C, 0xffffffff);
+	aac_adapter_enable_int(dev);
+
+	if (aac_init_adapter(dev) == NULL)
+		goto error_iounmap;
+	if ((dev->comm_interface != AAC_COMM_MESSAGE_TYPE2) &&
+		(dev->comm_interface != AAC_COMM_MESSAGE_TYPE3))
+		goto error_iounmap;
+	if (dev->msi_enabled)
+		aac_src_access_devreg(dev, AAC_ENABLE_MSIX);
+
+	if (aac_acquire_irq(dev))
+		goto error_iounmap;
+
+	dev->dbg_base = pci_resource_start(dev->pdev, 2);
+	dev->dbg_base_mapped = dev->regs.src.bar1;
+	dev->dbg_size = AAC_MIN_SRCV_BAR1_SIZE;
+	dev->a_ops.adapter_enable_int = aac_src_enable_interrupt_message;
+
+	aac_adapter_enable_int(dev);
+
+	if (!dev->sync_mode) {
+		/*
+		 * Tell the adapter that all is configured, and it can
+		 * start accepting requests
+		 */
+		aac_src_start_adapter(dev);
+	}
+	return 0;
+
+error_iounmap:
+
+	return -1;
+}
+
+void aac_src_access_devreg(struct aac_dev *dev, int mode)
+{
+	u_int32_t val;
+
+	switch (mode) {
+	case AAC_ENABLE_INTERRUPT:
+		src_writel(dev,
+			   MUnit.OIMR,
+			   dev->OIMR = (dev->msi_enabled ?
+					AAC_INT_ENABLE_TYPE1_MSIX :
+					AAC_INT_ENABLE_TYPE1_INTX));
+		break;
+
+	case AAC_DISABLE_INTERRUPT:
+		src_writel(dev,
+			   MUnit.OIMR,
+			   dev->OIMR = AAC_INT_DISABLE_ALL);
+		break;
+
+	case AAC_ENABLE_MSIX:
+		/* set bit 6 */
+		val = src_readl(dev, MUnit.IDR);
+		val |= 0x40;
+		src_writel(dev,  MUnit.IDR, val);
+		src_readl(dev, MUnit.IDR);
+		/* unmask int. */
+		val = PMC_ALL_INTERRUPT_BITS;
+		src_writel(dev, MUnit.IOAR, val);
+		val = src_readl(dev, MUnit.OIMR);
+		src_writel(dev,
+			   MUnit.OIMR,
+			   val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0)));
+		break;
+
+	case AAC_DISABLE_MSIX:
+		/* reset bit 6 */
+		val = src_readl(dev, MUnit.IDR);
+		val &= ~0x40;
+		src_writel(dev, MUnit.IDR, val);
+		src_readl(dev, MUnit.IDR);
+		break;
+
+	case AAC_CLEAR_AIF_BIT:
+		/* set bit 5 */
+		val = src_readl(dev, MUnit.IDR);
+		val |= 0x20;
+		src_writel(dev, MUnit.IDR, val);
+		src_readl(dev, MUnit.IDR);
+		break;
+
+	case AAC_CLEAR_SYNC_BIT:
+		/* set bit 4 */
+		val = src_readl(dev, MUnit.IDR);
+		val |= 0x10;
+		src_writel(dev, MUnit.IDR, val);
+		src_readl(dev, MUnit.IDR);
+		break;
+
+	case AAC_ENABLE_INTX:
+		/* set bit 7 */
+		val = src_readl(dev, MUnit.IDR);
+		val |= 0x80;
+		src_writel(dev, MUnit.IDR, val);
+		src_readl(dev, MUnit.IDR);
+		/* unmask int. */
+		val = PMC_ALL_INTERRUPT_BITS;
+		src_writel(dev, MUnit.IOAR, val);
+		src_readl(dev, MUnit.IOAR);
+		val = src_readl(dev, MUnit.OIMR);
+		src_writel(dev, MUnit.OIMR,
+				val & (~(PMC_GLOBAL_INT_BIT2)));
+		break;
+
+	default:
+		break;
+	}
+}
+
+static int aac_src_get_sync_status(struct aac_dev *dev)
+{
+	int msix_val = 0;
+	int legacy_val = 0;
+
+	msix_val = src_readl(dev, MUnit.ODR_MSI) & SRC_MSI_READ_MASK ? 1 : 0;
+
+	if (!dev->msi_enabled) {
+		/*
+		 * If the legacy interrupt status indicates the command is not
+		 * complete, sample the MSI-X register to see if it indicates
+		 * completion; if so, switch the controller to MSI-X mode and
+		 * consider the command completed.
+		 */
+		legacy_val = src_readl(dev, MUnit.ODR_R) >> SRC_ODR_SHIFT;
+		if (!(legacy_val & 1) && msix_val)
+			dev->msi_enabled = 1;
+		return legacy_val;
+	}
+
+	return msix_val;
+}